From 5f2f9c0640d06007e078ea197c39f1968064cc96 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Fri, 3 Jan 2020 17:42:17 +0100
Subject: [PATCH 01/41] Enable flake8-logging-format

---
 datadog_checks_dev/datadog_checks/dev/plugin/tox.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/datadog_checks_dev/datadog_checks/dev/plugin/tox.py b/datadog_checks_dev/datadog_checks/dev/plugin/tox.py
index db03ce1424791..065b8767d52d0 100644
--- a/datadog_checks_dev/datadog_checks/dev/plugin/tox.py
+++ b/datadog_checks_dev/datadog_checks/dev/plugin/tox.py
@@ -63,9 +63,7 @@ def add_style_checker(config, sections, make_envconfig, reader):
                 'flake8 --config=../.flake8 .',
                 'black --check --diff .',
                 'isort --check-only --diff --recursive .',
-                'python -c "print(\'\\n[WARNING] Complying with following lint rules is recommended, '
-                'but not mandatory, yet.\')"',
-                '- flake8 --config=../.flake8 --enable-extensions=G --select=G .',  # lint `flake8-logging-format`
+                'flake8 --config=../.flake8 --enable-extensions=G --select=G .',  # lint `flake8-logging-format`
             ]
         ),
     }

From c2017c3934ff0a203aeb00e06c5bf97a755fa7d5 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 16:11:01 +0100
Subject: [PATCH 02/41] Trigger PR All

---
 datadog_checks_base/datadog_checks/__init__.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/datadog_checks_base/datadog_checks/__init__.py b/datadog_checks_base/datadog_checks/__init__.py
index 06fe3d5375970..98a8409fc27fa 100644
--- a/datadog_checks_base/datadog_checks/__init__.py
+++ b/datadog_checks_base/datadog_checks/__init__.py
@@ -1,5 +1,4 @@
 # (C) Datadog, Inc. 2018-present
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
-#
 __path__ = __import__('pkgutil').extend_path(__path__, __name__)

From 245ea7053d654ee2a551e89b7400f09c4271fcf9 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 16:43:19 +0100
Subject: [PATCH 03/41] Formatted using script

---
 couch/datadog_checks/couch/couch.py            |  4 +---
 .../datadog_checks/downloader/download.py      |  8 ++++----
 docker_daemon/test/test_docker_daemon.py       | 10 +++++-----
 ibm_mq/datadog_checks/ibm_mq/config.py         |  2 +-
 ibm_mq/datadog_checks/ibm_mq/ibm_mq.py         |  2 +-
 ibm_mq/tests/conftest.py                       |  4 ++--
 .../datadog_checks/openstack_controller/api.py |  6 +++---
 7 files changed, 17 insertions(+), 19 deletions(-)

diff --git a/couch/datadog_checks/couch/couch.py b/couch/datadog_checks/couch/couch.py
index 5c2ee6c868a3b..9c8b8713f73cd 100644
--- a/couch/datadog_checks/couch/couch.py
+++ b/couch/datadog_checks/couch/couch.py
@@ -237,9 +237,7 @@ def _build_system_metrics(self, data, tags, prefix='couchdb.erlang'):
                     if 'count' in val:
                         self.gauge("{0}.{1}.size".format(prefix, key), val['count'], queue_tags)
                     else:
-                        self.agent_check.log.debug(
-                            "Queue %s does not have a key 'count'. It will be ignored." % queue
-                        )
+                        self.agent_check.log.debug("Queue %s does not have a key 'count'. It will be ignored.", queue)
                 else:
                     self.gauge("{0}.{1}.size".format(prefix, key), val, queue_tags)
             elif key == "distribution":

diff --git a/datadog_checks_downloader/datadog_checks/downloader/download.py b/datadog_checks_downloader/datadog_checks/downloader/download.py
index 9b033e96f5c81..f0b5e23b8215d 100644
--- a/datadog_checks_downloader/datadog_checks/downloader/download.py
+++ b/datadog_checks_downloader/datadog_checks/downloader/download.py
@@ -104,7 +104,7 @@ def __download_with_tuf(self, target_relpath):
         # Either the target has not been updated...
         if not len(updated_targets):
-            logger.debug('{} has not been updated'.format(target_relpath))
+            logger.debug('%s has not been updated', target_relpath)
         # or, it has been updated, in which case...
         else:
             # First, we use TUF to download and verify the target.
@@ -113,7 +113,7 @@ def __download_with_tuf(self, target_relpath):
             assert updated_target == target
             self.__updater.download_target(updated_target, self.__targets_dir)
-            logger.info('TUF verified {}'.format(target_relpath))
+            logger.info('TUF verified %s', target_relpath)
 
         target_abspath = os.path.join(self.__targets_dir, target_relpath)
         return target_abspath, target
@@ -185,7 +185,7 @@ def __load_root_layout(self, target_relpath):
         return root_layout, root_layout_pubkeys, root_layout_params
 
     def __handle_in_toto_verification_exception(self, target_relpath, e):
-        logger.exception('in-toto failed to verify {}'.format(target_relpath))
+        logger.exception('in-toto failed to verify %s', target_relpath)
 
         if isinstance(e, LinkNotFoundError) and str(e) == RevokedDeveloper.MSG:
             raise RevokedDeveloper(target_relpath, IN_TOTO_ROOT_LAYOUT)
@@ -211,7 +211,7 @@ def __in_toto_verify(self, inspection_packet, target_relpath):
         except Exception as e:
             self.__handle_in_toto_verification_exception(target_relpath, e)
         else:
-            logger.info('in-toto verified {}'.format(target_relpath))
+            logger.info('in-toto verified %s', target_relpath)
         finally:
             # Switch back to a parent directory we control, so that we can
             # safely delete temp dir.

diff --git a/docker_daemon/test/test_docker_daemon.py b/docker_daemon/test/test_docker_daemon.py
index b1bee2c2d7af9..049340f21e7f6 100644
--- a/docker_daemon/test/test_docker_daemon.py
+++ b/docker_daemon/test/test_docker_daemon.py
@@ -82,7 +82,7 @@ def test_event_attributes_tag(self):
             "nginx:latest", detach=True, name='event-tags-test', entrypoint='/bin/false')
         log.debug('start nginx:latest with entrypoint /bin/false')
         DockerUtil().client.start(container_fail)
-        log.debug('container exited with %s' % DockerUtil().client.wait(container_fail, 1))
+        log.debug('container exited with %s', DockerUtil().client.wait(container_fail, 1))
         # Wait 1 second after exit so the event will be picked up
         from time import sleep
         sleep(1)
@@ -278,12 +278,12 @@ def setUp(self):
             self.docker_client.connect_container_to_network(cont['Id'], self.second_network)
 
         for c in self.containers:
-            log.info("Starting container: {0}".format(c))
+            log.info("Starting container: %s", c)
             self.docker_client.start(c)
 
     def tearDown(self):
         for c in self.containers:
-            log.info("Stopping container: {0}".format(c))
+            log.info("Stopping container: %s", c)
             self.docker_client.remove_container(c, force=True)
         self.docker_client.remove_network(self.second_network)
 
@@ -829,8 +829,8 @@ def test_collect_exit_code(self):
         log.debug('start nginx:latest with entrypoint /bin/false')
         self.docker_client.start(container_ok)
         self.docker_client.start(container_fail)
-        log.debug('container exited with %s' % self.docker_client.wait(container_ok, 1))
-        log.debug('container exited with %s' % self.docker_client.wait(container_fail, 1))
+        log.debug('container exited with %s', self.docker_client.wait(container_ok, 1))
+        log.debug('container exited with %s', self.docker_client.wait(container_fail, 1))
         # After the container exits, we need to wait a second so the event isn't too recent
         # when the check runs, otherwise the event is not picked up
         from time import sleep

diff --git a/ibm_mq/datadog_checks/ibm_mq/config.py b/ibm_mq/datadog_checks/ibm_mq/config.py
index 09673bf828f25..f1d6cba9e439e 100644
--- a/ibm_mq/datadog_checks/ibm_mq/config.py
+++ b/ibm_mq/datadog_checks/ibm_mq/config.py
@@ -117,7 +117,7 @@ def _compile_tag_re(self):
             try:
                 queue_tag_list.append([re.compile(regex_str), [t.strip() for t in tags.split(',')]])
             except TypeError:
-                log.warning('{} is not a valid regular expression and will be ignored'.format(regex_str))
+                log.warning('%s is not a valid regular expression and will be ignored', regex_str)
         return queue_tag_list
 
     @property

diff --git a/ibm_mq/datadog_checks/ibm_mq/ibm_mq.py b/ibm_mq/datadog_checks/ibm_mq/ibm_mq.py
index e53b7b209c134..6ff978bd89dc2 100644
--- a/ibm_mq/datadog_checks/ibm_mq/ibm_mq.py
+++ b/ibm_mq/datadog_checks/ibm_mq/ibm_mq.py
@@ -59,7 +59,7 @@ def check(self, instance):
         config.check_properly_configured()
 
         if not pymqi:
-            log.error("You need to install pymqi: {}".format(pymqiException))
+            log.error("You need to install pymqi: %s", pymqiException)
             raise errors.PymqiException("You need to install pymqi: {}".format(pymqiException))
 
         try:

diff --git a/ibm_mq/tests/conftest.py b/ibm_mq/tests/conftest.py
index 13004cb8c87d5..8a8418385bc6d 100644
--- a/ibm_mq/tests/conftest.py
+++ b/ibm_mq/tests/conftest.py
@@ -71,10 +71,10 @@ def publish():
     for i in range(10):
         try:
             message = 'Hello from Python! Message {}'.format(i)
-            log.info("sending message: {}".format(message))
+            log.info("sending message: %s", message)
             queue.put(message.encode())
         except Exception as e:
-            log.info("exception publishing: {}".format(e))
+            log.info("exception publishing: %s", e)
             queue.close()
             qmgr.disconnect()
             return

diff --git a/openstack_controller/datadog_checks/openstack_controller/api.py b/openstack_controller/datadog_checks/openstack_controller/api.py
index 63519f54021ca..b7d03cd954d94 100644
--- a/openstack_controller/datadog_checks/openstack_controller/api.py
+++ b/openstack_controller/datadog_checks/openstack_controller/api.py
@@ -300,7 +300,7 @@ def _make_request(self, url, params=None):
             else:
                 raise e
         except Exception:
-            self.logger.exception("Unexpected error contacting openstack endpoint {}".format(url))
+            self.logger.exception("Unexpected error contacting openstack endpoint %s", url)
             raise
         jresp = resp.json()
         self.logger.debug("url: %s || response: %s", url, jresp)
@@ -405,7 +405,7 @@ def get_networks(self):
             networks = self._make_request(url)
             return networks.get('networks')
         except Exception as e:
-            self.logger.warning('Unable to get the list of all network ids: {}'.format(e))
+            self.logger.warning('Unable to get the list of all network ids: %s', e)
             raise e
 
     def get_projects(self):
@@ -418,7 +418,7 @@ def get_projects(self):
             return r.get('projects', [])
 
         except Exception as e:
-            self.logger.warning('Unable to get projects: {}'.format(e))
+            self.logger.warning('Unable to get projects: %s', e)
             raise e

From b4dbbdb2bb34eb7d4c0fb10aae14bdd5c7a8c237 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 16:57:34 +0100
Subject: [PATCH 04/41] Fix lint

---
 couch/datadog_checks/couch/couch.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/couch/datadog_checks/couch/couch.py b/couch/datadog_checks/couch/couch.py
index 9c8b8713f73cd..b4e9a9533ff49 100644
--- a/couch/datadog_checks/couch/couch.py
+++ b/couch/datadog_checks/couch/couch.py
@@ -237,7 +237,9 @@ def _build_system_metrics(self, data, tags, prefix='couchdb.erlang'):
                     if 'count' in val:
                         self.gauge("{0}.{1}.size".format(prefix, key), val['count'], queue_tags)
                     else:
-                        self.agent_check.log.debug("Queue %s does not have a key 'count'. It will be ignored.", queue)
+                        self.agent_check.log.debug(
+                            "Queue %s does not have a key 'count'. It will be ignored.", queue
+                        )
                 else:
                     self.gauge("{0}.{1}.size".format(prefix, key), val, queue_tags)
             elif key == "distribution":

From 33f3c62bad8422208060d6c72e5ffeab8f538d0c Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 17:01:01 +0100
Subject: [PATCH 05/41] Fix directory

---
 directory/datadog_checks/directory/directory.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/directory/datadog_checks/directory/directory.py b/directory/datadog_checks/directory/directory.py
index 4478307c7ce86..f9523cafebdeb 100644
--- a/directory/datadog_checks/directory/directory.py
+++ b/directory/datadog_checks/directory/directory.py
@@ -133,7 +133,7 @@ def _get_stats(
                 file_stat = file_entry.stat()
 
             except OSError as ose:
-                self.warning('DirectoryCheck: could not stat file {} - {}'.format(join(root, file_entry.name), ose))
+                self.warning('DirectoryCheck: could not stat file %s - %s', join(root, file_entry.name), ose)
             else:
                 # file specific metrics
                 directory_bytes += file_stat.st_size

From f61a664f7f9b476bbeac133b1bfe06bd5e2f6668 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 17:02:52 +0100
Subject: [PATCH 06/41] Fix disk

---
 disk/datadog_checks/disk/disk.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/disk/datadog_checks/disk/disk.py b/disk/datadog_checks/disk/disk.py
index a51872bc7e8d3..ebd64cd555990 100644
--- a/disk/datadog_checks/disk/disk.py
+++ b/disk/datadog_checks/disk/disk.py
@@ -263,25 +263,25 @@ def _compile_pattern_filters(self, instance):
         device_blacklist_extras = []
         mount_point_blacklist_extras = []
 
-        deprecation_message = '`{old}` is deprecated and will be removed in 6.9. Please use `{new}` instead.'
+        deprecation_message = '`%s` is deprecated and will be removed in 6.9. Please use `%s` instead.'
 
         if 'excluded_filesystems' in instance:
             file_system_blacklist_extras.extend(
                 '{}$'.format(pattern) for pattern in instance['excluded_filesystems'] if pattern
             )
-            self.warning(deprecation_message.format(old='excluded_filesystems', new='file_system_blacklist'))
+            self.warning(deprecation_message, 'excluded_filesystems', 'file_system_blacklist')
 
         if 'excluded_disks' in instance:
             device_blacklist_extras.extend('{}$'.format(pattern) for pattern in instance['excluded_disks'] if pattern)
-            self.warning(deprecation_message.format(old='excluded_disks', new='device_blacklist'))
+            self.warning(deprecation_message, 'excluded_disks', 'device_blacklist')
 
         if 'excluded_disk_re' in instance:
             device_blacklist_extras.append(instance['excluded_disk_re'])
-            self.warning(deprecation_message.format(old='excluded_disk_re', new='device_blacklist'))
+            self.warning(deprecation_message, 'excluded_disk_re', 'device_blacklist')
 
         if 'excluded_mountpoint_re' in instance:
             mount_point_blacklist_extras.append(instance['excluded_mountpoint_re'])
-            self.warning(deprecation_message.format(old='excluded_mountpoint_re', new='mount_point_blacklist'))
+            self.warning(deprecation_message, 'excluded_mountpoint_re', 'mount_point_blacklist')
 
         # Any without valid patterns will become None
         self._file_system_whitelist = self._compile_valid_patterns(self._file_system_whitelist, casing=re.I)

From 0a7b10d10ed02b786ce1ab26d2735d5f42732341 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 17:05:20 +0100
Subject: [PATCH 07/41] Fix gitlab

---
 gitlab/datadog_checks/gitlab/gitlab.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/gitlab/datadog_checks/gitlab/gitlab.py b/gitlab/datadog_checks/gitlab/gitlab.py
index 1a29a7e6c67d4..b5f702c001714 100644
--- a/gitlab/datadog_checks/gitlab/gitlab.py
+++ b/gitlab/datadog_checks/gitlab/gitlab.py
@@ -110,7 +110,7 @@ def _check_health_endpoint(self, instance, check_type, tags):
         if url is None:
             # Simply ignore this service check if not configured
-            self.log.debug("gitlab_url not configured, service check {} skipped".format(check_type))
+            self.log.debug("gitlab_url not configured, service check %s skipped", check_type)
             return
 
         service_check_tags = self._service_check_tags(url)
@@ -121,7 +121,7 @@ def _check_health_endpoint(self, instance, check_type, tags):
         check_url = '{}/-/{}'.format(url, check_type)
 
         try:
-            self.log.debug("checking {} against {}".format(check_type, check_url))
+            self.log.debug("checking %s against %s", check_type, check_url)
             r = self.http.get(check_url)
             if r.status_code != 200:
                 self.service_check(
@@ -153,4 +153,4 @@ def _check_health_endpoint(self, instance, check_type, tags):
             raise
         else:
             self.service_check(service_check_name, OpenMetricsBaseCheck.OK, tags=service_check_tags)
-            self.log.debug("gitlab check {} succeeded".format(check_type))
+            self.log.debug("gitlab check %s succeeded", check_type)

From 73bca46a6a74c76213af9d97b2483572cb167696 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 17:11:01 +0100
Subject: [PATCH 08/41] Fix haproxy

---
 haproxy/datadog_checks/haproxy/haproxy.py | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/haproxy/datadog_checks/haproxy/haproxy.py b/haproxy/datadog_checks/haproxy/haproxy.py
index 47594107e0459..0ea01022c212e 100644
--- a/haproxy/datadog_checks/haproxy/haproxy.py
+++ b/haproxy/datadog_checks/haproxy/haproxy.py
@@ -200,7 +200,7 @@ def _collect_version_from_http(self, url):
             self.log.debug("unable to find HAProxy version info")
         else:
             version = re.search(r"HAProxy version ([^,]+)", raw_version).group(1)
-            self.log.debug(u"HAProxy version is {}".format(version))
+            self.log.debug("HAProxy version is %s", version)
             self.set_metadata('version', version)
 
     def _fetch_socket_data(self, parsed_url):
@@ -577,14 +577,13 @@ def _process_status_metric(
             try:
                 service, _, hostname, status = host_status
             except Exception:
+                service, _, status = host_status
                 if collect_status_metrics_by_host:
                     self.warning(
-                        '`collect_status_metrics_by_host` is enabled but no host info\
-                         could be extracted from HAProxy stats endpoint for {0}'.format(
-                            service
-                        )
+                        '`collect_status_metrics_by_host` is enabled but no host info could be extracted from HAProxy '
+                        'stats endpoint for %s',
+                        service,
                     )
-                service, _, status = host_status
 
             if self._is_service_excl_filtered(service, services_incl_filter, services_excl_filter):
                 continue

From 1c1ec8c3dde208af597dc141040a91bc9e9a2115 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 17:14:53 +0100
Subject: [PATCH 09/41] Fix ibm_db2

---
 ibm_db2/datadog_checks/ibm_db2/ibm_db2.py | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/ibm_db2/datadog_checks/ibm_db2/ibm_db2.py b/ibm_db2/datadog_checks/ibm_db2/ibm_db2.py
index 391e13a656652..6191a28f2a497 100644
--- a/ibm_db2/datadog_checks/ibm_db2/ibm_db2.py
+++ b/ibm_db2/datadog_checks/ibm_db2/ibm_db2.py
@@ -480,8 +480,9 @@ def query_custom(self):
                 column_type = column.get('type')
                 if not column_type:  # no cov
                     self.log.error(
-                        'Column field `type` is required for column `{}` '
-                        'of metric_prefix `{}`'.format(name, metric_prefix)
+                        'Column field `type` is required for column `%s` ' 'of metric_prefix `%s`',
+                        name,
+                        metric_prefix,
                     )
                     break
 
@@ -490,16 +491,20 @@ def query_custom(self):
                 else:
                     if not hasattr(self, column_type):
                         self.log.error(
-                            'Invalid submission method `{}` for metric column `{}` of '
-                            'metric_prefix `{}`'.format(column_type, name, metric_prefix)
+                            'Invalid submission method `%s` for metric column `%s` of ' 'metric_prefix `%s`',
+                            column_type,
+                            name,
+                            metric_prefix,
                         )
                         break
                     try:
                         metric_info.append(('{}.{}'.format(metric_prefix, name), float(value), column_type))
                     except (ValueError, TypeError):  # no cov
                         self.log.error(
-                            'Non-numeric value `{}` for metric column `{}` of '
-                            'metric_prefix `{}`'.format(value, name, metric_prefix)
+                            'Non-numeric value `%s` for metric column `%s` of ' 'metric_prefix `%s`',
+                            value,
+                            name,
+                            metric_prefix,
                         )
                         break

From a80467b24f351b88423253297b27350ebc234a96 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 17:22:11 +0100
Subject: [PATCH 10/41] Fix kafka_consumer

---
 .../datadog_checks/kafka_consumer/kafka_consumer.py | 10 +++++-----
 .../datadog_checks/kafka_consumer/legacy_0_10_2.py  |  8 ++++----
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/kafka_consumer/datadog_checks/kafka_consumer/kafka_consumer.py b/kafka_consumer/datadog_checks/kafka_consumer/kafka_consumer.py
index d4879b584a78c..7bf96ad51d951 100644
--- a/kafka_consumer/datadog_checks/kafka_consumer/kafka_consumer.py
+++ b/kafka_consumer/datadog_checks/kafka_consumer/kafka_consumer.py
@@ -102,11 +102,11 @@ def check(self, instance):
         total_contexts = len(self._consumer_offsets) + len(self._highwater_offsets)
         if total_contexts > self._context_limit:
             self.warning(
-                """Discovered {} metric contexts - this exceeds the maximum number of {} contexts permitted by the
+                """Discovered %s metric contexts - this exceeds the maximum number of %s contexts permitted by the
                 check. Please narrow your target by specifying in your kafka_consumer.yaml the consumer groups, topics
-                and partitions you wish to monitor.""".format(
-                    total_contexts, self._context_limit
-                )
+                and partitions you wish to monitor.""",
+                total_contexts,
+                self._context_limit,
             )
 
         # Report the metics
@@ -145,7 +145,7 @@ def _create_kafka_admin_client(self):
             ssl_crlfile=self.instance.get('ssl_crlfile'),
             ssl_password=self.instance.get('ssl_password'),
         )
-        self.log.debug("KafkaAdminClient api_version: {}".format(kafka_admin_client.config['api_version']))
+        self.log.debug("KafkaAdminClient api_version: %s", kafka_admin_client.config['api_version'])
         # Force initial population of the local cluster metadata cache
         kafka_admin_client._client.poll(future=kafka_admin_client._client.cluster.request_update())
         if kafka_admin_client._client.cluster.topics(exclude_internal_topics=False) is None:

diff --git a/kafka_consumer/datadog_checks/kafka_consumer/legacy_0_10_2.py b/kafka_consumer/datadog_checks/kafka_consumer/legacy_0_10_2.py
index 98c1c129821d5..d0e5b4c55d35e 100644
--- a/kafka_consumer/datadog_checks/kafka_consumer/legacy_0_10_2.py
+++ b/kafka_consumer/datadog_checks/kafka_consumer/legacy_0_10_2.py
@@ -109,11 +109,11 @@ def check(self, instance):
         )
         if total_contexts > self._context_limit:
             self.warning(
-                """Discovered {} metric contexts - this exceeds the maximum number of {} contexts permitted by the
+                """Discovered %s metric contexts - this exceeds the maximum number of %s contexts permitted by the
                 check. Please narrow your target by specifying in your kafka_consumer.yaml the consumer groups, topics
-                and partitions you wish to monitor.""".format(
-                    total_contexts, self._context_limit
-                )
+                and partitions you wish to monitor.""",
+                total_contexts,
+                self._context_limit,
             )
 
         # Report the metics

From cc0fdf2901f4813cf866fb152f8e36d64b359dac Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 17:25:55 +0100
Subject: [PATCH 11/41] Fix kong

---
 kong/datadog_checks/kong/kong.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kong/datadog_checks/kong/kong.py b/kong/datadog_checks/kong/kong.py
index 7e60bacb80ca2..96d0c4c18a85e 100644
--- a/kong/datadog_checks/kong/kong.py
+++ b/kong/datadog_checks/kong/kong.py
@@ -38,9 +38,9 @@ def _fetch_data(self, instance):
         service_check_tags = ['kong_host:%s' % host, 'kong_port:%s' % port] + tags
 
         try:
-            self.log.debug(u"Querying URL: {0}".format(url))
+            self.log.debug("Querying URL: %s", url)
             response = requests.get(url, headers=headers(self.agentConfig), verify=ssl_validation)
-            self.log.debug(u"Kong status `response`: {0}".format(response))
+            self.log.debug("Kong status `response`: %s", response)
             response.raise_for_status()
         except Exception:
             self.service_check(service_check_name, Kong.CRITICAL, tags=service_check_tags)

From 9829680b5002fccf89f42637493b79f1e7710108 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 17:44:58 +0100
Subject: [PATCH 12/41] Fix kubelet

---
 kubelet/datadog_checks/kubelet/cadvisor.py   | 4 ++--
 kubelet/datadog_checks/kubelet/prometheus.py | 5 +++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/kubelet/datadog_checks/kubelet/cadvisor.py b/kubelet/datadog_checks/kubelet/cadvisor.py
index 2c9176ac8c4ec..8b5b55ab5b320 100644
--- a/kubelet/datadog_checks/kubelet/cadvisor.py
+++ b/kubelet/datadog_checks/kubelet/cadvisor.py
@@ -107,7 +107,7 @@ def _update_metrics(self, instance, cadvisor_url, pod_list, pod_list_utils):
             try:
                 self._update_container_metrics(instance, subcontainer, pod_list, pod_list_utils)
             except Exception as e:
-                self.log.error("Unable to collect metrics for container: {0} ({1})".format(c_id, e))
+                self.log.error("Unable to collect metrics for container: %s (%s)", c_id, e)
 
     def _publish_raw_metrics(self, metric, dat, tags, is_pod, depth=0):
         """
@@ -180,7 +180,7 @@ def _update_container_metrics(self, instance, subcontainer, pod_list, pod_list_u
             )
         )
         if pod_list_utils.is_excluded(cid):
-            self.log.debug("Filtering out " + cid)
+            self.log.debug("Filtering out %s", cid)
             return
 
         tags = tagger.tag(replace_container_rt_prefix(cid), tagger.HIGH) or []

diff --git a/kubelet/datadog_checks/kubelet/prometheus.py b/kubelet/datadog_checks/kubelet/prometheus.py
index 12d9bc51bdac0..9b910c330b15c 100644
--- a/kubelet/datadog_checks/kubelet/prometheus.py
+++ b/kubelet/datadog_checks/kubelet/prometheus.py
@@ -439,8 +439,9 @@ def _process_limit_metric(self, m_name, metric, cache, scraper_config, pct_m_nam
                 self.gauge(pct_m_name, float(usage / float(limit)), tags)
             else:
                 self.log.debug(
-                    "No corresponding usage found for metric %s and "
-                    "container %s, skipping usage_pct for now." % (pct_m_name, c_name)
+                    "No corresponding usage found for metric %s and container %s, skipping usage_pct " "for now.",
+                    pct_m_name,
+                    c_name,
                 )
 
     def container_cpu_usage_seconds_total(self, metric, scraper_config):

From 4b537b007584a8b23970b3bbd441d07dbd32dee3 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 17:56:08 +0100
Subject: [PATCH 13/41] Fix mapreduce

---
 mapreduce/datadog_checks/mapreduce/mapreduce.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mapreduce/datadog_checks/mapreduce/mapreduce.py b/mapreduce/datadog_checks/mapreduce/mapreduce.py
index 82dc7605c6834..ab4685c0d24fc 100644
--- a/mapreduce/datadog_checks/mapreduce/mapreduce.py
+++ b/mapreduce/datadog_checks/mapreduce/mapreduce.py
@@ -130,8 +130,8 @@ def check(self, instance):
         cluster_name = instance.get('cluster_name')
         if cluster_name is None:
             self.warning(
-                "The cluster_name must be specified in the instance configuration, "
-                "defaulting to '{}'".format(self.DEFAULT_CLUSTER_NAME)
+                "The cluster_name must be specified in the instance configuration, defaulting to '%s'",
+                self.DEFAULT_CLUSTER_NAME,
             )
             cluster_name = self.DEFAULT_CLUSTER_NAME

From ab73f559e4e23bb7d4c0fb10aae14bdd5c7a8c237 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 17:57:58 +0100
Subject: [PATCH 14/41] Fix mesos_master

---
 mesos_master/datadog_checks/mesos_master/mesos_master.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/mesos_master/datadog_checks/mesos_master/mesos_master.py b/mesos_master/datadog_checks/mesos_master/mesos_master.py
index 6f8b30af5e9fa..754a579755415 100644
--- a/mesos_master/datadog_checks/mesos_master/mesos_master.py
+++ b/mesos_master/datadog_checks/mesos_master/mesos_master.py
@@ -178,7 +178,7 @@ def _get_json(self, url, failure_expected=False, tags=None):
             msg = str(e)
             status = AgentCheck.CRITICAL
         finally:
-            self.log.debug('Request to url : {0}, timeout: {1}, message: {2}'.format(url, timeout, msg))
+            self.log.debug('Request to url : %s, timeout: %s, message: %s', url, timeout, msg)
             self._send_service_check(url, status, failure_expected=failure_expected, tags=tags, message=msg)
 
         if response.encoding is None:
@@ -210,9 +210,7 @@ def _get_master_state(self, url, tags):
             # Mesos version < 0.25
            old_endpoint = endpoint + '.json'
            self.log.info(
-                'Unable to fetch state from {0}. Retrying with the deprecated endpoint: {1}.'.format(
-                    endpoint, old_endpoint
-                )
+                'Unable to fetch state from %s. Retrying with the deprecated endpoint: %s.', endpoint, old_endpoint
            )
            master_state = self._get_json(old_endpoint, tags=tags)
        return master_state

From 8bccdad420897d22cf8be0900b4b5fe227bf265b Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 17:59:30 +0100
Subject: [PATCH 15/41] Fix mysql

---
 mysql/datadog_checks/mysql/mysql.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/mysql/datadog_checks/mysql/mysql.py b/mysql/datadog_checks/mysql/mysql.py
index 4a8bc67b658e5..09ce27f95dad8 100644
--- a/mysql/datadog_checks/mysql/mysql.py
+++ b/mysql/datadog_checks/mysql/mysql.py
@@ -953,11 +953,8 @@ def _get_stats_from_innodb_status(self, db):
             with closing(db.cursor()) as cursor:
                 cursor.execute("SHOW /*!50000 ENGINE*/ INNODB STATUS")
         except (pymysql.err.InternalError, pymysql.err.OperationalError, pymysql.err.NotSupportedError) as e:
-            self.warning(
-                "Privilege error or engine unavailable accessing the INNODB status \
-                tables (must grant PROCESS): %s"
-                % str(e)
-            )
+            self.warning("Privilege error or engine unavailable accessing the INNODB status tables "
+                         "(must grant PROCESS): %s", e)
             return {}
 
         if cursor.rowcount < 1:

From 14492103e6416ad9747a029b397b3087d187f814 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 18:00:18 +0100
Subject: [PATCH 16/41] Fix mysql

---
 mysql/datadog_checks/mysql/mysql.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mysql/datadog_checks/mysql/mysql.py b/mysql/datadog_checks/mysql/mysql.py
index 09ce27f95dad8..031ff47634bf0 100644
--- a/mysql/datadog_checks/mysql/mysql.py
+++ b/mysql/datadog_checks/mysql/mysql.py
@@ -953,8 +953,10 @@ def _get_stats_from_innodb_status(self, db):
             with closing(db.cursor()) as cursor:
                 cursor.execute("SHOW /*!50000 ENGINE*/ INNODB STATUS")
         except (pymysql.err.InternalError, pymysql.err.OperationalError, pymysql.err.NotSupportedError) as e:
-            self.warning("Privilege error or engine unavailable accessing the INNODB status tables "
-                         "(must grant PROCESS): %s", e)
+            self.warning(
+                "Privilege error or engine unavailable accessing the INNODB status tables " "(must grant PROCESS): %s",
+                e,
+            )
             return {}
 
         if cursor.rowcount < 1:

From ffcd0a35b78b4244bd3066bf5358c973b0a1f027 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 18:05:06 +0100
Subject: [PATCH 17/41] Fix nginx

---
 nginx/datadog_checks/nginx/nginx.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/nginx/datadog_checks/nginx/nginx.py b/nginx/datadog_checks/nginx/nginx.py
index e56d123c9f2a9..0043100681b25 100644
--- a/nginx/datadog_checks/nginx/nginx.py
+++ b/nginx/datadog_checks/nginx/nginx.py
@@ -85,8 +85,8 @@ def check(self, instance):
             # for unpaid versions
             self._set_version_metadata(version)
 
-            self.log.debug(u"Nginx status `response`: {}".format(response))
-            self.log.debug(u"Nginx status `content_type`: {}".format(content_type))
+            self.log.debug("Nginx status `response`: %s", response)
+            self.log.debug("Nginx status `content_type`: %s", content_type)
 
             if content_type.startswith('application/json'):
                 metrics = self.parse_json(response, tags)
@@ -100,7 +100,7 @@ def check(self, instance):
             # since we can't get everything in one place anymore.
             for endpoint, nest in chain(iteritems(PLUS_API_ENDPOINTS), iteritems(PLUS_API_STREAM_ENDPOINTS)):
                 response = self._get_plus_api_data(url, plus_api_version, endpoint, nest)
-                self.log.debug(u"Nginx Plus API version {} `response`: {}".format(plus_api_version, response))
+                self.log.debug("Nginx Plus API version %s `response`: %s", plus_api_version, response)
                 metrics.extend(self.parse_json(response, tags))
 
         funcs = {'gauge': self.gauge, 'rate': self.rate, 'count': self.monotonic_count}
@@ -141,7 +141,7 @@ def check(self, instance):
                     self._set_version_metadata(value)
 
             except Exception as e:
-                self.log.error(u'Could not submit metric: %s: %s' % (repr(row), str(e)))
+                self.log.error('Could not submit metric: %s: %s', repr(row), e)
 
     @classmethod
     def _get_instance_params(cls, instance):
@@ -176,7 +176,7 @@ def _perform_service_check(self, instance, url):
         service_check_name = 'nginx.can_connect'
         service_check_tags = ['host:%s' % nginx_host, 'port:%s' % nginx_port] + custom_tags
         try:
-            self.log.debug(u"Querying URL: {}".format(url))
+            self.log.debug("Querying URL: %s", url)
             r = self._perform_request(url)
         except Exception:
             self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
@@ -201,7 +201,7 @@ def _get_plus_api_data(self, api_url, plus_api_version, endpoint, nest):
         url = "/".join([api_url, plus_api_version, endpoint])
         payload = {}
         try:
-            self.log.debug(u"Querying URL: {}".format(url))
+            self.log.debug("Querying URL: %s", url)
             r = self._perform_request(url)
             payload = self._nest_payload(nest, r.json())
         except Exception as e:
@@ -220,7 +220,7 @@ def _set_version_metadata(self, version):
                 version = version.split('/')[1]
             self.set_metadata('version', version)
 
-            self.log.debug(u"Nginx version `server`: {}".format(version))
+            self.log.debug("Nginx version `server`: %s", version)
         else:
             self.log.warning(u"could not retrieve nginx version info")

From 2f738e5098d1297f5e44b74360482bf1169b319f Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 18:08:56 +0100
Subject: [PATCH 18/41] Fix openstack

---
 .../datadog_checks/openstack/openstack.py | 24 +++++++++----------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/openstack/datadog_checks/openstack/openstack.py b/openstack/datadog_checks/openstack/openstack.py
index b79374e27e0ff..b05ab5dd84cac 100644
--- a/openstack/datadog_checks/openstack/openstack.py
+++ b/openstack/datadog_checks/openstack/openstack.py
@@ -698,7 +698,7 @@ def get_network_stats(self, tags):
         if not network_ids:
             self.warning(
                 "Your check is not configured to monitor any networks.\n"
-                + "Please list `network_ids` under your init_config"
+                "Please list `network_ids` under your init_config"
             )
 
         for nid in network_ids:
@@ -779,7 +779,7 @@ def get_all_hypervisor_ids(self, filter_by_host=None):
             if not self.init_config.get("hypervisor_ids"):
                 self.warning(
                     "Nova API v2 requires admin privileges to index hypervisors. "
-                    + "Please specify the hypervisor you wish to monitor under the `hypervisor_ids` section"
+                    "Please specify the hypervisor you wish to monitor under the `hypervisor_ids` section"
                 )
                 return []
            return self.init_config.get("hypervisor_ids")
@@ -832,7 +832,7 @@ def get_stats_for_single_hypervisor(self, hyp_id, instance, host_tags=None, cust
         try:
             uptime = self.get_uptime_for_single_hypervisor(hyp['id'])
         except Exception as e:
-            self.warning('Unable to get uptime for hypervisor {0}: {1}'.format(hyp['id'], str(e)))
+            self.warning('Unable to get uptime for hypervisor %s: %s', hyp['id'], e)
             uptime = {}
 
         hyp_state = hyp.get('state', None)
@@ -1008,7 +1008,7 @@ def _is_valid_metric(label):
 
         project_name = project.get('name')
 
-        self.log.debug("Collecting metrics for project. name: {0} id: {1}".format(project_name, project['id']))
+        self.log.debug("Collecting metrics for project. name: %s id: %s", project_name, project['id'])
 
         url = '{0}/limits'.format(self.get_nova_endpoint())
         headers = {'X-Auth-Token': self.get_auth_token()}
@@ -1124,9 +1124,9 @@ def ensure_auth_scope(self, instance):
             )
         except KeystoneUnreachable as e:
             self.warning(
-                "The agent could not contact the specified identity server at %s . \
-                Are you sure it is up at that address?"
-                % self.init_config.get("keystone_server_url")
+                "The agent could not contact the specified identity server at %s . "
+                "Are you sure it is up at that address?",
+                self.init_config.get("keystone_server_url"),
             )
             self.log.debug("Problem grabbing auth token: %s", e)
             self.service_check(
@@ -1250,8 +1250,8 @@ def check(self, instance):
                     self.get_stats_for_single_hypervisor(hyp, instance, host_tags=host_tags, custom_tags=custom_tags)
             else:
                 self.warning(
-                    "Couldn't get hypervisor to monitor for host: %s"
-                    % self.get_my_hostname(split_hostname_on_first_period=split_hostname_on_first_period)
+                    "Couldn't get hypervisor to monitor for host: %s",
+                    self.get_my_hostname(split_hostname_on_first_period=split_hostname_on_first_period),
                 )
 
             if projects:
@@ -1276,9 +1276,9 @@ def check(self, instance):
                 elif isinstance(e, IncompleteIdentity):
                     self.warning(
                         "Please specify the user via the `user` variable in your init_config.\n"
-                        + "This is the user you would use to authenticate with Keystone v3 via password auth.\n"
-                        + "The user should look like:"
-                        + "{'password': 'my_password', 'name': 'my_name', 'domain': {'id': 'my_domain_id'}}"
+                        "This is the user you would use to authenticate with Keystone v3 via password auth.\n"
+                        "The user should look like: "
+                        "{'password': 'my_password', 'name': 'my_name', 'domain': {'id': 'my_domain_id'}}"
                     )
                 else:
                     self.warning("Configuration Incomplete! Check your openstack.yaml file")

From 3256d06fba37cc0e78d38c792b11206950ebb830 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 18:27:47 +0100
Subject: [PATCH 19/41] Fix oracle

---
 oracle/datadog_checks/oracle/oracle.py | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/oracle/datadog_checks/oracle/oracle.py b/oracle/datadog_checks/oracle/oracle.py
index c60a0c4635567..901087db49a3b 100644
--- a/oracle/datadog_checks/oracle/oracle.py
+++ b/oracle/datadog_checks/oracle/oracle.py
@@ -186,8 +186,9 @@ def _get_custom_metrics(self, con, custom_queries, global_tags):
                     column_type = column.get('type')
                     if not column_type:
                         self.log.error(
-                            'column field `type` is required for column `{}` '
-                            'of metric_prefix `{}`'.format(name, metric_prefix)
+                            'column field `type` is required for column `%s` ' 'of metric_prefix `%s`',
+                            name,
+                            metric_prefix,
                         )
                         break
 
@@ -196,16 +197,20 @@ def _get_custom_metrics(self, con, custom_queries, global_tags):
                     else:
                         if not hasattr(self, column_type):
                             self.log.error(
-                                'invalid submission method `{}` for column `{}` of '
-                                'metric_prefix `{}`'.format(column_type, name, metric_prefix)
+                                'invalid submission method `%s` for column `%s` "' '"of metric_prefix `%s`',
+                                column_type,
+                                name,
+                                metric_prefix,
                             )
                             break
                        try:
                            metric_info.append(('{}.{}'.format(metric_prefix, name), float(value), column_type))
                        except (ValueError, TypeError):
                            self.log.error(
-                                'non-numeric value `{}` for metric column `{}` of '
-                                'metric_prefix `{}`'.format(value, name, metric_prefix)
+                                'non-numeric value `%s` for metric column `%s` ' 'of metric_prefix `%s`',
+                                value,
+                                name,
+                                metric_prefix,
                            )
                            break

From d5a2eb1688bbb2a4c8b28de6fe7ccc7499ce4ae6 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 18:32:21 +0100
Subject: [PATCH 20/41] Fix postfix

---
 postfix/datadog_checks/postfix/postfix.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/postfix/datadog_checks/postfix/postfix.py b/postfix/datadog_checks/postfix/postfix.py
index 8586b1c738569..d0f9bbb30cb55 100644
--- a/postfix/datadog_checks/postfix/postfix.py
+++ b/postfix/datadog_checks/postfix/postfix.py
@@ -100,7 +100,7 @@ def _get_config(self, instance):
         if not self.init_config.get('postqueue', False):
             self.log.debug('postqueue : get_config')
-            self.log.debug('postqueue: {}'.format(self.init_config.get('postqueue', False)))
+            self.log.debug('postqueue: %s', self.init_config.get('postqueue', False))
             if not queues or not directory:
                 raise Exception('using sudo: missing required yaml config entry')
         else:

From e45765ba306e8c7b90e9ae12b9022eea1fe0a978 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 18:33:59 +0100
Subject: [PATCH 21/41] Fix process

---
 process/datadog_checks/process/process.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/process/datadog_checks/process/process.py b/process/datadog_checks/process/process.py
index c99268495c72d..c395ec3c84630 100644
--- a/process/datadog_checks/process/process.py
+++ b/process/datadog_checks/process/process.py
@@ -361,7 +361,8 @@ def check(self, instance):
             self.warning(
                 'The `procfs_path` defined in `process.yaml is different from the one defined in '
                 '`datadog.conf` This is currently not supported by the Agent. Defaulting to the '
-                'value defined in `datadog.conf`:{}'.format(psutil.PROCFS_PATH)
+                'value defined in `datadog.conf`: %s',
+                psutil.PROCFS_PATH,
             )
         elif self._deprecated_init_procfs:
             self.warning(

From 4c88b8202f5d2a08aa53cc2a3cb4be45667d2e07 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 18:35:50 +0100
Subject: [PATCH 22/41] Fix rabbitmq

---
 rabbitmq/datadog_checks/rabbitmq/rabbitmq.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/rabbitmq/datadog_checks/rabbitmq/rabbitmq.py b/rabbitmq/datadog_checks/rabbitmq/rabbitmq.py
index a149ef88101cb..d7cc40829e553 100644
--- a/rabbitmq/datadog_checks/rabbitmq/rabbitmq.py
+++ b/rabbitmq/datadog_checks/rabbitmq/rabbitmq.py
@@ -210,9 +210,9 @@ def _collect_metadata(self, base_url):
         version = str(overview_response['rabbitmq_version'])
         if version:
             self.set_metadata('version', version)
-            self.log.debug(u"found rabbitmq version {}".format(version))
+            self.log.debug("found rabbitmq version %s", version)
         else:
-            self.log.warning(u"could not retrieve rabbitmq version information")
+            self.log.warning("could not retrieve rabbitmq version information")
 
     def _get_vhosts(self, instance, base_url):
         vhosts = instance.get('vhosts')

From af15871eb1a6e6b00ac9f830780c9c810ae7f098 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 18:40:11 +0100
Subject: [PATCH 23/41] Fix redisdb

---
 redisdb/datadog_checks/redisdb/redisdb.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/redisdb/datadog_checks/redisdb/redisdb.py b/redisdb/datadog_checks/redisdb/redisdb.py
index e19c46e704bc1..c8e324b0ebc6f 100644
--- a/redisdb/datadog_checks/redisdb/redisdb.py
+++ b/redisdb/datadog_checks/redisdb/redisdb.py
@@ -411,7 +411,7 @@ def _check_slowlog(self, instance, custom_tags):
             max_slow_entries = int(conn.config_get(MAX_SLOW_ENTRIES_KEY)[MAX_SLOW_ENTRIES_KEY])
             if max_slow_entries > DEFAULT_MAX_SLOW_ENTRIES:
                 self.warning(
-                    "Redis {0} is higher than {1}. Defaulting to {1}. "
+                    "Redis {0} is higher than {1}. Defaulting to {1}. "  # noqa: G001
                     "If you need a higher value, please set {0} in your check config".format(
                         MAX_SLOW_ENTRIES_KEY, DEFAULT_MAX_SLOW_ENTRIES
                     )

From 87ab89955b91b30e2c6fcebda316cdec4bc3d96f Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 19:10:47 +0100
Subject: [PATCH 24/41] Fix gitlab_runner

---
 gitlab_runner/datadog_checks/gitlab_runner/gitlab_runner.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gitlab_runner/datadog_checks/gitlab_runner/gitlab_runner.py b/gitlab_runner/datadog_checks/gitlab_runner/gitlab_runner.py
index 224b7963a3889..352939c3d381e 100644
--- a/gitlab_runner/datadog_checks/gitlab_runner/gitlab_runner.py
+++ b/gitlab_runner/datadog_checks/gitlab_runner/gitlab_runner.py
@@ -108,7 +108,7 @@ def _check_connectivity_to_master(self, instance, tags):
         service_check_tags.extend(tags)
 
         try:
-            self.log.debug("checking connectivity against {}".format(url))
+            self.log.debug("checking connectivity against %s", url)
             r = self.http.get(url)
             if r.status_code != 200:
                 self.service_check(

From d699d434d28e49872cd921c1e81632369362554b Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 19:19:30 +0100
Subject: [PATCH 25/41] Fix spark

---
 spark/datadog_checks/spark/spark.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/spark/datadog_checks/spark/spark.py b/spark/datadog_checks/spark/spark.py
index b96a50068a9b4..336884f46b8c2 100644
--- a/spark/datadog_checks/spark/spark.py
+++ b/spark/datadog_checks/spark/spark.py
@@ -245,8 +245,8 @@ def _get_running_apps(self, instance):
         cluster_mode = instance.get(SPARK_CLUSTER_MODE)
         if cluster_mode is None:
             self.log.warning(
-                'The value for `spark_cluster_mode` was not set in the configuration. '
-                'Defaulting to "%s"' % SPARK_YARN_MODE
+                'The value for `spark_cluster_mode` was not set in the configuration. Defaulting to "%s"',
+                SPARK_YARN_MODE,
             )
             cluster_mode = SPARK_YARN_MODE

From 4c20b8b52e9e10955fd2d760e0afe9ed012779ae Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 19:29:50 +0100
Subject: [PATCH 26/41] Fix sqlserver

---
 sqlserver/datadog_checks/sqlserver/sqlserver.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/sqlserver/datadog_checks/sqlserver/sqlserver.py b/sqlserver/datadog_checks/sqlserver/sqlserver.py
index 6a2559b46c17b..ed38224f9d7d7 100644
--- a/sqlserver/datadog_checks/sqlserver/sqlserver.py
+++ b/sqlserver/datadog_checks/sqlserver/sqlserver.py
@@ -219,26 +219,26 @@ def _make_metric_list_to_collect(self, instance, custom_metrics):
             except SQLConnectionError:
                 raise
             except Exception:
-                self.log.warning("Can't load the metric {}, ignoring".format(name), exc_info=True)
+                self.log.warning("Can't load the metric %s, ignoring", name, exc_info=True)
                 continue
 
         # Load any custom metrics from conf.d/sqlserver.yaml
         for row in custom_metrics:
             db_table = row.get('table', DEFAULT_PERFORMANCE_TABLE)
             if db_table not in self.valid_tables:
-                self.log.error('{} has an invalid table name: {}'.format(row['name'], db_table))
+                self.log.error('%s has an invalid table name: %s', row['name'], db_table)
                 continue
 
             if db_table == DEFAULT_PERFORMANCE_TABLE:
                 user_type = row.get('type')
                 if user_type is not None and user_type not in VALID_METRIC_TYPES:
-                    self.log.error('{} has an invalid metric type: {}'.format(row['name'], user_type))
+                    self.log.error('%s has an invalid metric type: %s', row['name'], user_type)
                 sql_type = None
                 try:
                     if user_type is None:
                         sql_type, base_name = self.get_sql_type(instance, row['counter_name'])
                 except Exception:
-                    self.log.warning("Can't load the metric {}, ignoring".format(row['name']), exc_info=True)
+                    self.log.warning("Can't load the metric %s, ignoring", row['name'], exc_info=True)
                     continue
 
                 metrics_to_collect.append(
@@ -258,7 +258,7 @@ def _make_metric_list_to_collect(self, instance, custom_metrics):
         wait_stat_metrics = []
         vfs_metrics = []
         clerk_metrics = []
-        self.log.debug("metrics to collect %s", str(metrics_to_collect))
+        self.log.debug("metrics to collect %s", metrics_to_collect)
         for m in metrics_to_collect:
             if type(m) is SqlSimpleMetric:
                 self.log.debug("Adding simple metric %s", m.sql_name)
@@ -323,7 +323,7 @@ def _get_connector(self, instance):
                 self.log.warning("Invalid database connector %s using default %s", connector, self.connector)
                 connector = self.connector
             else:
-                self.log.debug("Overriding default connector for {} with {}".format(instance['host'], connector))
+                self.log.debug("Overriding default connector for %s with %s", instance['host'], connector)
         return connector
 
     def _get_adoprovider(self, instance):

From 818b78f97e387e683a57481506f5b17f3e657567 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 19:30:41 +0100
Subject: [PATCH 27/41] Fix teamcity

---
 teamcity/datadog_checks/teamcity/teamcity.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/teamcity/datadog_checks/teamcity/teamcity.py b/teamcity/datadog_checks/teamcity/teamcity.py
index 0baf7517bbd4a..6e5c66192ccfb 100644
--- a/teamcity/datadog_checks/teamcity/teamcity.py
+++ b/teamcity/datadog_checks/teamcity/teamcity.py
@@ -63,7 +63,7 @@ def _initialize_if_required(self, instance_name, server, build_conf, basic_http_
             self.last_build_ids[instance_name] = last_build_id
 
     def _build_and_send_event(self, new_build, instance_name, is_deployment, host, tags):
-        self.log.debug("Found new build with id {}, saving and alerting.".format(new_build["id"]))
+        self.log.debug("Found new build with id %s, saving and alerting.", new_build["id"])
         self.last_build_ids[instance_name] = new_build["id"]
 
         event_dict = {"timestamp": int(time.time()), "source_type_name": "teamcity", "host": host, "tags": []}

From aa72139d51462069addcb315c43bc6c1d3c8e4ee Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 19:33:32 +0100
Subject: [PATCH 28/41] Fix tls

---
 tls/datadog_checks/tls/tls.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tls/datadog_checks/tls/tls.py b/tls/datadog_checks/tls/tls.py
index 32b8aadfdad92..13954e3f31d55 100644
--- a/tls/datadog_checks/tls/tls.py
+++ b/tls/datadog_checks/tls/tls.py
@@ -292,11 +292,11 @@ def create_connection(self):
             if err is not None:
                 raise err
             else:
-                raise socket.error('No valid addresses found, try checking your IPv6 connectivity')
+                raise socket.error('No valid addresses found, try checking your IPv6 connectivity')  # noqa: G
         except socket.gaierror as e:
             err_code, message = e.args
             if err_code == socket.EAI_NODATA or err_code == socket.EAI_NONAME:
-                raise socket.error('Unable to resolve host, check your DNS: {}'.format(message))
+                raise socket.error('Unable to resolve host, check your DNS: {}'.format(message))  # noqa: G
 
         raise

From 1c822ad91adced17a9aed43e633275f618977bbb Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 19:36:00 +0100
Subject: [PATCH 29/41] Fix twemproxy

---
 twemproxy/datadog_checks/twemproxy/twemproxy.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/twemproxy/datadog_checks/twemproxy/twemproxy.py b/twemproxy/datadog_checks/twemproxy/twemproxy.py
index 66dc811114736..c846ddc145c87 100644
--- a/twemproxy/datadog_checks/twemproxy/twemproxy.py
+++ b/twemproxy/datadog_checks/twemproxy/twemproxy.py
@@ -84,10 +84,10 @@ def check(self, instance):
         tags = instance.get('tags', [])
 
         response = self._get_data(instance)
-        self.log.debug(u"Twemproxy `response`: {0}".format(response))
+        self.log.debug("Twemproxy `response`: %s", response)
 
         if not response:
-            self.log.warning(u"No response received from twemproxy.")
+            self.log.warning("No response received from twemproxy.")
             return
 
         metrics = Twemproxy.parse_json(response, tags)
@@ -99,7 +99,7 @@ def check(self, instance):
             else:
                 self.rate(name, value, tags)
             except Exception as e:
-                self.log.error(u'Could not submit metric: %s: %s', repr(row), str(e))
+                self.log.error('Could not submit metric: %s: %s', repr(row), e)
 
     def _get_data(self, instance):
         host = instance.get('host')
@@ -114,7 +114,7 @@ def _get_data(self, instance):
         try:
             addrs = socket.getaddrinfo(host, port, 0, 0, socket.IPPROTO_TCP)
         except socket.gaierror as e:
-            self.log.warning("unable to retrieve address info for %s:%s - %s", host, port, e)
+            self.log.warning("Unable to retrieve address info for %s:%s - %s", host, port, e)
             self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags)
             return None
 
@@ -125,7 +125,7 @@ def _get_data(self, instance):
                 client = socket.socket(*addr[0:3])
                 client.connect(addr[-1])
 
-                self.log.debug(u"Querying: {0}:{1}".format(host, port))
+                self.log.debug("Querying: %s:%s", host, port)
                 while 1:
                     data = ensure_unicode(client.recv(1024))
                     if not data:
@@ -135,7 +135,7 @@ def _get_data(self, instance):
                     client.close()
                     break
             except socket.error as e:
-                self.log.warning("unable to connect to %s - %s", addr[-1], e)
+                self.log.warning("Unable to connect to %s - %s", addr[-1], e)
 
         status = AgentCheck.OK if response else AgentCheck.CRITICAL
         self.service_check(self.SERVICE_CHECK_NAME, status, tags=service_check_tags)

From 4c75bc90540ece2f1b6e70a7d8883a1e7163179c Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 19:39:08 +0100
Subject: [PATCH 30/41] Fix win32_event_log

---
 .../datadog_checks/win32_event_log/win32_event_log.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/win32_event_log/datadog_checks/win32_event_log/win32_event_log.py b/win32_event_log/datadog_checks/win32_event_log/win32_event_log.py
index 49dc41065a4c5..591ab9ea650fe 100644
--- a/win32_event_log/datadog_checks/win32_event_log/win32_event_log.py
+++ b/win32_event_log/datadog_checks/win32_event_log/win32_event_log.py
@@ -132,11 +132,11 @@ def check(self, instance):
             wmi_sampler.sample()
         except TimeoutException:
             self.log.warning(
-                u"[Win32EventLog] WMI query timed out."
-                u" class={wmi_class} - properties={wmi_properties} -"
-                u" filters={filters} - tags={tags}".format(
-                    wmi_class=self.EVENT_CLASS, wmi_properties=event_properties, filters=filters, tags=instance_tags
-                )
+                "[Win32EventLog] WMI query timed out. class=%s - properties=%s - filters=%s - tags=%s",
+                self.EVENT_CLASS,
+                event_properties,
+                filters,
+                instance_tags,
             )
         else:
             for ev in wmi_sampler:

From fdce52307da85781661d8c71e68932107bd2d97b Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 19:42:11 +0100
Subject: [PATCH 31/41] Fix wmi_check

---
 wmi_check/datadog_checks/wmi_check/wmi_check.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/wmi_check/datadog_checks/wmi_check/wmi_check.py b/wmi_check/datadog_checks/wmi_check/wmi_check.py
index 3abed9ea99e8e..6212e0a43a647 100644
--- a/wmi_check/datadog_checks/wmi_check/wmi_check.py
+++ b/wmi_check/datadog_checks/wmi_check/wmi_check.py
@@ -71,11 +71,11 @@ def check(self, instance):
             metrics = self._extract_metrics(wmi_sampler, tag_by, tag_queries, constant_tags)
         except TimeoutException:
             self.log.warning(
-                u"WMI query timed out."
-                u" class={wmi_class} - properties={wmi_properties} -"
-                u" filters={filters} - tag_queries={tag_queries}".format(
-                    wmi_class=wmi_class, wmi_properties=properties, filters=filters, tag_queries=tag_queries
-                )
+                "WMI query timed out. class=%s - properties=%s - filters=%s - tag_queries=%s",
+                wmi_class,
+                properties,
+                filters,
+                tag_queries,
             )
         else:
             self._submit_metrics(metrics, metric_name_and_type_by_property)

From 8deb7749dc1a5a496126599318a1bace983d2f13 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 19:43:58 +0100
Subject: [PATCH 32/41] Fix yarn

---
 yarn/datadog_checks/yarn/yarn.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/yarn/datadog_checks/yarn/yarn.py b/yarn/datadog_checks/yarn/yarn.py
index d0981e7622814..866078cdad5ea 100644
--- a/yarn/datadog_checks/yarn/yarn.py
+++ b/yarn/datadog_checks/yarn/yarn.py
@@ -197,8 +197,8 @@ def check(self, instance):
         cluster_name = instance.get('cluster_name')
         if cluster_name is None:
             self.warning(
-                "The cluster_name must be specified in the instance configuration, "
-                "defaulting to '{}'".format(DEFAULT_CLUSTER_NAME)
+                "The cluster_name must be specified in the instance configuration, defaulting to '%s'",
+                DEFAULT_CLUSTER_NAME,
             )
             cluster_name = DEFAULT_CLUSTER_NAME

From 4fa0d72204d0a13652e480207b908b09cc034581 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 19:46:07 +0100
Subject: [PATCH 33/41] Fix zk

---
 zk/datadog_checks/zk/zk.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/zk/datadog_checks/zk/zk.py b/zk/datadog_checks/zk/zk.py
index 125af18346556..411481db1d9ed 100644
--- a/zk/datadog_checks/zk/zk.py
+++ b/zk/datadog_checks/zk/zk.py
@@ -372,12 +372,10 @@ def parse_mntr(self, buf):
                 metrics.append(ZKMetric(metric_name, metric_value, metric_type))
 
             except ValueError:
-                self.log.warning(u"Cannot format `mntr` value. key={key}, value{value}".format(key=key, value=value))
+                self.log.warning("Cannot format `mntr` value. key=%s, value=%s", key, value)
                 continue
             except Exception:
-                self.log.exception(
-                    u"Unexpected exception occurred while parsing `mntr` command content:\n{buf}".format(buf=buf)
-                )
+                self.log.exception("Unexpected exception occurred while parsing `mntr` command content:\n%s", buf)
 
         return (metrics, mode)

From 8d447c7da685cd72a35d68bfea595fb22c19af2e Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 19:48:07 +0100
Subject: [PATCH 34/41] Revert tox.py

---
 datadog_checks_dev/datadog_checks/dev/plugin/tox.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/datadog_checks_dev/datadog_checks/dev/plugin/tox.py b/datadog_checks_dev/datadog_checks/dev/plugin/tox.py
index 065b8767d52d0..db03ce1424791 100644
--- a/datadog_checks_dev/datadog_checks/dev/plugin/tox.py
+++ b/datadog_checks_dev/datadog_checks/dev/plugin/tox.py
@@ -63,7 +63,9 @@ def add_style_checker(config, sections, make_envconfig, reader):
                 'flake8 --config=../.flake8 .',
                 'black --check --diff .',
                 'isort --check-only --diff --recursive .',
-                'flake8 --config=../.flake8 --enable-extensions=G --select=G .',  # lint `flake8-logging-format`
+                'python -c "print(\'\\n[WARNING] Complying with following lint rules is recommended, '
+                'but not mandatory, yet.\')"',
+                '- flake8 --config=../.flake8 --enable-extensions=G --select=G .',  # lint `flake8-logging-format`
             ]
         ),
     }

From 7f54c7d972879688ee39ddc26a329b37bf39884c Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 20:10:24 +0100
Subject: [PATCH 35/41] Clean up

---
 ibm_db2/datadog_checks/ibm_db2/ibm_db2.py    | 6 +++---
 kubelet/datadog_checks/kubelet/prometheus.py | 2 +-
 mysql/datadog_checks/mysql/mysql.py          | 2 +-
 oracle/datadog_checks/oracle/oracle.py       | 6 +++---
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/ibm_db2/datadog_checks/ibm_db2/ibm_db2.py b/ibm_db2/datadog_checks/ibm_db2/ibm_db2.py
index 6191a28f2a497..3b677f6dae58d 100644
--- a/ibm_db2/datadog_checks/ibm_db2/ibm_db2.py
+++ b/ibm_db2/datadog_checks/ibm_db2/ibm_db2.py
@@ -480,7 +480,7 @@ def query_custom(self):
                 column_type = column.get('type')
                 if not column_type:  # no cov
                     self.log.error(
-                        'Column field `type` is required for column `%s` ' 'of metric_prefix `%s`',
+                        'Column field `type` is required for column `%s` of metric_prefix `%s`',
                         name,
                         metric_prefix,
                     )
@@ -491,7 +491,7 @@ def query_custom(self):
                 else:
                     if not hasattr(self, column_type):
                         self.log.error(
-                            'Invalid submission method `%s` for metric column `%s` of ' 'metric_prefix `%s`',
+                            'Invalid submission method `%s` for metric column `%s` of metric_prefix `%s`',
                             column_type,
                             name,
                            metric_prefix,
@@ -501,7 +501,7 @@ def query_custom(self):
                        metric_info.append(('{}.{}'.format(metric_prefix, name), float(value), column_type))
                    except (ValueError, TypeError):  # no cov
                        self.log.error(
-                            'Non-numeric value `%s` for metric column `%s` of ' 'metric_prefix `%s`',
+                            'Non-numeric value `%s` for metric column `%s` of metric_prefix `%s`',
                            value,
                            name,
                            metric_prefix,

diff --git a/kubelet/datadog_checks/kubelet/prometheus.py b/kubelet/datadog_checks/kubelet/prometheus.py
index 9b910c330b15c..cf2fac43caf7a 100644
--- a/kubelet/datadog_checks/kubelet/prometheus.py
+++ b/kubelet/datadog_checks/kubelet/prometheus.py
@@ -439,7 +439,7 @@ def _process_limit_metric(self, m_name, metric, cache, scraper_config, pct_m_nam
                 self.gauge(pct_m_name, float(usage / float(limit)), tags)
             else:
                 self.log.debug(
-                    "No corresponding usage found for metric %s and container %s, skipping usage_pct " "for now.",
+                    "No corresponding usage found for metric %s and container %s, skipping usage_pct for now.",
                     pct_m_name,
                     c_name,
                 )

diff --git a/mysql/datadog_checks/mysql/mysql.py b/mysql/datadog_checks/mysql/mysql.py
index 031ff47634bf0..b603faecc29f6 100644
--- a/mysql/datadog_checks/mysql/mysql.py
+++ b/mysql/datadog_checks/mysql/mysql.py
@@ -954,7 +954,7 @@ def _get_stats_from_innodb_status(self, db):
                 cursor.execute("SHOW /*!50000 ENGINE*/ INNODB STATUS")
         except (pymysql.err.InternalError, pymysql.err.OperationalError, pymysql.err.NotSupportedError) as e:
             self.warning(
-                "Privilege error or engine unavailable accessing the INNODB status tables " "(must grant PROCESS): %s",
+                "Privilege error or engine unavailable accessing the INNODB status tables (must grant PROCESS): %s",
                 e,
             )
             return {}

diff --git a/oracle/datadog_checks/oracle/oracle.py b/oracle/datadog_checks/oracle/oracle.py
index 901087db49a3b..e709036d16994 100644
--- a/oracle/datadog_checks/oracle/oracle.py
+++ b/oracle/datadog_checks/oracle/oracle.py
@@ -186,7 +186,7 @@ def _get_custom_metrics(self, con, custom_queries, global_tags):
                     column_type = column.get('type')
                     if not column_type:
                         self.log.error(
-                            'column field `type` is required for column `%s` ' 'of metric_prefix `%s`',
+                            'column field `type` is required for column `%s` of metric_prefix `%s`',
                            name,
                            metric_prefix,
                        )
@@ -197,7 +197,7 @@ def _get_custom_metrics(self, con, custom_queries, global_tags):
                    else:
                        if not hasattr(self, column_type):
                            self.log.error(
-                                'invalid submission method `%s` for column `%s` "' '"of metric_prefix `%s`',
+                                'invalid submission method `%s` for column `%s` of metric_prefix `%s`',
                                column_type,
                                name,
                                metric_prefix,
@@ -207,7 +207,7 @@ def _get_custom_metrics(self, con, custom_queries, global_tags):
                            metric_info.append(('{}.{}'.format(metric_prefix, name), float(value), column_type))
                        except (ValueError, TypeError):
                            self.log.error(
-                                'non-numeric value `%s` for metric column `%s` ' 'of metric_prefix `%s`',
+                                'non-numeric value `%s` for metric column `%s` of metric_prefix `%s`',
                                value,
                                name,
                                metric_prefix,

From 34c5366b979b62c50aa5ceed10c68a79b0f39e44 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 21:35:53 +0100
Subject: [PATCH 36/41] Fix mysql

---
 mysql/datadog_checks/mysql/mysql.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/mysql/datadog_checks/mysql/mysql.py b/mysql/datadog_checks/mysql/mysql.py
index b603faecc29f6..614e265e6181e 100644
--- a/mysql/datadog_checks/mysql/mysql.py
+++ b/mysql/datadog_checks/mysql/mysql.py
@@ -954,8 +954,7 @@ def _get_stats_from_innodb_status(self, db):
                 cursor.execute("SHOW /*!50000 ENGINE*/ INNODB STATUS")
         except (pymysql.err.InternalError, pymysql.err.OperationalError, pymysql.err.NotSupportedError) as e:
             self.warning(
-                "Privilege error or engine unavailable accessing the INNODB status tables (must grant PROCESS): %s",
-                e,
+                "Privilege error or engine unavailable accessing the INNODB status tables (must grant PROCESS): %s", e,
             )
             return {}

From 389d3d8b2177552edfe73ba8982b56af6f80b50d Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 21:53:03 +0100
Subject: [PATCH 37/41] Fix oracle

---
 oracle/tests/test_metrics.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/oracle/tests/test_metrics.py b/oracle/tests/test_metrics.py
index d542b9f5c6e38..d81dc083076a8 100644
--- a/oracle/tests/test_metrics.py
+++ b/oracle/tests/test_metrics.py
@@ -142,21 +142,27 @@ def test__get_custom_metrics_misconfigured(check):
 
     # No type in column
     check._get_custom_metrics(con, custom_queries, [])
-    log.error.assert_called_once_with('column field `type` is required for column `foo` of metric_prefix `foo`')
+    log.error.assert_called_once_with(
+        'column field `type` is required for column `%s` of metric_prefix `%s`', 'foo', 'foo'
+    )
 
     log.reset_mock()
     col2["type"] = "invalid"
 
     # Invalid type column
     check._get_custom_metrics(con, custom_queries, [])
-    log.error.assert_called_once_with('invalid submission method `invalid` for column `foo` of metric_prefix `foo`')
+    log.error.assert_called_once_with(
+        'invalid submission method `%s` for column `%s` of metric_prefix `%s`', 'invalid', 'foo', 'foo'
+    )
 
     log.reset_mock()
     col2["type"] = "gauge"
 
     # Non numeric value
     check._get_custom_metrics(con, custom_queries, [])
-    log.error.assert_called_once_with('non-numeric value `bar` for metric column `foo` of metric_prefix `foo`')
+    log.error.assert_called_once_with(
+        'non-numeric value `%s` for metric column `%s` of metric_prefix `%s`', 'bar', 'foo', 'foo'
+    )
 
     # No metric sent if errors
     gauge.assert_not_called()

From e4576d33be6e40aa6e7c4f5e46cf0e2852ce0363 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Fri, 3 Jan 2020 17:42:17 +0100
Subject: [PATCH 38/41] Enable flake8-logging-format

---
 datadog_checks_dev/datadog_checks/dev/plugin/tox.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/datadog_checks_dev/datadog_checks/dev/plugin/tox.py b/datadog_checks_dev/datadog_checks/dev/plugin/tox.py
index db03ce1424791..065b8767d52d0 100644
--- a/datadog_checks_dev/datadog_checks/dev/plugin/tox.py
+++ b/datadog_checks_dev/datadog_checks/dev/plugin/tox.py
@@ -63,9 +63,7 @@ def add_style_checker(config, sections, make_envconfig, reader):
                 'flake8 --config=../.flake8 .',
                 'black --check --diff .',
                 'isort --check-only --diff --recursive .',
-                'python -c "print(\'\\n[WARNING] Complying with following lint rules is recommended, '
-                'but not mandatory, yet.\')"',
-                '- flake8 --config=../.flake8 --enable-extensions=G --select=G .',  # lint `flake8-logging-format`
+                'flake8 --config=../.flake8 --enable-extensions=G --select=G .',  # lint `flake8-logging-format`
             ]
         ),
     }

From 04f60b4ca45b490a5f483ee623d12f4aa2609a05 Mon Sep 17 00:00:00 2001
From: Alexandre Yang
Date: Mon, 6 Jan 2020 22:15:26 +0100
Subject: [PATCH 39/41] Fixed openstack_controller

---
 .../datadog_checks/openstack_controller/api.py |  8 +++++---
 .../openstack_controller.py                    | 17 +++++++++--------
 2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/openstack_controller/datadog_checks/openstack_controller/api.py b/openstack_controller/datadog_checks/openstack_controller/api.py
index b7d03cd954d94..53a7c271ef49f 100644
--- a/openstack_controller/datadog_checks/openstack_controller/api.py
+++ b/openstack_controller/datadog_checks/openstack_controller/api.py
@@ -378,9 +378,11 @@ def _get_paginated_list(self, url, obj, query_params):
                 # Only catch HTTPErrors to enable the retry mechanism.
                 # Other exceptions raised by _make_request (e.g. AuthenticationNeeded) should be caught downstream
                 self.logger.debug(
-                    "Error making paginated request to {}, lowering limit from {} to {}: {}".format(
-                        url, query_params['limit'], query_params['limit'] // 2, e
-                    )
+                    "Error making paginated request to %s, lowering limit from %s to %s: %s",
+                    url,
+                    query_params['limit'],
+                    query_params['limit'] // 2,
+                    e,
                 )
                 query_params['limit'] //= 2
                 retry += 1

diff --git a/openstack_controller/datadog_checks/openstack_controller/openstack_controller.py b/openstack_controller/datadog_checks/openstack_controller/openstack_controller.py
index 2550c9e9cfc4f..327a24c5f3dcf 100644
--- a/openstack_controller/datadog_checks/openstack_controller/openstack_controller.py
+++ b/openstack_controller/datadog_checks/openstack_controller/openstack_controller.py
@@ -289,7 +289,7 @@ def get_stats_for_single_hypervisor(
         try:
             load_averages = self.get_loads_for_single_hypervisor(hyp['id'])
         except Exception as e:
-            self.warning('Unable to get loads averages for hypervisor {}: {}'.format(hyp['id'], e))
+            self.warning('Unable to get loads averages for hypervisor %s: %s', hyp['id'], e)
             load_averages = []
         if load_averages and len(load_averages) == 3:
             for i, avg in enumerate([1, 5, 15]):
@@ -466,7 +466,7 @@ def _is_valid_metric(label):
         project_name = project.get('name')
         project_id = project.get('id')
 
-        self.log.debug("Collecting metrics for project. name: {} id: {}".format(project_name, project['id']))
+        self.log.debug("Collecting metrics for project. name: %s id: %s", project_name, project['id'])
 
         server_stats = self.get_project_limits(project['id'])
         server_tags.append('tenant_id:{}'.format(project_id))
@@ -479,7 +479,7 @@ def _is_valid_metric(label):
                 metric_key = PROJECT_METRICS[st]
                 self.gauge("openstack.nova.limits.{}".format(metric_key), server_stats[st], tags=server_tags)
             except KeyError:
-                self.warning("Unexpected response, not submitting limits metrics for project id {}".format(project['id']))
+                self.warning("Unexpected response, not submitting limits metrics for project id %s", project['id'])
 
     def get_flavors(self):
         query_params = {}
@@ -612,8 +612,9 @@ def init_api(self, instance_config, keystone_server_url, custom_tags):
             )
         except KeystoneUnreachable as e:
             self.warning(
-                "The agent could not contact the specified identity server at {} . 
" - "Are you sure it is up at that address?".format(keystone_server_url) + "The agent could not contact the specified identity server at `%s`. " + "Are you sure it is up at that address?", + keystone_server_url, ) self.log.debug("Problem grabbing auth token: %s", e) self.service_check( @@ -748,9 +749,9 @@ def check(self, instance): if isinstance(e, IncompleteIdentity): self.warning( "Please specify the user via the `user` variable in your init_config.\n" - + "This is the user you would use to authenticate with Keystone v3 via password auth.\n" - + "The user should look like:" - + "{'password': 'my_password', 'name': 'my_name', 'domain': {'id': 'my_domain_id'}}" + "This is the user you would use to authenticate with Keystone v3 via password auth.\n" + "The user should look like: " + "{'password': 'my_password', 'name': 'my_name', 'domain': {'id': 'my_domain_id'}}" ) else: self.warning("Configuration Incomplete: %s! Check your openstack.yaml file", e) From a6d251aec157d3ee615bdf503221cff5c0a855a4 Mon Sep 17 00:00:00 2001 From: Alexandre Yang Date: Mon, 6 Jan 2020 22:16:42 +0100 Subject: [PATCH 40/41] Fix riak --- riak/datadog_checks/riak/riak.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/riak/datadog_checks/riak/riak.py b/riak/datadog_checks/riak/riak.py index c60139cf78b84..7c447aa0c557e 100644 --- a/riak/datadog_checks/riak/riak.py +++ b/riak/datadog_checks/riak/riak.py @@ -274,14 +274,12 @@ def safe_submit_metric(self, name, value, tags=None): self.gauge(name, float(value), tags=tags) return except ValueError: - self.log.debug("metric name {0} cannot be converted to a float: {1}".format(name, value)) + self.log.debug("metric name %s cannot be converted to a float: %s", name, value) pass try: self.gauge(name, unicodedata.numeric(value), tags=tags) return except (TypeError, ValueError): - self.log.debug( - "metric name {0} cannot be converted to a float even using unicode tools: {1}".format(name, value) - ) + self.log.debug("metric name %s cannot be converted to a float even using unicode tools: %s", name, value) pass From 0b63df412777da55d6d7cb612b907ca597cc9cd4 Mon Sep 17 00:00:00 2001 From: Alexandre Yang Date: Mon, 6 Jan 2020 22:17:37 +0100 Subject: [PATCH 41/41] Fix riakcs --- riakcs/datadog_checks/riakcs/riakcs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/riakcs/datadog_checks/riakcs/riakcs.py b/riakcs/datadog_checks/riakcs/riakcs.py index dc73b55f0a2df..e7716b35dd8b3 100644 --- a/riakcs/datadog_checks/riakcs/riakcs.py +++ b/riakcs/datadog_checks/riakcs/riakcs.py @@ -94,7 +94,7 @@ def _connect(self, instance): try: s3 = S3Connection(**s3_settings) except Exception as e: - self.log.error("Error connecting to {0}: {1}".format(aggregation_key, e)) + self.log.error("Error connecting to %s: %s", aggregation_key, e) self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=tags, message=str(e)) raise @@ -110,7 +110,7 @@ def _get_stats(self, s3, aggregation_key, tags): stats = self.load_json(stats_str) except Exception as e: - self.log.error("Error retrieving stats from {0}: {1}".format(aggregation_key, e)) + self.log.error("Error retrieving stats from %s: %s", aggregation_key, e) self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=tags, message=str(e)) raise