[QA] use lazy logging format #5400

Closed

wants to merge 41 commits into from

Changes from all commits (41 commits)
5f2f9c0
Enable flake8-logging-format
AlexandreYang Jan 3, 2020
c2017c3
Trigger PR All
AlexandreYang Jan 6, 2020
245ea70
Formatted using script
AlexandreYang Jan 6, 2020
b4dbbdb
Fix lint
AlexandreYang Jan 6, 2020
33f3c62
Fix directory
AlexandreYang Jan 6, 2020
f61a664
Fix disk
AlexandreYang Jan 6, 2020
0a7b10d
Fix gitlab
AlexandreYang Jan 6, 2020
73bca46
Fix haproxy
AlexandreYang Jan 6, 2020
1c1ec8c
Fix ibm_db2
AlexandreYang Jan 6, 2020
a80467b
Fix kafka_consumer
AlexandreYang Jan 6, 2020
cc0fdf2
Fix kong
AlexandreYang Jan 6, 2020
9829680
Fix kubelet
AlexandreYang Jan 6, 2020
4b537b0
Fix mapreduce
AlexandreYang Jan 6, 2020
ab73f55
Fix mesos_master
AlexandreYang Jan 6, 2020
8bccdad
Fix mysql
AlexandreYang Jan 6, 2020
1449210
Fix mysql
AlexandreYang Jan 6, 2020
ffcd0a3
Fix nginx
AlexandreYang Jan 6, 2020
2f738e5
Fix openstack
AlexandreYang Jan 6, 2020
3256d06
Fix oracle
AlexandreYang Jan 6, 2020
d5a2eb1
Fix postfix
AlexandreYang Jan 6, 2020
e45765b
Fix process
AlexandreYang Jan 6, 2020
4c88b82
Fix rabbitmq
AlexandreYang Jan 6, 2020
af15871
Fix redisdb
AlexandreYang Jan 6, 2020
87ab899
Fix gitlab_runner
AlexandreYang Jan 6, 2020
d699d43
Fix spark
AlexandreYang Jan 6, 2020
4c20b8b
Fix sqlserver
AlexandreYang Jan 6, 2020
818b78f
Fix teamcity
AlexandreYang Jan 6, 2020
aa72139
Fix tls
AlexandreYang Jan 6, 2020
1c822ad
Fix twemproxy
AlexandreYang Jan 6, 2020
4c75bc9
Fix win32_event_log
AlexandreYang Jan 6, 2020
fdce523
Fix wmi_check
AlexandreYang Jan 6, 2020
8deb774
Fix yarn
AlexandreYang Jan 6, 2020
4fa0d72
Fix zk
AlexandreYang Jan 6, 2020
8d447c7
Revert tox.py
AlexandreYang Jan 6, 2020
7f54c7d
Clean up
AlexandreYang Jan 6, 2020
34c5366
Fix mysql
AlexandreYang Jan 6, 2020
389d3d8
Fix oracle
AlexandreYang Jan 6, 2020
e4576d3
Enable flake8-logging-format
AlexandreYang Jan 3, 2020
04f60b4
Fixed openstack_controller
AlexandreYang Jan 6, 2020
a6d251a
Fix riak
AlexandreYang Jan 6, 2020
0b63df4
Fix riakcs
AlexandreYang Jan 6, 2020
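Every change in this PR follows the same pattern: string interpolation is moved out of the call site and handed to the logging framework as arguments. A minimal sketch of the two styles (illustrative only, not taken from any file in this diff):

    import logging

    logger = logging.getLogger(__name__)
    count = 42

    # Eager: the message is built with str.format() even when DEBUG is disabled.
    logger.debug('processing {} items'.format(count))

    # Lazy: the logger applies 'msg % args' only if the record is actually emitted.
    logger.debug('processing %s items', count)

The lazy form skips formatting entirely when the log level filters the call out, which is what the flake8-logging-format extension (enabled in the first commit) enforces.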
2 changes: 1 addition & 1 deletion couch/datadog_checks/couch/couch.py
@@ -238,7 +238,7 @@ def _build_system_metrics(self, data, tags, prefix='couchdb.erlang'):
                 self.gauge("{0}.{1}.size".format(prefix, key), val['count'], queue_tags)
             else:
                 self.agent_check.log.debug(
-                    "Queue %s does not have a key 'count'. It will be ignored." % queue
+                    "Queue %s does not have a key 'count'. It will be ignored.", queue
                 )
         else:
             self.gauge("{0}.{1}.size".format(prefix, key), val, queue_tags)
1 change: 0 additions & 1 deletion datadog_checks_base/datadog_checks/__init__.py
@@ -1,5 +1,4 @@
 # (C) Datadog, Inc. 2018-present
 # All rights reserved
 # Licensed under a 3-clause BSD style license (see LICENSE)
-#
 __path__ = __import__('pkgutil').extend_path(__path__, __name__)
4 changes: 1 addition & 3 deletions datadog_checks_dev/datadog_checks/dev/plugin/tox.py
@@ -63,9 +63,7 @@ def add_style_checker(config, sections, make_envconfig, reader):
                 'flake8 --config=../.flake8 .',
                 'black --check --diff .',
                 'isort --check-only --diff --recursive .',
-                'python -c "print(\'\\n[WARNING] Complying with following lint rules is recommended, '
-                'but not mandatory, yet.\')"',
-                '- flake8 --config=../.flake8 --enable-extensions=G --select=G .', # lint `flake8-logging-format`
+                'flake8 --config=../.flake8 --enable-extensions=G --select=G .', # lint `flake8-logging-format`
             ]
         ),
     }
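Dropping the `- ` prefix matters here: in a tox `commands` list, a leading `-` tells tox to ignore the command's exit code, so the G checks previously could not fail the style env. The rules now enforced flag roughly the following patterns (rule numbers as documented by flake8-logging-format; the snippet is illustrative):

    import logging

    logger = logging.getLogger(__name__)
    name = 'worker'

    logger.info('hello {}'.format(name))  # G001: str.format() inside a logging call
    logger.info('hello %s' % name)        # G002: % formatting inside a logging call
    logger.info('hello ' + name)          # G003: string concatenation inside a logging call
    logger.info('hello %s', name)         # OK: formatting deferred to the logger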
@@ -104,7 +104,7 @@ def __download_with_tuf(self, target_relpath):

         # Either the target has not been updated...
         if not len(updated_targets):
-            logger.debug('{} has not been updated'.format(target_relpath))
+            logger.debug('%s has not been updated', target_relpath)
         # or, it has been updated, in which case...
         else:
             # First, we use TUF to download and verify the target.
@@ -113,7 +113,7 @@ def __download_with_tuf(self, target_relpath):
             assert updated_target == target
             self.__updater.download_target(updated_target, self.__targets_dir)

-            logger.info('TUF verified {}'.format(target_relpath))
+            logger.info('TUF verified %s', target_relpath)

         target_abspath = os.path.join(self.__targets_dir, target_relpath)
         return target_abspath, target
@@ -185,7 +185,7 @@ def __load_root_layout(self, target_relpath):
         return root_layout, root_layout_pubkeys, root_layout_params

     def __handle_in_toto_verification_exception(self, target_relpath, e):
-        logger.exception('in-toto failed to verify {}'.format(target_relpath))
+        logger.exception('in-toto failed to verify %s', target_relpath)

         if isinstance(e, LinkNotFoundError) and str(e) == RevokedDeveloper.MSG:
             raise RevokedDeveloper(target_relpath, IN_TOTO_ROOT_LAYOUT)
@@ -211,7 +211,7 @@ def __in_toto_verify(self, inspection_packet, target_relpath):
         except Exception as e:
             self.__handle_in_toto_verification_exception(target_relpath, e)
         else:
-            logger.info('in-toto verified {}'.format(target_relpath))
+            logger.info('in-toto verified %s', target_relpath)
         finally:
             # Switch back to a parent directory we control, so that we can
             # safely delete temp dir.
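`logger.exception` gets the same treatment as `debug`/`info` above: it is just an ERROR-level call that also attaches the active traceback, so its message arguments can be passed lazily too. A self-contained sketch (names are illustrative):

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    try:
        raise ValueError('bad signature')
    except ValueError:
        # Logs at ERROR level, appends the current traceback, and formats
        # 'in-toto failed to verify %s' % ('some-target',) only on emission.
        logger.exception('in-toto failed to verify %s', 'some-target')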
2 changes: 1 addition & 1 deletion directory/datadog_checks/directory/directory.py
@@ -133,7 +133,7 @@ def _get_stats(
                     file_stat = file_entry.stat()

                 except OSError as ose:
-                    self.warning('DirectoryCheck: could not stat file {} - {}'.format(join(root, file_entry.name), ose))
+                    self.warning('DirectoryCheck: could not stat file %s - %s', join(root, file_entry.name), ose)
                 else:
                     # file specific metrics
                     directory_bytes += file_stat.st_size
10 changes: 5 additions & 5 deletions disk/datadog_checks/disk/disk.py
@@ -263,25 +263,25 @@ def _compile_pattern_filters(self, instance):
         device_blacklist_extras = []
         mount_point_blacklist_extras = []

-        deprecation_message = '`{old}` is deprecated and will be removed in 6.9. Please use `{new}` instead.'
+        deprecation_message = '`%s` is deprecated and will be removed in 6.9. Please use `%s` instead.'

         if 'excluded_filesystems' in instance:
             file_system_blacklist_extras.extend(
                 '{}$'.format(pattern) for pattern in instance['excluded_filesystems'] if pattern
             )
-            self.warning(deprecation_message.format(old='excluded_filesystems', new='file_system_blacklist'))
+            self.warning(deprecation_message, 'excluded_filesystems', 'file_system_blacklist')

         if 'excluded_disks' in instance:
             device_blacklist_extras.extend('{}$'.format(pattern) for pattern in instance['excluded_disks'] if pattern)
-            self.warning(deprecation_message.format(old='excluded_disks', new='device_blacklist'))
+            self.warning(deprecation_message, 'excluded_disks', 'device_blacklist')

         if 'excluded_disk_re' in instance:
             device_blacklist_extras.append(instance['excluded_disk_re'])
-            self.warning(deprecation_message.format(old='excluded_disk_re', new='device_blacklist'))
+            self.warning(deprecation_message, 'excluded_disk_re', 'device_blacklist')

         if 'excluded_mountpoint_re' in instance:
             mount_point_blacklist_extras.append(instance['excluded_mountpoint_re'])
-            self.warning(deprecation_message.format(old='excluded_mountpoint_re', new='mount_point_blacklist'))
+            self.warning(deprecation_message, 'excluded_mountpoint_re', 'mount_point_blacklist')

         # Any without valid patterns will become None
         self._file_system_whitelist = self._compile_valid_patterns(self._file_system_whitelist, casing=re.I)
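This hunk also shows what happens to a shared message template: lazy logging arguments are positional, so the named `{old}`/`{new}` placeholders become `%s` and the values are passed per call. A sketch of the pattern, assuming (as this PR does) that the check's `warning()` forwards its arguments to the underlying logger:

    import logging

    log = logging.getLogger(__name__)
    deprecation_message = '`%s` is deprecated. Please use `%s` instead.'

    # The logger interpolates deprecation_message % ('excluded_disks', 'device_blacklist')
    # only when the record is emitted, not at the call site.
    log.warning(deprecation_message, 'excluded_disks', 'device_blacklist')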
10 changes: 5 additions & 5 deletions docker_daemon/test/test_docker_daemon.py
@@ -82,7 +82,7 @@ def test_event_attributes_tag(self):
             "nginx:latest", detach=True, name='event-tags-test', entrypoint='/bin/false')
         log.debug('start nginx:latest with entrypoint /bin/false')
         DockerUtil().client.start(container_fail)
-        log.debug('container exited with %s' % DockerUtil().client.wait(container_fail, 1))
+        log.debug('container exited with %s', DockerUtil().client.wait(container_fail, 1))
         # Wait 1 second after exit so the event will be picked up
         from time import sleep
         sleep(1)
@@ -278,12 +278,12 @@ def setUp(self):
             self.docker_client.connect_container_to_network(cont['Id'], self.second_network)

         for c in self.containers:
-            log.info("Starting container: {0}".format(c))
+            log.info("Starting container: %s", c)
             self.docker_client.start(c)

     def tearDown(self):
         for c in self.containers:
-            log.info("Stopping container: {0}".format(c))
+            log.info("Stopping container: %s", c)
             self.docker_client.remove_container(c, force=True)
         self.docker_client.remove_network(self.second_network)

@@ -829,8 +829,8 @@ def test_collect_exit_code(self):
         log.debug('start nginx:latest with entrypoint /bin/false')
         self.docker_client.start(container_ok)
         self.docker_client.start(container_fail)
-        log.debug('container exited with %s' % self.docker_client.wait(container_ok, 1))
-        log.debug('container exited with %s' % self.docker_client.wait(container_fail, 1))
+        log.debug('container exited with %s', self.docker_client.wait(container_ok, 1))
+        log.debug('container exited with %s', self.docker_client.wait(container_fail, 1))
         # After the container exits, we need to wait a second so the event isn't too recent
         # when the check runs, otherwise the event is not picked up
         from time import sleep
6 changes: 3 additions & 3 deletions gitlab/datadog_checks/gitlab/gitlab.py
@@ -110,7 +110,7 @@ def _check_health_endpoint(self, instance, check_type, tags):

         if url is None:
             # Simply ignore this service check if not configured
-            self.log.debug("gitlab_url not configured, service check {} skipped".format(check_type))
+            self.log.debug("gitlab_url not configured, service check %s skipped", check_type)
             return

         service_check_tags = self._service_check_tags(url)
@@ -121,7 +121,7 @@ def _check_health_endpoint(self, instance, check_type, tags):
         check_url = '{}/-/{}'.format(url, check_type)

         try:
-            self.log.debug("checking {} against {}".format(check_type, check_url))
+            self.log.debug("checking %s against %s", check_type, check_url)
             r = self.http.get(check_url)
             if r.status_code != 200:
                 self.service_check(
@@ -153,4 +153,4 @@ def _check_health_endpoint(self, instance, check_type, tags):
             raise
         else:
             self.service_check(service_check_name, OpenMetricsBaseCheck.OK, tags=service_check_tags)
-            self.log.debug("gitlab check {} succeeded".format(check_type))
+            self.log.debug("gitlab check %s succeeded", check_type)
@@ -108,7 +108,7 @@ def _check_connectivity_to_master(self, instance, tags):
         service_check_tags.extend(tags)

         try:
-            self.log.debug("checking connectivity against {}".format(url))
+            self.log.debug("checking connectivity against %s", url)
             r = self.http.get(url)
             if r.status_code != 200:
                 self.service_check(
11 changes: 5 additions & 6 deletions haproxy/datadog_checks/haproxy/haproxy.py
@@ -200,7 +200,7 @@ def _collect_version_from_http(self, url):
             self.log.debug("unable to find HAProxy version info")
         else:
             version = re.search(r"HAProxy version ([^,]+)", raw_version).group(1)
-            self.log.debug(u"HAProxy version is {}".format(version))
+            self.log.debug("HAProxy version is %s", version)
             self.set_metadata('version', version)

     def _fetch_socket_data(self, parsed_url):
@@ -577,14 +577,13 @@ def _process_status_metric(
             try:
                 service, _, hostname, status = host_status
             except Exception:
-                service, _, status = host_status
                 if collect_status_metrics_by_host:
                     self.warning(
-                        '`collect_status_metrics_by_host` is enabled but no host info\
-                        could be extracted from HAProxy stats endpoint for {0}'.format(
-                            service
-                        )
+                        '`collect_status_metrics_by_host` is enabled but no host info could be extracted from HAProxy '
+                        'stats endpoint for %s',
+                        service,
                     )
+                service, _, status = host_status

             if self._is_service_excl_filtered(service, services_incl_filter, services_excl_filter):
                 continue
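The warning above also relies on implicit string-literal concatenation: adjacent literals are joined at compile time, so the logger still receives one format string followed by its lazy arguments. Splitting a long message therefore needs no `+` or `.format()` (sketch with placeholder values):

    import logging

    logger = logging.getLogger(__name__)

    # Two adjacent literals compile to a single format string with one '%s'.
    logger.warning(
        'no host info could be extracted from the HAProxy '
        'stats endpoint for %s',
        'my-service',
    )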
17 changes: 11 additions & 6 deletions ibm_db2/datadog_checks/ibm_db2/ibm_db2.py
@@ -480,8 +480,9 @@ def query_custom(self):
                 column_type = column.get('type')
                 if not column_type:  # no cov
                     self.log.error(
-                        'Column field `type` is required for column `{}` '
-                        'of metric_prefix `{}`'.format(name, metric_prefix)
+                        'Column field `type` is required for column `%s` of metric_prefix `%s`',
+                        name,
+                        metric_prefix,
                     )
                     break

@@ -490,16 +491,20 @@ def query_custom(self):
                 else:
                     if not hasattr(self, column_type):
                         self.log.error(
-                            'Invalid submission method `{}` for metric column `{}` of '
-                            'metric_prefix `{}`'.format(column_type, name, metric_prefix)
+                            'Invalid submission method `%s` for metric column `%s` of metric_prefix `%s`',
+                            column_type,
+                            name,
+                            metric_prefix,
                         )
                         break
                     try:
                         metric_info.append(('{}.{}'.format(metric_prefix, name), float(value), column_type))
                     except (ValueError, TypeError):  # no cov
                         self.log.error(
-                            'Non-numeric value `{}` for metric column `{}` of '
-                            'metric_prefix `{}`'.format(value, name, metric_prefix)
+                            'Non-numeric value `%s` for metric column `%s` of metric_prefix `%s`',
+                            value,
+                            name,
+                            metric_prefix,
                         )
                         break

2 changes: 1 addition & 1 deletion ibm_mq/datadog_checks/ibm_mq/config.py
@@ -117,7 +117,7 @@ def _compile_tag_re(self):
             try:
                 queue_tag_list.append([re.compile(regex_str), [t.strip() for t in tags.split(',')]])
             except TypeError:
-                log.warning('{} is not a valid regular expression and will be ignored'.format(regex_str))
+                log.warning('%s is not a valid regular expression and will be ignored', regex_str)
         return queue_tag_list

     @property
2 changes: 1 addition & 1 deletion ibm_mq/datadog_checks/ibm_mq/ibm_mq.py
@@ -59,7 +59,7 @@ def check(self, instance):
         config.check_properly_configured()

         if not pymqi:
-            log.error("You need to install pymqi: {}".format(pymqiException))
+            log.error("You need to install pymqi: %s", pymqiException)
             raise errors.PymqiException("You need to install pymqi: {}".format(pymqiException))

         try:
4 changes: 2 additions & 2 deletions ibm_mq/tests/conftest.py
@@ -71,10 +71,10 @@ def publish():
     for i in range(10):
         try:
             message = 'Hello from Python! Message {}'.format(i)
-            log.info("sending message: {}".format(message))
+            log.info("sending message: %s", message)
             queue.put(message.encode())
         except Exception as e:
-            log.info("exception publishing: {}".format(e))
+            log.info("exception publishing: %s", e)
     queue.close()
     qmgr.disconnect()
     return
10 changes: 5 additions & 5 deletions kafka_consumer/datadog_checks/kafka_consumer/kafka_consumer.py
@@ -102,11 +102,11 @@ def check(self, instance):
         total_contexts = len(self._consumer_offsets) + len(self._highwater_offsets)
         if total_contexts > self._context_limit:
             self.warning(
-                """Discovered {} metric contexts - this exceeds the maximum number of {} contexts permitted by the
+                """Discovered %s metric contexts - this exceeds the maximum number of %s contexts permitted by the
                 check. Please narrow your target by specifying in your kafka_consumer.yaml the consumer groups, topics
-                and partitions you wish to monitor.""".format(
-                    total_contexts, self._context_limit
-                )
+                and partitions you wish to monitor.""",
+                total_contexts,
+                self._context_limit,
             )

         # Report the metics
@@ -145,7 +145,7 @@ def _create_kafka_admin_client(self):
             ssl_crlfile=self.instance.get('ssl_crlfile'),
             ssl_password=self.instance.get('ssl_password'),
         )
-        self.log.debug("KafkaAdminClient api_version: {}".format(kafka_admin_client.config['api_version']))
+        self.log.debug("KafkaAdminClient api_version: %s", kafka_admin_client.config['api_version'])
         # Force initial population of the local cluster metadata cache
         kafka_admin_client._client.poll(future=kafka_admin_client._client.cluster.request_update())
         if kafka_admin_client._client.cluster.topics(exclude_internal_topics=False) is None:
@@ -109,11 +109,11 @@ def check(self, instance):
         )
         if total_contexts > self._context_limit:
             self.warning(
-                """Discovered {} metric contexts - this exceeds the maximum number of {} contexts permitted by the
+                """Discovered %s metric contexts - this exceeds the maximum number of %s contexts permitted by the
                 check. Please narrow your target by specifying in your kafka_consumer.yaml the consumer groups, topics
-                and partitions you wish to monitor.""".format(
-                    total_contexts, self._context_limit
-                )
+                and partitions you wish to monitor.""",
+                total_contexts,
+                self._context_limit,
             )

         # Report the metics
4 changes: 2 additions & 2 deletions kong/datadog_checks/kong/kong.py
@@ -38,9 +38,9 @@ def _fetch_data(self, instance):
         service_check_tags = ['kong_host:%s' % host, 'kong_port:%s' % port] + tags

         try:
-            self.log.debug(u"Querying URL: {0}".format(url))
+            self.log.debug("Querying URL: %s", url)
             response = requests.get(url, headers=headers(self.agentConfig), verify=ssl_validation)
-            self.log.debug(u"Kong status `response`: {0}".format(response))
+            self.log.debug("Kong status `response`: %s", response)
             response.raise_for_status()
         except Exception:
             self.service_check(service_check_name, Kong.CRITICAL, tags=service_check_tags)
4 changes: 2 additions & 2 deletions kubelet/datadog_checks/kubelet/cadvisor.py
@@ -107,7 +107,7 @@ def _update_metrics(self, instance, cadvisor_url, pod_list, pod_list_utils):
             try:
                 self._update_container_metrics(instance, subcontainer, pod_list, pod_list_utils)
             except Exception as e:
-                self.log.error("Unable to collect metrics for container: {0} ({1})".format(c_id, e))
+                self.log.error("Unable to collect metrics for container: %s (%s)", c_id, e)

     def _publish_raw_metrics(self, metric, dat, tags, is_pod, depth=0):
         """
@@ -180,7 +180,7 @@ def _update_container_metrics(self, instance, subcontainer, pod_list, pod_list_utils):
             )
         )
         if pod_list_utils.is_excluded(cid):
-            self.log.debug("Filtering out " + cid)
+            self.log.debug("Filtering out %s", cid)
             return
         tags = tagger.tag(replace_container_rt_prefix(cid), tagger.HIGH) or []

5 changes: 3 additions & 2 deletions kubelet/datadog_checks/kubelet/prometheus.py
@@ -439,8 +439,9 @@ def _process_limit_metric(self, m_name, metric, cache, scraper_config, pct_m_name):
             self.gauge(pct_m_name, float(usage / float(limit)), tags)
         else:
             self.log.debug(
-                "No corresponding usage found for metric %s and "
-                "container %s, skipping usage_pct for now." % (pct_m_name, c_name)
+                "No corresponding usage found for metric %s and container %s, skipping usage_pct for now.",
+                pct_m_name,
+                c_name,
             )

     def container_cpu_usage_seconds_total(self, metric, scraper_config):
4 changes: 2 additions & 2 deletions mapreduce/datadog_checks/mapreduce/mapreduce.py
@@ -130,8 +130,8 @@ def check(self, instance):
         cluster_name = instance.get('cluster_name')
         if cluster_name is None:
             self.warning(
-                "The cluster_name must be specified in the instance configuration, "
-                "defaulting to '{}'".format(self.DEFAULT_CLUSTER_NAME)
+                "The cluster_name must be specified in the instance configuration, defaulting to '%s'",
+                self.DEFAULT_CLUSTER_NAME,
             )
             cluster_name = self.DEFAULT_CLUSTER_NAME

6 changes: 2 additions & 4 deletions mesos_master/datadog_checks/mesos_master/mesos_master.py
@@ -178,7 +178,7 @@ def _get_json(self, url, failure_expected=False, tags=None):
             msg = str(e)
             status = AgentCheck.CRITICAL
         finally:
-            self.log.debug('Request to url : {0}, timeout: {1}, message: {2}'.format(url, timeout, msg))
+            self.log.debug('Request to url : %s, timeout: %s, message: %s', url, timeout, msg)
             self._send_service_check(url, status, failure_expected=failure_expected, tags=tags, message=msg)

         if response.encoding is None:
@@ -210,9 +210,7 @@ def _get_master_state(self, url, tags):
             # Mesos version < 0.25
             old_endpoint = endpoint + '.json'
             self.log.info(
-                'Unable to fetch state from {0}. Retrying with the deprecated endpoint: {1}.'.format(
-                    endpoint, old_endpoint
-                )
+                'Unable to fetch state from %s. Retrying with the deprecated endpoint: %s.', endpoint, old_endpoint
             )
             master_state = self._get_json(old_endpoint, tags=tags)
             return master_state