From eb96449463c551d0abd40de5ef6e70223c5502cd Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 28 Jan 2025 10:32:41 +0100 Subject: [PATCH] Various pylint fixups (#2221) SUMMARY simplifiable-if-statement (use `return bool(...)` rather than `if (...): return True` / `else: return False`) consider-using-dict-items redefined-builtin (using id and input as variables) no-else-break redefined-outer-name (mostly fixtures) pointless-string-statement (Strings being used as comments, function documentation before function rather than as first line) Remove unused variable assignments ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/connection/aws_ssm.py plugins/module_utils/base.py plugins/module_utils/ec2.py plugins/module_utils/networkfirewall.py plugins/modules/application_autoscaling_policy.py plugins/modules/autoscaling_launch_config.py plugins/modules/autoscaling_lifecycle_hook.py plugins/modules/autoscaling_policy.py plugins/modules/batch_job_definition.py plugins/modules/cloudfront_distribution.py plugins/modules/cloudfront_response_headers_policy.py plugins/modules/codebuild_project.py plugins/modules/config_aggregation_authorization.py plugins/modules/config_delivery_channel.py plugins/modules/config_recorder.py plugins/modules/config_rule.py plugins/modules/data_pipeline.py plugins/modules/directconnect_gateway.py plugins/modules/directconnect_link_aggregation_group.py plugins/modules/dms_endpoint.py plugins/modules/dms_replication_subnet_group.py plugins/modules/dynamodb_table.py plugins/modules/dynamodb_ttl.py plugins/modules/ec2_carrier_gateway.py plugins/modules/ec2_win_password.py plugins/modules/ecs_ecr.py plugins/modules/ecs_service_info.py plugins/modules/ecs_taskdefinition.py plugins/modules/efs.py plugins/modules/efs_info.py plugins/modules/eks_cluster.py plugins/modules/elasticache.py plugins/modules/elasticache_parameter_group.py plugins/modules/elasticbeanstalk_app.py plugins/modules/elb_target.py plugins/modules/elb_target_group.py 
plugins/modules/iam_server_certificate.py plugins/modules/kinesis_stream.py plugins/modules/lightsail_snapshot.py plugins/modules/mq_broker.py plugins/modules/mq_user.py plugins/modules/mq_user_info.py plugins/modules/msk_cluster.py plugins/modules/networkfirewall_policy.py plugins/modules/opensearch_info.py plugins/modules/route53_wait.py plugins/modules/s3_cors.py plugins/modules/s3_lifecycle.py plugins/modules/s3_logging.py plugins/modules/s3_website.py plugins/modules/secretsmanager_secret.py plugins/modules/ses_identity.py plugins/modules/sqs_queue.py plugins/modules/storagegateway_info.py plugins/modules/waf_condition.py plugins/modules/waf_rule.py plugins/modules/wafv2_ip_set.py plugins/modules/wafv2_ip_set_info.py plugins/modules/wafv2_resources.py plugins/modules/wafv2_resources_info.py plugins/modules/wafv2_rule_group.py plugins/modules/wafv2_rule_group_info.py plugins/modules/wafv2_web_acl.py plugins/modules/wafv2_web_acl_info.py ADDITIONAL INFORMATION Reviewed-by: Bikouo Aubin Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell --- changelogs/fragments/20250127-pylint.yml | 4 ++ plugins/connection/aws_ssm.py | 2 +- plugins/module_utils/base.py | 2 +- plugins/module_utils/ec2.py | 20 +++--- plugins/module_utils/networkfirewall.py | 4 +- .../modules/application_autoscaling_policy.py | 5 +- plugins/modules/autoscaling_launch_config.py | 4 +- plugins/modules/autoscaling_lifecycle_hook.py | 2 - plugins/modules/autoscaling_policy.py | 6 +- plugins/modules/batch_job_definition.py | 3 - plugins/modules/cloudfront_distribution.py | 6 +- .../cloudfront_response_headers_policy.py | 9 +-- plugins/modules/codebuild_project.py | 4 -- .../config_aggregation_authorization.py | 6 +- plugins/modules/config_delivery_channel.py | 2 +- plugins/modules/config_recorder.py | 6 +- plugins/modules/config_rule.py | 3 +- plugins/modules/data_pipeline.py | 10 +-- plugins/modules/directconnect_gateway.py | 5 +- .../directconnect_link_aggregation_group.py | 2 +- 
plugins/modules/dms_endpoint.py | 26 +++---- .../modules/dms_replication_subnet_group.py | 6 +- plugins/modules/dynamodb_table.py | 8 +-- plugins/modules/dynamodb_ttl.py | 1 - plugins/modules/ec2_carrier_gateway.py | 2 +- plugins/modules/ec2_win_password.py | 6 +- plugins/modules/ecs_ecr.py | 2 +- plugins/modules/ecs_service_info.py | 10 +-- plugins/modules/ecs_taskdefinition.py | 2 +- plugins/modules/efs.py | 21 +++--- plugins/modules/efs_info.py | 14 ++-- plugins/modules/eks_cluster.py | 6 +- plugins/modules/elasticache.py | 1 - .../modules/elasticache_parameter_group.py | 6 +- plugins/modules/elasticbeanstalk_app.py | 3 +- plugins/modules/elb_target.py | 2 +- plugins/modules/elb_target_group.py | 7 +- plugins/modules/iam_server_certificate.py | 4 +- plugins/modules/kinesis_stream.py | 5 +- plugins/modules/lightsail_snapshot.py | 2 +- plugins/modules/mq_broker.py | 3 - plugins/modules/mq_user.py | 4 +- plugins/modules/mq_user_info.py | 2 +- plugins/modules/msk_cluster.py | 4 +- plugins/modules/networkfirewall_policy.py | 4 +- plugins/modules/opensearch_info.py | 2 +- plugins/modules/route53_wait.py | 4 +- plugins/modules/s3_cors.py | 4 +- plugins/modules/s3_lifecycle.py | 2 - plugins/modules/s3_logging.py | 3 +- plugins/modules/s3_website.py | 2 +- plugins/modules/secretsmanager_secret.py | 1 - plugins/modules/ses_identity.py | 19 ++--- plugins/modules/sqs_queue.py | 2 +- plugins/modules/storagegateway_info.py | 53 +++++++------- plugins/modules/waf_condition.py | 2 +- plugins/modules/waf_rule.py | 4 +- plugins/modules/wafv2_ip_set.py | 26 +++---- plugins/modules/wafv2_ip_set_info.py | 12 ++-- plugins/modules/wafv2_resources.py | 12 ++-- plugins/modules/wafv2_resources_info.py | 12 ++-- plugins/modules/wafv2_rule_group.py | 4 +- plugins/modules/wafv2_rule_group_info.py | 12 ++-- plugins/modules/wafv2_web_acl.py | 28 ++++---- plugins/modules/wafv2_web_acl_info.py | 13 ++-- tests/unit/plugins/inventory/test_aws_mq.py | 12 ++-- 
.../dms_endpoint/test_compare_params.py | 69 +++++++++++++++++++ .../plugins/modules/test_data_pipeline.py | 4 +- ...st_directconnect_link_aggregation_group.py | 4 +- .../unit/plugins/modules/test_route53_wait.py | 12 ++-- 70 files changed, 295 insertions(+), 279 deletions(-) create mode 100644 changelogs/fragments/20250127-pylint.yml create mode 100644 tests/unit/plugins/modules/dms_endpoint/test_compare_params.py diff --git a/changelogs/fragments/20250127-pylint.yml b/changelogs/fragments/20250127-pylint.yml new file mode 100644 index 00000000000..80ac48cf19a --- /dev/null +++ b/changelogs/fragments/20250127-pylint.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - various modules - linting fixups (https://github.com/ansible-collections/community.aws/pull/2221). + - dms_endpoint - improve resilience of parameter comparison (https://github.com/ansible-collections/community.aws/pull/2221). diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py index eb56a348280..a355642e5be 100644 --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -299,7 +299,7 @@ try: import boto3 from botocore.client import Config -except ImportError as e: +except ImportError: pass from functools import wraps diff --git a/plugins/module_utils/base.py b/plugins/module_utils/base.py index 86b846c63be..68277fc0bfd 100644 --- a/plugins/module_utils/base.py +++ b/plugins/module_utils/base.py @@ -332,7 +332,7 @@ def _flush_update(self): if not self.module.check_mode: self._do_update_resource() - response = self._wait_for_update() + self._wait_for_update() self.updated_resource = self.get_resource() else: # (CHECK_MODE) self.updated_resource = self._normalize_resource(self._generate_updated_resource()) diff --git a/plugins/module_utils/ec2.py b/plugins/module_utils/ec2.py index 59b617f20f0..d8fc06d13d5 100644 --- a/plugins/module_utils/ec2.py +++ b/plugins/module_utils/ec2.py @@ -55,7 +55,7 @@ class BaseEc2Manager(Ec2Boto3Mixin, BaseResourceManager): # 
If the resource supports using "TagSpecifications" on creation we can TAGS_ON_CREATE = "TagSpecifications" - def __init__(self, module, id=None): + def __init__(self, module, resource_id=None): r""" Parameters: module (AnsibleAWSModule): An Ansible module. @@ -63,7 +63,7 @@ def __init__(self, module, id=None): super(BaseEc2Manager, self).__init__(module) self.client = self._create_client() self._tagging_updates = dict() - self.resource_id = id + self.resource_id = resource_id # Name parameter is unique (by region) and can not be modified. if self.resource_id: @@ -97,22 +97,22 @@ def _paginated_describe_tags(self, **params): return paginator.paginate(**params).build_full_result() @Boto3Mixin.aws_error_handler("list tags on resource") - def _describe_tags(self, id=None): - if not id: - id = self.resource_id - filters = ansible_dict_to_boto3_filter_list({"resource-id": id}) + def _describe_tags(self, resource_id=None): + if not resource_id: + resource_id = self.resource_id + filters = ansible_dict_to_boto3_filter_list({"resource-id": resource_id}) tags = self._paginated_describe_tags(Filters=filters) return tags - def _get_tags(self, id=None): - if id is None: - id = self.resource_id + def _get_tags(self, resource_id=None): + if resource_id is None: + resource_id = self.resource_id # If the Tags are available from the resource, then use them if self.TAGS_ON_RESOURCE: tags = self._preupdate_resource.get("Tags", []) # Otherwise we'll have to look them up else: - tags = self._describe_tags(id=id) + tags = self._describe_tags(resource_id=resource_id) return boto3_tag_list_to_ansible_dict(tags) def _do_tagging(self): diff --git a/plugins/module_utils/networkfirewall.py b/plugins/module_utils/networkfirewall.py index 19a372514df..5df81bd3208 100644 --- a/plugins/module_utils/networkfirewall.py +++ b/plugins/module_utils/networkfirewall.py @@ -977,7 +977,7 @@ def _do_update_resource(self): params["RuleGroup"] = resource if not self.module.check_mode: - response = 
self._update_rule_group(**params) + self._update_rule_group(**params) return True @@ -1349,7 +1349,7 @@ def _do_update_resource(self): params["FirewallPolicy"] = resource if not self.module.check_mode: - response = self._update_policy(**params) + self._update_policy(**params) return True diff --git a/plugins/modules/application_autoscaling_policy.py b/plugins/modules/application_autoscaling_policy.py index beb2247ac28..cddce1da1db 100644 --- a/plugins/modules/application_autoscaling_policy.py +++ b/plugins/modules/application_autoscaling_policy.py @@ -297,10 +297,7 @@ # Merge the results of the scalable target creation and policy deletion/creation # There's no risk in overriding values since mutual keys have the same values in our case def merge_results(scalable_target_result, policy_result): - if scalable_target_result["changed"] or policy_result["changed"]: - changed = True - else: - changed = False + changed = bool(scalable_target_result["changed"] or policy_result["changed"]) merged_response = scalable_target_result["response"].copy() merged_response.update(policy_result["response"]) diff --git a/plugins/modules/autoscaling_launch_config.py b/plugins/modules/autoscaling_launch_config.py index cd411e57606..a22e0de77c5 100644 --- a/plugins/modules/autoscaling_launch_config.py +++ b/plugins/modules/autoscaling_launch_config.py @@ -537,7 +537,7 @@ def create_launch_config(connection, module): ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get Security Group IDs") - except ValueError as e: + except ValueError: module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc()) user_data = module.params.get("user_data") user_data_path = module.params.get("user_data_path") @@ -571,7 +571,7 @@ def create_launch_config(connection, module): try: with open(user_data_path, "r") as user_data_file: user_data = user_data_file.read() - except IOError as e: + except IOError: 
module.fail_json(msg="Failed to open file for reading", exception=traceback.format_exc()) if volumes: diff --git a/plugins/modules/autoscaling_lifecycle_hook.py b/plugins/modules/autoscaling_lifecycle_hook.py index a77fcce0ad0..0177f8eacc2 100644 --- a/plugins/modules/autoscaling_lifecycle_hook.py +++ b/plugins/modules/autoscaling_lifecycle_hook.py @@ -296,8 +296,6 @@ def main(): connection = module.client("autoscaling") - changed = False - if state == "present": create_lifecycle_hook(connection, module) elif state == "absent": diff --git a/plugins/modules/autoscaling_policy.py b/plugins/modules/autoscaling_policy.py index 6d69d849226..6e26edf0cf3 100644 --- a/plugins/modules/autoscaling_policy.py +++ b/plugins/modules/autoscaling_policy.py @@ -494,10 +494,10 @@ def create_scaling_policy(connection, module): changed = True else: policy = policies[0] - for key in params: - if params[key] != policy.get(key): + for key, old_value in params.items(): + if old_value != policy.get(key): changed = True - before[key] = params[key] + before[key] = old_value after[key] = policy.get(key) if changed: diff --git a/plugins/modules/batch_job_definition.py b/plugins/modules/batch_job_definition.py index fb2b1996d04..49ace153f4a 100644 --- a/plugins/modules/batch_job_definition.py +++ b/plugins/modules/batch_job_definition.py @@ -385,12 +385,9 @@ def manage_state(module, batch_client): changed = False current_state = "absent" state = module.params["state"] - job_definition_name = module.params["job_definition_name"] action_taken = "none" response = None - check_mode = module.check_mode - # check if the job definition exists current_job_definition = get_current_job_definition(module, batch_client) if current_job_definition: diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py index 13718cfb896..9b695f72a1d 100644 --- a/plugins/modules/cloudfront_distribution.py +++ b/plugins/modules/cloudfront_distribution.py @@ -1769,12 +1769,12 @@ def 
validate_origins( origins, default_origin_domain_name, default_origin_path, - create_distribution, + create, purge_origins=False, ): try: if origins is None: - if default_origin_domain_name is None and not create_distribution: + if default_origin_domain_name is None and not create: if purge_origins: return None else: @@ -1784,7 +1784,7 @@ def validate_origins( else: origins = [] self.validate_is_list(origins, "origins") - if not origins and default_origin_domain_name is None and create_distribution: + if not origins and default_origin_domain_name is None and create: self.module.fail_json( msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one." ) diff --git a/plugins/modules/cloudfront_response_headers_policy.py b/plugins/modules/cloudfront_response_headers_policy.py index a7558e8a86d..1395b93fd10 100644 --- a/plugins/modules/cloudfront_response_headers_policy.py +++ b/plugins/modules/cloudfront_response_headers_policy.py @@ -163,19 +163,14 @@ def __init__(self, module): def find_response_headers_policy(self, name): try: policies = self.client.list_response_headers_policies()["ResponseHeadersPolicyList"]["Items"] - for policy in policies: if policy["ResponseHeadersPolicy"]["ResponseHeadersPolicyConfig"]["Name"] == name: - policy_id = policy["ResponseHeadersPolicy"]["Id"] # as the list_ request does not contain the Etag (which we need), we need to do another get_ request here - matching_policy = self.client.get_response_headers_policy(Id=policy["ResponseHeadersPolicy"]["Id"]) - break - else: - matching_policy = None + return self.client.get_response_headers_policy(Id=policy["ResponseHeadersPolicy"]["Id"]) - return matching_policy except (ClientError, BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error fetching policy information") + return None def create_response_header_policy(self, name, comment, cors_config, security_headers_config, custom_headers_config): cors_config = 
snake_dict_to_camel_dict(cors_config, capitalize_first=True) diff --git a/plugins/modules/codebuild_project.py b/plugins/modules/codebuild_project.py index 1f4630f73ca..1edc3470140 100644 --- a/plugins/modules/codebuild_project.py +++ b/plugins/modules/codebuild_project.py @@ -378,14 +378,12 @@ def do_update_project(client, params, formatted_params, found_project): def create_or_update_project(client, params): - resp = {} name = params["name"] # clean up params formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None)) # Check if project with that name already exists and if so update existing: found = describe_project(client=client, name=name) - changed = False if "name" not in found: return do_create_project(client, params, formatted_params) @@ -394,8 +392,6 @@ def create_or_update_project(client, params): def update_project(client, params): - name = params["name"] - try: resp = client.update_project(**params) return resp diff --git a/plugins/modules/config_aggregation_authorization.py b/plugins/modules/config_aggregation_authorization.py index 903d5a5e1fe..342057cf6e2 100644 --- a/plugins/modules/config_aggregation_authorization.py +++ b/plugins/modules/config_aggregation_authorization.py @@ -75,7 +75,7 @@ def resource_exists(client, module, params): def create_resource(client, module, params, result): try: - response = client.put_aggregation_authorization( + client.put_aggregation_authorization( AuthorizedAccountId=params["AuthorizedAccountId"], AuthorizedAwsRegion=params["AuthorizedAwsRegion"], ) @@ -97,7 +97,7 @@ def update_resource(client, module, params, result): if params != current_params: try: - response = client.put_aggregation_authorization( + client.put_aggregation_authorization( AuthorizedAccountId=params["AuthorizedAccountId"], AuthorizedAwsRegion=params["AuthorizedAwsRegion"], ) @@ -109,7 +109,7 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: 
- response = client.delete_aggregation_authorization( + client.delete_aggregation_authorization( AuthorizedAccountId=params["AuthorizedAccountId"], AuthorizedAwsRegion=params["AuthorizedAwsRegion"], ) diff --git a/plugins/modules/config_delivery_channel.py b/plugins/modules/config_delivery_channel.py index 1c3a3acdc49..f089b6e93e7 100644 --- a/plugins/modules/config_delivery_channel.py +++ b/plugins/modules/config_delivery_channel.py @@ -174,7 +174,7 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: - response = client.delete_delivery_channel(DeliveryChannelName=params["name"]) + client.delete_delivery_channel(DeliveryChannelName=params["name"]) result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: diff --git a/plugins/modules/config_recorder.py b/plugins/modules/config_recorder.py index 510bbaa2307..5cd0fdbc78d 100644 --- a/plugins/modules/config_recorder.py +++ b/plugins/modules/config_recorder.py @@ -106,7 +106,7 @@ def resource_exists(client, module, params): def create_resource(client, module, params, result): try: - response = client.put_configuration_recorder(ConfigurationRecorder=params) + client.put_configuration_recorder(ConfigurationRecorder=params) result["changed"] = True result["recorder"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result @@ -119,7 +119,7 @@ def update_resource(client, module, params, result): if params != current_params["ConfigurationRecorders"][0]: try: - response = client.put_configuration_recorder(ConfigurationRecorder=params) + client.put_configuration_recorder(ConfigurationRecorder=params) result["changed"] = True result["recorder"] = camel_dict_to_snake_dict(resource_exists(client, module, params)) return result @@ -129,7 +129,7 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: - response = 
client.delete_configuration_recorder(ConfigurationRecorderName=params["name"]) + client.delete_configuration_recorder(ConfigurationRecorderName=params["name"]) result["changed"] = True return result except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: diff --git a/plugins/modules/config_rule.py b/plugins/modules/config_rule.py index b86a528dd55..e218c018970 100644 --- a/plugins/modules/config_rule.py +++ b/plugins/modules/config_rule.py @@ -165,7 +165,7 @@ def update_resource(client, module, params, result): def delete_resource(client, module, params, result): try: - response = client.delete_config_rule( + client.delete_config_rule( ConfigRuleName=params["ConfigRuleName"], aws_retry=True, ) @@ -202,7 +202,6 @@ def main(): result = {"changed": False} name = module.params.get("name") - resource_type = module.params.get("resource_type") state = module.params.get("state") params = {} diff --git a/plugins/modules/data_pipeline.py b/plugins/modules/data_pipeline.py index 85849324f33..20063b40b98 100644 --- a/plugins/modules/data_pipeline.py +++ b/plugins/modules/data_pipeline.py @@ -312,10 +312,7 @@ def check_dp_exists(client, dp_id): """ try: # pipeline_description raises DataPipelineNotFound - if pipeline_description(client, dp_id): - return True - else: - return False + return bool(pipeline_description(client, dp_id)) except DataPipelineNotFound: return False @@ -331,10 +328,7 @@ def check_dp_status(client, dp_id, status): """ if not isinstance(status, list): raise AssertionError() - if pipeline_field(client, dp_id, field="@pipelineState") in status: - return True - else: - return False + return bool(pipeline_field(client, dp_id, field="@pipelineState") in status) def pipeline_status_timeout(client, dp_id, status, timeout): diff --git a/plugins/modules/directconnect_gateway.py b/plugins/modules/directconnect_gateway.py index b231f0e8f44..4524a3c9cd3 100644 --- a/plugins/modules/directconnect_gateway.py +++ 
b/plugins/modules/directconnect_gateway.py @@ -135,8 +135,7 @@ def wait_for_status(client, module, gateway_id, virtual_gateway_id, status): if response["directConnectGatewayAssociations"][0]["associationState"] == status: status_achieved = True break - else: - time.sleep(polling_increment_secs) + time.sleep(polling_increment_secs) else: status_achieved = True break @@ -304,7 +303,6 @@ def ensure_absent(client, module): # then a match is considered to have been found and we will not create another dxgw. changed = False - result = dict() dx_gateway_id = module.params.get("direct_connect_gateway_id") existing_dxgw = find_dx_gateway(client, module, dx_gateway_id) if existing_dxgw is not None: @@ -333,7 +331,6 @@ def ensure_absent(client, module): ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to delete gateway") - result = resp["directConnectGateway"] return changed diff --git a/plugins/modules/directconnect_link_aggregation_group.py b/plugins/modules/directconnect_link_aggregation_group.py index 99224fee0f2..19870bcc465 100644 --- a/plugins/modules/directconnect_link_aggregation_group.py +++ b/plugins/modules/directconnect_link_aggregation_group.py @@ -358,7 +358,7 @@ def disassociate_vis(client, lag_id, virtual_interfaces): for vi in virtual_interfaces: delete_virtual_interface(client, vi["virtualInterfaceId"]) try: - response = client.delete_virtual_interface(virtualInterfaceId=vi["virtualInterfaceId"]) + client.delete_virtual_interface(virtualInterfaceId=vi["virtualInterfaceId"]) except botocore.exceptions.ClientError as e: raise DirectConnectError( msg=f"Could not delete virtual interface {vi} to delete link aggregation group {lag_id}.", diff --git a/plugins/modules/dms_endpoint.py b/plugins/modules/dms_endpoint.py index f67a1263eaf..19612b90c61 100644 --- a/plugins/modules/dms_endpoint.py +++ b/plugins/modules/dms_endpoint.py @@ -516,21 +516,17 @@ def compare_params(param_described): """ 
param_described = dict(param_described) modparams = create_module_params() - # modify can't update tags - param_described.pop("Tags", None) - modparams.pop("Tags", None) - changed = False - for paramname in modparams: - if ( - paramname == "Password" - or paramname in param_described - and param_described[paramname] == modparams[paramname] - or str(param_described[paramname]).lower() == modparams[paramname] - ): - pass - else: - changed = True - return changed + + # Note: this currently isn't as relaxed for the nested settings, (eg. S3Settings) + for param_name, param_value in modparams.items(): + # modify can't update tags or password + if param_name in ["Tags", "Password"]: + continue + if param_name not in param_described: + return True + if param_described[param_name] != param_value and str(param_described[param_name]).lower() != param_value: + return True + return False def modify_dms_endpoint(connection, endpoint): diff --git a/plugins/modules/dms_replication_subnet_group.py b/plugins/modules/dms_replication_subnet_group.py index 772a54aa1fd..c4f9318a628 100644 --- a/plugins/modules/dms_replication_subnet_group.py +++ b/plugins/modules/dms_replication_subnet_group.py @@ -134,10 +134,10 @@ def compare_params(module, param_described): param_described.pop("VpcId") if "SubnetGroupStatus" in param_described.keys(): param_described.pop("SubnetGroupStatus") - for paramname in modparams.keys(): - if paramname in param_described.keys() and param_described.get(paramname) == modparams[paramname]: + for param_name, param_value in modparams.items(): + if param_name in param_described and param_described.get(param_name) == param_value: pass - elif paramname == "SubnetIds": + elif param_name == "SubnetIds": subnets = [] for subnet in param_described.get("Subnets"): subnets.append(subnet.get("SubnetIdentifier")) diff --git a/plugins/modules/dynamodb_table.py b/plugins/modules/dynamodb_table.py index 86ba2f05eb2..8878d6183dc 100644 --- a/plugins/modules/dynamodb_table.py +++ 
b/plugins/modules/dynamodb_table.py @@ -756,8 +756,8 @@ def _global_index_changes(current_table): index_changes = list() # TODO (future) it would be nice to add support for deleting an index - for name in global_index_map: - idx = dict(_generate_index(global_index_map[name], include_throughput=include_throughput)) + for name, current_value in global_index_map.items(): + idx = dict(_generate_index(current_value, include_throughput=include_throughput)) if name not in current_global_index_map: index_changes.append(dict(Create=idx)) else: @@ -765,7 +765,7 @@ def _global_index_changes(current_table): # TODO (future) it would be nice to throw a deprecation here # rather than dropping other changes on the floor _current = current_global_index_map[name] - _new = global_index_map[name] + _new = current_value if include_throughput: change = dict(_throughput_changes(_current, _new)) @@ -943,7 +943,7 @@ def create_table(): AttributeDefinitions=attributes, KeySchema=key_schema, Tags=tags, - BillingMode=billing_mode + BillingMode=billing_mode, # TODO (future) # StreamSpecification, # SSESpecification, diff --git a/plugins/modules/dynamodb_ttl.py b/plugins/modules/dynamodb_ttl.py index eca236cf49a..ba39e8a3935 100644 --- a/plugins/modules/dynamodb_ttl.py +++ b/plugins/modules/dynamodb_ttl.py @@ -126,7 +126,6 @@ def main(): module.fail_json_aws(e, msg="Failed to connect to AWS") result = {"changed": False} - state = module.params["state"] # wrap all our calls to catch the standard exceptions. We don't pass `module` in to the # methods so it's easier to do here. 
diff --git a/plugins/modules/ec2_carrier_gateway.py b/plugins/modules/ec2_carrier_gateway.py index 97d62b5fc42..9e0e3fcb379 100644 --- a/plugins/modules/ec2_carrier_gateway.py +++ b/plugins/modules/ec2_carrier_gateway.py @@ -206,7 +206,7 @@ def ensure_cagw_present(self, vpc_id, tags, purge_tags): response = self._connection.create_carrier_gateway(VpcId=vpc_id, aws_retry=True) cagw = camel_dict_to_snake_dict(response["CarrierGateway"]) self._results["changed"] = True - except is_boto3_error_message("You must be opted into a wavelength zone to create a carrier gateway.") as e: + except is_boto3_error_message("You must be opted into a wavelength zone to create a carrier gateway."): self._module.fail_json(msg="You must be opted into a wavelength zone to create a carrier gateway") except botocore.exceptions.WaiterError as e: self._module.fail_json_aws(e, msg="No Carrier Gateway exists.") diff --git a/plugins/modules/ec2_win_password.py b/plugins/modules/ec2_win_password.py index a9ca8e94ca1..78dad599625 100644 --- a/plugins/modules/ec2_win_password.py +++ b/plugins/modules/ec2_win_password.py @@ -183,18 +183,18 @@ def ec2_win_password(module): except IOError as e: # Handle bad files module.fail_json(msg=f"I/O error ({int(e.errno)}) opening key file: {e.strerror}") - except (ValueError, TypeError) as e: + except (ValueError, TypeError): # Handle issues loading key module.fail_json(msg="unable to parse key file") elif b_key_data is not None and key_file is None: try: key = load_pem_private_key(b_key_data, b_key_passphrase, default_backend()) - except (ValueError, TypeError) as e: + except (ValueError, TypeError): module.fail_json(msg="unable to parse key data") try: decrypted = key.decrypt(decoded, PKCS1v15()) - except ValueError as e: + except ValueError: decrypted = None if decrypted is None: diff --git a/plugins/modules/ecs_ecr.py b/plugins/modules/ecs_ecr.py index 545b8274230..8804d2a58bc 100644 --- a/plugins/modules/ecs_ecr.py +++ b/plugins/modules/ecs_ecr.py @@ 
-523,7 +523,7 @@ def run(ecr, params): if scan_on_push != original_scan_on_push["imageScanningConfiguration"]["scanOnPush"]: result["changed"] = True result["repository"]["imageScanningConfiguration"]["scanOnPush"] = scan_on_push - response = ecr.put_image_scanning_configuration(registry_id, name, scan_on_push) + ecr.put_image_scanning_configuration(registry_id, name, scan_on_push) elif state == "absent": result["name"] = name diff --git a/plugins/modules/ecs_service_info.py b/plugins/modules/ecs_service_info.py index 02a6abff207..4e3c62c7913 100644 --- a/plugins/modules/ecs_service_info.py +++ b/plugins/modules/ecs_service_info.py @@ -204,11 +204,11 @@ def extract_service_from(self, service): return service -def chunks(l, n): - """Yield successive n-sized chunks from l.""" - """ https://stackoverflow.com/a/312464 """ - for i in range(0, len(l), n): - yield l[i:i + n] # fmt: skip +def chunks(services, chunk_size): + """Yield successive chunk_size-sized chunks from services. + https://stackoverflow.com/a/312464""" + for i in range(0, len(services), chunk_size): + yield services[i:i + chunk_size] # fmt: skip def main(): diff --git a/plugins/modules/ecs_taskdefinition.py b/plugins/modules/ecs_taskdefinition.py index 9f8871def49..d448228bf09 100644 --- a/plugins/modules/ecs_taskdefinition.py +++ b/plugins/modules/ecs_taskdefinition.py @@ -820,7 +820,7 @@ def describe_task(self, task_name): try: response = self.ecs.describe_task_definition(aws_retry=True, taskDefinition=task_name) return response["taskDefinition"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): return None def register_task( diff --git a/plugins/modules/efs.py b/plugins/modules/efs.py index 32992c4a3c2..c11a66eca01 100644 --- a/plugins/modules/efs.py +++ b/plugins/modules/efs.py @@ -248,7 +248,7 @@ try: import botocore -except ImportError as e: +except ImportError: pass # Handled by 
AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict @@ -294,13 +294,13 @@ def get_file_systems(self, **kwargs): for item in items: item["Name"] = item["CreationToken"] item["CreationTime"] = str(item["CreationTime"]) - """ - In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it - AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose - And new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount) - AWS documentation is available here: - https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html - """ + + # In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it + # AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose + # And new FilesystemAddress variable is introduced for direct use with other modules (e.g. 
mount) + # AWS documentation is available here: + # https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html + item["MountPoint"] = f".{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" item["FilesystemAddress"] = f"{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" if "Timestamp" in item["SizeInBytes"]: @@ -501,7 +501,7 @@ def update_lifecycle_policy(self, name, transition_to_ia): else: LifecyclePolicies = [{"TransitionToIA": "AFTER_" + transition_to_ia + "_DAYS"}] if current_policies.get("LifecyclePolicies") != LifecyclePolicies: - response = self.connection.put_lifecycle_configuration( + self.connection.put_lifecycle_configuration( FileSystemId=fs_id, LifecyclePolicies=LifecyclePolicies, ) @@ -663,8 +663,7 @@ def iterate_all(attr, map_method, **kwargs): sleep(wait) wait = wait * 2 continue - else: - raise + raise def targets_equal(keys, a, b): diff --git a/plugins/modules/efs_info.py b/plugins/modules/efs_info.py index 3a170a3915b..5c5f5529303 100644 --- a/plugins/modules/efs_info.py +++ b/plugins/modules/efs_info.py @@ -272,13 +272,13 @@ def get_file_systems(self, file_system_id=None, creation_token=None): results = list() for item in file_systems: item["CreationTime"] = str(item["CreationTime"]) - """ - In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it - AWS updated it and now there is no need to add a suffix. MountPoint is left for back-compatibility purpose - And new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount) - AWS documentation is available here: - U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html) - """ + + # In the time when MountPoint was introduced there was a need to add a suffix of network path before one could use it + # AWS updated it and now there is no need to add a suffix. 
MountPoint is left for back-compatibility purpose + # And new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount) + # AWS documentation is available here: + # U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html) + item["MountPoint"] = f".{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" item["FilesystemAddress"] = f"{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" diff --git a/plugins/modules/eks_cluster.py b/plugins/modules/eks_cluster.py index a445def55c3..978a595e3fd 100644 --- a/plugins/modules/eks_cluster.py +++ b/plugins/modules/eks_cluster.py @@ -218,7 +218,7 @@ def ensure_present(client, module): if module.params["tags"]: params["tags"] = module.params["tags"] cluster = client.create_cluster(**params)["cluster"] - except botocore.exceptions.EndpointConnectionError as e: + except botocore.exceptions.EndpointConnectionError: module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg=f"Couldn't create cluster {name}") @@ -241,7 +241,7 @@ def ensure_absent(client, module): if not module.check_mode: try: client.delete_cluster(name=module.params["name"]) - except botocore.exceptions.EndpointConnectionError as e: + except botocore.exceptions.EndpointConnectionError: module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg=f"Couldn't delete cluster {name}") @@ -258,7 +258,7 @@ def get_cluster(client, module): return client.describe_cluster(name=name)["cluster"] except is_boto3_error_code("ResourceNotFoundException"): return None - except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except + except botocore.exceptions.EndpointConnectionError: # pylint: disable=duplicate-except 
module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") except ( botocore.exceptions.BotoCoreError, diff --git a/plugins/modules/elasticache.py b/plugins/modules/elasticache.py index d45509cb606..fd823fcdda7 100644 --- a/plugins/modules/elasticache.py +++ b/plugins/modules/elasticache.py @@ -143,7 +143,6 @@ class ElastiCacheManager: - """Handles elasticache creation and destruction""" EXIST_STATUSES = ["available", "creating", "rebooting", "modifying"] diff --git a/plugins/modules/elasticache_parameter_group.py b/plugins/modules/elasticache_parameter_group.py index fa7f87a2f78..8cd749fe354 100644 --- a/plugins/modules/elasticache_parameter_group.py +++ b/plugins/modules/elasticache_parameter_group.py @@ -174,12 +174,12 @@ def check_valid_modification(module, values, modifiable_params): str_to_type = {"integer": int, "string": string_types} expected_type = str_to_type[modifiable_params[parameter][1]] if not isinstance(new_value, expected_type): - if expected_type == str: + if expected_type is str: if isinstance(new_value, bool): values[parameter] = "yes" if new_value else "no" else: values[parameter] = to_text(new_value) - elif expected_type == int: + elif expected_type is int: if isinstance(new_value, bool): values[parameter] = 1 if new_value else 0 else: @@ -284,7 +284,7 @@ def get_info(conn, name): try: data = conn.describe_cache_parameters(CacheParameterGroupName=name) return data - except botocore.exceptions.ClientError as e: + except botocore.exceptions.ClientError: return False diff --git a/plugins/modules/elasticbeanstalk_app.py b/plugins/modules/elasticbeanstalk_app.py index 1aaa4c4d8fe..4d2e7c547e0 100644 --- a/plugins/modules/elasticbeanstalk_app.py +++ b/plugins/modules/elasticbeanstalk_app.py @@ -111,7 +111,6 @@ def list_apps(ebs, app_name, module): def check_app(ebs, app, module): - app_name = module.params["app_name"] description = module.params["description"] state = module.params["state"] terminate_by_force = 
module.params["terminate_by_force"] @@ -173,7 +172,7 @@ def main(): if state == "present": if app is None: try: - create_app = ebs.create_application(**filter_empty(ApplicationName=app_name, Description=description)) + ebs.create_application(**filter_empty(ApplicationName=app_name, Description=description)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Could not create application") diff --git a/plugins/modules/elb_target.py b/plugins/modules/elb_target.py index 22074d496de..9cf666b0aaa 100644 --- a/plugins/modules/elb_target.py +++ b/plugins/modules/elb_target.py @@ -271,7 +271,7 @@ def deregister_target(connection, module): try: deregister_target_with_backoff(connection, target_group_arn, target) changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): module.fail_json(msg=f"Unable to deregister target {target}") else: if current_target_reason != "Target.NotRegistered" and current_target_state != "draining": diff --git a/plugins/modules/elb_target_group.py b/plugins/modules/elb_target_group.py index 71a859ead28..c5b49a23f92 100644 --- a/plugins/modules/elb_target_group.py +++ b/plugins/modules/elb_target_group.py @@ -506,8 +506,7 @@ def wait_for_status(connection, module, target_group_arn, targets, status): if response["TargetHealthDescriptions"][0]["TargetHealth"]["State"] == status: status_achieved = True break - else: - time.sleep(polling_increment_secs) + time.sleep(polling_increment_secs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't describe target health") @@ -835,7 +834,7 @@ def create_or_update_target_group(connection, module): if changed: if target.get("Id"): - response = connection.register_targets( + connection.register_targets( TargetGroupArn=target_group["TargetGroupArn"], Targets=[{"Id": 
target["Id"]}], aws_retry=True, @@ -916,7 +915,7 @@ def create_or_update_target_group(connection, module): else: try: target = module.params.get("targets")[0] - response = connection.register_targets( + connection.register_targets( TargetGroupArn=target_group["TargetGroupArn"], Targets=[{"Id": target["Id"]}], aws_retry=True ) changed = True diff --git a/plugins/modules/iam_server_certificate.py b/plugins/modules/iam_server_certificate.py index 6a7734acacb..0d9dcfff7dc 100644 --- a/plugins/modules/iam_server_certificate.py +++ b/plugins/modules/iam_server_certificate.py @@ -247,7 +247,7 @@ def delete_server_certificate(current_cert): name = module.params.get("name") try: - result = client.delete_server_certificate( + client.delete_server_certificate( aws_retry=True, ServerCertificateName=name, ) @@ -340,10 +340,8 @@ def main(): state = module.params.get("state") name = module.params.get("name") - path = module.params.get("path") new_name = module.params.get("new_name") new_path = module.params.get("new_path") - dup_ok = module.params.get("dup_ok") current_cert = get_server_certificate(name) diff --git a/plugins/modules/kinesis_stream.py b/plugins/modules/kinesis_stream.py index d1ba65c86b2..2c1f5306159 100644 --- a/plugins/modules/kinesis_stream.py +++ b/plugins/modules/kinesis_stream.py @@ -298,7 +298,7 @@ def wait_for_status(client, stream_name, status, wait_timeout=300, check_mode=Fa status_achieved = True break - elif status != "DELETING": + if status != "DELETING": if find_success and stream: if stream.get("StreamStatus") == status: status_achieved = True @@ -412,7 +412,6 @@ def update_tags(client, stream_name, tags, check_mode=False): ) if not delete_success: return delete_success, changed, delete_msg - tag_msg = "Tags removed" if tags_to_set: create_success, create_msg = tags_action( @@ -958,7 +957,6 @@ def start_stream_encryption( success = False changed = False err_msg = "" - params = {"StreamName": stream_name} results = dict() stream_found, stream_msg, 
current_stream = find_stream(client, stream_name) @@ -1033,7 +1031,6 @@ def stop_stream_encryption( success = False changed = False err_msg = "" - params = {"StreamName": stream_name} results = dict() stream_found, stream_msg, current_stream = find_stream(client, stream_name) diff --git a/plugins/modules/lightsail_snapshot.py b/plugins/modules/lightsail_snapshot.py index 1d0d178aa49..b2c7c5bba29 100644 --- a/plugins/modules/lightsail_snapshot.py +++ b/plugins/modules/lightsail_snapshot.py @@ -166,7 +166,7 @@ def create_snapshot(module, client): def delete_snapshot(module, client): snapshot = find_instance_snapshot_info(module, client, module.params.get("snapshot_name")) if module.check_mode or snapshot is None: - changed = not (snapshot is None) + changed = snapshot is not None instance = snapshot if changed else {} module.exit_json(changed=changed, instance=instance) diff --git a/plugins/modules/mq_broker.py b/plugins/modules/mq_broker.py index 5a97fda9264..734e587cdd9 100644 --- a/plugins/modules/mq_broker.py +++ b/plugins/modules/mq_broker.py @@ -323,9 +323,6 @@ def _fill_kwargs(module, apply_defaults=True, ignore_create_params=False): def __list_needs_change(current, desired): if len(current) != len(desired): return True - # equal length: - c_sorted = sorted(current) - d_sorted = sorted(desired) for index, value in enumerate(current): if value != desired[index]: return True diff --git a/plugins/modules/mq_user.py b/plugins/modules/mq_user.py index 68e1fd62912..54c7f249956 100644 --- a/plugins/modules/mq_user.py +++ b/plugins/modules/mq_user.py @@ -98,7 +98,7 @@ try: import botocore -except ImportError as ex: +except ImportError: # handled by AnsibleAWSModule pass @@ -209,7 +209,7 @@ def ensure_user_present(conn, module): if not module.check_mode: kwargs["BrokerId"] = module.params["broker_id"] kwargs["Username"] = module.params["username"] - response = _update_user(conn, module, kwargs) + _update_user(conn, module, kwargs) # changed = True # diff --git 
a/plugins/modules/mq_user_info.py b/plugins/modules/mq_user_info.py index 64cf92da744..1d38e98d746 100644 --- a/plugins/modules/mq_user_info.py +++ b/plugins/modules/mq_user_info.py @@ -82,7 +82,7 @@ try: import botocore -except ImportError as ex: +except ImportError: # handled by AnsibleAWSModule pass diff --git a/plugins/modules/msk_cluster.py b/plugins/modules/msk_cluster.py index 9ecf053f87f..5132ede011c 100644 --- a/plugins/modules/msk_cluster.py +++ b/plugins/modules/msk_cluster.py @@ -778,9 +778,7 @@ def main(): msg="The number of broker nodes must be a multiple of availability zones in the subnets parameter" ) if len(module.params["name"]) > 64: - module.fail_json( - module.fail_json(msg=f"Cluster name \"{module.params['name']}\" exceeds 64 character limit") - ) + module.fail_json(msg=f'Cluster name "{module.params["name"]}" exceeds 64 character limit') changed, response = create_or_update_cluster(client, module) elif module.params["state"] == "absent": changed, response = delete_cluster(client, module) diff --git a/plugins/modules/networkfirewall_policy.py b/plugins/modules/networkfirewall_policy.py index c742c95463b..ca0e5624754 100644 --- a/plugins/modules/networkfirewall_policy.py +++ b/plugins/modules/networkfirewall_policy.py @@ -395,8 +395,6 @@ def main(): manager.set_wait(module.params.get("wait", None)) manager.set_wait_timeout(module.params.get("wait_timeout", None)) - rule_order = module.params.get("stateful_rule_order") - if state == "absent": manager.delete() else: @@ -406,7 +404,7 @@ def main(): manager.set_custom_stateless_actions( module.params.get("stateless_custom_actions", None), module.params.get("purge_stateless_custom_actions", True), - ), + ) manager.set_stateful_rule_order(module.params.get("stateful_rule_order", None)) manager.set_stateful_rule_groups(module.params.get("stateful_rule_groups", None)) manager.set_stateless_rule_groups(module.params.get("stateless_rule_groups", None)) diff --git 
a/plugins/modules/opensearch_info.py b/plugins/modules/opensearch_info.py index 98fce3e03c9..6fcd4366adf 100644 --- a/plugins/modules/opensearch_info.py +++ b/plugins/modules/opensearch_info.py @@ -478,7 +478,7 @@ def domain_info(client, module): try: current_domain_tags = client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"] domain["Tags"] = boto3_tag_list_to_ansible_dict(current_domain_tags) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): # This could potentially happen if a domain is deleted between the time # its domain status was queried and the tags were queried. domain["Tags"] = {} diff --git a/plugins/modules/route53_wait.py b/plugins/modules/route53_wait.py index 6b72681d4c1..2aeb2272cb4 100644 --- a/plugins/modules/route53_wait.py +++ b/plugins/modules/route53_wait.py @@ -144,9 +144,9 @@ def main(): changed_results = [] try: - for id, result in detect_task_results(result_in): + for description, result in detect_task_results(result_in): if result.get("wait_id"): - changed_results.append((id, result["wait_id"])) + changed_results.append((description, result["wait_id"])) except ValueError as exc: module.fail_json( msg=f"The value passed as result does not seem to be a registered route53 result: {to_native(exc)}" diff --git a/plugins/modules/s3_cors.py b/plugins/modules/s3_cors.py index d153c7df823..c9cf2d0546a 100644 --- a/plugins/modules/s3_cors.py +++ b/plugins/modules/s3_cors.py @@ -125,7 +125,7 @@ def create_or_update_bucket_cors(connection, module): if changed: try: - cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={"CORSRules": new_camel_rules}) + connection.put_bucket_cors(Bucket=name, CORSConfiguration={"CORSRules": new_camel_rules}) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=f"Unable to update CORS for bucket {name}") @@ -137,7 +137,7 @@ def destroy_bucket_cors(connection, module): 
changed = False try: - cors = connection.delete_bucket_cors(Bucket=name) + connection.delete_bucket_cors(Bucket=name) changed = True except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg=f"Unable to delete CORS for bucket {name}") diff --git a/plugins/modules/s3_lifecycle.py b/plugins/modules/s3_lifecycle.py index 64bce4516e1..e22c0b4624f 100644 --- a/plugins/modules/s3_lifecycle.py +++ b/plugins/modules/s3_lifecycle.py @@ -302,7 +302,6 @@ def filters_are_equal(filter1, filter2): def build_rule(client, module): - name = module.params.get("name") abort_incomplete_multipart_upload_days = module.params.get("abort_incomplete_multipart_upload_days") expiration_date = parse_date(module.params.get("expiration_date")) expiration_days = module.params.get("expiration_days") @@ -321,7 +320,6 @@ def build_rule(client, module): transition_date = parse_date(module.params.get("transition_date")) transition_days = module.params.get("transition_days") transitions = module.params.get("transitions") - purge_transitions = module.params.get("purge_transitions") if maximum_object_size is not None or minimum_object_size is not None: and_dict = dict(Prefix=prefix) diff --git a/plugins/modules/s3_logging.py b/plugins/modules/s3_logging.py index 3a78749945f..08e04e03c01 100644 --- a/plugins/modules/s3_logging.py +++ b/plugins/modules/s3_logging.py @@ -168,7 +168,6 @@ def enable_bucket_logging(connection, module): def disable_bucket_logging(connection, module): bucket_name = module.params.get("name") - changed = False try: bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name) @@ -182,7 +181,7 @@ def disable_bucket_logging(connection, module): module.exit_json(changed=True) try: - response = AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidTargetBucketForLogging"])( + AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidTargetBucketForLogging"])( connection.put_bucket_logging )(Bucket=bucket_name, BucketLoggingStatus={}) except 
(botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: diff --git a/plugins/modules/s3_website.py b/plugins/modules/s3_website.py index 1c212d11789..fb590b658ea 100644 --- a/plugins/modules/s3_website.py +++ b/plugins/modules/s3_website.py @@ -254,7 +254,7 @@ def enable_or_update_bucket_as_website(client_connection, resource_connection, m changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update bucket website configuration") - except KeyError as e: + except KeyError: try: bucket_website.put( WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests) diff --git a/plugins/modules/secretsmanager_secret.py b/plugins/modules/secretsmanager_secret.py index fb2ff8ebe96..e4fc4d08cbb 100644 --- a/plugins/modules/secretsmanager_secret.py +++ b/plugins/modules/secretsmanager_secret.py @@ -378,7 +378,6 @@ def remove_replication(self, name, regions): if self.module.check_mode: self.module.exit_json(changed=True) try: - replica_regions = [] response = self.client.remove_regions_from_replication(SecretId=name, RemoveReplicaRegions=regions) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to replicate secret") diff --git a/plugins/modules/ses_identity.py b/plugins/modules/ses_identity.py index 785519bd3ba..13e88808fa1 100644 --- a/plugins/modules/ses_identity.py +++ b/plugins/modules/ses_identity.py @@ -275,16 +275,17 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel # See: https://github.com/ansible/ansible/issues/36065 if identity in notification_attributes: break - else: - # Paranoia check for coding errors, we only requested one identity, so if we get a different one - # something has gone very wrong. 
- if len(notification_attributes) != 0: - module.fail_json( - msg="Unexpected identity found in notification attributes, expected {0} but got {1!r}.".format( - identity, - notification_attributes.keys(), - ) + + # Paranoia check for coding errors, we only requested one identity, so if we get a different one + # something has gone very wrong. + if len(notification_attributes) != 0: + module.fail_json( + msg="Unexpected identity found in notification attributes, expected {0} but got {1!r}.".format( + identity, + notification_attributes.keys(), ) + ) + time.sleep(retryDelay) if identity not in notification_attributes: return None diff --git a/plugins/modules/sqs_queue.py b/plugins/modules/sqs_queue.py index ad3ce68a7ce..4c08c0249c0 100644 --- a/plugins/modules/sqs_queue.py +++ b/plugins/modules/sqs_queue.py @@ -462,7 +462,7 @@ def update_tags(client, queue_url, module): try: existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)["Tags"] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError) as e: + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, KeyError): existing_tags = {} tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) diff --git a/plugins/modules/storagegateway_info.py b/plugins/modules/storagegateway_info.py index 55b7c4685d4..cafe16414da 100644 --- a/plugins/modules/storagegateway_info.py +++ b/plugins/modules/storagegateway_info.py @@ -206,11 +206,10 @@ def fetch(self): self.module.exit_json(gateways=gateways) - """ - List all storage gateways for the AWS endpoint. - """ - def list_gateways(self): + """ + List all storage gateways for the AWS endpoint. 
+ """ try: paginator = self.client.get_paginator("list_gateways") response = paginator.paginate( @@ -228,13 +227,12 @@ def list_gateways(self): except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't list storage gateways") - """ - Read file share objects from AWS API response. - Drop the gateway_arn attribute from response, as it will be duplicate with parent object. - """ - @staticmethod def _read_gateway_fileshare_response(fileshares, aws_reponse): + """ + Read file share objects from AWS API response. + Drop the gateway_arn attribute from response, as it will be duplicate with parent object. + """ for share in aws_reponse["FileShareInfoList"]: share_obj = camel_dict_to_snake_dict(share) if "gateway_arn" in share_obj: @@ -243,11 +241,10 @@ def _read_gateway_fileshare_response(fileshares, aws_reponse): return aws_reponse["NextMarker"] if "NextMarker" in aws_reponse else None - """ - List file shares attached to AWS storage gateway when in S3 mode. - """ - def list_gateway_file_shares(self, gateway): + """ + List file shares attached to AWS storage gateway when in S3 mode. + """ try: response = self.client.list_file_shares(GatewayARN=gateway["gateway_arn"], Limit=100) @@ -261,11 +258,10 @@ def list_gateway_file_shares(self, gateway): except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't list gateway file shares") - """ - List storage gateway local disks - """ - def list_local_disks(self, gateway): + """ + List storage gateway local disks + """ try: gateway["local_disks"] = [ camel_dict_to_snake_dict(disk) @@ -274,13 +270,12 @@ def list_local_disks(self, gateway): except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks") - """ - Read tape objects from AWS API response. - Drop the gateway_arn attribute from response, as it will be duplicate with parent object. 
- """ - @staticmethod def _read_gateway_tape_response(tapes, aws_response): + """ + Read tape objects from AWS API response. + Drop the gateway_arn attribute from response, as it will be duplicate with parent object. + """ for tape in aws_response["TapeInfos"]: tape_obj = camel_dict_to_snake_dict(tape) if "gateway_arn" in tape_obj: @@ -289,11 +284,10 @@ def _read_gateway_tape_response(tapes, aws_response): return aws_response["Marker"] if "Marker" in aws_response else None - """ - List VTL & VTS attached to AWS storage gateway in VTL mode - """ - def list_gateway_vtl(self, gateway): + """ + List VTL & VTS attached to AWS storage gateway in VTL mode + """ try: response = self.client.list_tapes(Limit=100) @@ -307,11 +301,10 @@ def list_gateway_vtl(self, gateway): except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't list storage gateway tapes") - """ - List volumes attached to AWS storage gateway in CACHED or STORAGE mode - """ - def list_gateway_volumes(self, gateway): + """ + List volumes attached to AWS storage gateway in CACHED or STORAGE mode + """ try: paginator = self.client.get_paginator("list_volumes") response = paginator.paginate( diff --git a/plugins/modules/waf_condition.py b/plugins/modules/waf_condition.py index 560ddf5c150..a1fd0235334 100644 --- a/plugins/modules/waf_condition.py +++ b/plugins/modules/waf_condition.py @@ -694,7 +694,7 @@ def find_and_update_condition(self, condition_set_id): update["Updates"] = missing + extra func = getattr(self.client, "update_" + self.method_suffix) try: - result = run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True) + run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Could not update condition") return changed, self.get_condition_by_id(condition_set_id) diff --git a/plugins/modules/waf_rule.py 
b/plugins/modules/waf_rule.py index 87a02bbbdda..d2a511abfa5 100644 --- a/plugins/modules/waf_rule.py +++ b/plugins/modules/waf_rule.py @@ -242,8 +242,8 @@ def find_and_update_rule(client, module, rule_id): [ format_for_deletion(condition) for condition in existing_conditions[condition_type].values() - if not all_conditions[condition_type][condition["data_id"]]["name"] - in desired_conditions[condition_type] + if all_conditions[condition_type][condition["data_id"]]["name"] + not in desired_conditions[condition_type] ] ) diff --git a/plugins/modules/wafv2_ip_set.py b/plugins/modules/wafv2_ip_set.py index b96ba0cb1c1..9ac9d3b5ba6 100644 --- a/plugins/modules/wafv2_ip_set.py +++ b/plugins/modules/wafv2_ip_set.py @@ -135,7 +135,7 @@ def __init__(self, wafv2, name, scope, fail_json_aws): self.name = name self.scope = scope self.fail_json_aws = fail_json_aws - self.existing_set, self.id, self.locktoken, self.arn = self.get_set() + self.existing_set, self.set_id, self.locktoken, self.arn = self.get_set() def description(self): return self.existing_set.get("Description") @@ -150,7 +150,7 @@ def get(self): def remove(self): try: - response = self.wafv2.delete_ip_set(Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken) + self.wafv2.delete_ip_set(Name=self.name, Scope=self.scope, Id=self.set_id, LockToken=self.locktoken) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to remove wafv2 ip set.") return {} @@ -170,18 +170,18 @@ def create(self, description, ip_address_version, addresses, tags): req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) try: - response = self.wafv2.create_ip_set(**req_obj) + self.wafv2.create_ip_set(**req_obj) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to create wafv2 ip set.") - self.existing_set, self.id, self.locktoken, self.arn = self.get_set() + self.existing_set, self.set_id, self.locktoken, self.arn = self.get_set() return self._format_set(self.existing_set) def 
update(self, description, addresses): req_obj = { "Name": self.name, "Scope": self.scope, - "Id": self.id, + "Id": self.set_id, "Addresses": addresses, "LockToken": self.locktoken, } @@ -190,33 +190,33 @@ def update(self, description, addresses): req_obj["Description"] = description try: - response = self.wafv2.update_ip_set(**req_obj) + self.wafv2.update_ip_set(**req_obj) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to update wafv2 ip set.") - self.existing_set, self.id, self.locktoken, self.arn = self.get_set() + self.existing_set, self.set_id, self.locktoken, self.arn = self.get_set() return self._format_set(self.existing_set) def get_set(self): response = self.list() existing_set = None - id = None + set_id = None arn = None locktoken = None for item in response.get("IPSets"): if item.get("Name") == self.name: - id = item.get("Id") + set_id = item.get("Id") locktoken = item.get("LockToken") arn = item.get("ARN") - if id: + if set_id: try: - existing_set = self.wafv2.get_ip_set(Name=self.name, Scope=self.scope, Id=id).get("IPSet") + existing_set = self.wafv2.get_ip_set(Name=self.name, Scope=self.scope, Id=set_id).get("IPSet") except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to get wafv2 ip set.") tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws) existing_set["tags"] = tags - return existing_set, id, locktoken, arn + return existing_set, set_id, locktoken, arn def list(self, Nextmarker=None): # there is currently no paginator for wafv2 @@ -313,7 +313,7 @@ def main(): elif ips_updated or description_updated: retval = ip_set.update(description=description, addresses=addresses) elif tags_updated: - retval, id, locktoken, arn = ip_set.get_set() + retval, set_id, locktoken, arn = ip_set.get_set() else: if not check_mode: retval = ip_set.create( diff --git a/plugins/modules/wafv2_ip_set_info.py b/plugins/modules/wafv2_ip_set_info.py index caca5cd7081..de1e39d5f01 100644 --- 
a/plugins/modules/wafv2_ip_set_info.py +++ b/plugins/modules/wafv2_ip_set_info.py @@ -98,9 +98,9 @@ def list_ip_sets(wafv2, scope, fail_json_aws, Nextmarker=None): return response -def get_ip_set(wafv2, name, scope, id, fail_json_aws): +def get_ip_set(wafv2, name, scope, set_id, fail_json_aws): try: - response = wafv2.get_ip_set(Name=name, Scope=scope, Id=id) + response = wafv2.get_ip_set(Name=name, Scope=scope, Id=set_id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 ip set") return response @@ -124,17 +124,17 @@ def main(): # check if ip set exist response = list_ip_sets(wafv2, scope, module.fail_json_aws) - id = None + set_id = None for item in response.get("IPSets"): if item.get("Name") == name: - id = item.get("Id") + set_id = item.get("Id") arn = item.get("ARN") retval = {} existing_set = None - if id: - existing_set = get_ip_set(wafv2, name, scope, id, module.fail_json_aws) + if set_id: + existing_set = get_ip_set(wafv2, name, scope, set_id, module.fail_json_aws) retval = camel_dict_to_snake_dict(existing_set.get("IPSet")) retval["tags"] = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) or {} module.exit_json(**retval) diff --git a/plugins/modules/wafv2_resources.py b/plugins/modules/wafv2_resources.py index b36f517120b..df9e0da5852 100644 --- a/plugins/modules/wafv2_resources.py +++ b/plugins/modules/wafv2_resources.py @@ -71,9 +71,9 @@ from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls -def get_web_acl(wafv2, name, scope, id, fail_json_aws): +def get_web_acl(wafv2, name, scope, acl_id, fail_json_aws): try: - response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id) + response = wafv2.get_web_acl(Name=name, Scope=scope, Id=acl_id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 web acl.") return response @@ -129,16 +129,16 @@ def main(): response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws) - id = None + acl_id = None 
retval = {} change = False for item in response.get("WebACLs"): if item.get("Name") == name: - id = item.get("Id") + acl_id = item.get("Id") - if id: - existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) + if acl_id: + existing_acl = get_web_acl(wafv2, name, scope, acl_id, module.fail_json_aws) waf_arn = existing_acl.get("WebACL").get("ARN") retval = list_wafv2_resources(wafv2, waf_arn, module.fail_json_aws) diff --git a/plugins/modules/wafv2_resources_info.py b/plugins/modules/wafv2_resources_info.py index 5cafee1f67d..60f6d3a5670 100644 --- a/plugins/modules/wafv2_resources_info.py +++ b/plugins/modules/wafv2_resources_info.py @@ -60,9 +60,9 @@ from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls -def get_web_acl(wafv2, name, scope, id, fail_json_aws): +def get_web_acl(wafv2, name, scope, acl_id, fail_json_aws): try: - response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id) + response = wafv2.get_web_acl(Name=name, Scope=scope, Id=acl_id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 web acl.") return response @@ -98,15 +98,15 @@ def main(): # check if web acl exists response = list_web_acls(wafv2, scope, module.fail_json_aws) - id = None + acl_id = None retval = {} for item in response.get("WebACLs"): if item.get("Name") == name: - id = item.get("Id") + acl_id = item.get("Id") - if id: - existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) + if acl_id: + existing_acl = get_web_acl(wafv2, name, scope, acl_id, module.fail_json_aws) arn = existing_acl.get("WebACL").get("ARN") retval = camel_dict_to_snake_dict(list_wafv2_resources(wafv2, arn, module.fail_json_aws)) diff --git a/plugins/modules/wafv2_rule_group.py b/plugins/modules/wafv2_rule_group.py index e2a7fd1d438..6e2b0d9859f 100644 --- a/plugins/modules/wafv2_rule_group.py +++ b/plugins/modules/wafv2_rule_group.py @@ -243,7 +243,7 @@ def update(self, description, rules, sampled_requests, 
cloudwatch_metrics, metri req_obj["Description"] = description try: - response = self.wafv2.update_rule_group(**req_obj) + self.wafv2.update_rule_group(**req_obj) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to update wafv2 rule group.") return self.refresh_group() @@ -312,7 +312,7 @@ def create(self, capacity, description, rules, sampled_requests, cloudwatch_metr req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) try: - response = self.wafv2.create_rule_group(**req_obj) + self.wafv2.create_rule_group(**req_obj) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to create wafv2 rule group.") diff --git a/plugins/modules/wafv2_rule_group_info.py b/plugins/modules/wafv2_rule_group_info.py index 58862a9a5f2..38a0b2a20ce 100644 --- a/plugins/modules/wafv2_rule_group_info.py +++ b/plugins/modules/wafv2_rule_group_info.py @@ -99,9 +99,9 @@ from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups -def get_rule_group(wafv2, name, scope, id, fail_json_aws): +def get_rule_group(wafv2, name, scope, group_id, fail_json_aws): try: - response = wafv2.get_rule_group(Name=name, Scope=scope, Id=id) + response = wafv2.get_rule_group(Name=name, Scope=scope, Id=group_id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 rule group.") return response @@ -125,17 +125,17 @@ def main(): # check if rule group exists response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws) - id = None + group_id = None retval = {} for item in response.get("RuleGroups"): if item.get("Name") == name: - id = item.get("Id") + group_id = item.get("Id") arn = item.get("ARN") existing_group = None - if id: - existing_group = get_rule_group(wafv2, name, scope, id, module.fail_json_aws) + if group_id: + existing_group = get_rule_group(wafv2, name, scope, group_id, module.fail_json_aws) retval = camel_dict_to_snake_dict(existing_group.get("RuleGroup")) tags = 
describe_wafv2_tags(wafv2, arn, module.fail_json_aws) retval["tags"] = tags or {} diff --git a/plugins/modules/wafv2_web_acl.py b/plugins/modules/wafv2_web_acl.py index 054c093c532..32a22db5dff 100644 --- a/plugins/modules/wafv2_web_acl.py +++ b/plugins/modules/wafv2_web_acl.py @@ -335,7 +335,7 @@ def __init__(self, wafv2, name, scope, fail_json_aws): self.name = name self.scope = scope self.fail_json_aws = fail_json_aws - self.existing_acl, self.id, self.locktoken = self.get_web_acl() + self.existing_acl, self.acl_id, self.locktoken = self.get_web_acl() def update( self, @@ -350,7 +350,7 @@ def update( req_obj = { "Name": self.name, "Scope": self.scope, - "Id": self.id, + "Id": self.acl_id, "DefaultAction": default_action, "Rules": rules, "VisibilityConfig": { @@ -368,16 +368,18 @@ def update( req_obj["CustomResponseBodies"] = custom_response_bodies try: - response = self.wafv2.update_web_acl(**req_obj) + self.wafv2.update_web_acl(**req_obj) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to update wafv2 web acl.") - self.existing_acl, self.id, self.locktoken = self.get_web_acl() + self.existing_acl, self.acl_id, self.locktoken = self.get_web_acl() return self.existing_acl def remove(self): try: - response = self.wafv2.delete_web_acl(Name=self.name, Scope=self.scope, Id=self.id, LockToken=self.locktoken) + response = self.wafv2.delete_web_acl( + Name=self.name, Scope=self.scope, Id=self.acl_id, LockToken=self.locktoken + ) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to remove wafv2 web acl.") return response @@ -388,7 +390,7 @@ def get(self): return None def get_web_acl(self): - id = None + acl_id = None locktoken = None arn = None existing_acl = None @@ -396,18 +398,18 @@ def get_web_acl(self): for item in response.get("WebACLs"): if item.get("Name") == self.name: - id = item.get("Id") + acl_id = item.get("Id") locktoken = item.get("LockToken") arn = item.get("ARN") - if id: + if acl_id: try: - 
existing_acl = self.wafv2.get_web_acl(Name=self.name, Scope=self.scope, Id=id) + existing_acl = self.wafv2.get_web_acl(Name=self.name, Scope=self.scope, Id=acl_id) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to get wafv2 web acl.") tags = describe_wafv2_tags(self.wafv2, arn, self.fail_json_aws) existing_acl["tags"] = tags - return existing_acl, id, locktoken + return existing_acl, acl_id, locktoken def list(self): return wafv2_list_web_acls(self.wafv2, self.scope, self.fail_json_aws) @@ -443,11 +445,11 @@ def create( req_obj["Tags"] = ansible_dict_to_boto3_tag_list(tags) try: - response = self.wafv2.create_web_acl(**req_obj) + self.wafv2.create_web_acl(**req_obj) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to create wafv2 web acl.") - self.existing_acl, self.id, self.locktoken = self.get_web_acl() + self.existing_acl, self.acl_id, self.locktoken = self.get_web_acl() return self.existing_acl @@ -544,7 +546,7 @@ def main(): custom_response_bodies, ) elif tags_changed: - retval, id, locktoken = web_acl.get_web_acl() + retval, acl_id, locktoken = web_acl.get_web_acl() else: retval = web_acl.get() diff --git a/plugins/modules/wafv2_web_acl_info.py b/plugins/modules/wafv2_web_acl_info.py index e3cdc46e330..6aaf543f2cd 100644 --- a/plugins/modules/wafv2_web_acl_info.py +++ b/plugins/modules/wafv2_web_acl_info.py @@ -101,9 +101,9 @@ from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_web_acls -def get_web_acl(wafv2, name, scope, id, fail_json_aws): +def get_web_acl(wafv2, name, scope, acl_id, fail_json_aws): try: - response = wafv2.get_web_acl(Name=name, Scope=scope, Id=id) + response = wafv2.get_web_acl(Name=name, Scope=scope, Id=acl_id) except (BotoCoreError, ClientError) as e: fail_json_aws(e, msg="Failed to get wafv2 web acl.") return response @@ -120,7 +120,6 @@ def main(): supports_check_mode=True, ) - state = module.params.get("state") name = module.params.get("name") 
scope = module.params.get("scope") @@ -128,17 +127,17 @@ def main(): # check if web acl exists response = wafv2_list_web_acls(wafv2, scope, module.fail_json_aws) - id = None + acl_id = None arn = None retval = {} for item in response.get("WebACLs"): if item.get("Name") == name: - id = item.get("Id") + acl_id = item.get("Id") arn = item.get("ARN") - if id: - existing_acl = get_web_acl(wafv2, name, scope, id, module.fail_json_aws) + if acl_id: + existing_acl = get_web_acl(wafv2, name, scope, acl_id, module.fail_json_aws) retval = camel_dict_to_snake_dict(existing_acl.get("WebACL")) tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws) retval["tags"] = tags diff --git a/tests/unit/plugins/inventory/test_aws_mq.py b/tests/unit/plugins/inventory/test_aws_mq.py index 8969b4a0387..7e4559a5e05 100644 --- a/tests/unit/plugins/inventory/test_aws_mq.py +++ b/tests/unit/plugins/inventory/test_aws_mq.py @@ -55,8 +55,8 @@ def make_clienterror_exception(code="AccessDenied"): ) -@pytest.fixture() -def inventory(): +@pytest.fixture(name="inventory") +def fixture_inventory(): inventory = InventoryModule() inventory.inventory = MagicMock() inventory.inventory.set_variable = MagicMock() @@ -76,8 +76,8 @@ def inventory(): return inventory -@pytest.fixture() -def connection(): +@pytest.fixture(name="connection") +def fixture_connection(): conn = MagicMock() return conn @@ -536,13 +536,13 @@ def _get_option_side_effect(x): tmp = [] for host in camel_hosts: new_host = copy.deepcopy(host) - for key in host: + for key, value in host.items(): new_key = key if hostvars_prefix: new_key = _options["hostvars_prefix"] + new_key if hostvars_suffix: new_key += _options["hostvars_suffix"] - new_host[new_key] = host[key] + new_host[new_key] = value tmp.append(new_host) camel_hosts = tmp diff --git a/tests/unit/plugins/modules/dms_endpoint/test_compare_params.py b/tests/unit/plugins/modules/dms_endpoint/test_compare_params.py new file mode 100644 index 00000000000..06a2e1cf509 --- /dev/null +++ 
b/tests/unit/plugins/modules/dms_endpoint/test_compare_params.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2023, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import pytest +from unittest.mock import patch + +from ansible_collections.community.aws.plugins.modules.dms_endpoint import compare_params + +@pytest.mark.parametrize( + "described_params,created_params,expected_result", + [ + [dict(), dict(), False], + # Tags and Password should be ignored + [dict(Tags={"Somekey": "SomeValue"}), dict(Tags={"Somekey": "SomeValue"}), False], + [dict(Tags={"Somekey": "SomeValue"}), dict(Tags={"Somekey": "Another"}), False], + [dict(Tags={"Somekey": "Another"}), dict(), False], + [dict(), dict(Tags={"Somekey": "Another"}), False], + [dict(Password="Example"), dict(Password="Example"), False], + [dict(Password="Example"), dict(Password="AnotherExample"), False], + [dict(Password="AnotherExample"), dict(), False], + [dict(), dict(Password="AnotherExample"), False], + # Everything else should be compared + [dict(EngineDisplayName="ADisplayName"), dict(EngineDisplayName="ADisplayName"), False], + [dict(EngineDisplayName="ADisplayName"), dict(EngineDisplayName="Not that display name"), True], + # Ignore extra values in output from describe_endpoint(), the API changes over time, we only + # care about the things we manage, if they're missing from the described output it's a change + [dict(NewKey="my value"), dict(), False], + [dict(), dict(NewKey="my value"), True], + # Cope with case-insensitivity for some of the keyword values + [dict(SslMode="none"), dict(SslMode="none"), False], + [dict(SslMode="None"), dict(SslMode="none"), False], + [dict(SslMode="NONE"), dict(SslMode="none"), False], + [dict(SslMode="none"), dict(SslMode="verify-ca"), True], + [dict(SslMode="None"), dict(SslMode="verify-ca"), True], + # Bools, and Ints are also valid + [dict(Port=123), dict(Port=123), False], + 
[dict(Port=123), dict(Port=321), True], + [dict(NoHexPrefix=True), dict(NoHexPrefix=True), False], + [dict(NoHexPrefix=False), dict(NoHexPrefix=False), False], + [dict(NoHexPrefix=False), dict(NoHexPrefix=True), True], + # Slightly more complex example + [ + dict(Tags={"a": "b"}, Password="Example", EngineDisplayName="ADisplayName", SslMode="none", NewKey="123"), + dict(Tags={"a": "b"}, Password="Example", EngineDisplayName="ADisplayName", SslMode="none"), + False, + ], + [ + dict(EngineDisplayName="ADisplayName", SslMode="None"), + dict(Tags={"a": "b"}, Password="Example", EngineDisplayName="ADisplayName", SslMode="none"), + False, + ], + [ + dict(EngineDisplayName="ADisplayName", SslMode="none"), + dict(Tags={"a": "b"}, Password="Example", EngineDisplayName="ADisplayName", SslMode="verify-ca"), + True, + ], + [ + dict(EngineDisplayName="ADisplayName", SslMode="none"), + dict(Tags={"a": "b"}, Password="Example", EngineDisplayName="Not that Name", SslMode="none"), + True, + ], + ], +) +@patch("ansible_collections.community.aws.plugins.modules.dms_endpoint.create_module_params") +def test_compare_params(mock_create_module_params, described_params, created_params, expected_result): + mock_create_module_params.return_value = dict(created_params) + assert compare_params(described_params) is expected_result diff --git a/tests/unit/plugins/modules/test_data_pipeline.py b/tests/unit/plugins/modules/test_data_pipeline.py index d44a63f91cb..a79f1c0012e 100644 --- a/tests/unit/plugins/modules/test_data_pipeline.py +++ b/tests/unit/plugins/modules/test_data_pipeline.py @@ -45,8 +45,8 @@ class FailException(Exception): pass -@pytest.fixture(scope="module") -def dp_setup(): +@pytest.fixture(scope="module", name="dp_setup") +def fixture_dp_setup(): """ Yield a FakeModule object, data pipeline id of a vanilla data pipeline, and data pipeline objects diff --git a/tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py 
b/tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py index 72ee17f98ed..f774f6d23fb 100644 --- a/tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py +++ b/tests/unit/plugins/modules/test_directconnect_link_aggregation_group.py @@ -36,8 +36,8 @@ ) -@pytest.fixture(scope="module") -def dependencies(): +@pytest.fixture(scope="module", name="dependencies") +def fixture_dependencies(): # each LAG dict will contain the keys: module, connections, virtual_interfaces Dependencies = collections.namedtuple("Dependencies", ["lag_1", "lag_2"]) lag_1 = dict() diff --git a/tests/unit/plugins/modules/test_route53_wait.py b/tests/unit/plugins/modules/test_route53_wait.py index 57ed705c574..d7d8175f804 100644 --- a/tests/unit/plugins/modules/test_route53_wait.py +++ b/tests/unit/plugins/modules/test_route53_wait.py @@ -168,11 +168,11 @@ @pytest.mark.parametrize( - "input, expected", + "input_value, expected", DETECT_TASK_RESULTS_DATA, ) -def test_detect_task_results(input, expected): - assert list(detect_task_results(input)) == expected +def test_detect_task_results(input_value, expected): + assert list(detect_task_results(input_value)) == expected DETECT_TASK_RESULTS_FAIL_DATA = [ @@ -225,13 +225,13 @@ def test_detect_task_results(input, expected): @pytest.mark.parametrize( - "input, expected_exc, expected_result", + "input_value, expected_exc, expected_result", DETECT_TASK_RESULTS_FAIL_DATA, ) -def test_detect_task_fail_results(input, expected_exc, expected_result): +def test_detect_task_fail_results(input_value, expected_exc, expected_result): result = [] with pytest.raises(ValueError) as exc: - for res in detect_task_results(input): + for res in detect_task_results(input_value): result.append(res) print(exc.value.args[0])