
Commit

Add update to the test_efs_access_point test (#6638)
dreambeyondorange authored Jan 10, 2025
1 parent ac426d1 commit 8f86b20
Showing 5 changed files with 143 additions and 48 deletions.
2 changes: 1 addition & 1 deletion tests/integration-tests/conftest_tests_config.py
@@ -78,7 +78,7 @@ def remove_disabled_tests(session, config, items):
         if item.nodeid.split("[")[0] not in enabled_tests:
             if get_xdist_worker_id(session) in ["master", "gw0"]:
                 # log only in master process to avoid duplicate log entries
-                logging.warning("Skipping test %s because not defined in config", item.nodeid)
+                logging.debug("Skipping test %s because not defined in config", item.nodeid)
             items.remove(item)
 
 
@@ -78,5 +78,5 @@ def discover_all_test_functions(tests_root_dir):
             discovered_test_functions.get(module_dirname, []) + test_functions_identifiers
         )
 
-    logging.info("Discovered following test functions:\n%s", json.dumps(discovered_test_functions, indent=2))
+    logging.debug("Discovered following test functions:\n%s", json.dumps(discovered_test_functions, indent=2))
     return discovered_test_functions
@@ -88,7 +88,7 @@ def _check_declared_tests_exist(config, tests_root_dir):
         logging.error("Some of the configured tests do not exist: %s", e)
         raise
 
-    logging.info("Found following unused test functions: %s", json.dumps(unused_test_functions, indent=2))
+    logging.debug("Found following unused test functions: %s", json.dumps(unused_test_functions, indent=2))
 
 
 if __name__ == "__main__":
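The three hunks above only lower log levels (warning/info to debug), so the "Skipping test ..." and discovery messages disappear from default test output. As a general illustration of that effect (plain Python logging behavior, not code from this commit), the messages are only emitted once the root logger's threshold is lowered:

    import logging

    # With the default WARNING threshold, the downgraded messages are suppressed.
    logging.basicConfig(format="%(levelname)s %(message)s")
    logging.debug("Skipping test %s because not defined in config", "storage/test_efs.py::test_efs_access_point")  # hidden

    # Lower the root logger level (e.g. while debugging test discovery) to see them again.
    logging.getLogger().setLevel(logging.DEBUG)
    logging.debug("Discovered following test functions:\n%s", '{"storage": ["test_efs.py::test_efs_access_point"]}')  # emitted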
127 changes: 82 additions & 45 deletions tests/integration-tests/tests/storage/test_efs.py
@@ -223,68 +223,105 @@ def test_efs_access_point(
     To verify the EFS is the existing one, the test expects a file with a random name inside the mounted EFS
     """
     # Names of files that will be written from a separate instance. The test checks that the cluster nodes can access them.
-    # create an additional EFS with file system policy to prevent anonymous access
-    efs_filesystem_id = efs_stack_factory()[0]
-    efs_mount_target_stack_factory([efs_filesystem_id])
+    # create additional EFS filesystems with file system policies to prevent anonymous access
+    efs_filesystem_ids = efs_stack_factory(num=2)
+    # Create mount targets for both EFS filesystems
+    efs_mount_target_stack_factory(efs_filesystem_ids)
     tls = True
     iam = False
-    access_point_id = efs_access_point_stack_factory(efs_fs_id=efs_filesystem_id)[0]
+    # Create access points for both EFS filesystems
+    access_point_ids = []
+    for efs_fs_id in efs_filesystem_ids:
+        access_point_ids.extend(efs_access_point_stack_factory(efs_fs_id=efs_fs_id))
     if scheduler != "awsbatch":
         account_id = (
             boto3.client("sts", region_name=region, endpoint_url=get_sts_endpoint(region))
             .get_caller_identity()
             .get("Account")
         )
-        policy = {
-            "Version": "2012-10-17",
-            "Id": "efs-policy-denying-access-for-direct-efs-access",
-            "Statement": [
-                {
-                    "Sid": "efs-block-not-access-point-in-account",
-                    "Effect": "Deny",
-                    "Principal": {"AWS": "*"},
-                    "Action": [
-                        "elasticfilesystem:ClientMount",
-                        "elasticfilesystem:ClientRootAccess",
-                        "elasticfilesystem:ClientWrite",
-                    ],
-                    "Resource": f"arn:{get_arn_partition(region)}:elasticfilesystem:{region}:{account_id}:"
-                    f"file-system/{efs_filesystem_id}",
-                    "Condition": {
-                        "StringNotLike": {
-                            "elasticfilesystem:AccessPointArn": f"arn:{get_arn_partition(region)}:"
-                            f"elasticfilesystem:{region}:{account_id}:access-point/{access_point_id}"
-                        }
-                    },
-                },
-                {
-                    "Sid": "efs-allow-accesspoint-in-account",
-                    "Effect": "Allow",
-                    "Principal": {"AWS": "*"},
-                    "Action": [
-                        "elasticfilesystem:ClientMount",
-                        "elasticfilesystem:ClientRootAccess",
-                        "elasticfilesystem:ClientWrite",
-                    ],
-                    "Resource": f"arn:{get_arn_partition(region)}:elasticfilesystem:{region}:{account_id}:"
-                    f"file-system/{efs_filesystem_id}",
-                },
-            ],
-        }
-        boto3.client("efs").put_file_system_policy(FileSystemId=efs_filesystem_id, Policy=json.dumps(policy))
+        policy = _get_efs_ap_policy(region, account_id, efs_filesystem_ids[0], access_point_ids[0])
+        policy_2 = _get_efs_ap_policy(region, account_id, efs_filesystem_ids[1], access_point_ids[1])
+        boto3.client("efs").put_file_system_policy(FileSystemId=efs_filesystem_ids[0], Policy=json.dumps(policy))
+        boto3.client("efs").put_file_system_policy(FileSystemId=efs_filesystem_ids[1], Policy=json.dumps(policy_2))

     mount_dir = "efs_mount_dir"
     cluster_config = pcluster_config_reader(
-        mount_dir=mount_dir, efs_filesystem_id=efs_filesystem_id, access_point_id=access_point_id
+        mount_dir=mount_dir, efs_filesystem_id=efs_filesystem_ids[0], access_point_id=access_point_ids[0]
     )
     cluster = clusters_factory(cluster_config)
     remote_command_executor = RemoteCommandExecutor(cluster)
 
-    mount_dir = "/" + mount_dir
-    test_efs_correctly_mounted(remote_command_executor, mount_dir, tls, iam, access_point_id)
+    mount_dir_test = "/" + mount_dir
+    test_efs_correctly_mounted(remote_command_executor, mount_dir_test, tls, iam, access_point_ids[0])
 
     scheduler_commands = scheduler_commands_factory(remote_command_executor)
-    _test_efs_correctly_shared(remote_command_executor, mount_dir, scheduler_commands)
+    _test_efs_correctly_shared(remote_command_executor, mount_dir_test, scheduler_commands)
 
+    # Update cluster to add storage with a live update.
+    # In particular, we add an extra EFS with an Access Point
+    mount_dir_2 = "efs_mount_dir_2"
+    logging.info("Updating the cluster to mount an EFS via AP with live update")
+    update_cluster_config = pcluster_config_reader(
+        config_file="pcluster.config.update.yaml",
+        output_file="pcluster.config.update_add_external_efs.yaml",
+        mount_dir=mount_dir,
+        efs_filesystem_id=efs_filesystem_ids[0],
+        access_point_id=access_point_ids[0],
+        mount_dir_2=mount_dir_2,
+        efs_filesystem_id_2=efs_filesystem_ids[1],
+        access_point_id_2=access_point_ids[1],
+    )
+
+    cluster.update(update_cluster_config)
+
+    mount_dir_2_test = "/" + mount_dir_2
+    test_efs_correctly_mounted(remote_command_executor, mount_dir_2_test, tls, iam, access_point_ids[1])
+
+    scheduler_commands = scheduler_commands_factory(remote_command_executor)
+    _test_efs_correctly_shared(remote_command_executor, mount_dir_2_test, scheduler_commands)
+
+
+def _get_efs_ap_policy(region, account_id, efs_fs_id, ap_id):
+    return {
+        "Version": "2012-10-17",
+        "Id": "efs-policy-denying-access-for-direct-efs-access",
+        "Statement": [
+            {
+                "Sid": "efs-block-not-access-point-in-account",
+                "Effect": "Deny",
+                "Principal": {"AWS": "*"},
+                "Action": [
+                    "elasticfilesystem:ClientMount",
+                    "elasticfilesystem:ClientRootAccess",
+                    "elasticfilesystem:ClientWrite",
+                ],
+                "Resource": [
+                    f"arn:{get_arn_partition(region)}:elasticfilesystem:{region}:{account_id}:file-system/{efs_fs_id}",
+                ],
+                "Condition": {
+                    "StringNotLike": {
+                        "elasticfilesystem:AccessPointArn": [
+                            f"arn:{get_arn_partition(region)}:"
+                            f"elasticfilesystem:{region}:{account_id}:access-point/{ap_id}",
+                        ]
+                    }
+                },
+            },
+            {
+                "Sid": "efs-allow-accesspoint-in-account",
+                "Effect": "Allow",
+                "Principal": {"AWS": "*"},
+                "Action": [
+                    "elasticfilesystem:ClientMount",
+                    "elasticfilesystem:ClientRootAccess",
+                    "elasticfilesystem:ClientWrite",
+                ],
+                "Resource": [
+                    f"arn:{get_arn_partition(region)}:elasticfilesystem:{region}:{account_id}:file-system/{efs_fs_id}",
+                ],
+            },
+        ],
+    }
+
 
 def _check_efs_after_nodes_reboot(
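The new _get_efs_ap_policy helper builds the same deny/allow statements the test previously inlined: direct ClientMount/ClientWrite/ClientRootAccess on the filesystem is denied unless the request arrives through the matching access point, which is then explicitly allowed. A minimal sketch of checking that such a policy was attached as intended, using hypothetical filesystem and access-point IDs (the test creates the real ones through its CloudFormation stack factories):

    import json

    import boto3

    # Hypothetical IDs for illustration only; not values from this commit.
    fs_id = "fs-0123456789abcdef0"
    ap_id = "fsap-0123456789abcdef0"

    efs = boto3.client("efs", region_name="us-east-1")
    attached = json.loads(efs.describe_file_system_policy(FileSystemId=fs_id)["Policy"])

    # The Deny statement should reference the access point ARN in its StringNotLike condition.
    deny = next(s for s in attached["Statement"] if s["Effect"] == "Deny")
    assert ap_id in json.dumps(deny["Condition"]["StringNotLike"]["elasticfilesystem:AccessPointArn"])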
@@ -0,0 +1,58 @@
+Image:
+  Os: {{ os }}
+HeadNode:
+  InstanceType: {{ instance }}
+  SharedStorageType: {{ shared_headnode_storage_type }}
+  Networking:
+    SubnetId: {{ public_subnet_ids[0] }}
+  Ssh:
+    KeyName: {{ key_name }}
+  Imds:
+    Secured: {{ imds_secured }}
+Scheduling:
+  Scheduler: {{ scheduler }}
+  {% if scheduler == "awsbatch" %}AwsBatchQueues:{% else %}SlurmQueues:{% endif %}
+    - Name: queue-0
+      ComputeResources:
+        - Name: compute-resource-0
+          {% if scheduler == "awsbatch" %}
+          InstanceTypes:
+            - {{ instance }}
+          MinvCpus: 4
+          DesiredvCpus: 4
+          {% else %}
+          Instances:
+            - InstanceType: {{ instance }}
+          MinCount: 1
+          MaxCount: 1
+          {% endif %}
+      Networking:
+        SubnetIds:
+          - {{ private_subnet_ids[1] }}
+    {% if scheduler == "slurm" %}
+    - Name: queue-1
+      ComputeResources:
+        - Name: compute-resource-0
+          Instances:
+            - InstanceType: {{ instance }}
+          MinCount: 1
+          MaxCount: 1
+      Networking:
+        SubnetIds:
+          - {% if private_subnet_ids|length >= 3 %} {{ private_subnet_ids[2] }} {% else %} {{ private_subnet_ids[1] }} {% endif %}
+    {% endif %}
+SharedStorage:
+  - MountDir: {{ mount_dir }}
+    Name: efs
+    StorageType: Efs
+    EfsSettings:
+      FileSystemId: {{ efs_filesystem_id }}
+      AccessPointId: {{ access_point_id }}
+      EncryptionInTransit: true
+  - MountDir: {{ mount_dir_2 }}
+    Name: efs_2
+    StorageType: Efs
+    EfsSettings:
+      FileSystemId: {{ efs_filesystem_id_2 }}
+      AccessPointId: {{ access_point_id_2 }}
+      EncryptionInTransit: true
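In the updated test, pcluster_config_reader renders this Jinja template with both filesystem/access-point pairs, and cluster.update() then mounts the second SharedStorage entry on the running cluster. A small sketch, with hypothetical rendered values and assuming PyYAML is available, of the invariant the template is meant to guarantee (every EFS mount goes through an access point with in-transit encryption):

    import textwrap

    import yaml

    # Hypothetical rendered SharedStorage section; the real values come from the Jinja variables above.
    rendered = textwrap.dedent(
        """\
        SharedStorage:
          - MountDir: efs_mount_dir
            Name: efs
            StorageType: Efs
            EfsSettings:
              FileSystemId: fs-0123456789abcdef0
              AccessPointId: fsap-0123456789abcdef0
              EncryptionInTransit: true
          - MountDir: efs_mount_dir_2
            Name: efs_2
            StorageType: Efs
            EfsSettings:
              FileSystemId: fs-0fedcba9876543210
              AccessPointId: fsap-0fedcba9876543210
              EncryptionInTransit: true
        """
    )

    config = yaml.safe_load(rendered)
    for storage in config["SharedStorage"]:
        settings = storage["EfsSettings"]
        # Every EFS mount in this test uses an access point with TLS enabled.
        assert settings["AccessPointId"].startswith("fsap-")
        assert settings["EncryptionInTransit"] is True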
