diff --git a/src/confcom/HISTORY.rst b/src/confcom/HISTORY.rst index 40a52795ff5..6c62da8196d 100644 --- a/src/confcom/HISTORY.rst +++ b/src/confcom/HISTORY.rst @@ -2,7 +2,20 @@ Release History =============== +1.1.0 +++++++ +* adding support for image-attached fragments via `acifragmentgen` +* adding workload identity support for VN2 +* adding `--exclude-default-fragments` to disallow sidecars from policy +* adding `--omit-id` for policy stability across multiple image registries +* better handle broken base64 policies in templates +* improve error handling structure +* make some mount types in VN2 required readonly +* prompt users if they want to overwrite their policy in VN2 +* changing where dmverity-vhd and sign1util binaries are fetched from. This includes a significant speedup in dmverity-vhd hashing + 1.0.1 +++++++ * getting rid of msrestazure dependency in _validators.py 1.0.0 diff --git a/src/confcom/azext_confcom/README.md b/src/confcom/azext_confcom/README.md index 4a53af4e9be..293efbe8e5f 100644 --- a/src/confcom/azext_confcom/README.md +++ b/src/confcom/azext_confcom/README.md @@ -29,8 +29,8 @@ - [allow_environment_variable_dropping](#allow_environment_variable_dropping) - [allow_unencrypted_scratch](#allow_unencrypted_scratch) - [allow_capabilities_dropping](#allow_capabilities_dropping) +- [Microsoft Azure CLI 'confcom acifragmentgen' Extension Examples](#microsoft-azure-cli-confcom-acifragmentgen-extension-examples) - [Microsoft Azure CLI 'confcom katapolicygen' Extension Examples](#microsoft-azure-cli-confcom-katapolicygen-extension-examples) - - [Microsoft Azure CLI 'confcom katapolicygen' Extension Examples] ## Microsoft Azure CLI 'confcom acipolicygen' Extension Examples @@ -279,8 +279,8 @@ Mixed-mode policy generation is available in the `confcom` tooling, meaning imag ## AKS Virtual Node -Azure Kubernetes Service (AKS) allows pods to be scheduled on Azure Container Instances (ACI) -using the [AKS Virtual 
Node](https://learn.microsoft.com/en-us/azure/aks/virtual-nodes) feature. The `confcom` tooling can generate security policies for these ACI-based pods in the same way as for standalone ACI container groups. The key difference is that the `confcom` tooling will ingest an AKS pod specification (`pod.yaml`) instead of an ARM Template. +Azure Kubernetes Service (AKS) allows pods to be scheduled on Azure Container Instances (ACI) +using the [AKS Virtual Node](https://learn.microsoft.com/en-us/azure/aks/virtual-nodes) feature. The `confcom` tooling can generate security policies for these ACI-based pods in the same way as for standalone ACI container groups. The key difference is that the `confcom` tooling will ingest an AKS pod specification (`pod.yaml`) instead of an ARM Template. Use the following command to generate and print a security policy for an AKS pod running on ACI: @@ -288,7 +288,7 @@ Use the following command to generate and print a security policy for an AKS pod az confcom acipolicygen --virtual-node-yaml ./pod.yaml --print-policy ``` -> [!NOTE] +> [!NOTE] > The `acipolicygen` command is specific to generating policies for ACI-based containers. For generating security policies for the [Confidential Containers on AKS](https://learn.microsoft.com/en-us/azure/aks/confidential-containers-overview) feature, use the `katapolicygen` command. ## Security Policy Information Sources @@ -659,6 +659,81 @@ This rule determines whether unencrypted writable storage from the UVM to the co Whether to allow capabilities to be dropped in the same manner as allow_environment_variable_dropping. +## Microsoft Azure CLI 'confcom acifragmentgen' Extension Examples + +Run `az confcom acifragmentgen --help` to see a list of supported arguments along with explanations. The following commands demonstrate the usage of different arguments to generate confidential computing security fragments. + +For information on what a policy fragment is, see [policy fragments](#policy-fragments). 
For a full walkthrough on how to generate a policy fragment and use it in a policy, see [Create a Key and Cert for Signing](../samples/certs/README.md). + +**Examples:** + +Example 1: The following command creates a security fragment and prints it to stdout as well as saving it to a file `contoso.rego`: + +```bash +az confcom acifragmentgen --config ./fragment_config.json --svn 1 --namespace contoso +``` + +The config file is a JSON file that contains the following information: + +```json +{ + "containers": [ + { + "name": "my-image", + "properties": { + "image": "mcr.microsoft.com/acc/samples/aci/helloworld:2.8", + "environmentVariables": [ + { + "name": "PATH", + "value": "/customized/path/value" + }, + { + "name": "TEST_REGEXP_ENV", + "value": "test_regexp_env(.*)", + "regex": true + } + ], + "command": [ + "python3", + "main.py" + ] + } + } + ] +} +``` + +The `--svn` argument is used to specify the security version number of the fragment and should be an integer. The `--namespace` argument is used to specify the namespace of the fragment and cannot conflict with some built-in names. If a conflicting name occurs, there will be an error message. [This list of reserved names can be found here under 'reserved_fragment_namespaces'](./data/internal_config.json). The format of the config file generally follows that of the [ACI resource in an ARM template](https://learn.microsoft.com/en-us/azure/templates/microsoft.containerinstance/containergroups?pivots=deployment-language-arm-template). 
+ +Example 2: This command creates a signed security fragment and attaches it to a container image in an ORAS-compliant registry: + +```bash +az confcom acifragmentgen --chain ./samples/certs/intermediateCA/certs/www.contoso.com.chain.cert.pem --key ./samples/certs/intermediateCA/private/ec_p384_private.pem --svn 1 --namespace contoso --config ./samples/config.json --upload-fragment +``` + +Example 3: This command creates a file to be used by `acipolicygen` that says which fragments should be included in the policy. Note that the fragment must be [COSE](https://www.iana.org/assignments/cose/cose.xhtml) signed: + +```bash +az confcom acifragmentgen --generate-import -p ./contoso.rego.cose --minimum-svn 1 --fragments-json fragments.json +``` + +This outputs a file `fragments.json` that contains the following information: + +```json +{ + "path": "./contoso.rego.cose", + "feed": "contoso.azurecr.io/example", + "includes": [ + "containers", + "fragments" + ], + "issuer": "did:x509:0:sha256:mLzv0uyBNQvC6hi4y9qy8hr6NSZuYFv6gfCwAEWBNqc::subject:CN:Contoso", + "minimum_svn": "1" +} +``` + +This file is then used by `acipolicygen` to generate a policy that includes custom fragments. + ## Microsoft Azure CLI 'confcom katapolicygen' Extension Examples Run `az confcom katapolicygen --help` to see a list of supported arguments along with explanations. The following commands demonstrate the usage of different arguments to generate confidential computing security policies. 
diff --git a/src/confcom/azext_confcom/_help.py b/src/confcom/azext_confcom/_help.py index 1244f749474..23fd72b7383 100644 --- a/src/confcom/azext_confcom/_help.py +++ b/src/confcom/azext_confcom/_help.py @@ -5,7 +5,7 @@ # -------------------------------------------------------------------------------------------- from knack.help_files import helps # pylint: disable=unused-import - +from azext_confcom.config import SUPPORTED_ALGOS helps[ "confcom" @@ -51,23 +51,23 @@ - name: --debug-mode type: boolean - short-summary: 'When enabled, the generated security policy adds the ability to use /bin/sh or /bin/bash to debug the container. It also enabled stdio access, ability to dump stack traces, and enables runtime logging. It is recommended to only use this option for debugging purposes.' + short-summary: 'When enabled, the generated security policy adds the ability to use /bin/sh or /bin/bash to debug the container. It also enabled stdio access, ability to dump stack traces, and enables runtime logging. It is recommended to only use this option for debugging purposes' - name: --approve-wildcards -y type: boolean - short-summary: 'When enabled, all prompts for using wildcards in environment variables are automatically approved.' + short-summary: 'When enabled, all prompts for using wildcards in environment variables are automatically approved' - name: --disable-stdio type: boolean - short-summary: 'When enabled, the containers in the container group do not have access to stdio.' + short-summary: 'When enabled, the containers in the container group do not have access to stdio' - name: --print-existing-policy type: boolean - short-summary: 'When enabled, the existing security policy that is present in the ARM Template is printed to the command line, and no new security policy is generated.' 
+ short-summary: 'When enabled, the existing security policy that is present in the ARM Template is printed to the command line, and no new security policy is generated' - name: --diff -d type: boolean - short-summary: 'When combined with an input ARM Template, verifies the policy present in the ARM Template under "ccePolicy" and the containers within the ARM Template are compatible. If they are incompatible, a list of reasons is given and the exit status code will be 2.' + short-summary: 'When combined with an input ARM Template file (or YAML file for Virtual Node policy generation), verifies the policy present in the ARM Template under "ccePolicy" and the containers within the file are compatible. If they are incompatible, a list of reasons is given and the exit status code will be 2' - name: --outraw type: boolean @@ -79,7 +79,7 @@ - name: --save-to-file -s type: string - short-summary: 'Save output policy to given file path.' + short-summary: 'Save output policy to given file path' - name: --print-policy type: boolean @@ -89,6 +89,22 @@ type: boolean short-summary: 'When enabled, the hashing algorithm used to generate the policy is faster but less memory efficient' + - name: --omit-id + type: boolean + short-summary: 'When enabled, the generated policy will not contain the ID field. This will keep the policy from being tied to a specific image name and tag' + + - name: --include-fragments -f + type: boolean + short-summary: 'When enabled, the path specified by --fragments-json will be used to pull fragments from an OCI registry or locally and include them in the generated policy' + + - name: --fragments-json -j + type: string + short-summary: 'Path to JSON file containing fragment information to use for generating a policy. This requires --include-fragments to be enabled' + + - name: --exclude-default-fragments -e + type: boolean + short-summary: 'When enabled, the default fragments are not included in the generated policy. 
This includes containers needed to mount azure files, mount secrets, mount git repos, and other common ACI features' + examples: - name: Input an ARM Template file to inject a base64 encoded Confidential Container Security Policy into the ARM Template text: az confcom acipolicygen --template-file "./template.json" @@ -98,6 +114,96 @@ text: az confcom acipolicygen --template-file "./template.json" -s "./output-file.txt" --print-policy - name: Input an ARM Template file and use a tar file as the image source instead of the Docker daemon text: az confcom acipolicygen --template-file "./template.json" --tar "./image.tar" + - name: Input an ARM Template file and use a fragments JSON file to generate a policy + text: az confcom acipolicygen --template-file "./template.json" --fragments-json "./fragments.json" --include-fragments +""" + +helps[ + "confcom acifragmentgen" +] = f""" + type: command + short-summary: Create a Confidential Container Policy Fragment for ACI. + + parameters: + - name: --image + type: string + short-summary: 'Image to use for the generated policy fragment' + + - name: --input -i + type: string + short-summary: 'Path to a JSON file containing the configuration for the generated policy fragment' + + - name: --tar + type: string + short-summary: 'Path to either a tarball containing image layers or a JSON file containing paths to tarballs of image layers' + + - name: --namespace -n + type: string + short-summary: 'Namespace to use for the generated policy fragment' + + - name: --svn + type: string + short-summary: 'Minimum Allowed Software Version Number for the generated policy fragment. This should be a monotonically increasing integer' + + - name: --feed -f + type: string + short-summary: 'Feed to use for the generated policy fragment. This is typically the same as the image name when using image-attached fragments. 
It is the location in the remote repository where the fragment will be stored' + + - name: --key -k + type: string + short-summary: 'Path to .pem formatted key file to use for signing the generated policy fragment. This must be used with --chain' + + - name: --chain + type: string + short-summary: 'Path to .pem formatted certificate chain file to use for signing the generated policy fragment. This must be used with --key' + + - name: --algo + type: string + short-summary: | + Algorithm used for signing the generated policy fragment. This must be used with --key and --chain. + Supported algorithms are {SUPPORTED_ALGOS} + + - name: --fragment-path -p + type: string + short-summary: 'Path to an existing policy fragment file to be used with --generate-import. This option allows you to create import statements for the specified fragment without needing to pull it from an OCI registry' + + - name: --generate-import -g + type: boolean + short-summary: 'Generate an import statement for a policy fragment' + + - name: --disable-stdio + type: boolean + short-summary: 'When enabled, the containers in the container group do not have access to stdio' + + - name: --debug-mode + type: boolean + short-summary: 'When enabled, the generated security policy adds the ability to use /bin/sh or /bin/bash to debug the container. It also enabled stdio access, ability to dump stack traces, and enables runtime logging. 
It is recommended to only use this option for debugging purposes' + + - name: --output-filename + type: string + short-summary: 'Save output policy to given file path' + + - name: --outraw + type: boolean + short-summary: 'Output policy in clear text compact JSON instead of default pretty print format' + + - name: --upload-fragment -u + type: boolean + short-summary: 'When enabled, the generated policy fragment will be uploaded to the registry of the image being used' + + - name: --fragments-json -j + type: string + short-summary: 'Path to a JSON file that will store the fragment import information generated when using --generate-import. This file can later be fed into the policy generation command (acipolicygen) to include the fragment in a new or existing policy. If not specified, the import statement will be printed to the console instead of being saved to a file' + + examples: + - name: Input an image name to generate a simple fragment + text: az confcom acifragmentgen --image mcr.microsoft.com/azuredocs/aci-helloworld + - name: Input a config file to generate a fragment with a custom namespace and debug mode enabled + text: az confcom acifragmentgen --input "./config.json" --namespace "my-namespace" --debug-mode + - name: Generate an import statement for a signed local fragment + text: az confcom acifragmentgen --fragment-path "./fragment.json" --generate-import --minimum-svn 1 + - name: Generate a fragment and COSE sign it with a key and chain + text: az confcom acifragmentgen --image mcr.microsoft.com/azuredocs/aci-helloworld --key "./key.pem" --chain "./chain.pem" --svn 1 --namespace contoso --no-print """ helps[ diff --git a/src/confcom/azext_confcom/_params.py b/src/confcom/azext_confcom/_params.py index b27c8c5078a..c00a6503e15 100644 --- a/src/confcom/azext_confcom/_params.py +++ b/src/confcom/azext_confcom/_params.py @@ -5,6 +5,24 @@ # pylint: disable=line-too-long from knack.arguments import CLIArgumentType +from azext_confcom._validators import ( + 
validate_params_file, + validate_diff, + validate_aci_source, + validate_print_format, + validate_save_to_file, + validate_faster_hashing, + validate_katapolicygen_input, + validate_fragment_key_and_chain, + validate_fragment_source, + validate_fragment_generate_import, + validate_fragment_namespace_and_svn, + validate_fragment_minimum_svn, + validate_fragment_algo, + validate_fragment_path, + validate_fragment_json, + validate_fragment_json_policy, +) def load_arguments(self, _): @@ -27,30 +45,35 @@ def load_arguments(self, _): options_list=("--input", "-i"), required=False, help="Input JSON config file", + validator=validate_aci_source ) c.argument( "arm_template", options_list=("--template-file", "-a"), required=False, help="ARM template file", + validator=validate_aci_source ) c.argument( "arm_template_parameters", options_list=("--parameters", "-p"), required=False, help="ARM template parameters", + validator=validate_params_file ) c.argument( "virtual_node_yaml_path", options_list=("--virtual-node-yaml"), required=False, help="Virtual node YAML file", + validator=validate_aci_source ) c.argument( "image_name", options_list=("--image",), required=False, help="Image Name", + validator=validate_aci_source ) c.argument( "tar_mapping_location", @@ -86,7 +109,8 @@ def load_arguments(self, _): "diff", options_list=("--diff", "-d"), required=False, - help="Compare the CCE Policy field in the ARM Template to the containers in the ARM Template and make sure they are compatible", + help="Compare the CCE Policy field in the ARM Template file (or YAML file for Virtual Node) to the containers in the file and make sure they are compatible", + validator=validate_diff ) c.argument( "validate_sidecar", @@ -107,6 +131,7 @@ def load_arguments(self, _): required=False, action="store_true", help="Output policy in clear text compact JSON instead of default base64 format", + validator=validate_print_format, ) c.argument( "outraw_pretty_print", @@ -114,24 +139,181 @@ def 
load_arguments(self, _): required=False, action="store_true", help="Output policy in clear text and pretty print format", + validator=validate_print_format, ) c.argument( "save_to_file", options_list=("--save-to-file", "-s"), required=False, help="Save output policy to given file path", + validator=validate_save_to_file, ) c.argument( "print_policy_to_terminal", options_list=("--print-policy"), required=False, help="Print the generated policy in the terminal", + validator=validate_print_format, ) c.argument( "faster_hashing", options_list=("--faster-hashing"), required=False, help="Use buffered image reader for dmverity hashing. This will speed up the hashing process but use much more memory.", + validator=validate_faster_hashing, + ) + c.argument( + "omit_id", + options_list=("--omit-id"), + required=False, + help="Omit the id field in the policy. This is helpful if the image being used will be present in multiple registries and used interchangeably.", + ) + + c.argument( + "include_fragments", + options_list=("--include-fragments", "-f"), + required=False, + help="Include fragments in the generated policy", + ) + c.argument( + "fragments_json", + options_list=("--fragments-json", "-j"), + required=False, + help="Path to JSON file containing fragment information", + validator=validate_fragment_json_policy, + ) + c.argument( + "exclude_default_fragments", + options_list=("--exclude-default-fragments", "-e"), + required=False, + help="Exclude default fragments in the generated policy", + ) + + with self.argument_context("confcom acifragmentgen") as c: + c.argument( + "image_name", + options_list=("--image"), + required=False, + help="Image Name to be used for the generated policy fragment", + validator=validate_fragment_source + ) + c.argument( + "input_path", + options_list=("--input", "-i"), + required=False, + help="Config file for information about the intended generated policy fragment", + validator=validate_fragment_source + ) + c.argument( + 
"tar_mapping_location", + options_list=("--tar",), + required=False, + help="Tar file locations in JSON format where the key is the name and tag of the image and the value is the path to the tar file", + ) + c.argument( + "namespace", + options_list=("--namespace", "-n"), + required=False, + help="Namespace for the generated policy fragment", + validator=validate_fragment_namespace_and_svn, + ) + c.argument( + "svn", + options_list=("--svn"), + required=False, + help="Software Version Number for the generated policy fragment", + validator=validate_fragment_namespace_and_svn, + ) + c.argument( + "feed", + options_list=("--feed", "-f"), + required=False, + help="Feed for the generated policy fragment", + ) + c.argument( + "key", + options_list=("--key", "-k"), + required=False, + help="Key for signing the generated policy fragment. Must be in PEM format", + validator=validate_fragment_key_and_chain, + ) + c.argument( + "chain", + options_list=("--chain"), + required=False, + help="Certificate chain for signing the generated policy fragment. 
Must be in PEM format", + validator=validate_fragment_key_and_chain, + ) + c.argument( + "algo", + options_list=("--algo"), + required=False, + help="Algorithm for signing the generated policy fragment", + validator=validate_fragment_algo, + ) + c.argument( + "fragment_path", + options_list=("--fragment-path", "-p"), + required=False, + help="Path to a policy fragment to be used with --generate-import to make import statements without having access to the fragment's OCI registry", + validator=validate_fragment_path, + ) + c.argument( + "generate_import", + options_list=("--generate-import", "-g"), + required=False, + help="Generate an import statement for a policy fragment", + validator=validate_fragment_generate_import, + ) + c.argument( + "minimum_svn", + options_list=("--minimum-svn",), + required=False, + help="Used with --generate-import to specify the minimum SVN for the import statement", + validator=validate_fragment_minimum_svn, + ) + c.argument( + "disable_stdio", + options_list=("--disable-stdio",), + required=False, + help="Disabling container stdio will disable the ability to see the output of the container in the terminal for Confidential ACI", + ) + c.argument( + "debug_mode", + options_list=("--debug-mode",), + required=False, + help="Debug mode will enable processes in a container group that are helpful for debugging", + ) + c.argument( + "output_filename", + options_list=("--output-filename"), + required=False, + help="Output filename for the generated policy fragment", + ) + c.argument( + "outraw", + options_list=("--outraw"), + required=False, + help="Output policy fragment in clear text compact JSON instead of default base64 format", + ) + c.argument( + "upload_fragment", + options_list=("--upload-fragment", "-u"), + required=False, + help="Upload a policy fragment to a container registry", + ) + c.argument( + "no_print", + options_list=("--no-print",), + required=False, + help="Do not print the generated policy fragment to stdout", + ) + 
c.argument( + "fragments_json", + options_list=("--fragments-json", "-j"), + required=False, + help="Path to JSON file to write fragment import information. This is used with --generate-import. If not specified, the import statement will print to the console", + validator=validate_fragment_json, ) with self.argument_context("confcom katapolicygen") as c: @@ -140,58 +322,68 @@ def load_arguments(self, _): options_list=("--yaml", "-y"), required=False, help="Input YAML config file", + validator=validate_katapolicygen_input, ) c.argument( "outraw", options_list=("--outraw"), required=False, help="Print the generated policy in the terminal in Rego format", + validator=validate_katapolicygen_input, ) c.argument( "print_policy", options_list=("--print-policy"), required=False, help="Print the generated policy in the terminal in base64", + validator=validate_katapolicygen_input, ) c.argument( "config_map_file", options_list=("--config-map-file", "-c"), required=False, help="Config map file", + validator=validate_katapolicygen_input, ) c.argument( "use_cached_files", options_list=("--use-cached-files", "-u"), required=False, help="Use cached files", + validator=validate_katapolicygen_input, ) c.argument( "settings_file_name", options_list=("--settings-file-name", "-j"), required=False, help="Path for custom settings file", + validator=validate_katapolicygen_input, ) c.argument( "rules_file_name", options_list=("--rules-file-name", "-p"), required=False, help="Path for custom rules file", + validator=validate_katapolicygen_input, ) c.argument( "print_version", options_list=("--print-version", "-v"), required=False, help="Print the version of the genpolicy tool", + validator=validate_katapolicygen_input, ) c.argument( "containerd_pull", options_list=("--containerd-pull", "-d"), required=False, help="Use containerd to pull the image", + validator=validate_katapolicygen_input, ) c.argument( "containerd_socket_path", options_list=("--containerd-socket-path"), required=False, 
help="Path to containerd socket if not using the default", + validator=validate_katapolicygen_input, ) diff --git a/src/confcom/azext_confcom/_validators.py b/src/confcom/azext_confcom/_validators.py new file mode 100644 index 00000000000..8077f8e1bf9 --- /dev/null +++ b/src/confcom/azext_confcom/_validators.py @@ -0,0 +1,112 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +from knack.util import CLIError +from azext_confcom.config import RESERVED_FRAGMENT_NAMES, SUPPORTED_ALGOS + + +def validate_params_file(namespace): + if namespace.arm_template_parameters and not namespace.arm_template: + raise CLIError( + "Can only use ARM Template Parameters if ARM Template is also present" + ) + + +def validate_diff(namespace): + if (namespace.diff and namespace.input_path) or (namespace.diff and namespace.image_name): + raise CLIError("Can only diff CCE policy from ARM Template or YAML File") + + +def validate_print_format(namespace): + if sum(map(bool, [namespace.print_policy_to_terminal, namespace.outraw, namespace.outraw_pretty_print])) > 1: + raise CLIError("Can only print in one format at a time") + + +def validate_aci_source(namespace): + if sum(map(bool, [ + namespace.input_path, + namespace.arm_template, + namespace.image_name, + namespace.virtual_node_yaml_path + ])) != 1: + raise CLIError("Can only generate CCE policy from one source at a time") + + +def validate_faster_hashing(namespace): + if namespace.faster_hashing and namespace.tar_mapping_location: + raise CLIError("Cannot use --faster-hashing with --tar") + + +def validate_save_to_file(namespace): + if namespace.save_to_file and namespace.arm_template and not ( + namespace.print_policy_to_terminal or 
namespace.outraw or namespace.outraw_pretty_print + ): + raise CLIError("Must print policy to terminal when saving to file") + + +def validate_fragment_json_policy(namespace): + if namespace.fragments_json and not namespace.include_fragments: + raise CLIError("Must provide --include-fragments to reference a fragment import JSON file") + + +def validate_katapolicygen_input(namespace): + if not (namespace.yaml_path or namespace.print_version): + raise CLIError("Either --yaml or --print-version is required") + + +def validate_fragment_key_and_chain(namespace): + if sum(map(bool, [namespace.key, namespace.chain])) == 1: + raise CLIError("Must provide both --key and --chain to sign a fragment") + + +def validate_fragment_source(namespace): + if not namespace.generate_import and sum(map(bool, [namespace.image_name, namespace.input_path])) != 1: + raise CLIError("Must provide either an image name or an input file to generate a fragment") + + +def validate_fragment_generate_import(namespace): + if namespace.generate_import and sum(map(bool, [ + namespace.fragment_path, + namespace.input_path, + namespace.image_name + ])) != 1: + raise CLIError( + ( + "Must provide either a fragment path, an input file, or " + "an image name to generate an import statement" + ) + ) + + +def validate_fragment_namespace_and_svn(namespace): + if not namespace.generate_import and (not namespace.namespace or not namespace.svn): + raise CLIError("Must provide both --namespace and --svn to generate a fragment") + if not namespace.generate_import and namespace.namespace in RESERVED_FRAGMENT_NAMES: + raise CLIError(f"Namespace '{namespace.namespace}' is reserved") + if namespace.svn and not namespace.svn.isdigit(): + raise CLIError("--svn must be an integer") + if not namespace.generate_import and (namespace.svn and int(namespace.svn) < 0): + raise CLIError("--svn must be greater than or equal to 0") + + +def validate_fragment_minimum_svn(namespace): + if namespace.generate_import and (not 
namespace.minimum_svn or int(namespace.minimum_svn) < 0): + raise CLIError("--minimum-svn must be greater than or equal to 0") + + +def validate_fragment_algo(namespace): + validate_fragment_key_and_chain(namespace) + if namespace.algo not in SUPPORTED_ALGOS: + raise CLIError(f"Algorithm '{namespace.algo}' is not supported. Supported algorithms are {SUPPORTED_ALGOS}") + + +def validate_fragment_path(namespace): + if namespace.fragment_path and not namespace.generate_import: + raise CLIError("Must provide --generate-import to specify a fragment path") + + +def validate_fragment_json(namespace): + if namespace.fragments_json and not namespace.generate_import: + raise CLIError("Must provide --generate-import to place a fragment import into a file") diff --git a/src/confcom/azext_confcom/commands.py b/src/confcom/azext_confcom/commands.py index 1b76746e6ef..1d2bb45f724 100644 --- a/src/confcom/azext_confcom/commands.py +++ b/src/confcom/azext_confcom/commands.py @@ -8,6 +8,7 @@ def load_command_table(self, _): with self.command_group("confcom") as g: g.custom_command("acipolicygen", "acipolicygen_confcom") + g.custom_command("acifragmentgen", "acifragmentgen_confcom") g.custom_command("katapolicygen", "katapolicygen_confcom") with self.command_group("confcom"): diff --git a/src/confcom/azext_confcom/config.py b/src/confcom/azext_confcom/config.py index 4fc6284b238..ac1b65bc9fc 100644 --- a/src/confcom/azext_confcom/config.py +++ b/src/confcom/azext_confcom/config.py @@ -4,7 +4,7 @@ # -------------------------------------------------------------------------------------------- import os -from azext_confcom import os_util +from azext_confcom.os_util import load_json_from_file, load_str_from_file # input json values ACI_FIELD_VERSION = "version" @@ -78,6 +78,8 @@ ACI_FIELD_TEMPLATE_RESOURCE_LABEL, ACI_FIELD_TEMPLATE_RESOURCE_PROFILE_LABEL ] +ACI_FIELD_TEMPLATE_TAGS = "tags" +ACI_FIELD_TEMPLATE_ZERO_SIDECAR = "Annotate-zero-sidecar" ACI_FIELD_YAML_MOUNT_TYPE = "emptyDir" 
ACI_FIELD_YAML_LIVENESS_PROBE = "livenessProbe" ACI_FIELD_YAML_READINESS_PROBE = "readinessProbe" @@ -85,6 +87,8 @@ VIRTUAL_NODE_YAML_METADATA = "metadata" VIRTUAL_NODE_YAML_NAME = "name" VIRTUAL_NODE_YAML_ANNOTATIONS = "annotations" +VIRTUAL_NODE_YAML_LABELS = "labels" +VIRTUAL_NODE_YAML_LABEL_WORKLOAD_IDENTITY = "azure.workload.identity/use" VIRTUAL_NODE_YAML_POLICY = "microsoft.containerinstance.virtualnode.ccepolicy" VIRTUAL_NODE_YAML_LIFECYCLE = "lifecycle" VIRTUAL_NODE_YAML_LIFECYCLE_POST_START = "postStart" @@ -142,11 +146,15 @@ POLICY_FIELD_CONTAINERS_ELEMENTS_ALLOW_STDIO_ACCESS = "allow_stdio_access" POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS = "fragments" POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED = "feed" +POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_PATH = "path" POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_ISS = "iss" +POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_ISSUER = "issuer" POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_MINIMUM_SVN = "minimum_svn" POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_INCLUDES = "includes" POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS_CONFIGMAP_LOCATION = "/mnt/configmap" POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS_CONFIGMAP_TYPE = "emptyDir" +REGO_CONTAINER_START = "containers := " +REGO_FRAGMENT_START = "fragments := " CONFIG_FILE = "./data/internal_config.json" @@ -154,7 +162,7 @@ script_directory = os.path.dirname(os.path.realpath(__file__)) CONFIG_FILE_PATH = f"{script_directory}/{CONFIG_FILE}" -_config = os_util.load_json_from_file(CONFIG_FILE_PATH) +_config = load_json_from_file(CONFIG_FILE_PATH) DEFAULT_WORKING_DIR = _config["containerd"]["defaultWorkingDir"] MOUNT_SOURCE_TABLE = {} @@ -171,6 +179,8 @@ MANAGED_IDENTITY_ENV_RULES = _config["managedIdentity"]["environmentVariables"] # VN2 environment variables VIRTUAL_NODE_ENV_RULES = _config["default_envs_virtual_node"]["environmentVariables"] +# VN2 environment variables for workload identities +VIRTUAL_NODE_ENV_RULES_WORKLOAD_IDENTITY = 
_config["workload_identity_virtual_node"]["environmentVariables"] # Enable container restart environment variable for all containers ENABLE_RESTART_ENV_RULE = _config["enableRestart"]["environmentVariables"] # default mounts image for customer containers @@ -185,15 +195,22 @@ DEFAULT_REGO_FRAGMENTS = _config["default_rego_fragments"] # things that need to be set for debug mode DEBUG_MODE_SETTINGS = _config["debugMode"] +# reserved fragment names for existing pieces of Rego +RESERVED_FRAGMENT_NAMES = _config["reserved_fragment_namespaces"] +# fragment artifact type +ARTIFACT_TYPE = "application/x-ms-policy-frag" # customer rego file for data to be injected REGO_FILE = "./data/customer_rego_policy.txt" +REGO_FRAGMENT_FILE = "./data/customer_rego_fragment.txt" script_directory = os.path.dirname(os.path.realpath(__file__)) REGO_FILE_PATH = f"{script_directory}/{REGO_FILE}" -CUSTOMER_REGO_POLICY = os_util.load_str_from_file(REGO_FILE_PATH) +REGO_FRAGMENT_FILE_PATH = f"{script_directory}/{REGO_FRAGMENT_FILE}" +CUSTOMER_REGO_POLICY = load_str_from_file(REGO_FILE_PATH) +CUSTOMER_REGO_FRAGMENT = load_str_from_file(REGO_FRAGMENT_FILE_PATH) # sidecar rego file SIDECAR_REGO_FILE = "./data/sidecar_rego_policy.txt" SIDECAR_REGO_FILE_PATH = f"{script_directory}/{SIDECAR_REGO_FILE}" -SIDECAR_REGO_POLICY = os_util.load_str_from_file(SIDECAR_REGO_FILE_PATH) +SIDECAR_REGO_POLICY = load_str_from_file(SIDECAR_REGO_FILE_PATH) # data folder DATA_FOLDER = os.path.join(script_directory, "data") @@ -243,3 +260,13 @@ "SIGSYS": 31, "SIGUNUSED": 31 } +# these algorithms are the only supported ones in https://github.com/veraison/go-cose/blob/main/algorithm.go +SUPPORTED_ALGOS = [ + "PS256", + "PS384", + "PS512", + "ES256", + "ES384", + "ES512", + "EdDSA", +] diff --git a/src/confcom/azext_confcom/container.py b/src/confcom/azext_confcom/container.py index edf4f4675d6..31534381a16 100644 --- a/src/confcom/azext_confcom/container.py +++ b/src/confcom/azext_confcom/container.py @@ -466,7 
+466,6 @@ def extract_allow_privilege_escalation(container_json: Any) -> bool: allow_privilege_escalation = True # assumes that securityContext field is optional if security_context: - # get the field for allow privilege escalation, default to true temp_privilege_escalation = case_insensitive_dict_get( security_context, @@ -595,18 +594,13 @@ def __init__( self._user = user or {} self._capabilities = capabilities self._allow_privilege_escalation = allowPrivilegeEscalation - self._policy_json = None - self._policy_json_str = None - self._policy_json_str_pp = None self._identifier = id_val self._exec_processes = execProcesses or [] self._signals = signals or [] self._extraEnvironmentRules = extraEnvironmentRules - def get_policy_json(self) -> str: - if not self._policy_json: - self._policy_json_serialization() - return self._policy_json + def get_policy_json(self, omit_id: bool = False) -> str: + return self._populate_policy_json_elements(omit_id=omit_id) def get_id(self) -> str: return self._identifier @@ -707,7 +701,6 @@ def _get_mounts_json(self) -> Dict[str, Any]: return [] mounts = [] - for m in self._mounts: mount = copy.deepcopy(config.DEFAULT_MOUNT_POLICY) mount[ @@ -735,10 +728,8 @@ def _get_mounts_json(self) -> Dict[str, Any]: return mounts - def _populate_policy_json_elements(self) -> Dict[str, Any]: - + def _populate_policy_json_elements(self, omit_id: bool = False) -> Dict[str, Any]: elements = { - config.POLICY_FIELD_CONTAINERS_ID: self._identifier, config.POLICY_FIELD_CONTAINERS_NAME: self.get_name(), config.POLICY_FIELD_CONTAINERS_ELEMENTS_LAYERS: self._layers, config.POLICY_FIELD_CONTAINERS_ELEMENTS_COMMANDS: self._command, @@ -754,17 +745,14 @@ def _populate_policy_json_elements(self) -> Dict[str, Any]: config.POLICY_FIELD_CONTAINERS_ELEMENTS_ALLOW_STDIO_ACCESS: self._allow_stdio_access, config.POLICY_FIELD_CONTAINERS_ELEMENTS_NO_NEW_PRIVILEGES: not self._allow_privilege_escalation } - self._policy_json = elements - return self._policy_json + if not 
omit_id: + elements[config.POLICY_FIELD_CONTAINERS_ID] = self._identifier + # if we are omitting the id, we should remove the id value from the policy if it's in the name field + elif omit_id and self.get_name() == self._identifier: + del elements[config.POLICY_FIELD_CONTAINERS_NAME] - def _policy_json_serialization(self): - policy = self._populate_policy_json_elements() - # serialize json policy to object, compact string and pretty print string - self._policy_json_str, self._policy_json_str_pp = ( - json.dumps(policy, separators=(",", ":"), sort_keys=True), - json.dumps(policy, indent=2, sort_keys=True), - ) + return elements class UserContainerImage(ContainerImage): @@ -784,17 +772,12 @@ def from_json( # Start with the customer environment rules env_rules = _INJECTED_CUSTOMER_ENV_RULES - # If is_vn2, add the VN2 environment rules if is_vn2: env_rules += _INJECTED_SERVICE_VN2_ENV_RULES image.set_extra_environment_rules(env_rules) - return image - def _populate_policy_json_elements(self) -> Dict[str, Any]: - elements = super()._populate_policy_json_elements() - self._policy_json = elements - - return self._policy_json + def _populate_policy_json_elements(self, omit_id: bool = False) -> Dict[str, Any]: + return super()._populate_policy_json_elements(omit_id=omit_id) diff --git a/src/confcom/azext_confcom/cose_proxy.py b/src/confcom/azext_confcom/cose_proxy.py new file mode 100644 index 00000000000..af148b023d5 --- /dev/null +++ b/src/confcom/azext_confcom/cose_proxy.py @@ -0,0 +1,185 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +import subprocess +import os +import stat +import platform +from typing import List +import requests +from azext_confcom.errors import eprint +from azext_confcom.config import ( + REGO_CONTAINER_START, + REGO_FRAGMENT_START, + POLICY_FIELD_CONTAINERS, + POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS, + POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_ISSUER, + POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED, + POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_MINIMUM_SVN, + ACI_FIELD_CONTAINERS_REGO_FRAGMENTS_INCLUDES, +) + + +host_os = platform.system() +machine = platform.machine() + + +def call_cose_sign_tool(args: List[str], error_message: str, check=False): + item = subprocess.run(args, check=check, capture_output=True, timeout=120) + + if item.returncode != 0: + eprint(f"{error_message}: {item.stderr.decode('utf-8')}", exit_code=item.returncode) + + return item + + +class CoseSignToolProxy: # pylint: disable=too-few-public-methods + + @staticmethod + def download_binaries(): + dir_path = os.path.dirname(os.path.realpath(__file__)) + + bin_folder = os.path.join(dir_path, "bin") + if not os.path.exists(bin_folder): + os.makedirs(bin_folder) + + # get the most recent release artifacts from github + r = requests.get("https://api.github.com/repos/microsoft/cosesign1go/releases") + r.raise_for_status() + needed_assets = ["sign1util", "sign1util.exe"] + + # these should be newest to oldest + for release in r.json(): + # search for both windows and linux binaries + needed_asset_info = [asset for asset in release["assets"] if asset["name"] in needed_assets] + if len(needed_asset_info) == len(needed_assets): + for asset in needed_asset_info: + # get the download url for the sign1util file + exe_url = asset["browser_download_url"] + # download the file + r = requests.get(exe_url) + r.raise_for_status() + # save the file to the bin folder + with 
open(os.path.join(bin_folder, asset["name"]), "wb") as f: + f.write(r.content) + # stop iterating through releases + break + + def __init__(self): + script_directory = os.path.dirname(os.path.realpath(__file__)) + DEFAULT_LIB = "./bin/sign1util" + + if host_os == "Linux": + DEFAULT_LIB += "" + elif host_os == "Windows": + DEFAULT_LIB += ".exe" + elif host_os == "Darwin": + eprint("The extension for MacOS has not been implemented.") + else: + eprint( + "Unknown target platform. The extension only works with Windows and Linux" + ) + + self.policy_bin = os.path.join(f"{script_directory}", f"{DEFAULT_LIB}") + + # check if the extension binary exists + if not os.path.exists(self.policy_bin): + eprint("The extension binary file cannot be located.") + if not os.access(self.policy_bin, os.X_OK): + # add executable permissions for the current user if they don't exist + st = os.stat(self.policy_bin) + os.chmod(self.policy_bin, st.st_mode | stat.S_IXUSR) + + def cose_sign( + self, + payload_path: str, + key_path: str, + cert_path: str, + feed: str, + iss: str, + algo: str, + out_path: str = "payload.rego.cose", + ) -> bool: + policy_bin_str = str(self.policy_bin) + + arg_list = [ + policy_bin_str, + "create", + "-algo", + algo, + "-chain", + cert_path, + "-claims", + payload_path, + "-key", + key_path, + "-out", + out_path, + ] + + if feed: + arg_list.extend(["-feed", feed]) + + if iss: + arg_list.extend(["-issuer", iss]) + + call_cose_sign_tool(arg_list, "Error signing the policy fragment") + return True + + def create_issuer(self, cert_path: str) -> str: + policy_bin_str = str(self.policy_bin) + + arg_list = [policy_bin_str, "did-x509", "-chain", cert_path, "-policy", "CN"] + + item = call_cose_sign_tool(arg_list, "Error creating the issuer") + + return item.stdout.decode("utf-8") + + # generate an import statement from a signed policy fragment + def generate_import_from_path(self, fragment_path: str, minimum_svn: int) -> str: + # TODO: make sure the fragment is signed 
correctly + if not os.path.exists(fragment_path): + eprint(f"The fragment file at {fragment_path} does not exist") + + policy_bin_str = str(self.policy_bin) + + arg_list_chain = [policy_bin_str, "check", "--in", fragment_path, "--verbose"] + + item = call_cose_sign_tool(arg_list_chain, "Error getting information from signed fragment file") + + stdout = item.stdout.decode("utf-8") + # extract issuer, feed, and payload from the fragment + issuer = stdout.split("iss: ")[1].split("\n")[0] + feed = stdout.split("feed: ")[1].split("\n")[0] + payload = stdout.split("payload:")[1] + + includes = [] + if REGO_CONTAINER_START in payload: + includes.append(POLICY_FIELD_CONTAINERS) + + if REGO_FRAGMENT_START in payload: + includes.append(POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS) + + # put it all together + import_statement = { + POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_ISSUER: issuer, + POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED: feed, + POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_MINIMUM_SVN: minimum_svn, + ACI_FIELD_CONTAINERS_REGO_FRAGMENTS_INCLUDES: includes, + } + + return import_statement + + def extract_payload_from_path(self, fragment_path: str) -> str: + policy_bin_str = str(self.policy_bin) + if not os.path.exists(fragment_path): + eprint(f"The fragment file at {fragment_path} does not exist") + + arg_list_chain = [policy_bin_str, "check", "--in", fragment_path, "--verbose"] + + item = call_cose_sign_tool(arg_list_chain, "Error getting information from signed fragment file") + + stdout = item.stdout.decode("utf-8") + return stdout.split("payload:")[1] diff --git a/src/confcom/azext_confcom/custom.py b/src/confcom/azext_confcom/custom.py index 27731d05db5..7bc3f2cc34d 100644 --- a/src/confcom/azext_confcom/custom.py +++ b/src/confcom/azext_confcom/custom.py @@ -10,26 +10,27 @@ from knack.log import get_logger from azext_confcom.config import ( DEFAULT_REGO_FRAGMENTS, - VIRTUAL_NODE_YAML_METADATA, - VIRTUAL_NODE_YAML_ANNOTATIONS, - 
VIRTUAL_NODE_YAML_POLICY, + POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS, + ) + from azext_confcom import os_util from azext_confcom.template_util import ( pretty_print_func, print_func, str_to_sha256, inject_policy_into_template, + inject_policy_into_yaml, print_existing_policy_from_arm_template, - print_existing_policy_from_yaml, - deep_dict_update, - filter_non_pod_resources, - convert_to_pod_spec_helper, + print_existing_policy_from_yaml ) +from azext_confcom.fragment_util import get_all_fragment_contents from azext_confcom.init_checks import run_initial_docker_checks from azext_confcom import security_policy from azext_confcom.security_policy import OutputType from azext_confcom.kata_proxy import KataPolicyGenProxy +from azext_confcom.cose_proxy import CoseSignToolProxy +from azext_confcom import oras_proxy logger = get_logger(__name__) @@ -55,27 +56,15 @@ def acipolicygen_confcom( disable_stdio: bool = False, print_existing_policy: bool = False, faster_hashing: bool = False, + omit_id: bool = False, + include_fragments: bool = False, + fragments_json: str = None, + exclude_default_fragments: bool = False, ): - - if sum(map(bool, [input_path, arm_template, image_name, virtual_node_yaml_path])) != 1: - error_out("Can only generate CCE policy from one source at a time") - if sum(map(bool, [print_policy_to_terminal, outraw, outraw_pretty_print])) > 1: - error_out("Can only print in one format at a time") - elif (diff and input_path) or (diff and image_name): - error_out("Can only diff CCE policy from ARM Template or YAML File") - elif arm_template_parameters and not arm_template: - error_out( - "Can only use ARM Template Parameters if ARM Template is also present" - ) - elif save_to_file and arm_template and not (print_policy_to_terminal or outraw or outraw_pretty_print): - error_out("Must print policy to terminal when saving to file") - elif faster_hashing and tar_mapping_location: - error_out("Cannot use --faster-hashing with --tar") - if 
print_existing_policy or outraw or outraw_pretty_print: logger.warning( "%s %s %s %s %s", - "Secrets that are included in the provided arm template or configuration files ", + "Secrets that are included in the provided arm template or configuration files", "in the container env or cmd sections will be printed out with this flag.", "These are outputed secrets that you must protect. Be sure that you do not include these secrets in your", "source control. Also verify that no secrets are present in the logs of your command or script.", @@ -84,10 +73,10 @@ def acipolicygen_confcom( if print_existing_policy and arm_template: print_existing_policy_from_arm_template(arm_template, arm_template_parameters) - sys.exit(0) - elif print_existing_policy and virtual_node_yaml_path: + return + if print_existing_policy and virtual_node_yaml_path: print_existing_policy_from_yaml(virtual_node_yaml_path) - sys.exit(0) + return if debug_mode: logger.warning("WARNING: %s %s", @@ -103,6 +92,18 @@ def acipolicygen_confcom( # warn user that input infrastructure_svn is less than the configured default value check_infrastructure_svn(infrastructure_svn) + fragments_list = [] + fragment_policy_list = [] + # gather information about the fragments being used in the new policy + if include_fragments: + fragments_list = os_util.load_json_from_file(fragments_json or input_path) + fragments_list = fragments_list.get("fragments", []) or fragments_list + + # convert to list if it's just a dict + if not isinstance(fragments_list, list): + fragments_list = [fragments_list] + fragment_policy_list = get_all_fragment_contents(fragments_list) + # telling the user what operation we're doing logger.warning( "Generating security policy for %s: %s in %s", @@ -115,7 +116,11 @@ def acipolicygen_confcom( # error checking for making sure an input is provided is above if input_path: container_group_policies = security_policy.load_policy_from_file( - input_path, debug_mode=debug_mode, + input_path, + 
debug_mode=debug_mode, + infrastructure_svn=infrastructure_svn, + disable_stdio=disable_stdio, + exclude_default_fragments=exclude_default_fragments, ) elif arm_template: container_group_policies = security_policy.load_policy_from_arm_template_file( @@ -125,6 +130,10 @@ def acipolicygen_confcom( debug_mode=debug_mode, disable_stdio=disable_stdio, approve_wildcards=approve_wildcards, + diff_mode=diff, + rego_imports=fragments_list, + fragment_contents=fragment_policy_list, + exclude_default_fragments=exclude_default_fragments, ) elif image_name: container_group_policies = security_policy.load_policy_from_image_name( @@ -136,9 +145,8 @@ def acipolicygen_confcom( debug_mode=debug_mode, disable_stdio=disable_stdio, approve_wildcards=approve_wildcards, + diff_mode=diff ) - virtual_node_yaml = list(os_util.load_multiple_yaml_from_file(virtual_node_yaml_path)) - filtered_yaml = filter_non_pod_resources(virtual_node_yaml) exit_code = 0 @@ -156,38 +164,26 @@ def acipolicygen_confcom( if validate_sidecar: exit_code = validate_sidecar_in_policy(policy, output_type == security_policy.OutputType.PRETTY_PRINT) elif virtual_node_yaml_path and not (print_policy_to_terminal or outraw or outraw_pretty_print or diff): - current_yaml = filtered_yaml[count] - # find where this policy needs to go in the original file - count_in_file = virtual_node_yaml.index(current_yaml) - # use the reference this helper function returns to place the policy in the correct spot - pod_item = convert_to_pod_spec_helper(current_yaml) - # Metadata to be added to virtual node YAML - needed_metadata = { - VIRTUAL_NODE_YAML_METADATA: { - VIRTUAL_NODE_YAML_ANNOTATIONS: { - VIRTUAL_NODE_YAML_POLICY: policy.get_serialized_output(), - } - } - } - - # Update virtual node YAML with metadata - deep_dict_update(needed_metadata, pod_item) - # replace contents in the original file - virtual_node_yaml[count_in_file] = current_yaml - - os_util.write_multiple_yaml_to_file(virtual_node_yaml_path, virtual_node_yaml) + result 
= inject_policy_into_yaml( + virtual_node_yaml_path, policy.get_serialized_output(omit_id=omit_id), count + ) + if result: + print(str_to_sha256(policy.get_serialized_output(OutputType.RAW, omit_id=omit_id))) + logger.info("CCE Policy successfully injected into YAML file") elif diff: exit_code = get_diff_outputs(policy, output_type == security_policy.OutputType.PRETTY_PRINT) elif arm_template and not (print_policy_to_terminal or outraw or outraw_pretty_print): result = inject_policy_into_template(arm_template, arm_template_parameters, - policy.get_serialized_output(), count) + policy.get_serialized_output(omit_id=omit_id), count) if result: # this is always going to be the unencoded policy - print(str_to_sha256(policy.get_serialized_output(OutputType.RAW))) + print(str_to_sha256(policy.get_serialized_output(OutputType.RAW, omit_id=omit_id))) logger.info("CCE Policy successfully injected into ARM Template") + else: # output to terminal - print(f"{policy.get_serialized_output(output_type)}\n\n") + print(f"{policy.get_serialized_output(output_type, omit_id=omit_id)}\n\n") + # output to file if save_to_file: logger.warning( @@ -198,7 +194,97 @@ def acipolicygen_confcom( ) policy.save_to_file(save_to_file, output_type) - sys.exit(exit_code) + if exit_code != 0: + sys.exit(exit_code) + + +# pylint: disable=R0914 +def acifragmentgen_confcom( + image_name: str, + input_path: str, + tar_mapping_location: str, + namespace: str, + svn: str, + feed: str, + key: str, + chain: str, + minimum_svn: int, + algo: str = "ES384", + fragment_path: str = None, + generate_import: bool = False, + disable_stdio: bool = False, + debug_mode: bool = False, + output_filename: str = None, + outraw: bool = False, + upload_fragment: bool = False, + no_print: bool = False, + fragments_json: str = "", +): + output_type = get_fragment_output_type(outraw) + + if generate_import: + cose_client = CoseSignToolProxy() + import_statement = cose_client.generate_import_from_path(fragment_path, 
minimum_svn=minimum_svn) + if fragments_json: + if os.path.isfile(fragments_json): + logger.info("Appending import statement to JSON file") + fragments_file_contents = os_util.load_json_from_file(fragments_json) + fragments_list = fragments_file_contents.get(POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS, []) + else: + logger.info("Creating import statement JSON file") + fragments_file_contents = {} + fragments_list = [] + # convert to list if it's just a dict + if not isinstance(fragments_list, list): + fragments_list = [fragments_list] + fragments_list.append(import_statement) + + fragments_file_contents[POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS] = fragments_list + os_util.write_str_to_file(fragments_json, pretty_print_func(fragments_file_contents)) + else: + print(pretty_print_func(import_statement)) + return + + tar_mapping = tar_mapping_validation(tar_mapping_location, using_config_file=bool(input_path)) + + if image_name: + policy = security_policy.load_policy_from_image_name( + image_name, debug_mode=debug_mode, disable_stdio=disable_stdio + ) + else: + # this is using --input + if not tar_mapping: + tar_mapping = os_util.load_tar_mapping_from_config_file(input_path) + policy = security_policy.load_policy_from_config_file( + input_path, debug_mode=debug_mode, disable_stdio=disable_stdio + ) + policy.populate_policy_content_for_all_images( + individual_image=bool(image_name), tar_mapping=tar_mapping + ) + + # if no feed is provided, use the first image's feed + # to assume it's an image-attached fragment + if not feed: + feed = policy.get_images()[0].containerImage + + fragment_text = policy.generate_fragment(namespace, svn, output_type) + + if output_type != security_policy.OutputType.DEFAULT and not no_print: + print(fragment_text) + + # take ".rego" off the end of the filename if it's there, it'll get added back later + output_filename = (output_filename or "").replace(".rego", "") + filename = f"{output_filename or namespace}.rego" + os_util.write_str_to_file(filename, 
fragment_text) + + if key: + cose_proxy = CoseSignToolProxy() + iss = cose_proxy.create_issuer(chain) + out_path = filename + ".cose" + + cose_proxy.cose_sign(filename, key, chain, feed, iss, algo, out_path) + if upload_fragment: + oras_proxy.attach_fragment_to_image(feed, out_path) def katapolicygen_confcom( @@ -215,9 +301,6 @@ def katapolicygen_confcom( ): kata_proxy = KataPolicyGenProxy() - if not (yaml_path or print_version): - error_out("Either --yaml-path or --print-version is required") - output = kata_proxy.kata_genpolicy( yaml_path, config_map_file=config_map_file, @@ -231,7 +314,6 @@ def katapolicygen_confcom( containerd_socket_path=containerd_socket_path, ) print(output) - sys.exit(0) def update_confcom(cmd, instance, tags=None): @@ -296,7 +378,9 @@ def get_diff_outputs(policy: security_policy.AciPolicy, outraw_pretty_print: boo return exit_code -def tar_mapping_validation(tar_mapping_location: str): +# TODO: refactor this function to use _validators.py functions and make sure the tar path +# isn't coming from the config file rather than the flag +def tar_mapping_validation(tar_mapping_location: str, using_config_file: bool = False): tar_mapping = None if tar_mapping_location: if not os.path.isfile(tar_mapping_location): @@ -311,7 +395,7 @@ def tar_mapping_validation(tar_mapping_location: str): # passing in a single tar location for a single image policy else: tar_mapping = tar_mapping_location - else: + elif not using_config_file: # only need to do the docker checks if we're not grabbing image info from tar files error_msg = run_initial_docker_checks() if error_msg: @@ -329,6 +413,8 @@ def get_output_type(outraw, outraw_pretty_print): return output_type -def error_out(error_string): - logger.error(error_string) - sys.exit(1) +def get_fragment_output_type(outraw): + output_type = security_policy.OutputType.PRETTY_PRINT + if outraw: + output_type = security_policy.OutputType.RAW + return output_type diff --git 
a/src/confcom/azext_confcom/data/customer_rego_fragment.txt b/src/confcom/azext_confcom/data/customer_rego_fragment.txt new file mode 100644 index 00000000000..20cd0f40a32 --- /dev/null +++ b/src/confcom/azext_confcom/data/customer_rego_fragment.txt @@ -0,0 +1,8 @@ +package %s + +svn := %s +framework_version := "0.2.3" + +fragments := %s + +containers := %s diff --git a/src/confcom/azext_confcom/data/internal_config.json b/src/confcom/azext_confcom/data/internal_config.json index c2209fcba26..4e0bc1ef316 100644 --- a/src/confcom/azext_confcom/data/internal_config.json +++ b/src/confcom/azext_confcom/data/internal_config.json @@ -1,5 +1,5 @@ { - "version": "1.0.1", + "version": "1.1.0", "hcsshim_config": { "maxVersion": "1.0.0", "minVersion": "0.0.1" @@ -101,6 +101,34 @@ } ] }, + "workload_identity_virtual_node": { + "environmentVariables": [ + { + "name": "AZURE_CLIENT_ID", + "value": ".+", + "strategy": "re2", + "required": false + }, + { + "name": "AZURE_TENANT_ID", + "value": ".+", + "strategy": "re2", + "required": false + }, + { + "name": "AZURE_FEDERATED_TOKEN_FILE", + "value": ".+", + "strategy": "re2", + "required": false + }, + { + "name": "AZURE_AUTHORITY_HOST", + "value": ".+", + "strategy": "re2", + "required": false + } + ] + }, "managedIdentity": { "environmentVariables": [ { @@ -279,8 +307,15 @@ ] } ], + "reserved_fragment_namespaces": [ + "microsoftcontainerinstance", + "data", + "input", + "policy" + ], "default_containers": [ { + "name": "pause-container", "command": [ "/pause" ], diff --git a/src/confcom/azext_confcom/errors.py b/src/confcom/azext_confcom/errors.py index a3f8cec89e1..598b437345b 100644 --- a/src/confcom/azext_confcom/errors.py +++ b/src/confcom/azext_confcom/errors.py @@ -12,7 +12,7 @@ class AccContainerError(Exception): """Generic ACC Container errors""" -def eprint(*args, **kwargs): +def eprint(*args, exit_code=1, **kwargs): # print to stderr with formatting to be noticeable in the terminal logger.error(*args, **kwargs) - 
sys.exit(1) + sys.exit(exit_code) diff --git a/src/confcom/azext_confcom/fragment_util.py b/src/confcom/azext_confcom/fragment_util.py new file mode 100644 index 00000000000..61370d8eaaa --- /dev/null +++ b/src/confcom/azext_confcom/fragment_util.py @@ -0,0 +1,75 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import yaml +from azext_confcom import config +from azext_confcom import oras_proxy +from azext_confcom.cose_proxy import CoseSignToolProxy +from azext_confcom.template_util import ( + case_insensitive_dict_get, + extract_containers_from_text, +) + + +# input is the full rego file as a string +# output is all of the containers in the rego files as a list of dictionaries +def combine_fragments_with_policy(all_fragments): + out_fragments = [] + for fragment in all_fragments: + container_text = extract_containers_from_text(fragment, "containers := ") + container_text = container_text.replace("\t", " ") + containers = yaml.load(container_text, Loader=yaml.FullLoader) + out_fragments.extend(containers) + return out_fragments + + +def get_all_fragment_contents(fragment_imports): + fragment_feeds = [ + case_insensitive_dict_get(fragment, config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED) + for fragment in fragment_imports + ] + + all_fragments_contents = [] + cose_proxy = CoseSignToolProxy() + + for fragment in fragment_imports: + # pull locally if there is a path, otherwise pull from the remote registry + if ( + config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_PATH in fragment and + fragment[config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_PATH] + ): + contents = [ + cose_proxy.extract_payload_from_path( + 
fragment[config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_PATH] + ) + ] + else: + feed_name = case_insensitive_dict_get( + fragment, config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED + ) + contents = oras_proxy.pull_all_image_attached_fragments(feed_name) + + # add the new fragments to the list of all fragments if they're not already there + # the side effect of adding this way is that if we have a local path to a nested fragment + # we will pull then use the local version of the fragment instead of pulling from the registry + for content in contents: + fragment_text = extract_containers_from_text( + content, config.REGO_FRAGMENT_START + ).replace("\t", " ") + + fragments = yaml.load( + fragment_text, + Loader=yaml.FullLoader, + ) + + # this adds new feeds to the list of feeds to pull dynamically + # it will end when there are no longer nested fragments to pull + for new_fragment in fragments: + if new_fragment[config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED] not in fragment_feeds: + fragment_imports.append(new_fragment[config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED]) + + all_fragments_contents.append(content) + + return combine_fragments_with_policy(all_fragments_contents) diff --git a/src/confcom/azext_confcom/kata_proxy.py b/src/confcom/azext_confcom/kata_proxy.py index 2a1d80f498b..daa30846a4f 100644 --- a/src/confcom/azext_confcom/kata_proxy.py +++ b/src/confcom/azext_confcom/kata_proxy.py @@ -4,11 +4,9 @@ # -------------------------------------------------------------------------------------------- import subprocess -from typing import List import os import stat import sys -from pathlib import Path import platform import requests from azext_confcom.config import DATA_FOLDER @@ -37,6 +35,7 @@ def download_binaries(): # get the most recent release artifacts from github r = requests.get("https://api.github.com/repos/microsoft/kata-containers/releases") + r.raise_for_status() bin_flag = False needed_assets = ["genpolicy", 
"genpolicy.exe"] # search for genpolicy in the assets from kata-container releases @@ -56,6 +55,7 @@ def download_binaries(): exe_url = asset["browser_download_url"] # download the file r = requests.get(exe_url) + r.raise_for_status() # save the file to the bin folder with open(os.path.join(bin_folder, save_name), "wb") as f: f.write(r.content) @@ -79,24 +79,19 @@ def __init__(self): if host_os == "Linux": DEFAULT_LIB += "-linux" elif host_os == "Windows": - if machine.endswith("64"): - DEFAULT_LIB += "-windows.exe" - else: - eprint( - "32-bit Windows is not supported." - ) + eprint("The katapolicygen subcommand for Windows has not been implemented.") elif host_os == "Darwin": - eprint("The extension for MacOS has not been implemented.") + eprint("The katapolicygen subcommand for MacOS has not been implemented.") else: eprint( - "Unknown target platform. The extension only works with Windows, Linux and MacOS" + "Unknown target platform. The katapolicygen subcommand only works with Linux" ) - self.policy_bin = Path(os.path.join(f"{script_directory}", f"{DEFAULT_LIB}")) + self.policy_bin = os.path.join(f"{script_directory}", f"{DEFAULT_LIB}") # check if the extension binary exists if not os.path.exists(self.policy_bin): - eprint("The extension binary file cannot be located.") + eprint("The katapolicygen subcommand binary file cannot be located.") if not os.access(self.policy_bin, os.X_OK): # add executable permissions for the current user if they don't exist st = os.stat(self.policy_bin) @@ -114,7 +109,7 @@ def kata_genpolicy( print_version=False, containerd_pull=False, containerd_socket_path=None - ) -> List[str]: + ) -> list[str]: policy_bin_str = str(self.policy_bin) # get path to data and rules folder arg_list = [policy_bin_str] diff --git a/src/confcom/azext_confcom/oras_proxy.py b/src/confcom/azext_confcom/oras_proxy.py new file mode 100644 index 00000000000..0507b9449bc --- /dev/null +++ b/src/confcom/azext_confcom/oras_proxy.py @@ -0,0 +1,126 @@ +# 
-------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import subprocess +import json +import platform +import re +from typing import List +from azext_confcom.errors import eprint +from azext_confcom.config import ARTIFACT_TYPE +from azext_confcom.cose_proxy import CoseSignToolProxy + +host_os = platform.system() +machine = platform.machine() + + +def call_oras_cli(args, check=False): + return subprocess.run(args, check=check, capture_output=True, timeout=120) + + +# discover if there are policy artifacts associated with the image +# return their digests in a list if there are some +def discover( + image: str, +) -> List[str]: + arg_list = ["oras", "discover", image, "-o", "json", "--artifact-type", ARTIFACT_TYPE] + item = call_oras_cli(arg_list, check=False) + hashes = [] + + if item.returncode == 0: + json_output = json.loads(item.stdout.decode("utf-8")) + manifests = json_output["manifests"] + for manifest in manifests: + hashes.append(manifest["digest"]) + # get the exit code from the subprocess + else: + if "401: Unauthorized" in item.stderr.decode("utf-8"): + eprint( + f"Error pulling the policy fragment from {image}.\n\n" + + "Please log into the registry and try again.\n\n" + ) + eprint(f"Error retrieving fragments from remote repo: {item.stderr.decode('utf-8')}", exit_code=item.returncode) + return hashes + + +# pull the policy fragment from the remote repo and return its contents as a string +def pull( + image: str, + image_hash: str, +) -> str: + if "@sha256:" in image: + image = image.split("@")[0] + arg_list = ["oras", "pull", f"{image}@{image_hash}"] + item = call_oras_cli(arg_list, check=False) + + # get the exit code from the subprocess + if item.returncode != 
0: + if "401: Unauthorized" in item.stderr.decode("utf-8"): + eprint( + f"Error pulling the policy fragment: {image}@{image_hash}.\n\n" + + "Please log into the registry and try again.\n\n" + ) + eprint(f"Error while pulling fragment: {item.stderr.decode('utf-8')}", exit_code=item.returncode) + + # extract the file name from stdout + filename = "" + lines = item.stdout.decode("utf-8").splitlines() + for line in lines: + if "Downloaded" in line: + filename = line.split(" ")[-1] + break + + if filename == "": + eprint(f"Could not find the filename of the pulled fragment for {image}@{image_hash}") + + return filename + + +def pull_all_image_attached_fragments(image): + # TODO: be smart about if we're pulling a fragment directly or trying to discover them from an image tag + # TODO: this will be for standalone fragments + fragments = discover(image) + fragment_contents = [] + proxy = CoseSignToolProxy() + for fragment_digest in fragments: + filename = pull(image, fragment_digest) + text = proxy.extract_payload_from_path(filename) + # containers = extract_containers_from_text(text, REGO_CONTAINER_START) + # new_fragments = extract_containers_from_text(text, REGO_FRAGMENT_START) + # if new_fragments: + # for new_fragment in new_fragments: + # feed = new_fragment.get("feed") + # # if we don't have the feed in the list of feeds we've already pulled, pull it + # if feed not in fragment_feeds: + # fragment_contents.extend(pull_all_image_attached_fragments(feed, fragment_feeds=fragment_feeds)) + fragment_contents.append(text) + return fragment_contents + + +def check_oras_cli(): + text = "ORAS CLI not installed. 
Please install ORAS CLI: https://oras.land/docs/installation"
+    try:
+        item = call_oras_cli(["oras", "version"], check=False)
+        if item.returncode != 0:
+            eprint(text)
+    except FileNotFoundError:
+        eprint(text)
+
+
+def attach_fragment_to_image(image_name: str, filename: str):
+    if ":" not in image_name:
+        image_name += ":latest"
+    # attach the fragment to the image
+    arg_list = ["oras", "attach", "--artifact-type", ARTIFACT_TYPE, image_name, filename]
+    item = call_oras_cli(arg_list, check=False)
+    if item.returncode != 0:
+        eprint(f"Could not attach fragment to image: {image_name}. Failed with {item.stderr}")
+
+    # regex to extract the digest from the output
+    digest_result = re.search(r" sha256:[a-f0-9]{64}", item.stdout.decode("utf8"))
+    if digest_result is None:
+        print("Attached fragment to image, but could not extract digest from output.")
+        return
+    digest = digest_result.group(0)
+    print(f"Fragment attached to image '{image_name}' with Digest:{digest}")
diff --git a/src/confcom/azext_confcom/os_util.py b/src/confcom/azext_confcom/os_util.py
index 8541a0dd24d..ee3d0e32c0d 100644
--- a/src/confcom/azext_confcom/os_util.py
+++ b/src/confcom/azext_confcom/os_util.py
@@ -30,8 +30,8 @@ def base64_to_str(data: str) -> str:
     try:
         data_bytes = base64.b64decode(data)
         data_str = data_bytes.decode("ascii")
-    except binascii.Error:
-        eprint(f"Invalid base64 string: {data}")
+    except binascii.Error as e:
+        raise ValueError(f"Invalid base64 string: {data}") from e
     return data_str
@@ -135,18 +135,27 @@ def load_tar_mapping_from_file(path: str) -> dict:
     return raw_json
+def load_tar_mapping_from_config_file(path: str) -> dict:
+    raw_json = load_json_from_file(path)
+    containers = raw_json.get("containers", [])
+    output_dict = {}
+    for container in containers:
+        tar_path = container.get("path")
+        if tar_path and not os.path.isfile(tar_path):
+            eprint(f"Tarball does not exist at path: {tar_path}")
+        image_name = container.get("properties", {}).get("image", "")
+ 
output_dict[image_name] = tar_path + return output_dict + + def map_image_from_tar_backwards_compatibility(image_name: str, tar: TarFile, tar_location: str): tar_dir = os.path.dirname(tar_location) # grab all files in the folder and only take the one that's named with hex values and a json extension members = tar.getmembers() - info_file_name = [ - file - for file in members - if file.name.endswith(".json") and not file.name.startswith("manifest") - ] + info_file = None # if there's more than one image in the tarball, we need to do some more logic - if len(info_file_name) > 0: + if len(members) > 0: # extract just the manifest file and see if any of the RepoTags match the image_name we're searching for # the manifest.json should have a list of all the image tags # and what json files they map to to get env vars, startup cmd, etc. @@ -157,7 +166,7 @@ def map_image_from_tar_backwards_compatibility(image_name: str, tar: TarFile, ta for image in manifest: if image_name in image.get("RepoTags"): info_file = [ - item for item in info_file_name if item.name == image.get("Config") + item for item in members if item.name == image.get("Config") ][0] break # remove the extracted manifest file to clean up diff --git a/src/confcom/azext_confcom/rootfs_proxy.py b/src/confcom/azext_confcom/rootfs_proxy.py index 15715f31e69..fca3757f872 100644 --- a/src/confcom/azext_confcom/rootfs_proxy.py +++ b/src/confcom/azext_confcom/rootfs_proxy.py @@ -9,7 +9,6 @@ import os import sys import stat -from pathlib import Path import platform import requests from knack.log import get_logger @@ -34,27 +33,24 @@ def download_binaries(): os.makedirs(bin_folder) # get the most recent release artifacts from github - r = requests.get("https://api.github.com/repos/microsoft/hcsshim/releases") - bin_flag = False - exe_flag = False - # search for dmverity-vhd in the assets from hcsshim releases + r = requests.get("https://api.github.com/repos/microsoft/integrity-vhd/releases") + r.raise_for_status() + 
needed_assets = ["dmverity-vhd", "dmverity-vhd.exe"] + # these should be newest to oldest for release in r.json(): - # these should be newest to oldest - for asset in release["assets"]: - # download the file if it contains dmverity-vhd - if "dmverity-vhd" in asset["name"]: - if "exe" in asset["name"]: - exe_flag = True - else: - bin_flag = True + # search for both windows and linux binaries + needed_asset_info = [asset for asset in release["assets"] if asset["name"] in needed_assets] + if len(needed_asset_info) == len(needed_assets): + for asset in needed_asset_info: # get the download url for the dmverity-vhd file exe_url = asset["browser_download_url"] # download the file r = requests.get(exe_url) + r.raise_for_status() # save the file to the bin folder with open(os.path.join(bin_folder, asset["name"]), "wb") as f: f.write(r.content) - if bin_flag and exe_flag: + # stop iterating through releases break def __init__(self): @@ -74,10 +70,10 @@ def __init__(self): eprint("The extension for MacOS has not been implemented.") else: eprint( - "Unknown target platform. The extension only works with Windows, Linux and MacOS" + "Unknown target platform. 
The extension only works with Windows and Linux" ) - self.policy_bin = Path(os.path.join(f"{script_directory}", f"{DEFAULT_LIB}")) + self.policy_bin = os.path.join(f"{script_directory}", f"{DEFAULT_LIB}") # check if the extension binary exists if not os.path.exists(self.policy_bin): diff --git a/src/confcom/azext_confcom/security_policy.py b/src/confcom/azext_confcom/security_policy.py index ff60b03e2ec..b3d6da3cf9e 100644 --- a/src/confcom/azext_confcom/security_policy.py +++ b/src/confcom/azext_confcom/security_policy.py @@ -6,9 +6,8 @@ import json import warnings import copy -from typing import Any, List, Dict, Tuple +from typing import Any, List, Dict, Tuple, Union from enum import Enum, auto -import docker import deepdiff from knack.log import get_logger from tqdm import tqdm @@ -25,7 +24,6 @@ readable_diff, case_insensitive_dict_get, compare_env_vars, - compare_containers, get_values_for_params, process_mounts, process_configmap, @@ -39,9 +37,14 @@ convert_to_pod_spec, filter_non_pod_resources, decompose_confidential_properties, + process_env_vars_from_config, + process_mounts_from_config, + process_fragment_imports, + get_container_diff, ) from azext_confcom.rootfs_proxy import SecurityPolicyProxy + logger = get_logger() @@ -62,9 +65,9 @@ def __init__( existing_rego_fragments: Any = None, debug_mode: bool = False, disable_stdio: bool = False, - is_vn2: bool = False + is_vn2: bool = False, + fragment_contents: Any = None, ) -> None: - self._docker_client = None self._rootfs_proxy = None self._policy_str = None self._policy_str_pp = None @@ -72,6 +75,7 @@ def __init__( self._fragments = rego_fragments self._existing_fragments = existing_rego_fragments self._api_version = config.API_VERSION + self._fragment_contents = fragment_contents if debug_mode: self._allow_properties_access = config.DEBUG_MODE_SETTINGS.get( @@ -118,7 +122,7 @@ def __init__( containers = case_insensitive_dict_get( deserialized_config, config.ACI_FIELD_CONTAINERS ) - if not containers: + if 
containers is None: eprint( f'Field ["{config.ACI_FIELD_CONTAINERS}"] is empty or can not be found.' ) @@ -135,17 +139,11 @@ def __init__( self._images = container_results - def __enter__(self) -> None: + def __enter__(self) -> Any: return self def __exit__(self, exception_type, exception_value, exception_traceback) -> None: - self.close() - - def _get_docker_client(self) -> docker.client.DockerClient: - if not self._docker_client: - self._docker_client = docker.from_env() - - return self._docker_client + return None def _get_rootfs_proxy(self) -> SecurityPolicyProxy: if not self._rootfs_proxy: @@ -153,24 +151,22 @@ def _get_rootfs_proxy(self) -> SecurityPolicyProxy: return self._rootfs_proxy - def _close_docker_client(self) -> None: - if self._docker_client: - self._get_docker_client().close() - - def close(self) -> None: - self._close_docker_client() + def get_fragments(self) -> List[str]: + return self._fragments or [] def get_serialized_output( self, output_type: OutputType = OutputType.DEFAULT, rego_boilerplate=True, - ) -> str: + omit_id: bool = False, + include_sidecars: bool = True, + ): # error check the output type if not isinstance(output_type, Enum) or output_type.value not in [item.value for item in OutputType]: eprint("Unknown output type for serialization.") policy_str = self._policy_serialization( - output_type == OutputType.PRETTY_PRINT + output_type == OutputType.PRETTY_PRINT, include_sidecars=include_sidecars, omit_id=omit_id ) if rego_boilerplate: @@ -182,10 +178,17 @@ def get_serialized_output( # encode to base64 return os_util.str_to_base64(policy_str) - def _add_rego_boilerplate(self, output: str) -> str: + def generate_fragment(self, namespace: str, svn: str, output_type: int) -> str: + return config.CUSTOMER_REGO_FRAGMENT % ( + namespace, + pretty_print_func(svn), + pretty_print_func(self.get_fragments()), + self.get_serialized_output(output_type, rego_boilerplate=False, include_sidecars=False), + ) + def _add_rego_boilerplate(self, output: 
str) -> str: # determine if we're outputting for a sidecar or not - if self._images[0].get_id() and is_sidecar(self._images[0].get_id()): + if self._images and self._images[0].get_id() and is_sidecar(self._images[0].get_id()): return config.SIDECAR_REGO_POLICY % ( pretty_print_func(self._api_version), output @@ -243,10 +246,16 @@ def validate_sidecars(self) -> Tuple[bool, Dict]: OutputType.PRETTY_PRINT, rego_boilerplate=False ) policy_content = json.loads(policy_str) + + for container in policy_content: + # the test sets the sidecar to have the name be the same as the ID, so we need to simulate that + container[config.ACI_FIELD_CONTAINERS_NAME] = container[config.ACI_FIELD_CONTAINERS_ID] # done this way instead of self.validate() because the input.json is # the source of truth + return policy.validate(policy_content, sidecar_validation=True) + # pylint: disable=too-many-locals def validate(self, policy, sidecar_validation=False) -> Tuple[bool, Dict]: """Utility method: general method to compare two policies. One being the current object and the other is passed in as a parameter. @@ -272,6 +281,10 @@ def validate(self, policy, sidecar_validation=False) -> Tuple[bool, Dict]: case_insensitive_dict_get(i, config.POLICY_FIELD_CONTAINERS_ID) for i in policy ] + policy_names = [ + case_insensitive_dict_get(i, config.POLICY_FIELD_CONTAINERS_NAME) + for i in policy + ] for container in arm_containers: # see if the IDs match with any container in the policy @@ -279,35 +292,34 @@ def validate(self, policy, sidecar_validation=False) -> Tuple[bool, Dict]: id_val = case_insensitive_dict_get(container, config.ACI_FIELD_CONTAINERS_ID) container_name = case_insensitive_dict_get( container, - config.ACI_FIELD_CONTAINERS_NAME + config.POLICY_FIELD_CONTAINERS_NAME ) - # idx = policy_ids.index(id_val) if id_val in policy_ids else None + # container names are required for valid k8s yamls and ARM templates. 
So this would only happen + # in a future scenario where we enable diff mode for pure json files and the user does not provide + # a name for the container + if id_val is None and container_name is None: + raise ValueError( + ( + "Container ID and Name cannot both be None to use diff mode. " + "Try adding a name to the container and regenerate the CCE policy." + ) + ) + idx_arr = [i for i, item in enumerate(policy_ids) if item == id_val] + idx_arr_name = [i for i, item in enumerate(policy_names) if item == container_name] + set_idx = set(idx_arr + idx_arr_name) - if idx_arr == []: + if len(set_idx) == 0: reason_list[container_name] = f"{id_val} not found in policy" continue temp_diff_list = [] - for idx in idx_arr: + for idx in set_idx: temp_diff = {} matching_policy_container = policy[idx] - # copy so we can delete fields and not affect the original data - # structure - container1 = copy.deepcopy(matching_policy_container) - container2 = copy.deepcopy(container) - - # the ID does not matter so delete them from comparison - container1.pop(config.POLICY_FIELD_CONTAINERS_ID, None) - container2.pop(config.POLICY_FIELD_CONTAINERS_ID, None) - # env vars will be compared later so delete them from this - # comparison - container1.pop(config.POLICY_FIELD_CONTAINERS_ELEMENTS_ENVS, None) - container2.pop(config.POLICY_FIELD_CONTAINERS_ELEMENTS_ENVS, None) - - diff_values = compare_containers(container1, container2) + diff_values = get_container_diff(matching_policy_container, container) # label the diff with the ID so it can be merged # with the env vars and other container diffs temp_diff[container_name] = diff_values @@ -364,17 +376,16 @@ def save_to_file( output = self.get_serialized_output(output_type) os_util.write_str_to_file(file_path, output) - def _policy_serialization(self, pretty_print=False) -> str: + def _policy_serialization(self, pretty_print=False, include_sidecars: bool = True, omit_id: bool = False) -> str: policy = [] regular_container_images = 
self.get_images() is_sidecars = True for image in regular_container_images: is_sidecars = is_sidecars and is_sidecar(image.containerImage) - image_dict = image.get_policy_json() + image_dict = image.get_policy_json(omit_id=omit_id) policy.append(image_dict) - - if not is_sidecars: + if not is_sidecars and include_sidecars: # add in the default containers that have their hashes pre-computed policy += copy.deepcopy(config.DEFAULT_CONTAINERS) if self._disable_stdio: @@ -418,6 +429,7 @@ def populate_policy_content_for_all_images( for image in container_images: image.parse_all_parameters_and_variables(AciPolicy.all_params, AciPolicy.all_vars) image_name = f"{image.base}:{image.tag}" + image_info, tar = get_image_info(progress, message_queue, tar_mapping, image) # verify and populate the working directory property @@ -501,6 +513,12 @@ def populate_policy_content_for_all_images( } image.set_user(user) + if self._fragment_contents and self.should_eliminate_container_covered_by_fragments(image): + # these containers will get taken out later in the function + # since they are covered by a fragment + logger.info("Container covered by fragment: %s", image_name) + continue + # populate tar location if isinstance(tar_mapping, dict): tar_location = get_tar_location_from_mapping(tar_mapping, image_name) @@ -511,18 +529,58 @@ def populate_policy_content_for_all_images( progress.update() progress.close() - self.close() # unload the message queue for message in message_queue: logger.warning(message) + out_images = list(filter(lambda image: image.get_layers(), self.get_images())) + self.set_images(out_images) + + def should_eliminate_container_covered_by_fragments(self, image): + for fragment_image in self._fragment_contents: + # we're not comparing layers to save computation time + fragment_image["layers"] = [] + # TODO: make this print a warning if there is a fragment image that's close + # save some computation time by checking if the image tag is the same first + container_name = 
case_insensitive_dict_get(
+                fragment_image,
+                config.POLICY_FIELD_CONTAINERS_NAME
+            )
+            fragment_image_id = fragment_image.get(config.ACI_FIELD_CONTAINERS_ID)
+            if ":" not in fragment_image_id:
+                fragment_image_id = f"{fragment_image_id}:latest"
+            if (
+                fragment_image_id == f"{image.base}:{image.tag}" or
+                container_name == image.get_name()
+            ):
+                image_policy = image.get_policy_json()
+
+                container_diff = get_container_diff(fragment_image, image_policy)
+
+                # if the rest of the container is good, check the env vars
+                if not container_diff:
+                    env_reason_list = compare_env_vars(
+                        fragment_image_id,
+                        case_insensitive_dict_get(
+                            fragment_image,
+                            config.POLICY_FIELD_CONTAINERS_ELEMENTS_ENVS,
+                        ),
+                        case_insensitive_dict_get(
+                            image_policy, config.POLICY_FIELD_CONTAINERS_ELEMENTS_ENVS
+                        ),
+                    )
+
+                    # if the env vars are the same, then we can eliminate the container
+                    if not env_reason_list:
+                        return True
+        return False
+
     def get_images(self) -> List[ContainerImage]:
         return self._images
-    def pull_image(self, image: ContainerImage) -> Any:
-        client = self._get_docker_client()
-        return client.images.pull(image.base, image.tag)
+    def set_images(self, images: List[ContainerImage]) -> None:
+        self._images = images
 # pylint: disable=R0914,
@@ -533,6 +591,10 @@ def load_policy_from_arm_template_str(
     debug_mode: bool = False,
     disable_stdio: bool = False,
     approve_wildcards: bool = False,
+    diff_mode: bool = False,
+    rego_imports: Any = None,
+    fragment_contents: Any = None,
+    exclude_default_fragments: bool = False,
 ) -> List[AciPolicy]:
     """Function that converts ARM template string to an ACI Policy"""
     input_arm_json = os_util.load_json_from_str(template_data)
@@ -579,6 +641,13 @@ def load_policy_from_arm_template_str(
     containers = []
     existing_containers = None
     fragments = None
+    exclude_default_fragments = False
+
+    tags = case_insensitive_dict_get(resource, config.ACI_FIELD_TEMPLATE_TAGS)
+    if tags:
+        exclude_default_fragments = case_insensitive_dict_get(tags, 
config.ACI_FIELD_TEMPLATE_ZERO_SIDECAR) + if isinstance(exclude_default_fragments, str): + exclude_default_fragments = exclude_default_fragments.lower() == "true" container_group_properties = case_insensitive_dict_get( resource, config.ACI_FIELD_TEMPLATE_PROPERTIES @@ -600,17 +669,29 @@ def load_policy_from_arm_template_str( if init_container_list: container_list.extend(init_container_list) - existing_containers, fragments = extract_confidential_properties( - container_group_properties - ) + try: + existing_containers, fragments = extract_confidential_properties( + container_group_properties + ) + except ValueError as e: + if diff_mode: + # In diff mode, we raise an error if the base64 policy is malformed + eprint(f"Unable to decode existing policy. Please check the base64 encoding.\n{e}") + else: + # In non-diff mode, we ignore the error and proceed without the policy + existing_containers, fragments = ([], []) - rego_fragments = copy.deepcopy(config.DEFAULT_REGO_FRAGMENTS) + rego_fragments = copy.deepcopy(config.DEFAULT_REGO_FRAGMENTS) if not exclude_default_fragments else [] if infrastructure_svn: # assumes the first DEFAULT_REGO_FRAGMENT is always the # infrastructure fragment rego_fragments[0][ config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_MINIMUM_SVN ] = infrastructure_svn + if rego_imports: + # error check the rego imports for invalid data types + process_fragment_imports(rego_imports) + rego_fragments.extend(rego_imports) volumes = ( case_insensitive_dict_get( @@ -637,7 +718,7 @@ def load_policy_from_arm_template_str( if not image_name: eprint( - f'Field ["{config.ACI_FIELD_TEMPLATE_PARAMETERS}"] is empty or cannot be found' + f'Field ["{config.ACI_FIELD_TEMPLATE_IMAGE}"] is empty or cannot be found' ) exec_processes = [] @@ -681,6 +762,7 @@ def load_policy_from_arm_template_str( # fallback to default fragments if the policy is not present existing_rego_fragments=fragments, debug_mode=debug_mode, + fragment_contents=fragment_contents, ) ) return 
container_groups @@ -693,6 +775,10 @@ def load_policy_from_arm_template_file( debug_mode: bool = False, disable_stdio: bool = False, approve_wildcards: bool = False, + diff_mode: bool = False, + rego_imports: list = None, + fragment_contents: list = None, + exclude_default_fragments: bool = False, ) -> List[AciPolicy]: """Utility function: generate policy object from given arm template and parameter file paths""" input_arm_json = os_util.load_str_from_file(template_path) @@ -700,20 +786,40 @@ def load_policy_from_arm_template_file( if parameter_path: input_parameter_json = os_util.load_str_from_file(parameter_path) return load_policy_from_arm_template_str( - input_arm_json, input_parameter_json, infrastructure_svn, - debug_mode=debug_mode, disable_stdio=disable_stdio, approve_wildcards=approve_wildcards, + input_arm_json, + input_parameter_json, + infrastructure_svn, + debug_mode=debug_mode, + disable_stdio=disable_stdio, + approve_wildcards=approve_wildcards, + rego_imports=rego_imports, + diff_mode=diff_mode, + fragment_contents=fragment_contents, + exclude_default_fragments=exclude_default_fragments, ) -def load_policy_from_file(path: str, debug_mode: bool = False) -> AciPolicy: +def load_policy_from_file( + path: str, + debug_mode: bool = False, + disable_stdio: bool = False, + infrastructure_svn: str = None, + exclude_default_fragments: bool = False, +) -> AciPolicy: """Utility function: generate policy object from given json file path""" policy_input_json = os_util.load_str_from_file(path) - return load_policy_from_str(policy_input_json, debug_mode=debug_mode, ) + return load_policy_from_str( + policy_input_json, + debug_mode=debug_mode, + disable_stdio=disable_stdio, + infrastructure_svn=infrastructure_svn, + exclude_default_fragments=exclude_default_fragments, + ) def load_policy_from_image_name( - image_names: List[str] or str, debug_mode: bool = False, disable_stdio: bool = False + image_names: Union[List[str], str], debug_mode: bool = False, 
disable_stdio: bool = False ) -> AciPolicy: # can either take a list of image names or a single image name if isinstance(image_names, str): @@ -747,16 +853,29 @@ def load_policy_from_image_name( ) -def load_policy_from_str(data: str, debug_mode: bool = False) -> AciPolicy: +def load_policy_from_str( + data: str, + debug_mode: bool = False, + disable_stdio: bool = False, + infrastructure_svn: str = None, + exclude_default_fragments: bool = False, +) -> AciPolicy: """Utility function: generate policy object from given json string""" policy_input_json = os_util.load_json_from_str(data) containers = case_insensitive_dict_get( policy_input_json, config.ACI_FIELD_CONTAINERS + ) or [] + + version = case_insensitive_dict_get( + policy_input_json, config.ACI_FIELD_VERSION ) + if not version: + policy_input_json[config.ACI_FIELD_VERSION] = "1.0" + rego_fragments = case_insensitive_dict_get( policy_input_json, config.ACI_FIELD_CONTAINERS_REGO_FRAGMENTS - ) + ) or [] if rego_fragments: if not isinstance(rego_fragments, list): @@ -780,7 +899,7 @@ def load_policy_from_str(data: str, debug_mode: bool = False) -> AciPolicy: iss = case_insensitive_dict_get( fragment, config.ACI_FIELD_CONTAINERS_REGO_FRAGMENTS_ISS - ) + ) or case_insensitive_dict_get(fragment, config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_ISSUER) if not isinstance(iss, str): eprint( f'Field ["{config.ACI_FIELD_CONTAINERS}"]' @@ -791,7 +910,7 @@ def load_policy_from_str(data: str, debug_mode: bool = False) -> AciPolicy: minimum_svn = case_insensitive_dict_get( fragment, config.ACI_FIELD_CONTAINERS_REGO_FRAGMENTS_MINIMUM_SVN - ) + ) or case_insensitive_dict_get(fragment, config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_MINIMUM_SVN) if not isinstance(minimum_svn, str): eprint( f'Field ["{config.ACI_FIELD_CONTAINERS}"]' @@ -811,13 +930,17 @@ def load_policy_from_str(data: str, debug_mode: bool = False) -> AciPolicy: + "can only be a list." 
) - if not containers: - eprint(f'Field ["{config.ACI_FIELD_CONTAINERS}"] is empty or can not be found.') + if not containers and not rego_fragments: + eprint( + f'Field ["{config.ACI_FIELD_CONTAINERS}"]' + + f' and field ["{config.ACI_FIELD_CONTAINERS_REGO_FRAGMENTS}"] can not both be empty.' + ) for container in containers: + image_properties = case_insensitive_dict_get(container, config.ACI_FIELD_TEMPLATE_PROPERTIES) image_name = case_insensitive_dict_get( container, config.ACI_FIELD_CONTAINERS_CONTAINERIMAGE - ) + ) or case_insensitive_dict_get(image_properties, config.ACI_FIELD_TEMPLATE_IMAGE) container_name = case_insensitive_dict_get( container, config.ACI_FIELD_CONTAINERS_NAME @@ -838,9 +961,42 @@ def load_policy_from_str(data: str, debug_mode: bool = False) -> AciPolicy: ) container[config.ACI_FIELD_CONTAINERS_SIGNAL_CONTAINER_PROCESSES] = [] + if image_properties: + exec_processes = [] + extract_probe(exec_processes, image_properties, config.ACI_FIELD_CONTAINERS_READINESS_PROBE) + extract_probe(exec_processes, image_properties, config.ACI_FIELD_CONTAINERS_LIVENESS_PROBE) + container[config.ACI_FIELD_CONTAINERS_CONTAINERIMAGE] = image_name + container[config.ACI_FIELD_CONTAINERS_ENVS] = process_env_vars_from_config(image_properties) + container[config.ACI_FIELD_CONTAINERS_COMMAND] = case_insensitive_dict_get( + image_properties, config.ACI_FIELD_TEMPLATE_COMMAND + ) or [] + container[config.ACI_FIELD_CONTAINERS_MOUNTS] = ( + process_mounts_from_config(image_properties) + + process_configmap(image_properties) + ) + container[config.ACI_FIELD_CONTAINERS_EXEC_PROCESSES] = ( + exec_processes + + config.DEBUG_MODE_SETTINGS.get("execProcesses") + if debug_mode else exec_processes + ) + container[config.ACI_FIELD_CONTAINERS_ALLOW_STDIO_ACCESS] = not disable_stdio + container[config.ACI_FIELD_CONTAINERS_SECURITY_CONTEXT] = case_insensitive_dict_get( + image_properties, config.ACI_FIELD_TEMPLATE_SECURITY_CONTEXT + ) + + if not exclude_default_fragments: + 
rego_fragments.extend(copy.deepcopy(config.DEFAULT_REGO_FRAGMENTS)) + + if infrastructure_svn: + # assumes the first DEFAULT_REGO_FRAGMENT is always the + # infrastructure fragment + rego_fragments[0][ + config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_MINIMUM_SVN + ] = infrastructure_svn + return AciPolicy( policy_input_json, - rego_fragments=rego_fragments or config.DEFAULT_REGO_FRAGMENTS, + rego_fragments=rego_fragments, debug_mode=debug_mode, ) @@ -849,14 +1005,16 @@ def load_policy_from_virtual_node_yaml_file( virtual_node_yaml_path: str, debug_mode: bool = False, disable_stdio: bool = False, - approve_wildcards: bool = False + approve_wildcards: bool = False, + diff_mode: bool = False, ) -> List[AciPolicy]: yaml_contents_str = os_util.load_str_from_file(virtual_node_yaml_path) return load_policy_from_virtual_node_yaml_str( yaml_contents_str, debug_mode=debug_mode, disable_stdio=disable_stdio, - approve_wildcards=approve_wildcards + approve_wildcards=approve_wildcards, + diff_mode=diff_mode, ) @@ -864,7 +1022,8 @@ def load_policy_from_virtual_node_yaml_str( yaml_contents_str: List[str], debug_mode: bool = False, disable_stdio: bool = False, - approve_wildcards: bool = False + approve_wildcards: bool = False, + diff_mode: bool = False, ) -> List[AciPolicy]: """ Load a virtual node yaml file and generate a policy object @@ -891,10 +1050,21 @@ def load_policy_from_virtual_node_yaml_str( # extract existing policy and fragments for diff mode metadata = case_insensitive_dict_get(yaml, "metadata") annotations = case_insensitive_dict_get(metadata, config.VIRTUAL_NODE_YAML_ANNOTATIONS) + labels = case_insensitive_dict_get(metadata, config.VIRTUAL_NODE_YAML_LABELS) or [] + use_workload_identity = ( + config.VIRTUAL_NODE_YAML_LABEL_WORKLOAD_IDENTITY in labels + and labels.get(config.VIRTUAL_NODE_YAML_LABEL_WORKLOAD_IDENTITY) == "true") existing_policy = case_insensitive_dict_get(annotations, config.VIRTUAL_NODE_YAML_POLICY) - if existing_policy: - 
(existing_containers, existing_fragments) = decompose_confidential_properties(existing_policy) - + try: + if existing_policy: + existing_containers, existing_fragments = decompose_confidential_properties(existing_policy) + except ValueError as e: + if diff_mode: + # In diff mode, we raise an error if the base64 policy is malformed + eprint(f"Unable to decode existing policy. Please check the base64 encoding.\n{e}") + else: + # In non-diff mode, we ignore the error and proceed without the policy + existing_containers, existing_fragments = ([], []) # because there are many ways to get pod information, we normalize them so the interface is the same normalized_yaml = convert_to_pod_spec(yaml) @@ -927,6 +1097,8 @@ def load_policy_from_virtual_node_yaml_str( secrets_data, approve_wildcards=approve_wildcards ) + if use_workload_identity: + envs += config.VIRTUAL_NODE_ENV_RULES_WORKLOAD_IDENTITY # command command = case_insensitive_dict_get(container, "command") or [] @@ -935,19 +1107,44 @@ def load_policy_from_virtual_node_yaml_str( # mounts mounts = copy.deepcopy(config.DEFAULT_MOUNTS_VIRTUAL_NODE) volumes = case_insensitive_dict_get(spec, "volumes") or [] - configmap_volume_names = [ - case_insensitive_dict_get(volume, "name") if "configmap" in volume else None for volume in volumes - ] + + # set of volume types that are read-only by default + read_only_types = {"configMap", "secret", "downwardAPI", "projected"} + volume_mounts = case_insensitive_dict_get(container, "volumeMounts") if volume_mounts: for mount in volume_mounts: + mount_name = case_insensitive_dict_get(mount, "name") + mount_path = case_insensitive_dict_get(mount, "mountPath") + + # find the corresponding volume + volume = next( + (vol for vol in volumes if case_insensitive_dict_get(vol, "name") == mount_name), + None + ) + + # determine if this volume is one of the read-only types + read_only_default = any(key in read_only_types for key in volume.keys()) + + if read_only_default: + # log warning if 
readOnly is explicitly set to false for a read-only volume type + if case_insensitive_dict_get(mount, "readOnly") is False: + logger.warning( + "Volume '%s' in container '%s' is of a type that requires readOnly access (%s), " + "but readOnly: false was specified. Enforcing readOnly: true for policy generation.", + mount_name, + case_insensitive_dict_get(container, "name"), + ', '.join(read_only_types) + ) + mount_readonly = True + else: + # use the readOnly field or default to False for non-read-only volumes + mount_readonly = case_insensitive_dict_get(mount, "readOnly") or False + mounts.append({ config.ACI_FIELD_CONTAINERS_MOUNTS_TYPE: config.ACI_FIELD_YAML_MOUNT_TYPE, - config.ACI_FIELD_CONTAINERS_MOUNTS_PATH: case_insensitive_dict_get(mount, "mountPath"), - config.ACI_FIELD_CONTAINERS_MOUNTS_READONLY: - case_insensitive_dict_get(mount, "name") in configmap_volume_names or - case_insensitive_dict_get(mount, "readOnly") or - False, + config.ACI_FIELD_CONTAINERS_MOUNTS_PATH: mount_path, + config.ACI_FIELD_CONTAINERS_MOUNTS_READONLY: mount_readonly, }) # container security context @@ -1002,3 +1199,81 @@ def load_policy_from_virtual_node_yaml_str( ) ) return all_policies + + +def load_policy_from_config_file(config_file, debug_mode: bool = False, disable_stdio: bool = False): + config_content = os_util.load_str_from_file(config_file) + return load_policy_from_config_str(config_content, debug_mode, disable_stdio) + + +# Used for generating policy fragments +def load_policy_from_config_str(config_str, debug_mode: bool = False, disable_stdio: bool = False): + config_dict = os_util.load_json_from_str(config_str) + containers = [] + + rego_fragments = case_insensitive_dict_get( + config_dict, config.ACI_FIELD_CONTAINERS_REGO_FRAGMENTS + ) + + container_list = case_insensitive_dict_get( + config_dict, config.ACI_FIELD_CONTAINERS + ) + + for container in container_list: + container_name = case_insensitive_dict_get( + container, config.ACI_FIELD_CONTAINERS_NAME + ) + if not 
container_name: + eprint(f'Field ["{config.ACI_FIELD_CONTAINERS_NAME}"] is empty or cannot be found') + + container_properties = case_insensitive_dict_get( + container, config.ACI_FIELD_TEMPLATE_PROPERTIES + ) + + image_name = case_insensitive_dict_get( + container_properties, config.ACI_FIELD_TEMPLATE_IMAGE + ) + + if not image_name: + eprint( + f'Field ["{config.ACI_FIELD_TEMPLATE_IMAGE}"] is empty or cannot be found' + ) + + exec_processes = [] + extract_probe(exec_processes, container_properties, config.ACI_FIELD_CONTAINERS_READINESS_PROBE) + extract_probe(exec_processes, container_properties, config.ACI_FIELD_CONTAINERS_LIVENESS_PROBE) + + containers.append( + { + config.ACI_FIELD_CONTAINERS_ID: image_name, + config.ACI_FIELD_CONTAINERS_NAME: container_name, + config.ACI_FIELD_CONTAINERS_CONTAINERIMAGE: image_name, + config.ACI_FIELD_CONTAINERS_ENVS: process_env_vars_from_config( + container_properties + ), + config.ACI_FIELD_CONTAINERS_COMMAND: case_insensitive_dict_get( + container_properties, config.ACI_FIELD_TEMPLATE_COMMAND + ) + or [], + config.ACI_FIELD_CONTAINERS_MOUNTS: process_mounts_from_config(container_properties), + config.ACI_FIELD_CONTAINERS_EXEC_PROCESSES: exec_processes + + config.DEBUG_MODE_SETTINGS.get("execProcesses") + if debug_mode + else exec_processes, + config.ACI_FIELD_CONTAINERS_SIGNAL_CONTAINER_PROCESSES: [], + config.ACI_FIELD_CONTAINERS_ALLOW_STDIO_ACCESS: not disable_stdio, + config.ACI_FIELD_CONTAINERS_SECURITY_CONTEXT: case_insensitive_dict_get( + container_properties, config.ACI_FIELD_TEMPLATE_SECURITY_CONTEXT + ), + } + ) + + return AciPolicy( + { + config.ACI_FIELD_VERSION: "1.0", + config.ACI_FIELD_CONTAINERS: containers, + }, + disable_stdio=disable_stdio, + rego_fragments=rego_fragments, + debug_mode=debug_mode, + ) diff --git a/src/confcom/azext_confcom/template_util.py b/src/confcom/azext_confcom/template_util.py index 16472470a79..829d644096e 100644 --- a/src/confcom/azext_confcom/template_util.py +++ 
b/src/confcom/azext_confcom/template_util.py @@ -19,7 +19,6 @@ from azext_confcom import os_util from azext_confcom import config - # TODO: these can be optimized to not have so many groups in the single match # make this global so it can be used in multiple functions PARAMETER_AND_VARIABLE_REGEX = r"\[(?:parameters|variables)\(\s*'([^\.\/]+?)'\s*\)\]" @@ -27,8 +26,7 @@ class DockerClient: - def __init__(self) -> None: - self._client = None + _client = None def __enter__(self) -> docker.DockerClient: return self.get_client() @@ -165,12 +163,11 @@ def get_image_info(progress, message_queue, tar_mapping, image): config.ACI_FIELD_CONTAINERS_ARCHITECTURE_VALUE ): progress.close() - eprint( - f"{image_name} is attempting to build for unsupported architecture: " + + eprint(( + f"{image_name} is attempting to build for unsupported architecture: " f"{raw_image.attrs.get(config.ACI_FIELD_CONTAINERS_ARCHITECTURE_KEY)}. " - + f"Only {config.ACI_FIELD_CONTAINERS_ARCHITECTURE_VALUE} is supported by Confidential ACI" - ) - + f"Only {config.ACI_FIELD_CONTAINERS_ARCHITECTURE_VALUE} is supported by Confidential ACI" + )) return image_info, tar @@ -201,7 +198,7 @@ def process_env_vars_from_template(params: dict, # add in the env vars from the template template_env_vars = case_insensitive_dict_get( image_properties, config.ACI_FIELD_TEMPLATE_ENVS - ) + ) or [] if template_env_vars: for env_var in template_env_vars: @@ -283,9 +280,10 @@ def process_env_vars_from_yaml(container, config_maps, secrets, approve_wildcard response = (approve_wildcards or configmap_name in wildcarded_resource_names or - input("Would you like to use a wildcard value for ConfigMap " + - f"{configmap_name}? (y/n): " - ) + input(( + "Would you like to use a wildcard value for ConfigMap " + f"{configmap_name}? 
(y/n): " + )) ) if ( @@ -328,10 +326,10 @@ def process_env_vars_from_yaml(container, config_maps, secrets, approve_wildcard if value is None and secret_name not in not_wildcarded_resource_names: response = (approve_wildcards or secret_name in wildcarded_resource_names or - input("Would you like to use a wildcard value for Secret " + - f"{secret_name}? (y/n): " - ) - ) + input(( + "Would you like to use a wildcard value for Secret " + f"{secret_name}? (y/n): " + ))) if ( approve_wildcards or @@ -345,9 +343,11 @@ def process_env_vars_from_yaml(container, config_maps, secrets, approve_wildcard config.ACI_FIELD_CONTAINERS_ENVS_STRATEGY: "re2", }) else: - eprint(f"Secret {name} needs a value. " + - "Either attach the Secret resource " + - "to the yaml file or use a wildcard.") + eprint(( + f"Secret {name} needs a value. " + "Either attach the Secret resource " + "to the yaml file or use a wildcard." + )) elif secret_name in not_wildcarded_resource_names: output_env_vars.append({ @@ -378,8 +378,8 @@ def process_env_vars_from_yaml(container, config_maps, secrets, approve_wildcard container_name = ref.get('resourceFieldRef').get('containerName') if container_name != container.get('name'): - eprint("Container names other than the current " + - f"container are not currently supported: {container_name}") + eprint(("Container names other than the current " + f"container are not currently supported: {container_name}")) resource = ref.get('resourceFieldRef').get('resource') request_or_limit, resource_type = resource.split('.') @@ -459,6 +459,83 @@ def filter_non_pod_resources(resources: List[dict]) -> List[dict]: return [resource for resource in resources if resource and resource.get("kind") in important_resource_names] +def process_env_vars_from_config(container) -> List[Dict[str, str]]: + env_vars = [] + # add in the env vars from the template + template_env_vars = case_insensitive_dict_get( + container, config.ACI_FIELD_TEMPLATE_ENVS + ) or [] + for env_var in 
template_env_vars: + name = case_insensitive_dict_get(env_var, "name") + secure_value = case_insensitive_dict_get(env_var, "secureValue") + is_secure = bool(secure_value) + value = case_insensitive_dict_get(env_var, "value") or secure_value + + if not name and not is_secure: + eprint( + f"Environment variable with value: {value} is missing a name" + ) + elif not name and is_secure: + eprint( + "Environment variable with secure value is missing a name" + ) + elif not value: + eprint(f'Environment variable {name} does not have a value. Please check the template file.') + + env_vars.append({ + config.ACI_FIELD_CONTAINERS_ENVS_NAME: name, + config.ACI_FIELD_CONTAINERS_ENVS_VALUE: value, + config.ACI_FIELD_CONTAINERS_ENVS_STRATEGY: + "re2" if case_insensitive_dict_get(env_var, "regex") else "string", + }) + + return env_vars + + +def process_fragment_imports(rego_imports) -> None: + for rego_import in rego_imports: + feed = case_insensitive_dict_get( + rego_import, config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED + ) + if not isinstance(feed, str): + eprint( + f'Field ["{config.ACI_FIELD_CONTAINERS}"]' + + f'["{config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED}"] ' + + "can only be a string value." + ) + + iss = case_insensitive_dict_get( + rego_import, config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_ISSUER + ) + if not isinstance(iss, str): + eprint( + f'Field ["{config.ACI_FIELD_CONTAINERS}"]' + + f'["{config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_ISSUER}"] ' + + "can only be a string value." + ) + + minimum_svn = case_insensitive_dict_get( + rego_import, config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_MINIMUM_SVN + ) + + if not minimum_svn or not isinstance(minimum_svn, str) or not minimum_svn.isdigit(): + eprint( + f'Field ["{config.ACI_FIELD_CONTAINERS}"]' + + f'["{config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_MINIMUM_SVN}"] ' + + "can only be an integer value." 
+ ) + + includes = case_insensitive_dict_get( + rego_import, config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_INCLUDES + ) + if not isinstance(includes, list): + eprint( + f'Field ["{config.ACI_FIELD_CONTAINERS}"]' + + f'["{config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_INCLUDES}"] ' + + "can only be a list value." + ) + + def process_mounts(image_properties: dict, volumes: List[dict]) -> List[Dict[str, str]]: mount_source_table_keys = config.MOUNT_SOURCE_TABLE.keys() # initialize empty array of mounts @@ -529,6 +606,59 @@ def process_configmap(image_properties: dict) -> List[Dict[str, str]]: }] +def process_mounts_from_config(image_properties: dict) -> List[Dict[str, str]]: + mounts = [] + # get the mount types from the mounts section of the ARM template + volume_mounts = ( + case_insensitive_dict_get( + image_properties, config.ACI_FIELD_TEMPLATE_VOLUME_MOUNTS + ) + or [] + ) + + if volume_mounts and not isinstance(volume_mounts, list): + # parameter definition is in parameter file but not arm + # template + eprint( + f'Parameter ["{config.ACI_FIELD_TEMPLATE_VOLUME_MOUNTS}"] must be a list' + ) + + # get list of mount information based on mount name + for mount in volume_mounts: + mount_type = case_insensitive_dict_get( + mount, config.ACI_FIELD_TEMPLATE_MOUNTS_TYPE + ) + + if not mount_type: + eprint( + f'Field ["{config.ACI_FIELD_TEMPLATE_MOUNTS_TYPE}"] is empty or cannot be found in mount' + ) + + mount_path = case_insensitive_dict_get( + mount, config.ACI_FIELD_TEMPLATE_MOUNTS_PATH + ) + + if not mount_path: + eprint( + f'Field ["{config.ACI_FIELD_TEMPLATE_MOUNTS_PATH}"] is empty or cannot be found in mount' + ) + + mounts.append( + { + config.ACI_FIELD_CONTAINERS_MOUNTS_TYPE: case_insensitive_dict_get( + mount, config.ACI_FIELD_TEMPLATE_MOUNTS_TYPE + ), + config.ACI_FIELD_CONTAINERS_MOUNTS_PATH: case_insensitive_dict_get( + mount, config.ACI_FIELD_TEMPLATE_MOUNTS_PATH + ), + config.ACI_FIELD_CONTAINERS_MOUNTS_READONLY: 
case_insensitive_dict_get( + mount, config.ACI_FIELD_TEMPLATE_MOUNTS_READONLY + ), + } + ) + return mounts + + def get_values_for_params(input_parameter_json: dict, all_params: dict) -> Dict[str, Any]: # combine the parameter file into a single dictionary with the template # parameters @@ -658,6 +788,21 @@ def compare_containers(container1, container2) -> Dict[str, Any]: return readable_diff(json.loads(diff.to_json())) +def get_container_diff(container1, container2) -> Dict[str, Any]: + container1_copy = copy.deepcopy(container1) + container2_copy = copy.deepcopy(container2) + + # the ID does not matter so delete them from comparison + container1_copy.pop(config.POLICY_FIELD_CONTAINERS_ID, None) + container2_copy.pop(config.POLICY_FIELD_CONTAINERS_ID, None) + # env vars will be compared later so delete them from this + # comparison + container1_copy.pop(config.POLICY_FIELD_CONTAINERS_ELEMENTS_ENVS, None) + container2_copy.pop(config.POLICY_FIELD_CONTAINERS_ELEMENTS_ENVS, None) + + return compare_containers(container1_copy, container2_copy) + + def change_key_names(dictionary) -> Dict: """Recursive function to rename keys wherever they are in the output diff dictionary""" # need to rename fields in the deep diff to be more accessible to customers @@ -846,9 +991,6 @@ def extract_confidential_properties( def decompose_confidential_properties(cce_policy: str) -> Tuple[List[Dict], List[Dict]]: - container_start = "containers := " - fragment_start = "fragments := " - cce_policy = os_util.base64_to_str(cce_policy) # error check that the decoded policy existing in the template is not in JSON format try: @@ -861,23 +1003,26 @@ def decompose_confidential_properties(cce_policy: str) -> Tuple[List[Dict], List # this is expected, we do not want json pass + return extract_containers_and_fragments_from_text(cce_policy) + + +def extract_containers_and_fragments_from_text(text: str) -> Tuple[List[Dict], List[Dict]]: try: - container_text = extract_containers_from_text(cce_policy, 
container_start) + container_text = extract_containers_from_text(text, config.REGO_CONTAINER_START) # replace tabs with 4 spaces, YAML parser can take in JSON with trailing commas but not tabs # so we need to get rid of the tabs container_text = container_text.replace("\t", " ") - containers = yaml.load(container_text, Loader=yaml.FullLoader) - fragment_text = extract_containers_from_text( - cce_policy, fragment_start + text, config.REGO_FRAGMENT_START ).replace("\t", " ") fragments = yaml.load( fragment_text, Loader=yaml.FullLoader, ) - except yaml.YAMLError: + except yaml.YAMLError as e: + eprint(f"Error parsing rego file: {e}") # reading the rego file failed, so we'll just return the default outputs containers = [] fragments = [] @@ -1048,10 +1193,10 @@ def inject_policy_into_template( container_group_name = get_container_group_name( input_arm_json, parameter_data, count ) - user_input = input( - "Do you want to overwrite the CCE Policy currently in container group " + + user_input = input(( + "Do you want to overwrite the CCE Policy currently in container group " f'"{container_group_name}" in the ARM Template? 
(y/n) ' - ) + )) if user_input.lower() == "y": confidential_compute_properties[ config.ACI_FIELD_TEMPLATE_CCE_POLICY @@ -1064,6 +1209,55 @@ def inject_policy_into_template( return False +def inject_policy_into_yaml( + yaml_file_path: str, policy: str, count: int +) -> bool: + virtual_node_yaml = list(os_util.load_multiple_yaml_from_file(yaml_file_path)) + filtered_yaml = filter_non_pod_resources(virtual_node_yaml) + current_yaml = filtered_yaml[count] + pod_item = convert_to_pod_spec_helper(current_yaml) + + # extract existing policy (if any) + try: + existing_policy = pod_item[config.VIRTUAL_NODE_YAML_METADATA][ + config.VIRTUAL_NODE_YAML_ANNOTATIONS][config.VIRTUAL_NODE_YAML_POLICY] + except KeyError: + existing_policy = None + + # check if the existing policy should be overwritten + if existing_policy: + workload_name = pod_item.get("metadata", {}).get("name", f"Workload {count}") + user_input = input( + f"Do you want to overwrite the Base64 Policy currently in workload '{workload_name}'? 
(y/n) " + ) + # if user declines, exit + if user_input.lower() != "y": + return False + + # prepare new metadata with updated policy + needed_metadata = { + config.VIRTUAL_NODE_YAML_METADATA: { + config.VIRTUAL_NODE_YAML_ANNOTATIONS: { + config.VIRTUAL_NODE_YAML_POLICY: policy + } + } + } + + # update workload metadata with new policy + deep_dict_update(needed_metadata, pod_item) + + # find index of current YAML in the original YAML file + count_in_file = virtual_node_yaml.index(current_yaml) + + # replace current YAML with updated yaml in the original list + virtual_node_yaml[count_in_file] = current_yaml + + # write updated yaml back to file + os_util.write_multiple_yaml_to_file(yaml_file_path, virtual_node_yaml) + + return True + + def get_container_group_name( input_arm_json: dict, input_parameter_json: dict, count: int ) -> bool: @@ -1214,7 +1408,6 @@ def print_existing_policy_from_yaml(virtual_node_yaml_path: str) -> None: def process_seccomp_policy(policy2): - # helper function to add fields to a dictionary if they don't exist def defaults(obj, default): for key in default: diff --git a/src/confcom/azext_confcom/tests/latest/README.md b/src/confcom/azext_confcom/tests/latest/README.md index ce4a3299e9a..7dd37c05dd5 100644 --- a/src/confcom/azext_confcom/tests/latest/README.md +++ b/src/confcom/azext_confcom/tests/latest/README.md @@ -37,6 +37,7 @@ test_update_infrastructure_svn | mcr.microsoft.com/cbl-mariner/distroless/python test_multiple_policies | mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot & mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | See if two unique policies are generated from a single ARM Template container multiple container groups. Also have an extra resource that is untouched. Also has a secureValue for an environment variable. 
test_arm_template_with_init_container | mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot & mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | See if having an initContainer is picked up and added to the list of valid containers test_arm_template_without_stdio_access | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | See if disabling container stdio access gets passed down to individual containers +test_arm_template_omit_id | mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot | Check that the id field is omitted from the policy test_arm_template_allow_elevated_false | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | Disabling allow_elevated via securityContext test_arm_template_policy_regex | mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot | Make sure the regex generated from the ARM Template workflow matches that of the policy.json workflow test_wildcard_env_var | mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot | Check that an "allow all" regex is created when a value for env var is not provided via a parameter value @@ -56,6 +57,7 @@ test_arm_template_security_context_user_group | N/A | See if user is set correct test_arm_template_security_context_uid_group | N/A | See if user is set correctly by getting the user field from the Docker image in the format uid:group test_arm_template_security_context_uid | N/A | See if user is set correctly by getting the user field from the Docker image in the format uid test_arm_template_security_context_user_dockerfile | N/A | See if user is set correctly by getting the user field from the Docker image in the format user +test_zero_sidecar | mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot | Make sure the infrastructure fragment is taken out when the appropriate tag is present in an ARM template ## policy.json [test file](test_confcom_scenario.py) @@ -78,6 +80,7 @@ test_docker_pull | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | Test p 
test_infrastructure_svn | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | make sure the correct infrastructure_svn is present in the policy test_stdio_access_default | mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot | Checking the default value for std I/O access test_stdio_access_updated | mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot | Checking the value for std I/O when it's set +test_omit_id | mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot | Check that the id field is omitted from the policy test_environment_variables_parsing | mcr.microsoft.com/azuredocs/aci-dataprocessing-cc:v1 | Make sure env vars are output in the right format test_get_layers_from_not_exists_image | notexists:1.0.0 | Fail out grabbing layers if image doesn't exist test_incorrect_allow_elevated_data_type | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | Making allow_elevated fail out if it's not a boolean @@ -85,7 +88,6 @@ test_incorrect_workingdir_path | mcr.microsoft.com/cbl-mariner/distroless/minima test_incorrect_workingdir_data_type | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | Fail if working dir is an array test_incorrect_command_data_type | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | Fail if command is not array of strings test_json_missing_containers | N/A | Fail if containers are not specified -test_json_missing_version | mcr.microsoft.com/azuredocs/aci-dataprocessing-cc:v1 | Fail if version is not included in policy.json test_json_missing_containerImage | N/A | Fail if container doesn't have an image specified test_json_missing_environmentVariables | mcr.microsoft.com/azuredocs/aci-dataprocessing-cc:v1 | Fail if there are no env vars defined test_json_missing_command | mcr.microsoft.com/azuredocs/aci-dataprocessing-cc:v1 | Fail if there is no command specified @@ -113,6 +115,7 @@ test_invalid_many_input_types | Makes sure we're only getting input from one sou test_diff_wrong_input_type | Makes sure we're 
only doing the diff command if we're using a ARM Template as the input type test_parameters_without_template | Makes sure we error out if a parameter file is getting passed in without an ARM Template test_input_and_virtual_node | Error out if both input and virtual node are specified +test_workload_identity | Make sure env vars are injected if workload identity is used ## Tar File [test file](test_confcom_tar.py) @@ -135,6 +138,7 @@ test_invalid_input_path | mcr.microsoft.com/aks/e2e/library-busybox:master.22031 test_invalid_config_map_path | mcr.microsoft.com/aks/e2e/library-busybox:master.220314.1-linux-amd64 | Input a path that does not exist for the config-map.yaml file test_valid_settings | mcr.microsoft.com/aks/e2e/library-busybox:master.220314.1-linux-amd64 | Input a valid path for the pod.yaml with the default config file test_print_version | N/A | Print the version of the extension +test_invalid_settings | mcr.microsoft.com/aks/e2e/library-busybox:master.220314.1-linux-amd64 | Input an invalid name for a custom settings file ## Virtual Node File [test file](test_confcom_virtual_node.py) @@ -145,3 +149,18 @@ Test Name | Image Used | Purpose test_compare_policy_sources | mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot | Compare the output of a policy generated from a Virtual Node file and a policy generated from an input json test_configmaps | mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot | Check that the configmaps are being added to the policy in env var and mount form test_secrets | mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot | Check that the secrets are being added to the policy in env var and mount form + +## Fragment File [test file](test_confcom_fragment.py) + +This is how to generate a policy fragment to be included in a CCE Policy for Confidential ACI + +Test Name | Image Used | Purpose +---|---|--- +test_fragment_user_container_customized_mounts | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | See 
if mounts are translated correctly to the appropriate source and destination locations +test_fragment_user_container_mount_injected_dns | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | See if the resolvconf mount works properly +test_fragment_injected_sidecar_container_msi | mcr.microsoft.com/aci/msi-atlas-adapter:master_20201203.1 | Make sure User mounts and env vars aren't added to sidecar containers, using JSON output format +test_debug_processes | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | Enable exec_processes via debug_mode +test_fragment_sidecar | mcr.microsoft.com/aci/msi-atlas-adapter:master_20201210.1 | See if sidecar fragments can be created by a given policy.json +test_fragment_sidecar_stdio_access_default | mcr.microsoft.com/aci/msi-atlas-adapter:master_20201210.1 | Check that sidecar containers have std I/O access by default +test_fragment_incorrect_sidecar | mcr.microsoft.com/aci/msi-atlas-adapter:master_20201210.1 | See what output format for failing sidecar validation would be +test_invalid_input | mcr.microsoft.com/aci/msi-atlas-adapter:master_20201210.1 | Fail out under various invalid input circumstances diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_arm.py b/src/confcom/azext_confcom/tests/latest/test_confcom_arm.py index e5b095d9c63..cd9f315ab76 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_arm.py +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_arm.py @@ -7,7 +7,6 @@ import unittest import json import deepdiff -import docker from unittest.mock import patch from azext_confcom.security_policy import ( @@ -17,7 +16,12 @@ ) import azext_confcom.config as config from azext_confcom.custom import acipolicygen_confcom -from azext_confcom.template_util import case_insensitive_dict_get, extract_confidential_properties, extract_containers_from_text, DockerClient +from azext_confcom.template_util import ( + case_insensitive_dict_get, + decompose_confidential_properties, + 
extract_containers_from_text, + DockerClient, +) TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), "..")) @@ -1250,7 +1254,7 @@ def test_arm_template_with_parameter_file_clean_room(self): "metadata": { "description": "Name for the container group" }, - "defaultValue":"mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0" + "defaultValue":"mcr.microsoft.com/cbl-mariner/base/nginx:1-cm2.0" }, "containername": { "type": "string", @@ -1355,22 +1359,21 @@ def test_arm_template_with_parameter_file_clean_room(self): } """ with DockerClient() as client: - # client = docker.from_env() - original_image = "mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0" + original_image = "mcr.microsoft.com/cbl-mariner/base/nginx:1-cm2.0" try: client.images.remove(original_image) except: # do nothing pass - regular_image = load_policy_from_arm_template_str( - custom_arm_json_default_value, "" - ) - regular_image[0].populate_policy_content_for_all_images() + regular_image = load_policy_from_arm_template_str( + custom_arm_json_default_value, "" + ) + regular_image[0].populate_policy_content_for_all_images() # create and tag same image to the new name to see if docker will error out that the image is not in a remote repo - new_repo = "fakerepo.microsoft.com" - new_image_name = "azure-functions" - new_tag = "fake-tag" - + new_repo = "fakerepo.microsoft.com" + new_image_name = "azure-functions" + new_tag = "fake-tag" + with DockerClient() as client: image = client.images.get(original_image) try: client.images.remove(new_repo + "/" + new_image_name + ":" + new_tag) @@ -1379,8 +1382,6 @@ def test_arm_template_with_parameter_file_clean_room(self): pass image.tag(new_repo + "/" + new_image_name, tag=new_tag) - # client.close() - clean_room = load_policy_from_arm_template_str( custom_arm_json_default_value, self.parameter_file ) @@ -1582,8 +1583,7 @@ def setUpClass(cls): cls.aci_policy.populate_policy_content_for_all_images() cls.aci_policy2 = 
load_policy_from_arm_template_str(cls.custom_json2, "")[0] cls.aci_policy2.populate_policy_content_for_all_images() - container_start = "containers := " - cls.containers = json.loads(extract_containers_from_text(cls.aci_policy.get_serialized_output(OutputType.PRETTY_PRINT), container_start)) + cls.containers = json.loads(extract_containers_from_text(cls.aci_policy.get_serialized_output(OutputType.PRETTY_PRINT), config.REGO_CONTAINER_START)) def test_policy_diff(self): self.aci_policy._existing_cce_policy = self.containers @@ -2043,8 +2043,7 @@ def setUpClass(cls): cls.aci_policy3.populate_policy_content_for_all_images(faster_hashing=True) def test_multiple_policies(self): - container_start = "containers := " - policy2_containers = json.loads(extract_containers_from_text(self.aci_policy2.get_serialized_output(OutputType.PRETTY_PRINT), container_start)) + policy2_containers = json.loads(extract_containers_from_text(self.aci_policy2.get_serialized_output(OutputType.PRETTY_PRINT), config.REGO_CONTAINER_START)) self.aci_policy._existing_cce_policy = policy2_containers is_valid, diff = self.aci_policy.validate_cce_policy() @@ -2397,6 +2396,149 @@ def test_arm_template_without_stdio_access(self): self.assertFalse(stdio_access) +class PolicyGeneratingOmitId(unittest.TestCase): + + custom_arm_json_default_value = """ + { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + + + "parameters": { + "containergroupname": { + "type": "string", + "metadata": { + "description": "Name for the container group" + }, + "defaultValue":"simple-container-group" + }, + "image": { + "type": "string", + "metadata": { + "description": "Name for the container group" + }, + "defaultValue":"mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot" + }, + "containername": { + "type": "string", + "metadata": { + "description": "Name for the container" + }, + "defaultValue":"simple-container" + }, + + "port": { + "type": 
"string", + "metadata": { + "description": "Port to open on the container and the public IP address." + }, + "defaultValue": "8080" + }, + "cpuCores": { + "type": "string", + "metadata": { + "description": "The number of CPU cores to allocate to the container." + }, + "defaultValue": "1.0" + }, + "memoryInGb": { + "type": "string", + "metadata": { + "description": "The amount of memory to allocate to the container in gigabytes." + }, + "defaultValue": "1.5" + }, + "location": { + "type": "string", + "defaultValue": "[resourceGroup().location]", + "metadata": { + "description": "Location for all resources." + } + } + }, + "resources": [ + { + "name": "[parameters('containergroupname')]", + "type": "Microsoft.ContainerInstance/containerGroups", + "apiVersion": "2023-05-01", + "location": "[parameters('location')]", + + "properties": { + "containers": [ + { + "name": "[parameters('containername')]", + "properties": { + "image": "[parameters('image')]", + "environmentVariables": [ + { + "name": "PORT", + "value": "80" + } + ], + + "ports": [ + { + "port": "[parameters('port')]" + } + ], + "command": [ + "/bin/bash", + "-c", + "while sleep 5; do cat /mnt/input/access.log; done" + ], + "resources": { + "requests": { + "cpu": "[parameters('cpuCores')]", + "memoryInGb": "[parameters('memoryInGb')]" + } + } + } + } + ], + + "osType": "Linux", + "restartPolicy": "OnFailure", + "confidentialComputeProperties": { + "IsolationType": "SevSnp" + }, + "ipAddress": { + "type": "Public", + "ports": [ + { + "protocol": "Tcp", + "port": "[parameters( 'port' )]" + } + ] + } + } + } + ], + "outputs": { + "containerIPv4Address": { + "type": "string", + "value": "[reference(resourceId('Microsoft.ContainerInstance/containerGroups/', parameters('containergroupname'))).ipAddress.ip]" + } + } + } + """ + + @classmethod + def setUpClass(cls): + cls.aci_arm_policy = load_policy_from_arm_template_str( + cls.custom_arm_json_default_value, "" + )[0] + 
cls.aci_arm_policy.populate_policy_content_for_all_images() + + def test_arm_template_omit_id(self): + regular_image_json = json.loads( + self.aci_arm_policy.get_serialized_output( + output_type=OutputType.RAW, rego_boilerplate=False, omit_id=True + ) + ) + + self.assertTrue(config.POLICY_FIELD_CONTAINERS_ID not in regular_image_json[0]) + + class PolicyGeneratingAllowElevated(unittest.TestCase): custom_arm_json_default_value = """ @@ -2835,10 +2977,7 @@ def test_printing_existing_policy(self): self.assertEqual(exc_info.exception.code, 1) - with self.assertRaises(SystemExit) as exc_info: - acipolicygen_confcom(None, "test_template2.json", None, None, None, None, None, print_existing_policy=True) - - self.assertEqual(exc_info.exception.code, 0) + acipolicygen_confcom(None, "test_template2.json", None, None, None, None, None, print_existing_policy=True) finally: # delete test file os.remove("test_template.json") @@ -4841,7 +4980,7 @@ def setUpClass(cls): cls.dockerfile_path5 = os.path.join(cls.path, "./Dockerfile5.dockerfile") cls.dockerfile_path6 = os.path.join(cls.path, "./Dockerfile6.dockerfile") - cls.client = docker.from_env() + cls.client = DockerClient().get_client() @classmethod def tearDownClass(cls): @@ -5351,7 +5490,7 @@ class PolicyStopSignal(unittest.TestCase): "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "variables": { - "image": "mcr.microsoft.com/cbl-mariner/base/nginx:1.22-cm2.0" + "image": "mcr.microsoft.com/azurelinux/base/nginx:1" }, @@ -5479,3 +5618,144 @@ def test_stop_signal(self): 3 in regular_image_json[0][config.POLICY_FIELD_CONTAINERS_ELEMENTS_SIGNAL_CONTAINER_PROCESSES] ) + + +class PolicyZeroSidecar(unittest.TestCase): + custom_arm_json = """ + { + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "variables": { + "image": "mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot" + 
}, + + + "parameters": { + "containergroupname": { + "type": "string", + "metadata": { + "description": "Name for the container group" + }, + "defaultValue":"simple-container-group" + }, + + "containername": { + "type": "string", + "metadata": { + "description": "Name for the container" + }, + "defaultValue":"simple-container" + }, + "port": { + "type": "string", + "metadata": { + "description": "Port to open on the container and the public IP address." + }, + "defaultValue": "80" + }, + "cpuCores": { + "type": "string", + "metadata": { + "description": "The number of CPU cores to allocate to the container." + }, + "defaultValue": "1.0" + }, + "memoryInGb": { + "type": "string", + "metadata": { + "description": "The amount of memory to allocate to the container in gigabytes." + }, + "defaultValue": "1.5" + }, + "location": { + "type": "string", + "defaultValue": "[resourceGroup().location]", + "metadata": { + "description": "Location for all resources." + } + } + }, + "resources": [ + { + "name": "[parameters('containergroupname')]", + "type": "Microsoft.ContainerInstance/containerGroups", + "tags": { + "Annotate-zero-sidecar": "true" + }, + "apiVersion": "2022-04-01-preview", + "location": "[parameters('location')]", + "properties": { + "containers": [ + { + "name": "[parameters('containername')]", + + "properties": { + "image": "[variables('image')]", + "command": [ + "python3" + ], + "ports": [ + { + "port": "[parameters('port')]" + } + ], + "resources": { + "requests": { + "cpu": "[parameters('cpuCores')]", + "memoryInGb": "[parameters('memoryInGb')]" + } + } + + } + } + + ], + + "osType": "Linux", + "restartPolicy": "OnFailure", + "confidentialComputeProperties": { + "IsolationType": "SevSnp" + }, + "ipAddress": { + "type": "Public", + "ports": [ + { + "protocol": "Tcp", + "port": "[parameters( 'port' )]" + } + ] + } + } + } + ], + "outputs": { + "containerIPv4Address": { + "type": "string", + "value": 
"[reference(resourceId('Microsoft.ContainerInstance/containerGroups/', parameters('containergroupname'))).ipAddress.ip]" + } + } + } + """ + aci_policy = None + + @classmethod + def setUpClass(cls): + + cls.aci_arm_policy = load_policy_from_arm_template_str(cls.custom_arm_json, "")[ + 0 + ] + cls.aci_arm_policy.populate_policy_content_for_all_images() + + def test_zero_sidecar(self): + content = self.aci_arm_policy.get_serialized_output( + output_type=OutputType.DEFAULT + ) + + _, fragments = decompose_confidential_properties( + content + ) + + # check for the empty fragments section + self.assertEqual( + fragments, [] + ) diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_fragment.py b/src/confcom/azext_confcom/tests/latest/test_confcom_fragment.py new file mode 100644 index 00000000000..39a08d6faec --- /dev/null +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_fragment.py @@ -0,0 +1,532 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +import os +import unittest +import json +import subprocess +from knack.util import CLIError + +from azext_confcom.security_policy import ( + UserContainerImage, + OutputType, + load_policy_from_config_str +) + +import azext_confcom.config as config +from azext_confcom.template_util import ( + case_insensitive_dict_get, + extract_containers_and_fragments_from_text, +) +from azext_confcom.custom import acifragmentgen_confcom +from azure.cli.testsdk import ScenarioTest + +TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), "..")) + + +class FragmentMountEnforcement(unittest.TestCase): + custom_json = """ + { + "version": "1.0", + "containers": [ + { + "name": "test-container", + "properties": { + "image": "mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0", + "environmentVariables": [ + { + "name": "PATH", + "value": "/customized/path/value" + }, + { + "name": "TEST_REGEXP_ENV", + "value": "test_regexp_env_[[:alpha:]]*", + "regex": true + } + ], + "command": ["rustc", "--help"], + "volumeMounts": [ + { + "name": "azurefile", + "mountPath": "/mount/azurefile", + "mountType": "azureFile", + "readonly": true + } + ] + } + } + ] + } + """ + aci_policy = None + + @classmethod + def setUpClass(cls): + with load_policy_from_config_str(cls.custom_json) as aci_policy: + aci_policy.populate_policy_content_for_all_images() + cls.aci_policy = aci_policy + + def test_fragment_user_container_customized_mounts(self): + image = next( + ( + img + for img in self.aci_policy.get_images() + if isinstance(img, UserContainerImage) and img.base == "mcr.microsoft.com/cbl-mariner/distroless/minimal" + ), + None, + ) + + self.assertIsNotNone(image) + data = image.get_policy_json() + + self.assertEqual( + len( + case_insensitive_dict_get( + data, config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS + ) + ), + 2, + ) + mount = case_insensitive_dict_get( + data, 
config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS + )[0] + resolv_mount = case_insensitive_dict_get( + data, config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS + )[1] + self.assertIsNotNone(resolv_mount) + self.assertEqual( + case_insensitive_dict_get(mount, "source"), + "sandbox:///tmp/atlas/azureFileVolume/.+", + ) + self.assertEqual( + case_insensitive_dict_get( + resolv_mount, config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS_DESTINATION + ), + "/etc/resolv.conf", + ) + self.assertEqual( + resolv_mount[config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS_OPTIONS][2], "rw" + ) + + def test_fragment_user_container_mount_injected_dns(self): + image = next( + ( + img + for img in self.aci_policy.get_images() + if isinstance(img, UserContainerImage) and img.base == "mcr.microsoft.com/cbl-mariner/distroless/minimal" + ), + None, + ) + + self.assertIsNotNone(image) + data = image.get_policy_json() + self.assertEqual( + len( + case_insensitive_dict_get( + data, config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS + ) + ), + 2, + ) + mount = case_insensitive_dict_get( + data, config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS + )[1] + self.assertIsNotNone(mount) + self.assertEqual( + case_insensitive_dict_get( + mount, config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS_SOURCE + ), + "sandbox:///tmp/atlas/resolvconf/.+", + ) + self.assertEqual( + case_insensitive_dict_get( + mount, config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS_DESTINATION + ), + "/etc/resolv.conf", + ) + self.assertEqual( + mount[config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS_OPTIONS][2], "rw" + ) + + +class FragmentGenerating(unittest.TestCase): + custom_json = """ + { + "version": "1.0", + "containers": [ + { + "name": "sidecar-container", + "properties": { + "image": "mcr.microsoft.com/aci/msi-atlas-adapter:master_20201203.1", + "environmentVariables": [ + { + "name": "IDENTITY_API_VERSION", + "value": ".+", + "regex": true + }, + { + "name": "IDENTITY_HEADER", + "value": ".+", + "regex": true + }, + { + "name": 
"IDENTITY_SERVER_THUMBPRINT", + "value": ".+", + "regex": true + }, + { + "name": "ACI_MI_CLIENT_ID_.+", + "value": ".+", + "regex": true + }, + { + "name": "ACI_MI_RES_ID_.+", + "value": ".+", + "regex": true + }, + { + "name": "HOSTNAME", + "value": ".+", + "regex": true + }, + { + "name": "TERM", + "value": "xterm", + "regex": false + }, + { + "name": "PATH", + "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + }, + { + "name": "(?i)(FABRIC)_.+", + "value": ".+", + "regex": true + }, + { + "name": "Fabric_Id+", + "value": ".+", + "regex": true + }, + { + "name": "Fabric_ServiceName", + "value": ".+", + "regex": true + }, + { + "name": "Fabric_ApplicationName", + "value": ".+", + "regex": true + }, + { + "name": "Fabric_CodePackageName", + "value": ".+", + "regex": true + }, + { + "name": "Fabric_ServiceDnsName", + "value": ".+", + "regex": true + }, + { + "name": "ACI_MI_DEFAULT", + "value": ".+", + "regex": true + }, + { + "name": "TokenProxyIpAddressEnvKeyName", + "value": "[ContainerToHostAddress|Fabric_NodelPOrFQDN]", + "regex": true + }, + { + "name": "ContainerToHostAddress", + "value": "sidecar-container" + }, + { + "name": "Fabric_NetworkingMode", + "value": ".+", + "regex": true + }, + { + "name": "azurecontainerinstance_restarted_by", + "value": ".+", + "regex": true + } + ], + "command": ["/bin/sh","-c","until ./msiAtlasAdapter; do echo $? 
restarting; done"], + "mounts": null + } + } + ] + } + """ + aci_policy = None + + @classmethod + def setUpClass(cls): + with load_policy_from_config_str(cls.custom_json) as aci_policy: + aci_policy.populate_policy_content_for_all_images() + cls.aci_policy = aci_policy + + def test_fragment_injected_sidecar_container_msi(self): + image = self.aci_policy.get_images()[0] + env_vars = [ + { + "name": "IDENTITY_API_VERSION", + "value": ".+", + }, + { + "name": "IDENTITY_HEADER", + "value": ".+", + }, + { + "name": "IDENTITY_SERVER_THUMBPRINT", + "value": ".+", + }, + { + "name": "ACI_MI_CLIENT_ID_.+", + "value": ".+", + }, + { + "name": "ACI_MI_RES_ID_.+", + "value": ".+", + }, + { + "name": "HOSTNAME", + "value": ".+", + }, + { + "name": "TERM", + "value": "xterm", + }, + { + "name": "PATH", + "value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + }, + { + "name": "(?i)(FABRIC)_.+", + "value": ".+", + }, + { + "name": "Fabric_Id+", + "value": ".+", + }, + { + "name": "Fabric_ServiceName", + "value": ".+", + }, + { + "name": "Fabric_ApplicationName", + "value": ".+", + }, + { + "name": "Fabric_CodePackageName", + "value": ".+", + }, + { + "name": "Fabric_ServiceDnsName", + "value": ".+", + }, + { + "name": "ACI_MI_DEFAULT", + "value": ".+", + }, + { + "name": "TokenProxyIpAddressEnvKeyName", + "value": "[ContainerToHostAddress|Fabric_NodelPOrFQDN]", + }, + { + "name": "ContainerToHostAddress", + "value": "sidecar-container", + }, + { + "name": "Fabric_NetworkingMode", + "value": ".+", + }, + { + "name": "azurecontainerinstance_restarted_by", + "value": ".+", + } + ] + command = ["/bin/sh", "-c", "until ./msiAtlasAdapter; do echo $? 
restarting; done"] + self.assertEqual(image.base, "mcr.microsoft.com/aci/msi-atlas-adapter") + self.assertIsNotNone(image) + + self.assertEqual(image._command, command) + env_names = list(map(lambda x: x['pattern'], image._environmentRules + image._extraEnvironmentRules)) + for env_var in env_vars: + self.assertIn(env_var['name'] + "=" + env_var['value'], env_names) + + expected_workingdir = "/root/" + self.assertEqual(image._workingDir, expected_workingdir) + + +class FragmentPolicyGeneratingDebugMode(unittest.TestCase): + custom_json = """ + { + "version": "1.0", + "containers": [ + { + "name": "test-container", + "properties": { + "image": "mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0", + "environmentVariables": [ + + ], + "command": ["python3"] + } + } + ] + } + """ + aci_policy = None + + @classmethod + def setUpClass(cls): + with load_policy_from_config_str(cls.custom_json, debug_mode=True) as aci_policy: + aci_policy.populate_policy_content_for_all_images() + cls.aci_policy = aci_policy + + def test_debug_processes(self): + policy = self.aci_policy.get_serialized_output( + output_type=OutputType.RAW, rego_boilerplate=True + ) + self.assertIsNotNone(policy) + + # see if debug mode is enabled + containers, _ = extract_containers_and_fragments_from_text(policy) + + self.assertTrue(containers[0]["allow_stdio_access"]) + self.assertTrue(containers[0]["exec_processes"][0]["command"] == ["/bin/sh"]) + + +class FragmentSidecarValidation(unittest.TestCase): + custom_json = """ + { + "version": "1.0", + "containers": [ + { + "name": "test-container", + "properties": { + "image": "mcr.microsoft.com/aci/msi-atlas-adapter:master_20201210.1", + "environmentVariables": [ + { + "name": "PATH", + "value": ".+", + "regex": true + } + ], + "command": [ + "/bin/sh", + "-c", + "until ./msiAtlasAdapter; do echo $? 
restarting; done" + ], + "workingDir": "/root/", + "mounts": null + } + } + ] +} + """ + custom_json2 = """ + { + "version": "1.0", + "containers": [ + { + "name": "test-container", + "properties": { + "image": "mcr.microsoft.com/aci/msi-atlas-adapter:master_20201210.1", + "environmentVariables": [ + {"name": "PATH", + "value":"/", + "strategy":"string"} + ], + "command": [ + "/bin/sh", + "-c", + "until ./msiAtlasAdapter; do echo $? restarting; done" + ], + "workingDir": "/root/", + "mounts": null + } + } + ] +} + """ + + aci_policy = None + existing_policy = None + + @classmethod + def setUpClass(cls): + with load_policy_from_config_str(cls.custom_json) as aci_policy: + aci_policy.populate_policy_content_for_all_images() + cls.aci_policy = aci_policy + with load_policy_from_config_str(cls.custom_json2) as aci_policy2: + aci_policy2.populate_policy_content_for_all_images() + cls.aci_policy2 = aci_policy2 + + def test_fragment_sidecar(self): + is_valid, diff = self.aci_policy.validate_sidecars() + self.assertTrue(is_valid) + self.assertTrue(not diff) + + def test_fragment_sidecar_stdio_access_default(self): + self.assertTrue( + json.loads( + self.aci_policy.get_serialized_output( + output_type=OutputType.RAW, rego_boilerplate=False + ) + )[0][config.POLICY_FIELD_CONTAINERS_ELEMENTS_ALLOW_STDIO_ACCESS] + ) + + def test_fragment_incorrect_sidecar(self): + + is_valid, diff = self.aci_policy2.validate_sidecars() + + self.assertFalse(is_valid) + expected_diff = { + "mcr.microsoft.com/aci/msi-atlas-adapter:master_20201210.1": { + "env_rules": [ + "environment variable with rule " + + "'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'" + + " does not match strings or regex in policy rules" + ] + } + } + + self.assertEqual(diff, expected_diff) + + +class InitialFragmentErrors(ScenarioTest): + def test_invalid_input(self): + with self.assertRaises(CLIError) as wrapped_exit: + self.cmd("az confcom acifragmentgen --image 
mcr.microsoft.com/aci/msi-atlas-adapter:master_20201210.1 -i fakepath/parameters.json --namespace fake_namespace --svn 1") + self.assertEqual(wrapped_exit.exception.args[0], "Must provide either an image name or an input file to generate a fragment") + + with self.assertRaises(CLIError) as wrapped_exit: + self.cmd("az confcom acifragmentgen --generate-import --minimum-svn 1") + self.assertEqual(wrapped_exit.exception.args[0], "Must provide either a fragment path, an input file, or " + + "an image name to generate an import statement") + + with self.assertRaises(CLIError) as wrapped_exit: + self.cmd("az confcom acifragmentgen --image mcr.microsoft.com/aci/msi-atlas-adapter:master_20201210.1 -k fakepath/key.pem --namespace fake_namespace --svn 1") + self.assertEqual(wrapped_exit.exception.args[0], "Must provide both --key and --chain to sign a fragment") + + with self.assertRaises(CLIError) as wrapped_exit: + self.cmd("az confcom acifragmentgen --fragment-path ./fragment.json --image mcr.microsoft.com/aci/msi-atlas-adapter:master_20201210.1 --namespace fake_namespace --svn 1 --minimum-svn 1") + self.assertEqual(wrapped_exit.exception.args[0], "Must provide --generate-import to specify a fragment path") + + with self.assertRaises(CLIError) as wrapped_exit: + self.cmd("az confcom acifragmentgen --input ./input.json --namespace example --svn -1") + self.assertEqual(wrapped_exit.exception.args[0], "--svn must be an integer") + + with self.assertRaises(CLIError) as wrapped_exit: + self.cmd("az confcom acifragmentgen --input ./input.json --namespace policy --svn 1") + self.assertEqual(wrapped_exit.exception.args[0], "Namespace 'policy' is reserved") + + with self.assertRaises(CLIError) as wrapped_exit: + self.cmd("az confcom acifragmentgen --algo fake_algo --key ./key.pem --chain ./cert-chain.pem --namespace example --svn 1 -i ./input.json") + self.assertEqual(wrapped_exit.exception.args[0], f"Algorithm 'fake_algo' is not supported. 
Supported algorithms are {config.SUPPORTED_ALGOS}") \ No newline at end of file diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_image.py b/src/confcom/azext_confcom/tests/latest/test_confcom_image.py index 7a29a602ae7..e866f93db6c 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_image.py +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_image.py @@ -7,13 +7,13 @@ import unittest import json import deepdiff -import docker from azext_confcom.security_policy import ( OutputType, load_policy_from_image_name, load_policy_from_str, ) +from azext_confcom.template_util import DockerClient import azext_confcom.config as config TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), "..")) @@ -99,35 +99,34 @@ def test_invalid_image_policy(self): class PolicyGeneratingImageCleanRoom(unittest.TestCase): def test_clean_room_policy(self): - client = docker.from_env() - original_image = ( - "mcr.microsoft.com/aci/atlas-mount-azure-file-volume:master_20201210.2" - ) - try: - client.images.remove(original_image) - except: - # do nothing - pass + with DockerClient() as client: + original_image = ( + "mcr.microsoft.com/aci/atlas-mount-azure-file-volume:master_20201210.2" + ) + try: + client.images.remove(original_image) + except: + # do nothing + pass regular_image = load_policy_from_image_name(original_image) regular_image.populate_policy_content_for_all_images(individual_image=True) # create and tag same image to the new name to see if docker will error out that the image is not in a remote repo new_repo = "mcr.microsoft.com" new_image_name = "aci/atlas-mount-azure-file-volume" new_tag = "fake-tag" - - image = client.images.get(original_image) - try: - client.images.remove(new_repo + "/" + new_image_name + ":" + new_tag) - except: - # do nothing - pass - image.tag(new_repo + "/" + new_image_name, tag=new_tag) - try: - client.images.remove(original_image) - except: - # do nothing - pass - client.close() + with DockerClient() as 
client: + image = client.images.get(original_image) + try: + client.images.remove(new_repo + "/" + new_image_name + ":" + new_tag) + except: + # do nothing + pass + image.tag(new_repo + "/" + new_image_name, tag=new_tag) + try: + client.images.remove(original_image) + except: + # do nothing + pass policy = load_policy_from_image_name( new_repo + "/" + new_image_name + ":" + new_tag diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_kata.py b/src/confcom/azext_confcom/tests/latest/test_confcom_kata.py index ad1c2931957..0bc7c6d980f 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_kata.py +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_kata.py @@ -5,12 +5,15 @@ import os import unittest +import unittest.mock as patch +from io import StringIO +import platform from azext_confcom.custom import katapolicygen_confcom import pytest TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), "..")) - +host_os_linux = platform.system() == "Linux" # @unittest.skip("not in use") @pytest.mark.run(order=1) @@ -65,22 +68,34 @@ def test_valid_settings(self): try: with open(filename, "w") as f: f.write(KataPolicyGen.pod_string) - with self.assertRaises(SystemExit) as wrapped_exit: + if host_os_linux: katapolicygen_confcom( filename, None ) + else: + with self.assertRaises(SystemExit) as wrapped_exit: + katapolicygen_confcom( + filename, None + ) + self.assertNotEqual(wrapped_exit.exception.code, 0) + return + with open(filename, "r") as f: content = f.read() finally: if os.path.exists(filename): os.remove(filename) - self.assertEqual(wrapped_exit.exception.code, 0, "Policy not generated successfully") - self.assertNotEqual(content, KataPolicyGen.pod_string, "Policy content not changed in yaml") + if host_os_linux: + self.assertNotEqual(content, KataPolicyGen.pod_string, "Policy content not changed in yaml") def test_print_version(self): - with self.assertRaises(SystemExit) as wrapped_exit: + if host_os_linux: katapolicygen_confcom( 
None, None, print_version=True ) - - self.assertEqual(wrapped_exit.exception.code, 0) + else: + with self.assertRaises(SystemExit) as wrapped_exit: + katapolicygen_confcom( + None, None, print_version=True + ) + self.assertNotEqual(wrapped_exit.exception.code, 0) diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_scenario.py b/src/confcom/azext_confcom/tests/latest/test_confcom_scenario.py index a449b13c532..6cb6375ddee 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_scenario.py +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_scenario.py @@ -14,7 +14,7 @@ ) import azext_confcom.config as config -from azext_confcom.template_util import case_insensitive_dict_get +from azext_confcom.template_util import case_insensitive_dict_get, DockerClient TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), "..")) @@ -571,12 +571,14 @@ def test_image_layers_python(self): """ with load_policy_from_str(custom_json) as aci_policy: # pull actual image to local for next step - aci_policy.pull_image(aci_policy.get_images()[0]) + with DockerClient() as client: + image_ref = aci_policy.get_images()[0] + image = client.images.pull(image_ref.base, tag=image_ref.tag) aci_policy.populate_policy_content_for_all_images() layers = aci_policy.get_images()[0]._layers expected_layers = [ - "e1385118e8a5f9f9751d096e1dbe734c601fa93cd031045c6d70d4bc47479f90", - "41ac484628e184e63ef0d4fc4d4cb3133eec849022d24bd737ac8a36bb3a1212" + "6750e14f6156783394b6837e615252668e74de3df7b1b7281deabdcf5d07c329", + "28551577cdd5c3971cbf47c119cee9c376f2d1b633dc5a3df5f01d4e9cb51aff" ] self.assertEqual(len(layers), len(expected_layers)) for i in range(len(expected_layers)): @@ -596,12 +598,14 @@ def test_docker_pull(self): } """ with load_policy_from_str(custom_json) as aci_policy: - image = aci_policy.pull_image(aci_policy.get_images()[0]) + with DockerClient() as client: + image_ref = aci_policy.get_images()[0] + image = client.images.pull(image_ref.base, 
tag=image_ref.tag) self.assertIsNotNone(image.id) self.assertEqual( - image.id, - "sha256:844d1f1a88fbf036ef4a16acb291141d15abdd37f5844c82bab5e96395a8ceca", + image.tags[0], + "mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0", ) def test_infrastructure_svn(self): @@ -728,6 +732,40 @@ def test_stdio_access_updated(self): )[0][config.POLICY_FIELD_CONTAINERS_ELEMENTS_ALLOW_STDIO_ACCESS] ) + def test_omit_id(self): + image_name = "mcr.microsoft.com/cbl-mariner/distroless/python:3.9-nonroot" + custom_json = f""" + {{ + "version": "1.0", + "containers": [ + {{ + "containerImage": "{image_name}", + "environmentVariables": [], + "command": ["echo", "hello"], + "allowStdioAccess": false + }} + ] + }} + """ + with load_policy_from_str(custom_json) as aci_policy: + aci_policy.populate_policy_content_for_all_images() + + self.assertIsNone( + json.loads( + aci_policy.get_serialized_output( + output_type=OutputType.RAW, rego_boilerplate=False, omit_id=True + ) + )[0].get(config.POLICY_FIELD_CONTAINERS_ID) + ) + + self.assertEqual( + json.loads( + aci_policy.get_serialized_output( + output_type=OutputType.RAW, rego_boilerplate=False, omit_id=False + ) + )[0].get(config.POLICY_FIELD_CONTAINERS_ID), image_name + ) + class CustomJsonParsingIncorrect(unittest.TestCase): def test_get_layers_from_not_exists_image(self): @@ -836,28 +874,6 @@ def test_json_missing_containers(self): load_policy_from_str(custom_json) self.assertEqual(exc_info.exception.code, 1) - def test_json_missing_version(self): - custom_json = """ - { - "containers": [ - { - "containerImage": "mcr.microsoft.com/azuredocs/aci-dataprocessing-cc:v1", - "environmentVariables": [ - { - "name": "port", - "value": "80", - "strategy": "string" - } - ], - "command": ["python", "app.py"] - } - ] - } - """ - with self.assertRaises(SystemExit) as exc_info: - load_policy_from_str(custom_json) - self.assertEqual(exc_info.exception.code, 1) - def test_json_missing_containerImage(self): custom_json = """ { diff --git 
a/src/confcom/azext_confcom/tests/latest/test_confcom_startup.py b/src/confcom/azext_confcom/tests/latest/test_confcom_startup.py index 77c3135d4d4..fb37c92f61a 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_startup.py +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_startup.py @@ -5,48 +5,45 @@ import os from azure.cli.testsdk import ScenarioTest -from azext_confcom.custom import acipolicygen_confcom +from knack.util import CLIError TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), "..")) class InitialErrors(ScenarioTest): def test_invalid_output_flags(self): - with self.assertRaises(SystemExit) as wrapped_exit: + with self.assertRaises(CLIError) as wrapped_exit: self.cmd("az confcom acipolicygen -i fakepath/input.json --outraw --outraw-pretty-print") - self.assertEqual(wrapped_exit.exception.code, 1) + self.assertEqual(wrapped_exit.exception.args[0], "Can only print in one format at a time") - with self.assertRaises(SystemExit) as wrapped_exit: + with self.assertRaises(CLIError) as wrapped_exit: self.cmd("az confcom acipolicygen -i fakepath/input.json --outraw --print-policy") - self.assertEqual(wrapped_exit.exception.code, 1) + self.assertEqual(wrapped_exit.exception.args[0], "Can only print in one format at a time") - with self.assertRaises(SystemExit) as wrapped_exit: + with self.assertRaises(CLIError) as wrapped_exit: self.cmd("az confcom acipolicygen -i fakepath/input.json --print-policy --outraw-pretty-print") - self.assertEqual(wrapped_exit.exception.code, 1) + self.assertEqual(wrapped_exit.exception.args[0], "Can only print in one format at a time") def test_invalid_many_input_types(self): - with self.assertRaises(SystemExit) as wrapped_exit: + with self.assertRaises(CLIError) as wrapped_exit: self.cmd("az confcom acipolicygen -i fakepath/input.json -a fakepath2/template.json") - self.assertEqual(wrapped_exit.exception.code, 1) + self.assertEqual(wrapped_exit.exception.args[0], "Can only generate CCE policy from 
one source at a time") def test_diff_wrong_input_type(self): - with self.assertRaises(SystemExit) as wrapped_exit: + with self.assertRaises(CLIError) as wrapped_exit: self.cmd("az confcom acipolicygen -i fakepath/input.json --diff") - self.assertEqual(wrapped_exit.exception.code, 1) + self.assertEqual(wrapped_exit.exception.args[0], "Can only diff CCE policy from ARM Template or YAML File") - with self.assertRaises(SystemExit) as wrapped_exit: + with self.assertRaises(CLIError) as wrapped_exit: self.cmd("az confcom acipolicygen --image alpine --diff") - self.assertEqual(wrapped_exit.exception.code, 1) + self.assertEqual(wrapped_exit.exception.args[0], "Can only diff CCE policy from ARM Template or YAML File") def test_parameters_without_template(self): - with self.assertRaises(SystemExit) as wrapped_exit: - self.cmd("az confcom acipolicygen -p fakepath/parameters.json") - acipolicygen_confcom( - None, None, "fakepath/parameters.json", None, None, None, None - ) - self.assertEqual(wrapped_exit.exception.code, 1) + with self.assertRaises(CLIError) as wrapped_exit: + self.cmd("az confcom acipolicygen -p fakepath/parameters.json -i fakepath/input.json") + self.assertEqual(wrapped_exit.exception.args[0], "Can only use ARM Template Parameters if ARM Template is also present") def test_input_and_virtual_node(self): - with self.assertRaises(SystemExit) as wrapped_exit: + with self.assertRaises(CLIError) as wrapped_exit: self.cmd("az confcom acipolicygen -i fakepath/input.json --virtual-node-yaml fakepath/virtual-node.yaml") - self.assertEqual(wrapped_exit.exception.code, 1) + self.assertEqual(wrapped_exit.exception.args[0], "Can only generate CCE policy from one source at a time") diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_tar.py b/src/confcom/azext_confcom/tests/latest/test_confcom_tar.py index 88a0fd032b3..c26768e3705 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_tar.py +++ 
b/src/confcom/azext_confcom/tests/latest/test_confcom_tar.py @@ -7,7 +7,6 @@ import unittest import deepdiff import json -import docker from azext_confcom.security_policy import ( OutputType, diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_template_util.py b/src/confcom/azext_confcom/tests/latest/test_confcom_template_util.py index 107e72313bf..edaab331ded 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_template_util.py +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_template_util.py @@ -490,10 +490,7 @@ def test_inject_policy_into_template(self): with open(filename, "w") as f: f.write(template) - with self.assertRaises(SystemExit) as exc_info: - acipolicygen_confcom(None, filename, None, None, None, None, None) - - self.assertEqual(exc_info.exception.code, 0) + acipolicygen_confcom(None, filename, None, None, None, None, None) with open(filename, "r") as f: template_with_policy = load_json_from_str(f.read()) diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_virtual_node.py b/src/confcom/azext_confcom/tests/latest/test_confcom_virtual_node.py index 4df97eb9552..c3cb207539e 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_virtual_node.py +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_virtual_node.py @@ -218,6 +218,8 @@ class PolicyGeneratingVirtualNode(unittest.TestCase): kind: Pod metadata: name: simple-container-pod + labels: + azure.workload.identity/use: "true" spec: initContainers: - name: init-container @@ -314,3 +316,15 @@ def test_init_containers(self): self.assertEqual(containers[0][config.POLICY_FIELD_CONTAINERS_NAME], "simple-container") # see if the main container is in the policy self.assertEqual(containers[1][config.POLICY_FIELD_CONTAINERS_NAME], "init-container") + + def test_workload_identity(self): + virtual_node_policy = load_policy_from_virtual_node_yaml_str(self.custom_yaml_init_containers)[0] + virtual_node_policy.populate_policy_content_for_all_images() + 
container_start = "containers := " + containers = json.loads(extract_containers_from_text(virtual_node_policy.get_serialized_output(OutputType.PRETTY_PRINT), container_start)) + + # have to extract the name from the pattern + env_rule_names = [(env_rule['pattern']).split("=")[0] for env_rule in containers[0][config.POLICY_FIELD_CONTAINERS_ELEMENTS_ENVS]] + + for var in config.VIRTUAL_NODE_ENV_RULES_WORKLOAD_IDENTITY: + self.assertTrue(var['name'] in env_rule_names) diff --git a/src/confcom/samples/certs/README.md b/src/confcom/samples/certs/README.md new file mode 100644 index 00000000000..05314791542 --- /dev/null +++ b/src/confcom/samples/certs/README.md @@ -0,0 +1,92 @@ +# Create a Key and Cert for Signing + +## Prerequisites + +- Must have OpenSSL installed (tested with version 3.0.2) +- Must have Azure CLI installed (tested with version 2.46.0) +- Must have the [`confcom` extension version 1.1.0 or greater](../../README.md) installed +- Must have [ORAS CLI](https://oras.land/docs/installation/) installed (tested with version 1.1.0) + +## Update Config + +*This step sets up the configuration for creating certs to sign the fragment policy. This only needs to be done once.* + +`create_certchain.sh` should have `` specified at the top for `RootPath` + +The image in `fragment_config.json` must be updated from `` to the image you want to attach the fragment to. This is likely going to be in Azure Container Registry but can be in any of these [supported registries](https://oras.land/docs/compatible_oci_registries/). + +## Run the Script + +*This step will create the necessary certificates and private keys to sign the fragment policy, including generating a root private key, intermediate private key, and a server private key. These keys are used to create the certificate chain required for signing. 
This step only needs to be done once.* + +```bash +./create_certchain.sh +``` + +You will need to select (y) for four prompts to sign the certs needed to create a cert chain. + +After completion, this will create the following files to be used in the confcom signing process: + +- `intermediate/private/ec_p384_private.pem` +- `intermediateCA/certs/www.contoso.com.chain.cert.pem` + +## Run confcom + +*This step will generate the fragment policy, sign it with the certs created in the previous step, and upload the fragment to the container registry.* + +You may need to change the path to the chain and key files in the following command: + +```bash +az confcom acifragmentgen --chain ./samples/certs/intermediateCA/certs/www.contoso.com.chain.cert.pem --key ./samples/certs/intermediateCA/private/ec_p384_private.pem --svn 1 --namespace contoso --config ./samples/config.json --upload-fragment +``` + +After running the command, there will be the following files created: + +- `contoso.rego` +- `contoso.rego.cose` + +Where `contoso.rego` is the fragment policy and `contoso.rego.cose` is the signed policy in COSE format. + +The `--upload-fragment` flag will attempt to attach the fragment to the container image in the ORAS-compliant registry. You may need to login to the registry before running the command via something like `az acr login`. + +The fragment can be seen in the Azure portal under the container repo's artifacts By going through the following steps: + +1. Go to the Azure portal +2. Go to the image's associated Azure Container Registry instance +3. Go to the specific image's repository +4. Click the tag of the image the fragment was attached to +5. Click the `Referrers` tab +6. 
The fragment should be listed as an artifact + +## Generate Security Policy for an ARM Template + +*This step will generate a security policy for an ARM template and include the fragment policy created in the previous step.* + +To create an import statement for the newly created rego fragment, run the following command: + +```bash +az confcom acifragmentgen --generate-import -p ./contoso.rego.cose --minimum-svn 1 +``` + +Which will output the fragment's import in json format. **Place this import statement into a new `fragments.json` file.** + +example output: + +```json +{ + "issuer": "did:x509:0:sha256:I__iuL25oXEVFdTP_aBLx_eT1RPHbCQ_ECBQfYZpt9s::eku:1.3.6.1.4.1.311.76.59.1.3", + "feed": "contoso.azurecr.io/infra", + "minimum_svn": "1", + "includes": [ + "containers" + ] +} +``` + +To generate a security policy for an ARM template, run the following command: + +```bash +az confcom acipolicygen -a template.json --include-fragments --fragments-json fragments.json +``` + +This will insert the fragment policy into the ARM template and include the mentioned fragments in the `fragments.json` file. 
diff --git a/src/confcom/samples/certs/create_certchain.sh b/src/confcom/samples/certs/create_certchain.sh new file mode 100755 index 00000000000..5bdaa3ca299 --- /dev/null +++ b/src/confcom/samples/certs/create_certchain.sh @@ -0,0 +1,81 @@ +# Following guide from: https://www.golinuxcloud.com/openssl-create-certificate-chain-linux/ + +RootPath=/home//azure-cli-extensions/src/confcom/samples/certs + +# create dirs for root CA +mkdir -p $RootPath/rootCA/{certs,crl,newcerts,private,csr} +mkdir -p $RootPath/intermediateCA/{certs,crl,newcerts,private,csr} + +# create serial number files +echo 1000 > $RootPath/rootCA/serial +echo 1000 > $RootPath/intermediateCA/serial + +# create crlnumbers +echo 0100 > $RootPath/rootCA/crlnumber +echo 0100 > $RootPath/intermediateCA/crlnumber + +# create index files +touch $RootPath/rootCA/index.txt +touch $RootPath/intermediateCA/index.txt + +# generate root key +openssl genrsa -out $RootPath/rootCA/private/ca.key.pem 4096 +chmod 400 $RootPath/rootCA/private/ca.key.pem + +# view the key +# openssl rsa -noout -text -in $RootPath/rootCA/private/ca.key.pem + +# generate root cert +openssl req -config openssl_root.cnf -key $RootPath/rootCA/private/ca.key.pem -new -x509 -days 7300 -sha256 -extensions v3_ca -out $RootPath/rootCA/certs/ca.cert.pem -subj "/C=US/ST=Georgia/L=Atlanta/O=Microsoft/OU=ACCCT/CN=Root CA" + +# make the root cert readable by everyone +chmod 444 $RootPath/rootCA/certs/ca.cert.pem + +# verify root cert +openssl x509 -noout -text -in $RootPath/rootCA/certs/ca.cert.pem + +# generate intermediate key +openssl genrsa -out $RootPath/intermediateCA/private/intermediate.key.pem 4096 +chmod 400 $RootPath/intermediateCA/private/intermediate.key.pem + +# make CSR for intermediate +openssl req -config openssl_intermediate.cnf -key $RootPath/intermediateCA/private/intermediate.key.pem -new -sha256 -out $RootPath/intermediateCA/certs/intermediate.csr.pem -subj 
"/C=US/ST=Georgia/L=Atlanta/O=Microsoft/OU=ACCCT/CN=Intermediate CA" + +# sign intermediate cert with root +openssl ca -config openssl_root.cnf -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in $RootPath/intermediateCA/certs/intermediate.csr.pem -out $RootPath/intermediateCA/certs/intermediate.cert.pem + +# make it readable by everyone +chmod 444 $RootPath/intermediateCA/certs/intermediate.cert.pem + +# print the cert +# openssl x509 -noout -text -in $RootPath/intermediateCA/certs/intermediate.cert.pem + +# verify intermediate cert +openssl verify -CAfile $RootPath/rootCA/certs/ca.cert.pem $RootPath/intermediateCA/certs/intermediate.cert.pem + +# create chain file +cat $RootPath/intermediateCA/certs/intermediate.cert.pem $RootPath/rootCA/certs/ca.cert.pem > $RootPath/intermediateCA/certs/ca-chain.cert.pem + +# verify chain +openssl verify -CAfile $RootPath/intermediateCA/certs/ca-chain.cert.pem $RootPath/intermediateCA/certs/intermediate.cert.pem + +# create server key +openssl ecparam -out $RootPath/intermediateCA/private/www.contoso.com.key.pem -name secp384r1 -genkey +openssl pkcs8 -topk8 -nocrypt -in $RootPath/intermediateCA/private/www.contoso.com.key.pem -out $RootPath/intermediateCA/private/ec_p384_private.pem + +chmod 400 $RootPath/intermediateCA/private/www.contoso.com.key.pem + +# create csr for server +openssl req -config openssl_intermediate.cnf -key $RootPath/intermediateCA/private/www.contoso.com.key.pem -new -sha384 -out $RootPath/intermediateCA/csr/www.contoso.com.csr.pem -batch + +# sign server cert with intermediate key +openssl ca -config openssl_intermediate.cnf -extensions server_cert -days 375 -notext -md sha384 -in $RootPath/intermediateCA/csr/www.contoso.com.csr.pem -out $RootPath/intermediateCA/certs/www.contoso.com.cert.pem + +# print the cert +# openssl x509 -noout -text -in $RootPath/intermediateCA/certs/www.contoso.com.cert.pem + +# make a public key +# openssl x509 -pubkey -noout -in 
$RootPath/intermediateCA/certs/www.contoso.com.cert.pem -out $RootPath/intermediateCA/certs/pubkey.pem + +# create chain file +cat $RootPath/intermediateCA/certs/www.contoso.com.cert.pem $RootPath/intermediateCA/certs/intermediate.cert.pem $RootPath/rootCA/certs/ca.cert.pem > $RootPath/intermediateCA/certs/www.contoso.com.chain.cert.pem \ No newline at end of file diff --git a/src/confcom/samples/certs/openssl_intermediate.cnf b/src/confcom/samples/certs/openssl_intermediate.cnf new file mode 100644 index 00000000000..f7d6a72dfd4 --- /dev/null +++ b/src/confcom/samples/certs/openssl_intermediate.cnf @@ -0,0 +1,71 @@ +[ ca ] # The default CA section +default_ca = CA_default # The default CA name + +[ CA_default ] # Default settings for the intermediate CA +dir = ./intermediateCA # Intermediate CA directory +certs = $dir/certs # Certificates directory +crl_dir = $dir/crl # CRL directory +new_certs_dir = $dir/newcerts # New certificates directory +database = $dir/index.txt # Certificate index file +serial = $dir/serial # Serial number file +RANDFILE = $dir/private/.rand # Random number file +private_key = $dir/private/intermediate.key.pem # Intermediate CA private key +certificate = $dir/certs/intermediate.cert.pem # Intermediate CA certificate +crl = $dir/crl/intermediate.crl.pem # Intermediate CA CRL +crlnumber = $dir/crlnumber # Intermediate CA CRL number +crl_extensions = crl_ext # CRL extensions +default_crl_days = 30 # Default CRL validity days +default_md = sha256 # Default message digest +preserve = no # Preserve existing extensions +email_in_dn = no # Exclude email from the DN +name_opt = ca_default # Formatting options for names +cert_opt = ca_default # Certificate output options +policy = policy_loose # Certificate policy + +[ policy_loose ] # Policy for less strict validation +countryName = optional # Country is optional +stateOrProvinceName = optional # State or province is optional +localityName = optional # Locality is optional +organizationName = 
optional # Organization is optional +organizationalUnitName = optional # Organizational unit is optional +commonName = supplied # Must provide a common name +emailAddress = optional # Email address is optional + +[ req ] # Request settings +default_bits = 2048 # Default key size +distinguished_name = req_distinguished_name # Default DN template +string_mask = utf8only # UTF-8 encoding +default_md = sha256 # Default message digest +x509_extensions = v3_intermediate_ca # Extensions for intermediate CA certificate + +[ req_distinguished_name ] # Template for the DN in the CSR +countryName = Country Name (2 letter code) +stateOrProvinceName = State or Province Name +localityName = Locality Name +0.organizationName = Organization Name +organizationalUnitName = Organizational Unit Name +commonName = Common Name +emailAddress = Email Address +countryName_default = US +stateOrProvinceName_default = Georgia +localityName_default = Atlanta +0.organizationName_default = Contoso +organizationalUnitName_default = Contoso +commonName_default = Contoso +emailAddress_default = contoso@contoso.com + +[ v3_intermediate_ca ] # Intermediate CA certificate extensions +subjectKeyIdentifier = hash # Subject key identifier +authorityKeyIdentifier = keyid:always,issuer # Authority key identifier +basicConstraints = critical, CA:true # Basic constraints for a CA +keyUsage = critical, digitalSignature, cRLSign, keyCertSign # Key usage for a CA + +[ crl_ext ] # CRL extensions +authorityKeyIdentifier=keyid:always # Authority key identifier + +[ server_cert ] # Server certificate extensions +basicConstraints = CA:FALSE # Not a CA certificate +nsCertType = server # Server certificate type +keyUsage = critical, digitalSignature, keyEncipherment # Key usage for a server cert +extendedKeyUsage = serverAuth, codeSigning # Extended key usage for server authentication purposes (e.g., TLS/SSL servers). 
+authorityKeyIdentifier = keyid,issuer # Authority key identifier linking the certificate to the issuer's public key. \ No newline at end of file diff --git a/src/confcom/samples/certs/openssl_root.cnf b/src/confcom/samples/certs/openssl_root.cnf new file mode 100644 index 00000000000..7c26148f509 --- /dev/null +++ b/src/confcom/samples/certs/openssl_root.cnf @@ -0,0 +1,70 @@ +[ ca ] # The default CA section +default_ca = CA_default # The default CA name + +[ CA_default ] # Default settings for the CA +dir = ./rootCA # CA directory +certs = $dir/certs # Certificates directory +crl_dir = $dir/crl # CRL directory +new_certs_dir = $dir/newcerts # New certificates directory +database = $dir/index.txt # Certificate index file +serial = $dir/serial # Serial number file +RANDFILE = $dir/private/.rand # Random number file +private_key = $dir/private/ca.key.pem # Root CA private key +certificate = $dir/certs/ca.cert.pem # Root CA certificate +crl = $dir/crl/ca.crl.pem # Root CA CRL +crlnumber = $dir/crlnumber # Root CA CRL number +crl_extensions = crl_ext # CRL extensions +default_crl_days = 30 # Default CRL validity days +default_md = sha256 # Default message digest +preserve = no # Preserve existing extensions +email_in_dn = no # Exclude email from the DN +name_opt = ca_default # Formatting options for names +cert_opt = ca_default # Certificate output options +policy = policy_strict # Certificate policy +unique_subject = no # Allow multiple certs with the same DN + +[ policy_strict ] # Policy for stricter validation +countryName = match # Must match the issuer's country +stateOrProvinceName = match # Must match the issuer's state +organizationName = match # Must match the issuer's organization +organizationalUnitName = optional # Organizational unit is optional +commonName = supplied # Must provide a common name +emailAddress = optional # Email address is optional + +[ req ] # Request settings +default_bits = 2048 # Default key size +distinguished_name = 
req_distinguished_name # Default DN template +string_mask = utf8only # UTF-8 encoding +default_md = sha256 # Default message digest +prompt = no # Non-interactive mode + +[ req_distinguished_name ] # Template for the DN in the CSR +countryName = Country Name (2 letter code) +stateOrProvinceName = State or Province Name +localityName = Locality Name +0.organizationName = Organization Name +organizationalUnitName = Organizational Unit Name +commonName = Common Name +emailAddress = Email Address +countryName_default = US +stateOrProvinceName_default = Georgia +localityName_default = Atlanta +0.organizationName_default = Contoso +organizationalUnitName_default = Contoso +commonName_default = Contoso +emailAddress_default = contoso@contoso.com + +[ v3_ca ] # Root CA certificate extensions +subjectKeyIdentifier = hash # Subject key identifier +authorityKeyIdentifier = keyid:always,issuer # Authority key identifier +basicConstraints = critical, CA:true # Basic constraints for a CA +keyUsage = critical, keyCertSign, cRLSign # Key usage for a CA + +[ crl_ext ] # CRL extensions +authorityKeyIdentifier = keyid:always,issuer # Authority key identifier + +[ v3_intermediate_ca ] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer +basicConstraints = critical, CA:true +keyUsage = critical, digitalSignature, cRLSign, keyCertSign diff --git a/src/confcom/samples/config.json b/src/confcom/samples/config.json new file mode 100644 index 00000000000..4d6709e713d --- /dev/null +++ b/src/confcom/samples/config.json @@ -0,0 +1,48 @@ +{ + "version": "1.0", + "fragments": [ + { + "issuer": "did:x509:0:sha256:I__iuL25oXEVFdTP_aBLx_eT1RPHbCQ_ECBQfYZpt9s::eku:1.3.6.1.4.1.311.76.59.1.3", + "feed": "contoso.azurecr.io/infra", + "minimum_svn": "1", + "includes": [ + "containers" + ] + } + ], + "containers": [ + { + "name": "my-image", + "properties": { + "image": "mcr.microsoft.com/acc/samples/aci/helloworld:2.8", + "execProcesses": [ + { + "command": [ + "echo", + "Hello 
World" + ] + } + ], + "volumeMounts": [ + { + "name": "azurefile", + "mountPath": "/mount/azurefile", + "mountType": "azureFile", + "readOnly": true + } + ], + "environmentVariables": [ + { + "name": "PATH", + "value": "/customized/path/value" + }, + { + "name": "TEST_REGEXP_ENV", + "value": "test_regexp_env(.*)", + "regex": true + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/confcom/samples/contoso.rego b/src/confcom/samples/contoso.rego new file mode 100644 index 00000000000..bc10bbdebf0 --- /dev/null +++ b/src/confcom/samples/contoso.rego @@ -0,0 +1,277 @@ +package contoso + +svn := "1" +framework_version := "0.2.3" + +fragments := [] + +containers := [ + { + "allow_elevated": false, + "allow_stdio_access": true, + "capabilities": { + "ambient": [], + "bounding": [ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_NET_RAW", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYS_CHROOT" + ], + "effective": [ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_NET_RAW", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYS_CHROOT" + ], + "inheritable": [], + "permitted": [ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_NET_RAW", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYS_CHROOT" + ] + }, + "command": [ + "python3", + "main.py" + ], + "env_rules": [ + { + "pattern": "TEST_REGEXP_ENV=test_regexp_env(.*)", + "required": false, + "strategy": "re2" + }, + { + "pattern": "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "required": false, + "strategy": "string" + }, + { + "pattern": "PYTHONUNBUFFERED=1", + "required": false, + "strategy": "string" + 
}, + { + "pattern": "TERM=xterm", + "required": false, + "strategy": "string" + }, + { + "pattern": "(?i)(FABRIC)_.+=.+", + "required": false, + "strategy": "re2" + }, + { + "pattern": "HOSTNAME=.+", + "required": false, + "strategy": "re2" + }, + { + "pattern": "T(E)?MP=.+", + "required": false, + "strategy": "re2" + }, + { + "pattern": "FabricPackageFileName=.+", + "required": false, + "strategy": "re2" + }, + { + "pattern": "HostedServiceName=.+", + "required": false, + "strategy": "re2" + }, + { + "pattern": "IDENTITY_API_VERSION=.+", + "required": false, + "strategy": "re2" + }, + { + "pattern": "IDENTITY_HEADER=.+", + "required": false, + "strategy": "re2" + }, + { + "pattern": "IDENTITY_SERVER_THUMBPRINT=.+", + "required": false, + "strategy": "re2" + }, + { + "pattern": "azurecontainerinstance_restarted_by=.+", + "required": false, + "strategy": "re2" + } + ], + "exec_processes": [], + "id": "mcr.microsoft.com/acc/samples/aci/helloworld:2.8", + "layers": [ + "0de62d1aaa53f09c1ba26871cc97bda0ed29ea2eba4eb95c42b800159f0c087c", + "1db0e60df71bbeda66196a3b518967cbc1b650cda08ada110744e0e07c965a5a", + "e5c725f6ef8eae5de23753c9af8ca5489153eecd12982a0db0fc13d93fc7e124", + "fdafe8a7071ca0af2ec45276bd7c4abe8aa3068b1fef08856251cf19638c52f2", + "398208096568e4d3b1f7e420038c23d2bd3ba0a6c6b21b0f0d8f61c04d796bd7" + ], + "mounts": [ + { + "destination": "/mount/azurefile", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "sandbox:///tmp/atlas/azureFileVolume/.+", + "type": "bind" + }, + { + "destination": "/etc/resolv.conf", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "sandbox:///tmp/atlas/resolvconf/.+", + "type": "bind" + } + ], + "no_new_privileges": false, + "seccomp_profile_sha256": "", + "signals": [], + "user": { + "group_idnames": [ + { + "pattern": "", + "strategy": "any" + } + ], + "umask": "0022", + "user_idname": { + "pattern": "", + "strategy": "any" + } + }, + "working_dir": "/app" + }, + { + "allow_elevated": false, + 
"allow_stdio_access": true, + "capabilities": { + "ambient": [], + "bounding": [ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ], + "effective": [ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ], + "inheritable": [], + "permitted": [ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ] + }, + "command": [ + "/pause" + ], + "env_rules": [ + { + "pattern": "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "required": true, + "strategy": "string" + }, + { + "pattern": "TERM=xterm", + "required": false, + "strategy": "string" + } + ], + "exec_processes": [], + "layers": [ + "16b514057a06ad665f92c02863aca074fd5976c755d26bff16365299169e8415" + ], + "mounts": [], + "no_new_privileges": false, + "seccomp_profile_sha256": "", + "signals": [], + "user": { + "group_idnames": [ + { + "pattern": "", + "strategy": "any" + } + ], + "umask": "0022", + "user_idname": { + "pattern": "", + "strategy": "any" + } + }, + "working_dir": "/" + } +] diff --git a/src/confcom/samples/fragment_config.json b/src/confcom/samples/fragment_config.json new file mode 100644 index 00000000000..918515d94f4 --- /dev/null +++ b/src/confcom/samples/fragment_config.json @@ -0,0 +1,25 @@ +{ + "containers": [ + { + "name": "my-image", + "properties": { + "image": "", + "environmentVariables": [ + { + "name": "PATH", + "value": "/customized/path/value" + }, + { + "name": "TEST_REGEXP_ENV", + 
"value": "test_regexp_env(.*)", + "regex": true + } + ], + "command": [ + "python3", + "main.py" + ] + } + } + ] +} \ No newline at end of file diff --git a/src/confcom/samples/fragments.json b/src/confcom/samples/fragments.json new file mode 100644 index 00000000000..23102bb0e12 --- /dev/null +++ b/src/confcom/samples/fragments.json @@ -0,0 +1,10 @@ +{ + "path": "./example.rego.cose", + "feed": "contoso.azurecr.io/example", + "includes": [ + "containers", + "fragments" + ], + "issuer": "did:x509:0:sha256:mLzv0uyBNQvC6hi4y9qy8hr6NSZuYFv6gfCwAEWBNqc::subject:CN:Contoso", + "minimum_svn": "1" +} \ No newline at end of file diff --git a/src/confcom/setup.py b/src/confcom/setup.py index afc2c356d74..1dd7767e1b5 100644 --- a/src/confcom/setup.py +++ b/src/confcom/setup.py @@ -10,6 +10,7 @@ from setuptools import setup, find_packages from azext_confcom.rootfs_proxy import SecurityPolicyProxy from azext_confcom.kata_proxy import KataPolicyGenProxy +from azext_confcom.cose_proxy import CoseSignToolProxy try: from azure_bdist_wheel import cmdclass @@ -18,7 +19,7 @@ logger.warn("Wheel is not available, disabling bdist_wheel hook") -VERSION = "1.0.1" +VERSION = "1.1.0" # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers @@ -45,6 +46,7 @@ SecurityPolicyProxy.download_binaries() KataPolicyGenProxy.download_binaries() +CoseSignToolProxy.download_binaries() with open("README.md", "r", encoding="utf-8") as f: README = f.read() @@ -70,6 +72,8 @@ "bin/dmverity-vhd", # linux for ACI "bin/genpolicy-windows.exe", # windows for AKS "bin/genpolicy-linux", # linux for AKS + "bin/sign1util.exe", # windows for cose tool + "bin/sign1util", # linux for cose tool "data/*", ] },