From a4bd0b6013127dbc0f90049073ceea755e8daac9 Mon Sep 17 00:00:00 2001 From: Stefan Nica Date: Fri, 18 Oct 2024 14:34:43 +0200 Subject: [PATCH 1/2] Remove deprecated features: `zenml deploy` and `zenml deploy` (#3089) * Remove deprecated mlstacks code from the zenml core * Remove deprecated stack/component deployment and server deployment code and docs * Remove zenml deploy from docs * Remove unused terraform service code * Fix hardcoded docs links * Removing all mlstacks mentions * Update docs/book/how-to/trigger-pipelines/use-templates-dashboard.md Co-authored-by: Alex Strick van Linschoten * Move custom-secret-stores.md where it belongs --------- Co-authored-by: Alex Strick van Linschoten --- .github/workflows/ci-slow.yml | 5 - .github/workflows/release.yml | 30 - .gitignore | 3 - .test_durations | 12 - .../component-guide/artifact-stores/gcp.md | 11 - .../component-guide/artifact-stores/s3.md | 9 - .../container-registries/aws.md | 10 - .../container-registries/gcp.md | 20 - .../component-guide/model-deployers/seldon.md | 14 - .../component-guide/orchestrators/airflow.md | 9 +- .../component-guide/orchestrators/azureml.md | 3 +- .../component-guide/orchestrators/kubeflow.md | 10 - .../orchestrators/kubernetes.md | 12 +- .../orchestrators/sagemaker.md | 12 - .../component-guide/orchestrators/tekton.md | 10 - .../component-guide/orchestrators/vertex.md | 2 - .../step-operators/sagemaker.md | 12 +- .../getting-started/deploying-zenml/README.md | 4 +- .../deploying-zenml/custom-secret-stores.md | 102 +++ .../deploy-with-custom-image.md | 18 - .../deploying-zenml/deploy-with-zenml-cli.md | 322 --------- .../migration-guide/migration-zero-sixty.md | 2 +- .../how-to/popular-integrations/kubernetes.md | 6 +- docs/book/how-to/stack-deployment/README.md | 5 - .../deploy-a-stack-using-mlstacks.md | 233 ------- .../troubleshoot-stack-components.md | 81 --- .../use-templates-dashboard.md | 5 +- docs/book/introduction.md | 3 - docs/book/reference/how-do-i.md | 2 - docs/book/toc.md | 5 +- .../user-guide/cloud-guide/cloud-guide.md | 3 +- .../llmops-guide/evaluation/retrieval.md | 4 +- .../evaluating-finetuned-embeddings.md | 5 +- .../book/user-guide/production-guide/ci-cd.md | 2 +- .../production-guide/deploying-zenml.md | 18 +- pyproject.toml | 8 - scripts/install-zenml-dev.sh | 2 +- src/zenml/analytics/enums.py | 13 - src/zenml/cli/__init__.py | 40 +- src/zenml/cli/base.py | 4 +- src/zenml/cli/server.py | 229 +------ src/zenml/cli/stack.py | 522 +------------- src/zenml/cli/stack_components.py | 406 ----------- src/zenml/cli/stack_recipes.py | 469 ------------- src/zenml/cli/utils.py | 57 -- src/zenml/constants.py | 27 - src/zenml/enums.py | 3 - src/zenml/services/terraform/__init__.py | 14 - .../services/terraform/terraform_service.py | 441 ------------ src/zenml/utils/mlstacks_utils.py | 635 ------------------ src/zenml/utils/terraform_utils.py | 43 -- src/zenml/zen_server/deploy/__init__.py | 9 - .../zen_server/deploy/terraform/__init__.py | 41 -- .../deploy/terraform/providers/__init__.py | 14 - .../terraform/providers/aws_provider.py | 61 -- .../terraform/providers/azure_provider.py | 59 -- .../terraform/providers/gcp_provider.py | 59 -- .../terraform/providers/terraform_provider.py | 331 --------- .../deploy/terraform/recipes/aws/.gitignore | 8 - .../deploy/terraform/recipes/aws/helm.tf | 20 - .../deploy/terraform/recipes/aws/ingress.tf | 30 - .../deploy/terraform/recipes/aws/outputs.tf | 7 - .../deploy/terraform/recipes/aws/printf.cmd | 2 - .../deploy/terraform/recipes/aws/sql.tf | 62 -- 
.../deploy/terraform/recipes/aws/terraform.tf | 44 -- .../deploy/terraform/recipes/aws/variables.tf | 167 ----- .../deploy/terraform/recipes/aws/vpc.tf | 47 -- .../terraform/recipes/aws/zen_server.tf | 103 --- .../deploy/terraform/recipes/azure/.gitignore | 8 - .../deploy/terraform/recipes/azure/helm.tf | 20 - .../deploy/terraform/recipes/azure/ingress.tf | 30 - .../terraform/recipes/azure/key_vault.tf | 73 -- .../deploy/terraform/recipes/azure/outputs.tf | 7 - .../deploy/terraform/recipes/azure/printf.cmd | 2 - .../deploy/terraform/recipes/azure/rg.tf | 36 - .../deploy/terraform/recipes/azure/sql.tf | 65 -- .../terraform/recipes/azure/terraform.tf | 52 -- .../terraform/recipes/azure/variables.tf | 176 ----- .../terraform/recipes/azure/zen_server.tf | 103 --- .../deploy/terraform/recipes/gcp/.gitignore | 8 - .../deploy/terraform/recipes/gcp/helm.tf | 20 - .../deploy/terraform/recipes/gcp/ingress.tf | 30 - .../deploy/terraform/recipes/gcp/outputs.tf | 7 - .../deploy/terraform/recipes/gcp/printf.cmd | 2 - .../deploy/terraform/recipes/gcp/sql.tf | 64 -- .../deploy/terraform/recipes/gcp/terraform.tf | 44 -- .../deploy/terraform/recipes/gcp/variables.tf | 171 ----- .../terraform/recipes/gcp/zen_server.tf | 114 ---- .../deploy/terraform/terraform_zen_server.py | 246 ------- src/zenml/zen_server/utils.py | 50 +- tests/unit/utils/test_mlstacks_utils.py | 290 -------- 91 files changed, 146 insertions(+), 6453 deletions(-) create mode 100644 docs/book/getting-started/deploying-zenml/custom-secret-stores.md delete mode 100644 docs/book/getting-started/deploying-zenml/deploy-with-zenml-cli.md delete mode 100644 docs/book/how-to/stack-deployment/deploy-a-stack-using-mlstacks.md delete mode 100644 docs/book/how-to/stack-deployment/troubleshoot-stack-components.md delete mode 100644 src/zenml/cli/stack_recipes.py delete mode 100644 src/zenml/services/terraform/__init__.py delete mode 100644 src/zenml/services/terraform/terraform_service.py delete mode 100644 src/zenml/utils/mlstacks_utils.py delete mode 100644 src/zenml/utils/terraform_utils.py delete mode 100644 src/zenml/zen_server/deploy/terraform/__init__.py delete mode 100644 src/zenml/zen_server/deploy/terraform/providers/__init__.py delete mode 100644 src/zenml/zen_server/deploy/terraform/providers/aws_provider.py delete mode 100644 src/zenml/zen_server/deploy/terraform/providers/azure_provider.py delete mode 100644 src/zenml/zen_server/deploy/terraform/providers/gcp_provider.py delete mode 100644 src/zenml/zen_server/deploy/terraform/providers/terraform_provider.py delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/aws/.gitignore delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/aws/helm.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/aws/ingress.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/aws/outputs.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/aws/printf.cmd delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/aws/sql.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/aws/terraform.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/aws/variables.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/aws/vpc.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/aws/zen_server.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/azure/.gitignore delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/azure/helm.tf delete mode 100644 
src/zenml/zen_server/deploy/terraform/recipes/azure/ingress.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/azure/key_vault.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/azure/outputs.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/azure/printf.cmd delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/azure/rg.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/azure/sql.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/azure/terraform.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/azure/variables.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/azure/zen_server.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/gcp/.gitignore delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/gcp/helm.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/gcp/ingress.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/gcp/outputs.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/gcp/printf.cmd delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/gcp/sql.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/gcp/terraform.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/gcp/variables.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/recipes/gcp/zen_server.tf delete mode 100644 src/zenml/zen_server/deploy/terraform/terraform_zen_server.py delete mode 100644 tests/unit/utils/test_mlstacks_utils.py diff --git a/.github/workflows/ci-slow.yml b/.github/workflows/ci-slow.yml index b02fc73d6f0..8053c2f0abd 100644 --- a/.github/workflows/ci-slow.yml +++ b/.github/workflows/ci-slow.yml @@ -160,11 +160,6 @@ jobs: - name: Create virtual environment run: | uv venv - - name: Check mlstacks compatibility - run: | - source .venv/bin/activate - uv pip install -e . - uv pip install mlstacks - name: Check for broken dependencies run: | source .venv/bin/activate diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fa72a238160..a04394bc03f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,35 +10,6 @@ jobs: os: arc-runner-set python-version: '3.9' secrets: inherit - mlstacks-compatibility-check: - needs: setup-and-test - runs-on: arc-runner-set - steps: - - name: Checkout code - uses: actions/checkout@v4.1.1 - - name: Set up Python - uses: actions/setup-python@v5.0.0 - with: - python-version: '3.9' - - name: Install uv - run: | - curl -LsSf https://astral.sh/uv/install.sh | sh - source $HOME/.cargo/env - - name: Create virtual environment - run: | - source $HOME/.cargo/env - uv venv - - name: Check mlstacks compatibility - run: | - source .venv/bin/activate - source $HOME/.cargo/env - uv pip install -e . 
- uv pip install mlstacks - - name: Check for broken dependencies - run: | - source .venv/bin/activate - source $HOME/.cargo/env - uv pip check mysql-db-migration-testing: runs-on: arc-runner-set env: @@ -101,7 +72,6 @@ jobs: if: github.repository == 'zenml-io/zenml' needs: - setup-and-test - - mlstacks-compatibility-check - sqlite-db-migration-testing - mysql-db-migration-testing - mariadb-db-migration-testing diff --git a/.gitignore b/.gitignore index 1dce76f4b88..45663e284ab 100644 --- a/.gitignore +++ b/.gitignore @@ -193,8 +193,5 @@ examples/feast_feature_store/feast_feature_repo/data/registry.db #generated folder by zenml zenml_tutorial/ -# script for testing -mlstacks_reset.sh - .local/ # PLEASE KEEP THIS LINE AT THE EOF: never include here src/zenml/zen_server/dashboard, since it is affecting release flow diff --git a/.test_durations b/.test_durations index d532c3d853e..43a05501aef 100644 --- a/.test_durations +++ b/.test_durations @@ -1226,18 +1226,6 @@ "tests/unit/utils/test_io_utils.py::test_resolve_relative_path": 0.0019732369999019284, "tests/unit/utils/test_io_utils.py::test_write_file_contents_as_string_fails_with_non_string_types": 0.001999538000063694, "tests/unit/utils/test_io_utils.py::test_write_file_contents_as_string_works": 0.0021266399999149144, - "tests/unit/utils/test_mlstacks_utils.py::test_click_params_to_mlstacks_conversion": 0.11761479299991606, - "tests/unit/utils/test_mlstacks_utils.py::test_component_construction_works_for_component_deploy": 0.05780457900016245, - "tests/unit/utils/test_mlstacks_utils.py::test_component_construction_works_for_stack_deploy": 0.061128635000045506, - "tests/unit/utils/test_mlstacks_utils.py::test_component_flavor_parsing_works": 0.0018454319999818836, - "tests/unit/utils/test_mlstacks_utils.py::test_config_addition_works": 0.03295685900002354, - "tests/unit/utils/test_mlstacks_utils.py::test_extra_config_validation": 0.0014914250000401807, - "tests/unit/utils/test_mlstacks_utils.py::test_get_stack_spec_file_path_fails_when_no_stack": 0.009623762999922292, - "tests/unit/utils/test_mlstacks_utils.py::test_get_stack_spec_file_path_only_works_with_full_name": 0.01112438799998472, - "tests/unit/utils/test_mlstacks_utils.py::test_get_stack_spec_file_path_works": 0.008664245999966624, - "tests/unit/utils/test_mlstacks_utils.py::test_spec_file_exists_works_when_no_file": 0.06172314599996298, - "tests/unit/utils/test_mlstacks_utils.py::test_stack_construction_works_for_stack_deploy": 0.4371748089999983, - "tests/unit/utils/test_mlstacks_utils.py::test_stack_exists_works": 0.0230911900001729, "tests/unit/utils/test_networking_utils.py::test_find_available_port_works": 0.0015159350000431004, "tests/unit/utils/test_networking_utils.py::test_port_available_works": 0.002061952000076417, "tests/unit/utils/test_networking_utils.py::test_port_is_open_on_local_host_works": 0.0015869370001837524, diff --git a/docs/book/component-guide/artifact-stores/gcp.md b/docs/book/component-guide/artifact-stores/gcp.md index d3ba74d10f1..97ed88001e9 100644 --- a/docs/book/component-guide/artifact-stores/gcp.md +++ b/docs/book/component-guide/artifact-stores/gcp.md @@ -30,7 +30,6 @@ or [the ZenML GCP Terraform module](../../how-to/stack-deployment/deploy-a-cloud for a shortcut on how to deploy & register this stack component. 
{% endhint %} - The GCS Artifact Store flavor is provided by the GCP ZenML integration, you need to install it on your local machine to be able to register a GCS Artifact Store and add it to your stack: ```shell @@ -51,16 +50,6 @@ zenml stack register custom_stack -a gs_store ... --set Depending on your use case, however, you may also need to provide additional configuration parameters pertaining to [authentication](gcp.md#authentication-methods) to match your deployment scenario. -#### Infrastructure Deployment - -A GCS Artifact Store can be deployed directly from the ZenML CLI: - -```shell -zenml artifact-store deploy gcs_artifact_store --flavor=gcp --provider=gcp ... -``` - -You can pass other configurations specific to the stack components as key-value arguments. If you don't provide a name, a random one is generated for you. For more information about how to work use the CLI for this, please refer to the [dedicated documentation section](../../how-to/stack-deployment/README.md). - #### Authentication Methods Integrating and using a GCS Artifact Store in your pipelines is not possible without employing some form of authentication. If you're looking for a quick way to get started locally, you can use the _Implicit Authentication_ method. However, the recommended way to authenticate to the GCP cloud platform is through [a GCP Service Connector](../../how-to/auth-management/gcp-service-connector.md). This is particularly useful if you are configuring ZenML stacks that combine the GCS Artifact Store with other remote stack components also running in GCP. diff --git a/docs/book/component-guide/artifact-stores/s3.md b/docs/book/component-guide/artifact-stores/s3.md index aa1b03bec60..432b3f63e1e 100644 --- a/docs/book/component-guide/artifact-stores/s3.md +++ b/docs/book/component-guide/artifact-stores/s3.md @@ -50,15 +50,6 @@ zenml stack register custom_stack -a s3_store ... --set Depending on your use case, however, you may also need to provide additional configuration parameters pertaining to [authentication](s3.md#authentication-methods) or [pass advanced configuration parameters](s3.md#advanced-configuration) to match your S3-compatible service or deployment scenario. -#### Infrastructure Deployment - -An S3 Artifact Store can be deployed directly from the ZenML CLI: - -```shell -zenml artifact-store deploy s3-artifact-store --flavor=s3 --provider=aws ... -``` - -You can pass other configurations specific to the stack components as key-value arguments. If you don't provide a name, a random one is generated for you. For more information about how to work use the CLI for this, please refer to the [dedicated documentation section](../../how-to/stack-deployment/README.md). #### Authentication Methods diff --git a/docs/book/component-guide/container-registries/aws.md b/docs/book/component-guide/container-registries/aws.md index 8781a705dbd..a803460ec9e 100644 --- a/docs/book/component-guide/container-registries/aws.md +++ b/docs/book/component-guide/container-registries/aws.md @@ -49,16 +49,6 @@ To figure out the URI for your registry: * Go [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) and choose the region in which you would like to store your container images. Make sure to choose a nearby region for faster access. * Once you have both these values, fill in the values in this template `.dkr.ecr..amazonaws.com` to get your container registry URI. 
-#### Infrastructure Deployment - -An AWS ECR Container Registry can be deployed directly from the ZenML CLI: - -```shell -zenml container-registry deploy ecr_container_registry --flavor=aws --provider=aws ... -``` - -You can pass other configurations specific to the stack components as key-value arguments. If you don't provide a name, a random one is generated for you. For more information about how to work use the CLI for this, please refer to the dedicated documentation section. - ### How to use it To use the AWS container registry, we need: diff --git a/docs/book/component-guide/container-registries/gcp.md b/docs/book/component-guide/container-registries/gcp.md index 8ecebb920d1..c42b5e9a7e6 100644 --- a/docs/book/component-guide/container-registries/gcp.md +++ b/docs/book/component-guide/container-registries/gcp.md @@ -33,16 +33,6 @@ When using the Google Artifact Registry, you need to: * enable it [here](https://console.cloud.google.com/marketplace/product/google/artifactregistry.googleapis.com) * go [here](https://console.cloud.google.com/artifacts) and create a `Docker` repository. -### Infrastructure Deployment - -A GCP Container Registry can be deployed directly from the ZenML CLI: - -```shell -zenml container-registry deploy gcp_container_registry --flavor=gcp --provider=gcp ... -``` - -You can pass other configurations specific to the stack components as key-value arguments. If you don't provide a name, a random one is generated for you. For more information about how to work use the CLI for this, please refer to the [dedicated documentation section](../../how-to/stack-deployment/deploy-a-stack-using-mlstacks.md). - ## How to find the registry URI When using the Google Artifact Registry, the GCP container registry URI should have the following format: @@ -61,16 +51,6 @@ To figure out the URI for your registry: * Go [here](https://console.cloud.google.com/artifacts) and select the repository that you want to use to store Docker images. If you don't have a repository yet, take a look at the [deployment section](gcp.md#how-to-deploy-it). * On the top, click the copy button to copy the full repository URL. -#### Infrastructure Deployment - -A GCP Container Registry can be deployed directly from the ZenML CLI: - -```shell -zenml container-registry deploy gcp_container_registry --flavor=gcp --provider=gcp ... -``` - -You can pass other configurations specific to the stack components as key-value arguments. If you don't provide a name, a random one is generated for you. For more information about how to work use the CLI for this, please refer to the dedicated documentation section. - ### How to use it To use the GCP container registry, we need: diff --git a/docs/book/component-guide/model-deployers/seldon.md b/docs/book/component-guide/model-deployers/seldon.md index 4f1212743bc..0100268cd71 100644 --- a/docs/book/component-guide/model-deployers/seldon.md +++ b/docs/book/component-guide/model-deployers/seldon.md @@ -50,20 +50,6 @@ Since the Seldon Model Deployer is interacting with the Seldon Core model server In addition to these parameters, the Seldon Core Model Deployer may also require additional configuration to be set up to allow it to authenticate to the remote artifact store or persistent storage service where model artifacts are located. This is covered in the [Managing Seldon Core Authentication](seldon.md#managing-seldon-core-authentication) section. 
-{% hint style="info" %} -Configuring Seldon Core in a Kubernetes cluster can be a complex and error-prone process, so we have provided a set of Terraform-based recipes to quickly provision popular combinations of MLOps tools. More information about these recipes can be found in the [MLOps Stack Recipes](https://github.com/zenml-io/mlstacks). -{% endhint %} - -### Infrastructure Deployment - -The Seldon Model Deployer can be deployed directly from the ZenML CLI: - -```shell -zenml model-deployer deploy seldon_deployer --flavor=seldon --provider= ... -``` - -You can pass other configurations specific to the stack components as key-value arguments. If you don't provide a name, a random one is generated for you. For more information about how to work use the CLI for this, please refer to the [dedicated documentation section](../../how-to/stack-deployment/README.md). - ### Seldon Core Installation Example The following example briefly shows how you can install Seldon in an EKS Kubernetes cluster. It assumes that the EKS cluster itself is already set up and configured with IAM access. For more information or tutorials for other clouds, check out the [official Seldon Core installation instructions](https://github.com/SeldonIO/seldon-core/tree/master/examples/auth#demo-setup). diff --git a/docs/book/component-guide/orchestrators/airflow.md b/docs/book/component-guide/orchestrators/airflow.md index 210fd83ac0b..e11512f901b 100644 --- a/docs/book/component-guide/orchestrators/airflow.md +++ b/docs/book/component-guide/orchestrators/airflow.md @@ -30,9 +30,8 @@ setup is necessary. There are many options to use a deployed Airflow server: -* Use one of [ZenML's Airflow stack recipes](https://github.com/zenml-io/mlstacks). This is the simplest solution to - get ZenML working with Airflow, as the recipe also takes care of additional steps such as installing required Python - dependencies in your Airflow server environment. +* Use [the ZenML GCP Terraform module](../../how-to/stack-deployment/deploy-a-cloud-stack-with-terraform.md) + which includes a [Google Cloud Composer](https://cloud.google.com/composer) component. * Use a managed deployment of Airflow such as [Google Cloud Composer](https://cloud.google.com/composer) , [Amazon MWAA](https://aws.amazon.com/managed-workflows-for-apache-airflow/), or [Astronomer](https://www.astronomer.io/). @@ -40,8 +39,8 @@ There are many options to use a deployed Airflow server: official [Airflow docs](https://airflow.apache.org/docs/apache-airflow/stable/production-deployment.html) for more information. -If you're not using `mlstacks` to deploy Airflow, there are some additional Python packages that you'll need to -install in the Python environment of your Airflow server: +If you're not using the ZenML GCP Terraform module to deploy Airflow, there are some additional Python +packages that you'll need to install in the Python environment of your Airflow server: * `pydantic~=2.7.1`: The Airflow DAG files that ZenML creates for you require Pydantic to parse and validate configuration files. diff --git a/docs/book/component-guide/orchestrators/azureml.md b/docs/book/component-guide/orchestrators/azureml.md index f3b3ef6db70..2ac084e2bf7 100644 --- a/docs/book/component-guide/orchestrators/azureml.md +++ b/docs/book/component-guide/orchestrators/azureml.md @@ -69,8 +69,7 @@ Azure hosting environments and credentials used in local development. of service principals on Azure to allow you to connect your cloud components with proper authentication. 
For this method, you will need to [create a service principal on Azure](https://learn.microsoft.com/en-us/azure/developer/python/sdk/authentication-on-premises-apps?tabs=azure-portal), -assign it the correct permissions and use it to [register a ZenML Azure Service -Connector](https://docs.zenml.io/how-to/auth-management/azure-service-connector). +assign it the correct permissions and use it to [register a ZenML Azure Service Connector](../../how-to/auth-management/azure-service-connector.md). ```bash zenml service-connector register --type azure -i zenml orchestrator connect -c diff --git a/docs/book/component-guide/orchestrators/kubeflow.md b/docs/book/component-guide/orchestrators/kubeflow.md index 2fa0b5140fa..65adf45a4c5 100644 --- a/docs/book/component-guide/orchestrators/kubeflow.md +++ b/docs/book/component-guide/orchestrators/kubeflow.md @@ -88,16 +88,6 @@ If one or more of the deployments are not in the `Running` state, try increasing If you're installing Kubeflow Pipelines manually, make sure the Kubernetes service is called exactly `ml-pipeline`. This is a requirement for ZenML to connect to your Kubeflow Pipelines deployment. {% endhint %} -#### Infrastructure Deployment - -A Kubeflow orchestrator can be deployed directly from the ZenML CLI: - -```shell -zenml orchestrator deploy kubeflow_orchestrator --flavor=kubeflow --provider= ... -``` - -You can pass other configurations specific to the stack components as key-value arguments. If you don't provide a name, a random one is generated for you. For more information about how to work use the CLI for this, please refer to the dedicated documentation section. - ### How to use it To use the Kubeflow orchestrator, we need: diff --git a/docs/book/component-guide/orchestrators/kubernetes.md b/docs/book/component-guide/orchestrators/kubernetes.md index adc14fe52c9..6bebc7800a0 100644 --- a/docs/book/component-guide/orchestrators/kubernetes.md +++ b/docs/book/component-guide/orchestrators/kubernetes.md @@ -28,20 +28,10 @@ You should use the Kubernetes orchestrator if: ### How to deploy it -The Kubernetes orchestrator requires a Kubernetes cluster in order to run. There are many ways to deploy a Kubernetes cluster using different cloud providers or on your custom infrastructure, and we can't possibly cover all of them, but you can check out our cloud guide +The Kubernetes orchestrator requires a Kubernetes cluster in order to run. There are many ways to deploy a Kubernetes cluster using different cloud providers or on your custom infrastructure, and we can't possibly cover all of them, but you can check out our [our cloud guide](../../user-guide/cloud-guide/cloud-guide.md). If the above Kubernetes cluster is deployed remotely on the cloud, then another pre-requisite to use this orchestrator would be to deploy and connect to a [remote ZenML server](../../getting-started/deploying-zenml/README.md). -#### Infrastructure Deployment - -A Kubernetes orchestrator can be deployed directly from the ZenML CLI: - -```shell -zenml orchestrator deploy k8s_orchestrator --flavor=kubernetes --provider= ... -``` - -You can pass other configurations specific to the stack components as key-value arguments. If you don't provide a name, a random one is generated for you. For more information about how to work use the CLI for this, please refer to the dedicated documentation section. 
- ### How to use it To use the Kubernetes orchestrator, we need: diff --git a/docs/book/component-guide/orchestrators/sagemaker.md b/docs/book/component-guide/orchestrators/sagemaker.md index 7cfd623ac3d..e9a7ad7ffb2 100644 --- a/docs/book/component-guide/orchestrators/sagemaker.md +++ b/docs/book/component-guide/orchestrators/sagemaker.md @@ -39,18 +39,6 @@ In order to use a Sagemaker AI orchestrator, you need to first deploy [ZenML to The only other thing necessary to use the ZenML Sagemaker orchestrator is enabling the relevant permissions for your particular role. -In order to quickly enable APIs, and create other resources necessary for to use this integration, we will soon provide a Sagemaker stack recipe via [our `mlstacks` repository](https://github.com/zenml-io/mlstacks), which will help you set up the infrastructure with one click. - -### Infrastructure Deployment - -A Sagemaker orchestrator can be deployed directly from the ZenML CLI: - -```shell -zenml orchestrator deploy sagemaker_orchestrator --flavor=sagemaker --provider=aws ... -``` - -You can pass other configurations specific to the stack components as key-value arguments. If you don't provide a name, a random one is generated for you. For more information about how to work use the CLI for this, please refer to the dedicated documentation section. - ## How to use it To use the Sagemaker orchestrator, we need: diff --git a/docs/book/component-guide/orchestrators/tekton.md b/docs/book/component-guide/orchestrators/tekton.md index 65eae2a5de5..9179301e261 100644 --- a/docs/book/component-guide/orchestrators/tekton.md +++ b/docs/book/component-guide/orchestrators/tekton.md @@ -69,16 +69,6 @@ If one or more of the deployments are not in the `Running` state, try increasing ZenML has only been tested with Tekton Pipelines >=0.38.3 and may not work with previous versions. {% endhint %} -#### Infrastructure Deployment - -A Tekton orchestrator can be deployed directly from the ZenML CLI: - -```shell -zenml orchestrator deploy tekton_orchestrator --flavor=tekton --provider= ... -``` - -You can pass other configurations specific to the stack components as key-value arguments. If you don't provide a name, a random one is generated for you. For more information about how to work use the CLI for this, please refer to the dedicated documentation section. - ### How to use it To use the Tekton orchestrator, we need: diff --git a/docs/book/component-guide/orchestrators/vertex.md b/docs/book/component-guide/orchestrators/vertex.md index 6fdf34a914b..66338fb489f 100644 --- a/docs/book/component-guide/orchestrators/vertex.md +++ b/docs/book/component-guide/orchestrators/vertex.md @@ -35,8 +35,6 @@ In order to use a Vertex AI orchestrator, you need to first deploy [ZenML to the The only other thing necessary to use the ZenML Vertex orchestrator is enabling Vertex-relevant APIs on the Google Cloud project. -In order to quickly enable APIs, and create other resources necessary for using this integration, you can also consider using [mlstacks](https://mlstacks.zenml.io/vertex), which helps you set up the infrastructure with one click. 
- ## How to use it To use the Vertex orchestrator, we need: diff --git a/docs/book/component-guide/step-operators/sagemaker.md b/docs/book/component-guide/step-operators/sagemaker.md index 98e5f61c528..0491290b72b 100644 --- a/docs/book/component-guide/step-operators/sagemaker.md +++ b/docs/book/component-guide/step-operators/sagemaker.md @@ -15,17 +15,7 @@ You should use the SageMaker step operator if: ### How to deploy it -* Create a role in the IAM console that you want the jobs running in SageMaker to assume. This role should at least have the `AmazonS3FullAccess` and `AmazonSageMakerFullAccess` policies applied. Check [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html#sagemaker-roles-create-execution-role) for a guide on how to set up this role. - -#### Infrastructure Deployment - -A Sagemaker step operator can be deployed directly from the ZenML CLI: - -```shell -zenml orchestrator deploy sagemaker_step_operator --flavor=sagemaker --provider=aws ... -``` - -You can pass other configurations specific to the stack components as key-value arguments. If you don't provide a name, a random one is generated for you. For more information about how to work use the CLI for this, please refer to the dedicated documentation section. +Create a role in the IAM console that you want the jobs running in SageMaker to assume. This role should at least have the `AmazonS3FullAccess` and `AmazonSageMakerFullAccess` policies applied. Check [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html#sagemaker-roles-create-execution-role) for a guide on how to set up this role. ### How to use it diff --git a/docs/book/getting-started/deploying-zenml/README.md b/docs/book/getting-started/deploying-zenml/README.md index 27f29f62c43..4115916ec29 100644 --- a/docs/book/getting-started/deploying-zenml/README.md +++ b/docs/book/getting-started/deploying-zenml/README.md @@ -42,7 +42,7 @@ Deploying the ZenML Server is a crucial step towards transitioning to a producti Currently, there are two main options to access a deployed ZenML server: 1. **SaaS:** With [ZenML Pro](../zenml-pro/zenml-pro.md) offering you can utilize a control plane to create ZenML servers, also known as tenants. These tenants are managed and maintained by ZenML's dedicated team, alleviating the burden of server management from your end. Importantly, your data remains securely within your stack, and ZenML's role is primarily to handle tracking of metadata and server maintenance. -2. **Self-hosted Deployment:** Alternatively, you have the ability to deploy ZenML on your own self-hosted environment. This can be achieved through various methods, including using [our CLI](deploy-with-zenml-cli.md), [Docker](../../component-guide/model-registries/model-registries.md), [Helm](deploy-with-helm.md), or [HuggingFace Spaces](deploy-using-huggingface-spaces.md). We also offer our Pro version for self-hosted deployments, so you can use our full paid feature-set while staying fully in control with an airgapped solution on your infrastructure. +2. **Self-hosted Deployment:** Alternatively, you have the ability to deploy ZenML on your own self-hosted environment. This can be achieved through various methods, including using [Docker](../../component-guide/model-registries/model-registries.md), [Helm](deploy-with-helm.md), or [HuggingFace Spaces](deploy-using-huggingface-spaces.md). 
We also offer our Pro version for self-hosted deployments, so you can use our full paid feature-set while staying fully in control with an airgapped solution on your infrastructure. {% hint style="warning" %} Currently the ZenML server supports a legacy and a brand-new version of the dashboard. To use the legacy version which supports stack registration from the dashboard simply set the following environment variable in the deployment environment: `export ZEN_SERVER_USE_LEGACY_DASHBOARD=True`. @@ -54,6 +54,6 @@ Both options offer distinct advantages, allowing you to choose the deployment ap Documentation for the various deployment strategies can be found in the following pages below (in our 'how-to' guides): -
<tr><td><strong>Deploy with ZenML CLI</strong></td><td>Deploying ZenML on cloud using the ZenML CLI.</td><td><a href="deploy-with-zenml-cli.md">deploy-with-zenml-cli.md</a></td></tr>
<tr><td><strong>Deploy with Docker</strong></td><td>Deploying ZenML in a Docker container.</td><td><a href="deploy-with-docker.md">deploy-with-docker.md</a></td></tr>
<tr><td><strong>Deploy with Helm</strong></td><td>Deploying ZenML in a Kubernetes cluster with Helm.</td><td><a href="deploy-with-helm.md">deploy-with-helm.md</a></td></tr>
<tr><td><strong>Deploy with HuggingFace Spaces</strong></td><td>Deploying ZenML to Hugging Face Spaces.</td><td><a href="deploy-with-hugging-face-spaces.md">deploy-with-hugging-face-spaces.md</a></td></tr>
+
<tr><td><strong>Deploy with Docker</strong></td><td>Deploying ZenML in a Docker container.</td><td><a href="deploy-with-docker.md">deploy-with-docker.md</a></td></tr>
<tr><td><strong>Deploy with Helm</strong></td><td>Deploying ZenML in a Kubernetes cluster with Helm.</td><td><a href="deploy-with-helm.md">deploy-with-helm.md</a></td></tr>
<tr><td><strong>Deploy with HuggingFace Spaces</strong></td><td>Deploying ZenML to Hugging Face Spaces.</td><td><a href="deploy-with-hugging-face-spaces.md">deploy-with-hugging-face-spaces.md</a></td></tr>
diff --git a/docs/book/getting-started/deploying-zenml/custom-secret-stores.md b/docs/book/getting-started/deploying-zenml/custom-secret-stores.md new file mode 100644 index 00000000000..dee059b8bd9 --- /dev/null +++ b/docs/book/getting-started/deploying-zenml/custom-secret-stores.md @@ -0,0 +1,102 @@ +--- +description: Learning how to develop a custom secret store. +--- + +# Custom secret stores + +The secrets store acts as the one-stop shop for all the secrets to which your pipeline or stack components might need access. It is responsible for storing, updating and deleting _only the secrets values_ for ZenML secrets, while the ZenML secret metadata is stored in the SQL database. The secrets store interface implemented by all available secrets store back-ends is defined in the `zenml.zen_stores.secrets_stores.secrets_store_interface` core module and looks more or less like this: + +```python +class SecretsStoreInterface(ABC): + """ZenML secrets store interface. + + All ZenML secrets stores must implement the methods in this interface. + """ + + # --------------------------------- + # Initialization and configuration + # --------------------------------- + + @abstractmethod + def _initialize(self) -> None: + """Initialize the secrets store. + + This method is called immediately after the secrets store is created. + It should be used to set up the backend (database, connection etc.). + """ + + # --------- + # Secrets + # --------- + + @abstractmethod + def store_secret_values( + self, + secret_id: UUID, + secret_values: Dict[str, str], + ) -> None: + """Store secret values for a new secret. + + Args: + secret_id: ID of the secret. + secret_values: Values for the secret. + """ + + @abstractmethod + def get_secret_values(self, secret_id: UUID) -> Dict[str, str]: + """Get the secret values for an existing secret. + + Args: + secret_id: ID of the secret. + + Returns: + The secret values. + + Raises: + KeyError: if no secret values for the given ID are stored in the + secrets store. + """ + + @abstractmethod + def update_secret_values( + self, + secret_id: UUID, + secret_values: Dict[str, str], + ) -> None: + """Updates secret values for an existing secret. + + Args: + secret_id: The ID of the secret to be updated. + secret_values: The new secret values. + + Raises: + KeyError: if no secret values for the given ID are stored in the + secrets store. + """ + + @abstractmethod + def delete_secret_values(self, secret_id: UUID) -> None: + """Deletes secret values for an existing secret. + + Args: + secret_id: The ID of the secret. + + Raises: + KeyError: if no secret values for the given ID are stored in the + secrets store. + """ +``` + +{% hint style="info" %} +This is a slimmed-down version of the real interface which aims to highlight the abstraction layer. In order to see the full definition and get the complete docstrings, please check the [SDK docs](https://sdkdocs.zenml.io/latest/core\_code\_docs/core-zen\_stores/#zenml.zen\_stores.secrets\_stores.secrets\_store\_interface.SecretsStoreInterface) . +{% endhint %} + +## Build your own custom secrets store + +If you want to create your own custom secrets store implementation, you can follow the following steps: + +1. Create a class that inherits from the `zenml.zen_stores.secrets_stores.base_secrets_store.BaseSecretsStore` base class and implements the `abstractmethod`s shown in the interface above. Use `SecretsStoreType.CUSTOM` as the `TYPE` value for your secrets store class. +2. 
If you need to provide any configuration, create a class that inherits from the `SecretsStoreConfiguration` class and add your configuration parameters there. Use that as the `CONFIG_TYPE` value for your secrets store class. +3. To configure the ZenML server to use your custom secrets store, make sure your code is available in the container image that is used to run the ZenML server. Then, use environment variables or helm chart values to configure the ZenML server to use your custom secrets store, as covered in the [deployment guide](../README.md). + +
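Putting steps 1 and 2 together, a minimal sketch of a custom implementation could look like the following. This is an illustration only: the backend is a throwaway in-memory dictionary, the `MySecretsStore` and `MySecretsStoreConfiguration` names are made up, and the import locations for `SecretsStoreType` and `SecretsStoreConfiguration` are assumptions that should be checked against the SDK docs for your ZenML version.

```python
from typing import ClassVar, Dict, Type
from uuid import UUID

from zenml.enums import SecretsStoreType  # assumed import location
from zenml.config.secrets_store_config import (  # assumed import location
    SecretsStoreConfiguration,
)
from zenml.zen_stores.secrets_stores.base_secrets_store import BaseSecretsStore

# Toy backend: a process-local dictionary. A real implementation would talk to
# an external service such as a vault or a cloud secrets manager instead.
_IN_MEMORY_VALUES: Dict[UUID, Dict[str, str]] = {}


class MySecretsStoreConfiguration(SecretsStoreConfiguration):
    """Configuration for the hypothetical custom secrets store."""

    # `type` is assumed to be a field on the base configuration class.
    type: SecretsStoreType = SecretsStoreType.CUSTOM
    # Backend-specific settings (endpoint URLs, tokens, ...) would go here.


class MySecretsStore(BaseSecretsStore):
    """Hypothetical secrets store that keeps values in memory."""

    config: MySecretsStoreConfiguration
    TYPE: ClassVar[SecretsStoreType] = SecretsStoreType.CUSTOM
    CONFIG_TYPE: ClassVar[Type[MySecretsStoreConfiguration]] = (
        MySecretsStoreConfiguration
    )

    def _initialize(self) -> None:
        # Set up the backend connection here; nothing to do for a plain dict.
        pass

    def store_secret_values(
        self, secret_id: UUID, secret_values: Dict[str, str]
    ) -> None:
        # Store a copy so later mutations of the input don't leak in.
        _IN_MEMORY_VALUES[secret_id] = dict(secret_values)

    def get_secret_values(self, secret_id: UUID) -> Dict[str, str]:
        if secret_id not in _IN_MEMORY_VALUES:
            raise KeyError(f"No secret values found for ID {secret_id}")
        return dict(_IN_MEMORY_VALUES[secret_id])

    def update_secret_values(
        self, secret_id: UUID, secret_values: Dict[str, str]
    ) -> None:
        if secret_id not in _IN_MEMORY_VALUES:
            raise KeyError(f"No secret values found for ID {secret_id}")
        _IN_MEMORY_VALUES[secret_id] = dict(secret_values)

    def delete_secret_values(self, secret_id: UUID) -> None:
        if secret_id not in _IN_MEMORY_VALUES:
            raise KeyError(f"No secret values found for ID {secret_id}")
        del _IN_MEMORY_VALUES[secret_id]
```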
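For step 3, once the package containing this class is baked into the server container image, the deployment is pointed at it through its configuration. As a hedged illustration of what that can look like with a Docker-based deployment: the environment variable names and the custom image tag below are assumptions, so verify them against the Docker and Helm deployment guides for your ZenML version before relying on them.

```shell
# Illustration only: the variable names and the image tag are assumptions.
docker run -d -p 8080:8080 \
  -e ZENML_SECRETS_STORE_TYPE=custom \
  -e ZENML_SECRETS_STORE_CLASS_PATH=my_package.my_secrets_store.MySecretsStore \
  my-registry/zenml-server-with-my-store:latest
```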
diff --git a/docs/book/getting-started/deploying-zenml/deploy-with-custom-image.md b/docs/book/getting-started/deploying-zenml/deploy-with-custom-image.md index 0c54ba5eaa8..945adde3f21 100644 --- a/docs/book/getting-started/deploying-zenml/deploy-with-custom-image.md +++ b/docs/book/getting-started/deploying-zenml/deploy-with-custom-image.md @@ -56,24 +56,6 @@ If you want to verify your custom image locally, you can follow the [Deploy a cu Next, adjust your preferred deployment strategy to use the custom Docker image you just built. -#### Deploy a custom ZenML image via CLI - -You can deploy your custom image via the `zenml deploy` CLI command by setting the `--config` argument to a custom configuration file that has both `zenmlserver_image_repo` and `zenmlserver_image_tag` set: - -1. Define a custom `config.yaml` based on the [base deployment configuration file](deploy-with-zenml-cli.md#base-configuration-file) and set `zenmlserver_image_repo` and `zenmlserver_image_tag` according to the custom image you built: - - ```yaml - zenmlserver_image_repo: / - zenmlserver_image_tag: - ``` -2. Run `zenml deploy` with the custom config file: - - ```shell - zenml deploy --config=/PATH/TO/FILE - ``` - -See the general [ZenML CLI Deployment Guide](deploy-with-zenml-cli.md) for more information on how to use the `zenml deploy` CLI command and what other options can be configured. - #### Deploy a custom ZenML image via Docker To deploy your custom image via Docker, first familiarize yourself with the general [ZenML Docker Deployment Guide](deploy-with-docker.md). diff --git a/docs/book/getting-started/deploying-zenml/deploy-with-zenml-cli.md b/docs/book/getting-started/deploying-zenml/deploy-with-zenml-cli.md deleted file mode 100644 index 2efb7656baf..00000000000 --- a/docs/book/getting-started/deploying-zenml/deploy-with-zenml-cli.md +++ /dev/null @@ -1,322 +0,0 @@ ---- -description: Deploying ZenML on cloud using the ZenML CLI. ---- - -# Deploy with ZenML CLI - -The easiest and fastest way to get running on the cloud is by using the `deploy` CLI command. It currently only supports deploying to Kubernetes on managed cloud services. You can check the [overview page](./README.md#deploying-a-zenml-server) to learn about other options that you have. - -Before we begin, it will help to understand the [architecture](./README.md) around the ZenML server and the database that it uses. Now, depending on your setup, you may find one of the following scenarios relevant. - -## Option 1: Starting from scratch - -If you don't have an existing Kubernetes cluster, you have the following two options to set it up: - -* Creating it manually using the documentation for your cloud provider. For convenience, here are links for [AWS](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html), [Azure](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-portal?tabs=azure-cli), and [GCP](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-zonal-cluster#before\_you\_begin). -* Using a [stack recipe](../../how-to/stack-deployment/deploy-a-stack-using-mlstacks.md) that sets up a cluster along with other tools that you might need in your cloud stack like artifact stores and secret managers. Take a look at all [available stack recipes](https://github.com/zenml-io/mlstacks) to see if there's something that works for you. 
- -{% hint style="warning" %} -Once you have created your cluster, make sure that you configure your [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) client to talk to it. -{% endhint %} - -You're now ready to deploy ZenML! Run the following command: - -```bash -zenml deploy -``` - -You will be prompted to provide a name for your deployment and details like what cloud provider you want to deploy to — and that's it! It creates the database and any VPCs, permissions, and more that are needed. - -{% hint style="info" %} -In order to be able to run the `deploy` command, you should have your cloud provider's CLI configured locally with permissions to create resources like MySQL databases and networks. -{% endhint %} - -Reasonable defaults are in place for you already and if you wish to configure more settings, take a look at the next scenario that uses a config file. - -## Option 2: Using existing cloud resources - -### Existing Kubernetes cluster - -If you already have an existing cluster without an ingress controller, you can jump straight to the `deploy` command above to get going with the defaults. Please make sure that you have your local `kubectl` configured to talk to your cluster. - -#### Having an existing NGINX Ingress Controller - -The `deploy` command, by default, tries to create an NGINX ingress controller on your cluster. If you already have an existing controller, you can tell ZenML to not re-deploy it through the use of a config file. This file can be found in the [Configuration File Templates](deploy-with-zenml-cli.md#configuration-file-templates) towards the end of this guide. It offers a host of configuration options that you can leverage for advanced use cases. - -* Check if an ingress controller is running on your cluster by running the following command. You should see an entry in the output with the hostname populated. - - ```bash - # change the namespace to any other where - # You might have the controller installed - kubectl get svc -n ingress-nginx - ``` -* Set `create_ingress_controller` to `false`. -* Supply your controller's hostname to the `ingress_controller_hostname` variable. - - > **Note:** The address should not have a trailing `/`. -* You can now run the `deploy` command and pass the config file above, to it. - - ``` - zenml deploy --config=/PATH/TO/FILE - ``` - - > **Note:** To be able to run the deploy command, you should have your cloud provider's CLI configured locally with permissions to create resources like MySQL databases and networks. - -### Existing hosted SQL database - -If you also already have a database that you would want to use with the deployment, you can choose to configure it with the use of the config file. Here, we will demonstrate setting the database. - -* Fill the fields below from the config file with values from your database. - - ```yaml - # The username and password for the database. - database_username: - database_password: - - # The URL of the database to use for the ZenML server. - database_url: - - # The path to the SSL CA certificate to use for the database connection. - database_ssl_ca: - - # The path to the client SSL certificate to use for the database connection. - database_ssl_cert: - - # The path to the client SSL key to use for the database connection. - database_ssl_key: - - # Whether to verify the database server SSL certificate. - database_ssl_verify_server_cert: - ``` -* Run the `deploy` command and pass the config file above to it. 
- - ``` - zenml deploy --config=/PATH/TO/FILE - ``` - - > **Note** To be able to run the deploy command, you should have your cloud provider's CLI configured locally with permissions to create resources like MySQL databases and networks. - -## Configuration file templates - -#### Base configuration file - -Below is the general structure of a config file. Use this as a base and then add any cloud-specific parameters from the sections below. - -
- -General - -```yaml -# Name of the server deployment. -name: - -# The server provider type, one of aws, gcp or azure. -provider: - -# The path to the kubectl config file to use for deployment. -kubectl_config_path: - -# The Kubernetes namespace to deploy the ZenML server to. -namespace: zenmlserver - -# The path to the ZenML server helm chart to use for deployment. -helm_chart: - -# The repository and tag to use for the ZenML server Docker image. -zenmlserver_image_repo: zenmldocker/zenml -zenmlserver_image_tag: latest - -# Whether to deploy an nginx ingress controller as part of the deployment. -create_ingress_controller: true - -# Whether to use TLS for the ingress. -ingress_tls: true - -# Whether to generate self-signed TLS certificates for the ingress. -ingress_tls_generate_certs: true - -# The name of the Kubernetes secret to use for the ingress. -ingress_tls_secret_name: zenml-tls-certs - -# The ingress controller's IP address. The ZenML server will be exposed on a subdomain of this IP. For AWS, if you have a hostname instead, use the following command to get the IP address: `dig +short `. -ingress_controller_ip: - -# Whether to create a SQL database service as part of the recipe. -deploy_db: true - -# The username and password for the database. -database_username: user -database_password: - -# The URL of the database to use for the ZenML server. -database_url: - -# The path to the SSL CA certificate to use for the database connection. -database_ssl_ca: - -# The path to the client SSL certificate to use for the database connection. -database_ssl_cert: - -# The path to the client SSL key to use for the database connection. -database_ssl_key: - -# Whether to verify the database server SSL certificate. -database_ssl_verify_server_cert: true - -# The log level to set the terraform client. Choose one of TRACE, -# DEBUG, INFO, WARN, or ERROR (case insensitive). -log_level: ERROR -``` - -
- -{% hint style="info" %} -Feel free to include only those variables that you want to customize, in your file. For all other variables, the default values (shown above) will be used. -{% endhint %} - -#### Cloud-specific settings - -{% tabs %} -{% tab title="AWS" %} -
# The AWS region to deploy to.
-region: eu-west-1 
-
-# The name of the RDS instance to create
-rds_name: zenmlserver
-
-# Name of RDS database to create.
-db_name: zenmlserver
-
-# Type of RDS database to create.
-db_type: mysql
-
-# Version of RDS database to create.
-db_version: 5.7.38
-
-# Instance class of RDS database to create.
-db_instance_class: db.t3.micro
-
-# Allocated storage of RDS database to create.
-db_allocated_storage: 5
-
- -The `database_username` and `database_password` from the general config is used to set those variables for the AWS RDS instance. -{% endtab %} - -{% tab title="GCP" %} -
# The project in GCP to deploy the server in.
-project_id: 
-
-# The GCP region to deploy to.
-region: europe-west3
-
-# The name of the CloudSQL instance to create.
-cloudsql_name: zenmlserver
-
-# Name of CloudSQL database to create.
-db_name: zenmlserver
-
-# Instance class of CloudSQL database to create.
-db_instance_tier: db-n1-standard-1
-
-# Allocated storage of CloudSQL database, in GB, to create.
-db_disk_size: 10
-
-# Whether or not to enable the Secrets Manager API. Disable this if you
-# don't have ListServices permissions on the project.
-enable_secrets_manager_api: true
-
- -* The `project_id` is required to be set. -* The `database_username` and `database_password` from the general config is used to set those variables for the CloudSQL instance. -* SSL is disabled by default on the database and the option to enable it is coming soon! -{% endtab %} - -{% tab title="Azure" %} -```yaml -# The Azure resource_group to deploy to. -resource_group: zenml - -# The name of the Flexible MySQL instance to create. -db_instance_name: zenmlserver - -# Name of RDS database to create. -db_name: zenmlserver - -# Version of MySQL database to create. -db_version: 5.7 - -# The sku_name for the database resource. -db_sku_name: B_Standard_B1s - -# Allocated storage of MySQL database to create. -db_disk_size: 20 -``` - -The `database_username` and `database_password` from the general config is used to set those variables for the Azure Flexible MySQL server. -{% endtab %} -{% endtabs %} - -## Connecting to deployed ZenML - -Immediately after deployment, the ZenML server needs to be activated before it can be used. The activation process includes creating an initial admin user account and configuring some server settings. You can do this only by visiting the ZenML server URL in your browser and following the on-screen instructions. Connecting your local ZenML client to the server is not possible until the server is properly initialized. - -Once ZenML is deployed, one or multiple users can connect to it with the `zenml connect` command. - -```bash -zenml connect -``` - -{% hint style="info" %} -If no arguments are supplied, ZenML will attempt to connect to the last ZenML server deployed from the local host using the `zenml deploy` command: -{% endhint %} - -In order to connect to a specific ZenML server, you can either pass the configuration as command line arguments or as a YAML file: - -```bash -zenml connect --url=https://zenml.example.com:8080 --no-verify-ssl -``` - -or - -```bash -zenml connect --config=/path/to/zenml_server_config.yaml -``` - -The YAML file should have the following structure when connecting to a ZenML server: - -```yaml -# The URL of the ZenML server -url: - -# Either a boolean, in which case it controls whether the server's TLS -# certificate is verified, or a string, in which case it must be a path -# to a CA certificate bundle to use or the CA bundle value itself -verify_ssl: -``` - -Here is an example of a ZenML server YAML configuration file: - -```yaml -url: https://ac8ef63af203226194a7725ee71d85a-7635928635.us-east-1.elb.amazonaws.com/zenml -verify_ssl: | - -----BEGIN CERTIFICATE----- -... - -----END CERTIFICATE----- -``` - -To disconnect from the current ZenML server and revert to using the local default database, use the following command: - -```bash -zenml disconnect -``` - -## How does it work? - -Here's an architecture diagram that shows how the workflow looks like when you do `zenml deploy`. - -![Running zenml deploy](../../../.gitbook/assets/zenml\_deploy.png) - -The deploy CLI makes use of a "recipe" inside the `zenml-io/zenml` repository to deploy the server on the right cloud. Any configuration that you pass with the CLI, is sent to the recipe as input variables. - -
diff --git a/docs/book/how-to/manage-the-zenml-server/migration-guide/migration-zero-sixty.md b/docs/book/how-to/manage-the-zenml-server/migration-guide/migration-zero-sixty.md index 6ce63337495..1487a47f9ee 100644 --- a/docs/book/how-to/manage-the-zenml-server/migration-guide/migration-zero-sixty.md +++ b/docs/book/how-to/manage-the-zenml-server/migration-guide/migration-zero-sixty.md @@ -56,7 +56,7 @@ is still using `sqlalchemy` v1 and is incompatible with pydantic v2. As a solution, we have removed the dependencies of the `airflow` integration. Now, you can use ZenML to create your Airflow pipelines and use a separate environment to run them with Airflow. You can check the updated docs -[right here](https://docs.zenml.io/stack-components/orchestrators/airflow). +[right here](../../component-guide/orchestrators/airflow.md). ### AWS diff --git a/docs/book/how-to/popular-integrations/kubernetes.md b/docs/book/how-to/popular-integrations/kubernetes.md index d42713d30b3..41a5c5fd1ed 100644 --- a/docs/book/how-to/popular-integrations/kubernetes.md +++ b/docs/book/how-to/popular-integrations/kubernetes.md @@ -23,11 +23,7 @@ To use the Kubernetes Orchestrator, you'll need: ## Deploying the Orchestrator -You can deploy the orchestrator from the ZenML CLI: - -```bash -zenml orchestrator deploy k8s_orchestrator --flavor=kubernetes --provider= -``` +The Kubernetes orchestrator requires a Kubernetes cluster in order to run. There are many ways to deploy a Kubernetes cluster using different cloud providers or on your custom infrastructure, and we can't possibly cover all of them, but you can check out [our cloud guide](../../user-guide/cloud-guide/cloud-guide.md). ## Configuring the Orchestrator diff --git a/docs/book/how-to/stack-deployment/README.md b/docs/book/how-to/stack-deployment/README.md index 6dfaa452c29..9ddc03dafe6 100644 --- a/docs/book/how-to/stack-deployment/README.md +++ b/docs/book/how-to/stack-deployment/README.md @@ -98,11 +98,6 @@ This docs section consists of information that makes it easier to provision, con Description of deploying a cloud stack with Terraform. ./deploy-a-cloud-stack-with-terraform.md - - Deploy stack/components using mlstacks - Deploying an entire stack with ZenML's `mlstacks` package. - ./deploy-a-stack-using-mlstacks.md - Reference secrets in stack configuration Description of referencing secrets in stack configuration. diff --git a/docs/book/how-to/stack-deployment/deploy-a-stack-using-mlstacks.md b/docs/book/how-to/stack-deployment/deploy-a-stack-using-mlstacks.md deleted file mode 100644 index 3e4c7efb75d..00000000000 --- a/docs/book/how-to/stack-deployment/deploy-a-stack-using-mlstacks.md +++ /dev/null @@ -1,233 +0,0 @@ ---- -description: Individually deploying different stack components. ---- - -# ⚒️ Deploying stacks and components using `mlstacks` - -The first step in running your pipelines on remote infrastructure is to deploy all the components that you would need, like an [MLflow tracking server](../../component-guide/experiment-trackers/mlflow.md), -[Kubeflow orchestrator](../../component-guide/orchestrators/kubeflow.md), and more to your cloud. - -This can bring plenty of benefits like scalability, reliability, and collaboration. ZenML eases the path to production by providing a seamless way for all tools to interact with others through the use of abstractions. However, one of the most painful parts of this process, from what we see on our Slack and in general, is the deployment of these stack components. 
- -[`mlstacks`](https://mlstacks.zenml.io/) is a Python package that allows you to quickly spin up MLOps -infrastructure using Terraform. It is designed to be used with -[ZenML](https://zenml.io), but can be used with any MLOps tool or platform. You -can deploy a modular MLOps stack for AWS, GCP or K3D using mlstacks. Each deployment type is designed to offer a great deal of flexibility in configuring the resources while preserving the ease of application through the use of sensible defaults. - -To make even this process easier for our users, we have created the `deploy` command in `zenml`, which allows you to quickly get started with a full-fledged MLOps stack using only a few commands. You can choose to deploy individual stack components through the stack-component CLI or deploy a stack with multiple components together (a tad more manual steps). - -Check out [the full documentation for the mlstacks package](https://mlstacks.zenml.io/) for more information. - -## When should I deploy something using mlstacks? - -{% hint style="info" %} -**MLStacks deploys resources using a Kubernetes cluster, which may be expensive and not for every user. In order to use stacks which are more basic and cheaper on the cloud, read [how to easily register a cloud stack](../../how-to/stack-deployment/register-a-cloud-stack.md) -if you have existing infrastructure, or read [how to deploy a cloud stack in one click](../../how-to/stack-deployment/deploy-a-cloud-stack.md) or [how to deploy a cloud stack with Terraform](../../how-to/stack-deployment/deploy-a-cloud-stack-with-terraform).** - -Or simply try running one of: - -```shell -zenml stack register --provider aws -zenml stack deploy --provider aws -``` -{% endhint %} - -To answer this question, here are some pros and cons in comparison to the stack-component deploy method which can help you choose what works best for you! - -{% tabs %} -{% tab title="😍 Pros" %} -* Offers a lot of flexibility in what you deploy. -* Deploying with `mlstacks` gives you a full MLOps stack as the output. Your - components and stack is automatically imported to ZenML. This saves you the - effort of manually registering all the components. -{% endtab %} - -{% tab title="😥 Cons" %} -* Currently only supports AWS, GCP, and K3D as providers. -* Most stack deployments are Kubernetes-based which might be too heavy for your - needs. -* Not all stack components are supported yet. -{% endtab %} -{% endtabs %} - -The ZenML CLI has special subcommands that allow you to deploy individual stack components as well as whole stacks using MLStacks. These stacks will be useful for you if: - -* You are at the start of your MLOps journey, and would like to explore different tools. -* You are looking for guidelines for production-grade deployments. - -## How does `mlstacks` work? - -MLStacks is built around the concept of a stack specification. A stack specification is a YAML file that describes the stack and includes references to component specification files. A component specification is a YAML file that describes a component. (Currently all deployments of components (in various combinations) must be defined within the context of a stack.) - -ZenML handles the creation of stack specifications for you when you run one of the `deploy` subcommands using the CLI. A valid specification is generated and used by `mlstacks` to deploy your stack using Terraform. 
The Terraform definitions and state are stored in your global configuration directory along with any state files generated while deploying your stack. - -Your configuration directory could be in a number of different places depending on your operating system, but read more about it in the [Click docs](https://click.palletsprojects.com/en/8.1.x/api/#click.get\_app\_dir) to see which location applies to your situation. - -## Installing the mlstacks extra - -To install `mlstacks`, either run `pip install mlstacks` or `pip install "zenml[mlstacks]"` to install it along with ZenML. - -MLStacks uses Terraform on the backend to manage infrastructure. You will need to have Terraform installed. Please visit [the Terraform docs](https://learn.hashicorp.com/tutorials/terraform/install-cli#install-terraform) for installation instructions. - -MLStacks also uses Helm to deploy Kubernetes resources. You will need to have Helm installed. Please visit [the Helm docs](https://helm.sh/docs/intro/install/#from-script) for installation instructions. - -## Deploying a stack - -Deploying an end-to-end stack through the ZenML CLI is only possible with the [deployment wizard which does not use `mlstacks`](../../how-to/stack-deployment/deploy-a-cloud-stack.md). However, you can use `mlstacks` directly to deploy various types of stacks and [import them into ZenML](https://mlstacks.zenml.io/reference/zenml). - -```shell -zenml stack import -f -``` - -## Deploying a stack component - -If you have used ZenML before, you must be familiar with the flow of registering new stack components. It goes something like this: - -```shell -zenml artifact-store register my_store --flavor=s3 --path=s3://my_bucket -``` - -Commands like these assume that you already have the stack component deployed. In this case, it would mean that you must already have a bucket called `my_bucket` on AWS S3 to be able to use this component. - -We took inspiration from this design to build something that feels natural to use and is also sufficiently powerful to take care of the deployment of the respective stack components for you. This is where the \ `deploy` CLI comes in! - -The `deploy` command allows you to deploy individual components of your MLOps stack with a single command 🚀. You can also customize your components easily by passing in flags (more on that later). - -{% hint style="info" %} -To install `mlstacks`, either run `pip install mlstacks` or `pip install "zenml[mlstacks]"` to install it along with ZenML. - -MLStacks uses Terraform on the backend to manage infrastructure. You will need to have Terraform installed. Please visit [the Terraform docs](https://learn.hashicorp.com/tutorials/terraform/install-cli#install-terraform) for installation instructions. - -MLStacks also uses Helm to deploy Kubernetes resources. You will need to have Helm installed. Please visit [the Helm docs](https://helm.sh/docs/intro/install/#from-script) for installation instructions. -{% endhint %} - -For example, to deploy an artifact store on a GCP account, you can run: - -{% code overflow="wrap" %} -```bash -# after installing mlstacks -zenml artifact-store deploy -f gcp -p gcp -r us-east1 -x project_id=zenml my_store -``` -{% endcode %} - -The command above takes in the following parameters: - -* **Name**: The name of the stack component. In this case, it is `my_store`. -* **Flavor:** The flavor of the stack component to deploy. Here, we are deploying an artifact store with the `gcp` flavor. -* **Provider:** The provider to deploy this stack component on. 
Currently, only **GCP, AWS, and K3D** are supported as providers. -* **Region**: The region to deploy the stack component in. -* **Extra Config:** Some components can be customized by the user and these settings are passed as flags to the command. In the example above, we pass the GCP project ID to select what project to deploy the component to. - -Successful execution of this command does the following: - -* It also automatically registers the deployed stack component with your ZenML server, so you don't have to worry about manually configuring components after the deployment! 🤩 - -{% hint style="info" %} -The command currently uses your local credentials for GCP and AWS to provision resources. Integration with your ZenML connectors might be possible soon too! -{% endhint %} - -
- -Want to know what happens in the background? - -The stack component deploy CLI is powered by ZenML's [mlstacks](https://github.com/zenml-io/mlstacks) in the background. This allows you to configure and deploy select stack components. - -Using the values you pass for the cloud, the CLI picks up the right modular recipe to use (one of AWS, GCP, or K3D) and then deploys that recipe with the specific stack component enabled. - -
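For comparison, the manual alternative that this automation saves you is a plain registration against infrastructure you have already provisioned yourself. A minimal sketch, assuming a pre-existing GCS bucket (the store and bucket names below are placeholders):

```bash
# Manual equivalent of what the deploy command automates: register an
# artifact store pointing at a bucket you created yourself beforehand.
zenml artifact-store register my_store --flavor=gcp --path=gs://my_bucket
```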
- -### Destroying a stack component - -Destroying a stack component (i.e. deleting and destroying the underlying -infrastructure) is as easy as deploying one. You can run the following command -to destroy the artifact store we created above: - -```bash -zenml artifact-store destroy -p gcp my_store -``` - -This will destroy the deployed infrastructure and prompt you if you also want to remove and deregister the component from your ZenML server. - -### 🍨 Available flavors for stack components - -Here's a table of all the flavors that can be deployed through the CLI for every stack component. This is a list that will keep on growing and you can also contribute any flavor or stack component that you feel is missing. Refer to the [Contribution page](../../../../CONTRIBUTING.md) for steps on how to do that :smile: - -
- -How does flavor selection work in the background? - -Whenever you pass in a flavor to any stack-component deploy function, the combination of these two parameters is used to construct a variable name in the following format: - -``` -enable__ -``` - -This variable is then passed as input to the underlying modular recipe. If you check the [`variables.tf`](https://github.com/zenml-io/mlstacks/blob/main/gcp-modular/variables.tf) file for a given recipe, you can find all the supported flavor-stack component combinations there. - -
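As a purely illustrative walk-through of that naming scheme (the component type and flavor below are only examples), deploying an `mlflow` experiment tracker translates into the recipe input `enable_experiment_tracker_mlflow`:

```bash
# Illustration only: the CLI builds enable_<stack_component>_<flavor>
# from the component type and the flavor you pass in.
component_type="experiment_tracker"
flavor="mlflow"
echo "enable_${component_type}_${flavor}=true"
# -> enable_experiment_tracker_mlflow=true
```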
- -| Component Type | Flavor(s) | -| ------------------ | ------------------------------------ | -| Artifact Store | s3, gcp, minio | -| Container Registry | aws, gcp | -| Experiment Tracker | mlflow | -| Orchestrator | kubernetes, kubeflow, tekton, vertex | -| MLOps Platform | zenml | -| Model Deployer | seldon | -| Step Operator | sagemaker, vertex | - -#### ✨ Customizing your stack components - -With simplicity, we didn't want to compromise on the flexibility that this deployment method allows. As such, we have added the option to pass configuration specific to the stack components as key-value arguments to the deploy CLI. Here is an assortment of all possible configurations that can be set. - -
- -How do configuration flags work? - -The flags that you pass to the deploy CLI are passed on as-is to the backing modular recipes as input variables. This means that all the flags need to be defined as variables in the respective recipe. - -For example, if you take a look at the [`variables.tf`](https://github.com/zenml-io/mlstacks/blob/main/gcp-modular/variables.tf) file for a modular recipe, like the `gcp-modular` recipe, you can find variables like `mlflow_bucket` that you could potentially pass in. - -Validation for these flags does not exist yet at the CLI level, so you must be careful in naming them while calling `deploy`. - -All these extra configuration options are passed in with the `-x` option. For example, we already saw this in action above when we passed in the GCP project ID to the artifact store deploy command. - -```bash -zenml artifact-store deploy -f gcp -p gcp -r us-east1 -x project_id=zenml my_store -``` - -Simply pass in as many `-x` flags as you want to customize your stack component. - -
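As an illustrative sketch that combines two of the configuration keys mentioned on this page (`project_id` and `mlflow_bucket`; all values below are placeholders), a GCP experiment tracker deployment could pass several `-x` flags at once:

```bash
# Hypothetical example: every -x key must exist as a variable in the
# gcp-modular recipe, otherwise the deployment will reject it.
zenml experiment-tracker deploy my_tracker --flavor=mlflow -p gcp -r us-east1 \
  -x project_id=zenml \
  -x mlflow_bucket=gs://my_bucket
```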
- -**Experiment Trackers** - -You can assign an existing bucket to the MLflow experiment tracker by passing the `-x mlflow_bucket=...` configuration: - -```shell -zenml experiment-tracker deploy mlflow_tracker --flavor=mlflow -p YOUR_DESIRED_PROVIDER -r YOUR_REGION -x mlflow_bucket=gs://my_bucket -``` - -**Artifact Stores** - -For an artifact store, you can pass `bucket_name` as an argument to the command. - -```bash -zenml artifact-store deploy s3_artifact_store --flavor=s3 --provider=aws -r YOUR_REGION -x bucket_name=my_bucket -``` - -**Container Registries** - -For container registries, you can pass the repository name using `repo_name`: - -```bash -zenml container-registry deploy aws_registry --flavor=aws -p aws -r YOUR_REGION -x repo_name=my_repo -``` - -This is only useful for the AWS case since AWS requires a repository to be created before pushing images to it and the deploy command ensures that a repository with the name you provide is created. In case of GCP and other providers, you can choose the repository name at the same time as you are pushing the image via code. This is achieved through setting the `target_repo` attribute of [the `DockerSettings` object](../customize-docker-builds/README.md). - -#### Other configuration - -* In the case of GCP components, it is _required_ that you pass a project ID to the command as extra configuration when you're creating any GCP resource. - -
diff --git a/docs/book/how-to/stack-deployment/troubleshoot-stack-components.md b/docs/book/how-to/stack-deployment/troubleshoot-stack-components.md deleted file mode 100644 index 60bce33d2f3..00000000000 --- a/docs/book/how-to/stack-deployment/troubleshoot-stack-components.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -description: Learn how to troubleshoot Stack Components deployed with ZenML. ---- - -# Troubleshoot stack components - -There are two ways in which you can understand if something has gone wrong while deploying your stack or stack components. - -## Error logs from the CLI - -The CLI will show any errors that the deployment runs into. Most of these would be coming from the underlying terraform library and could range from issues like resources with the same name existing in your cloud to a wrong naming scheme for some resource. - -Most of these are easy to fix and self-explanatory but feel free to ask any questions or doubts you may have to us on the ZenML Slack! 🙋‍ - -## Debugging errors with already deployed components - -Sometimes, an application might fail after an initial successful deployment. This section will cover steps on how to debug failures in such a case, for Kubernetes apps, since they form a majority of all tools deployed with the CLI. - -{% hint style="info" %} -Other components include cloud-specific apps like Vertex AI, Sagemaker, S3 buckets, and more. Information on what has gone wrong with them would be best found on the web console for the respective clouds. -{% endhint %} - -### Getting access to the Kubernetes Cluster - -The first step to figuring out the problem with a deployed Kubernetes app is to get access to the underlying cluster hosting it. When you deploy apps that require a cluster, ZenML creates a cluster for you and this is reused for all subsequent apps that need it. - -{% hint style="info" %} -If you've used the `zenml stack deploy` flow to deploy your components, your local `kubectl` might already have access to the cluster. Check by running the following command: - -``` -kubectl get nodes -``` -{% endhint %} - -#### Stack Component Deploy - -{% tabs %} -{% tab title="AWS" %} -1. Get the name of the deployed cluster.\ - `zenml stack recipe output eks-cluster-name` -2. Figure out the region that the cluster is deployed to. By default, the region is set to `eu-west-1` , which you should use in the next step if you haven't supplied a custom value while creating the cluster. -3. Run the following command.\ - `aws eks update-kubeconfig --name --region ` -{% endtab %} - -{% tab title="GCP" %} -1. Get the name of the deployed cluster.\ - \ - `zenml stack recipe output gke-cluster-name`\\ -2. Figure out the region that the cluster is deployed to. By default, the region is set to `europe-west1`, which you should use in the next step if you haven't supplied a custom value while creating the cluster.\\ -3. Figure out the project that the cluster is deployed to. You must have passed in a project ID while creating a GCP resource for the first time.\\ -4. Run the following command.\ - `gcloud container clusters get-credentials --region --project ` -{% endtab %} - -{% tab title="K3D" %} -{% hint style="info" %} -You may already have your `kubectl` client configured with your cluster. Check by running `kubectl get nodes` before proceeding. -{% endhint %} - -1. Get the name of the deployed cluster.\ - \ - `zenml stack recipe output k3d-cluster-name`\\ -2. 
Set the `KUBECONFIG` env variable to the `kubeconfig` file from the cluster.\ - \ - `export KUBECONFIG=$(k3d kubeconfig get )`\\ -3. You can now use the `kubectl` client to talk to the cluster. -{% endtab %} -{% endtabs %} - -#### Stack Recipe Deploy - -The steps for the stack recipe case should be the same as the ones listed above. The only difference that you need to take into account is the name of the outputs that contain your cluster name and the default regions. - -Each recipe might have its own values and here's how you can ascertain those values. - -* For the cluster name, go into the `outputs.tf` file in the root directory and search for the output that exposes the cluster name. -* For the region, check out the `variables.tf` or the `locals.tf` file for the default value assigned to it. - - -
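Once `kubectl` points at the right cluster, a few standard commands usually narrow the failure down. A minimal debugging sketch (pod and namespace names are placeholders):

```bash
kubectl get pods --all-namespaces                 # locate the pods of the failing app
kubectl describe pod <pod-name> -n <namespace>    # inspect events and restart reasons
kubectl logs <pod-name> -n <namespace>            # read the container logs
```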
diff --git a/docs/book/how-to/trigger-pipelines/use-templates-dashboard.md b/docs/book/how-to/trigger-pipelines/use-templates-dashboard.md index 721c3b419d7..cf1a679b4d9 100644 --- a/docs/book/how-to/trigger-pipelines/use-templates-dashboard.md +++ b/docs/book/how-to/trigger-pipelines/use-templates-dashboard.md @@ -27,9 +27,8 @@ In order to run a template from the dashboard: - You can go to a specific template page and click on `Run Template`. Either way, you will be forwarded to a page where you will see the -`Run Details`. Here, you have the option to upload a `.yaml` [configurations -file](https://docs.zenml.io/how-to/use-configuration-files) or change the -configuration on the go by using our editor. +`Run Details`. Here, you have the option to upload a `.yaml` [configurations file](../../how-to/use-configuration-files/README.md) +or change the configuration on the go by using our editor. ![Run Details](../../.gitbook/assets/run-templates-run-1.png) diff --git a/docs/book/introduction.md b/docs/book/introduction.md index b996c3c06a8..d27c92b1b5f 100644 --- a/docs/book/introduction.md +++ b/docs/book/introduction.md @@ -18,9 +18,6 @@ ZenML enables MLOps infrastructure experts to define, deploy, and manage sophist * **Self-hosted deployment:** ZenML can be deployed on any cloud provider and provides many Terraform-based utility functions to deploy other MLOps tools or even entire MLOps stacks: ```bash - # Deploy ZenML to any cloud - zenml deploy --provider aws - # Connect cloud resources with a simple wizard zenml stack register --provider aws diff --git a/docs/book/reference/how-do-i.md b/docs/book/reference/how-do-i.md index ba0770f2690..56dc4ed59ba 100644 --- a/docs/book/reference/how-do-i.md +++ b/docs/book/reference/how-do-i.md @@ -26,8 +26,6 @@ Check out [our dedicated documentation page](../how-to/configure-python-environm ZenML is designed to be stack-agnostic, so you can use it with any cloud infrastructure or MLOps stack. Each of the documentation pages for stack components explain how to deploy these components on the most popular cloud providers. -We also build and maintain [the `mlstacks` package](https://mlstacks.zenml.io/) and library which offers a dedicated way to spin up infrastructure for your ZenML pipelines. It's fully integrated into ZenML's CLI and is a great way to get started with deploying your infrastructure. ZenML also [publishes and maintains modules on the Terraform Registry](https://registry.terraform.io/namespaces/zenml-io) (which are used by `mlstacks` under the hood) which you can also use as a standalone solution if you are familiar with Terraform. - * **deploy ZenML** on my internal company cluster? Read [the documentation on self-hosted ZenML deployments](../getting-started/deploying-zenml/README.md) in which several options are presented. 
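One of the options presented in those docs is the plain Docker image. A rough local sketch, assuming the `zenmldocker/zenml-server` image published for the ZenML server; production setups would add a proper database and TLS as described there:

```bash
# Rough sketch of the Docker-based self-hosting option
docker run -it -d -p 8080:8080 --name zenml zenmldocker/zenml-server
```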
diff --git a/docs/book/toc.md b/docs/book/toc.md index cfd639443f4..1a71b092316 100644 --- a/docs/book/toc.md +++ b/docs/book/toc.md @@ -6,13 +6,12 @@ * [🧙 Installation](getting-started/installation.md) * [🪄 Core concepts](getting-started/core-concepts.md) * [🤔 Deploying ZenML](getting-started/deploying-zenml/README.md) - * [Deploy with ZenML CLI](getting-started/deploying-zenml/deploy-with-zenml-cli.md) * [Deploy with Docker](getting-started/deploying-zenml/deploy-with-docker.md) * [Deploy with Helm](getting-started/deploying-zenml/deploy-with-helm.md) * [Deploy using HuggingFace Spaces](getting-started/deploying-zenml/deploy-using-huggingface-spaces.md) * [Deploy with custom images](getting-started/deploying-zenml/deploy-with-custom-image.md) * [Secret management](getting-started/deploying-zenml/secret-management.md) - * [Custom secret stores](getting-started/deploying-zenml/manage-the-deployed-services/custom-secret-stores.md) + * [Custom secret stores](getting-started/deploying-zenml/custom-secret-stores.md) * [☁️ ZenML Pro](getting-started/zenml-pro/README.md) * [System Architectures](getting-started/zenml-pro/system-architectures.md) * [ZenML SaaS](getting-started/zenml-pro/zenml-pro.md) @@ -125,10 +124,8 @@ * [Deploy a cloud stack with ZenML](how-to/stack-deployment/deploy-a-cloud-stack.md) * [Deploy a cloud stack with Terraform](how-to/stack-deployment/deploy-a-cloud-stack-with-terraform.md) * [Register a cloud stack](how-to/stack-deployment/register-a-cloud-stack.md) - * [Deploy stack/components using mlstacks](how-to/stack-deployment/deploy-a-stack-using-mlstacks.md) * [Reference secrets in stack configuration](how-to/stack-deployment/reference-secrets-in-stack-configuration.md) * [Implement a custom stack component](how-to/stack-deployment/implement-a-custom-stack-component.md) - * [Troubleshoot stack components](how-to/stack-deployment/troubleshoot-stack-components.md) * [🚜 Train with GPUs](how-to/training-with-gpus/training-with-gpus.md) * [Distributed Training with 🤗 Accelerate](how-to/training-with-gpus/accelerate-distributed-training.md) * [🌲 Control logging](how-to/control-logging/README.md) diff --git a/docs/book/user-guide/cloud-guide/cloud-guide.md b/docs/book/user-guide/cloud-guide/cloud-guide.md index ef1191fd25e..491b8319c0e 100644 --- a/docs/book/user-guide/cloud-guide/cloud-guide.md +++ b/docs/book/user-guide/cloud-guide/cloud-guide.md @@ -10,7 +10,8 @@ A `stack` is the configuration of tools and infrastructure that your pipelines c

ZenML is the translation layer that allows your code to run on any of your stacks

-Note, this guide focuses on the *registering* a stack, meaning that the resources required to run pipelines have already been *provisioned*. In order to provision the underlying infrastructure, you can either do so manually, via a IaC tool like Terraform, or use ZenML's sister project [MLStacks](https://mlstacks.zenml.io/). +Note that this guide focuses on *registering* a stack, meaning that the resources required to run pipelines have already been *provisioned*. To provision the underlying infrastructure, you can do so manually, use the [in-browser stack deployment wizard](../../how-to/stack-deployment/deploy-a-cloud-stack.md), use the [stack registration wizard](../../how-to/stack-deployment/register-a-cloud-stack.md), +or use [the ZenML Terraform modules](../../how-to/stack-deployment/deploy-a-cloud-stack-with-terraform.md).
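For quick reference, the two wizard-based routes above are started from the CLI exactly as shown elsewhere in this guide (swap `aws` for `gcp` or `azure` as needed):

```bash
# Register a stack on top of existing cloud resources (stack registration wizard)
zenml stack register --provider aws

# Provision new cloud resources and register them as a stack (in-browser deployment wizard)
zenml stack deploy --provider aws
```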
diff --git a/docs/book/user-guide/llmops-guide/evaluation/retrieval.md b/docs/book/user-guide/llmops-guide/evaluation/retrieval.md index d03d6c5e83b..524b6edc053 100644 --- a/docs/book/user-guide/llmops-guide/evaluation/retrieval.md +++ b/docs/book/user-guide/llmops-guide/evaluation/retrieval.md @@ -34,9 +34,7 @@ could be improved. I looked in our documentation to find some examples where the information could only be found in a single page and then wrote some queries that would require the retrieval component to find that page. For example, the query "How do I get going with the Label Studio integration? What are the first -steps?" would require the retrieval component to find [the Label Studio -integration -page](https://docs.zenml.io/stacks-and-components/component-guide/annotators/label-studio). +steps?" would require the retrieval component to find [the Label Studio integration page](../../../component-guide/annotators/label-studio.md). Some of the other examples used are: | Question | URL Ending | diff --git a/docs/book/user-guide/llmops-guide/finetuning-embeddings/evaluating-finetuned-embeddings.md b/docs/book/user-guide/llmops-guide/finetuning-embeddings/evaluating-finetuned-embeddings.md index 89851016aaa..4229e1612f7 100644 --- a/docs/book/user-guide/llmops-guide/finetuning-embeddings/evaluating-finetuned-embeddings.md +++ b/docs/book/user-guide/llmops-guide/finetuning-embeddings/evaluating-finetuned-embeddings.md @@ -55,7 +55,7 @@ def evaluate_base_model( We log the results for our core Matryoshka dimensions as model metadata to ZenML within our evaluation step. This will allow us to inspect these results from -within [the Model Control Plane](https://docs.zenml.io/how-to/use-the-model-control-plane) (see +within [the Model Control Plane](../../../how-to/use-the-model-control-plane/README.md) (see below for more details). Our results come in the form of a dictionary of string keys and float values which will, like all step inputs and outputs, be versioned, tracked and saved in your artifact store. @@ -107,8 +107,7 @@ well as compare the actual values of our evals or inspect the hardware or hyperparameters used for training. This one-stop-shop interface is available on ZenML Pro and you can learn more -about it in the [Model Control Plane -documentation](https://docs.zenml.io/how-to/use-the-model-control-plane). +about it in the [Model Control Plane documentation](../../../how-to/use-the-model-control-plane/README.md). ## Next Steps diff --git a/docs/book/user-guide/production-guide/ci-cd.md b/docs/book/user-guide/production-guide/ci-cd.md index 14855617f95..99f6243b16c 100644 --- a/docs/book/user-guide/production-guide/ci-cd.md +++ b/docs/book/user-guide/production-guide/ci-cd.md @@ -30,7 +30,7 @@ template: you can fork it and easily adapt it to your own MLOps stack, infrastru ### Configure an API Key in ZenML In order to facilitate machine-to-machine connection you need to create an API key within ZenML. Learn more about those -[here](https://docs.zenml.io/how-to/connecting-to-zenml/connect-with-a-service-account). +[here](../../how-to/connecting-to-zenml/connect-with-a-service-account.md). 
```bash zenml service-account create github_action_api_key diff --git a/docs/book/user-guide/production-guide/deploying-zenml.md b/docs/book/user-guide/production-guide/deploying-zenml.md index 450bbf119a5..2a60e71806f 100644 --- a/docs/book/user-guide/production-guide/deploying-zenml.md +++ b/docs/book/user-guide/production-guide/deploying-zenml.md @@ -26,23 +26,7 @@ On top of the one-click SaaS experience, ZenML Pro also comes built-in with addi ### Option 2: Self-host ZenML on your cloud provider -As ZenML is open source, it is easy to [self-host it](../../getting-started/deploying-zenml/README.md). There is even a [ZenML CLI](../../getting-started/deploying-zenml/deploy-with-zenml-cli.md) one-liner that deploys ZenML on a Kubernetes cluster, abstracting away all the infrastructure complexity. If you don't have an existing Kubernetes cluster, you can create it manually using the documentation for your cloud provider. For convenience, here are links for [AWS](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html), [Azure](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-portal?tabs=azure-cli), and [GCP](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-zonal-cluster#before\_you\_begin). - -{% hint style="warning" %} -Once you have created your cluster, make sure that you configure your [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) client to connect to it. -{% endhint %} - -You're now ready to deploy ZenML! Run the following command: - -```bash -zenml deploy -``` - -You will be prompted to provide a name for your deployment and details like what cloud provider you want to deploy to, in addition to the username, password, and email you want to set for the default user — and that's it! It creates the database and any VPCs, permissions, and more that are needed. - -{% hint style="info" %} -In order to be able to run the `deploy` command, you should have your cloud provider's CLI configured locally with permissions to create resources like MySQL databases and networks. -{% endhint %} +As ZenML is open source, it is easy to [self-host it](../../getting-started/deploying-zenml/README.md) in a Kubernetes cluster. If you don't have an existing Kubernetes cluster, you can create it manually using the documentation for your cloud provider. For convenience, here are links for [AWS](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html), [Azure](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-portal?tabs=azure-cli), and [GCP](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-zonal-cluster#before\_you\_begin). To learn more about different options for [deploying ZenML, visit the deployment documentation](../../getting-started/deploying-zenml/README.md). 
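Whichever deployment option you pick, the final step is pointing your local client at the new server. A minimal sketch (the URL is a placeholder for wherever your deployment ends up):

```bash
# Connect the local ZenML client to the self-hosted server
zenml connect --url https://zenml.example.com
```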
diff --git a/pyproject.toml b/pyproject.toml index 96ba82f52ce..c569b4c6ae0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -79,9 +79,6 @@ copier = { version = ">=8.1.0", optional = true } pyyaml-include = { version = "<2.0", optional = true } jinja2-time = { version = "^0.2.0", optional = true } -# Optional terraform dependency for stack recipes and ZenServer deployments -python-terraform = { version = "^0.10.1", optional = true } - # Optional dependencies for the AWS secrets store boto3 = { version = ">=1.16.0", optional = true } @@ -181,9 +178,6 @@ types-termcolor = { version = "^1.1.2", optional = true } types-psutil = { version = "^5.8.13", optional = true } types-passlib = { version = "^1.7.7", optional = true } -# mlstacks dependencies -mlstacks = { version = "0.10.0", optional = true } - [tool.poetry.extras] server = [ "fastapi", @@ -276,7 +270,6 @@ dev = [ "types-psutil", "types-passlib", ] -mlstacks = ["mlstacks"] [build-system] requires = ["poetry-core"] @@ -469,7 +462,6 @@ module = [ "datasets.*", "pyngrok.*", "cloudpickle.*", - "mlstacks.*", "matplotlib.*", "IPython.*", "huggingface_hub.*", diff --git a/scripts/install-zenml-dev.sh b/scripts/install-zenml-dev.sh index d33e36e5e85..c3ca2f61f57 100755 --- a/scripts/install-zenml-dev.sh +++ b/scripts/install-zenml-dev.sh @@ -28,7 +28,7 @@ parse_args () { install_zenml() { # install ZenML in editable mode - uv pip install $PIP_ARGS -e ".[server,templates,terraform,secrets-aws,secrets-gcp,secrets-azure,secrets-hashicorp,s3fs,gcsfs,adlfs,dev,mlstacks,connectors-aws,connectors-gcp,connectors-azure,azureml,sagemaker,vertex]" + uv pip install $PIP_ARGS -e ".[server,templates,terraform,secrets-aws,secrets-gcp,secrets-azure,secrets-hashicorp,s3fs,gcsfs,adlfs,dev,connectors-aws,connectors-gcp,connectors-azure,azureml,sagemaker,vertex]" } install_integrations() { diff --git a/src/zenml/analytics/enums.py b/src/zenml/analytics/enums.py index d599bbf527f..8fd26d3c7be 100644 --- a/src/zenml/analytics/enums.py +++ b/src/zenml/analytics/enums.py @@ -80,24 +80,11 @@ class AnalyticsEvent(str, Enum): # Service account and API keys CREATED_SERVICE_ACCOUNT = "Service account created" - # Stack recipes - RUN_STACK_RECIPE = "Stack recipe ran" - DEPLOY_STACK = "Stack deployed" - DESTROY_STACK = "Stack destroyed" - - # Stack component deploy - DEPLOY_STACK_COMPONENT = "Stack component deployed" - DESTROY_STACK_COMPONENT = "Stack component destroyed" - # Full stack infrastructure deployment DEPLOY_FULL_STACK = "Full stack deployed" # Tag created CREATED_TAG = "Tag created" - # ZenML server events - ZENML_SERVER_DEPLOYED = "ZenML server deployed" - ZENML_SERVER_DESTROYED = "ZenML server destroyed" - # Server Settings SERVER_SETTINGS_UPDATED = "Server Settings Updated" diff --git a/src/zenml/cli/__init__.py b/src/zenml/cli/__init__.py index fd7c2267d67..4576d5f36f7 100644 --- a/src/zenml/cli/__init__.py +++ b/src/zenml/cli/__init__.py @@ -1870,13 +1870,7 @@ def my_pipeline(...): The ZenML client can be [configured to connect to a remote database or ZenML server](https://docs.zenml.io/how-to/connecting-to-zenml) -with the `zenml connect` command. If no arguments are supplied, ZenML -will attempt to connect to the last ZenML server deployed from the local host -using the 'zenml deploy' command: - -```bash -zenml connect -``` +with the `zenml connect` command. 
To connect to a ZenML server, you can either pass the configuration as command line arguments or as a YAML file: @@ -2517,37 +2511,6 @@ def my_pipeline(...): ```bash zenml logging set-verbosity DEBUG ``` - -Deploying ZenML to the cloud ----------------------------- - -The ZenML CLI provides a simple way to deploy ZenML to the cloud. Simply run - -```bash -zenml deploy -``` - -You will be prompted to provide a name for your deployment and details like what -cloud provider you want to deploy to — and that's it! It creates the -database and any VPCs, permissions, and more that are needed. - -In order to be able to run the deploy command, you should have your cloud -provider's CLI configured locally with permissions to create resources like -MySQL databases and networks. - -Deploying Stack Components --------------------------- - -Stack components can be deployed directly via the CLI. You can use the `deploy` -subcommand for this. For example, you could deploy a GCP artifact store using -the following command: - -```shell -zenml artifact-store deploy -f gcp -p gcp -r us-east1 -x project_id=zenml-core basic_gcp_artifact_store -``` - -For full documentation on this functionality, please refer to [the dedicated -documentation on stack component deploy](https://docs.zenml.io/how-to/stack-deployment/deploy-a-stack-component). """ from zenml.cli.version import * # noqa @@ -2570,7 +2533,6 @@ def my_pipeline(...): from zenml.cli.service_connectors import * # noqa from zenml.cli.stack import * # noqa from zenml.cli.stack_components import * # noqa -from zenml.cli.stack_recipes import * # noqa from zenml.cli.user_management import * # noqa from zenml.cli.workspace import * # noqa from zenml.cli.tag import * # noqa diff --git a/src/zenml/cli/base.py b/src/zenml/cli/base.py index ecbb01d3607..88570f17bf0 100644 --- a/src/zenml/cli/base.py +++ b/src/zenml/cli/base.py @@ -50,7 +50,7 @@ from zenml.logger import get_logger from zenml.utils.io_utils import copy_dir, get_global_config_directory from zenml.utils.yaml_utils import write_yaml -from zenml.zen_server.utils import get_active_deployment +from zenml.zen_server.utils import get_local_server logger = get_logger(__name__) # WT_SESSION is a Windows Terminal specific environment variable. 
If it @@ -358,7 +358,7 @@ def clean(yes: bool = False, local: bool = False) -> None: ) if yes or confirm: - server = get_active_deployment(local=True) + server = get_local_server() if server: from zenml.zen_server.deploy.deployer import ServerDeployer diff --git a/src/zenml/cli/server.py b/src/zenml/cli/server.py index aedf40ce92b..3d38f77e20b 100644 --- a/src/zenml/cli/server.py +++ b/src/zenml/cli/server.py @@ -23,8 +23,6 @@ from rich.errors import MarkupError import zenml -from zenml.analytics.enums import AnalyticsEvent -from zenml.analytics.utils import track_handler from zenml.cli import utils as cli_utils from zenml.cli.cli import cli from zenml.cli.web_login import web_login @@ -35,8 +33,8 @@ from zenml.enums import ServerProviderType, StoreType from zenml.exceptions import AuthorizationException, IllegalOperationError from zenml.logger import get_logger -from zenml.utils import terraform_utils, yaml_utils -from zenml.zen_server.utils import get_active_deployment +from zenml.utils import yaml_utils +from zenml.zen_server.utils import get_local_server logger = get_logger(__name__) @@ -181,7 +179,7 @@ def up( deployer = ServerDeployer() - server = get_active_deployment(local=True) + server = get_local_server() if server and server.config.provider != provider: deployer.remove_server(LOCAL_ZENML_SERVER_NAME) @@ -266,7 +264,7 @@ def show(ngrok_token: Optional[str] = None) -> None: @cli.command("down", help="Shut down the local ZenML dashboard.") def down() -> None: """Shut down the local ZenML dashboard.""" - server = get_active_deployment(local=True) + server = get_local_server() if not server: cli_utils.declare("The local ZenML dashboard is not running.") @@ -284,180 +282,6 @@ def down() -> None: gc.set_default_store() -@cli.command("deploy", help="Deploy ZenML in the cloud.") -@click.option( - "--provider", - "-p", - type=click.Choice( - [ - ServerProviderType.AWS.value, - ServerProviderType.GCP.value, - ServerProviderType.AZURE.value, - ], - case_sensitive=True, - ), - default=None, - help="Server deployment provider.", -) -@click.option( - "--name", - type=str, - help="A name for the ZenML server deployment. This is used as a prefix for " - "the names of deployed resources, such as database services and Kubernetes " - "resources.", -) -@click.option( - "--timeout", - "-t", - type=click.INT, - default=None, - help="Time in seconds to wait for the server to be deployed.", -) -@click.option( - "--config", - help="Use a YAML or JSON configuration or configuration file.", - required=False, - type=str, -) -@click.option( - "--gcp-project-id", - help="The project in GCP to deploy the server to. ", - required=False, - type=str, -) -def deploy( - provider: Optional[str] = None, - name: Optional[str] = None, - timeout: Optional[int] = None, - config: Optional[str] = None, - gcp_project_id: Optional[str] = None, -) -> None: - """Deploy the ZenML server in a cloud provider. - - Args: - name: Name for the ZenML server deployment. - provider: ZenML server provider name. - timeout: Time in seconds to wait for the server to start. - config: A YAML or JSON configuration or configuration file to use. - gcp_project_id: The project in GCP to deploy the server to. 
- """ - with track_handler( - event=AnalyticsEvent.ZENML_SERVER_DEPLOYED - ) as analytics_handler: - try: - terraform_utils.verify_terraform_installation() - except RuntimeError as e: - cli_utils.error(str(e)) - - config_dict: Dict[str, Any] = {} - - if config: - if os.path.isfile(config): - config_dict = yaml_utils.read_yaml(config) - else: - config_dict = yaml.safe_load(config) - if not isinstance(config_dict, dict): - cli_utils.error( - "The configuration argument must be JSON/YAML content or " - "point to a valid configuration file." - ) - - name = config_dict.get("name", name) - provider = config_dict.get("provider", provider) - - if not name: - name = click.prompt( - "ZenML server name (used as a prefix for the names of deployed " - "resources)", - default="zenml", - ) - config_dict["name"] = name - - if not provider: - provider = click.prompt( - "ZenML server provider", - type=click.Choice( - [ - ServerProviderType.AWS.value, - ServerProviderType.GCP.value, - ServerProviderType.AZURE.value, - ], - case_sensitive=True, - ), - default=ServerProviderType.AWS.value, - ) - config_dict["provider"] = provider - - if provider == ServerProviderType.GCP.value: - if "project_id" not in config_dict: - if not gcp_project_id: - gcp_project_id = click.prompt( - "GCP project ID", - ) - config_dict["project_id"] = gcp_project_id - - from zenml.zen_server.deploy.deployment import ServerDeploymentConfig - - server_config = ServerDeploymentConfig.model_validate(config_dict) - - from zenml.zen_server.deploy.deployer import ServerDeployer - - deployer = ServerDeployer() - - server = get_active_deployment(local=False) - if server: - if server.config.provider != provider: - cli_utils.error( - "ZenML is already deployed using a different provider " - f"({server.config.provider}). Please tear down the " - "existing deployment by running `zenml destroy` before " - "deploying a new one." - ) - - if server.config.name != name: - cli_utils.error( - f"An existing deployment with a different name " - f"'{server.config.name}' already exists. Please tear down " - f"the existing deployment by running `zenml destroy` " - f"before deploying a new one." - ) - - server = deployer.deploy_server(server_config, timeout=timeout) - - metadata = { - "server_deployment": str(server.config.provider), - } - - analytics_handler.metadata = metadata - - if server.status and server.status.url: - cli_utils.declare( - f"ZenML server '{name}' running at '{server.status.url}'. To " - "connect to the server, run `zenml connect --url " - f"{server.status.url}`." - ) - - -@cli.command( - "destroy", help="Tear down and clean up the cloud ZenML deployment." -) -def destroy() -> None: - """Tear down and clean up a cloud ZenML deployment.""" - server = get_active_deployment(local=False) - if not server: - cli_utils.declare("No cloud ZenML server has been deployed.") - return - - from zenml.zen_server.deploy.deployer import ServerDeployer - - deployer = ServerDeployer() - deployer.remove_server(server.config.name) - - cli_utils.declare( - "The ZenML server has been torn down and all resources removed." - ) - - @cli.command( "status", help="Show information about the current configuration." 
) @@ -497,18 +321,11 @@ def status() -> None: f"Local store files are located at: '{gc.local_stores_path}'" ) - server = get_active_deployment(local=True) + server = get_local_server() if server: cli_utils.declare("The status of the local dashboard:") cli_utils.print_server_deployment(server) - server = get_active_deployment(local=False) - if server: - cli_utils.declare( - "The status of the cloud ZenML server deployed from this host:" - ) - cli_utils.print_server_deployment(server) - @cli.command( "connect", @@ -661,7 +478,7 @@ def connect( # Raise an error if a local server is running when trying to connect to # another server - active_deployment = get_active_deployment(local=True) + active_deployment = get_local_server() if ( active_deployment and active_deployment.status @@ -701,24 +518,6 @@ def connect( api_key = api_key or store_dict.get("api_key") verify_ssl = store_dict.get("verify_ssl", verify_ssl) - elif url is None: - server = get_active_deployment(local=False) - - if server is None or not server.status or not server.status.url: - cli_utils.warning( - "Running `zenml connect` without arguments can only be used to " - "connect to a ZenML server previously deployed from this host " - "with `zenml deploy`, but no such active deployment was found. " - "Please use the `--url` or `--config` command line arguments " - "to configure how to connect to a remote third party ZenML " - "server. Alternatively, call `zenml up` to start the ZenML " - "dashboard locally." - ) - return - url = server.status.url - if server.status.ca_crt: - verify_ssl = server.status.ca_crt - if not url: url = click.prompt("ZenML server URL", type=str) else: @@ -792,12 +591,7 @@ def disconnect_server() -> None: cli_utils.declare("Restored default store configuration.") -@cli.command("logs", help="Show the logs for the local or cloud ZenML server.") -@click.option( - "--local", - is_flag=True, - help="Show the logs for the local ZenML server.", -) +@cli.command("logs", help="Show the logs for the local ZenML server.") @click.option( "--follow", "-f", @@ -818,7 +612,6 @@ def disconnect_server() -> None: help="Show raw log contents (don't pretty-print logs).", ) def logs( - local: bool = False, follow: bool = False, raw: bool = False, tail: Optional[int] = None, @@ -826,17 +619,11 @@ def logs( """Display the logs for a ZenML server. Args: - local: Whether to show the logs for the local ZenML server. follow: Continue to output new log data as it becomes available. tail: Only show the last NUM lines of log output. raw: Show raw log contents (don't pretty-print logs). """ - server = get_active_deployment(local=True) - if not local: - remote_server = get_active_deployment(local=False) - if remote_server is not None: - server = remote_server - + server = get_local_server() if server is None: cli_utils.error( "The local ZenML dashboard is not running. 
Please call `zenml " diff --git a/src/zenml/cli/stack.py b/src/zenml/cli/stack.py index 0d45df07f20..48c9e384fba 100644 --- a/src/zenml/cli/stack.py +++ b/src/zenml/cli/stack.py @@ -14,12 +14,10 @@ """CLI for manipulating ZenML local and global config file.""" import getpass -import os import re import time import webbrowser from datetime import datetime -from pathlib import Path from typing import ( TYPE_CHECKING, Any, @@ -46,23 +44,14 @@ from zenml.cli.text_utils import OldSchoolMarkdownHeading from zenml.cli.utils import ( _component_display_name, - confirmation, - declare, - error, is_sorted_or_filtered, list_options, print_model_url, print_page_info, print_stacks_table, - verify_mlstacks_prerequisites_installation, ) from zenml.client import Client from zenml.console import console -from zenml.constants import ( - ALPHA_MESSAGE, - MLSTACKS_SUPPORTED_STACK_COMPONENTS, - STACK_RECIPE_MODULAR_RECIPES, -) from zenml.enums import ( CliCategories, StackComponentType, @@ -71,7 +60,6 @@ from zenml.exceptions import ( IllegalOperationError, ) -from zenml.io.fileio import rmtree from zenml.logger import get_logger from zenml.models import ( ComponentInfo, @@ -88,16 +76,6 @@ get_resources_options_from_resource_model_for_full_stack, ) from zenml.utils.dashboard_utils import get_component_url, get_stack_url -from zenml.utils.io_utils import create_dir_recursive_if_not_exists -from zenml.utils.mlstacks_utils import ( - convert_click_params_to_mlstacks_primitives, - convert_mlstacks_primitives_to_dicts, - deploy_mlstacks_stack, - get_stack_spec_file_path, - stack_exists, - stack_spec_exists, - verify_spec_and_tf_files_exist, -) from zenml.utils.yaml_utils import read_yaml, write_yaml if TYPE_CHECKING: @@ -989,21 +967,11 @@ def list_stacks(ctx: click.Context, **kwargs: Any) -> None: type=click.STRING, required=False, ) -@click.option( - "--outputs", - "-o", - is_flag=True, - default=False, - help="Include the outputs from mlstacks deployments.", -) -def describe_stack( - stack_name_or_id: Optional[str] = None, outputs: bool = False -) -> None: +def describe_stack(stack_name_or_id: Optional[str] = None) -> None: """Show details about a named stack or the active stack. Args: stack_name_or_id: Name of the stack to describe. - outputs: Include the outputs from mlstacks deployments. """ client = Client() @@ -1019,8 +987,6 @@ def describe_stack( stack=stack_, active=stack_.id == client.active_stack_model.id, ) - if outputs: - cli_utils.print_stack_outputs(stack_) print_model_url(get_stack_url(stack_)) @@ -1441,105 +1407,6 @@ def register_secrets( client.update_secret(secret_name, add_or_update_values=secret_values) -def _get_deployment_params_interactively( - click_params: Dict[str, Any], -) -> Dict[str, Any]: - """Get deployment values from command line arguments. - - Args: - click_params: Required and pre-existing values. - - Returns: - Full deployment arguments. 
- """ - deployment_values = { - "provider": click_params["provider"], - "stack_name": click_params["stack_name"], - "region": click_params["region"], - } - for component_type in MLSTACKS_SUPPORTED_STACK_COMPONENTS: - verify_mlstacks_prerequisites_installation() - from mlstacks.constants import ALLOWED_FLAVORS - - if ( - click.prompt( - f"Enable {component_type}?", - type=click.Choice(["y", "n"]), - default="n", - ) - == "y" - ): - component_flavor = click.prompt( - f" Enter {component_type} flavor", - type=click.Choice(ALLOWED_FLAVORS[component_type]), - ) - deployment_values[component_type] = component_flavor - - if ( - click.prompt( - "Deploy using debug_mode?", - type=click.Choice(["y", "n"]), - default="n", - ) - == "y" - ): - deployment_values["debug_mode"] = True - - extra_config = [] - # use click.prompt to populate extra_config until someone just hits enter - while True: - declare( - "\nAdd to extra_config for stack deployment -->\n", - bold=True, - ) - key = click.prompt( - "Enter `extra_config` key or hit enter to skip", - type=str, - default="", - ) - if key == "": - break - value = click.prompt( - f"Enter value for '{key}'", - type=str, - ) - extra_config.append(f"{key}={value}") - - # get mandatory GCP project_id if provider is GCP - # skip if project_id already specified in extra_config - if click_params["provider"] == "gcp" and not any( - s.startswith("project_id=") for s in extra_config - ): - project_id = click.prompt("What is your GCP project_id?", type=str) - extra_config.append(f"project_id={project_id}") - declare(f"Project ID '{project_id}' added to extra_config.") - - deployment_values["extra_config"] = extra_config - - tags = [] - # use click.prompt to populate tags until someone just hits enter - while True: - declare( - "\nAdd to tags for stack deployment -->\n", - bold=True, - ) - tag = click.prompt( - "Enter `tags` key or hit enter to skip", - type=str, - default="", - ) - if tag == "": - break - value = click.prompt( - f"Enter value for '{tag}'", - type=str, - ) - tags.append(f"{tag}={value}") - deployment_values["tags"] = tags - - return deployment_values - - def validate_name(ctx: click.Context, param: str, value: str) -> str: """Validate the name of the stack. @@ -1792,393 +1659,6 @@ def deploy( ) -@stack.command(help="[DEPRECATED] Deploy a stack using mlstacks.") -@click.option( - "--provider", - "-p", - "provider", - required=True, - type=click.Choice(STACK_RECIPE_MODULAR_RECIPES), -) -@click.option( - "--name", - "-n", - "stack_name", - type=click.STRING, - required=True, - help="Set a name for the ZenML stack that will be imported from the YAML " - "configuration file which gets generated after deploying the stack recipe. " - "Defaults to the name of the stack recipe being deployed.", -) -@click.option( - "--region", - "-r", - "region", - type=click.STRING, - required=True, - help="The region to deploy the stack to.", -) -@click.option( - "--no-import", - "-ni", - "no_import_stack_flag", - is_flag=True, - help="If you don't want the stack to be imported automatically.", -) -@click.option( - "--artifact-store", - "-a", - "artifact_store", - required=False, - is_flag=True, - help="Whether to deploy an artifact store.", -) -@click.option( - "--container-registry", - "-c", - "container_registry", - required=False, - is_flag=True, - help="Whether to deploy a container registry.", -) -@click.option( - "--mlops-platform", - "-m", - "mlops_platform", - type=click.Choice(["zenml"]), - required=False, - help="The flavor of MLOps platform to use." 
- "If not specified, the default MLOps platform will be used.", -) -@click.option( - "--orchestrator", - "-o", - required=False, - type=click.Choice( - [ - "kubernetes", - "kubeflow", - "tekton", - "sagemaker", - "skypilot", - "vertex", - ] - ), - help="The flavor of orchestrator to use. " - "If not specified, the default orchestrator will be used.", -) -@click.option( - "--model-deployer", - "-md", - "model_deployer", - required=False, - type=click.Choice(["mlflow", "seldon"]), - help="The flavor of model deployer to use. ", -) -@click.option( - "--experiment-tracker", - "-e", - "experiment_tracker", - required=False, - type=click.Choice(["mlflow"]), - help="The flavor of experiment tracker to use.", -) -@click.option( - "--step-operator", - "-s", - "step_operator", - required=False, - type=click.Choice(["sagemaker"]), - help="The flavor of step operator to use.", -) -@click.option( - "--file", - "-f", - "file", - required=False, - type=click.Path(exists=True, dir_okay=False, readable=True), - help="Use a YAML specification file as the basis of the stack deployment.", -) -@click.option( - "--debug-mode", - "-d", - "debug_mode", - is_flag=True, - default=False, - help="Whether to run the stack deployment in debug mode.", -) -@click.option( - "--extra-config", - "-x", - "extra_config", - multiple=True, - help="Extra configurations as key=value pairs. This option can be used multiple times.", -) -@click.option( - "--tags", - "-t", - "tags", - required=False, - type=click.STRING, - help="Pass one or more tags.", - multiple=True, -) -@click.option( - "--interactive", - "-i", - "interactive", - is_flag=True, - default=False, - help="Deploy the stack interactively.", -) -@click.pass_context -def deploy_mlstack( - ctx: click.Context, - provider: str, - stack_name: str, - region: str, - mlops_platform: Optional[str] = None, - orchestrator: Optional[str] = None, - model_deployer: Optional[str] = None, - experiment_tracker: Optional[str] = None, - step_operator: Optional[str] = None, - no_import_stack_flag: bool = False, - artifact_store: Optional[bool] = None, - container_registry: Optional[bool] = None, - file: Optional[str] = None, - debug_mode: bool = False, - tags: Optional[List[str]] = None, - extra_config: Optional[List[str]] = None, - interactive: bool = False, -) -> None: - """Deploy a stack with mlstacks. - - `zenml stack_recipe pull ` has to be called with the - same relative path before the `deploy` command. - - Args: - ctx: The click context. - provider: The cloud provider to deploy the stack to. - stack_name: A name for the ZenML stack that gets imported as a result - of the recipe deployment. - no_import_stack_flag: If you don't want the stack to be imported into - ZenML after deployment. - artifact_store: The flavor of artifact store to deploy. In the case of - the artifact store, it doesn't matter what you specify here, as - there's only one flavor per cloud provider and that will be deployed. - orchestrator: The flavor of orchestrator to use. - container_registry: The flavor of container registry to deploy. In the case of - the container registry, it doesn't matter what you specify here, as - there's only one flavor per cloud provider and that will be deployed. - model_deployer: The flavor of model deployer to deploy. - experiment_tracker: The flavor of experiment tracker to deploy. - step_operator: The flavor of step operator to deploy. - extra_config: Extra configurations as key=value pairs. - tags: Pass one or more tags. 
- debug_mode: Whether to run the stack deployment in debug mode. - file: Use a YAML specification file as the basis of the stack - deployment. - mlops_platform: The flavor of MLOps platform to use. - region: The region to deploy the stack to. - interactive: Deploy the stack interactively. - """ - cli_utils.warning( - "The `zenml stack deploy-mlstack` (former `zenml stack deploy`) CLI " - "command has been deprecated and will be removed in a future release. " - "Please use `zenml stack deploy` instead for a simplified " - "experience." - ) - - with track_handler( - event=AnalyticsEvent.DEPLOY_STACK, - ) as analytics_handler: - if stack_exists(stack_name): - cli_utils.error( - f"Stack with name '{stack_name}' already exists. Please choose a " - "different name." - ) - elif stack_spec_exists(stack_name): - cli_utils.error( - f"Stack spec for stack named '{stack_name}' already exists. " - "Please choose a different name." - ) - - cli_utils.declare("Checking prerequisites are installed...") - cli_utils.verify_mlstacks_prerequisites_installation() - cli_utils.warning(ALPHA_MESSAGE) - - if not file: - cli_params: Dict[str, Any] = ctx.params - if interactive: - cli_params = _get_deployment_params_interactively(cli_params) - stack, components = convert_click_params_to_mlstacks_primitives( - cli_params - ) - - from mlstacks.utils import zenml_utils - - cli_utils.declare("Checking flavor compatibility...") - if not zenml_utils.has_valid_flavor_combinations( - stack, components - ): - cli_utils.error( - "The specified stack and component flavors are not compatible " - "with the provider or with one another. Please try again." - ) - - stack_dict, component_dicts = convert_mlstacks_primitives_to_dicts( - stack, components - ) - # write the stack and component yaml files - from mlstacks.constants import MLSTACKS_PACKAGE_NAME - - spec_dir = os.path.join( - click.get_app_dir(MLSTACKS_PACKAGE_NAME), - "stack_specs", - stack.name, - ) - cli_utils.declare(f"Writing spec files to {spec_dir}...") - create_dir_recursive_if_not_exists(spec_dir) - - stack_file_path = os.path.join( - spec_dir, f"stack-{stack.name}.yaml" - ) - write_yaml(file_path=stack_file_path, contents=stack_dict) - for component in component_dicts: - write_yaml( - file_path=os.path.join( - spec_dir, f"{component['name']}.yaml" - ), - contents=component, - ) - else: - declare("Importing from stack specification file...") - stack_file_path = file - - from mlstacks.utils.yaml_utils import load_stack_yaml - - stack = load_stack_yaml(stack_file_path) - - analytics_handler.metadata = { - "stack_provider": stack.provider, - "debug_mode": debug_mode, - "no_import_stack_flag": no_import_stack_flag, - "user_created_spec": bool(file), - "mlops_platform": mlops_platform, - "orchestrator": orchestrator, - "model_deployer": model_deployer, - "experiment_tracker": experiment_tracker, - "step_operator": step_operator, - "artifact_store": artifact_store, - "container_registry": container_registry, - } - - deploy_mlstacks_stack( - spec_file_path=stack_file_path, - stack_name=stack.name, - stack_provider=stack.provider, - debug_mode=debug_mode, - no_import_stack_flag=no_import_stack_flag, - user_created_spec=bool(file), - ) - - -@stack.command( - help="Destroy stack components created previously with " - "`zenml stack deploy`" -) -@click.argument("stack_name", required=True) -@click.option( - "--debug", - "-d", - "debug_mode", - is_flag=True, - default=False, - help="Whether to run Terraform in debug mode.", -) -def destroy( - stack_name: str, - debug_mode: bool 
= False, -) -> None: - """Destroy all resources previously created with `zenml stack deploy`. - - Args: - stack_name: Name of the stack - debug_mode: Whether to run Terraform in debug mode. - """ - if not confirmation( - f"Are you sure you want to destroy stack '{stack_name}' and all " - "associated infrastructure?" - ): - error("Aborting stack destroy...") - - with track_handler( - event=AnalyticsEvent.DESTROY_STACK, - ) as analytics_handler: - analytics_handler.metadata["debug_mode"] = debug_mode - cli_utils.verify_mlstacks_prerequisites_installation() - from mlstacks.constants import MLSTACKS_PACKAGE_NAME - - # check the stack actually exists - if not stack_exists(stack_name): - cli_utils.error( - f"Stack with name '{stack_name}' does not exist. Please check and " - "try again." - ) - - spec_file_path = get_stack_spec_file_path(stack_name) - spec_files_dir: str = os.path.join( - click.get_app_dir(MLSTACKS_PACKAGE_NAME), "stack_specs", stack_name - ) - user_created_spec = str(Path(spec_file_path).parent) != spec_files_dir - - provider = read_yaml(file_path=spec_file_path).get("provider") - tf_definitions_path: str = os.path.join( - click.get_app_dir(MLSTACKS_PACKAGE_NAME), - "terraform", - f"{provider}-modular", - ) - - cli_utils.declare( - "Checking Terraform definitions and spec files are present..." - ) - verify_spec_and_tf_files_exist(spec_file_path, tf_definitions_path) - - from mlstacks.utils import terraform_utils - - cli_utils.declare( - f"Destroying stack '{stack_name}' using Terraform..." - ) - terraform_utils.destroy_stack( - stack_path=spec_file_path, debug_mode=debug_mode - ) - cli_utils.declare(f"Stack '{stack_name}' successfully destroyed.") - - if cli_utils.confirmation( - f"Would you like to recursively delete the associated ZenML " - f"stack '{stack_name}'?\nThis will delete the stack and any " - "underlying stack components." - ): - from zenml.client import Client - - client = Client() - client.delete_stack(name_id_or_prefix=stack_name, recursive=True) - cli_utils.declare( - f"Stack '{stack_name}' successfully deleted from ZenML." - ) - - spec_dir = os.path.dirname(spec_file_path) - if not user_created_spec and cli_utils.confirmation( - f"Would you like to delete the `mlstacks` spec directory for " - f"this stack, located at {spec_dir}?" - ): - rmtree(spec_files_dir) - cli_utils.declare( - f"Spec directory for stack '{stack_name}' successfully deleted." - ) - cli_utils.declare(f"Stack '{stack_name}' successfully destroyed.") - - @stack.command( "connect", help="Connect a service-connector to a stack's components. " diff --git a/src/zenml/cli/stack_components.py b/src/zenml/cli/stack_components.py index 68ff640f1f6..0f8921eed7f 100644 --- a/src/zenml/cli/stack_components.py +++ b/src/zenml/cli/stack_components.py @@ -13,9 +13,6 @@ # permissions and limitations under the License. 
"""Functionality to generate stack component CLI commands.""" -import os -import random -import string import time from importlib import import_module from typing import Any, Callable, List, Optional, Tuple, cast @@ -24,8 +21,6 @@ import click from rich.markdown import Markdown -from zenml.analytics.enums import AnalyticsEvent -from zenml.analytics.utils import track_handler from zenml.cli import utils as cli_utils from zenml.cli.annotator import register_annotator_subcommands from zenml.cli.cli import TagGroup, cli @@ -41,7 +36,6 @@ ) from zenml.client import Client from zenml.console import console -from zenml.constants import ALPHA_MESSAGE, STACK_RECIPE_MODULAR_RECIPES from zenml.enums import CliCategories, StackComponentType from zenml.exceptions import AuthorizationException, IllegalOperationError from zenml.io import fileio @@ -51,14 +45,6 @@ ) from zenml.utils import source_utils from zenml.utils.dashboard_utils import get_component_url -from zenml.utils.io_utils import create_dir_recursive_if_not_exists -from zenml.utils.mlstacks_utils import ( - convert_click_params_to_mlstacks_primitives, - convert_mlstacks_primitives_to_dicts, - import_new_mlstacks_component, - verify_spec_and_tf_files_exist, -) -from zenml.utils.yaml_utils import write_yaml def generate_stack_component_get_command( @@ -907,382 +893,6 @@ def delete_stack_component_flavor_command(name_or_id: str) -> None: return delete_stack_component_flavor_command -def generate_stack_component_deploy_command( - component_type: StackComponentType, -) -> Callable[ - [str, str, str, str, bool, Optional[List[str]], List[str]], None -]: - """Generates a `deploy` command for the stack component type. - - Args: - component_type: Type of the component to generate the command for. - - Returns: - A function that can be used as a `click` command. - """ - display_name = _component_display_name(component_type) - - @click.argument( - "name", - type=str, - ) - @click.option( - "--flavor", - "-f", - "flavor", - help=f"The flavor of the {display_name} to deploy.", - required=True, - type=str, - ) - @click.option( - "--provider", - "-p", - "provider", - required=True, - type=click.Choice(STACK_RECIPE_MODULAR_RECIPES), - help="The cloud (or local provider) to use to deploy the stack component.", - ) - @click.option( - "--region", - "-r", - "region", - required=True, - type=str, - help="The region to deploy the stack component to.", - ) - @click.option( - "--debug-mode", - "-d", - "debug_mode", - is_flag=True, - default=False, - help="Whether to deploy the stack component in debug mode.", - ) - @click.option( - "--extra-config", - "-x", - "extra_config", - multiple=True, - help="Extra configurations as key=value pairs. This option can be " - "used multiple times.", - ) - @click.option( - "--tags", - "-t", - "tags", - required=False, - type=click.STRING, - help="Pass one or more tags.", - multiple=True, - ) - def deploy_stack_component_command( - name: str, - flavor: str, - provider: str, - region: str, - debug_mode: bool = False, - tags: Optional[List[str]] = None, - extra_config: List[str] = [], - ) -> None: - """Deploy a stack component. - - This function also registers the newly-deployed component. - - Args: - name: Name of the component to register. - flavor: Flavor of the component to register. - provider: Cloud provider (or local) to use to deploy the stack - component. - region: Region to deploy the stack component to. - debug_mode: Whether to deploy the stack component in debug mode. - tags: Tags to be added to the component. 
- extra_config: Extra configuration values to be added to the - """ - with track_handler( - event=AnalyticsEvent.DEPLOY_STACK_COMPONENT, - ) as analytics_handler: - client = Client() - try: - # raise error if user already has a component with the same name - client.get_stack_component( - component_type=component_type, - name_id_or_prefix=name, - allow_name_prefix_match=False, - ) - cli_utils.error( - f"A stack component of type '{component_type.value}' with " - f"the name '{name}' already exists. Please try again with " - f"a different component name." - ) - except KeyError: - pass - from mlstacks.constants import ALLOWED_FLAVORS - - if flavor not in ALLOWED_FLAVORS[component_type.value]: - cli_utils.error( - f"Flavor '{flavor}' is not supported for " - f"{_component_display_name(component_type, True)}. " - "Allowed flavors are: " - f"{', '.join(ALLOWED_FLAVORS[component_type.value])}." - ) - - # for cases like artifact store and container - # registry the flavor is the same as the cloud - if flavor in {"s3", "sagemaker", "aws"} and provider != "aws": - cli_utils.error( - f"Flavor '{flavor}' is not supported for " - f"{_component_display_name(component_type, True)} on " - f"{provider}." - ) - elif flavor in {"vertex", "gcp"} and provider != "gcp": - cli_utils.error( - f"Flavor '{flavor}' is not supported for " - f"{_component_display_name(component_type, True)} on " - f"{provider}." - ) - - # if the cloud is gcp, project_id is required - extra_config_obj = ( - dict(config.split("=") for config in extra_config) - if extra_config - else {} - ) - if provider == "gcp" and "project_id" not in extra_config_obj: - cli_utils.error( - "Missing Project ID. You must pass your GCP project ID to " - "the deploy command as part of the `--extra_config` option." - ) - cli_utils.declare("Checking prerequisites are installed...") - cli_utils.verify_mlstacks_prerequisites_installation() - from mlstacks.utils import zenml_utils - - cli_utils.warning(ALPHA_MESSAGE) - cli_params = { - "provider": provider, - "region": region, - "stack_name": "".join( - random.choice(string.ascii_letters + string.digits) - for _ in range(5) - ), - "tags": tags, - "extra_config": list(extra_config), - "file": None, - "debug_mode": debug_mode, - component_type.value: flavor, - } - if component_type == StackComponentType.ARTIFACT_STORE: - cli_params["extra_config"].append(f"bucket_name={name}") # type: ignore[union-attr] - stack, components = convert_click_params_to_mlstacks_primitives( - cli_params, zenml_component_deploy=True - ) - - analytics_handler.metadata = { - "flavor": flavor, - "provider": provider, - "debug_mode": debug_mode, - "component_type": component_type.value, - } - - cli_utils.declare("Checking flavor compatibility...") - if not zenml_utils.has_valid_flavor_combinations( - stack, components - ): - cli_utils.error( - "The specified stack and component flavors are not " - "compatible with the provider or with one another. " - "Please try again." 
- ) - - stack_dict, component_dicts = convert_mlstacks_primitives_to_dicts( - stack, components - ) - # write the stack and component yaml files - from mlstacks.constants import MLSTACKS_PACKAGE_NAME - - spec_dir = os.path.join( - click.get_app_dir(MLSTACKS_PACKAGE_NAME), - "stack_specs", - stack.name, - ) - cli_utils.declare(f"Writing spec files to {spec_dir}...") - create_dir_recursive_if_not_exists(spec_dir) - - stack_file_path = os.path.join( - spec_dir, f"stack-{stack.name}.yaml" - ) - write_yaml(file_path=stack_file_path, contents=stack_dict) - for component in component_dicts: - write_yaml( - file_path=os.path.join( - spec_dir, f"{component['name']}.yaml" - ), - contents=component, - ) - - from mlstacks.utils import terraform_utils - - cli_utils.declare("Deploying stack using Terraform...") - terraform_utils.deploy_stack( - stack_file_path, debug_mode=debug_mode - ) - cli_utils.declare("Stack successfully deployed.") - - stack_name: str = cli_params["stack_name"] # type: ignore[assignment] - cli_utils.declare( - f"Importing {component_type.value} component '{name}' into ZenML.." - ) - import_new_mlstacks_component( - stack_name=stack_name, - component_name=name, - provider=stack.provider, - stack_spec_dir=spec_dir, - ) - cli_utils.declare("Component successfully imported into ZenML.") - - return deploy_stack_component_command - - -def generate_stack_component_destroy_command( - component_type: StackComponentType, -) -> Callable[[str, str, bool], None]: - """Generates a `destroy` command for the stack component type. - - Args: - component_type: Type of the component to generate the command for. - - Returns: - A function that can be used as a `click` command. - """ - _component_display_name(component_type) - - @click.argument( - "name_id_or_prefix", - type=str, - required=True, - ) - @click.option( - "--provider", - "-p", - "provider", - type=click.Choice(["aws", "k3d", "gcp"]), - required=True, - ) - @click.option( - "--debug-mode", - "-d", - "debug_mode", - is_flag=True, - default=False, - help="Whether to destroy the stack component in debug mode.", - ) - def destroy_stack_component_command( - name_id_or_prefix: str, - provider: str, - debug_mode: bool = False, - ) -> None: - """Destroy a stack component. - - Args: - name_id_or_prefix: Name, ID or prefix of the component to destroy. - provider: Cloud provider (or local) where the stack was deployed. - debug_mode: Whether to destroy the stack component in debug mode. - """ - with track_handler( - event=AnalyticsEvent.DESTROY_STACK_COMPONENT, - ) as analytics_handler: - analytics_handler.metadata = { - "provider": provider, - "component_type": component_type.value, - "debug_mode": debug_mode, - } - client = Client() - - try: - component = client.get_stack_component( - name_id_or_prefix=name_id_or_prefix, - component_type=component_type, - allow_name_prefix_match=False, - ) - except KeyError: - cli_utils.error( - "Could not find a stack component with name or id " - f"'{name_id_or_prefix}'.", - ) - - # Check if the component was created by a recipe - if not component.component_spec_path: - cli_utils.error( - f"Cannot destroy stack component {component.name}. 
It " - "was not created by a recipe.", - ) - - cli_utils.verify_mlstacks_prerequisites_installation() - from mlstacks.constants import MLSTACKS_PACKAGE_NAME - - # spec_files_dir: str = component.component_spec_path - component_spec_path: str = component.component_spec_path - stack_name: str = os.path.basename( - os.path.dirname(component_spec_path) - ) - stack_spec_path: str = os.path.join( - os.path.dirname(component_spec_path), - f"stack-{stack_name}.yaml", - ) - tf_definitions_path: str = os.path.join( - click.get_app_dir(MLSTACKS_PACKAGE_NAME), - "terraform", - f"{provider}-modular", - ) - - cli_utils.declare( - "Checking Terraform definitions and spec files are present..." - ) - verify_spec_and_tf_files_exist( - stack_spec_path, tf_definitions_path - ) - - from mlstacks.utils import terraform_utils - - cli_utils.declare( - f"Destroying component '{component.name}' using Terraform..." - ) - terraform_utils.destroy_stack( - stack_path=stack_spec_path, debug_mode=debug_mode - ) - cli_utils.declare( - f"Component '{component.name}' successfully destroyed." - ) - - if cli_utils.confirmation( - f"Would you like to delete the associated ZenML " - f"component '{component.name}'?\nThis will delete the stack " - "component registered with ZenML." - ): - client.delete_stack_component( - name_id_or_prefix=component.id, - component_type=component.type, - ) - cli_utils.declare( - f"Component '{component.name}' successfully deleted from ZenML." - ) - - spec_dir = os.path.dirname(stack_spec_path) - if cli_utils.confirmation( - f"Would you like to delete the `mlstacks` spec directory for " - f"this component, located at {spec_dir}?" - ): - fileio.rmtree(spec_dir) - cli_utils.declare( - f"Spec directory for component '{component.name}' successfully " - "deleted." - ) - cli_utils.declare( - f"Component '{component.name}' successfully destroyed." - ) - - return destroy_stack_component_command - - def prompt_select_resource_id( resource_ids: List[str], resource_name: str, @@ -1660,22 +1270,6 @@ def command_group() -> None: "explain", help=f"Explaining the {plural_display_name}." )(explain_command) - # zenml stack-component deploy - deploy_command = generate_stack_component_deploy_command(component_type) - context_settings = {"ignore_unknown_options": True} - command_group.command( - "deploy", - context_settings=context_settings, - help=f"Deploy a new {singular_display_name}.", - )(deploy_command) - - # zenml stack-component destroy - destroy_command = generate_stack_component_destroy_command(component_type) - command_group.command( - "destroy", - help=f"Destroy an existing {singular_display_name}.", - )(destroy_command) - # zenml stack-component flavor @command_group.group( "flavor", help=f"Commands to interact with {plural_display_name}." diff --git a/src/zenml/cli/stack_recipes.py b/src/zenml/cli/stack_recipes.py deleted file mode 100644 index aebfb15c5c6..00000000000 --- a/src/zenml/cli/stack_recipes.py +++ /dev/null @@ -1,469 +0,0 @@ -# Copyright (c) ZenML GmbH 2022. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: - -# https://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. 
See the License for the specific language governing -# permissions and limitations under the License. -"""Functionality to handle downloading ZenML stacks via the CLI.""" - -from typing import Optional - -import click - -from zenml.cli import utils as cli_utils -from zenml.cli.stack import stack -from zenml.logger import get_logger - -logger = get_logger(__name__) - - -@stack.group( - "recipe", - help="DISABLED: Commands for using the stack recipes.", - invoke_without_command=True, -) -def stack_recipe() -> None: - """Access all ZenML stack recipes.""" - - -@stack_recipe.command( - name="list", help="DISABLED: List the available stack recipes." -) -def list_stack_recipes() -> None: - """List all available stack recipes.""" - cli_utils.warning( - "This command has been disabled and will be removed in a future " - "release. Please refer to the `mlstacks` documentation for more " - "information at https://mlstacks.zenml.io/" - ) - - -@stack_recipe.command( - help="DISABLED: Deletes the ZenML stack recipes directory." -) -@click.option( - "--path", - "-p", - type=click.STRING, - default="zenml_stack_recipes", - help="Relative path at which you want to clean the stack_recipe(s)", -) -@click.option( - "--yes", - "-y", - is_flag=True, - help="Whether to skip the confirmation prompt.", -) -def clean( - path: str, - yes: bool, -) -> None: - """Deletes the stack recipes directory from your working directory. - - Args: - path: The path at which you want to clean the stack_recipe(s). - yes: Whether to skip the confirmation prompt. - """ - cli_utils.warning( - "This command has been disabled and will be removed in a future " - "release. Please refer to the `mlstacks` documentation for more " - "information at https://mlstacks.zenml.io/" - ) - - -@stack_recipe.command(help="DISABLED: Find out more about a stack recipe.") -@click.argument("stack_recipe_name") -def info( - stack_recipe_name: str, -) -> None: - """Find out more about a stack recipe. - - Outputs a pager view of the stack_recipe's README.md file. - - Args: - stack_recipe_name: The name of the stack recipe. - """ - cli_utils.warning( - "This command has been disabled and will be removed in a future " - "release. Please refer to the `mlstacks` documentation for more " - "information at https://mlstacks.zenml.io/" - ) - - -@stack_recipe.command( - help="DISABLED: Describe the stack components for a recipe." -) -@click.argument( - "stack_recipe_name", - type=click.Choice(("aws-modular", "gcp-modular", "k3d-modular")), -) -def describe( - stack_recipe_name: str, -) -> None: - """Describe the stack components and their tools. - - Outputs the "Description" section of the recipe metadata. - - Args: - stack_recipe_name: The name of the stack recipe. - """ - cli_utils.warning( - "This command has been disabled and will be removed in a future " - "release. Please refer to the `mlstacks` documentation for more " - "information at https://mlstacks.zenml.io/" - ) - - -@stack_recipe.command(help="DISABLED: Pull stack recipes.") -@click.argument("stack_recipe_name", required=False, default=None) -@click.option( - "--yes", - "-y", - "force", - is_flag=True, - help="Force the redownload of the stack_recipes folder to the ZenML config " - "folder.", -) -@click.option( - "--path", - "-p", - type=click.STRING, - default="zenml_stack_recipes", - help="Relative path at which you want to install the stack recipe(s)", -) -def pull( - stack_recipe_name: str, - force: bool, - path: str, -) -> None: - """Pull stack_recipes straight into your current working directory. 
- - Add the flag --yes or -y to redownload all the stack_recipes afresh. - - Args: - stack_recipe_name: The name of the stack_recipe. - force: Force the redownload of the stack_recipes folder. - path: The path at which you want to install the stack_recipe(s). - """ - cli_utils.warning( - "This command has been disabled and will be removed in a future " - "release. Please refer to the `mlstacks` documentation for more " - "information at https://mlstacks.zenml.io/" - ) - - -@stack_recipe.command(help="DISABLED: Deploy a stack recipe.") -@click.argument("stack_recipe_name", required=True) -@click.option( - "--path", - "-p", - type=click.STRING, - default="zenml_stack_recipes", - help="Relative path at which local stack recipe(s) should exist", -) -@click.option( - "--force", - "-f", - "force", - is_flag=True, - help="Force pull the stack recipe. This overwrites any existing recipe " - "files present locally, including the terraform state files and the " - "local configuration.", -) -@click.option( - "--stack-name", - "-n", - type=click.STRING, - required=False, - help="Set a name for the ZenML stack that will be imported from the YAML " - "configuration file which gets generated after deploying the stack recipe. " - "Defaults to the name of the stack recipe being deployed.", -) -@click.option( - "--import", - "import_stack_flag", - is_flag=True, - help="Import the stack automatically after the recipe is deployed.", -) -@click.option( - "--log-level", - type=click.Choice( - ["TRACE", "DEBUG", "INFO", "WARN", "ERROR"], case_sensitive=False - ), - help="Choose one of TRACE, DEBUG, INFO, WARN or ERROR (case insensitive) as " - "log level for the deploy operation.", - default="ERROR", -) -@click.option( - "--no-server", - is_flag=True, - help="Don't deploy ZenML even if there's no active cloud deployment.", -) -@click.option( - "--skip-pull", - is_flag=True, - help="Skip the pulling of the stack recipe before deploying. This should be used " - "if you have a local copy of your recipe already. Use the `--path` or `-p` flag to " - "specify the directory that hosts your recipe(s).", -) -@click.option( - "--artifact-store", - "-a", - help="The flavor of artifact store to use. " - "If not specified, the default artifact store will be used.", -) -@click.option( - "--orchestrator", - "-o", - help="The flavor of orchestrator to use. " - "If not specified, the default orchestrator will be used.", -) -@click.option( - "--container-registry", - "-c", - help="The flavor of container registry to use. " - "If not specified, no container registry will be deployed.", -) -@click.option( - "--model-deployer", - "-d", - help="The flavor of model deployer to use. " - "If not specified, no model deployer will be deployed.", -) -@click.option( - "--experiment-tracker", - "-e", - help="The flavor of experiment tracker to use. " - "If not specified, no experiment tracker will be deployed.", -) -@click.option( - "--step-operator", - "-s", - help="The flavor of step operator to use. 
" - "If not specified, no step operator will be deployed.", -) -@click.option( - "--config", - help="Use a YAML or JSON configuration or configuration file to pass" - "variables to the stack recipe.", - required=False, - type=str, -) -@click.pass_context -def deploy( - ctx: click.Context, - stack_recipe_name: str, - artifact_store: Optional[str], - orchestrator: Optional[str], - container_registry: Optional[str], - model_deployer: Optional[str], - experiment_tracker: Optional[str], - step_operator: Optional[str], - path: str, - force: bool, - import_stack_flag: bool, - log_level: str, - no_server: bool, - skip_pull: bool, - stack_name: Optional[str], - config: Optional[str], -) -> None: - """Run the stack_recipe at the specified relative path. - - `zenml stack_recipe pull ` has to be called with the - same relative path before the `deploy` command. - - Args: - ctx: The click context. - stack_recipe_name: The name of the stack_recipe. - path: The path at which you want to install the stack_recipe(s). - force: Force pull the stack recipe, overwriting any existing files. - stack_name: A name for the ZenML stack that gets imported as a result - of the recipe deployment. - import_stack_flag: Import the stack automatically after the recipe is - deployed. The stack configuration file is always generated and - can be imported manually otherwise. - log_level: Choose one of TRACE, DEBUG, INFO, WARN or ERROR - (case-insensitive) as log level for the `deploy` operation. - no_server: Don't deploy ZenML even if there's no active cloud - deployment. - skip_pull: Skip the pull of the stack recipe before deploying. This - should be used if you have a local copy of your recipe already. - artifact_store: The flavor of artifact store to deploy. In the case of - the artifact store, it doesn't matter what you specify here, as - there's only one flavor per cloud provider and that will be - deployed. - orchestrator: The flavor of orchestrator to use. - container_registry: The flavor of container registry to deploy. - In the case of the container registry, it doesn't matter what you - specify here, as there's only one flavor per cloud provider and - that will be deployed. - model_deployer: The flavor of model deployer to deploy. - experiment_tracker: The flavor of experiment tracker to deploy. - step_operator: The flavor of step operator to deploy. - config: Use a YAML or JSON configuration or configuration file to pass - variables to the stack recipe. - """ - cli_utils.warning( - "This command has been disabled and will be removed in a future " - "release. Please use `zenml stack deploy ...` instead. For more " - "information and to learn about the new syntax, please refer to " - "the `mlstacks` documentation at https://mlstacks.zenml.io/" - ) - - -@stack_recipe.command(help="DISABLED: Destroy stack components") -@click.argument("stack_recipe_name", required=True) -@click.option( - "--path", - "-p", - type=click.STRING, - default="zenml_stack_recipes", - help="Relative path at which you want to install the stack_recipe(s)", -) -@click.option( - "--artifact-store", - "-a", - help="The flavor of artifact store to destroy. " - "If not specified, the default artifact store will be assumed.", -) -@click.option( - "--orchestrator", - "-o", - help="The flavor of orchestrator to destroy. " - "If not specified, the default orchestrator will be used.", -) -@click.option( - "--container-registry", - "-c", - help="The flavor of container registry to destroy. 
" - "If not specified, no container registry will be destroyed.", -) -@click.option( - "--model-deployer", - "-d", - help="The flavor of model deployer to destroy. " - "If not specified, no model deployer will be destroyed.", -) -@click.option( - "--experiment-tracker", - "-e", - help="The flavor of experiment tracker to destroy. " - "If not specified, no experiment tracker will be destroyed.", -) -@click.option( - "--step-operator", - "-s", - help="The flavor of step operator to destroy. " - "If not specified, no step operator will be destroyed.", -) -def destroy( - stack_recipe_name: str, - path: str, - artifact_store: Optional[str], - orchestrator: Optional[str], - container_registry: Optional[str], - model_deployer: Optional[str], - experiment_tracker: Optional[str], - step_operator: Optional[str], -) -> None: - """Destroy all resources. - - `zenml stack_recipe deploy stack_recipe_name` has to be called with the - same relative path before the destroy command. If you want to destroy - specific components of the stack, you can specify the component names - with the corresponding options. If no component is specified, all - components will be destroyed. - - Args: - stack_recipe_name: The name of the stack_recipe. - path: The path of the stack recipe you want to destroy. - artifact_store: The flavor of the artifact store to destroy. - In the case of the artifact store, it doesn't matter what you - specify here, as there's only one flavor per cloud provider and - that will be destroyed. - orchestrator: The flavor of the orchestrator to destroy. - container_registry: The flavor of the container registry to destroy. - In the case of the container registry, it doesn't matter what you - specify here, as there's only one flavor per cloud provider and - that will be destroyed. - model_deployer: The flavor of the model deployer to destroy. - experiment_tracker: The flavor of the experiment tracker to destroy. - step_operator: The flavor of the step operator to destroy. - """ - cli_utils.warning( - "This command has been disabled and will be removed in a future " - "release. Please use `zenml stack destroy ...` instead. For more " - "information and to learn about the new syntax, please refer to " - "the `mlstacks` documentation at https://mlstacks.zenml.io/" - ) - - -@stack_recipe.command( - name="output", - help="DISABLED: Get outputs from a stack recipe.", -) -@click.argument("stack_recipe_name", type=str) -@click.option( - "--path", - "-p", - type=click.STRING, - default="zenml_stack_recipes", - help="Relative path at which you want to install the stack_recipe(s)", -) -@click.option( - "--output", - "-o", - type=click.STRING, - default=None, - help="Name of the output you want to get the value of. If none is given," - "all outputs are returned.", -) -@click.option( - "--format", - "-f", - type=click.Choice(["json", "yaml"], case_sensitive=False), -) -def get_outputs( - stack_recipe_name: str, - path: str, - output: Optional[str], - format: Optional[str], -) -> None: - """Get the outputs of the stack recipe at the specified relative path. - - `zenml stack_recipe deploy stack_recipe_name` has to be called from the - same relative path before the get_outputs command. - - Args: - stack_recipe_name: The name of the stack_recipe. - path: The path of the stack recipe you want to get the outputs from. - output: The name of the output you want to get the value of. - If none is given, all outputs are returned. - format: The format of the output. If none is given, the output - is printed to the console. 
- """ - cli_utils.warning( - "This command has been disabled and will be removed in a future " - "release. For more information and to learn about the new syntax, " - "please refer to the `mlstacks` documentation at " - "https://mlstacks.zenml.io/" - ) - - -@stack_recipe.command(help="DISABLED: The active version of mlstacks recipes.") -def version() -> None: - """The active version of the mlstacks recipes.""" - cli_utils.warning( - "This command has been disabled and will be removed in a future " - "release. For more information and to learn about the new syntax, " - "please refer to the `mlstacks` documentation at " - "https://mlstacks.zenml.io/" - ) diff --git a/src/zenml/cli/utils.py b/src/zenml/cli/utils.py index 5c473e7c240..cf3263af064 100644 --- a/src/zenml/cli/utils.py +++ b/src/zenml/cli/utils.py @@ -57,8 +57,6 @@ from zenml.constants import ( FILTERING_DATETIME_FORMAT, IS_DEBUG_ENV, - NOT_INSTALLED_MESSAGE, - TERRAFORM_NOT_INSTALLED_MESSAGE, ) from zenml.enums import GenericFilterOps, StackComponentType from zenml.logger import get_logger @@ -523,43 +521,6 @@ def format_integration_list( return list_of_dicts -def print_stack_outputs(stack: "StackResponse") -> None: - """Prints outputs for stacks deployed with mlstacks. - - Args: - stack: Instance of a stack model. - """ - verify_mlstacks_prerequisites_installation() - - if not stack.stack_spec_path: - declare("No stack spec path is set for this stack.") - return - stack_caption = f"'{stack.name}' stack" - rich_table = table.Table( - box=box.HEAVY_EDGE, - title="MLStacks Outputs", - caption=stack_caption, - show_lines=True, - ) - rich_table.add_column("OUTPUT_KEY", overflow="fold") - rich_table.add_column("OUTPUT_VALUE", overflow="fold") - - from mlstacks.utils.terraform_utils import get_stack_outputs - - stack_spec_file = stack.stack_spec_path - stack_outputs = get_stack_outputs(stack_path=stack_spec_file) - - for output_key, output_value in stack_outputs.items(): - rich_table.add_row(output_key, output_value) - - # capitalize entries in first column - rich_table.columns[0]._cells = [ - component.upper() # type: ignore[union-attr] - for component in rich_table.columns[0]._cells - ] - console.print(rich_table) - - def print_stack_configuration(stack: "StackResponse", active: bool) -> None: """Prints the configuration options of a stack. @@ -609,9 +570,6 @@ def print_stack_configuration(stack: "StackResponse", active: bool) -> None: f"{f'owned by user {stack.user.name}.' if stack.user else 'unowned.'}" ) - if stack.stack_spec_path: - declare(f"Stack spec path for `mlstacks`: '{stack.stack_spec_path}'") - def print_flavor_list(flavors: Page["FlavorResponse"]) -> None: """Prints the list of flavors. @@ -2766,21 +2724,6 @@ def print_model_url(url: Optional[str]) -> None: ) -def verify_mlstacks_prerequisites_installation() -> None: - """Checks if the `mlstacks` package is installed.""" - try: - import mlstacks # noqa: F401 - import python_terraform # noqa: F401 - - subprocess.check_output( - ["terraform", "--version"], universal_newlines=True - ) - except ImportError: - error(NOT_INSTALLED_MESSAGE) - except subprocess.CalledProcessError: - error(TERRAFORM_NOT_INSTALLED_MESSAGE) - - def is_jupyter_installed() -> bool: """Checks if Jupyter notebook is installed. 
diff --git a/src/zenml/constants.py b/src/zenml/constants.py index 9720d6b2e72..2d5b6185335 100644 --- a/src/zenml/constants.py +++ b/src/zenml/constants.py @@ -447,33 +447,6 @@ def handle_int_env_var(var: str, default: int = 0) -> int: # Stack Recipe constants STACK_RECIPES_GITHUB_REPO = "https://github.com/zenml-io/mlops-stacks.git" -ALPHA_MESSAGE = ( - "The mlstacks tool/package is in alpha and actively being developed. " - "Please avoid running mission-critical workloads on resources deployed " - "through these commands. If you encounter any problems, create an issue " - f"on the repository {STACK_RECIPES_GITHUB_REPO} and we'll help you out!" -) -NOT_INSTALLED_MESSAGE = ( - "The prerequisites for using `mlstacks` (the `mlstacks` and " - "`python-terraform` packages seem to be unavailable on your machine " - "and/or in your environment. To install the missing dependencies: \n\n" - "`pip install mlstacks`" -) -TERRAFORM_NOT_INSTALLED_MESSAGE = ( - "Terraform appears not to be installed on your machine and/or in your " - "environment. Please install Terraform and try again." -) -STACK_RECIPE_MODULAR_RECIPES = ["aws", "gcp", "k3d"] -MLSTACKS_SUPPORTED_STACK_COMPONENTS = [ - "artifact_store", - "container_registry", - "experiment_tracker", - "orchestrator", - "model_deployer", - "mlops_platform", - "step_operator", -] - # Parameters for internal ZenML Models TEXT_FIELD_MAX_LENGTH = 65535 diff --git a/src/zenml/enums.py b/src/zenml/enums.py index ba41e3615d3..e3e12ab70d0 100644 --- a/src/zenml/enums.py +++ b/src/zenml/enums.py @@ -200,9 +200,6 @@ class ServerProviderType(StrEnum): LOCAL = "local" DOCKER = "docker" - AWS = "aws" - GCP = "gcp" - AZURE = "azure" class AnalyticsEventSource(StrEnum): diff --git a/src/zenml/services/terraform/__init__.py b/src/zenml/services/terraform/__init__.py deleted file mode 100644 index 4bbbc7451ab..00000000000 --- a/src/zenml/services/terraform/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) ZenML GmbH 2022. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing -# permissions and limitations under the License. -"""Initialization of a Terraform ZenML service.""" diff --git a/src/zenml/services/terraform/terraform_service.py b/src/zenml/services/terraform/terraform_service.py deleted file mode 100644 index 91039fcfc0f..00000000000 --- a/src/zenml/services/terraform/terraform_service.py +++ /dev/null @@ -1,441 +0,0 @@ -# Copyright (c) ZenML GmbH 2022. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing -# permissions and limitations under the License. 
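A hedged aside on the removed prerequisites machinery: the `NOT_INSTALLED_MESSAGE` and `TERRAFORM_NOT_INSTALLED_MESSAGE` constants deleted above backed a check that imported the `mlstacks` and `python-terraform` packages and probed the Terraform CLI. The sketch below approximates that check under those assumptions; the helper name `check_mlstacks_prerequisites` is illustrative only and not part of the ZenML API.

# Minimal sketch, assuming `mlstacks`, `python-terraform` and the Terraform CLI
# are the prerequisites being verified (as in the code removed by this patch).
import subprocess


def check_mlstacks_prerequisites() -> None:
    """Raise if `mlstacks`, `python-terraform` or the Terraform CLI is missing."""
    try:
        import mlstacks  # noqa: F401
        import python_terraform  # noqa: F401
    except ImportError as e:
        raise RuntimeError(
            "The `mlstacks` prerequisites are unavailable; install them with "
            "`pip install mlstacks`."
        ) from e
    try:
        # Probe the Terraform binary; a missing or broken install raises here.
        subprocess.check_output(["terraform", "--version"], universal_newlines=True)
    except (FileNotFoundError, subprocess.CalledProcessError) as e:
        raise RuntimeError(
            "Terraform does not appear to be installed or on the PATH."
        ) from e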
-"""Implementation of a Terraform ZenML service.""" - -import os -import shutil -import tempfile -from pathlib import Path -from typing import Any, Dict, Generator, Optional, Tuple - -import python_terraform -from pydantic import Field - -from zenml.io import fileio -from zenml.logger import get_logger -from zenml.services.container.container_service import SERVICE_LOG_FILE_NAME -from zenml.services.service import BaseService, ServiceConfig -from zenml.services.service_status import ServiceState, ServiceStatus -from zenml.utils.io_utils import copy_dir, create_dir_recursive_if_not_exists - -logger = get_logger(__name__) - - -SERVICE_CONFIG_FILE_NAME = "service.json" -SERVICE_CONTAINER_GLOBAL_CONFIG_DIR = "zenconfig" -SERVICE_CONTAINER_GLOBAL_CONFIG_PATH = os.path.join( - "/", SERVICE_CONTAINER_GLOBAL_CONFIG_DIR -) - - -class TerraformServiceConfig(ServiceConfig): - """Terraform service configuration. - - Attributes: - root_runtime_path: the root path where the service stores its files. - singleton: set to True to store the service files directly in the - `root_runtime_path` directory instead of creating a subdirectory for - each service instance. Only has effect if the `root_runtime_path` is - also set. - directory_path: the path to the directory that hosts all the HCL files. - copy_terraform_files: whether to copy the HCL files to the service - runtime directory. - log_level: the log level to set the terraform client to. Choose one of - TRACE, DEBUG, INFO, WARN or ERROR (case insensitive). - variables_file_path: the path to the file that stores all variable values. - """ - - root_runtime_path: str - singleton: bool = False - directory_path: str - copy_terraform_files: bool = False - log_level: str = "ERROR" - variables_file_path: str = "values.tfvars.json" - - -class TerraformServiceStatus(ServiceStatus): - """Terraform service status. - - Attributes: - runtime_path: the path where the service files (e.g. the configuration - file used to start the service daemon and the logfile) are located - """ - - runtime_path: Optional[str] = None - - @property - def config_file(self) -> Optional[str]: - """Get the path to the service configuration file. - - Returns: - The path to the configuration file, or None, if the - service has never been started before. - """ - if not self.runtime_path: - return None - return os.path.join(self.runtime_path, SERVICE_CONFIG_FILE_NAME) - - @property - def log_file(self) -> Optional[str]: - """Get the path to the log file where the service output is/has been logged. - - Returns: - The path to the log file, or None, if the service has never been - started before. - """ - if not self.runtime_path: - return None - return os.path.join(self.runtime_path, SERVICE_LOG_FILE_NAME) - - -class TerraformService(BaseService): - """A service represented by a set of resources deployed using a terraform recipe. - - This class extends the base service class with functionality concerning - the life-cycle management and tracking of external services managed using - terraform recipes. - - - Attributes: - config: service configuration - status: service status - """ - - config: TerraformServiceConfig - status: TerraformServiceStatus = Field( - default_factory=TerraformServiceStatus - ) - - _terraform_client: Optional[python_terraform.Terraform] = None - - @property - def terraform_client(self) -> python_terraform.Terraform: - """Initialize and/or return the terraform client. - - Returns: - The terraform client. 
- """ - if self._terraform_client is None: - working_dir = self.config.directory_path - if self.config.copy_terraform_files: - assert self.status.runtime_path is not None - working_dir = self.status.runtime_path - self._terraform_client = python_terraform.Terraform( - working_dir=working_dir, - ) - return self._terraform_client - - def check_status(self) -> Tuple[ServiceState, str]: - """Check the the current operational state of the external service. - - If the final output name provided in the config exists as a non-null value, - then it's reasonable to assume that the service is up and running. - - Returns: - The operational state of the external service and a message - providing additional information about that state (e.g. a - description of the error if one is encountered while checking the - service status). - """ - code, out, err = self.terraform_client.plan( - detailed_exitcode=True, - refresh=False, - var=self.get_vars(), - input=False, - raise_on_error=False, - ) - - if code == 0: - return (ServiceState.ACTIVE, "The deployment is active.") - elif code == 2: - return ( - ServiceState.INACTIVE, - "The deployment isn't active or needs an update.", - ) - else: - return (ServiceState.ERROR, f"Deployment error: \n{err}") - - def _update_service_config(self) -> None: - """Update the service configuration file. - - This function is called after the service has been started, to update - the service configuration file with the runtime path of the service. - """ - # write the service information in the service config file - assert self.status.config_file is not None - - with open(self.status.config_file, "w") as f: - f.write(self.model_dump_json(indent=4)) - - def _write_vars_to_file(self, vars: Dict[str, Any]) -> None: - """Write variables to the variables file. - - Args: - vars: The variables to write to the file. - """ - import json - - path = self.terraform_client.working_dir - variables_file_path = os.path.join( - path, self.config.variables_file_path - ) - with open(variables_file_path, "w") as f: - json.dump(vars, f) - - def _init_and_apply(self) -> None: - """Function to call terraform init and terraform apply. - - The init call is not repeated if any successful execution has - happened already, to save time. - - Raises: - RuntimeError: if init or apply function fails. - """ - self._update_service_config() - - # this directory gets created after a successful init - previous_run_dir = os.path.join( - self.terraform_client.working_dir, ".ignoreme" - ) - if fileio.exists(previous_run_dir): - logger.info( - "Terraform already initialized, " - "terraform init will not be executed." - ) - else: - ret_code, _, _ = self.terraform_client.init(capture_output=False) - if ret_code != 0: - raise RuntimeError("The command 'terraform init' failed.") - fileio.mkdir(previous_run_dir) - - # get variables from the recipe as a python dictionary - vars = self.get_vars() - - # once init is successful, call terraform apply - self.terraform_client.apply( - var=vars, - input=False, - capture_output=False, - raise_on_error=True, - refresh=False, - ) - - # write variables to the variable file after execution is successful - self._write_vars_to_file(vars) - - def get_vars(self) -> Dict[str, Any]: - """Get variables as a dictionary from values.tfvars.json. - - Returns: - A dictionary of variables to use for the stack recipes - derived from the tfvars.json file. - - Raises: - FileNotFoundError: if the values.tfvars.json file is not - found in the stack recipe. 
- TypeError: if the file doesn't contain a dictionary of variables. - """ - import json - - path = self.terraform_client.working_dir - variables_file_path = os.path.join( - path, self.config.variables_file_path - ) - if not fileio.exists(variables_file_path): - raise FileNotFoundError( - "The file values.tfvars.json was not found in the " - f"recipe's directory at {variables_file_path}. Please " - "verify if it exists." - ) - - # read values into a dict and return - with fileio.open(variables_file_path, "r") as f: - variables = json.load(f) - if not isinstance(variables, dict): - raise TypeError( - "The values.tfvars.json file must contain a dictionary " - "of variables." - ) - return variables - - def _destroy(self) -> None: - """Function to call terraform destroy on the given path.""" - # get variables from the recipe as a python dictionary - vars = self.get_vars() - - self.terraform_client.destroy( - var=vars, - capture_output=False, - raise_on_error=True, - force=python_terraform.IsNotFlagged, - refresh=False, - ) - - # set empty vars to the file - - def _setup_runtime_path(self) -> None: - """Set up the runtime path for the service. - - This method sets up the runtime path for the service. - """ - # reuse the config file and logfile location from a previous run, - # if available - copy_terraform_files = True - if not self.status.runtime_path or not os.path.exists( - self.status.runtime_path - ): - if self.config.root_runtime_path: - if self.config.singleton: - self.status.runtime_path = self.config.root_runtime_path - else: - self.status.runtime_path = os.path.join( - self.config.root_runtime_path, - str(self.uuid), - ) - if fileio.isdir(self.status.runtime_path): - copy_terraform_files = False - else: - create_dir_recursive_if_not_exists( - str(self.status.runtime_path) - ) - else: - self.status.runtime_path = tempfile.mkdtemp( - prefix="zenml-service-" - ) - - if copy_terraform_files and self.config.copy_terraform_files: - copy_dir( - self.config.directory_path, - self.status.runtime_path, - ) - - def provision(self) -> None: - """Provision the service.""" - self._setup_runtime_path() - self.check_installation() - self._set_log_level() - self._init_and_apply() - - def deprovision(self, force: bool = False) -> None: - """Deprovision the service. - - Args: - force: if True, the service will be deprovisioned even if it is - in a failed state. - """ - self.check_installation() - self._set_log_level() - self._destroy() - # in case of singleton services, this will remove the config - # path as a whole and otherwise, this removes the specific UUID - # directory - assert self.status.config_file is not None - shutil.rmtree(Path(self.status.config_file).parent) - - # overwriting the start/stop function to remove the progress indicator - # having which doesn't allow tf logs to be shown in stdout - def start(self, timeout: int = 0) -> None: - """Start the service and optionally wait for it to become active. - - Args: - timeout: amount of time to wait for the service to become active. - If set to 0, the method will return immediately after checking - the service status. - """ - self.admin_state = ServiceState.ACTIVE - self.provision() - - def stop(self, timeout: int = 0, force: bool = False) -> None: - """Stop the service and optionally wait for it to shutdown. - - Args: - timeout: amount of time to wait for the service to shutdown. - If set to 0, the method will return immediately after checking - the service status. - force: if True, the service will be forcefully stopped. 
- """ - self.admin_state = ServiceState.INACTIVE - self.deprovision() - - def get_logs( - self, follow: bool = False, tail: Optional[int] = None - ) -> Generator[str, bool, None]: - """Retrieve the service logs. - - Args: - follow: if True, the logs will be streamed as they are written - tail: only retrieve the last NUM lines of log output. - - Raises: - NotImplementedError: not implemented. - """ - raise NotImplementedError( - "This method is not available for Terraform services." - ) - - def get_outputs(self, output: Optional[str] = None) -> Dict[str, Any]: - """Get outputs from the terraform state. - - Args: - output: if specified, only the output with the given name will be - returned. Otherwise, all outputs will be returned. - - Returns: - A dictionary of outputs from the terraform state. - """ - if output: - # if output is specified, then full_outputs is just a string - full_outputs = self.terraform_client.output( - output, full_value=True - ) - return {output: full_outputs} - else: - # get value of the "value" key in the value of full_outputs - # and assign it to the key in the output dict - full_outputs = self.terraform_client.output(full_value=True) - outputs = {k: v["value"] for k, v in full_outputs.items()} - return outputs - - def check_installation(self) -> None: - """Checks if necessary tools are installed on the host system. - - Raises: - RuntimeError: if any required tool is not installed. - """ - if not self._is_terraform_installed(): - raise RuntimeError( - "Terraform is required for stack recipes to run and was not " - "found installed on your machine or not available on " - "your $PATH. Please visit " - "https://learn.hashicorp.com/tutorials/terraform/install-cli " - "to install it." - ) - - def _is_terraform_installed(self) -> bool: - """Checks if terraform is installed on the host system. - - Returns: - True if terraform is installed, false otherwise. - """ - # check terraform version to verify installation. - try: - self.terraform_client.cmd("-version") - except FileNotFoundError: - return False - - return True - - def _set_log_level(self) -> None: - """Set TF_LOG env var to the log_level provided by the user.""" - os.environ["TF_LOG"] = self.config.log_level diff --git a/src/zenml/utils/mlstacks_utils.py b/src/zenml/utils/mlstacks_utils.py deleted file mode 100644 index 2ae6c735439..00000000000 --- a/src/zenml/utils/mlstacks_utils.py +++ /dev/null @@ -1,635 +0,0 @@ -# Copyright (c) ZenML GmbH 2023. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: - -# https://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing -# permissions and limitations under the License. 
-"""Functionality to handle interaction with the mlstacks package.""" - -import json -import os -from pathlib import Path -from typing import ( - TYPE_CHECKING, - Any, - Dict, - List, - Optional, - Tuple, - Union, -) -from uuid import UUID - -import click - -from zenml.client import Client -from zenml.constants import ( - MLSTACKS_SUPPORTED_STACK_COMPONENTS, -) -from zenml.enums import StackComponentType -from zenml.utils.dashboard_utils import get_component_url, get_stack_url -from zenml.utils.yaml_utils import read_yaml - -if TYPE_CHECKING: - from mlstacks.models import Component, Stack - - -def stack_exists(stack_name: str) -> bool: - """Checks whether a stack with that name exists or not. - - Args: - stack_name: The name of the stack to check for. - - Returns: - A boolean indicating whether the stack exists or not. - """ - try: - Client().get_stack( - name_id_or_prefix=stack_name, allow_name_prefix_match=False - ) - except KeyError: - return False - return True - - -def get_stack_spec_file_path(stack_name: str) -> str: - """Gets the path to the stack spec file for the given stack name. - - Args: - stack_name: The name of the stack spec to get the path for. - - Returns: - The path to the stack spec file for the given stack name. - - Raises: - KeyError: If the stack does not exist. - """ - try: - stack = Client().get_stack( - name_id_or_prefix=stack_name, allow_name_prefix_match=False - ) - except KeyError as e: - raise e - return stack.stack_spec_path or "" - - -def stack_spec_exists(stack_name: str) -> bool: - """Checks whether a stack spec with that name exists or not. - - Args: - stack_name: The name of the stack spec to check for. - - Returns: - A boolean indicating whether the stack spec exists or not. - """ - from zenml.cli.utils import verify_mlstacks_prerequisites_installation - - verify_mlstacks_prerequisites_installation() - from mlstacks.constants import MLSTACKS_PACKAGE_NAME - - spec_dir = os.path.join( - click.get_app_dir(MLSTACKS_PACKAGE_NAME), "stack_specs", stack_name - ) - return Path(spec_dir).exists() - - -def _get_component_flavor( - key: str, value: Union[bool, str], provider: str -) -> str: - """Constructs the component flavor from Click CLI params. - - Args: - key: The component key. - value: The component value. - provider: The provider name. - - Returns: - The component flavor. - """ - if key in {"artifact_store"} and bool(value): - if provider == "aws": - flavor = "s3" - elif provider == "azure": - flavor = "azure" - elif provider == "gcp": - flavor = "gcp" - elif provider == "k3d": - flavor = "minio" - elif key in {"container_registry"} and bool(value): - if provider == "aws": - flavor = "aws" - elif provider == "azure": - flavor = "azure" - elif provider == "gcp": - flavor = "gcp" - elif ( - key - in { - "experiment_tracker", - "orchestrator", - "model_deployer", - "model_registry", - "mlops_platform", - "step_operator", - } - and value - and isinstance(value, str) - ): - flavor = value - return flavor - - -def _add_extra_config_to_components( - components: List["Component"], extra_config: Dict[str, str] -) -> List["Component"]: - """Adds extra config to mlstacks `Component` objects. - - Args: - components: A list of mlstacks `Component` objects. - extra_config: A dictionary of extra config. - - Returns: - A list of mlstacks `Component` objects. - - Raises: - KeyError: If the component type is not supported. 
- """ - from zenml.cli.utils import verify_mlstacks_prerequisites_installation - - verify_mlstacks_prerequisites_installation() - from mlstacks.models.component import ComponentMetadata - - # Define configuration map - config_map = { - ("container_registry",): ["repo_name"], - ("artifact_store",): ["bucket_name"], - ("experiment_tracker", "mlflow"): [ - "mlflow-artifact-S3-access-key", - "mlflow-artifact-S3-secret-key", - "mlflow-username", - "mlflow-password", - "mlflow_bucket", - ], - ("mlops_platform", "zenml"): [ - "zenml-version", - "zenml-username", - "zenml-password", - "zenml-database-url", - ], - ("artifact_store", "minio", "k3d"): [ - "zenml-minio-store-access-key", - "zenml-minio-store-secret-key", - ], - ("experiment_tracker", "mlflow", "k3d"): [ - "mlflow_minio_bucket", - "mlflow-username", - "mlflow-password", - ], - ("model_deployer", "seldon", "k3d"): ["seldon-secret-name"], - } - - def _add_config( - component_metadata: ComponentMetadata, keys: List[str] - ) -> None: - """Adds key-value pair to the component_metadata config if it exists in extra_config. - - Args: - component_metadata: The metadata of the component. - keys: The keys of the configurations. - """ - for key in keys: - value = extra_config.get(key) - if value is not None: - component_metadata.config[key] = value - - # Define a list of component attributes that need checking - component_attributes = ["component_type", "component_flavor", "provider"] - - for component in components: - component_metadata = ComponentMetadata() - component_metadata.config = {} - - # Create a dictionary that maps attribute names to their values - component_values = { - attr: getattr(component, attr, None) - for attr in component_attributes - } - - for config_keys, extra_keys in config_map.items(): - # If all attributes in config_keys match their values in the component - if all( - component_values.get(key) == value - for key, value in zip(component_attributes, config_keys) - ): - _add_config(component_metadata, extra_keys) - - # always add project_id to gcp components - if component.provider == "gcp": - project_id = extra_config.get("project_id") - if project_id: - component_metadata.config["project_id"] = extra_config.get( - "project_id" - ) - else: - raise KeyError( - "No `project_id` is included. Please try again with " - "`--extra-config project_id=`" - ) - - component.metadata = component_metadata - - return components - - -def _validate_extra_config(config: Tuple[str]) -> bool: - """Validates that extra config values are correct. - - Args: - config: A tuple of extra config values. - - Returns: - A boolean indicating whether the extra config values are correct. - """ - return all(item.count("=") <= 1 for item in config) - - -def _construct_components( - params: Dict[str, Any], zenml_component_deploy: bool = False -) -> List["Component"]: - """Constructs mlstacks `Component` objects from raw Click CLI params. - - Args: - params: Raw Click CLI params. - zenml_component_deploy: A boolean indicating whether the stack - contains a single component and is being deployed through `zenml - component deploy` - - Returns: - A list of mlstacks `Component` objects. - - Raises: - ValueError: If one of the extra_config values is invalid. 
- """ - from zenml.cli.utils import verify_mlstacks_prerequisites_installation - - verify_mlstacks_prerequisites_installation() - from mlstacks.models import Component - - provider = params["provider"] - if not params.get("extra_config"): - params["extra_config"] = () - if not _validate_extra_config(params["extra_config"]): - raise ValueError( - "One of the `extra_config` values is invalid. You passed in " - f"{params['extra_config']}) in which one of the values includes " - "multiple '=' signs. Please fix and try again." - ) - extra_config = ( - dict(config.split("=") for config in params["extra_config"]) - if params.get("extra_config") - else {} - ) - if zenml_component_deploy: - components = [ - Component( - name=params[key], - component_type=key, - component_flavor=_get_component_flavor(key, value, provider), - provider=params["provider"], - ) - for key, value in params.items() - if (value) and (key in MLSTACKS_SUPPORTED_STACK_COMPONENTS) - ] - else: - components = [ - Component( - name=f"{provider}-{key}", - component_type=key, - component_flavor=_get_component_flavor(key, value, provider), - provider=params["provider"], - ) - for key, value in params.items() - if (value) and (key in MLSTACKS_SUPPORTED_STACK_COMPONENTS) - ] - - components = _add_extra_config_to_components(components, extra_config) - return components - - -def _get_stack_tags(tags: Dict[str, str]) -> Dict[str, str]: - """Gets and parses tags from Click params. - - Args: - tags: Raw Click CLI params. - - Returns: - A dictionary of tags. - """ - return dict(tag.split("=") for tag in tags) if tags else {} - - -def _construct_base_stack(params: Dict[str, Any]) -> "Stack": - """Constructs mlstacks `Stack` object from raw Click CLI params. - - Components are added to the `Stack` object subsequently. - - Args: - params: Raw Click CLI params. - - Returns: - A mlstacks `Stack` object. - """ - from zenml.cli.utils import verify_mlstacks_prerequisites_installation - - verify_mlstacks_prerequisites_installation() - from mlstacks.models import Stack - - tags = _get_stack_tags(params["tags"]) - - return Stack( - spec_version=1, - spec_type="stack", - name=params["stack_name"], - provider=params["provider"], - default_region=params["region"], - default_tags=tags, - components=[], - ) - - -def convert_click_params_to_mlstacks_primitives( - params: Dict[str, Any], - zenml_component_deploy: bool = False, -) -> Tuple["Stack", List["Component"]]: - """Converts raw Click CLI params to mlstacks primitives. - - Args: - params: Raw Click CLI params. - zenml_component_deploy: A boolean indicating whether the stack - contains a single component and is being deployed through `zenml - component deploy` - - Returns: - A tuple of Stack and List[Component] objects. 
- """ - from zenml.cli.utils import verify_mlstacks_prerequisites_installation - - verify_mlstacks_prerequisites_installation() - from mlstacks.constants import MLSTACKS_PACKAGE_NAME - from mlstacks.models import Component, Stack - - stack: Stack = _construct_base_stack(params) - components: List[Component] = _construct_components( - params, zenml_component_deploy - ) - - # writes the file names to the stack spec - # using format '-.yaml' - for component in components: - stack.components.append( - os.path.join( - click.get_app_dir(MLSTACKS_PACKAGE_NAME), - "stack_specs", - stack.name, - f"{component.name}.yaml", - ) - ) - - return stack, components - - -def convert_mlstacks_primitives_to_dicts( - stack: "Stack", components: List["Component"] -) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]: - """Converts mlstacks Stack and Components to dicts. - - Args: - stack: A mlstacks Stack object. - components: A list of mlstacks Component objects. - - Returns: - A tuple of Stack and List[Component] dicts. - """ - from zenml.cli.utils import verify_mlstacks_prerequisites_installation - - verify_mlstacks_prerequisites_installation() - - # convert to json first to strip out Enums objects - stack_dict = json.loads(stack.model_dump_json()) - components_dicts = [ - json.loads(component.model_dump_json()) for component in components - ] - - return stack_dict, components_dicts - - -def _setup_import( - provider: str, - stack_name: str, - stack_spec_dir: str, - user_stack_spec_file: Optional[str] = None, -) -> Tuple[Dict[str, Any], str]: - """Sets up the environment for importing a new stack or component. - - Args: - provider: The cloud provider for which the stack or component - is deployed. - stack_name: The name of the stack to import. - stack_spec_dir: The path to the directory containing the stack spec. - user_stack_spec_file: The path to the user-created stack spec file. - - Returns: - A tuple containing the parsed YAML data and the stack spec file path. - """ - from zenml.cli.utils import verify_mlstacks_prerequisites_installation - - verify_mlstacks_prerequisites_installation() - - from mlstacks.constants import MLSTACKS_PACKAGE_NAME - from mlstacks.utils import terraform_utils - - tf_dir = os.path.join( - click.get_app_dir(MLSTACKS_PACKAGE_NAME), - "terraform", - f"{provider}-modular", - ) - stack_spec_file = user_stack_spec_file or os.path.join( - stack_spec_dir, f"stack-{stack_name}.yaml" - ) - stack_filename = terraform_utils.get_stack_outputs( - stack_spec_file, output_key="stack-yaml-path" - ).get("stack-yaml-path")[2:] - import_stack_path = os.path.join(tf_dir, stack_filename) - data = read_yaml(import_stack_path) - - return data, stack_spec_file - - -def _import_components( - data: Dict[str, Any], - stack_spec_dir: str, - component_name: Optional[str] = None, -) -> Dict[StackComponentType, UUID]: - """Imports components based on the provided data. - - Args: - data: The parsed YAML data containing component details. - stack_spec_dir: The path to the directory containing the stack spec. - component_name: The name of the component to import (if any). - - Returns: - A dictionary mapping component types to their respective IDs. 
- """ - component_ids = {} - for component_type_str, component_config in data["components"].items(): - component_type = StackComponentType(component_type_str) - component_spec_path = os.path.join( - stack_spec_dir, - f"{component_name or component_config['flavor']}-{component_type_str}.yaml", - ) - if component_name: - component_config["name"] = component_name - - from zenml.cli.stack import _import_stack_component - - component_id = _import_stack_component( - component_type=component_type, - component_dict=component_config, - component_spec_path=component_spec_path, - ) - component_ids[component_type] = component_id - - return component_ids - - -def import_new_mlstacks_stack( - stack_name: str, - provider: str, - stack_spec_dir: str, - user_stack_spec_file: Optional[str] = None, -) -> None: - """Import a new stack deployed for a particular cloud provider. - - Args: - stack_name: The name of the stack to import. - provider: The cloud provider for which the stack is deployed. - stack_spec_dir: The path to the directory containing the stack spec. - user_stack_spec_file: The path to the user-created stack spec file. - """ - data, stack_spec_file = _setup_import( - provider, stack_name, stack_spec_dir, user_stack_spec_file - ) - component_ids = _import_components( - data=data, stack_spec_dir=stack_spec_dir - ) - - imported_stack = Client().create_stack( - name=stack_name, - components=component_ids, - stack_spec_file=stack_spec_file, - ) - - from zenml.cli.utils import print_model_url - - print_model_url(get_stack_url(imported_stack)) - - -def import_new_mlstacks_component( - stack_name: str, component_name: str, provider: str, stack_spec_dir: str -) -> None: - """Import a new component deployed for a particular cloud provider. - - Args: - stack_name: The name of the stack to import. - component_name: The name of the component to import. - provider: The cloud provider for which the stack is deployed. - stack_spec_dir: The path to the directory containing the stack spec. - """ - data, _ = _setup_import(provider, stack_name, stack_spec_dir) - component_ids = _import_components( - data, stack_spec_dir, component_name=component_name - ) - component_type = list(component_ids.keys())[ - 0 - ] # Assuming only one component is imported - component = Client().get_stack_component( - component_type, component_ids[component_type] - ) - - from zenml.cli.utils import print_model_url - - print_model_url(get_component_url(component)) - - -def verify_spec_and_tf_files_exist( - spec_file_path: str, tf_file_path: str -) -> None: - """Checks whether both the spec and tf files exist. - - Args: - spec_file_path: The path to the spec file. - tf_file_path: The path to the tf file. - """ - from zenml.cli.utils import error - - if not Path(spec_file_path).exists(): - error(f"Could not find the Stack spec file at {spec_file_path}.") - elif not Path(tf_file_path).exists(): - error( - f"Could not find the Terraform files for the stack " - f"at {tf_file_path}." - ) - - -def deploy_mlstacks_stack( - spec_file_path: str, - stack_name: str, - stack_provider: str, - debug_mode: bool = False, - no_import_stack_flag: bool = False, - user_created_spec: bool = False, -) -> None: - """Deploys an MLStacks stack given a spec file path. - - Args: - spec_file_path: The path to the spec file. - stack_name: The name of the stack. - stack_provider: The cloud provider for which the stack is deployed. - debug_mode: A boolean indicating whether to run in debug mode. 
- no_import_stack_flag: A boolean indicating whether to import the stack - into ZenML. - user_created_spec: A boolean indicating whether the user created the - spec file. - """ - from zenml.cli.utils import ( - declare, - verify_mlstacks_prerequisites_installation, - ) - - verify_mlstacks_prerequisites_installation() - from mlstacks.constants import MLSTACKS_PACKAGE_NAME - from mlstacks.utils import terraform_utils - - spec_dir = os.path.join( - click.get_app_dir(MLSTACKS_PACKAGE_NAME), "stack_specs", stack_name - ) - declare("Deploying stack using Terraform...") - terraform_utils.deploy_stack(spec_file_path, debug_mode=debug_mode) - declare("Stack successfully deployed.") - - if not no_import_stack_flag: - declare(f"Importing stack '{stack_name}' into ZenML..") - import_new_mlstacks_stack( - stack_name=stack_name, - provider=stack_provider, - stack_spec_dir=spec_dir, - user_stack_spec_file=spec_file_path if user_created_spec else None, - ) - declare("Stack successfully imported into ZenML.") diff --git a/src/zenml/utils/terraform_utils.py b/src/zenml/utils/terraform_utils.py deleted file mode 100644 index 19a0d872799..00000000000 --- a/src/zenml/utils/terraform_utils.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) ZenML GmbH 2023. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing -# permissions and limitations under the License. -"""Terraform utilities.""" - -import zenml - - -def verify_terraform_installation() -> None: - """Verifies the Terraform installation. - - Raises: - RuntimeError: If Terraform is not installed or ZenML was installed - without the `terraform` extra. - """ - try: - import python_terraform - except ImportError: - raise RuntimeError( - "Missing Terraform python library. This is probably because ZenML " - "was installed without the optional `terraform` extra. To install " - "the missing dependencies, run: \n\n" - f'`pip install "zenml[terraform]=={zenml.__version__}"`.' - ) - - try: - python_terraform.Terraform().cmd("-version") - except FileNotFoundError: - raise RuntimeError( - "Missing Terraform installation. Check out " - "https://developer.hashicorp.com/terraform/downloads for how to " - "install it on your operating system." - ) diff --git a/src/zenml/zen_server/deploy/__init__.py b/src/zenml/zen_server/deploy/__init__.py index f012cd2f1cc..a753e953129 100644 --- a/src/zenml/zen_server/deploy/__init__.py +++ b/src/zenml/zen_server/deploy/__init__.py @@ -16,15 +16,6 @@ # DO NOT REMOVE THESE IMPORTS. They are needed so the ZenML server deployment # providers get registered. 
from zenml.zen_server.deploy import docker, local # noqa - -try: - from zenml.zen_server.deploy import terraform # noqa -except ImportError: - # If ZenML is installed without the `terraform` extra, all terraform based - # providers won't be available as the `python_terraform` library is not - # installed - pass - from zenml.zen_server.deploy.deployer import ServerDeployer from zenml.zen_server.deploy.deployment import ( ServerDeployment, diff --git a/src/zenml/zen_server/deploy/terraform/__init__.py b/src/zenml/zen_server/deploy/terraform/__init__.py deleted file mode 100644 index b1b2f5cd873..00000000000 --- a/src/zenml/zen_server/deploy/terraform/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) ZenML GmbH 2022. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing -# permissions and limitations under the License. -"""ZenML Server Terraform Deployment.""" - - -from zenml.zen_server.deploy.terraform.providers.aws_provider import ( - AWSServerDeploymentConfig, - AWSServerProvider, -) -from zenml.zen_server.deploy.terraform.providers.azure_provider import ( - AzureServerDeploymentConfig, - AzureServerProvider, -) -from zenml.zen_server.deploy.terraform.providers.gcp_provider import ( - GCPServerDeploymentConfig, - GCPServerProvider, -) -from zenml.zen_server.deploy.terraform.providers.terraform_provider import ( - TerraformServerProvider, -) - -__all__ = [ - "TerraformServerProvider", - "AWSServerProvider", - "AWSServerDeploymentConfig", - "GCPServerProvider", - "GCPServerDeploymentConfig", - "AzureServerProvider", - "AzureServerDeploymentConfig", -] diff --git a/src/zenml/zen_server/deploy/terraform/providers/__init__.py b/src/zenml/zen_server/deploy/terraform/providers/__init__.py deleted file mode 100644 index 943987e9bf1..00000000000 --- a/src/zenml/zen_server/deploy/terraform/providers/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) ZenML GmbH 2022. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing -# permissions and limitations under the License. -"""ZenML Server Terraform Providers.""" diff --git a/src/zenml/zen_server/deploy/terraform/providers/aws_provider.py b/src/zenml/zen_server/deploy/terraform/providers/aws_provider.py deleted file mode 100644 index fb92c3157d3..00000000000 --- a/src/zenml/zen_server/deploy/terraform/providers/aws_provider.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) ZenML GmbH 2022. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at: -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing -# permissions and limitations under the License. -"""Zen Server AWS Terraform deployer implementation.""" - -from typing import ClassVar, Type - -from zenml.enums import ServerProviderType -from zenml.logger import get_logger -from zenml.zen_server.deploy.terraform.providers.terraform_provider import ( - TerraformServerProvider, -) -from zenml.zen_server.deploy.terraform.terraform_zen_server import ( - TerraformServerDeploymentConfig, -) - -logger = get_logger(__name__) - - -class AWSServerDeploymentConfig(TerraformServerDeploymentConfig): - """AWS server deployment configuration. - - Attributes: - region: The AWS region to deploy to. - rds_name: The name of the RDS instance to create - db_name: Name of RDS database to create. - db_type: Type of RDS database to create. - db_version: Version of RDS database to create. - db_instance_class: Instance class of RDS database to create. - db_allocated_storage: Allocated storage of RDS database to create. - """ - - region: str = "eu-west-1" - rds_name: str = "zenmlserver" - db_name: str = "zenmlserver" - db_type: str = "mysql" - db_version: str = "5.7.38" - db_instance_class: str = "db.t3.micro" - db_allocated_storage: int = 5 - - -class AWSServerProvider(TerraformServerProvider): - """AWS ZenML server provider.""" - - TYPE: ClassVar[ServerProviderType] = ServerProviderType.AWS - CONFIG_TYPE: ClassVar[Type[TerraformServerDeploymentConfig]] = ( - AWSServerDeploymentConfig - ) - - -AWSServerProvider.register_as_provider() diff --git a/src/zenml/zen_server/deploy/terraform/providers/azure_provider.py b/src/zenml/zen_server/deploy/terraform/providers/azure_provider.py deleted file mode 100644 index 441f0512a59..00000000000 --- a/src/zenml/zen_server/deploy/terraform/providers/azure_provider.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) ZenML GmbH 2022. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing -# permissions and limitations under the License. -"""Zen Server Azure Terraform deployer implementation.""" - -from typing import ClassVar, Type - -from zenml.enums import ServerProviderType -from zenml.logger import get_logger -from zenml.zen_server.deploy.terraform.providers.terraform_provider import ( - TerraformServerProvider, -) -from zenml.zen_server.deploy.terraform.terraform_zen_server import ( - TerraformServerDeploymentConfig, -) - -logger = get_logger(__name__) - - -class AzureServerDeploymentConfig(TerraformServerDeploymentConfig): - """Azure server deployment configuration. - - Attributes: - resource_group: The Azure resource_group to deploy to. - db_instance_name: The name of the Flexible MySQL instance to create - db_name: Name of RDS database to create. - db_version: Version of MySQL database to create. 
- db_sku_name: The sku_name for the database resource. - db_disk_size: Allocated storage of MySQL database to create. - """ - - resource_group: str = "zenml" - db_instance_name: str = "zenmlserver" - db_name: str = "zenmlserver" - db_version: str = "5.7" - db_sku_name: str = "B_Standard_B1s" - db_disk_size: int = 20 - - -class AzureServerProvider(TerraformServerProvider): - """Azure ZenML server provider.""" - - TYPE: ClassVar[ServerProviderType] = ServerProviderType.AZURE - CONFIG_TYPE: ClassVar[Type[TerraformServerDeploymentConfig]] = ( - AzureServerDeploymentConfig - ) - - -AzureServerProvider.register_as_provider() diff --git a/src/zenml/zen_server/deploy/terraform/providers/gcp_provider.py b/src/zenml/zen_server/deploy/terraform/providers/gcp_provider.py deleted file mode 100644 index 862966cf25d..00000000000 --- a/src/zenml/zen_server/deploy/terraform/providers/gcp_provider.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) ZenML GmbH 2022. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing -# permissions and limitations under the License. -"""Zen Server GCP Terraform deployer implementation.""" - -from typing import ClassVar, Type - -from zenml.enums import ServerProviderType -from zenml.logger import get_logger -from zenml.zen_server.deploy.terraform.providers.terraform_provider import ( - TerraformServerProvider, -) -from zenml.zen_server.deploy.terraform.terraform_zen_server import ( - TerraformServerDeploymentConfig, -) - -logger = get_logger(__name__) - - -class GCPServerDeploymentConfig(TerraformServerDeploymentConfig): - """GCP server deployment configuration. - - Attributes: - project_id: The project in GCP to deploy the server to. - region: The GCP region to deploy to. - cloudsql_name: The name of the CloudSQL instance to create - db_name: Name of CloudSQL database to create. - db_instance_tier: Instance class of CloudSQL database to create. - db_disk_size: Allocated storage of CloudSQL database to create. - """ - - project_id: str - region: str = "europe-west3" - cloudsql_name: str = "zenmlserver" - db_name: str = "zenmlserver" - db_instance_tier: str = "db-n1-standard-1" - db_disk_size: int = 10 - - -class GCPServerProvider(TerraformServerProvider): - """GCP ZenML server provider.""" - - TYPE: ClassVar[ServerProviderType] = ServerProviderType.GCP - CONFIG_TYPE: ClassVar[Type[TerraformServerDeploymentConfig]] = ( - GCPServerDeploymentConfig - ) - - -GCPServerProvider.register_as_provider() diff --git a/src/zenml/zen_server/deploy/terraform/providers/terraform_provider.py b/src/zenml/zen_server/deploy/terraform/providers/terraform_provider.py deleted file mode 100644 index 7f25e4fb87d..00000000000 --- a/src/zenml/zen_server/deploy/terraform/providers/terraform_provider.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright (c) ZenML GmbH 2022. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at: -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing -# permissions and limitations under the License. -"""Zen Server terraform deployer implementation.""" - -import os -from typing import ClassVar, List, Optional, Tuple, Type, cast -from uuid import uuid4 - -from zenml.config.global_config import GlobalConfiguration -from zenml.logger import get_logger -from zenml.services import BaseService, ServiceConfig -from zenml.services.service_endpoint import ( - ServiceEndpointConfig, - ServiceEndpointProtocol, -) -from zenml.services.service_monitor import ( - HTTPEndpointHealthMonitorConfig, - ServiceEndpointHealthMonitorConfig, -) -from zenml.zen_server.deploy.base_provider import BaseServerProvider -from zenml.zen_server.deploy.deployment import ( - ServerDeploymentConfig, - ServerDeploymentStatus, -) -from zenml.zen_server.deploy.docker.docker_zen_server import ( - ZEN_SERVER_HEALTHCHECK_URL_PATH, -) -from zenml.zen_server.deploy.terraform.terraform_zen_server import ( - TERRAFORM_VALUES_FILE_PATH, - TERRAFORM_ZENML_SERVER_CONFIG_PATH, - TERRAFORM_ZENML_SERVER_DEFAULT_TIMEOUT, - TERRAFORM_ZENML_SERVER_RECIPE_SUBPATH, - TerraformServerDeploymentConfig, - TerraformZenServer, - TerraformZenServerConfig, -) - -logger = get_logger(__name__) - - -class TerraformServerProvider(BaseServerProvider): - """Terraform ZenML server provider.""" - - CONFIG_TYPE: ClassVar[Type[ServerDeploymentConfig]] = ( - TerraformServerDeploymentConfig - ) - - @staticmethod - def _get_server_recipe_root_path() -> str: - """Get the server recipe root path. - - The Terraform recipe files for all terraform server providers are - located in a folder relative to the `zenml.zen_server.deploy.terraform` - Python module. - - Returns: - The server recipe root path. - """ - import zenml.zen_server.deploy.terraform as terraform_module - - root_path = os.path.join( - os.path.dirname(terraform_module.__file__), - TERRAFORM_ZENML_SERVER_RECIPE_SUBPATH, - ) - return root_path - - @classmethod - def _get_service_configuration( - cls, - server_config: ServerDeploymentConfig, - ) -> Tuple[ - ServiceConfig, - ServiceEndpointConfig, - ServiceEndpointHealthMonitorConfig, - ]: - """Construct the service configuration from a server deployment configuration. - - Args: - server_config: server deployment configuration. - - Returns: - The service configuration. - """ - assert isinstance(server_config, TerraformServerDeploymentConfig) - - return ( - TerraformZenServerConfig( - name=server_config.name, - root_runtime_path=TERRAFORM_ZENML_SERVER_CONFIG_PATH, - singleton=True, - directory_path=os.path.join( - cls._get_server_recipe_root_path(), - server_config.provider, - ), - log_level=server_config.log_level, - variables_file_path=TERRAFORM_VALUES_FILE_PATH, - server=server_config, - ), - ServiceEndpointConfig( - protocol=ServiceEndpointProtocol.HTTP, - allocate_port=False, - ), - HTTPEndpointHealthMonitorConfig( - healthcheck_uri_path=ZEN_SERVER_HEALTHCHECK_URL_PATH, - use_head_request=True, - ), - ) - - def _create_service( - self, - config: ServerDeploymentConfig, - timeout: Optional[int] = None, - ) -> BaseService: - """Create, start and return the terraform ZenML server deployment service. 
- - Args: - config: The server deployment configuration. - timeout: The timeout in seconds to wait until the service is - running. - - Returns: - The service instance. - - Raises: - RuntimeError: If a terraform service is already running. - """ - assert isinstance(config, TerraformServerDeploymentConfig) - - if timeout is None: - timeout = TERRAFORM_ZENML_SERVER_DEFAULT_TIMEOUT - - existing_service = TerraformZenServer.get_service() - if existing_service: - raise RuntimeError( - f"A terraform ZenML server with name '{existing_service.config.name}' " - f"is already running. Please stop it first before starting a " - f"new one." - ) - - ( - service_config, - endpoint_cfg, - monitor_cfg, - ) = self._get_service_configuration(config) - - service = TerraformZenServer(uuid=uuid4(), config=service_config) - - service.start(timeout=timeout) - return service - - def _update_service( - self, - service: BaseService, - config: ServerDeploymentConfig, - timeout: Optional[int] = None, - ) -> BaseService: - """Update the terraform ZenML server deployment service. - - Args: - service: The service instance. - config: The new server deployment configuration. - timeout: The timeout in seconds to wait until the updated service is - running. - - Returns: - The updated service instance. - """ - if timeout is None: - timeout = TERRAFORM_ZENML_SERVER_DEFAULT_TIMEOUT - - ( - new_config, - endpoint_cfg, - monitor_cfg, - ) = self._get_service_configuration(config) - - assert isinstance(new_config, TerraformZenServerConfig) - assert isinstance(service, TerraformZenServer) - - # preserve the server ID across updates - service.config = new_config - service.start(timeout=timeout) - - return service - - def _start_service( - self, - service: BaseService, - timeout: Optional[int] = None, - ) -> BaseService: - """Start the terraform ZenML server deployment service. - - Args: - service: The service instance. - timeout: The timeout in seconds to wait until the service is - running. - - Returns: - The updated service instance. - """ - if timeout is None: - timeout = TERRAFORM_ZENML_SERVER_DEFAULT_TIMEOUT - - service.start(timeout=timeout) - return service - - def _stop_service( - self, - service: BaseService, - timeout: Optional[int] = None, - ) -> BaseService: - """Stop the terraform ZenML server deployment service. - - Args: - service: The service instance. - timeout: The timeout in seconds to wait until the service is - stopped. - - Returns: - The updated service instance. - """ - if timeout is None: - timeout = TERRAFORM_ZENML_SERVER_DEFAULT_TIMEOUT - - service.stop(timeout=timeout) - return service - - def _delete_service( - self, - service: BaseService, - timeout: Optional[int] = None, - ) -> None: - """Remove the terraform ZenML server deployment service. - - Args: - service: The service instance. - timeout: The timeout in seconds to wait until the service is - removed. - """ - assert isinstance(service, TerraformZenServer) - - if timeout is None: - timeout = TERRAFORM_ZENML_SERVER_DEFAULT_TIMEOUT - - service.stop(timeout) - - def _get_service(self, server_name: str) -> BaseService: - """Get the terraform ZenML server deployment service. - - Args: - server_name: The server deployment name. - - Returns: - The service instance. - - Raises: - KeyError: If the server deployment is not found. 
- """ - service = TerraformZenServer.get_service() - if service is None: - raise KeyError("The terraform ZenML server is not deployed.") - - if service.config.server.name != server_name: - raise KeyError( - "The terraform ZenML server is deployed but with a different name." - ) - return service - - def _list_services(self) -> List[BaseService]: - """Get all service instances for all deployed ZenML servers. - - Returns: - A list of service instances. - """ - service = TerraformZenServer.get_service() - if service: - return [service] - return [] - - def _get_deployment_config( - self, service: BaseService - ) -> ServerDeploymentConfig: - """Recreate the server deployment configuration from a service instance. - - Args: - service: The service instance. - - Returns: - The server deployment configuration. - """ - server = cast(TerraformZenServer, service) - return server.config.server - - def _get_deployment_status( - self, service: BaseService - ) -> ServerDeploymentStatus: - """Get the status of a server deployment from its service. - - Args: - service: The server deployment service. - - Returns: - The status of the server deployment. - """ - gc = GlobalConfiguration() - url: Optional[str] = None - service = cast(TerraformZenServer, service) - ca_crt = None - if service.is_running: - url = service.get_server_url() - ca_crt = service.get_certificate() - connected = url is not None and gc.store_configuration.url == url - - return ServerDeploymentStatus( - url=url, - status=service.status.state, - status_message=service.status.last_error, - connected=connected, - ca_crt=ca_crt, - ) diff --git a/src/zenml/zen_server/deploy/terraform/recipes/aws/.gitignore b/src/zenml/zen_server/deploy/terraform/recipes/aws/.gitignore deleted file mode 100644 index e1640bf11fa..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/aws/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*.exe -*.hcl -*.info -*.backup -*.tfstate -*.pem -kubeconfig_* -.terraform \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/aws/helm.tf b/src/zenml/zen_server/deploy/terraform/recipes/aws/helm.tf deleted file mode 100644 index 10fabdfdc9a..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/aws/helm.tf +++ /dev/null @@ -1,20 +0,0 @@ -# check if the host OS is Linux or Windows -data "external" "os" { - working_dir = path.module - program = ["printf", "{\"os\": \"Linux\"}"] -} -locals { - os = data.external.os.result.os - kubectl_config_path = local.os == "Windows" ? "%USERPROFILE%\\.kube\\config" : "~/.kube/config" -} - -# A default (non-aliased) provider configuration for "helm" -provider "helm" { - kubernetes { - config_path = var.kubectl_config_path == "" ? local.kubectl_config_path : var.kubectl_config_path - } -} - -provider "kubernetes" { - config_path = var.kubectl_config_path == "" ? local.kubectl_config_path : var.kubectl_config_path -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/aws/ingress.tf b/src/zenml/zen_server/deploy/terraform/recipes/aws/ingress.tf deleted file mode 100644 index f56ec3fa266..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/aws/ingress.tf +++ /dev/null @@ -1,30 +0,0 @@ -# set up the nginx ingress controller -resource "kubernetes_namespace" "nginx-ns" { - count = var.create_ingress_controller ? 1 : 0 - metadata { - name = "${var.name}-ingress" - } -} - -resource "helm_release" "nginx-controller" { - name = "zenml" - count = var.create_ingress_controller ? 
1 : 0 - repository = "https://kubernetes.github.io/ingress-nginx" - chart = "ingress-nginx" - # dependency on nginx-ns - namespace = var.create_ingress_controller ? kubernetes_namespace.nginx-ns[0].metadata[0].name : "" - depends_on = [ - resource.kubernetes_namespace.nginx-ns - ] -} - -data "kubernetes_service" "ingress-controller" { - count = var.create_ingress_controller ? 1 : 0 - metadata { - name = "zenml-ingress-nginx-controller" - namespace = var.create_ingress_controller ? kubernetes_namespace.nginx-ns[0].metadata[0].name : "" - } - depends_on = [ - resource.helm_release.nginx-controller - ] -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/aws/outputs.tf b/src/zenml/zen_server/deploy/terraform/recipes/aws/outputs.tf deleted file mode 100644 index bd7e0a2c97a..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/aws/outputs.tf +++ /dev/null @@ -1,7 +0,0 @@ -output "zenml_server_url" { - value = var.create_ingress_controller ? "https://${data.kubernetes_service.ingress-controller[0].status.0.load_balancer.0.ingress.0.hostname}" : "https://zenml.${var.ingress_controller_ip}.nip.io" -} -output "ca_crt" { - value = base64decode(data.kubernetes_secret.certificates.binary_data["ca.crt"]) - sensitive = true -} diff --git a/src/zenml/zen_server/deploy/terraform/recipes/aws/printf.cmd b/src/zenml/zen_server/deploy/terraform/recipes/aws/printf.cmd deleted file mode 100644 index 467643b99d3..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/aws/printf.cmd +++ /dev/null @@ -1,2 +0,0 @@ -@echo off -echo {"os": "Windows"} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/aws/sql.tf b/src/zenml/zen_server/deploy/terraform/recipes/aws/sql.tf deleted file mode 100644 index 86a722c8af5..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/aws/sql.tf +++ /dev/null @@ -1,62 +0,0 @@ -# random string for the RDS instance name -resource "random_string" "rds_suffix" { - count = var.deploy_db ? 1 : 0 - length = 4 - upper = false - special = false -} - -module "metadata_store" { - source = "terraform-aws-modules/rds/aws" - version = "5.9.0" - count = var.deploy_db ? 1 : 0 - - identifier = "${var.name}${var.rds_name}${random_string.rds_suffix[0].result}" - - engine = var.db_type - engine_version = var.db_version - instance_class = var.db_instance_class - allocated_storage = var.db_allocated_storage - - db_name = var.db_name - username = var.database_username - password = var.database_password - create_random_password = var.database_password == "" ? 
true : false - port = "3306" - - # configure access - publicly_accessible = true - - # DB subnet group - create_db_subnet_group = true - subnet_ids = module.vpc[0].public_subnets - skip_final_snapshot = true - - iam_database_authentication_enabled = false - - # we've added MySQL ingress rules to - # this sg so we're using it here - vpc_security_group_ids = [module.vpc[0].default_security_group_id] - - # DB parameter group - family = "mysql5.7" - - # DB option group - major_engine_version = "5.7" - - tags = { - Owner = "user" - Environment = "zenml-env" - } - - parameters = [ - { - name = "character_set_client" - value = "utf8mb4" - }, - { - name = "character_set_server" - value = "utf8mb4" - } - ] -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/aws/terraform.tf b/src/zenml/zen_server/deploy/terraform/recipes/aws/terraform.tf deleted file mode 100644 index 5afab53a76a..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/aws/terraform.tf +++ /dev/null @@ -1,44 +0,0 @@ -# defining the providers for the recipe module -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - - random = { - source = "hashicorp/random" - version = "3.1.0" - } - - local = { - source = "hashicorp/local" - version = "2.1.0" - } - - null = { - source = "hashicorp/null" - version = "3.1.0" - } - - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.11.0" - } - - kubectl = { - source = "gavinbunney/kubectl" - version = "1.14.0" - } - - htpasswd = { - source = "loafoe/htpasswd" - version = "1.0.3" - } - } - - required_version = ">= 0.14.8" -} - -provider "aws" { - region = var.region -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/aws/variables.tf b/src/zenml/zen_server/deploy/terraform/recipes/aws/variables.tf deleted file mode 100644 index e0e2d6fc248..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/aws/variables.tf +++ /dev/null @@ -1,167 +0,0 @@ - -variable "name" { - description = "The prefix to use for all AWS resource names" - default = "zenmlserver" - type = string -} - -variable "region" { - description = "The region for your AWS resources" - default = "eu-west-1" - type = string -} - -variable "namespace" { - description = "The namespace to install the ZenML server Helm chart in" - default = "terraform-server" - type = string -} - -variable "helm_chart" { - description = "The path to the ZenML server helm chart" - default = "../../../helm" - type = string -} - -variable "kubectl_config_path" { - description = "The path to the kube config" - default = "" - type = string -} - -variable "analytics_opt_in" { - description = "The flag to enable/disable analytics" - default = true - type = bool -} - -# If you want a new RDS, choose a name and a password. If you already -# have an instance, provide the name and the password here too. -variable "database_username" { - description = "The username for the AWS RDS metadata store" - default = "admin" - type = string -} -variable "database_password" { - description = "The password for the AWS RDS metadata store" - default = "" - type = string -} - -# if you enable the deploy_db option, the recipe will -# create a new RDS MySQL instance and then use it for this -# ZenML Server. If disabled, you have to supply connection details -# in the section below. -variable "deploy_db" { - description = "Should the recipe create an RDS instance?" 
- default = true - type = bool -} -variable "rds_name" { - description = "The name for the AWS RDS metadata store" - default = "zenmlserver" - type = string -} -variable "db_name" { - description = "The name for the AWS RDS database" - default = "zenmlserver" - type = string -} -variable "db_type" { - description = "The type for the AWS RDS database" - default = "mysql" - type = string -} -variable "db_version" { - description = "The version for the AWS RDS database" - default = "5.7.38" - type = string -} -variable "db_instance_class" { - description = "The instance class to use for the database" - default = "db.t3.micro" - type = string -} - -variable "db_allocated_storage" { - description = "The allocated storage in gigabytes" - default = 5 - type = number -} - -# If you haven't enabled the deploy_db option, provide -# the following value in the values.tfvars.json file. -variable "database_url" { - description = "The URL for the AWS RDS instance" - default = "" - type = string -} -variable "database_ssl_ca" { - description = "The server ca for the AWS RDS instance" - default = "" - type = string -} -variable "database_ssl_cert" { - description = "The client cert for the AWS RDS instance" - default = "" - type = string -} -variable "database_ssl_key" { - description = "The client key for the AWS RDS instance" - default = "" - type = string -} -variable "database_ssl_verify_server_cert" { - description = "Should SSL be verified?" - default = true - type = bool -} - - -# set to true if you don't already have an nginx ingress -# controller in your cluster -variable "create_ingress_controller" { - description = "set to true if you want the recipe to create an ingress controller in your cluster" - default = true - type = bool -} - -# if you already have an ingress controller, supply it's URL -variable "ingress_controller_ip" { - description = "The URL for the ingress controller on your cluster" - default = "" - type = string -} -variable "ingress_tls" { - description = "Whether to enable tls on the ingress or not" - default = false - type = bool -} -variable "ingress_tls_generate_certs" { - description = "Whether to enable tls certificates or not" - default = false - type = bool -} -variable "ingress_tls_secret_name" { - description = "Name for the Kubernetes secret that stores certificates" - default = "zenml-tls-certs" - type = string -} - -variable "zenmlserver_image_repo" { - description = "The repository to use for the zenmlserver docker image." - default = "zenmldocker/zenml-server" - type = string -} -variable "zenmlserver_image_tag" { - description = "The tag to use for the zenmlserver docker image." - default = "latest" - type = string -} - -# variables for creating a ZenML stack configuration file -variable "zenml-version" { - description = "The version of ZenML being used" - default = "0.20.0" - type = string -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/aws/vpc.tf b/src/zenml/zen_server/deploy/terraform/recipes/aws/vpc.tf deleted file mode 100644 index a82b55517af..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/aws/vpc.tf +++ /dev/null @@ -1,47 +0,0 @@ -# VPC infra using https://github.com/terraform-aws-modules/terraform-aws-vpc -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = ">= 4.0.0" - count = var.deploy_db ? 
1 : 0 - - name = "${var.name}-vpc" - cidr = "10.10.0.0/16" - - azs = ["${var.region}a", "${var.region}b", "${var.region}c"] - private_subnets = ["10.10.8.0/21", "10.10.16.0/21", "10.10.24.0/21"] - public_subnets = ["10.10.128.0/21", "10.10.136.0/21", "10.10.144.0/21"] - - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - # enabling MySQL access - create_database_internet_gateway_route = true - manage_default_security_group = true - default_security_group_ingress = [ - { - description = "MySQL traffic from everywhere" - from_port = 3306 - to_port = 3306 - protocol = "tcp" - cidr_blocks = "0.0.0.0/0" - }, - { - description = "All Traffic" - from_port = 0 - to_port = 0 - protocol = -1 - cidr_blocks = "0.0.0.0/0" - } - ] - default_security_group_egress = [ - { - description = "All Traffic" - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = "0.0.0.0/0" - } - ] - -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/aws/zen_server.tf b/src/zenml/zen_server/deploy/terraform/recipes/aws/zen_server.tf deleted file mode 100644 index 780710889cb..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/aws/zen_server.tf +++ /dev/null @@ -1,103 +0,0 @@ -# create the ZenML Server deployment -resource "kubernetes_namespace" "zen-server" { - metadata { - name = "${var.name}-${var.namespace}" - } -} - -resource "helm_release" "zen-server" { - - name = "${var.name}-zenmlserver" - chart = var.helm_chart - namespace = kubernetes_namespace.zen-server.metadata[0].name - - set { - name = "zenml.image.repository" - value = var.zenmlserver_image_repo - } - set { - name = "zenml.image.tag" - value = var.zenmlserver_image_tag - } - set { - name = "zenml.deploymentType" - value = "aws" - } - - set { - name = "zenml.secretsStore.type" - value = "aws" - } - set { - name = "zenml.secretsStore.aws.region_name" - value = var.region - } - - set { - name = "zenml.analyticsOptIn" - value = var.analytics_opt_in - } - - # set up the right path for ZenML - set { - name = "zenml.ingress.annotations.nginx\\.ingress\\.kubernetes\\.io/rewrite-target" - value = "" - } - set { - name = "zenml.ingress.host" - value = var.create_ingress_controller ? "${data.kubernetes_service.ingress-controller[0].status.0.load_balancer.0.ingress.0.hostname}" : "zenml.${var.ingress_controller_ip}.nip.io" - } - set { - name = "zenml.ingress.tls.enabled" - value = var.ingress_tls - } - set { - name = "zenml.ingress.tls.generateCerts" - value = var.ingress_tls_generate_certs - } - set { - name = "zenml.ingress.tls.secretName" - value = "${var.name}-${var.ingress_tls_secret_name}" - } - - # set parameters for the mysql database - set { - name = "zenml.database.url" - value = var.deploy_db ? "mysql://${module.metadata_store[0].db_instance_username}:${module.metadata_store[0].db_instance_password}@${module.metadata_store[0].db_instance_address}:3306/${var.db_name}" : var.database_url - } - set { - name = "zenml.database.sslCa" - value = var.deploy_db ? "" : var.database_ssl_ca - } - set { - name = "zenml.database.sslCert" - value = var.deploy_db ? "" : var.database_ssl_cert - } - set { - name = "zenml.database.sslKey" - value = var.deploy_db ? "" : var.database_ssl_key - } - set { - name = "zenml.database.sslVerifyServerCert" - value = var.deploy_db ? 
false : var.database_ssl_verify_server_cert - } - depends_on = [ - resource.kubernetes_namespace.zen-server - ] -} - -data "kubernetes_secret" "certificates" { - metadata { - name = "${var.name}-${var.ingress_tls_secret_name}" - namespace = "${var.name}-${var.namespace}" - } - binary_data = { - "tls.crt" = "" - "tls.key" = "" - "ca.crt" = "" - } - - depends_on = [ - helm_release.zen-server - ] -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/azure/.gitignore b/src/zenml/zen_server/deploy/terraform/recipes/azure/.gitignore deleted file mode 100644 index e1640bf11fa..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/azure/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*.exe -*.hcl -*.info -*.backup -*.tfstate -*.pem -kubeconfig_* -.terraform \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/azure/helm.tf b/src/zenml/zen_server/deploy/terraform/recipes/azure/helm.tf deleted file mode 100644 index 10fabdfdc9a..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/azure/helm.tf +++ /dev/null @@ -1,20 +0,0 @@ -# check if the host OS is Linux or Windows -data "external" "os" { - working_dir = path.module - program = ["printf", "{\"os\": \"Linux\"}"] -} -locals { - os = data.external.os.result.os - kubectl_config_path = local.os == "Windows" ? "%USERPROFILE%\\.kube\\config" : "~/.kube/config" -} - -# A default (non-aliased) provider configuration for "helm" -provider "helm" { - kubernetes { - config_path = var.kubectl_config_path == "" ? local.kubectl_config_path : var.kubectl_config_path - } -} - -provider "kubernetes" { - config_path = var.kubectl_config_path == "" ? local.kubectl_config_path : var.kubectl_config_path -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/azure/ingress.tf b/src/zenml/zen_server/deploy/terraform/recipes/azure/ingress.tf deleted file mode 100644 index f56ec3fa266..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/azure/ingress.tf +++ /dev/null @@ -1,30 +0,0 @@ -# set up the nginx ingress controller -resource "kubernetes_namespace" "nginx-ns" { - count = var.create_ingress_controller ? 1 : 0 - metadata { - name = "${var.name}-ingress" - } -} - -resource "helm_release" "nginx-controller" { - name = "zenml" - count = var.create_ingress_controller ? 1 : 0 - repository = "https://kubernetes.github.io/ingress-nginx" - chart = "ingress-nginx" - # dependency on nginx-ns - namespace = var.create_ingress_controller ? kubernetes_namespace.nginx-ns[0].metadata[0].name : "" - depends_on = [ - resource.kubernetes_namespace.nginx-ns - ] -} - -data "kubernetes_service" "ingress-controller" { - count = var.create_ingress_controller ? 1 : 0 - metadata { - name = "zenml-ingress-nginx-controller" - namespace = var.create_ingress_controller ? 
kubernetes_namespace.nginx-ns[0].metadata[0].name : "" - } - depends_on = [ - resource.helm_release.nginx-controller - ] -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/azure/key_vault.tf b/src/zenml/zen_server/deploy/terraform/recipes/azure/key_vault.tf deleted file mode 100644 index 2bb02709837..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/azure/key_vault.tf +++ /dev/null @@ -1,73 +0,0 @@ -data "azurerm_client_config" "current" {} - -data "azurerm_kubernetes_cluster" "current" { - name = local.cluster - resource_group_name = local.rg -} - -# create a key vault instance that can be used for storing secrets -resource "azurerm_key_vault" "secret_manager" { - name = "${var.key_vault_name}-${random_string.unique.result}" - location = data.azurerm_resource_group.rg.location - resource_group_name = local.rg - enabled_for_disk_encryption = true - tenant_id = data.azurerm_client_config.current.tenant_id - soft_delete_retention_days = 7 - purge_protection_enabled = false - - sku_name = "standard" - - access_policy { - tenant_id = data.azurerm_client_config.current.tenant_id - object_id = data.azurerm_client_config.current.object_id - - key_permissions = [ - "Get", "List", "Create", "Delete", "Update" - ] - - secret_permissions = [ - "Get", "List", "Delete" - ] - - storage_permissions = [ - "Get", "List", "Set", "Delete", "Update" - ] - } -} - - -resource "azurerm_key_vault_access_policy" "kv-access" { - key_vault_id = azurerm_key_vault.secret_manager.id - tenant_id = data.azurerm_kubernetes_cluster.current.identity.0.tenant_id - object_id = data.azurerm_kubernetes_cluster.current.kubelet_identity.0.object_id - - key_permissions = [ - "Get", "List", "Create", "Delete", "Update" - ] - - secret_permissions = [ - "Get", "List", "Set", "Delete" - ] - - storage_permissions = [ - "Get", "List", "Set", "Delete", "Update" - ] -} - -resource "azurerm_key_vault_access_policy" "kv-access-user" { - key_vault_id = azurerm_key_vault.secret_manager.id - tenant_id = data.azurerm_client_config.current.tenant_id - object_id = data.azurerm_client_config.current.object_id - - key_permissions = [ - "Get", "List", "Create", "Delete", "Update" - ] - - secret_permissions = [ - "Get", "List", "Set", "Delete" - ] - - storage_permissions = [ - "Get", "List", "Set", "Delete", "Update" - ] -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/azure/outputs.tf b/src/zenml/zen_server/deploy/terraform/recipes/azure/outputs.tf deleted file mode 100644 index 0be630cf718..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/azure/outputs.tf +++ /dev/null @@ -1,7 +0,0 @@ -output "zenml_server_url" { - value = var.create_ingress_controller ? 
"https://zenml.${data.kubernetes_service.ingress-controller[0].status.0.load_balancer.0.ingress.0.ip}.nip.io" : "https://zenml.${var.ingress_controller_ip}.nip.io" -} -output "ca_crt" { - value = base64decode(data.kubernetes_secret.certificates.binary_data["ca.crt"]) - sensitive = true -} diff --git a/src/zenml/zen_server/deploy/terraform/recipes/azure/printf.cmd b/src/zenml/zen_server/deploy/terraform/recipes/azure/printf.cmd deleted file mode 100644 index 467643b99d3..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/azure/printf.cmd +++ /dev/null @@ -1,2 +0,0 @@ -@echo off -echo {"os": "Windows"} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/azure/rg.tf b/src/zenml/zen_server/deploy/terraform/recipes/azure/rg.tf deleted file mode 100644 index 573ddf6f71a..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/azure/rg.tf +++ /dev/null @@ -1,36 +0,0 @@ -# run kubectl config view commands to get the current cluster and rg -resource "null_resource" "get_cluster_and_rg" { - provisioner "local-exec" { - command = "kubectl config view --minify -o jsonpath='{.clusters[].name}' > cluster.txt" - } - provisioner "local-exec" { - command = "kubectl config view --minify -o jsonpath='{.contexts[].context.user}' | cut -d_ -f2 > namespace.txt" - } -} - -# get the current cluster and namespace -data "local_file" "cluster" { - filename = "cluster.txt" - - depends_on = [ - null_resource.get_cluster_and_rg - ] -} - -data "local_file" "rg" { - filename = "namespace.txt" - - depends_on = [ - null_resource.get_cluster_and_rg - ] -} - -# get the current cluster and namespace -locals { - cluster = chomp(data.local_file.cluster.content) - rg = chomp(data.local_file.rg.content) -} - -data "azurerm_resource_group" "rg" { - name = local.rg -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/azure/sql.tf b/src/zenml/zen_server/deploy/terraform/recipes/azure/sql.tf deleted file mode 100644 index 01d2dea29d5..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/azure/sql.tf +++ /dev/null @@ -1,65 +0,0 @@ -# random string for the Flexible Server instance name -resource "random_string" "flexible_server_suffix" { - count = var.deploy_db ? 1 : 0 - length = 4 - upper = false - special = false -} - -resource "azurerm_mysql_flexible_server" "mysql" { - count = var.deploy_db ? 1 : 0 - name = "${var.db_instance_name}-${random_string.flexible_server_suffix[0].result}" - resource_group_name = local.rg - location = data.azurerm_resource_group.rg.location - administrator_login = var.database_username - administrator_password = var.database_password == "" ? random_password.mysql_password.result : var.database_password - version = var.db_version - storage { - size_gb = var.db_disk_size - } - sku_name = var.db_sku_name -} - -resource "azurerm_mysql_flexible_database" "db" { - count = var.deploy_db ? 1 : 0 - name = var.db_name - resource_group_name = data.azurerm_resource_group.rg.name - server_name = azurerm_mysql_flexible_server.mysql[0].name - charset = "utf8" - collation = "utf8_unicode_ci" -} - -resource "azurerm_mysql_flexible_server_firewall_rule" "allow_IPs" { - count = var.deploy_db ? 1 : 0 - name = "all_traffic" - resource_group_name = data.azurerm_resource_group.rg.name - server_name = azurerm_mysql_flexible_server.mysql[0].name - start_ip_address = "0.0.0.0" - end_ip_address = "255.255.255.255" -} - -resource "azurerm_mysql_flexible_server_configuration" "require_ssl" { - count = var.deploy_db ? 
1 : 0 - name = "require_secure_transport" - resource_group_name = data.azurerm_resource_group.rg.name - server_name = azurerm_mysql_flexible_server.mysql[0].name - value = "OFF" -} - -resource "random_password" "mysql_password" { - length = 12 - special = false - min_lower = 1 - min_numeric = 1 - min_upper = 1 -} - -# download SSL certificate -resource "null_resource" "download-SSL-certificate" { - count = var.deploy_db ? 1 : 0 - - provisioner "local-exec" { - command = "wget https://dl.cacerts.digicert.com/DigiCertGlobalRootCA.crt.pem" - } - -} diff --git a/src/zenml/zen_server/deploy/terraform/recipes/azure/terraform.tf b/src/zenml/zen_server/deploy/terraform/recipes/azure/terraform.tf deleted file mode 100644 index 5d1ea92ce19..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/azure/terraform.tf +++ /dev/null @@ -1,52 +0,0 @@ -# defining the providers for the recipe module -terraform { - required_providers { - azurerm = { - source = "hashicorp/azurerm" - version = ">=3.16.0" - } - - random = { - source = "hashicorp/random" - version = "3.1.0" - } - - local = { - source = "hashicorp/local" - version = "2.1.0" - } - - null = { - source = "hashicorp/null" - version = "3.1.0" - } - - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.11.0" - } - - kubectl = { - source = "gavinbunney/kubectl" - version = "1.14.0" - } - - htpasswd = { - source = "loafoe/htpasswd" - version = "1.0.3" - } - } - - required_version = ">= 0.14.8" -} - -provider "azurerm" { - features { - resource_group { - prevent_deletion_if_contains_resources = false - } - key_vault { - purge_soft_delete_on_destroy = true - } - } -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/azure/variables.tf b/src/zenml/zen_server/deploy/terraform/recipes/azure/variables.tf deleted file mode 100644 index a3994ead06e..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/azure/variables.tf +++ /dev/null @@ -1,176 +0,0 @@ -resource "random_string" "unique" { - length = 5 - special = false - upper = false -} - -variable "name" { - description = "The prefix to use for all AWS resource names" - default = "zenmlserver" - type = string -} - -variable "resource_group" { - description = "The resource group in Azure that you want to deploy ZenML to" - default = "zenml" - type = string -} - -variable "location" { - description = "The location for your Azure resources" - default = "uksouth" - type = string -} - -variable "namespace" { - description = "The namespace to install the ZenML server Helm chart in" - default = "terraform-server" - type = string -} - -variable "helm_chart" { - description = "The path to the ZenML server helm chart" - default = "../../../helm" - type = string -} - -variable "kubectl_config_path" { - description = "The path to the kube config" - default = "" - type = string -} - -variable "analytics_opt_in" { - description = "The flag to enable/disable analytics" - default = true - type = bool -} - -# If you want a new Flexible Server, choose a name and a password. If you already -# have an instance, provide the name and the password here too. 
-variable "database_username" { - description = "The username for the CloudSQL store" - default = "user" - type = string -} -variable "database_password" { - description = "The password for the CloudSQL store" - default = "" - type = string -} - -variable "key_vault_name" { - description = "The name of the Key Vault to use as secret store" - default = "zenml-key-vault" -} - -# if you enable the deploy_db option, the recipe will -# create a new Flexible MySQL instance and then use it for this -# ZenServer. If disabled, you have to supply connection details -# in the section below. -variable "deploy_db" { - description = "Should the recipe create a Flexible MySQL instance?" - default = true - type = bool -} -variable "db_instance_name" { - description = "The name for the Flexible MySQL store" - default = "zenmlserver" - type = string -} -variable "db_name" { - description = "The name for the database" - default = "zenmlserver" - type = string -} -variable "db_version" { - description = "The version of MySQL to use" - default = "5.7" -} -variable "db_sku_name" { - description = "The sku_name for the database resource" - default = "B_Standard_B1s" - type = string -} -variable "db_disk_size" { - description = "The allocated storage in gigabytes" - default = 20 - type = number -} - -# If you haven't enabled the deploy_db option, provide -# the following value in the values.tfvars.json file. -variable "database_url" { - description = "The URL for the Flexible MySQL instance" - default = "" - type = string -} -variable "database_ssl_ca" { - description = "The server ca for the Flexible MySQL instance" - default = "" - type = string -} -variable "database_ssl_cert" { - description = "The client cert for the Flexible MySQL instance" - default = "" - type = string -} -variable "database_ssl_key" { - description = "The client key for the Flexible MySQL instance" - default = "" - type = string -} -variable "database_ssl_verify_server_cert" { - description = "Should SSL be verified?" - default = true - type = bool -} - - -# set to true if you don't already have an nginx ingress -# controller in your cluster -variable "create_ingress_controller" { - description = "set to true if you want the recipe to create an ingress controller in your cluster" - default = false - type = bool -} - -# if you already have an ingress controller, supply it's URL -variable "ingress_controller_ip" { - description = "The hostname for the ingress controller on your cluster" - default = "" - type = string -} -variable "ingress_tls" { - description = "Whether to enable tls on the ingress or not" - default = false - type = bool -} -variable "ingress_tls_generate_certs" { - description = "Whether to enable tls certificates or not" - default = false - type = bool -} -variable "ingress_tls_secret_name" { - description = "Name for the Kubernetes secret that stores certificates" - default = "zenml-tls-certs" - type = string -} - -variable "zenmlserver_image_repo" { - description = "The repository to use for the zenmlserver docker image." - default = "zenmldocker/zenml-server" - type = string -} -variable "zenmlserver_image_tag" { - description = "The tag to use for the zenmlserver docker image." 
- default = "latest" - type = string -} - -# variables for creating a ZenML stack configuration file -variable "zenml-version" { - description = "The version of ZenML being used" - default = "0.20.0" - type = string -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/azure/zen_server.tf b/src/zenml/zen_server/deploy/terraform/recipes/azure/zen_server.tf deleted file mode 100644 index 6c3c001f1de..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/azure/zen_server.tf +++ /dev/null @@ -1,103 +0,0 @@ -# create the ZenServer deployment -resource "kubernetes_namespace" "zen-server" { - metadata { - name = "${var.name}-${var.namespace}" - } -} - -resource "helm_release" "zen-server" { - - name = "${var.name}-zenmlserver" - chart = var.helm_chart - namespace = kubernetes_namespace.zen-server.metadata[0].name - - set { - name = "zenml.image.repository" - value = var.zenmlserver_image_repo - } - set { - name = "zenml.image.tag" - value = var.zenmlserver_image_tag - } - set { - name = "zenml.deploymentType" - value = "azure" - } - - set { - name = "zenml.secretsStore.type" - value = "azure" - } - set { - name = "zenml.secretsStore.azure.key_vault_name" - value = azurerm_key_vault.secret_manager.name - } - - set { - name = "zenml.analyticsOptIn" - value = var.analytics_opt_in - } - - # set up the right path for ZenML - set { - name = "zenml.ingress.annotations.nginx\\.ingress\\.kubernetes\\.io/rewrite-target" - value = "" - } - set { - name = "zenml.ingress.host" - value = var.create_ingress_controller ? "zenml.${data.kubernetes_service.ingress-controller[0].status.0.load_balancer.0.ingress.0.ip}.nip.io" : "zenml.${var.ingress_controller_ip}.nip.io" - } - set { - name = "zenml.ingress.tls.enabled" - value = var.ingress_tls - } - set { - name = "zenml.ingress.tls.generateCerts" - value = var.ingress_tls_generate_certs - } - set { - name = "zenml.ingress.tls.secretName" - value = "${var.name}-${var.ingress_tls_secret_name}" - } - - # set parameters for the mysql database - set { - name = "zenml.database.url" - value = var.deploy_db ? "mysql://${var.database_username}:${azurerm_mysql_flexible_server.mysql[0].administrator_password}@${azurerm_mysql_flexible_server.mysql[0].name}.mysql.database.azure.com:3306/${var.db_name}" : var.database_url - } - set { - name = "zenml.database.sslCa" - value = var.deploy_db ? "" : var.database_ssl_ca - } - set { - name = "zenml.database.sslCert" - value = var.deploy_db ? "" : var.database_ssl_cert - } - set { - name = "zenml.database.sslKey" - value = var.deploy_db ? "" : var.database_ssl_key - } - set { - name = "zenml.database.sslVerifyServerCert" - value = var.deploy_db ? 
false : var.database_ssl_verify_server_cert - } - depends_on = [ - resource.kubernetes_namespace.zen-server - ] -} - -data "kubernetes_secret" "certificates" { - metadata { - name = "${var.name}-${var.ingress_tls_secret_name}" - namespace = "${var.name}-${var.namespace}" - } - binary_data = { - "tls.crt" = "" - "tls.key" = "" - "ca.crt" = "" - } - - depends_on = [ - helm_release.zen-server - ] -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/gcp/.gitignore b/src/zenml/zen_server/deploy/terraform/recipes/gcp/.gitignore deleted file mode 100644 index e1640bf11fa..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/gcp/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*.exe -*.hcl -*.info -*.backup -*.tfstate -*.pem -kubeconfig_* -.terraform \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/gcp/helm.tf b/src/zenml/zen_server/deploy/terraform/recipes/gcp/helm.tf deleted file mode 100644 index 10fabdfdc9a..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/gcp/helm.tf +++ /dev/null @@ -1,20 +0,0 @@ -# check if the host OS is Linux or Windows -data "external" "os" { - working_dir = path.module - program = ["printf", "{\"os\": \"Linux\"}"] -} -locals { - os = data.external.os.result.os - kubectl_config_path = local.os == "Windows" ? "%USERPROFILE%\\.kube\\config" : "~/.kube/config" -} - -# A default (non-aliased) provider configuration for "helm" -provider "helm" { - kubernetes { - config_path = var.kubectl_config_path == "" ? local.kubectl_config_path : var.kubectl_config_path - } -} - -provider "kubernetes" { - config_path = var.kubectl_config_path == "" ? local.kubectl_config_path : var.kubectl_config_path -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/gcp/ingress.tf b/src/zenml/zen_server/deploy/terraform/recipes/gcp/ingress.tf deleted file mode 100644 index f56ec3fa266..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/gcp/ingress.tf +++ /dev/null @@ -1,30 +0,0 @@ -# set up the nginx ingress controller -resource "kubernetes_namespace" "nginx-ns" { - count = var.create_ingress_controller ? 1 : 0 - metadata { - name = "${var.name}-ingress" - } -} - -resource "helm_release" "nginx-controller" { - name = "zenml" - count = var.create_ingress_controller ? 1 : 0 - repository = "https://kubernetes.github.io/ingress-nginx" - chart = "ingress-nginx" - # dependency on nginx-ns - namespace = var.create_ingress_controller ? kubernetes_namespace.nginx-ns[0].metadata[0].name : "" - depends_on = [ - resource.kubernetes_namespace.nginx-ns - ] -} - -data "kubernetes_service" "ingress-controller" { - count = var.create_ingress_controller ? 1 : 0 - metadata { - name = "zenml-ingress-nginx-controller" - namespace = var.create_ingress_controller ? kubernetes_namespace.nginx-ns[0].metadata[0].name : "" - } - depends_on = [ - resource.helm_release.nginx-controller - ] -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/gcp/outputs.tf b/src/zenml/zen_server/deploy/terraform/recipes/gcp/outputs.tf deleted file mode 100644 index 0be630cf718..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/gcp/outputs.tf +++ /dev/null @@ -1,7 +0,0 @@ -output "zenml_server_url" { - value = var.create_ingress_controller ? 
"https://zenml.${data.kubernetes_service.ingress-controller[0].status.0.load_balancer.0.ingress.0.ip}.nip.io" : "https://zenml.${var.ingress_controller_ip}.nip.io" -} -output "ca_crt" { - value = base64decode(data.kubernetes_secret.certificates.binary_data["ca.crt"]) - sensitive = true -} diff --git a/src/zenml/zen_server/deploy/terraform/recipes/gcp/printf.cmd b/src/zenml/zen_server/deploy/terraform/recipes/gcp/printf.cmd deleted file mode 100644 index 467643b99d3..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/gcp/printf.cmd +++ /dev/null @@ -1,2 +0,0 @@ -@echo off -echo {"os": "Windows"} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/gcp/sql.tf b/src/zenml/zen_server/deploy/terraform/recipes/gcp/sql.tf deleted file mode 100644 index b8477ceb2e5..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/gcp/sql.tf +++ /dev/null @@ -1,64 +0,0 @@ -# random string for the CloudSQL instance name -resource "random_string" "cloudsql_suffix" { - count = var.deploy_db ? 1 : 0 - length = 4 - upper = false - special = false -} - -module "metadata_store" { - source = "GoogleCloudPlatform/sql-db/google//modules/mysql" - version = "11.0.0" - count = var.deploy_db ? 1 : 0 - - project_id = var.project_id - name = "${var.name}-${var.cloudsql_name}-${random_string.cloudsql_suffix[0].result}" - db_name = var.db_name - database_version = "MYSQL_5_7" - disk_size = var.db_disk_size - tier = var.db_instance_tier - region = var.region - zone = "${var.region}-c" - - user_name = var.database_username - user_password = var.database_password - - deletion_protection = false - - ip_configuration = { - authorized_networks = [ - { - name = "all", - value = "0.0.0.0/0" - } - ] - ipv4_enabled = true - private_network = null - require_ssl = false - allocated_ip_range = null - } -} - -# create the client certificate for CloudSQL -resource "google_sql_ssl_cert" "client_cert" { - count = var.deploy_db ? 1 : 0 - common_name = "sql-cert" - instance = module.metadata_store[0].instance_name -} - -# create the certificate files -resource "local_file" "server-ca" { - count = var.deploy_db ? 1 : 0 - content = google_sql_ssl_cert.client_cert[0].server_ca_cert - filename = "./server-ca.pem" -} -resource "local_file" "client-cert" { - count = var.deploy_db ? 1 : 0 - content = google_sql_ssl_cert.client_cert[0].cert - filename = "./client-cert.pem" -} -resource "local_file" "client-key" { - count = var.deploy_db ? 
1 : 0 - content = google_sql_ssl_cert.client_cert[0].private_key - filename = "./client-key.pem" -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/gcp/terraform.tf b/src/zenml/zen_server/deploy/terraform/recipes/gcp/terraform.tf deleted file mode 100644 index 312086e5c79..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/gcp/terraform.tf +++ /dev/null @@ -1,44 +0,0 @@ -# defining the providers for the recipe module -terraform { - required_providers { - google = { - source = "hashicorp/google" - } - - random = { - source = "hashicorp/random" - version = "3.1.0" - } - - local = { - source = "hashicorp/local" - version = "2.1.0" - } - - null = { - source = "hashicorp/null" - version = "3.1.0" - } - - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.11.0" - } - - kubectl = { - source = "gavinbunney/kubectl" - version = "1.14.0" - } - - htpasswd = { - source = "loafoe/htpasswd" - version = "1.0.3" - } - } - - required_version = ">= 0.14.8" -} - -provider "google" { - project = var.project_id -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/gcp/variables.tf b/src/zenml/zen_server/deploy/terraform/recipes/gcp/variables.tf deleted file mode 100644 index 5c6dd460d4b..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/gcp/variables.tf +++ /dev/null @@ -1,171 +0,0 @@ -variable "name" { - description = "The prefix to use for all AWS resource names" - default = "zenmlserver" - type = string -} - -variable "project_id" { - description = "The project ID in GCP that you want to deploy ZenML to" - default = "" - type = string -} - -variable "region" { - description = "The region for your GCP resources" - default = "europe-west3" - type = string -} - -variable "namespace" { - description = "The namespace to install the ZenML server Helm chart in" - default = "terraform-server" - type = string -} - -variable "helm_chart" { - description = "The path to the ZenML server helm chart" - default = "../../../helm" - type = string -} - -variable "kubectl_config_path" { - description = "The path to the kube config" - default = "" - type = string -} - -variable "analytics_opt_in" { - description = "The flag to enable/disable analytics" - default = true - type = bool -} - -# If you want a new CloudSQL, choose a name and a password. If you already -# have an instance, provide the name and the password here too. -variable "database_username" { - description = "The username for the CloudSQL store" - default = "admin" - type = string -} -variable "database_password" { - description = "The password for the CloudSQL store" - default = "" - type = string -} - -# if you enable the deploy_db option, the recipe will -# create a new CloudSQL MySQL instance and then use it for this -# ZenServer. If disabled, you have to supply connection details -# in the section below. -variable "deploy_db" { - description = "Should the recipe create an CloudSQL instance?" 
- default = true - type = bool -} -variable "cloudsql_name" { - description = "The name for the CloudSQL store" - default = "zenmlserver" - type = string -} -variable "db_name" { - description = "The name for the database" - default = "zenmlserver" - type = string -} - -variable "db_instance_tier" { - description = "The instance class to use for the database" - default = "db-n1-standard-1" - type = string -} - -variable "db_disk_size" { - description = "The allocated storage in gigabytes" - default = 10 - type = number -} - -# If you haven't enabled the deploy_db option, provide -# the following value in the values.tfvars.json file. -variable "database_url" { - description = "The URL for the AWS RDS instance" - default = "" - type = string -} -variable "database_ssl_ca" { - description = "The server ca for the AWS RDS instance" - default = "" - type = string -} -variable "database_ssl_cert" { - description = "The client cert for the AWS RDS instance" - default = "" - type = string -} -variable "database_ssl_key" { - description = "The client key for the AWS RDS instance" - default = "" - type = string -} -variable "database_ssl_verify_server_cert" { - description = "Should SSL be verified?" - default = true - type = bool -} - - -# Enable secrets manager API. Listing services might need elevated permissions. -# Disable this if you don't have the ListServices permission. -variable "enable_secrets_manager_api" { - description = "Enable the secrets manager API" - default = true - type = bool -} - -# set to true if you don't already have an nginx ingress -# controller in your cluster -variable "create_ingress_controller" { - description = "set to true if you want the recipe to create an ingress controller in your cluster" - default = false - type = bool -} - -# if you already have an ingress controller, supply it's URL -variable "ingress_controller_ip" { - description = "The hostname for the ingress controller on your cluster" - default = "" - type = string -} -variable "ingress_tls" { - description = "Whether to enable tls on the ingress or not" - default = false - type = bool -} -variable "ingress_tls_generate_certs" { - description = "Whether to enable tls certificates or not" - default = false - type = bool -} -variable "ingress_tls_secret_name" { - description = "Name for the Kubernetes secret that stores certificates" - default = "zenml-tls-certs" - type = string -} - -variable "zenmlserver_image_repo" { - description = "The repository to use for the zenmlserver docker image." - default = "zenmldocker/zenml-server" - type = string -} -variable "zenmlserver_image_tag" { - description = "The tag to use for the zenmlserver docker image." - default = "latest" - type = string -} - -# variables for creating a ZenML stack configuration file -variable "zenml-version" { - description = "The version of ZenML being used" - default = "0.20.0" - type = string -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/recipes/gcp/zen_server.tf b/src/zenml/zen_server/deploy/terraform/recipes/gcp/zen_server.tf deleted file mode 100644 index b2232c75b88..00000000000 --- a/src/zenml/zen_server/deploy/terraform/recipes/gcp/zen_server.tf +++ /dev/null @@ -1,114 +0,0 @@ -# create the ZenServer deployment -resource "kubernetes_namespace" "zen-server" { - metadata { - name = "${var.name}-${var.namespace}" - } -} - -# enable secret manager -resource "google_project_service" "secret_manager" { - count = var.enable_secrets_manager_api ? 
1 : 0 - project = var.project_id - service = "secretmanager.googleapis.com" - - disable_on_destroy = false -} - -resource "helm_release" "zen-server" { - - name = "${var.name}-zenmlserver" - chart = var.helm_chart - namespace = kubernetes_namespace.zen-server.metadata[0].name - - set { - name = "zenml.image.repository" - value = var.zenmlserver_image_repo - } - set { - name = "zenml.image.tag" - value = var.zenmlserver_image_tag - } - - set { - name = "zenml.deploymentType" - value = "gcp" - } - - set { - name = "zenml.secretsStore.type" - value = var.enable_secrets_manager_api? "gcp" : "sql" - } - set { - name = "zenml.secretsStore.gcp.project_idd" - value = var.project_id - - } - - set { - name = "zenml.analyticsOptIn" - value = var.analytics_opt_in - } - - # set up the right path for ZenML - set { - name = "zenml.ingress.annotations.nginx\\.ingress\\.kubernetes\\.io/rewrite-target" - value = "" - } - set { - name = "zenml.ingress.host" - value = var.create_ingress_controller ? "zenml.${data.kubernetes_service.ingress-controller[0].status.0.load_balancer.0.ingress.0.ip}.nip.io" : "zenml.${var.ingress_controller_ip}.nip.io" - } - set { - name = "zenml.ingress.tls.enabled" - value = var.ingress_tls - } - set { - name = "zenml.ingress.tls.generateCerts" - value = var.ingress_tls_generate_certs - } - set { - name = "zenml.ingress.tls.secretName" - value = "${var.name}-${var.ingress_tls_secret_name}" - } - - # set parameters for the mysql database - set { - name = "zenml.database.url" - value = var.deploy_db ? "mysql://${var.database_username}:${module.metadata_store[0].generated_user_password}@${module.metadata_store[0].instance_first_ip_address}:3306/${var.db_name}" : var.database_url - } - set { - name = "zenml.database.sslCa" - value = var.deploy_db ? "" : var.database_ssl_ca - } - set { - name = "zenml.database.sslCert" - value = var.deploy_db ? "" : var.database_ssl_cert - } - set { - name = "zenml.database.sslKey" - value = var.deploy_db ? "" : var.database_ssl_key - } - set { - name = "zenml.database.sslVerifyServerCert" - value = var.deploy_db ? false : var.database_ssl_verify_server_cert - } - depends_on = [ - resource.kubernetes_namespace.zen-server - ] -} - -data "kubernetes_secret" "certificates" { - metadata { - name = "${var.name}-${var.ingress_tls_secret_name}" - namespace = "${var.name}-${var.namespace}" - } - binary_data = { - "tls.crt" = "" - "tls.key" = "" - "ca.crt" = "" - } - - depends_on = [ - helm_release.zen-server - ] -} \ No newline at end of file diff --git a/src/zenml/zen_server/deploy/terraform/terraform_zen_server.py b/src/zenml/zen_server/deploy/terraform/terraform_zen_server.py deleted file mode 100644 index 2ab97b751c7..00000000000 --- a/src/zenml/zen_server/deploy/terraform/terraform_zen_server.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright (c) ZenML GmbH 2022. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing -# permissions and limitations under the License. 
-"""Service implementation for the ZenML terraform server deployment.""" - -import json -import os -from pathlib import Path -from typing import Any, Dict, Optional, cast -from uuid import UUID - -from pydantic import ConfigDict - -from zenml.logger import get_logger -from zenml.services import ServiceType -from zenml.services.terraform.terraform_service import ( - TerraformService, - TerraformServiceConfig, -) -from zenml.utils.io_utils import get_global_config_directory -from zenml.zen_server.deploy.deployment import ServerDeploymentConfig - -logger = get_logger(__name__) - -ZEN_SERVER_HEALTHCHECK_URL_PATH = "health" - -TERRAFORM_ZENML_SERVER_CONFIG_SUBPATH = os.path.join( - "zen_server", - "terraform", -) - -TERRAFORM_ZENML_SERVER_CONFIG_PATH = os.path.join( - get_global_config_directory(), - TERRAFORM_ZENML_SERVER_CONFIG_SUBPATH, -) -TERRAFORM_ZENML_SERVER_CONFIG_FILENAME = os.path.join( - TERRAFORM_ZENML_SERVER_CONFIG_PATH, "service.json" -) -TERRAFORM_ZENML_SERVER_RECIPE_SUBPATH = "recipes" -TERRAFORM_VALUES_FILE_PATH = "values.tfvars.json" -TERRAFORM_DEPLOYED_ZENSERVER_OUTPUT_URL = "zenml_server_url" -TERRAFORM_DEPLOYED_ZENSERVER_OUTPUT_TLS_CRT = "tls_crt" -TERRAFORM_DEPLOYED_ZENSERVER_OUTPUT_TLS_KEY = "tls_key" -TERRAFORM_DEPLOYED_ZENSERVER_OUTPUT_CA_CRT = "ca_crt" - -TERRAFORM_ZENML_SERVER_DEFAULT_TIMEOUT = 60 - - -ZENML_HELM_CHART_SUBPATH = "helm" - - -def get_helm_chart_path() -> str: - """Get the ZenML server helm chart path. - - The ZenML server helm chart files are located in a folder relative to the - `zenml.zen_server.deploy` Python module. - - Returns: - The helm chart path. - """ - import zenml.zen_server.deploy as deploy_module - - path = os.path.join( - os.path.dirname(deploy_module.__file__), - ZENML_HELM_CHART_SUBPATH, - ) - return path - - -class TerraformServerDeploymentConfig(ServerDeploymentConfig): - """Terraform server deployment configuration. - - Attributes: - log_level: The log level to set the terraform client to. Choose one of - TRACE, DEBUG, INFO, WARN or ERROR (case insensitive). - helm_chart: The path to the ZenML server helm chart to use for - deployment. - zenmlserver_image_repo: The repository to use for the zenml server. - zenmlserver_image_tag: The tag to use for the zenml server docker - image. - namespace: The Kubernetes namespace to deploy the ZenML server to. - kubectl_config_path: The path to the kubectl config file to use for - deployment. - ingress_tls: Whether to use TLS for the ingress. - ingress_tls_generate_certs: Whether to generate self-signed TLS - certificates for the ingress. - ingress_tls_secret_name: The name of the Kubernetes secret to use for - the ingress. - create_ingress_controller: Whether to deploy an nginx ingress - controller as part of the deployment. - ingress_controller_ip: The ingress controller IP to use for - the ingress self-signed certificate and to compute the ZenML server - URL. - deploy_db: Whether to create a SQL database service as part of the recipe. - database_username: The username for the database. - database_password: The password for the database. - database_url: The URL of the RDS instance to use for the ZenML server. - database_ssl_ca: The path to the SSL CA certificate to use for the - database connection. - database_ssl_cert: The path to the client SSL certificate to use for the - database connection. - database_ssl_key: The path to the client SSL key to use for the - database connection. - database_ssl_verify_server_cert: Whether to verify the database server - SSL certificate. 
- analytics_opt_in: Whether to enable analytics. - """ - - log_level: str = "ERROR" - - helm_chart: str = get_helm_chart_path() - zenmlserver_image_repo: str = "zenmldocker/zenml-server" - zenmlserver_image_tag: str = "latest" - namespace: str = "zenmlserver" - kubectl_config_path: str = os.path.join( - str(Path.home()), ".kube", "config" - ) - ingress_tls: bool = False - ingress_tls_generate_certs: bool = True - ingress_tls_secret_name: str = "zenml-tls-certs" - create_ingress_controller: bool = True - ingress_controller_ip: str = "" - deploy_db: bool = True - database_username: str = "user" - database_password: str = "" - database_url: str = "" - database_ssl_ca: str = "" - database_ssl_cert: str = "" - database_ssl_key: str = "" - database_ssl_verify_server_cert: bool = True - analytics_opt_in: bool = True - model_config = ConfigDict(extra="allow") - - -class TerraformZenServerConfig(TerraformServiceConfig): - """Terraform Zen server configuration. - - Attributes: - server: The deployment configuration. - """ - - server: TerraformServerDeploymentConfig - copy_terraform_files: bool = True - - -class TerraformZenServer(TerraformService): - """Service that can be used to start a terraform ZenServer. - - Attributes: - config: service configuration - endpoint: service endpoint - """ - - SERVICE_TYPE = ServiceType( - name="terraform_zenml_server", - type="zen_server", - flavor="terraform", - description="Terraform ZenML server deployment", - ) - - config: TerraformZenServerConfig - - @classmethod - def get_service(cls) -> Optional["TerraformZenServer"]: - """Load and return the terraform ZenML server service, if present. - - Returns: - The terraform ZenML server service or None, if the terraform server - deployment is not found. - """ - try: - with open(TERRAFORM_ZENML_SERVER_CONFIG_FILENAME, "r") as f: - return cast( - TerraformZenServer, TerraformZenServer.from_json(f.read()) - ) - except FileNotFoundError: - return None - - def get_vars(self) -> Dict[str, Any]: - """Get variables as a dictionary. - - Returns: - A dictionary of variables to use for the Terraform deployment. - """ - # get the contents of the server deployment config as dict - filter_vars = ["log_level", "provider"] - # filter keys that are not modeled as terraform deployment vars - vars = { - k: str(v) if isinstance(v, UUID) else v - for k, v in self.config.server.model_dump().items() - if k not in filter_vars - } - assert self.status.runtime_path - - with open( - os.path.join( - self.status.runtime_path, self.config.variables_file_path - ), - "w", - ) as fp: - json.dump(vars, fp, indent=4) - - return vars - - def provision(self) -> None: - """Provision the service.""" - super().provision() - logger.info( - f"Your ZenML server is now deployed with URL:\n" - f"{self.get_server_url()}" - ) - - def get_server_url(self) -> str: - """Returns the deployed ZenML server's URL. - - Returns: - The URL of the deployed ZenML server. - """ - return str( - self.terraform_client.output( - TERRAFORM_DEPLOYED_ZENSERVER_OUTPUT_URL, full_value=True - ) - ) - - def get_certificate(self) -> Optional[str]: - """Returns the CA certificate configured for the ZenML server. - - Returns: - The CA certificate configured for the ZenML server. 
- """ - return cast( - str, - self.terraform_client.output( - TERRAFORM_DEPLOYED_ZENSERVER_OUTPUT_CA_CRT, full_value=True - ), - ) diff --git a/src/zenml/zen_server/utils.py b/src/zenml/zen_server/utils.py index c74b33dc864..27fd551e36a 100644 --- a/src/zenml/zen_server/utils.py +++ b/src/zenml/zen_server/utils.py @@ -39,7 +39,6 @@ INFO, VERSION_1, ) -from zenml.enums import ServerProviderType from zenml.exceptions import IllegalOperationError, OAuthError from zenml.logger import get_logger from zenml.plugins.plugin_flavor_registry import PluginFlavorRegistry @@ -237,45 +236,23 @@ def server_config() -> ServerConfiguration: return _server_config -def get_active_deployment(local: bool = False) -> Optional["ServerDeployment"]: - """Get the active local or remote server deployment. +def get_local_server() -> Optional["ServerDeployment"]: + """Get the active local server. - Call this function to retrieve the local or remote server deployment that - was last provisioned on this machine. - - Args: - local: Whether to return the local active deployment or the remote one. + Call this function to retrieve the local server deployed on this machine. Returns: - The local or remote active server deployment or None, if no deployment - was found. + The local server deployment or None, if no local server deployment was + found. """ from zenml.zen_server.deploy.deployer import ServerDeployer deployer = ServerDeployer() - if local: - servers = deployer.list_servers(provider_type=ServerProviderType.LOCAL) - if not servers: - servers = deployer.list_servers( - provider_type=ServerProviderType.DOCKER - ) - else: - servers = deployer.list_servers() - + servers = deployer.list_servers() if not servers: return None - for server in servers: - if server.config.provider in [ - ServerProviderType.LOCAL, - ServerProviderType.DOCKER, - ]: - if local: - return server - elif not local: - return server - - return None + return servers[0] def get_active_server_details() -> Tuple[str, Optional[int]]: @@ -284,8 +261,7 @@ def get_active_server_details() -> Tuple[str, Optional[int]]: When multiple servers are present, the following precedence is used to determine which server to use: - If the client is connected to a server, that server has precedence. - - If no server is connected, a server that was deployed remotely has - precedence over a server that was deployed locally. + - If no server is connected, the local server is used, if present. Returns: The URL and port of the currently active server. @@ -299,14 +275,8 @@ def get_active_server_details() -> Tuple[str, Optional[int]]: logger.debug("Getting URL of connected server.") parsed_url = urlparse(gc.store_configuration.url) return f"{parsed_url.scheme}://{parsed_url.hostname}", parsed_url.port - # Else, check for deployed servers - server = get_active_deployment(local=False) - if server: - logger.debug("Getting URL of remote server.") - else: - server = get_active_deployment(local=True) - logger.debug("Getting URL of local server.") - + # Else, check for local servers + server = get_local_server() if server and server.status and server.status.url: if isinstance(server.config, LocalServerDeploymentConfig): return server.status.url, server.config.port diff --git a/tests/unit/utils/test_mlstacks_utils.py b/tests/unit/utils/test_mlstacks_utils.py deleted file mode 100644 index c4fc4d16359..00000000000 --- a/tests/unit/utils/test_mlstacks_utils.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright (c) ZenML GmbH 2022. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express -# or implied. See the License for the specific language governing -# permissions and limitations under the License. - -import pytest - -from zenml.utils.mlstacks_utils import ( - _add_extra_config_to_components, - _construct_base_stack, - _construct_components, - _get_component_flavor, - _validate_extra_config, - convert_click_params_to_mlstacks_primitives, - get_stack_spec_file_path, - stack_exists, - stack_spec_exists, -) - - -def test_stack_exists_works(local_stack): - """Tests that stack_exists util function works. - - Args: - local_stack: ZenML local stack fixture. - """ - stack_name = "aria_test_stack" - assert not stack_exists(stack_name) - assert stack_exists(local_stack.name) - - -def test_get_stack_spec_file_path_fails_when_no_stack(): - """Checks util function fails if no stack found.""" - with pytest.raises(KeyError): - get_stack_spec_file_path("blupus_stack") - - -def test_get_stack_spec_file_path_works(): - """Checks util function works for default stack (always present).""" - assert get_stack_spec_file_path("default") == "" - - -def test_get_stack_spec_file_path_only_works_with_full_name(): - """Checks util function only works for full name matches.""" - with pytest.raises(KeyError): - get_stack_spec_file_path("defau") # prefix of 'default' - - -def test_spec_file_exists_works_when_no_file(): - """Checks spec file search works when no file.""" - assert not stack_spec_exists("default") - assert not stack_spec_exists("humpty-dumpty") - - -def test_component_flavor_parsing_works(): - """Checks component flavor parsing.""" - assert ( - _get_component_flavor(key="artifact_store", value=True, provider="aws") - == "s3" - ) - assert ( - _get_component_flavor( - key="experiment_tracker", value="mlflow", provider="aws" - ) - == "mlflow" - ) - assert ( - _get_component_flavor( - key="experiment_tracker", value="mlflow", provider="azure" - ) - == "mlflow" - ) - assert ( - _get_component_flavor( - key="mlops_platform", value="zenml", provider="azure" - ) - == "zenml" - ) - assert ( - _get_component_flavor( - key="container_registry", value=True, provider="azure" - ) - == "azure" - ) - assert ( - _get_component_flavor(key="artifact_store", value=True, provider="k3d") - == "minio" - ) - - -def test_config_addition_works(): - """Checks ability to add extra_config values to components.""" - from mlstacks.models.component import Component - - artifact_store_name = "blupus-ka-artifact-store" - container_registry_name = "blupus-ka-container-registry" - bucket_name = "blupus-ka-bucket" - repo_name = "blupus-ka-repo" - components = [ - Component( - name=artifact_store_name, - component_type="artifact_store", - component_flavor="gcp", - provider="gcp", - ), - Component( - name=container_registry_name, - component_type="container_registry", - component_flavor="gcp", - provider="gcp", - ), - ] - extra_config = { - "repo_name": repo_name, - "bucket_name": bucket_name, - "project_id": "blupus-ka-project", # needed for GCP - } - _add_extra_config_to_components( - components=components, extra_config=extra_config - ) - artifact_store = [ - component - for component in 
components - if component.name == artifact_store_name - ][0] - assert artifact_store.metadata.config["bucket_name"] == "blupus-ka-bucket" - - container_registry = [ - component - for component in components - if component.name == container_registry_name - ][0] - assert container_registry.metadata.config["repo_name"] == "blupus-ka-repo" - - -def test_component_construction_works_for_stack_deploy(): - """Tests zenml component construction helper works for stack deploy.""" - from mlstacks.models.component import Component - - params = { - "provider": "aws", - "region": "us-east-1", - "mlops_platform": "zenml", - "artifact_store": True, - "extra_config": ( - "something_extra=something_blue", - "bucket_name=bikkel", - ), - } - components = _construct_components( - params=params, zenml_component_deploy=False - ) - assert isinstance(components, list) - assert len(components) == 2 - zenml_component = [ - component - for component in components - if component.component_flavor == "zenml" - ][0] - assert isinstance(zenml_component, Component) - assert zenml_component.component_flavor == "zenml" - assert zenml_component.component_type == "mlops_platform" - assert not zenml_component.metadata.config.get("something_extra") - - s3_bucket = [ - component - for component in components - if component.component_flavor == "s3" - ][0] - assert s3_bucket.component_flavor == "s3" - assert s3_bucket.component_type == "artifact_store" - - -def test_component_construction_works_for_component_deploy(): - """Tests zenml component construction helper works for stack deploy.""" - from mlstacks.models.component import Component - - artifact_store_name = "aria-ka-artifact-store" - params = { - "provider": "aws", - "region": "us-east-1", - "artifact_store": artifact_store_name, - "extra_config": ( - "something_extra=something_blue", - "bucket_name=bikkel", - ), - } - components = _construct_components( - params=params, zenml_component_deploy=True - ) - assert isinstance(components, list) - assert len(components) == 1 - - s3_bucket = [ - component - for component in components - if component.component_flavor == "s3" - ][0] - assert isinstance(s3_bucket, Component) - assert s3_bucket.component_flavor == "s3" - assert s3_bucket.name == artifact_store_name - assert s3_bucket.component_type == "artifact_store" - assert not s3_bucket.metadata.config.get("something_extra") - - -def test_stack_construction_works_for_stack_deploy(): - """Tests zenml component construction helper works for stack deploy.""" - from mlstacks.models.stack import Stack - - artifact_store_name = "aria-ka-artifact-store" - params = { - "provider": "aws", - "region": "us-east-1", - "stack_name": "aria", - "artifact_store": artifact_store_name, - "extra_config": ( - "something_extra=something_blue", - "bucket_name=bikkel", - ), - "tags": ("windy_city=chicago",), - } - stack = _construct_base_stack(params=params) - assert isinstance(stack, Stack) - assert stack.name == "aria" - assert stack.provider == "aws" - assert stack.default_region == "us-east-1" - assert stack.default_tags.get("windy_city") == "chicago" - assert len(stack.components) == 0 - - -def test_click_params_to_mlstacks_conversion(): - """Tests the conversion of click params to mlstacks primitives.""" - from mlstacks.models.component import Component - from mlstacks.models.stack import Stack - - params = { - "provider": "aws", - "region": "us-east-1", - "stack_name": "aria", - "artifact_store": True, - "mlops_platform": "zenml", - "extra_config": ( - "something_extra=something_blue", - 
"bucket_name=bikkel", - ), - "tags": ("windy_city=chicago",), - } - stack, components = convert_click_params_to_mlstacks_primitives( - params=params - ) - assert stack - assert isinstance(stack, Stack) - assert isinstance(components, list) - assert len(components) == 2 - assert stack.name == "aria" - assert stack.provider == "aws" - assert stack.default_region == "us-east-1" - assert stack.default_tags.get("windy_city") == "chicago" - assert all(c.endswith(".yaml") for c in stack.components) - assert all(isinstance(c, str) for c in stack.components) - - artifact_store = [ - component - for component in components - if component.component_flavor == "s3" - ][0] - assert isinstance(artifact_store, Component) - assert artifact_store.component_flavor == "s3" - assert artifact_store.name == "aws-artifact_store" - - -def test_extra_config_validation(): - """Tests that extra config validation checks for duplicate = characters.""" - valid_test_config = "project_id=zenml-core" - assert _validate_extra_config(valid_test_config) - - invalid_test_config = ( - "bucket_name=my-new-bucket=2024", - "project_id=zenml-core", - ) - assert not _validate_extra_config(invalid_test_config) From b5c6eacc9decef07a3cf643ba0cb1f820e26bcb8 Mon Sep 17 00:00:00 2001 From: Andrei Vishniakov <31008759+avishniakov@users.noreply.github.com> Date: Fri, 18 Oct 2024 15:15:41 +0200 Subject: [PATCH 2/2] Do not tag model via `Model` class on creation (#3098) * Refactor model.py: Remove unused 'tags' parameter in Model class constructor * Refactor Model class constructor: Remove unused 'tags' parameter * Refactor tagging documentation: Update code examples for model versioning --- .../how-to/handle-data-artifacts/tagging.md | 10 +++++--- src/zenml/model/model.py | 1 - src/zenml/models/v2/core/model.py | 1 + .../functional/model/test_model_version.py | 23 +++++++++---------- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/docs/book/how-to/handle-data-artifacts/tagging.md b/docs/book/how-to/handle-data-artifacts/tagging.md index 50dc298b27a..4bc58ec32e9 100644 --- a/docs/book/how-to/handle-data-artifacts/tagging.md +++ b/docs/book/how-to/handle-data-artifacts/tagging.md @@ -54,15 +54,19 @@ Note that [ZenML Pro](https://zenml.io/pro) users can tag artifacts directly in Just like artifacts, you can also tag your models to organize them semantically. Here's how to use tags with models in the ZenML Python SDK and CLI (or in the [ZenML Pro Dashboard directly](https://zenml.io/pro)). -When creating a model using the `Model` object, you can specify tags as key-value pairs that will be attached to the model upon creation: +When creating a model version using the `Model` object, you can specify tags as key-value pairs that will be attached to the model version upon creation. +{% hint style="warning" %} +During pipeline run a model can be also implicitly created (if not exists), in such cases it will not get the `tags` from the `Model` class. +You can manipulate the model tags using SDK (see below) or the ZenML Pro UI. 
+{% endhint %} ```python from zenml.models import Model -# Define tags to be added to the model +# Define tags to be added to the model version tags = ["experiment", "v1", "classification-task"] -# Create a model with tags +# Create a model version with tags model = Model( name="iris_classifier", version="1.0.0", diff --git a/src/zenml/model/model.py b/src/zenml/model/model.py index a759a7637b3..1aafefe4841 100644 --- a/src/zenml/model/model.py +++ b/src/zenml/model/model.py @@ -569,7 +569,6 @@ def _get_or_create_model(self) -> "ModelResponse": limitations=self.limitations, trade_offs=self.trade_offs, ethics=self.ethics, - tags=self.tags, user=zenml_client.active_user.id, workspace=zenml_client.active_workspace.id, save_models_to_registry=self.save_models_to_registry, diff --git a/src/zenml/models/v2/core/model.py b/src/zenml/models/v2/core/model.py index 9f05f83b18f..955abd14236 100644 --- a/src/zenml/models/v2/core/model.py +++ b/src/zenml/models/v2/core/model.py @@ -82,6 +82,7 @@ class ModelRequest(WorkspaceScopedRequest): ) tags: Optional[List[str]] = Field( title="Tags associated with the model", + default=None, ) save_models_to_registry: bool = Field( title="Whether to save all ModelArtifacts to Model Registry", diff --git a/tests/integration/functional/model/test_model_version.py b/tests/integration/functional/model/test_model_version.py index b9a6b3c7b54..e434afb998b 100644 --- a/tests/integration/functional/model/test_model_version.py +++ b/tests/integration/functional/model/test_model_version.py @@ -198,7 +198,7 @@ def test_model_create_model_and_version(self): assert mv.name == str(mv.number) assert mv.model.name == mdl_name assert {t.name for t in mv.tags} == {"tag1", "tag2"} - assert {t.name for t in mv.model.tags} == {"tag1", "tag2"} + assert len(mv.model.tags) == 0 def test_create_model_version_makes_proper_tagging(self): """Test if model versions get unique tags.""" @@ -208,14 +208,14 @@ def test_create_model_version_makes_proper_tagging(self): assert mv.name == str(mv.number) assert mv.model.name == mdl_name assert {t.name for t in mv.tags} == {"tag1", "tag2"} - assert {t.name for t in mv.model.tags} == {"tag1", "tag2"} + assert len(mv.model.tags) == 0 mv = Model(name=mdl_name, tags=["tag3", "tag4"]) mv = mv._get_or_create_model_version() assert mv.name == str(mv.number) assert mv.model.name == mdl_name assert {t.name for t in mv.tags} == {"tag3", "tag4"} - assert {t.name for t in mv.model.tags} == {"tag1", "tag2"} + assert len(mv.model.tags) == 0 def test_model_fetch_model_and_version_by_number(self): """Test model and model version retrieval by exact version number.""" @@ -301,15 +301,17 @@ def test_tags_properly_created(self): # run 2 times to first create, next get for _ in range(2): - model = mv._get_or_create_model() + model_version = mv._get_or_create_model_version() - assert len(model.tags) == 2 - assert {t.name for t in model.tags} == { + assert len(model_version.tags) == 2 + assert {t.name for t in model_version.tags} == { green_tag, new_tag, } assert { - t.color for t in model.tags if t.name == green_tag + t.color + for t in model_version.tags + if t.name == green_tag } == {"green"} def test_tags_properly_updated(self): @@ -324,10 +326,8 @@ def test_tags_properly_updated(self): client.update_model(model_id, add_tags=["tag1", "tag2"]) model = mv._get_or_create_model() - assert len(model.tags) == 4 + assert len(model.tags) == 2 assert {t.name for t in model.tags} == { - "foo", - "bar", "tag1", "tag2", } @@ -346,8 +346,7 @@ def test_tags_properly_updated(self): 
client.update_model(model_id, remove_tags=["tag1", "tag2"]) model = mv._get_or_create_model() - assert len(model.tags) == 2 - assert {t.name for t in model.tags} == {"foo", "bar"} + assert len(model.tags) == 0 client.update_model_version( model_id, "1", remove_tags=["tag3", "tag4"]