From bb724d93eabbbe400a3e0ebfce97ec74d3b828f6 Mon Sep 17 00:00:00 2001 From: "abby.huang" <78209557+abby-cyber@users.noreply.github.com> Date: Wed, 22 Nov 2023 11:39:09 +0800 Subject: [PATCH] reorg operator in 3.5.0-sc (#2366) * reorg operator in 3.5.0-sc * Update mkdocs.yml * Update 3.1.customize-installation.md * Update 3.1.customize-installation.md --- docs-2.0/20.appendix/0.FAQ.md | 2 +- docs-2.0/20.appendix/6.eco-tool-version.md | 2 +- docs-2.0/20.appendix/learning-path.md | 2 +- .../9.space-statements/6.clear-space.md | 2 +- .../6.deploy-nebula-graph-with-peripherals.md | 2 +- .../1.configurations/1.configurations.md | 4 +- .../3.license-manager.md | 7 +- .../nebula-br-ent/1.br-ent-overview.md | 2 +- .../1.introduction-to-nebula-operator.md | 25 +- .../2.get-started/2.1.install-operator.md | 90 +++ .../2.get-started/2.2.deploy-lm.md} | 41 +- .../2.get-started/2.3.create-cluster.md | 242 +++++++ .../2.get-started/2.4.connect-to-cluster.md} | 167 +++-- .../3.1.customize-installation.md | 95 +++ .../3.2.update-operator.md | 43 ++ .../3.3.upgrade-operator.md | 89 +++ .../3.4.unistall-operator.md | 32 + .../4.1.installation/4.1.1.cluster-install.md | 377 +++++++++++ .../4.1.2.cluster-upgrade.md} | 158 ++--- .../4.1.3.cluster-uninstall.md | 114 ++++ .../4.2.configuration.md | 180 ++++++ .../4.3.scaling/4.3.1.resizing.md | 117 ++++ .../4.3.scaling/4.3.2.enable-hpa.md | 201 ++++++ .../4.4.1.use-local-pv.md | 0 .../4.4.2.pv-expansion.md} | 4 +- .../4.4.3.configure-pv-reclaim.md} | 4 +- .../4.cluster-administration/4.5.logging.md} | 13 +- .../4.6.backup-and-restore.md} | 21 +- .../4.7.security/4.7.1.enable-mtls.md} | 485 +++++++------- .../4.7.2.enable-admission-control.md} | 6 +- .../4.8.1.self-healing.md} | 4 +- .../4.8.ha-and-balancing/4.8.2.enable-zone.md | 248 +++++++ .../4.8.3.balance-data-after-scale-out.md} | 2 +- .../4.9.1.rolling-update-strategy.md} | 11 +- .../4.9.advanced/4.9.2.restart-cluster.md | 0 .../5.FAQ.md} | 4 +- .../2.deploy-nebula-operator.md | 273 -------- .../3.1create-cluster-with-kubectl.md | 608 ------------------ .../3.2create-cluster-with-helm.md | 220 ------- .../6.get-started-with-operator.md | 10 - .../8.1.custom-conf-parameter.md | 172 ----- .../quick-start/st-ug-import-data.md | 6 +- mkdocs.yml | 61 +- 43 files changed, 2328 insertions(+), 1818 deletions(-) rename docs-2.0/{nebula-operator => k8s-operator}/1.introduction-to-nebula-operator.md (60%) create mode 100644 docs-2.0/k8s-operator/2.get-started/2.1.install-operator.md rename docs-2.0/{nebula-operator/3.deploy-nebula-graph-cluster/3.0.deploy-lm.md => k8s-operator/2.get-started/2.2.deploy-lm.md} (84%) create mode 100644 docs-2.0/k8s-operator/2.get-started/2.3.create-cluster.md rename docs-2.0/{nebula-operator/4.connect-to-nebula-graph-service.md => k8s-operator/2.get-started/2.4.connect-to-cluster.md} (93%) create mode 100644 docs-2.0/k8s-operator/3.operator-management/3.1.customize-installation.md create mode 100644 docs-2.0/k8s-operator/3.operator-management/3.2.update-operator.md create mode 100644 docs-2.0/k8s-operator/3.operator-management/3.3.upgrade-operator.md create mode 100644 docs-2.0/k8s-operator/3.operator-management/3.4.unistall-operator.md create mode 100644 docs-2.0/k8s-operator/4.cluster-administration/4.1.installation/4.1.1.cluster-install.md rename docs-2.0/{nebula-operator/9.upgrade-nebula-cluster.md => k8s-operator/4.cluster-administration/4.1.installation/4.1.2.cluster-upgrade.md} (54%) create mode 100644 
docs-2.0/k8s-operator/4.cluster-administration/4.1.installation/4.1.3.cluster-uninstall.md create mode 100644 docs-2.0/k8s-operator/4.cluster-administration/4.2.configuration.md create mode 100644 docs-2.0/k8s-operator/4.cluster-administration/4.3.scaling/4.3.1.resizing.md create mode 100644 docs-2.0/k8s-operator/4.cluster-administration/4.3.scaling/4.3.2.enable-hpa.md create mode 100644 docs-2.0/k8s-operator/4.cluster-administration/4.4.storage-management/4.4.1.use-local-pv.md rename docs-2.0/{nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md => k8s-operator/4.cluster-administration/4.4.storage-management/4.4.2.pv-expansion.md} (95%) rename docs-2.0/{nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md => k8s-operator/4.cluster-administration/4.4.storage-management/4.4.3.configure-pv-reclaim.md} (89%) rename docs-2.0/{nebula-operator/8.custom-cluster-configurations/8.4.manage-running-logs.md => k8s-operator/4.cluster-administration/4.5.logging.md} (90%) rename docs-2.0/{nebula-operator/10.backup-restore-using-operator.md => k8s-operator/4.cluster-administration/4.6.backup-and-restore.md} (93%) rename docs-2.0/{nebula-operator/8.custom-cluster-configurations/8.5.enable-ssl.md => k8s-operator/4.cluster-administration/4.7.security/4.7.1.enable-mtls.md} (59%) rename docs-2.0/{nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md => k8s-operator/4.cluster-administration/4.7.security/4.7.2.enable-admission-control.md} (92%) rename docs-2.0/{nebula-operator/5.operator-failover.md => k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.1.self-healing.md} (81%) create mode 100644 docs-2.0/k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.2.enable-zone.md rename docs-2.0/{nebula-operator/8.custom-cluster-configurations/storage/8.3.balance-data-when-scaling-storage.md => k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.3.balance-data-after-scale-out.md} (95%) rename docs-2.0/{nebula-operator/11.rolling-update-strategy.md => k8s-operator/4.cluster-administration/4.9.advanced/4.9.1.rolling-update-strategy.md} (92%) create mode 100644 docs-2.0/k8s-operator/4.cluster-administration/4.9.advanced/4.9.2.restart-cluster.md rename docs-2.0/{nebula-operator/7.operator-faq.md => k8s-operator/5.FAQ.md} (86%) delete mode 100644 docs-2.0/nebula-operator/2.deploy-nebula-operator.md delete mode 100644 docs-2.0/nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md delete mode 100644 docs-2.0/nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md delete mode 100644 docs-2.0/nebula-operator/6.get-started-with-operator.md delete mode 100644 docs-2.0/nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md diff --git a/docs-2.0/20.appendix/0.FAQ.md b/docs-2.0/20.appendix/0.FAQ.md index e14cf12d15a..22fcf84ebb4 100644 --- a/docs-2.0/20.appendix/0.FAQ.md +++ b/docs-2.0/20.appendix/0.FAQ.md @@ -383,7 +383,7 @@ You also need to run [Balance Data and Balance leader](../8.service-tuning/load- {{ent.ent_begin}} You can scale Graph and Storage services with Dashboard Enterprise Edition. For details, see [Scale](../nebula-dashboard-ent/4.cluster-operator/operator/scale.md). -You can also use NebulaGraph Operator to scale Graph and Storage services. 
For details, see [Deploy NebulaGraph clusters with Kubectl](../nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md) and [Deploy NebulaGraph clusters with Helm](../nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md). +You can also use NebulaGraph Operator to scale Graph and Storage services. For details, see [Scale a NebulaGraph Cluster](../k8s-operator/4.cluster-administration/4.3.scaling/4.3.1.resizing.md). {{ent.ent_end}} #### Add or remove disks in the Storage nodes diff --git a/docs-2.0/20.appendix/6.eco-tool-version.md b/docs-2.0/20.appendix/6.eco-tool-version.md index a887e7ea41d..7db65541975 100644 --- a/docs-2.0/20.appendix/6.eco-tool-version.md +++ b/docs-2.0/20.appendix/6.eco-tool-version.md @@ -76,7 +76,7 @@ NebulaGraph Exchange (Exchange for short) is an Apache Spark&trade application f ## NebulaGraph Operator -NebulaGraph Operator (Operator for short) is a tool to automate the deployment, operation, and maintenance of NebulaGraph clusters on Kubernetes. Building upon the excellent scalability mechanism of Kubernetes, NebulaGraph introduced its operation and maintenance knowledge into the Kubernetes system, which makes NebulaGraph a real cloud-native graph database. For more information, see [What is NebulaGraph Operator](../nebula-operator/1.introduction-to-nebula-operator.md). +NebulaGraph Operator (Operator for short) is a tool to automate the deployment, operation, and maintenance of NebulaGraph clusters on Kubernetes. Building upon the excellent scalability mechanism of Kubernetes, NebulaGraph introduced its operation and maintenance knowledge into the Kubernetes system, which makes NebulaGraph a real cloud-native graph database. For more information, see [What is NebulaGraph Operator](../k8s-operator/1.introduction-to-nebula-operator.md). |NebulaGraph version|Operator version| |:---|:---| diff --git a/docs-2.0/20.appendix/learning-path.md b/docs-2.0/20.appendix/learning-path.md index 6c4969601bb..ad50333720d 100644 --- a/docs-2.0/20.appendix/learning-path.md +++ b/docs-2.0/20.appendix/learning-path.md @@ -194,7 +194,7 @@ After completing the NebulaGraph learning path, taking [NebulaGraph Certificatio | Document | | -------- | - | [NebulaGraph Operator](../nebula-operator/1.introduction-to-nebula-operator.md) | + | [NebulaGraph Operator](../k8s-operator/1.introduction-to-nebula-operator.md) | - Graph algorithm diff --git a/docs-2.0/3.ngql-guide/9.space-statements/6.clear-space.md b/docs-2.0/3.ngql-guide/9.space-statements/6.clear-space.md index 1932ac6d420..ed5e3cb9c01 100644 --- a/docs-2.0/3.ngql-guide/9.space-statements/6.clear-space.md +++ b/docs-2.0/3.ngql-guide/9.space-statements/6.clear-space.md @@ -4,7 +4,7 @@ !!! note - It is recommended to execute [`SUBMIT JOB COMPACT`](../../4.job-statements/#submit_job_compact) immediately after executing the `CLEAR SPACE` operation improve the query performance. Note that the COMPACT operation may affect query performance, and it is recommended to perform this operation during low business hours (e.g., early morning). + It is recommended to execute [SUBMIT JOB COMPACT](../4.job-statements.md#submit_job_compact) immediately after executing the `CLEAR SPACE` operation improve the query performance. Note that the COMPACT operation may affect query performance, and it is recommended to perform this operation during low business hours (e.g., early morning). 
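    For reference, the sequence recommended in the note can be run from a shell in one pass with NebulaGraph Console. This is only a sketch: the address, credentials, and the space name `basketballplayer` are placeholders, not values taken from this page.

    ```bash
    # Clear the space, switch into it, and trigger a compaction in one console call.
    # As noted above, schedule this during low business hours.
    nebula-console -addr 192.168.8.100 -port 9669 -u root -p nebula \
      -e 'CLEAR SPACE basketballplayer; USE basketballplayer; SUBMIT JOB COMPACT;'
    ```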
## Permission requirements diff --git a/docs-2.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/6.deploy-nebula-graph-with-peripherals.md b/docs-2.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/6.deploy-nebula-graph-with-peripherals.md index df14dd0cd08..0dc9792e62a 100644 --- a/docs-2.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/6.deploy-nebula-graph-with-peripherals.md +++ b/docs-2.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/6.deploy-nebula-graph-with-peripherals.md @@ -12,7 +12,7 @@ You can install the Enterprise Edition and Community Edition of NebulaGraph with - To install NebulaGraph with **NebulaGraph Dashboard Enterprise Edition**, see [Create a cluster](../../nebula-dashboard-ent/3.create-import-dashboard/1.create-cluster.md). {{ ent.ent_end }} -- To install NebulaGraph with **NebulaGraph Operator**, see [Deploy NebulaGraph clusters with Kubectl](../../nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md) or [Deploy NebulaGraph clusters with Helm](../../nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md). +- To install NebulaGraph with **NebulaGraph Operator**, see [Customize installation defaults](../../k8s-operator/2.get-started/2.1.install-operator.md). {{ ent.ent_begin }} diff --git a/docs-2.0/5.configurations-and-logs/1.configurations/1.configurations.md b/docs-2.0/5.configurations-and-logs/1.configurations/1.configurations.md index f42fe2bfab0..da37f9ec54b 100644 --- a/docs-2.0/5.configurations-and-logs/1.configurations/1.configurations.md +++ b/docs-2.0/5.configurations-and-logs/1.configurations/1.configurations.md @@ -125,9 +125,9 @@ By default, each NebulaGraph service gets configured from its configuration file 1. In the `/nebula-docker-compose/docker-compose.yaml` file, modify the configurations of the target service. 2. In the `nebula-docker-compose` directory, run the command `docker-compose up -d` to restart the service involving configuration modifications. -* For clusters installed with Kubectl +* For clusters installed with NebulaGraph Operator - For details, see [Customize configuration parameters for a NebulaGraph cluster](../../nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md). + For details, see [Customize configuration parameters for a NebulaGraph cluster](../../k8s-operator/3.operator-management/3.1.customize-installation.md). ### Dynamically modifying configurations using command diff --git a/docs-2.0/9.about-license/2.license-management-suite/3.license-manager.md b/docs-2.0/9.about-license/2.license-management-suite/3.license-manager.md index fb0908d36dd..cea4ee758e7 100644 --- a/docs-2.0/9.about-license/2.license-management-suite/3.license-manager.md +++ b/docs-2.0/9.about-license/2.license-management-suite/3.license-manager.md @@ -2,7 +2,7 @@ A License Manager (LM) is an essential service that runs on a server for you to manage your license and license the NebulaGraph enterprise edition database and its associated software. You can use an LM client that communicates with the LM service to load [license keys](2.license-center.md#license_key) and view license information, including the license validity period and purchased nodes. By configuring the LM service address in the NebulaGraph database and its associated software, the validity of the license can be verified to ensure the normal use of the NebulaGraph database and its associated software. 
-This article introduces how to deploy and use an LM service in a Linux environment and how to configure it within the Nebula Graph database and its associated software. For information on how to deploy the LM in a K8s cluster, see [Deploy LM](../../nebula-operator/3.deploy-nebula-graph-cluster/3.0.deploy-lm.md). +This article introduces how to deploy and use an LM service in a Linux environment and how to configure it within the Nebula Graph database and its associated software. For information on how to deploy the LM in a K8s cluster, see [Deploy LM](../../k8s-operator/2.get-started/2.2.deploy-lm.md). ## Preparations @@ -279,7 +279,7 @@ You can use monitoring tools to monitor the status of the LM service. !!! note - By default, LM uses port `9119`. If you need to change the port number, you can modify the value of the `Port` field in the LM configuration file above, or modify the value of `port` in the YAML file of [Deploying LM in K8s](../../nebula-operator/3.deploy-nebula-graph-cluster/3.0.deploy-lm.md). + By default, LM uses port `9119`. If you need to change the port number, you can modify the value of the `Port` field in the LM configuration file above, or modify the value of `port` in the YAML file of [Deploying LM in K8s](../../k8s-operator/2.get-started/2.2.deploy-lm.md). @@ -324,9 +324,8 @@ After the configuration is complete, run `./run_pagerank.sh` in the `scripts` fo ### Configure LM in NebulaGraph Operator -- When deploying the cluster using Kubectl, configure the address and port of the LM through the `spec.metad.licenseManagerURL` field in the cluster configuration file. For more details, see [Deploying with Kubectl](../../nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md). +When deploying the cluster using NebulaGraph Operator, configure the address and port of the LM through the `spec.metad.licenseManagerURL` field in the cluster configuration file. For more details, see [Install a NebulaGraph cluster](../../k8s-operator/4.cluster-administration/4.1.installation/4.1.1.cluster-install.md). -- When deploying the cluster using Helm, specify the address and port of the LM with `--set nebula.metad.licenseManagerURL`. For more details, see [Deploying with Helm](../../nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md). ## FAQ diff --git a/docs-2.0/backup-and-restore/nebula-br-ent/1.br-ent-overview.md b/docs-2.0/backup-and-restore/nebula-br-ent/1.br-ent-overview.md index d844f0488d2..a0fe9a0f8cd 100644 --- a/docs-2.0/backup-and-restore/nebula-br-ent/1.br-ent-overview.md +++ b/docs-2.0/backup-and-restore/nebula-br-ent/1.br-ent-overview.md @@ -2,7 +2,7 @@ Backup Restore (BR for short) Enterprise Edition is a Command-Line Interface (CLI) tool. With BR Enterprise Edition, you can back up and restore NebulaGraph data. -For the deployment of BR in K8s Operator, see [Backup and restore data using NebulaGraph Operator](../../nebula-operator/10.backup-restore-using-operator.md). +For the deployment of BR in K8s Operator, see [Backup and restore data using NebulaGraph Operator](../../k8s-operator/4.cluster-administration/4.6.backup-and-restore.md). !!! 
enterpriseonly diff --git a/docs-2.0/nebula-operator/1.introduction-to-nebula-operator.md b/docs-2.0/k8s-operator/1.introduction-to-nebula-operator.md similarity index 60% rename from docs-2.0/nebula-operator/1.introduction-to-nebula-operator.md rename to docs-2.0/k8s-operator/1.introduction-to-nebula-operator.md index 0eab51cad0c..678fe8e2bf8 100644 --- a/docs-2.0/nebula-operator/1.introduction-to-nebula-operator.md +++ b/docs-2.0/k8s-operator/1.introduction-to-nebula-operator.md @@ -2,7 +2,7 @@ ## Concept -NebulaGraph Operator is a tool to automate the deployment, operation, and maintenance of [NebulaGraph](https://github.com/vesoft-inc/nebula) clusters on [Kubernetes](https://kubernetes.io). Building upon the excellent scalability mechanism of Kubernetes, NebulaGraph introduced its operation and maintenance knowledge into the Kubernetes system, which makes NebulaGraph a real [cloud-native graph database](https://www.nebula-cloud.io/). +NebulaGraph Operator is a tool to automate the deployment, operation, and maintenance of [NebulaGraph](https://github.com/vesoft-inc/nebula) clusters on [Kubernetes](https://kubernetes.io). Building upon the excellent scalability mechanism of Kubernetes, NebulaGraph introduced its operation and maintenance knowledge into the Kubernetes system, which makes NebulaGraph a real cloud-native graph database. ![operator_map](https://docs-cdn.nebula-graph.com.cn/figures/operator_map_2022-09-08_18-55-18.png) @@ -16,20 +16,17 @@ NebulaGraph Operator abstracts the deployment management of NebulaGraph clusters The following features are already available in NebulaGraph Operator: -- **Deploy and uninstall clusters**: NebulaGraph Operator simplifies the process of deploying and uninstalling clusters for users. NebulaGraph Operator allows you to quickly create, update, or delete a NebulaGraph cluster by simply providing the corresponding CR file. For more information, see [Deploy NebulaGraph Clusters with Kubectl](3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md) or [Deploy NebulaGraph Clusters with Helm](3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md). +- **Cluster deployment and deletion**: NebulaGraph Operator simplifies the process of deploying and uninstalling clusters for users. NebulaGraph Operator allows you to quickly create, update, or delete a NebulaGraph cluster by simply providing the corresponding CR file. For more information, see [Install NebulaGraph Clusters](../k8s-operator/4.cluster-administration/4.1.installation/4.1.1.cluster-install.md). -{{ent.ent_begin}} -- **Manage Zones**: Supports dividing multiple storage hosts into managed zones and creating graph spaces on specified storage hosts to achieve resource isolation. For more information, see [Create clusters using Zones with kubectl](3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md#create_clusters) or [Create clusters using Zones with Helm](3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md). +- **Disaster Recovery Zones**: Supports evenly distributing Storage Pods across zones to facilitate disaster recovery. For more information, see [Create clusters with zones](../k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.2.enable-zone.md). -- **Scale clusters**: NebulaGraph Operator calls NebulaGraph's native scaling interfaces in a control loop to implement the scaling logic. You can simply perform scaling operations with YAML configurations and ensure the stability of data. 
For more information, see [Scale clusters with Kubectl](3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md) or [Scale clusters with Helm](3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md). +- **Cluster scaling**: NebulaGraph Operator calls NebulaGraph's native scaling interfaces in a control loop to implement the scaling logic. You can simply perform scaling operations with YAML configurations and ensure the stability of data. For more information, see [Scale clusters](../k8s-operator/4.cluster-administration/4.3.scaling/4.3.1.resizing.md). -- **Backup and Recovery**:NebulaGraph supports data backup and recovery. Users can use NebulaGraph Operator to backup the data of the NebulaGraph cluster to storage services that are compatible with the S3 protocol, and can also restore data to the cluster from the storage service. For details, see [Backup and restore using NebulaGraph Operator](10.backup-restore-using-operator.md). - -{{ent.ent_end}} +- **Backup and Recovery**:NebulaGraph supports data backup and recovery. Users can use NebulaGraph Operator to backup the data of the NebulaGraph cluster to storage services that are compatible with the S3 protocol, and can also restore data to the cluster from the storage service. For details, see [Backup and restore using NebulaGraph Operator](../k8s-operator/4.cluster-administration/4.6.backup-and-restore.md). - **Cluster Upgrade**: NebulaGraph Operator supports cluster upgrading from version {{operator.upgrade_from}} to version {{operator.upgrade_to}}. -- **Self-Healing**: NebulaGraph Operator calls interfaces provided by NebulaGraph clusters to dynamically sense cluster service status. Once an exception is detected, NebulaGraph Operator performs fault tolerance. For more information, see [Self-Healing](5.operator-failover.md). +- **Self-Healing**: NebulaGraph Operator calls interfaces provided by NebulaGraph clusters to dynamically sense cluster service status. Once an exception is detected, NebulaGraph Operator performs fault tolerance. For more information, see [Self-Healing](../k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.1.self-healing.md). - **Balance Scheduling**: Based on the scheduler extension interface, the scheduler provided by NebulaGraph Operator evenly distributes Pods in a NebulaGraph cluster across all nodes. @@ -41,21 +38,17 @@ NebulaGraph Operator does not support the v1.x version of NebulaGraph. NebulaGra | NebulaGraph | NebulaGraph Operator | | ------------- | -------------------- | -| 3.5.x | 1.5.0 ~ 1.7.x | +| 3.5.x ~ 3.6.0 | 1.5.0 ~ 1.7.x | | 3.0.0 ~ 3.4.1 | 1.3.0, 1.4.0 ~ 1.4.2 | | 3.0.0 ~ 3.3.x | 1.0.0, 1.1.0, 1.2.0 | | 2.5.x ~ 2.6.x | 0.9.0 | | 2.5.x | 0.8.0 | -!!! Compatibility "Legacy version compatibility" +!!! compatibility "Legacy version compatibility" - The 1.x version NebulaGraph Operator is not compatible with NebulaGraph of version below v3.x. - Starting from NebulaGraph Operator 0.9.0, logs and data are stored separately. Using NebulaGraph Operator 0.9.0 or later versions to manage a NebulaGraph 2.5.x cluster created with Operator 0.8.0 can cause compatibility issues. You can backup the data of the NebulaGraph 2.5.x cluster and then create a 2.6.x cluster with Operator 0.9.0. -### Feature limitations - -The NebulaGraph Operator scaling feature is only available for the Enterprise Edition of NebulaGraph clusters and does not support scaling the Community Edition version of NebulaGraph clusters. 
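As an illustration of the YAML-driven scaling described in the feature list above, the following is a minimal sketch that changes the Storage replica count by patching the NebulaCluster custom resource. The cluster name `nebula` and the `default` namespace are assumptions; adjust them to your environment.

```bash
# Scale the Storage service to 5 replicas by patching spec.storaged.replicas.
kubectl -n default patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"replicas": 5}}}'

# Watch the Operator reconcile the cluster to the new replica count.
kubectl -n default get nc nebula -w
```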
- ## Release note -[Release](https://github.com/vesoft-inc/nebula-operator/releases/tag/{{operator.tag}}) \ No newline at end of file +[Release](https://github.com/vesoft-inc/nebula-operator/releases/tag/{{operator.tag}}) diff --git a/docs-2.0/k8s-operator/2.get-started/2.1.install-operator.md b/docs-2.0/k8s-operator/2.get-started/2.1.install-operator.md new file mode 100644 index 00000000000..0e3cdc11bfc --- /dev/null +++ b/docs-2.0/k8s-operator/2.get-started/2.1.install-operator.md @@ -0,0 +1,90 @@ +# Install NebulaGraph Operator + +You can deploy NebulaGraph Operator with [Helm](https://helm.sh/). + +## Background + +[NebulaGraph Operator](../1.introduction-to-nebula-operator.md) automates the management of NebulaGraph clusters, and eliminates the need for you to install, scale, upgrade, and uninstall NebulaGraph clusters, which lightens the burden on managing different application versions. + +## Prerequisites + +Before installing NebulaGraph Operator, you need to install the following software and ensure the correct version of the software : + +| Software | Requirement | +| ------------------------------------------------------------ | --------- | +| [Kubernetes](https://kubernetes.io) | \>= 1.18 | +| [Helm](https://helm.sh) | \>= 3.2.0 | +| [CoreDNS](https://github.com/coredns/coredns) | \>= 1.6.0 | + +!!! note + + - If using a role-based access control policy, you need to enable [RBAC](https://kubernetes.io/docs/admin/authorization/rbac) (optional). + + - [CoreDNS](https://coredns.io/) is a flexible and scalable DNS server that is [installed](https://github.com/coredns/helm) for Pods in NebulaGraph clusters. + +## Steps + +1. Add the NebulaGraph Operator Helm repository. + + ```bash + helm repo add nebula-operator https://vesoft-inc.github.io/nebula-operator/charts + ``` + +2. Update information of available charts locally from repositories. + + ```bash + helm repo update + ``` + + For more information about `helm repo`, see [Helm Repo](https://helm.sh/docs/helm/helm_repo/). + +3. Create a namespace for NebulaGraph Operator. + + ```bash + kubectl create namespace + ``` + + For example, run the following command to create a namespace named `nebula-operator-system`. + + ```bash + kubectl create namespace nebula-operator-system + ``` + + All the resources of NebulaGraph Operator are deployed in this namespace. + +4. Install NebulaGraph Operator. + + ```bash + helm install nebula-operator nebula-operator/nebula-operator --namespace= --version=${chart_version} + ``` + + For example, the command to install NebulaGraph Operator of version {{operator.release}} is as follows. + + ```bash + helm install nebula-operator nebula-operator/nebula-operator --namespace=nebula-operator-system --version={{operator.release}} + ``` + + `{{operator.release}}` is the version of the nebula-operator chart. When not specifying `--version`, the latest version of the nebula-operator chart is used by default. + + Run `helm search repo -l nebula-operator` to see chart versions. + + You can customize the configuration items of the NebulaGraph Operator chart before running the installation command. For more information, see [Customize installation defaults](../3.operator-management/3.1.customize-installation.md). + +5. View the information about the default-created CRD. 
+ + ```bash + kubectl get crd + ``` + + Output: + + ```bash + NAME CREATED AT + nebulaautoscalers.autoscaling.nebula-graph.io 2023-11-01T04:16:51Z + nebulaclusters.apps.nebula-graph.io 2023-10-12T07:55:32Z + nebularestores.apps.nebula-graph.io 2023-02-04T23:01:00Z + ``` + +## What's next + +[Deploy LM](2.2.deploy-lm.md) diff --git a/docs-2.0/nebula-operator/3.deploy-nebula-graph-cluster/3.0.deploy-lm.md b/docs-2.0/k8s-operator/2.get-started/2.2.deploy-lm.md similarity index 84% rename from docs-2.0/nebula-operator/3.deploy-nebula-graph-cluster/3.0.deploy-lm.md rename to docs-2.0/k8s-operator/2.get-started/2.2.deploy-lm.md index c5a8b978254..154cedab6b4 100644 --- a/docs-2.0/nebula-operator/3.deploy-nebula-graph-cluster/3.0.deploy-lm.md +++ b/docs-2.0/k8s-operator/2.get-started/2.2.deploy-lm.md @@ -1,9 +1,5 @@ # Deploy LM -!!! enterpriseonly - - The LM service is only used to manage the NebulaGraph Enterprise license. If you are using the Community Edition of NebulaGraph, you do not need to deploy LM. - Before deploying NebulaGraph Enterprise 3.5.0 or later using Operator, you first need to deploy [License Manager (LM)](../../9.about-license/2.license-management-suite/3.license-manager.md) and configure the NebulaGraph Enterprise [License](../../9.about-license/1.license-overview.md) in LM. LM is a standalone service used to manage the NebulaGraph license. LM checks the validity of the license when NebulaGraph Enterprise database starts. If the License is invalid, the database will not be able to start. ## Deployment instructions @@ -31,14 +27,14 @@ For information on how to deploy LM on a machine outside the K8s cluster, see [L 1. Create a namespace. - ``` + ```bash # Create the nebula-license-manager namespace. kubectl create namespace nebula-license-manager ``` 2. Create a Secret for pulling the LM image from a private repository. - ``` + ```bash kubectl -n nebula-license-manager create secret docker-registry \ --docker-server=DOCKER_REGISTRY_SERVER \ --docker-username=DOCKER_USER \ @@ -52,7 +48,7 @@ For information on how to deploy LM on a machine outside the K8s cluster, see [L 3. Create a StatefulSet resource configuration file for LM. Here is an example: - ```yml + ```yaml apiVersion: v1 kind: Service metadata: @@ -115,16 +111,35 @@ For information on how to deploy LM on a machine outside the K8s cluster, see [L 4. Create LM. - ``` + ```bash kubectl apply -f nebula-license-manager.yaml ``` 5. Verify that LM has been successfully deployed. - ``` + ```bash kubectl -n nebula-license-manager get pods ``` +## Access LM + +You can access the License Manager (LM) through the created Service using the following command to get the IP and port of the LM Service: + +```bash +kubectl -n nebula-license-manager get svc nebula-license-manager +``` + +Output: + +```bash +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +nebula-license-manager ClusterIP 10.108.xxx.219 9119/TCP 37m +``` + +To access the LM within the cluster, you can use the `ClusterIP` type Service IP and port `9119`. For example, `10.108.xxx.219:9119`. + +Alternatively, you can access the LM using the domain name `..svc.cluster.local` and port `9119`. For example, `nebula-license-manager.nebula-license-manager.svc.cluster.local:9119`. + ## Monitor LM You can use monitoring tools, such as Dashboard Enterprise or Prometheus, to monitor the running status and metrics of LM. For more information, see [Monitor LM](../../9.about-license/2.license-management-suite/3.license-manager.md). 
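If you need to reach the LM port from outside the cluster for a quick check (for example, from an administrator workstation), one option is to forward the port locally. This sketch assumes the Service and namespace created above.

```bash
# Forward local port 9119 to the nebula-license-manager Service inside the cluster.
kubectl -n nebula-license-manager port-forward svc/nebula-license-manager 9119:9119

# While the forward is running, the LM can be reached locally at 127.0.0.1:9119.
```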
@@ -135,7 +150,7 @@ You can use monitoring tools, such as Dashboard Enterprise or Prometheus, to mon - Commands for managing the license using LM deployed within the K8s cluster are as follows: - ``` + ```bash # View license information. kubectl -n nebula-license-manager exec -it nebula-license-manager-0 -- \ /usr/local/nebula-license-manager/nebula-license-manager-cli info @@ -150,8 +165,6 @@ You can use monitoring tools, such as Dashboard Enterprise or Prometheus, to mon /usr/local/nebula-license-manager/nebula-license-manager-cli usage ``` -## Next to do - -After deploying LM and loading the License Key, you need to configure the address and port of LM in the NebulaGraph Enterprise cluster through the `licenseManagerURL` parameter. +## What's next -For more information, see [Deploying Using Kubectl](3.1create-cluster-with-kubectl.md) or [Deploying Using Helm](3.2create-cluster-with-helm.md). +After deploying LM and loading the License Key, you need to configure the address and port of LM in the NebulaGraph Enterprise cluster through the `licenseManagerURL` parameter. For more information, see [Create a cluster](2.3.create-cluster.md). \ No newline at end of file diff --git a/docs-2.0/k8s-operator/2.get-started/2.3.create-cluster.md b/docs-2.0/k8s-operator/2.get-started/2.3.create-cluster.md new file mode 100644 index 00000000000..baaa3d6cbf1 --- /dev/null +++ b/docs-2.0/k8s-operator/2.get-started/2.3.create-cluster.md @@ -0,0 +1,242 @@ +# Create a NebulaGraph cluster + +This topic introduces how to create a {{nebula.name}} cluster with the following two methods: + +- Create a {{nebula.name}} cluster with Helm +- Create a {{nebula.name}} cluster with Kubectl + +## Prerequisites + +- [NebulaGraph Operator is installed.](2.1.install-operator.md) +- [LM is installed and the License Key is loaded.](2.2.deploy-lm.md) +- [A StorageClass is created.](https://kubernetes.io/docs/concepts/storage/storage-classes/) + +## Create a {{nebula.name}} cluster with Helm + +!!! compatibility "Legacy version compatibility" + + The 1.x version NebulaGraph Operator is not compatible with NebulaGraph of version below v3.x. + +1. Add the NebulaGraph Operator Helm repository. + + ```bash + helm repo add nebula-operator https://vesoft-inc.github.io/nebula-operator/charts + ``` + +2. Update information of available charts locally from chart repositories. + + ```bash + helm repo update + ``` + +3. Set environment variables to your desired values. + + ```bash + export NEBULA_CLUSTER_NAME=nebula # The desired NebulaGraph cluster name. + export NEBULA_CLUSTER_NAMESPACE=nebula # The desired namespace where your NebulaGraph cluster locates. + export STORAGE_CLASS_NAME=fast-disks # The name of the StorageClass that has been created. + ``` + +4. Create a namespace for your NebulaGraph cluster (If you have created one, skip this step). + + ```bash + kubectl create namespace "${NEBULA_CLUSTER_NAMESPACE}" + ``` + +5. Create a Secret for pulling the NebulaGraph cluster image from a private repository. + + ```bash + kubectl -n "${NEBULA_CLUSTER_NAMESPACE}" create secret docker-registry \ + --docker-server=DOCKER_REGISTRY_SERVER \ + --docker-username=DOCKER_USER \ + --docker-password=DOCKER_PASSWORD + ``` + + - ``: Specify the name of the Secret. + - `DOCKER_REGISTRY_SERVER`: Specify the server address of the private repository from which the image will be pulled, such as `reg.example-inc.com`. + - `DOCKER_USER`: The username for the image repository. + - `DOCKER_PASSWORD`: The password for the image repository. + +6. 
Apply the variables to the Helm chart to create a NebulaGraph cluster. + + ```bash + helm install "${NEBULA_CLUSTER_NAME}" nebula-operator/nebula-cluster \ + # Configure the access address and port (default port is '9119') that points to the LM. You must configure this parameter in order to obtain the license information. Only for NebulaGraph Enterprise Edition clusters. + --set nebula.metad.licenseManagerURL="192.168.8.XXX:9119" \ + # Configure the image addresses for each service in the cluster. + --set nebula.graphd.image="" \ + --set nebula.metad.image="" \ + --set nebula.storaged.image="" \ + # Configure the Secret for pulling images from a private repository. + --set imagePullSecrets[0].name="{}" \ + --set nameOverride="${NEBULA_CLUSTER_NAME}" \ + --set nebula.storageClassName="${STORAGE_CLASS_NAME}" \ + # Specify the version of the NebulaGraph cluster. + --set nebula.version=v{{nebula.release}} \ + # Specify the version of the nebula-cluster chart. If not specified, the latest version of the chart is installed by default. + # Run 'helm search repo nebula-operator/nebula-cluster' to view the available versions of the chart. + --version={{operator.release}} \ + --namespace="${NEBULA_CLUSTER_NAMESPACE}" \ + ``` + + NebulaGraph Operator supports creating clusters with zones. For more information, see [Install NebulaGraph clusters](../4.cluster-administration/4.1.installation/4.1.1.cluster-install.md). + + +## Create a {{nebula.name}} cluster with Kubectl + +!!! compatibility "Legacy version compatibility" + + The 1.x version NebulaGraph Operator is not compatible with NebulaGraph of version below v3.x. + + +The following example shows how to create a NebulaGraph cluster by creating a cluster named `nebula`. + +1. Create a namespace, for example, `nebula`. If not specified, the `default` namespace is used. + + ```bash + kubectl create namespace nebula + ``` + +2. Create a Secret for pulling the NebulaGraph Enterprise image from a private repository. + + ```bash + kubectl -n create secret docker-registry \ + --docker-server=DOCKER_REGISTRY_SERVER \ + --docker-username=DOCKER_USER \ + --docker-password=DOCKER_PASSWORD + ``` + + - ``: The namespace where this Secret will be stored. + - ``: Specify the name of the Secret. + - `DOCKER_REGISTRY_SERVER`: Specify the server address of the private repository from which the image will be pulled, such as `reg.example-inc.com`. + - `DOCKER_USER`: The username for the image repository. + - `DOCKER_PASSWORD`: The password for the image repository. + +3. Define the cluster configuration file. + + ??? info "Expand to see an example configuration for the cluster" + + ```yaml + apiVersion: apps.nebula-graph.io/v1alpha1 + kind: NebulaCluster + metadata: + name: nebula + namespace: default + spec: + topologySpreadConstraints: + - topologyKey: "kubernetes.io/hostname" + whenUnsatisfiable: "ScheduleAnyway" + graphd: + # Container image for the Graph service. + image: reg.example-inc.com/xxx/xxx + logVolumeClaim: + resources: + requests: + storage: 2Gi + # Storage class name for storing Graph service logs. + storageClassName: local-sc + replicas: 1 + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + version: v{{nebula.release}} + imagePullPolicy: Always + # Secret for pulling images from a private repository. + imagePullSecrets: + - name: secret-name + metad: + # LM access address and port number for obtaining License information. + licenseManagerURL: 192.168.x.xxx:9119 + # Container image for the Meta service. 
+ image: reg.example-inc.com/xxx/xxx + logVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: local-sc + dataVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: local-sc + replicas: 1 + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + version: v{{nebula.release}} + reference: + name: statefulsets.apps + version: v1 + schedulerName: default-scheduler + storaged: + # Container image for the Storage service. + image: reg.example-inc.com/xxx/xxx + logVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: local-sc + dataVolumeClaims: + - resources: + requests: + storage: 2Gi + storageClassName: local-sc + replicas: 1 + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + version: v{{nebula.release}} + ``` + + The following parameters must be customized: + + - `spec.metad.licenseManagerURL`: Configure the URL that points to the LM, which consists of the access address and port number (default port `9119`) of the LM. + - `spec..image`: Specify the container image of the Graph, Meta, and Storage service respectively. + - `spec.imagePullSecrets`: Specify the Secret for pulling the NebulaGraph Enterprise service images from a private repository. + - `spec..logVolumeClaim.storageClassName`: Specify the log disk storage configurations for the Graph, Meta, and Storage service respectively. + - `spec.metad.dataVolumeClaim.storageClassName`: Specify the data disk storage configurations for the Meta service. + - `spec.storaged.dataVolumeClaims.storageClassName`: Specify the data disk storage configurations for the Storage service. + + For more information about the other parameters, see [Install NebulaGraph clusters](../4.cluster-administration/4.1.installation/4.1.1.cluster-install.md). + + +4. Create a NebulaGraph cluster. + + ```bash + kubectl create -f apps_v1alpha1_nebulacluster.yaml + ``` + + Output: + + ```bash + nebulacluster.apps.nebula-graph.io/nebula created + ``` + +5. Check the status of the NebulaGraph cluster. + + ```bash + kubectl get nc nebula + ``` + + Output: + + ```bash + NAME READY GRAPHD-DESIRED GRAPHD-READY METAD-DESIRED METAD-READY STORAGED-DESIRED STORAGED-READY AGE + nebula True 1 1 1 1 1 1 86s + ``` + +## What's next + +[Connect to a cluster](2.4.connect-to-cluster.md) \ No newline at end of file diff --git a/docs-2.0/nebula-operator/4.connect-to-nebula-graph-service.md b/docs-2.0/k8s-operator/2.get-started/2.4.connect-to-cluster.md similarity index 93% rename from docs-2.0/nebula-operator/4.connect-to-nebula-graph-service.md rename to docs-2.0/k8s-operator/2.get-started/2.4.connect-to-cluster.md index acc70c4f4c5..81a0caec91a 100644 --- a/docs-2.0/nebula-operator/4.connect-to-nebula-graph-service.md +++ b/docs-2.0/k8s-operator/2.get-started/2.4.connect-to-cluster.md @@ -1,23 +1,17 @@ -# Connect to NebulaGraph databases with Nebular Operator +# Connect to a NebulaGraph cluster After creating a NebulaGraph cluster with NebulaGraph Operator on Kubernetes, you can connect to NebulaGraph databases from within the cluster and outside the cluster. ## Prerequisites -Create a NebulaGraph cluster with NebulaGraph Operator on Kubernetes. For more information, see [Deploy NebulaGraph clusters with Kubectl](3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md) or [Deploy NebulaGraph clusters with Helm](3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md). +A NebulaGraph cluster is created on Kubernetes. 
For more information, see [Create a NebulaGraph cluster](2.3.create-cluster.md). -## Connect to NebulaGraph databases from outside a NebulaGraph cluster via `NodePort` - -You can create a `NodePort` type Service to access internal cluster services from outside the cluster using any node IP and the exposed node port. You can also utilize load balancing services provided by cloud vendors (such as Azure, AWS, etc.) by setting the Service type to `LoadBalancer`. This allows external access to internal cluster services through the public IP and port of the load balancer provided by the cloud vendor. - -The Service of type `NodePort` forwards the front-end requests via the label selector `spec.selector` to Graphd pods with labels `app.kubernetes.io/cluster: ` and `app.kubernetes.io/component: graphd`. - -After creating a NebulaGraph cluster based on the [example template](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/config/samples/apps_v1alpha1_nebulacluster.yaml), where `spec.graphd.service.type=NodePort`, the NebulaGraph Operator will automatically create a NodePort type Service named `-graphd-svc` in the same namespace. You can directly connect to the NebulaGraph database through any node IP and the exposed node port (see step 4 below). You can also create a custom Service according to your needs. - -Steps: +## Connect to NebulaGraph databases from within a NebulaGraph cluster -1. Create a YAML file named `graphd-nodeport-service.yaml`. The file contents are as follows: +You can create a `ClusterIP` type Service to provide an access point to the NebulaGraph database for other Pods within the cluster. By using the Service's IP and the Graph service's port number (9669), you can connect to the NebulaGraph database. For more information, see [ClusterIP](https://kubernetes.io/docs/concepts/services-networking/service/). +1. Create a file named `graphd-clusterip-service.yaml`. The file contents are as follows: + ```yaml apiVersion: v1 kind: Service @@ -27,10 +21,9 @@ Steps: app.kubernetes.io/component: graphd app.kubernetes.io/managed-by: nebula-operator app.kubernetes.io/name: nebula-graph - name: nebula-graphd-svc-nodeport + name: nebula-graphd-svc namespace: default spec: - externalTrafficPolicy: Local ports: - name: thrift port: 9669 @@ -45,56 +38,62 @@ Steps: app.kubernetes.io/component: graphd app.kubernetes.io/managed-by: nebula-operator app.kubernetes.io/name: nebula-graph - type: NodePort # Set the type to NodePort. + type: ClusterIP # Set the type to ClusterIP. ``` - + - NebulaGraph uses port `9669` by default. `19669` is the HTTP port of the Graph service in a NebulaGraph cluster. - - The value of `targetPort` is the port mapped to the database Pods, which can be customized. + - `targetPort` is the port mapped to the database Pods, which can be customized. + +2. Create a ClusterIP Service. -2. Run the following command to create a NodePort Service. + ```bash + kubectl create -f graphd-clusterip-service.yaml + ``` +3. Check the IP of the Service: + ```bash - kubectl create -f graphd-nodeport-service.yaml + $ kubectl get service -l app.kubernetes.io/cluster= # is the name of your NebulaGraph cluster. + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + nebula-graphd-svc ClusterIP 10.98.213.34 9669/TCP,19669/TCP,19670/TCP 23h + ... ``` -3. Check the port mapped on all of your cluster nodes. +4. 
Run the following command to connect to the NebulaGraph database using the IP of the `-graphd-svc` Service above: ```bash - kubectl get services -l app.kubernetes.io/cluster= # is the name of your NebulaGraph cluster. + kubectl run -ti --image vesoft/nebula-console:{{console.tag}} --restart=Never -- -addr -port -u -p ``` - Output: + For example: ```bash - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - nebula-graphd-svc-nodeport NodePort 10.107.153.129 9669:32236/TCP,19669:31674/TCP,19670:31057/TCP 24h - ... + kubectl run -ti --image vesoft/nebula-console:{{console.tag}} --restart=Never -- nebula-console -addr 10.98.213.34 -port 9669 -u root -p vesoft ``` - As you see, the mapped port of NebulaGraph databases on all cluster nodes is `32236`. - -4. Connect to NebulaGraph databases with your node IP and the node port above. - - ```bash - kubectl run -ti --image vesoft/nebula-console:{{console.tag}} --restart=Never -- -addr -port -u -p - ``` + - `--image`: The image for the tool NebulaGraph Console used to connect to NebulaGraph databases. + - ``: The custom Pod name. + - `-addr`: The IP of the `ClusterIP` Service, used to connect to Graphd services. + - `-port`: The port to connect to Graphd services, the default port of which is `9669`. + - `-u`: The username of your NebulaGraph account. Before enabling authentication, you can use any existing username. The default username is root. + - `-p`: The password of your NebulaGraph account. Before enabling authentication, you can use any characters as the password. - For example: + A successful connection to the database is indicated if the following is returned: ```bash - kubectl run -ti --image vesoft/nebula-console:{{console.tag}} --restart=Never -- nebula-console -addr 192.168.8.24 -port 32236 -u root -p vesoft If you don't see a command prompt, try pressing enter. (root@nebula) [(none)]> ``` - - `--image`: The image for the tool NebulaGraph Console used to connect to NebulaGraph databases. - - ``: The custom Pod name. The above example uses `nebula-console`. - - `-addr`: The IP of any node in a NebulaGraph cluster. The above example uses `192.168.8.24`. - - `-port`: The mapped port of NebulaGraph databases on all cluster nodes. The above example uses `32236`. - - `-u`: The username of your NebulaGraph account. Before enabling authentication, you can use any existing username. The default username is root. - - `-p`: The password of your NebulaGraph account. Before enabling authentication, you can use any characters as the password. - + You can also connect to NebulaGraph databases with **Fully Qualified Domain Name (FQDN)**. The domain format is `-graphd..svc.`. The default value of `CLUSTER_DOMAIN` is `cluster.local`. + + ```bash + kubectl run -ti --image vesoft/nebula-console:{{console.tag}} --restart=Never -- -addr -graphd-svc.default.svc.cluster.local -port -u -p + ``` + + `service_port` is the port to connect to Graphd services, the default port of which is `9669`. + !!! note If the `spec.console` field is set in the cluster configuration file, you can also connect to NebulaGraph databases with the following command: @@ -104,17 +103,23 @@ Steps: kubectl exec -it nebula-console -- /bin/sh # Connect to NebulaGraph databases. - nebula-console -addr -port -u -p + nebula-console -addr nebula-graphd-svc.default.svc.cluster.local -port 9669 -u -p ``` For information about the nebula-console container, see [nebula-console](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/doc/user/nebula_console.md#nebula-console). 
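Before connecting, you can optionally confirm that the ClusterIP Service is backed by running Graphd Pods. The check below assumes the Service name and the `default` namespace used in the example above.

```bash
# List the Pod endpoints behind the Graphd Service. An empty ENDPOINTS column
# means the label selector does not match any running Graphd Pods.
kubectl -n default get endpoints nebula-graphd-svc
```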
-## Connect to NebulaGraph databases from within a NebulaGraph cluster +## Connect to NebulaGraph databases from outside a NebulaGraph cluster via `NodePort` -You can also create a `ClusterIP` type Service to provide an access point to the NebulaGraph database for other Pods within the cluster. By using the Service's IP and the Graph service's port number (9669), you can connect to the NebulaGraph database. For more information, see [ClusterIP](https://kubernetes.io/docs/concepts/services-networking/service/). +You can create a `NodePort` type Service to access internal cluster services from outside the cluster using any node IP and the exposed node port. You can also utilize load balancing services provided by cloud vendors (such as Azure, AWS, etc.) by setting the Service type to `LoadBalancer`. This allows external access to internal cluster services through the public IP and port of the load balancer provided by the cloud vendor. + +The Service of type `NodePort` forwards the front-end requests via the label selector `spec.selector` to Graphd pods with labels `app.kubernetes.io/cluster: ` and `app.kubernetes.io/component: graphd`. + +After creating a NebulaGraph cluster based on the [example template](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/config/samples/apps_v1alpha1_nebulacluster.yaml), where `spec.graphd.service.type=NodePort`, the NebulaGraph Operator will automatically create a NodePort type Service named `-graphd-svc` in the same namespace. You can directly connect to the NebulaGraph database through any node IP and the exposed node port (see step 4 below). You can also create a custom Service according to your needs. + +Steps: + +1. Create a YAML file named `graphd-nodeport-service.yaml`. The file contents are as follows: -1. Create a file named `graphd-clusterip-service.yaml`. The file contents are as follows: - ```yaml apiVersion: v1 kind: Service @@ -124,7 +129,7 @@ You can also create a `ClusterIP` type Service to provide an access point to the app.kubernetes.io/component: graphd app.kubernetes.io/managed-by: nebula-operator app.kubernetes.io/name: nebula-graph - name: nebula-graphd-svc + name: nebula-graphd-svc-nodeport namespace: default spec: externalTrafficPolicy: Local @@ -142,62 +147,56 @@ You can also create a `ClusterIP` type Service to provide an access point to the app.kubernetes.io/component: graphd app.kubernetes.io/managed-by: nebula-operator app.kubernetes.io/name: nebula-graph - type: ClusterIP # Set the type to ClusterIP. + type: NodePort # Set the type to NodePort. ``` - + - NebulaGraph uses port `9669` by default. `19669` is the HTTP port of the Graph service in a NebulaGraph cluster. - - `targetPort` is the port mapped to the database Pods, which can be customized. - -2. Create a ClusterIP Service. + - The value of `targetPort` is the port mapped to the database Pods, which can be customized. - ```bash - kubectl create -f graphd-clusterip-service.yaml - ``` +2. Run the following command to create a NodePort Service. -3. Check the IP of the Service: - ```bash - $ kubectl get service -l app.kubernetes.io/cluster= # is the name of your NebulaGraph cluster. - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - nebula-graphd-svc ClusterIP 10.98.213.34 9669/TCP,19669/TCP,19670/TCP 23h - ... + kubectl create -f graphd-nodeport-service.yaml ``` -4. Run the following command to connect to the NebulaGraph database using the IP of the `-graphd-svc` Service above: +3. Check the port mapped on all of your cluster nodes. 
```bash - kubectl run -ti --image vesoft/nebula-console:{{console.tag}} --restart=Never -- -addr -port -u -p + kubectl get services -l app.kubernetes.io/cluster= # is the name of your NebulaGraph cluster. ``` - For example: + Output: ```bash - kubectl run -ti --image vesoft/nebula-console:{{console.tag}} --restart=Never -- nebula-console -addr 10.98.213.34 -port 9669 -u root -p vesoft + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + nebula-graphd-svc-nodeport NodePort 10.107.153.129 9669:32236/TCP,19669:31674/TCP,19670:31057/TCP 24h + ... ``` - - `--image`: The image for the tool NebulaGraph Console used to connect to NebulaGraph databases. - - ``: The custom Pod name. - - `-addr`: The IP of the `ClusterIP` Service, used to connect to Graphd services. - - `-port`: The port to connect to Graphd services, the default port of which is `9669`. - - `-u`: The username of your NebulaGraph account. Before enabling authentication, you can use any existing username. The default username is root. - - `-p`: The password of your NebulaGraph account. Before enabling authentication, you can use any characters as the password. - - A successful connection to the database is indicated if the following is returned: + As you see, the mapped port of NebulaGraph databases on all cluster nodes is `32236`. +4. Connect to NebulaGraph databases with your node IP and the node port above. + ```bash - If you don't see a command prompt, try pressing enter. - - (root@nebula) [(none)]> + kubectl run -ti --image vesoft/nebula-console:{{console.tag}} --restart=Never -- -addr -port -u -p ``` - You can also connect to NebulaGraph databases with **Fully Qualified Domain Name (FQDN)**. The domain format is `-graphd..svc.`. The default value of `CLUSTER_DOMAIN` is `cluster.local`. + For example: ```bash - kubectl run -ti --image vesoft/nebula-console:{{console.tag}} --restart=Never -- -addr -graphd-svc.default.svc.cluster.local -port -u -p - ``` + kubectl run -ti --image vesoft/nebula-console:{{console.tag}} --restart=Never -- nebula-console -addr 192.168.8.24 -port 32236 -u root -p vesoft + If you don't see a command prompt, try pressing enter. - `service_port` is the port to connect to Graphd services, the default port of which is `9669`. + (root@nebula) [(none)]> + ``` + - `--image`: The image for the tool NebulaGraph Console used to connect to NebulaGraph databases. + - ``: The custom Pod name. The above example uses `nebula-console`. + - `-addr`: The IP of any node in a NebulaGraph cluster. The above example uses `192.168.8.24`. + - `-port`: The mapped port of NebulaGraph databases on all cluster nodes. The above example uses `32236`. + - `-u`: The username of your NebulaGraph account. Before enabling authentication, you can use any existing username. The default username is root. + - `-p`: The password of your NebulaGraph account. Before enabling authentication, you can use any characters as the password. + !!! note If the `spec.console` field is set in the cluster configuration file, you can also connect to NebulaGraph databases with the following command: @@ -207,10 +206,10 @@ You can also create a `ClusterIP` type Service to provide an access point to the kubectl exec -it nebula-console -- /bin/sh # Connect to NebulaGraph databases. 
- nebula-console -addr nebula-graphd-svc.default.svc.cluster.local -port 9669 -u -p + nebula-console -addr -port -u -p ``` - For information about the nebula-console container, see [nebula-console](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/doc/user/nebula_console.md#nebula-console). + For information about the nebula-console container, see [nebula-console](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/doc/user/nebula_console.md#nebula-console). ## Connect to NebulaGraph databases from outside a NebulaGraph cluster via Ingress @@ -313,7 +312,7 @@ Steps are as follows. kubectl exec -it nebula-console -- /bin/sh # Connect to NebulaGraph databases. - nebula-console -addr -port -u -p + nebula-console -addr -port -u -p ``` - For information about the nebula-console container, see [nebula-console](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/doc/user/nebula_console.md#nebula-console). + For information about the nebula-console container, see [nebula-console](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/doc/user/nebula_console.md#nebula-console). \ No newline at end of file diff --git a/docs-2.0/k8s-operator/3.operator-management/3.1.customize-installation.md b/docs-2.0/k8s-operator/3.operator-management/3.1.customize-installation.md new file mode 100644 index 00000000000..68197525f61 --- /dev/null +++ b/docs-2.0/k8s-operator/3.operator-management/3.1.customize-installation.md @@ -0,0 +1,95 @@ +# Customize installation defaults + +This topic introduces how to customize the default configurations when installing NebulaGraph Operator. + +## Customizable parameters + +When executing the `helm install [NAME] [CHART] [flags]` command to install a chart, you can specify the chart configuration. For more information, see [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). + +You can view the configurable options in the [nebula-operator chart](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/charts/nebula-operator/values.yaml) configuration file. Alternatively, you can view the configurable options through the command `helm show values nebula-operator/nebula-operator`, as shown below. + +```yaml +[root@master ~]$ helm show values nebula-operator/nebula-operator +image: + nebulaOperator: + image: vesoft/nebula-operator:{{operator.tag}} + imagePullPolicy: Always + +imagePullSecrets: [ ] +kubernetesClusterDomain: "" + +controllerManager: + create: true + replicas: 2 + env: [ ] + resources: + limits: + cpu: 200m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + +admissionWebhook: + create: false + # The TCP port the Webhook server binds to. (default 9443) + webhookBindPort: 9443 + +scheduler: + create: true + schedulerName: nebula-scheduler + replicas: 2 + env: [ ] + resources: + limits: + cpu: 200m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + verbosity: 0 + plugins: + enabled: ["NodeZone"] + disabled: [] +... +``` + +Part of the above parameters are described as follows: + +| Parameter | Default value | Description | +| :------------------------------------- | :------------------------------ | :----------------------------------------- | +| `image.nebulaOperator.image` | `vesoft/nebula-operator:{{operator.tag}}` | The image of NebulaGraph Operator, version of which is {{operator.release}}. 
|
+| `image.nebulaOperator.imagePullPolicy` | `Always` | The image pull policy in Kubernetes. |
+| `imagePullSecrets` | - | The image pull secret in Kubernetes. For example `imagePullSecrets[0].name="vesoft"`. |
+| `kubernetesClusterDomain` | `cluster.local` | The cluster domain. |
+| `controllerManager.create` | `true` | Whether to enable the controller-manager component. |
+| `controllerManager.replicas` | `2` | The number of controller-manager replicas. |
+| `admissionWebhook.create` | `false` | Whether to enable Admission Webhook. This option is disabled by default. To enable it, set the value to `true` and install [cert-manager](https://cert-manager.io/docs/installation/helm/). For details, see [Enable admission control](../4.cluster-administration/4.7.security/4.7.2.enable-admission-control.md). |
+| `scheduler.create` | `true` | Whether to enable Scheduler. |
+| `scheduler.schedulerName` | `nebula-scheduler` | The name of the scheduler customized by NebulaGraph Operator. It is used to evenly distribute Storage Pods across different [zones](../4.cluster-administration/4.8.ha-and-balancing/4.8.2.enable-zone.md). |
+| `scheduler.replicas` | `2` | The number of nebula-scheduler replicas. |
+
+
+## Example
+
+The following example shows how to enable AdmissionWebhook when you install NebulaGraph Operator (AdmissionWebhook is disabled by default):
+
+```bash
+helm install nebula-operator nebula-operator/nebula-operator --namespace=<namespace> --set admissionWebhook.create=true
+```
+
+Check whether the specified configuration of NebulaGraph Operator is installed successfully:
+
+```bash
+helm get values nebula-operator -n <namespace>
+```
+
+Example output:
+
+```yaml
+USER-SUPPLIED VALUES:
+admissionWebhook:
+  create: true
+```
+
+For more information about `helm install`, see [Helm Install](https://helm.sh/docs/helm/helm_install/).
diff --git a/docs-2.0/k8s-operator/3.operator-management/3.2.update-operator.md b/docs-2.0/k8s-operator/3.operator-management/3.2.update-operator.md
new file mode 100644
index 00000000000..deea8065f4d
--- /dev/null
+++ b/docs-2.0/k8s-operator/3.operator-management/3.2.update-operator.md
@@ -0,0 +1,43 @@
+# Update NebulaGraph Operator
+
+This topic introduces how to update the configuration of NebulaGraph Operator.
+
+## Steps
+
+1. Update the information of available charts locally from chart repositories.
+
+   ```bash
+   helm repo update
+   ```
+
+2. View the default values of NebulaGraph Operator.
+
+   ```bash
+   helm show values nebula-operator/nebula-operator
+   ```
+
+3. Update NebulaGraph Operator by passing configuration parameters via `--set`.
+
+   - `--set`: Overrides values using the command line. For more configurable items, see [Customize installation defaults](3.1.customize-installation.md).
+
+   For example, to enable AdmissionWebhook, run the following command:
+
+   ```bash
+   helm upgrade nebula-operator nebula-operator/nebula-operator --namespace=nebula-operator-system --version={{operator.release}} --set admissionWebhook.create=true
+   ```
+
+   For more information, see [Helm upgrade](https://helm.sh/docs/helm/helm_upgrade/).
+
+4. Check whether the configuration of NebulaGraph Operator is updated successfully.
+ + ```bash + helm get values nebula-operator -n nebula-operator-system + ``` + + Example output: + + ```yaml + USER-SUPPLIED VALUES: + admissionWebhook: + create: true + ``` \ No newline at end of file diff --git a/docs-2.0/k8s-operator/3.operator-management/3.3.upgrade-operator.md b/docs-2.0/k8s-operator/3.operator-management/3.3.upgrade-operator.md new file mode 100644 index 00000000000..ad94b75bf3c --- /dev/null +++ b/docs-2.0/k8s-operator/3.operator-management/3.3.upgrade-operator.md @@ -0,0 +1,89 @@ +# Upgrade NebulaGraph Operator + +!!! compatibility "Legacy version compatibility" + + - Does not support upgrading 0.9.0 and below version NebulaGraph Operator to 1.x. + - The 1.x version NebulaGraph Operator is not compatible with NebulaGraph of version below v3.x. + +## Steps + +1. View the current version of NebulaGraph Operator. + + ```bash + helm list --all-namespaces + ``` + + Example output: + + ```bash + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + nebula-operator nebula-operator-system 3 2023-11-06 12:06:24.742397418 +0800 CST deployed nebula-operator-1.7.0 1.7.0 + ``` + +2. Update the information of available charts locally from chart repositories. + + ```bash + helm repo update + ``` + +3. Upgrade NebulaGraph Operator to version {{operator.release}}. + + ```bash + helm upgrade nebula-operator nebula-operator/nebula-operator --namespace= --version={{operator.release}} + ``` + + For example: + + ```bash + helm upgrade nebula-operator nebula-operator/nebula-operator --namespace=nebula-operator-system --version={{operator.release}} + ``` + + Output: + + ```bash + Release "nebula-operator" has been upgraded. Happy Helming! + NAME: nebula-operator + LAST DEPLOYED: Tue Apr 16 02:21:08 2022 + NAMESPACE: nebula-operator-system + STATUS: deployed + REVISION: 3 + TEST SUITE: None + NOTES: + NebulaGraph Operator installed! + ``` + +4. Pull the latest CRD configuration file. + + !!! note + You need to upgrade the corresponding CRD configurations after NebulaGraph Operator is upgraded. Otherwise, the creation of NebulaGraph clusters will fail. For information about the CRD configurations, see [apps.nebula-graph.io_nebulaclusters.yaml](https://github.com/vesoft-inc/nebula-operator/blob/{{operator.tag}}/config/crd/bases/apps.nebula-graph.io_nebulaclusters.yaml). + + 1. Pull the NebulaGraph Operator chart package. + + ```bash + helm pull nebula-operator/nebula-operator --version={{operator.release}} + ``` + + - `--version`: The NebulaGraph Operator version you want to upgrade to. If not specified, the latest version will be pulled. + + 2. Run `tar -zxvf` to unpack the charts. + + For example: To unpack {{operator.tag}} chart to the `/tmp` path, run the following command: + + ```bash + tar -zxvf nebula-operator-{{operator.release}}.tgz -C /tmp + ``` + + - `-C /tmp`: If not specified, the chart files will be unpacked to the current directory. + + +5. Upgrade the CRD configuration file in the `nebula-operator` directory. 
+ + ```bash + kubectl apply -f crds/nebulaclusters.yaml + ``` + + Output: + + ```bash + customresourcedefinition.apiextensions.k8s.io/nebulaclusters.apps.nebula-graph.io configured + ``` \ No newline at end of file diff --git a/docs-2.0/k8s-operator/3.operator-management/3.4.unistall-operator.md b/docs-2.0/k8s-operator/3.operator-management/3.4.unistall-operator.md new file mode 100644 index 00000000000..6a700646454 --- /dev/null +++ b/docs-2.0/k8s-operator/3.operator-management/3.4.unistall-operator.md @@ -0,0 +1,32 @@ +# Uninstall NebulaGraph Operator + +This topic introduces how to uninstall NebulaGraph Operator. + +## Steps + +1. Uninstall the NebulaGraph Operator chart. + + ```bash + helm uninstall nebula-operator --namespace= + ``` + +2. View the information about the default-created CRD. + + ```bash + kubectl get crd + ``` + + Output: + + ```bash + NAME CREATED AT + nebulaautoscalers.autoscaling.nebula-graph.io 2023-11-01T04:16:51Z + nebulaclusters.apps.nebula-graph.io 2023-10-12T07:55:32Z + nebularestores.apps.nebula-graph.io 2023-02-04T23:01:00Z + ``` + +3. Delete CRD. + + ```bash + kubectl delete crd nebulaclusters.apps.nebula-graph.io nebularestores.apps.nebula-graph.io nebulaautoscalers.autoscaling.nebula-graph.io + ``` \ No newline at end of file diff --git a/docs-2.0/k8s-operator/4.cluster-administration/4.1.installation/4.1.1.cluster-install.md b/docs-2.0/k8s-operator/4.cluster-administration/4.1.installation/4.1.1.cluster-install.md new file mode 100644 index 00000000000..d158265d1c3 --- /dev/null +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.1.installation/4.1.1.cluster-install.md @@ -0,0 +1,377 @@ +# Install a NebulaGraph cluster using NebulaGraph Operator + +Using NebulaGraph Operator to install NebulaGraph clusters enables automated cluster management with automatic error recovery. This topic covers two methods, `kubectl apply` and `helm`, for installing clusters using NebulaGraph Operator. + +!!! compatibility "Historical version compatibility" + + NebulaGraph Operator versions 1.x are not compatible with NebulaGraph versions below 3.x. + +## Prerequisites + +- [Install NebulaGraph Operator](../../2.get-started/2.1.install-operator.md) +- [Create a StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) +- [Install and Load the License Key](../../2.get-started/2.2.deploy-lm.md) + +## Use `kubectl apply` + +1. Create a namespace for storing NebulaGraph cluster-related resources. For example, create the `nebula` namespace. + + ```bash + kubectl create namespace nebula + ``` + +2. Create a Secret for pulling NebulaGraph images from a private registry. + + ```bash + kubectl -n create secret docker-registry \ + --docker-server=DOCKER_REGISTRY_SERVER \ + --docker-username=DOCKER_USER \ + --docker-password=DOCKER_PASSWORD + ``` + + - ``: Namespace to store the Secret. + - ``: Name of the Secret. + - `DOCKER_REGISTRY_SERVE`: Private registry server address for pulling images, for example, `reg.example-inc.com`. + - `DOCKER_USE`: Username for the image registry. + - `DOCKER_PASSWORD`: Password for the image registry. + +3. Create a YAML configuration file for the cluster. For example, create a cluster named `nebula`. + + ??? info "Expand to view an example configuration for the `nebula` cluster" + + ```yaml + apiVersion: apps.nebula-graph.io/v1alpha1 + kind: NebulaCluster + metadata: + name: nebula + namespace: default + spec: + # Control the Pod scheduling strategy. 
+ topologySpreadConstraints: + - topologyKey: "kubernetes.io/hostname" + whenUnsatisfiable: "ScheduleAnyway" + # Enable PV recycling. + enablePVReclaim: false + # Enable the backup and restore feature. + enableBR: false + # Enable monitoring. + exporter: + image: vesoft/nebula-stats-exporter + version: v3.3.0 + replicas: 1 + maxRequests: 20 + # Custom Agent image for cluster backup and restore, and log cleanup. + agent: + image: vesoft/nebula-agent + version: latest + resources: + requests: + cpu: "100m" + memory: "128Mi" + limits: + cpu: "200m" + memory: "256Mi" + # Secret for pulling images from a private registry. + imagePullSecrets: + - name: secret-name + # Configure the image pull policy. + imagePullPolicy: Always + # Select the nodes for Pod scheduling. + nodeSelector: + nebula: cloud + # Dependent controller name. + reference: + name: statefulsets.apps + version: v1 + # Scheduler name. + schedulerName: default-scheduler + # Start NebulaGraph Console service for connecting to the Graph service. + console: + image: vesoft/nebula-console + version: nightly + username: "demo" + password: "test" + # Graph service configuration. + graphd: + # Used to check if the Graph service is running normally. + # readinessProbe: + # failureThreshold: 3 + # httpGet: + # path: /status + # port: 19669 + # scheme: HTTP + # initialDelaySeconds: 40 + # periodSeconds: 10 + # successThreshold: 1 + # timeoutSeconds: 10 + # Container image for the Graph service. + image: reg.example-inc.com/xxx/xxx + logVolumeClaim: + resources: + requests: + storage: 2Gi + # Storage class name for storing Graph service logs. + storageClassName: local-sc + # Number of replicas for the Graph service Pod. + replicas: 1 + # Resource configuration for the Graph service. + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + # Version of the Graph service. + version: v{{nebula.release}} + # Custom flags configuration for the Graph service. + config: {} + # Meta service configuration. + metad: + # LM access address and port, used to obtain License information. + licenseManagerURL: 192.168.x.xxx:9119 + # readinessProbe: + # failureThreshold: 3 + # httpGet: + # path: /status + # port: 19559 + # scheme: HTTP + # initialDelaySeconds: 5 + # periodSeconds: 5 + # successThreshold: 1 + # timeoutSeconds: 5 + # Container image for the Meta service. + image: reg.example-inc.com/xxx/xxx + logVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: local-sc + dataVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: local-sc + replicas: 1 + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + version: v{{nebula.release}} + # Custom flags configuration for the Meta service. + config: {} + # Storage service configuration. + storaged: + # readinessProbe: + # failureThreshold: 3 + # httpGet: + # path: /status + # port: 19779 + # scheme: HTTP + # initialDelaySeconds: 40 + # periodSeconds: 10 + # successThreshold: 1 + # timeoutSeconds: 5 + # Container image for the Storage service. + image: reg.example-inc.com/xxx/xxx + logVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: local-sc + dataVolumeClaims: + - resources: + requests: + storage: 2Gi + storageClassName: local-sc + replicas: 1 + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + version: v{{nebula.release}} + # Custom flags configuration for the Storage service. 
+ config: {} + ``` + + When creating the YAML configuration file for the cluster, you must customize the following parameters. For more detailed information about these parameters, see the **Cluster configuration parameters** section below. + + - `spec.metad.licenseManagerURL` + - `spec..image` + - `spec.imagePullSecrets` + - `spec...storageClassName` + + +4. Create the NebulaGraph cluster. + + ```bash + kubectl create -f apps_v1alpha1_nebulacluster.yaml -n nebula + ``` + + Output: + + ```bash + nebulacluster.apps.nebula-graph.io/nebula created + ``` + + If you don't specify the namespace using `-n`, it will default to the `default` namespace. + +5. Check the status of the NebulaGraph cluster. + + ```bash + kubectl get nebulaclusters nebula -n nebula + ``` + + Output: + + ```bash + NAME READY GRAPHD-DESIRED GRAPHD-READY METAD-DESIRED METAD-READY STORAGED-DESIRED STORAGED-READY AGE + nebula2 True 1 1 1 1 1 1 86s + ``` + +## Use `helm` + +1. Add the NebulaGraph Operator Helm repository (if it's already added, run the next step directly). + + ```bash + helm repo add nebula-operator https://vesoft-inc.github.io/nebula-operator/charts + ``` + +2. Update the Helm repository to fetch the latest resources. + + ```bash + helm repo update nebula-operator + ``` + +3. Set environment variables for the configuration parameters required for installing the cluster. + + ```bash + export NEBULA_CLUSTER_NAME=nebula # Name of the NebulaGraph cluster. + export NEBULA_CLUSTER_NAMESPACE=nebula # Namespace for the NebulaGraph cluster. + export STORAGE_CLASS_NAME=local-sc # StorageClass for the NebulaGraph cluster. + ``` + +4. Create a namespace for the NebulaGraph cluster if it hasn't been created already. + + ```bash + kubectl create namespace "${NEBULA_CLUSTER_NAMESPACE}" + ``` + +5. Create a Secret for pulling images from a private repository. + + ```bash + kubectl -n "${NEBULA_CLUSTER_NAMESPACE}" create secret docker-registry \ + --docker-server=DOCKER_REGISTRY_SERVER \ + --docker-username=DOCKER_USER \ + --docker-password=DOCKER_PASSWORD + ``` + + - ``: Specify the name of the Secret. + - `DOCKER_REGISTRY_SERVER`: Specify the address of the private image repository (e.g., `reg.example-inc.com`). + - `DOCKER_USER`: Username for the image repository. + - `DOCKER_PASSWORD`: Password for the image repository. + +6. Check the customizable configuration parameters for the `nebula-cluster` Helm chart of the `nebula-operator` when creating the cluster. + + - Run the following command to view all the configurable parameters. + + ```bash + helm show values nebula-operator/nebula-cluster + ``` + + - Visit [nebula-cluster/values.yaml](https://github.com/vesoft-inc/nebula-operator/blob/{{operator.branch}}/charts/nebula-cluster/values.yaml) to see all the configuration parameters for the NebulaGraph cluster. Click on [Chart parameters](https://github.com/vesoft-inc/nebula-operator/blob/{{operator.branch}}/doc/user/nebula_cluster_helm_guide.md#optional-chart-parameters) to see the parameter descriptions and their default values. + +7. Create the NebulaGraph cluster. + + You can use the `--set` flag to customize the default values of the NebulaGraph cluster configuration. For example, `--set nebula.storaged.replicas=3` sets the number of replicas for the Storage service to 3. + + ```bash + helm install "${NEBULA_CLUSTER_NAME}" nebula-operator/nebula-cluster \ + # Specify the version of the cluster chart. If not specified, it will install the latest version by default. 
+ # You can check all chart versions by running the command: helm search repo -l nebula-operator/nebula-cluster + --version={{operator.release}} \ + # Specify the namespace for the NebulaGraph cluster. + --namespace="${NEBULA_CLUSTER_NAMESPACE}" \ + # Configure the Secret for pulling images from the private repository. + --set imagePullSecrets[0].name="{}" \ + # Customize the cluster name. + --set nameOverride="${NEBULA_CLUSTER_NAME}" \ + # Configure the LM (License Manager) access address and port, with the default port being '9119'. + # You must configure this parameter to obtain the License information. + --set nebula.metad.licenseManagerURL="192.168.8.XXX:9119" \ + # Configure the image addresses for various services in the cluster. + --set nebula.graphd.image="" \ + --set nebula.metad.image="" \ + --set nebula.storaged.image="" \ + --set nebula.storageClassName="${STORAGE_CLASS_NAME}" \ + # Specify the version for the NebulaGraph cluster. + --set nebula.version=v{{nebula.release}} + ``` + +8. Check the status of NebulaGraph cluster pods. + + ```bash + kubectl -n "${NEBULA_CLUSTER_NAMESPACE}" get pod -l "app.kubernetes.io/cluster=${NEBULA_CLUSTER_NAME}" + ``` + + Output: + + ```bash + NAME READY STATUS RESTARTS AGE + nebula-exporter-854c76989c-mp725 1/1 Running 0 14h + nebula-graphd-0 1/1 Running 0 14h + nebula-graphd-1 1/1 Running 0 14h + nebula-metad-0 1/1 Running 0 14h + nebula-metad-1 1/1 Running 0 14h + nebula-metad-2 1/1 Running 0 14h + nebula-storaged-0 1/1 Running 0 14h + nebula-storaged-1 1/1 Running 0 14h + nebula-storaged-2 1/1 Running 0 14h + ``` + +## Cluster configuration parameters + +The table below lists the configurable parameters and their descriptions for creating a NebulaGraph cluster using a YAML file. + +| Parameter | Default Value | Description | +| :---------------------------------------------------------- | :----------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `metadata.name` | - | The name of the created NebulaGraph cluster. | +| `spec.console` | - | Launches a Console container for connecting to the Graph service. For configuration details, see [nebula-console](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/doc/user/nebula_console.md#nebula-console). | +| `spec.topologySpreadConstraints` | - | Controls the scheduling strategy for Pods. For more details, see [Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/). When the value of `topologyKey` is `kubernetes.io/zone`, the value of `whenUnsatisfiable` must be set to `DoNotSchedule`, and the value of `spec.schedulerName` should be `nebula-scheduler`. | +| `spec.graphd.replicas` | `1` | The number of replicas for the Graphd service. | +| `spec.graphd.image` | `vesoft/nebula-graphd` | The container image for the Graphd service. | +| `spec.graphd.version` | `{{nebula.tag}}` | The version of the Graphd service. | +| `spec.graphd.service` | | Configuration for accessing the Graphd service via a Service. | +| `spec.graphd.logVolumeClaim.storageClassName` | - | The storage class name for the log volume claim of the Graphd service. When using sample configuration, replace it with the name of the pre-created storage class. 
See [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) for creating a storage class. | +| `spec.metad.replicas` | `1` | The number of replicas for the Metad service. | +| `spec.metad.image` | `vesoft/nebula-metad` | The container image for the Metad service. | +| `spec.metad.version` | `{{nebula.tag}}` | The version of the Metad service. | +| `spec.metad.dataVolumeClaim.storageClassName` | - | Storage configuration for the data disk of the Metad service. When using sample configuration, replace it with the name of the pre-created storage class. See [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) for creating a storage class. | +| `spec.metad.logVolumeClaim.storageClassName` | - | Storage configuration for the log disk of the Metad service. When using sample configuration, replace it with the name of the pre-created storage class. See [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) for creating a storage class. | +| `spec.storaged.replicas` | `3` | The number of replicas for the Storaged service. | +| `spec.storaged.image` | `vesoft/nebula-storaged` | The container image for the Storaged service. | +| `spec.storaged.version` | `{{nebula.tag}}` | The version of the Storaged service. | +| `spec.storaged.dataVolumeClaims.resources.requests.storage` | - | The storage size for the data disk of the Storaged service. You can specify multiple data disks. When specifying multiple data disks, the paths are like `/usr/local/nebula/data1`, `/usr/local/nebula/data2`, and so on. | +| `spec.storaged.dataVolumeClaims.storageClassName` | - | Storage configuration for the data disks of the Storaged service. When using sample configuration, replace it with the name of the pre-created storage class. See [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) for creating a storage class. | +| `spec.storaged.logVolumeClaim.storageClassName` | - | Storage configuration for the log disk of the Storaged service. When using sample configuration, replace it with the name of the pre-created storage class. See [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) for creating a storage class. | +| `spec..securityContext` | `{}` | Defines the permission and access control for the cluster containers to control access and execution of container operations. For details, see [SecurityContext](https://github.com/vesoft-inc/nebula-operator/blob/{{operator.branch}}/doc/user/security_context.md). | +| `spec.agent` | `{}` | Configuration for the Agent service used for backup and recovery, and log cleaning functions. If you don't customize this configuration, the default configuration is used. | +| `spec.reference.name` | `{}` | The name of the controller it depends on. | +| `spec.schedulerName` | `default-scheduler` | The name of the scheduler. | +| `spec.imagePullPolicy` | `Always` | The image pull policy for NebulaGraph images. For more details on pull policies, please see [Image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy). | +| `spec.logRotate` | `{}` | Log rotation configuration. For details, see [Managing Cluster Logs](../4.5.logging.md). | +| `spec.enablePVReclaim` | `false` | Defines whether to automatically delete PVCs after deleting the cluster to release data. For details, see [Reclaim PV](../4.4.storage-management/4.4.3.configure-pv-reclaim.md). 
| +| `spec.metad.licenseManagerURL` | - | Configures the URL pointing to the License Manager (LM), consisting of the access address and port (default port `9119`). For example, `192.168.8.xxx:9119`. **You must configure this parameter to obtain the License information; otherwise, the NebulaGraph cluster will not function.** | +| `spec.storaged.enableAutoBalance` | `false` | Whether to enable automatic balancing. For details, see [Balancing Storage Data After Scaling Out](../4.8.ha-and-balancing/4.8.3.balance-data-after-scale-out.md). | +| `spec.enableBR` | `false` | Defines whether to enable the BR tool. For details, see [Backup and Restore](../4.6.backup-and-restore.md). | +| `spec.imagePullSecrets` | `[]` | Defines the Secret required to pull images from a private repository. | + +## Related topics + +[Enabling Zones in a Cluster](../4.8.ha-and-balancing/4.8.2.enable-zone.md) \ No newline at end of file diff --git a/docs-2.0/nebula-operator/9.upgrade-nebula-cluster.md b/docs-2.0/k8s-operator/4.cluster-administration/4.1.installation/4.1.2.cluster-upgrade.md similarity index 54% rename from docs-2.0/nebula-operator/9.upgrade-nebula-cluster.md rename to docs-2.0/k8s-operator/4.cluster-administration/4.1.installation/4.1.2.cluster-upgrade.md index 81c52b76d84..5c762f2af65 100644 --- a/docs-2.0/nebula-operator/9.upgrade-nebula-cluster.md +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.1.installation/4.1.2.cluster-upgrade.md @@ -2,7 +2,7 @@ This topic introduces how to upgrade a NebulaGraph cluster created with NebulaGraph Operator. -!!! Compatibility "Legacy version compatibility" +!!! compatibility "Legacy version compatibility" The 1.x version NebulaGraph Operator is not compatible with NebulaGraph of version below v3.x. @@ -12,22 +12,16 @@ This topic introduces how to upgrade a NebulaGraph cluster created with NebulaGr - Only support upgrading the NebulaGraph version from {{operator.upgrade_from}} to {{operator.upgrade_to}}. -{{ ent.ent_begin }} +- For upgrading NebulaGraph Enterprise Edition clusters, [contact us](https://www.nebula-graph.io/contact). -- For upgrading NebulaGraph Enterprise Edition clusters, [contact us](https://www.nebula-graph.com.cn/contact). -{{ ent.ent_end }} +## Prerequisites -## Upgrade a NebulaGraph cluster with Kubectl +You have created a NebulaGraph cluster. For details, see [Create a NebulaGraph cluster](4.1.1.cluster-install.md). -### Prerequisites +## Upgrade a NebulaGraph cluster with `kubectl` -You have created a NebulaGraph cluster with Kubectl. For details, see [Create a NebulaGraph cluster with Kubectl](3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md). - -The version of the NebulaGraph cluster to be upgraded in this topic is `{{operator.upgrade_from}}`, and its YAML file name is `apps_v1alpha1_nebulacluster.yaml`. - - -### Steps +The following steps upgrade a NebulaGraph cluster from version `{{operator.upgrade_from}}` to `{{nebula.tag}}`. 1. Check the image version of the services in the cluster. @@ -43,93 +37,38 @@ The version of the NebulaGraph cluster to be upgraded in this topic is `{{operat 3 vesoft/nebula-storaged:{{operator.upgrade_from}} ``` -2. Edit the `apps_v1alpha1_nebulacluster.yaml` file by changing the values of all the `version` parameters from {{operator.upgrade_from}} to {{nebula.tag}}. 
- - The modified YAML file reads as follows: - - ```yaml - apiVersion: apps.nebula-graph.io/v1alpha1 - kind: NebulaCluster - metadata: - name: nebula - spec: - graphd: - resources: - requests: - cpu: "500m" - memory: "500Mi" - limits: - cpu: "1" - memory: "1Gi" - replicas: 1 - image: vesoft/nebula-graphd - version: {{nebula.tag}} //Change the value from {{operator.upgrade_from}} to {{nebula.tag}}. - service: - type: NodePort - externalTrafficPolicy: Local - logVolumeClaim: - resources: - requests: - storage: 2Gi - storageClassName: fast-disks - metad: - resources: - requests: - cpu: "500m" - memory: "500Mi" - limits: - cpu: "1" - memory: "1Gi" - replicas: 1 - image: vesoft/nebula-metad - version: {{nebula.tag}} //Change the value from {{operator.upgrade_from}} to {{nebula.tag}}. - dataVolumeClaim: - resources: - requests: - storage: 2Gi - storageClassName: fast-disks - logVolumeClaim: - resources: - requests: - storage: 2Gi - storageClassName: fast-disks - storaged: - resources: - requests: - cpu: "500m" - memory: "500Mi" - limits: - cpu: "1" - memory: "1Gi" - replicas: 3 - image: vesoft/nebula-storaged - version: {{nebula.tag}} //Change the value from {{operator.upgrade_from}} to {{nebula.tag}}. - dataVolumeClaims: - - resources: - requests: - storage: 2Gi - storageClassName: fast-disks - - resources: - requests: - storage: 2Gi - storageClassName: fast-disks - logVolumeClaim: - resources: - requests: - storage: 2Gi - storageClassName: fast-disks - reference: - name: statefulsets.apps - version: v1 - schedulerName: default-scheduler - imagePullPolicy: Always - ``` +2. Edit the `nebula` cluster configuration to change the `version` value of the cluster services from {{operator.upgrade_from}} to {{nebula.tag}}. -3. Run the following command to apply the version update to the cluster CR. - - ```bash - kubectl apply -f apps_v1alpha1_nebulacluster.yaml - ``` + 1. Open the YAML file for the `nebula` cluster. + + ```bash + kubectl edit nebulacluster nebula -n + ``` + + 2. Change the value of `version`. + + After making these changes, the YAML file should look like this: + + ```yaml + apiVersion: apps.nebula-graph.io/v1alpha1 + kind: NebulaCluster + metadata: + name: nebula + spec: + graphd: + version: {{nebula.tag}} // Change the value from {{operator.upgrade_from}} to {{nebula.tag}}. + ... + metad: + version: {{nebula.tag}} // Change the value from {{operator.upgrade_from}} to {{nebula.tag}}. + ... + storaged: + version: {{nebula.tag}} // Change the value from {{operator.upgrade_from}} to {{nebula.tag}}. + ... + ``` + +3. Apply the configuration. + + After saving the YAML file and exiting, Kubernetes automatically updates the cluster's configuration and starts the cluster upgrade. 4. After waiting for about 2 minutes, run the following command to see if the image versions of the services in the cluster have been changed to {{nebula.tag}}. @@ -145,13 +84,8 @@ The version of the NebulaGraph cluster to be upgraded in this topic is `{{operat 3 vesoft/nebula-storaged:{{nebula.tag}} ``` -## Upgrade a NebulaGraph cluster with Helm - -### Prerequisites - -You have created a NebulaGraph cluster with Helm. For details, see [Create a NebulaGraph cluster with Helm](3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md). +## Upgrade a NebulaGraph cluster with `helm` -### Steps 1. Update the information of available charts locally from chart repositories. @@ -207,4 +141,18 @@ You have created a NebulaGraph cluster with Helm. 
For details, see [Create a Neb ## Accelerate the upgrade process -The upgrade process of a cluster is a rolling update process and can be time-consuming due to the state transition of the leader partition replicas in the Storage service. You can configure the `enableForceUpdate` field in the cluster instance's YAML file to skip the leader partition replica transfer operation, thereby accelerating the upgrade process. For more information, see [Specify a rolling update strategy](11.rolling-update-strategy.md). \ No newline at end of file +The upgrade process of a cluster is a rolling update process and can be time-consuming due to the state transition of the leader partition replicas in the Storage service. You can configure the `enableForceUpdate` field in the cluster instance's YAML file to skip the leader partition replica transfer operation, thereby accelerating the upgrade process. For more information, see [Specify a rolling update strategy](../4.9.advanced/4.9.1.rolling-update-strategy.md). + +## Troubleshooting + +If you encounter issues during the upgrade process, you can check the logs of the cluster service pods. + +```bash +kubectl logs -n +``` + +Additionally, you can inspect the cluster's status and events. + +```bash +kubectl describe nebulaclusters -n +``` \ No newline at end of file diff --git a/docs-2.0/k8s-operator/4.cluster-administration/4.1.installation/4.1.3.cluster-uninstall.md b/docs-2.0/k8s-operator/4.cluster-administration/4.1.installation/4.1.3.cluster-uninstall.md new file mode 100644 index 00000000000..36ff108e35a --- /dev/null +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.1.installation/4.1.3.cluster-uninstall.md @@ -0,0 +1,114 @@ +# Delete a NebulaGraph cluster + +This topic explains how to delete a NebulaGraph cluster created using NebulaGraph Operator. + +## Usage limitations + +Deletion is only supported for NebulaGraph clusters created with the NebulaGraph Operator. + +## Delete a NebulaGraph cluster using `kubectl` + +1. View all created clusters. + + ```bash + kubectl get nc --all-namespaces + ``` + + Example output: + + ```bash + NAMESPACE NAME READY GRAPHD-DESIRED GRAPHD-READY METAD-DESIRED METAD-READY STORAGED-DESIRED STORAGED-READY AGE + default nebula True 2 2 3 3 3 3 38h + nebula nebula2 True 1 1 1 1 1 1 2m7s + ``` + +2. Delete a cluster. For example, run the following command to delete a cluster named `nebula2`: + + ```bash + kubectl delete nc nebula2 -n nebula + ``` + + Example output: + + ```bash + nebulacluster.nebula-graph.io "nebula2" deleted + ``` + +3. Confirm the deletion. + + ```bash + kubectl get nc nebula2 -n nebula + ``` + + Example output: + + ```bash + No resources found in nebula namespace. + ``` + +## Delete a NebulaGraph cluster using `helm` + +1. View all Helm releases. + + ```bash + helm list --all-namespaces + ``` + + Example output: + + ```bash + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + nebula default 1 2023-11-06 20:16:07.913136377 +0800 CST deployed nebula-cluster-1.7.1 1.7.1 + nebula-operator nebula-operator-system 3 2023-11-06 12:06:24.742397418 +0800 CST deployed nebula-operator-1.7.1 1.7.1 + ``` + +2. View detailed information about a Helm release. 
For example, to view the cluster information for a Helm release named `nebula`: + + ```bash + helm get values nebula -n default + ``` + + Example output: + + ```yaml + USER-SUPPLIED VALUES: + imagePullSecrets: + - name: secret_for_pull_image + nameOverride: nebula # The cluster name + nebula: + graphd: + image: reg.vesoft-inc.com/xx + metad: + image: reg.vesoft-inc.com/xx + licenseManagerURL: xxx:9119 + storageClassName: local-sc + storaged: + image: reg.vesoft-inc.com/xx + version: {{operator.tag}} # The cluster version + ``` + +3. Uninstall a Helm release. For example, to uninstall a Helm release named `nebula`: + + ```bash + helm uninstall nebula -n default + ``` + + Example output: + + ```bash + release "nebula" uninstalled + ``` + + Once the Helm release is uninstalled, NebulaGraph Operator will automatically remove all K8s resources associated with that release. + +4. Verify that the cluster resources are removed. + + ```bash + kubectl get nc nebula -n default + ``` + + Example output: + + ```bash + No resources found in default namespace. + ``` \ No newline at end of file diff --git a/docs-2.0/k8s-operator/4.cluster-administration/4.2.configuration.md b/docs-2.0/k8s-operator/4.cluster-administration/4.2.configuration.md new file mode 100644 index 00000000000..e8859c2d0a4 --- /dev/null +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.2.configuration.md @@ -0,0 +1,180 @@ +# Customize the configuration of the NebulaGraph cluster + +The Meta, Storage, and Graph services each have their default configurations within the NebulaGraph cluster. NebulaGraph Operator allows for the customization of these cluster service configurations. This topic describes how to update the settings of the NebulaGraph cluster. + +!!! note + + Configuring the parameters of the NebulaGraph cluster via Helm isn't currently supported. + +## Prerequisites + +A cluster is created using NebulaGraph Operator. For details, see [Create a NebulaGraph Cluster](4.1.installation/4.1.1.cluster-install.md). + +## Configuration method + +You can update the configurations of cluster services by customizing parameters through `spec..config`. NebulaGraph Operator loads the configurations from `config` into the corresponding service's ConfigMap, which is then mounted into the service's configuration file directory (`/usr/local/nebula/etc/`) at the time of the service launch. + +The structure of `config` is as follows: + +```go +Config map[string]string `json:"config,omitempty"` +``` + +For instance, when updating the Graph service's `enable_authorize` parameter settings, the `spec.graphd.config` parameter can be specified at the time of cluster creation, or during cluster runtime. + +```yaml +apiVersion: apps.nebula-graph.io/v1alpha1 +kind: NebulaCluster +metadata: + name: nebula + namespace: default +spec: + graphd: + ... + config: // Custom-defined parameters for the Graph service. + "enable_authorize": "true" // Enable authorization. Default value is false. +... +``` + +If you need to configure `config` for the Meta and Storage services, add corresponding configuration items to `spec.metad.config` and `spec.storaged.config`. 
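+
+The following is a minimal sketch of how such a change can be applied in place with `kubectl patch` instead of editing the manifest by hand. The cluster name (`nebula`), namespace (`default`), and the flags shown (`default_parts_num` for the Meta service and `wal_ttl` for the Storage service) are illustrative assumptions only; replace them with the parameters you actually need to change.
+
+```bash
+# Merge-patch the NebulaCluster object to add custom flags for the Meta and
+# Storage services. Values must be quoted strings because config is a map[string]string.
+kubectl patch nc nebula -n default --type='merge' --patch '{
+  "spec": {
+    "metad":    {"config": {"default_parts_num": "20"}},
+    "storaged": {"config": {"wal_ttl": "14400"}}
+  }
+}'
+```
+
+As with the `graphd` example above, whether this triggers a Pod restart depends on whether the modified flags support runtime dynamic modification, as described in the sections below.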
+ +## Configurable parameters + +For more detailed information on the parameters that can be set under the `config` field, see the following: +- [Meta Service Configuration Parameters](../../5.configurations-and-logs/1.configurations/2.meta-config.md) +- [Storage Service Configuration Parameters](../../5.configurations-and-logs/1.configurations/4.storage-config.md) +- [Graph Service Configuration Parameters](../../5.configurations-and-logs/1.configurations/3.graph-config.md) + +## Parameter updates & Pod restart rules + +Configuration parameters for cluster services fall into two categories: those which require a service restart for any updates; and those which can be dynamically updated during service runtime. For the latter type, the updates will not be saved; subsequent to a service restart, configurations will revert to the state as shown in the configuration file. + +Regarding if the configuration parameters support dynamic updates during service runtime, please verify the information within the **Whether supports runtime dynamic modifications** column on each of the service configuration parameter detail pages linked above or see [Dynamic runtime flags](https://github.com/vesoft-inc/nebula-operator/blob/{{operator.tag}}/doc/user/custom_config.md#dynamic-runtime-flags). + +During the update of cluster service configurations, keep the following points in mind: + +- If the updated parameters under `config` **all allow for dynamic runtime updates**, a service Pod restart will not be triggered and the configuration parameter updates will not be saved. +- If the updated parameters under `config` **include one or more that don’t allow for dynamic runtime updates**, a service Pod restart will be triggered, but only updates to those parameters that don’t allow for dynamic updates will be saved. + +!!! note + + If you wish to modify the parameter settings during cluster runtime without triggering a Pod restart, make sure that all the parameters support dynamic updates during runtime. + +## Customize port configuration + +The following example demonstrates how to customize the port configurations for the Meta, Storage, and Graph services. + +You can add `port` and `ws_http_port` parameters to the `config` field in order to set custom ports. For detailed information regarding these two parameters, see the networking configuration sections at [Meta Service Configuration Parameters](../../5.configurations-and-logs/1.configurations/2.meta-config.md), [Storage Service Configuration Parameters](../../5.configurations-and-logs/1.configurations/4.storage-config.md), [Graph Service Configuration Parameters](../../5.configurations-and-logs/1.configurations/3.graph-config.md). + +!!! note + + * After customizing the `port` and `ws_http_port` parameter settings, a Pod restart is triggered and then the updated settings take effect after the restart. + * Once the cluster is started, it is not recommended to modify the `port` parameter. + +1. Modify the cluster configuration file. + + 1. Open the cluster configuration file. + + ``` + kubectl edit nc nebula + ``` + + 2. Modify the configuration file as follows. + + Add the `config` field to the `graphd`, `metad`, and `storaged` sections to customize the port configurations for the Graph, Meta, and Storage services, respectively. + + ```yaml + apiVersion: apps.nebula-graph.io/v1alpha1 + kind: NebulaCluster + metadata: + name: nebula + namespace: default + spec: + graphd: + config: // Custom port configuration for the Graph service. 
+ port: "3669" + ws_http_port: "8080" + resources: + requests: + cpu: "200m" + memory: "500Mi" + limits: + cpu: "1" + memory: "1Gi" + replicas: 1 + image: vesoft/nebula-graphd + version: {{nebula.tag}} + metad: + config: // Custom port configuration for the Meta service. + ws_http_port: 8081 + resources: + requests: + cpu: "300m" + memory: "500Mi" + limits: + cpu: "1" + memory: "1Gi" + replicas: 1 + image: vesoft/nebula-metad + version: {{nebula.tag}} + dataVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: local-path + storaged: + config: // Custom port configuration for the Storage service. + ws_http_port: 8082 + resources: + requests: + cpu: "300m" + memory: "500Mi" + limits: + cpu: "1" + memory: "1Gi" + replicas: 1 + image: vesoft/nebula-storaged + version: {{nebula.tag}} + dataVolumeClaims: + - resources: + requests: + storage: 2Gi + storageClassName: local-path + enableAutoBalance: true + reference: + name: statefulsets.apps + version: v1 + schedulerName: default-scheduler + imagePullPolicy: IfNotPresent + imagePullSecrets: + - name: nebula-image + enablePVReclaim: true + topologySpreadConstraints: + - topologyKey: kubernetes.io/hostname + whenUnsatisfiable: "ScheduleAnyway" + ``` + +2. Save the changes. + + Changes will be saved automatically after saving the file. + + 1. Press `Esc` to enter command mode. + 2. Enter `:wq` to save and exit. + +3. Validate that the configurations have taken effect. + + ```bash + kubectl get svc + ``` + + Example output: + + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + nebula-graphd-headless ClusterIP None 3669/TCP,8080/TCP 10m + nebula-graphd-svc ClusterIP 10.102.13.115 3669/TCP,8080/TCP 10m + nebula-metad-headless ClusterIP None 9559/TCP,8081/TCP 11m + nebula-storaged-headless ClusterIP None 9779/TCP,8082/TCP,9778/TCP 11m + ``` + + As can be noticed, the Graph service's RPC daemon port is changed to `3669` (default `9669`), the HTTP port to `8080` (default `19669`); the Meta service's HTTP port is changed to `8081` (default `19559`); the Storage service's HTTP port is changed to `8082` (default `19779`). diff --git a/docs-2.0/k8s-operator/4.cluster-administration/4.3.scaling/4.3.1.resizing.md b/docs-2.0/k8s-operator/4.cluster-administration/4.3.scaling/4.3.1.resizing.md new file mode 100644 index 00000000000..5b4cf7b8be0 --- /dev/null +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.3.scaling/4.3.1.resizing.md @@ -0,0 +1,117 @@ +# Scale a NebulaGraph Cluster + +This topic describes how to scale the number of service replicas in your NebulaGraph cluster. + +## Prerequisites + +A cluster is established using NebulaGraph Operator. For details, see [Create a NebulaGraph cluster](../4.1.installation/4.1.1.cluster-install.md). + +## Limitations + +Scaling the NebulaGraph cluster is only supported by NebulaGraph Operator v1.1.0 and above. + +## Scale clusters with `kubectl` + +You can scale the NebulaGraph cluster services by editing the `replicas` value in the cluster configuration file. + +### Scale out clusters + +As an example, let's scale out the Storage service in the NebulaGraph cluster to 5 replicas: + +1. View the list of clusters. + + ```bash + kubectl get nc --all-namespaces + ``` + + Example output: + + ```bash + NAMESPACE NAME READY GRAPHD-DESIRED GRAPHD-READY METAD-DESIRED METAD-READY STORAGED-DESIRED STORAGED-READY AGE + nebula nebula True 1 1 1 1 3 3 4d23h + ... + ``` + +2. Change the `spec.storaged.replicas` parameter value from `3` to `5` in the configuration file of the cluster named `nebula`. 
+ + ```bash + kubectl patch nc nebula -n nebula --type='merge' --patch '{"spec": {"storaged": {"replicas":5}}}' + ``` + + Example output: + + ```bash + nebulacluster.apps.nebula-graph.io/nebula patched + ``` + +3. Check the number of Storage service replicas after the scale-out. + + ```bash + kubectl get pods -l app.kubernetes.io/cluster=nebula + ``` + + Example output: + + ```bash + NAME READY STATUS RESTARTS AGE + nebula-graphd-0 1/1 Running 0 2m + nebula-metad-0 1/1 Running 0 2m + nebula-storaged-0 1/1 Running 0 2m + nebula-storaged-1 1/1 Running 0 2m + nebula-storaged-2 1/1 Running 0 2m + nebula-storaged-3 1/1 Running 0 5m + nebula-storaged-4 1/1 Running 0 5m + ``` + +### Scale in clusters + +Scaling in the cluster follows the same principle as scaling out: just decrease the `replicas` value in the cluster configuration file. For details, see the **Scale out clusters** section above. + +If the scale-in operation remains incomplete for a long time, you can go to the console container started through the `spec.console` field and check the status of the scale-in job. If the scale-in Job status is `FAILED`, you can check the Meta service's log to find the cause of the scale-in failure. For more information about Jobs, please see [Job Management](../../../3.ngql-guide/4.job-statements.md). + +!!! caution + + - Scaling is supported only for the Graph and Storage services in the NebulaGraph cluster, but not for the Meta service. + - If the cluster with the zone feature is scaled in, it is necessary to ensure that the remaining number of Storage Pods after scaling in is not less than the number of zones specified by `spec.metad.config.zone_list`. For example, if the number of zones is 3, then the number of Storage Pods after scaling down cannot be less than 3. For more information about zones, please see [Zone](../../4.cluster-administration/4.8.ha-and-balancing/4.8.2.enable-zone.md). + + +## Scale clusters with `helm` + +The cluster can be scaled by modifying the `replicas` values for different services within NebulaGraph. + +### Scale out clusters + +1. Check the default configuration of the Helm release. + + ```bash + helm show values nebula-operator/nebula-cluster -n default + ``` + + Result: + + ```bash + nebula: + version: v{{nebula.release}} + ... + storaged: + replicas: 2 + ``` + +2. Scale out the Storage service of the NebulaGraph cluster to 5 replicas with the following command: + + ```bash + helm upgrade nebula nebula-operator/nebula-cluster \ + --namespace="default" \ + --set nebula.storaged.replicas=5 + ``` + +### Scale in clusters + +The principle of scaling in a cluster is the same as scaling out a cluster. Simply set the `replicas` value of the services in the NebulaGraph cluster to be less than the original. + +If the scale-in operation remains incomplete for a long time, you can go to the console container started through `nebula.console` field and check the state of scale-in Jobs. If the scale-in Job status is `FAILED`, you can check the Meta service's log to find out why the scale-in failed. For more information about Jobs, see [Job Management](../../../3.ngql-guide/4.job-statements.md). + +!!! caution + + - Scaling is supported only for the Graph and Storage services in the NebulaGraph cluster, but not for the Meta service. + - If the cluster with the zone feature enabled is scaled in, ensure that the remaining number of Storage Pods after the scaled-in is no less than the number of zones specified by `nebula.metad.config.zone_list`. 
For instance, if the number of zones is 3, then the number of Storage Pods after the scale-in cannot be less than 3. For more information about zones, see [Enable zones](../../4.cluster-administration/4.8.ha-and-balancing/4.8.2.enable-zone.md). \ No newline at end of file diff --git a/docs-2.0/k8s-operator/4.cluster-administration/4.3.scaling/4.3.2.enable-hpa.md b/docs-2.0/k8s-operator/4.cluster-administration/4.3.scaling/4.3.2.enable-hpa.md new file mode 100644 index 00000000000..b30d914b5c8 --- /dev/null +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.3.scaling/4.3.2.enable-hpa.md @@ -0,0 +1,201 @@ +# Enable Horizontal Pod Autoscaling (HPA) for Graph services + +NebulaGraph Operator provides the NebulaAutoscaler object for you to implement [Horizontal Pod Autoscaling (HPA)](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) for the Graph service. + +## Prerequisites + +A NebulaGraph cluster is created. For details, see [Create a NebulaGraph cluster](../4.1.installation/4.1.1.cluster-install.md). + +## Limitations + +Automatic scaling via HPA is supported only for the Graph service. + +## Steps + +The following procedure shows how to enable HPA in a NebulaGraph cluster: + +1. Install the metrics-server. + + Developed based on the HorizontalPodAutoscaler in Kubernetes, the NebulaAutoscaler automatically scales the number of pods based on the metrics collected by the [metrics-server](https://github.com/kubernetes-sigs/metrics-server). + + + Run the following command to install the latest metrics-server release: + + ```bash + kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml + ``` + +2. Verify that the metrics-server is working. + + The metrics-server implements the Metrics API, which provides information about resource usage for nodes and pods in the cluster. The following example calls the Metrics API to obtain resource usage data from the metrics-server. If resource usage data is successfully returned, it indicates the metrics-server is working. + + Run the following command to query resource usage on a pod named `nebula-graphd-1`: + + ```bash + kubectl get --raw "/apis/metrics.k8s.io/v1beta1/namespaces/default/pods/nebula-graphd-1" | jq '.' + ``` + + Sample response: + + ```json + { + "kind": "PodMetrics", + "apiVersion": "metrics.k8s.io/v1beta1", + "metadata": { + "name": "nebula-graphd-1", + "namespace": "default", + "creationTimestamp": "2023-09-27T13:39:54Z", + "labels": { + "app.kubernetes.io/cluster": "nebula", + "app.kubernetes.io/component": "graphd", + "app.kubernetes.io/managed-by": "nebula-operator", + "app.kubernetes.io/name": "nebula-graph", + "controller-revision-hash": "nebula-graphd-56cf5f8b66", + "statefulset.kubernetes.io/pod-name": "nebula-graphd-1" + } + }, + "timestamp": "2023-09-27T13:39:48Z", + "window": "15.015s", + "containers": [ + { + "name": "graphd", + "usage": { + "cpu": "323307n", + "memory": "12644Ki" + } + } + ] + } + ``` + + Run the following command to query resource usage on a node named `192-168-8-35`: + + ```bash + kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes/192-168-8-35" | jq '.' 
+ ``` + + Sample response: + + ```json + { + "kind": "NodeMetrics", + "apiVersion": "metrics.k8s.io/v1beta1", + "metadata": { + "name": "192-168-8-35", + "creationTimestamp": "2023-09-27T14:00:13Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "192-168-8-35", + "kubernetes.io/os": "linux", + "nebula": "cloud", + "node-role.kubernetes.io/control-plane": "", + "node.kubernetes.io/exclude-from-external-load-balancers": "" + } + }, + "timestamp": "2023-09-27T14:00:00Z", + "window": "20.045s", + "usage": { + "cpu": "164625163n", + "memory": "8616740Ki" + } + } + ``` + +3. Create a NebulaAutoscaler object. + + Use the following YAML sample to create a NebulaAutoscaler object that automatically adjusts the number of pods between 2 to 5 based on the average CPU utilization. + + ```yaml + apiVersion: autoscaling.nebula-graph.io/v1alpha1 + kind: NebulaAutoscaler + metadata: + name: nebula-autoscaler + spec: + nebulaClusterRef: + name: nebula + graphdPolicy: + minReplicas: 2 + maxReplicas: 5 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 + pollingPeriod: 30s + ``` + + The key parameters are as follows: + + - `spec.nebulaClusterRef`: The target cluster that the NebulaAutoscaler applies to. + - `spec.graphdPolicy`: The auto scaling policy adopted by the NebulaAutoscaler. All the child fields are compatible with the fields used by the Kubernetes HorizontalPodAutoscaler. Check the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2/#HorizontalPodAutoscalerSpec) for details. + - `spec.pollingPeriod`: The time interval between each resource usage checks by the NebulaAutoscaler. + + The NebulaAutoscaler also supports the `behavior` parameter, which enables you to control the scaling procedure in great detail by configuring separate scale-up and scale-down behaviors. + + Make sure you understand the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2/#HorizontalPodAutoscalerSpec) before you use the `behavior` parameter. + + The following sample creates a NebulaAutoscaler object that behaves differently in scale-up and scale-down. + + ```yaml + apiVersion: autoscaling.nebula-graph.io/v1alpha1 + kind: NebulaAutoscaler + metadata: + name: nebula-autoscaler + spec: + nebulaClusterRef: + name: nebula + graphdPolicy: + minReplicas: 2 + maxReplicas: 5 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + scaleUp: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + - type: Pods + value: 4 + periodSeconds: 15 + selectPolicy: Max + pollingPeriod: 30s + ``` + +4. Check whether HPA is working. + + After running `kubectl apply` to create the NebulaAutoscaler object, you can use the following commands to check whether HPA is working. + + Run the `kubectl get na` command to check the NebulaAutoscaler status. + + Sample response: + + ``` + NAME REFERENCE MIN-REPLICAS MAX-REPLICAS CURRENT-REPLICAS ACTIVE ABLETOSCALE LIMITED READY AGE + nebula-autoscaler nebula 2 5 2 True True True True 19h + ``` + + Run the `kubectl get nc` command to check the cluster status. 
+ + Sample response: + + ``` + NAME READY GRAPHD-DESIRED GRAPHD-READY METAD-DESIRED METAD-READY STORAGED-DESIRED STORAGED-READY AGE + nebula True 2 2 1 1 3 3 20h + ``` diff --git a/docs-2.0/k8s-operator/4.cluster-administration/4.4.storage-management/4.4.1.use-local-pv.md b/docs-2.0/k8s-operator/4.cluster-administration/4.4.storage-management/4.4.1.use-local-pv.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docs-2.0/nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md b/docs-2.0/k8s-operator/4.cluster-administration/4.4.storage-management/4.4.2.pv-expansion.md similarity index 95% rename from docs-2.0/nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md rename to docs-2.0/k8s-operator/4.cluster-administration/4.4.storage-management/4.4.2.pv-expansion.md index a856fd445c0..1bd3047f6e4 100644 --- a/docs-2.0/nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.4.storage-management/4.4.2.pv-expansion.md @@ -19,10 +19,10 @@ In NebulaGraph Operator, you cannot directly edit PVC because Operator automatic - A StorageClass has been created in the Kubernetes environment. For details, see [Expanding Persistent Volumes Claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims). - Ensure the `allowVolumeExpansion` field in the StorageClass is set to `true`. - Make sure that the `provisioner` configured in the StorageClass supports dynamic expansion. -- A NebulaGraph cluster has been created in Kubernetes. For specific steps, see [Create a Nebula Graph Cluster with Kubectl](../../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md). +- A NebulaGraph cluster is created in Kubernetes. For specific steps, see [Create a NebulaGraph cluster](../4.1.installation/4.1.1.cluster-install.md). - NebulaGraph cluster Pods are in running status. -## Dynamic volume expansion example +## Online volume expansion example In the following example, we assume that the StorageClass is named `ebs-sc` and the NebulaGraph cluster is named `nebula`. We will demonstrate how to dynamically expand the PV for the Storage service. diff --git a/docs-2.0/nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md b/docs-2.0/k8s-operator/4.cluster-administration/4.4.storage-management/4.4.3.configure-pv-reclaim.md similarity index 89% rename from docs-2.0/nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md rename to docs-2.0/k8s-operator/4.cluster-administration/4.4.storage-management/4.4.3.configure-pv-reclaim.md index 17e96ecea25..579aeeb07b9 100644 --- a/docs-2.0/nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.4.storage-management/4.4.3.configure-pv-reclaim.md @@ -6,7 +6,7 @@ You can also define the automatic deletion of PVCs to release data by setting th ## Prerequisites -You have created a cluster. For how to create a cluster with Kubectl, see [Create a cluster with Kubectl](../../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md). +A NebulaGraph cluster is created in Kubernetes. For specific steps, see [Create a NebulaGraph cluster](../4.1.installation/4.1.1.cluster-install.md). ## Steps @@ -101,3 +101,5 @@ The following example uses a cluster named `nebula` and the cluster's configurat ``` 3. 
Run `kubectl apply -f nebula_cluster.yaml` to push your configuration changes to the cluster. + +After setting `enablePVReclaim` to `true`, the PVCs of the cluster will be deleted automatically after the cluster is deleted. If you want to delete the PVs, you need to set the reclaim policy of the PVs to `Delete`. \ No newline at end of file diff --git a/docs-2.0/nebula-operator/8.custom-cluster-configurations/8.4.manage-running-logs.md b/docs-2.0/k8s-operator/4.cluster-administration/4.5.logging.md similarity index 90% rename from docs-2.0/nebula-operator/8.custom-cluster-configurations/8.4.manage-running-logs.md rename to docs-2.0/k8s-operator/4.cluster-administration/4.5.logging.md index 601d4dedb8b..5b99773d6b4 100644 --- a/docs-2.0/nebula-operator/8.custom-cluster-configurations/8.4.manage-running-logs.md +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.5.logging.md @@ -24,7 +24,7 @@ $ cd /usr/local/nebula/logs ## Clean logs -Running logs generated by cluster services during runtime will occupy disk space. To avoid occupying too much disk space, the Operator uses a sidecar container to periodically clean and archive logs. +Running logs generated by cluster services during runtime will occupy disk space. To avoid occupying too much disk space, the NebulaGraph Operator uses a sidecar container to periodically clean and archive logs. To facilitate log collection and management, each NebulaGraph service deploys a sidecar container responsible for collecting logs generated by the service container and sending them to the specified log disk. The sidecar container automatically cleans and archives logs using the [logrotate](https://linux.die.net/man/8/logrotate) tool. @@ -35,7 +35,9 @@ In the YAML configuration file of the cluster instance, set `spec.logRotate` to spec: graphd: config: - # Whether to include a timestamp in the log file name. You must set this parameter to false to enable log rotation. It is set to true by default. + # Whether to include a timestamp in the log file name. + # You must set this parameter to false to enable log rotation. + # It is set to true by default. "timestamp_in_logfile_name": "false" metad: config: @@ -44,7 +46,8 @@ spec: config: "timestamp_in_logfile_name": "false" logRotate: # Log rotation configuration - # The number of times a log file is rotated before being deleted. The default value is 5, and 0 means the log file will not be rotated before being deleted. + # The number of times a log file is rotated before being deleted. + # The default value is 5, and 0 means the log file will not be rotated before being deleted. rotate: 5 # The log file is rotated only if it grows larger than the specified size. The default value is 200M. size: "200M" @@ -54,7 +57,7 @@ spec: If you don't want to mount additional log disks to back up log files, or if you want to collect logs and send them to a log center using services like [fluent-bit](https://fluentbit.io/), you can configure logs to be output to standard error. The Operator uses the [glog](https://github.com/google/glog) tool to log to standard error output. -!!! caution +!!! note Currently, NebulaGraph Operator only collects standard error logs.。 @@ -97,4 +100,4 @@ spec: value: "1" image: vesoft/nebula-metad ... 
-``` \ No newline at end of file +``` diff --git a/docs-2.0/nebula-operator/10.backup-restore-using-operator.md b/docs-2.0/k8s-operator/4.cluster-administration/4.6.backup-and-restore.md similarity index 93% rename from docs-2.0/nebula-operator/10.backup-restore-using-operator.md rename to docs-2.0/k8s-operator/4.cluster-administration/4.6.backup-and-restore.md index a00bad071b7..55a794b051c 100644 --- a/docs-2.0/nebula-operator/10.backup-restore-using-operator.md +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.6.backup-and-restore.md @@ -2,17 +2,13 @@ This article introduces how to back up and restore data of the NebulaGraph cluster on Kubernetes. -!!! enterpriseonly - - This feature is only for the enterprise edition NebulaGraph clusters on Kubernetes. - -!!! compatibility +!!! note - Make sure that the [Zone](../4.deployment-and-installation/5.zone.md) feature is not enabled in the NebulaGraph cluster before using the backup and restore in Operator, otherwise the backup and restore will fail. For details on Zones, see [Cluster with Zones](3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md#create_clusters). + Make sure that the [zone](../../4.deployment-and-installation/5.zone.md) feature is not enabled in the NebulaGraph cluster before using the backup and restore in Operator, otherwise the backup and restore will fail. For details on zones, see [Cluster with zones](4.8.ha-and-balancing/4.8.2.enable-zone.md). ## Overview -[NebulaGraph BR (Enterprise Edition), short for BR-ent](../backup-and-restore/nebula-br-ent/1.br-ent-overview.md) is a command line tool for data backup and recovery of NebulaGraph enterprise edition. NebulaGraph Operator utilizes the BR-ent tool to achieve data backup and recovery for NebulaGraph clusters on Kubernetes. +[NebulaGraph BR (Enterprise Edition), short for BR-ent](../../backup-and-restore/nebula-br-ent/1.br-ent-overview.md) is a command line tool for data backup and recovery of NebulaGraph enterprise edition. NebulaGraph Operator utilizes the BR-ent tool to achieve data backup and recovery for NebulaGraph clusters on Kubernetes. When backing up data, NebulaGraph Operator creates a job to back up the data in the NebulaGraph cluster to the specified storage service. @@ -47,7 +43,7 @@ To backup and restore data using NebulaGraph Operator, the following conditions - NebulaGraph Operator supports full and incremental backups. - During data backup, DDL and DML statements in the specified graph space will be blocked. We recommend performing the operation during off-peak hours, such as from 2:00 am to 5:00 am. - The cluster executing incremental backups and the cluster specified for the last backup must be the same, and the (storage bucket) path for the last backup must be the same. -- Ensure that the time between each incremental backup and the last backup is less than a [`wal_ttl`](../5.configurations-and-logs/1.configurations/4.storage-config.md#raft_configurations). +- Ensure that the time between each incremental backup and the last backup is less than a [`wal_ttl`](../../5.configurations-and-logs/1.configurations/4.storage-config.md#raft_configurations). - Specifying the backup data of a specified graph space is not supported. - Before backing up data, you need to create a Secret to restore the credential for pulling the image of the BR-ent tool. @@ -142,7 +138,7 @@ The main parameters are described as follows: | `spec.parallelism` |1 |The number of tasks executed in parallel. 
| | `spec.ttlSecondsAfterFinished` | 60 | The time to keep task information after the task is completed. | | `spec.template.spec.containers[0].image` | `vesoft/br-ent:{{br_ent.release}}`|The image address of the NebulaGraph BR Enterprise Edition tool. | -| `spec.template.spec.containers[0].command`| - | The command for backing up data to the storage service compatible with the S3 protocol.
For descriptions of the options in the command, see [Parametr description](../backup-and-restore/nebula-br-ent/3.backup-data.md#_13). | +| `spec.template.spec.containers[0].command`| - | The command for backing up data to the storage service compatible with the S3 protocol.
For descriptions of the options in the command, see [Parametr description](../../backup-and-restore/nebula-br-ent/3.backup-data.md#_13). | For more settings of the job, see [Kubernetes Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/job/). @@ -162,7 +158,7 @@ If you enable mTLS authentication for the NebulaGraph cluster, you need to certi !!! note - Create Secrets to store the certificate files before creating the backup job, and make sure the Secrets are in the namespace where the backup job is located. For details, see [Enable mTLS in NebulaGraph](8.custom-cluster-configurations/8.5.enable-ssl.md). + Create Secrets to store the certificate files before creating the backup job, and make sure the Secrets are in the namespace where the backup job is located. For details, see [Enable mTLS in NebulaGraph](4.7.security/4.7.1.enable-mtls.md). The following provides an example of the YAML file for a full backup job with mTLS authentication enabled: @@ -334,7 +330,4 @@ kubectl get nc -n NAME GRAPHD-DESIRED GRAPHD-READY METAD-DESIRED METAD-READY STORAGED-DESIRED STORAGED-READY AGE nebula 1 1 1 1 3 3 2d3h ngxvsm 1 1 1 1 3 3 92m # The newly created cluster. -``` - - - +``` \ No newline at end of file diff --git a/docs-2.0/nebula-operator/8.custom-cluster-configurations/8.5.enable-ssl.md b/docs-2.0/k8s-operator/4.cluster-administration/4.7.security/4.7.1.enable-mtls.md similarity index 59% rename from docs-2.0/nebula-operator/8.custom-cluster-configurations/8.5.enable-ssl.md rename to docs-2.0/k8s-operator/4.cluster-administration/4.7.security/4.7.1.enable-mtls.md index f6e4305a765..d583b5e4a67 100644 --- a/docs-2.0/nebula-operator/8.custom-cluster-configurations/8.5.enable-ssl.md +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.7.security/4.7.1.enable-mtls.md @@ -6,8 +6,8 @@ In the NebulaGraph environment running in Kubernetes, mutual TLS (mTLS) is used ## Prerequisites -- NebulaGraph Operator has been installed. -- A NebulaGraph cluster has been created. For details, see [Create a NebulaGraph cluster with kubectl](../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md) or [Create a NebulaGraph cluster with Helm](../3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md). +- NebulaGraph Operator is installed. +- A NebulaGraph cluster is created. For details, see [Create a NebulaGraph cluster ](../4.1.installation/4.1.1.cluster-install.md). - Certificates and their corresponding private keys have been generated for the client and server, and the CA certificate has been generated. For details, see [Generate Certificates Manually](https://kubernetes.io/docs/tasks/administer-cluster/certificates/). !!! note @@ -27,7 +27,7 @@ The following two scenarios are commonly used for encryption: - The Graph service in NebulaGraph is the entry point for all client requests. The Graph service communicates with the Meta service and the Storage service to complete the client requests. Therefore, the Graph service needs to be able to communicate with the Meta service and the Storage service. - The Storage and Meta services in NebulaGraph communicate with each other through heartbeat messages to ensure their availability and health. Therefore, the Storage service needs to be able to communicate with the Meta service and vice versa. -For all encryption scenarios, see [Authentication policies](../../7.data-security/4.ssl.md#authentication_policies). +For all encryption scenarios, see [Authentication policies](../../../7.data-security/4.ssl.md#authentication_policies). 
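+
+The configuration examples in the following sections assume that such certificate files already exist. For a quick test environment, a minimal sketch like the one below can generate a self-signed CA together with server and client certificates. All file names and the CN value are illustrative; for production use, follow the Kubernetes certificate guide linked above and include proper `subjectAltName` entries.
+
+```bash
+# Generate a CA private key and a self-signed CA certificate (valid for 365 days).
+openssl req -x509 -newkey rsa:4096 -nodes -keyout root.key -out root.crt -days 365 -subj "/CN=nebula-ca"
+
+# Generate a server private key and a certificate signing request, then sign the request with the CA.
+openssl req -newkey rsa:4096 -nodes -keyout server.key -out server.csr -subj "/CN=nebula-graphd-svc.default.svc.cluster.local"
+openssl x509 -req -in server.csr -CA root.crt -CAkey root.key -CAcreateserial -out server.crt -days 365
+
+# Repeat the last two commands with client.key/client.csr/client.crt to generate the client certificate.
+```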
## mTLS with certificate hot-reloading @@ -55,7 +55,7 @@ The following provides examples of the configuration file to enable mTLS between cert_path: certs/server.crt key_path: certs/server.key enable_graph_ssl: "true" - # The following parameters are required for creating a cluster with Zones. + # The following parameters are required for creating a cluster with zones. # accept_partial_success: "true" # prioritize_intra_zone_reading: "true" # sync_meta_when_use_space: "true" @@ -104,7 +104,7 @@ The following provides examples of the configuration file to enable mTLS between version: v3.5.0-sc metad: # Zone names CANNOT be modified once set. - # It's suggested to set an odd number of Zones. + # It's suggested to set an odd number of zones. # zone_list: az1,az2,az3 validate_session_timestamp: "false" licenseManagerURL: "192.168.8.xxx:9119" @@ -158,7 +158,7 @@ The following provides examples of the configuration file to enable mTLS between imagePullSecrets: - name: nebula-image enablePVReclaim: true - # Used to evenly distribute Pods across Zones. + # Used to evenly distribute Pods across zones. # topologySpreadConstraints: # - topologyKey: "kubernetes.io/zone" # whenUnsatisfiable: "DoNotSchedule" @@ -192,7 +192,7 @@ The following provides examples of the configuration file to enable mTLS between key_path: certs/server.key enable_meta_ssl: "true" enable_storage_ssl: "true" - # The following parameters are required for creating a cluster with Zones. + # The following parameters are required for creating a cluster with zones. # accept_partial_success: "true" # prioritize_intra_zone_reading: "true" # sync_meta_when_use_space: "true" @@ -242,7 +242,7 @@ The following provides examples of the configuration file to enable mTLS between metad: config: # Zone names CANNOT be modified once set. - # It's suggested to set an odd number of Zones. + # It's suggested to set an odd number of zones. # zone_list: az1,az2,az3 validate_session_timestamp: "false" # The following parameters are used to enable mTLS between services. @@ -365,7 +365,7 @@ The following provides examples of the configuration file to enable mTLS between - name: nebula-image # Whether to automatically delete PVCs when deleting a cluster. # enablePVReclaim: true - # Used to evenly distribute Pods across Zones. + # Used to evenly distribute Pods across zones. # topologySpreadConstraints: # - topologyKey: "kubernetes.io/zone" # whenUnsatisfiable: "DoNotSchedule" @@ -529,7 +529,7 @@ After applying the cluster configuration file by running `kubectl apply -f`, you !!! note - When mTLS is required for external clients to connect to the Graph service, you need to set the relevant SSL fields depending on different [clients](../../14.client/1.nebula-client.md). + When mTLS is required for external clients to connect to the Graph service, you need to set the relevant SSL fields depending on different [clients](../../../14.client/1.nebula-client.md). You can configure `spec.console` to start a NebulaGraph Console container in the cluster. For details, see [nebula-console](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/doc/user/nebula_console.md#nebula-console). @@ -546,255 +546,254 @@ nebula-console -addr nebula-graphd-svc.default.svc.cluster.local -port 9669 -u r ## mTLS without hot-reloading -??? 
info "If you don't need to perform TLS certificate hot-reloading and prefer to use TLS certificates stored in a Secret when deploying Kubernetes applications, expand to follow these steps" +If you don't need to perform TLS certificate hot-reloading and prefer to use TLS certificates stored in a Secret when deploying Kubernetes applications, you can follow the steps below to enable mTLS in NebulaGraph. - ### Create a TLS-type Secret +### Create a TLS-type Secret - In a K8s cluster, you can create Secrets to store sensitive information, such as passwords, OAuth tokens, and TLS certificates. In NebulaGraph, you can create a Secret to store TLS certificates and private keys. When creating a Secret, the type `tls` should be specified. A `tls` Secret is used to store TLS certificates. +In a K8s cluster, you can create Secrets to store sensitive information, such as passwords, OAuth tokens, and TLS certificates. In NebulaGraph, you can create a Secret to store TLS certificates and private keys. When creating a Secret, the type `tls` should be specified. A `tls` Secret is used to store TLS certificates. - For example, to create a Secret for storing server certificates and private keys: +For example, to create a Secret for storing server certificates and private keys: - ```bash - kubectl create secret tls --key= --cert= --namespace= - ``` +```bash +kubectl create secret tls --key= --cert= --namespace= +``` - - ``: The name of the Secret storing the server certificate and private key. - - ``: The path to the server private key file. - - ``: The path to the server certificate file. - - ``: The namespace where the Secret is located. If `--namespace` is not specified, it defaults to the `default` namespace. +- ``: The name of the Secret storing the server certificate and private key. +- ``: The path to the server private key file. +- ``: The path to the server certificate file. +- ``: The namespace where the Secret is located. If `--namespace` is not specified, it defaults to the `default` namespace. - You can follow the above steps to create Secrets for the client certificate and private key, and the CA certificate. +You can follow the above steps to create Secrets for the client certificate and private key, and the CA certificate. - To view the created Secrets: +To view the created Secrets: - ```bash - kubectl get secret --namespace= - ``` +```bash +kubectl get secret --namespace= +``` - ### Configure certifications +### Configure certifications - Operator provides the `sslCerts` field to specify the encrypted certificates. The `sslCerts` field contains four subfields. These three fields `serverSecret`, `clientSecret`, and `caSecret` are used to specify the Secret names of the NebulaGraph server certificate, client certificate, and CA certificate, respectively. - When you specify these three fields, Operator reads the certificate content from the corresponding Secret and mounts it into the cluster's Pod. The `autoMountServerCerts` must be set to `true` if you want to automatically mount the server certificate and private key into the Pod. The default value is `false`. +Operator provides the `sslCerts` field to specify the encrypted certificates. The `sslCerts` field contains four subfields. These three fields `serverSecret`, `clientSecret`, and `caSecret` are used to specify the Secret names of the NebulaGraph server certificate, client certificate, and CA certificate, respectively. 
+When you specify these three fields, Operator reads the certificate content from the corresponding Secret and mounts it into the cluster's Pod. The `autoMountServerCerts` must be set to `true` if you want to automatically mount the server certificate and private key into the Pod. The default value is `false`. - ```yaml +```yaml +sslCerts: + autoMountServerCerts: "true" # Automatically mount the server certificate and private key into the Pod. + serverSecret: "server-cert" # The name of the server certificate Secret. + serverCert: "" # The key name of the certificate in the server certificate Secret, default is tls.crt. + serverKey: "" # The key name of the private key in the server certificate Secret, default is tls.key. + clientSecret: "client-cert" # The name of the client certificate Secret. + clientCert: "" # The key name of the certificate in the client certificate Secret, default is tls.crt. + clientKey: "" # The key name of the private key in the client certificate Secret, default is tls.key. + caSecret: "ca-cert" # The name of the CA certificate Secret. + caCert: "" # The key name of the certificate in the CA certificate Secret, default is ca.crt. +``` + +The `serverCert` and `serverKey`, `clientCert` and `clientKey`, and `caCert` are used to specify the key names of the certificate and private key of the server Secret, the key names of the certificate and private key of the client Secret, and the key name of the CA Secret certificate. If you do not customize these field values, Operator defaults `serverCert` and `clientCert` to `tls.crt`, `serverKey` and `clientKey` to `tls.key`, and `caCert` to `ca.crt`. However, in the K8s cluster, the TLS type Secret uses `tls.crt` and `tls.key` as the default key names for the certificate and private key. Therefore, after creating the NebulaGraph cluster, you need to manually change the `caCert` field from `ca.crt` to `tls.crt` in the cluster configuration, so that the Operator can correctly read the content of the CA certificate. Before you customize these field values, you need to specify the key names of the certificate and private key in the Secret when creating it. For how to create a Secret with the key name specified, run the `kubectl create secret generic -h` command for help. + +You can use the `insecureSkipVerify` field to decide whether the client will verify the server's certificate chain and hostname. In production environments, it is recommended to set this to `false` to ensure the security of communication. If set to `true`, the client will not verify the server's certificate chain and hostname. + +```yaml +sslCerts: + # Determines whether the client needs to verify the server's certificate chain and hostname when establishing an SSL connection. + insecureSkipVerify: false +``` + +!!! caution + + Make sure that you have added the hostname or IP of the server to the server's certificate's `subjectAltName` field before the `insecureSkipVerify` is set to `false`. If the hostname or IP of the server is not added, an error will occur when the client verifies the server's certificate chain and hostname. For details, see [openssl](https://kubernetes.io/docs/tasks/administer-cluster/certificates/#openssl). + +When the certificates are approaching expiration, they can be automatically updated by installing [cert-manager](https://cert-manager.io/docs/installation/supported-releases/). NebulaGraph will monitor changes to the certificate directory files, and once a change is detected, it will load the new certificate content into memory. 
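+
+Before setting `insecureSkipVerify` to `false`, you can quickly confirm that the server certificate carries the expected `subjectAltName` entries mentioned in the caution above. The following is a sketch using openssl; the certificate file name is illustrative.
+
+```bash
+# Print the Subject Alternative Name extension of the server certificate.
+# The output should list the DNS names or IPs that clients use to reach the Graph service.
+openssl x509 -in server.crt -noout -text | grep -A 1 "Subject Alternative Name"
+```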
+ +### Encryption strategies + +NebulaGraph offers three encryption strategies that you can choose and configure according to your needs. + +- Encryption of client-graph and all inter-service communications + + If you want to encrypt all data transmission between the client, Graph service, Meta service, and Storage service, you need to add the `enable_ssl = true` field to each service. + + Here is an example configuration: + + ```yaml + apiVersion: apps.nebula-graph.io/v1alpha1 + kind: NebulaCluster + metadata: + name: nebula + namespace: default + spec: sslCerts: - autoMountServerCerts: "true" # Automatically mount the server certificate and private key into the Pod. - serverSecret: "server-cert" # The name of the server certificate Secret. - serverCert: "" # The key name of the certificate in the server certificate Secret, default is tls.crt. - serverKey: "" # The key name of the private key in the server certificate Secret, default is tls.key. - clientSecret: "client-cert" # The name of the client certificate Secret. - clientCert: "" # The key name of the certificate in the client certificate Secret, default is tls.crt. - clientKey: "" # The key name of the private key in the client certificate Secret, default is tls.key. - caSecret: "ca-cert" # The name of the CA certificate Secret. - caCert: "" # The key name of the certificate in the CA certificate Secret, default is ca.crt. - ``` + autoMountServerCerts: "true" # Automatically mount the server certificate and private key into the Pod. + serverSecret: "server-cert" # The Secret name of the server certificate and private key. + clientSecret: "client-cert" # The Secret name of the client certificate and private key. + caSecret: "ca-cert" # The Secret name of the CA certificate. + graphd: + config: + enable_ssl: "true" + metad: + config: + enable_ssl: "true" + storaged: + config: + enable_ssl: "true" + ``` - The `serverCert` and `serverKey`, `clientCert` and `clientKey`, and `caCert` are used to specify the key names of the certificate and private key of the server Secret, the key names of the certificate and private key of the client Secret, and the key name of the CA Secret certificate. If you do not customize these field values, Operator defaults `serverCert` and `clientCert` to `tls.crt`, `serverKey` and `clientKey` to `tls.key`, and `caCert` to `ca.crt`. However, in the K8s cluster, the TLS type Secret uses `tls.crt` and `tls.key` as the default key names for the certificate and private key. Therefore, after creating the NebulaGraph cluster, you need to manually change the `caCert` field from `ca.crt` to `tls.crt` in the cluster configuration, so that the Operator can correctly read the content of the CA certificate. Before you customize these field values, you need to specify the key names of the certificate and private key in the Secret when creating it. For how to create a Secret with the key name specified, run the `kubectl create secret generic -h` command for help. - You can use the `insecureSkipVerify` field to decide whether the client will verify the server's certificate chain and hostname. In production environments, it is recommended to set this to `false` to ensure the security of communication. If set to `true`, the client will not verify the server's certificate chain and hostname. 
+- Encryption of only Graph service communication - ```yaml + If the K8s cluster is deployed in the same data center and only the port of the Graph service is exposed externally, you can choose to encrypt only data transmission between the client and the Graph service. In this case, other services can communicate internally without encryption. Just add the `enable_graph_ssl = true` field to the Graph service. + + Here is an example configuration: + + ```yaml + apiVersion: apps.nebula-graph.io/v1alpha1 + kind: NebulaCluster + metadata: + name: nebula + namespace: default + spec: sslCerts: - # Determines whether the client needs to verify the server's certificate chain and hostname when establishing an SSL connection. - insecureSkipVerify: false - ``` + autoMountServerCerts: "true" + serverSecret: "server-cert" + caSecret: "ca-cert" + graphd: + config: + enable_graph_ssl: "true" + ``` + + !!! note + + Because Operator doesn't need to call the Graph service through an interface, it's not necessary to set `clientSecret` in `sslCerts`. + +- Encryption of only Meta service communication + + If you need to transmit confidential information to the Meta service, you can choose to encrypt data transmission related to the Meta service. In this case, you need to add the `enable_meta_ssl = true` configuration to each component. + + Here is an example configuration: + + ```yaml + apiVersion: apps.nebula-graph.io/v1alpha1 + kind: NebulaCluster + metadata: + name: nebula + namespace: default + spec: + sslCerts: + autoMountServerCerts: "true" + serverSecret: "server-cert" + clientSecret: "client-cert" + caSecret: "ca-cert" + graphd: + config: + enable_meta_ssl: "true" + metad: + config: + enable_meta_ssl: "true" + storaged: + config: + enable_meta_ssl: "true" + ``` + + After setting up the encryption policy, when an external [client](../../../14.client/1.nebula-client.md) needs to connect to the Graph service with mutual TLS, you still need to set the relevant TLS fields according to the different clients. See the Use NebulaGraph Console to connect to Graph service section below for examples. + +### Example of enabling mTLS without hot-reloading + +1. Use the pre-generated server and client certificates and private keys, and the CA certificate to create a Secret for each. + + ```yaml + kubectl create secret tls --key= --cert= + ``` + + - `tls`: Indicates that the type of secret being created is TLS, which is used to store TLS certificates. + - ``: Specifies the name of the new secret being created, which can be customized. + - `--key=`: Specifies the path to the private key file of the TLS certificate to be stored in the secret. + - `--cert=`: Specifies the path to the public key certificate file of the TLS certificate to be stored in the secret. + + +2. Add server certificate, client certificate, CA certificate configuration, and encryption policy configuration in the corresponding cluster instance YAML file. For details, see [Encryption strategies](#encryption_strategies). + + For example, add encryption configuration for transmission data between client, Graph service, Meta service, and Storage service. + + ```yaml + apiVersion: apps.nebula-graph.io/v1alpha1 + kind: NebulaCluster + metadata: + name: nebula + namespace: default + spec: + sslCerts: + autoMountServerCerts: "true" + serverSecret: "server-cert" // The name of the server Certificate Secret. + clientSecret: "client-cert" // The name of the client Certificate Secret. + caSecret: "ca-cert" // The name of the CA Certificate Secret. 
+ graphd: + config: + enable_ssl: "true" + metad: + config: + enable_ssl: "true" + storaged: + config: + enable_ssl: "true" + ``` + +3. Use `kubectl apply -f` to apply the file to the Kubernetes cluster. + +4. Verify that the values of `serverCert`, `serverKey`, `clientCert`, `clientKey`, `caCert` under the `sslCerts` field in the cluster configuration match the key names of the certificates and private keys stored in the created Secret. + + ```bash + # Check the key names of the certificate and private key stored in the Secret. For example, check the key name of the CA certificate stored in the Secret. + kubectl get secret ca-cert -o yaml + ``` + + ```bash + # Check the cluster configuration file. + kubectl get nebulacluster nebula -o yaml + ``` + + Example output: + + ``` + ... + spec: + sslCerts: + autoMountServerCerts: "true" + serverSecret: server-cert + serverCert: tls.crt + serverKey: tls.key + clientSecret: client-cert + clientCert: tls.crt + clientKey: tls.key + caSecret: ca-cert + caCert: ca.crt + ... + ``` + + If the key names of the certificates and private keys stored in the Secret are different from the values of `serverCert`, `serverKey`, `clientCert`, `clientKey`, `caCert` under the `sslCerts` field in the cluster configuration, you need to execute `kubectl edit nebulacluster ` to manually modify the cluster configuration file. + + In the example output, the key name of the CA certificate in the TLS-type Secret is `tls.crt`, so you need to change the value of caCert from `ca.crt` to `tls.crt`. + +5. Use NebulaGraph Console to connect to the Graph service and establish a secure TLS connection. + + Example: + + ``` + kubectl run -ti --image vesoft/nebula-console:v{{console.release}} --restart=Never -- nebula-console -addr 10.98.xxx.xx -port 9669 -u root -p nebula -enable_ssl -ssl_root_ca_path /path/to/cert/root.crt -ssl_cert_path /path/to/cert/client.crt -ssl_private_key_path /path/to/cert/client.key + ``` + + - `-enable_ssl`: Use mTLS when connecting to NebulaGraph. + - `-ssl_root_ca_path`: Specify the storage path of the CA root certificate. + - `-ssl_cert_path`: Specify the storage path of the TLS public key certificate. + - `-ssl_private_key_path`: Specify the storage path of the TLS private key. + - For details on using NebulaGraph Console to connect to the Graph service, see [Connect to NebulaGraph](../../../4.deployment-and-installation/connect-to-nebula-graph.md). + + !!! note - !!! caution - - Make sure that you have added the hostname or IP of the server to the server's certificate's `subjectAltName` field before the `insecureSkipVerify` is set to `false`. If the hostname or IP of the server is not added, an error will occur when the client verifies the server's certificate chain and hostname. For details, see [openssl](https://kubernetes.io/docs/tasks/administer-cluster/certificates/#openssl). - - When the certificates are approaching expiration, they can be automatically updated by installing [cert-manager](https://cert-manager.io/docs/installation/supported-releases/). NebulaGraph will monitor changes to the certificate directory files, and once a change is detected, it will load the new certificate content into memory. - - ### Encryption strategies - - NebulaGraph offers three encryption strategies that you can choose and configure according to your needs. 
- - - Encryption of client-graph and all inter-service communications - - If you want to encrypt all data transmission between the client, Graph service, Meta service, and Storage service, you need to add the `enable_ssl = true` field to each service. - - Here is an example configuration: - - ```yaml - apiVersion: apps.nebula-graph.io/v1alpha1 - kind: NebulaCluster - metadata: - name: nebula - namespace: default - spec: - sslCerts: - autoMountServerCerts: "true" # Automatically mount the server certificate and private key into the Pod. - serverSecret: "server-cert" # The Secret name of the server certificate and private key. - clientSecret: "client-cert" # The Secret name of the client certificate and private key. - caSecret: "ca-cert" # The Secret name of the CA certificate. - graphd: - config: - enable_ssl: "true" - metad: - config: - enable_ssl: "true" - storaged: - config: - enable_ssl: "true" - ``` - - - - Encryption of only Graph service communication - - If the K8s cluster is deployed in the same data center and only the port of the Graph service is exposed externally, you can choose to encrypt only data transmission between the client and the Graph service. In this case, other services can communicate internally without encryption. Just add the `enable_graph_ssl = true` field to the Graph service. - - Here is an example configuration: - - ```yaml - apiVersion: apps.nebula-graph.io/v1alpha1 - kind: NebulaCluster - metadata: - name: nebula - namespace: default - spec: - sslCerts: - autoMountServerCerts: "true" - serverSecret: "server-cert" - caSecret: "ca-cert" - graphd: - config: - enable_graph_ssl: "true" - ``` - - !!! note - - Because Operator doesn't need to call the Graph service through an interface, it's not necessary to set `clientSecret` in `sslCerts`. - - - Encryption of only Meta service communication - - If you need to transmit confidential information to the Meta service, you can choose to encrypt data transmission related to the Meta service. In this case, you need to add the `enable_meta_ssl = true` configuration to each component. - - Here is an example configuration: - - ```yaml - apiVersion: apps.nebula-graph.io/v1alpha1 - kind: NebulaCluster - metadata: - name: nebula - namespace: default - spec: - sslCerts: - autoMountServerCerts: "true" - serverSecret: "server-cert" - clientSecret: "client-cert" - caSecret: "ca-cert" - graphd: - config: - enable_meta_ssl: "true" - metad: - config: - enable_meta_ssl: "true" - storaged: - config: - enable_meta_ssl: "true" - ``` - - After setting up the encryption policy, when an external [client](../../14.client/1.nebula-client.md) needs to connect to the Graph service with mutual TLS, you still need to set the relevant TLS fields according to the different clients. See the Use NebulaGraph Console to connect to Graph service section below for examples. - - ### Example of enabling mTLS without hot-reloading - - 1. Use the pre-generated server and client certificates and private keys, and the CA certificate to create a Secret for each. - - ```yaml - kubectl create secret tls --key= --cert= - ``` - - - `tls`: Indicates that the type of secret being created is TLS, which is used to store TLS certificates. - - ``: Specifies the name of the new secret being created, which can be customized. - - `--key=`: Specifies the path to the private key file of the TLS certificate to be stored in the secret. - - `--cert=`: Specifies the path to the public key certificate file of the TLS certificate to be stored in the secret. - - - 2. 
Add server certificate, client certificate, CA certificate configuration, and encryption policy configuration in the corresponding cluster instance YAML file. For details, see [Encryption strategies](#encryption_strategies). - - For example, add encryption configuration for transmission data between client, Graph service, Meta service, and Storage service. - - ```yaml - apiVersion: apps.nebula-graph.io/v1alpha1 - kind: NebulaCluster - metadata: - name: nebula - namespace: default - spec: - sslCerts: - autoMountServerCerts: "true" - serverSecret: "server-cert" // The name of the server Certificate Secret. - clientSecret: "client-cert" // The name of the client Certificate Secret. - caSecret: "ca-cert" // The name of the CA Certificate Secret. - graphd: - config: - enable_ssl: "true" - metad: - config: - enable_ssl: "true" - storaged: - config: - enable_ssl: "true" - ``` - - 3. Use `kubectl apply -f` to apply the file to the Kubernetes cluster. - - 4. Verify that the values of `serverCert`, `serverKey`, `clientCert`, `clientKey`, `caCert` under the `sslCerts` field in the cluster configuration match the key names of the certificates and private keys stored in the created Secret. - - ```bash - # Check the key names of the certificate and private key stored in the Secret. For example, check the key name of the CA certificate stored in the Secret. - kubectl get secret ca-cert -o yaml - ``` - - ```bash - # Check the cluster configuration file. - kubectl get nebulacluster nebula -o yaml - ``` - - Example output: - - ``` - ... - spec: - sslCerts: - autoMountServerCerts: "true" - serverSecret: server-cert - serverCert: tls.crt - serverKey: tls.key - clientSecret: client-cert - clientCert: tls.crt - clientKey: tls.key - caSecret: ca-cert - caCert: ca.crt - ... - ``` - - If the key names of the certificates and private keys stored in the Secret are different from the values of `serverCert`, `serverKey`, `clientCert`, `clientKey`, `caCert` under the `sslCerts` field in the cluster configuration, you need to execute `kubectl edit nebulacluster ` to manually modify the cluster configuration file. - - In the example output, the key name of the CA certificate in the TLS-type Secret is `tls.crt`, so you need to change the value of caCert from `ca.crt` to `tls.crt`. - - 5. Use NebulaGraph Console to connect to the Graph service and establish a secure TLS connection. - - Example: - - ``` - kubectl run -ti --image vesoft/nebula-console:v{{console.release}} --restart=Never -- nebula-console -addr 10.98.xxx.xx -port 9669 -u root -p nebula -enable_ssl -ssl_root_ca_path /path/to/cert/root.crt -ssl_cert_path /path/to/cert/client.crt -ssl_private_key_path /path/to/cert/client.key - ``` - - - `-enable_ssl`: Use mTLS when connecting to NebulaGraph. - - `-ssl_root_ca_path`: Specify the storage path of the CA root certificate. - - `-ssl_cert_path`: Specify the storage path of the TLS public key certificate. - - `-ssl_private_key_path`: Specify the storage path of the TLS private key. - - For details on using NebulaGraph Console to connect to the Graph service, see [Connect to NebulaGraph](../4.connect-to-nebula-graph-service.md). - - !!! note - - If you set `spec.console` to start a NebulaGraph Console container in the cluster, you can enter the console container and run the following command to connect to the Graph service. 
- - ```bash - nebula-console -addr 10.98.xxx.xx -port 9669 -u root -p nebula -enable_ssl -ssl_root_ca_path /path/to/cert/root.crt -ssl_cert_path /path/to/cert/client.crt -ssl_private_key_path /path/to/cert/client.key - ``` - - At this point, you can enable mTLS in NebulaGraph. + If you set `spec.console` to start a NebulaGraph Console container in the cluster, you can enter the console container and run the following command to connect to the Graph service. + ```bash + nebula-console -addr 10.98.xxx.xx -port 9669 -u root -p nebula -enable_ssl -ssl_root_ca_path /path/to/cert/root.crt -ssl_cert_path /path/to/cert/client.crt -ssl_private_key_path /path/to/cert/client.key + ``` + +At this point, you can enable mTLS in NebulaGraph. diff --git a/docs-2.0/nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md b/docs-2.0/k8s-operator/4.cluster-administration/4.7.security/4.7.2.enable-admission-control.md similarity index 92% rename from docs-2.0/nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md rename to docs-2.0/k8s-operator/4.cluster-administration/4.7.security/4.7.2.enable-admission-control.md index 653a7bab384..85cd7c9aeaf 100644 --- a/docs-2.0/nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.7.security/4.7.2.enable-admission-control.md @@ -4,7 +4,7 @@ Kubernetes [Admission Control](https://kubernetes.io/docs/reference/access-authn ## Prerequisites -You have already created a cluster using Kubernetes. For detailed steps, see [Creating a NebulaGraph Cluster with Kubectl](../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md). +A NebulaGraph cluster is created with NebulaGrpah Operator. For detailed steps, see [Create a NebulaGraph cluster](../4.1.installation/4.1.1.cluster-install.md). ## Admission control rules @@ -18,7 +18,7 @@ Kubernetes admission control allows you to insert custom logic or policies befor !!! note - High availability mode refers to the high availability of NebulaGraph cluster services. Storage and Meta services are stateful, and the number of replicas should be an odd number due to [Raft](../../1.introduction/3.nebula-graph-architecture/4.storage-service.md#raft) protocol requirements for data consistency. In high availability mode, at least 3 Storage services and 3 Meta services are required. Graph services are stateless, so their number of replicas can be even but should be at least 2. + High availability mode refers to the high availability of NebulaGraph cluster services. Storage and Meta services are stateful, and the number of replicas should be an odd number due to [Raft](../../../1.introduction/3.nebula-graph-architecture/4.storage-service.md#raft) protocol requirements for data consistency. In high availability mode, at least 3 Storage services and 3 Meta services are required. Graph services are stateless, so their number of replicas can be even but should be at least 2. - Preventing additional PVs from being added to Storage service via `dataVolumeClaims`. 
@@ -99,4 +99,4 @@ Once cert-manager is installed and admission control is enabled, NebulaGraph Ope nebulacluster.apps.nebula-graph.io/nebula patched $ kubectl patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"replicas": 3}}}' Error from server: admission webhook "nebulaclustervalidating.nebula-graph.io" denied the request: [spec.storaged: Forbidden: field is immutable while in ScaleOut phase, spec.storaged.replicas: Invalid value: 3: field is immutable while not in Running phase] - ``` + ``` \ No newline at end of file diff --git a/docs-2.0/nebula-operator/5.operator-failover.md b/docs-2.0/k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.1.self-healing.md similarity index 81% rename from docs-2.0/nebula-operator/5.operator-failover.md rename to docs-2.0/k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.1.self-healing.md index 171b4123cae..837d6fdba17 100644 --- a/docs-2.0/nebula-operator/5.operator-failover.md +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.1.self-healing.md @@ -4,11 +4,11 @@ NebulaGraph Operator calls the interface provided by NebulaGraph clusters to dyn ## Prerequisites -[Install NebulaGraph Operator](2.deploy-nebula-operator.md) +[Install NebulaGraph Operator](../../2.get-started/2.1.install-operator.md) ## Steps -1. Create a NebulaGraph cluster. For more information, see [Deploy NebulaGraph clusters with Kubectl](3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md) or [Deploy NebulaGraph clusters with Helm](3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md). +1. Create a NebulaGraph cluster. For more information, see [Create a NebulaGraph clusters](../4.1.installation/4.1.1.cluster-install.md). 2. Delete the Pod named `-storaged-2` after all pods are in the `Running` status. diff --git a/docs-2.0/k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.2.enable-zone.md b/docs-2.0/k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.2.enable-zone.md new file mode 100644 index 00000000000..a845c69b3ed --- /dev/null +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.2.enable-zone.md @@ -0,0 +1,248 @@ +# Enable zones + +NebulaGraph Operator supports the use of zones. A zone is a logical rack used to group Storage Pods within a cluster. Zones help improve the resilience of the cluster by ensuring an even distribution of data replicas across each zone. This topic explains how to create a cluster with zones. + +## Prerequisites + +A cluster is created using NebulaGraph Operator. For details on creating a cluster, see [Create a NebulaGraph cluster](../4.1.installation/4.1.1.cluster-install.md). + +## Background + +NebulaGraph efficiently manages its distributed architecture using the functionality provided by zones. The data in NebulaGraph is divided into different partitions, and replicas of these partitions are evenly distributed across all available zones. Queries can be directed preferentially to the Storage Pods within the same zone. Using zones significantly reduces the network traffic costs between zones and improves data transfer speed. For more detailed information about zones, see [Managing zones](../../../4.deployment-and-installation/5.zone.md). + +## Configure zones + +To make full use of the Zone feature, you first need to determine the actual Zones in which the nodes of the cluster reside. Typically, nodes deployed on cloud platforms come with labels indicating their respective Zones. 
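+
+For example, on most managed Kubernetes platforms you can read the zone of each node from the well-known topology label. The label key below is the standard one; your platform may use a different or an additional key.
+
+```bash
+# List the nodes together with the value of the standard zone label.
+kubectl get nodes -L topology.kubernetes.io/zone
+```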
+ +Once you have this information, you can configure it by setting the `spec.metad.config.zone_list` parameter in the cluster's configuration file. This parameter is a comma-separated list of Zone names and should match the actual Zone names of the nodes. For example, if your nodes are actually in the az1, az2, and az3 regions, the configuration should look like this: + +```yaml +spec: + metad: + config: + zone_list: az1, az2, az3 +``` + +NebulaGraph Operator utilizes Kubernetes's [TopoloySpread](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) feature to manage the scheduling of Storage Pods and Graph Pods. Once `zone_list` is configured, Storage Pods will be automatically assigned to their respective Zones based on the `topology.kubernetes.io/zone` label. + +## Prioritize intra-zone data access + +For intra-zone data access, the Graph service dynamically assigns itself to a zone using the `--assigned_zone=$NODE_ZONE` parameter. The Alpine Linux image specified in `spec.alpineImage` (default: `reg.vesoft-inc.com/nebula-alpine:latest`) plays a role in obtaining zone information. It identifies the zone name of the node where the Graph service resides by utilizing an init-container to fetch this information. + +By setting `spec.graphd.config.prioritize_intra_zone_reading` to `true` in the cluster configuration file, you enable the Graph service to prioritize sending queries to Storage services within the same zone. In the event of a read failure within that zone, the behavior depends on the value of `spec.graphd.config.stick_to_intra_zone_on_failure`. If set to `true`, the Graph service avoids reading data from other zones and returns an error. Otherwise, it reads data from leader partition replicas in other zones. + +```yaml +spec: + alpineImage: reg.vesoft-inc.com/xxx/xxx:latest + graphd: + config: + prioritize_intra_zone_reading: "true" + stick_to_intra_zone_on_failure: "false" +``` + +## Required parameters + +If you need to create a cluster with zones, you must add the following parameters to the cluster configuration file. For other fields and descriptions, see [Create a NebulaGraph cluster](../4.1.installation/4.1.1.cluster-install.md). + +```yaml +spec: + # Used to obtain the zone information of the node. + alpineImage: "reg.example-inc.com/xxx/xxx:latest" + graphd: + image: reg.example-inc.com/xxx/xxx + config: + # Prioritize sending queries to storage nodes within the same zone. + prioritize_intra_zone_reading: "true" + stick_to_intra_zone_on_failure: "false" + metad: + image: reg.example-inc.com/xxx/xxx + config: + # List of zone names, separated by commas. It is recommended to set an odd number. + zone_list: az1,az2,az3 + licenseManagerURL: "192.168.8.xxx:9119" + storaged: + image: reg.example-inc.com/xxx/xxx + imagePullSecrets: + - name: nebula-image + # Used to schedule the restart of Graph/Storage Pods to the original zone. + schedulerName: nebula-scheduler + # Field used to control the distribution of Storage Pods. + topologySpreadConstraints: + - topologyKey: "topology.kubernetes.io/zone" + whenUnsatisfiable: "DoNotSchedule" +``` + +Parameters in the above table are described as follows: + +| Parameter | Default value | Description | +| :---- | :--- | :--- | +| `spec.metad.licenseManagerURL` | - | Configure the URL that points to the LM, which consists of the access address and port number (default port `9119`) of the LM. For example, `192.168.8.xxx:9119`. 
**You must configure this parameter in order to obtain the license information; otherwise, the enterprise edition cluster cannot be used.** |
+|`spec.<graphd/metad/storaged>.image`|-|The container image of the Graph, Meta, or Storage service of the enterprise edition.|
+|`spec.imagePullSecrets`| - |Specifies the Secret for pulling the NebulaGraph Enterprise service images from a private repository.|
+|`spec.alpineImage`|-|The Alpine Linux image, used to obtain the zone information where nodes are located.|
+|`spec.metad.config.zone_list`|-|A list of zone names, split by comma. For example: zone1,zone2,zone3.<br>
**Zone names CANNOT be modified once set.**|
+|`spec.graphd.config.prioritize_intra_zone_reading`|`false`|Specifies whether to prioritize sending queries to the storage pods in the same zone.<br>When set to `true`, queries are sent to the storage pods in the same zone first. If the read fails in that zone, `stick_to_intra_zone_on_failure` determines whether to read leader partition replicas from other zones. |
+|`spec.graphd.config.stick_to_intra_zone_on_failure`|`false`|Specifies whether to stick to intra-zone routing when the requested partition replicas cannot be found in the same zone. When set to `true`, the Graph service does not read data from other zones and returns an error instead.|
+|`spec.schedulerName`|`kube-scheduler`|Schedules restarted Graph and Storage Pods back to their original zone. The value must be set to `nebula-scheduler`.|
+|`spec.topologySpreadConstraints`|-| A field in Kubernetes used to control the distribution of storage Pods, ensuring that your storage Pods are evenly spread across zones.<br>
**To use the zone feature, you must set the value of `topologySpreadConstraints[0].topologyKey` to `topology.kubernetes.io/zone` and the value of `topologySpreadConstraints[0].whenUnsatisfiable` to `DoNotSchedule`**. Run `kubectl get node --show-labels` to check the key. For more information, see [TopologySpread](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/#example-multiple-topologyspreadconstraints).| + + +!!! warning + + DO NOT manually modify the configmaps created by NebulaGraph Operator. Doing so may cause unexpected behavior. + + Once Storage and Graph services are assigned to zones, the mapping between the pod and its corresponding zone is stored in a configmap named `-graphd|storaged-zone`. This mapping facilitates pod scheduling during rolling updates and pod restarts, ensuring that services return to their original zones as needed. + +!!! caution + + Make sure storage Pods are evenly distributed across zones before ingesting data by running `SHOW ZONES` in nebula-console. For zone-related commands, see [Zones](../../../4.deployment-and-installation/5.zone.md). + +## Cluster configuration example with zones + +### Example of creating a cluster configuration with zones using `kubectl` + +Here is an example of a YAML configuration for creating a cluster with Zone using `kubectl`: + +```yaml +apiVersion: apps.nebula-graph.io/v1alpha1 +kind: NebulaCluster +metadata: + name: nebula + namespace: default +spec: + # Alpine Linux image used to obtain zone information for node location. + alpineImage: "reg.example-inc.com/xxx/xxx:latest" + # Agent configuration for backup, restore, and log cleanup. + # If you don't customize this configuration, the default configuration is used. + agent: + image: reg.example-inc.com/xxx/xxx + version: v{{nebula.release}} + exporter: + image: vesoft/nebula-stats-exporter + replicas: 1 + maxRequests: 20 + # Console container for connecting to the cluster. + console: + version: "nightly" + graphd: + config: + # The following parameters are required to create a cluster with Zone. + accept_partial_success: "true" + prioritize_intra_zone_reading: "true" + sync_meta_when_use_space: "true" + stick_to_intra_zone_on_failure: "false" + session_reclaim_interval_secs: "300" + # The following parameters are required for log collection. + logtostderr: "1" + redirect_stdout: "false" + stderrthreshold: "0" + resources: + requests: + cpu: "2" + memory: "2Gi" + limits: + cpu: "2" + memory: "2Gi" + replicas: 1 + image: reg.example-inc.com/xxx/xxx + version: v3.5.0-sc + metad: + config: + redirect_stdout: "false" + stderrthreshold: "0" + logtostder: "true" + # Once set, the zone name cannot be changed. + # It is recommended to set an odd number of zones. + zone_list: az1,az2,az3 + validate_session_timestamp: "false" + # LM access address and port number. 
+ licenseManagerURL: "192.168.8.xxx:9119" + resources: + requests: + cpu: "300m" + memory: "500Mi" + limits: + cpu: "1" + memory: "1Gi" + replicas: 3 + image: reg.example-inc.com/xxx/xxx + version: v3.5.0-sc + dataVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: local-path + storaged: + config: + redirect_stdout: "false" + stderrthreshold: "0" + logtostder: "true" + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + replicas: 3 + image: reg.example-inc.com/xxx/xxx + version: v3.5.0-sc + dataVolumeClaims: + - resources: + requests: + storage: 2Gi + storageClassName: local-path + # Automatically balance storage data after scaling. + enableAutoBalance: true + reference: + name: statefulsets.apps + version: v1 + schedulerName: nebula-scheduler + nodeSelector: + nebula: cloud + imagePullPolicy: Always + imagePullSecrets: + - name: nebula-image + # Distribute storage Pods evenly among zones. + # Must be set when using zones. + topologySpreadConstraints: + - topologyKey: "topology.kubernetes.io/zone" + whenUnsatisfiable: "DoNotSchedule" +``` + +### Example of creating a cluster with zones using `helm` + +Here is an example of creating a cluster with Zone using `helm`: + +```bash +helm install "" nebula-operator/nebula-cluster \ + # Specify the version of the cluster chart; if not specified, the latest version is installed by default. + # Execute the helm search repo nebula-operator/nebula-cluster command to view all chart versions. + --version={{operator.release}} \ + # Specify the namespace of the cluster. + --namespace="" \ + # Configure the Secret for pulling images from the private repository. + --set imagePullSecrets[0].name="{}" \ + --set nameOverride="" \ + # Configure the LM access address and port, default port is `9119`. + --set nebula.metad.licenseManagerURL="192.168.8.XXX:9119" \ + # Configure the image addresses for various services in the cluster. + --set nebula.graphd.image="" \ + --set nebula.metad.image="" \ + --set nebula.storaged.image="" \ + --set nebula.storageClassName="" \ + # Specify the version of the Nebula cluster. + --set nebula.version=v{{nebula.release}} \ + # Configure Zone. + # Once configured, the information of Zone cannot be modified. It is recommended to configure an odd number of Zones. + --set nebula.metad.config.zone_list="" \ + --set nebula.graphd.config.prioritize_intra_zone_reading="true" \ + --set nebula.graphd.config.stick_to_intra_zone_on_failure="false" \ + # Configure the Alpine Linux image for obtaining node Zone information. + --set nebula.alpineImage="" \ + # Set the distribution of Storage Pods to different zones. + --set nebula.topologySpreadConstraints[0].topologyKey="topology.kubernetes.io/zone" \ + --set nebula.topologySpreadConstraints[0].whenUnsatisfiable="DoNotSchedule" \ + # Schedule the restarting Graph/Storage Pods to the original zones. 
+ --set nebula.schedulerName="nebula-scheduler" +``` diff --git a/docs-2.0/nebula-operator/8.custom-cluster-configurations/storage/8.3.balance-data-when-scaling-storage.md b/docs-2.0/k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.3.balance-data-after-scale-out.md similarity index 95% rename from docs-2.0/nebula-operator/8.custom-cluster-configurations/storage/8.3.balance-data-when-scaling-storage.md rename to docs-2.0/k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.3.balance-data-after-scale-out.md index 267b00df96b..8fe8b3a28e3 100644 --- a/docs-2.0/nebula-operator/8.custom-cluster-configurations/storage/8.3.balance-data-when-scaling-storage.md +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.3.balance-data-after-scale-out.md @@ -8,7 +8,7 @@ You can define whether to balance data automatically or not with the parameter ` ## Prerequisites -You have created a NebulaGraph cluster. For how to create a cluster with Kubectl, see [Create a cluster with Kubectl](../../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md). +A NebulaGraph cluster is created. For details, see [Create a NebulaGraph cluster](../4.1.installation/4.1.1.cluster-install.md). ## Steps diff --git a/docs-2.0/nebula-operator/11.rolling-update-strategy.md b/docs-2.0/k8s-operator/4.cluster-administration/4.9.advanced/4.9.1.rolling-update-strategy.md similarity index 92% rename from docs-2.0/nebula-operator/11.rolling-update-strategy.md rename to docs-2.0/k8s-operator/4.cluster-administration/4.9.advanced/4.9.1.rolling-update-strategy.md index 8dea20c348c..10271c93015 100644 --- a/docs-2.0/nebula-operator/11.rolling-update-strategy.md +++ b/docs-2.0/k8s-operator/4.cluster-administration/4.9.advanced/4.9.1.rolling-update-strategy.md @@ -1,4 +1,4 @@ -# NebulaGraph cluster rolling update strategy +# Optimize leader transfer in rolling updates NebulaGraph clusters use a distributed architecture to divide data into multiple logical partitions, which are typically evenly distributed across different nodes. In distributed systems, there are usually multiple replicas of the same data. To ensure the consistency of data across multiple replicas, NebulaGraph clusters use the Raft protocol to synchronize multiple partition replicas. In the Raft protocol, each partition elects a leader replica, which is responsible for handling write requests, while follower replicas handle read requests. @@ -19,7 +19,7 @@ In the YAML file for creating a cluster instance, add the `spec.storaged.enableF When `enableForceUpdate` is set to `true`, it means that the partition leader replicas will not be migrated, thus speeding up the rolling update process. Conversely, when set to `false`, it means that the leader replicas will be migrated to other nodes to ensure the read and write availability of the cluster. The default value is `false`. -!!! caution +!!! warning When setting `enableForceUpdate` to `true`, make sure there is no traffic entering the cluster for read and write operations. This is because this setting will force the cluster pods to be rebuilt, and during this process, data loss or client request failures may occur. @@ -30,8 +30,9 @@ Configuration example: spec: ... storaged: - enableForceUpdate: true // When set to true, it speeds up the rolling update process. + # When set to true, + # it means that the partition leader replicas will not be migrated, + # but the cluster pods will be rebuilt directly. + enableForceUpdate: true ... 
``` - - diff --git a/docs-2.0/k8s-operator/4.cluster-administration/4.9.advanced/4.9.2.restart-cluster.md b/docs-2.0/k8s-operator/4.cluster-administration/4.9.advanced/4.9.2.restart-cluster.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docs-2.0/nebula-operator/7.operator-faq.md b/docs-2.0/k8s-operator/5.FAQ.md similarity index 86% rename from docs-2.0/nebula-operator/7.operator-faq.md rename to docs-2.0/k8s-operator/5.FAQ.md index 70384f84cf9..38eaa777405 100644 --- a/docs-2.0/nebula-operator/7.operator-faq.md +++ b/docs-2.0/k8s-operator/5.FAQ.md @@ -19,7 +19,7 @@ They are different concepts. A replica in the Operator docs indicates a pod repl ## How to view the logs of each service in the NebulaGraph cluster? -The logs for the NebulaGraph cluster are not gathered in the K8s cluster, which also means that they cannot be retrieved through the `kubectl logs` command. To obtain the logs of each cluster service, you need to access the container and view the log files that are stored inside. This is the only option available for users to get the service logs individually in the NebulaGraph cluster. +To obtain the logs of each cluster service, you need to access the container and view the log files that are stored inside. Steps to view the logs of each service in the NebulaGraph cluster: @@ -63,4 +63,4 @@ This error is generally caused by a DNS resolution failure, and you need to chec ``` helm upgrade nebula-operator nebula-operator/nebula-operator --namespace= --version={{operator.release}} --set kubernetesClusterDomain= ``` - is the namespace where Operator is located and is the updated domain name. + is the namespace where Operator is located and is the updated domain name. \ No newline at end of file diff --git a/docs-2.0/nebula-operator/2.deploy-nebula-operator.md b/docs-2.0/nebula-operator/2.deploy-nebula-operator.md deleted file mode 100644 index 7ef7ed7729d..00000000000 --- a/docs-2.0/nebula-operator/2.deploy-nebula-operator.md +++ /dev/null @@ -1,273 +0,0 @@ -# Deploy NebulaGraph Operator - -You can deploy NebulaGraph Operator with [Helm](https://helm.sh/). - -## Background - -[NebulaGraph Operator](1.introduction-to-nebula-operator.md) automates the management of NebulaGraph clusters, and eliminates the need for you to install, scale, upgrade, and uninstall NebulaGraph clusters, which lightens the burden on managing different application versions. - -## Prerequisites - -Before installing NebulaGraph Operator, you need to install the following software and ensure the correct version of the software : - -| Software | Requirement | -| ------------------------------------------------------------ | --------- | -| [Kubernetes](https://kubernetes.io) | \>= 1.16 | -| [Helm](https://helm.sh) | \>= 3.2.0 | -| [CoreDNS](https://github.com/coredns/coredns) | \>= 1.6.0 | - -!!! note - - - If using a role-based access control policy, you need to enable [RBAC](https://kubernetes.io/docs/admin/authorization/rbac) (optional). - - - [CoreDNS](https://coredns.io/) is a flexible and scalable DNS server that is [installed](https://github.com/coredns/deployment/tree/master/kubernetes) for Pods in NebulaGraph clusters. - -## Steps - -### Install NebulaGraph Operator - -1. Add the NebulaGraph Operator Helm repository. - - ```bash - helm repo add nebula-operator https://vesoft-inc.github.io/nebula-operator/charts - ``` - -2. Update information of available charts locally from repositories. 
- - ```bash - helm repo update - ``` - - For more information about `helm repo`, see [Helm Repo](https://helm.sh/docs/helm/helm_repo/). - -3. Create a namespace for NebulaGraph Operator. - - ```bash - kubectl create namespace - ``` - - For example, run the following command to create a namespace named `nebula-operator-system`. - - ```bash - kubectl create namespace nebula-operator-system - ``` - - - All the resources of NebulaGraph Operator are deployed in this namespace. - - You can also use a different name. - -4. Install NebulaGraph Operator. - - ```bash - helm install nebula-operator nebula-operator/nebula-operator --namespace= --version=${chart_version} - ``` - - For example, the command to install NebulaGraph Operator of version {{operator.release}} is as follows. - - ```bash - helm install nebula-operator nebula-operator/nebula-operator --namespace=nebula-operator-system --version={{operator.release}} - ``` - - - `nebula-operator-system` is a user-created namespace name. If you have not created this namespace, run `kubectl create namespace nebula-operator-system` to create one. You can also use a different name. - - - `{{operator.release}}` is the version of the nebula-operator chart. When not specifying `--version`, the latest version of the nebula-operator chart is used by default. Run `helm search repo -l nebula-operator` to see chart versions. - - You can customize the configuration items of the NebulaGraph Operator chart before running the installation command. For more information, see **Customize Helm charts** below. - -### Customize Helm charts - -When executing the `helm install [NAME] [CHART] [flags]` command to install a chart, you can specify the chart configuration. For more information, see [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). - -View the related configuration options in the [nebula-operator chart](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/charts/nebula-operator/values.yaml) configuration file. - -Alternatively, you can view the configurable options through the command `helm show values nebula-operator/nebula-operator`, as shown below. - - -For example: - -```yaml -[k8s@master ~]$ helm show values nebula-operator/nebula-operator -image: - nebulaOperator: - image: vesoft/nebula-operator:{{operator.tag}} - imagePullPolicy: Always - kubeRBACProxy: - image: bitnami/kube-rbac-proxy:0.14.2 - imagePullPolicy: Always - kubeScheduler: - image: registry.k8s.io/kube-scheduler:v1.24.11 - imagePullPolicy: Always - -imagePullSecrets: [] -kubernetesClusterDomain: "" - -controllerManager: - create: true - replicas: 2 - env: [] - resources: - limits: - cpu: 200m - memory: 200Mi - requests: - cpu: 100m - memory: 100Mi - -admissionWebhook: - create: false - -scheduler: - create: true - schedulerName: nebula-scheduler - replicas: 2 - env: [] - resources: - limits: - cpu: 200m - memory: 20Mi - requests: - cpu: 100m - memory: 100Mi -``` - -Part of the above parameters are described as follows: - -| Parameter | Default value | Description | -| :------------------------------------- | :------------------------------ | :----------------------------------------- | -| `image.nebulaOperator.image` | `vesoft/nebula-operator:{{operator.tag}}` | The image of NebulaGraph Operator, version of which is {{operator.release}}. | -| `image.nebulaOperator.imagePullPolicy` | `IfNotPresent` | The image pull policy in Kubernetes. 
| -| `imagePullSecrets` | - | The image pull secret in Kubernetes. | -| `kubernetesClusterDomain` | `cluster.local` | The cluster domain. | -| `controllerManager.create` | `true` | Whether to enable the controller-manager component. | -| `controllerManager.replicas` | `2` | The numeric value of controller-manager replicas. | -| `admissionWebhook.create` | `false` | Whether to enable Admission Webhook. This option is disabled. To enable it, set the value to `true` and you will need to install [cert-manager](https://cert-manager.io/docs/installation/helm/). | -| `shceduler.create` | `true` | Whether to enable Scheduler. | -| `shceduler.schedulerName` | `nebula-scheduler` | The Scheduler name. | -| `shceduler.replicas` | `2` | The numeric value of nebula-scheduler replicas. | - -You can run `helm install [NAME] [CHART] [flags]` to specify chart configurations when installing a chart. For more information, see [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). - -The following example shows how to specify the NebulaGraph Operator's AdmissionWebhook mechanism to be turned on when you install NebulaGraph Operator (AdmissionWebhook is disabled by default): - -```bash -helm install nebula-operator nebula-operator/nebula-operator --namespace= --set admissionWebhook.create=true -``` - -For more information about `helm install`, see [Helm Install](https://helm.sh/docs/helm/helm_install/). - -### Update NebulaGraph Operator - -1. Update the information of available charts locally from chart repositories. - - ```bash - helm repo update - ``` - -1. Update NebulaGraph Operator by passing configuration parameters via `--set`. - - - `--set`:Overrides values using the command line. For configurable items, see the above-mentioned section **Customize Helm charts**. - - For example, to enable the AdmissionWebhook, run the following command: - - ```bash - helm upgrade nebula-operator nebula-operator/nebula-operator --namespace=nebula-operator-system --version={{operator.release}} --set admissionWebhook.create=true - ``` - - For more information, see [Helm upgrade](https://helm.sh/docs/helm/helm_update/). - -### Upgrade NebulaGraph Operator - -!!! Compatibility "Legacy version compatibility" - - - Does not support upgrading 0.9.0 and below version NebulaGraph Operator to 1.x. - - The 1.x version NebulaGraph Operator is not compatible with NebulaGraph of version below v3.x. - -1. Update the information of available charts locally from chart repositories. - - ```bash - helm repo update - ``` - -2. Upgrade Operator to {{operator.tag}}. - - ```bash - helm upgrade nebula-operator nebula-operator/nebula-operator --namespace= --version={{operator.release}} - ``` - - For example: - - ```bash - helm upgrade nebula-operator nebula-operator/nebula-operator --namespace=nebula-operator-system --version={{operator.release}} - ``` - - Output: - - ```bash - Release "nebula-operator" has been upgraded. Happy Helming! - NAME: nebula-operator - LAST DEPLOYED: Tue Apr 16 02:21:08 2022 - NAMESPACE: nebula-operator-system - STATUS: deployed - REVISION: 3 - TEST SUITE: None - NOTES: - NebulaGraph Operator installed! - ``` - -3. Pull the latest CRD configuration file. - - !!! note - You need to upgrade the corresponding CRD configurations after NebulaGraph Operator is upgraded. Otherwise, the creation of NebulaGraph clusters will fail. 
For information about the CRD configurations, see [apps.nebula-graph.io_nebulaclusters.yaml](https://github.com/vesoft-inc/nebula-operator/blob/{{operator.tag}}/config/crd/bases/apps.nebula-graph.io_nebulaclusters.yaml). - - 1. Pull the NebulaGraph Operator chart package. - - ```bash - helm pull nebula-operator/nebula-operator --version={{operator.release}} - ``` - - - `--version`: The NebulaGraph Operator version you want to upgrade to. If not specified, the latest version will be pulled. - - 2. Run `tar -zxvf` to unpack the charts. - - For example: To unpack {{operator.tag}} chart to the `/tmp` path, run the following command: - - ```bash - tar -zxvf nebula-operator-{{operator.release}}.tgz -C /tmp - ``` - - - `-C /tmp`: If not specified, the chart files will be unpacked to the current directory. - - -4. Upgrade the CRD configuration file in the `nebula-operator` directory. - - ```bash - kubectl apply -f crds/nebulacluster.yaml - ``` - - Output: - - ```bash - customresourcedefinition.apiextensions.k8s.io/nebulaclusters.apps.nebula-graph.io configured - ``` - -### Uninstall NebulaGraph Operator - -1. Uninstall the NebulaGraph Operator chart. - - ```bash - helm uninstall nebula-operator --namespace= - ``` - -2. Delete CRD. - - ```bash - kubectl delete crd nebulaclusters.apps.nebula-graph.io - ``` - -## What's next - -Automate the deployment of NebulaGraph clusters with NebulaGraph Operator. For more information, see [Deploy NebulaGraph Clusters with Kubectl](3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md) or [Deploy NebulaGraph Clusters with Helm](3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md). - -{{ent.ent_begin}} -For the NebulaGraph Enterprise Edition deployment, you need first to deploy the License Manager and have the license key loaded. For more information, see [Deploy LM](3.deploy-nebula-graph-cluster/3.0.deploy-lm.md). -{{ent.ent_end}} - diff --git a/docs-2.0/nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md b/docs-2.0/nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md deleted file mode 100644 index a4e11147fa7..00000000000 --- a/docs-2.0/nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md +++ /dev/null @@ -1,608 +0,0 @@ -# Deploy NebulaGraph clusters with Kubectl - -!!! Compatibility "Legacy version compatibility" - - The 1.x version NebulaGraph Operator is not compatible with NebulaGraph of version below v3.x. - -## Prerequisites - -- [You have installed NebulaGraph Operator](../2.deploy-nebula-operator.md) - -- [You have created StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) - -{{ ent.ent_begin }} - -- [LM has been installed and the License Key has been successfully loaded](3.0.deploy-lm.md) (Enterprise only) - -{{ ent.ent_end }} - -## Create clusters - -The following example shows how to create a NebulaGraph cluster by creating a cluster named `nebula`. - -1. Create a namespace, for example, `nebula`. If not specified, the `default` namespace is used. - - ```bash - kubectl create namespace nebula - ``` - - {{ent.ent_begin}} -2. Create a Secret for pulling the NebulaGraph Enterprise image from a private repository. - - !!! note - Skip this step if you are using NebulaGraph Community Edition. - - ```bash - kubectl -n create secret docker-registry \ - --docker-server=DOCKER_REGISTRY_SERVER \ - --docker-username=DOCKER_USER \ - --docker-password=DOCKER_PASSWORD - ``` - - - ``: The namespace where this Secret will be stored. 
- - ``: Specify the name of the Secret. - - `DOCKER_REGISTRY_SERVER`: Specify the server address of the private repository from which the image will be pulled, such as `reg.example-inc.com`. - - `DOCKER_USER`: The username for the image repository. - - `DOCKER_PASSWORD`: The password for the image repository. - - {{ent.ent_end}} - -3. Create a file named `apps_v1alpha1_nebulacluster.yaml`. - - - {{ ent.ent_begin }} - - - To create a NebulaGraph Enterprise cluster - - === "Cluster without Zones" - - You must set the following parameters in the configuration file for the enterprise edition. Other parameters can be changed as needed. For information on other parameters, see the [sample configuration](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/config/samples/nebulacluster.yaml). - - - | Parameter | Default value | Description | - | :---- | :--- | :--- | - | `spec.metad.licenseManagerURL` | - | Configure the URL that points to the LM, which consists of the access address and port number (default port `9119`) of the LM. For example, `192.168.8.xxx:9119`. **You must configure this parameter in order to obtain the license information; otherwise, the enterprise edition cluster cannot be used.** | - |`spec..image`|-|The container image of the Graph, Meta, or Storage service of the enterprise edition.| - |`spec.imagePullSecrets`| - |Specifies the Secret for pulling the NebulaGraph Enterprise service images from a private repository.| - - - === "Cluster with Zones" - - NebulaGraph Operator supports creating a cluster with [Zones](../../4.deployment-and-installation/5.zone.md). - - ??? info "Expand to view sample configurations of a cluster with Zones" - - ```yaml - apiVersion: apps.nebula-graph.io/v1alpha1 - kind: NebulaCluster - metadata: - name: nebula - namespace: default - spec: - # Used to obtain the Zone information where nodes are located. - alpineImage: "reg.vesoft-inc.com/xxx/xxx:latest" - # Used for backup and recovery as well as log cleanup functions. - # If you do not customize this configuration, - # the default configuration will be used. - agent: - image: reg.vesoft-inc.com/xxx/xxx - version: v3.6.0-sc - exporter: - image: vesoft/nebula-stats-exporter - replicas: 1 - maxRequests: 20 - # Used to create a console container, - # which is used to connect to the NebulaGraph cluster. - console: - version: "nightly" - graphd: - config: - # The following parameters are required for creating a cluster with Zones. - accept_partial_success: "true" - prioritize_intra_zone_reading: "true" - sync_meta_when_use_space: "true" - stick_to_intra_zone_on_failure: "false" - session_reclaim_interval_secs: "300" - # The following parameters are required for collecting logs. - logtostderr: "1" - redirect_stdout: "false" - stderrthreshold: "0" - resources: - requests: - cpu: "2" - memory: "2Gi" - limits: - cpu: "2" - memory: "2Gi" - replicas: 1 - image: reg.vesoft-inc.com/xxx/xxx - version: v3.5.0-sc - metad: - config: - redirect_stdout: "false" - stderrthreshold: "0" - logtostder: "true" - # Zone names CANNOT be modified once set. - # It's suggested to set an odd number of Zones. - zone_list: az1,az2,az3 - validate_session_timestamp: "false" - # LM access address and port number. 
- licenseManagerURL: "192.168.8.xxx:9119" - resources: - requests: - cpu: "300m" - memory: "500Mi" - limits: - cpu: "1" - memory: "1Gi" - replicas: 3 - image: reg.vesoft-inc.com/xxx/xxx - version: v3.5.0-sc - dataVolumeClaim: - resources: - requests: - storage: 2Gi - storageClassName: local-path - storaged: - config: - redirect_stdout: "false" - stderrthreshold: "0" - logtostder: "true" - resources: - requests: - cpu: "1" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - replicas: 3 - image: reg.vesoft-inc.com/xxx/xxx - version: v3.5.0-sc - dataVolumeClaims: - - resources: - requests: - storage: 2Gi - storageClassName: local-path - # Automatically balance storage data after scaling out. - enableAutoBalance: true - reference: - name: statefulsets.apps - version: v1 - schedulerName: nebula-scheduler - nodeSelector: - nebula: cloud - imagePullPolicy: Always - imagePullSecrets: - - name: nebula-image - # Evenly distribute storage Pods across Zones. - # Must be set when using Zones. - topologySpreadConstraints: - - topologyKey: "topology.kubernetes.io/zone" - whenUnsatisfiable: "DoNotSchedule" - ``` - - !!! caution - - Make sure storage Pods are evenly distributed across zones before ingesting data by running `SHOW ZONES` in nebula-console. For zone-related commands, see [Zones](../../4.deployment-and-installation/5.zone.md). - - You must set the following parameters for creating a cluster with Zones. Other parameters can be changed as needed. - - | Parameter | Default value | Description | - | :---- | :--- | :--- | - | `spec.metad.licenseManagerURL` | - | Configure the URL that points to the LM, which consists of the access address and port number (default port `9119`) of the LM. For example, `192.168.8.xxx:9119`. **You must configure this parameter in order to obtain the license information; otherwise, the enterprise edition cluster cannot be used.** | - |`spec..image`|-|The container image of the Graph, Meta, or Storage service of the enterprise edition.| - |`spec.imagePullSecrets`| - |Specifies the Secret for pulling the NebulaGraph Enterprise service images from a private repository.| - |`spec.alpineImage`|-|The Alpine Linux image, used to obtain the Zone information where nodes are located.| - |`spec.metad.config.zone_list`|-|A list of zone names, split by comma. For example: zone1,zone2,zone3.
**Zone names CANNOT be modified once be set.**| - |`spec.graphd.config.prioritize_intra_zone_reading`|`false`|Specifies whether to prioritize sending queries to the storage pods in the same zone.
When set to `true`, the query is sent to the storage pods in the same zone. If reading fails in that Zone, it will decide based on `stick_to_intra_zone_on_failure` whether to read the leader partition replica data from other Zones. | - |`spec.graphd.config.stick_to_intra_zone_on_failure`|`false`|Specifies whether to stick to intra-zone routing if unable to find the requested partitions in the same zone. When set to `true`, if unable to find the partition replica in that Zone, it does not read data from other Zones.| - |`spec.schedulerName`|`kube-scheduler`|Schedules the restarted Graph and Storage pods to the same Zone. The value must be set to `nebula-scheduler`.| - |`spec.topologySpreadConstraints`|-| It is a field in Kubernetes used to control the distribution of storage Pods. Its purpose is to ensure that your storage Pods are evenly spread across Zones.
**To use the Zone feature, you must set the value of `topologySpreadConstraints[0].topologyKey` to `topology.kubernetes.io/zone` and the value of `topologySpreadConstraints[0].whenUnsatisfiable` to `DoNotSchedule`**. Run `kubectl get node --show-labels` to check the key. For more information, see [TopologySpread](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/#example-multiple-topologyspreadconstraints).| - - ???+ note "Learn more about Zones in NebulaGraph Operator" - - **Understanding NebulaGraph's Zone Feature** - - NebulaGraph utilizes a feature called Zones to efficiently manage its distributed architecture. Each Zone represents a logical grouping of Storage pods and Graph pods, responsible for storing the complete graph space data. The data within NebulaGraph's spaces is partitioned, and replicas of these partitions are evenly distributed across all available Zones. The utilization of Zones can significantly reduce inter-Zone network traffic costs and boost data transfer speeds. Moreover, intra-zone-reading allows for increased availability, because replicas of a partition spread out among different zones. - - **Configuring NebulaGraph Zones** - - To make the most of the Zone feature, you first need to determine the actual Zone where your cluster nodes are located. Typically, nodes deployed on cloud platforms are labeled with their respective Zones. Once you have this information, you can configure it in your cluster's configuration file by setting the `spec.metad.config.zone_list` parameter. This parameter should be a list of Zone names, separated by commas, and should match the actual Zone names where your nodes are located. For example, if your nodes are in Zones `az1`, `az2`, and `az3`, your configuration would look like this: - - ```yaml - spec: - metad: - config: - zone_list: az1,az2,az3 - ``` - - **Operator's Use of Zone Information** - - NebulaGraph Operator leverages Kubernetes' [TopoloySpread](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) feature to manage the scheduling of Storage and Graph pods. Once the `zone_list` is configured, Storage services are automatically assigned to their respective Zones based on the `topology.kubernetes.io/zone` label. - - For intra-zone data access, the Graph service dynamically assigns itself to a Zone using the `--assigned_zone=$NODE_ZONE` parameter. It identifies the Zone name of the node where the Graph service resides by utilizing an init-container to fetch this information. The Alpine Linux image specified in `spec.alpineImage` (default: `reg.vesoft-inc.com/nebula-alpine:latest`) plays a role in obtaining Zone information. - - **Prioritizing Intra-Zone Data Access** - - By setting `spec.graphd.config.prioritize_intra_zone_reading` to `true` in the cluster configuration file, you enable the Graph service to prioritize sending queries to Storage services within the same Zone. In the event of a read failure within that Zone, the behavior depends on the value of `spec.graphd.config.stick_to_intra_zone_on_failure`. If set to `true`, the Graph service avoids reading data from other Zones and returns an error. Otherwise, it reads data from leader partition replicas in other Zones. 
- - ```yaml - spec: - alpineImage: reg.vesoft-inc.com/xxx/xxx:latest - graphd: - config: - prioritize_intra_zone_reading: "true" - stick_to_intra_zone_on_failure: "false" - ``` - - **Zone Mapping for Resilience** - - Once Storage and Graph services are assigned to Zones, the mapping between the pod and its corresponding Zone is stored in a configmap named `-graphd|storaged-zone`. This mapping facilitates pod scheduling during rolling updates and pod restarts, ensuring that services return to their original Zones as needed. - - !!! caution - - DO NOT manually modify the configmaps created by NebulaGraph Operator. Doing so may cause unexpected behavior. - - - Other optional parameters for the enterprise edition are as follows: - - | Parameter | Default value | Description | - | :---- | :--- | :--- | - |`spec.storaged.enableAutoBalance`| `false`| Specifies whether to enable automatic data balancing. For more information, see [Balance storage data after scaling out](../8.custom-cluster-configurations/storage/8.3.balance-data-when-scaling-storage.md).| - |`spec.enableBR`|`false`|Specifies whether to enable the BR tool. For more information, see [Backup and restore](../10.backup-restore-using-operator.md).| - |`spec.graphd.enable_graph_ssl`|`false`| Specifies whether to enable SSL for the Graph service. For more details, see [Enable mTLS](../8.custom-cluster-configurations/8.5.enable-ssl.md). | - - {{ ent.ent_end }} - - - To create a NebulaGraph Community cluster - - See [community cluster configurations](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/config/samples/nebulacluster.yaml). - - ??? Info "Expand to show parameter descriptions of community clusters" - - | Parameter | Default value | Description | - | :---- | :--- | :--- | - | `metadata.name` | - | The name of the created NebulaGraph cluster. | - |`spec.console`|-| Configuration of the Console service. For details, see [nebula-console](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/doc/user/nebula_console.md#nebula-console).| - | `spec.graphd.replicas` | `1` | The numeric value of replicas of the Graphd service. | - | `spec.graphd.image` | `vesoft/nebula-graphd` | The container image of the Graphd service. | - | `spec.graphd.version` | `{{nebula.tag}}` | The version of the Graphd service. | - | `spec.graphd.service` | - | The Service configurations for the Graphd service. | - | `spec.graphd.logVolumeClaim.storageClassName` | - | The log disk storage configurations for the Graphd service. | - | `spec.metad.replicas` | `1` | The numeric value of replicas of the Metad service. | - | `spec.metad.image` | `vesoft/nebula-metad` | The container image of the Metad service. | - | `spec.metad.version` | `{{nebula.tag}}` | The version of the Metad service. | - | `spec.metad.dataVolumeClaim.storageClassName` | - | The data disk storage configurations for the Metad service. | - | `spec.metad.logVolumeClaim.storageClassName`|- | The log disk storage configurations for the Metad service.| - | `spec.storaged.replicas` | `3` | The numeric value of replicas of the Storaged service. | - | `spec.storaged.image` | `vesoft/nebula-storaged` | The container image of the Storaged service. | - | `spec.storaged.version` | `{{nebula.tag}}` | The version of the Storaged service. | - | `spec.storaged.dataVolumeClaims.resources.requests.storage` | - | Data disk storage size for the Storaged service. You can specify multiple data disks to store data. 
When multiple disks are specified, the storage path is `/usr/local/nebula/data1`, `/usr/local/nebula/data2`, etc.| - | `spec.storaged.dataVolumeClaims.resources.storageClassName` | - | The data disk storage configurations for Storaged. If not specified, the global storage parameter is applied. | - | `spec.storaged.logVolumeClaim.storageClassName`|- | The log disk storage configurations for the Storaged service.| - | `spec.storaged.enableAutoBalance` | `true` |Whether to balance data automatically. | - |`spec.agent`|`{}`| Configuration of the Agent service. This is used for backup and recovery as well as log cleanup functions. If you do not customize this configuration, the default configuration will be used.| - | `spec.reference.name` | - | The name of the dependent controller. | - | `spec.schedulerName` | - | The scheduler name. | - | `spec.imagePullPolicy` | The image policy to pull the NebulaGraph image. For details, see [Image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy). | The image pull policy in Kubernetes. | - |`spec.logRotate`| - |Log rotation configuration. For more information, see [Manage cluster logs](../8.custom-cluster-configurations/8.4.manage-running-logs.md).| - |`spec.enablePVReclaim`|`false`|Define whether to automatically delete PVCs and release data after deleting the cluster. For more information, see [Reclaim PVs](../8.custom-cluster-configurations/storage/8.2.pv-reclaim.md).| - - -1. Create a NebulaGraph cluster. - - ```bash - kubectl create -f apps_v1alpha1_nebulacluster.yaml - ``` - - Output: - - ```bash - nebulacluster.apps.nebula-graph.io/nebula created - ``` - -5. Check the status of the NebulaGraph cluster. - - ```bash - kubectl get nebulaclusters.apps.nebula-graph.io nebula - ``` - - Output: - - ```bash - NAME GRAPHD-DESIRED GRAPHD-READY METAD-DESIRED METAD-READY STORAGED-DESIRED STORAGED-READY AGE - nebula 1 1 1 1 3 3 86s - ``` - -## Scaling clusters - -- The cluster scaling feature is for NebulaGraph Enterprise Edition only. - -{{ ent.ent_begin }} - -- Scaling a NebulaGraph cluster for Enterprise Edition is supported only with NebulaGraph Operator version 1.1.0 or later. - -You can modify the value of `replicas` in `apps_v1alpha1_nebulacluster.yaml` to scale a NebulaGraph cluster. - -### Scale out clusters - -The following shows how to scale out a NebulaGraph cluster by changing the number of Storage services to 5: - -1. Change the value of the `storaged.replicas` from `3` to `5` in `apps_v1alpha1_nebulacluster.yaml`. - - ```yaml - storaged: - resources: - requests: - cpu: "500m" - memory: "500Mi" - limits: - cpu: "1" - memory: "1Gi" - replicas: 5 - image: vesoft/nebula-storaged - version: {{nebula.tag}} - dataVolumeClaims: - - resources: - requests: - storage: 2Gi - storageClassName: fast-disks - - resources: - requests: - storage: 2Gi - storageClassName: fast-disks - logVolumeClaim: - resources: - requests: - storage: 2Gi - storageClassName: fast-disks - reference: - name: statefulsets.apps - version: v1 - schedulerName: default-scheduler - ``` - -2. Run the following command to update the NebulaGraph cluster CR. - - ```bash - kubectl apply -f apps_v1alpha1_nebulacluster.yaml - ``` - -3. Check the number of Storage services. 
- - ```bash - kubectl get pods -l app.kubernetes.io/cluster=nebula - ``` - - Output: - - ```bash - NAME READY STATUS RESTARTS AGE - nebula-graphd-0 1/1 Running 0 2m - nebula-metad-0 1/1 Running 0 2m - nebula-storaged-0 1/1 Running 0 2m - nebula-storaged-1 1/1 Running 0 2m - nebula-storaged-2 1/1 Running 0 2m - nebula-storaged-3 1/1 Running 0 5m - nebula-storaged-4 1/1 Running 0 5m - ``` - - As you can see above, the number of Storage services is scaled up to 5. - -### Scale in clusters - -The principle of scaling in a cluster is the same as scaling out a cluster. You scale in a cluster if the numeric value of the `replicas` in `apps_v1alpha1_nebulacluster.yaml` is changed smaller than the current number. For more information, see the **Scale out clusters** section above. - -In the process of downsizing the cluster, if the operation is not complete successfully and seems to be stuck, you may need to check the status of the job using the `nebula-console` client specified in the `spec.console` field. Analyzing the logs and manually intervening can help ensure that the Job runs successfully. For information on how to check jobs, see [Job statements](../../3.ngql-guide/4.job-statements.md). - -!!! caution - - - NebulaGraph Operator currently only supports scaling Graph and Storage services and does not support scale Meta services. - - If you scale in a cluster with Zones, make sure that the number of remaining storage pods is not less than the number of Zones specified in the `spec.metad.config.zone_list` field. Otherwise, the cluster will fail to start. - -### Enable HPA - -NebulaGraph Operator provides the NebulaAutoscaler object for you to implement [horizontal pod autoscaling (HPA)](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) for the Graph service. - - -The following procedure shows how to enable HPA in a NebulaGraph cluster: - -1. Install the metrics-server. - - Developed based on the HorizontalPodAutoscaler in Kubernetes, the NebulaAutoscaler automatically scales the number of pods based on the metrics collected by the [metrics-server](https://github.com/kubernetes-sigs/metrics-server). - - - Run the following command to install the latest metrics-server release: - - ```bash - kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml - ``` - -2. Verify that the metrics-server is working. - - The metrics-server implements the Metrics API, which provides information about resource usage for nodes and pods in the cluster. The following example calls the Metrics API to obtain resource usage data from the metrics-server. If resource usage data is successfully returned, it indicates the metrics-server is working. - - Run the following command to query resource usage on a pod named `nebula-graphd-1`: - - ```bash - kubectl get --raw "/apis/metrics.k8s.io/v1beta1/namespaces/default/pods/nebula-graphd-1" | jq '.' 
- ``` - - Sample response: - - ```json - { - "kind": "PodMetrics", - "apiVersion": "metrics.k8s.io/v1beta1", - "metadata": { - "name": "nebula-graphd-1", - "namespace": "default", - "creationTimestamp": "2023-09-27T13:39:54Z", - "labels": { - "app.kubernetes.io/cluster": "nebula", - "app.kubernetes.io/component": "graphd", - "app.kubernetes.io/managed-by": "nebula-operator", - "app.kubernetes.io/name": "nebula-graph", - "controller-revision-hash": "nebula-graphd-56cf5f8b66", - "statefulset.kubernetes.io/pod-name": "nebula-graphd-1" - } - }, - "timestamp": "2023-09-27T13:39:48Z", - "window": "15.015s", - "containers": [ - { - "name": "graphd", - "usage": { - "cpu": "323307n", - "memory": "12644Ki" - } - } - ] - } - ``` - - Run the following command to query resource usage on a node named `192-168-8-35`: - - ```bash - kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes/192-168-8-35" | jq '.' - ``` - - Sample response: - - ```json - { - "kind": "NodeMetrics", - "apiVersion": "metrics.k8s.io/v1beta1", - "metadata": { - "name": "192-168-8-35", - "creationTimestamp": "2023-09-27T14:00:13Z", - "labels": { - "beta.kubernetes.io/arch": "amd64", - "beta.kubernetes.io/os": "linux", - "kubernetes.io/arch": "amd64", - "kubernetes.io/hostname": "192-168-8-35", - "kubernetes.io/os": "linux", - "nebula": "cloud", - "node-role.kubernetes.io/control-plane": "", - "node.kubernetes.io/exclude-from-external-load-balancers": "" - } - }, - "timestamp": "2023-09-27T14:00:00Z", - "window": "20.045s", - "usage": { - "cpu": "164625163n", - "memory": "8616740Ki" - } - } - ``` - -3. Create a NebulaAutoscaler object. - - Use the following YAML sample to create a NebulaAutoscaler object that automatically adjusts the number of pods between 2 to 5 based on the average CPU utilization. - - ```yaml - apiVersion: autoscaling.nebula-graph.io/v1alpha1 - kind: NebulaAutoscaler - metadata: - name: nebula-autoscaler - spec: - nebulaClusterRef: - name: nebula - graphdPolicy: - minReplicas: 2 - maxReplicas: 5 - metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 50 - pollingPeriod: 30s - ``` - - The key parameters are as follows: - - - nebulaClusterRef: The target cluster that the NebulaAutoscaler applies to. - - graphdPolicy: The auto scaling policy adopted by the NebulaAutoscaler. All the child fields are compatible with the fields used by the Kubernetes HorizontalPodAutoscaler. Check the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2/#HorizontalPodAutoscalerSpec) for details. - - pollingPeriod: The time interval between each resource usage checks by the NebulaAutoscaler. - - The NebulaAutoscaler also supports the `behavior` parameter, which enables you to control the scaling procedure in great detail by configuring separate scale-up and scale-down behaviors. - - Make sure you understand the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2/#HorizontalPodAutoscalerSpec) before you use the `behavior` parameter. - - The following sample creates a NebulaAutoscaler object that behaves differently in scale-up and scale-down. 
- - ```yaml - apiVersion: autoscaling.nebula-graph.io/v1alpha1 - kind: NebulaAutoscaler - metadata: - name: nebula-autoscaler - spec: - nebulaClusterRef: - name: nebula - graphdPolicy: - minReplicas: 2 - maxReplicas: 5 - metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 50 - behavior: - scaleDown: - stabilizationWindowSeconds: 300 - policies: - - type: Percent - value: 100 - periodSeconds: 15 - scaleUp: - stabilizationWindowSeconds: 0 - policies: - - type: Percent - value: 100 - periodSeconds: 15 - - type: Pods - value: 4 - periodSeconds: 15 - selectPolicy: Max - pollingPeriod: 30s - ``` - -4. Check whether HPA is working. - - After running `kubectl apply` to create the NebulaAutoscaler object, you can use the following commands to check whether HPA is working. - - Run the `kubectl get na` command to check the NebulaAutoscaler status. - - Sample response: - - ``` - NAME REFERENCE MIN-REPLICAS MAX-REPLICAS CURRENT-REPLICAS ACTIVE ABLETOSCALE LIMITED READY AGE - nebula-autoscaler nebula 2 5 2 True True True True 19h - ``` - - Run the `kubectl get nc` command to check the cluster status. - - Sample response: - - ``` - NAME READY GRAPHD-DESIRED GRAPHD-READY METAD-DESIRED METAD-READY STORAGED-DESIRED STORAGED-READY AGE - nebula True 2 2 1 1 3 3 20h - ``` - -{{ ent.ent_end }} - -## Delete clusters - -Run the following command to delete a NebulaGraph cluster with Kubectl: - -```bash -kubectl delete -f apps_v1alpha1_nebulacluster.yaml -``` - -## What's next - -[Connect to NebulaGraph databases](../4.connect-to-nebula-graph-service.md) diff --git a/docs-2.0/nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md b/docs-2.0/nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md deleted file mode 100644 index b0ea3458363..00000000000 --- a/docs-2.0/nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md +++ /dev/null @@ -1,220 +0,0 @@ -# Deploy NebulaGraph clusters with Helm - -!!! Compatibility "Legacy version compatibility" - - The 1.x version NebulaGraph Operator is not compatible with NebulaGraph of version below v3.x. - -## Prerequisite - -- [You have installed NebulaGraph Operator](../2.deploy-nebula-operator.md) - -- [You have created StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) - -{{ ent.ent_begin }} - -- [LM has been installed and the License Key has been successfully loaded](3.0.deploy-lm.md) (Enterprise only) - -{{ ent.ent_end }} - -## Create clusters - -1. Add the NebulaGraph Operator Helm repository. - - ```bash - helm repo add nebula-operator https://vesoft-inc.github.io/nebula-operator/charts - ``` - -2. Update information of available charts locally from chart repositories. - - ```bash - helm repo update - ``` - -3. Set environment variables to your desired values. - - ```bash - export NEBULA_CLUSTER_NAME=nebula # The desired NebulaGraph cluster name. - export NEBULA_CLUSTER_NAMESPACE=nebula # The desired namespace where your NebulaGraph cluster locates. - export STORAGE_CLASS_NAME=fast-disks # The name of the StorageClass that has been created. - ``` - -4. Create a namespace for your NebulaGraph cluster (If you have created one, skip this step). - - ```bash - kubectl create namespace "${NEBULA_CLUSTER_NAMESPACE}" - ``` - - {{ent.ent_begin}} - -5. Create a Secret for pulling the NebulaGraph cluster image from a private repository (Enterprise only). 
- - ```bash - kubectl -n "${NEBULA_CLUSTER_NAMESPACE}" create secret docker-registry \ - --docker-server=DOCKER_REGISTRY_SERVER \ - --docker-username=DOCKER_USER \ - --docker-password=DOCKER_PASSWORD - ``` - - - ``: Specify the name of the Secret. - - `DOCKER_REGISTRY_SERVER`: Specify the server address of the private repository from which the image will be pulled, such as `reg.example-inc.com`. - - `DOCKER_USER`: The username for the image repository. - - `DOCKER_PASSWORD`: The password for the image repository. - -6. Apply the variables to the Helm chart to create a NebulaGraph cluster. - - ```bash - helm install "${NEBULA_CLUSTER_NAME}" nebula-operator/nebula-cluster \ - --set nameOverride=${NEBULA_CLUSTER_NAME} \ - --set nebula.storageClassName="${STORAGE_CLASS_NAME}" \ - # Specify the version of the NebulaGraph cluster. - --set nebula.version=v{{nebula.release}} \ - # Specify the version of the nebula-cluster chart. If not specified, the latest version of the chart is installed by default. - # Run 'helm search repo nebula-operator/nebula-cluster' to view the available versions of the chart. - --version={{operator.release}} \ - --namespace="${NEBULA_CLUSTER_NAMESPACE}" \ - ``` - - {{ent.ent_begin}} - - To create a NebulaGraph cluster for Enterprise Edition, run the following command: - - === "Cluster without Zones" - - ```bash - helm install "${NEBULA_CLUSTER_NAME}" nebula-operator/nebula-cluster \ - # Configure the access address and port (default port is '9119') that points to the LM. You must configure this parameter in order to obtain the license information. Only for NebulaGraph Enterprise Edition clusters. - --set nebula.metad.licenseManagerURL=`192.168.8.XXX:9119` \ - # Configure the image addresses for each service in the cluster. - --set nebula.graphd.image= \ - --set nebula.metad.image= \ - --set nebula.storaged.image= \ - # Configure the Secret for pulling images from a private repository. - --set nebula.imagePullSecrets= \ - --set nameOverride=${NEBULA_CLUSTER_NAME} \ - --set nebula.storageClassName="${STORAGE_CLASS_NAME}" \ - # Specify the version of the NebulaGraph cluster. - --set nebula.version=v{{nebula.release}} \ - # Specify the version of the nebula-cluster chart. If not specified, the latest version of the chart is installed by default. - # Run 'helm search repo nebula-operator/nebula-cluster' to view the available versions of the chart. - --version={{operator.release}} \ - --namespace="${NEBULA_CLUSTER_NAMESPACE}" \ - ``` - - === "Cluster with Zones" - - NebulaGraph Operator supports the [Zones](../../4.deployment-and-installation/5.zone.md) feature. For how to use Zones in NebulaGraph Operator, see [Learn more about Zones in NebulaGraph Operator](3.1create-cluster-with-kubectl.md) - - ```bash - helm install "${NEBULA_CLUSTER_NAME}" nebula-operator/nebula-cluster \ - # Configure the access address and port (default port is '9119') that points to the LM. You must configure this parameter in order to obtain the license information. Only for NebulaGraph Enterprise Edition clusters. - --set nebula.metad.licenseManagerURL=`192.168.8.XXX:9119` \ - # Configure the image addresses for each service in the cluster. - --set nebula.graphd.image= \ - --set nebula.metad.image= \ - --set nebula.storaged.image= \ - # Configure the Secret for pulling images from a private repository. - --set nebula.imagePullSecrets= \ - --set nameOverride=${NEBULA_CLUSTER_NAME} \ - --set nebula.storageClassName="${STORAGE_CLASS_NAME}" \ - # Specify the version of the NebulaGraph cluster. 
- --set nebula.version=v{{nebula.release}} \ - # Specify the version of the nebula-cluster chart. If not specified, the latest version of the chart is installed by default. - # Run 'helm search repo nebula-operator/nebula-cluster' to view the available versions of the chart. - --version={{operator.release}} \ - # Configure Zones - # Once Zones are configured, the Zone information cannot be modified. - # It's suggested to configure an odd number of Zones. - --set nebula.metad.config.zone_list= \ - --set nebula.graphd.config.prioritize_intra_zone_reading=true \ - --set nebula.graphd.config.stick_to_intra_zone_on_failure=false \ - # Evenly distribute the Pods of the Storage service across Zones. - --set nebula.topologySpreadConstraints[0].topologyKey=topology.kubernetes.io/zone \ - --set nebula.topologySpreadConstraints[0].whenUnsatisfiable=DoNotSchedule \ - # Used to schedule restarted Graph or Storage Pods to the specified Zone. - --set nebula.schedulerName=nebula-scheduler \ - --namespace="${NEBULA_CLUSTER_NAMESPACE}" \ - ``` - - !!! caution - - Make sure storage Pods are evenly distributed across zones before ingesting data by running `SHOW ZONES` in nebula-console. For zone-related commands, see [Zones](../../4.deployment-and-installation/5.zone.md). - - {{ent.ent_end}} - - To view all configuration parameters of the NebulaGraph cluster, run the `helm show values nebula-operator/nebula-cluster` command or click [nebula-cluster/values.yaml](https://github.com/vesoft-inc/nebula-operator/blob/{{operator.branch}}/charts/nebula-cluster/values.yaml). - - Click [Chart parameters](https://github.com/vesoft-inc/nebula-operator/blob/{{operator.branch}}/doc/user/nebula_cluster_helm_guide.md#optional-chart-parameters) to see descriptions and default values of the configurable cluster parameters. - - Use the `--set` argument to set configuration parameters for the cluster. For example, `--set nebula.storaged.replicas=3` will set the number of replicas for the Storage service in the cluster to 3. - - -7. Check the status of the NebulaGraph cluster you created. - - ```bash - kubectl -n "${NEBULA_CLUSTER_NAMESPACE}" get pod -l "app.kubernetes.io/cluster=${NEBULA_CLUSTER_NAME}" - ``` - - Output: - - ```bash - NAME READY STATUS RESTARTS AGE - nebula-graphd-0 1/1 Running 0 5m34s - nebula-graphd-1 1/1 Running 0 5m34s - nebula-metad-0 1/1 Running 0 5m34s - nebula-metad-1 1/1 Running 0 5m34s - nebula-metad-2 1/1 Running 0 5m34s - nebula-storaged-0 1/1 Running 0 5m34s - nebula-storaged-1 1/1 Running 0 5m34s - nebula-storaged-2 1/1 Running 0 5m34s - ``` - -## Scaling clusters - -- The cluster scaling feature is for NebulaGraph Enterprise Edition only. - -{{ ent.ent_begin }} - -- Scaling a NebulaGraph cluster for Enterprise Edition is supported only with NebulaGraph Operator version 1.1.0 or later. - -You can scale a NebulaGraph cluster by defining the value of the `replicas` corresponding to the different services in the cluster. 
- -For example, run the following command to scale out a NebulaGraph cluster by changing the number of Storage services from 2 (the original value) to 5: - -```bash -helm upgrade "${NEBULA_CLUSTER_NAME}" nebula-operator/nebula-cluster \ - --namespace="${NEBULA_CLUSTER_NAMESPACE}" \ - --set nameOverride=${NEBULA_CLUSTER_NAME} \ - --set nebula.storageClassName="${STORAGE_CLASS_NAME}" \ - --set nebula.storaged.replicas=5 -``` - -Similarly, you can scale in a NebulaGraph cluster by setting the value of the `replicas` corresponding to the different services in the cluster smaller than the original value. - -In the process of downsizing the cluster, if the operation job is not complete successfully and seems to be stuck, you may need to check the status of the job using the `nebula-console` client specified in the `nebula.console` field. And then analyzing service logs can help ensure that the Job runs successfully. For information on how to check jobs, see [Job statements](../../3.ngql-guide/4.job-statements.md). - -!!! caution - - - NebulaGraph Operator currently only supports scaling Graph and Storage services and does not support scale Meta services. - - If you scale in a cluster with Zones, make sure that the number of remaining storage pods is not less than the number of Zones specified in the `nebula.metad.config.zone_list` field. Otherwise, the cluster will fail to start. - -You can click on [nebula-cluster/values.yaml](https://github.com/vesoft-inc/nebula-operator/blob/{{operator.tag}}/charts/nebula-cluster/values.yaml) to see more configurable parameters of the nebula-cluster chart. For more information about the descriptions of configurable parameters, see **Configuration parameters of the nebula-cluster Helm chart** below. - -{{ ent.ent_end }} - -## Delete clusters - -Run the following command to delete a NebulaGraph cluster with Helm: - -```bash -helm uninstall "${NEBULA_CLUSTER_NAME}" --namespace="${NEBULA_CLUSTER_NAMESPACE}" -``` - -Or use variable values to delete a NebulaGraph cluster with Helm: - -```bash -helm uninstall nebula --namespace=nebula -``` - -## What's next - -[Connect to NebulaGraph Databases](../4.connect-to-nebula-graph-service.md) \ No newline at end of file diff --git a/docs-2.0/nebula-operator/6.get-started-with-operator.md b/docs-2.0/nebula-operator/6.get-started-with-operator.md deleted file mode 100644 index 4b608c68475..00000000000 --- a/docs-2.0/nebula-operator/6.get-started-with-operator.md +++ /dev/null @@ -1,10 +0,0 @@ -# Overview of using NebulaGraph Operator - -To use NebulaGraph Operator to connect to NebulaGraph databases, see steps as follows: - -1. [Install NebulaGraph Operator](2.deploy-nebula-operator.md). -2. Create a NebulaGraph cluster. - - For more information, see [Deploy NebulaGraph clusters with Kubectl](3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md) or [Deploy NebulaGraph clusters with Helm](3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md). - -3. [Connect to a NebulaGraph database](4.connect-to-nebula-graph-service.md). 
diff --git a/docs-2.0/nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md b/docs-2.0/nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md deleted file mode 100644 index d1c20db6682..00000000000 --- a/docs-2.0/nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md +++ /dev/null @@ -1,172 +0,0 @@ -# Customize parameters for a NebulaGraph cluster - -Meta, Storage, and Graph services in a NebulaGraph cluster have their own configuration settings, which are defined in the YAML file of the NebulaGraph cluster instance as `config`. These settings are mapped and loaded into the corresponding service's ConfigMap in Kubernetes. At the time of startup, the configuration present in the ConfigMap is mounted onto the directory `/usr/local/nebula/etc/` for every service. - -!!! note - - It is not available to customize configuration parameters for NebulaGraph Clusters deployed with Helm. - -The structure of `config` is as follows. - -```go -Config map[string]string `json:"config,omitempty"` -``` -## Prerequisites - -You have created a NebulaGraph cluster. For how to create a cluster with Kubectl, see [Create a cluster with Kubectl](../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md). - -## Steps - -The following example uses a cluster named `nebula` and the cluster's configuration file named `nebula_cluster.yaml` to show how to set `config` for the Graph service in a NebulaGraph cluster. - -1. Run the following command to access the edit page of the `nebula` cluster. - - ```bash - kubectl edit nebulaclusters.apps.nebula-graph.io nebula - ``` - -2. Customize parameters under the `spec.graphd.config` field. In the following sample, the `enable_authorize` and `auth_type` parameters are used for demonstration purposes. - - ```yaml - apiVersion: apps.nebula-graph.io/v1alpha1 - kind: NebulaCluster - metadata: - name: nebula - namespace: default - spec: - graphd: - resources: - requests: - cpu: "500m" - memory: "500Mi" - limits: - cpu: "1" - memory: "1Gi" - replicas: 1 - image: vesoft/nebula-graphd - version: {{nebula.tag}} - storageClaim: - resources: - requests: - storage: 2Gi - storageClassName: fast-disks - config: // Customize parameters for the Graph service in a cluster. - "enable_authorize": "true" - "auth_type": "password" - ... - ``` - - The parameters that can be added under the `config` field are listed in detail in the [Meta service configuration parameters](../../5.configurations-and-logs/1.configurations/2.meta-config.md), [Storage service configuration parameters](../../5.configurations-and-logs/1.configurations/4.storage-config.md), and [Graph service configuration parameters](../../5.configurations-and-logs/1.configurations/3.graph-config.md) topics. - - !!! note - - * To update cluster configurations without incurring pod restart, ensure that all parameters added under the `config` field support runtime dynamic modification. Check the **Whether supports runtime dynamic modifications** column of the parameter tables on the aforementioned parameter details pages to see if a parameter supports runtime dynamic modification. - * If one or more parameters that do not support runtime dynamic modification are added under the `config` field, pod restart is required for the parameters to take effect. - - - To add the `config` for the Meta and Storage services, add `spec.metad.config` and `spec.storaged.config` respectively. - - -3. 
Run `kubectl apply -f nebula_cluster.yaml` to push your configuration changes to the cluster. - - After customizing the parameters, the configurations in the corresponding ConfigMap (`nebula-graphd`) of the Graph service will be overwritten. - -## Customize port configurations - -You can add the `port` and `ws_http_port` parameters under the `config` field to customize port configurations. For details about these two parameters, see the Networking configurations section in [Meta service configuration parameters](../../5.configurations-and-logs/1.configurations/2.meta-config.md), [Storage service configuration parameters](../../5.configurations-and-logs/1.configurations/4.storage-config.md), and [Graph service configuration parameters](../../5.configurations-and-logs/1.configurations/3.graph-config.md). - -!!! note - - * Pod restart is required for the `port` and `ws_http_port` parameters to take effect. - * It is NOT recommnended to modify the `port` parameter after the cluster is started. - -1. Modifiy the cluster configuration file. - - ```yaml - apiVersion: apps.nebula-graph.io/v1alpha1 - kind: NebulaCluster - metadata: - name: nebula - namespace: default - spec: - graphd: - config: - port: "3669" - ws_http_port: "8080" - resources: - requests: - cpu: "200m" - memory: "500Mi" - limits: - cpu: "1" - memory: "1Gi" - replicas: 1 - image: vesoft/nebula-graphd - version: {{nebula.tag}} - metad: - config: - ws_http_port: 8081 - resources: - requests: - cpu: "300m" - memory: "500Mi" - limits: - cpu: "1" - memory: "1Gi" - replicas: 1 - image: vesoft/nebula-metad - version: {{nebula.tag}} - dataVolumeClaim: - resources: - requests: - storage: 2Gi - storageClassName: local-path - storaged: - config: - ws_http_port: 8082 - resources: - requests: - cpu: "300m" - memory: "500Mi" - limits: - cpu: "1" - memory: "1Gi" - replicas: 1 - image: vesoft/nebula-storaged - version: {{nebula.tag}} - dataVolumeClaims: - - resources: - requests: - storage: 2Gi - storageClassName: local-path - enableAutoBalance: true - reference: - name: statefulsets.apps - version: v1 - schedulerName: default-scheduler - imagePullPolicy: IfNotPresent - imagePullSecrets: - - name: nebula-image - enablePVReclaim: true - topologySpreadConstraints: - - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: "ScheduleAnyway" - ``` - -2. Run the `kubectl apply -f nebula_cluster.yaml` to push your configuration changes to the cluster. - -3. Verify that the configuration takes effect. - - ```bash - kubectl get svc - ``` - - Sample response: - - ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - nebula-graphd-headless ClusterIP None 3669/TCP,8080/TCP 10m - nebula-graphd-svc ClusterIP 10.102.13.115 3669/TCP,8080/TCP 10m - nebula-metad-headless ClusterIP None 9559/TCP,8081/TCP 11m - nebula-storaged-headless ClusterIP None 9779/TCP,8082/TCP,9778/TCP 11m - ``` \ No newline at end of file diff --git a/docs-2.0/nebula-studio/quick-start/st-ug-import-data.md b/docs-2.0/nebula-studio/quick-start/st-ug-import-data.md index d6622e40842..5aca60cbc95 100644 --- a/docs-2.0/nebula-studio/quick-start/st-ug-import-data.md +++ b/docs-2.0/nebula-studio/quick-start/st-ug-import-data.md @@ -27,11 +27,11 @@ To upload files, follow these steps: !!! note - You can choose multiple CSV files at the same time. The CSV file used in this article can be downloaded in the [Design a schema](/docs-2.0/nebula-studio/quick-start/st-ug-plan-schema.md). + You can choose multiple CSV files at the same time. 
The CSV file used in this article can be downloaded in the [Design a schema](../../nebula-studio/quick-start/st-ug-create-schema.md). -3. After uploading, you can click the ![detail](https://docs-cdn.nebula-graph.com.cn/figures/detail.png) button in the **Operations** column to preview the file content, or click the ![delete](https://docs-cdn.nebula-graph.com.cn/figures/alert-delete.png) button to delete the uploaded file. +1. After uploading, you can click the ![detail](https://docs-cdn.nebula-graph.com.cn/figures/detail.png) button in the **Operations** column to preview the file content, or click the ![delete](https://docs-cdn.nebula-graph.com.cn/figures/alert-delete.png) button to delete the uploaded file. - ![preview](https://docs-cdn.nebula-graph.com.cn/figures/st-ug-010-en.png) + ![preview](https://docs-cdn.nebula-graph.com.cn/figures/st-ug-010-en.png) ## Import Data diff --git a/mkdocs.yml b/mkdocs.yml index e69febde933..b9cd2ba5f05 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -68,7 +68,7 @@ markdown_extensions: alternate_style: true - pymdownx.details -# Plugins +Plugins plugins: - search # This is the original mkdocs search plugin. To use algolia search, comment out this plugin. - macros: @@ -725,28 +725,43 @@ nav: - Exchange FAQ: nebula-exchange/ex-ug-FAQ.md - NebulaGraph Operator: - - What is NebulaGraph Operator: nebula-operator/1.introduction-to-nebula-operator.md - - Overview of using NebulaGraph Operator: nebula-operator/6.get-started-with-operator.md - - Deploy NebulaGraph Operator: nebula-operator/2.deploy-nebula-operator.md - - Deploy clusters: - - Deploy LM: nebula-operator/3.deploy-nebula-graph-cluster/3.0.deploy-lm.md - - Deploy clusters with Kubectl: nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md - - Deploy clusters with Helm: nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md - - Connect to NebulaGraph databases: nebula-operator/4.connect-to-nebula-graph-service.md - - Configure clusters: - - Customize configuration parameters for a NebulaGraph cluster: nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md - - Storage: - - Dynamically expand persistent volumes: nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md - - Balance storage data after scaling out: nebula-operator/8.custom-cluster-configurations/storage/8.3.balance-data-when-scaling-storage.md - - Reclaim PVs: nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md - - Manage cluster logs: nebula-operator/8.custom-cluster-configurations/8.4.manage-running-logs.md - - Enable mTLS: nebula-operator/8.custom-cluster-configurations/8.5.enable-ssl.md - - Enable admission control: nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md - - Upgrade NebulaGraph clusters: nebula-operator/9.upgrade-nebula-cluster.md - - Specify a rolling update strategy: nebula-operator/11.rolling-update-strategy.md - - Backup and restore: nebula-operator/10.backup-restore-using-operator.md - - Self-healing: nebula-operator/5.operator-failover.md - - FAQ: nebula-operator/7.operator-faq.md + - What is NebulaGraph Operator: k8s-operator/1.introduction-to-nebula-operator.md + - Getting started: + - Install NebulaGraph Operator: k8s-operator/2.get-started/2.1.install-operator.md + - Deploy LM: k8s-operator/2.get-started/2.2.deploy-lm.md + - Create a NebulaGraph cluster: k8s-operator/2.get-started/2.3.create-cluster.md + - Connect to a NebulaGraph cluster: 
k8s-operator/2.get-started/2.4.connect-to-cluster.md + - NebulaGraph Operator management: + - Customize installation defaults: k8s-operator/3.operator-management/3.1.customize-installation.md + - Update NebulaGraph Operator: k8s-operator/3.operator-management/3.2.update-operator.md + - Upgrade NebulaGraph Operator: k8s-operator/3.operator-management/3.3.upgrade-operator.md + - Uninstall NebulaGraph Operator: k8s-operator/3.operator-management/3.4.unistall-operator.md + - Cluster administration: + - Deployment: + - Install clusters: k8s-operator/4.cluster-administration/4.1.installation/4.1.1.cluster-install.md + - Upgrade clusters: k8s-operator/4.cluster-administration/4.1.installation/4.1.2.cluster-upgrade.md + - Uninstall clusters: k8s-operator/4.cluster-administration/4.1.installation/4.1.3.cluster-uninstall.md + - Customize cluster configurations: k8s-operator/4.cluster-administration/4.2.configuration.md + - Scaling: + - Scaling clusters: k8s-operator/4.cluster-administration/4.3.scaling/4.3.1.resizing.md + - Enable HPA: k8s-operator/4.cluster-administration/4.3.scaling/4.3.2.enable-hpa.md + - Storage management: +# - Use local PV: k8s-operator/4.cluster-administration/4.4.storage-management/4.4.1.use-local-pv.md + - Dynamically expand persistent volumes: k8s-operator/4.cluster-administration/4.4.storage-management/4.4.2.pv-expansion.md + - Configure PV reclaim: k8s-operator/4.cluster-administration/4.4.storage-management/4.4.3.configure-pv-reclaim.md + - Log management: k8s-operator/4.cluster-administration/4.5.logging.md + - Backup and restore: k8s-operator/4.cluster-administration/4.6.backup-and-restore.md + - Security: + - Enable mTLS: k8s-operator/4.cluster-administration/4.7.security/4.7.1.enable-mtls.md + - Enable admission control: k8s-operator/4.cluster-administration/4.7.security/4.7.2.enable-admission-control.md + - HA and balancing: + - Self-healing overview: k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.1.self-healing.md + - Enable disaster recovery zones: k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.2.enable-zone.md + - Balance data after scale-out: k8s-operator/4.cluster-administration/4.8.ha-and-balancing/4.8.3.balance-data-after-scale-out.md + - Advanced: + - Optimize leader transfer in rolling updates: k8s-operator/4.cluster-administration/4.9.advanced/4.9.1.rolling-update-strategy.md +# - Restart clusters: k8s-operator/4.cluster-administration/4.9.advanced/4.9.2.restart-cluster.md + - FAQ: k8s-operator/5.FAQ.md - Graph computing: