diff --git a/charts/evi-hbase-3.0.3/.helmignore b/charts/evi-hbase-3.0.3/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/charts/evi-hbase-3.0.3/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/evi-hbase-3.0.3/Chart.yaml b/charts/evi-hbase-3.0.3/Chart.yaml new file mode 100644 index 0000000..cbd6c7c --- /dev/null +++ b/charts/evi-hbase-3.0.3/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +appVersion: 1.16.0 +dependencies: +- condition: hbase.enabled + name: hbase + repository: https://itboy87.github.io/bigdata-charts/ + tags: + - hbase + version: 0.1.7 +description: A Helm chart for Kubernetes +name: evi-hbase +type: application +version: 3.0.3 diff --git a/charts/evi-hbase-3.0.3/charts/hbase/.helmignore b/charts/evi-hbase-3.0.3/charts/hbase/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/evi-hbase-3.0.3/charts/hbase/Chart.yaml b/charts/evi-hbase-3.0.3/charts/hbase/Chart.yaml new file mode 100644 index 0000000..ba44666 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v2 +appVersion: 2.4.13 +dependencies: +- condition: hadoop.enabled + name: hadoop + repository: file://../hadoop + version: ~1.2.1 +- condition: zookeeper.enabled + name: zookeeper + repository: https://charts.bitnami.com/bitnami + version: 10.0.2 +description: HBase is an open-source non-relational distributed database modeled after + Google's Bigtable and written in Java. +home: https://hbase.apache.org/ +icon: http://hbase.apache.org/images/hbase_logo_with_orca.png +maintainers: +- email: mr.hussnain.it@gmail.com + name: sabeeh +name: hbase +version: 0.1.7 diff --git a/charts/evi-hbase-3.0.3/charts/hbase/README.md b/charts/evi-hbase-3.0.3/charts/hbase/README.md new file mode 100644 index 0000000..15a5f1b --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/README.md @@ -0,0 +1,67 @@ +# hbase + +![Version: 0.1.6](https://img.shields.io/badge/Version-0.1.6-informational?style=flat-square) ![AppVersion: 2.0.1](https://img.shields.io/badge/AppVersion-2.0.1-informational?style=flat-square) + +HBase is an open-source non-relational distributed database modeled after Google's Bigtable and written in Java. 
+
+**Homepage:** <https://hbase.apache.org/>
+
+## Maintainers
+
+| Name | Email | Url |
+| ---- | ------ | --- |
+| sabeeh | mr.hussnain.it@gmail.com |  |
+
+## Requirements
+
+| Repository | Name | Version |
+|------------|------|---------|
+| https://charts.bitnami.com/bitnami | zookeeper | ~6.3.0 |
+| https://gradiant.github.io/charts | hdfs | ~0.1.10 |
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| antiAffinity | string | `"soft"` | |
+| conf.hadoopUserName | string | `"hdfs"` | |
+| conf.hbaseSite."hbase.rootdir" | string | `nil` | |
+| conf.hbaseSite."hbase.zookeeper.quorum" | string | `nil` | |
+| hbase.master.replicas | int | `1` | |
+| hbase.master.resources.limits.cpu | string | `"1000m"` | |
+| hbase.master.resources.limits.memory | string | `"2048Mi"` | |
+| hbase.master.resources.requests.cpu | string | `"10m"` | |
+| hbase.master.resources.requests.memory | string | `"256Mi"` | |
+| hbase.regionServer.replicas | int | `1` | |
+| hbase.regionServer.resources.limits.cpu | string | `"1000m"` | |
+| hbase.regionServer.resources.limits.memory | string | `"2048Mi"` | |
+| hbase.regionServer.resources.requests.cpu | string | `"10m"` | |
+| hbase.regionServer.resources.requests.memory | string | `"256Mi"` | |
+| hbaseVersion | string | `"2.0.1"` | |
+| hdfs.enabled | bool | `true` | |
+| image.pullPolicy | string | `"IfNotPresent"` | |
+| image.repository | string | `"gradiant/hbase-base"` | |
+| image.tag | string | `"2.0.1"` | |
+| prometheus.config.lowercaseOutputLabelNames | bool | `true` | |
+| prometheus.config.lowercaseOutputName | bool | `true` | |
+| prometheus.config.rules[0].labels.namespace | string | `"$1"` | |
+| prometheus.config.rules[0].labels.region | string | `"$3"` | |
+| prometheus.config.rules[0].labels.table | string | `"$2"` | |
+| prometheus.config.rules[0].name | string | `"HBase_metric_$4"` | |
+| prometheus.config.rules[0].pattern | string | `"Hadoop<service=HBase, name=RegionServer, sub=Regions><>Namespace_([^\\W_]+)_table_([^\\W_]+)_region_([^\\W_]+)_metric_(\\w+)"` | |
+| prometheus.config.rules[1].labels.name | string | `"$2"` | |
+| prometheus.config.rules[1].labels.sub | string | `"$3"` | |
+| prometheus.config.rules[1].name | string | `"hadoop_$1_$4"` | |
+| prometheus.config.rules[1].pattern | string | `"Hadoop<service=(\\w+), name=(\\w+), sub=(\\w+)><>([\\w._]+)"` | |
+| prometheus.config.rules[2].pattern | string | `".+"` | |
+| prometheus.enabled | bool | `true` | |
+| prometheus.image | string | `"spdigital/prometheus-jmx-exporter-kubernetes"` | |
+| prometheus.imageTag | string | `"0.3.1"` | |
+| prometheus.port | int | `9709` | |
+| prometheus.resources | object | `{}` | |
+| prometheus.thriftPort | int | `5557` | |
+| zookeeper.enabled | bool | `true` | |
+| zookeeper.replicaCount | int | `1` | |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.4.0](https://github.com/norwoodj/helm-docs/releases/v1.4.0)
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/.helmignore b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/.helmignore
new file mode 100644
index 0000000..c12e0db
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +docs diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/Chart.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/Chart.yaml new file mode 100644 index 0000000..3b225f9 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +appVersion: 3.3.3 +description: The Apache Hadoop software library is a framework that allows for the + distributed processing of large data sets across clusters of computers using simple + programming models. +home: https://hadoop.apache.org/ +icon: http://hadoop.apache.org/images/hadoop-logo.jpg +maintainers: +- email: github@farberg.de + name: pfisterer + url: http://github.com/pfisterer +name: hadoop +sources: +- https://github.com/pfisterer/apache-hadoop-helm +- https://github.com/apache/hadoop +version: 1.2.1 diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/LICENSE b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/LICENSE new file mode 100644 index 0000000..547752b --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright The Helm Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/README.md b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/README.md
new file mode 100644
index 0000000..8c825f6
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/README.md
@@ -0,0 +1,107 @@
+# Hadoop Chart
+
+This chart is modified from [stable/hadoop](https://github.com/helm/charts/tree/master/stable/hadoop) and [mgit-at/helm-hadoop-3](https://github.com/mgit-at/helm-hadoop-3) and has been updated to:
+
+- use a multi-architecture Docker image, and
+- use the current latest version of Hadoop.
+
+This chart is primarily intended for YARN and MapReduce job execution, where HDFS is used only to transport small artifacts within the framework, not as a general-purpose distributed filesystem. Data should be read from cloud-based datastores such as Google Cloud Storage, S3, or Swift.
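+
+For example, once the cluster is up, a job would typically read its input straight from object storage rather than from the chart's HDFS (a minimal sketch, assuming the S3A connector and credentials are configured; `my-bucket` is a placeholder):
+
+```bash
+# Hypothetical wordcount run reading from and writing to S3 via the s3a:// scheme
+/opt/hadoop/bin/hadoop jar \
+  /opt/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.3.3.jar \
+  wordcount s3a://my-bucket/input s3a://my-bucket/output
+```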
+
+## Chart Details
+
+## Installing the Chart
+
+To install the chart with the release name `hadoop`:
+
+```bash
+helm repo add pfisterer-hadoop https://pfisterer.github.io/apache-hadoop-helm/
+helm install hadoop pfisterer-hadoop/hadoop
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the Hadoop chart and their default values.
+
+| Parameter                               | Description                                                     | Default                                                            |
+| --------------------------------------- | --------------------------------------------------------------- |--------------------------------------------------------------------|
+| `image.repository`                      | Hadoop image                                                    | `ghcr.io/fleeksoft/hbase/hdfs`                                     |
+| `image.tag`                             | Hadoop image tag                                                | `3.3.3.2`                                                          |
+| `image.pullPolicy`                      | Pull policy for the images                                      | `IfNotPresent`                                                     |
+| `hadoopVersion`                         | Version of hadoop libraries being used                          | `3.3.3`                                                            |
+| `logLevel`                              | Log level of the Hadoop daemons                                 | `INFO`                                                             |
+| `antiAffinity`                          | Pod anti-affinity, `hard` or `soft`                             | `soft`                                                             |
+| `hdfs.nameNode.pdbMinAvailable`         | PDB for HDFS NameNode                                           | `1`                                                                |
+| `hdfs.nameNode.resources`               | resources for the HDFS NameNode                                 | `requests:memory=256Mi,cpu=10m,limits:memory=2048Mi,cpu=1000m`     |
+| `hdfs.dataNode.replicas`                | Number of HDFS DataNode replicas                                | `1`                                                                |
+| `hdfs.dataNode.pdbMinAvailable`         | PDB for HDFS DataNode                                           | `1`                                                                |
+| `hdfs.dataNode.resources`               | resources for the HDFS DataNode                                 | `requests:memory=256Mi,cpu=10m,limits:memory=2048Mi,cpu=1000m`     |
+| `hdfs.webhdfs.enabled`                  | Enable WebHDFS REST API                                         | `true`                                                             |
+| `yarn.resourceManager.pdbMinAvailable`  | PDB for the YARN ResourceManager                                | `1`                                                                |
+| `yarn.resourceManager.resources`        | resources for the YARN ResourceManager                          | `requests:memory=256Mi,cpu=10m,limits:memory=2048Mi,cpu=2000m`     |
+| `yarn.nodeManager.pdbMinAvailable`      | PDB for the YARN NodeManager                                    | `1`                                                                |
+| `yarn.nodeManager.replicas`             | Number of YARN NodeManager replicas                             | `1`                                                                |
+| `yarn.nodeManager.parallelCreate`       | Create all nodeManager statefulset pods in parallel (K8S 1.7+)  | `false`                                                            |
+| `yarn.nodeManager.resources`            | Resource limits and requests for YARN NodeManager pods          | `requests:memory=2048Mi,cpu=1000m,limits:memory=2048Mi,cpu=1000m`  |
+| `persistence.nameNode.enabled`          | Enable/disable persistent volume                                | `false`                                                            |
+| `persistence.nameNode.storageClass`     | Name of the StorageClass to use per your volume provider        | `-`                                                                |
+| `persistence.nameNode.accessMode`       | Access mode for the volume                                      | `ReadWriteOnce`                                                    |
+| `persistence.nameNode.size`             | Size of the volume                                              | `30Gi`                                                             |
+| `persistence.dataNode.enabled`          | Enable/disable persistent volume                                | `false`                                                            |
+| `persistence.dataNode.storageClass`     | Name of the StorageClass to use per your volume provider        | `-`                                                                |
+| `persistence.dataNode.accessMode`       | Access mode for the volume                                      | `ReadWriteOnce`                                                    |
+| `persistence.dataNode.size`             | Size of the volume                                              | `50Gi`                                                             |
+
+
+---
+
+## Customized Hadoop Base Docker Image
+
+This image is modified from [comcast/kube-yarn](https://github.com/Comcast/kube-yarn/tree/add-hadoop-image-versions) and [mgit-at/helm-hadoop-3](https://github.com/mgit-at/helm-hadoop-3). Currently, native libraries are not included.
+
+### Build and Push the Docker Image
+
+```bash
+# Set version
+HADOOP_VERSION=3.3.3
+
+# Build
+docker buildx build --push --platform "linux/arm64,linux/amd64" -t ghcr.io/fleeksoft/hbase/hdfs:latest -t ghcr.io/fleeksoft/hbase/hdfs:$HADOOP_VERSION .
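+
+# Optionally verify that both architectures made it into the pushed manifest
+# (sketch; requires Docker with the buildx plugin installed):
+docker buildx imagetools inspect ghcr.io/fleeksoft/hbase/hdfs:$HADOOP_VERSION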
+``` + +### Testing with minikube + +If you are running locally with minikube and want to try your images without pushing them to a registry, build the images on the minikube VM first: + +```bash +eval $(minikube docker-env) +# use the build command from above +``` + +--- + +## Development + +Help is always appreciated. Please create pull requests. + +### Open Issues + +- Include native libraries +- List of ports needs to be updated (cf. https://www.oreilly.com/library/view/big-data-analytics/9781788628846/5c5821cc-4a3d-498a-a3eb-23256cd79c8b.xhtml) + +### Upload a new version of the chart + +```bash +helm lint +helm package . +mv hadoop*.tgz docs/ +helm repo index docs/ --url https://pfisterer.github.io/apache-hadoop-helm/ +git add docs/ +git commit -a -m "Updated helm repository" +git push origin master +``` + +## Changes + +Version 1.2.0 +- Initial release of this chart +- Use multi-architecture base image +- Apache Hadoop 3.3.3 diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/image/Dockerfile b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/image/Dockerfile new file mode 100644 index 0000000..e60896f --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/image/Dockerfile @@ -0,0 +1,31 @@ +FROM openjdk:11-jdk + +ENV HADOOP_VERSION 3.3.3 +ENV HADOOP_HOME=/opt/hadoop + +ENV HADOOP_COMMON_HOME=${HADOOP_HOME} \ + HADOOP_HDFS_HOME=${HADOOP_HOME} \ + HADOOP_MAPRED_HOME=${HADOOP_HOME} \ + HADOOP_YARN_HOME=${HADOOP_HOME} \ + HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop \ + PATH=${PATH}:${HADOOP_HOME}/bin + +RUN curl --silent --output /tmp/hadoop.tgz https://ftp-stud.hs-esslingen.de/pub/Mirrors/ftp.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz && tar --directory /opt -xzf /tmp/hadoop.tgz && rm /tmp/hadoop.tgz +RUN ln -s /opt/hadoop-${HADOOP_VERSION} ${HADOOP_HOME} + +WORKDIR $HADOOP_HOME + +# Hdfs ports +EXPOSE 50010 50020 50070 50075 50090 8020 9000 + +# Mapred ports +EXPOSE 19888 + +#Yarn ports +EXPOSE 8030 8031 8032 8033 8040 8042 8088 + +#Other ports +EXPOSE 49707 2122 + + +#docker build --platform=linux/amd64 -t ghcr.io/fleeksoft/hbase/hdfs:3.3.3.2 . \ No newline at end of file diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/NOTES.txt b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/NOTES.txt new file mode 100644 index 0000000..65a543b --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/NOTES.txt @@ -0,0 +1,26 @@ +1. You can check the status of HDFS by running this command: + kubectl exec -n {{ .Release.Namespace }} -it {{ include "hadoop.fullname" . }}-hdfs-nn-0 -- /opt/hadoop/bin/hdfs dfsadmin -report + +2. You can list the yarn nodes by running this command: + kubectl exec -n {{ .Release.Namespace }} -it {{ include "hadoop.fullname" . }}-yarn-rm-0 -- /opt/hadoop/bin/yarn node -list + +3. Create a port-forward to the yarn resource manager UI: + kubectl port-forward -n {{ .Release.Namespace }} {{ include "hadoop.fullname" . }}-yarn-rm-0 8088:8088 + + Then open the ui in your browser: + + open http://localhost:8088 + +4. You can run included hadoop tests like this: + kubectl exec -n {{ .Release.Namespace }} -it {{ include "hadoop.fullname" . }}-yarn-nm-0 -- /opt/hadoop/bin/hadoop jar /opt/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-{{ .Values.hadoopVersion }}-tests.jar TestDFSIO -write -nrFiles 5 -fileSize 128MB -resFile /tmp/TestDFSIOwrite.txt + +5. 
You can list the mapreduce jobs like this: + kubectl exec -n {{ .Release.Namespace }} -it {{ include "hadoop.fullname" . }}-yarn-rm-0 -- /opt/hadoop/bin/mapred job -list + +6. This chart can also be used with the zeppelin chart + helm install --namespace {{ .Release.Namespace }} --set hdfs.useConfigMap=true,hdfs.configMapName={{ include "hadoop.fullname" . }} stable/zeppelin + +7. You can scale the number of yarn nodes like this: + helm upgrade {{ .Release.Name }} --set yarn.nodeManager.replicas=4 stable/hadoop + + Make sure to update the values.yaml if you want to make this permanent. diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/_helpers.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/_helpers.tpl new file mode 100644 index 0000000..89b72a1 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/_helpers.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "hadoop.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "hadoop.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "hadoop.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hadoop-configmap.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hadoop-configmap.yaml new file mode 100644 index 0000000..e63d5b0 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hadoop-configmap.yaml @@ -0,0 +1,323 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "hadoop.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +data: + bootstrap.sh: | + #!/bin/bash -x + + echo Starting + + : ${HADOOP_HOME:=/opt/hadoop} + + echo Using ${HADOOP_HOME} as HADOOP_HOME + + . 
$HADOOP_HOME/etc/hadoop/hadoop-env.sh + + # ------------------------------------------------------ + # Directory to find config artifacts + # ------------------------------------------------------ + + CONFIG_DIR="/tmp/hadoop-config" + + # ------------------------------------------------------ + # Copy config files from volume mount + # ------------------------------------------------------ + + for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HADOOP_HOME/etc/hadoop/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + + # ------------------------------------------------------ + # installing libraries if any + # (resource urls added comma separated to the ACP system variable) + # ------------------------------------------------------ + cd $HADOOP_HOME/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd - + + # ------------------------------------------------------ + # Start NAMENODE + # ------------------------------------------------------ + if [[ "${HOSTNAME}" =~ "hdfs-nn" ]]; then + # sed command changing REPLACEME in $HADOOP_HOME/etc/hadoop/hdfs-site.xml to actual port numbers + # sed -i "s/EXTERNAL_HTTP_PORT_REPLACEME/9864/" $HADOOP_HOME/etc/hadoop/hdfs-site.xml + # sed -i "s/EXTERNAL_DATA_PORT_REPLACEME/9866/" $HADOOP_HOME/etc/hadoop/hdfs-site.xml + + mkdir -p /root/hdfs/namenode + if [ ! -f /root/hdfs/namenode/formated ]; then + # Only format if necessary + $HADOOP_HOME/bin/hdfs namenode -format -force -nonInteractive && echo 1 > /root/hdfs/namenode/formated + fi + $HADOOP_HOME/bin/hdfs --loglevel {{ .Values.logLevel }} --daemon start namenode + fi + + # ------------------------------------------------------ + # Start DATA NODE + # ------------------------------------------------------ + if [[ "${HOSTNAME}" =~ "hdfs-dn" ]]; then + # Split hostname at "-" into an array + # Example hostname: hadoop-hadoop-hdfs-dn-0 + HOSTNAME_ARR=(${HOSTNAME//-/ }) + # Add instance number to start of external port ranges + # EXTERNAL_HTTP_PORT=$(({{ .Values.hdfs.dataNode.externalHTTPPortRangeStart }} + ${HOSTNAME_ARR[4]})) + # EXTERNAL_DATA_PORT=$(({{ .Values.hdfs.dataNode.externalDataPortRangeStart }} + ${HOSTNAME_ARR[4]})) + + # sed command changing REPLACEME in $HADOOP_HOME/etc/hadoop/hdfs-site.xml to actual port numbers + # sed -i "s/EXTERNAL_HTTP_PORT_REPLACEME/${EXTERNAL_HTTP_PORT}/" $HADOOP_HOME/etc/hadoop/hdfs-site.xml + # sed -i "s/EXTERNAL_DATA_PORT_REPLACEME/${EXTERNAL_DATA_PORT}/" $HADOOP_HOME/etc/hadoop/hdfs-site.xml + + mkdir -p /root/hdfs/datanode + + # Wait (with timeout) for namenode + TMP_URL="http://{{ include "hadoop.fullname" . }}-hdfs-nn:9870" + if timeout 5m bash -c "until curl -sf $TMP_URL; do echo Waiting for $TMP_URL; sleep 5; done"; then + $HADOOP_HOME/bin/hdfs --loglevel {{ .Values.logLevel }} --daemon start datanode + else + echo "$0: Timeout waiting for $TMP_URL, exiting." 
+        exit 1
+      fi
+
+    fi
+
+    # ------------------------------------------------------
+    # Start RESOURCE MANAGER and PROXY SERVER as daemons
+    # ------------------------------------------------------
+    if [[ "${HOSTNAME}" =~ "yarn-rm" ]]; then
+      $HADOOP_HOME/bin/yarn --loglevel {{ .Values.logLevel }} --daemon start resourcemanager
+      $HADOOP_HOME/bin/yarn --loglevel {{ .Values.logLevel }} --daemon start proxyserver
+    fi
+
+    # ------------------------------------------------------
+    # Start NODE MANAGER
+    # ------------------------------------------------------
+    if [[ "${HOSTNAME}" =~ "yarn-nm" ]]; then
+      sed -i '/<\/configuration>/d' $HADOOP_HOME/etc/hadoop/yarn-site.xml
+      cat >> $HADOOP_HOME/etc/hadoop/yarn-site.xml <<- EOM
+        <property>
+          <name>yarn.nodemanager.resource.memory-mb</name>
+          <value>${MY_MEM_LIMIT:-2048}</value>
+        </property>
+
+        <property>
+          <name>yarn.nodemanager.resource.cpu-vcores</name>
+          <value>${MY_CPU_LIMIT:-2}</value>
+        </property>
+    EOM
+
+      echo '</configuration>' >> $HADOOP_HOME/etc/hadoop/yarn-site.xml
+
+      # Wait with timeout for resourcemanager
+      TMP_URL="http://{{ include "hadoop.fullname" . }}-yarn-rm:8088/ws/v1/cluster/info"
+      if timeout 5m bash -c "until curl -sf $TMP_URL; do echo Waiting for $TMP_URL; sleep 5; done"; then
+        # wait for zookeeper to get leaders after nn live
+        sleep 10;
+        $HADOOP_HOME/bin/yarn nodemanager --loglevel {{ .Values.logLevel }}
+      else
+        echo "$0: Timeout waiting for $TMP_URL, exiting."
+        exit 1
+      fi
+
+    fi
+
+    # ------------------------------------------------------
+    # Tail logfiles for daemonized workloads (parameter -d)
+    # ------------------------------------------------------
+    if [[ $1 == "-d" ]]; then
+      # Wait until the daemons have started writing logs, then tail them forever
+      until find ${HADOOP_HOME}/logs -mmin -1 | egrep -q '.*'; do echo "$(date): Waiting for logs..." ; sleep 2 ; done
+      tail -F ${HADOOP_HOME}/logs/* &
+      while true; do sleep 1000; done
+    fi
+
+    # ------------------------------------------------------
+    # Start bash if requested (parameter -bash)
+    # ------------------------------------------------------
+    if [[ $1 == "-bash" ]]; then
+      /bin/bash
+    fi
+
+  core-site.xml: |
+    <?xml version="1.0"?>
+    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+    <configuration>
+      <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://{{ include "hadoop.fullname" . }}-hdfs-nn:9000/</value>
+        <description>NameNode URI</description>
+      </property>
+    </configuration>
+
+  hdfs-site.xml: |
+    <?xml version="1.0"?>
+    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+    <configuration>
+{{- if .Values.hdfs.webhdfs.enabled -}}
+      <property>
+        <name>dfs.webhdfs.enabled</name>
+        <value>true</value>
+      </property>
+{{- end -}}
+
+      <property>
+        <name>dfs.datanode.use.datanode.hostname</name>
+        <value>false</value>
+      </property>
+
+      <property>
+        <name>dfs.client.use.datanode.hostname</name>
+        <value>false</value>
+      </property>
+
+{{/*      <property>*/}}
+{{/*        <name>dfs.datanode.hostname</name>*/}}
+{{/*        <value>{{ .Values.hdfs.dataNode.externalHostname }}</value>*/}}
+{{/*      </property>*/}}
+
+{{/*      <property>*/}}
+{{/*        <name>dfs.datanode.http.address</name>*/}}
+{{/*        <value>0.0.0.0:EXTERNAL_HTTP_PORT_REPLACEME</value>*/}}
+{{/*      </property>*/}}
+
+{{/*      <property>*/}}
+{{/*        <name>dfs.datanode.address</name>*/}}
+{{/*        <value>0.0.0.0:EXTERNAL_DATA_PORT_REPLACEME</value>*/}}
+{{/*      </property>*/}}
+
+      <property>
+        <name>dfs.replication</name>
+        <value>3</value>
+      </property>
+
+      <property>
+        <name>dfs.datanode.data.dir</name>
+        <value>file:///root/hdfs/datanode</value>
+        <description>DataNode directory</description>
+      </property>
+
+      <property>
+        <name>dfs.namenode.name.dir</name>
+        <value>file:///root/hdfs/namenode</value>
+        <description>NameNode directory for namespace and transaction logs storage.</description>
+      </property>
+
+      <property>
+        <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
+        <value>false</value>
+      </property>
+
+      <!-- Bind to all interfaces -->
+      <property>
+        <name>dfs.namenode.rpc-bind-host</name>
+        <value>0.0.0.0</value>
+      </property>
+      <property>
+        <name>dfs.namenode.servicerpc-bind-host</name>
+        <value>0.0.0.0</value>
+      </property>
+      <!-- /Bind to all interfaces -->
+
+    </configuration>
+
+  mapred-site.xml: |
+    <?xml version="1.0"?>
+    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+    <configuration>
+      <property>
+        <name>mapreduce.framework.name</name>
+        <value>yarn</value>
+      </property>
+      <property>
+        <name>mapreduce.jobhistory.address</name>
+        <value>{{ include "hadoop.fullname" . }}-yarn-rm-0.{{ include "hadoop.fullname" . }}-yarn-rm.{{ .Release.Namespace }}.svc.cluster.local:10020</value>
+      </property>
+      <property>
+        <name>mapreduce.jobhistory.webapp.address</name>
+        <value>{{ include "hadoop.fullname" . }}-yarn-rm-0.{{ include "hadoop.fullname" . }}-yarn-rm.{{ .Release.Namespace }}.svc.cluster.local:19888</value>
+      </property>
+    </configuration>
+
+  slaves: |
+    localhost
+
+  yarn-site.xml: |
+    <?xml version="1.0"?>
+    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+    <configuration>
+      <property>
+        <name>yarn.resourcemanager.hostname</name>
+        <value>{{ include "hadoop.fullname" . }}-yarn-rm</value>
+      </property>
+
+      <!-- Bind to all interfaces -->
+      <property>
+        <name>yarn.resourcemanager.bind-host</name>
+        <value>0.0.0.0</value>
+      </property>
+      <property>
+        <name>yarn.nodemanager.bind-host</name>
+        <value>0.0.0.0</value>
+      </property>
+      <property>
+        <name>yarn.timeline-service.bind-host</name>
+        <value>0.0.0.0</value>
+      </property>
+      <!-- /Bind to all interfaces -->
+
+      <property>
+        <name>yarn.nodemanager.vmem-check-enabled</name>
+        <value>false</value>
+      </property>
+
+      <property>
+        <name>yarn.nodemanager.aux-services</name>
+        <value>mapreduce_shuffle</value>
+      </property>
+
+      <property>
+        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+      </property>
+
+      <property>
+        <description>List of directories to store localized files in.</description>
+        <name>yarn.nodemanager.local-dirs</name>
+        <value>/var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir</value>
+      </property>
+
+      <property>
+        <description>Where to store container logs.</description>
+        <name>yarn.nodemanager.log-dirs</name>
+        <value>/var/log/hadoop-yarn/containers</value>
+      </property>
+
+      <property>
+        <description>Where to aggregate logs to.</description>
+        <name>yarn.nodemanager.remote-app-log-dir</name>
+        <value>/var/log/hadoop-yarn/apps</value>
+      </property>
+
+      <property>
+        <name>yarn.application.classpath</name>
+        <value>
+          /opt/hadoop/etc/hadoop,
+          /opt/hadoop/share/hadoop/common/*,
+          /opt/hadoop/share/hadoop/common/lib/*,
+          /opt/hadoop/share/hadoop/hdfs/*,
+          /opt/hadoop/share/hadoop/hdfs/lib/*,
+          /opt/hadoop/share/hadoop/mapreduce/*,
+          /opt/hadoop/share/hadoop/mapreduce/lib/*,
+          /opt/hadoop/share/hadoop/yarn/*,
+          /opt/hadoop/share/hadoop/yarn/lib/*
+        </value>
+      </property>
+    </configuration>
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-dn-pdb.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-dn-pdb.yaml
new file mode 100644
index 0000000..9ad7172
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-dn-pdb.yaml
@@ -0,0 +1,16 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "hadoop.fullname" . }}-hdfs-dn
+  labels:
+    app.kubernetes.io/name: {{ include "hadoop.name" . }}
+    helm.sh/chart: {{ include "hadoop.chart" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/component: hdfs-dn
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: {{ include "hadoop.name" . }}
+      app.kubernetes.io/instance: {{ .Release.Name }}
+      app.kubernetes.io/component: hdfs-dn
+  minAvailable: {{ .Values.hdfs.dataNode.pdbMinAvailable }}
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-dn-statefulset.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-dn-statefulset.yaml
new file mode 100644
index 0000000..4642ac5
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-dn-statefulset.yaml
@@ -0,0 +1,113 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ include "hadoop.fullname" . }}-hdfs-dn
+  annotations:
+    checksum/config: {{ include (print $.Template.BasePath "/hadoop-configmap.yaml") . | sha256sum }}
+  labels:
+    app.kubernetes.io/name: {{ include "hadoop.name" . }}
+    helm.sh/chart: {{ include "hadoop.chart" . }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/component: hdfs-dn
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: {{ include "hadoop.name" . }}
+      app.kubernetes.io/instance: {{ .Release.Name }}
+      app.kubernetes.io/component: hdfs-dn
+  serviceName: {{ include "hadoop.fullname" . }}-hdfs-dn
+  replicas: {{ .Values.hdfs.dataNode.replicas }}
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: {{ include "hadoop.name" .
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-dn + spec: + affinity: + podAntiAffinity: + {{- if eq .Values.antiAffinity "hard" }} + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-dn + {{- else if eq .Values.antiAffinity "soft" }} + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 5 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-dn + {{- end }} + terminationGracePeriodSeconds: 0 + containers: + - name: hdfs-dn + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - "/bin/bash" + - "/tmp/hadoop-config/bootstrap.sh" + - "-d" + resources: +{{ toYaml .Values.hdfs.dataNode.resources | indent 10 }} + startupProbe: + httpGet: + path: / + port: 9864 + initialDelaySeconds: 20 + failureThreshold: 30 + periodSeconds: 5 + timeoutSeconds: 2 + readinessProbe: + httpGet: + path: / + port: 9864 + initialDelaySeconds: 30 + timeoutSeconds: 2 + livenessProbe: + httpGet: + path: / + port: 9864 + initialDelaySeconds: 30 + timeoutSeconds: 2 + volumeMounts: + - name: hadoop-config + mountPath: /tmp/hadoop-config + - name: dfs + mountPath: /root/hdfs/datanode + volumes: + - name: hadoop-config + configMap: + name: {{ include "hadoop.fullname" . }} + {{- if .Values.persistence.dataNode.enabled }} + volumeClaimTemplates: + - metadata: + name: dfs + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-dn + spec: + accessModes: + - {{ .Values.persistence.dataNode.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.dataNode.size | quote }} + {{- if .Values.persistence.dataNode.storageClass }} + {{- if (eq "-" .Values.persistence.dataNode.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.dataNode.storageClass }}" + {{- end }} + {{- end }} + {{- else }} + - name: dfs + emptyDir: {} + {{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-dn-svc.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-dn-svc.yaml new file mode 100644 index 0000000..712aafa --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-dn-svc.yaml @@ -0,0 +1,22 @@ +# A headless service to create DNS records +apiVersion: v1 +kind: Service +metadata: + name: {{ include "hadoop.fullname" . }}-hdfs-dn + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-dn +spec: + ports: + - name: dfs + port: 9000 + protocol: TCP + - name: webhdfs + port: 9864 + clusterIP: None + selector: + app.kubernetes.io/name: {{ include "hadoop.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-dn diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-nn-pdb.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-nn-pdb.yaml new file mode 100644 index 0000000..d77c6a4 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-nn-pdb.yaml @@ -0,0 +1,16 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "hadoop.fullname" . }}-hdfs-nn + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-nn +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-nn + minAvailable: {{ .Values.hdfs.nameNode.pdbMinAvailable }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-nn-statefulset.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-nn-statefulset.yaml new file mode 100644 index 0000000..1f2e2db --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-nn-statefulset.yaml @@ -0,0 +1,105 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "hadoop.fullname" . }}-hdfs-nn + annotations: + checksum/config: {{ include (print $.Template.BasePath "/hadoop-configmap.yaml") . | sha256sum }} + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-nn +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-nn + serviceName: {{ include "hadoop.fullname" . }}-hdfs-nn + replicas: 1 + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-nn + spec: + affinity: + podAntiAffinity: + {{- if eq .Values.antiAffinity "hard" }} + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-nn + {{- else if eq .Values.antiAffinity "soft" }} + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 5 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-nn + {{- end }} + terminationGracePeriodSeconds: 0 + containers: + - name: hdfs-nn + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - "/bin/bash" + - "/tmp/hadoop-config/bootstrap.sh" + - "-d" + resources: +{{ toYaml .Values.hdfs.nameNode.resources | indent 10 }} + readinessProbe: + httpGet: + path: / + port: 9870 + initialDelaySeconds: 30 + timeoutSeconds: 2 + livenessProbe: + httpGet: + path: / + port: 9870 + initialDelaySeconds: 30 + timeoutSeconds: 2 + volumeMounts: + - name: hadoop-config + mountPath: /tmp/hadoop-config + - name: dfs + mountPath: /root/hdfs/namenode + volumes: + - name: hadoop-config + configMap: + name: {{ include "hadoop.fullname" . }} + {{- if .Values.persistence.nameNode.enabled }} + volumeClaimTemplates: + - metadata: + name: dfs + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-nn + spec: + accessModes: + - {{ .Values.persistence.nameNode.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.nameNode.size | quote }} + {{- if .Values.persistence.nameNode.storageClass }} + {{- if (eq "-" .Values.persistence.nameNode.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.nameNode.storageClass }}" + {{- end }} + {{- end }} + {{- else }} + - name: dfs + emptyDir: {} + {{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-nn-svc.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-nn-svc.yaml new file mode 100644 index 0000000..7496787 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/hdfs-nn-svc.yaml @@ -0,0 +1,22 @@ +# A headless service to create DNS records +apiVersion: v1 +kind: Service +metadata: + name: {{ include "hadoop.fullname" . }}-hdfs-nn + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-nn +spec: + ports: + - name: dfs + port: 9000 + protocol: TCP + - name: webhdfs + port: 9870 + clusterIP: None + selector: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: hdfs-nn diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-nm-pdb.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-nm-pdb.yaml new file mode 100644 index 0000000..a8e34bc --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-nm-pdb.yaml @@ -0,0 +1,16 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "hadoop.fullname" . }}-yarn-nm + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-nm +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-nm + minAvailable: {{ .Values.yarn.nodeManager.pdbMinAvailable }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-nm-statefulset.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-nm-statefulset.yaml new file mode 100644 index 0000000..63a719b --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-nm-statefulset.yaml @@ -0,0 +1,104 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "hadoop.fullname" . }}-yarn-nm + annotations: + checksum/config: {{ include (print $.Template.BasePath "/hadoop-configmap.yaml") . | sha256sum }} + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-nm +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-nm + serviceName: {{ include "hadoop.fullname" . }}-yarn-nm + replicas: {{ .Values.yarn.nodeManager.replicas }} +{{- if .Values.yarn.nodeManager.parallelCreate }} + podManagementPolicy: Parallel +{{- end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-nm + spec: + affinity: + podAntiAffinity: + {{- if eq .Values.antiAffinity "hard" }} + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-nm + {{- else if eq .Values.antiAffinity "soft" }} + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 5 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-nm + {{- end }} + terminationGracePeriodSeconds: 0 + containers: + - name: yarn-nm + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + ports: + - containerPort: 8088 + name: web + command: + - "/bin/bash" + - "/tmp/hadoop-config/bootstrap.sh" + - "-d" + resources: +{{ toYaml .Values.yarn.nodeManager.resources | indent 10 }} + startupProbe: + httpGet: + path: /node + port: 8042 + initialDelaySeconds: 15 + failureThreshold: 30 + periodSeconds: 2 + timeoutSeconds: 2 + readinessProbe: + httpGet: + path: /node + port: 8042 + initialDelaySeconds: 35 + timeoutSeconds: 2 + livenessProbe: + httpGet: + path: /node + port: 8042 + initialDelaySeconds: 10 + timeoutSeconds: 2 + env: + - name: MY_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: yarn-nm + resource: limits.cpu + divisor: 1 + - name: MY_MEM_LIMIT + valueFrom: + resourceFieldRef: + containerName: yarn-nm + resource: limits.memory + divisor: 1M + volumeMounts: + - name: hadoop-config + mountPath: /tmp/hadoop-config + volumes: + - name: hadoop-config + configMap: + name: {{ include "hadoop.fullname" . 
}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-nm-svc.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-nm-svc.yaml new file mode 100644 index 0000000..d70f596 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-nm-svc.yaml @@ -0,0 +1,23 @@ +# A headless service to create DNS records +apiVersion: v1 +kind: Service +metadata: + name: {{ include "hadoop.fullname" . }}-yarn-nm + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-nm +spec: + ports: + - port: 8088 + name: web + - port: 8082 + name: web2 + - port: 8042 + name: api + clusterIP: None + selector: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-nm diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-rm-pdb.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-rm-pdb.yaml new file mode 100644 index 0000000..fafeecc --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-rm-pdb.yaml @@ -0,0 +1,16 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "hadoop.fullname" . }}-yarn-rm + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-rm +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-rm + minAvailable: {{ .Values.yarn.resourceManager.pdbMinAvailable }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-rm-statefulset.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-rm-statefulset.yaml new file mode 100644 index 0000000..0d86491 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-rm-statefulset.yaml @@ -0,0 +1,80 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "hadoop.fullname" . }}-yarn-rm + annotations: + checksum/config: {{ include (print $.Template.BasePath "/hadoop-configmap.yaml") . | sha256sum }} + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-rm +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-rm + serviceName: {{ include "hadoop.fullname" . }}-yarn-rm + replicas: 1 + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-rm + spec: + affinity: + podAntiAffinity: + {{- if eq .Values.antiAffinity "hard" }} + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-rm + {{- else if eq .Values.antiAffinity "soft" }} + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 5 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-rm + {{- end }} + terminationGracePeriodSeconds: 0 + containers: + - name: yarn-rm + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + ports: + - containerPort: 8088 + name: web + command: + - "/bin/bash" + - "/tmp/hadoop-config/bootstrap.sh" + - "-d" + resources: +{{ toYaml .Values.yarn.resourceManager.resources | indent 10 }} + readinessProbe: + httpGet: + path: /ws/v1/cluster/info + port: 8088 + initialDelaySeconds: 5 + timeoutSeconds: 2 + livenessProbe: + httpGet: + path: /ws/v1/cluster/info + port: 8088 + initialDelaySeconds: 10 + timeoutSeconds: 2 + volumeMounts: + - name: hadoop-config + mountPath: /tmp/hadoop-config + volumes: + - name: hadoop-config + configMap: + name: {{ include "hadoop.fullname" . }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-rm-svc.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-rm-svc.yaml new file mode 100644 index 0000000..47c2b45 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-rm-svc.yaml @@ -0,0 +1,19 @@ +# A headless service to create DNS records +apiVersion: v1 +kind: Service +metadata: + name: {{ include "hadoop.fullname" . }}-yarn-rm + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-rm +spec: + ports: + - port: 8088 + name: web + clusterIP: None + selector: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-rm diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-ui-svc.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-ui-svc.yaml new file mode 100644 index 0000000..2ef3d6a --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/templates/yarn-ui-svc.yaml @@ -0,0 +1,18 @@ +# Service to access the yarn web ui +apiVersion: v1 +kind: Service +metadata: + name: {{ include "hadoop.fullname" . }}-yarn-ui + labels: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + helm.sh/chart: {{ include "hadoop.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-ui +spec: + ports: + - port: 8088 + name: web + selector: + app.kubernetes.io/name: {{ include "hadoop.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: yarn-rm diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/values.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/values.yaml new file mode 100644 index 0000000..0409484 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/hadoop/values.yaml @@ -0,0 +1,90 @@ +image: + repository: ghcr.io/fleeksoft/hbase/hdfs + tag: 3.3.3.2 + pullPolicy: IfNotPresent + +# The version of the hadoop libraries being used in the image. 
+hadoopVersion: 3.3.3 +logLevel: INFO + +# Select antiAffinity as either hard or soft, default is soft +antiAffinity: "soft" + +hdfs: + nameNode: + pdbMinAvailable: 1 + + resources: + requests: + memory: "256Mi" + cpu: "10m" + limits: + memory: "2048Mi" + cpu: "1000m" + + dataNode: + # Will be used as dfs.datanode.hostname + # You still need to set up services + ingress for every DN + # Datanodes will expect to +# externalHostname: example.com +# externalDataPortRangeStart: 50500 +# externalHTTPPortRangeStart: 51000 + + replicas: 1 + + pdbMinAvailable: 1 + + resources: + requests: + memory: "256Mi" + cpu: "10m" + limits: + memory: "2048Mi" + cpu: "1000m" + + webhdfs: + enabled: true + +yarn: + resourceManager: + pdbMinAvailable: 1 + + resources: + requests: + memory: "256Mi" + cpu: "10m" + limits: + memory: "2048Mi" + cpu: "2000m" + + nodeManager: + pdbMinAvailable: 1 + + # The number of YARN NodeManager instances. + replicas: 1 + + # Create statefulsets in parallel (K8S 1.7+) + parallelCreate: false + + # CPU and memory resources allocated to each node manager pod. + # This should be tuned to fit your workload. + resources: + requests: + memory: "2048Mi" + cpu: "1000m" + limits: + memory: "2048Mi" + cpu: "1000m" + +persistence: + nameNode: + enabled: false + storageClass: "-" + accessMode: ReadWriteOnce + size: 30Gi + + dataNode: + enabled: false + storageClass: "-" + accessMode: ReadWriteOnce + size: 50Gi diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/.helmignore b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/Chart.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/Chart.yaml new file mode 100644 index 0000000..c9540c7 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.8.0 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Apache ZooKeeper provides a reliable, centralized register of configuration + data and services for distributed applications. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/zookeeper +icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-220x234.png +keywords: +- zookeeper +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: zookeeper +sources: +- https://github.com/bitnami/bitnami-docker-zookeeper +- https://zookeeper.apache.org/ +version: 10.0.2 diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/README.md b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/README.md new file mode 100644 index 0000000..a94be78 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/README.md @@ -0,0 +1,523 @@ + + +# Apache ZooKeeper packaged by Bitnami + +Apache ZooKeeper provides a reliable, centralized register of configuration data and services for distributed applications. + +[Overview of Apache ZooKeeper](https://zookeeper.apache.org) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/zookeeper +``` + +## Introduction + +This chart bootstraps a [ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/zookeeper +``` + +These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
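+
+If you want to keep the release history around (for example, to inspect it later with `helm history`), Helm 3 supports the `--keep-history` flag; this is standard Helm behavior rather than anything specific to this chart:
+
+```console
+$ helm delete my-release --keep-history
+```
+
+Note that, when persistence is enabled, the PVCs created from the statefulset's volume claim templates are not removed by this command. Delete them with `kubectl delete pvc` if you also want to discard the data (the claims typically follow the `data-<release-name>-zookeeper-<ordinal>` naming pattern; check your cluster for the exact names).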
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Extra objects to deploy (evaluated as a template) | `[]` | +| `commonLabels` | Add labels to all the deployed resources | `{}` | +| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | +| `namespaceOverride` | Override namespace for ZooKeeper resources | `""` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + + +### ZooKeeper chart parameters + +| Name | Description | Value | +| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `image.registry` | ZooKeeper image registry | `docker.io` | +| `image.repository` | ZooKeeper image repository | `bitnami/zookeeper` | +| `image.tag` | ZooKeeper image tag (immutable tags are recommended) | `3.8.0-debian-11-r5` | +| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.client.enabled` | Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5 | `false` | +| `auth.client.clientUser` | User that will use ZooKeeper clients to auth | `""` | +| `auth.client.clientPassword` | Password that will use ZooKeeper clients to auth | `""` | +| `auth.client.serverUsers` | Comma, semicolon or whitespace separated list of user to be created | `""` | +| `auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | +| `auth.client.existingSecret` | Use existing secret (ignores previous passwords) | `""` | +| `auth.quorum.enabled` | Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5 | `false` | +| `auth.quorum.learnerUser` | User that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` | +| `auth.quorum.learnerPassword` | Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` | +| `auth.quorum.serverUsers` | Comma, semicolon or whitespace separated list of users for the quorumServers. 
| `""` | +| `auth.quorum.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | +| `auth.quorum.existingSecret` | Use existing secret (ignores previous passwords) | `""` | +| `tickTime` | Basic time unit (in milliseconds) used by ZooKeeper for heartbeats | `2000` | +| `initLimit` | ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader | `10` | +| `syncLimit` | How far out of date a server can be from a leader | `5` | +| `preAllocSize` | Block size for transaction log file | `65536` | +| `snapCount` | The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) | `100000` | +| `maxClientCnxns` | Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble | `60` | +| `maxSessionTimeout` | Maximum session timeout (in milliseconds) that the server will allow the client to negotiate | `40000` | +| `heapSize` | Size (in MB) for the Java Heap options (Xmx and Xms) | `1024` | +| `fourlwCommandsWhitelist` | A list of comma separated Four Letter Words commands that can be executed | `srvr, mntr, ruok` | +| `minServerId` | Minimal SERVER_ID value, nodes increment their IDs respectively | `1` | +| `listenOnAllIPs` | Allow ZooKeeper to listen for connections from its peers on all available IP addresses | `false` | +| `autopurge.snapRetainCount` | The most recent snapshots amount (and corresponding transaction logs) to retain | `3` | +| `autopurge.purgeInterval` | The time interval (in hours) for which the purge task has to be triggered | `0` | +| `logLevel` | Log level for the ZooKeeper server. ERROR by default | `ERROR` | +| `jvmFlags` | Default JVM flags for the ZooKeeper process | `""` | +| `dataLogDir` | Dedicated data log directory | `""` | +| `configuration` | Configure ZooKeeper with a custom zoo.cfg file | `""` | +| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for ZooKeeper | `""` | +| `extraEnvVars` | Array with extra environment variables to add to ZooKeeper nodes | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ZooKeeper nodes | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars for ZooKeeper nodes | `""` | +| `command` | Override default container command (useful when using custom images) | `["/scripts/setup.sh"]` | +| `args` | Override default container args (useful when using custom images) | `[]` | + + +### Statefulset parameters + +| Name | Description | Value | +| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `replicaCount` | Number of ZooKeeper nodes | `1` | +| `containerPorts.client` | ZooKeeper client container port | `2181` | +| `containerPorts.tls` | ZooKeeper TLS container port | `3181` | +| `containerPorts.follower` | ZooKeeper follower container port | `2888` | +| `containerPorts.election` | ZooKeeper election container port | `3888` | +| `livenessProbe.enabled` | Enable livenessProbe on ZooKeeper containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout 
seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `livenessProbe.probeCommandTimeout` | Probe command timeout for livenessProbe | `2` | +| `readinessProbe.enabled` | Enable readinessProbe on ZooKeeper containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `readinessProbe.probeCommandTimeout` | Probe command timeout for readinessProbe | `2` | +| `startupProbe.enabled` | Enable startupProbe on ZooKeeper containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | for the ZooKeeper container(s) to automate configuration before or after startup | `{}` | +| `resources.limits` | The resources limits for the ZooKeeper containers | `{}` | +| `resources.requests.memory` | The requested memory for the ZooKeeper containers | `256Mi` | +| `resources.requests.cpu` | The requested cpu for the ZooKeeper containers | `250m` | +| `podSecurityContext.enabled` | Enabled ZooKeeper pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set ZooKeeper pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled ZooKeeper containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set ZooKeeper containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set ZooKeeper containers' Security Context runAsNonRoot | `true` | +| `hostAliases` | ZooKeeper pods host aliases | `[]` | +| `podLabels` | Extra labels for ZooKeeper pods | `{}` | +| `podAnnotations` | Annotations for ZooKeeper pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel` | +| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand | `""` | +| `schedulerName` | Kubernetes pod scheduler registry | `""` | +| `updateStrategy.type` | ZooKeeper statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | ZooKeeper statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the ZooKeeper pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the ZooKeeper pod(s) | `[]` | +| `initContainers` | Add additional init containers to the ZooKeeper pod(s) | `[]` | +| `pdb.create` | Deploy a pdb object for the ZooKeeper pod | `false` | +| `pdb.minAvailable` | Minimum available ZooKeeper replicas | `""` | +| `pdb.maxUnavailable` | Maximum unavailable ZooKeeper replicas | `1` | + + +### Traffic Exposure parameters + +| Name | Description | Value | +| ------------------------------------------- | --------------------------------------------------------------------------------------- | ----------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | ZooKeeper client service port | `2181` | +| `service.ports.tls` | ZooKeeper TLS service port | `3181` | +| `service.ports.follower` | ZooKeeper follower service port | `2888` | +| `service.ports.election` | ZooKeeper election service port | `3888` | +| `service.nodePorts.client` | Node port for clients | `""` | +| `service.nodePorts.tls` | Node port for TLS | `""` | +| `service.disableBaseClientPort` | Remove client port from service definitions. | `false` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | ZooKeeper service Cluster IP | `""` | +| `service.loadBalancerIP` | ZooKeeper service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | ZooKeeper service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | ZooKeeper service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for ZooKeeper service | `{}` | +| `service.extraPorts` | Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value) | `[]` | +| `service.headless.annotations` | Annotations for the Headless Service | `{}` | +| `service.headless.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods | `true` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for ZooKeeper pod | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to use. 
| `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | + + +### Persistence parameters + +| Name | Description | Value | +| -------------------------------------- | ------------------------------------------------------------------------------ | ------------------- | +| `persistence.enabled` | Enable ZooKeeper data persistence using PVC. If false, use emptyDir | `true` | +| `persistence.existingClaim` | Name of an existing PVC to use (only when deploying a single replica) | `""` | +| `persistence.storageClass` | PVC Storage Class for ZooKeeper data volume | `""` | +| `persistence.accessModes` | PVC Access modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data PVC | `{}` | +| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's dedicated data log directory | `8Gi` | +| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory | `""` | +| `persistence.dataLogDir.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data log PVC | `{}` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | ------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r4` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +### Metrics parameters + +| Name | Description | Value | +| ------------------------------------------ | ------------------------------------------------------------------------------------- | ----------- | +| `metrics.enabled` | Enable Prometheus to access ZooKeeper metrics endpoint | `false` | +| `metrics.containerPort` | ZooKeeper Prometheus Exporter container port | `9141` | +| `metrics.service.type` | ZooKeeper Prometheus Exporter service type | `ClusterIP` | +| `metrics.service.port` | ZooKeeper Prometheus Exporter service port | `9141` | +| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the 
ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` | + + +### TLS/SSL parameters + +| Name | Description | Value | +| ----------------------------------------- | -------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- | +| `tls.client.enabled` | Enable TLS for client connections | `false` | +| `tls.client.auth` | SSL Client auth. Can be "none", "want" or "need". | `none` | +| `tls.client.autoGenerated` | Generate automatically self-signed TLS certificates for ZooKeeper client communications | `false` | +| `tls.client.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications | `""` | +| `tls.client.existingSecretKeystoreKey` | The secret key from the tls.client.existingSecret containing the Keystore. | `""` | +| `tls.client.existingSecretTruststoreKey` | The secret key from the tls.client.existingSecret containing the Truststore. | `""` | +| `tls.client.keystorePath` | Location of the KeyStore file used for Client connections | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks` | +| `tls.client.truststorePath` | Location of the TrustStore file used for Client connections | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks` | +| `tls.client.passwordsSecretName` | Existing secret containing Keystore and truststore passwords | `""` | +| `tls.client.passwordsSecretKeystoreKey` | The secret key from the tls.client.passwordsSecretName containing the password for the Keystore. | `""` | +| `tls.client.passwordsSecretTruststoreKey` | The secret key from the tls.client.passwordsSecretName containing the password for the Truststore. | `""` | +| `tls.client.keystorePassword` | Password to access KeyStore if needed | `""` | +| `tls.client.truststorePassword` | Password to access TrustStore if needed | `""` | +| `tls.quorum.enabled` | Enable TLS for quorum protocol | `false` | +| `tls.quorum.auth` | SSL Quorum Client auth. Can be "none", "want" or "need". | `none` | +| `tls.quorum.autoGenerated` | Create self-signed TLS certificates. Currently only supports PEM certificates. 
| `false` | +| `tls.quorum.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol | `""` | +| `tls.quorum.existingSecretKeystoreKey` | The secret key from the tls.quorum.existingSecret containing the Keystore. | `""` | +| `tls.quorum.existingSecretTruststoreKey` | The secret key from the tls.quorum.existingSecret containing the Truststore. | `""` | +| `tls.quorum.keystorePath` | Location of the KeyStore file used for Quorum protocol | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks` | +| `tls.quorum.truststorePath` | Location of the TrustStore file used for Quorum protocol | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks` | +| `tls.quorum.passwordsSecretName` | Existing secret containing Keystore and truststore passwords | `""` | +| `tls.quorum.passwordsSecretKeystoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore. | `""` | +| `tls.quorum.passwordsSecretTruststoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. | `""` | +| `tls.quorum.keystorePassword` | Password to access KeyStore if needed | `""` | +| `tls.quorum.truststorePassword` | Password to access TrustStore if needed | `""` | +| `tls.resources.limits` | The resources limits for the TLS init container | `{}` | +| `tls.resources.requests` | The requested resources for the TLS init container | `{}` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set auth.clientUser=newUser \ + bitnami/zookeeper +``` + +The above command sets the ZooKeeper user to `newUser`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/zookeeper +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Configure log level + +You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable or the parameter `logLevel`. By default, it is set to `ERROR` because each use of the liveness probe and the readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection, generating a high volume of noise in your logs. + +In order to remove that log noise so levels can be set to 'INFO', two changes must be made. + +First, ensure that you are not getting metrics via the deprecated pattern of polling 'mntr' on the ZooKeeper client port. 
The preferred method of polling for Apache ZooKeeper metrics is the ZooKeeper metrics server. This is supported in this chart when setting `metrics.enabled` to `true`.
+
+Second, to avoid the connection/disconnection messages from the probes, you can set custom values for these checks that direct them to the ZooKeeper Admin Server instead of the client port. By default, an Admin Server will be started that listens on `localhost` at port `8080`. The following is an example of this use of the Admin Server for probes:
+
+```yaml
+livenessProbe:
+  enabled: false
+readinessProbe:
+  enabled: false
+customLivenessProbe:
+  exec:
+    command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep ruok']
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 6
+customReadinessProbe:
+  exec:
+    command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null']
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 6
+```
+
+You can also set the log4j logging level and which log appenders are turned on by using `ZOO_LOG4J_PROP`, which maps to `zookeeper.root.logger` inside `conf/log4j.properties` and defaults to:
+
+```console
+zookeeper.root.logger=INFO, CONSOLE
+```
+
+The available appenders are:
+
+- CONSOLE
+- ROLLINGFILE
+- RFAAUDIT
+- TRACEFILE
+
+## Persistence
+
+The [Bitnami ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use a Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+### Configure the data log directory
+
+You can use a dedicated device for logs (instead of using the data directory) to help avoid contention between logging and snapshots. To do so, set the `dataLogDir` parameter to the path to be used for writing transaction logs. Alternatively, set this parameter to an empty string and the log will be written to the data directory (ZooKeeper's default behavior).
+
+When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information.
+
+### Set pod affinity
+
+This chart allows you to set custom pod affinity using the `affinity` parameter.
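+For instance, a values snippet along the following lines (a minimal sketch; the labels shown assume the default chart name and a release named `my-release`, so adjust them to match your deployment) forces ZooKeeper replicas onto distinct nodes:
+
+```yaml
+affinity:
+  podAntiAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      # Never co-locate two ZooKeeper pods on the same node
+      - topologyKey: kubernetes.io/hostname
+        labelSelector:
+          matchLabels:
+            app.kubernetes.io/name: zookeeper
+            app.kubernetes.io/instance: my-release
+```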
+
+Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available in the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 10.0.0
+
+This new version of the chart adds support for server-server authentication.
+The chart previously supported client-server authentication; to avoid confusion, the previous parameters have been renamed from `auth.*` to `auth.client.*`.
+
+### To 9.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.8.0. Upgrade compatibility is not guaranteed.
+
+### To 8.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository.
+
+Affected values:
+
+- `allowAnonymousLogin` is deprecated.
+- `containerPort`, `tlsContainerPort`, `followerContainerPort` and `electionContainerPort` have been regrouped under the `containerPorts` map.
+- `service.port`, `service.tlsClientPort`, `service.followerPort`, and `service.electionPort` have been regrouped under the `service.ports` map.
+- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map.
+- `podDisruptionBudget.*` parameters are renamed to `pdb.*`.
+
+### To 7.0.0
+
+This new version renames the parameters used to configure TLS for both client and quorum.
+
+- `service.tls.disable_base_client_port` is renamed to `service.disableBaseClientPort`
+- `service.tls.client_port` is renamed to `service.tlsClientPort`
+- `service.tls.client_enable` is renamed to `tls.client.enabled`
+- `service.tls.client_keystore_path` is renamed to `tls.client.keystorePath`
+- `service.tls.client_truststore_path` is renamed to `tls.client.truststorePath`
+- `service.tls.client_keystore_password` is renamed to `tls.client.keystorePassword`
+- `service.tls.client_truststore_password` is renamed to `tls.client.truststorePassword`
+- `service.tls.quorum_enable` is renamed to `tls.quorum.enabled`
+- `service.tls.quorum_keystore_path` is renamed to `tls.quorum.keystorePath`
+- `service.tls.quorum_truststore_path` is renamed to `tls.quorum.truststorePath`
+- `service.tls.quorum_keystore_password` is renamed to `tls.quorum.keystorePassword`
+- `service.tls.quorum_truststore_password` is renamed to `tls.quorum.truststorePassword`
+
+### To 6.1.0
+
+This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
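+
+For example, when working from a local copy of the chart sources (the paths here are illustrative):
+
+```console
+$ helm dependency update ./zookeeper
+$ helm upgrade my-release ./zookeeper
+```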
+
+### To 6.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/zookeeper/administration/upgrade-helm3/).
+
+### To 5.21.0
+
+A couple of parameters related to ZooKeeper metrics were renamed or removed in favor of new ones:
+
+- `metrics.port` is renamed to `metrics.containerPort`.
+- `metrics.annotations` is deprecated in favor of `metrics.service.annotations`.
+
+### To 3.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.5.5. Note that to perform an automatic upgrade
+of the application, each node will need to have at least one snapshot file created in the data directory. If not, the
+new version of the application won't be able to start the service. Please refer to [ZOOKEEPER-3056](https://issues.apache.org/jira/browse/ZOOKEEPER-3056)
+to find ways to work around this issue in case you are facing it.
+
+### To 2.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets.
+Use the workaround below to upgrade from versions prior to 2.0.0. The following example assumes that the release name is `zookeeper`:
+
+```console
+$ kubectl delete statefulset zookeeper-zookeeper --cascade=false
+```
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions prior to 1.0.0. The following example assumes that the release name is `zookeeper`:
+
+```console
+$ kubectl delete statefulset zookeeper-zookeeper --cascade=false
+```
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/.helmignore b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/Chart.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/Chart.yaml
new file mode 100644
index 0000000..bd152e3
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+apiVersion: v2
+appVersion: 1.16.0
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/master/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: Bitnami
+  url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 1.16.0
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/README.md b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/README.md
new file mode 100644
index 0000000..3b5e09c
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/README.md
@@ -0,0 +1,350 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+The following tables list the helpers available in the library, grouped into different sections.
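+
+As a quick orientation, a template in a chart that consumes this library typically combines several of these helpers. The following is a minimal sketch (not taken from any particular chart; the port number and its name are illustrative):
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+spec:
+  ports:
+    - port: 2181
+      name: tcp-client
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+```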
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-----------------------------------|-----------------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. 
+| `common.passwords.manage` | Generate a secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. |
+| `common.secrets.exists` | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $` |
+
+### Storage
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`; value is the value that should be rendered as a template, and context is frequently the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.utils.fieldToEnvVar` | Build an environment variable name given a field. | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
+| `common.utils.getValueFromKey` | Gets a value from the `.Values` object given its key path | `dict "key" "path.to.key" "context" $` |
+| `common.utils.getKeyFromList` | Returns the first `.Values` key with a defined value, or the first of the list if none are defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` |
+
+### Validations
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.validations.values.single.empty` | Validate that a value is not empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $`; secret, field and subchart are optional. In case they are given, the helper will generate instructions on how to get the value. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty` | Validate that multiple values are not empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) |
+| `common.validations.values.mariadb.passwords` | This helper will ensure the required passwords for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $`; the subchart field is optional and should be true or false depending on whether the MariaDB chart is used as a subchart. |
+| `common.validations.values.mysql.passwords` | This helper will ensure the required passwords for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $`; the subchart field is optional and should be true or false depending on whether the MySQL chart is used as a subchart. |
+| `common.validations.values.postgresql.passwords` | This helper will ensure the required passwords for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $`; the subchart field is optional and should be true or false depending on whether the PostgreSQL chart is used as a subchart. |
+| `common.validations.values.redis.passwords` | This helper will ensure the required passwords for Redis&reg; are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $`; the subchart field is optional and should be true or false depending on whether the Redis&reg; chart is used as a subchart. |
+| `common.validations.values.cassandra.passwords` | This helper will ensure the required passwords for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $`; the subchart field is optional and should be true or false depending on whether the Cassandra chart is used as a subchart. |
+| `common.validations.values.mongodb.passwords` | This helper will ensure the required passwords for MongoDB&reg; are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $`; the subchart field is optional and should be true or false depending on whether the MongoDB&reg; chart is used as a subchart. |
+
+### Warnings
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.warnings.rollingTag` | Warning about using a rolling tag. | `ImageRoot`, see [ImageRoot](#imageroot) for the structure. |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if the image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information in logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName: "" is used, which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright &copy; 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_affinities.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a soft nodeAffinity definition
+{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.soft" -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard nodeAffinity definition
+{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes.hard" -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: {{ .key }}
+ operator: In
+ values:
+ {{- range .values }}
+ - {{ . | quote }}
+ {{- end }}
+{{- end -}}
+
+{{/*
+Return a nodeAffinity definition
+{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
+*/}}
+{{- define "common.affinities.nodes" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.nodes.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.nodes.hard" .
-}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Return a soft podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.soft" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ namespaces:
+ - {{ .context.Release.Namespace | quote }}
+ topologyKey: kubernetes.io/hostname
+ weight: 1
+{{- end -}}
+
+{{/*
+Return a hard podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}}
+*/}}
+{{- define "common.affinities.pods.hard" -}}
+{{- $component := default "" .component -}}
+{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
+requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }}
+ {{- if not (empty $component) }}
+ {{ printf "app.kubernetes.io/component: %s" $component }}
+ {{- end }}
+ {{- range $key, $value := $extraMatchLabels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ namespaces:
+ - {{ .context.Release.Namespace | quote }}
+ topologyKey: kubernetes.io/hostname
+{{- end -}}
+
+{{/*
+Return a podAffinity/podAntiAffinity definition
+{{ include "common.affinities.pods" (dict "type" "soft" "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}}
+*/}}
+{{- define "common.affinities.pods" -}}
+ {{- if eq .type "soft" }}
+ {{- include "common.affinities.pods.soft" . -}}
+ {{- else if eq .type "hard" }}
+ {{- include "common.affinities.pods.hard" . -}}
+ {{- end -}}
+{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_capabilities.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..9d9b760 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return the target Kubernetes version
+*/}}
+{{- define "common.capabilities.kubeVersion" -}}
+{{- if .Values.global }}
+ {{- if .Values.global.kubeVersion }}
+ {{- .Values.global.kubeVersion -}}
+ {{- else }}
+ {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+ {{- end -}}
+{{- else }}
+{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for poddisruptionbudget.
+*/}}
+{{- define "common.capabilities.policy.apiVersion" -}}
+{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "policy/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "common.capabilities.networkPolicy.apiVersion" -}}
+{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .)
-}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. 
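+As an illustration (assumed output shape, not verbatim), on Helm 3.3+ the string produced by
+".Capabilities | toString" ends with the printed HelmVersion struct, e.g. "...{v3.8.0 <commit> clean go1.17.5}}",
+which is the trailing "{v...}}" pattern the regexMatch below looks for.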
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+ {{- true -}}
+{{- end -}}
+{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_errors.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..a79cc2e --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+ - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+ - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+ {{- $validationErrors := join "" .validationErrors -}}
+ {{- if and $validationErrors .context.Release.IsUpgrade -}}
+ {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+ {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims."
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_images.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_ingress.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..8caf73a --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
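+For example (illustrative values), with serviceName "my-svc" and servicePort 80 this renders
+"serviceName: my-svc" / "servicePort: 80" under extensions/v1beta1 or networking.k8s.io/v1beta1,
+and "service: { name: my-svc, port: { number: 80 } }" under networking.k8s.io/v1.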
+ +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_labels.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_names.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_names.tpl new file mode 100644 index 0000000..1bdac8b --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_names.tpl @@ -0,0 +1,70 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_secrets.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..a53fb44 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. 
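+For example (illustrative): if the computed fullname is "my-release-app" and defaultNameSuffix is
+"tls", this returns "my-release-app-tls"; if existingSecret is provided (either as the plain string
+"mySecret" or as an object whose name is "mySecret"), it returns "mySecret" instead.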
+ +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. 
Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
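+
+For example (illustrative), to render a Secret manifest only when none exists yet:
+{{- if not (include "common.secrets.exists" (dict "secret" (include "common.names.fullname" .) "context" $)) }}
+# ... Secret manifest here ...
+{{- end }}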
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_storage.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_tplvalues.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_utils.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..8c22b2a --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_warnings.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..f820ec1 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_mysql.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 0000000..74472a0 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.mysql.values.key.auth" -}}
+ {{- if .subchart -}}
+ mysql.auth
+ {{- else -}}
+ auth
+ {{- end -}}
+{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+ - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+ - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+ {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+ {{- $enabled := include "common.postgresql.values.enabled" . -}}
+ {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+ {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+ {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+ {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+ {{- if (eq $enabledReplication "true") -}}
+ {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether to evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+ - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+ {{- if .context.Values.global -}}
+ {{- if .context.Values.global.postgresql -}}
+ {{- index .context.Values.global.postgresql .key | quote -}}
+ {{- end -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+ {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+ {{- if .subchart -}}
+ {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+ {{- else -}}
+ {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
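+(Illustrative note: when the chart runs standalone there is normally no top-level "enabled" value, so
+"not .context.Values.enabled" evaluates to true; when used as a subchart it reads "postgresql.enabled".)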
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_redis.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..dcccfc1 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}}
+
+ {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+ {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+ {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+ {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+ {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+ {{- $requiredPasswords := list -}}
+
+ {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+ {{- if eq $useAuth "true" -}}
+ {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+ {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+ {{- end -}}
+
+ {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+ {{- if .subchart -}}
+ {{- printf "%v" .context.Values.redis.enabled -}}
+ {{- else -}}
+ {{- printf "%v" (not .context.Values.enabled) -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+ - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+ {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+ {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+ {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+ {{- if $standarizedAuthValues -}}
+ {{- true -}}
+ {{- end -}}
+{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_validations.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+ - valueKey - String - Required.
The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n        %s\n" (include "common.utils.secret.getvalue" .) -}}
+    {{- end -}}
+    {{- printf "\n    '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+  {{- end -}}
+{{- end -}}
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/values.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/values.yaml
new file mode 100644
index 0000000..f2df68e
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/charts/common/values.yaml
@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/NOTES.txt b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/NOTES.txt
new file mode 100644
index 0000000..c287e1e
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/NOTES.txt
@@ -0,0 +1,76 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+{{- if and (not .Values.auth.client.enabled) (eq .Values.service.type "LoadBalancer") }}
+-------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "service.type=LoadBalancer" and not specifying "auth.client.enabled=true"
+    you have most likely exposed the ZooKeeper service externally without any
+    authentication mechanism.
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP" or
+    "NodePort". As an alternative, you can specify a valid password in the
+    "auth.client.clientPassword" parameter.
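+
+    For example (an illustrative override; replace the release name, chart
+    reference, user name, and password with your own):
+
+        helm upgrade my-release my-repo/zookeeper \
+          --set auth.client.enabled=true \
+          --set auth.client.clientUser=zkuser \
+          --set auth.client.clientPassword=$ZK_CLIENT_PASSWORD \
+          --set auth.client.serverUsers=zkuser \
+          --set auth.client.serverPasswords=$ZK_CLIENT_PASSWORD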
+
+-------------------------------------------------------------------------------
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing:
+
+  kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash
+
+In order to replicate the container startup scripts execute this command:
+
+    /opt/bitnami/scripts/zookeeper/entrypoint.sh /opt/bitnami/scripts/zookeeper/run.sh
+
+{{- else }}
+
+ZooKeeper can be accessed via port {{ .Values.service.ports.client }} on the following DNS name from within your cluster:
+
+    {{ template "common.names.fullname" . }}.{{ template "zookeeper.namespace" . }}.svc.{{ .Values.clusterDomain }}
+
+To connect to your ZooKeeper server run the following commands:
+
+    export POD_NAME=$(kubectl get pods --namespace {{ template "zookeeper.namespace" . }} -l "app.kubernetes.io/name={{ template "common.names.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}")
+    kubectl exec -it $POD_NAME -- zkCli.sh
+
+To connect to your ZooKeeper server from outside the cluster execute the following commands:
+
+{{- if eq .Values.service.type "NodePort" }}
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT=$(kubectl get --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }})
+    zkCli.sh $NODE_IP:$NODE_PORT
+
+{{- else if eq .Values.service.type "LoadBalancer" }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ template "zookeeper.namespace" . }} -w {{ template "common.names.fullname" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ template "zookeeper.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+    zkCli.sh $SERVICE_IP:{{ .Values.service.ports.client }}
+
+{{- else if eq .Values.service.type "ClusterIP" }}
+
+    kubectl port-forward --namespace {{ template "zookeeper.namespace" . }} svc/{{ template "common.names.fullname" . }} {{ .Values.service.ports.client }}:{{ .Values.containerPorts.client }} &
+    zkCli.sh 127.0.0.1:{{ .Values.service.ports.client }}
+
+{{- end }}
+{{- end }}
+
+{{- include "zookeeper.validateValues" . }}
+{{- include "zookeeper.checkRollingTags" . 
}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/_helpers.tpl b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/_helpers.tpl new file mode 100644 index 0000000..d855bad --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/_helpers.tpl @@ -0,0 +1,361 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper ZooKeeper image name +*/}} +{{- define "zookeeper.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "zookeeper.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "zookeeper.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Check if there are rolling tags in the images +*/}} +{{- define "zookeeper.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Return ZooKeeper Namespace to use +*/}} +{{- define "zookeeper.namespace" -}} +{{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} +{{- else -}} + {{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "zookeeper.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the ZooKeeper client-server authentication credentials secret +*/}} +{{- define "zookeeper.client.secretName" -}} +{{- if .Values.auth.client.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.client.existingSecret $) -}} +{{- else -}} + {{- printf "%s-client-auth" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the ZooKeeper server-server authentication credentials secret +*/}} +{{- define "zookeeper.quorum.secretName" -}} +{{- if .Values.auth.quorum.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.quorum.existingSecret $) -}} +{{- else -}} + {{- printf "%s-quorum-auth" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ZooKeeper client-server authentication credentials secret object should be created +*/}} +{{- define "zookeeper.client.createSecret" -}} +{{- if and .Values.auth.client.enabled (empty .Values.auth.client.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ZooKeeper server-server authentication credentials secret object should be created +*/}} +{{- define "zookeeper.quorum.createSecret" -}} +{{- if and .Values.auth.quorum.enabled (empty .Values.auth.quorum.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. 
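+
+Usage (an illustrative call; "my-secret" and "my-key" are placeholder names):
+{{ include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" "my-secret" "Key" "my-key" "Length" 12) }}
+Params:
+  - Namespace - String - Required. Namespace in which the secret is looked up.
+  - Name - String - Required. Name of the secret.
+  - Key - String - Required. Key of the secret data entry to read.
+  - Length - Int - Optional. Length of the random value generated when the secret is absent. Default: 16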
+*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{/* +Return the ZooKeeper configuration ConfigMap name +*/}} +{{- define "zookeeper.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ConfigMap object should be created for ZooKeeper configuration +*/}} +{{- define "zookeeper.createConfigmap" -}} +{{- if and .Values.configuration (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret should be created for ZooKeeper quorum +*/}} +{{- define "zookeeper.quorum.createTlsSecret" -}} +{{- if and .Values.tls.quorum.enabled .Values.tls.quorum.autoGenerated (not .Values.tls.quorum.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing ZooKeeper quorum TLS certificates +*/}} +{{- define "zookeeper.quorum.tlsSecretName" -}} +{{- $secretName := .Values.tls.quorum.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-quorum-crt" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper quorum +*/}} +{{- define "zookeeper.quorum.createTlsPasswordsSecret" -}} +{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.passwordsSecretName) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the secret containing the Keystore and Truststore password +*/}} +{{- define "zookeeper.quorum.tlsPasswordsSecret" -}} +{{- $secretName := .Values.tls.quorum.passwordsSecretName -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-quorum-tls-pass" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret should be created for ZooKeeper client +*/}} +{{- define "zookeeper.client.createTlsSecret" -}} +{{- if and .Values.tls.client.enabled .Values.tls.client.autoGenerated (not .Values.tls.client.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing ZooKeeper client TLS certificates +*/}} +{{- define "zookeeper.client.tlsSecretName" -}} +{{- $secretName := .Values.tls.client.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-client-crt" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the quorum keystore key to be retrieved from tls.quorum.existingSecret. +*/}} +{{- define "zookeeper.quorum.tlsKeystoreKey" -}} +{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretKeystoreKey -}} + {{- printf "%s" .Values.tls.quorum.existingSecretKeystoreKey -}} +{{- else -}} + {{- printf "zookeeper.keystore.jks" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the quorum truststore key to be retrieved from tls.quorum.existingSecret. 
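+Defaults to "zookeeper.truststore.jks" when tls.quorum.existingSecretTruststoreKey is not set.
+
+Usage:
+{{ include "zookeeper.quorum.tlsTruststoreKey" . }}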
+*/}} +{{- define "zookeeper.quorum.tlsTruststoreKey" -}} +{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretTruststoreKey -}} + {{- printf "%s" .Values.tls.quorum.existingSecretTruststoreKey -}} +{{- else -}} + {{- printf "zookeeper.truststore.jks" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the client keystore key to be retrieved from tls.client.existingSecret. +*/}} +{{- define "zookeeper.client.tlsKeystoreKey" -}} +{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretKeystoreKey -}} + {{- printf "%s" .Values.tls.client.existingSecretKeystoreKey -}} +{{- else -}} + {{- printf "zookeeper.keystore.jks" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the client truststore key to be retrieved from tls.client.existingSecret. +*/}} +{{- define "zookeeper.client.tlsTruststoreKey" -}} +{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretTruststoreKey -}} + {{- printf "%s" .Values.tls.client.existingSecretTruststoreKey -}} +{{- else -}} + {{- printf "zookeeper.truststore.jks" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper client +*/}} +{{- define "zookeeper.client.createTlsPasswordsSecret" -}} +{{- if and .Values.tls.client.enabled (not .Values.tls.client.passwordsSecretName) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the secret containing the Keystore and Truststore password +*/}} +{{- define "zookeeper.client.tlsPasswordsSecret" -}} +{{- $secretName := .Values.tls.client.passwordsSecretName -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-client-tls-pass" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the quorum keystore password key to be retrieved from tls.quorum.passwordSecretName. +*/}} +{{- define "zookeeper.quorum.tlsPasswordKeystoreKey" -}} +{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretKeystoreKey -}} + {{- printf "%s" .Values.tls.quorum.passwordsSecretKeystoreKey -}} +{{- else -}} + {{- printf "keystore-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the quorum truststore password key to be retrieved from tls.quorum.passwordSecretName. +*/}} +{{- define "zookeeper.quorum.tlsPasswordTruststoreKey" -}} +{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretTruststoreKey -}} + {{- printf "%s" .Values.tls.quorum.passwordsSecretTruststoreKey -}} +{{- else -}} + {{- printf "truststore-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the client keystore password key to be retrieved from tls.client.passwordSecretName. +*/}} +{{- define "zookeeper.client.tlsPasswordKeystoreKey" -}} +{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretKeystoreKey -}} + {{- printf "%s" .Values.tls.client.passwordsSecretKeystoreKey -}} +{{- else -}} + {{- printf "keystore-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the client truststore password key to be retrieved from tls.client.passwordSecretName. +*/}} +{{- define "zookeeper.client.tlsPasswordTruststoreKey" -}} +{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretTruststoreKey -}} + {{- printf "%s" .Values.tls.client.passwordsSecretTruststoreKey -}} +{{- else -}} + {{- printf "truststore-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message. 
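+
+Usage:
+{{ include "zookeeper.validateValues" . }}
+
+Renders nothing when every check passes; otherwise it aborts the rendering
+with a "VALUES VALIDATION" error listing each failed check.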
+
+*/}}
+{{- define "zookeeper.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.client.auth" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.quorum.auth" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.client.tls" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.quorum.tls" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Client authentication enabled
+*/}}
+{{- define "zookeeper.validateValues.client.auth" -}}
+{{- if and .Values.auth.client.enabled (not .Values.auth.client.existingSecret) (or (not .Values.auth.client.clientUser) (not .Values.auth.client.serverUsers)) }}
+zookeeper: auth.client.enabled
+    In order to enable client-server authentication, you need to provide the list
+    of users to be created and the user to use for client authentication.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Quorum authentication enabled
+*/}}
+{{- define "zookeeper.validateValues.quorum.auth" -}}
+{{- if and .Values.auth.quorum.enabled (not .Values.auth.quorum.existingSecret) (or (not .Values.auth.quorum.learnerUser) (not .Values.auth.quorum.serverUsers)) }}
+zookeeper: auth.quorum.enabled
+    In order to enable server-server authentication, you need to provide the list
+    of users to be created and the user to use for quorum authentication.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Client TLS enabled
+*/}}
+{{- define "zookeeper.validateValues.client.tls" -}}
+{{- if and .Values.tls.client.enabled (not .Values.tls.client.autoGenerated) (not .Values.tls.client.existingSecret) }}
+zookeeper: tls.client.enabled
+    In order to enable Client TLS encryption, you also need to provide
+    an existing secret containing the Keystore and Truststore or
+    enable auto-generated certificates.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Quorum TLS enabled
+*/}}
+{{- define "zookeeper.validateValues.quorum.tls" -}}
+{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.autoGenerated) (not .Values.tls.quorum.existingSecret) }}
+zookeeper: tls.quorum.enabled
+    In order to enable Quorum TLS, you also need to provide
+    an existing secret containing the Keystore and Truststore or
+    enable auto-generated certificates.
+{{- end -}}
+{{- end -}}
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/configmap.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/configmap.yaml
new file mode 100644
index 0000000..12b4f48
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/configmap.yaml
@@ -0,0 +1,17 @@
+{{- if (include "zookeeper.createConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + zoo.cfg: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.configuration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/extra-list.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/metrics-svc.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/metrics-svc.yaml new file mode 100644 index 0000000..5afc4b3 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/metrics-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }}-metrics + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - name: tcp-metrics + port: {{ .Values.metrics.service.port }} + targetPort: metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/networkpolicy.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/networkpolicy.yaml new file mode 100644 index 0000000..6353283 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/networkpolicy.yaml @@ -0,0 +1,41 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound connections to ZooKeeper + - ports: + - port: {{ .Values.containerPorts.client }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "common.names.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- end }} + # Allow internal communications between nodes + - ports: + - port: {{ .Values.containerPorts.follower }} + - port: {{ .Values.containerPorts.election }} + from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} +{{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/pdb.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/pdb.yaml new file mode 100644 index 0000000..f7faf65 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/pdb.yaml @@ -0,0 +1,26 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if and .Values.pdb.create (gt $replicaCount 1) }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/prometheusrule.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/prometheusrule.yaml new file mode 100644 index 0000000..87dcd35 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/prometheusrule.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 8 }} +{{- end }} + diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/scripts-configmap.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/scripts-configmap.yaml new file mode 100644 index 0000000..00afbfe --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/scripts-configmap.yaml @@ -0,0 +1,98 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + init-certs.sh: |- + #!/bin/bash + + {{- if .Values.tls.client.enabled }} + if [[ -f "/certs/client/tls.key" ]] && [[ -f "/certs/client/tls.crt" ]] && [[ -f "/certs/client/ca.crt" ]]; then + openssl pkcs12 -export -in "/certs/client/tls.crt" \ + -passout pass:"${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \ + -inkey "/certs/client/tls.key" \ + -out "/tmp/keystore.p12" + keytool -importkeystore -srckeystore "/tmp/keystore.p12" \ + -srcstoretype PKCS12 \ + -srcstorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \ + -deststorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \ + -destkeystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks" + rm "/tmp/keystore.p12" + keytool -import -file "/certs/client/ca.crt" \ + -keystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks" \ + -storepass "${ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD}" \ + -noprompt + {{- if .Values.tls.client.autoGenerated }} + else + echo "Couldn't find the expected PEM certificates! They are mandatory when Client encryption via TLS is enabled." + exit 1 + fi + {{- else }} + elif [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} ]]; then + cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks" + cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Client encryption via TLS is enabled." 
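+        # Neither PEM certificates nor prebuilt JKS stores were mounted, so fail fast
+        # and let the init container surface the misconfiguration.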
+            exit 1
+        fi
+    {{- end }}
+    {{- end }}
+    {{- if .Values.tls.quorum.enabled }}
+    if [[ -f "/certs/quorum/tls.key" ]] && [[ -f "/certs/quorum/tls.crt" ]] && [[ -f "/certs/quorum/ca.crt" ]]; then
+        openssl pkcs12 -export -in "/certs/quorum/tls.crt" \
+            -passout pass:"${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+            -inkey "/certs/quorum/tls.key" \
+            -out "/tmp/keystore.p12"
+        keytool -importkeystore -srckeystore "/tmp/keystore.p12" \
+            -srcstoretype PKCS12 \
+            -srcstorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+            -deststorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+            -destkeystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks"
+        rm "/tmp/keystore.p12"
+        keytool -import -file "/certs/quorum/ca.crt" \
+                -keystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks" \
+                -storepass "${ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD}" \
+                -noprompt
+    {{- if .Values.tls.quorum.autoGenerated }}
+    else
+        echo "Couldn't find the expected PEM certificates! They are mandatory when Quorum encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- else }}
+    elif [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} ]]; then
+        cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks"
+        cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks"
+    else
+        echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Quorum encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- end }}
+    {{- end }}
+  setup.sh: |-
+    #!/bin/bash
+
+    # Execute entrypoint as usual after obtaining ZOO_SERVER_ID
+    # check ZOO_SERVER_ID in persistent volume via myid
+    # if not present, set based on POD hostname
+    if [[ -f "/bitnami/zookeeper/data/myid" ]]; then
+        export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)"
+    else
+        HOSTNAME="$(hostname -s)"
+        if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
+            ORD=${BASH_REMATCH[2]}
+            export ZOO_SERVER_ID="$((ORD + {{ .Values.minServerId }} ))"
+        else
+            echo "Failed to get index from hostname $HOSTNAME"
+            exit 1
+        fi
+    fi
+    exec /entrypoint.sh /run.sh
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/secrets.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/secrets.yaml
new file mode 100644
index 0000000..41314d0
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/secrets.yaml
@@ -0,0 +1,77 @@
+{{- if (include "zookeeper.client.createSecret" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-client-auth" (include "common.names.fullname" .) }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . 
| nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  client-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-client-auth" (include "common.names.fullname" .)) "key" "client-password" "providedValues" (list "auth.client.clientPassword") "context" $) }}
+  server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-client-auth" (include "common.names.fullname" .)) "key" "server-password" "providedValues" (list "auth.client.serverPasswords") "context" $) }}
+{{- end }}
+{{- if (include "zookeeper.quorum.createSecret" .) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-quorum-auth" (include "common.names.fullname" .) }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  quorum-learner-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-learner-password" "providedValues" (list "auth.quorum.learnerPassword") "context" $) }}
+  quorum-server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-server-password" "providedValues" (list "auth.quorum.serverPasswords") "context" $) }}
+{{- end }}
+{{- if (include "zookeeper.client.createTlsPasswordsSecret" .) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "common.names.fullname" . }}-client-tls-pass
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  keystore-password: {{ default (randAlphaNum 10) .Values.tls.client.keystorePassword | b64enc | quote }}
+  truststore-password: {{ default (randAlphaNum 10) .Values.tls.client.truststorePassword | b64enc | quote }}
+{{- end }}
+{{- if (include "zookeeper.quorum.createTlsPasswordsSecret" .) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "common.names.fullname" . }}-quorum-tls-pass
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + keystore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.keystorePassword | b64enc | quote }} + truststore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.truststorePassword | b64enc | quote }} +{{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/serviceaccount.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/serviceaccount.yaml new file mode 100644 index 0000000..958a57a --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "zookeeper.serviceAccountName" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/servicemonitor.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/servicemonitor.yaml new file mode 100644 index 0000000..2c8af33 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/servicemonitor.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "common.names.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: tcp-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ template "zookeeper.namespace" . }} +{{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/statefulset.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/statefulset.yaml new file mode 100644 index 0000000..ae2f7a1 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/statefulset.yaml @@ -0,0 +1,542 @@ +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + {{- if .Values.updateStrategy }} + updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + annotations: + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if (include "zookeeper.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "zookeeper.quorum.createSecret" .) (include "zookeeper.client.createSecret" .) (include "zookeeper.client.createTlsPasswordsSecret" .) (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "zookeeper.client.createTlsSecret" .) (include "zookeeper.quorum.createTlsSecret" .) }} + checksum/tls-secrets: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . 
| sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "zookeeper.serviceAccountName" . }} + {{- include "zookeeper.imagePullSecrets" . | nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ template "zookeeper.volumePermissions.image" . 
}} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p /bitnami/zookeeper + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} /bitnami/zookeeper + find /bitnami/zookeeper -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if .Values.dataLogDir }} + mkdir -p {{ .Values.dataLogDir }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ .Values.dataLogDir }} + find {{ .Values.dataLogDir }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- end }} + {{- if .Values.volumePermissions.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- end }} + {{- if or .Values.tls.client.enabled .Values.tls.quorum.enabled }} + - name: init-certs + image: {{ include "zookeeper.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /scripts/init-certs.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if or .Values.tls.client.passwordsSecretName (include "zookeeper.client.createTlsPasswordsSecret" .) }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . }} + {{- end }} + {{- if or .Values.tls.quorum.passwordsSecretName (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . 
}} + {{- end }} + {{- if .Values.tls.resources }} + resources: {{- toYaml .Values.tls.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: scripts + mountPath: /scripts/init-certs.sh + subPath: init-certs.sh + {{- if or .Values.tls.client.enabled }} + - name: client-certificates + mountPath: /certs/client + - name: client-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/client + {{- end }} + {{- if or .Values.tls.quorum.enabled }} + - name: quorum-certificates + mountPath: /certs/quorum + - name: quorum-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/quorum + {{- end }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + containers: + - name: zookeeper + image: {{ template "zookeeper.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ZOO_DATA_LOG_DIR + value: {{ .Values.dataLogDir | quote }} + - name: ZOO_PORT_NUMBER + value: {{ .Values.containerPorts.client | quote }} + - name: ZOO_TICK_TIME + value: {{ .Values.tickTime | quote }} + - name: ZOO_INIT_LIMIT + value: {{ .Values.initLimit | quote }} + - name: ZOO_SYNC_LIMIT + value: {{ .Values.syncLimit | quote }} + - name: ZOO_PRE_ALLOC_SIZE + value: {{ .Values.preAllocSize | quote }} + - name: ZOO_SNAPCOUNT + value: {{ .Values.snapCount | quote }} + - name: ZOO_MAX_CLIENT_CNXNS + value: {{ .Values.maxClientCnxns | quote }} + - name: ZOO_4LW_COMMANDS_WHITELIST + value: {{ .Values.fourlwCommandsWhitelist | quote }} + - name: ZOO_LISTEN_ALLIPS_ENABLED + value: {{ ternary "yes" "no" .Values.listenOnAllIPs | quote }} + - name: ZOO_AUTOPURGE_INTERVAL + value: {{ .Values.autopurge.purgeInterval | quote }} + - name: ZOO_AUTOPURGE_RETAIN_COUNT + value: {{ .Values.autopurge.snapRetainCount | quote }} + - name: ZOO_MAX_SESSION_TIMEOUT + value: {{ .Values.maxSessionTimeout | quote }} + - name: ZOO_SERVERS + {{- $replicaCount := int .Values.replicaCount }} + {{- $minServerId := int .Values.minServerId }} + {{- $followerPort := int .Values.containerPorts.follower }} + {{- $electionPort := int .Values.containerPorts.election }} + {{- $releaseNamespace := include "zookeeper.namespace" . }} + {{- $zookeeperFullname := include "common.names.fullname" . 
}} + {{- $zookeeperHeadlessServiceName := printf "%s-%s" $zookeeperFullname "headless" | trunc 63 }} + {{- $clusterDomain := .Values.clusterDomain }} + value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }}::{{ add $e $minServerId }} {{ end }} + - name: ZOO_ENABLE_AUTH + value: {{ ternary "yes" "no" .Values.auth.client.enabled | quote }} + {{- if .Values.auth.client.enabled }} + - name: ZOO_CLIENT_USER + value: {{ .Values.auth.client.clientUser | quote }} + - name: ZOO_CLIENT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.secretName" . }} + key: client-password + - name: ZOO_SERVER_USERS + value: {{ .Values.auth.client.serverUsers | quote }} + - name: ZOO_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.secretName" . }} + key: server-password + {{- end }} + - name: ZOO_ENABLE_QUORUM_AUTH + value: {{ ternary "yes" "no" .Values.auth.quorum.enabled | quote }} + {{- if .Values.auth.quorum.enabled }} + - name: ZOO_QUORUM_LEARNER_USER + value: {{ .Values.auth.quorum.learnerUser | quote }} + - name: ZOO_QUORUM_LEARNER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.secretName" . }} + key: quorum-learner-password + - name: ZOO_QUORUM_SERVER_USERS + value: {{ .Values.auth.quorum.serverUsers | quote }} + - name: ZOO_QUORUM_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.secretName" . }} + key: quorum-server-password + {{- end }} + - name: ZOO_HEAP_SIZE + value: {{ .Values.heapSize | quote }} + - name: ZOO_LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: ALLOW_ANONYMOUS_LOGIN + value: {{ ternary "no" "yes" .Values.auth.client.enabled | quote }} + {{- if .Values.jvmFlags }} + - name: JVMFLAGS + value: {{ .Values.jvmFlags | quote }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: ZOO_ENABLE_PROMETHEUS_METRICS + value: "yes" + - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER + value: {{ .Values.metrics.containerPort | quote }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: ZOO_TLS_PORT_NUMBER + value: {{ .Values.containerPorts.tls | quote }} + - name: ZOO_TLS_CLIENT_ENABLE + value: {{ .Values.tls.client.enabled | quote }} + - name: ZOO_TLS_CLIENT_AUTH + value: {{ .Values.tls.client.auth | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_FILE + value: {{ .Values.tls.client.keystorePath | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_FILE + value: {{ .Values.tls.client.truststorePath | quote }} + {{- if or .Values.tls.client.keystorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }} + {{- end }} + {{- if or .Values.tls.client.truststorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . 
}} + {{- end }} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: ZOO_TLS_QUORUM_ENABLE + value: {{ .Values.tls.quorum.enabled | quote }} + - name: ZOO_TLS_QUORUM_CLIENT_AUTH + value: {{ .Values.tls.auth.quorum | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_FILE + value: {{ .Values.tls.quorum.keystorePath | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_FILE + value: {{ .Values.tls.quorum.truststorePath | quote }} + {{- if or .Values.tls.quorum.keystorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }} + {{- end }} + {{- if or .Values.tls.quorum.truststorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . }} + {{- end }} + {{- end }} + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: client + containerPort: {{ .Values.containerPorts.client }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-tls + containerPort: {{ .Values.containerPorts.tls }} + {{- end }} + - name: follower + containerPort: {{ .Values.containerPorts.follower }} + - name: election + containerPort: {{ .Values.containerPorts.election }} + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }} + exec: + {{- if not .Values.service.disableBaseClientPort }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok'] + {{- else if not .Values.tls.client.enabled }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok'] + {{- end }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe 
"context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }} + exec: + {{- if not .Values.service.disableBaseClientPort }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok'] + {{- else if not .Values.tls.client.enabled }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok'] + {{- end }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + {{- if not .Values.service.disableBaseClientPort }} + port: client + {{- else }} + port: follower + {{- end }} + {{- else if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/zookeeper/conf/zoo.cfg + subPath: zoo.cfg + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/client + readOnly: true + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: quorum-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/quorum + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + - name: scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + configMap: + name: {{ include "zookeeper.configmapName" . }} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) 
}} + {{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.dataLogDir.existingClaim }} + - name: data-log + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.dataLogDir.existingClaim .) }} + {{- else if and ( not .Values.persistence.enabled ) .Values.dataLogDir }} + - name: data-log + emptyDir: {} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-certificates + secret: + secretName: {{ include "zookeeper.client.tlsSecretName" . }} + defaultMode: 256 + - name: client-shared-certs + emptyDir: {} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: quorum-certificates + secret: + secretName: {{ include "zookeeper.quorum.tlsSecretName" . }} + defaultMode: 256 + - name: quorum-shared-certs + emptyDir: {} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) ) }} + volumeClaimTemplates: + {{- if not .Values.persistence.existingClaim }} + - metadata: + name: data + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 10 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- end }} + {{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }} + - metadata: + name: data-log + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 10 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.dataLogDir.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.persistence.dataLogDir.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.dataLogDir.selector "context" $) | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/svc-headless.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/svc-headless.yaml new file mode 100644 index 0000000..ee05e1d --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/svc-headless.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.headless.annotations }} + annotations: + {{- if .Values.service.headless.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.headless.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: {{ .Values.service.headless.publishNotReadyAddresses }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: tcp-client + port: {{ .Values.service.ports.client }} + targetPort: client + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: tcp-client-tls + port: {{ .Values.service.ports.tls }} + targetPort: client-tls + {{- end }} + - name: tcp-follower + port: {{ .Values.service.ports.follower }} + targetPort: follower + - name: tcp-election + port: {{ .Values.service.ports.election }} + targetPort: election + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/svc.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/svc.yaml new file mode 100644 index 0000000..6ad0b10 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/svc.yaml @@ -0,0 +1,71 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.annotations }} + annotations: + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: tcp-client + port: {{ .Values.service.ports.client }} + targetPort: client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: tcp-client-tls + port: {{ .Values.service.ports.tls }} + targetPort: client-tls + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tls)) }} + nodePort: {{ .Values.service.nodePorts.tls }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + - name: tcp-follower + port: {{ .Values.service.ports.follower }} + targetPort: follower + - name: tcp-election + port: {{ .Values.service.ports.election }} + targetPort: election + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/tls-secrets.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/tls-secrets.yaml new file mode 100644 index 0000000..a07480d --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/templates/tls-secrets.yaml @@ -0,0 +1,55 @@ +{{- if (include "zookeeper.client.createTlsSecret" .) 
}} +{{- $ca := genCA "zookeeper-client-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-client-crt + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} +{{- if (include "zookeeper.quorum.createTlsSecret" .) }} +{{- $ca := genCA "zookeeper-quorum-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-quorum-crt + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }}
+  {{- if .Values.commonLabels }}
+  {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: kubernetes.io/tls
+data:
+  ca.crt: {{ $ca.Cert | b64enc | quote }}
+  tls.crt: {{ $crt.Cert | b64enc | quote }}
+  tls.key: {{ $crt.Key | b64enc | quote }}
+{{- end }}
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/values.yaml b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/values.yaml
new file mode 100644
index 0000000..0592ce4
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/charts/zookeeper/values.yaml
@@ -0,0 +1,866 @@
+## @section Global parameters
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+##
+
+## @param global.imageRegistry Global Docker image registry
+## @param global.imagePullSecrets Global Docker registry secret names as an array
+## @param global.storageClass Global StorageClass for Persistent Volume(s)
+##
+global:
+  imageRegistry: ""
+  ## E.g.
+  ## imagePullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  imagePullSecrets: []
+  storageClass: ""
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname template
+##
+fullnameOverride: ""
+## @param clusterDomain Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+## @param extraDeploy Extra objects to deploy (evaluated as a template)
+##
+extraDeploy: []
+## @param commonLabels Add labels to all the deployed resources
+##
+commonLabels: {}
+## @param commonAnnotations Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+## @param namespaceOverride Override namespace for ZooKeeper resources
+## Useful when including ZooKeeper as a chart dependency, so it can be released into a different namespace than the parent
+##
+namespaceOverride: ""
+
+## Enable diagnostic mode in the statefulset
+##
+diagnosticMode:
+  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+  ##
+  enabled: false
+  ## @param diagnosticMode.command Command to override all containers in the statefulset
+  ##
+  command:
+    - sleep
+  ## @param diagnosticMode.args Args to override all containers in the statefulset
+  ##
+  args:
+    - infinity
+
+## @section ZooKeeper chart parameters
+
+## Bitnami ZooKeeper image version
+## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/
+## @param image.registry ZooKeeper image registry
+## @param image.repository ZooKeeper image repository
+## @param image.tag ZooKeeper image tag (immutable tags are recommended)
+## @param image.pullPolicy ZooKeeper image pull policy
+## @param image.pullSecrets Specify docker-registry secret names as an array
+## @param image.debug Specify if debug values should be set
+##
+image:
+  registry: docker.io
+  repository: bitnami/zookeeper
+  tag: 3.8.0-debian-11-r11
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## Example:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+  ## Set to true if you would like to see extra information on logs
+  ##
+  debug: false
+## Authentication parameters
+##
+auth:
+  client:
+    ## @param auth.client.enabled Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5
+    ##
+    enabled: false
+    ## @param auth.client.clientUser User that ZooKeeper clients will use to authenticate
+    ##
+    clientUser: ""
+    ## @param auth.client.clientPassword Password that ZooKeeper clients will use to authenticate
+    ##
+    clientPassword: ""
+    ## @param auth.client.serverUsers Comma, semicolon or whitespace separated list of users to be created
+    ## Specify them as a string, for example: "user1,user2,admin"
+    ##
+    serverUsers: ""
+    ## @param auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created
+    ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+    ##
+    serverPasswords: ""
+    ## @param auth.client.existingSecret Use existing secret (ignores previous passwords)
+    ##
+    existingSecret: ""
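+  ## Illustrative example (not a default) of enabling client-server SASL auth;
+  ## the user name and password shown here are placeholders:
+  ## auth:
+  ##   client:
+  ##     enabled: true
+  ##     clientUser: "zkclient"
+  ##     clientPassword: "zkclient-password"
+  ##     serverUsers: "zkclient"
+  ##     serverPasswords: "zkclient-password"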
+  quorum:
+    ## @param auth.quorum.enabled Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5
+    ##
+    enabled: false
+    ## @param auth.quorum.learnerUser User that the ZooKeeper quorumLearner will use to authenticate to quorumServers.
+    ## Note: Make sure the user is included in auth.quorum.serverUsers
+    ##
+    learnerUser: ""
+    ## @param auth.quorum.learnerPassword Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers.
+    ##
+    learnerPassword: ""
+    ## @param auth.quorum.serverUsers Comma, semicolon or whitespace separated list of users for the quorumServers.
+    ## Specify them as a string, for example: "user1,user2,admin"
+    ##
+    serverUsers: ""
+    ## @param auth.quorum.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created
+    ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+    ##
+    serverPasswords: ""
+    ## @param auth.quorum.existingSecret Use existing secret (ignores previous passwords)
+    ##
+    existingSecret: ""
+## @param tickTime Basic time unit (in milliseconds) used by ZooKeeper for heartbeats
+##
+tickTime: 2000
+## @param initLimit Limit (in ticks) on the length of time the ZooKeeper servers in quorum have to connect to a leader
+##
+initLimit: 10
+## @param syncLimit How far out of date a server can be from a leader
+##
+syncLimit: 5
+## @param preAllocSize Block size for transaction log file
+##
+preAllocSize: 65536
+## @param snapCount The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled)
+##
+snapCount: 100000
+## @param maxClientCnxns Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble
+##
+maxClientCnxns: 60
+## @param maxSessionTimeout Maximum session timeout (in milliseconds) that the server will allow the client to negotiate
+## Defaults to 20 times the tickTime
+##
+maxSessionTimeout: 40000
+## @param heapSize Size (in MB) for the Java Heap options (Xmx and Xms)
+## This env var is ignored if Xmx and Xms are configured via `jvmFlags`
+##
+heapSize: 1024
+## @param fourlwCommandsWhitelist A list of comma-separated Four Letter Words commands that can be executed
+##
+fourlwCommandsWhitelist: srvr, mntr, ruok
+## @param minServerId Minimal SERVER_ID value; nodes increment their IDs from this base
+## Servers increment their ID starting at this minimal value.
+## E.g., with `minServerId=10` and 3 replicas, server IDs will be 10, 11, 12 for z-0, z-1 and z-2 respectively.
+##
+minServerId: 1
+## @param listenOnAllIPs Allow ZooKeeper to listen for connections from its peers on all available IP addresses
+##
+listenOnAllIPs: false
+## Ongoing data directory cleanup configuration
+##
+autopurge:
+  ## @param autopurge.snapRetainCount The number of most recent snapshots (and corresponding transaction logs) to retain
+  ##
+  snapRetainCount: 3
+  ## @param autopurge.purgeInterval The time interval (in hours) for which the purge task has to be triggered
+  ## Set to a positive integer to enable the auto purging
+  ##
+  purgeInterval: 0
+## @param logLevel Log level for the ZooKeeper server. ERROR by default
+## Keep in mind that if you set it to INFO or WARN the readiness probe will produce a lot of logs
+##
+logLevel: ERROR
+## @param jvmFlags Default JVM flags for the ZooKeeper process
+##
+jvmFlags: ""
+## @param dataLogDir Dedicated data log directory
+## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots.
+## E.g.
+## dataLogDir: /bitnami/zookeeper/dataLog
+##
+dataLogDir: ""
+## @param configuration Configure ZooKeeper with a custom zoo.cfg file
+## e.g:
+## configuration: |-
+##   tickTime=2000
+##   initLimit=10
+##   ...
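+## NOTE: when overriding zoo.cfg, keep "ruok" whitelisted (see `fourlwCommandsWhitelist`
+## above), since the chart's liveness and readiness probes send the "ruok" command.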
+## +configuration: "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for ZooKeeper +## NOTE: When it's set the `configuration` parameter is ignored +## +existingConfigmap: "" +## @param extraEnvVars Array with extra environment variables to add to ZooKeeper nodes +## e.g: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ZooKeeper nodes +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for ZooKeeper nodes +## +extraEnvVarsSecret: "" +## @param command Override default container command (useful when using custom images) +## +command: + - /scripts/setup.sh +## @param args Override default container args (useful when using custom images) +## +args: [] + +## @section Statefulset parameters + +## @param replicaCount Number of ZooKeeper nodes +## +replicaCount: 1 +## @param containerPorts.client ZooKeeper client container port +## @param containerPorts.tls ZooKeeper TLS container port +## @param containerPorts.follower ZooKeeper follower container port +## @param containerPorts.election ZooKeeper election container port +## +containerPorts: + client: 2181 + tls: 3181 + follower: 2888 + election: 3888 +## Configure extra options for ZooKeeper containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on ZooKeeper containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## @param livenessProbe.probeCommandTimeout Probe command timeout for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 +## @param readinessProbe.enabled Enable readinessProbe on ZooKeeper containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## @param readinessProbe.probeCommandTimeout Probe command timeout for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 +## @param startupProbe.enabled Enable startupProbe on ZooKeeper containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + 
failureThreshold: 15
+  successThreshold: 1
+## @param customLivenessProbe Custom livenessProbe that overrides the default one
+##
+customLivenessProbe: {}
+## @param customReadinessProbe Custom readinessProbe that overrides the default one
+##
+customReadinessProbe: {}
+## @param customStartupProbe Custom startupProbe that overrides the default one
+##
+customStartupProbe: {}
+## @param lifecycleHooks Lifecycle hooks for the ZooKeeper container(s) to automate configuration before or after startup
+##
+lifecycleHooks: {}
+## ZooKeeper resource requests and limits
+## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+## @param resources.limits The resource limits for the ZooKeeper containers
+## @param resources.requests.memory The requested memory for the ZooKeeper containers
+## @param resources.requests.cpu The requested CPU for the ZooKeeper containers
+##
+resources:
+  limits: {}
+  requests:
+    memory: 256Mi
+    cpu: 250m
+## Configure Pods Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+## @param podSecurityContext.enabled Enable ZooKeeper pods' Security Context
+## @param podSecurityContext.fsGroup Set ZooKeeper pod's Security Context fsGroup
+##
+podSecurityContext:
+  enabled: true
+  fsGroup: 1001
+## Configure Container Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## @param containerSecurityContext.enabled Enable ZooKeeper containers' Security Context
+## @param containerSecurityContext.runAsUser Set ZooKeeper containers' Security Context runAsUser
+## @param containerSecurityContext.runAsNonRoot Set ZooKeeper containers' Security Context runAsNonRoot
+##
+containerSecurityContext:
+  enabled: true
+  runAsUser: 1001
+  runAsNonRoot: true
+## @param hostAliases ZooKeeper pods host aliases
+## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+##
+hostAliases: []
+## @param podLabels Extra labels for ZooKeeper pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+##
+podLabels: {}
+## @param podAnnotations Annotations for ZooKeeper pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAffinityPreset: ""
+## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAntiAffinityPreset: soft
+## Node affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+##
+nodeAffinityPreset:
+  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ##
+  type: ""
+  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## key: "kubernetes.io/e2e-az-name"
+  ##
+  key: ""
+  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## values:
+  ##   - e2e-az1
+  ##   - e2e-az2
+  ##
+  values: []
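+## Illustrative composed example (not a default): pin pods to amd64 nodes using
+## the preset parameters documented above:
+## nodeAffinityPreset:
+##   type: "hard"
+##   key: "kubernetes.io/arch"
+##   values:
+##     - amd64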
+## @param affinity Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it is set
+##
+affinity: {}
+## @param nodeSelector Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+## @param tolerations Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+##
+topologySpreadConstraints: []
+## @param podManagementPolicy The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel`
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+##
+podManagementPolicy: Parallel
+## @param priorityClassName Name of the existing priority class to be used by ZooKeeper pods; the priority class needs to be created beforehand
+## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+##
+priorityClassName: ""
+## @param schedulerName Name of the Kubernetes scheduler to use for ZooKeeper pods (other than the default scheduler)
+## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+schedulerName: ""
+## @param updateStrategy.type ZooKeeper statefulset strategy type
+## @param updateStrategy.rollingUpdate ZooKeeper statefulset rolling update configuration parameters
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+##
+updateStrategy:
+  type: RollingUpdate
+  rollingUpdate: {}
+## @param extraVolumes Optionally specify extra list of additional volumes for the ZooKeeper pod(s)
+## Example Use Case: mount certificates to enable TLS
+## e.g:
+## extraVolumes:
+##   - name: zookeeper-keystore
+##     secret:
+##       defaultMode: 288
+##       secretName: zookeeper-keystore
+##   - name: zookeeper-truststore
+##     secret:
+##       defaultMode: 288
+##       secretName: zookeeper-truststore
+##
+extraVolumes: []
+## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s)
+## Example Use Case: mount certificates to enable TLS
+## e.g:
+## extraVolumeMounts:
+##   - name: zookeeper-keystore
+##     mountPath: /certs/keystore
+##     readOnly: true
+##   - name: zookeeper-truststore
+##     mountPath: /certs/truststore
+##     readOnly: true
+##
+extraVolumeMounts: []
+## @param sidecars Add additional sidecar containers to the ZooKeeper pod(s)
+## e.g:
+## sidecars:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+sidecars: []
+## @param initContainers Add additional init containers to the ZooKeeper pod(s)
+## Example:
+## initContainers:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+initContainers: []
+## ZooKeeper Pod Disruption Budget
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+## @param pdb.create Deploy a pdb object for the ZooKeeper pod
+## @param pdb.minAvailable Minimum available ZooKeeper replicas
+## @param pdb.maxUnavailable Maximum unavailable ZooKeeper replicas
+##
+pdb:
+  create: false
+  minAvailable: ""
+  maxUnavailable: 1
+
+## @section Traffic Exposure parameters
+
+service:
+  ## @param service.type Kubernetes Service type
+  ##
+  type: ClusterIP
+  ## @param service.ports.client ZooKeeper client service port
+  ## @param service.ports.tls ZooKeeper TLS service port
+  ## @param service.ports.follower ZooKeeper follower service port
+  ## @param service.ports.election ZooKeeper election service port
+  ##
+  ports:
+    client: 2181
+    tls: 3181
+    follower: 2888
+    election: 3888
+  ## Node ports to expose
+  ## NOTE: choose port between <30000-32767>
+  ## @param service.nodePorts.client Node port for clients
+  ## @param service.nodePorts.tls Node port for TLS
+  ##
+  nodePorts:
+    client: ""
+    tls: ""
+  ## @param service.disableBaseClientPort Remove client port from service definitions.
+  ##
+  disableBaseClientPort: false
+  ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
+  ## Values: ClientIP or None
+  ## ref: https://kubernetes.io/docs/user-guide/services/
+  ##
+  sessionAffinity: None
+  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
+  ## sessionAffinityConfig:
+  ##   clientIP:
+  ##     timeoutSeconds: 300
+  ##
+  sessionAffinityConfig: {}
+  ## @param service.clusterIP ZooKeeper service Cluster IP
+  ## e.g.:
+  ## clusterIP: None
+  ##
+  clusterIP: ""
+  ## @param service.loadBalancerIP ZooKeeper service Load Balancer IP
+  ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+  ##
+  loadBalancerIP: ""
+  ## @param service.loadBalancerSourceRanges ZooKeeper service Load Balancer sources
+  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+  ## e.g:
+  ## loadBalancerSourceRanges:
+  ##   - 10.10.10.0/24
+  ##
+  loadBalancerSourceRanges: []
+  ## @param service.externalTrafficPolicy ZooKeeper service external traffic policy
+  ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+  ##
+  externalTrafficPolicy: Cluster
+  ## @param service.annotations Additional custom annotations for ZooKeeper service
+  ##
+  annotations: {}
+  ## @param service.extraPorts Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value)
+  ##
+  extraPorts: []
+  ## @param service.headless.annotations Annotations for the Headless Service
+  ## @param service.headless.publishNotReadyAddresses If the ZooKeeper headless service should publish DNS records for not ready pods
+  ##
+  headless:
+    publishNotReadyAddresses: true
+    annotations: {}
+## Network policies
+## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
+  ##
+  enabled: false
+  ## @param networkPolicy.allowExternal Don't require client label for connections
+  ## When set to false, only pods with the correct client label will have network access to the port ZooKeeper is
+  ## listening on. When true, ZooKeeper accepts connections from any source (with the correct destination port).
+  ##
+  allowExternal: true
+
+## @section Other Parameters
+
+## Service account for ZooKeeper to use.
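+## Illustrative example (not a default): create a release-scoped ServiceAccount
+## serviceAccount:
+##   create: true
+##   name: "zookeeper"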
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for ZooKeeper pod + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable ZooKeeper data persistence using PVC. If false, use emptyDir + ## + enabled: true + ## @param persistence.existingClaim Name of an existing PVC to use (only when deploying a single replica) + ## + existingClaim: "" + ## @param persistence.storageClass PVC Storage Class for ZooKeeper data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access modes + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for ZooKeeper data volume + ## + size: 8Gi + ## @param persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param persistence.selector Selector to match an existing Persistent Volume for ZooKeeper's data PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## Persistence for a dedicated data log directory + ## + dataLogDir: + ## @param persistence.dataLogDir.size PVC Storage Request for ZooKeeper's dedicated data log directory + ## + size: 8Gi + ## @param persistence.dataLogDir.existingClaim Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param persistence.dataLogDir.selector Selector to match an existing Persistent Volume for ZooKeeper's data log PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. 
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+
+## @section Volume Permissions parameters
+##
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
+##
+volumePermissions:
+  ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
+  ##
+  enabled: false
+  ## @param volumePermissions.image.registry Init container volume-permissions image registry
+  ## @param volumePermissions.image.repository Init container volume-permissions image repository
+  ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
+  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: 11-debian-11-r10
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## Example:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Init container's Security Context
+  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+  ## and not the below volumePermissions.containerSecurityContext.runAsUser
+  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+  ##
+  containerSecurityContext:
+    runAsUser: 0
+
+## @section Metrics parameters
+##
+
+## ZooKeeper Prometheus Exporter configuration
+##
+metrics:
+  ## @param metrics.enabled Enable Prometheus to access ZooKeeper metrics endpoint
+  ##
+  enabled: false
+  ## @param metrics.containerPort ZooKeeper Prometheus Exporter container port
+  ##
+  containerPort: 9141
+  ## Service configuration
+  ##
+  service:
+    ## @param metrics.service.type ZooKeeper Prometheus Exporter service type
+    ##
+    type: ClusterIP
+    ## @param metrics.service.port ZooKeeper Prometheus Exporter service port
+    ##
+    port: 9141
+    ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint
+    ##
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "{{ .Values.metrics.service.port }}"
+      prometheus.io/path: "/metrics"
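+  ## Illustrative example (not a default): enable the exporter and a ServiceMonitor,
+  ## assuming a Prometheus Operator instance that selects ServiceMonitors labeled
+  ## `release: prometheus`:
+  ## metrics:
+  ##   enabled: true
+  ##   serviceMonitor:
+  ##     enabled: true
+  ##     additionalLabels:
+  ##       release: prometheus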
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator
+    ##
+    enabled: false
+    ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+    ##
+    namespace: ""
+    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    interval: ""
+    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+    ##
+    additionalLabels: {}
+    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    selector: {}
+    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+    ##
+    relabelings: []
+    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
+    ##
+    metricRelabelings: []
+    ## @param metrics.serviceMonitor.honorLabels Honor metrics labels coming from the scraped endpoint
+    ##
+    honorLabels: false
+    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus.
+    ##
+    jobLabel: ""
+  ## Prometheus Operator PrometheusRule configuration
+  ##
+  prometheusRule:
+    ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
+    ##
+    enabled: false
+    ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+    ##
+    namespace: ""
+    ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
+    ##
+    additionalLabels: {}
+    ## @param metrics.prometheusRule.rules PrometheusRule definitions
+    ## - alert: ZooKeeperSyncedFollowers
+    ##   annotations:
+    ##     message: The number of synced followers for the leader node in ZooKeeper deployment my-release is less than 2. This usually means that some of the ZooKeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one).
+    ##   expr: max(synced_followers{service="my-release-metrics"}) < 2
+    ##   for: 5m
+    ##   labels:
+    ##     severity: critical
+    ## - alert: ZooKeeperOutstandingRequests
+    ##   annotations:
+    ##     message: The number of outstanding requests for ZooKeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or the cluster as a whole.
+    ##   expr: outstanding_requests{service="my-release-metrics"} > 10
+    ##   for: 5m
+    ##   labels:
+    ##     severity: critical
+    ##
+    rules: []
+
+## @section TLS/SSL parameters
+##
+
+## Enable SSL/TLS encryption
+##
+tls:
+  client:
+    ## @param tls.client.enabled Enable TLS for client connections
+    ##
+    enabled: false
+    ## @param tls.client.auth SSL Client auth. Can be "none", "want" or "need".
+    ##
+    auth: "none"
+    ## @param tls.client.autoGenerated Generate automatically self-signed TLS certificates for ZooKeeper client communications
+    ## Currently only supports PEM certificates
+    ##
+    autoGenerated: false
+    ## @param tls.client.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications
+    ##
+    existingSecret: ""
+    ## @param tls.client.existingSecretKeystoreKey The secret key from the tls.client.existingSecret containing the Keystore.
+ ## + existingSecretKeystoreKey: "" + ## @param tls.client.existingSecretTruststoreKey The secret key from the tls.client.existingSecret containing the Truststore. + ## + existingSecretTruststoreKey: "" + ## @param tls.client.keystorePath Location of the KeyStore file used for Client connections + ## + keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks + ## @param tls.client.truststorePath Location of the TrustStore file used for Client connections + ## + truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks + ## @param tls.client.passwordsSecretName Existing secret containing Keystore and truststore passwords + ## + passwordsSecretName: "" + ## @param tls.client.passwordsSecretKeystoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: "" + ## @param tls.client.passwordsSecretTruststoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Truststore. + ## + passwordsSecretTruststoreKey: "" + ## @param tls.client.keystorePassword Password to access KeyStore if needed + ## + keystorePassword: "" + ## @param tls.client.truststorePassword Password to access TrustStore if needed + ## + truststorePassword: "" + quorum: + ## @param tls.quorum.enabled Enable TLS for quorum protocol + ## + enabled: false + ## @param tls.quorum.auth SSL Quorum Client auth. Can be "none", "want" or "need". + ## + auth: "none" + ## @param tls.quorum.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates. + ## + autoGenerated: false + ## @param tls.quorum.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol + ## + existingSecret: "" + ## @param tls.quorum.existingSecretKeystoreKey The secret key from the tls.quorum.existingSecret containing the Keystore. + ## + existingSecretKeystoreKey: "" + ## @param tls.quorum.existingSecretTruststoreKey The secret key from the tls.quorum.existingSecret containing the Truststore. + ## + existingSecretTruststoreKey: "" + ## @param tls.quorum.keystorePath Location of the KeyStore file used for Quorum protocol + ## + keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks + ## @param tls.quorum.truststorePath Location of the TrustStore file used for Quorum protocol + ## + truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks + ## @param tls.quorum.passwordsSecretName Existing secret containing Keystore and truststore passwords + ## + passwordsSecretName: "" + ## @param tls.quorum.passwordsSecretKeystoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: "" + ## @param tls.quorum.passwordsSecretTruststoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. 
+    ##
+    passwordsSecretTruststoreKey: ""
+    ## @param tls.quorum.keystorePassword Password to access KeyStore if needed
+    ##
+    keystorePassword: ""
+    ## @param tls.quorum.truststorePassword Password to access TrustStore if needed
+    ##
+    truststorePassword: ""
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param tls.resources.limits The resource limits for the TLS init container
+  ## @param tls.resources.requests The requested resources for the TLS init container
+  ##
+  resources:
+    limits: {}
+    requests: {}
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/ci/basic-values.yaml b/charts/evi-hbase-3.0.3/charts/hbase/ci/basic-values.yaml
new file mode 100644
index 0000000..f3f952a
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/ci/basic-values.yaml
@@ -0,0 +1,6 @@
+---
+hbase:
+  master:
+    replicas: 3
+  regionServer:
+    replicas: 3
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/templates/NOTES.txt b/charts/evi-hbase-3.0.3/charts/hbase/templates/NOTES.txt
new file mode 100644
index 0000000..0041db8
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/templates/NOTES.txt
@@ -0,0 +1,21 @@
+1. You can get an HBase shell by running this command:
+   kubectl exec -n {{ .Release.Namespace }} -it {{ include "hbase.fullname" . }}-master-0 -- hbase shell
+
+2. Inspect the hbase master service ports with:
+   kubectl describe service -n {{ .Release.Namespace }} {{ include "hbase.fullname" . }}-master
+
+3. Create a port-forward to the hbase manager UI:
+   kubectl port-forward -n {{ .Release.Namespace }} svc/{{ include "hbase.fullname" . }}-master 16010:16010
+
+   Then open the ui in your browser:
+
+   open http://localhost:16010
+
+4. Create a port-forward to the hbase thrift manager UI:
+   kubectl port-forward -n {{ .Release.Namespace }} svc/{{ include "hbase.fullname" . }}-master 9095:9095
+
+   Then open the ui in your browser:
+
+   open http://localhost:9095
+
+
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/templates/_helpers.tpl b/charts/evi-hbase-3.0.3/charts/hbase/templates/_helpers.tpl
new file mode 100644
index 0000000..6ae634f
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/templates/_helpers.tpl
@@ -0,0 +1,34 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "hbase.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 47 chars because some Kubernetes name fields are limited to 63 chars (by the DNS naming spec)
+and we append suffixes such as -master or -regionserver to the name.
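+For example, release "analytics" yields the base name "analytics-hbase" (truncated
+to 47 chars if longer), so "analytics-hbase-regionserver" stays within the 63-char limit.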
+*/}} +{{- define "hbase.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 47 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 47 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Standard Labels from Helm documentation https://helm.sh/docs/chart_best_practices/#labels-and-annotations +*/}} + +{{- define "hbase.labels" -}} +helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} +app.kubernetes.io/managed-by: {{ .Release.Service | quote }} +app.kubernetes.io/instance: {{ .Release.Name | quote }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +app.kubernetes.io/part-of: {{ .Chart.Name }} +{{- end -}} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/templates/exporter-configmap.yaml b/charts/evi-hbase-3.0.3/charts/hbase/templates/exporter-configmap.yaml new file mode 100644 index 0000000..19ef28c --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/templates/exporter-configmap.yaml @@ -0,0 +1,13 @@ +{{- if .Values.prometheus.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "hbase.fullname" . }}-exporter + labels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: master + {{- include "hbase.labels" . | nindent 4 }} +data: + jmx-hbase-prometheus.yml: |+ +{{ toYaml .Values.prometheus.config | indent 4 }} +{{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-configmap.yaml b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-configmap.yaml new file mode 100644 index 0000000..eccf765 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-configmap.yaml @@ -0,0 +1,100 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "hbase.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + {{- include "hbase.labels" . | nindent 4 }} +data: + bootstrap.sh: | + #!/bin/bash + + : ${HBASE_PREFIX:=/usr/local/hbase} + + . $HBASE_PREFIX/conf/hbase-env.sh + + # Directory to find config artifacts + CONFIG_DIR="/tmp/hbase-config" + + # Copy config files from volume mount + + for f in hbase-site.xml hbase-env.sh; do + if [[ -e ${CONFIG_DIR}/$f ]]; then + cp ${CONFIG_DIR}/$f $HBASE_PREFIX/conf/$f + else + echo "ERROR: Could not find $f in $CONFIG_DIR" + exit 1 + fi + done + {{- if .Values.prometheus.enabled }} + _HBASE_OPTS="$HBASE_OPTS" + # SET HBASE_OPTS with prometheus javaagent jmx exporter port + export HBASE_OPTS="$_HBASE_OPTS -javaagent:/jmx-exporter/jmx_prometheus_javaagent.jar={{ .Values.prometheus.port }}:/etc/exporter/jmx-hbase-prometheus.yml" + {{- end }} + + echo "starting $2"; + if [[ $2 == "master" ]]; then + # wait up to 300 seconds for namenode + NAMENODE_URL={{- printf "http://%s-hadoop-hdfs-nn:9870/index.html" .Release.Name }} + echo "start pinging $NAMENODE_URL"; + (while [[ $count -lt 60 && -z `curl -sf $NAMENODE_URL` ]]; do ((count=count+1)) ; echo "Waiting for $NAMENODE_URL" ; sleep 5; done && [[ $count -lt 60 ]]) + [[ $? -ne 0 ]] && echo "Timeout waiting for namenode, exiting." 
&& exit 1
+
+      $HBASE_PREFIX/bin/hbase-daemon.sh start master
+      {{- if .Values.prometheus.enabled }}
+      # RESET HBASE_OPTS with thrift jmx exporter port
+      export HBASE_OPTS="$_HBASE_OPTS -javaagent:/jmx-exporter/jmx_prometheus_javaagent.jar={{ .Values.prometheus.thriftPort }}:/etc/exporter/jmx-hbase-prometheus.yml"
+      {{- end }}
+      $HBASE_PREFIX/bin/hbase-daemon.sh start thrift
+    fi
+    if [[ $2 == "regionserver" ]]; then
+      # wait up to 200 seconds for the hbase-master UI to answer
+      (while [[ $count -lt 100 && -z `curl -sf http://{{ include "hbase.fullname" . }}-master:16010` ]]; do ((count=count+1)) ; echo "Waiting for {{ include "hbase.fullname" . }}-master" ; sleep 2; done && [[ $count -lt 100 ]])
+      [[ $? -ne 0 ]] && echo "Timeout waiting for hbase-master, exiting." && exit 1
+      $HBASE_PREFIX/bin/hbase-daemon.sh start regionserver
+    fi
+    if [[ $1 == "-d" ]]; then
+      # block until the daemon has written a fresh log file, then stream the logs
+      # to keep the container in the foreground
+      until find ${HBASE_PREFIX}/logs -mmin -1 | egrep -q '.*'; do echo "`date`: Waiting for logs..." ; sleep 2 ; done
+      tail -F ${HBASE_PREFIX}/logs/* &
+      while true; do sleep 1000; done
+    fi
+
+    if [[ $1 == "-bash" ]]; then
+      /bin/bash
+    fi
+  hbase-env.sh: |
+    # Extra Java runtime options.
+    # Below are what we set by default. May only work with SUN JVM.
+    # For more on why as well as other possible settings,
+    # see http://hbase.apache.org/book.html#performance
+    export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC"
+  hbase-site.xml: |
+    <?xml version="1.0"?>
+    <configuration>
+      <property>
+        <name>hbase.cluster.distributed</name>
+        <value>true</value>
+      </property>
+      <property>
+        <name>hbase.master</name>
+        <value>{{ include "hbase.fullname" . }}-master:16000</value>
+      </property>
+      {{- if not (index .Values.conf "hbaseSite" "hbase.zookeeper.quorum") }}
+      <property>
+        <name>hbase.zookeeper.quorum</name>
+        <value>{{- printf "%s-zookeeper:2181" .Release.Name }}</value>
+      </property>
+      {{- end }}
+      {{- if not (index .Values.conf "hbaseSite" "hbase.rootdir") }}
+      <property>
+        <name>hbase.rootdir</name>
+        <value>{{- printf "hdfs://%s-hadoop-hdfs-nn:9000/hbase" .Release.Name }}</value>
+      </property>
+      {{- end }}
+      {{- if index .Values.conf "hbaseSite" }}
+      {{- range $key, $value := index .Values.conf "hbaseSite" }}
+      <property><name>{{ $key }}</name><value>{{ $value }}</value></property>
+      {{- end }}
+      {{- end }}
+    </configuration>
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-master-exporter-service.yaml b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-master-exporter-service.yaml
new file mode 100644
index 0000000..4d5ccbe
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-master-exporter-service.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.prometheus.enabled -}}
+kind: Service
+apiVersion: v1
+metadata:
+  name: {{ include "hbase.fullname" . }}-master-metrics
+  labels:
+    app.kubernetes.io/name: {{ include "hbase.name" . }}
+    app.kubernetes.io/component: master
+    {{- include "hbase.labels" . | nindent 4 }}
+spec:
+  selector:
+    app.kubernetes.io/name: {{ include "hbase.name" . }}
+    app.kubernetes.io/component: master
+    app.kubernetes.io/instance: {{ .Release.Name }}
+  ports:
+  - port: {{ .Values.prometheus.port }}
+    name: metrics
+    targetPort: {{ .Values.prometheus.port }}
+{{- end -}}
\ No newline at end of file
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-master-statefulset.yaml b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-master-statefulset.yaml
new file mode 100644
index 0000000..3cac0d4
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-master-statefulset.yaml
@@ -0,0 +1,110 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ include "hbase.fullname" . }}-master
+  annotations:
+    checksum/config: {{ include (print $.Template.BasePath "/hbase-configmap.yaml") . | sha256sum }}
+  labels:
+    app.kubernetes.io/name: {{ include "hbase.name" .
}} + app.kubernetes.io/component: master + {{- include "hbase.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: master + app.kubernetes.io/instance: {{ .Release.Name | quote }} + serviceName: {{ include "hbase.fullname" . }}-master + replicas: {{ .Values.hbase.master.replicas }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: master + app.kubernetes.io/instance: {{ .Release.Name | quote }} + spec: + affinity: + podAntiAffinity: + {{- if eq .Values.antiAffinity "hard" }} + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: master + {{- include "hbase.labels" . | nindent 16 }} + {{- else if eq .Values.antiAffinity "soft" }} + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 5 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: master + {{- include "hbase.labels" . | nindent 18 }} + {{- end }} + {{- if .Values.prometheus.enabled }} + initContainers: + - name: inject-exporter-jar + image: "{{ .Values.prometheus.image }}:{{ .Values.prometheus.imageTag }}" + env: + - name: SHARED_VOLUME_PATH + value: /jmx-exporter + volumeMounts: + - mountPath: /jmx-exporter + name: jmx-exporter + {{- end }} + containers: + - name: master + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - "/bin/bash" + - "/tmp/hbase-config/bootstrap.sh" + - "-d" + - "master" + env: + - name: HADOOP_USER_NAME + value: {{ .Values.conf.hadoopUserName }} + resources: +{{ toYaml .Values.hbase.master.resources | indent 10 }} + startupProbe: + httpGet: + path: / + port: 16010 + failureThreshold: 60 + periodSeconds: 5 + timeoutSeconds: 2 + readinessProbe: + httpGet: + path: / + port: 16010 + initialDelaySeconds: 90 + timeoutSeconds: 2 + livenessProbe: + httpGet: + path: / + port: 16010 + initialDelaySeconds: 10 + timeoutSeconds: 2 + volumeMounts: + - name: hbase-config + mountPath: /tmp/hbase-config + {{- if .Values.prometheus.enabled }} + - name: jmx-exporter + mountPath: /jmx-exporter + - name: exporter-config + mountPath: /etc/exporter + {{- end }} + volumes: + - name: hbase-config + configMap: + name: {{ include "hbase.fullname" . }} + {{- if .Values.prometheus.enabled }} + - name: jmx-exporter + emptyDir: {} + - name: exporter-config + configMap: + name: {{ include "hbase.fullname" . }}-exporter + {{- end }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-master-svc.yaml b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-master-svc.yaml new file mode 100644 index 0000000..ed00f5c --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-master-svc.yaml @@ -0,0 +1,29 @@ +#Headless service +apiVersion: v1 +kind: Service +metadata: + name: {{ include "hbase.fullname" . }}-master + labels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: master + {{- include "hbase.labels" . 
| nindent 4 }} +spec: + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: thrift + port: 9090 + protocol: TCP + - name: thrift-ui + port: 9095 + protocol: TCP + - name: hbase-master + port: 16000 + protocol: TCP + - name: hbase-ui + port: 16010 + protocol: TCP + selector: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: master + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-master-thrift-exporter-service.yaml b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-master-thrift-exporter-service.yaml new file mode 100644 index 0000000..0f05658 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-master-thrift-exporter-service.yaml @@ -0,0 +1,19 @@ +{{- if .Values.prometheus.enabled -}} +kind: Service +apiVersion: v1 +metadata: + name: {{ include "hbase.fullname" . }}-master-thrift-metrics + labels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: thrift + {{- include "hbase.labels" . | nindent 4 }} +spec: + selector: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: master + app.kubernetes.io/instance: {{ .Release.Name }} + ports: + - port: {{ .Values.prometheus.thriftPort }} + name: metrics + targetPort: {{ .Values.prometheus.thriftPort }} +{{- end -}} \ No newline at end of file diff --git a/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-region-exporter-service.yaml b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-region-exporter-service.yaml new file mode 100644 index 0000000..d62317a --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-region-exporter-service.yaml @@ -0,0 +1,19 @@ +{{- if .Values.prometheus.enabled -}} +kind: Service +apiVersion: v1 +metadata: + name: {{ include "hbase.fullname" . }}-region-metrics + labels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: regionserver + {{- include "hbase.labels" . | nindent 4 }} +spec: + selector: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: regionserver + app.kubernetes.io/instance: {{ .Release.Name }} + ports: + - port: {{ .Values.prometheus.port }} + name: metrics + targetPort: {{ .Values.prometheus.port }} +{{- end -}} \ No newline at end of file diff --git a/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-region-svc.yaml b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-region-svc.yaml new file mode 100644 index 0000000..b0c6535 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-region-svc.yaml @@ -0,0 +1,23 @@ +# A headless service to create DNS records +apiVersion: v1 +kind: Service +metadata: + name: {{ include "hbase.fullname" . }}-regionserver + labels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: regionserver + {{- include "hbase.labels" . | nindent 4 }} +spec: + ports: + - name: hbase-regionserver + port: 16020 + protocol: TCP + - name: hbase-ui + port: 16030 + protocol: TCP + clusterIP: None + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: {{ include "hbase.name" . 
}} + app.kubernetes.io/component: regionserver + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-regionserver-statefulset.yaml b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-regionserver-statefulset.yaml new file mode 100644 index 0000000..47f9234 --- /dev/null +++ b/charts/evi-hbase-3.0.3/charts/hbase/templates/hbase-regionserver-statefulset.yaml @@ -0,0 +1,111 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "hbase.fullname" . }}-regionserver + annotations: + checksum/config: {{ include (print $.Template.BasePath "/hbase-configmap.yaml") . | sha256sum }} + labels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: regionserver + {{- include "hbase.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: regionserver + app.kubernetes.io/instance: {{ .Release.Name | quote }} + serviceName: {{ include "hbase.fullname" . }}-regionserver + replicas: {{ .Values.hbase.regionServer.replicas }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: regionserver + app.kubernetes.io/instance: {{ .Release.Name | quote }} + spec: + affinity: + podAntiAffinity: + {{- if eq .Values.antiAffinity "hard" }} + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: regionserver + {{- include "hbase.labels" . | nindent 16 }} + {{- else if eq .Values.antiAffinity "soft" }} + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 5 + podAffinityTerm: + topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ include "hbase.name" . }} + app.kubernetes.io/component: regionserver + {{- include "hbase.labels" . | nindent 18 }} + {{- end }} + {{- if .Values.prometheus.enabled }} + initContainers: + - name: inject-exporter-jar + image: "{{ .Values.prometheus.image }}:{{ .Values.prometheus.imageTag }}" + env: + - name: SHARED_VOLUME_PATH + value: /jmx-exporter + volumeMounts: + - mountPath: /jmx-exporter + name: jmx-exporter + {{- end }} + containers: + - name: regionserver + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - "/bin/bash" + - "/tmp/hbase-config/bootstrap.sh" + - "-d" + - "regionserver" + env: + - name: HADOOP_USER_NAME + value: {{ .Values.conf.hadoopUserName }} + resources: +{{ toYaml .Values.hbase.regionServer.resources | indent 10 }} + startupProbe: + httpGet: + path: / + port: 16030 + initialDelaySeconds: 5 + timeoutSeconds: 2 + failureThreshold: 60 + periodSeconds: 5 + readinessProbe: + httpGet: + path: / + port: 16030 + initialDelaySeconds: 5 + timeoutSeconds: 2 + livenessProbe: + httpGet: + path: / + port: 16030 + initialDelaySeconds: 10 + timeoutSeconds: 2 + volumeMounts: + - name: hbase-config + mountPath: /tmp/hbase-config + {{- if .Values.prometheus.enabled }} + - name: jmx-exporter + mountPath: /jmx-exporter + - name: exporter-config + mountPath: /etc/exporter + {{- end }} + volumes: + - name: hbase-config + configMap: + name: {{ include "hbase.fullname" . }} + {{- if .Values.prometheus.enabled }} + - name: jmx-exporter + emptyDir: {} + - name: exporter-config + configMap: + name: {{ include "hbase.fullname" . 
}}-exporter
+  {{- end }}
diff --git a/charts/evi-hbase-3.0.3/charts/hbase/values.yaml b/charts/evi-hbase-3.0.3/charts/hbase/values.yaml
new file mode 100644
index 0000000..25599fc
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/charts/hbase/values.yaml
@@ -0,0 +1,98 @@
+# Default values for hbase
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# The base HBase image to use for all components.
+# See this repo for image build details: https://github.com/Comcast/kube-yarn/tree/master/image
+image:
+  repository: ghcr.io/fleeksoft/hbase/hbase-base
+  tag: 2.4.13.2
+  pullPolicy: IfNotPresent
+
+# The version of the HBase libraries being used in the image.
+hbaseVersion: 2.4.13
+
+# Select antiAffinity as either hard or soft, default is 'soft'
+# 'hard' is recommended for higher availability
+antiAffinity: "soft"
+
+conf:
+  hadoopUserName: root
+  hbaseSite:
+    hbase.rootdir: # default is "hdfs://{{ .Release.Name }}-hadoop-hdfs-nn:9000/hbase"
+    hbase.zookeeper.quorum: # default is "{{ .Release.Name }}-zookeeper:2181"
+
+hbase:
+  master:
+    replicas: 2
+    resources:
+      requests:
+        memory: "256Mi"
+        cpu: "10m"
+      limits:
+        memory: "2048Mi"
+        cpu: "1000m"
+  regionServer:
+    replicas: 2
+    resources:
+      requests:
+        memory: "256Mi"
+        cpu: "10m"
+      limits:
+        memory: "2048Mi"
+        cpu: "1000m"
+
+## Prometheus Exporter Configuration
+## ref: https://prometheus.io/docs/instrumenting/exporters/
+prometheus:
+  ## JMX Exporter Configuration
+  ## ref: https://github.com/prometheus/jmx_exporter
+  ## ref: https://github.com/chuyeow/prometheus-jmx-exporter-kubernetes
+  enabled: true
+  port: 9709
+  # Each hbase-master pod runs two JVM processes: HBase and the Thrift API.
+  thriftPort: 5557
+  resources: {}
+  image: spdigital/prometheus-jmx-exporter-kubernetes
+  imageTag: "0.3.1"
+  # prometheus jmx_exporter config https://github.com/prometheus/jmx_exporter
+  config:
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    rules:
+    - pattern: Hadoop<service=HBase, name=RegionServer, sub=Regions><>Namespace_([^\W_]+)_table_([^\W_]+)_region_([^\W_]+)_metric_(\w+)
+      name: HBase_metric_$4
+      labels:
+        namespace: "$1"
+        table: "$2"
+        region: "$3"
+    - pattern: Hadoop<service=(\w+), name=(\w+), sub=(\w+)><>([\w._]+)
+      name: hadoop_$1_$4
+      labels:
+        "name": "$2"
+        "sub": "$3"
+    - pattern: .+
+
+# Manage HDFS from dependencies
+hadoop:
+  enabled: true
+  hdfs:
+    dataNode:
+      replicas: 3
+  persistence:
+    nameNode:
+      enabled: true
+      storageClass: "do-block-storage"
+      size: 30Gi
+    dataNode:
+      enabled: true
+      storageClass: "do-block-storage"
+      size: 50Gi
+
+# Manage ZooKeeper from dependencies
+zookeeper:
+  enabled: true
+  replicaCount: 3
+  maxClientCnxns: 1000
+  persistence:
+    enabled: false
diff --git a/charts/evi-hbase-3.0.3/templates/pv-dn.yaml b/charts/evi-hbase-3.0.3/templates/pv-dn.yaml
new file mode 100644
index 0000000..13d3eb8
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/templates/pv-dn.yaml
@@ -0,0 +1,42 @@
+# INTEL CONFIDENTIAL
+#
+# Copyright (C) 2022 Intel Corporation.
+#
+# This software and the related documents are Intel copyrighted materials, and your use of
+# them is governed by the express license under which they were provided to you (License).
+# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute,
+# disclose or transmit this software or the related documents without Intel's prior written permission.
+#
+# This software and the related documents are provided as is, with no express or implied warranties,
+# other than those that are expressly stated in the License.
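+
+# This template renders one local PersistentVolume per entry under
+# .Values.persistance.datanode, pins each PV to a node via nodeAffinity,
+# and pre-binds it (claimRef) to the PVC that the hadoop subchart's
+# datanode StatefulSet requests. A minimal sketch of the values it
+# consumes; the paths and hostnames below are illustrative placeholders,
+# not shipped defaults:
+#
+#   persistance:
+#     datanodeSize: 50Gi
+#     datanode:
+#       pv0:
+#         path: /mnt/disks/hdfs-dn0    # hypothetical host directory
+#         hostname: worker-1           # hypothetical node name
+#       pv1:
+#         path: /mnt/disks/hdfs-dn1
+#         hostname: worker-2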
+ +--- +{{- range $key, $val := .Values.persistance.datanode }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: evi-hdfs-dn-{{ $key }} + labels: + app: datanode +spec: + capacity: + storage: {{ required "A valid persistance.datanodeSize is required!" $.Values.persistance.datanodeSize }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Delete + claimRef: + namespace: {{ $.Values.namespace.value }} + name: dfs-{{ $.Release.Name }}-hadoop-hdfs-dn-{{ $key | trunc -1 }} + storageClassName: {{ $.Values.storageClass.name }} + local: + path: {{ required "A valid persistance.datanode.pv.path is required!" $val.path }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - {{ required "A valid persistance.datanode.pv.hostname is required!" $val.hostname }} +{{- end }} diff --git a/charts/evi-hbase-3.0.3/templates/pv-nn.yaml b/charts/evi-hbase-3.0.3/templates/pv-nn.yaml new file mode 100644 index 0000000..654dae3 --- /dev/null +++ b/charts/evi-hbase-3.0.3/templates/pv-nn.yaml @@ -0,0 +1,42 @@ +# INTEL CONFIDENTIAL +# +# Copyright (C) 2022 Intel Corporation. +# +# This software and the related documents are Intel copyrighted materials, and your use of +# them is governed by the express license under which they were provided to you (License). +# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute, +# disclose or transmit this software or the related documents without Intel's prior written permission. +# +# This software and the related documents are provided as is, with no express or implied warranties, +# other than those that are expressly stated in the License. + +--- +{{- range $key, $val := .Values.persistance.namenode }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: evi-hdfs-nn-{{ $key }} + labels: + app: namenode +spec: + capacity: + storage: {{ required "A valid persistance.namenodeSize is required!" $.Values.persistance.namenodeSize }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Delete + claimRef: + namespace: {{ $.Values.namespace.value }} + name: dfs-{{ $.Release.Name }}-hadoop-hdfs-nn-{{ $key | trunc -1 }} + storageClassName: {{ $.Values.storageClass.name }} + local: + path: {{ required "A valid persistance.namenode.pv.path is required!" $val.path }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - {{ required "A valid persistance.namenode.pv.hostname is required!" $val.hostname }} +{{- end }} diff --git a/charts/evi-hbase-3.0.3/templates/pv-zookeeper.yaml b/charts/evi-hbase-3.0.3/templates/pv-zookeeper.yaml new file mode 100644 index 0000000..b721366 --- /dev/null +++ b/charts/evi-hbase-3.0.3/templates/pv-zookeeper.yaml @@ -0,0 +1,39 @@ +# INTEL CONFIDENTIAL +# +# Copyright (C) 2022 Intel Corporation. +# +# This software and the related documents are Intel copyrighted materials, and your use of +# them is governed by the express license under which they were provided to you (License). +# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute, +# disclose or transmit this software or the related documents without Intel's prior written permission. +# +# This software and the related documents are provided as is, with no express or implied warranties, +# other than those that are expressly stated in the License. 
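+
+# This template pre-binds a single local PersistentVolume to the PVC that
+# the bitnami zookeeper subchart creates for its first replica
+# (data-<release>-zookeeper-0); the top-level values pin
+# zookeeper.replicaCount to 1, which is all this one PV covers. A sketch
+# of the values consumed; path and hostname are illustrative placeholders:
+#
+#   persistance:
+#     zookeeperSize: 25Gi
+#     zookeeper:
+#       path: /mnt/disks/zookeeper   # hypothetical host directory
+#       hostname: worker-1           # hypothetical node name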
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: evi-hbase-zookeeper-pv
+  labels:
+    app: zookeeper
+spec:
+  capacity:
+    storage: {{ required "A valid persistance.zookeeperSize is required!" .Values.persistance.zookeeperSize }}
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Delete
+  claimRef:
+    namespace: {{ .Values.namespace.value }}
+    name: data-{{ .Release.Name }}-zookeeper-0
+  storageClassName: {{ .Values.storageClass.name }}
+  local:
+    path: {{ required "A valid persistance.zookeeper.path is required!" .Values.persistance.zookeeper.path }}
+  nodeAffinity:
+    required:
+      nodeSelectorTerms:
+        - matchExpressions:
+            - key: kubernetes.io/hostname
+              operator: In
+              values:
+                - {{ required "A valid persistance.zookeeper.hostname is required!" .Values.persistance.zookeeper.hostname }}
diff --git a/charts/evi-hbase-3.0.3/templates/storageclass.yaml b/charts/evi-hbase-3.0.3/templates/storageclass.yaml
new file mode 100644
index 0000000..4d23763
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/templates/storageclass.yaml
@@ -0,0 +1,22 @@
+# INTEL CONFIDENTIAL
+#
+# Copyright (C) 2022 Intel Corporation.
+#
+# This software and the related documents are Intel copyrighted materials, and your use of
+# them is governed by the express license under which they were provided to you (License).
+# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute,
+# disclose or transmit this software or the related documents without Intel's prior written permission.
+#
+# This software and the related documents are provided as is, with no express or implied warranties,
+# other than those that are expressly stated in the License.
+
+---
+{{- if .Values.storageClass.create -}}
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: {{ .Values.storageClass.name }}
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
+reclaimPolicy: Delete
+{{- end -}}
diff --git a/charts/evi-hbase-3.0.3/values.yaml b/charts/evi-hbase-3.0.3/values.yaml
new file mode 100644
index 0000000..3fb70fa
--- /dev/null
+++ b/charts/evi-hbase-3.0.3/values.yaml
@@ -0,0 +1,197 @@
+# INTEL CONFIDENTIAL
+#
+# Copyright (C) 2022 Intel Corporation.
+#
+# This software and the related documents are Intel copyrighted materials, and your use of
+# them is governed by the express license under which they were provided to you (License).
+# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute,
+# disclose or transmit this software or the related documents without Intel's prior written permission.
+#
+# This software and the related documents are provided as is, with no express or implied warranties,
+# other than those that are expressly stated in the License.
+
+---
+# Default values for evi-hbase.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
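+#
+# Every persistance.* value below must be filled in before installing;
+# the PV templates mark them as required. A sketch of an install, with
+# release name, namespace, and values file chosen only for illustration:
+#
+#   helm install evi-hbase ./charts/evi-hbase-3.0.3 \
+#     --namespace dev --create-namespace \
+#     --values my-values.yaml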
+
+persistance:
+  # persistance.*Size (Required) Capacity of each PV
+  namenodeSize:
+  datanodeSize:
+  zookeeperSize:
+  # persistance.*.path (Required) Host directory backing each PV
+  # persistance.*.hostname (Required) Node hostname used for PV node affinity
+  namenode:
+    pv0:
+      path:
+      hostname:
+  datanode:
+    pv0:
+      path:
+      hostname:
+    pv1:
+      path:
+      hostname:
+  zookeeper:
+    path:
+    hostname:
+
+namespace:
+  create: false
+  value: dev
+  istioInjection:
+    create: false
+
+storageClass:
+  # storageClass.create defines whether to create the storage class
+  create: true
+  name: evi-local-storage-hdfs
+
+# ----------------------------------------- settings for hbase chart
+hbase:
+  enabled: true
+
+  image:
+    repository: ghcr.io/fleeksoft/hbase/hbase-base
+    tag: 2.4.13.2
+    pullPolicy: IfNotPresent
+
+  # The version of the HBase libraries being used in the image.
+  hbaseVersion: 2.4.13
+
+  # Select antiAffinity as either hard or soft, default is 'soft'
+  # 'hard' is recommended for higher availability
+  antiAffinity: "soft"
+
+  conf:
+    hadoopUserName: root
+    hbaseSite:
+      hbase.rootdir: # default is "hdfs://{{ .Release.Name }}-hadoop-hdfs-nn:9000/hbase"
+      hbase.zookeeper.quorum: # default is "{{ .Release.Name }}-zookeeper:2181"
+
+  hbase:
+    master:
+      replicas: 2
+      resources:
+        requests:
+          memory: "8192Mi"
+          cpu: "4"
+        limits:
+          memory: "16Gi"
+          cpu: "16"
+    regionServer:
+      replicas: 2
+      resources:
+        requests:
+          memory: "8192Mi"
+          cpu: "4"
+        limits:
+          memory: "60Gi" # This limit can be adjusted to match the size of the data volume
+          cpu: "16"
+
+  prometheus:
+    enabled: false
+    port: 9709
+    # Each hbase-master pod runs two JVM processes: HBase and the Thrift API.
+    thriftPort: 5557
+
+  # ----------------------------------------- settings for hadoop chart
+  hadoop:
+    enabled: true
+
+    image:
+      repository: ghcr.io/fleeksoft/hbase/hdfs
+      tag: 3.3.3.2
+      pullPolicy: IfNotPresent
+
+    # The version of the hadoop libraries being used in the image.
+    hadoopVersion: 3.3.3
+    logLevel: INFO
+
+    # Select antiAffinity as either hard or soft, default is soft
+    antiAffinity: "soft"
+
+    persistence:
+      nameNode:
+        enabled: true
+        storageClass: evi-local-storage-hdfs
+        accessMode: ReadWriteOnce
+        size: 30Gi
+      dataNode:
+        enabled: true
+        storageClass: evi-local-storage-hdfs
+        accessMode: ReadWriteOnce
+        size: 50Gi
+
+    # ----------------------------------------- settings for hadoop.hdfs
+    hdfs:
+      nameNode:
+        pdbMinAvailable: 1
+        resources:
+          requests:
+            memory: "256Mi"
+            cpu: "10m"
+          limits:
+            memory: "3Gi"
+            cpu: "1000m"
+      dataNode:
+        replicas: 2
+        pdbMinAvailable: 2
+        resources:
+          requests:
+            memory: "256Mi"
+            cpu: "10m"
+          limits:
+            memory: "4096Mi"
+            cpu: "1000m"
+      webhdfs:
+        enabled: true
+
+    # ----------------------------------------- settings for hadoop.yarn
+    yarn:
+      resourceManager:
+        pdbMinAvailable: 1
+        resources:
+          requests:
+            memory: "256Mi"
+            cpu: "10m"
+          limits:
+            memory: "3072Mi"
+            cpu: "2000m"
+
+      nodeManager:
+        pdbMinAvailable: 1
+
+        # The number of YARN NodeManager instances.
+        replicas: 1
+
+        # Create statefulsets in parallel (K8S 1.7+)
+        parallelCreate: false
+
+        # CPU and memory resources allocated to each NodeManager pod.
+        # This should be tuned to fit your workload.
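+        # As an illustration only (the numbers below are hypothetical, not
+        # recommendations), an override raising NodeManager capacity could
+        # look like:
+        #   resources:
+        #     requests: {memory: "1024Mi", cpu: "500m"}
+        #     limits: {memory: "4096Mi", cpu: "2000m"}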
+ resources: + requests: + memory: "256Mi" + cpu: "10m" + limits: + memory: "2048Mi" + cpu: "1000m" + + # ----------------------------------------- settings for zookeeper chart + zookeeper: + enabled: true + replicaCount: 1 + persistence: + enabled: true + existingClaim: "" + storageClass: evi-local-storage-hdfs + accessModes: + - ReadWriteOnce + size: 25Gi + resources: + limits: {} + requests: + memory: 512Mi + cpu: 250m
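+
+# To sanity-check the rendered PVs and their claimRef bindings before an
+# install, a dry render can be piped through grep (standard helm and grep
+# usage, shown only as an example):
+#
+#   helm template evi-hbase ./charts/evi-hbase-3.0.3 \
+#     --values my-values.yaml | grep -B1 -A3 claimRef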