This repository has been archived by the owner on May 16, 2023. It is now read-only.

[elasticsearch] Add support for NetworkPolicy. #498

Merged 1 commit on Jan 4, 2021
1 change: 1 addition & 0 deletions elasticsearch/README.md
@@ -139,6 +139,7 @@ support multiple versions with minimal changes.
| `minimumMasterNodes` | The value for [discovery.zen.minimum_master_nodes][]. Should be set to `(master_eligible_nodes / 2) + 1`. Ignored in Elasticsearch versions >= 7 | `2` |
| `nameOverride` | Overrides the `clusterName` when used in the naming of resources | `""` |
| `networkHost` | Value for the [network.host Elasticsearch setting][] | `0.0.0.0` |
| `networkPolicy` | The [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) configuration to apply. See [`values.yaml`](./values.yaml) for an example | `{http.enabled: false, transport.enabled: false}` |
| `nodeAffinity` | Value for the [node affinity settings][] | `{}` |
| `nodeGroup` | This is the name that will be used for each group of nodes in the cluster. The name will be `clusterName-nodeGroup-X` , `nameOverride-nodeGroup-X` if a `nameOverride` is specified, and `fullnameOverride-X` if a `fullnameOverride` is specified | `master` |
| `nodeSelector` | Configurable [nodeSelector][] so that you can target specific nodes for your Elasticsearch cluster | `{}` |
13 changes: 13 additions & 0 deletions elasticsearch/examples/networkpolicy/Makefile
@@ -0,0 +1,13 @@
default: test

include ../../../helpers/examples.mk

RELEASE := helm-es-networkpolicy

install:
	helm upgrade --wait --timeout=600s --install $(RELEASE) --values ./values.yaml ../../

restart:
	helm upgrade --set terminationGracePeriod=121 --wait --timeout=600s --install $(RELEASE) --values ./values.yaml ../../

purge:
	helm del $(RELEASE)
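For reference, the example is driven entirely through these targets; a typical run looks like the following (assuming the shared `helpers/examples.mk` supplies the `test` goal that `default` points at):

```sh
make install   # deploy the chart with the NetworkPolicy example values
make test      # run the test goal provided by helpers/examples.mk
make purge     # delete the release again
```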
37 changes: 37 additions & 0 deletions elasticsearch/examples/networkpolicy/values.yaml
@@ -0,0 +1,37 @@
networkPolicy:
  http:
    enabled: true
    explicitNamespacesSelector:
      # Accept from namespaces with all those different rules (from whitelisted Pods)
      matchLabels:
        role: frontend
      matchExpressions:
        - {key: role, operator: In, values: [frontend]}
    additionalRules:
      - podSelector:
          matchLabels:
            role: frontend
      - podSelector:
          matchExpressions:
            - key: role
              operator: In
              values:
                - frontend
  transport:
    enabled: true
    allowExternal: true
    explicitNamespacesSelector:
      matchLabels:
        role: frontend
      matchExpressions:
        - {key: role, operator: In, values: [frontend]}
    additionalRules:
      - podSelector:
          matchLabels:
            role: frontend
      - podSelector:
          matchExpressions:
            - key: role
              operator: In
              values:
                - frontend
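With these values, HTTP access is limited to Pods carrying the chart's client label (plus anything matched by `additionalRules` or the namespace selector). A minimal sketch of a client Pod that would be admitted under the default resource name `elasticsearch-master` — the Pod name and image below are placeholders, not part of the chart:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: es-http-client          # hypothetical name, for illustration only
  labels:
    # This is the label the generated NetworkPolicy matches for HTTP access
    elasticsearch-master-http-client: "true"
spec:
  containers:
    - name: client
      image: curlimages/curl    # assumed client image
      command: ["sleep", "3600"]
```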
61 changes: 61 additions & 0 deletions elasticsearch/templates/networkpolicy.yaml
@@ -0,0 +1,61 @@
{{- if (or .Values.networkPolicy.http.enabled .Values.networkPolicy.transport.enabled) }}
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: {{ template "elasticsearch.uname" . }}
  labels:
    heritage: {{ .Release.Service | quote }}
    release: {{ .Release.Name | quote }}
    chart: "{{ .Chart.Name }}"
    app: "{{ template "elasticsearch.uname" . }}"
spec:
  podSelector:
    matchLabels:
      app: "{{ template "elasticsearch.uname" . }}"
  ingress: # Allow inbound connections

{{- if .Values.networkPolicy.http.enabled }}
    # For HTTP access
    - ports:
        - port: {{ .Values.httpPort }}
      from:
        # From authorized Pods (having the correct label)
        - podSelector:
            matchLabels:
              {{ template "elasticsearch.uname" . }}-http-client: "true"
{{- with .Values.networkPolicy.http.explicitNamespacesSelector }}
          # From authorized namespaces
          namespaceSelector:
{{ toYaml . | indent 12 }}
{{- end }}
{{- with .Values.networkPolicy.http.additionalRules }}
        # Or from custom additional rules
{{ toYaml . | indent 8 }}
{{- end }}
{{- end }}

{{- if .Values.networkPolicy.transport.enabled }}
    # For transport access
    - ports:
        - port: {{ .Values.transportPort }}
      from:
        # From authorized Pods (having the correct label)
        - podSelector:
            matchLabels:
              {{ template "elasticsearch.uname" . }}-transport-client: "true"
{{- with .Values.networkPolicy.transport.explicitNamespacesSelector }}
          # From authorized namespaces
          namespaceSelector:
{{ toYaml . | indent 12 }}
{{- end }}
{{- with .Values.networkPolicy.transport.additionalRules }}
        # Or from custom additional rules
{{ toYaml . | indent 8 }}
{{- end }}
        # Or from other Elasticsearch Pods
        - podSelector:
            matchLabels:
              app: "{{ template "elasticsearch.uname" . }}"
{{- end }}

{{- end }}
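As a sanity check on the `toYaml ... | indent` plumbing, this is roughly what the template renders with the chart defaults (`elasticsearch-master`, `httpPort: 9200`) and only `networkPolicy.http.enabled: true` — a hand-written sketch with the metadata labels omitted, not captured output:

```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: elasticsearch-master
spec:
  podSelector:
    matchLabels:
      app: "elasticsearch-master"
  ingress:
    # For HTTP access
    - ports:
        - port: 9200
      from:
        # From authorized Pods (having the correct label)
        - podSelector:
            matchLabels:
              elasticsearch-master-http-client: "true"
```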
94 changes: 94 additions & 0 deletions elasticsearch/tests/elasticsearch_test.py
@@ -1367,3 +1367,97 @@ def test_hostaliases():
    r = helm_template(config)
    hostAliases = r["statefulset"][uname]["spec"]["template"]["spec"]["hostAliases"]
    assert {"ip": "127.0.0.1", "hostnames": ["foo.local", "bar.local"]} in hostAliases


def test_network_policy():
    config = """
networkPolicy:
  http:
    enabled: true
    explicitNamespacesSelector:
      # Accept from namespaces with all those different rules (from whitelisted Pods)
      matchLabels:
        role: frontend
      matchExpressions:
        - {key: role, operator: In, values: [frontend]}
    additionalRules:
      - podSelector:
          matchLabels:
            role: frontend
      - podSelector:
          matchExpressions:
            - key: role
              operator: In
              values:
                - frontend
  transport:
    enabled: true
    allowExternal: true
    explicitNamespacesSelector:
      matchLabels:
        role: frontend
      matchExpressions:
        - {key: role, operator: In, values: [frontend]}
    additionalRules:
      - podSelector:
          matchLabels:
            role: frontend
      - podSelector:
          matchExpressions:
            - key: role
              operator: In
              values:
                - frontend
"""
    r = helm_template(config)
    ingress = r["networkpolicy"][uname]["spec"]["ingress"]
    pod_selector = r["networkpolicy"][uname]["spec"]["podSelector"]
    http = ingress[0]
    transport = ingress[1]
    assert http["from"] == [
        {
            "podSelector": {
                "matchLabels": {"elasticsearch-master-http-client": "true"}
            },
            "namespaceSelector": {
                "matchExpressions": [
                    {"key": "role", "operator": "In", "values": ["frontend"]}
                ],
                "matchLabels": {"role": "frontend"},
            },
        },
        {"podSelector": {"matchLabels": {"role": "frontend"}}},
        {
            "podSelector": {
                "matchExpressions": [
                    {"key": "role", "operator": "In", "values": ["frontend"]}
                ]
            }
        },
    ]
    assert http["ports"][0]["port"] == 9200
    assert transport["from"] == [
        {
            "podSelector": {
                "matchLabels": {"elasticsearch-master-transport-client": "true"}
            },
            "namespaceSelector": {
                "matchExpressions": [
                    {"key": "role", "operator": "In", "values": ["frontend"]}
                ],
                "matchLabels": {"role": "frontend"},
            },
        },
        {"podSelector": {"matchLabels": {"role": "frontend"}}},
        {
            "podSelector": {
                "matchExpressions": [
                    {"key": "role", "operator": "In", "values": ["frontend"]}
                ]
            }
        },
        {"podSelector": {"matchLabels": {"app": "elasticsearch-master"}}},
    ]
    assert transport["ports"][0]["port"] == 9300
    assert pod_selector == {"matchLabels": {"app": "elasticsearch-master"}}
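To exercise just this test locally, something like the following should work, assuming the repository's Python test requirements are installed:

```sh
pytest elasticsearch/tests/elasticsearch_test.py -k test_network_policy
```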
57 changes: 56 additions & 1 deletion elasticsearch/values.yaml
@@ -124,7 +124,7 @@ podSecurityPolicy:
      - secret
      - configMap
      - persistentVolumeClaim
      - emptyDir

persistence:
  enabled: true

@@ -283,6 +283,61 @@ sysctlInitContainer:

keystore: []

networkPolicy:
  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
  ## In order for a Pod to access Elasticsearch, it needs to have the following label:
  ## {{ template "uname" . }}-client: "true"
  ## Example for default configuration to access HTTP port:
  ## elasticsearch-master-http-client: "true"
  ## Example for default configuration to access transport port:
  ## elasticsearch-master-transport-client: "true"

  http:
    enabled: false
    ## If explicitNamespacesSelector is not set, or is set to {}, only client Pods in the
    ## NetworkPolicy's namespace that match all criteria can reach the DB.
    ## But sometimes we want the Pods to be accessible to clients from other namespaces;
    ## in that case, this parameter can be used to select those namespaces.
    ##
    # explicitNamespacesSelector:
    #   # Accept from namespaces with all those different rules (only from whitelisted Pods)
    #   matchLabels:
    #     role: frontend
    #   matchExpressions:
    #     - {key: role, operator: In, values: [frontend]}

    ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
    ##
    # additionalRules:
    #   - podSelector:
    #       matchLabels:
    #         role: frontend
    #   - podSelector:
    #       matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #             - frontend

  transport:
    ## Note that even when enabled, all Elasticsearch Pods can still talk to each other
    ## over the transport port.
    enabled: false
    # explicitNamespacesSelector:
    #   matchLabels:
    #     role: frontend
    #   matchExpressions:
    #     - {key: role, operator: In, values: [frontend]}
    # additionalRules:
    #   - podSelector:
    #       matchLabels:
    #         role: frontend
    #   - podSelector:
    #       matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #             - frontend

# Deprecated
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""