Enable and test NuoDocker topology discovery (#367)
I am adding new logic to NuoDocker that sets admin labels, process labels, and admin affinity based on the topology labels of the Kubernetes node. This functionality requires new permissions for the Service Account (gated behind the `nuodb.addClusterRoleBinding` chart value). It also cannot be easily tested in the existing NuoDocker test suite, so an integration test is added to this repo.
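
For orientation, here is a rough sketch — not NuoDocker's actual implementation, which lives outside this repo — of the kind of node lookup the new read-only Nodes permission enables, using client-go and the well-known topology labels (the package and helper names are illustrative):

package topologyexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// nodeTopologyLabels reads the standard topology labels off a node.
// The ClusterRole added in this commit grants exactly the "get" verb
// on nodes that this call requires.
func nodeTopologyLabels(ctx context.Context, client kubernetes.Interface, nodeName string) (zone, region string, err error) {
	node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return "", "", err
	}
	return node.Labels["topology.kubernetes.io/zone"], node.Labels["topology.kubernetes.io/region"], nil
}
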
kontaras authored Jul 29, 2024
1 parent eb9a8f3 commit d9eaa10
Showing 9 changed files with 202 additions and 2 deletions.
1 change: 1 addition & 0 deletions stable/admin/README.md
@@ -108,6 +108,7 @@ The following tables list the configurable parameters for the `nuodb` option:
| `serviceAccount` | The name of the service account used by NuoDB Pods | `nuodb` |
| `addServiceAccount` | Whether to create a new service account for NuoDB containers | `true` |
| `addRoleBinding` | Whether to add role and role-binding giving `serviceAccount` access to Kubernetes APIs (Pods, PersistentVolumes, PersistentVolumeClaims, StatefulSets) | `true` |
| `addClusterRoleBinding` | If `addRoleBinding` is enabled, also add a cluster role and cluster role-binding giving `serviceAccount` access to cluster-wide Kubernetes APIs (read access to Nodes) | `true` |

The `registry` option can be used to connect to private image repositories, such as Artifactory.

14 changes: 14 additions & 0 deletions stable/admin/templates/role.yaml
@@ -33,4 +33,18 @@ rules:
- get
- create
- update
{{- if eq (include "defaulttrue" .Values.nuodb.addClusterRoleBinding) "true" }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nuodb-kube-inspector
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
{{- end }}
{{- end }}
15 changes: 15 additions & 0 deletions stable/admin/templates/rolebinding.yaml
@@ -10,4 +10,19 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ default "nuodb" .Values.nuodb.serviceAccount }}
{{- if eq (include "defaulttrue" .Values.nuodb.addClusterRoleBinding) "true" }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nuodb-kube-inspector
roleRef:
kind: ClusterRole
name: nuodb-kube-inspector
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: {{ default "nuodb" .Values.nuodb.serviceAccount }}
namespace: {{ default .Release.Namespace .Values.admin.namespace }}
{{- end }}
{{- end }}
55 changes: 55 additions & 0 deletions test/integration/template_admin_test.go
@@ -1554,3 +1554,58 @@ func TestAdminLicenseFromSecret(t *testing.T) {
assert.Contains(t, obj.Spec.Template.Spec.Containers[0].Args, "licenseFilePath=/etc/nuodb/license/nuodb.lic")
}
}

func TestClusterRole(t *testing.T) {
// Path to the helm chart we will test
helmChartPath := testlib.ADMIN_HELM_CHART_PATH

t.Run("testEnabled", func(t *testing.T) {
output := helm.RenderTemplate(t, &helm.Options{}, helmChartPath,
"release-name", []string{"templates/role.yaml", "templates/rolebinding.yaml"})

// Verify that nuodb-kube-inspector ClusterRole is created
for _, obj := range testlib.SplitAndRenderClusterRole(t, output, 1) {
assert.Equal(t, "nuodb-kube-inspector", obj.Name)

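// The inspector role must grant at least read ("get") access to nodes.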
for _, rule := range obj.Rules {
isNode := false
for _, resource := range rule.Resources {
if resource == "nodes" {
isNode = true
break
}
}
if !isNode {
continue
}

assert.Contains(t, rule.Verbs, "get")
}
}

// Verify that nuodb-kube-inspector ClusterRoleBinding is created
for _, obj := range testlib.SplitAndRenderClusterRoleBinding(t, output, 1) {
assert.Equal(t, "nuodb-kube-inspector", obj.Name)
// Verify that it is binding to the correct role
assert.Equal(t, "ClusterRole", obj.RoleRef.Kind)
assert.Equal(t, "nuodb-kube-inspector", obj.RoleRef.Name)
// Verify that it is binding to the correct user
subjects := obj.Subjects
assert.Equal(t, 1, len(subjects))
assert.Equal(t, "ServiceAccount", subjects[0].Kind)
assert.Equal(t, "nuodb", subjects[0].Name)
}
})

t.Run("testDisabled", func(t *testing.T) {
output := helm.RenderTemplate(t, &helm.Options{SetValues: map[string]string{
"nuodb.addClusterRoleBinding": "false",
}}, helmChartPath, "release-name", []string{"templates/role.yaml", "templates/rolebinding.yaml"})

// Verify that a ClusterRole is not created
assert.Empty(t, testlib.SplitAndRenderClusterRole(t, output, 0))

// Verify that a ClusterRoleBinding is not created
assert.Empty(t, testlib.SplitAndRenderClusterRoleBinding(t, output, 0))
})
}
58 changes: 57 additions & 1 deletion test/minikube/minikube_kaa_additions_test.go
@@ -1,3 +1,4 @@
//go:build short
// +build short

package minikube
@@ -18,6 +19,7 @@ import (
"github.com/ghodss/yaml"

"github.com/gruntwork-io/terratest/modules/helm"
"github.com/gruntwork-io/terratest/modules/k8s"
rbacv1 "k8s.io/api/rbac/v1"
)

@@ -118,4 +120,58 @@ func TestKaaRolebindingDisabled(t *testing.T) {
require.True(t, len(config.StatefulSets) == 0)
require.True(t, len(config.Volumes) == 0)
require.True(t, len(config.Pods) == 0)
}

func TestKubernetesTopologyDiscover(t *testing.T) {
testlib.SkipTestOnNuoDBVersionCondition(t, "< 6.0.3")
testlib.AwaitTillerUp(t)
defer testlib.VerifyTeardown(t)

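// Ensure every node has the standard topology labels, recording the
// per-node values for the assertions below.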
currentRegions := testlib.LabelNodesIfMissing(t, "topology.kubernetes.io/region", "test-region")
currentZones := testlib.LabelNodesIfMissing(t, "topology.kubernetes.io/zone", "test-zone")

defer testlib.Teardown(testlib.TEARDOWN_ADMIN)

options := helm.Options{}
helmChartReleaseName, namespaceName := testlib.StartAdmin(t, &options, 1, "")
kubectlOptions := k8s.NewKubectlOptions("", "", namespaceName)
options.KubectlOptions = kubectlOptions
admin := fmt.Sprintf("%s-nuodb-cluster0", helmChartReleaseName)
admin0 := fmt.Sprintf("%s-0", admin)

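// The admin process should label itself with its node plus that node's zone and region.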
adminPod := testlib.GetPod(t, namespaceName, admin0)
adminNode := adminPod.Spec.NodeName
testlib.VerifyAdminLabels(t, namespaceName, admin0,
map[string]string{
"node": adminNode,
"zone": currentZones[adminNode],
"region": currentRegions[adminNode],
})

dbName := "db"
options.SetValues = map[string]string{
"database.sm.resources.requests.cpu": testlib.MINIMAL_VIABLE_ENGINE_CPU,
"database.sm.resources.requests.memory": testlib.MINIMAL_VIABLE_ENGINE_MEMORY,
"database.te.resources.requests.cpu": testlib.MINIMAL_VIABLE_ENGINE_CPU,
"database.te.resources.requests.memory": testlib.MINIMAL_VIABLE_ENGINE_MEMORY,
"database.sm.noHotCopy.replicas": "1",
"database.sm.hotCopy.replicas": "0",
"database.name": dbName,
}

defer testlib.Teardown(testlib.TEARDOWN_DATABASE)

testlib.StartDatabase(t, namespaceName, admin0, &options)

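// Each database process should report node/zone/region labels matching
// the node its pod runs on, and should have logged the admin-affinity lookup.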
processes, err := testlib.GetDatabaseProcessesE(t, namespaceName, admin0, dbName)
require.NoError(t, err)
for _, process := range processes {
pod := testlib.GetPod(t, namespaceName, process.Hostname)
node := pod.Spec.NodeName
require.Equal(t, node, process.Labels["node"])
require.Equal(t, currentZones[node], process.Labels["zone"])
require.Equal(t, currentRegions[node], process.Labels["region"])
require.Equal(t, 1, testlib.GetStringOccurrenceInLog(t, namespaceName, process.Hostname,
"Looking for admin with labels matching: node zone region", &v12.PodLogOptions{}))
}
}
1 change: 1 addition & 0 deletions test/testlib/NuoDBServer.go
Expand Up @@ -36,6 +36,7 @@ type NuoDBServer struct {
PeerMemberState string `json:"peerMemberState"`
PeerState string `json:"peerState"`
Version string `json:"version"`
Labels map[string]string `json:"labels"`
}
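
// For illustration, a hypothetical round trip through the new field — the JSON
// shape is inferred from the struct tags above, assuming the admin returns an
// object keyed by server id (all values here are made up):
//
//	raw := `{"admin-0": {"version": "6.0.3", "labels": {"zone": "test-zone"}}}`
//	err, servers := UnmarshalDomainServers(raw)
//	// servers["admin-0"].Labels["zone"] == "test-zone"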

func UnmarshalDomainServers(s string) (err error, servers map[string]NuoDBServer) {
40 changes: 39 additions & 1 deletion test/testlib/minikube_utilities.go
@@ -453,7 +453,7 @@ func AwaitNrReplicasScheduled(t *testing.T, namespace string, expectedName strin
if strings.Contains(pod.Name, expectedName) {
//ignore all completed pods
if pod.Status.Phase == corev1.PodSucceeded {
continue
}

if arePodConditionsMet(&pod, corev1.PodScheduled, corev1.ConditionTrue) {
@@ -1287,6 +1287,32 @@ func LabelNodes(t *testing.T, namespaceName string, labelName string, labelValue
}
}

// LabelNodesIfMissing checks that every Node in the cluster has a labelName label; any node
// missing it gets that label set to defaultValue. It returns a map from node name to the label value encountered.
func LabelNodesIfMissing(t *testing.T, labelName string, defaultValue string) map[string]string {
options := k8s.NewKubectlOptions("", "", "")

labelString := fmt.Sprintf("%s=%s", labelName, defaultValue)
nodes := k8s.GetNodes(t, options)
require.True(t, len(nodes) > 0)

values := make(map[string]string)

for _, node := range nodes {
currentValue, present := node.Labels[labelName]
if present {
values[node.Name] = currentValue
} else {
err := k8s.RunKubectlE(t, options, "label", "node", node.Name, labelString)
require.NoError(t, err, "Labeling node %s with '%s' failed", node.Name, labelString)

values[node.Name] = defaultValue
}
}

return values
}

func GetNodesInternalAddresses(t *testing.T) map[string]string {
addresses := make(map[string]string)
options := k8s.NewKubectlOptions("", "", "")
@@ -1302,6 +1328,18 @@ func GetNodesInternalAddresses(t *testing.T) map[string]string {
return addresses
}

// GetNodeNames fetches the names of all nodes in the Kubernetes cluster.
func GetNodeNames(t *testing.T) []string {
	var names []string
	options := k8s.NewKubectlOptions("", "", "")
	nodes := k8s.GetNodes(t, options)
	require.True(t, len(nodes) > 0)
	for _, node := range nodes {
		names = append(names, node.Name)
	}
	return names
}

func GetNamespaces(t *testing.T) []corev1.Namespace {
clientset, err := k8s.GetKubernetesClientE(t)
require.NoError(t, err)
12 changes: 12 additions & 0 deletions test/testlib/nuodb_admin_utilities.go
@@ -303,3 +303,15 @@ func AwaitServerState(t *testing.T, namespace string, adminPod string,
return false
}, timeout)
}

// VerifyAdminLabels checks that adminPod in namespace reports all the label values in expectedLabelValues.
func VerifyAdminLabels(t *testing.T, namespace string, adminPod string, expectedLabelValues map[string]string) {
admins, err := GetDomainServersE(t, namespace, adminPod)
require.NoError(t, err)
require.Contains(t, admins, adminPod)

labels := admins[adminPod].Labels
for label, expectedValue := range expectedLabelValues {
require.Equal(t, expectedValue, labels[label])
}
}
8 changes: 8 additions & 0 deletions test/testlib/template_utilities.go
@@ -203,6 +203,14 @@ func SplitAndRenderRole(t *testing.T, output string, expectedNrObjects int) []rb
return SplitAndRender[rbacv1.Role](t, output, expectedNrObjects, "Role")
}

func SplitAndRenderClusterRole(t *testing.T, output string, expectedNrObjects int) []rbacv1.ClusterRole {
return SplitAndRender[rbacv1.ClusterRole](t, output, expectedNrObjects, "ClusterRole")
}

func SplitAndRenderClusterRoleBinding(t *testing.T, output string, expectedNrObjects int) []rbacv1.ClusterRoleBinding {
return SplitAndRender[rbacv1.ClusterRoleBinding](t, output, expectedNrObjects, "ClusterRoleBinding")
}

func SplitAndRenderServiceAccount(t *testing.T, output string, expectedNrObjects int) []v1.ServiceAccount {
return SplitAndRender[v1.ServiceAccount](t, output, expectedNrObjects, "ServiceAccount")
}
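
These wrappers delegate to the generic SplitAndRender helper defined earlier in this file (not shown in this diff). As a rough sketch of what such a helper plausibly does — splitAndRenderSketch is hypothetical, the decoding assumes terratest's helm.UnmarshalK8SYaml plus a kind probe via ghodss/yaml, and the real implementation may differ:

func splitAndRenderSketch[T any](t *testing.T, output string, expectedNrObjects int, kind string) []T {
	var objects []T
	// Rendered helm output is a stream of YAML documents separated by "---".
	for _, doc := range strings.Split(output, "\n---") {
		// Probe the document's declared kind before decoding it fully.
		var meta struct {
			Kind string `json:"kind"`
		}
		if err := yaml.Unmarshal([]byte(doc), &meta); err != nil || meta.Kind != kind {
			continue
		}
		var obj T
		helm.UnmarshalK8SYaml(t, doc, &obj)
		objects = append(objects, obj)
	}
	require.Equal(t, expectedNrObjects, len(objects))
	return objects
}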
