feat: add k8s hint integration tests under helm
pkoutsovasilis committed Dec 10, 2024
1 parent 684ef9a commit 6fa8ffb
Showing 2 changed files with 287 additions and 0 deletions.
224 changes: 224 additions & 0 deletions testing/integration/kubernetes_agent_standalone_test.go
@@ -340,6 +340,8 @@ func TestKubernetesAgentHelm(t *testing.T) {
atLeastAgentPods int
runK8SInnerTests bool
agentPodLabelSelectors []string
preAgentDeployed func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string)
postAgentDeployed func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string)
}{
{
name: "helm standalone agent default kubernetes privileged",
@@ -461,6 +463,217 @@ func TestKubernetesAgentHelm(t *testing.T) {
"name=agent-pernode-helm-agent",
},
},
{
name: "helm standalone agent unprivileged kubernetes hints",
values: map[string]any{
"agent": map[string]any{
// NOTE: Setting the version to a released one is mandatory because, when hints are
// enabled, an init container downloads the corresponding released agent archive and
// extracts the hint templates from it. If and when we embed the templates directly
// in the agent image, we can remove this.
"version": "8.16.0",
"unprivileged": true,
"image": map[string]any{
"repository": kCtx.agentImageRepo,
"tag": kCtx.agentImageTag,
"pullPolicy": "Never",
},
},
"kubernetes": map[string]any{
"enabled": true,
"hints": map[string]any{
"enabled": true,
},
},
"outputs": map[string]any{
"default": map[string]any{
"type": "ESPlainAuthAPI",
"url": kCtx.esHost,
"api_key": kCtx.esAPIKey,
},
},
},
runK8SInnerTests: true,
// - perNode Daemonset (totalK8SNodes pods)
// - clusterWide Deployment (1 agent pod)
// - ksmSharded Statefulset (1 agent pod)
atLeastAgentPods: totalK8SNodes + 1 + 1,
agentPodLabelSelectors: []string{
// name=agent-{preset}-{release}
"name=agent-pernode-helm-agent",
"name=agent-clusterwide-helm-agent",
"name=agent-ksmsharded-helm-agent",
},
postAgentDeployed: func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
r, err := os.Open("testdata/k8s.hints.redis.yaml")
require.NoError(t, err, "failed to open redis k8s test data")

redisObjs, err := k8sYAMLToObjects(bufio.NewReader(r))
require.NoError(t, err, "failed to convert redis yaml to k8s objects")

t.Cleanup(func() {
err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, redisObjs...)
require.NoError(t, err, "failed to delete redis k8s objects")
})

err = k8sCreateObjects(ctx, kCtx.client, k8sCreateOpts{wait: true, waitTimeout: 120 * time.Second, namespace: namespace}, redisObjs...)
require.NoError(t, err, "failed to create redis k8s objects")

redisPod := &corev1.Pod{}
err = kCtx.client.Resources(namespace).Get(ctx, "redis", namespace, redisPod)
require.NoError(t, err, "failed to get redis pod")

perNodePodList := &corev1.PodList{}
err = kCtx.client.Resources(namespace).List(ctx, perNodePodList, func(opt *metav1.ListOptions) {
opt.LabelSelector = "name=agent-pernode-helm-agent"
})
require.NoError(t, err, "failed to list pods with selector ", perNodePodList)
require.NotEmpty(t, perNodePodList.Items, "no pods found with selector ", perNodePodList)

var agentSameNodePod *corev1.Pod
for _, pod := range perNodePodList.Items {
if pod.Spec.NodeName != redisPod.Spec.NodeName {
continue
}
agentSameNodePod = &pod
break
}
require.NotNil(t, agentSameNodePod, "no agent pod found on the same node as redis")

var stdout, stderr bytes.Buffer
err = k8sCheckAgentStatus(ctx, kCtx.client, &stdout, &stderr, namespace, agentSameNodePod.Name, "agent", map[string]bool{
"redis/metrics": true,
})
if err != nil {
t.Errorf("failed to check agent status %s: %v", agentSameNodePod.Name, err)
t.Logf("stdout: %s\n", stdout.String())
t.Logf("stderr: %s\n", stderr.String())
t.FailNow()
}

err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, redisObjs...)
require.NoError(t, err, "failed to delete redis k8s objects")

err = k8sCheckAgentStatus(ctx, kCtx.client, &stdout, &stderr, namespace, agentSameNodePod.Name, "agent", map[string]bool{
"redis/metrics": false,
})
if err != nil {
t.Errorf("failed to check agent status %s: %v", agentSameNodePod.Name, err)
t.Logf("stdout: %s\n", stdout.String())
t.Logf("stderr: %s\n", stderr.String())
t.FailNow()
}
},
},

{
name: "helm standalone agent unprivileged kubernetes hints pre-deployed",
values: map[string]any{
"agent": map[string]any{
// NOTE: Setting the version to a released one is mandatory because, when hints are
// enabled, an init container downloads the corresponding released agent archive and
// extracts the hint templates from it. If and when we embed the templates directly
// in the agent image, we can remove this.
"version": "8.16.0",
"unprivileged": true,
"image": map[string]any{
"repository": kCtx.agentImageRepo,
"tag": kCtx.agentImageTag,
"pullPolicy": "Never",
},
},
"kubernetes": map[string]any{
"enabled": true,
"hints": map[string]any{
"enabled": true,
},
},
"outputs": map[string]any{
"default": map[string]any{
"type": "ESPlainAuthAPI",
"url": kCtx.esHost,
"api_key": kCtx.esAPIKey,
},
},
},
runK8SInnerTests: true,
// - perNode Daemonset (totalK8SNodes pods)
// - clusterWide Deployment (1 agent pod)
// - ksmSharded Statefulset (1 agent pod)
atLeastAgentPods: totalK8SNodes + 1 + 1,
agentPodLabelSelectors: []string{
// name=agent-{preset}-{release}
"name=agent-pernode-helm-agent",
"name=agent-clusterwide-helm-agent",
"name=agent-ksmsharded-helm-agent",
},
preAgentDeployed: func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
r, err := os.Open("testdata/k8s.hints.redis.yaml")
require.NoError(t, err, "failed to open redis k8s test data")

redisObjs, err := k8sYAMLToObjects(bufio.NewReader(r))
require.NoError(t, err, "failed to convert redis yaml to k8s objects")

t.Cleanup(func() {
err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, redisObjs...)
require.NoError(t, err, "failed to delete redis k8s objects")
})

err = k8sCreateObjects(ctx, kCtx.client, k8sCreateOpts{wait: true, waitTimeout: 120 * time.Second, namespace: namespace}, redisObjs...)
require.NoError(t, err, "failed to create redis k8s objects")
},
postAgentDeployed: func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
redisPod := &corev1.Pod{}
err := kCtx.client.Resources(namespace).Get(ctx, "redis", namespace, redisPod)
require.NoError(t, err, "failed to get redis pod")

// get all pods of agent preset perNode (hints enabled)
perNodePodList := &corev1.PodList{}
err = kCtx.client.Resources(namespace).List(ctx, perNodePodList, func(opt *metav1.ListOptions) {
opt.LabelSelector = "name=agent-pernode-helm-agent"
})
require.NoError(t, err, "failed to list pods with selector ", perNodePodList)
require.NotEmpty(t, perNodePodList.Items, "no pods found with selector ", perNodePodList)

// check for appropriate existence/status of redis unit in all agent pods of perNode preset
var agentSameNodeFound bool
for _, pod := range perNodePodList.Items {
redisUnitShouldExist := pod.Spec.NodeName == redisPod.Spec.NodeName
if redisUnitShouldExist {
agentSameNodeFound = redisUnitShouldExist
}

var stdout, stderr bytes.Buffer
err = k8sCheckAgentStatus(ctx, kCtx.client, &stdout, &stderr, namespace, pod.Name, "agent", map[string]bool{
"redis/metrics": redisUnitShouldExist,
})
if err != nil {
t.Errorf("failed to check agent status %s: %v", pod.Name, err)
t.Logf("stdout: %s\n", stdout.String())
t.Logf("stderr: %s\n", stderr.String())
t.FailNow()
}
}
require.True(t, agentSameNodeFound, "no agent pod found on the same node as redis")

err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, redisPod)
require.NoError(t, err, "failed to delete redis pod")

// check for non-existence of redis unit in all agent pods of perNode preset
for _, pod := range perNodePodList.Items {
var stdout, stderr bytes.Buffer
err = k8sCheckAgentStatus(ctx, kCtx.client, &stdout, &stderr, namespace, pod.Name, "agent", map[string]bool{
"redis/metrics": false,
})
if err != nil {
t.Errorf("failed to check agent status %s: %v", pod.Name, err)
t.Logf("stdout: %s\n", stdout.String())
t.Logf("stderr: %s\n", stderr.String())
t.FailNow()
}
}
},
},
}

for _, tc := range testCases {
@@ -503,6 +716,13 @@ func TestKubernetesAgentHelm(t *testing.T) {
}
})

err = k8sCreateObjects(ctx, kCtx.client, k8sCreateOpts{wait: true}, k8sNamespace)
require.NoError(t, err, "failed to create k8s namespace")

if tc.preAgentDeployed != nil {
tc.preAgentDeployed(t, ctx, kCtx, testNamespace)
}

installAction := action.NewInstall(actionConfig)
installAction.Namespace = testNamespace
installAction.CreateNamespace = true
@@ -552,6 +772,10 @@ func TestKubernetesAgentHelm(t *testing.T) {

require.GreaterOrEqual(t, healthyAgentPods, tc.atLeastAgentPods,
fmt.Sprintf("at least %d agent containers should be checked", tc.atLeastAgentPods))

if tc.postAgentDeployed != nil {
tc.postAgentDeployed(t, ctx, kCtx, testNamespace)
}
})
}
}
63 changes: 63 additions & 0 deletions testing/integration/testdata/k8s.hints.redis.yaml
@@ -0,0 +1,63 @@
apiVersion: v1
kind: Pod
metadata:
name: redis
annotations:
co.elastic.hints/package: redis
co.elastic.hints/data_streams: info
co.elastic.hints/host: '${kubernetes.pod.ip}:6379'
co.elastic.hints/info.period: 5s
labels:
k8s-app: redis
app: redis
spec:
containers:
- name: redis
image: redis:5.0.4
command:
- redis-server
- "/redis-master/redis.conf"
env:
- name: MASTER
value: "true"
ports:
- containerPort: 6379
resources:
limits:
cpu: "0.1"
volumeMounts:
- mountPath: /redis-master-data
name: data
- mountPath: /redis-master
name: config
volumes:
- name: data
emptyDir: {}
- name: config
configMap:
name: example-redis-config
items:
- key: redis-config
path: redis.conf
---
apiVersion: v1
kind: ConfigMap
metadata:
name: example-redis-config
data:
redis-config: |
maxmemory 2mb
maxmemory-policy allkeys-lru
---
apiVersion: v1
kind: Service
metadata:
name: redis
spec:
type: ClusterIP
ports:
- port: 6379
targetPort: 6379
name: client
selector:
app: redis
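
For context, the co.elastic.hints/* annotations on the Redis Pod above are what the hints-enabled agent reacts to: it matches the package: redis hint against the downloaded hint templates and starts a redis/metrics input for the annotated Pod, which is the unit id the Go test checks via k8sCheckAgentStatus. Below is a rough sketch of the kind of standalone input such a hint could render to; it is an illustration derived from the annotation values, not the exact output of the shipped redis template.

inputs:
  # Illustrative only: roughly what the redis hints template renders
  # when the agent sees the Pod annotations above.
  - name: redis
    type: redis/metrics               # unit id asserted via k8sCheckAgentStatus in the test
    use_output: default
    streams:
      - data_stream:
          dataset: redis.info         # from co.elastic.hints/data_streams: info
          type: metrics
        metricsets:
          - info
        hosts:
          - "${kubernetes.pod.ip}:6379"   # from co.elastic.hints/host
        period: 5s                        # from co.elastic.hints/info.period

The details that matter for the test are the input type (redis/metrics) and that the host and period come straight from the Pod annotations.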
