Docker image running Elastic Agent in Otel mode #5248

Merged
merged 13 commits on Aug 20, 2024
@@ -11,4 +11,9 @@ set -eo pipefail
# `./elastic-agent container --help`
#

if [[ "$ELASTIC_AGENT_OTEL" == "true" ]]
then
exec {{ .BeatName }} otel "$@"
else
exec {{ .BeatName }} container "$@"
fi
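
With this change, setting ELASTIC_AGENT_OTEL=true switches the entrypoint from the container subcommand to the otel subcommand. A minimal usage sketch of the toggle (the image reference and tag below are placeholders, not part of this PR):

# default: the entrypoint execs `elastic-agent container "$@"`
docker run --rm docker.elastic.co/elastic-agent/elastic-agent:<version>

# with the toggle set, the entrypoint execs `elastic-agent otel "$@"` instead,
# so any additional arguments are passed to the otel subcommand
docker run --rm -e ELASTIC_AGENT_OTEL=true docker.elastic.co/elastic-agent/elastic-agent:<version>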
129 changes: 126 additions & 3 deletions testing/integration/kubernetes_agent_standalone_test.go
@@ -205,7 +205,124 @@ func TestKubernetesAgentStandalone(t *testing.T) {

ctx := context.Background()

deployK8SAgent(t, ctx, client, k8sObjects, testNamespace, tc.runK8SInnerTests, testLogsBasePath)
deployK8SAgent(t, ctx, client, k8sObjects, testNamespace, tc.runK8SInnerTests, testLogsBasePath, nil)
})
}

}

func TestKubernetesAgentOtel(t *testing.T) {
info := define.Require(t, define.Requirements{
Stack: &define.Stack{},
Local: false,
Sudo: false,
OS: []define.OS{
{Type: define.Kubernetes},
},
Group: define.Kubernetes,
})

agentImage := os.Getenv("AGENT_IMAGE")
require.NotEmpty(t, agentImage, "AGENT_IMAGE must be set")

client, err := info.KubeClient()
require.NoError(t, err)
require.NotNil(t, client)

testLogsBasePath := os.Getenv("K8S_TESTS_POD_LOGS_BASE")
require.NotEmpty(t, testLogsBasePath, "K8S_TESTS_POD_LOGS_BASE must be set")

err = os.MkdirAll(filepath.Join(testLogsBasePath, t.Name()), 0755)
require.NoError(t, err, "failed to create test logs directory")

namespace := info.Namespace

esHost := os.Getenv("ELASTICSEARCH_HOST")
require.NotEmpty(t, esHost, "ELASTICSEARCH_HOST must be set")

esAPIKey, err := generateESAPIKey(info.ESClient, namespace)
require.NoError(t, err, "failed to generate ES API key")
require.NotEmpty(t, esAPIKey, "failed to generate ES API key")

renderedManifest, err := renderKustomize(agentK8SKustomize)
require.NoError(t, err, "failed to render kustomize")

testCases := []struct {
name string
envAdd []corev1.EnvVar
runK8SInnerTests bool
componentPresence map[string]bool
}{

{
"run agent in otel mode",
[]corev1.EnvVar{
{Name: "ELASTIC_AGENT_OTEL", Value: "true"},
},
false,
map[string]bool{
"beat/metrics-monitoring": false,
"filestream-monitoring": false,
"system/metrics-default": false,
Comment on lines +264 to +266

Member:

Is there actually a way you can tell we started the OTel collector?

This test would still pass if we started elastic-agent as usual, but disabled monitoring and didn't use the system/metrics integration.

@michalpristas (Contributor, Author), Aug 13, 2024:

Yes, but the scenario you mentioned is not the default config, nor any config we test with, so the chance of a false positive is really small, especially when the agent is reported healthy.
There is no way to tell the agent is in otel mode other than the logs, which with the default config are printed to stdout.

Member:

Isn't there an HTTP port or something that returns otel-specific data?

I don't feel that strongly about this.

Member:

Improving this is something we could follow up with.

},
},
}

for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
hasher := sha256.New()
hasher.Write([]byte(tc.name))
testNamespace := strings.ToLower(base64.URLEncoding.EncodeToString(hasher.Sum(nil)))
testNamespace = noSpecialCharsRegexp.ReplaceAllString(testNamespace, "")

k8sObjects, err := yamlToK8SObjects(bufio.NewReader(bytes.NewReader(renderedManifest)))
require.NoError(t, err, "failed to convert yaml to k8s objects")

adjustK8SAgentManifests(k8sObjects, testNamespace, "elastic-agent-standalone",
func(container *corev1.Container) {
// set agent image
container.Image = agentImage
// set ImagePullPolicy to "Never" to avoid pulling the image
// as the image is already loaded by the kubernetes provisioner
container.ImagePullPolicy = "Never"

// set Elasticsearch host and API key
for idx, env := range container.Env {
if env.Name == "ES_HOST" {
container.Env[idx].Value = esHost
container.Env[idx].ValueFrom = nil
}
if env.Name == "API_KEY" {
container.Env[idx].Value = esAPIKey
container.Env[idx].ValueFrom = nil
}
}

if len(tc.envAdd) > 0 {
container.Env = append(container.Env, tc.envAdd...)
}

// drop arguments overriding default config
container.Args = []string{}
},
func(pod *corev1.PodSpec) {
for volumeIdx, volume := range pod.Volumes {
// need to update the volume path of the state directory
// to match the test namespace
if volume.Name == "elastic-agent-state" {
hostPathType := corev1.HostPathDirectoryOrCreate
pod.Volumes[volumeIdx].VolumeSource.HostPath = &corev1.HostPathVolumeSource{
Type: &hostPathType,
Path: fmt.Sprintf("/var/lib/elastic-agent-standalone/%s/state", testNamespace),
}
}
}
})

ctx := context.Background()

deployK8SAgent(t, ctx, client, k8sObjects, testNamespace, tc.runK8SInnerTests, testLogsBasePath, tc.componentPresence)
})
}

@@ -214,7 +331,7 @@ func TestKubernetesAgentStandalone(t *testing.T) {
// deployK8SAgent is a helper function to deploy the elastic-agent in k8s and invoke the inner k8s tests if
// runK8SInnerTests is true
func deployK8SAgent(t *testing.T, ctx context.Context, client klient.Client, objects []k8s.Object, namespace string,
runInnerK8STests bool, testLogsBasePath string) {
runInnerK8STests bool, testLogsBasePath string, componentPresence map[string]bool) {

objects = append([]k8s.Object{&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
@@ -288,9 +405,10 @@ func deployK8SAgent(t *testing.T, ctx context.Context, client klient.Client, obj
time.Sleep(time.Second * 1)
}

statusString := stdout.String()
if agentHealthyErr != nil {
t.Errorf("elastic-agent never reported healthy: %v", agentHealthyErr)
t.Logf("stdout: %s\n", stdout.String())
t.Logf("stdout: %s\n", statusString)
t.Logf("stderr: %s\n", stderr.String())
t.FailNow()
return
Expand All @@ -299,6 +417,11 @@ func deployK8SAgent(t *testing.T, ctx context.Context, client klient.Client, obj
stdout.Reset()
stderr.Reset()

for component, shouldBePresent := range componentPresence {
isPresent := strings.Contains(statusString, component)
require.Equal(t, shouldBePresent, isPresent)
}

if runInnerK8STests {
err := client.Resources().ExecInPod(ctx, namespace, agentPodName, "elastic-agent-standalone",
[]string{"/usr/share/elastic-agent/k8s-inner-tests", "-test.v"}, &stdout, &stderr)