test: added missing tests for the Pod analyzer
- Fixed a small bug where failures were being appended multiple times
  for CrashLoopBackOff and ContainerCreating container status reasons.

- Added missing test cases to properly exercise the Pod analyzer,
  raising this analyzer's code coverage to 92%.

Partially addresses: k8sgpt-ai#889

Signed-off-by: VaibhavMalik4187 <vaibhavmalik2018@gmail.com>
VaibhavMalik4187 committed Mar 23, 2024
1 parent 5db4bc2 commit a4c9ea9
Showing 2 changed files with 178 additions and 22 deletions.
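The bug fix is easiest to see in miniature. The following is a minimal, self-contained sketch, not the analyzer code itself: this isErrorReason is a simplified stand-in for the real helper, and the reason and message strings are illustrative. It shows why two independent if blocks recorded a failure twice for a CrashLoopBackOff container, and why the if/else-if chain introduced in this commit records at most one:

package main

import "fmt"

// Simplified stand-in for the analyzer's real isErrorReason helper.
func isErrorReason(reason string) bool {
	return reason == "CrashLoopBackOff" || reason == "ErrImagePull"
}

func main() {
	reason, message := "CrashLoopBackOff", "back-off restarting failed container"

	// Old shape: independent checks, so one waiting container can match twice.
	var before []string
	if isErrorReason(reason) && message != "" {
		before = append(before, message)
	}
	if reason == "CrashLoopBackOff" {
		before = append(before, "the last termination reason is ...")
	}

	// New shape: an if/else-if chain, so at most one failure is recorded.
	var after []string
	if reason == "CrashLoopBackOff" {
		after = append(after, "the last termination reason is ...")
	} else if isErrorReason(reason) && message != "" {
		after = append(after, message)
	}

	fmt.Printf("before: %d failures, after: %d failure\n", len(before), len(after)) // before: 2, after: 1
}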
23 changes: 8 additions & 15 deletions pkg/analyzer/pod.go
@@ -59,19 +59,9 @@ func (PodAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
 
 		// Check through container status to check for crashes or unready
 		for _, containerStatus := range pod.Status.ContainerStatuses {
-
 			if containerStatus.State.Waiting != nil {
-
-				if isErrorReason(containerStatus.State.Waiting.Reason) && containerStatus.State.Waiting.Message != "" {
-					failures = append(failures, common.Failure{
-						Text:      containerStatus.State.Waiting.Message,
-						Sensitive: []common.Sensitive{},
-					})
-				}
-
-				// This represents a container that is still being created or blocked due to conditions such as OOMKilled
 				if containerStatus.State.Waiting.Reason == "ContainerCreating" && pod.Status.Phase == "Pending" {
-
+					// This represents a container that is still being created or blocked due to conditions such as OOMKilled
 					// parse the event log and append details
 					evt, err := FetchLatestEvent(a.Context, a.Client, pod.Namespace, pod.Name)
 					if err != nil || evt == nil {
@@ -83,14 +73,17 @@ func (PodAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
 						Sensitive: []common.Sensitive{},
 					})
 				}
-			}
-
-			// This represents container that is in CrashLoopBackOff state due to conditions such as OOMKilled
-			if containerStatus.State.Waiting.Reason == "CrashLoopBackOff" {
+			} else if containerStatus.State.Waiting.Reason == "CrashLoopBackOff" && containerStatus.LastTerminationState.Terminated != nil {
+				// This represents container that is in CrashLoopBackOff state due to conditions such as OOMKilled
 				failures = append(failures, common.Failure{
 					Text:      fmt.Sprintf("the last termination reason is %s container=%s pod=%s", containerStatus.LastTerminationState.Terminated.Reason, containerStatus.Name, pod.Name),
 					Sensitive: []common.Sensitive{},
 				})
+			} else if isErrorReason(containerStatus.State.Waiting.Reason) && containerStatus.State.Waiting.Message != "" {
+				failures = append(failures, common.Failure{
+					Text:      containerStatus.State.Waiting.Message,
+					Sensitive: []common.Sensitive{},
+				})
 			}
 		} else {
 			// when pod is Running but its ReadinessProbe fails
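Beyond the reordering, the new CrashLoopBackOff branch above also guards containerStatus.LastTerminationState.Terminated against nil before reading its Reason. Here is a small sketch of the failure mode that guard appears to avoid; the ContainerStatus literal is illustrative, not taken from the commit:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// A waiting container with no recorded last termination, which can happen
	// when CrashLoopBackOff is reported before any termination was observed.
	cs := v1.ContainerStatus{
		State: v1.ContainerState{
			Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
		},
	}

	// Without the Terminated != nil check, reading .Reason below would panic
	// with a nil-pointer dereference.
	if cs.State.Waiting.Reason == "CrashLoopBackOff" && cs.LastTerminationState.Terminated != nil {
		fmt.Println("last termination reason:", cs.LastTerminationState.Terminated.Reason)
	} else {
		fmt.Println("no termination record; nothing to report")
	}
}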
177 changes: 170 additions & 7 deletions pkg/analyzer/pod_test.go
@@ -15,11 +15,12 @@ package analyzer
 
 import (
 	"context"
+	"sort"
 	"testing"
 
 	"github.com/k8sgpt-ai/k8sgpt/pkg/common"
 	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
-	"github.com/magiconair/properties/assert"
+	"github.com/stretchr/testify/require"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/fake"
Expand Down Expand Up @@ -67,6 +68,65 @@ func TestPodAnalyzer(t *testing.T) {
},
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "example3",
Namespace: "default",
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
ContainerStatuses: []v1.ContainerStatus{
{
Name: "container1",
Ready: false,
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{
// Valid error reason.
Reason: "CrashLoopBackOff",
Message: "this'll produce a failure",
},
},
LastTerminationState: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
Reason: "test-reason",
},
},
},
{
Name: "container2",
Ready: false,
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{
// Invalid error reason. This won't contribute to failures.
Reason: "invalid-reason",
Message: "this won't produce a failure",
},
},
LastTerminationState: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
Reason: "test-reason",
},
},
},
{
Name: "container3",
Ready: false,
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{
// Valid error reason.
Reason: "CrashLoopBackOff",
Message: "this'll produce a failure",
},
},
LastTerminationState: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
Reason: "test-reason",
},
},
},
},
},
},
// simulate event: 30s Warning Unhealthy pod/my-nginx-7fb4dbcf47-4ch4w Readiness probe failed: bash: xxxx: command not found
&v1.Event{
ObjectMeta: metav1.ObjectMeta{
Expand Down Expand Up @@ -94,18 +154,56 @@ func TestPodAnalyzer(t *testing.T) {
Context: context.Background(),
Namespace: "default",
}

podAnalyzer := PodAnalyzer{}
var analysisResults []common.Result
analysisResults, err := podAnalyzer.Analyze(config)
results, err := podAnalyzer.Analyze(config)

sort.Slice(results, func(i, j int) bool {
return results[i].Name < results[j].Name
})

if err != nil {
t.Error(err)
}
assert.Equal(t, len(analysisResults), 2)

expectations := []struct {
name string
failuresCount int
}{
{
name: "default/example",
failuresCount: 1,
},
{
name: "default/example2",
failuresCount: 1,
},
{
name: "default/example3",
failuresCount: 2,
},
}

require.Equal(t, len(expectations), len(results))

for i, result := range results {
require.Equal(t, expectations[i].name, result.Name)
require.Equal(t, expectations[i].failuresCount, len(result.Error))
}
}

func TestPodAnalyzerNamespaceFiltering(t *testing.T) {

clientset := fake.NewSimpleClientset(
// This event will get discovered.
&v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "example",
Namespace: "default",
},
Reason: "FailedCreatePodSandBox",
Message: "this is a test event",
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "example",
@@ -121,6 +219,47 @@ func TestPodAnalyzerNamespaceFiltering(t *testing.T) {
 					Message: "0/1 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate.",
 				},
 			},
+			ContainerStatuses: []v1.ContainerStatus{
+				{
+					Name:  "container4",
+					Ready: false,
+					State: v1.ContainerState{
+						Waiting: &v1.ContainerStateWaiting{
+							// This'll contribute to a failure.
+							Reason:  "ContainerCreating",
+							Message: "This container is being created",
+						},
+					},
+					LastTerminationState: v1.ContainerState{
+						Terminated: &v1.ContainerStateTerminated{
+							Reason: "test-reason",
+						},
+					},
+				},
+			},
+		},
+	},
+	&v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "example2",
+			Namespace: "default",
+		},
+		Status: v1.PodStatus{
+			Phase: v1.PodPending,
+			ContainerStatuses: []v1.ContainerStatus{
+				{
+					// This'll contribute to a failure.
+					Name:  "container5",
+					Ready: false,
+					State: v1.ContainerState{
+						Waiting: &v1.ContainerStateWaiting{
+							// Valid error reason.
+							Reason:  "ErrImagePull",
+							Message: "Failed to pull image",
+						},
+					},
+				},
+			},
+		},
+	},
 	&v1.Pod{
Expand Down Expand Up @@ -148,11 +287,35 @@ func TestPodAnalyzerNamespaceFiltering(t *testing.T) {
Context: context.Background(),
Namespace: "default",
}

podAnalyzer := PodAnalyzer{}
var analysisResults []common.Result
analysisResults, err := podAnalyzer.Analyze(config)
results, err := podAnalyzer.Analyze(config)
if err != nil {
t.Error(err)
}
assert.Equal(t, len(analysisResults), 1)

sort.Slice(results, func(i, j int) bool {
return results[i].Name < results[j].Name
})

expectations := []struct {
name string
failuresCount int
}{
{
name: "default/example",
failuresCount: 2,
},
{
name: "default/example2",
failuresCount: 1,
},
}

require.Equal(t, len(expectations), len(results))

for i, result := range results {
require.Equal(t, expectations[i].name, result.Name)
require.Equal(t, expectations[i].failuresCount, len(result.Error))
}
}
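To check the coverage figure quoted in the commit message, an invocation along these lines should work (the exact command is a suggestion, not part of this commit):

go test ./pkg/analyzer/ -run TestPodAnalyzer -cover

Note that -run TestPodAnalyzer matches both tests above by prefix, and the printed percentage covers the whole package, so it may differ slightly from the per-analyzer figure.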
