Use a single handler for executing all commands (#6826)
* Document current implementations of command handlers

* Add unit tests for execHandler

* Refactor pkg/devfile/image to inject Backend as dependency

* Use same handler for kubedev/podmandev

* Fail when SelectBackend returns nil only if a backend is needed

* Move runHandler to dev/common

* Unit tests for runHandler

* Create a component.ExecuteTerminatingCommand

* ExecuteTerminatingCommand/ExecuteNonTerminatingCommand for Handler

* Fix calling other command types

* Consider parent group to determine if a command is terminating

* Replace component.execHandler with common.runHandler

* Remove execHandler

* Make runHandler and most of its fields private and pass containersRunning to handler

* Pass containersRunning value

* Deploy using common Handler

* Fix tests

* Use specific Dev/Deploy mode for Apply

* Fix cmdline for job

* Fix unit tests

* Pass appName and componentName with ctx to handler

* Move handler to pkg/component package

* Update doc

* Unit tests Deploy

* Unit tests Build

* Unit tests Run

* Unit tests PostStart

* Unit tests PreStop

* Update doc

* Fix Podman tests

* Fix hotReload on podman

* Change podman version timeout to 30s for tests

* Cleanup + fix doc
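
The bullets above converge on a single run handler with two execution entry points. As a rough sketch only (the names are taken from the commit message; the authoritative interface lives in pkg/libdevfile and its implementation in pkg/component, and may differ in details), the resulting handler shape might look like:

```go
package libdevfile

import (
	"context"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
)

// Hypothetical sketch of the unified handler interface implied by the
// commit message; the real definition may differ in names and parameters.
type Handler interface {
	ApplyImage(image v1alpha2.Component) error
	ApplyKubernetes(kubernetes v1alpha2.Component) error
	ApplyOpenShift(openshift v1alpha2.Component) error
	// ExecuteTerminatingCommand runs a command to completion (e.g. Build, Test, Deploy, event-bound commands).
	ExecuteTerminatingCommand(ctx context.Context, command v1alpha2.Command) error
	// ExecuteNonTerminatingCommand starts a long-running command (e.g. Run, Debug) without waiting for it to finish.
	ExecuteNonTerminatingCommand(ctx context.Context, command v1alpha2.Command) error
}
```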
feloy authored May 26, 2023
1 parent 511aaa2 commit 9a239c4
Showing 39 changed files with 2,377 additions and 507 deletions.
46 changes: 46 additions & 0 deletions docs/proposals/commands-semantic.md
@@ -0,0 +1,46 @@
# Semantics of commands

Components:
- container
- cluster resource (Kubernetes/OpenShift)
- volume
- image

| Command | PreStart | PostStart | PreStop | PostStop |
|-----------------------------|----------|-----------|---------|-----------|
| exec on container | | Yt | Yt | |
| exec on cluster resource | N/A | N/A | N/A | N/A |
| exec on volume | N/A | N/A | N/A | N/A |
| exec on image | N/A | N/A | N/A | N/A |
|   | | | | |
| apply on container | ? | ? | ? | ? |
| apply on cluster resource | | Yt | Yt | |
| apply on volume | ? | ? | ? | ? |
| apply on image | | Yt | Yt | |
|   | | | | |
| composite serial | | | | |
| composite parallel | | | | |


| Command | Build | Run/Debug | Deploy |
|-----------------------------|-------|-----------|--------|
| exec on container | Yt | Yt | Yt |
| exec on cluster resource | N/A | N/A | N/A |
| exec on volume | N/A | N/A | N/A |
| exec on image | N/A | N/A | N/A |
|   | | | |
| apply on container | ? | ? | ? |
| apply on cluster resource | Yt | Yt | Yt |
| apply on volume | ? | ? | ? |
| apply on image | Yt | Yt | Yt |
|   | | | |
| composite serial | | | |
| composite parallel | | | |


Legend:

- 0: Supported by handler but not implemented
- Y: Implemented by pkg/component.NewRunHandler (Yt: tested in pkg/component/handler_test.go)
- N/A: Not applicable (by spec)
- ?: Spec is not clear
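
One bullet in the commit message says the parent group of a command is used to decide whether it is terminating. A minimal sketch of that decision, assuming Run and Debug are the only non-terminating groups (hypothetical helper; the actual logic lives in pkg/component and may handle more cases):

```go
package component

import "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"

// isTerminating sketches how a command's parent group could determine
// whether the command must run to completion (hypothetical helper).
func isTerminating(group *v1alpha2.CommandGroup) bool {
	if group == nil {
		// Event-bound commands (PostStart, PreStop) have no group and
		// are expected to run to completion.
		return true
	}
	switch group.Kind {
	case v1alpha2.RunCommandGroupKind, v1alpha2.DebugCommandGroupKind:
		return false
	default: // Build, Test, Deploy
		return true
	}
}
```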
8 changes: 8 additions & 0 deletions pkg/component/component.go
@@ -538,3 +538,11 @@ func ListRoutesAndIngresses(client kclient.ClientInterface, componentName, appNa

return ings, routes, nil
}

// GetContainersNames returns the names of all containers declared in the given pod's spec.
func GetContainersNames(pod *corev1.Pod) []string {
result := make([]string, 0, len(pod.Spec.Containers))
for _, container := range pod.Spec.Containers {
result = append(result, container.Name)
}
return result
}
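
A minimal usage sketch for the new helper, with hypothetical pod values:

```go
pod := &corev1.Pod{
	Spec: corev1.PodSpec{
		Containers: []corev1.Container{{Name: "runtime"}, {Name: "sidecar"}},
	},
}
names := component.GetContainersNames(pod)
fmt.Println(names) // [runtime sidecar]
```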
31 changes: 24 additions & 7 deletions pkg/component/delete/delete.go
@@ -15,6 +15,7 @@ import (
"k8s.io/klog"

"github.com/redhat-developer/odo/pkg/component"
"github.com/redhat-developer/odo/pkg/configAutomount"
"github.com/redhat-developer/odo/pkg/exec"
"github.com/redhat-developer/odo/pkg/kclient"
odolabels "github.com/redhat-developer/odo/pkg/labels"
@@ -28,9 +29,10 @@
)

type DeleteComponentClient struct {
- kubeClient   kclient.ClientInterface
- podmanClient podman.Client
- execClient   exec.Client
+ kubeClient            kclient.ClientInterface
+ podmanClient          podman.Client
+ execClient            exec.Client
+ configAutomountClient configAutomount.Client
}

var _ Client = (*DeleteComponentClient)(nil)
@@ -39,11 +41,13 @@ func NewDeleteComponentClient(
kubeClient kclient.ClientInterface,
podmanClient podman.Client,
execClient exec.Client,
+ configAutomountClient configAutomount.Client,
) *DeleteComponentClient {
return &DeleteComponentClient{
- kubeClient:   kubeClient,
- podmanClient: podmanClient,
- execClient:   execClient,
+ kubeClient:            kubeClient,
+ podmanClient:          podmanClient,
+ execClient:            execClient,
+ configAutomountClient: configAutomountClient,
}
}

@@ -216,7 +220,20 @@ func (do *DeleteComponentClient) ExecutePreStopEvents(ctx context.Context, devfi

klog.V(4).Infof("Executing %q event commands for component %q", libdevfile.PreStop, componentName)
// ignore the failures if any; delete should not fail because preStop events failed to execute
- err = libdevfile.ExecPreStopEvents(ctx, devfileObj, component.NewExecHandler(do.kubeClient, do.execClient, appName, componentName, pod.Name, "Executing pre-stop command in container", false, false))
+ handler := component.NewRunHandler(
+ ctx,
+ do.kubeClient,
+ do.execClient,
+ do.configAutomountClient,
+ pod.Name,
+ false,
+ component.GetContainersNames(pod),
+ "Executing pre-stop command in container",
+
+ // TODO(feloy) set these values when we want to support Apply Image/Kubernetes/OpenShift commands for PreStop events
+ nil, nil, parser.DevfileObj{}, "",
+ )
+ err = libdevfile.ExecPreStopEvents(ctx, devfileObj, handler)
if err != nil {
klog.V(4).Infof("Failed to execute %q event commands for component %q, cause: %v", libdevfile.PreStop, componentName, err.Error())
}
21 changes: 12 additions & 9 deletions pkg/component/delete/delete_test.go
@@ -160,7 +160,7 @@ func TestDeleteComponentClient_ListClusterResourcesToDelete(t *testing.T) {
ctrl := gomock.NewController(t)
kubeClient := tt.fields.kubeClient(ctrl)
execClient := exec.NewExecClient(kubeClient)
- do := NewDeleteComponentClient(kubeClient, nil, execClient)
+ do := NewDeleteComponentClient(kubeClient, nil, execClient, nil)
ctx := odocontext.WithApplication(context.TODO(), "app")
got, err := do.ListClusterResourcesToDelete(ctx, tt.args.componentName, tt.args.namespace, tt.args.mode)
if (err != nil) != tt.wantErr {
@@ -277,7 +277,7 @@ func TestDeleteComponentClient_DeleteResources(t *testing.T) {
ctrl := gomock.NewController(t)
kubeClient := tt.fields.kubeClient(ctrl)
execClient := exec.NewExecClient(kubeClient)
- do := NewDeleteComponentClient(kubeClient, nil, execClient)
+ do := NewDeleteComponentClient(kubeClient, nil, execClient, nil)
got := do.DeleteResources(tt.args.resources, false)
if diff := cmp.Diff(tt.want, got); diff != "" {
t.Errorf("DeleteComponentClient.DeleteResources() mismatch (-want +got):\n%s", diff)
@@ -686,10 +686,10 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) {
client := kclient.NewMockClientInterface(ctrl)

selector := odolabels.GetSelector(componentName, "app", odolabels.ComponentDevMode, false)
- client.EXPECT().GetRunningPodFromSelector(selector).Return(odoTestingUtil.CreateFakePod(componentName, "runtime"), nil)
+ client.EXPECT().GetRunningPodFromSelector(selector).Return(odoTestingUtil.CreateFakePod(componentName, "mypod", "runtime"), nil)

cmd := []string{"/bin/sh", "-c", "cd /projects/nodejs-starter && (echo \"Hello World!\") 1>>/proc/1/fd/1 2>>/proc/1/fd/2"}
- client.EXPECT().ExecCMDInContainer(gomock.Any(), "runtime", "runtime", cmd, gomock.Any(), gomock.Any(), nil, false).Return(nil)
+ client.EXPECT().ExecCMDInContainer(gomock.Any(), "runtime", "mypod", cmd, gomock.Any(), gomock.Any(), nil, false).Return(nil)

return client
},
@@ -707,7 +707,7 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) {
client := kclient.NewMockClientInterface(ctrl)

selector := odolabels.GetSelector(componentName, "app", odolabels.ComponentDevMode, false)
- pod := odoTestingUtil.CreateFakePod(componentName, "runtime")
+ pod := odoTestingUtil.CreateFakePod(componentName, "mypod", "runtime")
pod.Status.Phase = corev1.PodFailed
client.EXPECT().GetRunningPodFromSelector(selector).Return(pod, nil)
return client
@@ -726,14 +726,14 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) {
client := kclient.NewMockClientInterface(ctrl)

selector := odolabels.GetSelector(componentName, "app", odolabels.ComponentDevMode, false)
- fakePod := odoTestingUtil.CreateFakePod(componentName, "runtime")
+ fakePod := odoTestingUtil.CreateFakePod(componentName, "mypod", "runtime")
// Expecting this method to be called twice because if the command execution fails, we try to get the pod logs by calling GetOnePodFromSelector again.
client.EXPECT().GetRunningPodFromSelector(selector).Return(fakePod, nil).Times(2)

client.EXPECT().GetPodLogs(fakePod.Name, gomock.Any(), gomock.Any()).Return(nil, errors.New("an error"))

cmd := []string{"/bin/sh", "-c", "cd /projects/nodejs-starter && (echo \"Hello World!\") 1>>/proc/1/fd/1 2>>/proc/1/fd/2"}
- client.EXPECT().ExecCMDInContainer(gomock.Any(), "runtime", "runtime", cmd, gomock.Any(), gomock.Any(), nil, false).Return(errors.New("some error"))
+ client.EXPECT().ExecCMDInContainer(gomock.Any(), "runtime", "mypod", cmd, gomock.Any(), gomock.Any(), nil, false).Return(errors.New("some error"))

return client
},
@@ -750,8 +750,11 @@ func TestDeleteComponentClient_ExecutePreStopEvents(t *testing.T) {
ctrl := gomock.NewController(t)
kubeClient := tt.fields.kubeClient(ctrl)
execClient := exec.NewExecClient(kubeClient)
- do := NewDeleteComponentClient(kubeClient, nil, execClient)
- if err := do.ExecutePreStopEvents(context.Background(), tt.args.devfileObj, tt.args.appName, tt.args.devfileObj.GetMetadataName()); (err != nil) != tt.wantErr {
+ do := NewDeleteComponentClient(kubeClient, nil, execClient, nil)
+ ctx := context.Background()
+ ctx = odocontext.WithApplication(ctx, appName)
+ ctx = odocontext.WithComponentName(ctx, componentName)
+ if err := do.ExecutePreStopEvents(ctx, tt.args.devfileObj, tt.args.appName, tt.args.devfileObj.GetMetadataName()); (err != nil) != tt.wantErr {
t.Errorf("DeleteComponent() error = %v, wantErr %v", err, tt.wantErr)
}
})
180 changes: 180 additions & 0 deletions pkg/component/execute_new_container.go
@@ -0,0 +1,180 @@
package component

import (
"context"
"fmt"
"strings"
"time"

"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
"github.com/devfile/library/v2/pkg/devfile/generator"
"github.com/devfile/library/v2/pkg/devfile/parser"
"github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common"

"github.com/redhat-developer/odo/pkg/configAutomount"
"github.com/redhat-developer/odo/pkg/dev/kubedev/storage"
"github.com/redhat-developer/odo/pkg/kclient"
odolabels "github.com/redhat-developer/odo/pkg/labels"
odogenerator "github.com/redhat-developer/odo/pkg/libdevfile/generator"
"github.com/redhat-developer/odo/pkg/log"
"github.com/redhat-developer/odo/pkg/util"

batchv1 "k8s.io/api/batch/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"k8s.io/utils/pointer"
)

func ExecuteInNewContainer(
ctx context.Context,
kubeClient kclient.ClientInterface,
configAutomountClient configAutomount.Client,
devfileObj parser.DevfileObj,
componentName string,
appName string,
command v1alpha2.Command,
) error {
policy, err := kubeClient.GetCurrentNamespacePolicy()
if err != nil {
return err
}
podTemplateSpec, err := generator.GetPodTemplateSpec(devfileObj, generator.PodTemplateParams{
Options: common.DevfileOptions{
FilterByName: command.Exec.Component,
},
PodSecurityAdmissionPolicy: policy,
})
if err != nil {
return err
}
// Setting the restart policy to "Never" so that pods are kept around after the job finishes execution; this is helpful for obtaining logs to debug.
podTemplateSpec.Spec.RestartPolicy = "Never"

if len(podTemplateSpec.Spec.Containers) != 1 {
return fmt.Errorf("could not find the component")
}

podTemplateSpec.Spec.Containers[0].Command = []string{"/bin/sh"}
podTemplateSpec.Spec.Containers[0].Args = getJobCmdline(command)

volumes, err := storage.GetAutomountVolumes(configAutomountClient, podTemplateSpec.Spec.Containers, podTemplateSpec.Spec.InitContainers)
if err != nil {
return err
}

podTemplateSpec.Spec.Volumes = volumes

// Create a Kubernetes Job and use the container image referenced by command.Exec.Component
// Get the component for the command with command.Exec.Component
getJobName := func() string {
maxLen := kclient.JobNameOdoMaxLength - len(command.Id)
// We ignore the error here because our component name and app name will never be empty, which are the only cases where an error might be raised.
name, _ := util.NamespaceKubernetesObjectWithTrim(componentName, appName, maxLen)
name += "-" + command.Id
return name
}
completionMode := batchv1.CompletionMode("Indexed")
jobParams := odogenerator.JobParams{
TypeMeta: generator.GetTypeMeta(kclient.JobsKind, kclient.JobsAPIVersion),
ObjectMeta: metav1.ObjectMeta{
Name: getJobName(),
},
PodTemplateSpec: *podTemplateSpec,
SpecParams: odogenerator.JobSpecParams{
CompletionMode: &completionMode,
TTLSecondsAfterFinished: pointer.Int32(60),
BackOffLimit: pointer.Int32(1),
},
}
job := odogenerator.GetJob(jobParams)
// Set labels and annotations
job.SetLabels(odolabels.GetLabels(componentName, appName, GetComponentRuntimeFromDevfileMetadata(devfileObj.Data.GetMetadata()), odolabels.ComponentDeployMode, false))
job.Annotations = map[string]string{}
odolabels.AddCommonAnnotations(job.Annotations)
odolabels.SetProjectType(job.Annotations, GetComponentTypeFromDevfileMetadata(devfileObj.Data.GetMetadata()))

// Make sure there are no existing jobs
checkAndDeleteExistingJob := func() {
items, dErr := kubeClient.ListJobs(odolabels.GetSelector(componentName, appName, odolabels.ComponentDeployMode, false))
if dErr != nil {
klog.V(4).Infof("failed to list jobs; cause: %s", dErr.Error())
return
}
jobName := getJobName()
for _, item := range items.Items {
if strings.Contains(item.Name, jobName) {
dErr = kubeClient.DeleteJob(item.Name)
if dErr != nil {
klog.V(4).Infof("failed to delete job %q; cause: %s", item.Name, dErr.Error())
}
}
}
}
checkAndDeleteExistingJob()

log.Sectionf("Executing command:")
spinner := log.Spinnerf("Executing command in container (command: %s)", command.Id)
defer spinner.End(false)

var createdJob *batchv1.Job
createdJob, err = kubeClient.CreateJob(job, "")
if err != nil {
return err
}
defer func() {
err = kubeClient.DeleteJob(createdJob.Name)
if err != nil {
klog.V(4).Infof("failed to delete job %q; cause: %s", createdJob.Name, err)
}
}()

var done = make(chan struct{}, 1)
// Print the tip to use `odo logs` if the command is still running after 1 minute
go func() {
select {
case <-time.After(1 * time.Minute):
log.Info("\nTip: Run `odo logs --deploy --follow` to get the logs of the command output.")
case <-done:
return
}
}()

// Wait for the command to complete execution
_, err = kubeClient.WaitForJobToComplete(createdJob)
done <- struct{}{}

spinner.End(err == nil)

if err != nil {
err = fmt.Errorf("failed to execute (command: %s)", command.Id)
// Print the job logs if the job failed
jobLogs, logErr := kubeClient.GetJobLogs(createdJob, command.Exec.Component)
if logErr != nil {
log.Warningf("failed to fetch the logs of execution; cause: %s", logErr)
}
fmt.Println("Execution output:")
_ = util.DisplayLog(false, jobLogs, log.GetStderr(), componentName, 100)
}

return err
}

func getJobCmdline(command v1alpha2.Command) []string {
// deal with environment variables
var cmdLine string
setEnvVariable := util.GetCommandStringFromEnvs(command.Exec.Env)

if setEnvVariable == "" {
cmdLine = command.Exec.CommandLine
} else {
cmdLine = setEnvVariable + " && " + command.Exec.CommandLine
}
var args []string
if command.Exec.WorkingDir != "" {
// since we are using /bin/sh -c, the command needs to be within a single double quote instance, for example "cd /tmp && pwd"
args = []string{"-c", "cd " + command.Exec.WorkingDir + " && " + cmdLine}
} else {
args = []string{"-c", cmdLine}
}
return args
}
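
For illustration, a sketch of what getJobCmdline produces for a hypothetical Deploy exec command with no environment variables (the env rendering is delegated to util.GetCommandStringFromEnvs and is not reproduced here):

```go
cmd := v1alpha2.Command{
	Id: "deploy-db",
	CommandUnion: v1alpha2.CommandUnion{
		Exec: &v1alpha2.ExecCommand{
			CommandLine: "kubectl apply -f db.yaml",
			WorkingDir:  "/projects",
		},
	},
}
args := getJobCmdline(cmd)
// args == []string{"-c", "cd /projects && kubectl apply -f db.yaml"}
// The job container then runs: /bin/sh -c "cd /projects && kubectl apply -f db.yaml"
```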