support RuntimeClass.handler, useful when e.g. nvidia isn't the default runtime

Signed-off-by: zhangguanzhang <zhangguanzhang@qq.com>
zhangguanzhang committed Apr 21, 2024
1 parent b138f52 commit 4a7d4ef
Showing 7 changed files with 210 additions and 29 deletions.
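
For reference, the runtime applied here comes from the CRI RuntimeHandler field on the sandbox request; this commit makes containers inherit that runtime instead of always using the daemon default. The sketch below is illustrative only — the client wiring and the "nvidia" handler name are assumptions, not part of this commit.

package example

import (
	"context"

	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// runWithHandler is a hypothetical caller: it requests a non-default runtime
// handler for a pod sandbox, then creates a container in that sandbox, which
// now inherits the sandbox's runtime.
func runWithHandler(ctx context.Context, rt runtimeapi.RuntimeServiceClient,
	sConfig *runtimeapi.PodSandboxConfig, cConfig *runtimeapi.ContainerConfig) error {
	runResp, err := rt.RunPodSandbox(ctx, &runtimeapi.RunPodSandboxRequest{
		Config:         sConfig,
		RuntimeHandler: "nvidia", // example handler name; must be configured in the docker daemon
	})
	if err != nil {
		return err
	}
	_, err = rt.CreateContainer(ctx, &runtimeapi.CreateContainerRequest{
		PodSandboxId:  runResp.PodSandboxId,
		Config:        cConfig,
		SandboxConfig: sConfig,
	})
	return err
}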
6 changes: 6 additions & 0 deletions core/container_create.go
@@ -67,6 +67,11 @@ func (ds *dockerService) CreateContainer(
containerName := makeContainerName(sandboxConfig, config)
mounts := config.GetMounts()
terminationMessagePath, _ := config.Annotations["io.kubernetes.container.terminationMessagePath"]

sandboxInfo, err := ds.client.InspectContainer(r.GetPodSandboxId())
if err != nil {
return nil, fmt.Errorf("unable to get container's sandbox ID: %v", err)
}
createConfig := dockerbackend.ContainerCreateConfig{
Name: containerName,
Config: &container.Config{
@@ -91,6 +96,7 @@ func (ds *dockerService) CreateContainer(
RestartPolicy: container.RestartPolicy{
Name: "no",
},
Runtime: sandboxInfo.HostConfig.Runtime,
},
}

109 changes: 85 additions & 24 deletions core/container_test.go
@@ -67,9 +67,15 @@ func TestConcurrentlyCreateAndDeleteContainers(t *testing.T) {
podName, namespace := "foo", "bar"
containerName, image := "sidecar", "logger"

type podInfo struct {
ContainerId string
SandboxID string
}

const count = 20
configs := make([]*runtimeapi.ContainerConfig, 0, count)
sConfigs := make([]*runtimeapi.PodSandboxConfig, 0, count)

for i := 0; i < count; i++ {
s := makeSandboxConfig(fmt.Sprintf("%s%d", podName, i),
fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0)
@@ -80,8 +86,8 @@ func TestConcurrentlyCreateAndDeleteContainers(t *testing.T) {
configs = append(configs, c)
}

containerIDs := make(
chan string,
podInfos := make(
chan podInfo,
len(configs),
) // make channel non-blocking to simulate concurrent containers creation

@@ -94,39 +100,64 @@ func TestConcurrentlyCreateAndDeleteContainers(t *testing.T) {

go func() {
creationWg.Wait()
close(containerIDs)
close(podInfos)
}()
for i := range configs {
go func(i int) {
defer creationWg.Done()
// We don't care about the sandbox id; pass a bogus one.
sandboxID := fmt.Sprintf("sandboxid%d", i)

runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{
Config: sConfigs[i],
})
if err != nil {
t.Errorf("RunPodSandbox: %v", err)
return
}

req := &runtimeapi.CreateContainerRequest{
PodSandboxId: sandboxID,
PodSandboxId: runSandboxResp.PodSandboxId,
Config: configs[i],
SandboxConfig: sConfigs[i],
}

createResp, err := ds.CreateContainer(getTestCTX(), req)
if err != nil {
t.Errorf("CreateContainer: %v", err)
return
}
containerIDs <- createResp.ContainerId
podInfos <- podInfo{
ContainerId: createResp.ContainerId,
SandboxID: runSandboxResp.PodSandboxId,
}
}(i)
}

for containerID := range containerIDs {
for pod := range podInfos {
deletionWg.Add(1)
go func(id string) {
go func(i podInfo) {
defer deletionWg.Done()
_, err := ds.RemoveContainer(
getTestCTX(),
&runtimeapi.RemoveContainerRequest{ContainerId: id},
&runtimeapi.RemoveContainerRequest{ContainerId: i.ContainerId},
)
if err != nil {
t.Errorf("RemoveContainer: %v", err)
}
}(containerID)
_, err = ds.StopPodSandbox(
getTestCTX(),
&runtimeapi.StopPodSandboxRequest{PodSandboxId: i.SandboxID},
)
if err != nil {
t.Errorf("StopPodSandbox: %v", err)
}
_, err = ds.RemovePodSandbox(
getTestCTX(),
&runtimeapi.RemovePodSandboxRequest{PodSandboxId: i.SandboxID},
)
if err != nil {
t.Errorf("RemovePodSandbox: %v", err)
}
}(pod)
}
deletionWg.Wait()
}
@@ -155,10 +186,15 @@ func TestListContainers(t *testing.T) {
state := runtimeapi.ContainerState_CONTAINER_RUNNING
var createdAt int64 = fakeClock.Now().UnixNano()
for i := range configs {
// We don't care about the sandbox id; pass a bogus one.
sandboxID := fmt.Sprintf("sandboxid%d", i)
runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{
Config: sConfigs[i],
})
if err != nil {
t.Errorf("RunPodSandbox: %v", err)
return
}
req := &runtimeapi.CreateContainerRequest{
PodSandboxId: sandboxID,
PodSandboxId: runSandboxResp.PodSandboxId,
Config: configs[i],
SandboxConfig: sConfigs[i],
}
@@ -174,7 +210,7 @@ func TestListContainers(t *testing.T) {
expected = append([]*runtimeapi.Container{{
Metadata: configs[i].Metadata,
Id: id,
PodSandboxId: sandboxID,
PodSandboxId: runSandboxResp.PodSandboxId,
State: state,
CreatedAt: createdAt,
Image: configs[i].Image,
@@ -226,12 +262,20 @@ func TestContainerStatus(t *testing.T) {

fDocker.InjectImages([]dockertypes.ImageSummary{{ID: imageName}})

runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{
Config: sConfig,
})
if err != nil {
t.Errorf("RunPodSandbox: %v", err)
return
}

// Create the container.
fClock.SetTime(time.Now().Add(-1 * time.Hour))
expected.CreatedAt = fClock.Now().UnixNano()

req := &runtimeapi.CreateContainerRequest{
PodSandboxId: sandboxID,
PodSandboxId: runSandboxResp.PodSandboxId,
Config: config,
SandboxConfig: sConfig,
}
@@ -243,7 +287,7 @@ func TestContainerStatus(t *testing.T) {
c, err := fDocker.InspectContainer(id)
require.NoError(t, err)
assert.Equal(t, c.Config.Labels[containerTypeLabelKey], containerTypeLabelContainer)
assert.Equal(t, c.Config.Labels[sandboxIDLabelKey], sandboxID)
assert.Equal(t, c.Config.Labels[sandboxIDLabelKey], runSandboxResp.PodSandboxId)

// Set the id manually since we don't know the id until it's created.
expected.Id = id
@@ -309,8 +353,16 @@ func TestContainerLogPath(t *testing.T) {
config := makeContainerConfig(sConfig, "pause", "iamimage", 0, nil, nil)
config.LogPath = containerLogPath

runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{
Config: sConfig,
})
if err != nil {
t.Errorf("RunPodSandbox: %v", err)
return
}

req := &runtimeapi.CreateContainerRequest{
PodSandboxId: sandboxID,
PodSandboxId: runSandboxResp.PodSandboxId,
Config: config,
SandboxConfig: sConfig,
}
@@ -378,36 +430,45 @@ func TestContainerCreationConflict(t *testing.T) {
expectCalls []string
expectFields int
}{
// Running the sandbox first adds "inspect_image", "pull", "create", "start", "inspect_container" to the expected calls.
"no create error": {
expectCalls: []string{"create"},
expectCalls: []string{"inspect_image", "pull", "create", "start", "inspect_container", "create"},
expectFields: 6,
},
"random create error": {
createError: randomError,
expectError: randomError,
expectCalls: []string{"create"},
expectCalls: []string{"inspect_image", "pull", "create", "start", "inspect_container", "create"},
},
"conflict create error with successful remove": {
createError: conflictError,
expectError: conflictError,
expectCalls: []string{"create", "remove"},
expectCalls: []string{"inspect_image", "pull", "create", "start", "inspect_container", "create", "remove"},
},
"conflict create error with random remove error": {
createError: conflictError,
removeError: randomError,
expectError: conflictError,
expectCalls: []string{"create", "remove"},
expectCalls: []string{"inspect_image", "pull", "create", "start", "inspect_container", "create", "remove"},
},
"conflict create error with no such container remove error": {
createError: conflictError,
removeError: noContainerError,
expectCalls: []string{"create", "remove", "create"},
expectCalls: []string{"inspect_image", "pull", "create", "start", "inspect_container", "create", "remove", "create"},
expectFields: 7,
},
} {
t.Logf("TestCase: %s", desc)
ds, fDocker, _ := newTestDockerService()

runSandboxResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{
Config: sConfig,
})
if err != nil {
require.EqualError(t, err, test.expectError.Error())
continue
}

if test.createError != nil {
fDocker.InjectError("create", test.createError)
}
@@ -416,7 +477,7 @@ func TestContainerCreationConflict(t *testing.T) {
}

req := &runtimeapi.CreateContainerRequest{
PodSandboxId: sandboxID,
PodSandboxId: runSandboxResp.PodSandboxId,
Config: config,
SandboxConfig: sConfig,
}
25 changes: 25 additions & 0 deletions core/sandbox_helpers.go
@@ -32,6 +32,7 @@ import (
dockerbackend "github.com/docker/docker/api/types/backend"
dockercontainer "github.com/docker/docker/api/types/container"
dockerregistry "github.com/docker/docker/api/types/registry"
dockersystem "github.com/docker/docker/api/types/system"
"github.com/sirupsen/logrus"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)
@@ -58,6 +59,30 @@ var (
defaultSandboxGracePeriod = time.Duration(10) * time.Second
)

// GetRuntimes returns the runtimes configured in the docker daemon.
func (ds *dockerService) GetRuntimes() (map[string]dockersystem.RuntimeWithStatus, error) {
info, err := ds.getDockerInfo()
if err != nil {
return nil, fmt.Errorf("failed to get docker info: %v", err)
}
return info.Runtimes, nil
}

func (ds *dockerService) IsRuntimeConfigured(runtime string) error {
runtimeWithStatus, err := ds.GetRuntimes()
if err != nil {
return err
}

for r := range runtimeWithStatus {
if r == runtime {
return nil
}
}

return fmt.Errorf("no runtime for %q is configured", runtime)
}

// Returns whether the sandbox network is ready, and whether the sandbox is known
func (ds *dockerService) getNetworkReady(podSandboxID string) (bool, bool) {
ds.networkReadyLock.Lock()
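
The call sites for these helpers are not shown in this diff. A plausible sketch of how a requested handler could be validated before being applied to HostConfig.Runtime — the helper name getSandboxRuntime and its exact placement are assumptions, not part of this commit:

// getSandboxRuntime is a hypothetical helper: it maps a CRI RuntimeHandler to
// the docker runtime name, validating that the daemon has it configured.
func (ds *dockerService) getSandboxRuntime(runtimeHandler string) (string, error) {
	// An empty handler (or the built-in "docker" handler) means the daemon default.
	if runtimeHandler == "" || runtimeHandler == "docker" {
		return "", nil
	}
	if err := ds.IsRuntimeConfigured(runtimeHandler); err != nil {
		return "", fmt.Errorf("failed to get sandbox runtime: %v", err)
	}
	return runtimeHandler, nil
}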
70 changes: 70 additions & 0 deletions core/sandbox_helpers_test.go
@@ -388,3 +388,73 @@ func TestSetUpPodFailure(t *testing.T) {
assert.NotNil(t, sandbox)
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State)
}

// TestRuntimeHandler checks that a sandbox created with a RuntimeHandler reports the expected handler.
func TestRuntimeHandler(t *testing.T) {
ds, _, _ := newTestDockerService()
name, namespace := "foo", "bar11"
var configs []*runtimeapi.PodSandboxConfig

rtHandlerTestCases := []struct {
Runtimehandler string
expectRuntimehandler string
expectError error
}{
{
Runtimehandler: "",
expectRuntimehandler: "",
expectError: nil,
},
{
Runtimehandler: "docker",
expectRuntimehandler: "",
expectError: nil,
},
{
Runtimehandler: "runc",
expectRuntimehandler: "runc",
expectError: nil,
},
{
Runtimehandler: "error_runtime",
expectRuntimehandler: "",
expectError: fmt.Errorf("failed to get sandbox runtime: no runtime for %q is configured", "error_runtime"),
},
}

for i := 0; i < len(rtHandlerTestCases); i++ {
c := makeSandboxConfigWithLabelsAndAnnotations(fmt.Sprintf("%s%d", name, i),
fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0,
map[string]string{"label": fmt.Sprintf("foo%d", i)},
map[string]string{"annotation": fmt.Sprintf("bar%d", i)},
)
configs = append(configs, c)
}

for i := range configs {
runResp, err := ds.RunPodSandbox(
getTestCTX(),
&runtimeapi.RunPodSandboxRequest{
Config: configs[i],
RuntimeHandler: rtHandlerTestCases[i].Runtimehandler,
},
)
if rtHandlerTestCases[i].expectError != nil {
assert.EqualError(t, err, rtHandlerTestCases[i].expectError.Error())
continue
}

require.NoError(t, err)

if runResp != nil {
listResp, err := ds.PodSandboxStatus(getTestCTX(), &runtimeapi.PodSandboxStatusRequest{
PodSandboxId: runResp.PodSandboxId},
)
require.NoError(t, err)
assert.Equal(t, rtHandlerTestCases[i].expectRuntimehandler, listResp.Status.GetRuntimeHandler())

}

}

}
