
Commit

add unit tests
Signed-off-by: Nahshon Unna-Tsameret <nunnatsa@redhat.com>
nunnatsa committed Dec 18, 2023
1 parent 45c1cf3 commit 341359d
Showing 2 changed files with 191 additions and 17 deletions.
177 changes: 177 additions & 0 deletions pkg/kubevirt/machine_test.go
@@ -32,6 +32,7 @@ import (
 	k8sfake "k8s.io/client-go/kubernetes/fake"
 	k8stesting "k8s.io/client-go/testing"
 	kubevirtv1 "kubevirt.io/api/core/v1"
+	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
@@ -777,6 +778,182 @@ var _ = Describe("util functions", func() {
	})
})

var _ = Describe("with dataVolume", func() {
	var machineContext *context.MachineContext
	namespace := kubevirtMachine.Namespace
	virtualMachineInstance := testing.NewVirtualMachineInstance(kubevirtMachine)
	virtualMachine := testing.NewVirtualMachine(virtualMachineInstance)
	dataVolume := &cdiv1.DataVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "dv-name",
			Namespace: namespace,
		},
	}

	BeforeEach(func() {
		kubevirtMachine.Spec.BootstrapCheckSpec = v1alpha1.VirtualMachineBootstrapCheckSpec{}

		machineContext = &context.MachineContext{
			Context:             gocontext.TODO(),
			Cluster:             cluster,
			KubevirtCluster:     kubevirtCluster,
			Machine:             machine,
			KubevirtMachine:     kubevirtMachine,
			BootstrapDataSecret: bootstrapDataSecret,
			Logger:              logger,
		}

		virtualMachine.Spec.DataVolumeTemplates = []kubevirtv1.DataVolumeTemplateSpec{
			{
				ObjectMeta: metav1.ObjectMeta{Name: "dv-name"},
			},
		}

		if virtualMachine.Spec.Template == nil {
			virtualMachine.Spec.Template = &kubevirtv1.VirtualMachineInstanceTemplateSpec{}
		}
		virtualMachine.Spec.Template.Spec.Volumes = []kubevirtv1.Volume{
			{
				Name: "dv-disk",
				VolumeSource: kubevirtv1.VolumeSource{
					DataVolume: &kubevirtv1.DataVolumeSource{
						Name: "dv-name",
					},
				},
			},
		}

		fakeVMCommandExecutor = FakeVMCommandExecutor{true}
	})

	JustBeforeEach(func() {
		objects := []client.Object{
			cluster,
			kubevirtCluster,
			machine,
			kubevirtMachine,
			virtualMachineInstance,
			virtualMachine,
			dataVolume,
		}
		fakeClient = fake.NewClientBuilder().WithScheme(testing.SetupScheme()).WithObjects(objects...).Build()
	})

	It("NewMachine should have all client, machineContext and vmiInstance NOT nil", func() {
		externalMachine, err := defaultTestMachine(machineContext, namespace, fakeClient, fakeVMCommandExecutor, []byte(sshKey))
		Expect(err).NotTo(HaveOccurred())
		Expect(externalMachine.client).ToNot(BeNil())
		Expect(externalMachine.machineContext).To(Equal(machineContext))
		Expect(externalMachine.vmiInstance).ToNot(BeNil())
		Expect(externalMachine.dataVolume).ToNot(BeNil())
		Expect(externalMachine.dataVolume.Name).To(Equal(dataVolume.Name))
	})
})

var _ = Describe("check GetVMUnscheduledReason", func() {
	DescribeTable("not-ready reason", func(vm *kubevirtv1.VirtualMachine, dv *cdiv1.DataVolume, expectedReason, expectedMsg string) {
		m := Machine{
			vmInstance: vm,
			dataVolume: dv,
		}
		reason, msg := m.GetVMUnscheduledReason()
		Expect(reason).To(Equal(expectedReason))
		Expect(msg).To(ContainSubstring(expectedMsg))
	},
		Entry("no vm instance", nil, nil, defaultCondReason, defaultCondMessage),
		Entry("no vm conditions", &kubevirtv1.VirtualMachine{}, nil, defaultCondReason, defaultCondMessage),
		Entry("vm PodScheduled condition is true", &kubevirtv1.VirtualMachine{
			Status: kubevirtv1.VirtualMachineStatus{
				Conditions: []kubevirtv1.VirtualMachineCondition{
					{
						Type:   kubevirtv1.VirtualMachineConditionType(corev1.PodScheduled),
						Status: corev1.ConditionTrue,
					},
				},
			},
		}, nil, defaultCondReason, defaultCondMessage),
		Entry("vm PodScheduled condition is false, with unknown reason", &kubevirtv1.VirtualMachine{
			Status: kubevirtv1.VirtualMachineStatus{
				Conditions: []kubevirtv1.VirtualMachineCondition{
					{
						Type:   kubevirtv1.VirtualMachineConditionType(corev1.PodScheduled),
						Status: corev1.ConditionFalse,
						Reason: "somethingElse",
					},
				},
			},
		}, nil, defaultCondReason, defaultCondMessage),
		Entry("vm PodScheduled condition is false, with 'Unschedulable' reason", &kubevirtv1.VirtualMachine{
			Status: kubevirtv1.VirtualMachineStatus{
				Conditions: []kubevirtv1.VirtualMachineCondition{
					{
						Type:    kubevirtv1.VirtualMachineConditionType(corev1.PodScheduled),
						Status:  corev1.ConditionFalse,
						Reason:  "Unschedulable",
						Message: "test message",
					},
				},
			},
		}, nil, "Unschedulable", "test message"),
		Entry("dv with Running condition; phase = Succeeded", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
			Status: cdiv1.DataVolumeStatus{
				Phase: cdiv1.Succeeded,
			},
		}, defaultCondReason, defaultCondMessage),
		Entry("dv with Running condition; phase = Pending", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
			Status: cdiv1.DataVolumeStatus{
				Phase: cdiv1.Pending,
			},
		}, "DVPending", "is pending"),
		Entry("dv with Running condition; phase = Failed", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
			Status: cdiv1.DataVolumeStatus{
				Phase: cdiv1.Failed,
			},
		}, "DVFailed", "failed"),
		Entry("dv with Running condition; phase is something else; Running condition true", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
			Status: cdiv1.DataVolumeStatus{
				Phase: cdiv1.ImportInProgress,
				Conditions: []cdiv1.DataVolumeCondition{
					{
						Type:   cdiv1.DataVolumeRunning,
						Status: corev1.ConditionTrue,
					},
				},
			},
		}, "DVNotReady", "is still provisioning"),
		Entry("dv with Running condition; phase is something else; Running condition false; reason=Completed", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
			Status: cdiv1.DataVolumeStatus{
				Phase: cdiv1.ImportInProgress,
				Conditions: []cdiv1.DataVolumeCondition{
					{
						Type:   cdiv1.DataVolumeRunning,
						Status: corev1.ConditionFalse,
						Reason: "Completed",
					},
				},
			},
		}, "DVNotReady", "is still provisioning"),
		Entry("dv with Running condition; phase is something else; Running condition false; reason!=Completed", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
			Status: cdiv1.DataVolumeStatus{
				Phase: cdiv1.ImportInProgress,
				Conditions: []cdiv1.DataVolumeCondition{
					{
						Type:    cdiv1.DataVolumeRunning,
						Status:  corev1.ConditionFalse,
						Reason:  "SomethingElse",
						Message: "test message",
					},
				},
			},
		}, "DVNotReady", "test message"),
		Entry("dv with Running condition; phase is something else; no Running condition", &kubevirtv1.VirtualMachine{}, &cdiv1.DataVolume{
			Status: cdiv1.DataVolumeStatus{
				Phase:      cdiv1.ImportInProgress,
				Conditions: []cdiv1.DataVolumeCondition{},
			},
		}, "DVNotReady", "DataVolume is not ready"),
	)
})

func validateVMNotExist(expected *kubevirtv1.VirtualMachine, fakeClient client.Client, machineContext *context.MachineContext) {
	vm := &kubevirtv1.VirtualMachine{}
	key := client.ObjectKey{Name: expected.Name, Namespace: expected.Namespace}
31 changes: 14 additions & 17 deletions pkg/testing/common.go
@@ -7,6 +7,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	kubevirtv1 "kubevirt.io/api/core/v1"
+	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
@@ -188,23 +189,19 @@ func NewBootstrapDataSecret(userData []byte) *corev1.Secret {
 // SetupScheme setups the scheme for a fake client.
 func SetupScheme() *runtime.Scheme {
 	s := runtime.NewScheme()
-	if err := clusterv1.AddToScheme(s); err != nil {
-		panic(err)
-	}
-	if err := infrav1.AddToScheme(s); err != nil {
-		panic(err)
-	}
-	if err := kubevirtv1.AddToScheme(s); err != nil {
-		panic(err)
-	}
-	if err := corev1.AddToScheme(s); err != nil {
-		panic(err)
-	}
-	if err := appsv1.AddToScheme(s); err != nil {
-		panic(err)
-	}
-	if err := rbacv1.AddToScheme(s); err != nil {
-		panic(err)
+	for _, f := range []func(*runtime.Scheme) error{
+		clusterv1.AddToScheme,
+		infrav1.AddToScheme,
+		kubevirtv1.AddToScheme,
+		cdiv1.AddToScheme,
+		corev1.AddToScheme,
+		appsv1.AddToScheme,
+		rbacv1.AddToScheme,
+	} {
+		if err := f(s); err != nil {
+			panic(err)
+		}
 	}
 
 	return s
 }
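
For reference, the loop-based SetupScheme keeps scheme registration table-driven, so wiring in a new API group (cdiv1 in this commit) is a one-line change. The following is a minimal, self-contained sketch of the same pattern; it uses only upstream Kubernetes schemes, and the final IsGroupRegistered check is an illustrative assertion, not code from this repository:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	s := runtime.NewScheme()

	// Same shape as the refactored SetupScheme: list the AddToScheme
	// functions once and register them all in a single loop.
	for _, f := range []func(*runtime.Scheme) error{
		corev1.AddToScheme,
		appsv1.AddToScheme,
	} {
		if err := f(s); err != nil {
			panic(err)
		}
	}

	// A scheme built this way is what the tests above hand to
	// fake.NewClientBuilder().WithScheme(...) before adding objects.
	fmt.Println(s.IsGroupRegistered("apps")) // true
}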
