From e8956e35fa8554c9616909627994bac187d839d9 Mon Sep 17 00:00:00 2001 From: Jianjun Shen Date: Fri, 18 Mar 2022 20:00:46 -0400 Subject: [PATCH] Antrea IPAM for secondary networks managed by Multus Extend Antrea IPAM and Antrea CNI plugin to support IPAM for Pod secondary networks. A CNI call that specifies a non-Antrea CNI type and Antrea IPAM type is identified as an IPAM request for a secondary network. The IPAM configuration of the CNI call should specify the Antrea IPPool(s) to allocate IPs for the secondary network. Additionally, Routes and DNS parameters are supported in the IPAM configuration. This implementation is for secondary network managed by Multus, not Antrea native secondary network support. Only a single IPv4 IPPool is supported as of now. An example Multus NetworkAttachmentDefinition with Antrea IPAM: kind: NetworkAttachmentDefinition metadata: name: macvlan-network1 spec: config: '{ "cniVersion": "0.3.0", "type": "macvlan", "master": "enp0s9", "mode": "bridge", "ipam": { "type": "antrea", "ippool": "macvlan-subnet1", "routes": [ { "dst": "0.0.0.0/0" }, { "dst": "192.168.0.0/16", "gw": "10.10.5.1" }, { "dst": "3ffe:ffff:0:01ff::1/64" } ], "dns": { "nameservers" : ["8.8.8.8"], "domain": "example.com", "search": [ "example.com" ] } } }' Signed-off-by: Jianjun Shen --- build/yamls/antrea-aks.yml | 2 + build/yamls/antrea-eks.yml | 2 + build/yamls/antrea-gke.yml | 2 + build/yamls/antrea-ipsec.yml | 2 + build/yamls/antrea.yml | 2 + build/yamls/base/crds.yml | 2 + cmd/antrea-agent/agent.go | 17 +- pkg/agent/cniserver/ipam/antrea_ipam.go | 168 ++++++++++--- .../cniserver/ipam/antrea_ipam_controller.go | 138 +++++++---- pkg/agent/cniserver/ipam/antrea_ipam_test.go | 8 +- pkg/agent/cniserver/ipam/ipam_delegator.go | 4 +- pkg/agent/cniserver/ipam/ipam_service.go | 57 +++-- pkg/agent/cniserver/pod_configuration.go | 3 +- pkg/agent/cniserver/server.go | 227 +++++++++++------- pkg/agent/cniserver/server_test.go | 207 ++++++++++++----
pkg/agent/cniserver/types/arg_types.go | 26 -- pkg/agent/cniserver/types/types.go | 67 ++++++ pkg/apis/crd/v1alpha2/types.go | 12 +- pkg/controller/ipam/validate.go | 10 +- pkg/controller/ipam/validate_test.go | 2 +- pkg/ipam/poolallocator/allocator.go | 95 +++----- pkg/ipam/poolallocator/allocator_test.go | 46 +++- test/e2e/antreaipam_test.go | 4 +- test/integration/agent/cniserver_test.go | 8 +- 24 files changed, 734 insertions(+), 377 deletions(-) delete mode 100644 pkg/agent/cniserver/types/arg_types.go create mode 100644 pkg/agent/cniserver/types/types.go diff --git a/build/yamls/antrea-aks.yml b/build/yamls/antrea-aks.yml index 71a1b70b4f4..324349895e7 100644 --- a/build/yamls/antrea-aks.yml +++ b/build/yamls/antrea-aks.yml @@ -1278,6 +1278,8 @@ spec: properties: containerID: type: string + ifName: + type: string name: type: string namespace: diff --git a/build/yamls/antrea-eks.yml b/build/yamls/antrea-eks.yml index c69134a139b..7f121adcb53 100644 --- a/build/yamls/antrea-eks.yml +++ b/build/yamls/antrea-eks.yml @@ -1278,6 +1278,8 @@ spec: properties: containerID: type: string + ifName: + type: string name: type: string namespace: diff --git a/build/yamls/antrea-gke.yml b/build/yamls/antrea-gke.yml index 9987ca496cf..56eff8f1fbe 100644 --- a/build/yamls/antrea-gke.yml +++ b/build/yamls/antrea-gke.yml @@ -1278,6 +1278,8 @@ spec: properties: containerID: type: string + ifName: + type: string name: type: string namespace: diff --git a/build/yamls/antrea-ipsec.yml b/build/yamls/antrea-ipsec.yml index 95a0c9a2569..3fc1170a6ae 100644 --- a/build/yamls/antrea-ipsec.yml +++ b/build/yamls/antrea-ipsec.yml @@ -1278,6 +1278,8 @@ spec: properties: containerID: type: string + ifName: + type: string name: type: string namespace: diff --git a/build/yamls/antrea.yml b/build/yamls/antrea.yml index 36fd83723cc..45308ddc962 100644 --- a/build/yamls/antrea.yml +++ b/build/yamls/antrea.yml @@ -1278,6 +1278,8 @@ spec: properties: containerID: type: string + ifName: + type: string 
name: type: string namespace: diff --git a/build/yamls/base/crds.yml b/build/yamls/base/crds.yml index b4176f40846..da3a2e916c4 100644 --- a/build/yamls/base/crds.yml +++ b/build/yamls/base/crds.yml @@ -292,6 +292,8 @@ spec: type: string containerID: type: string + ifName: + type: string type: object statefulSet: properties: diff --git a/cmd/antrea-agent/agent.go b/cmd/antrea-agent/agent.go index cf3198f23c8..952f9a13c51 100644 --- a/cmd/antrea-agent/agent.go +++ b/cmd/antrea-agent/agent.go @@ -114,7 +114,8 @@ func run(o *Options) error { } defer ovsdbConnection.Close() - enableBridgingMode := features.DefaultFeatureGate.Enabled(features.AntreaIPAM) && o.config.EnableBridgingMode + enableAntreaIPAM := features.DefaultFeatureGate.Enabled(features.AntreaIPAM) + enableBridgingMode := enableAntreaIPAM && o.config.EnableBridgingMode // Bridging mode will connect the uplink interface to the OVS bridge. connectUplinkToBridge := enableBridgingMode @@ -357,9 +358,10 @@ func run(o *Options) error { o.config.HostProcPathPrefix, nodeConfig, k8sClient, - isChaining, - enableBridgingMode, // activate AntreaIPAM in CNIServer when bridging mode is enabled routeClient, + isChaining, + enableBridgingMode, + enableAntreaIPAM, networkReadyCh) var cniPodInfoStore cnipodcache.CNIPodInfoStore @@ -490,13 +492,10 @@ func run(o *Options) error { // Now Antrea IPAM is used only by bridging mode, so we initialize AntreaIPAMController only // when the bridging mode is enabled. 
- if enableBridgingMode { + if enableAntreaIPAM { ipamController, err := ipam.InitializeAntreaIPAMController( - k8sClient, - crdClient, - informerFactory, - localPodInformer, - crdInformerFactory) + crdClient, informerFactory, crdInformerFactory, + localPodInformer, enableBridgingMode) if err != nil { return fmt.Errorf("failed to start Antrea IPAM agent: %v", err) } diff --git a/pkg/agent/cniserver/ipam/antrea_ipam.go b/pkg/agent/cniserver/ipam/antrea_ipam.go index d37faa22395..bf252f3c93e 100644 --- a/pkg/agent/cniserver/ipam/antrea_ipam.go +++ b/pkg/agent/cniserver/ipam/antrea_ipam.go @@ -23,16 +23,17 @@ import ( "github.com/containernetworking/cni/pkg/invoke" cnitypes "github.com/containernetworking/cni/pkg/types" "github.com/containernetworking/cni/pkg/types/current" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" - argtypes "antrea.io/antrea/pkg/agent/cniserver/types" + "antrea.io/antrea/pkg/agent/cniserver/types" crdv1a2 "antrea.io/antrea/pkg/apis/crd/v1alpha2" "antrea.io/antrea/pkg/ipam/poolallocator" ) const ( - AntreaIPAMType = "antrea-ipam" + AntreaIPAMType = "antrea" ) // Antrea IPAM driver would allocate IP addresses according to object IPAM annotation, @@ -59,12 +60,17 @@ const ( // Resource needs to be unique since it is used as identifier in Del. // Therefore Container ID is used, while Pod/Namespace are shown for visibility. // TODO: Consider multi-interface case -func getAllocationOwner(args *invoke.Args, k8sArgs *argtypes.K8sArgs, reservedOwner *crdv1a2.IPAddressOwner) crdv1a2.IPAddressOwner { +func getAllocationOwner(args *invoke.Args, k8sArgs *types.K8sArgs, reservedOwner *crdv1a2.IPAddressOwner, secondary bool) crdv1a2.IPAddressOwner { podOwner := &crdv1a2.PodOwner{ Name: string(k8sArgs.K8S_POD_NAME), Namespace: string(k8sArgs.K8S_POD_NAMESPACE), ContainerID: args.ContainerID, } + if secondary { + // Add interface name for secondary network to uniquely identify + // the secondary network interface. 
+ podOwner.IFName = args.IfName + } if reservedOwner != nil { owner := *reservedOwner owner.Pod = podOwner @@ -115,7 +121,7 @@ func (d *AntreaIPAM) setController(controller *AntreaIPAMController) { // Add allocates next available IP address from associated IP Pool // Allocated IP and associated resource are stored in IP Pool status -func (d *AntreaIPAM) Add(args *invoke.Args, k8sArgs *argtypes.K8sArgs, networkConfig []byte) (bool, *IPAMResult, error) { +func (d *AntreaIPAM) Add(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfig []byte) (bool, *IPAMResult, error) { mine, allocator, ips, reservedOwner, err := d.owns(k8sArgs) if err != nil { return true, nil, err @@ -125,9 +131,9 @@ func (d *AntreaIPAM) Add(args *invoke.Args, k8sArgs *argtypes.K8sArgs, networkCo return false, nil, nil } - owner := getAllocationOwner(args, k8sArgs, reservedOwner) + owner := getAllocationOwner(args, k8sArgs, reservedOwner, false) var ip net.IP - var subnetInfo crdv1a2.SubnetInfo + var subnetInfo *crdv1a2.SubnetInfo if reservedOwner != nil { ip, subnetInfo, err = allocator.AllocateReservedOrNext(crdv1a2.IPAddressPhaseAllocated, owner) } else if len(ips) == 0 { @@ -142,7 +148,7 @@ func (d *AntreaIPAM) Add(args *invoke.Args, k8sArgs *argtypes.K8sArgs, networkCo klog.V(4).InfoS("IP allocation successful", "IP", ip.String(), "Pod", string(k8sArgs.K8S_POD_NAME)) - result := IPAMResult{Result: current.Result{CNIVersion: current.ImplementedSpecVersion}, VLANID: subnetInfo.VLAN & 0xfff} + result := IPAMResult{Result: current.Result{CNIVersion: current.ImplementedSpecVersion}, VLANID: subnetInfo.VLAN} gwIP := net.ParseIP(subnetInfo.Gateway) ipConfig, defaultRoute := generateIPConfig(ip, int(subnetInfo.PrefixLength), gwIP) @@ -153,23 +159,20 @@ func (d *AntreaIPAM) Add(args *invoke.Args, k8sArgs *argtypes.K8sArgs, networkCo } // Del deletes IP associated with resource from IP Pool status -func (d *AntreaIPAM) Del(args *invoke.Args, k8sArgs *argtypes.K8sArgs, networkConfig []byte) (bool, 
error) { - mine, allocator, _, _, err := d.owns(k8sArgs) - if mine == mineFalse || mine == mineUnknown { - // pass this request to next driver - return false, nil - } +func (d *AntreaIPAM) Del(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfig []byte) (bool, error) { + owner := getAllocationOwner(args, k8sArgs, nil, false) + foundAllocation, err := d.del(owner.Pod) if err != nil { + // Let the invoker retry at error. return true, err } - owner := getAllocationOwner(args, k8sArgs, nil) - err = allocator.ReleaseContainerIfPresent(owner.Pod.ContainerID) - return true, err + // If no allocation found, pass CNI DEL to the next driver. + return foundAllocation, nil } // Check verifues IP associated with resource is tracked in IP Pool status -func (d *AntreaIPAM) Check(args *invoke.Args, k8sArgs *argtypes.K8sArgs, networkConfig []byte) (bool, error) { +func (d *AntreaIPAM) Check(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfig []byte) (bool, error) { mine, allocator, _, _, err := d.owns(k8sArgs) if err != nil { return true, err @@ -179,8 +182,7 @@ func (d *AntreaIPAM) Check(args *invoke.Args, k8sArgs *argtypes.K8sArgs, network return false, nil } - owner := getAllocationOwner(args, k8sArgs, nil) - found, err := allocator.HasContainer(owner.Pod.ContainerID) + found, err := allocator.HasContainer(args.ContainerID, "") if err != nil { return true, err } @@ -192,6 +194,90 @@ func (d *AntreaIPAM) Check(args *invoke.Args, k8sArgs *argtypes.K8sArgs, network return true, nil } +func (d *AntreaIPAM) secondaryNetworkAdd(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfig *types.NetworkConfig) (*current.Result, error) { + ipamConf := networkConfig.IPAM + if len(ipamConf.IPPools) == 0 { + return nil, fmt.Errorf("Antrea IPPool must be specified") + } + + if err := d.waitForControllerReady(); err != nil { + // Return error to let the invoker retry. 
+ return nil, err + } + + var err error + var allocator *poolallocator.IPPoolAllocator + for _, p := range ipamConf.IPPools { + allocator, err = d.controller.getPoolAllocatorByName(p) + if err != nil { + if !errors.IsNotFound(err) { + return nil, fmt.Errorf("failed to get IPPool %s: %v", p, err) + } + klog.InfoS("IPPool not found", "pool", p) + } else if allocator.IPVersion == crdv1a2.IPv4 { + // Support IPv6 / dual stack in future. + break + } + + } + if allocator == nil { + return nil, fmt.Errorf("no valid IPPool found") + } + + owner := getAllocationOwner(args, k8sArgs, nil, true) + var ip net.IP + var subnetInfo *crdv1a2.SubnetInfo + ip, subnetInfo, err = allocator.AllocateNext(crdv1a2.IPAddressPhaseAllocated, owner) + if err != nil { + return nil, err + } + + // Copy routes and DNS from the input IPAM configuration. + result := ¤t.Result{Routes: ipamConf.Routes, DNS: ipamConf.DNS} + gwIP := net.ParseIP(subnetInfo.Gateway) + ipConfig, _ := generateIPConfig(ip, int(subnetInfo.PrefixLength), gwIP) + result.IPs = append(result.IPs, ipConfig) + return result, nil +} + +func (d *AntreaIPAM) secondaryNetworkDel(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfig *types.NetworkConfig) error { + owner := getAllocationOwner(args, k8sArgs, nil, true) + _, err := d.del(owner.Pod) + return err +} + +func (d *AntreaIPAM) secondaryNetworkCheck(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfig *types.NetworkConfig) error { + return fmt.Errorf("CNI CHECK is not implemented for secondary network") +} + +func (d *AntreaIPAM) del(podOwner *crdv1a2.PodOwner) (foundAllocation bool, err error) { + if err := d.waitForControllerReady(); err != nil { + // Return error to let the invoker retry. + return false, err + } + // The Pod resource might have been removed; and for a secondary we + // would rely on the passed IPPool for CNI DEL. So, search IPPools with + // the matched PodOwner. 
+ allocators, err := d.controller.getPoolAllocatorsByOwner(podOwner) + if err != nil { + return false, err + } + + if len(allocators) == 0 { + return false, nil + } + // Multiple allocators can be returned if the network interface has IPs + // allocated from more than one IPPool. + for _, a := range allocators { + err = a.ReleaseContainer(podOwner.ContainerID, podOwner.IFName) + if err != nil { + klog.Errorf("Failed to release IP for container %s: %v", podOwner.ContainerID, err) + return true, err + } + } + return true, nil +} + // owns checks whether this driver owns coming IPAM request. This decision is based on // Antrea IPAM annotation for the resource (only Namespace annotation is supported as // of today). If annotation is not present, or annotated IP Pool not found, the driver @@ -203,18 +289,10 @@ func (d *AntreaIPAM) Check(args *invoke.Args, k8sArgs *argtypes.K8sArgs, network // mineTrue + timeout error // mineTrue + IPPoolNotFound error // mineTrue + nil error -func (d *AntreaIPAM) owns(k8sArgs *argtypes.K8sArgs) (mineType, *poolallocator.IPPoolAllocator, []net.IP, *crdv1a2.IPAddressOwner, error) { +func (d *AntreaIPAM) owns(k8sArgs *types.K8sArgs) (mineType, *poolallocator.IPPoolAllocator, []net.IP, *crdv1a2.IPAddressOwner, error) { // Wait controller ready to avoid inappropriate behavior on CNI request - if err := wait.PollImmediate(500*time.Millisecond, 5*time.Second, func() (bool, error) { - d.controllerMutex.RLock() - defer d.controllerMutex.RUnlock() - if d.controller == nil { - klog.Warningf("Antrea IPAM driver is not ready.") - return false, nil - } - return true, nil - }); err != nil { - // return mineTrue to make this request failed and kubelet will retry + if err := d.waitForControllerReady(); err != nil { + // Return mineTrue to make this request failed and kubelet will retry.
return mineTrue, nil, nil, nil, err } @@ -227,17 +305,29 @@ func (d *AntreaIPAM) owns(k8sArgs *argtypes.K8sArgs) (mineType, *poolallocator.I return d.controller.getPoolAllocatorByPod(namespace, podName) } -func init() { - // Antrea driver must come first - // NOTE: this is global variable that requires follow-up setup post agent Init - antreaIPAMDriver = &AntreaIPAM{} +func (d *AntreaIPAM) waitForControllerReady() error { + err := wait.PollImmediate(500*time.Millisecond, 5*time.Second, func() (bool, error) { + d.controllerMutex.RLock() + defer d.controllerMutex.RUnlock() + if d.controller == nil { + klog.Warningf("Antrea IPAM driver is not ready.") + return false, nil + } + return true, nil + }) - if err := RegisterIPAMDriver(AntreaIPAMType, antreaIPAMDriver); err != nil { - klog.Errorf("Failed to register IPAM plugin on type %s", AntreaIPAMType) + if err != nil { + return fmt.Errorf("Antrea IPAM driver not ready: %v", err) } + return nil +} + +func init() { + // Antrea driver must come first. + // NOTE: this is global variable that requires follow-up setup post agent initialization. 
+ antreaIPAMDriver = &AntreaIPAM{} + RegisterIPAMDriver(AntreaIPAMType, antreaIPAMDriver) // Host local plugin is fallback driver - if err := RegisterIPAMDriver(AntreaIPAMType, &IPAMDelegator{pluginType: ipamHostLocal}); err != nil { - klog.Errorf("Failed to register IPAM plugin on type %s", ipamHostLocal) - } + RegisterIPAMDriver(AntreaIPAMType, &IPAMDelegator{pluginType: ipamHostLocal}) } diff --git a/pkg/agent/cniserver/ipam/antrea_ipam_controller.go b/pkg/agent/cniserver/ipam/antrea_ipam_controller.go index 74d951b8e27..195ac278cf7 100644 --- a/pkg/agent/cniserver/ipam/antrea_ipam_controller.go +++ b/pkg/agent/cniserver/ipam/antrea_ipam_controller.go @@ -19,10 +19,10 @@ import ( "net" "strings" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/informers" coreinformers "k8s.io/client-go/informers/core/v1" - clientset "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" @@ -47,7 +47,6 @@ const ( // this controller can be used to store annotations for other objects, // such as Statefulsets. 
type AntreaIPAMController struct { - kubeClient clientset.Interface crdClient clientsetversioned.Interface ipPoolInformer crdinformers.IPPoolInformer ipPoolLister crdlisters.IPPoolLister @@ -57,30 +56,6 @@ type AntreaIPAMController struct { podLister corelisters.PodLister } -func NewAntreaIPAMController(kubeClient clientset.Interface, - crdClient clientsetversioned.Interface, - informerFactory informers.SharedInformerFactory, - podInformer cache.SharedIndexInformer, - crdInformerFactory externalversions.SharedInformerFactory) *AntreaIPAMController { - - namespaceInformer := informerFactory.Core().V1().Namespaces() - ipPoolInformer := crdInformerFactory.Crd().V1alpha2().IPPools() - ipPoolInformer.Informer().AddIndexers(cache.Indexers{podIndex: podIndexFunc}) - - c := AntreaIPAMController{ - kubeClient: kubeClient, - crdClient: crdClient, - ipPoolInformer: ipPoolInformer, - ipPoolLister: ipPoolInformer.Lister(), - namespaceInformer: namespaceInformer, - namespaceLister: namespaceInformer.Lister(), - podInformer: podInformer, - podLister: corelisters.NewPodLister(podInformer.GetIndexer()), - } - - return &c -} - func podIndexFunc(obj interface{}) ([]string, error) { ipPool, ok := obj.(*crdv1a2.IPPool) if !ok { @@ -95,9 +70,10 @@ func podIndexFunc(obj interface{}) ([]string, error) { return podNames.UnsortedList(), nil } -func InitializeAntreaIPAMController(kubeClient clientset.Interface, crdClient clientsetversioned.Interface, informerFactory informers.SharedInformerFactory, podInformer cache.SharedIndexInformer, crdInformerFactory externalversions.SharedInformerFactory) (*AntreaIPAMController, error) { - antreaIPAMController := NewAntreaIPAMController(kubeClient, crdClient, informerFactory, podInformer, crdInformerFactory) - +func InitializeAntreaIPAMController(crdClient clientsetversioned.Interface, + informerFactory informers.SharedInformerFactory, + crdInformerFactory externalversions.SharedInformerFactory, + podInformer cache.SharedIndexInformer, ipamAnnotations 
bool) (*AntreaIPAMController, error) { // Order of init causes antreaIPAMDriver to be initialized first // After controller is initialized by agent init, we need to make it // know to the driver @@ -105,6 +81,31 @@ func InitializeAntreaIPAMController(kubeClient clientset.Interface, crdClient cl return nil, fmt.Errorf("Antrea IPAM driver failed to initialize") } + var antreaIPAMController *AntreaIPAMController + ipPoolInformer := crdInformerFactory.Crd().V1alpha2().IPPools() + ipPoolInformer.Informer().AddIndexers(cache.Indexers{podIndex: podIndexFunc}) + + // Create podInformer/Lister and namespaceInformer/Lister if need to read the AntreaIPAM + // annotation on Pods and Namespaces. + if ipamAnnotations { + namespaceInformer := informerFactory.Core().V1().Namespaces() + antreaIPAMController = &AntreaIPAMController{ + crdClient: crdClient, + ipPoolInformer: ipPoolInformer, + ipPoolLister: ipPoolInformer.Lister(), + namespaceInformer: namespaceInformer, + namespaceLister: namespaceInformer.Lister(), + podInformer: podInformer, + podLister: corelisters.NewPodLister(podInformer.GetIndexer()), + } + } else { + antreaIPAMController = &AntreaIPAMController{ + crdClient: crdClient, + ipPoolInformer: ipPoolInformer, + ipPoolLister: ipPoolInformer.Lister(), + } + } + antreaIPAMDriver.setController(antreaIPAMController) return antreaIPAMController, nil } @@ -116,35 +117,26 @@ func (c *AntreaIPAMController) Run(stopCh <-chan struct{}) { }() klog.InfoS("Starting", "controller", controllerName) - if !cache.WaitForNamedCacheSync(controllerName, stopCh, c.namespaceInformer.Informer().HasSynced, c.ipPoolInformer.Informer().HasSynced, c.podInformer.HasSynced) { - return + if c.podInformer != nil && c.namespaceInformer != nil { + if !cache.WaitForNamedCacheSync(controllerName, stopCh, c.ipPoolInformer.Informer().HasSynced, c.podInformer.HasSynced, c.namespaceInformer.Informer().HasSynced) { + return + } + } else { + if !cache.WaitForNamedCacheSync(controllerName, stopCh, 
c.ipPoolInformer.Informer().HasSynced) { + return + } } antreaIPAMDriver.setController(c) <-stopCh } +// Look up IPPools from the Pod annotation. func (c *AntreaIPAMController) getIPPoolsByPod(namespace, name string) ([]string, []net.IP, *crdv1a2.IPAddressOwner, error) { - // Find IPPool by Pod var ips []net.IP var reservedOwner *crdv1a2.IPAddressOwner pod, err := c.podLister.Pods(namespace).Get(name) if err != nil { - // For CNI DEL case, getting Pod may fail. Try to get information from allocated IPs of all IPPools. - klog.ErrorS(err, "Getting pod failed", "namespace", namespace, "name", name) - ipPools, _ := c.ipPoolInformer.Informer().GetIndexer().ByIndex(podIndex, k8s.NamespacedName(namespace, name)) - for _, item := range ipPools { - ipPool := item.(*crdv1a2.IPPool) - if ipPool.Spec.IPVersion != 4 { - continue - } - for _, IPAddress := range ipPool.Status.IPAddresses { - if IPAddress.Owner.Pod != nil && IPAddress.Owner.Pod.Namespace == namespace && IPAddress.Owner.Pod.Name == name { - // reservedOwner is nil, since this is not needed for CNI DEL case - return []string{ipPool.Name}, []net.IP{net.ParseIP(IPAddress.IPAddress)}, nil, nil - } - } - } return nil, nil, nil, err } @@ -203,16 +195,58 @@ ownerReferenceLoop: return strings.Split(annotations, AntreaIPAMAnnotationDelimiter), ips, reservedOwner, ipErr } +// Look up IPPools from the Pod annotation. 
func (c *AntreaIPAMController) getPoolAllocatorByPod(namespace, podName string) (mineType, *poolallocator.IPPoolAllocator, []net.IP, *crdv1a2.IPAddressOwner, error) { poolNames, ips, reservedOwner, err := c.getIPPoolsByPod(namespace, podName) if err != nil { return mineUnknown, nil, nil, nil, err - } else if len(poolNames) < 1 { + } else if len(poolNames) == 0 { return mineFalse, nil, nil, nil, nil } - // Only one pool is supported as of today - // TODO - support a pool for each IP version - ipPool := poolNames[0] - allocator, err := poolallocator.NewIPPoolAllocator(ipPool, c.crdClient, c.ipPoolLister) + + var allocator *poolallocator.IPPoolAllocator + for _, p := range poolNames { + allocator, err = poolallocator.NewIPPoolAllocator(p, c.crdClient, c.ipPoolLister) + if err != nil { + if !errors.IsNotFound(err) { + err = fmt.Errorf("failed to get IPPool %s: %v", p, err) + break + } + klog.InfoS("IPPool not found", "pool", p) + err = nil + } else if allocator.IPVersion == crdv1a2.IPv4 { + // Support IPv6 / dual stack in future. + break + } + } + if allocator == nil { + err = fmt.Errorf("no valid IPPool found") + } + return mineTrue, allocator, ips, reservedOwner, err } + +// Look up IPPools by matching PodOwner.
+func (c *AntreaIPAMController) getPoolAllocatorsByOwner(podOwner *crdv1a2.PodOwner) ([]*poolallocator.IPPoolAllocator, error) { + var allocators []*poolallocator.IPPoolAllocator + ipPools, _ := c.ipPoolInformer.Informer().GetIndexer().ByIndex(podIndex, + k8s.NamespacedName(podOwner.Namespace, podOwner.Name)) + for _, item := range ipPools { + ipPool := item.(*crdv1a2.IPPool) + for _, IPAddress := range ipPool.Status.IPAddresses { + savedPod := IPAddress.Owner.Pod + if savedPod != nil && savedPod.ContainerID == podOwner.ContainerID && savedPod.IFName == podOwner.IFName { + allocator, err := poolallocator.NewIPPoolAllocator(ipPool.Name, c.crdClient, c.ipPoolLister) + if err != nil { + return nil, err + } + allocators = append(allocators, allocator) + } + } + } + return allocators, nil +} + +func (c *AntreaIPAMController) getPoolAllocatorByName(poolName string) (*poolallocator.IPPoolAllocator, error) { + return poolallocator.NewIPPoolAllocator(poolName, c.crdClient, c.ipPoolLister) +} diff --git a/pkg/agent/cniserver/ipam/antrea_ipam_test.go b/pkg/agent/cniserver/ipam/antrea_ipam_test.go index da651947f95..9140509540e 100644 --- a/pkg/agent/cniserver/ipam/antrea_ipam_test.go +++ b/pkg/agent/cniserver/ipam/antrea_ipam_test.go @@ -109,7 +109,7 @@ func createIPPools(crdClient *fakepoolclient.IPPoolClientset) { ObjectMeta: metav1.ObjectMeta{Name: testPear}, Spec: crdv1a2.IPPoolSpec{ IPRanges: []crdv1a2.SubnetIPRange{subnetRangePear}, - IPVersion: 4, + IPVersion: crdv1a2.IPv4, }, Status: crdv1a2.IPPoolStatus{IPAddresses: []crdv1a2.IPAddressState{{ IPAddress: "10.2.3.198", @@ -321,7 +321,7 @@ func TestAntreaIPAMDriver(t *testing.T) { listOptions, ) - antreaIPAMController, err := InitializeAntreaIPAMController(k8sClient, crdClient, informerFactory, localPodInformer, crdInformerFactory) + antreaIPAMController, err := InitializeAntreaIPAMController(crdClient, informerFactory, crdInformerFactory, localPodInformer, true) require.NoError(t, err, "Expected no error in 
initialization for Antrea IPAM Controller") informerFactory.Start(stopCh) go localPodInformer.Run(stopCh) @@ -337,7 +337,7 @@ func TestAntreaIPAMDriver(t *testing.T) { // Test the driver singleton that was assigned to global variable testDriver := antreaIPAMDriver - networkConfig := []byte("'name': 'testCfg', 'cniVersion': '0.4.0', 'type': 'antrea', 'ipam': {'type': 'antrea-ipam'}}") + networkConfig := []byte("'name': 'testCfg', 'cniVersion': '0.4.0', 'type': 'antrea', 'ipam': {'type': 'antrea'}}") cniArgsMap := make(map[string]*invoke.Args) k8sArgsMap := make(map[string]*argtypes.K8sArgs) @@ -515,7 +515,7 @@ func TestAntreaIPAMDriver(t *testing.T) { } owns, err = testDriver.Del(cniArgsBadContainer, k8sArgsMap["orange1"], networkConfig) - assert.True(t, owns) + assert.False(t, owns) require.NoError(t, err, "expected no error in Del call") // Make sure repeated Add works for Pod that was previously released diff --git a/pkg/agent/cniserver/ipam/ipam_delegator.go b/pkg/agent/cniserver/ipam/ipam_delegator.go index 10aeb827bb8..815809d9f11 100644 --- a/pkg/agent/cniserver/ipam/ipam_delegator.go +++ b/pkg/agent/cniserver/ipam/ipam_delegator.go @@ -125,7 +125,5 @@ func delegateNoResult(delegatePlugin string, networkConfig []byte, args *invoke. 
} func init() { - if err := RegisterIPAMDriver(ipamHostLocal, &IPAMDelegator{pluginType: ipamHostLocal}); err != nil { - klog.Errorf("Failed to register IPAM plugin on type %s", ipamHostLocal) - } + RegisterIPAMDriver(ipamHostLocal, &IPAMDelegator{pluginType: ipamHostLocal}) } diff --git a/pkg/agent/cniserver/ipam/ipam_service.go b/pkg/agent/cniserver/ipam/ipam_service.go index b64c335a7c6..2feb36725ee 100644 --- a/pkg/agent/cniserver/ipam/ipam_service.go +++ b/pkg/agent/cniserver/ipam/ipam_service.go @@ -21,7 +21,7 @@ import ( "github.com/containernetworking/cni/pkg/invoke" "github.com/containernetworking/cni/pkg/types/current" - argtypes "antrea.io/antrea/pkg/agent/cniserver/types" + "antrea.io/antrea/pkg/agent/cniserver/types" cnipb "antrea.io/antrea/pkg/apis/cni/v1beta1" ) @@ -33,37 +33,24 @@ import ( // Otherwise IPAM should be handled by host-local plugin. var ipamDrivers map[string][]IPAMDriver -type Range struct { - Subnet string `json:"subnet"` - Gateway string `json:"gateway,omitempty"` -} - -type RangeSet []Range - -type IPAMConfig struct { - Type string `json:"type,omitempty"` - Ranges []RangeSet `json:"ranges,omitempty"` -} - type IPAMResult struct { current.Result VLANID uint16 } type IPAMDriver interface { - Add(args *invoke.Args, k8sArgs *argtypes.K8sArgs, networkConfig []byte) (bool, *IPAMResult, error) - Del(args *invoke.Args, k8sArgs *argtypes.K8sArgs, networkConfig []byte) (bool, error) - Check(args *invoke.Args, k8sArgs *argtypes.K8sArgs, networkConfig []byte) (bool, error) + Add(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfig []byte) (bool, *IPAMResult, error) + Del(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfig []byte) (bool, error) + Check(args *invoke.Args, k8sArgs *types.K8sArgs, networkConfig []byte) (bool, error) } var ipamResults = sync.Map{} -func RegisterIPAMDriver(ipamType string, ipamDriver IPAMDriver) error { +func RegisterIPAMDriver(ipamType string, ipamDriver IPAMDriver) { if ipamDrivers == nil { ipamDrivers = 
make(map[string][]IPAMDriver) } ipamDrivers[ipamType] = append(ipamDrivers[ipamType], ipamDriver) - return nil } func argsFromEnv(cniArgs *cnipb.CniCmdArgs) *invoke.Args { @@ -75,7 +62,7 @@ func argsFromEnv(cniArgs *cnipb.CniCmdArgs) *invoke.Args { } } -func ExecIPAMAdd(cniArgs *cnipb.CniCmdArgs, k8sArgs *argtypes.K8sArgs, ipamType string, resultKey string) (*IPAMResult, error) { +func ExecIPAMAdd(cniArgs *cnipb.CniCmdArgs, k8sArgs *types.K8sArgs, ipamType string, resultKey string) (*IPAMResult, error) { // Return the cached IPAM result for the same Pod. This cache helps to ensure CNIAdd is idempotent. There are two // usages of CNIAdd message on Windows: 1) add container network configuration, and 2) query Pod network status. // kubelet on Windows sends CNIAdd messages to query Pod status periodically before the sandbox container is ready. @@ -88,7 +75,6 @@ func ExecIPAMAdd(cniArgs *cnipb.CniCmdArgs, k8sArgs *argtypes.K8sArgs, ipamType } args := argsFromEnv(cniArgs) - drivers := ipamDrivers[ipamType] for _, driver := range drivers { owns, result, err := driver.Add(args, k8sArgs, cniArgs.NetworkConfiguration) @@ -106,7 +92,7 @@ func ExecIPAMAdd(cniArgs *cnipb.CniCmdArgs, k8sArgs *argtypes.K8sArgs, ipamType return nil, fmt.Errorf("No suitable IPAM driver found") } -func ExecIPAMDelete(cniArgs *cnipb.CniCmdArgs, k8sArgs *argtypes.K8sArgs, ipamType string, resultKey string) error { +func ExecIPAMDelete(cniArgs *cnipb.CniCmdArgs, k8sArgs *types.K8sArgs, ipamType string, resultKey string) error { args := argsFromEnv(cniArgs) drivers := ipamDrivers[ipamType] for _, driver := range drivers { @@ -124,7 +110,7 @@ func ExecIPAMDelete(cniArgs *cnipb.CniCmdArgs, k8sArgs *argtypes.K8sArgs, ipamTy return fmt.Errorf("No suitable IPAM driver found") } -func ExecIPAMCheck(cniArgs *cnipb.CniCmdArgs, k8sArgs *argtypes.K8sArgs, ipamType string) error { +func ExecIPAMCheck(cniArgs *cnipb.CniCmdArgs, k8sArgs *types.K8sArgs, ipamType string) error { args := argsFromEnv(cniArgs) drivers 
:= ipamDrivers[ipamType] for _, driver := range drivers { @@ -153,3 +139,30 @@ func IsIPAMTypeValid(ipamType string) bool { _, valid := ipamDrivers[ipamType] return valid } + +// Antrea IPAM for secondary network. +func SecondaryNetworkAdd(cniArgs *cnipb.CniCmdArgs, k8sArgs *types.K8sArgs, networkConfig *types.NetworkConfig) (*current.Result, error) { + args := argsFromEnv(cniArgs) + return getAntreaIPAMDriver().secondaryNetworkAdd(args, k8sArgs, networkConfig) + +} + +func SecondaryNetworkDel(cniArgs *cnipb.CniCmdArgs, k8sArgs *types.K8sArgs, networkConfig *types.NetworkConfig) error { + args := argsFromEnv(cniArgs) + return getAntreaIPAMDriver().secondaryNetworkDel(args, k8sArgs, networkConfig) + +} + +func SecondaryNetworkCheck(cniArgs *cnipb.CniCmdArgs, k8sArgs *types.K8sArgs, networkConfig *types.NetworkConfig) error { + args := argsFromEnv(cniArgs) + return getAntreaIPAMDriver().secondaryNetworkCheck(args, k8sArgs, networkConfig) + +} + +func getAntreaIPAMDriver() *AntreaIPAM { + drivers, ok := ipamDrivers[AntreaIPAMType] + if !ok { + return nil + } + return drivers[0].(*AntreaIPAM) +} diff --git a/pkg/agent/cniserver/pod_configuration.go b/pkg/agent/cniserver/pod_configuration.go index dd4d251225e..0c2027df658 100644 --- a/pkg/agent/cniserver/pod_configuration.go +++ b/pkg/agent/cniserver/pod_configuration.go @@ -27,6 +27,7 @@ import ( "k8s.io/klog/v2" "antrea.io/antrea/pkg/agent/cniserver/ipam" + "antrea.io/antrea/pkg/agent/cniserver/types" "antrea.io/antrea/pkg/agent/interfacestore" "antrea.io/antrea/pkg/agent/openflow" "antrea.io/antrea/pkg/agent/route" @@ -394,7 +395,7 @@ func (pc *podConfigurator) validateOVSInterfaceConfig(containerID string, contai return fmt.Errorf("container %s interface not found from local cache", containerID) } -func parsePrevResult(conf *NetworkConfig) error { +func parsePrevResult(conf *types.NetworkConfig) error { if conf.RawPrevResult == nil { return nil } diff --git a/pkg/agent/cniserver/server.go 
b/pkg/agent/cniserver/server.go index dd2c88788e4..49e6259b37e 100644 --- a/pkg/agent/cniserver/server.go +++ b/pkg/agent/cniserver/server.go @@ -34,7 +34,7 @@ import ( "k8s.io/klog/v2" "antrea.io/antrea/pkg/agent/cniserver/ipam" - argtypes "antrea.io/antrea/pkg/agent/cniserver/types" + "antrea.io/antrea/pkg/agent/cniserver/types" "antrea.io/antrea/pkg/agent/config" "antrea.io/antrea/pkg/agent/interfacestore" "antrea.io/antrea/pkg/agent/openflow" @@ -48,6 +48,8 @@ import ( ) const ( + antreaCNIType = "antrea" + // networkReadyTimeout is the maximum time the CNI server will wait for network ready when processing CNI Add // requests. If timeout occurs, tryAgainLaterResponse will be returned. // The default runtime request timeout of kubelet is 2 minutes. @@ -100,52 +102,35 @@ func (arbitrator *containerAccessArbitrator) unlockContainer(containerKey string } type CNIServer struct { - cniSocket string - supportedCNIVersions map[string]bool - serverVersion string - nodeConfig *config.NodeConfig - hostProcPathPrefix string - kubeClient clientset.Interface - containerAccess *containerAccessArbitrator - podConfigurator *podConfigurator - isChaining bool - antreaIPAM bool - routeClient route.Interface - secondaryNetworkEnabled bool + cniSocket string + supportedCNIVersions map[string]bool + serverVersion string + nodeConfig *config.NodeConfig + hostProcPathPrefix string + kubeClient clientset.Interface + containerAccess *containerAccessArbitrator + podConfigurator *podConfigurator + routeClient route.Interface + isChaining bool + enableBridgingMode bool + // Enable AntreaIPAM for secondary networks implementd by other CNIs. + enableSecondaryNetworkIPAM bool + secondaryNetworkEnabled bool // networkReadyCh notifies that the network is ready so new Pods can be created. Therefore, CmdAdd waits for it. 
networkReadyCh <-chan struct{} } var supportedCNIVersionSet map[string]bool -type RuntimeDNS struct { - Nameservers []string `json:"servers,omitempty"` - Search []string `json:"searches,omitempty"` -} - -type RuntimeConfig struct { - DNS RuntimeDNS `json:"dns"` -} - -type NetworkConfig struct { - CNIVersion string `json:"cniVersion,omitempty"` - Name string `json:"name,omitempty"` - Type string `json:"type,omitempty"` - DeviceID string `json:"deviceID"` // PCI address of a VF - MTU int `json:"mtu,omitempty"` - DNS cnitypes.DNS `json:"dns"` - IPAM ipam.IPAMConfig `json:"ipam,omitempty"` - // Options to be passed in by the runtime. - RuntimeConfig RuntimeConfig `json:"runtimeConfig"` - - RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` - PrevResult cnitypes.Result `json:"-"` -} - type CNIConfig struct { - *NetworkConfig + *types.NetworkConfig + // AntreaIPAM for an interface not managed by Antrea CNI. + secondaryNetworkIPAM bool + // CniCmdArgs received from the CNI plugin. IPAM data in CniCmdArgs can be updated with the + // Node's Pod CIDRs for NodeIPAM. *cnipb.CniCmdArgs - *argtypes.K8sArgs + // K8s CNI_ARGS passed to the CNI plugin. 
+ *types.K8sArgs } // updateResultIfaceConfig processes the result from the IPAM plugin and does the following: @@ -197,23 +182,20 @@ func resultToResponse(result *current.Result) *cnipb.CniCmdResponse { } func (s *CNIServer) loadNetworkConfig(request *cnipb.CniCmdRequest) (*CNIConfig, error) { - cniConfig := &CNIConfig{} - cniConfig.CniCmdArgs = request.CniArgs - if err := json.Unmarshal(request.CniArgs.NetworkConfiguration, cniConfig); err != nil { - return cniConfig, err + cniConfig := CNIConfig{} + if err := json.Unmarshal(request.CniArgs.NetworkConfiguration, &cniConfig); err != nil { + return nil, err } - cniConfig.K8sArgs = &argtypes.K8sArgs{} + cniConfig.K8sArgs = &types.K8sArgs{} if err := cnitypes.LoadArgs(request.CniArgs.Args, cniConfig.K8sArgs); err != nil { - return cniConfig, err - } - if !s.isChaining { - s.updateLocalIPAMSubnet(cniConfig) + return nil, err } if cniConfig.MTU == 0 { cniConfig.MTU = s.nodeConfig.NodeMTU } + cniConfig.CniCmdArgs = request.CniArgs klog.V(3).Infof("Load network configurations: %v", cniConfig) - return cniConfig, nil + return &cniConfig, nil } func (s *CNIServer) isCNIVersionSupported(reqVersion string) bool { @@ -221,44 +203,69 @@ func (s *CNIServer) isCNIVersionSupported(reqVersion string) bool { return exist } -func (s *CNIServer) checkRequestMessage(request *cnipb.CniCmdRequest) (*CNIConfig, *cnipb.CniCmdResponse) { +func (s *CNIServer) validateCNIAndIPAMType(cniConfig *CNIConfig) *cnipb.CniCmdResponse { + ipamType := cniConfig.IPAM.Type + if cniConfig.Type == antreaCNIType { + if s.isChaining { + return nil + } + if !ipam.IsIPAMTypeValid(ipamType) { + klog.Errorf("Unsupported IPAM type %s", ipamType) + return s.unsupportedFieldResponse("ipam/type", ipamType) + } + if s.enableBridgingMode { + // When the bridging mode is enabled, Antrea ignores IPAM type from request.
+ cniConfig.IPAM.Type = ipam.AntreaIPAMType + + } + return nil + } + + if !s.enableSecondaryNetworkIPAM { + return s.unsupportedFieldResponse("type", cniConfig.Type) + } + if ipamType != ipam.AntreaIPAMType { + klog.Errorf("Unsupported IPAM type %s", ipamType) + return s.unsupportedFieldResponse("ipam/type", ipamType) + } + // IPAM for an interface not managed by Antrea CNI. + cniConfig.secondaryNetworkIPAM = true + return nil +} + +func (s *CNIServer) validateRequestMessage(request *cnipb.CniCmdRequest) (*CNIConfig, *cnipb.CniCmdResponse) { cniConfig, err := s.loadNetworkConfig(request) if err != nil { klog.Errorf("Failed to parse network configuration: %v", err) return nil, s.decodingFailureResponse("network config") } + cniVersion := cniConfig.CNIVersion // Check if CNI version in the request is supported if !s.isCNIVersionSupported(cniVersion) { klog.Errorf(fmt.Sprintf("Unsupported CNI version [%s], supported CNI versions %s", cniVersion, version.All.SupportedVersions())) - return cniConfig, s.incompatibleCniVersionResponse(cniVersion) + return nil, s.incompatibleCniVersionResponse(cniVersion) } - if s.isChaining { - return cniConfig, nil - } - // Find IPAM Service according configuration - ipamType := cniConfig.IPAM.Type - isValid := ipam.IsIPAMTypeValid(ipamType) - if !isValid { - klog.Errorf("Unsupported IPAM type %s", ipamType) - return cniConfig, s.unsupportedFieldResponse("ipam/type", ipamType) - } - if s.antreaIPAM { - // With AnteaIPAM feature enabled, Antrea ignores IPAM type from request - cniConfig.IPAM.Type = ipam.AntreaIPAMType + if resp := s.validateCNIAndIPAMType(cniConfig); resp != nil { + return nil, resp + } + if !s.isChaining && !cniConfig.secondaryNetworkIPAM { + s.updateLocalIPAMSubnet(cniConfig) } return cniConfig, nil } +// updateLocalIPAMSubnet updates CNIConfig.CniCmdArgs with this Node's Pod CIDRs, which will be +// passed to the IPAM driver.
func (s *CNIServer) updateLocalIPAMSubnet(cniConfig *CNIConfig) { if (s.nodeConfig.GatewayConfig.IPv4 != nil) && (s.nodeConfig.PodIPv4CIDR != nil) { cniConfig.NetworkConfig.IPAM.Ranges = append(cniConfig.NetworkConfig.IPAM.Ranges, - ipam.RangeSet{ipam.Range{Subnet: s.nodeConfig.PodIPv4CIDR.String(), Gateway: s.nodeConfig.GatewayConfig.IPv4.String()}}) + types.RangeSet{types.Range{Subnet: s.nodeConfig.PodIPv4CIDR.String(), Gateway: s.nodeConfig.GatewayConfig.IPv4.String()}}) } if (s.nodeConfig.GatewayConfig.IPv6 != nil) && (s.nodeConfig.PodIPv6CIDR != nil) { cniConfig.NetworkConfig.IPAM.Ranges = append(cniConfig.NetworkConfig.IPAM.Ranges, - ipam.RangeSet{ipam.Range{Subnet: s.nodeConfig.PodIPv6CIDR.String(), Gateway: s.nodeConfig.GatewayConfig.IPv6.String()}}) + types.RangeSet{types.Range{Subnet: s.nodeConfig.PodIPv6CIDR.String(), Gateway: s.nodeConfig.GatewayConfig.IPv6.String()}}) } cniConfig.NetworkConfiguration, _ = json.Marshal(cniConfig.NetworkConfig) } @@ -336,7 +343,7 @@ func buildVersionSet() map[string]bool { return versionSet } -func (s *CNIServer) parsePrevResultFromRequest(networkConfig *NetworkConfig) (*current.Result, *cnipb.CniCmdResponse) { +func (s *CNIServer) parsePrevResultFromRequest(networkConfig *types.NetworkConfig) (*current.Result, *cnipb.CniCmdResponse) { if networkConfig.PrevResult == nil && networkConfig.RawPrevResult == nil { klog.Errorf("Previous network configuration not specified") return nil, s.unsupportedFieldResponse("prevResult", "") @@ -358,7 +365,7 @@ func (s *CNIServer) parsePrevResultFromRequest(networkConfig *NetworkConfig) (*c // validatePrevResult validates container and host interfaces configuration // the return value is nil if prevResult is valid -func (s *CNIServer) validatePrevResult(cfgArgs *cnipb.CniCmdArgs, k8sCNIArgs *argtypes.K8sArgs, prevResult *current.Result, sriovVFDeviceID string) *cnipb.CniCmdResponse { +func (s *CNIServer) validatePrevResult(cfgArgs *cnipb.CniCmdArgs, prevResult *current.Result, 
sriovVFDeviceID string) *cnipb.CniCmdResponse { containerID := cfgArgs.ContainerId netNS := s.hostNetNsPath(cfgArgs.Netns) @@ -384,13 +391,46 @@ func (s *CNIServer) GetPodConfigurator() *podConfigurator { return s.podConfigurator } +// Antrea IPAM for secondary network. +func (s *CNIServer) ipamAdd(cniConfig *CNIConfig) (*cnipb.CniCmdResponse, error) { + ipamResult, err := ipam.SecondaryNetworkAdd(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.NetworkConfig) + if err != nil { + return s.ipamFailureResponse(err), nil + } + klog.InfoS("Allocated IP addresses", "container", cniConfig.ContainerId, "result", ipamResult) + return resultToResponse(ipamResult), nil +} + +func (s *CNIServer) ipamDel(cniConfig *CNIConfig) (*cnipb.CniCmdResponse, error) { + if err := ipam.SecondaryNetworkDel(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.NetworkConfig); err != nil { + return s.ipamFailureResponse(err), nil + } + return &cnipb.CniCmdResponse{CniResult: []byte("")}, nil +} + +func (s *CNIServer) ipamCheck(cniConfig *CNIConfig) (*cnipb.CniCmdResponse, error) { + if err := ipam.SecondaryNetworkCheck(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.NetworkConfig); err != nil { + return s.ipamFailureResponse(err), nil + } + return &cnipb.CniCmdResponse{CniResult: []byte("")}, nil +} + func (s *CNIServer) CmdAdd(ctx context.Context, request *cnipb.CniCmdRequest) (*cnipb.CniCmdResponse, error) { klog.Infof("Received CmdAdd request %v", request) - cniConfig, response := s.checkRequestMessage(request) + cniConfig, response := s.validateRequestMessage(request) if response != nil { return response, nil } + infraContainer := cniConfig.getInfraContainer() + if cniConfig.secondaryNetworkIPAM { + klog.InfoS("Antrea IPAM add", "CNI", cniConfig.Type, "network", cniConfig.Name) + s.containerAccess.lockContainer(infraContainer) + resp, err := s.ipamAdd(cniConfig) + s.containerAccess.unlockContainer(infraContainer) + return resp, err + } + select { case 
<-time.After(networkReadyTimeout): klog.Errorf("Cannot process CmdAdd request for container %v because network is not ready", cniConfig.ContainerId) @@ -418,7 +458,7 @@ func (s *CNIServer) CmdAdd(ctx context.Context, request *cnipb.CniCmdRequest) (* } }() - infraContainer := cniConfig.getInfraContainer() + // Serialize CNI calls for one Pod. s.containerAccess.lockContainer(infraContainer) defer s.containerAccess.unlockContainer(infraContainer) @@ -446,16 +486,17 @@ func (s *CNIServer) CmdAdd(ctx context.Context, request *cnipb.CniCmdRequest) (* return s.ipamFailureResponse(err), nil } } - klog.Infof("Requested ip addresses for container %v: %v", cniConfig.ContainerId, ipamResult) + klog.InfoS("Allocated IP addresses", "container", cniConfig.ContainerId, "result", ipamResult) result.IPs = ipamResult.IPs result.Routes = ipamResult.Routes result.VLANID = ipamResult.VLANID // Ensure interface gateway setting and mapping relations between result.Interfaces and result.IPs updateResultIfaceConfig(&result.Result, s.nodeConfig.GatewayConfig.IPv4, s.nodeConfig.GatewayConfig.IPv6) + updateResultDNSConfig(&result.Result, cniConfig) + // Setup pod interfaces and connect to ovs bridge podName := string(cniConfig.K8S_POD_NAME) podNamespace := string(cniConfig.K8S_POD_NAMESPACE) - updateResultDNSConfig(&result.Result, cniConfig) if err = s.podConfigurator.configureInterfaces( podName, podNamespace, @@ -491,7 +532,7 @@ func (s *CNIServer) CmdDel(_ context.Context, request *cnipb.CniCmdRequest) ( *cnipb.CniCmdResponse, error) { klog.Infof("Received CmdDel request %v", request) - cniConfig, response := s.checkRequestMessage(request) + cniConfig, response := s.validateRequestMessage(request) if response != nil { return response, nil } @@ -500,6 +541,11 @@ func (s *CNIServer) CmdDel(_ context.Context, request *cnipb.CniCmdRequest) ( s.containerAccess.lockContainer(infraContainer) defer s.containerAccess.unlockContainer(infraContainer) + if cniConfig.secondaryNetworkIPAM { + 
klog.InfoS("Antrea IPAM del", "CNI", cniConfig.Type, "network", cniConfig.Name) + return s.ipamDel(cniConfig) + } + if s.isChaining { return s.interceptDel(cniConfig) } @@ -532,7 +578,7 @@ func (s *CNIServer) CmdCheck(_ context.Context, request *cnipb.CniCmdRequest) ( *cnipb.CniCmdResponse, error) { klog.Infof("Received CmdCheck request %v", request) - cniConfig, response := s.checkRequestMessage(request) + cniConfig, response := s.validateRequestMessage(request) if response != nil { return response, nil } @@ -541,6 +587,11 @@ func (s *CNIServer) CmdCheck(_ context.Context, request *cnipb.CniCmdRequest) ( s.containerAccess.lockContainer(infraContainer) defer s.containerAccess.unlockContainer(infraContainer) + if cniConfig.secondaryNetworkIPAM { + klog.InfoS("Antrea IPAM check", "CNI", cniConfig.Type, "network", cniConfig.Name) + return s.ipamCheck(cniConfig) + } + if s.isChaining { return s.interceptCheck(cniConfig) } @@ -554,7 +605,7 @@ func (s *CNIServer) CmdCheck(_ context.Context, request *cnipb.CniCmdRequest) ( if valid, _ := version.GreaterThanOrEqualTo(cniVersion, "0.4.0"); valid { if prevResult, response := s.parsePrevResultFromRequest(cniConfig.NetworkConfig); response != nil { return response, nil - } else if response := s.validatePrevResult(cniConfig.CniCmdArgs, cniConfig.K8sArgs, prevResult, cniConfig.DeviceID); response != nil { + } else if response := s.validatePrevResult(cniConfig.CniCmdArgs, prevResult, cniConfig.DeviceID); response != nil { return response, nil } } @@ -566,23 +617,23 @@ func New( cniSocket, hostProcPathPrefix string, nodeConfig *config.NodeConfig, kubeClient clientset.Interface, - isChaining bool, - antreaIPAM bool, routeClient route.Interface, + isChaining, enableBridgingMode, enableSecondaryNetworkIPAM bool, networkReadyCh <-chan struct{}, ) *CNIServer { return &CNIServer{ - cniSocket: cniSocket, - supportedCNIVersions: supportedCNIVersionSet, - serverVersion: cni.AntreaCNIVersion, - nodeConfig: nodeConfig, - hostProcPathPrefix: 
hostProcPathPrefix, - kubeClient: kubeClient, - containerAccess: newContainerAccessArbitrator(), - isChaining: isChaining, - antreaIPAM: antreaIPAM, - routeClient: routeClient, - networkReadyCh: networkReadyCh, + cniSocket: cniSocket, + supportedCNIVersions: supportedCNIVersionSet, + serverVersion: cni.AntreaCNIVersion, + nodeConfig: nodeConfig, + hostProcPathPrefix: hostProcPathPrefix, + kubeClient: kubeClient, + containerAccess: newContainerAccessArbitrator(), + routeClient: routeClient, + isChaining: isChaining, + enableBridgingMode: enableBridgingMode, + enableSecondaryNetworkIPAM: enableSecondaryNetworkIPAM, + networkReadyCh: networkReadyCh, } } diff --git a/pkg/agent/cniserver/server_test.go b/pkg/agent/cniserver/server_test.go index ef57bcfc4c1..c6cac35b392 100644 --- a/pkg/agent/cniserver/server_test.go +++ b/pkg/agent/cniserver/server_test.go @@ -37,7 +37,7 @@ import ( "antrea.io/antrea/pkg/agent/cniserver/ipam" ipamtest "antrea.io/antrea/pkg/agent/cniserver/ipam/testing" cniservertest "antrea.io/antrea/pkg/agent/cniserver/testing" - argtypes "antrea.io/antrea/pkg/agent/cniserver/types" + types "antrea.io/antrea/pkg/agent/cniserver/types" "antrea.io/antrea/pkg/agent/config" "antrea.io/antrea/pkg/agent/interfacestore" openflowtest "antrea.io/antrea/pkg/agent/openflow/testing" @@ -64,26 +64,29 @@ const ( unsupportedCNIVersion = "0.5.1" ) -var routes = []string{"10.0.0.0/8,10.1.2.1", "0.0.0.0/0,10.1.2.1"} -var dns = []string{"192.168.100.1"} -var ips = []string{"10.1.2.100/24,10.1.2.1,4"} -var args = cniservertest.GenerateCNIArgs(testPodNameA, testPodNamespace, testPodInfraContainerID) -var testNodeConfig *config.NodeConfig -var gwIPv4 net.IP -var gwIPv6 net.IP +var ( + routes = []string{"10.0.0.0/8,10.1.2.1", "0.0.0.0/0,10.1.2.1"} + dns = []string{"192.168.100.1"} + ips = []string{"10.1.2.100/24,10.1.2.1,4"} + args = cniservertest.GenerateCNIArgs(testPodNameA, testPodNamespace, testPodInfraContainerID) + testNodeConfig *config.NodeConfig + gwIPv4 net.IP + 
gwIPv6 net.IP +) func TestLoadNetConfig(t *testing.T) { assert := assert.New(t) cniService := newCNIServer(t) - networkCfg := generateNetworkConfiguration("testCfg", supportedCNIVersion, testIpamType) + ipamType := "TestLoadNetConfig" + networkCfg := generateNetworkConfiguration("", supportedCNIVersion, "", ipamType) requestMsg, containerID := newRequest(args, networkCfg, "", t) - netCfg, err := cniService.loadNetworkConfig(requestMsg) + ipam.RegisterIPAMDriver(ipamType, nil) + netCfg, resp := cniService.validateRequestMessage(requestMsg) // just make sure that cniService.nodeConfig matches the testNodeConfig. require.Equal(t, testNodeConfig, cniService.nodeConfig) - assert.Nil(err, "Error while parsing request message, %v", err) + assert.Nil(resp, "Error while parsing request message, %v", resp) assert.Equal(supportedCNIVersion, netCfg.CNIVersion) assert.Equal(containerID, netCfg.ContainerId) assert.Equal(netns, netCfg.Netns) @@ -137,7 +140,7 @@ func TestIPAMService(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() ipamMock := ipamtest.NewMockIPAMDriver(controller) - _ = ipam.RegisterIPAMDriver(testIpamType, ipamMock) + ipam.RegisterIPAMDriver(testIpamType, ipamMock) cniServer := newCNIServer(t) ifaceStore := interfacestore.NewInterfaceStore() cniServer.podConfigurator = &podConfigurator{ifaceStore: ifaceStore} @@ -147,7 +150,7 @@ func TestIPAMService(t *testing.T) { // Test IPAM_Failure cases cxt := context.Background() - networkCfg := generateNetworkConfiguration("testCfg", "0.4.0", testIpamType) + networkCfg := generateNetworkConfiguration("", "0.4.0", "", testIpamType) requestMsg, _ := newRequest(args, networkCfg, "", t) t.Run("Error on ADD", func(t *testing.T) {
ipamMock.EXPECT().Add(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil, nil) - cniConfig, _ := cniServer.checkRequestMessage(requestMsg) + cniConfig, _ := cniServer.validateRequestMessage(requestMsg) _, err := ipam.ExecIPAMAdd(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.IPAM.Type, cniConfig.getInfraContainer()) require.Nil(t, err, "expected no Add error") ipamMock.EXPECT().Del(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, fmt.Errorf("IPAM delete error")) @@ -186,7 +189,7 @@ func TestIPAMService(t *testing.T) { t.Run("Idempotent Call of IPAM ADD/DEL for the same Pod", func(t *testing.T) { ipamMock.EXPECT().Add(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil, nil) ipamMock.EXPECT().Del(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(2) - cniConfig, response := cniServer.checkRequestMessage(requestMsg) + cniConfig, response := cniServer.validateRequestMessage(requestMsg) require.Nil(t, response, "expected no rpc error") ipamResult, err := ipam.ExecIPAMAdd(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.IPAM.Type, cniConfig.getInfraContainer()) require.Nil(t, err, "expected no IPAM add error") @@ -202,14 +205,14 @@ func TestIPAMService(t *testing.T) { t.Run("Idempotent Call of IPAM ADD/DEL for the same Pod with different containers", func(t *testing.T) { ipamMock.EXPECT().Add(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil, nil).Times(2) ipamMock.EXPECT().Del(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(2) - cniConfig, response := cniServer.checkRequestMessage(requestMsg) + cniConfig, response := cniServer.validateRequestMessage(requestMsg) require.Nil(t, response, "expected no rpc error") _, err := ipam.ExecIPAMAdd(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.IPAM.Type, cniConfig.getInfraContainer()) require.Nil(t, err, "expected no IPAM add error") workerContainerID := "test-infra-2222222" args2 := cniservertest.GenerateCNIArgs(testPodNameA, testPodNamespace, 
workerContainerID) requestMsg2, _ := newRequest(args2, networkCfg, "", t) - cniConfig2, response := cniServer.checkRequestMessage(requestMsg2) + cniConfig2, response := cniServer.validateRequestMessage(requestMsg2) require.Nil(t, response, "expected no rpc error") _, err = ipam.ExecIPAMAdd(cniConfig2.CniCmdArgs, cniConfig.K8sArgs, cniConfig.IPAM.Type, cniConfig2.getInfraContainer()) require.Nil(t, err, "expected no IPAM add error") @@ -227,8 +230,8 @@ func TestIPAMServiceMultiDriver(t *testing.T) { mockDriverA := ipamtest.NewMockIPAMDriver(controller) mockDriverB := ipamtest.NewMockIPAMDriver(controller) - _ = ipam.RegisterIPAMDriver(testIpamType2, mockDriverA) - _ = ipam.RegisterIPAMDriver(testIpamType2, mockDriverB) + ipam.RegisterIPAMDriver(testIpamType2, mockDriverA) + ipam.RegisterIPAMDriver(testIpamType2, mockDriverB) cniServer := newCNIServer(t) ifaceStore := interfacestore.NewInterfaceStore() cniServer.podConfigurator = &podConfigurator{ifaceStore: ifaceStore} @@ -238,7 +241,7 @@ func TestIPAMServiceMultiDriver(t *testing.T) { // Test IPAM_Failure cases cxt := context.Background() - networkCfg := generateNetworkConfiguration("testCfg", "0.4.0", testIpamType2) + networkCfg := generateNetworkConfiguration("", "0.4.0", "", testIpamType2) argsPodA := cniservertest.GenerateCNIArgs(testPodNameA, testPodNamespace, testPodInfraContainerID) argsPodB := cniservertest.GenerateCNIArgs(testPodNameB, testPodNamespace, testPodInfraContainerID) @@ -272,7 +275,7 @@ func TestIPAMServiceMultiDriver(t *testing.T) { t.Run("Error on DEL for first registered driver", func(t *testing.T) { // Prepare cached IPAM result which will be deleted later. 
mockDriverA.EXPECT().Add(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, ipamResult, nil) - cniConfig, _ := cniServer.checkRequestMessage(requestMsgA) + cniConfig, _ := cniServer.validateRequestMessage(requestMsgA) _, err := ipam.ExecIPAMAdd(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.IPAM.Type, cniConfig.getInfraContainer()) require.Nil(t, err, "expected no Add error") @@ -292,7 +295,7 @@ func TestIPAMServiceMultiDriver(t *testing.T) { // Prepare cached IPAM result which will be deleted later. mockDriverA.EXPECT().Add(gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil, nil) mockDriverB.EXPECT().Add(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, ipamResult, nil) - cniConfig, _ := cniServer.checkRequestMessage(requestMsgB) + cniConfig, _ := cniServer.validateRequestMessage(requestMsgB) _, err := ipam.ExecIPAMAdd(cniConfig.CniCmdArgs, cniConfig.K8sArgs, cniConfig.IPAM.Type, cniConfig.getInfraContainer()) require.Nil(t, err, "expected no Add error") @@ -326,30 +329,122 @@ func TestIPAMServiceMultiDriver(t *testing.T) { }) } -func TestCheckRequestMessage(t *testing.T) { +func TestValidateRequestMessage(t *testing.T) { + hostLocal := "host-local" + ipam.RegisterIPAMDriver(hostLocal, nil) + ipam.RegisterIPAMDriver(ipam.AntreaIPAMType, nil) cniServer := newCNIServer(t) - t.Run("Incompatible CNI version", func(t *testing.T) { - networkCfg := generateNetworkConfiguration("testCfg", unsupportedCNIVersion, testIpamType) - requestMsg, _ := newRequest(args, networkCfg, "", t) - _, response := cniServer.checkRequestMessage(requestMsg) - checkErrorResponse(t, response, cnipb.ErrorCode_INCOMPATIBLE_CNI_VERSION, "") - }) + type testCase struct { + test string + cniVersion string + cniType string + ipamType string + isChaining bool + enableBridgingMode bool + enableSecondaryNetworkIPAM bool + resIPAMType string + resSecondaryNetworkIPAM bool + errorCode cnipb.ErrorCode + } - t.Run("Unknown IPAM type", func(t *testing.T) { - networkCfg := 
generateNetworkConfiguration("testCfg", supportedCNIVersion, testIpamType) - networkCfg.IPAM.Type = "unknown" - requestMsg, _ := newRequest(args, networkCfg, "", t) - _, response := cniServer.checkRequestMessage(requestMsg) - checkErrorResponse(t, response, cnipb.ErrorCode_UNSUPPORTED_FIELD, "") - }) + testCases := []testCase{ + { + test: "Incompatible CNI version", + cniVersion: unsupportedCNIVersion, + errorCode: cnipb.ErrorCode_INCOMPATIBLE_CNI_VERSION, + }, + { + test: "Unknown CNI type", + cniType: "unknown", + errorCode: cnipb.ErrorCode_UNSUPPORTED_FIELD, + }, + { + test: "Unknown IPAM type", + ipamType: "unknown", + errorCode: cnipb.ErrorCode_UNSUPPORTED_FIELD, + }, + { + test: "host-local", + ipamType: hostLocal, + resIPAMType: hostLocal, + }, + { + test: "antrea-ipam", + ipamType: ipam.AntreaIPAMType, + resIPAMType: ipam.AntreaIPAMType, + }, + { + test: "host-local in bridging mode", + ipamType: hostLocal, + enableBridgingMode: true, + resIPAMType: ipam.AntreaIPAMType, + }, + { + test: "chaining", + ipamType: "unknown", + isChaining: true, + resIPAMType: "unknown", + }, + { + test: "IPAM for other CNI not enabled", + cniType: "unknown", + ipamType: ipam.AntreaIPAMType, + errorCode: cnipb.ErrorCode_UNSUPPORTED_FIELD, + }, + { + test: "IPAM for other CNI with unsupported IPAM type", + cniType: "unknown", + ipamType: hostLocal, + enableSecondaryNetworkIPAM: true, + errorCode: cnipb.ErrorCode_UNSUPPORTED_FIELD, + }, + { + test: "IPAM for other CNI", + cniType: "unknown", + ipamType: ipam.AntreaIPAMType, + resIPAMType: ipam.AntreaIPAMType, + resSecondaryNetworkIPAM: true, + enableSecondaryNetworkIPAM: true, + }, + } + + for _, c := range testCases { + t.Run(c.test, func(t *testing.T) { + cniVersion := c.cniVersion + if cniVersion == "" { + cniVersion = supportedCNIVersion + } + ipamType := c.ipamType + if ipamType == "" { + ipamType = hostLocal + } + + networkCfg := generateNetworkConfiguration("", cniVersion, c.cniType, ipamType) + requestMsg, _ := 
newRequest(args, networkCfg, "", t) + cniServer.isChaining = c.isChaining + cniServer.enableBridgingMode = c.enableBridgingMode + cniServer.enableSecondaryNetworkIPAM = c.enableSecondaryNetworkIPAM + + resCfg, response := cniServer.validateRequestMessage(requestMsg) + if c.errorCode != 0 { + assert.NotNil(t, response, "Error code %v is expected", c.errorCode) + checkErrorResponse(t, response, c.errorCode, "") + return + } + assert.Nil(t, response, "Unexpected error response: %v", response) + + assert.Equal(t, resCfg.IPAM.Type, c.resIPAMType) + assert.Equal(t, resCfg.secondaryNetworkIPAM, c.resSecondaryNetworkIPAM) + }) + } } func TestValidatePrevResult(t *testing.T) { cniServer := newCNIServer(t) cniVersion := "0.4.0" - networkCfg := generateNetworkConfiguration("testCfg", cniVersion, testIpamType) - k8sPodArgs := &argtypes.K8sArgs{} + networkCfg := generateNetworkConfiguration("", cniVersion, "", testIpamType) + k8sPodArgs := &types.K8sArgs{} cnitypes.LoadArgs(args, k8sPodArgs) networkCfg.PrevResult = nil ips := []string{"10.1.2.100/24,10.1.2.1,4"} @@ -375,7 +470,7 @@ func TestValidatePrevResult(t *testing.T) { cniConfig.Ifname = "invalid_iface" // invalid sriovVFDeviceID := "" prevResult.Interfaces = []*current.Interface{hostIface, containerIface} - response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, k8sPodArgs, prevResult, sriovVFDeviceID) + response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, prevResult, sriovVFDeviceID) checkErrorResponse( t, response, cnipb.ErrorCode_INVALID_NETWORK_CONFIG, "prevResult does not match network configuration", @@ -387,7 +482,7 @@ func TestValidatePrevResult(t *testing.T) { cniConfig.Ifname = "invalid_iface" // invalid sriovVFDeviceID := "0000:03:00.6" prevResult.Interfaces = []*current.Interface{hostIface, containerIface} - response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, k8sPodArgs, prevResult, sriovVFDeviceID) + response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, prevResult, 
sriovVFDeviceID) checkErrorResponse( t, response, cnipb.ErrorCode_INVALID_NETWORK_CONFIG, "prevResult does not match network configuration", @@ -401,7 +496,7 @@ func TestValidatePrevResult(t *testing.T) { sriovVFDeviceID := "" prevResult.Interfaces = []*current.Interface{hostIface, containerIface} cniServer.podConfigurator, _ = newPodConfigurator(nil, nil, nil, nil, nil, "", false, channel.NewSubscribableChannel("PodUpdate", 100), nil) - response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, k8sPodArgs, prevResult, sriovVFDeviceID) + response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, prevResult, sriovVFDeviceID) checkErrorResponse(t, response, cnipb.ErrorCode_CHECK_INTERFACE_FAILURE, "") }) @@ -412,7 +507,7 @@ func TestValidatePrevResult(t *testing.T) { sriovVFDeviceID := "0000:03:00.6" prevResult.Interfaces = []*current.Interface{hostIface, containerIface} cniServer.podConfigurator, _ = newPodConfigurator(nil, nil, nil, nil, nil, "", true, channel.NewSubscribableChannel("PodUpdate", 100), nil) - response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, k8sPodArgs, prevResult, sriovVFDeviceID) + response := cniServer.validatePrevResult(cniConfig.CniCmdArgs, prevResult, sriovVFDeviceID) checkErrorResponse(t, response, cnipb.ErrorCode_CHECK_INTERFACE_FAILURE, "") }) } @@ -420,8 +515,8 @@ func TestValidatePrevResult(t *testing.T) { func TestParsePrevResultFromRequest(t *testing.T) { cniServer := newCNIServer(t) - getNetworkCfg := func(cniVersion string) *NetworkConfig { - networkCfg := generateNetworkConfiguration("testCfg", cniVersion, testIpamType) + getNetworkCfg := func(cniVersion string) *types.NetworkConfig { + networkCfg := generateNetworkConfiguration("", cniVersion, "", testIpamType) networkCfg.PrevResult = nil networkCfg.RawPrevResult = nil return networkCfg @@ -550,7 +645,7 @@ func TestRemoveInterface(t *testing.T) { hostIfaceName = util.GenerateContainerInterfaceName(podName, testPodNamespace, containerID) fakePortUUID = 
uuid.New().String() - netcfg := generateNetworkConfiguration("testCfg", supportedCNIVersion, testIpamType) + netcfg := generateNetworkConfiguration("", supportedCNIVersion, "", testIpamType) cniConfig = &CNIConfig{NetworkConfig: netcfg, CniCmdArgs: &cnipb.CniCmdArgs{}} cniConfig.Ifname = "eth0" cniConfig.ContainerId = containerID @@ -687,16 +782,24 @@ func newCNIServer(t *testing.T) *CNIServer { return cniServer } -func generateNetworkConfiguration(name string, cniVersion string, ipamType string) *NetworkConfig { - netCfg := new(NetworkConfig) - netCfg.Name = name +func generateNetworkConfiguration(name, cniVersion, cniType, ipamType string) *types.NetworkConfig { + netCfg := new(types.NetworkConfig) + if name == "" { + netCfg.Name = "test-network" + } else { + netCfg.Name = name + } netCfg.CNIVersion = cniVersion - netCfg.Type = "antrea" - netCfg.IPAM = ipam.IPAMConfig{Type: ipamType} + if cniType == "" { + netCfg.Type = antreaCNIType + } else { + netCfg.Type = cniType + } + netCfg.IPAM = &types.IPAMConfig{Type: ipamType} return netCfg } -func newRequest(args string, netCfg *NetworkConfig, path string, t *testing.T) (*cnipb.CniCmdRequest, string) { +func newRequest(args string, netCfg *types.NetworkConfig, path string, t *testing.T) (*cnipb.CniCmdRequest, string) { containerID := generateUUID(t) networkConfig, err := json.Marshal(netCfg) if err != nil { diff --git a/pkg/agent/cniserver/types/arg_types.go b/pkg/agent/cniserver/types/arg_types.go deleted file mode 100644 index a64faca186f..00000000000 --- a/pkg/agent/cniserver/types/arg_types.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2019 Antrea Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - cnitypes "github.com/containernetworking/cni/pkg/types" -) - -type K8sArgs struct { - cnitypes.CommonArgs - K8S_POD_NAME cnitypes.UnmarshallableString - K8S_POD_NAMESPACE cnitypes.UnmarshallableString - K8S_POD_INFRA_CONTAINER_ID cnitypes.UnmarshallableString -} diff --git a/pkg/agent/cniserver/types/types.go b/pkg/agent/cniserver/types/types.go new file mode 100644 index 00000000000..d915763ab89 --- /dev/null +++ b/pkg/agent/cniserver/types/types.go @@ -0,0 +1,67 @@ +// Copyright 2019 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +import ( + cnitypes "github.com/containernetworking/cni/pkg/types" +) + +type K8sArgs struct { + cnitypes.CommonArgs + K8S_POD_NAME cnitypes.UnmarshallableString + K8S_POD_NAMESPACE cnitypes.UnmarshallableString + K8S_POD_INFRA_CONTAINER_ID cnitypes.UnmarshallableString +} + +type RuntimeDNS struct { + Nameservers []string `json:"servers,omitempty"` + Search []string `json:"searches,omitempty"` +} + +type RuntimeConfig struct { + DNS RuntimeDNS `json:"dns"` +} + +type Range struct { + Subnet string `json:"subnet"` + Gateway string `json:"gateway,omitempty"` +} + +type RangeSet []Range + +type IPAMConfig struct { + Type string `json:"type"` + // IP ranges for NodeIPAM. Can include both v4 and v6 ranges. + Ranges []RangeSet `json:"ranges,omitempty"` + Routes []*cnitypes.Route `json:"routes,omitempty"` + DNS cnitypes.DNS `json:"dns,omitempty"` + // Antrea IPPool names for Antrea IPAM. + IPPools []string `json:"ippools,omitempty"` + // Other NodeIPAM config parameters (ResolvConf, IPArgs) are not supported. +} + +type NetworkConfig struct { + CNIVersion string `json:"cniVersion"` + Name string `json:"name"` + Type string `json:"type"` + DeviceID string `json:"deviceID,omitempty"` // PCI address of a VF + MTU int `json:"mtu,omitempty"` + DNS cnitypes.DNS `json:"dns,omitempty"` + IPAM *IPAMConfig `json:"ipam,omitempty"` + // Options to be passed in by the runtime. 
+ RuntimeConfig RuntimeConfig `json:"runtimeConfig,omitempty"` + RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` + PrevResult cnitypes.Result `json:"-"` +} diff --git a/pkg/apis/crd/v1alpha2/types.go b/pkg/apis/crd/v1alpha2/types.go index 43e9361a82f..3ee90effe31 100644 --- a/pkg/apis/crd/v1alpha2/types.go +++ b/pkg/apis/crd/v1alpha2/types.go @@ -304,9 +304,16 @@ type IPPool struct { Status IPPoolStatus `json:"status"` } +type IPVersion int + +const ( + IPv4 = IPVersion(4) + IPv6 = IPVersion(6) +) + type IPPoolSpec struct { // IP Version for this IP pool - either 4 or 6 - IPVersion int `json:"ipVersion"` + IPVersion IPVersion `json:"ipVersion"` // List IP ranges, along with subnet definition. IPRanges []SubnetIPRange `json:"ipRanges"` } @@ -361,6 +368,9 @@ type PodOwner struct { Name string `json:"name"` Namespace string `json:"namespace"` ContainerID string `json:"containerID"` + // Network interface name. Used when the IP is allocated for a secondary network interface + // of the Pod. 
+ IFName string `json:"ifName,omitempty"` } // StatefulSet owner diff --git a/pkg/controller/ipam/validate.go b/pkg/controller/ipam/validate.go index abdb04296c4..3308d4aa3d4 100644 --- a/pkg/controller/ipam/validate.go +++ b/pkg/controller/ipam/validate.go @@ -187,20 +187,20 @@ func ipInRange(rangeStart, rangeEnd, ip net.IP) bool { return bytes.Compare(ip16, rangeStart.To16()) >= 0 && bytes.Compare(ip16, rangeEnd.To16()) <= 0 } -func ipVersion(ip net.IP) int { +func ipVersion(ip net.IP) crdv1alpha2.IPVersion { if ip.To4() != nil { - return 4 + return crdv1alpha2.IPv4 } - return 6 + return crdv1alpha2.IPv6 } -func validateIPRange(r crdv1alpha2.SubnetIPRange, poolIPVersion int) (bool, string) { +func validateIPRange(r crdv1alpha2.SubnetIPRange, poolIPVersion crdv1alpha2.IPVersion) (bool, string) { // Validate the integrity the IP range: // Verify that all the IP ranges have the same IP family as the IP pool // Verify that the gateway IP is reachable from the IP range gateway := net.ParseIP(r.Gateway) var mask net.IPMask - if ipVersion(gateway) == 4 { + if ipVersion(gateway) == crdv1alpha2.IPv4 { mask = net.CIDRMask(int(r.PrefixLength), 32) } else { mask = net.CIDRMask(int(r.PrefixLength), 128) diff --git a/pkg/controller/ipam/validate_test.go b/pkg/controller/ipam/validate_test.go index 8b86d477912..3955a0432ae 100644 --- a/pkg/controller/ipam/validate_test.go +++ b/pkg/controller/ipam/validate_test.go @@ -31,7 +31,7 @@ var testIPPool = &crdv1alpha2.IPPool{ Name: "test-ip-pool", }, Spec: crdv1alpha2.IPPoolSpec{ - IPVersion: 4, + IPVersion: crdv1alpha2.IPv4, IPRanges: []crdv1alpha2.SubnetIPRange{ { IPRange: crdv1alpha2.IPRange{ diff --git a/pkg/ipam/poolallocator/allocator.go b/pkg/ipam/poolallocator/allocator.go index 2ea90c39697..eca2e2f020b 100644 --- a/pkg/ipam/poolallocator/allocator.go +++ b/pkg/ipam/poolallocator/allocator.go @@ -36,7 +36,10 @@ import ( // Pool Allocator assumes that pool with allocated IPs can not be deleted. 
Pool ranges can // only be extended. type IPPoolAllocator struct { - // Name of IP Pool custom resource + // IP version of the IPPool + IPVersion v1alpha2.IPVersion + + // Name of IPPool custom resource ipPoolName string // crd client to update the pool @@ -52,12 +55,13 @@ func NewIPPoolAllocator(poolName string, client crdclientset.Interface, poolList // Validate the pool exists // This has an extra roundtrip cost, however this would allow fallback to // default IPAM driver if needed - _, err := poolLister.Get(poolName) + pool, err := poolLister.Get(poolName) if err != nil { return nil, err } allocator := &IPPoolAllocator{ + IPVersion: pool.Spec.IPVersion, ipPoolName: poolName, crdClient: client, ipPoolLister: poolLister, @@ -252,8 +256,8 @@ func (a *IPPoolAllocator) removeIPAddressState(ipPool *v1alpha2.IPPool, ip net.I // allocated, or in case CRD failed to update its state. // In case of success, IP pool CRD status is updated with allocated IP/state/resource/container. // AllocateIP returns subnet details for the requested IP, as defined in IP pool spec. -func (a *IPPoolAllocator) AllocateIP(ip net.IP, state v1alpha2.IPAddressPhase, owner v1alpha2.IPAddressOwner) (v1alpha2.SubnetInfo, error) { - var subnetSpec v1alpha2.SubnetInfo +func (a *IPPoolAllocator) AllocateIP(ip net.IP, state v1alpha2.IPAddressPhase, owner v1alpha2.IPAddressOwner) (*v1alpha2.SubnetInfo, error) { + var subnetSpec *v1alpha2.SubnetInfo // Retry on CRD update conflict which is caused by multiple agents updating a pool at same time. 
err := retry.RetryOnConflict(retry.DefaultRetry, func() error { ipPool, allocators, err := a.getPoolAndInitIPAllocators() @@ -278,7 +282,7 @@ func (a *IPPoolAllocator) AllocateIP(ip net.IP, state v1alpha2.IPAddressPhase, o return fmt.Errorf("IP %v does not belong to IP pool %s", ip, a.ipPoolName) } - subnetSpec = ipPool.Spec.IPRanges[index].SubnetInfo + subnetSpec = &ipPool.Spec.IPRanges[index].SubnetInfo err = a.appendPoolUsage(ipPool, ip, state, owner) return err @@ -294,18 +298,19 @@ func (a *IPPoolAllocator) AllocateIP(ip net.IP, state v1alpha2.IPAddressPhase, o // or in case CRD failed to update its state. // In case of success, IP pool CRD status is updated with allocated IP/state/resource/container. // AllocateIP returns subnet details for the requested IP, as defined in IP pool spec. -func (a *IPPoolAllocator) AllocateNext(state v1alpha2.IPAddressPhase, owner v1alpha2.IPAddressOwner) (net.IP, v1alpha2.SubnetInfo, error) { - var subnetSpec v1alpha2.SubnetInfo +func (a *IPPoolAllocator) AllocateNext(state v1alpha2.IPAddressPhase, owner v1alpha2.IPAddressOwner) (net.IP, *v1alpha2.SubnetInfo, error) { + var subnetSpec *v1alpha2.SubnetInfo var ip net.IP // Same resource can not ask for allocation twice without release // This needs to be verified even at the expense of another API call - exists, err := a.HasContainer(owner.Pod.ContainerID) + exists, err := a.HasContainer(owner.Pod.ContainerID, owner.Pod.IFName) if err != nil { - return ip, subnetSpec, err + return nil, nil, err } if exists { - return ip, subnetSpec, fmt.Errorf("container %s was already allocated an address from IP Pool %s", owner.Pod.ContainerID, a.ipPoolName) + return nil, nil, fmt.Errorf("container %s interface %s was already allocated an address from IP Pool %s", + owner.Pod.ContainerID, owner.Pod.IFName, a.ipPoolName) } // Retry on CRD update conflict which is caused by multiple agents updating a pool at same time. 
@@ -330,7 +335,7 @@ func (a *IPPoolAllocator) AllocateNext(state v1alpha2.IPAddressPhase, owner v1al return fmt.Errorf("failed to allocate IP: Pool %s is exausted", a.ipPoolName) } - subnetSpec = ipPool.Spec.IPRanges[index].SubnetInfo + subnetSpec = &ipPool.Spec.IPRanges[index].SubnetInfo return a.appendPoolUsage(ipPool, ip, state, owner) }) @@ -344,13 +349,13 @@ func (a *IPPoolAllocator) AllocateNext(state v1alpha2.IPAddressPhase, owner v1al // It returns error if pool is exhausted, or in case it fails to update IPPool's state. In case of // success, IP pool status is updated with allocated IP/state/resource/container. // AllocateReservedOrNext returns subnet details for the requested IP, as defined in IP pool spec. -func (a *IPPoolAllocator) AllocateReservedOrNext(state v1alpha2.IPAddressPhase, owner v1alpha2.IPAddressOwner) (net.IP, v1alpha2.SubnetInfo, error) { - var subnetSpec v1alpha2.SubnetInfo +func (a *IPPoolAllocator) AllocateReservedOrNext(state v1alpha2.IPAddressPhase, owner v1alpha2.IPAddressOwner) (net.IP, *v1alpha2.SubnetInfo, error) { + var subnetSpec *v1alpha2.SubnetInfo var ip net.IP ip, err := a.getReservedIP(owner) if err != nil { - return ip, subnetSpec, err + return nil, nil, err } if ip == nil { // ip is not reserved, allocate next available ip @@ -359,12 +364,12 @@ func (a *IPPoolAllocator) AllocateReservedOrNext(state v1alpha2.IPAddressPhase, // Same resource can not ask for allocation twice without release // This needs to be verified even at the expense of another API call - exists, err := a.HasContainer(owner.Pod.ContainerID) + exists, err := a.HasContainer(owner.Pod.ContainerID, owner.Pod.IFName) if err != nil { - return ip, subnetSpec, err + return nil, nil, err } if exists { - return ip, subnetSpec, fmt.Errorf("container %s was already allocated an address from IP Pool %s", owner.Pod.ContainerID, a.ipPoolName) + return nil, nil, fmt.Errorf("container %s interface %s was already allocated an address from IP Pool %s", owner.Pod.ContainerID, owner.Pod.IFName,
a.ipPoolName) } // Retry on CRD update conflict which is caused by multiple agents updating a pool at same time. @@ -387,7 +392,7 @@ func (a *IPPoolAllocator) AllocateReservedOrNext(state v1alpha2.IPAddressPhase, return fmt.Errorf("IP %v does not belong to IP pool %s", ip, a.ipPoolName) } - subnetSpec = ipPool.Spec.IPRanges[index].SubnetInfo + subnetSpec = &ipPool.Spec.IPRanges[index].SubnetInfo return a.updateIPAddressState(ipPool, ip, state, owner) }) @@ -451,37 +456,6 @@ func (a *IPPoolAllocator) Release(ip net.IP) error { return err } -// ReleasePod releases the IP associated with specified Pod. -// It returns error if the pod is not present in state or in case CRD failed to update state. -// In case of success, IP pool CRD status is updated with released entry. -func (a *IPPoolAllocator) ReleasePod(namespace, podName string) error { - - // Retry on CRD update conflict which is caused by multiple agents updating a pool at same time. - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - ipPool, err := a.getPool() - - if err != nil { - return err - } - - // Mark allocated IPs from pool status as unavailable - for _, ip := range ipPool.Status.IPAddresses { - if ip.Owner.Pod != nil && ip.Owner.Pod.Namespace == namespace && ip.Owner.Pod.Name == podName { - return a.removeIPAddressState(ipPool, net.ParseIP(ip.IPAddress)) - - } - } - - klog.V(4).InfoS("IP Pool state:", "name", a.ipPoolName, "allocation", ipPool.Status.IPAddresses) - return fmt.Errorf("failed to find record of IP allocated to Pod:%s/%s in pool %s", namespace, podName, a.ipPoolName) - }) - - if err != nil { - klog.ErrorS(err, "Failed to release IP address", "Namespace", namespace, "Pod", podName, "IPPool", a.ipPoolName) - } - return err -} - // ReleaseStatefulSet releases all IPs associated with specified StatefulSet. It returns error // in case CRD failed to update its state. // In case of success, IP pool CRD status is updated with released entries. 
@@ -527,33 +501,34 @@ func (a *IPPoolAllocator) ReleaseStatefulSet(namespace, name string) error { return err } -// ReleaseContainerIfPresent releases the IP associated with specified container ID if present in state. -// It returns error in case CRD failed to update its state, or if pool does not exist. -// In case of success, IP pool CRD status is updated with released entry. -func (a *IPPoolAllocator) ReleaseContainerIfPresent(containerID string) error { - +// ReleaseContainer releases the IP associated with the specified container ID and interface name, +// and updates the IPPool CR status. +// If no IP is allocated to the Pod according to the IPPool CR status, the func just returns with no +// change. +func (a *IPPoolAllocator) ReleaseContainer(containerID, ifName string) error { // Retry on CRD update conflict which is caused by multiple agents updating a pool at same time. err := retry.RetryOnConflict(retry.DefaultRetry, func() error { ipPool, err := a.getPool() - if err != nil { return err } // Mark allocated IPs from pool status as unavailable for _, ip := range ipPool.Status.IPAddresses { - if ip.Owner.Pod != nil && ip.Owner.Pod.ContainerID == containerID { + savedOwner := ip.Owner.Pod + if savedOwner != nil && savedOwner.ContainerID == containerID && savedOwner.IFName == ifName { return a.removeIPAddressState(ipPool, net.ParseIP(ip.IPAddress)) } } - klog.V(4).InfoS("Failed to find allocation record in pool", "container", containerID, "pool", a.ipPoolName, "allocation", ipPool.Status.IPAddresses) + klog.V(4).InfoS("Did not find the allocation record in IPPool", + "container", containerID, "interface", ifName, "pool", a.ipPoolName, "allocation", ipPool.Status.IPAddresses) return nil }) if err != nil { - klog.ErrorS(err, "Failed to release IP address", "Container", containerID, "IPPool", a.ipPoolName) + klog.ErrorS(err, "Failed to release IP address", "Container", containerID, "interface", ifName, "IPPool", a.ipPoolName) } return err } @@ -576,7 +551,7 @@ 
func (a *IPPoolAllocator) HasPod(namespace, podName string) (bool, error) { } // HasContainer checks whether an IP was associated with specified container. It returns error if the resource crd fails to be retrieved. -func (a *IPPoolAllocator) HasContainer(containerID string) (bool, error) { +func (a *IPPoolAllocator) HasContainer(containerID, ifName string) (bool, error) { ipPool, err := a.getPool() @@ -585,7 +560,7 @@ func (a *IPPoolAllocator) HasContainer(containerID string) (bool, error) { } for _, ip := range ipPool.Status.IPAddresses { - if ip.Owner.Pod != nil && ip.Owner.Pod.ContainerID == containerID { + if ip.Owner.Pod != nil && ip.Owner.Pod.ContainerID == containerID && ip.Owner.Pod.IFName == ifName { return true, nil } } diff --git a/pkg/ipam/poolallocator/allocator_test.go b/pkg/ipam/poolallocator/allocator_test.go index 19119eb46f0..79b9a845059 100644 --- a/pkg/ipam/poolallocator/allocator_test.go +++ b/pkg/ipam/poolallocator/allocator_test.go @@ -31,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/retry" "k8s.io/klog/v2" ) @@ -83,7 +84,7 @@ func validateAllocationSequence(t *testing.T, allocator *IPPoolAllocator, subnet ip, returnInfo, err := allocator.AllocateNext(crdv1a2.IPAddressPhaseAllocated, owner) require.NoError(t, err) assert.Equal(t, net.ParseIP(expectedIP), ip) - assert.Equal(t, subnetInfo, returnInfo) + assert.Equal(t, subnetInfo, *returnInfo) i += 1 } } @@ -115,11 +116,11 @@ func TestAllocateIP(t *testing.T) { // Allocate specific IP from the range returnInfo, err := allocator.AllocateIP(net.ParseIP("10.2.2.101"), crdv1a2.IPAddressPhaseAllocated, fakePodOwner) - assert.Equal(t, subnetInfo, returnInfo) + assert.Equal(t, subnetInfo, *returnInfo) require.NoError(t, err) // Validate IP outside the range is not allocated - returnInfo, err = allocator.AllocateIP(net.ParseIP("10.2.2.121"), crdv1a2.IPAddressPhaseAllocated, fakePodOwner) + 
_, err = allocator.AllocateIP(net.ParseIP("10.2.2.121"), crdv1a2.IPAddressPhaseAllocated, fakePodOwner) require.Error(t, err) // Make sure IP allocated above is not allocated again @@ -128,7 +129,6 @@ func TestAllocateIP(t *testing.T) { // Validate error is returned if IP is already allocated _, err = allocator.AllocateIP(net.ParseIP("10.2.2.102"), crdv1a2.IPAddressPhaseAllocated, fakePodOwner) require.Error(t, err) - } func TestAllocateNext(t *testing.T) { @@ -269,6 +269,34 @@ func TestAllocateReleaseSequence(t *testing.T) { validateAllocationSequence(t, allocator, subnetInfo, []string{"2001::1000", "2001::2", "2001::5"}) } +// releasePod releases the IP associated with the specified Pod, and updates the IPPool CR status. +// The func returns an error, if no IP is allocated to the Pod according to the IPPool CR status. +func (a *IPPoolAllocator) releasePod(namespace, podName string) error { + // Retry on CRD update conflict which is caused by multiple agents updating a pool at same time. 
+ err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + ipPool, err := a.getPool() + if err != nil { + return err + } + + // Mark allocated IPs from pool status as unavailable + for _, ip := range ipPool.Status.IPAddresses { + if ip.Owner.Pod != nil && ip.Owner.Pod.Namespace == namespace && ip.Owner.Pod.Name == podName { + return a.removeIPAddressState(ipPool, net.ParseIP(ip.IPAddress)) + + } + } + + klog.V(4).InfoS("IP Pool state:", "name", a.ipPoolName, "allocation", ipPool.Status.IPAddresses) + return fmt.Errorf("failed to find record of IP allocated to Pod:%s/%s in pool %s", namespace, podName, a.ipPoolName) + }) + + if err != nil { + klog.ErrorS(err, "Failed to release IP address", "Namespace", namespace, "Pod", podName, "IPPool", a.ipPoolName) + } + return err +} + func TestReleaseResource(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) @@ -302,7 +330,7 @@ func TestReleaseResource(t *testing.T) { // Release first IP from first range and middle IP from second range for _, podName := range []string{"fakePod2", "fakePod4"} { - err := allocator.ReleasePod(testNamespace, podName) + err := allocator.releasePod(testNamespace, podName) require.NoError(t, err) } @@ -318,6 +346,7 @@ func TestHas(t *testing.T) { Name: "fakePod", Namespace: testNamespace, ContainerID: "fakeContainer", + IFName: "eth1", }, } poolName := uuid.New().String() @@ -352,10 +381,13 @@ func TestHas(t *testing.T) { has, err := allocator.HasPod(testNamespace, "realPod") require.NoError(t, err) assert.False(t, has) - has, err = allocator.HasContainer("fakeContainer") + has, err = allocator.HasContainer("fakeContainer", "eth1") require.NoError(t, err) assert.True(t, has) - has, err = allocator.HasContainer("realContainer") + has, err = allocator.HasContainer("fakeContainer", "") + require.NoError(t, err) + assert.False(t, has) + has, err = allocator.HasContainer("realContainer", "eth1") require.NoError(t, err) assert.False(t, has) } diff --git 
a/test/e2e/antreaipam_test.go b/test/e2e/antreaipam_test.go index e1f3c74328b..b0d4d2fc139 100644 --- a/test/e2e/antreaipam_test.go +++ b/test/e2e/antreaipam_test.go @@ -44,7 +44,7 @@ var ( Name: "test-ippool-ipv4-0", }, Spec: crdv1alpha2.IPPoolSpec{ - IPVersion: 4, + IPVersion: crdv1alpha2.IPv4, IPRanges: []crdv1alpha2.SubnetIPRange{{IPRange: crdv1alpha2.IPRange{ CIDR: "", Start: "192.168.240.100", @@ -61,7 +61,7 @@ var ( Name: "test-ippool-ipv4-1", }, Spec: crdv1alpha2.IPPoolSpec{ - IPVersion: 4, + IPVersion: crdv1alpha2.IPv4, IPRanges: []crdv1alpha2.SubnetIPRange{{IPRange: crdv1alpha2.IPRange{ CIDR: "", Start: "192.168.240.130", diff --git a/test/integration/agent/cniserver_test.go b/test/integration/agent/cniserver_test.go index c5e5efa4546..86d9cff9a7d 100644 --- a/test/integration/agent/cniserver_test.go +++ b/test/integration/agent/cniserver_test.go @@ -571,9 +571,8 @@ func newTester() *cmdAddDelTester { "", testNodeConfig, k8sFake.NewSimpleClientset(), - false, - false, routeMock, + false, false, false, tester.networkReadyCh) tester.server.Initialize(ovsServiceMock, ofServiceMock, ifaceStore, channel.NewSubscribableChannel("PodUpdate", 100), nil) ctx := context.Background() @@ -651,7 +650,7 @@ func TestAntreaServerFunc(t *testing.T) { controller := mock.NewController(t) defer controller.Finish() ipamMock = ipamtest.NewMockIPAMDriver(controller) - _ = ipam.RegisterIPAMDriver("mock", ipamMock) + ipam.RegisterIPAMDriver("mock", ipamMock) ovsServiceMock = ovsconfigtest.NewMockOVSBridgeClient(controller) ofServiceMock = openflowtest.NewMockClient(controller) routeMock = routetest.NewMockInterface(controller) @@ -736,9 +735,8 @@ func setupChainTest( "", testNodeConfig, k8sFake.NewSimpleClientset(), - true, - false, routeMock, + true, false, false, networkReadyCh) } else { server = inServer