diff --git a/build/charts/antrea/conf/antrea-agent.conf b/build/charts/antrea/conf/antrea-agent.conf
index 2b019b95f30..7be006976a2 100644
--- a/build/charts/antrea/conf/antrea-agent.conf
+++ b/build/charts/antrea/conf/antrea-agent.conf
@@ -82,6 +82,9 @@ featureGates:
# Allow users to allocate Egress IPs from a different subnet from the default Node subnet.
{{- include "featureGate" (dict "featureGates" .Values.featureGates "name" "EgressSeparateSubnet" "default" false) }}
+# Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes.
+{{- include "featureGate" (dict "featureGates" .Values.featureGates "name" "NodeNetworkPolicy" "default" false) }}
+
# Name of the OpenVSwitch bridge antrea-agent will create and use.
# Make sure it doesn't conflict with your existing OpenVSwitch bridges.
ovsBridge: {{ .Values.ovs.bridgeName | quote }}
diff --git a/build/yamls/antrea-aks.yml b/build/yamls/antrea-aks.yml
index d3d020a8454..937f92e7145 100644
--- a/build/yamls/antrea-aks.yml
+++ b/build/yamls/antrea-aks.yml
@@ -5625,6 +5625,9 @@ data:
# Allow users to allocate Egress IPs from a different subnet from the default Node subnet.
# EgressSeparateSubnet: false
+ # Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes.
+ # NodeNetworkPolicy: false
+
# Name of the OpenVSwitch bridge antrea-agent will create and use.
# Make sure it doesn't conflict with your existing OpenVSwitch bridges.
ovsBridge: "br-int"
@@ -6925,7 +6928,7 @@ spec:
kubectl.kubernetes.io/default-container: antrea-agent
# Automatically restart Pods with a RollingUpdate if the ConfigMap changes
# See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments
- checksum/config: fe9081d7718e258905728726bb8f3a8d42a332d7f4e0d8faaa9731b3c4b51aa4
+ checksum/config: f4ad8910666191c02982d1b7b202e3c4bd20fb4a8179dcb5696119f3b1490a72
labels:
app: antrea
component: antrea-agent
@@ -7163,7 +7166,7 @@ spec:
annotations:
# Automatically restart Pod if the ConfigMap changes
# See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments
- checksum/config: fe9081d7718e258905728726bb8f3a8d42a332d7f4e0d8faaa9731b3c4b51aa4
+ checksum/config: f4ad8910666191c02982d1b7b202e3c4bd20fb4a8179dcb5696119f3b1490a72
labels:
app: antrea
component: antrea-controller
diff --git a/build/yamls/antrea-eks.yml b/build/yamls/antrea-eks.yml
index 937b7a3c7e8..8d83fc62773 100644
--- a/build/yamls/antrea-eks.yml
+++ b/build/yamls/antrea-eks.yml
@@ -5625,6 +5625,9 @@ data:
# Allow users to allocate Egress IPs from a different subnet from the default Node subnet.
# EgressSeparateSubnet: false
+ # Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes.
+ # NodeNetworkPolicy: false
+
# Name of the OpenVSwitch bridge antrea-agent will create and use.
# Make sure it doesn't conflict with your existing OpenVSwitch bridges.
ovsBridge: "br-int"
@@ -6925,7 +6928,7 @@ spec:
kubectl.kubernetes.io/default-container: antrea-agent
# Automatically restart Pods with a RollingUpdate if the ConfigMap changes
# See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments
- checksum/config: fe9081d7718e258905728726bb8f3a8d42a332d7f4e0d8faaa9731b3c4b51aa4
+ checksum/config: f4ad8910666191c02982d1b7b202e3c4bd20fb4a8179dcb5696119f3b1490a72
labels:
app: antrea
component: antrea-agent
@@ -7164,7 +7167,7 @@ spec:
annotations:
# Automatically restart Pod if the ConfigMap changes
# See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments
- checksum/config: fe9081d7718e258905728726bb8f3a8d42a332d7f4e0d8faaa9731b3c4b51aa4
+ checksum/config: f4ad8910666191c02982d1b7b202e3c4bd20fb4a8179dcb5696119f3b1490a72
labels:
app: antrea
component: antrea-controller
diff --git a/build/yamls/antrea-gke.yml b/build/yamls/antrea-gke.yml
index 9b694745985..a91213568da 100644
--- a/build/yamls/antrea-gke.yml
+++ b/build/yamls/antrea-gke.yml
@@ -5625,6 +5625,9 @@ data:
# Allow users to allocate Egress IPs from a different subnet from the default Node subnet.
# EgressSeparateSubnet: false
+ # Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes.
+ # NodeNetworkPolicy: false
+
# Name of the OpenVSwitch bridge antrea-agent will create and use.
# Make sure it doesn't conflict with your existing OpenVSwitch bridges.
ovsBridge: "br-int"
@@ -6925,7 +6928,7 @@ spec:
kubectl.kubernetes.io/default-container: antrea-agent
# Automatically restart Pods with a RollingUpdate if the ConfigMap changes
# See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments
- checksum/config: 997259cac105a193d671880b165e203a9954f33009766df5eceed753509c46b9
+ checksum/config: a54768c79d693083be554386f268c93bbbd0fdf5b334edd9aff31c13151c4e29
labels:
app: antrea
component: antrea-agent
@@ -7161,7 +7164,7 @@ spec:
annotations:
# Automatically restart Pod if the ConfigMap changes
# See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments
- checksum/config: 997259cac105a193d671880b165e203a9954f33009766df5eceed753509c46b9
+ checksum/config: a54768c79d693083be554386f268c93bbbd0fdf5b334edd9aff31c13151c4e29
labels:
app: antrea
component: antrea-controller
diff --git a/build/yamls/antrea-ipsec.yml b/build/yamls/antrea-ipsec.yml
index ac30d70d709..dafd8b040ce 100644
--- a/build/yamls/antrea-ipsec.yml
+++ b/build/yamls/antrea-ipsec.yml
@@ -5638,6 +5638,9 @@ data:
# Allow users to allocate Egress IPs from a different subnet from the default Node subnet.
# EgressSeparateSubnet: false
+ # Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes.
+ # NodeNetworkPolicy: false
+
# Name of the OpenVSwitch bridge antrea-agent will create and use.
# Make sure it doesn't conflict with your existing OpenVSwitch bridges.
ovsBridge: "br-int"
@@ -6938,7 +6941,7 @@ spec:
kubectl.kubernetes.io/default-container: antrea-agent
# Automatically restart Pods with a RollingUpdate if the ConfigMap changes
# See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments
- checksum/config: 4364ee1520a24d9a465a405536736498119269c0fc81d4dc01e83d7fdd462913
+ checksum/config: 7ce7d85bc08079d1cef3b1d44f31e2139961f9ae49f71d79ff3b28e7e9ad6325
checksum/ipsec-secret: d0eb9c52d0cd4311b6d252a951126bf9bea27ec05590bed8a394f0f792dcb2a4
labels:
app: antrea
@@ -7220,7 +7223,7 @@ spec:
annotations:
# Automatically restart Pod if the ConfigMap changes
# See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments
- checksum/config: 4364ee1520a24d9a465a405536736498119269c0fc81d4dc01e83d7fdd462913
+ checksum/config: 7ce7d85bc08079d1cef3b1d44f31e2139961f9ae49f71d79ff3b28e7e9ad6325
labels:
app: antrea
component: antrea-controller
diff --git a/build/yamls/antrea.yml b/build/yamls/antrea.yml
index ff792fa34d7..17858eb7007 100644
--- a/build/yamls/antrea.yml
+++ b/build/yamls/antrea.yml
@@ -5625,6 +5625,9 @@ data:
# Allow users to allocate Egress IPs from a different subnet from the default Node subnet.
# EgressSeparateSubnet: false
+ # Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes.
+ # NodeNetworkPolicy: false
+
# Name of the OpenVSwitch bridge antrea-agent will create and use.
# Make sure it doesn't conflict with your existing OpenVSwitch bridges.
ovsBridge: "br-int"
@@ -6925,7 +6928,7 @@ spec:
kubectl.kubernetes.io/default-container: antrea-agent
# Automatically restart Pods with a RollingUpdate if the ConfigMap changes
# See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments
- checksum/config: d8e5dd6cc4bd55eeba224229fd8207045291ab3dfafe5f3c3e100c31003d2887
+ checksum/config: 290f0c748863a7dad1e9d53d62c74f8108a44c5cc803306d351c108062cc1378
labels:
app: antrea
component: antrea-agent
@@ -7161,7 +7164,7 @@ spec:
annotations:
# Automatically restart Pod if the ConfigMap changes
# See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments
- checksum/config: d8e5dd6cc4bd55eeba224229fd8207045291ab3dfafe5f3c3e100c31003d2887
+ checksum/config: 290f0c748863a7dad1e9d53d62c74f8108a44c5cc803306d351c108062cc1378
labels:
app: antrea
component: antrea-controller
diff --git a/cmd/antrea-agent/agent.go b/cmd/antrea-agent/agent.go
index fba691bdd76..a0c1807278e 100644
--- a/cmd/antrea-agent/agent.go
+++ b/cmd/antrea-agent/agent.go
@@ -140,6 +140,7 @@ func run(o *Options) error {
enableAntreaIPAM := features.DefaultFeatureGate.Enabled(features.AntreaIPAM)
enableBridgingMode := enableAntreaIPAM && o.config.EnableBridgingMode
l7NetworkPolicyEnabled := features.DefaultFeatureGate.Enabled(features.L7NetworkPolicy)
+ nodeNetworkPolicyEnabled := features.DefaultFeatureGate.Enabled(features.NodeNetworkPolicy)
enableMulticlusterGW := features.DefaultFeatureGate.Enabled(features.Multicluster) && o.config.Multicluster.EnableGateway
enableMulticlusterNP := features.DefaultFeatureGate.Enabled(features.Multicluster) && o.config.Multicluster.EnableStretchedNetworkPolicy
enableFlowExporter := features.DefaultFeatureGate.Enabled(features.FlowExporter) && o.config.FlowExporter.Enable
@@ -219,7 +220,13 @@ func run(o *Options) error {
egressConfig := &config.EgressConfig{
ExceptCIDRs: exceptCIDRs,
}
- routeClient, err := route.NewClient(networkConfig, o.config.NoSNAT, o.config.AntreaProxy.ProxyAll, connectUplinkToBridge, multicastEnabled, serviceCIDRProvider)
+ routeClient, err := route.NewClient(networkConfig,
+ o.config.NoSNAT,
+ o.config.AntreaProxy.ProxyAll,
+ connectUplinkToBridge,
+ nodeNetworkPolicyEnabled,
+ multicastEnabled,
+ serviceCIDRProvider)
if err != nil {
return fmt.Errorf("error creating route client: %v", err)
}
@@ -462,6 +469,7 @@ func run(o *Options) error {
networkPolicyController, err := networkpolicy.NewNetworkPolicyController(
antreaClientProvider,
ofClient,
+ routeClient,
ifaceStore,
afero.NewOsFs(),
nodeKey,
@@ -471,6 +479,7 @@ func run(o *Options) error {
groupIDUpdates,
antreaPolicyEnabled,
l7NetworkPolicyEnabled,
+ nodeNetworkPolicyEnabled,
o.enableAntreaProxy,
statusManagerEnabled,
multicastEnabled,
diff --git a/docs/antrea-network-policy.md b/docs/antrea-network-policy.md
index c3e9c1d08ac..5fdd9a2db8b 100644
--- a/docs/antrea-network-policy.md
+++ b/docs/antrea-network-policy.md
@@ -20,6 +20,7 @@
- [ACNP for IGMP traffic](#acnp-for-igmp-traffic)
- [ACNP for multicast egress traffic](#acnp-for-multicast-egress-traffic)
- [ACNP for HTTP traffic](#acnp-for-http-traffic)
+ - [ACNP for Kubernetes Node traffic](#acnp-for-kubernetes-node-traffic)
- [ACNP with log settings](#acnp-with-log-settings)
- [Behavior of to and from selectors](#behavior-of-to-and-from-selectors)
- [Key differences from K8s NetworkPolicy](#key-differences-from-k8s-networkpolicy)
@@ -524,6 +525,56 @@ spec:
Please refer to [Antrea Layer 7 NetworkPolicy](antrea-l7-network-policy.md) for extra information.
+#### ACNP for Kubernetes Node traffic
+
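+ACNPs can be applied to Kubernetes Nodes by specifying a `nodeSelector` in the policy-level `appliedTo`. The following
+policies drop egress HTTP traffic from Linux Nodes to a CIDR, and ingress SSH traffic from that CIDR to Linux Nodes:
+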
+```yaml
+apiVersion: crd.antrea.io/v1beta1
+kind: ClusterNetworkPolicy
+metadata:
+ name: acnp-node-egress-traffic-drop
+spec:
+ priority: 5
+ tier: securityops
+ appliedTo:
+ - nodeSelector:
+ matchLabels:
+ kubernetes.io/os: linux
+ egress:
+ - action: Drop
+ to:
+ - ipBlock:
+ cidr: 192.168.1.0/24
+ ports:
+ - protocol: TCP
+ port: 80
+ name: dropHTTPTrafficToCIDR
+```
+
+```yaml
+apiVersion: crd.antrea.io/v1beta1
+kind: ClusterNetworkPolicy
+metadata:
+ name: acnp-node-ingress-traffic-drop
+spec:
+ priority: 5
+ tier: securityops
+ appliedTo:
+ - nodeSelector:
+ matchLabels:
+ kubernetes.io/os: linux
+ ingress:
+ - action: Drop
+ from:
+ - ipBlock:
+ cidr: 192.168.1.0/24
+ ports:
+ - protocol: TCP
+ port: 22
+ name: dropSSHTrafficFromCIDR
+```
+
+Please refer to [Antrea Node NetworkPolicy](antrea-node-network-policy.md) for more information.
+
#### ACNP with log settings
```yaml
diff --git a/docs/antrea-node-network-policy.md b/docs/antrea-node-network-policy.md
new file mode 100644
index 00000000000..66424cca693
--- /dev/null
+++ b/docs/antrea-node-network-policy.md
@@ -0,0 +1,115 @@
+# Antrea Node NetworkPolicy
+
+## Table of Contents
+
+- [Introduction](#introduction)
+- [Prerequisites](#prerequisites)
+- [Usage](#usage)
+- [Limitations](#limitations)
+
+## Introduction
+
+Node NetworkPolicy is designed to secure Kubernetes Node traffic. It has been supported since Antrea v1.15.
+This guide demonstrates how to configure Node NetworkPolicies.
+
+## Prerequisites
+
+Node NetworkPolicy was introduced in v1.15 as an alpha feature and is disabled by default. The `NodeNetworkPolicy`
+feature gate must be enabled in antrea-agent.conf in the `antrea-config` ConfigMap.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: antrea-config
+ namespace: kube-system
+data:
+ antrea-agent.conf: |
+ featureGates:
+ NodeNetworkPolicy: true
+```
+
+Alternatively, you can use the following Helm installation command to enable the feature gate:
+
+```bash
+helm install antrea antrea/antrea --namespace kube-system --set featureGates.NodeNetworkPolicy=true
+```
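+
+If Antrea is already installed, the feature gate can also be enabled with a Helm upgrade. This is a sketch that
+assumes the same release name and chart used in the install command above:
+
+```bash
+helm upgrade antrea antrea/antrea --namespace kube-system --set featureGates.NodeNetworkPolicy=true
+```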
+
+## Usage
+
+Node NetworkPolicy is an extension of Antrea ClusterNetworkPolicy (ACNP). By specifying a `nodeSelector` in the policy-level
+`appliedTo`, an ACNP is applied to the selected Kubernetes Nodes.
+
+An example Node NetworkPolicy that blocks ingress traffic from Pods with label `app=client` to Nodes with label
+`kubernetes.io/hostname: k8s-node-control-plane`:
+
+```yaml
+apiVersion: crd.antrea.io/v1beta1
+kind: ClusterNetworkPolicy
+metadata:
+ name: ingress-drop-pod-to-node
+spec:
+ priority: 5
+ tier: application
+ appliedTo:
+ - nodeSelector:
+ matchLabels:
+ kubernetes.io/hostname: k8s-node-control-plane
+ ingress:
+ - name: drop-80
+ action: Drop
+ from:
+ - podSelector:
+ matchLabels:
+ app: client
+ ports:
+ - protocol: TCP
+ port: 80
+```
+
+An example Node NetworkPolicy that blocks egress traffic from Nodes with the label `kubernetes.io/hostname: k8s-node-control-plane`
+to Nodes with the label `kubernetes.io/hostname: k8s-node-worker-1` and some IP blocks:
+
+```yaml
+apiVersion: crd.antrea.io/v1beta1
+kind: ClusterNetworkPolicy
+metadata:
+ name: egress-drop-node-to-node
+spec:
+ priority: 5
+ tier: application
+ appliedTo:
+ - nodeSelector:
+ matchLabels:
+ kubernetes.io/hostname: k8s-node-control-plane
+ egress:
+ - name: drop-22
+ action: Drop
+ to:
+ - nodeSelector:
+ matchLabels:
+ kubernetes.io/hostname: k8s-node-worker-1
+ - ipBlock:
+ cidr: 192.168.77.0/24
+ - ipBlock:
+ cidr: 10.10.0.0/24
+ ports:
+ - protocol: TCP
+ port: 22
+```
+
+## Limitations
+
+- This feature is currently only supported for Linux Nodes.
+- Be cautious when applying policies to Nodes, in particular when configuring a default-deny policy for Nodes.
+  You should ensure that Kubernetes and Antrea control-plane communication is exempted, otherwise the cluster may go
+  out of service and you may lose connectivity to it (one way to do this is sketched after this list).
+- Only ACNPs can be applied to Nodes. ANPs cannot be applied to Nodes.
+- `nodeSelector` can only be specified in the policy-level `appliedTo` field, not in the rule-level `appliedTo`, and not
+ in a `Group` or `ClusterGroup`.
+- ACNPs applied to Nodes cannot be applied to Pods at the same time.
+- FQDN is not supported for ACNPs applied to Nodes.
+- Layer 7 NetworkPolicy is not supported yet.
+- For UDP or SCTP, when the `Reject` action is specified in an egress rule, it behaves identically to the `Drop` action.
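+
+Below is a sketch of a safeguard policy that could accompany a default-deny policy applied to Nodes: it allows Node
+egress traffic to the Kubernetes API server before lower-priority rules are evaluated. The `securityops` tier, the
+priority and the ClusterIP `10.96.0.1/32` are assumptions; substitute values that match your cluster:
+
+```yaml
+apiVersion: crd.antrea.io/v1beta1
+kind: ClusterNetworkPolicy
+metadata:
+  name: allow-node-egress-to-kube-apiserver
+spec:
+  priority: 1
+  tier: securityops
+  appliedTo:
+  - nodeSelector: {}  # an empty selector matches all Nodes
+  egress:
+  - name: allow-kube-apiserver
+    action: Allow
+    to:
+    - ipBlock:
+        cidr: 10.96.0.1/32  # assumed kube-apiserver ClusterIP
+    ports:
+    - protocol: TCP
+      port: 443
+```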
diff --git a/docs/feature-gates.md b/docs/feature-gates.md
index 9171a94c5db..42812e92d46 100644
--- a/docs/feature-gates.md
+++ b/docs/feature-gates.md
@@ -57,6 +57,7 @@ edit the Agent configuration in the
| `AdminNetworkPolicy` | Controller | `false` | Alpha | v1.13 | N/A | N/A | Yes | |
| `EgressTrafficShaping` | Agent | `false` | Alpha | v1.14 | N/A | N/A | Yes | OVS meters should be supported |
| `EgressSeparateSubnet` | Agent | `false` | Alpha | v1.15 | N/A | N/A | No | |
+| `NodeNetworkPolicy` | Agent | `false` | Alpha | v1.15 | N/A | N/A | Yes | |
## Description and Requirements of Features
@@ -405,6 +406,14 @@ this [document](antrea-l7-network-policy.md#prerequisites) for more information
The `AdminNetworkPolicy` API (which currently includes the AdminNetworkPolicy and BaselineAdminNetworkPolicy objects)
complements the Antrea-native policies and helps cluster administrators to set security postures in a portable manner.
+### NodeNetworkPolicy
+
+`NodeNetworkPolicy` allows users to apply ClusterNetworkPolicies to Kubernetes Nodes, protecting Node traffic.
+
+#### Requirements for this Feature
+
+This feature is only supported for Linux Nodes at the moment.
+
### EgressTrafficShaping
The `EgressTrafficShaping` feature gate of Antrea Agent enables traffic shaping of Egress, which could limit the
diff --git a/multicluster/test/e2e/antreapolicy_test.go b/multicluster/test/e2e/antreapolicy_test.go
index be911409807..e35d6b8e5a0 100644
--- a/multicluster/test/e2e/antreapolicy_test.go
+++ b/multicluster/test/e2e/antreapolicy_test.go
@@ -73,7 +73,7 @@ func initializeForPolicyTest(t *testing.T, data *MCTestData) {
k8sUtils, err := antreae2e.NewKubernetesUtils(&d)
failOnError(err, t)
if clusterName != leaderCluster {
- _, err = k8sUtils.Bootstrap(perClusterNamespaces, perNamespacePods, true)
+ _, err = k8sUtils.Bootstrap(perClusterNamespaces, perNamespacePods, true, nil, nil)
failOnError(err, t)
}
clusterK8sUtilsMap[clusterName] = k8sUtils
diff --git a/pkg/agent/config/node_config.go b/pkg/agent/config/node_config.go
index ebba7f3da9e..2908c1e2724 100644
--- a/pkg/agent/config/node_config.go
+++ b/pkg/agent/config/node_config.go
@@ -50,6 +50,13 @@ const (
L7NetworkPolicyReturnPortName = "antrea-l7-tap1"
)
+const (
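+	// NodeNetworkPolicyIngressRulesChain and NodeNetworkPolicyEgressRulesChain are the iptables chains that hold
+	// the core rules of ingress and egress Node NetworkPolicy rules respectively.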
+ NodeNetworkPolicyIngressRulesChain = "ANTREA-POL-INGRESS-RULES"
+ NodeNetworkPolicyEgressRulesChain = "ANTREA-POL-EGRESS-RULES"
+
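+	// NodeNetworkPolicyPrefix is the prefix shared by the names of the ipsets and per-rule iptables chains created
+	// for Node NetworkPolicy rules.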
+ NodeNetworkPolicyPrefix = "ANTREA-POL"
+)
+
var (
// VirtualServiceIPv4 or VirtualServiceIPv6 is used in the following scenarios:
// - The IP is used to perform SNAT for packets of Service sourced from Antrea gateway and destined for external
diff --git a/pkg/agent/controller/networkpolicy/cache.go b/pkg/agent/controller/networkpolicy/cache.go
index cf8f886e8de..10512d97bad 100644
--- a/pkg/agent/controller/networkpolicy/cache.go
+++ b/pkg/agent/controller/networkpolicy/cache.go
@@ -182,6 +182,17 @@ func (r *CompletedRule) isIGMPEgressPolicyRule() bool {
return false
}
+// isNodeNetworkPolicyRule returns whether the rule is applied to Kubernetes Nodes. All TargetMembers of a rule are
+// of the same type, so checking the first member is sufficient.
+func (r *CompletedRule) isNodeNetworkPolicyRule() bool {
+	for _, m := range r.TargetMembers {
+		return m.Node != nil
+	}
+	return false
+}
+
// ruleCache caches Antrea AddressGroups, AppliedToGroups and NetworkPolicies,
// can construct complete rules that can be used by reconciler to enforce.
type ruleCache struct {
diff --git a/pkg/agent/controller/networkpolicy/networkpolicy_controller.go b/pkg/agent/controller/networkpolicy/networkpolicy_controller.go
index 9ce8e0b4343..989506707f6 100644
--- a/pkg/agent/controller/networkpolicy/networkpolicy_controller.go
+++ b/pkg/agent/controller/networkpolicy/networkpolicy_controller.go
@@ -41,6 +41,7 @@ import (
"antrea.io/antrea/pkg/agent/interfacestore"
"antrea.io/antrea/pkg/agent/openflow"
proxytypes "antrea.io/antrea/pkg/agent/proxy/types"
+ "antrea.io/antrea/pkg/agent/route"
"antrea.io/antrea/pkg/agent/types"
"antrea.io/antrea/pkg/apis/controlplane/install"
"antrea.io/antrea/pkg/apis/controlplane/v1beta2"
@@ -90,7 +91,7 @@ type packetInAction func(*ofctrl.PacketIn) error
// Controller is responsible for watching Antrea AddressGroups, AppliedToGroups,
// and NetworkPolicies, feeding them to ruleCache, getting dirty rules from
-// ruleCache, invoking reconciler to reconcile them.
+// ruleCache, invoking reconcilers to reconcile them.
//
// a.Feed AddressGroups,AppliedToGroups
// and NetworkPolicies
@@ -101,8 +102,9 @@ type packetInAction func(*ofctrl.PacketIn) error
type Controller struct {
// antreaPolicyEnabled indicates whether Antrea NetworkPolicy and
// ClusterNetworkPolicy are enabled.
- antreaPolicyEnabled bool
- l7NetworkPolicyEnabled bool
+ antreaPolicyEnabled bool
+ l7NetworkPolicyEnabled bool
+ nodeNetworkPolicyEnabled bool
// antreaProxyEnabled indicates whether Antrea proxy is enabled.
antreaProxyEnabled bool
// statusManagerEnabled indicates whether a statusManager is configured.
@@ -123,9 +125,12 @@ type Controller struct {
queue workqueue.RateLimitingInterface
// ruleCache maintains the desired state of NetworkPolicy rules.
ruleCache *ruleCache
- // reconciler provides interfaces to reconcile the desired state of
+ // podReconciler provides interfaces to reconcile the desired state of
// NetworkPolicy rules with the actual state of Openflow entries.
- reconciler Reconciler
+ podReconciler Reconciler
+ // nodeReconciler provides interfaces to reconcile the desired state of
+ // NetworkPolicy rules with the actual state of iptables entries.
+ nodeReconciler Reconciler
// l7RuleReconciler provides interfaces to reconcile the desired state of
// NetworkPolicy rules which have L7 rules with the actual state of Suricata rules.
l7RuleReconciler L7RuleReconciler
@@ -164,6 +169,7 @@ type Controller struct {
// NewNetworkPolicyController returns a new *Controller.
func NewNetworkPolicyController(antreaClientGetter agent.AntreaClientProvider,
ofClient openflow.Client,
+ routeClient route.Interface,
ifaceStore interfacestore.InterfaceStore,
fs afero.Fs,
nodeName string,
@@ -173,6 +179,7 @@ func NewNetworkPolicyController(antreaClientGetter agent.AntreaClientProvider,
groupIDUpdates <-chan string,
antreaPolicyEnabled bool,
l7NetworkPolicyEnabled bool,
+ nodeNetworkPolicyEnabled bool,
antreaProxyEnabled bool,
statusManagerEnabled bool,
multicastEnabled bool,
@@ -187,19 +194,20 @@ func NewNetworkPolicyController(antreaClientGetter agent.AntreaClientProvider,
podNetworkWait *utilwait.Group) (*Controller, error) {
idAllocator := newIDAllocator(asyncRuleDeleteInterval, dnsInterceptRuleID)
c := &Controller{
- antreaClientProvider: antreaClientGetter,
- queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "networkpolicyrule"),
- ofClient: ofClient,
- nodeType: nodeType,
- antreaPolicyEnabled: antreaPolicyEnabled,
- l7NetworkPolicyEnabled: l7NetworkPolicyEnabled,
- antreaProxyEnabled: antreaProxyEnabled,
- statusManagerEnabled: statusManagerEnabled,
- multicastEnabled: multicastEnabled,
- gwPort: gwPort,
- tunPort: tunPort,
- nodeConfig: nodeConfig,
- podNetworkWait: podNetworkWait.Increment(),
+ antreaClientProvider: antreaClientGetter,
+ queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "networkpolicyrule"),
+ ofClient: ofClient,
+ nodeType: nodeType,
+ antreaPolicyEnabled: antreaPolicyEnabled,
+ l7NetworkPolicyEnabled: l7NetworkPolicyEnabled,
+ nodeNetworkPolicyEnabled: nodeNetworkPolicyEnabled,
+ antreaProxyEnabled: antreaProxyEnabled,
+ statusManagerEnabled: statusManagerEnabled,
+ multicastEnabled: multicastEnabled,
+ gwPort: gwPort,
+ tunPort: tunPort,
+ nodeConfig: nodeConfig,
+ podNetworkWait: podNetworkWait.Increment(),
}
if l7NetworkPolicyEnabled {
@@ -217,8 +225,12 @@ func NewNetworkPolicyController(antreaClientGetter agent.AntreaClientProvider,
c.ofClient.RegisterPacketInHandler(uint8(openflow.PacketInCategoryDNS), c.fqdnController)
}
}
- c.reconciler = newReconciler(ofClient, ifaceStore, idAllocator, c.fqdnController, groupCounters,
+ c.podReconciler = newPodReconciler(ofClient, ifaceStore, idAllocator, c.fqdnController, groupCounters,
v4Enabled, v6Enabled, antreaPolicyEnabled, multicastEnabled)
+
+ if c.nodeNetworkPolicyEnabled {
+ c.nodeReconciler = newNodeReconciler(routeClient, v4Enabled, v6Enabled)
+ }
c.ruleCache = newRuleCache(c.enqueueRule, podUpdateSubscriber, externalEntityUpdateSubscriber, groupIDUpdates, nodeType)
serializer := protobuf.NewSerializer(scheme, scheme)
@@ -289,7 +301,7 @@ func NewNetworkPolicyController(antreaClientGetter agent.AntreaClientProvider,
klog.ErrorS(err, "Failed to store the NetworkPolicy to file", "policyName", policy.SourceRef.ToString())
}
c.ruleCache.AddNetworkPolicy(policy)
- klog.InfoS("NetworkPolicy applied to Pods on this Node", "policyName", policy.SourceRef.ToString())
+ klog.InfoS("NetworkPolicy applied to Pods on this Node or the Node itself", "policyName", policy.SourceRef.ToString())
return nil
},
UpdateFunc: func(obj runtime.Object) error {
@@ -556,7 +568,7 @@ func (c *Controller) GetNetworkPolicyByRuleFlowID(ruleFlowID uint32) *v1beta2.Ne
}
func (c *Controller) GetRuleByFlowID(ruleFlowID uint32) *types.PolicyRule {
- rule, exists, err := c.reconciler.GetRuleByFlowID(ruleFlowID)
+ rule, exists, err := c.podReconciler.GetRuleByFlowID(ruleFlowID)
if err != nil {
klog.Errorf("Error when getting network policy by rule flow ID: %v", err)
return nil
@@ -623,7 +635,7 @@ func (c *Controller) Run(stopCh <-chan struct{}) {
}
klog.Infof("Starting IDAllocator worker to maintain the async rule cache")
- go c.reconciler.RunIDAllocatorWorker(stopCh)
+ go c.podReconciler.RunIDAllocatorWorker(stopCh)
if c.statusManagerEnabled {
go c.statusManager.Run(stopCh)
@@ -733,9 +745,15 @@ func (c *Controller) syncRule(key string) error {
rule, effective, realizable := c.ruleCache.GetCompletedRule(key)
if !effective {
klog.V(2).InfoS("Rule was not effective, removing it", "ruleID", key)
- if err := c.reconciler.Forget(key); err != nil {
+		// We don't know whether this rule was applied to Pods or to Nodes, but it's harmless to ask both
+		// reconcilers to forget it.
+ if err := c.podReconciler.Forget(key); err != nil {
return err
}
+ if c.nodeNetworkPolicyEnabled {
+ if err := c.nodeReconciler.Forget(key); err != nil {
+ return err
+ }
+ }
if c.statusManagerEnabled {
// We don't know whether this is a rule owned by Antrea Policy, but
// harmless to delete it.
@@ -758,6 +776,12 @@ func (c *Controller) syncRule(key string) error {
return nil
}
+	isNodeNetworkPolicy := rule.isNodeNetworkPolicyRule()
+	// Only skip rules that apply to Nodes; Pod rules must still be reconciled when the feature gate is disabled.
+	if isNodeNetworkPolicy && !c.nodeNetworkPolicyEnabled {
+		klog.InfoS("Feature gate NodeNetworkPolicy is not enabled, skipping", "ruleID", key)
+		return nil
+	}
+
if c.l7NetworkPolicyEnabled && len(rule.L7Protocols) != 0 {
// Allocate VLAN ID for the L7 rule.
vlanID := c.l7VlanIDAllocator.allocate(key)
@@ -768,12 +792,17 @@ func (c *Controller) syncRule(key string) error {
}
}
- err := c.reconciler.Reconcile(rule)
- if c.fqdnController != nil {
- // No matter whether the rule reconciliation succeeds or not, fqdnController
- // needs to be notified of the status.
- klog.V(2).InfoS("Rule realization was done", "ruleID", key)
- c.fqdnController.notifyRuleUpdate(key, err)
+ var err error
+ if isNodeNetworkPolicy {
+ err = c.nodeReconciler.Reconcile(rule)
+ } else {
+ err = c.podReconciler.Reconcile(rule)
+ if c.fqdnController != nil {
+ // No matter whether the rule reconciliation succeeds or not, fqdnController
+ // needs to be notified of the status.
+ klog.V(2).InfoS("Rule realization was done", "ruleID", key)
+ c.fqdnController.notifyRuleUpdate(key, err)
+ }
}
if err != nil {
return err
@@ -793,7 +822,7 @@ func (c *Controller) syncRules(keys []string) error {
klog.V(4).Infof("Finished syncing all rules before bookmark event (%v)", time.Since(startTime))
}()
- var allRules []*CompletedRule
+ var allPodRules, allNodeRules []*CompletedRule
for _, key := range keys {
rule, effective, realizable := c.ruleCache.GetCompletedRule(key)
// It's normal that a rule is not effective on this Node but abnormal that it is not realizable after watchers
@@ -803,7 +832,11 @@ func (c *Controller) syncRules(keys []string) error {
} else if !realizable {
klog.Errorf("Rule %s is effective but not realizable", key)
} else {
- if c.l7NetworkPolicyEnabled && len(rule.L7Protocols) != 0 {
+ var isNodeNetworkPolicy bool
+ if c.nodeNetworkPolicyEnabled {
+ isNodeNetworkPolicy = rule.isNodeNetworkPolicyRule()
+ }
+ if c.l7NetworkPolicyEnabled && len(rule.L7Protocols) != 0 && !isNodeNetworkPolicy {
// Allocate VLAN ID for the L7 rule.
vlanID := c.l7VlanIDAllocator.allocate(key)
rule.L7RuleVlanID = &vlanID
@@ -812,14 +845,28 @@ func (c *Controller) syncRules(keys []string) error {
return err
}
}
- allRules = append(allRules, rule)
+ if isNodeNetworkPolicy {
+ allNodeRules = append(allNodeRules, rule)
+ } else {
+ allPodRules = append(allPodRules, rule)
+ }
+ }
+ }
+ if c.nodeNetworkPolicyEnabled {
+ if err := c.nodeReconciler.BatchReconcile(allNodeRules); err != nil {
+ return err
}
}
- if err := c.reconciler.BatchReconcile(allRules); err != nil {
+ if err := c.podReconciler.BatchReconcile(allPodRules); err != nil {
return err
}
if c.statusManagerEnabled {
- for _, rule := range allRules {
+ for _, rule := range allPodRules {
+ if v1beta2.IsSourceAntreaNativePolicy(rule.SourceRef) {
+ c.statusManager.SetRuleRealization(rule.ID, rule.PolicyUID)
+ }
+ }
+ for _, rule := range allNodeRules {
if v1beta2.IsSourceAntreaNativePolicy(rule.SourceRef) {
c.statusManager.SetRuleRealization(rule.ID, rule.PolicyUID)
}
diff --git a/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go b/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go
index 58df524a48c..82012854bee 100644
--- a/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go
+++ b/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go
@@ -78,6 +78,7 @@ func newTestController() (*Controller, *fake.Clientset, *mockReconciler) {
groupCounters := []proxytypes.GroupCounter{proxytypes.NewGroupCounter(groupIDAllocator, ch2)}
fs := afero.NewMemMapFs()
controller, _ := NewNetworkPolicyController(&antreaClientGetter{clientset},
+ nil,
nil,
nil,
fs,
@@ -88,6 +89,7 @@ func newTestController() (*Controller, *fake.Clientset, *mockReconciler) {
ch2,
true,
true,
+ false,
true,
true,
false,
@@ -102,7 +104,7 @@ func newTestController() (*Controller, *fake.Clientset, *mockReconciler) {
&config.NodeConfig{},
wait.NewGroup())
reconciler := newMockReconciler()
- controller.reconciler = reconciler
+ controller.podReconciler = reconciler
controller.auditLogger = nil
return controller, clientset, reconciler
}
diff --git a/pkg/agent/controller/networkpolicy/node_reconciler_linux.go b/pkg/agent/controller/networkpolicy/node_reconciler_linux.go
new file mode 100644
index 00000000000..b2082c8d1db
--- /dev/null
+++ b/pkg/agent/controller/networkpolicy/node_reconciler_linux.go
@@ -0,0 +1,712 @@
+//go:build linux
+// +build linux
+
+// Copyright 2024 Antrea Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package networkpolicy
+
+import (
+ "fmt"
+ "net"
+ "sort"
+ "strings"
+ "sync"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/klog/v2"
+ utilnet "k8s.io/utils/net"
+
+ "antrea.io/antrea/pkg/agent/config"
+ "antrea.io/antrea/pkg/agent/route"
+ "antrea.io/antrea/pkg/agent/types"
+ "antrea.io/antrea/pkg/agent/util/iptables"
+ "antrea.io/antrea/pkg/apis/controlplane/v1beta2"
+ secv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1"
+ "antrea.io/antrea/pkg/util/ip"
+)
+
+const (
+ ipv4Any = "0.0.0.0/0"
+ ipv6Any = "::/0"
+)
+
+/*
+Terminology:
+In the following, a "service" refers to a port on which traffic is allowed, as defined in pkg/apis/controlplane/v1beta2/types.go.
+
+NodeNetworkPolicy data path implementation using iptables/ip6tables involves four components:
+1. Core iptables rule:
+ - Added to ANTREA-POL-INGRESS-RULES (ingress) or ANTREA-POL-EGRESS-RULES (egress).
+ - Matches an ipset created for the NodeNetworkPolicy rule as source (ingress) or destination (egress) when there are
+ multiple IP addresses; if there is only one address, matches the address directly.
+  - Targets an action directly (for a rule with at most one service) or a service chain created for the
+    NodeNetworkPolicy rule (for a rule with multiple services).
+2. Service iptables chain:
+ - Created for the NodeNetworkPolicy rule to integrate service iptables rules if a rule has multiple services.
+3. Service iptables rules:
+ - Added to the service chain created for the NodeNetworkPolicy rule.
+ - Constructed from the services of the NodeNetworkPolicy rule.
+4. From/To ipset:
+ - Created for the NodeNetworkPolicy rule, containing all source IP addresses (ingress) or destination IP addresses (egress).
+
+Assume four ingress NodeNetworkPolicy rules with IDs RULE1, RULE2, RULE3 and RULE4, prioritized in descending order.
+The core iptables rules are organized by priority in ANTREA-POL-INGRESS-RULES as follows.
+
+If a rule has multiple source IP addresses to match, an ipset will be created for it. The name of the ipset consists
+of the prefix "ANTREA-POL", the rule ID and the IP protocol version.
+
+If a rule has multiple services, an iptables chain and related rules will be created for it. The name of the chain
+consists of the prefix "ANTREA-POL" and the rule ID.
+
+```
+:ANTREA-POL-INGRESS-RULES
+-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-RULE1-4 src -j ANTREA-POL-RULE1 -m comment --comment "Antrea: for rule RULE1, policy AntreaClusterNetworkPolicy:name1"
+-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-RULE2-4 src -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule RULE2, policy AntreaClusterNetworkPolicy:name2"
+-A ANTREA-POL-INGRESS-RULES -s 3.3.3.3/32 -j ANTREA-POL-RULE3 -m comment --comment "Antrea: for rule RULE3, policy AntreaClusterNetworkPolicy:name3"
+-A ANTREA-POL-INGRESS-RULES -s 4.4.4.4/32 -p tcp --dport 80 -j ACCEPT -m comment --comment "Antrea: for rule RULE4, policy AntreaClusterNetworkPolicy:name4"
+```
+
+The first rule has multiple services and multiple source IP addresses to match, so a service iptables chain, service
+iptables rules and an ipset are created for it.
+
+The iptables chain is like the following:
+
+```
+:ANTREA-POL-RULE1
+-A ANTREA-POL-RULE1 -j ACCEPT -p tcp --dport 80
+-A ANTREA-POL-RULE1 -j ACCEPT -p tcp --dport 443
+```
+
+The ipset is like the following:
+
+```
+Name: ANTREA-POL-RULE1-4
+Type: hash:net
+Revision: 6
+Header: family inet hashsize 1024 maxelem 65536
+Size in memory: 472
+References: 1
+Number of entries: 2
+Members:
+1.1.1.1
+1.1.1.2
+```
+
+The second rule has only one service, so no service iptables chain or rules are created for it; the core rule matches
+the service and targets the action directly. The rule has multiple source IP addresses to match, so the ipset
+`ANTREA-POL-RULE2-4` is created for it.
+
+The third rule has multiple services to match, so a service iptables chain and service iptables rules are created for
+it. It has only one source IP address to match, so no ipset is created and the core rule matches the address directly.
+
+The fourth rule has only one service and one source IP address to match, so neither a service chain nor an ipset is
+created; the core rule matches the service and the source IP address and targets the action directly.
+*/
+
+// coreIPTRule caches a core iptables rule along with its rule ID and priority, so that the rules of a chain can be
+// kept sorted by priority.
+type coreIPTRule struct {
+ ruleID string
+ priority *types.Priority
+ ruleStr string
+}
+
+type chainKey struct {
+ name string
+ isIPv6 bool
+}
+
+// coreIPTChain caches the sorted iptables rules for a chain.
+type coreIPTChain struct {
+ rules []*coreIPTRule
+ sync.Mutex
+}
+
+func newIPTChain() *coreIPTChain {
+ return &coreIPTChain{}
+}
+
+// nodePolicyLastRealized is the struct cached by nodeReconciler. It's used to track the actual state of iptables rules
+// and chains we have enforced, so that we can know how to reconcile a rule when it's updated/removed.
+type nodePolicyLastRealized struct {
+ // ipsets tracks the last realized ipset names used in core iptables rules. It cannot coexist with ipNets.
+ ipsets map[iptables.Protocol]string
+ // ipNets tracks the last realized ipNet used in core iptables rules. It cannot coexist with ipsets.
+ ipNets map[iptables.Protocol]string
+	// serviceIPTChain tracks the last realized service iptables chain if a rule has multiple services.
+ serviceIPTChain string
+ // coreIPTChain tracks the last realized iptables chain where the core iptables rule is installed.
+ coreIPTChain string
+}
+
+func newNodePolicyLastRealized() *nodePolicyLastRealized {
+ return &nodePolicyLastRealized{
+ ipsets: make(map[iptables.Protocol]string),
+ ipNets: make(map[iptables.Protocol]string),
+ }
+}
+
+type nodeReconciler struct {
+ ipProtocols []iptables.Protocol
+ routeClient route.Interface
+ coreIPTChains map[chainKey]*coreIPTChain
+ // lastRealizeds caches the last realized rules. It's a mapping from ruleID to *nodePolicyLastRealized.
+ lastRealizeds sync.Map
+}
+
+func newNodeReconciler(routeClient route.Interface, ipv4Enabled, ipv6Enabled bool) *nodeReconciler {
+ var ipProtocols []iptables.Protocol
+ coreIPTChains := make(map[chainKey]*coreIPTChain)
+ if ipv4Enabled {
+ ipProtocols = append(ipProtocols, iptables.ProtocolIPv4)
+ coreIPTChains[newChainKey(config.NodeNetworkPolicyIngressRulesChain, false)] = newIPTChain()
+ coreIPTChains[newChainKey(config.NodeNetworkPolicyEgressRulesChain, false)] = newIPTChain()
+ }
+ if ipv6Enabled {
+ ipProtocols = append(ipProtocols, iptables.ProtocolIPv6)
+ coreIPTChains[newChainKey(config.NodeNetworkPolicyIngressRulesChain, true)] = newIPTChain()
+ coreIPTChains[newChainKey(config.NodeNetworkPolicyEgressRulesChain, true)] = newIPTChain()
+ }
+ return &nodeReconciler{
+ ipProtocols: ipProtocols,
+ routeClient: routeClient,
+ coreIPTChains: coreIPTChains,
+ }
+}
+
+// Reconcile checks whether the provided rule has been enforced or not, and invokes the add or update method accordingly.
+func (r *nodeReconciler) Reconcile(rule *CompletedRule) error {
+ klog.InfoS("Reconciling Node NetworkPolicy rule", "rule", rule.ID, "policy", rule.SourceRef.ToString())
+
+ value, exists := r.lastRealizeds.Load(rule.ID)
+ var err error
+ if !exists {
+ err = r.add(rule)
+ } else {
+ err = r.update(value.(*nodePolicyLastRealized), rule)
+ }
+ return err
+}
+
+func (r *nodeReconciler) RunIDAllocatorWorker(stopCh <-chan struct{}) {
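+	// No-op: Node NetworkPolicy rules are realized with iptables rather than Openflow, so there is no async rule
+	// cache with allocated IDs to maintain.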
+}
+
+func (r *nodeReconciler) BatchReconcile(rules []*CompletedRule) error {
+ var rulesToInstall []*CompletedRule
+ for _, rule := range rules {
+ if _, exists := r.lastRealizeds.Load(rule.ID); exists {
+ klog.ErrorS(nil, "Rule should not have been realized yet: initialization phase", "rule", rule.ID)
+ } else {
+ rulesToInstall = append(rulesToInstall, rule)
+ }
+ }
+ if err := r.batchAdd(rulesToInstall); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *nodeReconciler) batchAdd(rules []*CompletedRule) error {
+ lastRealizeds := make(map[string]*nodePolicyLastRealized)
+ serviceIPTChains := make(map[iptables.Protocol][]string)
+ serviceIPTRules := make(map[iptables.Protocol][][]string)
+ ingressCoreIPTRules := make(map[iptables.Protocol][]*coreIPTRule)
+ egressCoreIPTRules := make(map[iptables.Protocol][]*coreIPTRule)
+
+ for _, rule := range rules {
+ iptRules, lastRealized := r.computeIPTRules(rule)
+ ruleID := rule.ID
+ for ipProtocol, iptRule := range iptRules {
+ // Sync all ipsets.
+ if iptRule.IPSet != "" {
+ if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPSet(iptRule.IPSet, iptRule.IPSetMembers, iptRule.IsIPv6); err != nil {
+ return err
+ }
+ }
+ // Collect all service iptables rules and chains.
+ if iptRule.ServiceIPTChain != "" {
+ serviceIPTChains[ipProtocol] = append(serviceIPTChains[ipProtocol], iptRule.ServiceIPTChain)
+ serviceIPTRules[ipProtocol] = append(serviceIPTRules[ipProtocol], iptRule.ServiceIPTRules)
+ }
+
+ // Collect all core iptables rules.
+ coreIPTRule := &coreIPTRule{ruleID, iptRule.Priority, iptRule.CoreIPTRule}
+ if rule.Direction == v1beta2.DirectionIn {
+ ingressCoreIPTRules[ipProtocol] = append(ingressCoreIPTRules[ipProtocol], coreIPTRule)
+ } else {
+ egressCoreIPTRules[ipProtocol] = append(egressCoreIPTRules[ipProtocol], coreIPTRule)
+ }
+ }
+ lastRealizeds[ruleID] = lastRealized
+ }
+ for _, ipProtocol := range r.ipProtocols {
+ isIPv6 := iptables.IsIPv6Protocol(ipProtocol)
+ if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPTables(serviceIPTChains[ipProtocol], serviceIPTRules[ipProtocol], isIPv6); err != nil {
+ return err
+ }
+ if err := r.addOrUpdateCoreIPTRules(config.NodeNetworkPolicyIngressRulesChain, isIPv6, false, ingressCoreIPTRules[ipProtocol]...); err != nil {
+ return err
+ }
+ if err := r.addOrUpdateCoreIPTRules(config.NodeNetworkPolicyEgressRulesChain, isIPv6, false, egressCoreIPTRules[ipProtocol]...); err != nil {
+ return err
+ }
+ }
+
+ for ruleID, lastRealized := range lastRealizeds {
+ r.lastRealizeds.Store(ruleID, lastRealized)
+ }
+ return nil
+}
+
+func (r *nodeReconciler) Forget(ruleID string) error {
+ klog.InfoS("Forgetting rule", "rule", ruleID)
+
+ value, exists := r.lastRealizeds.Load(ruleID)
+ if !exists {
+ return nil
+ }
+
+ lastRealized := value.(*nodePolicyLastRealized)
+ coreIPTChain := lastRealized.coreIPTChain
+
+ for _, ipProtocol := range r.ipProtocols {
+ isIPv6 := iptables.IsIPv6Protocol(ipProtocol)
+ if err := r.deleteCoreIPRule(ruleID, coreIPTChain, isIPv6); err != nil {
+ return err
+ }
+ if lastRealized.ipsets[ipProtocol] != "" {
+ if err := r.routeClient.DeleteNodeNetworkPolicyIPSet(lastRealized.ipsets[ipProtocol], isIPv6); err != nil {
+ return err
+ }
+ }
+ if lastRealized.serviceIPTChain != "" {
+ if err := r.routeClient.DeleteNodeNetworkPolicyIPTables([]string{lastRealized.serviceIPTChain}, isIPv6); err != nil {
+ return err
+ }
+ }
+ }
+
+ r.lastRealizeds.Delete(ruleID)
+ return nil
+}
+
+func (r *nodeReconciler) GetRuleByFlowID(ruleFlowID uint32) (*types.PolicyRule, bool, error) {
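+	// Rule flow IDs are an Openflow concept and do not apply to iptables-based Node NetworkPolicy rules.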
+ return nil, false, nil
+}
+
+func (r *nodeReconciler) computeIPTRules(rule *CompletedRule) (map[iptables.Protocol]*types.NodePolicyRule, *nodePolicyLastRealized) {
+ ruleID := rule.ID
+ lastRealized := newNodePolicyLastRealized()
+ priority := &types.Priority{
+ TierPriority: *rule.TierPriority,
+ PolicyPriority: *rule.PolicyPriority,
+ RulePriority: rule.Priority,
+ }
+
+ var serviceIPTChain, serviceIPTRuleTarget, coreIPTRuleTarget string
+ var service *v1beta2.Service
+ if len(rule.Services) > 1 {
+ // If a rule has multiple services, create a chain to install iptables rules for these services, with the target
+ // of the services determined by the rule's action. The core iptables rule should target the chain.
+ serviceIPTChain = genServiceIPTRuleChain(ruleID)
+ serviceIPTRuleTarget = ruleActionToIPTTarget(rule.Action)
+ coreIPTRuleTarget = serviceIPTChain
+ lastRealized.serviceIPTChain = serviceIPTChain
+ } else {
+ // If a rule has no service or a single service, the target is determined by the rule's action, as there is no
+ // need to create a chain for a single-service iptables rule.
+ coreIPTRuleTarget = ruleActionToIPTTarget(rule.Action)
+ // If a rule has a single service, the core iptables rule directly incorporates the service.
+ if len(rule.Services) == 1 {
+ service = &rule.Services[0]
+ }
+ }
+ coreIPTChain := getCoreIPTChain(rule)
+ coreIPTRuleComment := genCoreIPTRuleComment(ruleID, rule.SourceRef.ToString())
+ lastRealized.coreIPTChain = coreIPTChain
+
+ nodePolicyRules := make(map[iptables.Protocol]*types.NodePolicyRule)
+ for _, ipProtocol := range r.ipProtocols {
+ isIPv6 := iptables.IsIPv6Protocol(ipProtocol)
+
+ var serviceIPTRules []string
+ if serviceIPTChain != "" {
+ serviceIPTRules = buildServiceIPTRules(ipProtocol, rule.Services, serviceIPTChain, serviceIPTRuleTarget)
+ }
+
+ ipNets := getIPNetsFromRule(rule, isIPv6)
+ var ipNet string
+ var ipset string
+ if ipNets.Len() > 1 {
+			// If a rule matches multiple source or destination ipNets, create an ipset containing these ipNets and
+			// use the ipset in the core iptables rule.
+ ipset = genIPSetName(ruleID, isIPv6)
+ lastRealized.ipsets[ipProtocol] = ipset
+ } else if ipNets.Len() == 1 {
+			// If a rule matches a single source or destination ipNet, use it in the core iptables rule directly.
+ ipNet, _ = ipNets.PopAny()
+ lastRealized.ipNets[ipProtocol] = ipNet
+ }
+
+ coreIPTRule := buildCoreIPTRule(ipProtocol,
+ coreIPTChain,
+ ipset,
+ ipNet,
+ coreIPTRuleTarget,
+ coreIPTRuleComment,
+ service,
+ rule.Direction == v1beta2.DirectionIn)
+
+ nodePolicyRules[ipProtocol] = &types.NodePolicyRule{
+ IPSet: ipset,
+ IPSetMembers: ipNets,
+ Priority: priority,
+ ServiceIPTChain: serviceIPTChain,
+ ServiceIPTRules: serviceIPTRules,
+ CoreIPTChain: coreIPTChain,
+ CoreIPTRule: coreIPTRule,
+ IsIPv6: isIPv6,
+ }
+ }
+
+ return nodePolicyRules, lastRealized
+}
+
+func (r *nodeReconciler) add(rule *CompletedRule) error {
+ klog.V(2).InfoS("Adding new rule", "rule", rule)
+ ruleID := rule.ID
+ iptRules, lastRealized := r.computeIPTRules(rule)
+ for _, iptRule := range iptRules {
+ if iptRule.IPSet != "" {
+ if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPSet(iptRule.IPSet, iptRule.IPSetMembers, iptRule.IsIPv6); err != nil {
+ return err
+ }
+ }
+ if iptRule.ServiceIPTChain != "" {
+ if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{iptRule.ServiceIPTChain}, [][]string{iptRule.ServiceIPTRules}, iptRule.IsIPv6); err != nil {
+ return err
+ }
+ }
+ if err := r.addOrUpdateCoreIPTRules(iptRule.CoreIPTChain, iptRule.IsIPv6, false, &coreIPTRule{ruleID, iptRule.Priority, iptRule.CoreIPTRule}); err != nil {
+ return err
+ }
+ }
+ r.lastRealizeds.Store(ruleID, lastRealized)
+ return nil
+}
+
+func (r *nodeReconciler) update(lastRealized *nodePolicyLastRealized, newRule *CompletedRule) error {
+ klog.V(2).InfoS("Updating existing rule", "rule", newRule)
+ ruleID := newRule.ID
+ newIPTRules, newLastRealized := r.computeIPTRules(newRule)
+
+ for _, ipProtocol := range r.ipProtocols {
+ iptRule := newIPTRules[ipProtocol]
+
+ prevIPNet := lastRealized.ipNets[ipProtocol]
+ ipNet := newLastRealized.ipNets[ipProtocol]
+ prevIPSet := lastRealized.ipsets[ipProtocol]
+ ipset := newLastRealized.ipsets[ipProtocol]
+
+ if ipset != "" {
+ if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPSet(iptRule.IPSet, iptRule.IPSetMembers, iptRule.IsIPv6); err != nil {
+ return err
+ }
+ } else if prevIPSet != "" {
+ if err := r.routeClient.DeleteNodeNetworkPolicyIPSet(lastRealized.ipsets[ipProtocol], iptRule.IsIPv6); err != nil {
+ return err
+ }
+ }
+ if prevIPSet != ipset || prevIPNet != ipNet {
+ if err := r.addOrUpdateCoreIPTRules(iptRule.CoreIPTChain, iptRule.IsIPv6, true, &coreIPTRule{ruleID, iptRule.Priority, iptRule.CoreIPTRule}); err != nil {
+ return err
+ }
+ }
+ }
+
+ r.lastRealizeds.Store(ruleID, newLastRealized)
+ return nil
+}
+
+func (r *nodeReconciler) addOrUpdateCoreIPTRules(iptChain string, isIPv6 bool, isUpdate bool, iptRules ...*coreIPTRule) error {
+ if len(iptRules) == 0 {
+ return nil
+ }
+
+ cachedCoreIPTChain := r.getCoreIPTChain(iptChain, isIPv6)
+ cachedCoreIPTChain.Lock()
+ defer cachedCoreIPTChain.Unlock()
+
+ cachedIPTRules := cachedCoreIPTChain.rules
+ if isUpdate {
+ // Build a map to store the mapping of rule ID to rule to add.
+ iptRulesToUpdate := make(map[string]*coreIPTRule)
+ for _, iptRule := range iptRules {
+ iptRulesToUpdate[iptRule.ruleID] = iptRule
+ }
+ // Iterate each existing rule. If an existing rule is in the iptRulesToUpdate map, replace it with the new rule.
+ for index, iptRule := range cachedIPTRules {
+ if _, exists := iptRulesToUpdate[iptRule.ruleID]; exists {
+ cachedIPTRules[index] = iptRulesToUpdate[iptRule.ruleID]
+ }
+ }
+ } else {
+ // If these are new rules, append the new rules then sort all rules.
+ cachedIPTRules = append(cachedIPTRules, iptRules...)
+ sort.Slice(cachedIPTRules, func(i, j int) bool {
+ return !cachedIPTRules[i].priority.Less(*cachedIPTRules[j].priority)
+ })
+ }
+
+ // Get all the sorted iptables rules and synchronize them.
+ var iptRuleStrs []string
+ for _, r := range cachedIPTRules {
+ iptRuleStrs = append(iptRuleStrs, r.ruleStr)
+ }
+ if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{iptChain}, [][]string{iptRuleStrs}, isIPv6); err != nil {
+ return err
+ }
+
+	// Cache the new iptables rules.
+ cachedCoreIPTChain.rules = cachedIPTRules
+ return nil
+}
+
+func (r *nodeReconciler) deleteCoreIPRule(ruleID string, iptChain string, isIPv6 bool) error {
+ coreIPTChain := r.getCoreIPTChain(iptChain, isIPv6)
+ coreIPTChain.Lock()
+ defer coreIPTChain.Unlock()
+
+ // Get all cached iptables rules, then delete the rule with the given ruleID.
+ iptRules := coreIPTChain.rules
+ indexToDelete := -1
+ for i := 0; i < len(iptRules); i++ {
+ if iptRules[i].ruleID == ruleID {
+ indexToDelete = i
+ break
+ }
+ }
+ // If the rule is not found, return directly.
+ if indexToDelete == -1 {
+ return nil
+ }
+ iptRules = append(iptRules[:indexToDelete], iptRules[indexToDelete+1:]...)
+
+ // Get all the sorted iptables rules and synchronize them.
+ var iptRuleStrs []string
+ for _, r := range iptRules {
+ iptRuleStrs = append(iptRuleStrs, r.ruleStr)
+ }
+ if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{iptChain}, [][]string{iptRuleStrs}, isIPv6); err != nil {
+ return err
+ }
+
+	// Cache the new iptables rules.
+ coreIPTChain.rules = iptRules
+ return nil
+}
+
+func (r *nodeReconciler) getCoreIPTChain(iptChain string, isIPv6 bool) *coreIPTChain {
+	// There are 4 categories of cached core iptables rules:
+	// - For IPv4, iptables rules installed in the chain ANTREA-POL-INGRESS-RULES for ingress rules.
+	// - For IPv6, ip6tables rules installed in the chain ANTREA-POL-INGRESS-RULES for ingress rules.
+	// - For IPv4, iptables rules installed in the chain ANTREA-POL-EGRESS-RULES for egress rules.
+	// - For IPv6, ip6tables rules installed in the chain ANTREA-POL-EGRESS-RULES for egress rules.
+ return r.coreIPTChains[newChainKey(iptChain, isIPv6)]
+}
+
+func groupMembersToIPNets(groups v1beta2.GroupMemberSet, isIPv6 bool) sets.Set[string] {
+ ipNets := sets.New[string]()
+ suffix := "/32"
+ if isIPv6 {
+ suffix = "/128"
+ }
+ for _, member := range groups {
+ for _, ip := range member.IPs {
+ ipAddr := net.IP(ip)
+ if isIPv6 == utilnet.IsIPv6(ipAddr) {
+ ipNets.Insert(ipAddr.String() + suffix)
+ }
+ }
+ }
+ return ipNets
+}
+
+func ipBlocksToIPNets(ipBlocks []v1beta2.IPBlock, isIPv6 bool) []string {
+ var ipNets []string
+ for _, b := range ipBlocks {
+ blockCIDR := ip.IPNetToNetIPNet(&b.CIDR)
+ if isIPv6 != utilnet.IsIPv6CIDR(blockCIDR) {
+ continue
+ }
+ exceptIPNets := make([]*net.IPNet, 0, len(b.Except))
+ for i := range b.Except {
+ c := b.Except[i]
+ except := ip.IPNetToNetIPNet(&c)
+ exceptIPNets = append(exceptIPNets, except)
+ }
+ diffCIDRs, err := ip.DiffFromCIDRs(blockCIDR, exceptIPNets)
+ if err != nil {
+ klog.ErrorS(err, "Error when computing effective CIDRs by removing except IPNets from IPBlock")
+ continue
+ }
+ for _, d := range diffCIDRs {
+ ipNets = append(ipNets, d.String())
+ }
+ }
+ return ipNets
+}
+
+func getIPNetsFromRule(rule *CompletedRule, isIPv6 bool) sets.Set[string] {
+ var set sets.Set[string]
+ if rule.Direction == v1beta2.DirectionIn {
+ set = groupMembersToIPNets(rule.FromAddresses, isIPv6)
+ set.Insert(ipBlocksToIPNets(rule.From.IPBlocks, isIPv6)...)
+ } else {
+ set = groupMembersToIPNets(rule.ToAddresses, isIPv6)
+ set.Insert(ipBlocksToIPNets(rule.To.IPBlocks, isIPv6)...)
+ }
+ if isIPv6 && set.Has(ipv6Any) {
+ return sets.New[string](ipv6Any)
+ }
+ if !isIPv6 && set.Has(ipv4Any) {
+ return sets.New[string](ipv4Any)
+ }
+ return set
+}
+
+func getCoreIPTChain(rule *CompletedRule) string {
+ if rule.Direction == v1beta2.DirectionIn {
+ return config.NodeNetworkPolicyIngressRulesChain
+ }
+ return config.NodeNetworkPolicyEgressRulesChain
+}
+
+func buildCoreIPTRule(ipProtocol iptables.Protocol,
+ iptChain string,
+ ipset string,
+ ipNet string,
+ iptRuleTarget string,
+ iptRuleComment string,
+ service *v1beta2.Service,
+ isIngress bool) string {
+ builder := iptables.NewRuleBuilder(iptChain)
+ if isIngress {
+ if ipset != "" {
+ builder = builder.MatchIPSetSrc(ipset)
+ } else if ipNet != "" {
+ builder = builder.MatchCIDRSrc(ipNet)
+ } else {
+ builder = builder.MatchNoSrc(ipProtocol)
+ }
+ } else {
+ if ipset != "" {
+ builder = builder.MatchIPSetDst(ipset)
+ } else if ipNet != "" {
+ builder = builder.MatchCIDRDst(ipNet)
+ } else {
+ builder = builder.MatchNoDst(ipProtocol)
+ }
+ }
+ if service != nil {
+ transProtocol := getServiceTransProtocol(service.Protocol)
+ switch transProtocol {
+ case "tcp":
+ fallthrough
+ case "udp":
+ fallthrough
+ case "sctp":
+ builder = builder.MatchTransProtocol(transProtocol).
+ MatchSrcPort(service.SrcPort, service.SrcEndPort).
+ MatchDstPort(service.Port, service.EndPort)
+ case "icmp":
+ builder = builder.MatchICMP(service.ICMPType, service.ICMPCode, ipProtocol)
+ }
+ }
+ return builder.SetTarget(iptRuleTarget).
+ SetComment(iptRuleComment).
+ Done().
+ GetRule()
+}
+
+func buildServiceIPTRules(ipProtocol iptables.Protocol, services []v1beta2.Service, iptChain string, iptRuleTarget string) []string {
+ var rules []string
+ builder := iptables.NewRuleBuilder(iptChain)
+ for _, svc := range services {
+ copiedBuilder := builder.CopyBuilder()
+ transProtocol := getServiceTransProtocol(svc.Protocol)
+ switch transProtocol {
+ case "tcp":
+ fallthrough
+ case "udp":
+ fallthrough
+ case "sctp":
+ copiedBuilder = copiedBuilder.MatchTransProtocol(transProtocol).
+ MatchSrcPort(svc.SrcPort, svc.SrcEndPort).
+ MatchDstPort(svc.Port, svc.EndPort)
+ case "icmp":
+ copiedBuilder = copiedBuilder.MatchICMP(svc.ICMPType, svc.ICMPCode, ipProtocol)
+ }
+ rules = append(rules, copiedBuilder.SetTarget(iptRuleTarget).
+ Done().
+ GetRule())
+ }
+ return rules
+}
+
+func genServiceIPTRuleChain(ruleID string) string {
+ return fmt.Sprintf("%s-%s", config.NodeNetworkPolicyPrefix, strings.ToUpper(ruleID))
+}
+
+func genIPSetName(ruleID string, isIPv6 bool) string {
+ suffix := "4"
+ if isIPv6 {
+ suffix = "6"
+ }
+ return fmt.Sprintf("%s-%s-%s", config.NodeNetworkPolicyPrefix, strings.ToUpper(ruleID), suffix)
+}
+
+func ruleActionToIPTTarget(ruleAction *secv1beta1.RuleAction) string {
+ var target string
+ switch *ruleAction {
+ case secv1beta1.RuleActionDrop:
+ target = iptables.DropTarget
+ case secv1beta1.RuleActionReject:
+ target = iptables.RejectTarget
+ case secv1beta1.RuleActionAllow:
+ target = iptables.AcceptTarget
+ }
+ return target
+}
+
+func getServiceTransProtocol(protocol *v1beta2.Protocol) string {
+ if protocol == nil {
+ return "tcp"
+ }
+ return strings.ToLower(string(*protocol))
+}
+
+func genCoreIPTRuleComment(ruleID, policyName string) string {
+ return fmt.Sprintf("Antrea: for rule %s, policy %s", ruleID, policyName)
+}
+
+func newChainKey(name string, isIPv6 bool) chainKey {
+ return chainKey{
+ name: name,
+ isIPv6: isIPv6,
+ }
+}
diff --git a/pkg/agent/controller/networkpolicy/node_reconciler_linux_test.go b/pkg/agent/controller/networkpolicy/node_reconciler_linux_test.go
new file mode 100644
index 00000000000..633a1cf240b
--- /dev/null
+++ b/pkg/agent/controller/networkpolicy/node_reconciler_linux_test.go
@@ -0,0 +1,1094 @@
+//go:build linux
+// +build linux
+
+// Copyright 2024 Antrea Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package networkpolicy
+
+import (
+ "net"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/mock/gomock"
+ "k8s.io/apimachinery/pkg/util/sets"
+
+ routetest "antrea.io/antrea/pkg/agent/route/testing"
+ "antrea.io/antrea/pkg/apis/controlplane/v1beta2"
+ secv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1"
+)
+
+var (
+ ruleActionAllow = secv1beta1.RuleActionAllow
+
+ ipv4Net1 = newCIDR("192.168.1.0/24")
+ ipv6Net1 = newCIDR("fec0::192:168:1:0/124")
+ ipv4Net2 = newCIDR("192.168.1.128/25")
+ ipv6Net2 = newCIDR("fec0::192:168:1:1/125")
+ ipBlocks = v1beta2.NetworkPolicyPeer{
+ IPBlocks: []v1beta2.IPBlock{
+ {
+ CIDR: v1beta2.IPNet{IP: v1beta2.IPAddress(ipv4Net1.IP), PrefixLength: 24},
+ Except: []v1beta2.IPNet{
+ {IP: v1beta2.IPAddress(ipv4Net2.IP), PrefixLength: 25},
+ },
+ },
+ {
+ CIDR: v1beta2.IPNet{IP: v1beta2.IPAddress(ipv6Net1.IP), PrefixLength: 124},
+ Except: []v1beta2.IPNet{
+ {IP: v1beta2.IPAddress(ipv6Net2.IP), PrefixLength: 125},
+ },
+ },
+ },
+ }
+ ipBlocksToMatchAny = v1beta2.NetworkPolicyPeer{
+ IPBlocks: []v1beta2.IPBlock{
+ {
+ CIDR: v1beta2.IPNet{IP: v1beta2.IPAddress(net.IPv4zero), PrefixLength: 0},
+ },
+ {
+ CIDR: v1beta2.IPNet{IP: v1beta2.IPAddress(net.IPv4zero), PrefixLength: 0},
+ },
+ },
+ }
+
+ policyPriority1 = float64(1)
+ tierPriority1 = int32(1)
+ tierPriority2 = int32(2)
+
+ ingressRuleID1 = "ingressRule1"
+ ingressRuleID2 = "ingressRule2"
+ ingressRuleID3 = "ingressRule3"
+ egressRuleID1 = "egressRule1"
+ egressRuleID2 = "egressRule2"
+ ingressRule1 = &CompletedRule{
+ rule: &rule{
+ ID: ingressRuleID1,
+ Name: "rule-01",
+ PolicyName: "ingress-policy",
+ From: ipBlocks,
+ Direction: v1beta2.DirectionIn,
+ Services: []v1beta2.Service{serviceTCP80, serviceTCP443},
+ Action: &ruleActionAllow,
+ Priority: 1,
+ PolicyPriority: &policyPriority1,
+ TierPriority: &tierPriority1,
+ SourceRef: &cnp1,
+ },
+ FromAddresses: dualAddressGroup1,
+ ToAddresses: nil,
+ }
+ ingressRule2 = &CompletedRule{
+ rule: &rule{
+ ID: ingressRuleID2,
+ Name: "rule-02",
+ PolicyName: "ingress-policy",
+ Direction: v1beta2.DirectionIn,
+ Services: []v1beta2.Service{serviceTCP443},
+ Action: &ruleActionAllow,
+ Priority: 2,
+ PolicyPriority: &policyPriority1,
+ TierPriority: &tierPriority2,
+ SourceRef: &cnp1,
+ },
+ FromAddresses: dualAddressGroup1,
+ ToAddresses: nil,
+ }
+ ingressRule3 = &CompletedRule{
+ rule: &rule{
+ ID: ingressRuleID3,
+ Name: "rule-03",
+ PolicyName: "ingress-policy",
+ From: ipBlocksToMatchAny,
+ Direction: v1beta2.DirectionIn,
+ Services: []v1beta2.Service{serviceTCP8080},
+ Action: &ruleActionAllow,
+ Priority: 3,
+ PolicyPriority: &policyPriority1,
+ TierPriority: &tierPriority2,
+ SourceRef: &cnp1,
+ },
+ FromAddresses: nil,
+ ToAddresses: nil,
+ }
+ ingressRule3WithFromAnyAddress = ingressRule3
+ updatedIngressRule3WithOneFromAddress = &CompletedRule{
+ rule: &rule{
+ ID: ingressRuleID3,
+ Name: "rule-03",
+ PolicyName: "ingress-policy",
+ Direction: v1beta2.DirectionIn,
+ Services: []v1beta2.Service{serviceTCP8080},
+ Action: &ruleActionAllow,
+ Priority: 3,
+ PolicyPriority: &policyPriority1,
+ TierPriority: &tierPriority2,
+ SourceRef: &cnp1,
+ },
+ FromAddresses: addressGroup1,
+ ToAddresses: nil,
+ }
+ updatedIngressRule3WithAnotherFromAddress = &CompletedRule{
+ rule: &rule{
+ ID: ingressRuleID3,
+ Name: "rule-03",
+ PolicyName: "ingress-policy",
+ Direction: v1beta2.DirectionIn,
+ Services: []v1beta2.Service{serviceTCP8080},
+ Action: &ruleActionAllow,
+ Priority: 3,
+ PolicyPriority: &policyPriority1,
+ TierPriority: &tierPriority2,
+ SourceRef: &cnp1,
+ },
+ FromAddresses: addressGroup2,
+ ToAddresses: nil,
+ }
+ updatedIngressRule3WithMultipleFromAddresses = &CompletedRule{
+ rule: &rule{
+ ID: ingressRuleID3,
+ Name: "rule-03",
+ PolicyName: "ingress-policy",
+ Direction: v1beta2.DirectionIn,
+ Services: []v1beta2.Service{serviceTCP8080},
+ Action: &ruleActionAllow,
+ Priority: 3,
+ PolicyPriority: &policyPriority1,
+ TierPriority: &tierPriority2,
+ SourceRef: &cnp1,
+ },
+ FromAddresses: addressGroup2.Union(addressGroup1),
+ ToAddresses: nil,
+ }
+ updatedIngressRule3WithOtherMultipleFromAddresses = &CompletedRule{
+ rule: &rule{
+ ID: ingressRuleID3,
+ Name: "rule-03",
+ PolicyName: "ingress-policy",
+ Direction: v1beta2.DirectionIn,
+ Services: []v1beta2.Service{serviceTCP8080},
+ Action: &ruleActionAllow,
+ Priority: 3,
+ PolicyPriority: &policyPriority1,
+ TierPriority: &tierPriority2,
+ SourceRef: &cnp1,
+ },
+ FromAddresses: addressGroup2.Union(v1beta2.NewGroupMemberSet(newAddressGroupMember("1.1.1.3"))),
+ ToAddresses: nil,
+ }
+ updatedIngressRule3WithFromNoAddress = &CompletedRule{
+ rule: &rule{
+ ID: ingressRuleID3,
+ Name: "rule-03",
+ PolicyName: "ingress-policy",
+ Direction: v1beta2.DirectionIn,
+ Services: []v1beta2.Service{serviceTCP8080},
+ Action: &ruleActionAllow,
+ Priority: 3,
+ PolicyPriority: &policyPriority1,
+ TierPriority: &tierPriority2,
+ SourceRef: &cnp1,
+ },
+ FromAddresses: nil,
+ ToAddresses: nil,
+ }
+ egressRule1 = &CompletedRule{
+ rule: &rule{
+ ID: egressRuleID1,
+ Name: "rule-01",
+ PolicyName: "egress-policy",
+ Direction: v1beta2.DirectionOut,
+ Services: []v1beta2.Service{serviceTCP80, serviceTCP443},
+ Action: &ruleActionAllow,
+ Priority: 1,
+ PolicyPriority: &policyPriority1,
+ TierPriority: &tierPriority1,
+ SourceRef: &cnp1,
+ },
+ ToAddresses: dualAddressGroup1,
+ FromAddresses: nil,
+ }
+ egressRule2 = &CompletedRule{
+ rule: &rule{
+ ID: egressRuleID2,
+ Name: "rule-02",
+ PolicyName: "egress-policy",
+ Direction: v1beta2.DirectionOut,
+ Services: []v1beta2.Service{serviceTCP443},
+ Action: &ruleActionAllow,
+ Priority: 2,
+ PolicyPriority: &policyPriority1,
+ TierPriority: &tierPriority2,
+ SourceRef: &cnp1,
+ },
+ ToAddresses: dualAddressGroup1,
+ FromAddresses: nil,
+ }
+)
+
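+// newTestNodeReconciler creates a nodeReconciler backed by a mock route client.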
+func newTestNodeReconciler(mockRouteClient *routetest.MockInterface, ipv4Enabled, ipv6Enabled bool) *nodeReconciler {
+ return newNodeReconciler(mockRouteClient, ipv4Enabled, ipv6Enabled)
+}
+
+func TestNodeReconcilerReconcileAndForget(t *testing.T) {
+ tests := []struct {
+ name string
+ rulesToAdd []*CompletedRule
+ rulesToForget []string
+ ipv4Enabled bool
+ ipv6Enabled bool
+ expectedCalls func(mockRouteClient *routetest.MockInterfaceMockRecorder)
+ }{
+ {
+ name: "IPv4, add an ingress rule, then forget it",
+ ipv4Enabled: true,
+ ipv6Enabled: false,
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ serviceRules := [][]string{
+ {
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ coreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, serviceRules, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules, false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, [][]string{nil}, false).Times(1)
+ },
+ rulesToAdd: []*CompletedRule{
+ ingressRule1,
+ },
+ rulesToForget: []string{
+ ingressRuleID1,
+ },
+ },
+ {
+ name: "IPv6, add an egress rule, then forget it",
+ ipv4Enabled: false,
+ ipv6Enabled: true,
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ serviceRules := [][]string{
+ {
+ "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ coreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-EGRESSRULE1"}, serviceRules, true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-EGRESS-RULES"}, coreRules, true).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-EGRESSRULE1"}, true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-EGRESS-RULES"}, [][]string{nil}, true).Times(1)
+ },
+ rulesToAdd: []*CompletedRule{
+ egressRule1,
+ },
+ rulesToForget: []string{
+ egressRuleID1,
+ },
+ },
+ {
+ name: "Dualstack, add an ingress rule, then forget it",
+ ipv4Enabled: true,
+ ipv6Enabled: true,
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ serviceRules := [][]string{
+ {
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ coreRulesIPv4 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRulesIPv6 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-6 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, serviceRules, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesIPv4, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", sets.New[string]("2002:1a23:fb44::1/128", "fec0::192:168:1:8/125"), true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, serviceRules, true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesIPv6, true).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, [][]string{nil}, false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", true).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, [][]string{nil}, true).Times(1)
+ },
+ rulesToAdd: []*CompletedRule{
+ ingressRule1,
+ },
+ rulesToForget: []string{
+ ingressRuleID1,
+ },
+ },
+ {
+ name: "IPv4, add multiple ingress rules whose priorities are in ascending order, then forget some",
+ ipv4Enabled: true,
+ ipv6Enabled: false,
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ serviceRules1 := [][]string{
+ {
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ coreRules1 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRules2 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRules3 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRulesDelete3 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRulesDelete2 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, serviceRules1, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules1, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules2, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules3, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesDelete3, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesDelete2, false).Times(1)
+ },
+ rulesToAdd: []*CompletedRule{
+ ingressRule1,
+ ingressRule2,
+ ingressRule3,
+ },
+ rulesToForget: []string{
+ ingressRuleID3,
+ ingressRuleID2,
+ },
+ },
+ {
+ name: "IPv4, add multiple ingress rules whose priorities are in descending order, then forget some",
+ ipv4Enabled: true,
+ ipv6Enabled: false,
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ coreRules3 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRules2 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ serviceRules1 := [][]string{
+ {
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ coreRules1 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRulesDelete3 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRulesDelete1 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules3, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules2, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, serviceRules1, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules1, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesDelete3, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesDelete1, false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false).Times(1)
+ },
+ rulesToAdd: []*CompletedRule{
+ ingressRule3,
+ ingressRule2,
+ ingressRule1,
+ },
+ rulesToForget: []string{
+ ingressRuleID3,
+ ingressRuleID1,
+ },
+ },
+ {
+ name: "IPv4, add multiple ingress rules whose priorities are in random order, then forget some",
+ ipv4Enabled: true,
+ ipv6Enabled: false,
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ coreRules2 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ serviceRules1 := [][]string{
+ {
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ coreRules1 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRules3 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRulesDelete2 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRulesDelete1 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules2, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, serviceRules1, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules1, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules3, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesDelete2, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesDelete1, false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false).Times(1)
+ },
+ rulesToAdd: []*CompletedRule{
+ ingressRule2,
+ ingressRule1,
+ ingressRule3,
+ },
+ rulesToForget: []string{
+ ingressRuleID2,
+ ingressRuleID1,
+ },
+ },
+ {
+ name: "IPv4, add an ingress rule, then update it several times, forget it finally",
+ ipv4Enabled: true,
+ ipv6Enabled: false,
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ coreRules1 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRules2 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRules3 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.2/32 -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRules4 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE3-4 src -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRules6 := coreRules2
+ coreRules7 := coreRules1
+ coreRules8 := coreRules4
+ coreRules9 := coreRules1
+ coreRules10 := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES ! -s 0.0.0.0/0 -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ coreRules11 := coreRules4
+ coreRules12 := coreRules10
+ coreRules13 := coreRules1
+
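+ // Each reconciliation step sN must be observed after the previous one; the
+ // pN parts within a step are only ordered relative to other steps, not to
+ // each other.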
+ s1 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules1, false).Times(1)
+ s2 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules2, false).Times(1)
+ s3 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules3, false).Times(1)
+ s4p1 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE3-4", sets.New[string]("1.1.1.1/32", "1.1.1.2/32"), false).Times(1)
+ s4p2 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules4, false).Times(1)
+ s5 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE3-4", sets.New[string]("1.1.1.2/32", "1.1.1.3/32"), false).Times(1)
+ s6p1 := mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE3-4", false).Times(1)
+ s6p2 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules6, false).Times(1)
+ s7 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules7, false).Times(1)
+ s8p1 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE3-4", sets.New[string]("1.1.1.1/32", "1.1.1.2/32"), false).Times(1)
+ s8p2 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules8, false).Times(1)
+ s9p1 := mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE3-4", false).Times(1)
+ s9p2 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules9, false).Times(1)
+ s10 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules10, false).Times(1)
+ s11p1 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE3-4", sets.New[string]("1.1.1.1/32", "1.1.1.2/32"), false).Times(1)
+ s11p2 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules11, false).Times(1)
+ s12p1 := mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE3-4", false).Times(1)
+ s12p2 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules12, false).Times(1)
+ s13 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules13, false).Times(1)
+ s2.After(s1)
+ s3.After(s2)
+ s4p1.After(s3)
+ s4p2.After(s3)
+ s5.After(s4p1)
+ s5.After(s4p2)
+ s6p1.After(s5)
+ s6p2.After(s5)
+ s7.After(s6p2)
+ s8p1.After(s7)
+ s8p2.After(s7)
+ s9p1.After(s8p2)
+ s9p2.After(s8p2)
+ s10.After(s9p2)
+ s11p1.After(s10)
+ s11p2.After(s10)
+ s12p1.After(s11p2)
+ s12p2.After(s11p2)
+ s13.After(s12p2)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, [][]string{nil}, false).Times(1)
+ },
+ rulesToAdd: []*CompletedRule{
+ ingressRule3WithFromAnyAddress,
+ updatedIngressRule3WithOneFromAddress,
+ updatedIngressRule3WithAnotherFromAddress,
+ updatedIngressRule3WithMultipleFromAddresses,
+ updatedIngressRule3WithOtherMultipleFromAddresses,
+ updatedIngressRule3WithOneFromAddress,
+ ingressRule3WithFromAnyAddress,
+ updatedIngressRule3WithMultipleFromAddresses,
+ ingressRule3WithFromAnyAddress,
+ updatedIngressRule3WithFromNoAddress,
+ updatedIngressRule3WithMultipleFromAddresses,
+ updatedIngressRule3WithFromNoAddress,
+ ingressRule3WithFromAnyAddress,
+ },
+ rulesToForget: []string{
+ ingressRuleID3,
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ controller := gomock.NewController(t)
+ mockRouteClient := routetest.NewMockInterface(controller)
+ r := newTestNodeReconciler(mockRouteClient, tt.ipv4Enabled, tt.ipv6Enabled)
+
+ tt.expectedCalls(mockRouteClient.EXPECT())
+ for _, rule := range tt.rulesToAdd {
+ assert.NoError(t, r.Reconcile(rule))
+ }
+ for _, rule := range tt.rulesToForget {
+ assert.NoError(t, r.Forget(rule))
+ }
+ })
+ }
+}
+
+func TestNodeReconcilerBatchReconcileAndForget(t *testing.T) {
+ tests := []struct {
+ name string
+ ipv4Enabled bool
+ ipv6Enabled bool
+ rulesToAdd []*CompletedRule
+ rulesToForget []string
+ expectedCalls func(mockRouteClient *routetest.MockInterfaceMockRecorder)
+ }{
+ {
+ name: "IPv4, add ingress rules in batch, then forget one",
+ ipv4Enabled: true,
+ rulesToAdd: []*CompletedRule{
+ ingressRule1,
+ ingressRule2,
+ },
+ rulesToForget: []string{
+ ingressRuleID1,
+ },
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ coreChains := []string{
+ "ANTREA-POL-INGRESS-RULES",
+ }
+ coreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ svcChains := []string{
+ "ANTREA-POL-INGRESSRULE1",
+ }
+ svcRules := [][]string{
+ {
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ updatedCoreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, svcRules, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, coreRules, false).Times(1)
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedCoreRules, false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables(svcChains, false).Times(1)
+ },
+ },
+ {
+ name: "IPv6, add ingress rules in batch, then forget one",
+ ipv6Enabled: true,
+ rulesToAdd: []*CompletedRule{
+ ingressRule1,
+ ingressRule2,
+ },
+ rulesToForget: []string{
+ ingressRuleID2,
+ },
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ coreChains := []string{
+ "ANTREA-POL-INGRESS-RULES",
+ }
+ coreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-6 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ svcChains := []string{
+ "ANTREA-POL-INGRESSRULE1",
+ }
+ svcRules := [][]string{
+ {
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ updatedCoreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-6 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", sets.New[string]("2002:1a23:fb44::1/128", "fec0::192:168:1:8/125"), true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, svcRules, true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, coreRules, true).Times(1)
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedCoreRules, true).Times(1)
+ },
+ },
+ {
+ name: "dualstack, add ingress rules in batch, then forget one",
+ ipv4Enabled: true,
+ ipv6Enabled: true,
+ rulesToAdd: []*CompletedRule{
+ ingressRule1,
+ ingressRule2,
+ },
+ rulesToForget: []string{
+ ingressRuleID1,
+ },
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ coreChains := []string{
+ "ANTREA-POL-INGRESS-RULES",
+ }
+ ipv4CoreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ ipv6CoreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-6 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ svcChains := []string{
+ "ANTREA-POL-INGRESSRULE1",
+ }
+ ipv4SvcRules := [][]string{
+ {
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ ipv6SvcRules := ipv4SvcRules
+ updatedIPv4CoreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ updatedIPv6CoreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -s 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, ipv4SvcRules, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, ipv4CoreRules, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", sets.New[string]("2002:1a23:fb44::1/128", "fec0::192:168:1:8/125"), true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, ipv6SvcRules, true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, ipv6CoreRules, true).Times(1)
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedIPv4CoreRules, false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedIPv6CoreRules, true).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", true).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, true).Times(1)
+ },
+ },
+ {
+ name: "IPv4, add egress rules in batch, then forget one",
+ ipv4Enabled: true,
+ rulesToAdd: []*CompletedRule{
+ egressRule1,
+ egressRule2,
+ },
+ rulesToForget: []string{
+ egressRuleID1,
+ },
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ coreChains := []string{
+ "ANTREA-POL-EGRESS-RULES",
+ }
+ coreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ svcChains := []string{
+ "ANTREA-POL-EGRESSRULE1",
+ }
+ svcRules := [][]string{
+ {
+ "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ updatedCoreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, svcRules, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, coreRules, false).Times(1)
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedCoreRules, false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables(svcChains, false).Times(1)
+ },
+ },
+ {
+ name: "IPv6, add egress rules in batch, then forget one",
+ ipv6Enabled: true,
+ rulesToAdd: []*CompletedRule{
+ egressRule1,
+ egressRule2,
+ },
+ rulesToForget: []string{
+ egressRuleID1,
+ },
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ coreChains := []string{
+ "ANTREA-POL-EGRESS-RULES",
+ }
+ coreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ svcChains := []string{
+ "ANTREA-POL-EGRESSRULE1",
+ }
+ svcRules := [][]string{
+ {
+ "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ updatedCoreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, svcRules, true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, coreRules, true).Times(1)
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedCoreRules, true).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables(svcChains, true).Times(1)
+ },
+ },
+ {
+ name: "dualstack, add egress rules in batch, then forget one",
+ ipv4Enabled: true,
+ ipv6Enabled: true,
+ rulesToAdd: []*CompletedRule{
+ egressRule1,
+ egressRule2,
+ },
+ rulesToForget: []string{
+ egressRuleID1,
+ },
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ coreChains := []string{
+ "ANTREA-POL-EGRESS-RULES",
+ }
+ ipv4CoreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ ipv6CoreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ svcChains := []string{
+ "ANTREA-POL-EGRESSRULE1",
+ }
+ ipv4SvcRules := [][]string{
+ {
+ "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ ipv6SvcRules := ipv4SvcRules
+ updatedIPv4CoreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ updatedIPv6CoreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, ipv4SvcRules, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, ipv4CoreRules, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, ipv6SvcRules, true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, ipv6CoreRules, true).Times(1)
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedIPv4CoreRules, false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables(svcChains, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedIPv6CoreRules, true).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables(svcChains, true).Times(1)
+ },
+ },
+ {
+ name: "IPv4, add ingress and egress rules in batch, then forget some rules",
+ ipv4Enabled: true,
+ rulesToAdd: []*CompletedRule{
+ ingressRule1,
+ ingressRule2,
+ egressRule1,
+ egressRule2,
+ },
+ rulesToForget: []string{
+ ingressRuleID1,
+ egressRuleID1,
+ },
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ svcChains := []string{
+ "ANTREA-POL-INGRESSRULE1",
+ "ANTREA-POL-EGRESSRULE1",
+ }
+ svcRules := [][]string{
+ {
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ {
+ "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ ingressCoreChains := []string{"ANTREA-POL-INGRESS-RULES"}
+ egressCoreChains := []string{"ANTREA-POL-EGRESS-RULES"}
+ ingressCoreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ egressCoreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ updatedIngressCoreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ updatedEgressCoreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, svcRules, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(ingressCoreChains, ingressCoreRules, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(egressCoreChains, egressCoreRules, false).Times(1)
+
+ mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, false).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-EGRESSRULE1"}, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(ingressCoreChains, updatedIngressCoreRules, false).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(egressCoreChains, updatedEgressCoreRules, false).Times(1)
+ },
+ },
+ {
+ name: "IPv6, add ingress and egress rules in batch, then forget some rules",
+ ipv6Enabled: true,
+ rulesToAdd: []*CompletedRule{
+ ingressRule1,
+ ingressRule2,
+ egressRule1,
+ egressRule2,
+ },
+ rulesToForget: []string{
+ ingressRuleID1,
+ egressRuleID1,
+ },
+ expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) {
+ svcChains := []string{
+ "ANTREA-POL-INGRESSRULE1",
+ "ANTREA-POL-EGRESSRULE1",
+ }
+ svcRules := [][]string{
+ {
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ {
+ "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 443 -j ACCEPT",
+ },
+ }
+ ingressCoreChains := []string{"ANTREA-POL-INGRESS-RULES"}
+ egressCoreChains := []string{"ANTREA-POL-EGRESS-RULES"}
+ ingressCoreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-6 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-INGRESS-RULES -s 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ egressCoreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`,
+ `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ updatedIngressCoreRules := [][]string{
+ {
+ `-A ANTREA-POL-INGRESS-RULES -s 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+ updatedEgressCoreRules := [][]string{
+ {
+ `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`,
+ },
+ }
+
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", sets.New[string]("2002:1a23:fb44::1/128", "fec0::192:168:1:8/125"), true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, svcRules, true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(ingressCoreChains, ingressCoreRules, true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(egressCoreChains, egressCoreRules, true).Times(1)
+
+ mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", true).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, true).Times(1)
+ mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-EGRESSRULE1"}, true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(ingressCoreChains, updatedIngressCoreRules, true).Times(1)
+ mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(egressCoreChains, updatedEgressCoreRules, true).Times(1)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ controller := gomock.NewController(t)
+ mockRouteClient := routetest.NewMockInterface(controller)
+ r := newTestNodeReconciler(mockRouteClient, tt.ipv4Enabled, tt.ipv6Enabled)
+
+ tt.expectedCalls(mockRouteClient.EXPECT())
+ assert.NoError(t, r.BatchReconcile(tt.rulesToAdd))
+
+ for _, ruleID := range tt.rulesToForget {
+ assert.NoError(t, r.Forget(ruleID))
+ }
+ })
+ }
+}
diff --git a/pkg/agent/controller/networkpolicy/node_reconciler_unsupported.go b/pkg/agent/controller/networkpolicy/node_reconciler_unsupported.go
new file mode 100644
index 00000000000..deac59eeb57
--- /dev/null
+++ b/pkg/agent/controller/networkpolicy/node_reconciler_unsupported.go
@@ -0,0 +1,49 @@
+//go:build !linux
+// +build !linux
+
+// Copyright 2024 Antrea Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package networkpolicy
+
+import (
+ "antrea.io/antrea/pkg/agent/route"
+ "antrea.io/antrea/pkg/agent/types"
+)
+
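+// nodeReconciler is a no-op implementation used on platforms where
+// NodeNetworkPolicy is not supported.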
+type nodeReconciler struct{}
+
+func newNodeReconciler(routeClient route.Interface, ipv4Enabled, ipv6Enabled bool) *nodeReconciler {
+ return &nodeReconciler{}
+}
+
+func (r *nodeReconciler) Reconcile(rule *CompletedRule) error {
+ return nil
+}
+
+func (r *nodeReconciler) BatchReconcile(rules []*CompletedRule) error {
+ return nil
+}
+
+func (r *nodeReconciler) Forget(ruleID string) error {
+ return nil
+}
+
+func (r *nodeReconciler) GetRuleByFlowID(ruleID uint32) (*types.PolicyRule, bool, error) {
+ return nil, false, nil
+}
+
+func (r *nodeReconciler) RunIDAllocatorWorker(stopCh <-chan struct{}) {
+
+}
diff --git a/pkg/agent/controller/networkpolicy/reconciler.go b/pkg/agent/controller/networkpolicy/pod_reconciler.go
similarity index 93%
rename from pkg/agent/controller/networkpolicy/reconciler.go
rename to pkg/agent/controller/networkpolicy/pod_reconciler.go
index a20771bc4e4..1bf64e4a84e 100644
--- a/pkg/agent/controller/networkpolicy/reconciler.go
+++ b/pkg/agent/controller/networkpolicy/pod_reconciler.go
@@ -98,10 +98,10 @@ func normalizeServices(services []v1beta2.Service) servicesKey {
return servicesKey(b.String())
}
-// lastRealized is the struct cached by reconciler. It's used to track the
+// podPolicyLastRealized is the struct cached by podReconciler. It's used to track the
// actual state of rules we have enforced, so that we can know how to reconcile
// a rule when it's updated/removed.
-// It includes the last version of CompletedRule the reconciler has realized
+// It includes the last version of CompletedRule the podReconciler has realized
// and the related runtime information including the ofIDs, the Openflow ports
-// or the IP addresses of the target Pods got from the InterfaceStore.
+// or the IP addresses of the target Pods obtained from the InterfaceStore.
//
@@ -142,7 +142,7 @@ func normalizeServices(services []v1beta2.Service) servicesKey {
// while Pod C will have another Openflow rule as it resolves "http" to 8080.
// In the implementation, we group Pods by their resolved services value so Pod A and B
// can be mapped to same group.
-type lastRealized struct {
+type podPolicyLastRealized struct {
// ofIDs identifies Openflow rules in Openflow implementation.
// It's a map of servicesKey to Openflow rule ID.
ofIDs map[servicesKey]uint32
@@ -175,8 +175,8 @@ type lastRealized struct {
groupAddresses sets.Set[string]
}
-func newLastRealized(rule *CompletedRule) *lastRealized {
- return &lastRealized{
+func newPodPolicyLastRealized(rule *CompletedRule) *podPolicyLastRealized {
+ return &podPolicyLastRealized{
ofIDs: map[servicesKey]uint32{},
CompletedRule: rule,
podOFPorts: map[servicesKey]sets.Set[int32]{},
@@ -194,11 +194,11 @@ type tablePriorityAssigner struct {
mutex sync.RWMutex
}
-// reconciler implements Reconciler.
+// podReconciler implements Reconciler.
// Note that although its Reconcile and Forget methods are thread-safe, it's
// assumed each rule can only be processed by a single client at any given
// time. Different rules can be processed in parallel.
-type reconciler struct {
+type podReconciler struct {
// ofClient is the Openflow interface.
ofClient openflow.Client
@@ -206,7 +206,7 @@ type reconciler struct {
ifaceStore interfacestore.InterfaceStore
// lastRealizeds caches the last realized rules.
- // It's a mapping from ruleID to *lastRealized.
+ // It's a mapping from ruleID to *podPolicyLastRealized.
lastRealizeds sync.Map
// idAllocator provides interfaces to allocateForRule and release uint32 id.
@@ -220,11 +220,11 @@ type reconciler struct {
ipv6Enabled bool
// fqdnController manages dns cache of FQDN rules. It provides interfaces for the
- // reconciler to register FQDN policy rules and query the IP addresses corresponded
+ // podReconciler to register FQDN policy rules and query the IP addresses corresponding
// to a FQDN.
fqdnController *fqdnController
- // groupCounters is a list of GroupCounter for v4 and v6 env. reconciler uses these
+ // groupCounters is a list of GroupCounter for IPv4 and IPv6 environments. podReconciler uses these
// GroupCounters to get the groupIDs of a specific Service.
groupCounters []proxytypes.GroupCounter
@@ -232,8 +232,8 @@ type reconciler struct {
multicastEnabled bool
}
-// newReconciler returns a new *reconciler.
-func newReconciler(ofClient openflow.Client,
+// newPodReconciler returns a new *podReconciler.
+func newPodReconciler(ofClient openflow.Client,
ifaceStore interfacestore.InterfaceStore,
idAllocator *idAllocator,
fqdnController *fqdnController,
@@ -242,7 +242,7 @@ func newReconciler(ofClient openflow.Client,
v6Enabled bool,
antreaPolicyEnabled bool,
multicastEnabled bool,
-) *reconciler {
+) *podReconciler {
priorityAssigners := map[uint8]*tablePriorityAssigner{}
if antreaPolicyEnabled {
for _, table := range openflow.GetAntreaPolicyBaselineTierTables() {
@@ -268,7 +268,7 @@ func newReconciler(ofClient openflow.Client,
}
}
}
- reconciler := &reconciler{
+ reconciler := &podReconciler{
ofClient: ofClient,
ifaceStore: ifaceStore,
lastRealizeds: sync.Map{},
@@ -288,14 +288,14 @@ func newReconciler(ofClient openflow.Client,
// RunIDAllocatorWorker runs the worker that deletes the rules from the cache in
// idAllocator.
-func (r *reconciler) RunIDAllocatorWorker(stopCh <-chan struct{}) {
+func (r *podReconciler) RunIDAllocatorWorker(stopCh <-chan struct{}) {
r.idAllocator.runWorker(stopCh)
}
-// Reconcile checks whether the provided rule have been enforced or not, and
-// invoke the add or update method accordingly.
+// Reconcile checks whether the provided rule has been enforced or not, and
+// invokes the add or update method accordingly.
-func (r *reconciler) Reconcile(rule *CompletedRule) error {
- klog.InfoS("Reconciling NetworkPolicy rule", "rule", rule.ID, "policy", rule.SourceRef.ToString())
+func (r *podReconciler) Reconcile(rule *CompletedRule) error {
+ klog.InfoS("Reconciling Pod NetworkPolicy rule", "rule", rule.ID, "policy", rule.SourceRef.ToString())
var err error
var ofPriority *uint16
@@ -319,7 +319,7 @@ func (r *reconciler) Reconcile(rule *CompletedRule) error {
if !exists {
ofRuleInstallErr = r.add(rule, ofPriority, ruleTable)
} else {
- ofRuleInstallErr = r.update(value.(*lastRealized), rule, ofPriority, ruleTable)
+ ofRuleInstallErr = r.update(value.(*podPolicyLastRealized), rule, ofPriority, ruleTable)
}
if ofRuleInstallErr != nil && ofPriority != nil && !registeredBefore {
priorityAssigner.assigner.release(*ofPriority)
@@ -327,7 +327,7 @@ func (r *reconciler) Reconcile(rule *CompletedRule) error {
return ofRuleInstallErr
}
-func (r *reconciler) getRuleType(rule *CompletedRule) ruleType {
+func (r *podReconciler) getRuleType(rule *CompletedRule) ruleType {
if !r.multicastEnabled {
return unicast
}
@@ -349,7 +349,7 @@ func (r *reconciler) getRuleType(rule *CompletedRule) ruleType {
// getOFRuleTable retrieves the OpenFlow table to install the CompletedRule.
// The decision is made based on whether the rule is created for an ACNP/ANNP, and
// the Tier of that NetworkPolicy.
-func (r *reconciler) getOFRuleTable(rule *CompletedRule) uint8 {
+func (r *podReconciler) getOFRuleTable(rule *CompletedRule) uint8 {
rType := r.getRuleType(rule)
var ruleTables []*openflow.Table
var tableID uint8
@@ -388,7 +388,7 @@ func (r *reconciler) getOFRuleTable(rule *CompletedRule) uint8 {
// getOFPriority retrieves the OFPriority for the input CompletedRule to be installed,
// and re-arranges installed priorities on OVS if necessary.
-func (r *reconciler) getOFPriority(rule *CompletedRule, tableID uint8, pa *tablePriorityAssigner) (*uint16, bool, error) {
+func (r *podReconciler) getOFPriority(rule *CompletedRule, tableID uint8, pa *tablePriorityAssigner) (*uint16, bool, error) {
-// IGMP Egress policy is enforced in userspace via packet-in message, there won't be OpenFlow
-// rules created for such rules. Therefore, assigning priority is not required.
+// IGMP Egress policy is enforced in userspace via packet-in messages, so there won't be
+// OpenFlow rules created for such rules. Therefore, assigning priority is not required.
if !rule.isAntreaNetworkPolicyRule() || rule.isIGMPEgressPolicyRule() {
@@ -431,7 +431,7 @@ func (r *reconciler) getOFPriority(rule *CompletedRule, tableID uint8, pa *table
// BatchReconcile reconciles the desired state of the provided CompletedRules
// with the actual state of Openflow entries in batch. It should only be invoked
// if all rules are newly added without last realized status.
-func (r *reconciler) BatchReconcile(rules []*CompletedRule) error {
+func (r *podReconciler) BatchReconcile(rules []*CompletedRule) error {
var rulesToInstall []*CompletedRule
var priorities []*uint16
prioritiesByTable := map[uint8][]*uint16{}
@@ -471,7 +471,7 @@ func (r *reconciler) BatchReconcile(rules []*CompletedRule) error {
// registerOFPriorities constructs a Priority type for each CompletedRule in the input list,
// and registers those Priorities with appropriate tablePriorityAssigner based on Tier.
-func (r *reconciler) registerOFPriorities(rules []*CompletedRule) error {
+func (r *podReconciler) registerOFPriorities(rules []*CompletedRule) error {
prioritiesToRegister := map[uint8][]types.Priority{}
for _, rule := range rules {
-// IGMP Egress policy is enforced in userspace via packet-in message, there won't be OpenFlow
+// IGMP Egress policy is enforced in userspace via packet-in messages, so there won't be OpenFlow
@@ -495,7 +495,7 @@ func (r *reconciler) registerOFPriorities(rules []*CompletedRule) error {
}
// add converts CompletedRule to PolicyRule(s) and invokes installOFRule to install them.
-func (r *reconciler) add(rule *CompletedRule, ofPriority *uint16, table uint8) error {
+func (r *podReconciler) add(rule *CompletedRule, ofPriority *uint16, table uint8) error {
klog.V(2).InfoS("Adding new rule", "rule", rule)
ofRuleByServicesMap, lastRealized := r.computeOFRulesForAdd(rule, ofPriority, table)
for svcKey, ofRule := range ofRuleByServicesMap {
@@ -517,9 +517,9 @@ func (r *reconciler) add(rule *CompletedRule, ofPriority *uint16, table uint8) e
return nil
}
-func (r *reconciler) computeOFRulesForAdd(rule *CompletedRule, ofPriority *uint16, table uint8) (
- map[servicesKey]*types.PolicyRule, *lastRealized) {
- lastRealized := newLastRealized(rule)
+func (r *podReconciler) computeOFRulesForAdd(rule *CompletedRule, ofPriority *uint16, table uint8) (
+ map[servicesKey]*types.PolicyRule, *podPolicyLastRealized) {
+ lastRealized := newPodPolicyLastRealized(rule)
// TODO: Handle the case that the following processing fails or partially succeeds.
r.lastRealizeds.Store(rule.ID, lastRealized)
@@ -561,7 +561,7 @@ func (r *reconciler) computeOFRulesForAdd(rule *CompletedRule, ofPriority *uint1
svcGroupIDs := r.getSvcGroupIDs(members)
toAddresses = svcGroupIDsToOFAddresses(svcGroupIDs)
// If rule is applied to Services, there will be only one svcKey, which is "", in
- // membersByServicesMap. So lastRealized.serviceGroupIDs won't be overwritten in
+ // membersByServicesMap. So podPolicyLastRealized.serviceGroupIDs won't be overwritten in
// this for-loop.
lastRealized.serviceGroupIDs = svcGroupIDs
} else {
@@ -672,8 +672,8 @@ func (r *reconciler) computeOFRulesForAdd(rule *CompletedRule, ofPriority *uint1
}
// batchAdd converts CompletedRules to PolicyRules and invokes BatchInstallPolicyRuleFlows to install them.
-func (r *reconciler) batchAdd(rules []*CompletedRule, ofPriorities []*uint16) error {
- lastRealizeds := make([]*lastRealized, len(rules))
+func (r *podReconciler) batchAdd(rules []*CompletedRule, ofPriorities []*uint16) error {
+ lastRealizeds := make([]*podPolicyLastRealized, len(rules))
ofIDUpdateMaps := make([]map[servicesKey]uint32, len(rules))
var allOFRules []*types.PolicyRule
@@ -711,7 +711,7 @@ func (r *reconciler) batchAdd(rules []*CompletedRule, ofPriorities []*uint16) er
// update calculates the difference of Addresses between oldRule and newRule,
// and invokes Openflow client's methods to reconcile them.
-func (r *reconciler) update(lastRealized *lastRealized, newRule *CompletedRule, ofPriority *uint16, table uint8) error {
+func (r *podReconciler) update(lastRealized *podPolicyLastRealized, newRule *CompletedRule, ofPriority *uint16, table uint8) error {
klog.V(2).InfoS("Updating existing rule", "rule", newRule)
// staleOFIDs tracks servicesKeys that are no longer needed.
// Firstly fill it with the last realized ofIDs.
@@ -871,7 +871,7 @@ func (r *reconciler) update(lastRealized *lastRealized, newRule *CompletedRule,
LogLabel: newRule.LogLabel,
}
// If the PolicyRule for the original services doesn't exist and IPBlocks is present, it means the
- // reconciler hasn't installed flows for IPBlocks, then it must be added to the new PolicyRule.
+ // podReconciler hasn't installed flows for IPBlocks, so they must be added to the new PolicyRule.
if svcKey == originalSvcKey && len(newRule.To.IPBlocks) > 0 {
to := ipBlocksToOFAddresses(newRule.To.IPBlocks, r.ipv4Enabled, r.ipv6Enabled, false)
ofRule.To = append(ofRule.To, to...)
@@ -943,7 +943,7 @@ func (r *reconciler) update(lastRealized *lastRealized, newRule *CompletedRule,
return nil
}
-func (r *reconciler) installOFRule(ofRule *types.PolicyRule) error {
+func (r *podReconciler) installOFRule(ofRule *types.PolicyRule) error {
klog.V(2).InfoS("Installing ofRule", "id", ofRule.FlowID, "direction", ofRule.Direction, "from", len(ofRule.From), "to", len(ofRule.To), "service", len(ofRule.Service))
if err := r.ofClient.InstallPolicyRuleFlows(ofRule); err != nil {
r.idAllocator.forgetRule(ofRule.FlowID)
@@ -952,7 +952,7 @@ func (r *reconciler) installOFRule(ofRule *types.PolicyRule) error {
return nil
}
-func (r *reconciler) updateOFRule(ofID uint32, addedFrom []types.Address, addedTo []types.Address, deletedFrom []types.Address, deletedTo []types.Address, priority *uint16, enableLogging, isMCNPRule bool) error {
+func (r *podReconciler) updateOFRule(ofID uint32, addedFrom []types.Address, addedTo []types.Address, deletedFrom []types.Address, deletedTo []types.Address, priority *uint16, enableLogging, isMCNPRule bool) error {
klog.V(2).InfoS("Updating ofRule", "id", ofID, "addedFrom", len(addedFrom), "addedTo", len(addedTo), "deletedFrom", len(deletedFrom), "deletedTo", len(deletedTo))
// TODO: This might be unnecessarily complex and hard for error handling, consider revising the Openflow interfaces.
if len(addedFrom) > 0 {
@@ -978,7 +978,7 @@ func (r *reconciler) updateOFRule(ofID uint32, addedFrom []types.Address, addedT
return nil
}
-func (r *reconciler) uninstallOFRule(ofID uint32, table uint8) error {
+func (r *podReconciler) uninstallOFRule(ofID uint32, table uint8) error {
klog.V(2).InfoS("Uninstalling ofRule", "id", ofID)
stalePriorities, err := r.ofClient.UninstallPolicyRuleFlows(ofID)
if err != nil {
@@ -1003,7 +1003,7 @@ func (r *reconciler) uninstallOFRule(ofID uint32, table uint8) error {
// Forget invokes UninstallPolicyRuleFlows to uninstall Openflow entries
// associated with the provided ruleID if it was enforced before.
-func (r *reconciler) Forget(ruleID string) error {
+func (r *podReconciler) Forget(ruleID string) error {
klog.InfoS("Forgetting rule", "rule", ruleID)
value, exists := r.lastRealizeds.Load(ruleID)
@@ -1012,7 +1012,7 @@ func (r *reconciler) Forget(ruleID string) error {
return nil
}
- lastRealized := value.(*lastRealized)
+ lastRealized := value.(*podPolicyLastRealized)
table := r.getOFRuleTable(lastRealized.CompletedRule)
priorityAssigner, exists := r.priorityAssigners[table]
if exists {
@@ -1033,7 +1033,7 @@ func (r *reconciler) Forget(ruleID string) error {
return nil
}
-func (r *reconciler) isIGMPRule(rule *CompletedRule) bool {
+func (r *podReconciler) isIGMPRule(rule *CompletedRule) bool {
isIGMP := false
if len(rule.Services) > 0 && (rule.Services[0].Protocol != nil) &&
(*rule.Services[0].Protocol == v1beta2.ProtocolIGMP) {
@@ -1042,11 +1042,11 @@ func (r *reconciler) isIGMPRule(rule *CompletedRule) bool {
return isIGMP
}
-func (r *reconciler) GetRuleByFlowID(ruleFlowID uint32) (*types.PolicyRule, bool, error) {
+func (r *podReconciler) GetRuleByFlowID(ruleFlowID uint32) (*types.PolicyRule, bool, error) {
return r.idAllocator.getRuleFromAsyncCache(ruleFlowID)
}
-func (r *reconciler) getOFPorts(members v1beta2.GroupMemberSet) sets.Set[int32] {
+func (r *podReconciler) getOFPorts(members v1beta2.GroupMemberSet) sets.Set[int32] {
ofPorts := sets.New[int32]()
for _, m := range members {
var entityName, ns string
@@ -1071,7 +1071,7 @@ func (r *reconciler) getOFPorts(members v1beta2.GroupMemberSet) sets.Set[int32]
return ofPorts
}
-func (r *reconciler) getIPs(members v1beta2.GroupMemberSet) sets.Set[string] {
+func (r *podReconciler) getIPs(members v1beta2.GroupMemberSet) sets.Set[string] {
ips := sets.New[string]()
for _, m := range members {
var entityName, ns string
@@ -1100,7 +1100,7 @@ func (r *reconciler) getIPs(members v1beta2.GroupMemberSet) sets.Set[string] {
return ips
}
-func (r *reconciler) getSvcGroupIDs(members v1beta2.GroupMemberSet) sets.Set[int64] {
+func (r *podReconciler) getSvcGroupIDs(members v1beta2.GroupMemberSet) sets.Set[int64] {
var svcRefs []v1beta2.ServiceReference
for _, m := range members {
if m.Service != nil {
@@ -1162,7 +1162,7 @@ func ofPortsToOFAddresses(ofPorts sets.Set[int32]) []types.Address {
return addresses
}
-func (r *reconciler) svcRefsToGroupIDs(svcRefs []v1beta2.ServiceReference) sets.Set[int64] {
+func (r *podReconciler) svcRefsToGroupIDs(svcRefs []v1beta2.ServiceReference) sets.Set[int64] {
groupIDs := sets.New[int64]()
for _, svcRef := range svcRefs {
for _, groupCounter := range r.groupCounters {
diff --git a/pkg/agent/controller/networkpolicy/reconciler_test.go b/pkg/agent/controller/networkpolicy/pod_reconciler_test.go
similarity index 98%
rename from pkg/agent/controller/networkpolicy/reconciler_test.go
rename to pkg/agent/controller/networkpolicy/pod_reconciler_test.go
index 0b6cbc58f30..ec31137a918 100644
--- a/pkg/agent/controller/networkpolicy/reconciler_test.go
+++ b/pkg/agent/controller/networkpolicy/pod_reconciler_test.go
@@ -107,12 +107,12 @@ func newCIDR(cidrStr string) *net.IPNet {
return tmpIPNet
}
-func newTestReconciler(t *testing.T, controller *gomock.Controller, ifaceStore interfacestore.InterfaceStore, ofClient *openflowtest.MockClient, v4Enabled, v6Enabled bool) *reconciler {
+func newTestReconciler(t *testing.T, controller *gomock.Controller, ifaceStore interfacestore.InterfaceStore, ofClient *openflowtest.MockClient, v4Enabled, v6Enabled bool) *podReconciler {
f, _ := newMockFQDNController(t, controller, nil)
ch := make(chan string, 100)
groupIDAllocator := openflow.NewGroupAllocator()
groupCounters := []proxytypes.GroupCounter{proxytypes.NewGroupCounter(groupIDAllocator, ch)}
- r := newReconciler(ofClient, ifaceStore, newIDAllocator(testAsyncDeleteInterval), f, groupCounters, v4Enabled, v6Enabled, true, false)
+ r := newPodReconciler(ofClient, ifaceStore, newIDAllocator(testAsyncDeleteInterval), f, groupCounters, v4Enabled, v6Enabled, true, false)
return r
}
@@ -120,14 +120,14 @@ func TestReconcilerForget(t *testing.T) {
prepareMockTables()
tests := []struct {
name string
- lastRealizeds map[string]*lastRealized
+ lastRealizeds map[string]*podPolicyLastRealized
args string
expectedOFRuleIDs []uint32
wantErr bool
}{
{
"unknown-rule",
- map[string]*lastRealized{
+ map[string]*podPolicyLastRealized{
"foo": {
ofIDs: map[servicesKey]uint32{servicesKey1: 8},
CompletedRule: &CompletedRule{
@@ -141,7 +141,7 @@ func TestReconcilerForget(t *testing.T) {
},
{
"known-single-ofrule",
- map[string]*lastRealized{
+ map[string]*podPolicyLastRealized{
"foo": {
ofIDs: map[servicesKey]uint32{servicesKey1: 8},
CompletedRule: &CompletedRule{
@@ -155,7 +155,7 @@ func TestReconcilerForget(t *testing.T) {
},
{
"known-multiple-ofrule",
- map[string]*lastRealized{
+ map[string]*podPolicyLastRealized{
"foo": {
ofIDs: map[servicesKey]uint32{servicesKey1: 8, servicesKey2: 9},
CompletedRule: &CompletedRule{
@@ -169,7 +169,7 @@ func TestReconcilerForget(t *testing.T) {
},
{
"known-multiple-ofrule-cnp",
- map[string]*lastRealized{
+ map[string]*podPolicyLastRealized{
"foo": {
ofIDs: map[servicesKey]uint32{servicesKey1: 8, servicesKey2: 9},
CompletedRule: &CompletedRule{
@@ -864,7 +864,7 @@ func TestReconcilerReconcileServiceRelatedRule(t *testing.T) {
}
}
-// TestReconcileWithTransientError ensures the reconciler can reconcile a rule properly after the first attempt meets
+// TestReconcileWithTransientError ensures the podReconciler can reconcile a rule properly after the first attempt hits a
// transient error.
// The input rule is an egress rule with named port, applying to 3 Pods and 1 IPBlock. The first 2 Pods have different
// port numbers for the named port and the 3rd Pod cannot resolve it.
@@ -922,10 +922,10 @@ func TestReconcileWithTransientError(t *testing.T) {
mockOFClient.EXPECT().InstallPolicyRuleFlows(gomock.Any()).Return(transientError).Times(1)
err := r.Reconcile(egressRule)
assert.Error(t, err)
- // Ensure the openflow ID is not persistent in lastRealized and is released to idAllocator upon error.
+ // Ensure the openflow ID is not persisted in podPolicyLastRealized and is released to idAllocator upon error.
value, exists := r.lastRealizeds.Load(egressRule.ID)
assert.True(t, exists)
- assert.Empty(t, value.(*lastRealized).ofIDs)
+ assert.Empty(t, value.(*podPolicyLastRealized).ofIDs)
assert.Equal(t, 1, r.idAllocator.deleteQueue.Len())
// Make the second call success.
@@ -961,10 +961,10 @@ func TestReconcileWithTransientError(t *testing.T) {
}
err = r.Reconcile(egressRule)
assert.NoError(t, err)
- // Ensure the openflow IDs are persistent in lastRealized and are not released to idAllocator upon success.
+ // Ensure the openflow IDs are persisted in podPolicyLastRealized and are not released to idAllocator upon success.
value, exists = r.lastRealizeds.Load(egressRule.ID)
assert.True(t, exists)
- assert.Len(t, value.(*lastRealized).ofIDs, 3)
+ assert.Len(t, value.(*podPolicyLastRealized).ofIDs, 3)
// Ensure the number of released IDs doesn't change.
assert.Equal(t, 1, r.idAllocator.deleteQueue.Len())
@@ -1075,7 +1075,7 @@ func TestReconcilerBatchReconcile(t *testing.T) {
r := newTestReconciler(t, controller, ifaceStore, mockOFClient, true, true)
if tt.numInstalledRules > 0 {
// BatchInstall should skip rules already installed
- r.lastRealizeds.Store(tt.args[0].ID, newLastRealized(tt.args[0]))
+ r.lastRealizeds.Store(tt.args[0].ID, newPodPolicyLastRealized(tt.args[0]))
}
// TODO: mock idAllocator and priorityAssigner
mockOFClient.EXPECT().BatchInstallPolicyRuleFlows(gomock.Any()).
diff --git a/pkg/agent/route/interfaces.go b/pkg/agent/route/interfaces.go
index fd0d725d5c7..5355efbf3c6 100644
--- a/pkg/agent/route/interfaces.go
+++ b/pkg/agent/route/interfaces.go
@@ -18,6 +18,8 @@ import (
"net"
"time"
+ "k8s.io/apimachinery/pkg/util/sets"
+
"antrea.io/antrea/pkg/agent/config"
binding "antrea.io/antrea/pkg/ovs/openflow"
)
@@ -105,4 +107,16 @@ type Interface interface {
// ClearConntrackEntryForService deletes a conntrack entry for a Service connection.
ClearConntrackEntryForService(svcIP net.IP, svcPort uint16, endpointIP net.IP, protocol binding.Protocol) error
+
+ // AddOrUpdateNodeNetworkPolicyIPSet adds or updates an ipset created for NodeNetworkPolicy.
+ AddOrUpdateNodeNetworkPolicyIPSet(ipsetName string, ipsetEntries sets.Set[string], isIPv6 bool) error
+
+ // DeleteNodeNetworkPolicyIPSet deletes an ipset created for NodeNetworkPolicy.
+ DeleteNodeNetworkPolicyIPSet(ipsetName string, isIPv6 bool) error
+
+ // AddOrUpdateNodeNetworkPolicyIPTables adds or updates iptables chains, and the rules within those chains, for NodeNetworkPolicy.
+ AddOrUpdateNodeNetworkPolicyIPTables(iptablesChains []string, iptablesRules [][]string, isIPv6 bool) error
+
+ // DeleteNodeNetworkPolicyIPTables deletes iptables chains, and the rules within those chains, for NodeNetworkPolicy.
+ DeleteNodeNetworkPolicyIPTables(iptablesChains []string, isIPv6 bool) error
}
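For orientation, here is a minimal caller-side sketch of the four new `Interface` methods. Only the method signatures come from this change; the ipset name and the rule itself are hypothetical, and the core chain name mirrors `config.NodeNetworkPolicyIngressRulesChain` as used in the tests later in this diff.

```go
package example

import (
	"k8s.io/apimachinery/pkg/util/sets"

	"antrea.io/antrea/pkg/agent/route"
)

// syncExampleRule sketches how an agent component might realize one
// NodeNetworkPolicy rule. "ANTREA-POL-EXAMPLE-4" is a hypothetical ipset name.
func syncExampleRule(client route.Interface) error {
	// Keep the rule's peer addresses in an ipset so the iptables rule stays
	// stable while group membership churns.
	if err := client.AddOrUpdateNodeNetworkPolicyIPSet(
		"ANTREA-POL-EXAMPLE-4",
		sets.New[string]("10.0.0.1/32", "10.0.0.2/32"),
		false, // isIPv6
	); err != nil {
		return err
	}
	// Install the core ingress rule referencing the ipset.
	return client.AddOrUpdateNodeNetworkPolicyIPTables(
		[]string{"ANTREA-POL-INGRESS-RULES"},
		[][]string{{`-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-EXAMPLE-4 src -p tcp --dport 80 -j ACCEPT`}},
		false, // isIPv6
	)
}
```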
diff --git a/pkg/agent/route/route_linux.go b/pkg/agent/route/route_linux.go
index 9b24191d98f..753f8cd075f 100644
--- a/pkg/agent/route/route_linux.go
+++ b/pkg/agent/route/route_linux.go
@@ -19,6 +19,7 @@ import (
"fmt"
"net"
"reflect"
+ "sort"
"strconv"
"sync"
"time"
@@ -72,11 +73,15 @@ const (
antreaForwardChain = "ANTREA-FORWARD"
antreaPreRoutingChain = "ANTREA-PREROUTING"
antreaPostRoutingChain = "ANTREA-POSTROUTING"
+ antreaInputChain = "ANTREA-INPUT"
antreaOutputChain = "ANTREA-OUTPUT"
antreaMangleChain = "ANTREA-MANGLE"
serviceIPv4CIDRKey = "serviceIPv4CIDRKey"
serviceIPv6CIDRKey = "serviceIPv6CIDRKey"
+
+ preNodeNetworkPolicyIngressRulesChain = "ANTREA-POL-PRE-INGRESS-RULES"
+ preNodeNetworkPolicyEgressRulesChain = "ANTREA-POL-PRE-EGRESS-RULES"
)
// Client implements Interface.
@@ -107,11 +112,12 @@ type Client struct {
// markToSNATIP caches marks to SNAT IPs. It's used in Egress feature.
markToSNATIP sync.Map
// iptablesInitialized is used to notify when iptables initialization is done.
- iptablesInitialized chan struct{}
- proxyAll bool
- connectUplinkToBridge bool
- multicastEnabled bool
- isCloudEKS bool
+ iptablesInitialized chan struct{}
+ proxyAll bool
+ connectUplinkToBridge bool
+ multicastEnabled bool
+ isCloudEKS bool
+ nodeNetworkPolicyEnabled bool
// serviceRoutes caches ip routes about Services.
serviceRoutes sync.Map
// serviceNeighbors caches neighbors.
@@ -128,20 +134,39 @@ type Client struct {
egressRoutes sync.Map
// The latest calculated Service CIDRs can be got from serviceCIDRProvider.
serviceCIDRProvider servicecidr.Interface
+ // nodeNetworkPolicyIPSetsIPv4 caches all existing IPv4 ipsets for NodeNetworkPolicy.
+ nodeNetworkPolicyIPSetsIPv4 sync.Map
+ // nodeNetworkPolicyIPSetsIPv6 caches all existing IPv6 ipsets for NodeNetworkPolicy.
+ nodeNetworkPolicyIPSetsIPv6 sync.Map
+ // nodeNetworkPolicyIPTablesIPv4 caches all existing IPv4 iptables chains and rules for NodeNetworkPolicy.
+ nodeNetworkPolicyIPTablesIPv4 sync.Map
+ // nodeNetworkPolicyIPTablesIPv6 caches all existing IPv6 iptables chains and rules for NodeNetworkPolicy.
+ nodeNetworkPolicyIPTablesIPv6 sync.Map
+ // nodeNetworkPolicyIPTablesDeterministic represents whether to write iptables chains and rules for NodeNetworkPolicy
+ // deterministically when syncIPTables is called. Enabling it may have a performance impact. It's disabled by default
+ // and should only be used in testing.
+ nodeNetworkPolicyIPTablesDeterministic bool
}
// NewClient returns a route client.
-func NewClient(networkConfig *config.NetworkConfig, noSNAT, proxyAll, connectUplinkToBridge, multicastEnabled bool, serviceCIDRProvider servicecidr.Interface) (*Client, error) {
+func NewClient(networkConfig *config.NetworkConfig,
+ noSNAT bool,
+ proxyAll bool,
+ connectUplinkToBridge bool,
+ nodeNetworkPolicyEnabled bool,
+ multicastEnabled bool,
+ serviceCIDRProvider servicecidr.Interface) (*Client, error) {
return &Client{
- networkConfig: networkConfig,
- noSNAT: noSNAT,
- proxyAll: proxyAll,
- multicastEnabled: multicastEnabled,
- connectUplinkToBridge: connectUplinkToBridge,
- ipset: ipset.NewClient(),
- netlink: &netlink.Handle{},
- isCloudEKS: env.IsCloudEKS(),
- serviceCIDRProvider: serviceCIDRProvider,
+ networkConfig: networkConfig,
+ noSNAT: noSNAT,
+ proxyAll: proxyAll,
+ multicastEnabled: multicastEnabled,
+ connectUplinkToBridge: connectUplinkToBridge,
+ nodeNetworkPolicyEnabled: nodeNetworkPolicyEnabled,
+ ipset: ipset.NewClient(),
+ netlink: &netlink.Handle{},
+ isCloudEKS: env.IsCloudEKS(),
+ serviceCIDRProvider: serviceCIDRProvider,
}, nil
}
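A minimal sketch of the updated call site, reflecting the new parameter order. The feature-gate lookup is an assumption about the caller (the `features` package and gate name are not part of this hunk); only the `NewClient` signature is from this change.

```go
package example

import (
	"antrea.io/antrea/pkg/agent/config"
	"antrea.io/antrea/pkg/agent/route"
	"antrea.io/antrea/pkg/agent/servicecidr"
	"antrea.io/antrea/pkg/features"
)

// buildRouteClient wires the NodeNetworkPolicy feature gate into the route
// client. The gate lookup is an assumed caller pattern, not from this diff.
func buildRouteClient(networkConfig *config.NetworkConfig, noSNAT, proxyAll, connectUplinkToBridge, multicastEnabled bool, serviceCIDRProvider servicecidr.Interface) (*route.Client, error) {
	nodeNetworkPolicyEnabled := features.DefaultFeatureGate.Enabled(features.NodeNetworkPolicy)
	return route.NewClient(networkConfig, noSNAT, proxyAll, connectUplinkToBridge,
		nodeNetworkPolicyEnabled, multicastEnabled, serviceCIDRProvider)
}
```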
@@ -204,6 +229,10 @@ func (c *Client) Initialize(nodeConfig *config.NodeConfig, done func()) error {
return fmt.Errorf("failed to initialize Service IP routes: %v", err)
}
}
+ // Build static iptables rules for NodeNetworkPolicy.
+ if c.nodeNetworkPolicyEnabled {
+ c.initNodeNetworkPolicy()
+ }
return nil
}
@@ -411,6 +440,35 @@ func (c *Client) syncIPSet() error {
})
}
+ if c.nodeNetworkPolicyEnabled {
+ c.nodeNetworkPolicyIPSetsIPv4.Range(func(key, value any) bool {
+ ipsetName := key.(string)
+ ipsetEntries := value.(sets.Set[string])
+ if err := c.ipset.CreateIPSet(ipsetName, ipset.HashNet, false); err != nil {
+ return false
+ }
+ for ipsetEntry := range ipsetEntries {
+ if err := c.ipset.AddEntry(ipsetName, ipsetEntry); err != nil {
+ return false
+ }
+ }
+ return true
+ })
+ c.nodeNetworkPolicyIPSetsIPv6.Range(func(key, value any) bool {
+ ipsetName := key.(string)
+ ipsetEntries := value.(sets.Set[string])
+ if err := c.ipset.CreateIPSet(ipsetName, ipset.HashNet, true); err != nil {
+ return false
+ }
+ for ipsetEntry := range ipsetEntries {
+ if err := c.ipset.AddEntry(ipsetName, ipsetEntry); err != nil {
+ return false
+ }
+ }
+ return true
+ })
+ }
+
return nil
}
@@ -497,18 +555,19 @@ func (c *Client) writeEKSNATRules(iptablesData *bytes.Buffer) {
}...)
}
+// jumpRule describes an iptables rule that makes a built-in chain (srcChain)
+// jump to an Antrea-managed chain (dstChain). These jump rules cannot be
+// installed with iptables-restore because the built-in chains also contain
+// rules that Antrea does not manage.
+type jumpRule struct {
+ table string
+ srcChain string
+ dstChain string
+ comment string
+}
+
// syncIPTables ensures that the iptables infrastructure we use is set up.
// It's idempotent and can safely be called on every startup.
func (c *Client) syncIPTables() error {
- // Create the antrea managed chains and link them to built-in chains.
- // We cannot use iptables-restore for these jump rules because there
- // are non antrea managed rules in built-in chains.
- type jumpRule struct {
- table string
- srcChain string
- dstChain string
- comment string
- }
jumpRules := []jumpRule{
{iptables.RawTable, iptables.PreRoutingChain, antreaPreRoutingChain, "Antrea: jump to Antrea prerouting rules"},
{iptables.RawTable, iptables.OutputChain, antreaOutputChain, "Antrea: jump to Antrea output rules"},
@@ -523,6 +582,10 @@ func (c *Client) syncIPTables() error {
if c.proxyAll {
jumpRules = append(jumpRules, jumpRule{iptables.NATTable, iptables.OutputChain, antreaOutputChain, "Antrea: jump to Antrea output rules"})
}
+ if c.nodeNetworkPolicyEnabled {
+ jumpRules = append(jumpRules, jumpRule{iptables.FilterTable, iptables.InputChain, antreaInputChain, "Antrea: jump to Antrea input rules"})
+ jumpRules = append(jumpRules, jumpRule{iptables.FilterTable, iptables.OutputChain, antreaOutputChain, "Antrea: jump to Antrea output rules"})
+ }
for _, rule := range jumpRules {
if err := c.iptables.EnsureChain(iptables.ProtocolDual, rule.table, rule.dstChain); err != nil {
return err
@@ -546,6 +609,21 @@ func (c *Client) syncIPTables() error {
return true
})
+ nodeNetworkPolicyIPTablesIPv4 := map[string][]string{}
+ nodeNetworkPolicyIPTablesIPv6 := map[string][]string{}
+ c.nodeNetworkPolicyIPTablesIPv4.Range(func(key, value any) bool {
+ chain := key.(string)
+ rules := value.([]string)
+ nodeNetworkPolicyIPTablesIPv4[chain] = rules
+ return true
+ })
+ c.nodeNetworkPolicyIPTablesIPv6.Range(func(key, value any) bool {
+ chain := key.(string)
+ rules := value.([]string)
+ nodeNetworkPolicyIPTablesIPv6[chain] = rules
+ return true
+ })
+
// Use iptables-restore to configure IPv4 settings.
if c.networkConfig.IPv4Enabled {
iptablesData := c.restoreIptablesData(c.nodeConfig.PodIPv4CIDR,
@@ -556,6 +634,7 @@ func (c *Client) syncIPTables() error {
config.VirtualNodePortDNATIPv4,
config.VirtualServiceIPv4,
snatMarkToIPv4,
+ nodeNetworkPolicyIPTablesIPv4,
false)
// Setting --noflush to keep the previous contents (i.e. non antrea managed chains) of the tables.
@@ -574,6 +653,7 @@ func (c *Client) syncIPTables() error {
config.VirtualNodePortDNATIPv6,
config.VirtualServiceIPv6,
snatMarkToIPv6,
+ nodeNetworkPolicyIPTablesIPv6,
true)
// Setting --noflush to keep the previous contents (i.e. non antrea managed chains) of the tables.
if err := c.iptables.Restore(iptablesData.String(), false, true); err != nil {
@@ -592,6 +672,7 @@ func (c *Client) restoreIptablesData(podCIDR *net.IPNet,
nodePortDNATVirtualIP,
serviceVirtualIP net.IP,
snatMarkToIP map[uint32]net.IP,
+ nodeNetworkPolicyIPTables map[string][]string,
isIPv6 bool) *bytes.Buffer {
// Create required rules in the antrea chains.
// Use iptables-restore as it flushes the involved chains and creates the desired rules
@@ -638,7 +719,7 @@ func (c *Client) restoreIptablesData(podCIDR *net.IPNet,
"-m", "comment", "--comment", `"Antrea: drop Pod multicast traffic forwarded via underlay network"`,
"-m", "set", "--match-set", clusterNodeIPSet, "src",
"-d", types.McastCIDR.String(),
- "-j", iptables.DROPTarget,
+ "-j", iptables.DropTarget,
}...)
}
}
@@ -680,6 +761,18 @@ func (c *Client) restoreIptablesData(podCIDR *net.IPNet,
writeLine(iptablesData, "*filter")
writeLine(iptablesData, iptables.MakeChainLine(antreaForwardChain))
+
+ var nodeNetworkPolicyIPTablesChains []string
+ for chain := range nodeNetworkPolicyIPTables {
+ nodeNetworkPolicyIPTablesChains = append(nodeNetworkPolicyIPTablesChains, chain)
+ }
+ if c.nodeNetworkPolicyIPTablesDeterministic {
+ sort.Strings(nodeNetworkPolicyIPTablesChains)
+ }
+ for _, chain := range nodeNetworkPolicyIPTablesChains {
+ writeLine(iptablesData, iptables.MakeChainLine(chain))
+ }
+
writeLine(iptablesData, []string{
"-A", antreaForwardChain,
"-m", "comment", "--comment", `"Antrea: accept packets from local Pods"`,
@@ -709,6 +802,11 @@ func (c *Client) restoreIptablesData(podCIDR *net.IPNet,
"-j", iptables.AcceptTarget,
}...)
}
+ for _, chain := range nodeNetworkPolicyIPTablesChains {
+ for _, rule := range nodeNetworkPolicyIPTables[chain] {
+ writeLine(iptablesData, rule)
+ }
+ }
writeLine(iptablesData, "COMMIT")
writeLine(iptablesData, "*nat")
@@ -878,6 +976,88 @@ func (c *Client) initServiceIPRoutes() error {
return nil
}
+func (c *Client) initNodeNetworkPolicy() {
+ var ipProtocols []iptables.Protocol
+ if c.networkConfig.IPv4Enabled {
+ ipProtocols = append(ipProtocols, iptables.ProtocolIPv4)
+ }
+ if c.networkConfig.IPv6Enabled {
+ ipProtocols = append(ipProtocols, iptables.ProtocolIPv6)
+ }
+
+ antreaInputChainRules := []string{
+ iptables.NewRuleBuilder(antreaInputChain).
+ SetComment("Antrea: jump to static ingress NodeNetworkPolicy rules").
+ SetTarget(preNodeNetworkPolicyIngressRulesChain).
+ Done().
+ GetRule(),
+ iptables.NewRuleBuilder(antreaInputChain).
+ SetComment("Antrea: jump to ingress NodeNetworkPolicy rules").
+ SetTarget(config.NodeNetworkPolicyIngressRulesChain).
+ Done().
+ GetRule(),
+ }
+ antreaOutputChainRules := []string{
+ iptables.NewRuleBuilder(antreaOutputChain).
+ SetComment("Antrea: jump to static egress NodeNetworkPolicy rules").
+ SetTarget(preNodeNetworkPolicyEgressRulesChain).
+ Done().
+ GetRule(),
+ iptables.NewRuleBuilder(antreaOutputChain).
+ SetComment("Antrea: jump to egress NodeNetworkPolicy rules").
+ SetTarget(config.NodeNetworkPolicyEgressRulesChain).
+ Done().
+ GetRule(),
+ }
+ preIngressChainRules := []string{
+ iptables.NewRuleBuilder(preNodeNetworkPolicyIngressRulesChain).
+ MatchEstablishedOrRelated().
+ SetComment("Antrea: allow ingress established or related packets").
+ SetTarget(iptables.AcceptTarget).
+ Done().
+ GetRule(),
+ iptables.NewRuleBuilder(preNodeNetworkPolicyIngressRulesChain).
+ MatchInputInterface("lo").
+ SetComment("Antrea: allow ingress packets from loopback").
+ SetTarget(iptables.AcceptTarget).
+ Done().
+ GetRule(),
+ }
+ preEgressChainRules := []string{
+ iptables.NewRuleBuilder(preNodeNetworkPolicyEgressRulesChain).
+ MatchEstablishedOrRelated().
+ SetComment("Antrea: allow egress established or related packets").
+ SetTarget(iptables.AcceptTarget).
+ Done().
+ GetRule(),
+ iptables.NewRuleBuilder(preNodeNetworkPolicyEgressRulesChain).
+ MatchOutputInterface("lo").
+ SetComment("Antrea: allow egress packets to loopback").
+ SetTarget(iptables.AcceptTarget).
+ Done().
+ GetRule(),
+ }
+
+ for _, ipProtocol := range ipProtocols {
+ if ipProtocol == iptables.ProtocolIPv6 {
+ c.nodeNetworkPolicyIPTablesIPv6.Store(antreaInputChain, antreaInputChainRules)
+ c.nodeNetworkPolicyIPTablesIPv6.Store(antreaOutputChain, antreaOutputChainRules)
+ c.nodeNetworkPolicyIPTablesIPv6.Store(preNodeNetworkPolicyIngressRulesChain, preIngressChainRules)
+ c.nodeNetworkPolicyIPTablesIPv6.Store(preNodeNetworkPolicyEgressRulesChain, preEgressChainRules)
+ c.nodeNetworkPolicyIPTablesIPv6.Store(config.NodeNetworkPolicyIngressRulesChain, []string{})
+ c.nodeNetworkPolicyIPTablesIPv6.Store(config.NodeNetworkPolicyEgressRulesChain, []string{})
+ }
+ if ipProtocol == iptables.ProtocolIPv4 {
+ c.nodeNetworkPolicyIPTablesIPv4.Store(antreaInputChain, antreaInputChainRules)
+ c.nodeNetworkPolicyIPTablesIPv4.Store(antreaOutputChain, antreaOutputChainRules)
+ c.nodeNetworkPolicyIPTablesIPv4.Store(preNodeNetworkPolicyIngressRulesChain, preIngressChainRules)
+ c.nodeNetworkPolicyIPTablesIPv4.Store(preNodeNetworkPolicyEgressRulesChain, preEgressChainRules)
+ c.nodeNetworkPolicyIPTablesIPv4.Store(config.NodeNetworkPolicyIngressRulesChain, []string{})
+ c.nodeNetworkPolicyIPTablesIPv4.Store(config.NodeNetworkPolicyEgressRulesChain, []string{})
+ }
+ }
+}
+
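As a concrete reference, a fragment (in the route package context) of what the first pre-ingress builder chain above renders to; the expected string matches the unit-test data later in this change, so the flag ordering shown is what the builder actually emits.

```go
rule := iptables.NewRuleBuilder(preNodeNetworkPolicyIngressRulesChain).
	MatchEstablishedOrRelated().
	SetComment("Antrea: allow ingress established or related packets").
	SetTarget(iptables.AcceptTarget).
	Done().
	GetRule()
// rule renders to:
// -A ANTREA-POL-PRE-INGRESS-RULES -m conntrack --ctstate ESTABLISHED,RELATED -m comment --comment "Antrea: allow ingress established or related packets" -j ACCEPT
_ = rule
```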
// Reconcile removes orphaned podCIDRs from ipset and removes routes to orphaned podCIDRs
// based on the desired podCIDRs.
func (c *Client) Reconcile(podCIDRs []string) error {
@@ -1829,3 +2009,102 @@ func generateNeigh(ip net.IP, linkIndex int) *netlink.Neigh {
HardwareAddr: globalVMAC,
}
}
+
+func (c *Client) AddOrUpdateNodeNetworkPolicyIPSet(ipsetName string, ipsetEntries sets.Set[string], isIPv6 bool) error {
+ var prevIPSetEntries sets.Set[string]
+ if isIPv6 {
+ if value, ok := c.nodeNetworkPolicyIPSetsIPv6.Load(ipsetName); ok {
+ prevIPSetEntries = value.(sets.Set[string])
+ }
+ } else {
+ if value, ok := c.nodeNetworkPolicyIPSetsIPv4.Load(ipsetName); ok {
+ prevIPSetEntries = value.(sets.Set[string])
+ }
+ }
+ ipsetEntriesToAdd := ipsetEntries.Difference(prevIPSetEntries)
+ ipsetEntriesToDelete := prevIPSetEntries.Difference(ipsetEntries)
+
+ if err := c.ipset.CreateIPSet(ipsetName, ipset.HashNet, isIPv6); err != nil {
+ return err
+ }
+ for ipsetEntry := range ipsetEntriesToAdd {
+ if err := c.ipset.AddEntry(ipsetName, ipsetEntry); err != nil {
+ return err
+ }
+ }
+ for ipsetEntry := range ipsetEntriesToDelete {
+ if err := c.ipset.DelEntry(ipsetName, ipsetEntry); err != nil {
+ return err
+ }
+ }
+
+ if isIPv6 {
+ c.nodeNetworkPolicyIPSetsIPv6.Store(ipsetName, ipsetEntries)
+ } else {
+ c.nodeNetworkPolicyIPSetsIPv4.Store(ipsetName, ipsetEntries)
+ }
+ return nil
+}
+
+func (c *Client) DeleteNodeNetworkPolicyIPSet(ipsetName string, isIPv6 bool) error {
+ if err := c.ipset.DestroyIPSet(ipsetName); err != nil {
+ return err
+ }
+ if isIPv6 {
+ c.nodeNetworkPolicyIPSetsIPv6.Delete(ipsetName)
+ } else {
+ c.nodeNetworkPolicyIPSetsIPv4.Delete(ipsetName)
+ }
+ return nil
+}
+
+func (c *Client) AddOrUpdateNodeNetworkPolicyIPTables(iptablesChains []string, iptablesRules [][]string, isIPv6 bool) error {
+ iptablesData := bytes.NewBuffer(nil)
+
+ writeLine(iptablesData, "*filter")
+ for _, iptablesChain := range iptablesChains {
+ writeLine(iptablesData, iptables.MakeChainLine(iptablesChain))
+ }
+ for _, rules := range iptablesRules {
+ for _, rule := range rules {
+ writeLine(iptablesData, rule)
+ }
+ }
+ writeLine(iptablesData, "COMMIT")
+
+ if err := c.iptables.Restore(iptablesData.String(), false, isIPv6); err != nil {
+ return err
+ }
+
+ for index, iptablesChain := range iptablesChains {
+ if isIPv6 {
+ c.nodeNetworkPolicyIPTablesIPv6.Store(iptablesChain, iptablesRules[index])
+ } else {
+ c.nodeNetworkPolicyIPTablesIPv4.Store(iptablesChain, iptablesRules[index])
+ }
+ }
+ return nil
+}
+
+func (c *Client) DeleteNodeNetworkPolicyIPTables(iptablesChains []string, isIPv6 bool) error {
+ ipProtocol := iptables.ProtocolIPv4
+ if isIPv6 {
+ ipProtocol = iptables.ProtocolIPv6
+ }
+
+ for _, iptablesChain := range iptablesChains {
+ if err := c.iptables.DeleteChain(ipProtocol, iptables.FilterTable, iptablesChain); err != nil {
+ return err
+ }
+ }
+
+ for _, iptablesChain := range iptablesChains {
+ if isIPv6 {
+ c.nodeNetworkPolicyIPTablesIPv6.Delete(iptablesChain)
+ } else {
+ c.nodeNetworkPolicyIPTablesIPv4.Delete(iptablesChain)
+ }
+ }
+
+ return nil
+}
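To make the contract of AddOrUpdateNodeNetworkPolicyIPTables concrete: each call feeds iptables-restore a *filter payload that declares only the chains being updated, so with --noflush those chains are rebuilt atomically while all other chains are left untouched. A fragment, assuming an initialized *Client c; the payload shape matches the unit tests below.

```go
chains := []string{"ANTREA-POL-INGRESS-RULES"}
rules := [][]string{{"-A ANTREA-POL-INGRESS-RULES -p tcp --dport 80 -j ACCEPT"}}
// Generated iptables-restore input:
//   *filter
//   :ANTREA-POL-INGRESS-RULES - [0:0]
//   -A ANTREA-POL-INGRESS-RULES -p tcp --dport 80 -j ACCEPT
//   COMMIT
err := c.AddOrUpdateNodeNetworkPolicyIPTables(chains, rules, false /* isIPv6 */)
_ = err
```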
diff --git a/pkg/agent/route/route_linux_test.go b/pkg/agent/route/route_linux_test.go
index 35e999fd0a4..3a3cc98ed2e 100644
--- a/pkg/agent/route/route_linux_test.go
+++ b/pkg/agent/route/route_linux_test.go
@@ -23,6 +23,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/vishvananda/netlink"
"go.uber.org/mock/gomock"
+ "k8s.io/apimachinery/pkg/util/sets"
"antrea.io/antrea/pkg/agent/config"
servicecidrtest "antrea.io/antrea/pkg/agent/servicecidr/testing"
@@ -32,13 +33,32 @@ import (
"antrea.io/antrea/pkg/agent/util/iptables"
iptablestest "antrea.io/antrea/pkg/agent/util/iptables/testing"
netlinktest "antrea.io/antrea/pkg/agent/util/netlink/testing"
+ agentconfig "antrea.io/antrea/pkg/config/agent"
"antrea.io/antrea/pkg/ovs/openflow"
"antrea.io/antrea/pkg/ovs/ovsconfig"
"antrea.io/antrea/pkg/util/ip"
)
var (
- nodeConfig = &config.NodeConfig{GatewayConfig: &config.GatewayConfig{LinkIndex: 10}}
+ nodeConfig = &config.NodeConfig{GatewayConfig: &config.GatewayConfig{LinkIndex: 10}}
+ nodeNetworkPolicyConfig = &agentconfig.NodeNetworkPolicyConfig{
+ PrivilegedRules: []agentconfig.PrivilegedRule{
+ {
+ Direction: "ingress",
+ Protocol: "tcp",
+ IPFamilies: "ipv4",
+ CIDR: "192.168.1.0/24",
+ Ports: []string{"22"},
+ Description: "allow ingress IPv4 SSH traffic",
+ },
+ {
+ Direction: "egress",
+ Protocol: "",
+ Ports: []string{"53"},
+ Description: "allow egress DNS traffic",
+ },
+ },
+ }
externalIPv4Addr1 = "1.1.1.1"
externalIPv4Addr2 = "1.1.1.2"
@@ -147,17 +167,20 @@ func TestSyncIPSet(t *testing.T) {
podCIDRv6Str := "2001:ab03:cd04:55ef::/64"
_, podCIDRv6, _ := net.ParseCIDR(podCIDRv6Str)
tests := []struct {
- name string
- proxyAll bool
- multicastEnabled bool
- connectUplinkToBridge bool
- networkConfig *config.NetworkConfig
- nodeConfig *config.NodeConfig
- nodePortsIPv4 []string
- nodePortsIPv6 []string
- clusterNodeIPs map[string]string
- clusterNodeIP6s map[string]string
- expectedCalls func(ipset *ipsettest.MockInterfaceMockRecorder)
+ name string
+ proxyAll bool
+ multicastEnabled bool
+ connectUplinkToBridge bool
+ nodeNetworkPolicyEnabled bool
+ networkConfig *config.NetworkConfig
+ nodeConfig *config.NodeConfig
+ nodePortsIPv4 []string
+ nodePortsIPv6 []string
+ clusterNodeIPs map[string]string
+ clusterNodeIP6s map[string]string
+ nodeNetworkPolicyIPSetsIPv4 map[string]sets.Set[string]
+ nodeNetworkPolicyIPSetsIPv6 map[string]sets.Set[string]
+ expectedCalls func(ipset *ipsettest.MockInterfaceMockRecorder)
}{
{
name: "networkPolicyOnly",
@@ -185,9 +208,10 @@ func TestSyncIPSet(t *testing.T) {
},
},
{
- name: "encap, proxyAll=true, multicastEnabled=true",
- proxyAll: true,
- multicastEnabled: true,
+ name: "encap, proxyAll=true, multicastEnabled=true, nodeNetworkPolicy=true",
+ proxyAll: true,
+ multicastEnabled: true,
+ nodeNetworkPolicyEnabled: true,
networkConfig: &config.NetworkConfig{
TrafficEncapMode: config.TrafficEncapModeEncap,
IPv4Enabled: true,
@@ -197,10 +221,12 @@ func TestSyncIPSet(t *testing.T) {
PodIPv4CIDR: podCIDR,
PodIPv6CIDR: podCIDRv6,
},
- nodePortsIPv4: []string{"192.168.0.2,tcp:10000", "127.0.0.1,tcp:10000"},
- nodePortsIPv6: []string{"fe80::e643:4bff:fe44:ee,tcp:10000", "::1,tcp:10000"},
- clusterNodeIPs: map[string]string{"172.16.3.0/24": "192.168.0.3", "172.16.4.0/24": "192.168.0.4"},
- clusterNodeIP6s: map[string]string{"2001:ab03:cd04:5503::/64": "fe80::e643:4bff:fe03", "2001:ab03:cd04:5504::/64": "fe80::e643:4bff:fe04"},
+ nodePortsIPv4: []string{"192.168.0.2,tcp:10000", "127.0.0.1,tcp:10000"},
+ nodePortsIPv6: []string{"fe80::e643:4bff:fe44:ee,tcp:10000", "::1,tcp:10000"},
+ clusterNodeIPs: map[string]string{"172.16.3.0/24": "192.168.0.3", "172.16.4.0/24": "192.168.0.4"},
+ clusterNodeIP6s: map[string]string{"2001:ab03:cd04:5503::/64": "fe80::e643:4bff:fe03", "2001:ab03:cd04:5504::/64": "fe80::e643:4bff:fe04"},
+ nodeNetworkPolicyIPSetsIPv4: map[string]sets.Set[string]{"ANTREA-POL-RULE1-4": sets.New[string]("1.1.1.1/32", "2.2.2.2/32")},
+ nodeNetworkPolicyIPSetsIPv6: map[string]sets.Set[string]{"ANTREA-POL-RULE1-6": sets.New[string]("fec0::1111/128", "fec0::2222/128")},
expectedCalls: func(mockIPSet *ipsettest.MockInterfaceMockRecorder) {
mockIPSet.CreateIPSet(antreaPodIPSet, ipset.HashNet, false)
mockIPSet.CreateIPSet(antreaPodIP6Set, ipset.HashNet, true)
@@ -218,6 +244,12 @@ func TestSyncIPSet(t *testing.T) {
mockIPSet.AddEntry(clusterNodeIPSet, "192.168.0.4")
mockIPSet.AddEntry(clusterNodeIP6Set, "fe80::e643:4bff:fe03")
mockIPSet.AddEntry(clusterNodeIP6Set, "fe80::e643:4bff:fe04")
+ mockIPSet.CreateIPSet("ANTREA-POL-RULE1-4", ipset.HashNet, false)
+ mockIPSet.CreateIPSet("ANTREA-POL-RULE1-6", ipset.HashNet, true)
+ mockIPSet.AddEntry("ANTREA-POL-RULE1-4", "1.1.1.1/32")
+ mockIPSet.AddEntry("ANTREA-POL-RULE1-4", "2.2.2.2/32")
+ mockIPSet.AddEntry("ANTREA-POL-RULE1-6", "fec0::1111/128")
+ mockIPSet.AddEntry("ANTREA-POL-RULE1-6", "fec0::2222/128")
},
},
{
@@ -247,15 +279,16 @@ func TestSyncIPSet(t *testing.T) {
ctrl := gomock.NewController(t)
ipset := ipsettest.NewMockInterface(ctrl)
c := &Client{ipset: ipset,
- networkConfig: tt.networkConfig,
- nodeConfig: tt.nodeConfig,
- proxyAll: tt.proxyAll,
- multicastEnabled: tt.multicastEnabled,
- connectUplinkToBridge: tt.connectUplinkToBridge,
- nodePortsIPv4: sync.Map{},
- nodePortsIPv6: sync.Map{},
- clusterNodeIPs: sync.Map{},
- clusterNodeIP6s: sync.Map{},
+ networkConfig: tt.networkConfig,
+ nodeConfig: tt.nodeConfig,
+ proxyAll: tt.proxyAll,
+ multicastEnabled: tt.multicastEnabled,
+ connectUplinkToBridge: tt.connectUplinkToBridge,
+ nodeNetworkPolicyEnabled: tt.nodeNetworkPolicyEnabled,
+ nodePortsIPv4: sync.Map{},
+ nodePortsIPv6: sync.Map{},
+ clusterNodeIPs: sync.Map{},
+ clusterNodeIP6s: sync.Map{},
}
for _, nodePortIPv4 := range tt.nodePortsIPv4 {
c.nodePortsIPv4.Store(nodePortIPv4, struct{}{})
@@ -269,6 +302,12 @@ func TestSyncIPSet(t *testing.T) {
for cidr, nodeIP := range tt.clusterNodeIP6s {
c.clusterNodeIP6s.Store(cidr, nodeIP)
}
+ for set, ips := range tt.nodeNetworkPolicyIPSetsIPv4 {
+ c.nodeNetworkPolicyIPSetsIPv4.Store(set, ips)
+ }
+ for set, ips := range tt.nodeNetworkPolicyIPSetsIPv6 {
+ c.nodeNetworkPolicyIPSetsIPv6.Store(set, ips)
+ }
tt.expectedCalls(ipset.EXPECT())
assert.NoError(t, c.syncIPSet())
})
@@ -277,22 +316,26 @@ func TestSyncIPSet(t *testing.T) {
func TestSyncIPTables(t *testing.T) {
tests := []struct {
- name string
- isCloudEKS bool
- proxyAll bool
- multicastEnabled bool
- connectUplinkToBridge bool
- networkConfig *config.NetworkConfig
- nodeConfig *config.NodeConfig
- nodePortsIPv4 []string
- nodePortsIPv6 []string
- markToSNATIP map[uint32]string
- expectedCalls func(iptables *iptablestest.MockInterfaceMockRecorder)
+ name string
+ isCloudEKS bool
+ proxyAll bool
+ multicastEnabled bool
+ connectUplinkToBridge bool
+ nodeNetworkPolicyEnabled bool
+ nodeNetworkPolicyConfig *agentconfig.NodeNetworkPolicyConfig
+ networkConfig *config.NetworkConfig
+ nodeConfig *config.NodeConfig
+ nodePortsIPv4 []string
+ nodePortsIPv6 []string
+ markToSNATIP map[uint32]string
+ expectedCalls func(iptables *iptablestest.MockInterfaceMockRecorder)
}{
{
- name: "encap,egress=true,multicastEnabled=true,proxyAll=true",
- proxyAll: true,
- multicastEnabled: true,
+ name: "encap,egress=true,multicastEnabled=true,proxyAll=true,nodeNetworkPolicy=true",
+ proxyAll: true,
+ multicastEnabled: true,
+ nodeNetworkPolicyEnabled: true,
+ nodeNetworkPolicyConfig: nodeNetworkPolicyConfig,
networkConfig: &config.NetworkConfig{
TrafficEncapMode: config.TrafficEncapModeEncap,
TunnelType: ovsconfig.GeneveTunnel,
@@ -327,6 +370,10 @@ func TestSyncIPTables(t *testing.T) {
mockIPTables.AppendRule(iptables.ProtocolDual, iptables.NATTable, iptables.PreRoutingChain, []string{"-j", antreaPreRoutingChain, "-m", "comment", "--comment", "Antrea: jump to Antrea prerouting rules"})
mockIPTables.EnsureChain(iptables.ProtocolDual, iptables.NATTable, antreaOutputChain)
mockIPTables.AppendRule(iptables.ProtocolDual, iptables.NATTable, iptables.OutputChain, []string{"-j", antreaOutputChain, "-m", "comment", "--comment", "Antrea: jump to Antrea output rules"})
+ mockIPTables.EnsureChain(iptables.ProtocolDual, iptables.FilterTable, antreaInputChain)
+ mockIPTables.AppendRule(iptables.ProtocolDual, iptables.FilterTable, iptables.InputChain, []string{"-j", antreaInputChain, "-m", "comment", "--comment", "Antrea: jump to Antrea input rules"})
+ mockIPTables.EnsureChain(iptables.ProtocolDual, iptables.FilterTable, antreaOutputChain)
+ mockIPTables.AppendRule(iptables.ProtocolDual, iptables.FilterTable, iptables.OutputChain, []string{"-j", antreaOutputChain, "-m", "comment", "--comment", "Antrea: jump to Antrea output rules"})
mockIPTables.Restore(`*raw
:ANTREA-PREROUTING - [0:0]
:ANTREA-OUTPUT - [0:0]
@@ -341,8 +388,23 @@ COMMIT
COMMIT
*filter
:ANTREA-FORWARD - [0:0]
+:ANTREA-INPUT - [0:0]
+:ANTREA-OUTPUT - [0:0]
+:ANTREA-POL-EGRESS-RULES - [0:0]
+:ANTREA-POL-INGRESS-RULES - [0:0]
+:ANTREA-POL-PRE-EGRESS-RULES - [0:0]
+:ANTREA-POL-PRE-INGRESS-RULES - [0:0]
-A ANTREA-FORWARD -m comment --comment "Antrea: accept packets from local Pods" -i antrea-gw0 -j ACCEPT
-A ANTREA-FORWARD -m comment --comment "Antrea: accept packets to local Pods" -o antrea-gw0 -j ACCEPT
+-A ANTREA-INPUT -m comment --comment "Antrea: jump to static ingress NodeNetworkPolicy rules" -j ANTREA-POL-PRE-INGRESS-RULES
+-A ANTREA-INPUT -m comment --comment "Antrea: jump to ingress NodeNetworkPolicy rules" -j ANTREA-POL-INGRESS-RULES
+-A ANTREA-OUTPUT -m comment --comment "Antrea: jump to static egress NodeNetworkPolicy rules" -j ANTREA-POL-PRE-EGRESS-RULES
+-A ANTREA-OUTPUT -m comment --comment "Antrea: jump to egress NodeNetworkPolicy rules" -j ANTREA-POL-EGRESS-RULES
+-A ANTREA-POL-INGRESS-RULES -j ACCEPT -m comment --comment "mock rule"
+-A ANTREA-POL-PRE-EGRESS-RULES -m conntrack --ctstate ESTABLISHED,RELATED -m comment --comment "Antrea: allow egress established or related packets" -j ACCEPT
+-A ANTREA-POL-PRE-EGRESS-RULES -o lo -m comment --comment "Antrea: allow egress packets to loopback" -j ACCEPT
+-A ANTREA-POL-PRE-INGRESS-RULES -m conntrack --ctstate ESTABLISHED,RELATED -m comment --comment "Antrea: allow ingress established or related packets" -j ACCEPT
+-A ANTREA-POL-PRE-INGRESS-RULES -i lo -m comment --comment "Antrea: allow ingress packets from loopback" -j ACCEPT
COMMIT
*nat
:ANTREA-PREROUTING - [0:0]
@@ -370,8 +432,23 @@ COMMIT
COMMIT
*filter
:ANTREA-FORWARD - [0:0]
+:ANTREA-INPUT - [0:0]
+:ANTREA-OUTPUT - [0:0]
+:ANTREA-POL-EGRESS-RULES - [0:0]
+:ANTREA-POL-INGRESS-RULES - [0:0]
+:ANTREA-POL-PRE-EGRESS-RULES - [0:0]
+:ANTREA-POL-PRE-INGRESS-RULES - [0:0]
-A ANTREA-FORWARD -m comment --comment "Antrea: accept packets from local Pods" -i antrea-gw0 -j ACCEPT
-A ANTREA-FORWARD -m comment --comment "Antrea: accept packets to local Pods" -o antrea-gw0 -j ACCEPT
+-A ANTREA-INPUT -m comment --comment "Antrea: jump to static ingress NodeNetworkPolicy rules" -j ANTREA-POL-PRE-INGRESS-RULES
+-A ANTREA-INPUT -m comment --comment "Antrea: jump to ingress NodeNetworkPolicy rules" -j ANTREA-POL-INGRESS-RULES
+-A ANTREA-OUTPUT -m comment --comment "Antrea: jump to static egress NodeNetworkPolicy rules" -j ANTREA-POL-PRE-EGRESS-RULES
+-A ANTREA-OUTPUT -m comment --comment "Antrea: jump to egress NodeNetworkPolicy rules" -j ANTREA-POL-EGRESS-RULES
+-A ANTREA-POL-INGRESS-RULES -j ACCEPT -m comment --comment "mock rule"
+-A ANTREA-POL-PRE-EGRESS-RULES -m conntrack --ctstate ESTABLISHED,RELATED -m comment --comment "Antrea: allow egress established or related packets" -j ACCEPT
+-A ANTREA-POL-PRE-EGRESS-RULES -o lo -m comment --comment "Antrea: allow egress packets to loopback" -j ACCEPT
+-A ANTREA-POL-PRE-INGRESS-RULES -m conntrack --ctstate ESTABLISHED,RELATED -m comment --comment "Antrea: allow ingress established or related packets" -j ACCEPT
+-A ANTREA-POL-PRE-INGRESS-RULES -i lo -m comment --comment "Antrea: allow ingress packets from loopback" -j ACCEPT
COMMIT
*nat
:ANTREA-PREROUTING - [0:0]
@@ -527,17 +604,25 @@ COMMIT
ctrl := gomock.NewController(t)
mockIPTables := iptablestest.NewMockInterface(ctrl)
c := &Client{iptables: mockIPTables,
- networkConfig: tt.networkConfig,
- nodeConfig: tt.nodeConfig,
- proxyAll: tt.proxyAll,
- isCloudEKS: tt.isCloudEKS,
- multicastEnabled: tt.multicastEnabled,
- connectUplinkToBridge: tt.connectUplinkToBridge,
- markToSNATIP: sync.Map{},
+ networkConfig: tt.networkConfig,
+ nodeConfig: tt.nodeConfig,
+ proxyAll: tt.proxyAll,
+ isCloudEKS: tt.isCloudEKS,
+ multicastEnabled: tt.multicastEnabled,
+ connectUplinkToBridge: tt.connectUplinkToBridge,
+ nodeNetworkPolicyEnabled: tt.nodeNetworkPolicyEnabled,
+ nodeNetworkPolicyIPTablesDeterministic: true,
}
for mark, snatIP := range tt.markToSNATIP {
c.markToSNATIP.Store(mark, net.ParseIP(snatIP))
}
+ if tt.nodeNetworkPolicyEnabled {
+ c.initNodeNetworkPolicy()
+ c.nodeNetworkPolicyIPTablesIPv4.Store(config.NodeNetworkPolicyIngressRulesChain, []string{
+ `-A ANTREA-POL-INGRESS-RULES -j ACCEPT -m comment --comment "mock rule"`})
+ c.nodeNetworkPolicyIPTablesIPv6.Store(config.NodeNetworkPolicyIngressRulesChain, []string{
+ `-A ANTREA-POL-INGRESS-RULES -j ACCEPT -m comment --comment "mock rule"`})
+ }
tt.expectedCalls(mockIPTables.EXPECT())
assert.NoError(t, c.syncIPTables())
})
@@ -1871,3 +1956,225 @@ func TestEgressRule(t *testing.T) {
})
}
}
+
+func TestAddAndDeleteNodeNetworkPolicyIPSet(t *testing.T) {
+ ipv4SetName := "TEST-IPSET-4"
+ ipv4Net1 := "1.1.1.1/32"
+ ipv4Net2 := "2.2.2.2/32"
+ ipv4Net3 := "3.3.3.3/32"
+ ipv6SetName := "TEST-IPSET-6"
+ ipv6Net1 := "fec0::1111/128"
+ ipv6Net2 := "fec0::2222/128"
+ ipv6Net3 := "fec0::3333/128"
+
+ tests := []struct {
+ name string
+ ipsetName string
+ prevIPSetEntries sets.Set[string]
+ curIPSetEntries sets.Set[string]
+ isIPv6 bool
+ expectedCalls func(mockIPSet *ipsettest.MockInterfaceMockRecorder)
+ }{
+ {
+ name: "IPv4, add an ipset and delete it",
+ ipsetName: ipv4SetName,
+ curIPSetEntries: sets.New[string](ipv4Net1, ipv4Net3),
+ isIPv6: false,
+ expectedCalls: func(mockIPSet *ipsettest.MockInterfaceMockRecorder) {
+ mockIPSet.CreateIPSet(ipv4SetName, ipset.HashNet, false).Times(1)
+ mockIPSet.AddEntry(ipv4SetName, ipv4Net1).Times(1)
+ mockIPSet.AddEntry(ipv4SetName, ipv4Net3).Times(1)
+ mockIPSet.DestroyIPSet(ipv4SetName).Times(1)
+ },
+ },
+ {
+ name: "IPv4, update an ipset and delete it",
+ ipsetName: ipv4SetName,
+ prevIPSetEntries: sets.New[string](ipv4Net1, ipv4Net2),
+ curIPSetEntries: sets.New[string](ipv4Net1, ipv4Net3),
+ isIPv6: false,
+ expectedCalls: func(mockIPSet *ipsettest.MockInterfaceMockRecorder) {
+ mockIPSet.CreateIPSet(ipv4SetName, ipset.HashNet, false).Times(1)
+ mockIPSet.AddEntry(ipv4SetName, ipv4Net3).Times(1)
+ mockIPSet.DelEntry(ipv4SetName, ipv4Net2).Times(1)
+ mockIPSet.DestroyIPSet(ipv4SetName).Times(1)
+ },
+ },
+ {
+ name: "IPv6, add an ipset and delete it",
+ ipsetName: ipv6SetName,
+ curIPSetEntries: sets.New[string](ipv6Net1, ipv6Net3),
+ isIPv6: true,
+ expectedCalls: func(mockIPSet *ipsettest.MockInterfaceMockRecorder) {
+ mockIPSet.CreateIPSet(ipv6SetName, ipset.HashNet, true).Times(1)
+ mockIPSet.AddEntry(ipv6SetName, ipv6Net1).Times(1)
+ mockIPSet.AddEntry(ipv6SetName, ipv6Net3).Times(1)
+ mockIPSet.DestroyIPSet(ipv6SetName).Times(1)
+ },
+ },
+ {
+ name: "IPv6, update an ipset and delete it",
+ ipsetName: ipv6SetName,
+ prevIPSetEntries: sets.New[string](ipv6Net1, ipv6Net2),
+ curIPSetEntries: sets.New[string](ipv6Net1, ipv6Net3),
+ isIPv6: true,
+ expectedCalls: func(mockIPSet *ipsettest.MockInterfaceMockRecorder) {
+ mockIPSet.CreateIPSet(ipv6SetName, ipset.HashNet, true).Times(1)
+ mockIPSet.AddEntry(ipv6SetName, ipv6Net3).Times(1)
+ mockIPSet.DelEntry(ipv6SetName, ipv6Net2).Times(1)
+ mockIPSet.DestroyIPSet(ipv6SetName).Times(1)
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ mockIPSet := ipsettest.NewMockInterface(ctrl)
+ c := &Client{ipset: mockIPSet}
+ tt.expectedCalls(mockIPSet.EXPECT())
+
+ if tt.prevIPSetEntries != nil {
+ if tt.isIPv6 {
+ c.nodeNetworkPolicyIPSetsIPv6.Store(tt.ipsetName, tt.prevIPSetEntries)
+ } else {
+ c.nodeNetworkPolicyIPSetsIPv4.Store(tt.ipsetName, tt.prevIPSetEntries)
+ }
+ }
+
+ assert.NoError(t, c.AddOrUpdateNodeNetworkPolicyIPSet(tt.ipsetName, tt.curIPSetEntries, tt.isIPv6))
+ var exists bool
+ if tt.isIPv6 {
+ _, exists = c.nodeNetworkPolicyIPSetsIPv6.Load(tt.ipsetName)
+ } else {
+ _, exists = c.nodeNetworkPolicyIPSetsIPv4.Load(tt.ipsetName)
+ }
+ assert.True(t, exists)
+
+ assert.NoError(t, c.DeleteNodeNetworkPolicyIPSet(tt.ipsetName, tt.isIPv6))
+ if tt.isIPv6 {
+ _, exists = c.nodeNetworkPolicyIPSetsIPv6.Load(tt.ipsetName)
+ } else {
+ _, exists = c.nodeNetworkPolicyIPSetsIPv4.Load(tt.ipsetName)
+ }
+ assert.False(t, exists)
+ })
+ }
+}
+
+func TestAddAndDeleteNodeNetworkPolicyIPTables(t *testing.T) {
+ ingressChain := config.NodeNetworkPolicyIngressRulesChain
+ ingressRules := []string{
+ "-A ANTREA-POL-INGRESS-RULES -p tcp --dport 80 -j ACCEPT",
+ }
+ svcChain := "ANTREA-POL-12619C0214FB0845"
+ svcRules := []string{
+ "-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 80 -j ACCEPT",
+ "-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 443 -j ACCEPT",
+ }
+
+ tests := []struct {
+ name string
+ isIPv6 bool
+ expectedCalls func(mockIPTables *iptablestest.MockInterfaceMockRecorder)
+ expectedRules map[string][]string
+ }{
+ {
+ name: "IPv4",
+ isIPv6: false,
+ expectedCalls: func(mockIPTables *iptablestest.MockInterfaceMockRecorder) {
+ mockIPTables.Restore(`*filter
+:ANTREA-POL-INGRESS-RULES - [0:0]
+-A ANTREA-POL-INGRESS-RULES -p tcp --dport 80 -j ACCEPT
+COMMIT
+`, false, false)
+ mockIPTables.Restore(`*filter
+:ANTREA-POL-12619C0214FB0845 - [0:0]
+-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 80 -j ACCEPT
+-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 443 -j ACCEPT
+COMMIT
+`, false, false)
+ mockIPTables.DeleteChain(iptables.ProtocolIPv4, iptables.FilterTable, svcChain).Times(1)
+ mockIPTables.Restore(`*filter
+:ANTREA-POL-INGRESS-RULES - [0:0]
+COMMIT
+`, false, false)
+ },
+ },
+
+ {
+ name: "IPv6",
+ isIPv6: true,
+ expectedCalls: func(mockIPTables *iptablestest.MockInterfaceMockRecorder) {
+ mockIPTables.Restore(`*filter
+:ANTREA-POL-INGRESS-RULES - [0:0]
+-A ANTREA-POL-INGRESS-RULES -p tcp --dport 80 -j ACCEPT
+COMMIT
+`, false, true)
+ mockIPTables.Restore(`*filter
+:ANTREA-POL-12619C0214FB0845 - [0:0]
+-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 80 -j ACCEPT
+-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 443 -j ACCEPT
+COMMIT
+`, false, true)
+ mockIPTables.DeleteChain(iptables.ProtocolIPv6, iptables.FilterTable, svcChain).Times(1)
+ mockIPTables.Restore(`*filter
+:ANTREA-POL-INGRESS-RULES - [0:0]
+COMMIT
+`, false, true)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ mockIPTables := iptablestest.NewMockInterface(ctrl)
+ c := &Client{iptables: mockIPTables,
+ networkConfig: &config.NetworkConfig{
+ IPv4Enabled: true,
+ IPv6Enabled: true,
+ },
+ }
+ c.initNodeNetworkPolicy()
+
+ tt.expectedCalls(mockIPTables.EXPECT())
+
+ assert.NoError(t, c.AddOrUpdateNodeNetworkPolicyIPTables([]string{ingressChain}, [][]string{ingressRules}, tt.isIPv6))
+ var gotRules any
+ var exists bool
+ if tt.isIPv6 {
+ gotRules, exists = c.nodeNetworkPolicyIPTablesIPv6.Load(ingressChain)
+ } else {
+ gotRules, exists = c.nodeNetworkPolicyIPTablesIPv4.Load(ingressChain)
+ }
+ assert.True(t, exists)
+ assert.EqualValues(t, ingressRules, gotRules)
+
+ assert.NoError(t, c.AddOrUpdateNodeNetworkPolicyIPTables([]string{svcChain}, [][]string{svcRules}, tt.isIPv6))
+ if tt.isIPv6 {
+ gotRules, exists = c.nodeNetworkPolicyIPTablesIPv6.Load(svcChain)
+ } else {
+ gotRules, exists = c.nodeNetworkPolicyIPTablesIPv4.Load(svcChain)
+ }
+ assert.True(t, exists)
+ assert.EqualValues(t, svcRules, gotRules)
+
+ assert.NoError(t, c.DeleteNodeNetworkPolicyIPTables([]string{svcChain}, tt.isIPv6))
+ if tt.isIPv6 {
+ _, exists = c.nodeNetworkPolicyIPTablesIPv6.Load(svcChain)
+ } else {
+ _, exists = c.nodeNetworkPolicyIPTablesIPv4.Load(svcChain)
+ }
+ assert.False(t, exists)
+
+ assert.NoError(t, c.AddOrUpdateNodeNetworkPolicyIPTables([]string{ingressChain}, [][]string{nil}, tt.isIPv6))
+ if tt.isIPv6 {
+ gotRules, exists = c.nodeNetworkPolicyIPTablesIPv6.Load(ingressChain)
+ } else {
+ gotRules, exists = c.nodeNetworkPolicyIPTablesIPv4.Load(ingressChain)
+ }
+ assert.True(t, exists)
+ assert.EqualValues(t, []string(nil), gotRules)
+ })
+ }
+}
diff --git a/pkg/agent/route/route_windows.go b/pkg/agent/route/route_windows.go
index e2d5fb7b977..9c279a9c65c 100644
--- a/pkg/agent/route/route_windows.go
+++ b/pkg/agent/route/route_windows.go
@@ -71,7 +71,13 @@ type Client struct {
}
// NewClient returns a route client.
-func NewClient(networkConfig *config.NetworkConfig, noSNAT, proxyAll, connectUplinkToBridge, multicastEnabled bool, serviceCIDRProvider servicecidr.Interface) (*Client, error) {
+func NewClient(networkConfig *config.NetworkConfig,
+ noSNAT bool,
+ proxyAll bool,
+ connectUplinkToBridge bool,
+ nodeNetworkPolicyEnabled bool,
+ multicastEnabled bool,
+ serviceCIDRProvider servicecidr.Interface) (*Client, error) {
return &Client{
networkConfig: networkConfig,
nodeRoutes: &sync.Map{},
@@ -593,3 +599,19 @@ func (c *Client) AddEgressRule(tableID uint32, mark uint32) error {
func (c *Client) DeleteEgressRule(tableID uint32, mark uint32) error {
return errors.New("DeleteEgressRule is not implemented on Windows")
}
+
+func (c *Client) AddOrUpdateNodeNetworkPolicyIPSet(ipsetName string, ipsetEntries sets.Set[string], isIPv6 bool) error {
+ return errors.New("AddOrUpdateNodeNetworkPolicyIPSet is not implemented on Windows")
+}
+
+func (c *Client) DeleteNodeNetworkPolicyIPSet(ipsetName string, isIPv6 bool) error {
+ return errors.New("DeleteNodeNetworkPolicyIPSet is not implemented on Windows")
+}
+
+func (c *Client) AddOrUpdateNodeNetworkPolicyIPTables(iptablesChains []string, iptablesRules [][]string, isIPv6 bool) error {
+ return errors.New("AddOrUpdateNodeNetworkPolicyIPTables is not implemented on Windows")
+}
+
+func (c *Client) DeleteNodeNetworkPolicyIPTables(iptablesChains []string, isIPv6 bool) error {
+ return errors.New("DeleteNodeNetworkPolicyIPTables is not implemented on Windows")
+}
diff --git a/pkg/agent/route/route_windows_test.go b/pkg/agent/route/route_windows_test.go
index c29afd469dc..516e019838b 100644
--- a/pkg/agent/route/route_windows_test.go
+++ b/pkg/agent/route/route_windows_test.go
@@ -61,7 +61,7 @@ func TestRouteOperation(t *testing.T) {
gwIP2 := net.ParseIP("192.168.3.1")
_, destCIDR2, _ := net.ParseCIDR(dest2)
- client, err := NewClient(&config.NetworkConfig{}, true, false, false, false, nil)
+ client, err := NewClient(&config.NetworkConfig{}, true, false, false, false, false, nil)
require.Nil(t, err)
called := false
diff --git a/pkg/agent/route/testing/mock_route.go b/pkg/agent/route/testing/mock_route.go
index d6d8a725617..6153d2f3d0c 100644
--- a/pkg/agent/route/testing/mock_route.go
+++ b/pkg/agent/route/testing/mock_route.go
@@ -30,6 +30,7 @@ import (
config "antrea.io/antrea/pkg/agent/config"
openflow "antrea.io/antrea/pkg/ovs/openflow"
gomock "go.uber.org/mock/gomock"
+ sets "k8s.io/apimachinery/pkg/util/sets"
)
// MockInterface is a mock of Interface interface.
@@ -125,6 +126,34 @@ func (mr *MockInterfaceMockRecorder) AddNodePort(arg0, arg1, arg2 any) *gomock.C
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNodePort", reflect.TypeOf((*MockInterface)(nil).AddNodePort), arg0, arg1, arg2)
}
+// AddOrUpdateNodeNetworkPolicyIPSet mocks base method.
+func (m *MockInterface) AddOrUpdateNodeNetworkPolicyIPSet(arg0 string, arg1 sets.Set[string], arg2 bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddOrUpdateNodeNetworkPolicyIPSet", arg0, arg1, arg2)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AddOrUpdateNodeNetworkPolicyIPSet indicates an expected call of AddOrUpdateNodeNetworkPolicyIPSet.
+func (mr *MockInterfaceMockRecorder) AddOrUpdateNodeNetworkPolicyIPSet(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddOrUpdateNodeNetworkPolicyIPSet", reflect.TypeOf((*MockInterface)(nil).AddOrUpdateNodeNetworkPolicyIPSet), arg0, arg1, arg2)
+}
+
+// AddOrUpdateNodeNetworkPolicyIPTables mocks base method.
+func (m *MockInterface) AddOrUpdateNodeNetworkPolicyIPTables(arg0 []string, arg1 [][]string, arg2 bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddOrUpdateNodeNetworkPolicyIPTables", arg0, arg1, arg2)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AddOrUpdateNodeNetworkPolicyIPTables indicates an expected call of AddOrUpdateNodeNetworkPolicyIPTables.
+func (mr *MockInterfaceMockRecorder) AddOrUpdateNodeNetworkPolicyIPTables(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddOrUpdateNodeNetworkPolicyIPTables", reflect.TypeOf((*MockInterface)(nil).AddOrUpdateNodeNetworkPolicyIPTables), arg0, arg1, arg2)
+}
+
// AddRouteForLink mocks base method.
func (m *MockInterface) AddRouteForLink(arg0 *net.IPNet, arg1 int) error {
m.ctrl.T.Helper()
@@ -237,6 +266,34 @@ func (mr *MockInterfaceMockRecorder) DeleteLocalAntreaFlexibleIPAMPodRule(arg0 a
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLocalAntreaFlexibleIPAMPodRule", reflect.TypeOf((*MockInterface)(nil).DeleteLocalAntreaFlexibleIPAMPodRule), arg0)
}
+// DeleteNodeNetworkPolicyIPSet mocks base method.
+func (m *MockInterface) DeleteNodeNetworkPolicyIPSet(arg0 string, arg1 bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteNodeNetworkPolicyIPSet", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DeleteNodeNetworkPolicyIPSet indicates an expected call of DeleteNodeNetworkPolicyIPSet.
+func (mr *MockInterfaceMockRecorder) DeleteNodeNetworkPolicyIPSet(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodeNetworkPolicyIPSet", reflect.TypeOf((*MockInterface)(nil).DeleteNodeNetworkPolicyIPSet), arg0, arg1)
+}
+
+// DeleteNodeNetworkPolicyIPTables mocks base method.
+func (m *MockInterface) DeleteNodeNetworkPolicyIPTables(arg0 []string, arg1 bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteNodeNetworkPolicyIPTables", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DeleteNodeNetworkPolicyIPTables indicates an expected call of DeleteNodeNetworkPolicyIPTables.
+func (mr *MockInterfaceMockRecorder) DeleteNodeNetworkPolicyIPTables(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodeNetworkPolicyIPTables", reflect.TypeOf((*MockInterface)(nil).DeleteNodeNetworkPolicyIPTables), arg0, arg1)
+}
+
// DeleteNodePort mocks base method.
func (m *MockInterface) DeleteNodePort(arg0 []net.IP, arg1 uint16, arg2 openflow.Protocol) error {
m.ctrl.T.Helper()
diff --git a/pkg/agent/types/networkpolicy.go b/pkg/agent/types/networkpolicy.go
index 7722941cfcb..483c70de108 100644
--- a/pkg/agent/types/networkpolicy.go
+++ b/pkg/agent/types/networkpolicy.go
@@ -15,6 +15,8 @@
package types
import (
+ "k8s.io/apimachinery/pkg/util/sets"
+
"antrea.io/antrea/pkg/apis/controlplane/v1beta2"
secv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1"
binding "antrea.io/antrea/pkg/ovs/openflow"
@@ -75,6 +77,17 @@ type Address interface {
GetValue() interface{}
}
+type NodePolicyRule struct {
+ IPSet string
+ IPSetMembers sets.Set[string]
+ Priority *Priority
+ ServiceIPTChain string
+ ServiceIPTRules []string
+ CoreIPTChain string
+ CoreIPTRule string
+ IsIPv6 bool
+}
+
// PolicyRule groups configurations to set up conjunctive match for egress/ingress policy rules.
type PolicyRule struct {
Direction v1beta2.Direction
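For orientation, a minimal sketch of how a single rule might be expressed with the new NodePolicyRule type. All field values are hypothetical; the chain and ipset names only mirror the naming seen in the tests above, and priority handling is omitted.

```go
package example

import (
	"k8s.io/apimachinery/pkg/util/sets"

	"antrea.io/antrea/pkg/agent/types"
)

// exampleNodePolicyRule builds an illustrative NodePolicyRule; values are
// hypothetical and not taken from this change.
func exampleNodePolicyRule() types.NodePolicyRule {
	return types.NodePolicyRule{
		IPSet:           "ANTREA-POL-RULE1-4", // ipset holding the rule's peer CIDRs
		IPSetMembers:    sets.New[string]("1.1.1.1/32", "2.2.2.2/32"),
		Priority:        nil, // priority handling omitted in this sketch
		ServiceIPTChain: "ANTREA-POL-12619C0214FB0845", // per-rule chain carrying port matches
		ServiceIPTRules: []string{"-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 80 -j ACCEPT"},
		CoreIPTChain:    "ANTREA-POL-INGRESS-RULES", // core chain that jumps to the per-rule chain
		CoreIPTRule:     "-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-RULE1-4 src -j ANTREA-POL-12619C0214FB0845",
		IsIPv6:          false,
	}
}
```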
diff --git a/pkg/agent/util/iptables/builder.go b/pkg/agent/util/iptables/builder.go
new file mode 100644
index 00000000000..a1f8c557865
--- /dev/null
+++ b/pkg/agent/util/iptables/builder.go
@@ -0,0 +1,230 @@
+//go:build !windows
+// +build !windows
+
+// Copyright 2024 Antrea Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package iptables
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+type iptablesRule struct {
+ chain string
+
+ specs *strings.Builder
+}
+
+type iptablesRuleBuilder struct {
+ iptablesRule
+}
+
+// NewRuleBuilder returns a builder that accumulates the match and target specs
+// of a single iptables rule in the given chain.
+func NewRuleBuilder(chain string) IPTablesRuleBuilder {
+ builder := &iptablesRuleBuilder{
+ iptablesRule{
+ chain: chain,
+ specs: &strings.Builder{},
+ },
+ }
+ return builder
+}
+
+func (b *iptablesRuleBuilder) writeSpec(spec string) {
+ b.specs.WriteString(spec)
+ b.specs.WriteByte(' ')
+}
+
+func (b *iptablesRuleBuilder) MatchCIDRSrc(cidr string) IPTablesRuleBuilder {
+ if cidr == "" || cidr == "0.0.0.0/0" || cidr == "::/0" {
+ return b
+ }
+ matchStr := fmt.Sprintf("-s %s", cidr)
+ b.writeSpec(matchStr)
+ return b
+}
+
+func (b *iptablesRuleBuilder) MatchCIDRDst(cidr string) IPTablesRuleBuilder {
+ if cidr == "" || cidr == "0.0.0.0/0" || cidr == "::/0" {
+ return b
+ }
+ matchStr := fmt.Sprintf("-d %s", cidr)
+ b.writeSpec(matchStr)
+ return b
+}
+
+func (b *iptablesRuleBuilder) MatchIPSetSrc(ipset string) IPTablesRuleBuilder {
+ if ipset == "" {
+ return b
+ }
+ matchStr := fmt.Sprintf("-m set --match-set %s src", ipset)
+ b.writeSpec(matchStr)
+ return b
+}
+
+func (b *iptablesRuleBuilder) MatchIPSetDst(ipset string) IPTablesRuleBuilder {
+ if ipset == "" {
+ return b
+ }
+ matchStr := fmt.Sprintf("-m set --match-set %s dst", ipset)
+ b.writeSpec(matchStr)
+ return b
+}
+
+func (b *iptablesRuleBuilder) MatchNoSrc(ipProtocol Protocol) IPTablesRuleBuilder {
+ if ipProtocol == ProtocolIPv4 {
+ b.writeSpec("! -s 0.0.0.0/0")
+ } else if ipProtocol == ProtocolIPv6 {
+ b.writeSpec("! -s ::/0")
+ }
+ return b
+}
+
+func (b *iptablesRuleBuilder) MatchNoDst(ipProtocol Protocol) IPTablesRuleBuilder {
+ if ipProtocol == ProtocolIPv4 {
+ b.writeSpec("! -d 0.0.0.0/0")
+ } else if ipProtocol == ProtocolIPv6 {
+ b.writeSpec("! -d ::/0")
+ }
+ return b
+}
+
+func (b *iptablesRuleBuilder) MatchTransProtocol(protocol string) IPTablesRuleBuilder {
+ if protocol == "" {
+ return b
+ }
+ matchStr := fmt.Sprintf("-p %s", protocol)
+ b.writeSpec(matchStr)
+ return b
+}
+
+func (b *iptablesRuleBuilder) MatchDstPort(port *intstr.IntOrString, endPort *int32) IPTablesRuleBuilder {
+ if port == nil {
+ return b
+ }
+ var matchStr string
+ if endPort != nil {
+ matchStr = fmt.Sprintf("--dport %s:%d", port.String(), *endPort)
+ } else {
+ matchStr = fmt.Sprintf("--dport %s", port.String())
+ }
+ b.writeSpec(matchStr)
+ return b
+}
+
+func (b *iptablesRuleBuilder) MatchSrcPort(port, endPort *int32) IPTablesRuleBuilder {
+ if port == nil {
+ return b
+ }
+ var matchStr string
+ if endPort != nil {
+ matchStr = fmt.Sprintf("--sport %d:%d", *port, *endPort)
+ } else {
+ matchStr = fmt.Sprintf("--sport %d", *port)
+ }
+ b.writeSpec(matchStr)
+ return b
+}
+
+func (b *iptablesRuleBuilder) MatchICMP(icmpType, icmpCode *int32, ipProtocol Protocol) IPTablesRuleBuilder {
+ parts := []string{"-p"}
+ icmpTypeStr := "icmp"
+ if ipProtocol != ProtocolIPv4 {
+ icmpTypeStr = "icmpv6"
+ }
+ parts = append(parts, icmpTypeStr)
+
+ if icmpType != nil {
+ icmpTypeFlag := "--icmp-type"
+ if ipProtocol != ProtocolIPv4 {
+ icmpTypeFlag = "--icmpv6-type"
+ }
+
+ if icmpCode != nil {
+ parts = append(parts, icmpTypeFlag, fmt.Sprintf("%d/%d", *icmpType, *icmpCode))
+ } else {
+ parts = append(parts, icmpTypeFlag, strconv.Itoa(int(*icmpType)))
+ }
+ }
+ b.writeSpec(strings.Join(parts, " "))
+ return b
+}
+
+func (b *iptablesRuleBuilder) MatchEstablishedOrRelated() IPTablesRuleBuilder {
+ b.writeSpec("-m conntrack --ctstate ESTABLISHED,RELATED")
+ return b
+}
+
+func (b *iptablesRuleBuilder) MatchInputInterface(interfaceName string) IPTablesRuleBuilder {
+ if interfaceName == "" {
+ return b
+ }
+ specStr := fmt.Sprintf("-i %s", interfaceName)
+ b.writeSpec(specStr)
+ return b
+}
+
+func (b *iptablesRuleBuilder) MatchOutputInterface(interfaceName string) IPTablesRuleBuilder {
+ if interfaceName == "" {
+ return b
+ }
+ specStr := fmt.Sprintf("-o %s", interfaceName)
+ b.writeSpec(specStr)
+ return b
+}
+
+func (b *iptablesRuleBuilder) SetTarget(target string) IPTablesRuleBuilder {
+ if target == "" {
+ return b
+ }
+ targetStr := fmt.Sprintf("-j %s", target)
+ b.writeSpec(targetStr)
+ return b
+}
+
+func (b *iptablesRuleBuilder) SetComment(comment string) IPTablesRuleBuilder {
+ if comment == "" {
+ return b
+ }
+
+ commentStr := fmt.Sprintf("-m comment --comment \"%s\"", comment)
+ b.writeSpec(commentStr)
+ return b
+}
+
+// CopyBuilder returns a copy of the builder with its own spec buffer, so a
+// partially built rule can be branched, e.g. into IPv4 and IPv6 variants.
+func (b *iptablesRuleBuilder) CopyBuilder() IPTablesRuleBuilder {
+ var copiedSpec strings.Builder
+ copiedSpec.Grow(b.specs.Len())
+ copiedSpec.WriteString(b.specs.String())
+ builder := &iptablesRuleBuilder{
+ iptablesRule{
+ chain: b.chain,
+ specs: &copiedSpec,
+ },
+ }
+ return builder
+}
+
+func (b *iptablesRuleBuilder) Done() IPTablesRule {
+ return &b.iptablesRule
+}
+
+// GetRule renders the rule in iptables-save syntax, trimming the trailing
+// space left by the last writeSpec call.
+func (e *iptablesRule) GetRule() string {
+ ruleStr := fmt.Sprintf("-A %s %s", e.chain, e.specs.String())
+ return ruleStr[:len(ruleStr)-1]
+}
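
The test file below exercises the individual matchers; the one method it leaves uncovered is CopyBuilder. A short sketch of the branching pattern it appears designed for, building the family-agnostic matches once and deriving per-family rules (interface name and CIDR values are illustrative):

```go
package main

import (
	"fmt"

	"antrea.io/antrea/pkg/agent/util/iptables"
)

func main() {
	// Build the family-agnostic matches once.
	common := iptables.NewRuleBuilder(iptables.ForwardChain).
		MatchInputInterface("eth0").
		MatchTransProtocol(iptables.ProtocolTCP)

	// Branch a copy per IP family; the original builder is left untouched.
	v4 := common.CopyBuilder().MatchCIDRSrc("192.168.1.0/24").SetTarget(iptables.AcceptTarget).Done()
	v6 := common.CopyBuilder().MatchCIDRSrc("fd00::/64").SetTarget(iptables.AcceptTarget).Done()

	fmt.Println(v4.GetRule()) // -A FORWARD -i eth0 -p tcp -s 192.168.1.0/24 -j ACCEPT
	fmt.Println(v6.GetRule()) // -A FORWARD -i eth0 -p tcp -s fd00::/64 -j ACCEPT
}
```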
diff --git a/pkg/agent/util/iptables/builder_test.go b/pkg/agent/util/iptables/builder_test.go
new file mode 100644
index 00000000000..d6777739871
--- /dev/null
+++ b/pkg/agent/util/iptables/builder_test.go
@@ -0,0 +1,159 @@
+//go:build !windows
+// +build !windows
+
+// Copyright 2024 Antrea Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package iptables
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+var (
+ ipsetAlfa = "alfa"
+ ipsetBravo = "bravo"
+ eth0 = "eth0"
+ eth1 = "eth1"
+ port8080 = &intstr.IntOrString{Type: intstr.Int, IntVal: 8080}
+ port137 = &intstr.IntOrString{Type: intstr.Int, IntVal: 137}
+ port139 = int32(139)
+ port40000 = int32(40000)
+ port50000 = int32(50000)
+ icmpType0 = int32(0)
+ icmpCode0 = int32(0)
+ cidr = "192.168.1.0/24"
+)
+
+func TestBuilders(t *testing.T) {
+ testCases := []struct {
+ name string
+ chain string
+ buildFunc func(IPTablesRuleBuilder) IPTablesRule
+ expected string
+ }{
+ {
+ name: "Accept TCP destination 8080 in FORWARD",
+ chain: ForwardChain,
+ buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule {
+ return builder.MatchIPSetSrc(ipsetAlfa).
+ MatchIPSetDst(ipsetBravo).
+ MatchInputInterface(eth0).
+ MatchTransProtocol(ProtocolTCP).
+ MatchDstPort(port8080, nil).
+ MatchCIDRSrc(cidr).
+ SetComment("Accept TCP 8080").
+ SetTarget(AcceptTarget).
+ Done()
+ },
+ expected: `-A FORWARD -m set --match-set alfa src -m set --match-set bravo dst -i eth0 -p tcp --dport 8080 -s 192.168.1.0/24 -m comment --comment "Accept TCP 8080" -j ACCEPT`,
+ },
+ {
+ name: "Drop UDP destination 137-139 in INPUT",
+ chain: "INPUT",
+ buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule {
+ return builder.MatchIPSetSrc(ipsetAlfa).
+ MatchInputInterface(eth0).
+ MatchTransProtocol(ProtocolUDP).
+ MatchDstPort(port137, &port139).
+ MatchCIDRDst(cidr).
+ SetComment("Drop UDP 137-139").
+ SetTarget(DropTarget).
+ Done()
+ },
+ expected: `-A INPUT -m set --match-set alfa src -i eth0 -p udp --dport 137:139 -d 192.168.1.0/24 -m comment --comment "Drop UDP 137-139" -j DROP`,
+ },
+ {
+ name: "Reject SCTP source 40000-50000 in OUTPUT",
+ chain: OutputChain,
+ buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule {
+ return builder.MatchOutputInterface(eth1).
+ MatchTransProtocol(ProtocolSCTP).
+ MatchSrcPort(&port40000, &port50000).
+ SetComment("Drop SCTP 40000-50000").
+ SetTarget(DropTarget).
+ Done()
+ },
+ expected: `-A OUTPUT -o eth1 -p sctp --sport 40000:50000 -m comment --comment "Drop SCTP 40000-50000" -j DROP`,
+ },
+ {
+ name: "Accept ICMP IPv4",
+ chain: ForwardChain,
+ buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule {
+ return builder.MatchInputInterface(eth0).
+ MatchICMP(&icmpType0, &icmpCode0, ProtocolIPv4).
+ SetTarget(AcceptTarget).
+ Done()
+ },
+ expected: `-A FORWARD -i eth0 -p icmp --icmp-type 0/0 -j ACCEPT`,
+ },
+ {
+ name: "Accept ICMP IPv6",
+ chain: ForwardChain,
+ buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule {
+ return builder.MatchInputInterface(eth0).
+ MatchICMP(&icmpType0, nil, ProtocolIPv6).
+ SetTarget(AcceptTarget).
+ Done()
+ },
+ expected: `-A FORWARD -i eth0 -p icmpv6 --icmpv6-type 0 -j ACCEPT`,
+ },
+ {
+ name: "Accept packets of established TCP connections",
+ chain: InputChain,
+ buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule {
+ return builder.MatchTransProtocol(ProtocolTCP).
+ MatchEstablishedOrRelated().
+ SetTarget(AcceptTarget).
+ Done()
+ },
+ expected: `-A INPUT -p tcp -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT`,
+ },
+ {
+ name: "Accept no source and destination IPv4 address",
+ chain: InputChain,
+ buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule {
+ return builder.MatchTransProtocol(ProtocolTCP).
+ MatchNoSrc(ProtocolIPv4).
+ MatchNoDst(ProtocolIPv4).
+ SetTarget(AcceptTarget).
+ Done()
+ },
+ expected: `-A INPUT -p tcp ! -s 0.0.0.0/0 ! -d 0.0.0.0/0 -j ACCEPT`,
+ },
+ {
+ name: "Accept no source and destination IPv6 address",
+ chain: InputChain,
+ buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule {
+ return builder.MatchTransProtocol(ProtocolTCP).
+ MatchNoSrc(ProtocolIPv6).
+ MatchNoDst(ProtocolIPv6).
+ SetTarget(AcceptTarget).
+ Done()
+ },
+ expected: `-A INPUT -p tcp ! -s ::/0 ! -d ::/0 -j ACCEPT`,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ builder := NewRuleBuilder(tc.chain)
+ rule := tc.buildFunc(builder)
+ assert.Equal(t, tc.expected, rule.GetRule())
+ })
+ }
+}
diff --git a/pkg/agent/util/iptables/iptables.go b/pkg/agent/util/iptables/iptables.go
index 9514c16008d..436e3867a56 100644
--- a/pkg/agent/util/iptables/iptables.go
+++ b/pkg/agent/util/iptables/iptables.go
@@ -26,6 +26,7 @@ import (
"github.com/blang/semver"
"github.com/coreos/go-iptables/iptables"
+ "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/klog/v2"
)
@@ -36,7 +37,7 @@ const (
RawTable = "raw"
AcceptTarget = "ACCEPT"
- DROPTarget = "DROP"
+ DropTarget = "DROP"
MasqueradeTarget = "MASQUERADE"
MarkTarget = "MARK"
ReturnTarget = "RETURN"
@@ -44,8 +45,10 @@ const (
NoTrackTarget = "NOTRACK"
SNATTarget = "SNAT"
DNATTarget = "DNAT"
+ RejectTarget = "REJECT"
PreRoutingChain = "PREROUTING"
+ InputChain = "INPUT"
ForwardChain = "FORWARD"
PostRoutingChain = "POSTROUTING"
OutputChain = "OUTPUT"
@@ -71,6 +74,14 @@ const (
ProtocolIPv6
)
+const (
+ ProtocolTCP = "tcp"
+ ProtocolUDP = "udp"
+ ProtocolSCTP = "sctp"
+ ProtocolICMP = "icmp"
+ ProtocolICMPv6 = "icmp6"
+)
+
// https://netfilter.org/projects/iptables/files/changes-iptables-1.6.2.txt:
// iptables-restore: support acquiring the lock.
var restoreWaitSupportedMinVersion = semver.Version{Major: 1, Minor: 6, Patch: 2}
@@ -95,6 +106,30 @@ type Interface interface {
Save() ([]byte, error)
}
+// IPTablesRuleBuilder is a chainable builder for a single iptables rule; each
+// Match*/Set* call appends a spec and returns the builder itself.
+type IPTablesRuleBuilder interface {
+ MatchCIDRSrc(cidr string) IPTablesRuleBuilder
+ MatchCIDRDst(cidr string) IPTablesRuleBuilder
+ MatchIPSetSrc(ipset string) IPTablesRuleBuilder
+ MatchIPSetDst(ipset string) IPTablesRuleBuilder
+ MatchNoSrc(ipProtocol Protocol) IPTablesRuleBuilder
+ MatchNoDst(ipProtocol Protocol) IPTablesRuleBuilder
+ MatchTransProtocol(protocol string) IPTablesRuleBuilder
+ MatchDstPort(port *intstr.IntOrString, endPort *int32) IPTablesRuleBuilder
+ MatchSrcPort(port, endPort *int32) IPTablesRuleBuilder
+ MatchICMP(icmpType, icmpCode *int32, ipProtocol Protocol) IPTablesRuleBuilder
+ MatchEstablishedOrRelated() IPTablesRuleBuilder
+ MatchInputInterface(interfaceName string) IPTablesRuleBuilder
+ MatchOutputInterface(interfaceName string) IPTablesRuleBuilder
+ SetTarget(target string) IPTablesRuleBuilder
+ SetComment(comment string) IPTablesRuleBuilder
+ CopyBuilder() IPTablesRuleBuilder
+ Done() IPTablesRule
+}
+
+// IPTablesRule is a fully built rule that can be rendered as an
+// iptables-save style "-A" line.
+type IPTablesRule interface {
+ GetRule() string
+}
+
type Client struct {
ipts map[Protocol]*iptables.IPTables
// restoreWaitSupported indicates whether iptables-restore (or ip6tables-restore) supports --wait flag.
@@ -352,3 +387,7 @@ func (c *Client) Save() ([]byte, error) {
func MakeChainLine(chain string) string {
return fmt.Sprintf(":%s - [0:0]", chain)
}
+
+func IsIPv6Protocol(protocol Protocol) bool {
+ return protocol == ProtocolIPv6
+}
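
Beyond the new constants and the exported builder interfaces, this hunk adds IsIPv6Protocol as a small convenience for callers that keep per-family state. A hedged sketch of that dispatch; the map-based bookkeeping is invented for illustration and is not the agent's actual data structure:

```go
package main

import (
	"fmt"

	"antrea.io/antrea/pkg/agent/util/iptables"
)

func main() {
	// Hypothetical per-family rule cache keyed by IsIPv6Protocol.
	rulesByFamily := map[bool][]string{}
	for _, proto := range []iptables.Protocol{iptables.ProtocolIPv4, iptables.ProtocolIPv6} {
		rule := iptables.NewRuleBuilder(iptables.InputChain).
			MatchICMP(nil, nil, proto). // emits "-p icmp" or "-p icmpv6" by family
			SetTarget(iptables.AcceptTarget).
			Done()
		isIPv6 := iptables.IsIPv6Protocol(proto)
		rulesByFamily[isIPv6] = append(rulesByFamily[isIPv6], rule.GetRule())
	}
	fmt.Println(rulesByFamily[false][0]) // -A INPUT -p icmp -j ACCEPT
	fmt.Println(rulesByFamily[true][0])  // -A INPUT -p icmpv6 -j ACCEPT
}
```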
diff --git a/pkg/apiserver/handlers/featuregates/handler_test.go b/pkg/apiserver/handlers/featuregates/handler_test.go
index d4f812ddcaa..945045eb55a 100644
--- a/pkg/apiserver/handlers/featuregates/handler_test.go
+++ b/pkg/apiserver/handlers/featuregates/handler_test.go
@@ -67,6 +67,7 @@ func Test_getGatesResponse(t *testing.T) {
{Component: "agent", Name: "Multicast", Status: multicastStatus, Version: "BETA"},
{Component: "agent", Name: "Multicluster", Status: "Disabled", Version: "ALPHA"},
{Component: "agent", Name: "NetworkPolicyStats", Status: "Enabled", Version: "BETA"},
+ {Component: "agent", Name: "NodeNetworkPolicy", Status: "Disabled", Version: "ALPHA"},
{Component: "agent", Name: "NodePortLocal", Status: "Enabled", Version: "GA"},
{Component: "agent", Name: "SecondaryNetwork", Status: "Disabled", Version: "ALPHA"},
{Component: "agent", Name: "ServiceExternalIP", Status: "Disabled", Version: "ALPHA"},
@@ -199,6 +200,7 @@ func Test_getControllerGatesResponse(t *testing.T) {
{Component: "controller", Name: "Multicluster", Status: "Disabled", Version: "ALPHA"},
{Component: "controller", Name: "NetworkPolicyStats", Status: "Enabled", Version: "BETA"},
{Component: "controller", Name: "NodeIPAM", Status: "Enabled", Version: "BETA"},
+ {Component: "controller", Name: "NodeNetworkPolicy", Status: "Disabled", Version: "ALPHA"},
{Component: "controller", Name: "ServiceExternalIP", Status: "Disabled", Version: "ALPHA"},
{Component: "controller", Name: "SupportBundleCollection", Status: "Disabled", Version: "ALPHA"},
{Component: "controller", Name: "Traceflow", Status: "Enabled", Version: "BETA"},
diff --git a/pkg/config/agent/config.go b/pkg/config/agent/config.go
index ed57bd00db1..dd0f2c95305 100644
--- a/pkg/config/agent/config.go
+++ b/pkg/config/agent/config.go
@@ -404,3 +404,23 @@ type OVSBridgeConfig struct {
// only a single physical interface is supported.
PhysicalInterfaces []string `yaml:"physicalInterfaces,omitempty"`
}
+
+type PrivilegedRule struct {
+ // The direction of the rule. It can be "ingress" or "egress".
+ Direction string `yaml:"direction,omitempty"`
+ // The IP family of the rule. Supported values are "ipv4", "ipv6", and "" (both).
+ IPFamilies string `yaml:"ipFamily,omitempty"`
+ // The protocol which traffic must match. Supported values are "tcp", "udp", and "" (both).
+ Protocol string `yaml:"protocol,omitempty"`
+ // CIDR is the destination CIDR for egress rules and the source CIDR for ingress rules. An empty value matches all addresses.
+ CIDR string `yaml:"cidr,omitempty"`
+ // The destination ports of the given protocol. A nil value matches all ports.
+ Ports []string `yaml:"ports,omitempty"`
+ // Description is the explanation of the rule.
+ Description string `yaml:"description,omitempty"`
+}
+
+// NodeNetworkPolicyConfig includes the privileged rules.
+type NodeNetworkPolicyConfig struct {
+ PrivilegedRules []PrivilegedRule `yaml:"privilegedRules,omitempty"`
+}
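
The two structs above only define the schema; nothing in this hunk shows the decode path. A minimal sketch of the YAML shape they accept, assuming a yaml-tag-aware decoder such as gopkg.in/yaml.v2 (the import alias and all sample values are illustrative):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2" // assumed decoder; any yaml-tag-aware one works

	agentconfig "antrea.io/antrea/pkg/config/agent"
)

const sample = `
privilegedRules:
  - direction: ingress
    ipFamily: ipv4
    protocol: tcp
    cidr: 10.0.0.0/8
    ports: ["22", "10250"]
    description: allow SSH and kubelet from the cluster network
`

func main() {
	var cfg agentconfig.NodeNetworkPolicyConfig
	if err := yaml.Unmarshal([]byte(sample), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%d privileged rule(s); the first allows %s to %s\n",
		len(cfg.PrivilegedRules), cfg.PrivilegedRules[0].Protocol, cfg.PrivilegedRules[0].CIDR)
}
```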
diff --git a/pkg/features/antrea_features.go b/pkg/features/antrea_features.go
index ea5d1eaf161..4a2be6c9b0d 100644
--- a/pkg/features/antrea_features.go
+++ b/pkg/features/antrea_features.go
@@ -150,6 +150,10 @@ const (
// alpha: v1.15
// Allow users to allocate Egress IPs from a different subnet from the default Node subnet.
EgressSeparateSubnet featuregate.Feature = "EgressSeparateSubnet"
+
+ // alpha: v1.15
+ // Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes.
+ NodeNetworkPolicy featuregate.Feature = "NodeNetworkPolicy"
)
var (
@@ -189,6 +193,7 @@ var (
AdminNetworkPolicy: {Default: false, PreRelease: featuregate.Alpha},
EgressTrafficShaping: {Default: false, PreRelease: featuregate.Alpha},
EgressSeparateSubnet: {Default: false, PreRelease: featuregate.Alpha},
+ NodeNetworkPolicy: {Default: false, PreRelease: featuregate.Alpha},
}
// AgentGates consists of all known feature gates for the Antrea Agent.
@@ -217,6 +222,7 @@ var (
TrafficControl,
EgressTrafficShaping,
EgressSeparateSubnet,
+ NodeNetworkPolicy,
)
// ControllerGates consists of all known feature gates for the Antrea Controller.
@@ -262,6 +268,7 @@ var (
CleanupStaleUDPSvcConntrack: {},
EgressTrafficShaping: {},
EgressSeparateSubnet: {},
+ NodeNetworkPolicy: {},
}
// supportedFeaturesOnExternalNode records the features supported on an external
// Node. Antrea Agent checks the enabled features if it is running on an
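
The gate defaults to off in both components, so consumers are expected to check it before wiring anything up. A sketch of the conventional check, assuming the usual features.DefaultFeatureGate accessor; the function and its package here are hypothetical:

```go
package agentinit // hypothetical package

import "antrea.io/antrea/pkg/features"

// maybeStartNodeNetworkPolicy runs start only when the NodeNetworkPolicy
// feature gate is enabled, mirroring how other alpha gates are consulted.
func maybeStartNodeNetworkPolicy(start func()) {
	if features.DefaultFeatureGate.Enabled(features.NodeNetworkPolicy) {
		start()
	}
}
```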
diff --git a/test/e2e/antreaipam_anp_test.go b/test/e2e/antreaipam_anp_test.go
index e5c0ef8ce02..b6fd4a162d6 100644
--- a/test/e2e/antreaipam_anp_test.go
+++ b/test/e2e/antreaipam_anp_test.go
@@ -44,7 +44,6 @@ func initializeAntreaIPAM(t *testing.T, data *TestData) {
// It should be empty every time when "initializeAntreaIPAM" is performed, otherwise there will be unexpected
// results.
allPods = []Pod{}
- podsByNamespace = make(map[string][]Pod)
for _, ns := range antreaIPAMNamespaces {
namespaces[ns] = ns
@@ -53,7 +52,6 @@ func initializeAntreaIPAM(t *testing.T, data *TestData) {
for _, podName := range pods {
for _, ns := range namespaces {
allPods = append(allPods, NewPod(ns, podName))
- podsByNamespace[ns] = append(podsByNamespace[ns], NewPod(ns, podName))
}
}
@@ -61,9 +59,9 @@ func initializeAntreaIPAM(t *testing.T, data *TestData) {
// k8sUtils is a global var
k8sUtils, err = NewKubernetesUtils(data)
failOnError(err, t)
- _, err = k8sUtils.Bootstrap(regularNamespaces, pods, true)
+ _, err = k8sUtils.Bootstrap(regularNamespaces, pods, true, nil, nil)
failOnError(err, t)
- ips, err := k8sUtils.Bootstrap(namespaces, pods, false)
+ ips, err := k8sUtils.Bootstrap(namespaces, pods, false, nil, nil)
failOnError(err, t)
podIPs = ips
}
@@ -195,18 +193,18 @@ func testAntreaIPAMACNP(t *testing.T, protocol e2eutils.AntreaPolicyProtocol, ac
SetAppliedToGroup([]e2eutils.ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "c"}}})
if isIngress {
builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil,
- nil, nil, false, nil, ruleAction, "", "", nil)
+ nil, nil, nil, nil, false, nil, ruleAction, "", "", nil)
builder2.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil,
- nil, nil, false, nil, ruleAction, "", "", nil)
+ nil, nil, nil, nil, false, nil, ruleAction, "", "", nil)
builder3.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil,
- nil, nil, false, nil, ruleAction, "", "", nil)
+ nil, nil, nil, nil, false, nil, ruleAction, "", "", nil)
} else {
builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil,
- nil, nil, false, nil, ruleAction, "", "", nil)
+ nil, nil, nil, nil, false, nil, ruleAction, "", "", nil)
builder2.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil,
- nil, nil, false, nil, ruleAction, "", "", nil)
+ nil, nil, nil, nil, false, nil, ruleAction, "", "", nil)
builder3.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil,
- nil, nil, false, nil, ruleAction, "", "", nil)
+ nil, nil, nil, nil, false, nil, ruleAction, "", "", nil)
}
reachability := NewReachability(allPods, action)
diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go
index 27c5ae0cb28..ef3d78ee252 100644
--- a/test/e2e/antreapolicy_test.go
+++ b/test/e2e/antreapolicy_test.go
@@ -47,13 +47,13 @@ import (
// common for all tests.
var (
allPods []Pod
- podsByNamespace map[string][]Pod
k8sUtils *KubernetesUtils
allTestList []*TestCase
pods []string
namespaces map[string]string
podIPs map[string][]string
p80, p81, p8080, p8081, p8082, p8085, p6443 int32
+ nodes map[string]string
)
const (
@@ -125,12 +125,10 @@ func initialize(t *testing.T, data *TestData) {
// It should be empty every time when "initialize" is performed, otherwise there will be unexpected
// results.
allPods = []Pod{}
- podsByNamespace = make(map[string][]Pod)
for _, podName := range pods {
for _, ns := range namespaces {
allPods = append(allPods, NewPod(ns, podName))
- podsByNamespace[ns] = append(podsByNamespace[ns], NewPod(ns, podName))
}
}
skipIfAntreaPolicyDisabled(t)
@@ -139,7 +137,7 @@ func initialize(t *testing.T, data *TestData) {
// k8sUtils is a global var
k8sUtils, err = NewKubernetesUtils(data)
failOnError(err, t)
- ips, err := k8sUtils.Bootstrap(namespaces, pods, true)
+ ips, err := k8sUtils.Bootstrap(namespaces, pods, true, nil, nil)
failOnError(err, t)
podIPs = ips
}
@@ -242,14 +240,14 @@ func testUpdateValidationInvalidACNP(t *testing.T) {
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}).
SetPriority(1.0)
builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ nil, nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
acnp := builder.Get()
if _, err := k8sUtils.CreateOrUpdateACNP(acnp); err != nil {
failOnError(fmt.Errorf("create ACNP acnp-applied-to-update failed: %v", err), t)
}
builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, nil,
- nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}, crdv1beta1.RuleActionAllow, "", "", nil)
+ nil, nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}, crdv1beta1.RuleActionAllow, "", "", nil)
acnp = builder.Get()
if _, err := k8sUtils.CreateOrUpdateACNP(acnp); err == nil {
// Above update of ACNP must fail as it is an invalid spec.
@@ -407,8 +405,8 @@ func testACNPAllowXBtoA(t *testing.T) {
builder = builder.SetName("acnp-allow-xb-to-a").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
reachability := NewReachability(allPods, Dropped)
reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Connected)
@@ -445,22 +443,22 @@ func testACNPSourcePort(t *testing.T) {
builder = builder.SetName("acnp-source-port").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
- builder.AddIngressForSrcPort(ProtocolTCP, nil, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder.AddIngressForSrcPort(ProtocolTCP, nil, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
builder2 := &ClusterNetworkPolicySpecBuilder{}
builder2 = builder2.SetName("acnp-source-port").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
- builder2.AddIngressForSrcPort(ProtocolTCP, &p80, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder2.AddIngressForSrcPort(ProtocolTCP, &p80, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
builder3 := &ClusterNetworkPolicySpecBuilder{}
builder3 = builder3.SetName("acnp-source-port").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
- builder3.AddIngressForSrcPort(ProtocolTCP, &p80, &p81, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder3.AddIngressForSrcPort(ProtocolTCP, &p80, &p81, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
reachability := NewReachability(allPods, Connected)
reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped)
@@ -512,8 +510,8 @@ func testACNPAllowXBtoYA(t *testing.T) {
builder = builder.SetName("acnp-allow-xb-to-ya").
SetPriority(2.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["y"]}}})
- builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
reachability := NewReachability(allPods, Dropped)
reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Connected)
@@ -544,15 +542,15 @@ func testACNPPriorityOverrideDefaultDeny(t *testing.T) {
builder1 = builder1.SetName("acnp-priority2").
SetPriority(2).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
- builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
builder2 := &ClusterNetworkPolicySpecBuilder{}
builder2 = builder2.SetName("acnp-priority1").
SetPriority(1).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
- builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
// Ingress from ns:z to x/a will be dropped since acnp-priority1 has higher precedence.
reachabilityBothACNP := NewReachability(allPods, Dropped)
@@ -595,10 +593,10 @@ func testACNPAllowNoDefaultIsolation(t *testing.T, protocol AntreaPolicyProtocol
builder = builder.SetName("acnp-allow-x-ingress-y-egress-z").
SetPriority(1.1).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
- builder.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]},
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
- builder.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ builder.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ builder.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
reachability := NewReachability(allPods, Connected)
testStep := []*TestStep{
@@ -632,8 +630,8 @@ func testACNPDropEgress(t *testing.T, protocol AntreaPolicyProtocol) {
builder = builder.SetName("acnp-deny-a-to-z-egress").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
- builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
reachability := NewReachability(allPods, Connected)
reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
@@ -665,7 +663,7 @@ func testACNPDropIngressInSelectedNamespace(t *testing.T) {
builder = builder.SetName("acnp-deny-ingress-to-x").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil,
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil,
crdv1beta1.RuleActionDrop, "", "drop-all-ingress", nil)
reachability := NewReachability(allPods, Connected)
@@ -696,8 +694,8 @@ func testACNPNoEffectOnOtherProtocols(t *testing.T) {
builder = builder.SetName("acnp-deny-a-to-z-ingress").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
reachability1 := NewReachability(allPods, Connected)
reachability1.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped)
@@ -749,8 +747,8 @@ func testACNPAppliedToDenyXBtoCGWithYA(t *testing.T) {
builder = builder.SetName("acnp-deny-cg-with-ya-from-xb").
SetPriority(2.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}})
- builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
reachability := NewReachability(allPods, Connected)
reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped)
@@ -787,7 +785,7 @@ func testACNPIngressRuleDenyCGWithXBtoYA(t *testing.T) {
SetPriority(2.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["y"]}}})
builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, nil, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil)
+ nil, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil)
reachability := NewReachability(allPods, Connected)
reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped)
@@ -818,8 +816,8 @@ func testACNPAppliedToRuleCGWithPodsAToNsZ(t *testing.T) {
builder := &ClusterNetworkPolicySpecBuilder{}
builder = builder.SetName("acnp-deny-cg-with-a-to-z").
SetPriority(1.0)
- builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, []ACNPAppliedToSpec{{Group: cgName}}, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, []ACNPAppliedToSpec{{Group: cgName}}, crdv1beta1.RuleActionDrop, "", "", nil)
reachability := NewReachability(allPods, Connected)
reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
@@ -853,8 +851,8 @@ func testACNPEgressRulePodsAToCGWithNsZ(t *testing.T) {
builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
- builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil)
+ builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil)
reachability := NewReachability(allPods, Connected)
reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
@@ -890,8 +888,8 @@ func testACNPClusterGroupUpdateAppliedTo(t *testing.T) {
builder = builder.SetName("acnp-deny-cg-with-a-to-z-egress").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}})
- builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
reachability := NewReachability(allPods, Connected)
reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
@@ -941,8 +939,8 @@ func testACNPClusterGroupUpdate(t *testing.T) {
builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
- builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil)
+ builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil)
reachability := NewReachability(allPods, Connected)
reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
@@ -991,8 +989,8 @@ func testACNPClusterGroupAppliedToPodAdd(t *testing.T, data *TestData) {
builder = builder.SetName("acnp-deny-cg-with-zj-to-xj-egress").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}})
- builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "j"}, map[string]string{"ns": namespaces["x"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "j"}, nil, map[string]string{"ns": namespaces["x"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
cp := []*CustomProbe{
{
SourcePod: CustomPod{
@@ -1039,8 +1037,8 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) {
NSSelector: map[string]string{"ns": namespaces["x"]},
},
})
- builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil)
+ builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil)
cp := []*CustomProbe{
{
SourcePod: CustomPod{
@@ -1115,10 +1113,10 @@ func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) {
NSSelector: map[string]string{"ns": namespaces["y"]},
},
})
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil)
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil)
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil)
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil)
reachability := NewReachability(allPods, Connected)
reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
@@ -1718,8 +1716,8 @@ func testBaselineNamespaceIsolation(t *testing.T) {
SetTier("baseline").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil,
- nil, []metav1.LabelSelectorRequirement{nsExpOtherThanX}, false,
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, []metav1.LabelSelectorRequirement{nsExpOtherThanX}, false,
nil, crdv1beta1.RuleActionDrop, "", "", nil)
// create a K8s NetworkPolicy for Pods in namespace x to allow ingress traffic from Pods in the same namespace,
@@ -1762,7 +1760,7 @@ func testBaselineNamespaceIsolation(t *testing.T) {
time.Sleep(networkPolicyDelay)
}
-// testACNPPriorityOverride tests priority overriding in three Policies. Those three Policies are applied in a specific order to
+// testACNPPriorityOverride tests priority overriding in three ACNPs. Those three ACNPs are applied in a specific order to
// test priority reassignment, and each controls a smaller set of traffic patterns as priority increases.
func testACNPPriorityOverride(t *testing.T) {
builder1 := &ClusterNetworkPolicySpecBuilder{}
@@ -1770,24 +1768,24 @@ func testACNPPriorityOverride(t *testing.T) {
SetPriority(1.001).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
// Highest priority. Drops traffic from z/b to x/a.
- builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
builder2 := &ClusterNetworkPolicySpecBuilder{}
builder2 = builder2.SetName("acnp-priority2").
SetPriority(1.002).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
// Medium priority. Allows traffic from z to x/a.
- builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
builder3 := &ClusterNetworkPolicySpecBuilder{}
builder3 = builder3.SetName("acnp-priority3").
SetPriority(1.003).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
// Lowest priority. Drops traffic from z to x.
- builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
reachabilityTwoACNPs := NewReachability(allPods, Connected)
reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped)
@@ -1836,8 +1834,8 @@ func testACNPPriorityOverride(t *testing.T) {
executeTests(t, testCase)
}
-// testACNPTierOverride tests tier priority overriding in three Policies.
-// Each ACNP controls a smaller set of traffic patterns as tier priority increases.
+// testACNPTierOverride tests tier priority overriding in three ACNPs. Each ACNP controls a smaller set of traffic patterns
+// as tier priority increases.
func testACNPTierOverride(t *testing.T) {
builder1 := &ClusterNetworkPolicySpecBuilder{}
builder1 = builder1.SetName("acnp-tier-emergency").
@@ -1845,8 +1843,8 @@ func testACNPTierOverride(t *testing.T) {
SetPriority(100).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
// Highest priority tier. Drops traffic from z/b to x/a.
- builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
builder2 := &ClusterNetworkPolicySpecBuilder{}
builder2 = builder2.SetName("acnp-tier-securityops").
@@ -1854,8 +1852,8 @@ func testACNPTierOverride(t *testing.T) {
SetPriority(10).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
// Medium priority tier. Allows traffic from z to x/a.
- builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
builder3 := &ClusterNetworkPolicySpecBuilder{}
builder3 = builder3.SetName("acnp-tier-application").
@@ -1863,8 +1861,8 @@ func testACNPTierOverride(t *testing.T) {
SetPriority(1).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
// Lowest priority tier. Drops traffic from z to x.
- builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
reachabilityTwoACNPs := NewReachability(allPods, Connected)
reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped)
@@ -1912,8 +1910,8 @@ func testACNPTierOverride(t *testing.T) {
executeTests(t, testCase)
}
-// testACNPTierOverride tests tier priority overriding in three Policies with custom created tiers.
-// Each ACNP controls a smaller set of traffic patterns as tier priority increases.
+// testACNPCustomTiers tests tier priority overriding in three ACNPs in custom-created tiers. Each ACNP controls a
+// smaller set of traffic patterns as tier priority increases.
func testACNPCustomTiers(t *testing.T) {
k8sUtils.DeleteTier("high-priority")
k8sUtils.DeleteTier("low-priority")
@@ -1929,8 +1927,8 @@ func testACNPCustomTiers(t *testing.T) {
SetPriority(100).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
// Medium priority tier. Allows traffic from z to x/a.
- builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
builder2 := &ClusterNetworkPolicySpecBuilder{}
builder2 = builder2.SetName("acnp-tier-low").
@@ -1938,8 +1936,8 @@ func testACNPCustomTiers(t *testing.T) {
SetPriority(1).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
// Lowest priority tier. Drops traffic from z to x.
- builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
reachabilityTwoACNPs := NewReachability(allPods, Connected)
reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped)
@@ -1963,7 +1961,7 @@ func testACNPCustomTiers(t *testing.T) {
{"ACNP Custom Tier priority", testStepTwoACNP},
}
executeTests(t, testCase)
- // Cleanup customed tiers. ACNPs created in those tiers need to be deleted first.
+ // Clean up custom tiers. ACNPs created in those tiers need to be deleted first.
failOnError(k8sUtils.CleanACNPs(), t)
failOnError(k8sUtils.DeleteTier("high-priority"), t)
failOnError(k8sUtils.DeleteTier("low-priority"), t)
@@ -1977,8 +1975,8 @@ func testACNPPriorityConflictingRule(t *testing.T) {
builder1 = builder1.SetName("acnp-drop").
SetPriority(1).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
- builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
builder2 := &ClusterNetworkPolicySpecBuilder{}
builder2 = builder2.SetName("acnp-allow").
@@ -1986,8 +1984,8 @@ func testACNPPriorityConflictingRule(t *testing.T) {
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
// The following ingress rule will take no effect as it is exactly the same as ingress rule of cnp-drop,
// but cnp-allow has lower priority.
- builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
reachabilityBothACNP := NewReachability(allPods, Connected)
reachabilityBothACNP.ExpectEgressToNamespace(Pod(namespaces["z"]+"/a"), namespaces["x"], Dropped)
@@ -2010,30 +2008,30 @@ func testACNPPriorityConflictingRule(t *testing.T) {
executeTests(t, testCase)
}
-// testACNPPriorityConflictingRule tests that if there are two rules in the cluster that conflicts with
-// each other, the rule with higher precedence will prevail.
+// testACNPRulePriority tests that if there are two rules in the cluster that conflict with each other, the rule with
+// higher precedence will prevail.
func testACNPRulePriority(t *testing.T) {
builder1 := &ClusterNetworkPolicySpecBuilder{}
// acnp-deny will apply to all pods in namespace x
builder1 = builder1.SetName("acnp-deny").
SetPriority(5).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
- builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
// This rule should take no effect as it will be overridden by the first rule of cnp-allow
- builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
builder2 := &ClusterNetworkPolicySpecBuilder{}
// acnp-allow will also apply to all pods in namespace x
builder2 = builder2.SetName("acnp-allow").
SetPriority(5).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}})
- builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
- // This rule should take no effect as it will be overridden by the first rule of cnp-drop
- builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]},
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ // This rule should take no effect as it will be overridden by the first rule of acnp-deny
+ builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
// Only egress from pods in namespace x to namespace y should be denied
reachabilityBothACNP := NewReachability(allPods, Connected)
@@ -2063,8 +2061,8 @@ func testACNPPortRange(t *testing.T) {
builder = builder.SetName("acnp-deny-a-to-z-egress-port-range").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
- builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil)
+ builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil)
reachability := NewReachability(allPods, Connected)
reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
@@ -2095,8 +2093,8 @@ func testACNPRejectEgress(t *testing.T) {
builder = builder.SetName("acnp-reject-a-to-z-egress").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
- builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
reachability := NewReachability(allPods, Connected)
reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Rejected)
@@ -2120,14 +2118,14 @@ func testACNPRejectEgress(t *testing.T) {
executeTests(t, testCase)
}
-// testACNPRejectIngress tests that an ACNP is able to reject egress traffic from pods labelled A to namespace Z.
+// testACNPRejectIngress tests that an ACNP is able to reject ingress traffic to pods labelled A from namespace Z.
func testACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) {
builder := &ClusterNetworkPolicySpecBuilder{}
builder = builder.SetName("acnp-reject-a-from-z-ingress").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}})
- builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
reachability := NewReachability(allPods, Connected)
reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Rejected)
@@ -2184,10 +2182,10 @@ func testRejectServiceTraffic(t *testing.T, data *TestData, clientNamespace, ser
builder1 = builder1.SetName("acnp-reject-egress-svc-traffic").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": "agnhost-client"}}})
- builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, svc1.Spec.Selector, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
- builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, svc2.Spec.Selector, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, svc1.Spec.Selector, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, svc2.Spec.Selector, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
acnpEgress := builder1.Get()
k8sUtils.CreateOrUpdateACNP(acnpEgress)
@@ -2211,8 +2209,8 @@ func testRejectServiceTraffic(t *testing.T, data *TestData, clientNamespace, ser
builder2 = builder2.SetName("acnp-reject-ingress-svc-traffic").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: svc1.Spec.Selector}, {PodSelector: svc2.Spec.Selector}})
- builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "agnhost-client"}, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "agnhost-client"}, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
acnpIngress := builder2.Get()
k8sUtils.CreateOrUpdateACNP(acnpIngress)
@@ -2302,10 +2300,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser
builder1 := &ClusterNetworkPolicySpecBuilder{}
builder1 = builder1.SetName("acnp-reject-ingress-double-dir").
SetPriority(1.0)
- builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil,
- nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil)
- builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil,
- nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil)
+ builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil,
+ nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil)
+ builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil,
+ nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil)
runTestsWithACNP(builder1.Get(), testcases)
@@ -2313,10 +2311,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser
builder2 := &ClusterNetworkPolicySpecBuilder{}
builder2 = builder2.SetName("acnp-reject-egress-double-dir").
SetPriority(1.0)
- builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil,
- nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil)
- builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil,
- nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil)
+ builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil,
+ nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil)
+ builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil,
+ nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil)
runTestsWithACNP(builder2.Get(), testcases)
@@ -2325,10 +2323,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser
builder3 = builder3.SetName("acnp-reject-server-double-dir").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}})
- builder3.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
- builder3.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder3.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder3.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
runTestsWithACNP(builder3.Get(), testcases)
@@ -2337,10 +2335,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser
builder4 = builder4.SetName("acnp-reject-client-double-dir").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}})
- builder4.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
- builder4.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder4.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder4.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
runTestsWithACNP(builder4.Get(), testcases)
}
@@ -2623,8 +2621,8 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) {
builder = builder.SetName(npName).
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
- builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", ruleName, nil)
+ builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", ruleName, nil)
builder.AddEgressLogging(logLabel)
npRef := fmt.Sprintf("AntreaClusterNetworkPolicy:%s", npName)
@@ -2820,10 +2818,10 @@ func testAppliedToPerRule(t *testing.T) {
cnpATGrp2 := ACNPAppliedToSpec{
PodSelector: map[string]string{"pod": "b"}, NSSelector: map[string]string{"ns": namespaces["y"]},
PodSelectorMatchExp: nil, NSSelectorMatchExp: nil}
- builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
- nil, nil, false, []ACNPAppliedToSpec{cnpATGrp1}, crdv1beta1.RuleActionDrop, "", "", nil)
- builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["z"]},
- nil, nil, false, []ACNPAppliedToSpec{cnpATGrp2}, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]},
+ nil, nil, nil, false, []ACNPAppliedToSpec{cnpATGrp1}, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["z"]},
+ nil, nil, nil, false, []ACNPAppliedToSpec{cnpATGrp2}, crdv1beta1.RuleActionDrop, "", "", nil)
reachability2 := NewReachability(allPods, Connected)
reachability2.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped)
@@ -2861,7 +2859,7 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData)
builder := &ClusterNetworkPolicySpecBuilder{}
builder = builder.SetName("cnp-cg-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cg1Name}})
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
false, nil, crdv1beta1.RuleActionDrop, cg2Name, "", nil)
// Pods backing svc1 (label pod=a) in Namespace x should not allow ingress from Pods backing svc2 (label pod=b) in Namespace y.
@@ -2914,8 +2912,8 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData)
builderUpdated := &ClusterNetworkPolicySpecBuilder{}
builderUpdated = builderUpdated.SetName("cnp-cg-svc-ref").SetPriority(1.0)
builderUpdated.SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}})
- builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["y"]},
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["y"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
// Pod x/a should not allow ingress from y/b per the updated ACNP spec.
testStep3 := &TestStep{
@@ -2956,7 +2954,7 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) {
builder := &ClusterNetworkPolicySpecBuilder{}
builder = builder.SetName("cnp-nested-cg").SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["z"]}}}).
- AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
false, nil, crdv1beta1.RuleActionDrop, cgNestedName, "", nil)
// Pods in Namespace z should not allow traffic from Pods backing svc1 (label pod=a) in Namespace x.
@@ -3066,8 +3064,8 @@ func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) {
NSSelector: map[string]string{"ns": namespaces["y"]},
},
})
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil)
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil)
reachability := NewReachability(allPods, Connected)
reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
@@ -3115,9 +3113,9 @@ func testACNPNamespaceIsolation(t *testing.T) {
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}})
// deny ingress traffic except from own namespace, which is always allowed.
- builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
true, nil, crdv1beta1.RuleActionAllow, "", "", nil)
- builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil,
+ builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil,
false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
reachability := NewReachability(allPods, Dropped)
@@ -3136,9 +3134,9 @@ func testACNPNamespaceIsolation(t *testing.T) {
builder2 = builder2.SetName("test-acnp-ns-isolation-applied-to-per-rule").
SetTier("baseline").
SetPriority(1.0)
- builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
true, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1beta1.RuleActionAllow, "", "", nil)
- builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil,
+ builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil,
false, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1beta1.RuleActionDrop, "", "", nil)
reachability2 := NewReachability(allPods, Connected)
@@ -3171,9 +3169,9 @@ func testACNPStrictNamespacesIsolation(t *testing.T) {
SetTier("securityops").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}})
- builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
true, nil, crdv1beta1.RuleActionPass, "", "", nil)
- builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil,
+ builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil,
false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
// deny ingress traffic except from own namespace, which is delegated to Namespace owners (who can create K8s
// NetworkPolicies to regulate intra-Namespace traffic)
@@ -3531,7 +3529,7 @@ func testServiceAccountSelector(t *testing.T, data *TestData) {
builder = builder.SetName("acnp-service-account").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": serverName}}})
- builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", sa)
acnp := builder.Get()
@@ -3745,10 +3743,10 @@ func testACNPICMPSupport(t *testing.T, data *TestData) {
builder := &ClusterNetworkPolicySpecBuilder{}
builder = builder.SetName("test-acnp-icmp").
SetPriority(1.0).SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}})
- builder.AddEgress(ProtocolICMP, nil, nil, nil, &icmpType, &icmpCode, nil, nil, nil, map[string]string{"antrea-e2e": server0Name}, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
- builder.AddEgress(ProtocolICMP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": server1Name}, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+ builder.AddEgress(ProtocolICMP, nil, nil, nil, &icmpType, &icmpCode, nil, nil, nil, map[string]string{"antrea-e2e": server0Name}, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder.AddEgress(ProtocolICMP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": server1Name}, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
testcases := []podToAddrTestStep{}
if clusterInfo.podV4NetworkCIDR != "" {
@@ -3848,8 +3846,8 @@ func testACNPNodePortServiceSupport(t *testing.T, data *TestData, serverNamespac
},
},
})
- builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, &cidr, nil, nil,
- nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+ builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, &cidr, nil, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
acnp, err := k8sUtils.CreateOrUpdateACNP(builder.Get())
failOnError(err, t)
@@ -3940,8 +3938,8 @@ func testACNPIGMPQuery(t *testing.T, data *TestData, acnpName, caseName, groupAd
// create acnp with ingress rule for IGMP query
igmpType := crdv1alpha1.IGMPQuery
- builder.AddIngress(ProtocolIGMP, nil, nil, nil, nil, nil, &igmpType, &queryGroupAddress, nil, nil, nil,
- nil, nil, false, nil, action, "", "", nil)
+ builder.AddIngress(ProtocolIGMP, nil, nil, nil, nil, nil, &igmpType, &queryGroupAddress, nil, nil, nil, nil,
+ nil, nil, nil, false, nil, action, "", "", nil)
acnp := builder.Get()
_, err = k8sUtils.CreateOrUpdateACNP(acnp)
defer data.crdClient.CrdV1beta1().ClusterNetworkPolicies().Delete(context.TODO(), acnp.Name, metav1.DeleteOptions{})
@@ -4021,8 +4019,8 @@ func testACNPMulticastEgress(t *testing.T, data *TestData, acnpName, caseName, g
builder = builder.SetName(acnpName).SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": label}}})
cidr := mc.group.String() + "/32"
- builder.AddEgress(ProtocolUDP, nil, nil, nil, nil, nil, nil, nil, &cidr, nil, nil,
- nil, nil, false, nil, action, "", "", nil)
+ builder.AddEgress(ProtocolUDP, nil, nil, nil, nil, nil, nil, nil, &cidr, nil, nil, nil,
+ nil, nil, nil, false, nil, action, "", "", nil)
acnp := builder.Get()
_, err = k8sUtils.CreateOrUpdateACNP(acnp)
if err != nil {
@@ -4489,8 +4487,8 @@ func TestAntreaPolicyStatus(t *testing.T) {
acnpBuilder = acnpBuilder.SetName("acnp-applied-to-two-nodes").
SetPriority(1.0).
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}})
- acnpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]},
- nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+ acnpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]},
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
acnp := acnpBuilder.Get()
log.Debugf("creating ACNP %v", acnp.Name)
_, err = data.crdClient.CrdV1beta1().ClusterNetworkPolicies().Create(context.TODO(), acnp, metav1.CreateOptions{})
diff --git a/test/e2e/k8s_util.go b/test/e2e/k8s_util.go
index f7a117b9255..06cca769de6 100644
--- a/test/e2e/k8s_util.go
+++ b/test/e2e/k8s_util.go
@@ -487,7 +487,12 @@ func (data *TestData) CreateOrUpdateNamespace(n string, labels map[string]string
}
// CreateOrUpdateDeployment is a convenience function for idempotent setup of deployments
-func (data *TestData) CreateOrUpdateDeployment(ns, deploymentName string, replicas int32, labels map[string]string) (*appsv1.Deployment, error) {
+func (data *TestData) CreateOrUpdateDeployment(ns string,
+ deploymentName string,
+ replicas int32,
+ labels map[string]string,
+ nodeName string,
+ hostNetwork bool) (*appsv1.Deployment, error) {
zero := int64(0)
log.Infof("Creating/updating Deployment '%s/%s'", ns, deploymentName)
makeContainerSpec := func(port int32, protocol v1.Protocol) v1.Container {
@@ -535,6 +540,8 @@ func (data *TestData) CreateOrUpdateDeployment(ns, deploymentName string, replic
Namespace: ns,
},
Spec: v1.PodSpec{
+ NodeName: nodeName,
+ HostNetwork: hostNetwork,
TerminationGracePeriodSeconds: &zero,
Containers: []v1.Container{
makeContainerSpec(80, "ALL"),
@@ -1054,6 +1061,7 @@ func (k *KubernetesUtils) waitForHTTPServers(allPods []Pod) error {
serversAreReady := func() bool {
reachability := NewReachability(allPods, Connected)
+
k.Validate(allPods, reachability, []int32{80, 81, 8080, 8081, 8082, 8083, 8084, 8085}, utils.ProtocolTCP)
if _, wrong, _ := reachability.Summary(); wrong != 0 {
return false
@@ -1160,18 +1168,26 @@ func (k *KubernetesUtils) ValidateRemoteCluster(remoteCluster *KubernetesUtils,
}
}
-func (k *KubernetesUtils) Bootstrap(namespaces map[string]string, pods []string, createNamespaces bool) (map[string][]string, error) {
- for _, ns := range namespaces {
+func (k *KubernetesUtils) Bootstrap(namespaces map[string]string, pods []string, createNamespaces bool, nodeNames map[string]string, hostNetworks map[string]bool) (map[string][]string, error) {
+ for key, ns := range namespaces {
if createNamespaces {
_, err := k.CreateOrUpdateNamespace(ns, map[string]string{"ns": ns})
if err != nil {
return nil, fmt.Errorf("unable to create/update ns %s: %w", ns, err)
}
}
+ var nodeName string
+ var hostNetwork bool
+ if nodeNames != nil {
+ nodeName = nodeNames[key]
+ }
+ if hostNetworks != nil {
+ hostNetwork = hostNetworks[key]
+ }
for _, pod := range pods {
log.Infof("Creating/updating Pod '%s/%s'", ns, pod)
deployment := ns + pod
- _, err := k.CreateOrUpdateDeployment(ns, deployment, 1, map[string]string{"pod": pod, "app": pod})
+ _, err := k.CreateOrUpdateDeployment(ns, deployment, 1, map[string]string{"pod": pod, "app": pod}, nodeName, hostNetwork)
if err != nil {
return nil, fmt.Errorf("unable to create/update Deployment '%s/%s': %w", ns, pod, err)
}
diff --git a/test/e2e/nodenetworkpolicy_test.go b/test/e2e/nodenetworkpolicy_test.go
new file mode 100644
index 00000000000..8f6ca910431
--- /dev/null
+++ b/test/e2e/nodenetworkpolicy_test.go
@@ -0,0 +1,941 @@
+// Copyright 2024 Antrea Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ crdv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1"
+ "antrea.io/antrea/pkg/features"
+ . "antrea.io/antrea/test/e2e/utils"
+)
+
+const labelNodeHostname = "kubernetes.io/hostname"
+
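+// initializeAntreaNodeNetworkPolicy bootstraps the test topology: Namespace x
+// gets a host-network Pod pinned to the control-plane Node, and Namespace y a
+// Pod on the first worker Node, host-network or not depending on
+// toHostNetworkPod. When toHostNetworkPod is false, Namespace z adds a regular
+// Pod-network Pod on the same worker. Host-network Pods stand in for the Nodes
+// themselves, so Node-level policies can be exercised through the usual Pod
+// reachability matrix.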
+func initializeAntreaNodeNetworkPolicy(t *testing.T, data *TestData, toHostNetworkPod bool) {
+ p80 = 80
+ p81 = 81
+ p8080 = 8080
+ p8081 = 8081
+ p8082 = 8082
+ p8085 = 8085
+ pods = []string{"a"}
+ suffix := randName("")
+ namespaces = make(map[string]string)
+ namespaces["x"] = "x-" + suffix
+ namespaces["y"] = "y-" + suffix
+ nodes = make(map[string]string)
+ nodes["x"] = controlPlaneNodeName()
+ nodes["y"] = workerNodeName(1)
+ hostNetworks := make(map[string]bool)
+ hostNetworks["x"] = true
+ if toHostNetworkPod {
+ hostNetworks["y"] = true
+ } else {
+ hostNetworks["y"] = false
+ namespaces["z"] = "z-" + suffix
+ nodes["z"] = workerNodeName(1)
+ hostNetworks["z"] = false
+ }
+ allPods = []Pod{}
+
+ for _, podName := range pods {
+ for _, ns := range namespaces {
+ allPods = append(allPods, NewPod(ns, podName))
+ }
+ }
+
+ var err error
+ // k8sUtils is a global var
+ k8sUtils, err = NewKubernetesUtils(data)
+ failOnError(err, t)
+ ips, err := k8sUtils.Bootstrap(namespaces, pods, true, nodes, hostNetworks)
+ failOnError(err, t)
+ podIPs = ips
+}
+
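+// NodeNetworkPolicy is gated behind a feature gate on the antrea-agent, so the
+// tests below are skipped unless it is enabled, e.g. via Helm values
+// (illustrative):
+//
+//  featureGates:
+//    NodeNetworkPolicy: true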
+func skipIfNodeNetworkPolicyDisabled(tb testing.TB) {
+ skipIfFeatureDisabled(tb, features.NodeNetworkPolicy, true, false)
+}
+
+func TestAntreaNodeNetworkPolicy(t *testing.T) {
+ skipIfAntreaPolicyDisabled(t)
+ skipIfNodeNetworkPolicyDisabled(t)
+ skipIfHasWindowsNodes(t)
+ skipIfNumNodesLessThan(t, 2)
+
+ data, err := setupTest(t)
+ if err != nil {
+ t.Fatalf("Error when setting up test: %v", err)
+ }
+ defer teardownTest(t, data)
+
+ initializeAntreaNodeNetworkPolicy(t, data, true)
+
+ t.Run("Case=ACNPAllowNoDefaultIsolationTCP", func(t *testing.T) { testNodeACNPAllowNoDefaultIsolation(t, ProtocolTCP) })
+ t.Run("Case=ACNPAllowNoDefaultIsolationUDP", func(t *testing.T) { testNodeACNPAllowNoDefaultIsolation(t, ProtocolUDP) })
+ t.Run("Case=ACNPAllowNoDefaultIsolationSCTP", func(t *testing.T) { testNodeACNPAllowNoDefaultIsolation(t, ProtocolSCTP) })
+ t.Run("Case=ACNPDropEgress", func(t *testing.T) { testNodeACNPDropEgress(t, ProtocolTCP) })
+ t.Run("Case=ACNPDropEgressUDP", func(t *testing.T) { testNodeACNPDropEgress(t, ProtocolUDP) })
+ t.Run("Case=ACNPDropEgressSCTP", func(t *testing.T) { testNodeACNPDropEgress(t, ProtocolSCTP) })
+ t.Run("Case=ACNPDropIngress", func(t *testing.T) { testNodeACNPDropIngress(t, ProtocolTCP) })
+ t.Run("Case=ACNPDropIngressUDP", func(t *testing.T) { testNodeACNPDropIngress(t, ProtocolUDP) })
+ t.Run("Case=ACNPDropIngressSCTP", func(t *testing.T) { testNodeACNPDropIngress(t, ProtocolSCTP) })
+ t.Run("Case=ACNPPortRange", func(t *testing.T) { testNodeACNPPortRange(t) })
+ t.Run("Case=ACNPSourcePort", func(t *testing.T) { testNodeACNPSourcePort(t) })
+ t.Run("Case=ACNPRejectEgress", func(t *testing.T) { testNodeACNPRejectEgress(t, ProtocolTCP) })
+ t.Run("Case=ACNPRejectEgressUDP", func(t *testing.T) { testNodeACNPRejectEgress(t, ProtocolUDP) })
+ t.Run("Case=ACNPRejectEgressSCTP", func(t *testing.T) { testNodeACNPRejectEgress(t, ProtocolSCTP) })
+ t.Run("Case=ACNPRejectIngress", func(t *testing.T) { testNodeACNPRejectIngress(t, ProtocolTCP) })
+ t.Run("Case=ACNPRejectIngressUDP", func(t *testing.T) { testNodeACNPRejectIngress(t, ProtocolUDP) })
+ t.Run("Case=ACNPNoEffectOnOtherProtocols", func(t *testing.T) { testNodeACNPNoEffectOnOtherProtocols(t) })
+ t.Run("Case=ACNPPriorityOverride", func(t *testing.T) { testNodeACNPPriorityOverride(t) })
+ t.Run("Case=ACNPTierOverride", func(t *testing.T) { testNodeACNPTierOverride(t) })
+ t.Run("Case=ACNPCustomTiers", func(t *testing.T) { testNodeACNPCustomTiers(t) })
+ t.Run("Case=ACNPPriorityConflictingRule", func(t *testing.T) { testNodeACNPPriorityConflictingRule(t) })
+
+ k8sUtils.Cleanup(namespaces)
+
+ initializeAntreaNodeNetworkPolicy(t, data, false)
+
+ t.Run("Case=ACNPNamespaceIsolation", func(t *testing.T) { testNodeACNPNamespaceIsolation(t) })
+ t.Run("Case=ACNPClusterGroupUpdate", func(t *testing.T) { testNodeACNPClusterGroupUpdate(t) })
+ t.Run("Case=ACNPClusterGroupRefRuleIPBlocks", func(t *testing.T) { testNodeACNPClusterGroupRefRuleIPBlocks(t) })
+ t.Run("Case=ACNPNestedClusterGroup", func(t *testing.T) { testNodeACNPNestedClusterGroupCreateAndUpdate(t, data) })
+ t.Run("Case=ACNPNestedIPBlockClusterGroup", func(t *testing.T) { testNodeACNPNestedIPBlockClusterGroupCreateAndUpdate(t) })
+
+ k8sUtils.Cleanup(namespaces)
+}
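+
+// For reference, the builders in the cases above emit ACNPs that select Nodes
+// both in appliedTo and in rule peers. A minimal sketch of such a policy,
+// assuming illustrative Node names "node-x" and "node-y":
+//
+//   apiVersion: crd.antrea.io/v1beta1
+//   kind: ClusterNetworkPolicy
+//   metadata:
+//     name: acnp-drop-x-from-y-ingress
+//   spec:
+//     priority: 1
+//     appliedTo:
+//       - nodeSelector:
+//           matchLabels:
+//             kubernetes.io/hostname: node-x
+//     ingress:
+//       - action: Drop
+//         from:
+//           - nodeSelector:
+//               matchLabels:
+//                 kubernetes.io/hostname: node-y
+//         ports:
+//           - protocol: TCP
+//             port: 80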
+
+// testNodeACNPAllowNoDefaultIsolation tests that no default isolation rules are created for ACNPs applied to Node.
+func testNodeACNPAllowNoDefaultIsolation(t *testing.T, protocol AntreaPolicyProtocol) {
+ if protocol == ProtocolSCTP {
+ // SCTP testing is failing on our IPv6 CI testbeds at the moment. This seems to be
+ // related to an issue with ESX networking for SCTPv6 traffic when the Pods are on
+ // different Node VMs which are themselves on different ESX hosts. We are
+ // investigating the issue and disabling the tests for IPv6 clusters in the
+ // meantime.
+ skipIfIPv6Cluster(t)
+ }
+ builder1 := &ClusterNetworkPolicySpecBuilder{}
+ builder1 = builder1.SetName("acnp-allow-x-from-y-ingress").
+ SetPriority(1.1).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder1.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+
+ builder2 := &ClusterNetworkPolicySpecBuilder{}
+ builder2 = builder2.SetName("acnp-allow-x-to-y-egress").
+ SetPriority(1.1).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder2.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+
+ reachability := NewReachability(allPods, Connected)
+ testStep := []*TestStep{
+ {
+ "Port 81",
+ reachability,
+ []metav1.Object{builder1.Get(), builder2.Get()},
+ []int32{81},
+ protocol,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP Allow No Default Isolation", testStep},
+ }
+ executeTests(t, testCase)
+}
+
+// testNodeACNPDropEgress tests that an ACNP applied to Node is able to drop egress traffic from Node x to Node y.
+func testNodeACNPDropEgress(t *testing.T, protocol AntreaPolicyProtocol) {
+ if protocol == ProtocolSCTP {
+ // SCTP testing is failing on our IPv6 CI testbeds at the moment. This seems to be
+ // related to an issue with ESX networking for SCTPv6 traffic when the Pods are on
+ // different Node VMs which are themselves on different ESX hosts. We are
+ // investigating the issue and disabling the tests for IPv6 clusters in the
+ // meantime.
+ skipIfIPv6Cluster(t)
+ }
+ if protocol == ProtocolUDP {
+ // For UDP, when action `Reject` or `Drop` is specified in an egress rule, agnhost immediately gets an unexpected
+ // error message like the following:
+ // UNKNOWN: write udp 172.18.0.3:58150->172.18.0.2:80: write: operation not permitted
+ // UNKNOWN: write udp 172.18.0.3:58150->172.18.0.2:80: write: operation not permitted
+ // UNKNOWN: write udp 172.18.0.3:58150->172.18.0.2:80: write: operation not permitted
+ t.Skip("Skipping test as dropping UDP egress traffic doesn't return the expected stdout or stderr message")
+ }
+ builder := &ClusterNetworkPolicySpecBuilder{}
+ builder = builder.SetName("acnp-drop-x-to-y-egress").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ reachability := NewReachability(allPods, Connected)
+ reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
+ testStep := []*TestStep{
+ {
+ "Port 80",
+ reachability,
+ []metav1.Object{builder.Get()},
+ []int32{80},
+ protocol,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP Drop Egress From Node:x to Node:y", testStep},
+ }
+ executeTests(t, testCase)
+}
+
+// testNodeACNPDropIngress tests that an ACNP applied to Node is able to drop ingress traffic from Node y to Node x.
+func testNodeACNPDropIngress(t *testing.T, protocol AntreaPolicyProtocol) {
+ if protocol == ProtocolSCTP {
+ // SCTP testing is failing on our IPv6 CI testbeds at the moment. This seems to be
+ // related to an issue with ESX networking for SCTPv6 traffic when the Pods are on
+ // different Node VMs which are themselves on different ESX hosts. We are
+ // investigating the issue and disabling the tests for IPv6 clusters in the
+ // meantime.
+ skipIfIPv6Cluster(t)
+ }
+ builder := &ClusterNetworkPolicySpecBuilder{}
+ builder = builder.SetName("acnp-drop-x-from-y-ingress").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ reachability := NewReachability(allPods, Connected)
+ reachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped)
+ testStep := []*TestStep{
+ {
+ "Port 80",
+ reachability,
+ []metav1.Object{builder.Get()},
+ []int32{80},
+ protocol,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP Drop Ingress From Node:y to Node:x", testStep},
+ }
+ executeTests(t, testCase)
+}
+
+// testNodeACNPPortRange tests that the port range in an ACNP applied to Node works as expected.
+func testNodeACNPPortRange(t *testing.T) {
+ builder := &ClusterNetworkPolicySpecBuilder{}
+ builder = builder.SetName("acnp-drop-x-to-y-egress-port-range").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil)
+
+ reachability := NewReachability(allPods, Connected)
+ reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
+ testSteps := []*TestStep{
+ {
+ "ACNP Drop Ports 8080:8082",
+ reachability,
+ []metav1.Object{builder.Get()},
+ []int32{8080, 8081, 8082},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ }
+
+ testCase := []*TestCase{
+ {"ACNP Drop Egress From Node:x to Node:y with a portRange", testSteps},
+ }
+ executeTests(t, testCase)
+}
+
+// testNodeACNPSourcePort tests source port filtering for ACNPs applied to Node. The agnhost image used in E2E tests uses
+// ephemeral ports to initiate TCP connections, which should be 32768–60999 by default (https://en.wikipedia.org/wiki/Ephemeral_port).
+// This test retrieves the port range from the client Pod and uses it in sourcePort and sourceEndPort of an ACNP rule to
+// verify that packets can be matched by source port.
+func testNodeACNPSourcePort(t *testing.T) {
+ portStart, portEnd, err := k8sUtils.getTCPv4SourcePortRangeFromPod(namespaces["x"], "a")
+ failOnError(err, t)
+ builder := &ClusterNetworkPolicySpecBuilder{}
+ builder = builder.SetName("acnp-source-port").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder.AddIngressForSrcPort(ProtocolTCP, nil, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ builder2 := &ClusterNetworkPolicySpecBuilder{}
+ builder2 = builder2.SetName("acnp-source-port").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder2.AddIngressForSrcPort(ProtocolTCP, &p80, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ builder3 := &ClusterNetworkPolicySpecBuilder{}
+ builder3 = builder3.SetName("acnp-source-port").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder3.AddIngressForSrcPort(ProtocolTCP, &p80, &p81, &portStart, &portEnd, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ reachability := NewReachability(allPods, Connected)
+ reachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped)
+ // After adding the dst port constraint of port 80, traffic on port 81 should not be affected.
+ updatedReachability := NewReachability(allPods, Connected)
+
+ testSteps := []*TestStep{
+ {
+ "Port 80",
+ reachability,
+ []metav1.Object{builder.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ {
+ "Port 81",
+ updatedReachability,
+ []metav1.Object{builder2.Get()},
+ []int32{81},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ {
+ "Port range 80-81",
+ reachability,
+ []metav1.Object{builder3.Get()},
+ []int32{80, 81},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP Drop Node:y to Node:x based on source port", testSteps},
+ }
+ executeTests(t, testCase)
+}
+
+// testNodeACNPRejectEgress tests that an ACNP applied to Node is able to reject egress traffic from Node x to Node y.
+func testNodeACNPRejectEgress(t *testing.T, protocol AntreaPolicyProtocol) {
+ if protocol == ProtocolSCTP {
+ // SCTP testing is failing on our IPv6 CI testbeds at the moment. This seems to be
+ // related to an issue with ESX networking for SCTPv6 traffic when the Pods are on
+ // different Node VMs which are themselves on different ESX hosts. We are
+ // investigating the issue and disabling the tests for IPv6 clusters in the
+ // meantime.
+ skipIfIPv6Cluster(t)
+ }
+ if protocol == ProtocolUDP {
+ // For UDP, when action `Reject` or `Drop` is specified in an egress rule, agnhost immediately gets an unexpected
+ // error message like the following:
+ // UNKNOWN: write udp 172.18.0.3:58150->172.18.0.2:80: write: operation not permitted
+ // UNKNOWN: write udp 172.18.0.3:58150->172.18.0.2:80: write: operation not permitted
+ // UNKNOWN: write udp 172.18.0.3:58150->172.18.0.2:80: write: operation not permitted
+ t.Skip("Skipping test as dropping UDP egress traffic doesn't return the expected stdout or stderr message")
+ }
+
+ builder := &ClusterNetworkPolicySpecBuilder{}
+ builder = builder.SetName("acnp-reject-x-to-y-egress").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+
+ reachability := NewReachability(allPods, Connected)
+
+ expectedResult := Rejected
+ // For SCTP, when action `Reject` is specified in an egress rule, it behaves identically to action `Drop`.
+ if protocol == ProtocolSCTP {
+ expectedResult = Dropped
+ }
+ reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), expectedResult)
+ testStep := []*TestStep{
+ {
+ "Port 80",
+ reachability,
+ []metav1.Object{builder.Get()},
+ []int32{80},
+ protocol,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP Reject Egress From Node:x to Node:y", testStep},
+ }
+ executeTests(t, testCase)
+}
+
+// testNodeACNPRejectIngress tests that an ACNP applied to Node is able to reject ingress traffic from Node y to Node x.
+func testNodeACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) {
+ builder := &ClusterNetworkPolicySpecBuilder{}
+ builder = builder.SetName("acnp-reject-x-from-y-ingress").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil)
+
+ reachability := NewReachability(allPods, Connected)
+ reachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Rejected)
+ testStep := []*TestStep{
+ {
+ "Port 80",
+ reachability,
+ []metav1.Object{builder.Get()},
+ []int32{80},
+ protocol,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP Reject ingress from Node:y to Node:x", testStep},
+ }
+ executeTests(t, testCase)
+}
+
+// testNodeACNPNoEffectOnOtherProtocols tests that an ACNP applied to Node which drops TCP traffic won't affect other protocols (e.g. UDP).
+func testNodeACNPNoEffectOnOtherProtocols(t *testing.T) {
+ builder := &ClusterNetworkPolicySpecBuilder{}
+ builder = builder.SetName("acnp-drop-x-from-y-ingress").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ reachability1 := NewReachability(allPods, Connected)
+ reachability1.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped)
+
+ reachability2 := NewReachability(allPods, Connected)
+
+ testStep := []*TestStep{
+ {
+ "Port 80",
+ reachability1,
+ []metav1.Object{builder.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ {
+ "Port 80",
+ reachability2,
+ []metav1.Object{builder.Get()},
+ []int32{80},
+ ProtocolUDP,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP Drop Ingress From Node:y to Node:x TCP Not UDP", testStep},
+ }
+ executeTests(t, testCase)
+}
+
+// testNodeACNPPriorityOverride tests priority overriding in three ACNPs applied to Node. Those three ACNPs are synced in
+// a specific order to test priority reassignment, and each controls a smaller set of traffic patterns as priority increases.
+func testNodeACNPPriorityOverride(t *testing.T) {
+ builder1 := &ClusterNetworkPolicySpecBuilder{}
+ builder1 = builder1.SetName("acnp-priority1").
+ SetPriority(1.001).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ // Highest priority. Drops traffic from y to x.
+ builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ builder2 := &ClusterNetworkPolicySpecBuilder{}
+ builder2 = builder2.SetName("acnp-priority2").
+ SetPriority(1.002).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ // Medium priority. Allows traffic from y to x.
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+
+ builder3 := &ClusterNetworkPolicySpecBuilder{}
+ builder3 = builder3.SetName("acnp-priority3").
+ SetPriority(1.003).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ // Lowest priority. Drops traffic from y to x.
+ builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ reachabilityTwoACNPs := NewReachability(allPods, Connected)
+
+ reachabilityAllACNPs := NewReachability(allPods, Connected)
+ reachabilityAllACNPs.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped)
+
+ testStepTwoACNP := []*TestStep{
+ {
+ "Two Policies with different priorities",
+ reachabilityTwoACNPs,
+ []metav1.Object{builder3.Get(), builder2.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ }
+ // Create the Policies in a specific order to make sure that priority re-assignments work as expected.
+ testStepAll := []*TestStep{
+ {
+ "All three Policies",
+ reachabilityAllACNPs,
+ []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP PriorityOverride Intermediate", testStepTwoACNP},
+ {"ACNP PriorityOverride All", testStepAll},
+ }
+ executeTests(t, testCase)
+}
+
+// testNodeACNPTierOverride tests tier priority overriding in three ACNPs applied to Node. Each ACNP controls a smaller
+// set of traffic patterns as tier priority increases.
+func testNodeACNPTierOverride(t *testing.T) {
+ builder1 := &ClusterNetworkPolicySpecBuilder{}
+ builder1 = builder1.SetName("acnp-tier-emergency").
+ SetTier("emergency").
+ SetPriority(100).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ // Highest priority tier. Drops traffic from y to x.
+ builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ builder2 := &ClusterNetworkPolicySpecBuilder{}
+ builder2 = builder2.SetName("acnp-tier-securityops").
+ SetTier("securityops").
+ SetPriority(10).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ // Medium priority tier. Allows traffic from y to x.
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+
+ builder3 := &ClusterNetworkPolicySpecBuilder{}
+ builder3 = builder3.SetName("acnp-tier-application").
+ SetTier("application").
+ SetPriority(1).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ // Lowest priority tier. Drops traffic from y to x.
+ builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ reachabilityTwoACNPs := NewReachability(allPods, Connected)
+
+ reachabilityAllACNPs := NewReachability(allPods, Connected)
+ reachabilityAllACNPs.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped)
+
+ testStepTwoACNP := []*TestStep{
+ {
+ "Two Policies in different tiers",
+ reachabilityTwoACNPs,
+ []metav1.Object{builder3.Get(), builder2.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ }
+ testStepAll := []*TestStep{
+ {
+ "All three Policies in different tiers",
+ reachabilityAllACNPs,
+ []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP TierOverride Intermediate", testStepTwoACNP},
+ {"ACNP TierOverride All", testStepAll},
+ }
+ executeTests(t, testCase)
+}
+
+// testNodeACNPCustomTiers tests tier priority overriding in two ACNPs applied to Node with custom created tiers. Each ACNP
+// controls a smaller set of traffic patterns as tier priority increases.
+func testNodeACNPCustomTiers(t *testing.T) {
+ k8sUtils.DeleteTier("high-priority")
+ k8sUtils.DeleteTier("low-priority")
+ // Create two custom tiers with tier priority immediately next to each other.
+ _, err := k8sUtils.CreateNewTier("high-priority", 245)
+ failOnError(err, t)
+ _, err = k8sUtils.CreateNewTier("low-priority", 246)
+ failOnError(err, t)
+
+ builder1 := &ClusterNetworkPolicySpecBuilder{}
+ builder1 = builder1.SetName("acnp-tier-high").
+ SetTier("high-priority").
+ SetPriority(100).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ // Higher priority tier. Allows traffic from y to x.
+ builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+
+ builder2 := &ClusterNetworkPolicySpecBuilder{}
+ builder2 = builder2.SetName("acnp-tier-low").
+ SetTier("low-priority").
+ SetPriority(1).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ // Lower priority tier. Drops traffic from y to x.
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ reachabilityOneACNP := NewReachability(allPods, Connected)
+ reachabilityOneACNP.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped)
+ testStepOneACNP := []*TestStep{
+ {
+ "One Policy",
+ reachabilityOneACNP,
+ []metav1.Object{builder2.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ }
+
+ reachabilityTwoACNPs := NewReachability(allPods, Connected)
+ testStepTwoACNP := []*TestStep{
+ {
+ "Two Policies in different tiers",
+ reachabilityTwoACNPs,
+ []metav1.Object{builder2.Get(), builder1.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP Custom Tier priority with one policy", testStepOneACNP},
+ {"ACNP Custom Tier priority with two policies", testStepTwoACNP},
+ }
+ executeTests(t, testCase)
+ // Cleanup customized tiers. ACNPs created in those tiers need to be deleted first.
+ failOnError(k8sUtils.CleanACNPs(), t)
+ failOnError(k8sUtils.DeleteTier("high-priority"), t)
+ failOnError(k8sUtils.DeleteTier("low-priority"), t)
+ time.Sleep(networkPolicyDelay)
+}
+
+// testNodeACNPPriorityConflictingRule tests that if there are two ACNPs applied to Node in the cluster with rules that
+// conflict with each other, the ACNP with the higher priority will prevail.
+func testNodeACNPPriorityConflictingRule(t *testing.T) {
+ builder1 := &ClusterNetworkPolicySpecBuilder{}
+ builder1 = builder1.SetName("acnp-drop").
+ SetPriority(1).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ builder2 := &ClusterNetworkPolicySpecBuilder{}
+ builder2 = builder2.SetName("acnp-allow").
+ SetPriority(2).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ // The following ingress rule will have no effect, as it is exactly the same as the ingress rule of acnp-drop,
+ // but acnp-allow has lower priority.
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil)
+
+ reachabilityBothACNP := NewReachability(allPods, Connected)
+ reachabilityBothACNP.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped)
+ testStep := []*TestStep{
+ {
+ "Both ACNP",
+ reachabilityBothACNP,
+ []metav1.Object{builder1.Get(), builder2.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP Priority Conflicting Rule", testStep},
+ }
+ executeTests(t, testCase)
+}
+
+func testNodeACNPNamespaceIsolation(t *testing.T) {
+ builder1 := &ClusterNetworkPolicySpecBuilder{}
+ builder1 = builder1.SetName("test-acnp-ns-isolation").
+ SetTier("baseline").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder1.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, nil, nil, nil,
+ false, nil, crdv1beta1.RuleActionDrop, "", "", nil)
+
+ reachability1 := NewReachability(allPods, Connected)
+ reachability1.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped)
+ testStep1 := &TestStep{
+ "Port 80",
+ reachability1,
+ []metav1.Object{builder1.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ }
+
+ testCase := []*TestCase{
+ {"ACNP Namespace isolation for namespace y", []*TestStep{testStep1}},
+ }
+ executeTests(t, testCase)
+}
+
+func testNodeACNPClusterGroupUpdate(t *testing.T) {
+ cgName := "cg-ns-z-then-y"
+ cgBuilder := &ClusterGroupSpecBuilder{}
+ cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil)
+ // Update CG NS selector to group Pods from Namespace Y
+ updatedCgBuilder := &ClusterGroupSpecBuilder{}
+ updatedCgBuilder = updatedCgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil)
+ builder := &ClusterNetworkPolicySpecBuilder{}
+ builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil)
+
+ reachability := NewReachability(allPods, Connected)
+ reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
+
+ updatedReachability := NewReachability(allPods, Connected)
+ updatedReachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped)
+ testStep := []*TestStep{
+ {
+ "Port 80",
+ reachability,
+ []metav1.Object{cgBuilder.Get(), builder.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ {
+ "Port 80 - update",
+ updatedReachability,
+ []metav1.Object{updatedCgBuilder.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP Drop Egress From Node:x to ClusterGroup with NS:z updated to ClusterGroup with NS:y", testStep},
+ }
+ executeTests(t, testCase)
+}
+
+func testNodeACNPClusterGroupRefRuleIPBlocks(t *testing.T) {
+ podYAIP, _ := podIPs[namespaces["y"]+"/a"]
+ podZAIP, _ := podIPs[namespaces["z"]+"/a"]
+ // There are three possibilities for a Pod's IP(s):
+ // 1. Only one IPv4 address.
+ // 2. Only one IPv6 address.
+ // 3. One IPv4 and one IPv6 address, in an unspecified order.
+ // We need to add all IP(s) of a Pod as CIDRs to the IPBlocks.
+ genCIDR := func(ip string) string {
+ if strings.Contains(ip, ".") {
+ return ip + "/32"
+ }
+ return ip + "/128"
+ }
+ var ipBlock1, ipBlock2 []crdv1beta1.IPBlock
+ for i := 0; i < len(podYAIP); i++ {
+ ipBlock1 = append(ipBlock1, crdv1beta1.IPBlock{CIDR: genCIDR(podYAIP[i])})
+ ipBlock2 = append(ipBlock2, crdv1beta1.IPBlock{CIDR: genCIDR(podZAIP[i])})
+ }
+
+ cgName := "cg-ipblocks-pod-in-ns-y"
+ cgBuilder := &ClusterGroupSpecBuilder{}
+ cgBuilder = cgBuilder.SetName(cgName).
+ SetIPBlocks(ipBlock1)
+ cgName2 := "cg-ipblock-pod-in-ns-z"
+ cgBuilder2 := &ClusterGroupSpecBuilder{}
+ cgBuilder2 = cgBuilder2.SetName(cgName2).
+ SetIPBlocks(ipBlock2)
+
+ builder := &ClusterNetworkPolicySpecBuilder{}
+ builder = builder.SetName("acnp-deny-x-to-yz-egress").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil)
+ builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil)
+
+ reachability := NewReachability(allPods, Connected)
+ reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
+ reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["z"]+"/a"), Dropped)
+ testStep := []*TestStep{
+ {
+ "Port 80",
+ reachability,
+ []metav1.Object{builder.Get(), cgBuilder.Get(), cgBuilder2.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ },
+ }
+ testCase := []*TestCase{
+ {"ACNP Drop Egress From Node x to Pod y/a and z/a to ClusterGroup with ipBlocks", testStep},
+ }
+ executeTests(t, testCase)
+}
+
+func testNodeACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) {
+ cg1Name := "cg-1"
+ cgBuilder1 := &ClusterGroupSpecBuilder{}
+ cgBuilder1 = cgBuilder1.SetName(cg1Name).SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil)
+ cgNestedName := "cg-nested"
+ cgBuilderNested := &ClusterGroupSpecBuilder{}
+ cgBuilderNested = cgBuilderNested.SetName(cgNestedName).SetChildGroups([]string{cg1Name})
+
+ builder := &ClusterNetworkPolicySpecBuilder{}
+ builder = builder.SetName("cnp-nested-cg").SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}).
+ AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ false, nil, crdv1beta1.RuleActionDrop, cgNestedName, "", nil)
+
+ reachability := NewReachability(allPods, Connected)
+ reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped)
+ testStep1 := &TestStep{
+ "Port 80",
+ reachability,
+ // Note that in this test case, the ClusterGroups are created after the ACNP.
+ []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilderNested.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ }
+
+ cg2Name := "cg-2"
+ cgBuilder2 := &ClusterGroupSpecBuilder{}
+ cgBuilder2 = cgBuilder2.SetName(cg2Name).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil)
+ cgBuilderNested = cgBuilderNested.SetChildGroups([]string{cg2Name})
+ reachability2 := NewReachability(allPods, Connected)
+ reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped)
+ testStep2 := &TestStep{
+ "Port 80 updated",
+ reachability2,
+ []metav1.Object{cgBuilder2.Get(), cgBuilderNested.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ }
+
+ testSteps := []*TestStep{testStep1, testStep2}
+ testCase := []*TestCase{
+ {"ACNP nested ClusterGroup create and update", testSteps},
+ }
+ executeTestsWithData(t, testCase, data)
+}
+
+func testNodeACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) {
+ podYAIP, _ := podIPs[namespaces["y"]+"/a"]
+ podZAIP, _ := podIPs[namespaces["z"]+"/a"]
+ genCIDR := func(ip string) string {
+ switch IPFamily(ip) {
+ case "v4":
+ return ip + "/32"
+ case "v6":
+ return ip + "/128"
+ default:
+ return ""
+ }
+ }
+ cg1Name, cg2Name := "cg-y", "cg-z"
+ cgParentName := "cg-parent"
+ var ipBlockYA, ipBlockZA []crdv1beta1.IPBlock
+ for i := 0; i < len(podYAIP); i++ {
+ ipBlockYA = append(ipBlockYA, crdv1beta1.IPBlock{CIDR: genCIDR(podYAIP[i])})
+ ipBlockZA = append(ipBlockZA, crdv1beta1.IPBlock{CIDR: genCIDR(podZAIP[i])})
+ }
+ cgBuilder1 := &ClusterGroupSpecBuilder{}
+ cgBuilder1 = cgBuilder1.SetName(cg1Name).SetIPBlocks(ipBlockYA)
+ cgBuilder2 := &ClusterGroupSpecBuilder{}
+ cgBuilder2 = cgBuilder2.SetName(cg2Name).SetIPBlocks(ipBlockZA)
+ cgParent := &ClusterGroupSpecBuilder{}
+ cgParent = cgParent.SetName(cgParentName).SetChildGroups([]string{cg1Name, cg2Name})
+
+ builder := &ClusterNetworkPolicySpecBuilder{}
+ builder = builder.SetName("acnp-deny-x-to-yz-egress").
+ SetPriority(1.0).
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}})
+ builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+ nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil)
+
+ reachability := NewReachability(allPods, Connected)
+ reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
+ reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["z"]+"/a"), Dropped)
+ testStep := &TestStep{
+ "Port 80",
+ reachability,
+ []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilder2.Get(), cgParent.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ }
+
+ cgParent = cgParent.SetChildGroups([]string{cg1Name})
+
+ reachability2 := NewReachability(allPods, Connected)
+ reachability2.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped)
+ testStep2 := &TestStep{
+ "Port 80, updated",
+ reachability2,
+ []metav1.Object{cgParent.Get()},
+ []int32{80},
+ ProtocolTCP,
+ 0,
+ nil,
+ }
+
+ testCase := []*TestCase{
+ {"ACNP Drop Ingress From Node x to Pod y/a and z/a with nested ClusterGroup with ipBlocks", []*TestStep{testStep, testStep2}},
+ }
+ executeTests(t, testCase)
+}
diff --git a/test/e2e/utils/cnp_spec_builder.go b/test/e2e/utils/cnp_spec_builder.go
index 83d0ee424bb..0c6acdc7585 100644
--- a/test/e2e/utils/cnp_spec_builder.go
+++ b/test/e2e/utils/cnp_spec_builder.go
@@ -27,12 +27,14 @@ type ClusterNetworkPolicySpecBuilder struct {
}
type ACNPAppliedToSpec struct {
- PodSelector map[string]string
- NSSelector map[string]string
- PodSelectorMatchExp []metav1.LabelSelectorRequirement
- NSSelectorMatchExp []metav1.LabelSelectorRequirement
- Group string
- Service *crdv1beta1.NamespacedName
+ PodSelector map[string]string
+ NodeSelector map[string]string
+ NSSelector map[string]string
+ PodSelectorMatchExp []metav1.LabelSelectorRequirement
+ NodeSelectorMatchExp []metav1.LabelSelectorRequirement
+ NSSelectorMatchExp []metav1.LabelSelectorRequirement
+ Group string
+ Service *crdv1beta1.NamespacedName
}
func (b *ClusterNetworkPolicySpecBuilder) Get() *crdv1beta1.ClusterNetworkPolicy {
@@ -67,37 +69,54 @@ func (b *ClusterNetworkPolicySpecBuilder) SetTier(tier string) *ClusterNetworkPo
func (b *ClusterNetworkPolicySpecBuilder) SetAppliedToGroup(specs []ACNPAppliedToSpec) *ClusterNetworkPolicySpecBuilder {
for _, spec := range specs {
- appliedToPeer := b.GetAppliedToPeer(spec.PodSelector, spec.NSSelector, spec.PodSelectorMatchExp, spec.NSSelectorMatchExp, spec.Group, spec.Service)
+ appliedToPeer := b.GetAppliedToPeer(spec.PodSelector,
+ spec.NodeSelector,
+ spec.NSSelector,
+ spec.PodSelectorMatchExp,
+ spec.NodeSelectorMatchExp,
+ spec.NSSelectorMatchExp,
+ spec.Group,
+ spec.Service)
b.Spec.AppliedTo = append(b.Spec.AppliedTo, appliedToPeer)
}
return b
}
func (b *ClusterNetworkPolicySpecBuilder) GetAppliedToPeer(podSelector map[string]string,
+ nodeSelector map[string]string,
nsSelector map[string]string,
podSelectorMatchExp []metav1.LabelSelectorRequirement,
+ nodeSelectorMatchExp []metav1.LabelSelectorRequirement,
nsSelectorMatchExp []metav1.LabelSelectorRequirement,
appliedToCG string,
service *crdv1beta1.NamespacedName) crdv1beta1.AppliedTo {
- var ps *metav1.LabelSelector
- var ns *metav1.LabelSelector
+ var podSel *metav1.LabelSelector
+ var nodeSel *metav1.LabelSelector
+ var nsSel *metav1.LabelSelector
if podSelector != nil || podSelectorMatchExp != nil {
- ps = &metav1.LabelSelector{
+ podSel = &metav1.LabelSelector{
MatchLabels: podSelector,
MatchExpressions: podSelectorMatchExp,
}
}
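+ // Handle the new Node selector the same way as the Pod and Namespace selectors: set it only when provided.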
+ if nodeSelector != nil || nodeSelectorMatchExp != nil {
+ nodeSel = &metav1.LabelSelector{
+ MatchLabels: nodeSelector,
+ MatchExpressions: nodeSelectorMatchExp,
+ }
+ }
if nsSelector != nil || nsSelectorMatchExp != nil {
- ns = &metav1.LabelSelector{
+ nsSel = &metav1.LabelSelector{
MatchLabels: nsSelector,
MatchExpressions: nsSelectorMatchExp,
}
}
peer := crdv1beta1.AppliedTo{
- PodSelector: ps,
- NamespaceSelector: ns,
+ PodSelector: podSel,
+ NodeSelector: nodeSel,
+ NamespaceSelector: nsSel,
}
if appliedToCG != "" {
peer.Group = appliedToCG
@@ -110,12 +129,13 @@ func (b *ClusterNetworkPolicySpecBuilder) GetAppliedToPeer(podSelector map[strin
func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol,
port *int32, portName *string, endPort, icmpType, icmpCode, igmpType *int32,
- groupAddress, cidr *string, podSelector map[string]string, nsSelector map[string]string,
- podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool,
+ groupAddress, cidr *string, podSelector map[string]string, nodeSelector map[string]string, nsSelector map[string]string,
+ podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool,
ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction, ruleClusterGroup, name string, serviceAccount *crdv1beta1.NamespacedName) *ClusterNetworkPolicySpecBuilder {
- var pSel *metav1.LabelSelector
- var nSel *metav1.LabelSelector
+ var podSel *metav1.LabelSelector
+ var nodeSel *metav1.LabelSelector
+ var nsSel *metav1.LabelSelector
var ns *crdv1beta1.PeerNamespaces
var appliedTos []crdv1beta1.AppliedTo
matchSelf := crdv1beta1.NamespaceMatchSelf
@@ -125,13 +145,19 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol
}
if podSelector != nil || podSelectorMatchExp != nil {
- pSel = &metav1.LabelSelector{
+ podSel = &metav1.LabelSelector{
MatchLabels: podSelector,
MatchExpressions: podSelectorMatchExp,
}
}
+ if nodeSelector != nil || nodeSelectorMatchExp != nil {
+ nodeSel = &metav1.LabelSelector{
+ MatchLabels: nodeSelector,
+ MatchExpressions: nodeSelectorMatchExp,
+ }
+ }
if nsSelector != nil || nsSelectorMatchExp != nil {
- nSel = &metav1.LabelSelector{
+ nsSel = &metav1.LabelSelector{
MatchLabels: nsSelector,
MatchExpressions: nsSelectorMatchExp,
}
@@ -148,14 +174,22 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol
}
}
for _, at := range ruleAppliedToSpecs {
- appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group, at.Service))
+ appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector,
+ at.NodeSelector,
+ at.NSSelector,
+ at.PodSelectorMatchExp,
+ at.NodeSelectorMatchExp,
+ at.NSSelectorMatchExp,
+ at.Group,
+ at.Service))
}
// An empty From/To in ACNP rules evaluates to match all addresses.
policyPeer := make([]crdv1beta1.NetworkPolicyPeer, 0)
- if pSel != nil || nSel != nil || ns != nil || ipBlock != nil || ruleClusterGroup != "" || serviceAccount != nil {
+ if podSel != nil || nodeSel != nil || nsSel != nil || ns != nil || ipBlock != nil || ruleClusterGroup != "" || serviceAccount != nil {
policyPeer = []crdv1beta1.NetworkPolicyPeer{{
- PodSelector: pSel,
- NamespaceSelector: nSel,
+ PodSelector: podSel,
+ NodeSelector: nodeSel,
+ NamespaceSelector: nsSel,
Namespaces: ns,
IPBlock: ipBlock,
Group: ruleClusterGroup,
@@ -180,12 +214,13 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol
// all conflicting PRs are merged.
func (b *ClusterNetworkPolicySpecBuilder) AddIngressForSrcPort(protoc AntreaPolicyProtocol,
port, endPort, srcPort, endSrcPort, icmpType, icmpCode, igmpType *int32,
- groupAddress, cidr *string, podSelector map[string]string, nsSelector map[string]string,
- podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool,
+ groupAddress, cidr *string, podSelector map[string]string, nodeSelector map[string]string, nsSelector map[string]string,
+ podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool,
ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction, ruleClusterGroup, name string, serviceAccount *crdv1beta1.NamespacedName) *ClusterNetworkPolicySpecBuilder {
- var pSel *metav1.LabelSelector
- var nSel *metav1.LabelSelector
+ var podSel *metav1.LabelSelector
+ var nodeSel *metav1.LabelSelector
+ var nsSel *metav1.LabelSelector
var ns *crdv1beta1.PeerNamespaces
var appliedTos []crdv1beta1.AppliedTo
matchSelf := crdv1beta1.NamespaceMatchSelf
@@ -195,13 +230,19 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngressForSrcPort(protoc AntreaPoli
}
if podSelector != nil || podSelectorMatchExp != nil {
- pSel = &metav1.LabelSelector{
+ podSel = &metav1.LabelSelector{
MatchLabels: podSelector,
MatchExpressions: podSelectorMatchExp,
}
}
+ if nodeSelector != nil || nodeSelectorMatchExp != nil {
+ nodeSel = &metav1.LabelSelector{
+ MatchLabels: nodeSelector,
+ MatchExpressions: nodeSelectorMatchExp,
+ }
+ }
if nsSelector != nil || nsSelectorMatchExp != nil {
- nSel = &metav1.LabelSelector{
+ nsSel = &metav1.LabelSelector{
MatchLabels: nsSelector,
MatchExpressions: nsSelectorMatchExp,
}
@@ -218,14 +259,22 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngressForSrcPort(protoc AntreaPoli
}
}
for _, at := range ruleAppliedToSpecs {
- appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group, at.Service))
+ appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector,
+ at.NodeSelector,
+ at.NSSelector,
+ at.PodSelectorMatchExp,
+ at.NodeSelectorMatchExp,
+ at.NSSelectorMatchExp,
+ at.Group,
+ at.Service))
}
// An empty From/To in ACNP rules evaluates to match all addresses.
policyPeer := make([]crdv1beta1.NetworkPolicyPeer, 0)
- if pSel != nil || nSel != nil || ns != nil || ipBlock != nil || ruleClusterGroup != "" || serviceAccount != nil {
+ if podSel != nil || nodeSel != nil || nsSel != nil || ns != nil || ipBlock != nil || ruleClusterGroup != "" || serviceAccount != nil {
policyPeer = []crdv1beta1.NetworkPolicyPeer{{
- PodSelector: pSel,
- NamespaceSelector: nSel,
+ PodSelector: podSel,
+ NodeSelector: nodeSel,
+ NamespaceSelector: nsSel,
Namespaces: ns,
IPBlock: ipBlock,
Group: ruleClusterGroup,
@@ -247,15 +296,15 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngressForSrcPort(protoc AntreaPoli
func (b *ClusterNetworkPolicySpecBuilder) AddEgress(protoc AntreaPolicyProtocol,
port *int32, portName *string, endPort, icmpType, icmpCode, igmpType *int32,
- groupAddress, cidr *string, podSelector map[string]string, nsSelector map[string]string,
- podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool,
+ groupAddress, cidr *string, podSelector map[string]string, nodeSelector map[string]string, nsSelector map[string]string,
+ podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool,
ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction, ruleClusterGroup, name string, serviceAccount *crdv1beta1.NamespacedName) *ClusterNetworkPolicySpecBuilder {
// For simplicity, we just reuse the Ingress code here. The underlying data model for ingress/egress is identical,
// with the exception of calling the rule `To` vs. `From`.
c := &ClusterNetworkPolicySpecBuilder{}
- c.AddIngress(protoc, port, portName, endPort, icmpType, icmpCode, igmpType, groupAddress, cidr, podSelector, nsSelector,
- podSelectorMatchExp, nsSelectorMatchExp, selfNS, ruleAppliedToSpecs, action, ruleClusterGroup, name, serviceAccount)
+ c.AddIngress(protoc, port, portName, endPort, icmpType, icmpCode, igmpType, groupAddress, cidr, podSelector, nodeSelector, nsSelector,
+ podSelectorMatchExp, nodeSelectorMatchExp, nsSelectorMatchExp, selfNS, ruleAppliedToSpecs, action, ruleClusterGroup, name, serviceAccount)
theRule := c.Get().Spec.Ingress[0]
b.Spec.Egress = append(b.Spec.Egress, crdv1beta1.Rule{
@@ -272,7 +321,14 @@ func (b *ClusterNetworkPolicySpecBuilder) AddNodeSelectorRule(nodeSelector *meta
ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction, isEgress bool) *ClusterNetworkPolicySpecBuilder {
var appliedTos []crdv1beta1.AppliedTo
for _, at := range ruleAppliedToSpecs {
- appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group, at.Service))
+ appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector,
+ at.NodeSelector,
+ at.NSSelector,
+ at.PodSelectorMatchExp,
+ at.NodeSelectorMatchExp,
+ at.NSSelectorMatchExp,
+ at.Group,
+ at.Service))
}
policyPeer := []crdv1beta1.NetworkPolicyPeer{{NodeSelector: nodeSelector}}
k8sProtocol, _ := AntreaPolicyProtocolToK8sProtocol(protoc)
@@ -299,7 +355,14 @@ func (b *ClusterNetworkPolicySpecBuilder) AddFQDNRule(fqdn string,
ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction) *ClusterNetworkPolicySpecBuilder {
var appliedTos []crdv1beta1.AppliedTo
for _, at := range ruleAppliedToSpecs {
- appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group, at.Service))
+ appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector,
+ at.NodeSelector,
+ at.NSSelector,
+ at.PodSelectorMatchExp,
+ at.NodeSelectorMatchExp,
+ at.NSSelectorMatchExp,
+ at.Group,
+ at.Service))
}
policyPeer := []crdv1beta1.NetworkPolicyPeer{{FQDN: fqdn}}
ports, _ := GenPortsOrProtocols(protoc, port, portName, endPort, nil, nil, nil, nil, nil, nil)
@@ -318,7 +381,14 @@ func (b *ClusterNetworkPolicySpecBuilder) AddToServicesRule(svcRefs []crdv1beta1
name string, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction) *ClusterNetworkPolicySpecBuilder {
var appliedTos []crdv1beta1.AppliedTo
for _, at := range ruleAppliedToSpecs {
- appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group, at.Service))
+ appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector,
+ at.NodeSelector,
+ at.NSSelector,
+ at.PodSelectorMatchExp,
+ at.NodeSelectorMatchExp,
+ at.NSSelectorMatchExp,
+ at.Group,
+ at.Service))
}
newRule := crdv1beta1.Rule{
To: make([]crdv1beta1.NetworkPolicyPeer, 0),
@@ -336,7 +406,14 @@ func (b *ClusterNetworkPolicySpecBuilder) AddStretchedIngressRule(pSel, nsSel ma
var appliedTos []crdv1beta1.AppliedTo
for _, at := range ruleAppliedToSpecs {
- appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group, at.Service))
+ appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector,
+ at.NodeSelector,
+ at.NSSelector,
+ at.PodSelectorMatchExp,
+ at.NodeSelectorMatchExp,
+ at.NSSelectorMatchExp,
+ at.Group,
+ at.Service))
}
newRule := crdv1beta1.Rule{
From: []crdv1beta1.NetworkPolicyPeer{{Scope: "ClusterSet"}},
diff --git a/test/integration/agent/route_test.go b/test/integration/agent/route_test.go
index 65ae265a71c..974fb3f7919 100644
--- a/test/integration/agent/route_test.go
+++ b/test/integration/agent/route_test.go
@@ -145,7 +145,7 @@ func TestInitialize(t *testing.T) {
for _, tc := range tcs {
t.Logf("Running Initialize test with mode %s node config %s", tc.networkConfig.TrafficEncapMode, nodeConfig)
- routeClient, err := route.NewClient(tc.networkConfig, tc.noSNAT, false, false, false, nil)
+ routeClient, err := route.NewClient(tc.networkConfig, tc.noSNAT, false, false, false, false, nil)
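+ // Note: route.NewClient takes one more boolean after this change (presumably gating the new
+ // NodeNetworkPolicy support in the route client); it stays disabled in these integration tests.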
assert.NoError(t, err)
var xtablesReleasedTime, initializedTime time.Time
@@ -252,7 +252,7 @@ func TestIpTablesSync(t *testing.T) {
gwLink := createDummyGW(t)
defer netlink.LinkDel(gwLink)
- routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap, IPv4Enabled: true}, false, false, false, false, nil)
+ routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap, IPv4Enabled: true}, false, false, false, false, false, nil)
assert.Nil(t, err)
inited := make(chan struct{})
@@ -303,7 +303,7 @@ func TestAddAndDeleteSNATRule(t *testing.T) {
gwLink := createDummyGW(t)
defer netlink.LinkDel(gwLink)
- routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap, IPv4Enabled: true}, false, false, false, false, nil)
+ routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap, IPv4Enabled: true}, false, false, false, false, false, nil)
assert.Nil(t, err)
inited := make(chan struct{})
@@ -357,7 +357,7 @@ func TestAddAndDeleteRoutes(t *testing.T) {
for _, tc := range tcs {
t.Logf("Running test with mode %s peer cidr %s peer ip %s node config %s", tc.mode, tc.peerCIDR, tc.peerIP, nodeConfig)
- routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: tc.mode, IPv4Enabled: true}, false, false, false, false, nil)
+ routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: tc.mode, IPv4Enabled: true}, false, false, false, false, false, nil)
assert.NoError(t, err)
err = routeClient.Initialize(nodeConfig, func() {})
assert.NoError(t, err)
@@ -422,7 +422,7 @@ func TestSyncRoutes(t *testing.T) {
for _, tc := range tcs {
t.Logf("Running test with mode %s peer cidr %s peer ip %s node config %s", tc.mode, tc.peerCIDR, tc.peerIP, nodeConfig)
- routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: tc.mode, IPv4Enabled: true}, false, false, false, false, nil)
+ routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: tc.mode, IPv4Enabled: true}, false, false, false, false, false, nil)
assert.NoError(t, err)
err = routeClient.Initialize(nodeConfig, func() {})
assert.NoError(t, err)
@@ -465,7 +465,7 @@ func TestSyncGatewayKernelRoute(t *testing.T) {
}
require.NoError(t, netlink.AddrAdd(gwLink, &netlink.Addr{IPNet: gwNet}), "configuring gw IP failed")
- routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap}, false, false, false, false, nil)
+ routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap}, false, false, false, false, false, nil)
assert.NoError(t, err)
err = routeClient.Initialize(nodeConfig, func() {})
assert.NoError(t, err)
@@ -559,7 +559,7 @@ func TestReconcile(t *testing.T) {
for _, tc := range tcs {
t.Logf("Running test with mode %s added routes %v desired routes %v", tc.mode, tc.addedRoutes, tc.desiredPeerCIDRs)
- routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: tc.mode, IPv4Enabled: true}, false, false, false, false, nil)
+ routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: tc.mode, IPv4Enabled: true}, false, false, false, false, false, nil)
assert.NoError(t, err)
err = routeClient.Initialize(nodeConfig, func() {})
assert.NoError(t, err)
@@ -598,7 +598,7 @@ func TestRouteTablePolicyOnly(t *testing.T) {
gwLink := createDummyGW(t)
defer netlink.LinkDel(gwLink)
- routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeNetworkPolicyOnly, IPv4Enabled: true}, false, false, false, false, nil)
+ routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeNetworkPolicyOnly, IPv4Enabled: true}, false, false, false, false, false, nil)
assert.NoError(t, err)
err = routeClient.Initialize(nodeConfig, func() {})
assert.NoError(t, err)
@@ -654,7 +654,7 @@ func TestIPv6RoutesAndNeighbors(t *testing.T) {
gwLink := createDummyGW(t)
defer netlink.LinkDel(gwLink)
- routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap, IPv4Enabled: true, IPv6Enabled: true}, false, false, false, false, nil)
+ routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap, IPv4Enabled: true, IPv6Enabled: true}, false, false, false, false, false, nil)
assert.Nil(t, err)
_, ipv6Subnet, _ := net.ParseCIDR("fd74:ca9b:172:19::/64")
gwIPv6 := net.ParseIP("fd74:ca9b:172:19::1")