From 76bce3a17eaae33e30612ce53cc344c8ce7a57d4 Mon Sep 17 00:00:00 2001
From: Iryna Shustava
Date: Mon, 26 Sep 2022 14:48:58 -0600
Subject: [PATCH] agentless: enable admin partitions with agentless

* integrate partition-init job with consul-server-connection-manager
* integrate controller with consul-server-connection-manager
* modify partition token to be able to read all services so that it works with connection manager
---
 .../partitions/partitions_connect_test.go | 256 ++++++------
 .../templates/controller-deployment.yaml | 160 ++------
 .../templates/mesh-gateway-deployment.yaml | 6 +-
 .../consul/templates/partition-init-job.yaml | 52 +--
 .../consul/templates/server-acl-init-job.yaml | 4 +
 .../test/unit/controller-deployment.bats | 374 ------------------
 .../test/unit/mesh-gateway-deployment.bats | 72 ++++
 .../consul/test/unit/partition-init-job.bats | 81 ++--
 .../consul/test/unit/server-acl-init-job.bats | 20 +
 charts/consul/values.yaml | 2 +-
 .../api/v1alpha1/exportedservices_webhook.go | 8 +-
 .../api/v1alpha1/ingressgateway_webhook.go | 4 +-
 control-plane/api/v1alpha1/mesh_webhook.go | 6 +-
 .../api/v1alpha1/proxydefaults_webhook.go | 8 +-
 .../api/v1alpha1/servicedefaults_webhook.go | 4 +-
 .../api/v1alpha1/serviceintentions_webhook.go | 8 +-
 .../api/v1alpha1/serviceresolver_webhook.go | 4 +-
 .../api/v1alpha1/servicerouter_webhook.go | 4 +-
 .../api/v1alpha1/servicesplitter_webhook.go | 4 +-
 .../v1alpha1/terminatinggateway_webhook.go | 4 +-
 .../consul_dataplane_sidecar.go | 2 +-
 .../consul_dataplane_sidecar_test.go | 10 +-
 .../peering_dialer_controller_test.go | 2 +-
 .../controller/configentry_controller.go | 34 +-
 .../configentry_controller_ent_test.go | 48 +--
 .../controller/configentry_controller_test.go | 360 +++++++++--------
 .../exportedservices_controller_ent_test.go | 49 +--
 control-plane/helper/test/test_util.go | 2 +-
 .../connect-init/command_ent_test.go | 2 +-
 .../subcommand/connect-init/command_test.go | 10 +-
 .../subcommand/controller/command.go | 179 ++++-----
 .../subcommand/controller/command_test.go | 6 +-
 control-plane/subcommand/flags/consul.go | 20 +-
 control-plane/subcommand/flags/consul_test.go | 43 +-
 .../subcommand/partition-init/command.go | 92 ++---
 .../partition-init/command_ent_test.go | 41 +-
 .../subcommand/server-acl-init/rules.go | 3 +
 37 files changed, 793 insertions(+), 1191 deletions(-)

diff --git a/acceptance/tests/partitions/partitions_connect_test.go b/acceptance/tests/partitions/partitions_connect_test.go
index 0d6a732683..fc2ce6f46c 100644
--- a/acceptance/tests/partitions/partitions_connect_test.go
+++ b/acceptance/tests/partitions/partitions_connect_test.go
@@ -25,8 +25,6 @@ const StaticClientNamespace = "ns2"
 // Test that Connect works in a default and ACLsEnabled installations for X-Partition and in-partition networking.
func TestPartitions_Connect(t *testing.T) { - t.Skipf("currently unsupported in agentless") - env := suite.Environment() cfg := suite.Config() @@ -54,7 +52,7 @@ func TestPartitions_Connect(t *testing.T) { false, }, { - "default destination namespace; ACLs and auto-encrypt enabled", + "default destination namespace; ACLs enabled", defaultNamespace, false, true, @@ -66,7 +64,7 @@ func TestPartitions_Connect(t *testing.T) { false, }, { - "single destination namespace; ACLs and auto-encrypt enabled", + "single destination namespace; ACLs enabled", staticServerNamespace, false, true, @@ -78,7 +76,7 @@ func TestPartitions_Connect(t *testing.T) { false, }, { - "mirror k8s namespaces; ACLs and auto-encrypt enabled", + "mirror k8s namespaces; ACLs enabled", staticServerNamespace, true, true, @@ -87,14 +85,13 @@ func TestPartitions_Connect(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - serverClusterContext := env.DefaultContext(t) - clientClusterContext := env.Context(t, environment.SecondaryContextName) - - ctx := context.Background() + defaultPartitionClusterContext := env.DefaultContext(t) + secondaryPartitionClusterContext := env.Context(t, environment.SecondaryContextName) commonHelmValues := map[string]string{ "global.adminPartitions.enabled": "true", "global.enableConsulNamespaces": "true", + "global.logLevel": "debug", "global.tls.enabled": "true", "global.tls.httpsOnly": strconv.FormatBool(c.ACLsEnabled), @@ -115,56 +112,47 @@ func TestPartitions_Connect(t *testing.T) { "dns.enableRedirection": strconv.FormatBool(cfg.EnableTransparentProxy), } - serverHelmValues := map[string]string{ - "server.exposeGossipAndRPCPorts": "true", - } + defaultPartitionHelmValues := make(map[string]string) // On Kind, there are no load balancers but since all clusters // share the same node network (docker bridge), we can use // a NodePort service so that we can access node(s) in a different Kind cluster. if cfg.UseKind { - serverHelmValues["global.adminPartitions.service.type"] = "NodePort" - serverHelmValues["global.adminPartitions.service.nodePort.https"] = "30000" - serverHelmValues["meshGateway.service.type"] = "NodePort" - serverHelmValues["meshGateway.service.nodePort"] = "30100" - serverHelmValues["server.exposeService.type"] = "NodePort" + defaultPartitionHelmValues["global.adminPartitions.service.type"] = "NodePort" + defaultPartitionHelmValues["meshGateway.service.type"] = "NodePort" + defaultPartitionHelmValues["meshGateway.service.nodePort"] = "30200" // todo: do we need to set this port? + defaultPartitionHelmValues["server.exposeService.type"] = "NodePort" + defaultPartitionHelmValues["server.exposeService.nodePort.https"] = "30000" + defaultPartitionHelmValues["server.exposeService.nodePort.grpc"] = "30100" } releaseName := helpers.RandomName() - helpers.MergeMaps(serverHelmValues, commonHelmValues) + helpers.MergeMaps(defaultPartitionHelmValues, commonHelmValues) // Install the consul cluster with servers in the default kubernetes context. - serverConsulCluster := consul.NewHelmCluster(t, serverHelmValues, serverClusterContext, cfg, releaseName) + serverConsulCluster := consul.NewHelmCluster(t, defaultPartitionHelmValues, defaultPartitionClusterContext, cfg, releaseName) serverConsulCluster.Create(t) // Get the TLS CA certificate and key secret from the server cluster and apply it to the client cluster. 
caCertSecretName := fmt.Sprintf("%s-consul-ca-cert", releaseName) - caKeySecretName := fmt.Sprintf("%s-consul-ca-key", releaseName) logger.Logf(t, "retrieving ca cert secret %s from the server cluster and applying to the client cluster", caCertSecretName) - k8s.CopySecret(t, serverClusterContext, clientClusterContext, caCertSecretName) - - if !c.ACLsEnabled { - // When auto-encrypt is disabled, we need both - // the CA cert and CA key to be available in the clients cluster to generate client certificates and keys. - logger.Logf(t, "retrieving ca key secret %s from the server cluster and applying to the client cluster", caKeySecretName) - k8s.CopySecret(t, serverClusterContext, clientClusterContext, caKeySecretName) - } + k8s.CopySecret(t, defaultPartitionClusterContext, secondaryPartitionClusterContext, caCertSecretName) partitionToken := fmt.Sprintf("%s-consul-partitions-acl-token", releaseName) if c.ACLsEnabled { logger.Logf(t, "retrieving partition token secret %s from the server cluster and applying to the client cluster", partitionToken) - k8s.CopySecret(t, serverClusterContext, clientClusterContext, partitionToken) + k8s.CopySecret(t, defaultPartitionClusterContext, secondaryPartitionClusterContext, partitionToken) } - partitionServiceName := fmt.Sprintf("%s-consul-partition", releaseName) - partitionSvcAddress := k8s.ServiceHost(t, cfg, serverClusterContext, partitionServiceName) + partitionServiceName := fmt.Sprintf("%s-expose-servers", releaseName) + partitionSvcAddress := k8s.ServiceHost(t, cfg, defaultPartitionClusterContext, partitionServiceName) - k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, clientClusterContext) + k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, secondaryPartitionClusterContext) // Create client cluster. - clientHelmValues := map[string]string{ + secondaryPartitionHelmValues := map[string]string{ "global.enabled": "false", "global.adminPartitions.name": secondaryPartition, @@ -175,77 +163,61 @@ func TestPartitions_Connect(t *testing.T) { "externalServers.enabled": "true", "externalServers.hosts[0]": partitionSvcAddress, "externalServers.tlsServerName": "server.dc1.consul", - - "client.enabled": "true", - "client.exposeGossipPorts": "true", - "client.join[0]": partitionSvcAddress, } if c.ACLsEnabled { // Setup partition token and auth method host if ACLs enabled. - clientHelmValues["global.acls.bootstrapToken.secretName"] = partitionToken - clientHelmValues["global.acls.bootstrapToken.secretKey"] = "token" - clientHelmValues["externalServers.k8sAuthMethodHost"] = k8sAuthMethodHost - } else { - // Provide CA key when auto-encrypt is disabled. 
- clientHelmValues["global.tls.caKey.secretName"] = caKeySecretName - clientHelmValues["global.tls.caKey.secretKey"] = "tls.key" + secondaryPartitionHelmValues["global.acls.bootstrapToken.secretName"] = partitionToken + secondaryPartitionHelmValues["global.acls.bootstrapToken.secretKey"] = "token" + secondaryPartitionHelmValues["externalServers.k8sAuthMethodHost"] = k8sAuthMethodHost } if cfg.UseKind { - clientHelmValues["externalServers.httpsPort"] = "30000" - clientHelmValues["meshGateway.service.type"] = "NodePort" - clientHelmValues["meshGateway.service.nodePort"] = "30100" + secondaryPartitionHelmValues["externalServers.httpsPort"] = "30000" + secondaryPartitionHelmValues["externalServers.grpcPort"] = "30100" + secondaryPartitionHelmValues["meshGateway.service.type"] = "NodePort" + secondaryPartitionHelmValues["meshGateway.service.nodePort"] = "30200" } - helpers.MergeMaps(clientHelmValues, commonHelmValues) + helpers.MergeMaps(secondaryPartitionHelmValues, commonHelmValues) // Install the consul cluster without servers in the client cluster kubernetes context. - clientConsulCluster := consul.NewHelmCluster(t, clientHelmValues, clientClusterContext, cfg, releaseName) + clientConsulCluster := consul.NewHelmCluster(t, secondaryPartitionHelmValues, secondaryPartitionClusterContext, cfg, releaseName) clientConsulCluster.Create(t) - // Ensure consul clients are created. - agentPodList, err := clientClusterContext.KubernetesClient(t).CoreV1().Pods(clientClusterContext.KubectlOptions(t).Namespace).List(ctx, metav1.ListOptions{LabelSelector: "app=consul,component=client"}) - require.NoError(t, err) - require.NotEmpty(t, agentPodList.Items) - - output, err := k8s.RunKubectlAndGetOutputE(t, clientClusterContext.KubectlOptions(t), "logs", agentPodList.Items[0].Name, "-n", clientClusterContext.KubectlOptions(t).Namespace) - require.NoError(t, err) - require.Contains(t, output, "Partition: 'secondary'") - - serverClusterStaticServerOpts := &terratestk8s.KubectlOptions{ - ContextName: serverClusterContext.KubectlOptions(t).ContextName, - ConfigPath: serverClusterContext.KubectlOptions(t).ConfigPath, + defaultPartitionClusterStaticServerOpts := &terratestk8s.KubectlOptions{ + ContextName: defaultPartitionClusterContext.KubectlOptions(t).ContextName, + ConfigPath: defaultPartitionClusterContext.KubectlOptions(t).ConfigPath, Namespace: staticServerNamespace, } - serverClusterStaticClientOpts := &terratestk8s.KubectlOptions{ - ContextName: serverClusterContext.KubectlOptions(t).ContextName, - ConfigPath: serverClusterContext.KubectlOptions(t).ConfigPath, + defaultPartitionClusterStaticClientOpts := &terratestk8s.KubectlOptions{ + ContextName: defaultPartitionClusterContext.KubectlOptions(t).ContextName, + ConfigPath: defaultPartitionClusterContext.KubectlOptions(t).ConfigPath, Namespace: StaticClientNamespace, } - clientClusterStaticServerOpts := &terratestk8s.KubectlOptions{ - ContextName: clientClusterContext.KubectlOptions(t).ContextName, - ConfigPath: clientClusterContext.KubectlOptions(t).ConfigPath, + secondaryPartitionClusterStaticServerOpts := &terratestk8s.KubectlOptions{ + ContextName: secondaryPartitionClusterContext.KubectlOptions(t).ContextName, + ConfigPath: secondaryPartitionClusterContext.KubectlOptions(t).ConfigPath, Namespace: staticServerNamespace, } - clientClusterStaticClientOpts := &terratestk8s.KubectlOptions{ - ContextName: clientClusterContext.KubectlOptions(t).ContextName, - ConfigPath: clientClusterContext.KubectlOptions(t).ConfigPath, + 
secondaryPartitionClusterStaticClientOpts := &terratestk8s.KubectlOptions{ + ContextName: secondaryPartitionClusterContext.KubectlOptions(t).ContextName, + ConfigPath: secondaryPartitionClusterContext.KubectlOptions(t).ConfigPath, Namespace: StaticClientNamespace, } logger.Logf(t, "creating namespaces %s and %s in servers cluster", staticServerNamespace, StaticClientNamespace) - k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) - k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "create", "ns", StaticClientNamespace) + k8s.RunKubectl(t, defaultPartitionClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) + k8s.RunKubectl(t, defaultPartitionClusterContext.KubectlOptions(t), "create", "ns", StaticClientNamespace) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.RunKubectl(t, serverClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace, StaticClientNamespace) + k8s.RunKubectl(t, defaultPartitionClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace, StaticClientNamespace) }) logger.Logf(t, "creating namespaces %s and %s in clients cluster", staticServerNamespace, StaticClientNamespace) - k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) - k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "create", "ns", StaticClientNamespace) + k8s.RunKubectl(t, secondaryPartitionClusterContext.KubectlOptions(t), "create", "ns", staticServerNamespace) + k8s.RunKubectl(t, secondaryPartitionClusterContext.KubectlOptions(t), "create", "ns", StaticClientNamespace) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.RunKubectl(t, clientClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace, StaticClientNamespace) + k8s.RunKubectl(t, secondaryPartitionClusterContext.KubectlOptions(t), "delete", "ns", staticServerNamespace, StaticClientNamespace) }) consulClient, _ := serverConsulCluster.SetupConsulClient(t, c.ACLsEnabled) @@ -303,43 +275,43 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "creating proxy-defaults config") kustomizeDir := "../fixtures/bases/mesh-gateway" - k8s.KubectlApplyK(t, serverClusterContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlApplyK(t, defaultPartitionClusterContext.KubectlOptions(t), kustomizeDir) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, serverClusterContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlDeleteK(t, defaultPartitionClusterContext.KubectlOptions(t), kustomizeDir) }) - k8s.KubectlApplyK(t, clientClusterContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlApplyK(t, secondaryPartitionClusterContext.KubectlOptions(t), kustomizeDir) helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, clientClusterContext.KubectlOptions(t), kustomizeDir) + k8s.KubectlDeleteK(t, secondaryPartitionClusterContext.KubectlOptions(t), kustomizeDir) }) // This section of the tests runs the in-partition networking tests. 
t.Run("in-partition", func(t *testing.T) { logger.Log(t, "test in-partition networking") logger.Log(t, "creating static-server and static-client deployments in server cluster") - k8s.DeployKustomize(t, serverClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + k8s.DeployKustomize(t, defaultPartitionClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") } else { if c.destinationNamespace == defaultNamespace { - k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") + k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") } else { - k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") + k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") } } logger.Log(t, "creating static-server and static-client deployments in client cluster") - k8s.DeployKustomize(t, clientClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + k8s.DeployKustomize(t, secondaryPartitionClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") } else { if c.destinationNamespace == defaultNamespace { - k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") + k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject") } else { - k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") + k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-namespaces") } } // Check that both static-server and static-client have been injected and now have 2 containers in server cluster. 
for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := serverClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + podList, err := defaultPartitionClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) @@ -349,7 +321,7 @@ func TestPartitions_Connect(t *testing.T) { // Check that both static-server and static-client have been injected and now have 2 containers in client cluster. for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := clientClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + podList, err := secondaryPartitionClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) @@ -384,11 +356,11 @@ func TestPartitions_Connect(t *testing.T) { if c.ACLsEnabled { logger.Log(t, "checking that the connection is not successful because there's no intention") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) - k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") } intention := &api.ServiceIntentionsConfigEntry{ @@ -426,18 +398,18 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "checking that connection is successful") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) - k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + 
k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") } // Test that kubernetes readiness status is synced to Consul. // Create the file so that the readiness probe of the static-server pod fails. logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") - k8s.RunKubectl(t, serverClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") - k8s.RunKubectl(t, clientClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, defaultPartitionClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, secondaryPartitionClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") // The readiness probe should take a moment to be reflected in Consul, CheckStaticServerConnection will retry // until Consul marks the service instance unavailable for mesh traffic, causing the connection to fail. @@ -446,41 +418,41 @@ func TestPartitions_Connect(t *testing.T) { // from server, which is the case when a connection is unsuccessful due to intentions in other tests. logger.Log(t, "checking that connection is unsuccessful") if cfg.EnableTransparentProxy { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.%s", staticServerNamespace)) } else { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, 
defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") } }) // This section of the tests runs the cross-partition networking tests. t.Run("cross-partition", func(t *testing.T) { logger.Log(t, "test cross-partition networking") logger.Log(t, "creating static-server and static-client deployments in server cluster") - k8s.DeployKustomize(t, serverClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + k8s.DeployKustomize(t, defaultPartitionClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") } else { if c.destinationNamespace == defaultNamespace { - k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/default-ns-partition") + k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/default-ns-partition") } else { - k8s.DeployKustomize(t, serverClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/ns-partition") + k8s.DeployKustomize(t, defaultPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/ns-partition") } } logger.Log(t, "creating static-server and static-client deployments in client cluster") - k8s.DeployKustomize(t, clientClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + k8s.DeployKustomize(t, secondaryPartitionClusterStaticServerOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") if cfg.EnableTransparentProxy { - k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") + k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy") } else { if c.destinationNamespace == defaultNamespace { - k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/default-ns-default-partition") + k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/default-ns-default-partition") } else { - k8s.DeployKustomize(t, clientClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-partitions/ns-default-partition") + k8s.DeployKustomize(t, secondaryPartitionClusterStaticClientOpts, cfg.NoCleanupOnFailure, cfg.DebugDirectory, 
"../fixtures/cases/static-client-partitions/ns-default-partition") } } // Check that both static-server and static-client have been injected and now have 2 containers in server cluster. for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := serverClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + podList, err := defaultPartitionClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) @@ -490,7 +462,7 @@ func TestPartitions_Connect(t *testing.T) { // Check that both static-server and static-client have been injected and now have 2 containers in client cluster. for _, labelSelector := range []string{"app=static-server", "app=static-client"} { - podList, err := clientClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + podList, err := secondaryPartitionClusterContext.KubernetesClient(t).CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) @@ -527,18 +499,18 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "creating exported services") if c.destinationNamespace == defaultNamespace { - k8s.KubectlApplyK(t, serverClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") - k8s.KubectlApplyK(t, clientClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") + k8s.KubectlApplyK(t, defaultPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") + k8s.KubectlApplyK(t, secondaryPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, serverClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") - k8s.KubectlDeleteK(t, clientClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") + k8s.KubectlDeleteK(t, defaultPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-default") + k8s.KubectlDeleteK(t, secondaryPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-default") }) } else { - k8s.KubectlApplyK(t, serverClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-ns1") - k8s.KubectlApplyK(t, clientClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-ns1") + k8s.KubectlApplyK(t, defaultPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-ns1") + k8s.KubectlApplyK(t, secondaryPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-ns1") helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { - k8s.KubectlDeleteK(t, serverClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-ns1") - k8s.KubectlDeleteK(t, clientClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/secondary-partition-ns1") + k8s.KubectlDeleteK(t, defaultPartitionClusterContext.KubectlOptions(t), "../fixtures/cases/crd-partitions/default-partition-ns1") + k8s.KubectlDeleteK(t, secondaryPartitionClusterContext.KubectlOptions(t), 
"../fixtures/cases/crd-partitions/secondary-partition-ns1") }) } @@ -546,15 +518,15 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "checking that the connection is not successful because there's no intention") if cfg.EnableTransparentProxy { if !c.mirrorK8S { - k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) } else { - k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) } } else { - k8s.CheckStaticServerConnectionFailing(t, serverClusterStaticClientOpts, StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionFailing(t, clientClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, defaultPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionFailing(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") } intention := &api.ServiceIntentionsConfigEntry{ @@ -595,22 +567,22 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "checking that connection is successful") if cfg.EnableTransparentProxy { if !c.mirrorK8S { - k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, 
defaultPartition)) } else { - k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) } } else { - k8s.CheckStaticServerConnectionSuccessful(t, serverClusterStaticClientOpts, StaticClientName, "http://localhost:1234") - k8s.CheckStaticServerConnectionSuccessful(t, clientClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, defaultPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, "http://localhost:1234") } // Test that kubernetes readiness status is synced to Consul. // Create the file so that the readiness probe of the static-server pod fails. logger.Log(t, "testing k8s -> consul health checks sync by making the static-server unhealthy") - k8s.RunKubectl(t, serverClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") - k8s.RunKubectl(t, clientClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, defaultPartitionClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") + k8s.RunKubectl(t, secondaryPartitionClusterStaticServerOpts, "exec", "deploy/"+staticServerName, "--", "touch", "/tmp/unhealthy") // The readiness probe should take a moment to be reflected in Consul, CheckStaticServerConnection will retry // until Consul marks the service instance unavailable for mesh traffic, causing the connection to fail. 
@@ -620,15 +592,15 @@ func TestPartitions_Connect(t *testing.T) { logger.Log(t, "checking that connection is unsuccessful") if cfg.EnableTransparentProxy { if !c.mirrorK8S { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", c.destinationNamespace, defaultPartition)) } else { - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, secondaryPartition)) + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server", "curl: (7) Failed to connect to static-server.ns1 port 80: Connection refused"}, "", fmt.Sprintf("http://static-server.virtual.%s.ns.%s.ap.dc1.dc.consul", staticServerNamespace, defaultPartition)) } } else { 
- k8s.CheckStaticServerConnectionMultipleFailureMessages(t, serverClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") - k8s.CheckStaticServerConnectionMultipleFailureMessages(t, clientClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, defaultPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") + k8s.CheckStaticServerConnectionMultipleFailureMessages(t, secondaryPartitionClusterStaticClientOpts, StaticClientName, false, []string{"curl: (56) Recv failure: Connection reset by peer", "curl: (52) Empty reply from server"}, "", "http://localhost:1234") } }) }) diff --git a/charts/consul/templates/controller-deployment.yaml b/charts/consul/templates/controller-deployment.yaml index 2495d22b4b..9a6fddd885 100644 --- a/charts/consul/templates/controller-deployment.yaml +++ b/charts/consul/templates/controller-deployment.yaml @@ -1,5 +1,6 @@ {{- if .Values.controller.enabled }} {{- if and .Values.global.adminPartitions.enabled (not .Values.global.enableConsulNamespaces) }}{{ fail "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" }}{{ end }} +{{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} {{ template "consul.validateVaultWebhookCertConfiguration" . }} apiVersion: apps/v1 kind: Deployment @@ -65,107 +66,21 @@ spec: {{- end }} {{- end }} spec: - {{- if .Values.global.acls.manageSystemACLs }} - initContainers: - - name: controller-acl-init - env: - {{- if not .Values.externalServers.enabled }} - - name: CONSUL_HTTP_ADDR - {{- if .Values.global.tls.enabled }} - value: https://{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:8501 - {{- else }} - value: http://{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:8500 - {{- end }} - {{- end }} - {{- if (and .Values.global.tls.enabled (not .Values.externalServers.useSystemRoots)) }} - - name: CONSUL_CACERT - {{- if .Values.global.secretsBackend.vault.enabled }} - value: "/vault/secrets/serverca.crt" - {{- else }} - value: "/consul/tls/ca/tls.crt" - {{- end }} - {{- end }} - image: {{ .Values.global.imageK8S }} - volumeMounts: - - mountPath: /consul/login - name: consul-data - readOnly: false - {{- if and .Values.global.tls.enabled (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots))}} - - name: consul-ca-cert - mountPath: /consul/tls/ca - readOnly: true - {{- end }} - command: - - "/bin/sh" - - "-ec" - - | - consul-k8s-control-plane acl-init \ - -component-name=controller \ - {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} - -acl-auth-method={{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} \ - -primary-datacenter={{ .Values.global.federation.primaryDatacenter }} \ - {{- else }} - -acl-auth-method={{ template "consul.fullname" . 
}}-k8s-component-auth-method \ - {{- end }} - {{- if .Values.global.adminPartitions.enabled }} - -partition={{ .Values.global.adminPartitions.name }} \ - {{- end }} - {{- if .Values.externalServers.enabled }} - {{- if .Values.global.tls.enabled }} - -use-https \ - {{- end }} - {{- range .Values.externalServers.hosts }} - -server-address={{ quote . }} \ - {{- end }} - -server-port={{ .Values.externalServers.httpsPort }} \ - {{- if .Values.externalServers.tlsServerName }} - -tls-server-name={{ .Values.externalServers.tlsServerName }} \ - {{- end }} - {{- end }} - -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ - -log-level={{ default .Values.global.logLevel .Values.controller.logLevel }} \ - -log-json={{ .Values.global.logJSON }} - resources: - requests: - memory: "25Mi" - cpu: "50m" - limits: - memory: "25Mi" - cpu: "50m" - {{- end }} containers: - command: - "/bin/sh" - "-ec" - | consul-k8s-control-plane controller \ - -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ -log-level={{ default .Values.global.logLevel .Values.controller.logLevel }} \ -log-json={{ .Values.global.logJSON }} \ -resource-prefix={{ template "consul.fullname" . }} \ - {{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} - {{- if .Values.externalServers.enabled }} - {{- if .Values.global.tls.enabled }} - -use-https \ - {{- end }} - {{- range .Values.externalServers.hosts }} - -server-address={{ quote . }} \ - {{- end }} - -server-port={{ .Values.externalServers.httpsPort }} \ - {{- if .Values.externalServers.tlsServerName }} - -tls-server-name={{ .Values.externalServers.tlsServerName }} \ - {{- end }} - {{- end }} {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.controller.tlsCert.secretName }} -enable-webhook-ca-update \ -webhook-tls-cert-dir=/vault/secrets/controller-webhook/certs \ {{- else }} -webhook-tls-cert-dir=/tmp/controller-webhook/certs \ {{- end }} - -datacenter={{ .Values.global.datacenter }} \ - {{- if .Values.global.adminPartitions.enabled }} - -partition={{ .Values.global.adminPartitions.name }} \ - {{- end }} -enable-leader-election \ {{- if .Values.global.enableConsulNamespaces }} -enable-namespaces=true \ @@ -182,44 +97,39 @@ spec: -consul-cross-namespace-acl-policy=cross-namespace-policy \ {{- end }} {{- end }} - {{- if .Values.global.acls.manageSystemACLs }} - lifecycle: - preStop: - exec: - command: - - "/bin/sh" - - "-ec" - - | - consul-k8s-control-plane consul-logout -consul-api-timeout={{ .Values.global.consulAPITimeout }} - {{- end }} env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 8 }} {{- if .Values.global.acls.manageSystemACLs }} - - name: CONSUL_HTTP_TOKEN_FILE - value: "/consul/login/acl-token" + - name: CONSUL_LOGIN_AUTH_METHOD + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} + value: {{ template "consul.fullname" . }}-k8s-component-auth-method-{{ .Values.global.datacenter }} + {{- else }} + value: {{ template "consul.fullname" . 
}}-k8s-component-auth-method + {{- end }} + - name: CONSUL_LOGIN_DATACENTER + {{- if and .Values.global.federation.enabled .Values.global.federation.primaryDatacenter }} + value: {{ .Values.global.federation.primaryDatacenter }} + {{- else }} + value: {{ .Values.global.datacenter }} + {{- end }} + - name: CONSUL_LOGIN_META + value: "component=controller,pod=$(POD_NAME)" {{- end }} {{- if (and .Values.controller.aclToken.secretName .Values.controller.aclToken.secretKey) }} - - name: CONSUL_HTTP_TOKEN + - name: CONSUL_ACL_TOKEN valueFrom: secretKeyRef: name: {{ .Values.controller.aclToken.secretName }} key: {{ .Values.controller.aclToken.secretKey }} {{- end }} - {{- if not .Values.externalServers.enabled }} - - name: CONSUL_HTTP_ADDR - {{- if .Values.global.tls.enabled }} - value: https://{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:8501 - {{- else }} - value: http://{{ template "consul.fullname" . }}-server.{{ .Release.Namespace }}.svc:8500 - {{- end }} - {{- end }} - {{- if (and .Values.global.tls.enabled (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots))) }} - - name: CONSUL_CACERT - {{- if .Values.global.secretsBackend.vault.enabled }} - value: "/vault/secrets/serverca.crt" - {{- else }} - value: "/consul/tls/ca/tls.crt" - {{- end }} - {{- end }} image: {{ .Values.global.imageK8S }} name: controller ports: @@ -230,10 +140,21 @@ spec: resources: {{- toYaml . | nindent 12 }} {{- end }} + startupProbe: + tcpSocket: + port: 9443 + initialDelaySeconds: 30 + failureThreshold: 15 + periodSeconds: 2 + timeoutSeconds: 5 + readinessProbe: + tcpSocket: + port: 9443 + failureThreshold: 2 + initialDelaySeconds: 2 + successThreshold: 1 + timeoutSeconds: 5 volumeMounts: - - mountPath: /consul/login - name: consul-data - readOnly: true {{- if not (and .Values.global.secretsBackend.vault.enabled .Values.global.secretsBackend.vault.controller.tlsCert.secretName) }} - mountPath: /tmp/controller-webhook/certs name: cert @@ -266,9 +187,6 @@ spec: path: tls.crt {{- end }} {{- end }} - - name: consul-data - emptyDir: - medium: "Memory" serviceAccountName: {{ template "consul.fullname" . }}-controller {{- if .Values.controller.nodeSelector }} nodeSelector: diff --git a/charts/consul/templates/mesh-gateway-deployment.yaml b/charts/consul/templates/mesh-gateway-deployment.yaml index ce28fbbdd3..b46e9f6908 100644 --- a/charts/consul/templates/mesh-gateway-deployment.yaml +++ b/charts/consul/templates/mesh-gateway-deployment.yaml @@ -152,7 +152,7 @@ spec: volumeMounts: - name: consul-service mountPath: /consul/service - {{- if .Values.global.tls.enabled }} + {{- if and .Values.global.tls.enabled (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) }} - name: consul-ca-cert mountPath: /consul/tls/ca readOnly: true @@ -175,7 +175,7 @@ spec: - mountPath: /consul/service name: consul-service readOnly: true - {{- if .Values.global.tls.enabled }} + {{- if and .Values.global.tls.enabled (not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots)) }} - name: consul-ca-cert mountPath: /consul/tls/ca readOnly: true @@ -195,7 +195,7 @@ spec: - | consul-dataplane \ {{- if .Values.externalServers.enabled }} - -addresses={{ .Values.externalServers.hosts | first }} \ + -addresses={{ .Values.externalServers.hosts | first | quote }} \ {{- else }} -addresses="{{ template "consul.fullname" . 
}}-server.{{ .Release.Namespace }}.svc" \ {{- end }} diff --git a/charts/consul/templates/partition-init-job.yaml b/charts/consul/templates/partition-init-job.yaml index 4d6d971743..ac023328a6 100644 --- a/charts/consul/templates/partition-init-job.yaml +++ b/charts/consul/templates/partition-init-job.yaml @@ -3,6 +3,7 @@ {{- template "consul.reservedNamesFailer" (list .Values.global.adminPartitions.name "global.adminPartitions.name") }} {{- if and (not .Values.externalServers.enabled) (ne .Values.global.adminPartitions.name "default") }}{{ fail "externalServers.enabled needs to be true and configured to create a non-default partition." }}{{ end -}} {{- if and .Values.global.secretsBackend.vault.enabled .Values.global.acls.manageSystemACLs (not .Values.global.secretsBackend.vault.adminPartitionsRole) }}{{ fail "global.secretsBackend.vault.adminPartitionsRole is required when global.secretsBackend.vault.enabled and global.acls.manageSystemACLs are true." }}{{ end -}} +{{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} apiVersion: batch/v1 kind: Job metadata: @@ -75,22 +76,19 @@ spec: - name: partition-init-job image: {{ .Values.global.imageK8S }} env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - {{- if (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey) }} - {{- if .Values.global.secretsBackend.vault.enabled }} - - name: CONSUL_HTTP_TOKEN_FILE - value: /vault/secrets/bootstrap-token - {{- else }} - - name: CONSUL_HTTP_TOKEN - valueFrom: - secretKeyRef: - name: {{ .Values.global.acls.bootstrapToken.secretName }} - key: {{ .Values.global.acls.bootstrapToken.secretKey }} - {{- end }} - {{- end }} + {{- include "consul.consulK8sConsulServerEnvVars" . | nindent 10 }} + {{- if (and .Values.global.acls.bootstrapToken.secretName .Values.global.acls.bootstrapToken.secretKey) }} + {{- if .Values.global.secretsBackend.vault.enabled }} + - name: CONSUL_ACL_TOKEN_FILE + value: /vault/secrets/bootstrap-token + {{- else }} + - name: CONSUL_ACL_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.global.acls.bootstrapToken.secretName }} + key: {{ .Values.global.acls.bootstrapToken.secretKey }} + {{- end }} + {{- end }} {{- if .Values.global.tls.enabled }} {{- if not (or .Values.externalServers.useSystemRoots .Values.global.secretsBackend.vault.enabled) }} volumeMounts: @@ -104,30 +102,8 @@ spec: - "-ec" - | consul-k8s-control-plane partition-init \ - -consul-api-timeout={{ .Values.global.consulAPITimeout }} \ -log-level={{ .Values.global.logLevel }} \ -log-json={{ .Values.global.logJSON }} \ - - {{- if and .Values.externalServers.enabled (not .Values.externalServers.hosts) }}{{ fail "externalServers.hosts must be set if externalServers.enabled is true" }}{{ end -}} - {{- range .Values.externalServers.hosts }} - -server-address={{ quote . 
}} \ - {{- end }} - -server-port={{ .Values.externalServers.httpsPort }} \ - - {{- if .Values.global.tls.enabled }} - -use-https \ - {{- if not .Values.externalServers.useSystemRoots }} - {{- if .Values.global.secretsBackend.vault.enabled }} - -ca-file=/vault/secrets/serverca.crt \ - {{- else }} - -ca-file=/consul/tls/ca/tls.crt \ - {{- end }} - {{- end }} - {{- if .Values.externalServers.tlsServerName }} - -tls-server-name={{ .Values.externalServers.tlsServerName }} \ - {{- end }} - {{- end }} - -partition-name={{ .Values.global.adminPartitions.name }} resources: requests: memory: "50Mi" diff --git a/charts/consul/templates/server-acl-init-job.yaml b/charts/consul/templates/server-acl-init-job.yaml index 8709b82af7..45567fe0ea 100644 --- a/charts/consul/templates/server-acl-init-job.yaml +++ b/charts/consul/templates/server-acl-init-job.yaml @@ -80,6 +80,7 @@ spec: {{- if (or .Values.global.tls.enabled .Values.global.acls.replicationToken.secretName .Values.global.acls.bootstrapToken.secretName) }} volumes: {{- if and .Values.global.tls.enabled (not .Values.global.secretsBackend.vault.enabled) }} + {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - name: consul-ca-cert secret: {{- if .Values.global.tls.caCert.secretName }} @@ -91,6 +92,7 @@ spec: - key: {{ default "tls.crt" .Values.global.tls.caCert.secretKey }} path: tls.crt {{- end }} + {{- end }} {{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.secretsBackend.vault.enabled)) }} - name: bootstrap-token secret: @@ -118,10 +120,12 @@ spec: {{- if (or .Values.global.tls.enabled .Values.global.acls.replicationToken.secretName .Values.global.acls.bootstrapToken.secretName) }} volumeMounts: {{- if and .Values.global.tls.enabled (not .Values.global.secretsBackend.vault.enabled) }} + {{- if not (and .Values.externalServers.enabled .Values.externalServers.useSystemRoots) }} - name: consul-ca-cert mountPath: /consul/tls/ca readOnly: true {{- end }} + {{- end }} {{- if (and .Values.global.acls.bootstrapToken.secretName (not .Values.global.secretsBackend.vault.enabled)) }} - name: bootstrap-token mountPath: /consul/acl/tokens diff --git a/charts/consul/test/unit/controller-deployment.bats b/charts/consul/test/unit/controller-deployment.bats index 2ef5e449cc..2110958d87 100644 --- a/charts/consul/test/unit/controller-deployment.bats +++ b/charts/consul/test/unit/controller-deployment.bats @@ -19,23 +19,6 @@ load _helpers [ "${actual}" = "true" ] } -@test "controller/Deployment: command defaults" { - cd `chart_dir` - local cmd=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.containers[0].command' | tee /dev/stderr) - - local actual=$(echo "$cmd" | - yq 'any(contains("consul-k8s-control-plane controller"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo "$cmd" | - yq 'any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - #-------------------------------------------------------------------- # resourcePrefix @@ -113,183 +96,6 @@ load _helpers [ "${actual}" = "2" ] } -#-------------------------------------------------------------------- -# global.acls.manageSystemACLs - -@test "controller/Deployment: consul-logout preStop hook is added when ACLs are enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]] | any(contains("consul-k8s-control-plane consul-logout -consul-api-timeout=5s"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "controller/Deployment: CONSUL_HTTP_TOKEN_FILE is not set when acls are disabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "controller/Deployment: CONSUL_HTTP_TOKEN_FILE is set when acls are enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[0].name] | any(contains("CONSUL_HTTP_TOKEN_FILE"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls disabled" { - cd `chart_dir` - local object=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.initContainers[0]' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.name' | tee /dev/stderr) - [ "${actual}" = "controller-acl-init" ] - - local actual=$(echo $object | - yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[0].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[0].value] | any(contains("http://release-name-consul-server.default.svc:8500"))' | tee /dev/stderr) - echo $actual - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command and environment with tls enabled" { - cd `chart_dir` - local object=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[0].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[0].value] | any(contains("https://release-name-consul-server.default.svc:8501"))' | tee /dev/stderr) - echo $actual - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command with Partitions enabled" { - cd `chart_dir` - local object=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.adminPartitions.name=default' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.command | any(contains("-partition=default"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[1].name] | any(contains("CONSUL_CACERT"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[0].name] | any(contains("CONSUL_HTTP_ADDR"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '[.env[0].value] | any(contains("https://release-name-consul-server.default.svc:8501"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq '.volumeMounts[1] | any(contains("consul-ca-cert"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.command | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "controller/Deployment: init container is created when global.acls.manageSystemACLs=true and has correct command when federation enabled in non-primary datacenter" { - cd `chart_dir` - local object=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'global.datacenter=dc2' \ - --set 'global.federation.enabled=true' \ - --set 'global.federation.primaryDatacenter=dc1' \ - --set 'meshGateway.enabled=true' \ - --set 'connectInject.enabled=true' \ - --set 'global.tls.enabled=true' \ - --set 'global.tls.enableAutoEncrypt=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec.initContainers[] | select(.name == "controller-acl-init")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.command | any(contains("consul-k8s-control-plane acl-init"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.command | any(contains("-acl-auth-method=release-name-consul-k8s-component-auth-method-dc2"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.command | any(contains("-primary-datacenter=dc1"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - #-------------------------------------------------------------------- # global.tls.enabled @@ -364,19 +170,6 @@ load _helpers [ "${actual}" = "false" ] } -@test "controller/Deployment: partition name set with .global.adminPartitions.enabled=true" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'global.adminPartitions.enabled=true' \ - --set 'global.enableConsulNamespaces=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.containers[0].command | any(contains("partition=default"))' | tee /dev/stderr) - - [ "${actual}" = "true" ] -} - @test "controller/Deployment: fails if namespaces are disabled and .global.adminPartitions.enabled=true" { cd `chart_dir` run helm template \ @@ -642,82 +435,6 @@ load _helpers [ "${actual}" = '{"limits":{"cpu":"200m","memory":"200Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}' ] } -#-------------------------------------------------------------------- -# aclToken - -@test "controller/Deployment: aclToken enabled when secretName and secretKey is provided" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'controller.aclToken.secretName=foo' \ - --set 'controller.aclToken.secretKey=bar' \ - . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "controller/Deployment: aclToken env is set when ACLs are enabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "controller/Deployment: aclToken env is not set when ACLs are disabled" { - cd `chart_dir` - local actual=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -#-------------------------------------------------------------------- -# logLevel - -@test "controller/Deployment: logLevel info by default from global" { - cd `chart_dir` - local cmd=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'global.acls.manageSystemACLs=true' \ - . 
| tee /dev/stderr | - yq '.spec.template.spec' | tee /dev/stderr) - - local actual=$(echo "$cmd" | - yq '.containers[0].command | any(contains("-log-level=info"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo "$cmd" | - yq '.initContainers[0].command | any(contains("-log-level=info"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "controller/Deployment: logLevel can be overridden" { - cd `chart_dir` - local cmd=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'controller.logLevel=error' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec' | tee /dev/stderr) - - local actual=$(echo "$cmd" | - yq '.containers[0].command | any(contains("-log-level=error"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo "$cmd" | - yq '.initContainers[0].command | any(contains("-log-level=error"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - #-------------------------------------------------------------------- # Vault @@ -1097,97 +814,6 @@ load _helpers [[ "$output" =~ "externalServers.hosts must be set if externalServers.enabled is true" ]] } -@test "controller/Deployment: configures the controller and acl-init containers to use external servers" { - cd `chart_dir` - local spec=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec' | tee /dev/stderr) - - local actual=$(echo "$spec" | yq '.containers[0].command | any(contains("-server-address=\"consul\""))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo "$spec" | yq '.containers[0].command | any(contains("-server-port=8501"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo "$spec" | yq '.containers[0].env[] | select(.name == "CONSUL_HTTP_ADDR")' | tee /dev/stderr) - [ "${actual}" = "" ] - - local actual=$(echo "$spec" | yq '.initContainers[0].command | any(contains("-server-address=\"consul\""))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo "$spec" | yq '.initContainers[0].command | any(contains("-server-port=8501"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo "$spec" | yq '.initContainers[0].env[] | select(.name == "CONSUL_HTTP_ADDR")' | tee /dev/stderr) - [ "${actual}" = "" ] -} - -@test "controller/Deployment: can provide a different port for the controller and acl-init containers when external servers are enabled" { - cd `chart_dir` - local spec=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.httpsPort=443' \ - --set 'global.acls.manageSystemACLs=true' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec' | tee /dev/stderr) - - local actual=$(echo "$spec" | yq '.containers[0].command | any(contains("-server-port=443"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo "$spec" | yq '.initContainers[0].command | any(contains("-server-port=443"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "controller/Deployment: can provide a TLS server name for the controller and acl-init containers when external servers are enabled" { - cd `chart_dir` - local spec=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'externalServers.tlsServerName=foo' \ - --set 'global.acls.manageSystemACLs=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec' | tee /dev/stderr) - - local actual=$(echo "$spec" | yq '.containers[0].command | any(contains("-tls-server-name=foo"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo "$spec" | yq '.initContainers[0].command | any(contains("-tls-server-name=foo"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "controller/Deployment: sets -use-https flag for the controller and acl-init containers when external servers with TLS are enabled" { - cd `chart_dir` - local spec=$(helm template \ - -s templates/controller-deployment.yaml \ - --set 'controller.enabled=true' \ - --set 'server.enabled=false' \ - --set 'externalServers.enabled=true' \ - --set 'externalServers.hosts[0]=consul' \ - --set 'global.acls.manageSystemACLs=true' \ - --set 'global.tls.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec' | tee /dev/stderr) - - local actual=$(echo "$spec" | yq '.containers[0].command | any(contains("-use-https"))' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo "$spec" | yq '.initContainers[0].command | any(contains("-use-https"))' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - @test "controller/Deployment: does not configure CA cert for the controller and acl-init containers when external servers with useSystemRoots are enabled" { cd `chart_dir` local spec=$(helm template \ diff --git a/charts/consul/test/unit/mesh-gateway-deployment.bats b/charts/consul/test/unit/mesh-gateway-deployment.bats index 6312abcec9..dee776e3b3 100755 --- a/charts/consul/test/unit/mesh-gateway-deployment.bats +++ b/charts/consul/test/unit/mesh-gateway-deployment.bats @@ -866,6 +866,50 @@ key2: value2' \ [ "${actual}" != "" ] } +@test "meshGateway/Deployment: CA cert volume mount present when TLS is enabled" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + . | tee /dev/stderr | + yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" != "" ] +} + +@test "meshGateway/Deployment: CA cert volume is not present when TLS is enabled with externalServers and useSystemRoots" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'meshGateway.enabled=true' \ + --set 'connectInject.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'server.enabled=false' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=consul' \ + --set 'externalServers.useSystemRoots=true' \ + . 
| tee /dev/stderr |
+    yq '.spec.template.spec.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
+  [ "${actual}" = "" ]
+}
+
+@test "meshGateway/Deployment: CA cert volume mount is not present when TLS is enabled with externalServers and useSystemRoots" {
+  cd `chart_dir`
+  local actual=$(helm template \
+    -s templates/mesh-gateway-deployment.yaml \
+    --set 'meshGateway.enabled=true' \
+    --set 'connectInject.enabled=true' \
+    --set 'global.tls.enabled=true' \
+    --set 'server.enabled=false' \
+    --set 'externalServers.enabled=true' \
+    --set 'externalServers.hosts[0]=consul' \
+    --set 'externalServers.useSystemRoots=true' \
+    . | tee /dev/stderr |
+    yq '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
+  [ "${actual}" = "" ]
+}
+
 ##--------------------------------------------------------------------
 ## mesh-gateway-init init container
 
@@ -1090,6 +1134,34 @@ key2: value2' \
   [ "${actual}" = "${exp}" ]
 }
 
+@test "meshGateway/Deployment: CA cert volume mount present on the init container when TLS is enabled" {
+  cd `chart_dir`
+  local actual=$(helm template \
+    -s templates/mesh-gateway-deployment.yaml \
+    --set 'meshGateway.enabled=true' \
+    --set 'connectInject.enabled=true' \
+    --set 'global.tls.enabled=true' \
+    . | tee /dev/stderr |
+    yq '.spec.template.spec.initContainers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
+  [ "${actual}" != "" ]
+}
+
+@test "meshGateway/Deployment: CA cert volume mount is not present on the init container when TLS is enabled with externalServers and useSystemRoots" {
+  cd `chart_dir`
+  local actual=$(helm template \
+    -s templates/mesh-gateway-deployment.yaml \
+    --set 'meshGateway.enabled=true' \
+    --set 'connectInject.enabled=true' \
+    --set 'global.tls.enabled=true' \
+    --set 'server.enabled=false' \
+    --set 'externalServers.enabled=true' \
+    --set 'externalServers.hosts[0]=consul' \
+    --set 'externalServers.useSystemRoots=true' \
+    . | tee /dev/stderr |
+    yq '.spec.template.spec.initContainers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr)
+  [ "${actual}" = "" ]
+}
+
 #--------------------------------------------------------------------
 # meshGateway.globalMode [DEPRECATED]
 
diff --git a/charts/consul/test/unit/partition-init-job.bats b/charts/consul/test/unit/partition-init-job.bats
index 816ad26ede..ae04e61d98 100644
--- a/charts/consul/test/unit/partition-init-job.bats
+++ b/charts/consul/test/unit/partition-init-job.bats
@@ -71,32 +71,45 @@ load _helpers
   [[ "$output" =~ "externalServers.enabled needs to be true and configured to create a non-default partition." ]]
 }
 
-@test "partitionInit/Job: command defaults" {
+@test "partitionInit/Job: consul env defaults" {
   cd `chart_dir`
-  local command=$(helm template \
-    -s templates/partition-init-job.yaml \
-    --set 'global.enabled=false' \
+  local env=$(helm template \
+    -s templates/partition-init-job.yaml \
     --set 'global.adminPartitions.enabled=true' \
     --set 'global.adminPartitions.name=bar' \
     --set 'externalServers.enabled=true' \
     --set 'externalServers.hosts[0]=foo' \
+    --set 'server.enabled=false' \
    . | tee /dev/stderr |
-    yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr)
+    yq '.spec.template.spec.containers[0].env[]' | tee /dev/stderr)
 
-  local actual
-  actual=$(echo $command | jq -r '. | any(contains("consul-k8s-control-plane partition-init"))' | tee /dev/stderr)
-  [ "${actual}" = "true" ]
+  local actual=$(echo "$env" |
+    jq -r '. 
| select( .name == "CONSUL_ADDRESSES").value' | tee /dev/stderr) + [ "${actual}" = "foo" ] - actual=$(echo $command | jq -r '. | any(contains("-consul-api-timeout=5s"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_GRPC_PORT").value' | tee /dev/stderr) + [ "${actual}" = "8502" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) + [ "${actual}" = "8501" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_DATACENTER").value' | tee /dev/stderr) + [ "${actual}" = "dc1" ] + + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_API_TIMEOUT").value' | tee /dev/stderr) + [ "${actual}" = "5s" ] } #-------------------------------------------------------------------- # global.tls.enabled -@test "partitionInit/Job: sets TLS flags when global.tls.enabled" { +@test "partitionInit/Job: sets TLS env vars when global.tls.enabled" { cd `chart_dir` - local command=$(helm template \ + local env=$(helm template \ -s templates/partition-init-job.yaml \ --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ @@ -105,22 +118,24 @@ load _helpers --set 'externalServers.enabled=true' \ --set 'externalServers.hosts[0]=foo' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) + yq -r '.spec.template.spec.containers[0].env[]' | tee /dev/stderr) - local actual - actual=$(echo $command | jq -r '. | any(contains("-use-https"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_HTTP_PORT").value' | tee /dev/stderr) + [ "${actual}" = "8501" ] - actual=$(echo $command | jq -r '. | any(contains("-ca-file=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_USE_TLS").value' | tee /dev/stderr) [ "${actual}" = "true" ] - actual=$(echo $command | jq -r '. | any(contains("-server-port=8501"))' | tee /dev/stderr) - [ "${actual}" = "true" ] + local actual=$(echo "$env" | + jq -r '. | select( .name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) + [ "${actual}" = "/consul/tls/ca/tls.crt" ] } -@test "partitionInit/Job: does not set consul ca cert or server-port when .externalServers.useSystemRoots is true" { +@test "partitionInit/Job: does not set consul ca cert when .externalServers.useSystemRoots is true" { cd `chart_dir` - local command=$(helm template \ + local spec=$(helm template \ -s templates/partition-init-job.yaml \ --set 'global.enabled=false' \ --set 'global.adminPartitions.enabled=true' \ @@ -130,11 +145,19 @@ load _helpers --set 'externalServers.hosts[0]=foo' \ --set 'externalServers.useSystemRoots=true' \ . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].command' | tee /dev/stderr) + yq -r '.spec.template.spec' | tee /dev/stderr) - local actual - actual=$(echo $command | jq -r '. 
| any(contains("-ca-file=/consul/tls/ca/tls.crt"))' | tee /dev/stderr) - [ "${actual}" = "false" ] + local actual=$(echo "$env" | + jq -r '.containers[0].env[] | select( .name == "CONSUL_CACERT_FILE").value' | tee /dev/stderr) + [ "${actual}" = "" ] + + local actual=$(echo "$env" | + jq -r '.volumes[] | select( .name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] + + local actual=$(echo "$env" | + jq -r '.spec.volumeMounts[] | select( .name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] } @test "partitionInit/Job: can overwrite CA secret with the provided one" { @@ -167,7 +190,7 @@ load _helpers #-------------------------------------------------------------------- # global.acls.bootstrapToken -@test "partitionInit/Job: HTTP_TOKEN is set when global.acls.bootstrapToken is provided" { +@test "partitionInit/Job: CONSUL_ACL_TOKEN is set when global.acls.bootstrapToken is provided" { cd `chart_dir` local actual=$(helm template \ -s templates/partition-init-job.yaml \ @@ -179,7 +202,7 @@ load _helpers --set 'global.acls.bootstrapToken.secretName=partition-token' \ --set 'global.acls.bootstrapToken.secretKey=token' \ . | tee /dev/stderr | - yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_HTTP_TOKEN"))' | tee /dev/stderr) + yq '[.spec.template.spec.containers[0].env[].name] | any(contains("CONSUL_ACL_TOKEN"))' | tee /dev/stderr) [ "${actual}" = "true" ] } @@ -280,7 +303,7 @@ reservedNameTest() { [ "${actual}" = "${expected}" ] # Check that the bootstrap token flag is set to the path of the Vault secret. - local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_HTTP_TOKEN_FILE").value') + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_ACL_TOKEN_FILE").value') [ "${actual}" = "/vault/secrets/bootstrap-token" ] # Check that no (secret) volumes are not attached @@ -382,7 +405,7 @@ reservedNameTest() { [ "${actual}" = "${expected}" ] # Check that the bootstrap token flag is set to the path of the Vault secret. - local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_HTTP_TOKEN_FILE").value') + local actual=$(echo $object | jq -r '.spec.containers[] | select(.name=="partition-init-job").env[] | select(.name=="CONSUL_ACL_TOKEN_FILE").value') [ "${actual}" = "/vault/secrets/bootstrap-token" ] # Check that the consul-ca-cert volume is not attached diff --git a/charts/consul/test/unit/server-acl-init-job.bats b/charts/consul/test/unit/server-acl-init-job.bats index 161d37aa93..ac123f346f 100644 --- a/charts/consul/test/unit/server-acl-init-job.bats +++ b/charts/consul/test/unit/server-acl-init-job.bats @@ -548,6 +548,26 @@ load _helpers [ "${actual}" = "true" ] } +@test "serverACLInit/Job: does not add consul-ca-cert volume when global.tls.enabled with externalServers and useSystemRoots" { + cd `chart_dir` + local spec=$(helm template \ + -s templates/server-acl-init-job.yaml \ + --set 'global.acls.manageSystemACLs=true' \ + --set 'global.tls.enabled=true' \ + --set 'externalServers.enabled=true' \ + --set 'externalServers.hosts[0]=consul' \ + --set 'externalServers.useSystemRoots=true' \ + --set 'servers.enabled=false' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.spec' | tee /dev/stderr) + + actual=$(echo $spec | jq -r '.volumes[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] + + actual=$(echo $spec | jq -r '.containers[0].volumeMounts[] | select(.name == "consul-ca-cert")' | tee /dev/stderr) + [ "${actual}" = "" ] +} + @test "serverACLInit/Job: can overwrite CA secret with the provided one" { cd `chart_dir` local ca_cert_volume=$(helm template \ diff --git a/charts/consul/values.yaml b/charts/consul/values.yaml index 555bb124ad..c04e18f736 100644 --- a/charts/consul/values.yaml +++ b/charts/consul/values.yaml @@ -634,7 +634,7 @@ global: # The name (and tag) of the consul-dataplane Docker image used for the # connect-injected sidecar proxies and mesh, terminating, and ingress gateways. # @default: hashicorp/consul-dataplane: - imageConsulDataplane: "ishustava/consul-dataplane:latest@sha256:475dc059fa625d999e675d3fda63653985b0e57e022bebd71162a69b55562666" + imageConsulDataplane: "ishustava/consul-dataplane:latest@sha256:a19acc509b24960275a842cafc9c35b167e3e8ee9a75ccae1cbcaf3c40a3664a" # Configuration for running this Helm chart on the Red Hat OpenShift platform. # This Helm chart currently supports OpenShift v4.x+. diff --git a/control-plane/api/v1alpha1/exportedservices_webhook.go b/control-plane/api/v1alpha1/exportedservices_webhook.go index d80062e958..5a3d2cb2f1 100644 --- a/control-plane/api/v1alpha1/exportedservices_webhook.go +++ b/control-plane/api/v1alpha1/exportedservices_webhook.go @@ -7,7 +7,6 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" - capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -17,10 +16,9 @@ import ( type ExportedServicesWebhook struct { client.Client - ConsulClient *capi.Client - Logger logr.Logger - decoder *admission.Decoder - ConsulMeta common.ConsulMeta + Logger logr.Logger + decoder *admission.Decoder + ConsulMeta common.ConsulMeta } // NOTE: The path value in the below line is the path to the webhook. diff --git a/control-plane/api/v1alpha1/ingressgateway_webhook.go b/control-plane/api/v1alpha1/ingressgateway_webhook.go index 8dcc2fa9ee..7f8ba37558 100644 --- a/control-plane/api/v1alpha1/ingressgateway_webhook.go +++ b/control-plane/api/v1alpha1/ingressgateway_webhook.go @@ -6,7 +6,6 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" - capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -14,8 +13,7 @@ import ( // +kubebuilder:object:generate=false type IngressGatewayWebhook struct { - ConsulClient *capi.Client - Logger logr.Logger + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. 
ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/mesh_webhook.go b/control-plane/api/v1alpha1/mesh_webhook.go index d28cfc193c..7b69c1f3d5 100644 --- a/control-plane/api/v1alpha1/mesh_webhook.go +++ b/control-plane/api/v1alpha1/mesh_webhook.go @@ -7,7 +7,6 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" - capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -17,9 +16,8 @@ import ( type MeshWebhook struct { client.Client - ConsulClient *capi.Client - Logger logr.Logger - decoder *admission.Decoder + Logger logr.Logger + decoder *admission.Decoder } // NOTE: The path value in the below line is the path to the webhook. diff --git a/control-plane/api/v1alpha1/proxydefaults_webhook.go b/control-plane/api/v1alpha1/proxydefaults_webhook.go index 4e221e0130..3873516074 100644 --- a/control-plane/api/v1alpha1/proxydefaults_webhook.go +++ b/control-plane/api/v1alpha1/proxydefaults_webhook.go @@ -7,7 +7,6 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" - capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -17,10 +16,9 @@ import ( type ProxyDefaultsWebhook struct { client.Client - ConsulClient *capi.Client - Logger logr.Logger - decoder *admission.Decoder - ConsulMeta common.ConsulMeta + Logger logr.Logger + decoder *admission.Decoder + ConsulMeta common.ConsulMeta } // NOTE: The path value in the below line is the path to the webhook. diff --git a/control-plane/api/v1alpha1/servicedefaults_webhook.go b/control-plane/api/v1alpha1/servicedefaults_webhook.go index a196a6d941..f79e68bcde 100644 --- a/control-plane/api/v1alpha1/servicedefaults_webhook.go +++ b/control-plane/api/v1alpha1/servicedefaults_webhook.go @@ -6,7 +6,6 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" - capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -14,8 +13,7 @@ import ( // +kubebuilder:object:generate=false type ServiceDefaultsWebhook struct { - ConsulClient *capi.Client - Logger logr.Logger + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/serviceintentions_webhook.go b/control-plane/api/v1alpha1/serviceintentions_webhook.go index 0287ddfeb8..ddc6488690 100644 --- a/control-plane/api/v1alpha1/serviceintentions_webhook.go +++ b/control-plane/api/v1alpha1/serviceintentions_webhook.go @@ -8,7 +8,6 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" - capi "github.com/hashicorp/consul/api" admissionv1 "k8s.io/api/admission/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -18,10 +17,9 @@ import ( type ServiceIntentionsWebhook struct { client.Client - ConsulClient *capi.Client - Logger logr.Logger - decoder *admission.Decoder - ConsulMeta common.ConsulMeta + Logger logr.Logger + decoder *admission.Decoder + ConsulMeta common.ConsulMeta } // NOTE: The path value in the below line is the path to the webhook. 
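Note on the webhook changes above and below: with agentless Consul these components no longer carry a long-lived *capi.Client. Server addresses are tracked by consul-server-connection-manager, and an API client is built on demand from the watcher's current state, which is the pattern configentry_controller.go adopts later in this patch. The Go sketch below is illustrative only and not part of the patch: the server address, gRPC port, the "web" service-defaults name, and the empty consul.Config are placeholders (real commands populate the config from their CLI flags).

    package example

    import (
        "context"

        "github.com/hashicorp/consul-k8s/control-plane/consul"
        "github.com/hashicorp/consul-server-connection-manager/discovery"
        capi "github.com/hashicorp/consul/api"
        "github.com/hashicorp/go-hclog"
    )

    // syncConfigEntry sketches the agentless pattern: run a server-discovery
    // watcher, then build a short-lived Consul API client from its state each
    // time a Consul call is needed.
    func syncConfigEntry(ctx context.Context) error {
        // Placeholder address and port; real deployments point this at the
        // Consul server service or at externalServers hosts.
        watcher, err := discovery.NewWatcher(ctx, discovery.Config{
            Addresses: "consul-server.consul.svc",
            GRPCPort:  8502,
        }, hclog.NewNullLogger())
        if err != nil {
            return err
        }
        go watcher.Run()
        defer watcher.Stop()

        // Read the currently known server and construct a client from it,
        // mirroring ReconcileEntry in configentry_controller.go.
        state, err := watcher.State()
        if err != nil {
            return err
        }
        // Empty config shown for brevity; real commands fill in the HTTP port,
        // TLS, and ACL settings before calling this helper.
        client, err := consul.NewClientFromConnMgrState(&consul.Config{}, state)
        if err != nil {
            return err
        }

        // Use the client like any Consul API client, e.g. read a config entry.
        _, _, err = client.ConfigEntries().Get(capi.ServiceDefaults, "web", nil)
        return err
    }

Rebuilding the client from the watcher state per operation means a server failover or address rotation does not strand a stale client inside long-lived structs such as the webhooks above.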
diff --git a/control-plane/api/v1alpha1/serviceresolver_webhook.go b/control-plane/api/v1alpha1/serviceresolver_webhook.go index 1af2fa0383..ca5f9d9482 100644 --- a/control-plane/api/v1alpha1/serviceresolver_webhook.go +++ b/control-plane/api/v1alpha1/serviceresolver_webhook.go @@ -6,7 +6,6 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" - capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -14,8 +13,7 @@ import ( // +kubebuilder:object:generate=false type ServiceResolverWebhook struct { - ConsulClient *capi.Client - Logger logr.Logger + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/servicerouter_webhook.go b/control-plane/api/v1alpha1/servicerouter_webhook.go index 03644432e6..f6837fcf7b 100644 --- a/control-plane/api/v1alpha1/servicerouter_webhook.go +++ b/control-plane/api/v1alpha1/servicerouter_webhook.go @@ -6,7 +6,6 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" - capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -14,8 +13,7 @@ import ( // +kubebuilder:object:generate=false type ServiceRouterWebhook struct { - ConsulClient *capi.Client - Logger logr.Logger + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/servicesplitter_webhook.go b/control-plane/api/v1alpha1/servicesplitter_webhook.go index f90c49f45a..c0020c88b8 100644 --- a/control-plane/api/v1alpha1/servicesplitter_webhook.go +++ b/control-plane/api/v1alpha1/servicesplitter_webhook.go @@ -6,7 +6,6 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" - capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -14,8 +13,7 @@ import ( // +kubebuilder:object:generate=false type ServiceSplitterWebhook struct { - ConsulClient *capi.Client - Logger logr.Logger + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. ConsulMeta common.ConsulMeta diff --git a/control-plane/api/v1alpha1/terminatinggateway_webhook.go b/control-plane/api/v1alpha1/terminatinggateway_webhook.go index 2d3367fcaa..b0427b87ca 100644 --- a/control-plane/api/v1alpha1/terminatinggateway_webhook.go +++ b/control-plane/api/v1alpha1/terminatinggateway_webhook.go @@ -6,7 +6,6 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" - capi "github.com/hashicorp/consul/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -14,8 +13,7 @@ import ( // +kubebuilder:object:generate=false type TerminatingGatewayWebhook struct { - ConsulClient *capi.Client - Logger logr.Logger + Logger logr.Logger // ConsulMeta contains metadata specific to the Consul installation. 
ConsulMeta common.ConsulMeta diff --git a/control-plane/connect-inject/consul_dataplane_sidecar.go b/control-plane/connect-inject/consul_dataplane_sidecar.go index 1f6f2de85a..120738e654 100644 --- a/control-plane/connect-inject/consul_dataplane_sidecar.go +++ b/control-plane/connect-inject/consul_dataplane_sidecar.go @@ -130,7 +130,7 @@ func (w *MeshWebhook) getContainerSidecarCommand(namespace corev1.Namespace, mpi cmd := []string{ "consul-dataplane", - "-addresses=" + w.ConsulAddress, + fmt.Sprintf("-addresses=%q", w.ConsulAddress), "-grpc-port=" + strconv.Itoa(w.ConsulConfig.GRPCPort), "-proxy-service-id=" + fmt.Sprintf("$(cat %s)", proxyIDFileName), "-service-node-name=" + ConsulNodeName, diff --git a/control-plane/connect-inject/consul_dataplane_sidecar_test.go b/control-plane/connect-inject/consul_dataplane_sidecar_test.go index 1416c379fb..fb78649309 100644 --- a/control-plane/connect-inject/consul_dataplane_sidecar_test.go +++ b/control-plane/connect-inject/consul_dataplane_sidecar_test.go @@ -166,7 +166,7 @@ func TestHandlerConsulDataplaneSidecar(t *testing.T) { // todo(agentless): test default concurrency expCmd := []string{ "/bin/sh", "-ec", - "consul-dataplane -addresses=1.1.1.1 -grpc-port=" + strconv.Itoa(w.ConsulConfig.GRPCPort) + + "consul-dataplane -addresses=\"1.1.1.1\" -grpc-port=" + strconv.Itoa(w.ConsulConfig.GRPCPort) + " -proxy-service-id=$(cat /consul/connect-inject/proxyid) " + "-service-node-name=k8s-service-mesh -log-level=" + w.LogLevel + " -log-json=" + strconv.FormatBool(w.LogJSON) + c.additionalExpCmdArgs} require.Equal(t, container.Command, expCmd) @@ -336,17 +336,17 @@ func TestHandlerConsulDataplaneSidecar_Multiport(t *testing.T) { }, } expCommand := [][]string{ - {"/bin/sh", "-ec", "consul-dataplane -addresses=1.1.1.1 -grpc-port=8502 -proxy-service-id=$(cat /consul/connect-inject/proxyid-web) " + + {"/bin/sh", "-ec", "consul-dataplane -addresses=\"1.1.1.1\" -grpc-port=8502 -proxy-service-id=$(cat /consul/connect-inject/proxyid-web) " + "-service-node-name=k8s-service-mesh -log-level=info -log-json=false -tls-disabled -envoy-admin-bind-port=19000"}, - {"/bin/sh", "-ec", "consul-dataplane -addresses=1.1.1.1 -grpc-port=8502 -proxy-service-id=$(cat /consul/connect-inject/proxyid-web-admin) " + + {"/bin/sh", "-ec", "consul-dataplane -addresses=\"1.1.1.1\" -grpc-port=8502 -proxy-service-id=$(cat /consul/connect-inject/proxyid-web-admin) " + "-service-node-name=k8s-service-mesh -log-level=info -log-json=false -tls-disabled -envoy-admin-bind-port=19001"}, } if aclsEnabled { expCommand = [][]string{ - {"/bin/sh", "-ec", "consul-dataplane -addresses=1.1.1.1 -grpc-port=8502 -proxy-service-id=$(cat /consul/connect-inject/proxyid-web) " + + {"/bin/sh", "-ec", "consul-dataplane -addresses=\"1.1.1.1\" -grpc-port=8502 -proxy-service-id=$(cat /consul/connect-inject/proxyid-web) " + "-service-node-name=k8s-service-mesh -log-level=info -log-json=false -credential-type=login -login-method=test-auth-method " + "-login-bearer-path=/var/run/secrets/kubernetes.io/serviceaccount/token -login-meta=pod=k8snamespace/test-pod -tls-disabled -envoy-admin-bind-port=19000"}, - {"/bin/sh", "-ec", "consul-dataplane -addresses=1.1.1.1 -grpc-port=8502 -proxy-service-id=$(cat /consul/connect-inject/proxyid-web-admin) " + + {"/bin/sh", "-ec", "consul-dataplane -addresses=\"1.1.1.1\" -grpc-port=8502 -proxy-service-id=$(cat /consul/connect-inject/proxyid-web-admin) " + "-service-node-name=k8s-service-mesh -log-level=info -log-json=false -credential-type=login -login-method=test-auth-method " + 
"-login-bearer-path=/consul/serviceaccount-web-admin/token -login-meta=pod=k8snamespace/test-pod -tls-disabled -envoy-admin-bind-port=19001"}, } diff --git a/control-plane/connect-inject/peering_dialer_controller_test.go b/control-plane/connect-inject/peering_dialer_controller_test.go index 91fe0ab6c1..232f0c0203 100644 --- a/control-plane/connect-inject/peering_dialer_controller_test.go +++ b/control-plane/connect-inject/peering_dialer_controller_test.go @@ -485,7 +485,7 @@ func TestReconcile_VersionAnnotationPeeringDialer(t *testing.T) { ctx, cancelFunc := context.WithCancel(context.Background()) t.Cleanup(cancelFunc) - watcher, err := discovery.NewWatcher(ctx, discovery.Config{Addresses: "exec=echo 127.0.0.1", GRPCPort: testServerCfg.Ports.GRPC}, hclog.NewNullLogger()) + watcher, err := discovery.NewWatcher(ctx, discovery.Config{Addresses: "127.0.0.1", GRPCPort: testServerCfg.Ports.GRPC}, hclog.NewNullLogger()) require.NoError(t, err) t.Cleanup(watcher.Stop) go watcher.Run() diff --git a/control-plane/controller/configentry_controller.go b/control-plane/controller/configentry_controller.go index 94206c8f4d..ea9d4249e9 100644 --- a/control-plane/controller/configentry_controller.go +++ b/control-plane/controller/configentry_controller.go @@ -9,7 +9,9 @@ import ( "github.com/go-logr/logr" "github.com/hashicorp/consul-k8s/control-plane/api/common" + "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/namespaces" + "github.com/hashicorp/consul-server-connection-manager/discovery" capi "github.com/hashicorp/consul/api" "golang.org/x/time/rate" corev1 "k8s.io/api/core/v1" @@ -50,7 +52,11 @@ type Controller interface { // all config entry types, e.g. ServiceDefaults, ServiceResolver, etc, since // they share the same reconcile behaviour. type ConfigEntryController struct { - ConsulClient *capi.Client + // ConsulClientConfig is the config for the Consul API client. + ConsulClientConfig *consul.Config + + // ConsulServerConnMgr is the watcher for the Consul server addresses. + ConsulServerConnMgr *discovery.Watcher // DatacenterName indicates the Consul Datacenter name the controller is // operating in. Adds this value as metadata on managed resources. @@ -97,6 +103,18 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont return ctrl.Result{}, err } + // Create Consul client for this reconcile. 
+ serverState, err := r.ConsulServerConnMgr.State() + if err != nil { + logger.Error(err, "failed to get Consul server state", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + consulClient, err := consul.NewClientFromConnMgrState(r.ConsulClientConfig, serverState) + if err != nil { + logger.Error(err, "failed to create Consul API client", "name", req.Name, "ns", req.Namespace) + return ctrl.Result{}, err + } + consulEntry := configEntry.ToConsul(r.DatacenterName) if configEntry.GetDeletionTimestamp().IsZero() { @@ -114,7 +132,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont if containsString(configEntry.GetFinalizers(), FinalizerName) { logger.Info("deletion event") // Check to see if consul has config entry with the same name - entry, _, err := r.ConsulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{ + entry, _, err := consulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) @@ -125,7 +143,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont } else if err == nil { // Only delete the resource from Consul if it is owned by our datacenter. if entry.GetMeta()[common.DatacenterKey] == r.DatacenterName { - _, err := r.ConsulClient.ConfigEntries().Delete(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.WriteOptions{ + _, err := consulClient.ConfigEntries().Delete(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.WriteOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) if err != nil { @@ -150,7 +168,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont } // Check to see if consul has config entry with the same name - entry, _, err := r.ConsulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{ + entry, _, err := consulClient.ConfigEntries().Get(configEntry.ConsulKind(), configEntry.ConsulName(), &capi.QueryOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) // If a config entry with this name does not exist @@ -161,7 +179,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont // destination consul namespace first. 
if r.EnableConsulNamespaces { consulNS := r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()) - created, err := namespaces.EnsureExists(r.ConsulClient, consulNS, r.CrossNSACLPolicy) + created, err := namespaces.EnsureExists(consulClient, consulNS, r.CrossNSACLPolicy) if err != nil { return r.syncFailed(ctx, logger, crdCtrl, configEntry, ConsulAgentError, fmt.Errorf("creating consul namespace %q: %w", consulNS, err)) @@ -172,7 +190,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont } // Create the config entry - _, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{ + _, writeMeta, err := consulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) if err != nil { @@ -220,7 +238,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont } logger.Info("config entry does not match consul", "modify-index", entry.GetModifyIndex()) - _, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{ + _, writeMeta, err := consulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) if err != nil { @@ -234,7 +252,7 @@ func (r *ConfigEntryController) ReconcileEntry(ctx context.Context, crdCtrl Cont // matches the entry in Kubernetes. We just need to update the metadata // of the entry in Consul to say that it's now managed by Kubernetes. logger.Info("migrating config entry to be managed by Kubernetes") - _, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{ + _, writeMeta, err := consulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{ Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()), }) if err != nil { diff --git a/control-plane/controller/configentry_controller_ent_test.go b/control-plane/controller/configentry_controller_ent_test.go index 7b40947df5..1750e3629c 100644 --- a/control-plane/controller/configentry_controller_ent_test.go +++ b/control-plane/controller/configentry_controller_ent_test.go @@ -13,8 +13,8 @@ import ( "github.com/hashicorp/consul-k8s/control-plane/api/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" "github.com/hashicorp/consul-k8s/control-plane/controller" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" capi "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -193,14 +193,9 @@ func TestConfigEntryController_createsConfigEntry_consulNamespaces(tt *testing.T s.AddKnownTypes(v1alpha1.GroupVersion, in.KubeResource) ctx := context.Background() - consul, err := testutil.NewTestServerConfigT(t, nil) - req.NoError(err) - defer consul.Stop() - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - req.NoError(err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(in.KubeResource).Build() @@ -209,7 +204,8 @@ func 
TestConfigEntryController_createsConfigEntry_consulNamespaces(tt *testing.T logrtest.TestLogger{T: t}, s, &controller.ConfigEntryController{ - ConsulClient: consulClient, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, @@ -459,14 +455,9 @@ func TestConfigEntryController_updatesConfigEntry_consulNamespaces(tt *testing.T s.AddKnownTypes(v1alpha1.GroupVersion, in.KubeResource) ctx := context.Background() - consul, err := testutil.NewTestServerConfigT(t, nil) - req.NoError(err) - defer consul.Stop() - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - req.NoError(err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(in.KubeResource).Build() @@ -475,7 +466,8 @@ func TestConfigEntryController_updatesConfigEntry_consulNamespaces(tt *testing.T logrtest.TestLogger{T: t}, s, &controller.ConfigEntryController{ - ConsulClient: consulClient, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, @@ -499,14 +491,14 @@ func TestConfigEntryController_updatesConfigEntry_consulNamespaces(tt *testing.T // Now update it. { // First get it so we have the latest revision number. - err = fakeClient.Get(ctx, types.NamespacedName{ + err := fakeClient.Get(ctx, types.NamespacedName{ Namespace: c.SourceKubeNS, Name: in.KubeResource.KubernetesName(), }, in.KubeResource) req.NoError(err) // Update the resource. 
- err := in.UpdateResourceFunc(fakeClient, ctx, in.KubeResource) + err = in.UpdateResourceFunc(fakeClient, ctx, in.KubeResource) req.NoError(err) resp, err := r.Reconcile(ctx, ctrl.Request{ @@ -712,14 +704,9 @@ func TestConfigEntryController_deletesConfigEntry_consulNamespaces(tt *testing.T s := runtime.NewScheme() s.AddKnownTypes(v1alpha1.GroupVersion, in.KubeResource) - consul, err := testutil.NewTestServerConfigT(t, nil) - req.NoError(err) - defer consul.Stop() - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - req.NoError(err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(in.KubeResource).Build() @@ -728,7 +715,8 @@ func TestConfigEntryController_deletesConfigEntry_consulNamespaces(tt *testing.T logrtest.TestLogger{T: t}, s, &controller.ConfigEntryController{ - ConsulClient: consulClient, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, diff --git a/control-plane/controller/configentry_controller_test.go b/control-plane/controller/configentry_controller_test.go index 5a26d9abd6..41e832642d 100644 --- a/control-plane/controller/configentry_controller_test.go +++ b/control-plane/controller/configentry_controller_test.go @@ -12,8 +12,10 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/consul-k8s/control-plane/api/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" + "github.com/hashicorp/consul-k8s/control-plane/consul" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" + "github.com/hashicorp/consul-server-connection-manager/discovery" capi "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,7 +41,7 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { consulKind string consulPrereqs []capi.ConfigEntry configEntryResource common.ConfigEntryResource - reconciler func(client.Client, *capi.Client, logr.Logger) testReconciler + reconciler func(client.Client, *consul.Config, *discovery.Watcher, logr.Logger) testReconciler compare func(t *testing.T, consul capi.ConfigEntry) }{ { @@ -55,13 +57,14 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { MaxInboundConnections: 100, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -86,13 +89,14 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceResolverController{ Client: client, Log: logger, ConfigEntryController: 
&ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -116,13 +120,14 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ProxyDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -146,13 +151,14 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &MeshController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -189,13 +195,14 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceRouterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -228,13 +235,14 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceSplitterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -306,13 +314,14 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceIntentionsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -354,13 +363,14 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return 
&IngressGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -393,13 +403,14 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &TerminatingGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -425,22 +436,17 @@ func TestConfigEntryControllers_createsConfigEntry(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, c.configEntryResource) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(c.configEntryResource).Build() - consul, err := testutil.NewTestServerConfigT(t, nil) - req.NoError(err) - defer consul.Stop() + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - req.NoError(err) for _, configEntry := range c.consulPrereqs { written, _, err := consulClient.ConfigEntries().Set(configEntry, nil) req.NoError(err) req.True(written) } - r := c.reconciler(fakeClient, consulClient, logrtest.TestLogger{T: t}) + r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.TestLogger{T: t}) namespacedName := types.NamespacedName{ Namespace: kubeNS, Name: c.configEntryResource.KubernetesName(), @@ -476,7 +482,7 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { consulKind string consulPrereqs []capi.ConfigEntry configEntryResource common.ConfigEntryResource - reconciler func(client.Client, *capi.Client, logr.Logger) testReconciler + reconciler func(client.Client, *consul.Config, *discovery.Watcher, logr.Logger) testReconciler updateF func(common.ConfigEntryResource) compare func(t *testing.T, consul capi.ConfigEntry) }{ @@ -492,13 +498,14 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { Protocol: "http", }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -526,13 +533,14 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceResolverController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + 
ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -560,13 +568,14 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ProxyDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -594,13 +603,14 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &MeshController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -642,13 +652,14 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceSplitterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -699,13 +710,14 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceRouterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -772,13 +784,14 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceIntentionsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -819,13 +832,14 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &IngressGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - 
DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -862,13 +876,14 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &TerminatingGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -898,15 +913,9 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, c.configEntryResource) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(c.configEntryResource).Build() - consul, err := testutil.NewTestServerConfigT(t, nil) - req.NoError(err) - defer consul.Stop() - - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - req.NoError(err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient // Create any prereqs. for _, configEntry := range c.consulPrereqs { @@ -930,14 +939,14 @@ func TestConfigEntryControllers_updatesConfigEntry(t *testing.T) { Name: c.configEntryResource.KubernetesName(), } // First get it so we have the latest revision number. - err = fakeClient.Get(ctx, namespacedName, c.configEntryResource) + err := fakeClient.Get(ctx, namespacedName, c.configEntryResource) req.NoError(err) // Update the entry in Kube and run reconcile. 
c.updateF(c.configEntryResource) - err := fakeClient.Update(ctx, c.configEntryResource) + err = fakeClient.Update(ctx, c.configEntryResource) req.NoError(err) - r := c.reconciler(fakeClient, consulClient, logrtest.TestLogger{T: t}) + r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.TestLogger{T: t}) resp, err := r.Reconcile(ctx, ctrl.Request{ NamespacedName: namespacedName, }) @@ -963,7 +972,7 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { consulKind string consulPrereq []capi.ConfigEntry configEntryResourceWithDeletion common.ConfigEntryResource - reconciler func(client.Client, *capi.Client, logr.Logger) testReconciler + reconciler func(client.Client, *consul.Config, *discovery.Watcher, logr.Logger) testReconciler }{ { kubeKind: "ServiceDefaults", @@ -979,13 +988,14 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { Protocol: "http", }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -1006,13 +1016,14 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceResolverController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -1033,13 +1044,14 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ProxyDefaultsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -1060,13 +1072,14 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &MeshController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -1101,13 +1114,14 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) 
testReconciler { return &ServiceRouterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -1137,13 +1151,14 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceSplitterController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -1203,13 +1218,14 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &ServiceIntentionsController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -1241,13 +1257,14 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &IngressGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -1274,13 +1291,14 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { }, }, }, - reconciler: func(client client.Client, consulClient *capi.Client, logger logr.Logger) testReconciler { + reconciler: func(client client.Client, cfg *consul.Config, watcher *discovery.Watcher, logger logr.Logger) testReconciler { return &TerminatingGatewayController{ Client: client, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: cfg, + ConsulServerConnMgr: watcher, + DatacenterName: datacenterName, }, } }, @@ -1295,15 +1313,9 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, c.configEntryResourceWithDeletion) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(c.configEntryResourceWithDeletion).Build() - consul, err := testutil.NewTestServerConfigT(t, nil) - req.NoError(err) - defer consul.Stop() - - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - req.NoError(err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient // Create any prereqs. 
for _, configEntry := range c.consulPrereq { @@ -1326,7 +1338,7 @@ func TestConfigEntryControllers_deletesConfigEntry(t *testing.T) { Namespace: kubeNS, Name: c.configEntryResourceWithDeletion.KubernetesName(), } - r := c.reconciler(fakeClient, consulClient, logrtest.TestLogger{T: t}) + r := c.reconciler(fakeClient, testClient.Cfg, testClient.Watcher, logrtest.TestLogger{T: t}) resp, err := r.Reconcile(context.Background(), ctrl.Request{ NamespacedName: namespacedName, }) @@ -1362,18 +1374,22 @@ func TestConfigEntryControllers_errorUpdatesSyncStatus(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, svcDefaults) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svcDefaults).Build() - // Construct a Consul client that will error by giving it - // an unresolvable address. - consulClient, err := capi.NewClient(&capi.Config{ - Address: "incorrect-address", - }) - req.NoError(err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + + // Get watcher state to make sure we can get a healthy address. + _, err := testClient.Watcher.State() + require.NoError(t, err) + // Stop the server before calling reconcile imitating a server that's not running. + _ = testClient.TestServer.Stop() + reconciler := &ServiceDefaultsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + DatacenterName: datacenterName, }, } @@ -1387,7 +1403,8 @@ func TestConfigEntryControllers_errorUpdatesSyncStatus(t *testing.T) { }) req.Error(err) - expErr := fmt.Sprintf("Get \"http://incorrect-address/v1/config/%s/%s\": dial tcp: lookup incorrect-address", capi.ServiceDefaults, svcDefaults.ConsulName()) + expErr := fmt.Sprintf("Get \"http://127.0.0.1:%d/v1/config/%s/%s\": dial tcp 127.0.0.1:%d: connect: connection refused", + testClient.Cfg.HTTPPort, capi.ServiceDefaults, svcDefaults.ConsulName(), testClient.Cfg.HTTPPort) req.Contains(err.Error(), expErr) req.False(resp.Requeue) @@ -1430,27 +1447,22 @@ func TestConfigEntryControllers_setsSyncedToTrue(t *testing.T) { // The config entry exists in kube but its status will be nil. fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svcDefaults).Build() - consul, err := testutil.NewTestServerConfigT(t, nil) - req.NoError(err) - defer consul.Stop() - - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - req.NoError(err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient reconciler := &ServiceDefaultsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + DatacenterName: datacenterName, }, } // Create the resource in Consul to mimic that it was created // successfully (but its status hasn't been updated). 
- _, _, err = consulClient.ConfigEntries().Set(svcDefaults.ToConsul(datacenterName), nil) + _, _, err := consulClient.ConfigEntries().Set(svcDefaults.ToConsul(datacenterName), nil) require.NoError(t, err) namespacedName := types.NamespacedName{ @@ -1507,15 +1519,9 @@ func TestConfigEntryControllers_doesNotCreateUnownedConfigEntry(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, svcDefaults) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svcDefaults).Build() - consul, err := testutil.NewTestServerConfigT(t, nil) - req.NoError(err) - defer consul.Stop() - - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - req.NoError(err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient // We haven't run reconcile yet. We must create the config entry // in Consul ourselves in a different datacenter. @@ -1532,7 +1538,7 @@ func TestConfigEntryControllers_doesNotCreateUnownedConfigEntry(t *testing.T) { Name: svcDefaults.KubernetesName(), } // First get it so we have the latest revision number. - err = fakeClient.Get(ctx, namespacedName, svcDefaults) + err := fakeClient.Get(ctx, namespacedName, svcDefaults) req.NoError(err) // Attempt to create the entry in Kube and run reconcile. @@ -1540,8 +1546,9 @@ func TestConfigEntryControllers_doesNotCreateUnownedConfigEntry(t *testing.T) { Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + DatacenterName: datacenterName, }, } resp, err := reconciler.Reconcile(ctx, ctrl.Request{ @@ -1596,21 +1603,16 @@ func TestConfigEntryControllers_doesNotDeleteUnownedConfig(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, svcDefaultsWithDeletion) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svcDefaultsWithDeletion).Build() - consul, err := testutil.NewTestServerConfigT(t, nil) - req.NoError(err) - defer consul.Stop() - - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - req.NoError(err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient reconciler := &ServiceDefaultsController{ Client: fakeClient, Log: logrtest.TestLogger{T: t}, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + DatacenterName: datacenterName, }, } @@ -1683,15 +1685,8 @@ func TestConfigEntryControllers_updatesStatusWhenDeleteFails(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(defaults, splitter).Build() - consul, err := testutil.NewTestServerConfigT(t, nil) - require.NoError(t, err) - defer consul.Stop() - - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - require.NoError(t, err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) logger := logrtest.TestLogger{T: t} @@ -1699,16 +1694,18 @@ func TestConfigEntryControllers_updatesStatusWhenDeleteFails(t *testing.T) { Client: fakeClient, Log: logger, ConfigEntryController: 
&ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + DatacenterName: datacenterName, }, } svcSplitterReconciler := ServiceSplitterController{ Client: fakeClient, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + DatacenterName: datacenterName, }, } @@ -1819,15 +1816,9 @@ func TestConfigEntryController_Migration(t *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.ServiceDefaults{}) fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(&c.KubeResource).Build() - consul, err := testutil.NewTestServerConfigT(t, nil) - require.NoError(t, err) - defer consul.Stop() - - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - require.NoError(t, err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient // Create the service-defaults in Consul. success, _, err := consulClient.ConfigEntries().Set(&c.ConsulResource, nil) @@ -1840,8 +1831,9 @@ func TestConfigEntryController_Migration(t *testing.T) { Client: fakeClient, Log: logger, ConfigEntryController: &ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: datacenterName, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, + DatacenterName: datacenterName, }, } diff --git a/control-plane/controller/exportedservices_controller_ent_test.go b/control-plane/controller/exportedservices_controller_ent_test.go index ec8f771586..a24cf024ad 100644 --- a/control-plane/controller/exportedservices_controller_ent_test.go +++ b/control-plane/controller/exportedservices_controller_ent_test.go @@ -12,8 +12,8 @@ import ( "github.com/hashicorp/consul-k8s/control-plane/api/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" "github.com/hashicorp/consul-k8s/control-plane/controller" + "github.com/hashicorp/consul-k8s/control-plane/helper/test" capi "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -94,14 +94,9 @@ func TestExportedServicesController_createsExportedServices(tt *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, exportedServices) ctx := context.Background() - consul, err := testutil.NewTestServerConfigT(t, nil) - req.NoError(err) - defer consul.Stop() - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - req.NoError(err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(exportedServices).Build() @@ -110,7 +105,8 @@ func TestExportedServicesController_createsExportedServices(tt *testing.T) { Log: logrtest.TestLogger{T: t}, Scheme: s, ConfigEntryController: &controller.ConfigEntryController{ - ConsulClient: consulClient, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, @@ -214,15 +210,9 @@ func 
TestExportedServicesController_updatesExportedServices(tt *testing.T) { s.AddKnownTypes(v1alpha1.GroupVersion, exportedServices) ctx := context.Background() - consul, err := testutil.NewTestServerConfigT(t, nil) - req.NoError(err) - defer consul.Stop() - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - req.NoError(err) - + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(exportedServices).Build() controller := &controller.ExportedServicesController{ @@ -230,7 +220,8 @@ func TestExportedServicesController_updatesExportedServices(tt *testing.T) { Log: logrtest.TestLogger{T: t}, Scheme: s, ConfigEntryController: &controller.ConfigEntryController{ - ConsulClient: consulClient, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, @@ -259,7 +250,7 @@ func TestExportedServicesController_updatesExportedServices(tt *testing.T) { // Now update it. { // First get it so we have the latest revision number. - err = fakeClient.Get(ctx, types.NamespacedName{ + err := fakeClient.Get(ctx, types.NamespacedName{ Namespace: c.SourceKubeNS, Name: exportedServices.KubernetesName(), }, exportedServices) @@ -267,7 +258,7 @@ func TestExportedServicesController_updatesExportedServices(tt *testing.T) { // Update the resource. exportedServices.Spec.Services[0].Name = "backend" - err := fakeClient.Update(ctx, exportedServices) + err = fakeClient.Update(ctx, exportedServices) req.NoError(err) resp, err := controller.Reconcile(ctx, ctrl.Request{ @@ -356,14 +347,9 @@ func TestExportedServicesController_deletesExportedServices(tt *testing.T) { } s.AddKnownTypes(v1alpha1.GroupVersion, exportedServices) - consul, err := testutil.NewTestServerConfigT(t, nil) - req.NoError(err) - defer consul.Stop() - consul.WaitForServiceIntentions(t) - consulClient, err := capi.NewClient(&capi.Config{ - Address: consul.HTTPAddr, - }) - req.NoError(err) + testClient := test.TestServerWithConnMgrWatcher(t, nil) + testClient.TestServer.WaitForServiceIntentions(t) + consulClient := testClient.APIClient fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(exportedServices).Build() @@ -372,7 +358,8 @@ func TestExportedServicesController_deletesExportedServices(tt *testing.T) { Log: logrtest.TestLogger{T: t}, Scheme: s, ConfigEntryController: &controller.ConfigEntryController{ - ConsulClient: consulClient, + ConsulClientConfig: testClient.Cfg, + ConsulServerConnMgr: testClient.Watcher, EnableConsulNamespaces: true, EnableNSMirroring: c.Mirror, NSMirroringPrefix: c.MirrorPrefix, diff --git a/control-plane/helper/test/test_util.go b/control-plane/helper/test/test_util.go index b62646d85b..a6e02092e2 100644 --- a/control-plane/helper/test/test_util.go +++ b/control-plane/helper/test/test_util.go @@ -59,7 +59,7 @@ func TestServerWithConnMgrWatcher(t *testing.T, callback testutil.ServerConfigCa ctx, cancelFunc := context.WithCancel(context.Background()) t.Cleanup(cancelFunc) hcLog := hclog.New(&hclog.LoggerOptions{Level: hclog.Debug}) - watcher, err := discovery.NewWatcher(ctx, discovery.Config{Addresses: "exec=echo 127.0.0.1", GRPCPort: cfg.Ports.GRPC}, hcLog) + watcher, err := discovery.NewWatcher(ctx, discovery.Config{Addresses: "127.0.0.1", GRPCPort: cfg.Ports.GRPC}, hcLog) 
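+	// The watcher accepts a plain IP or DNS name (in addition to the "exec=" form),
+	// so it can point straight at the local test server's gRPC port; it runs in the
+	// background below for the lifetime of the test.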
require.NoError(t, err) t.Cleanup(watcher.Stop) go watcher.Run() diff --git a/control-plane/subcommand/connect-init/command_ent_test.go b/control-plane/subcommand/connect-init/command_ent_test.go index 41c9e83a54..a283d627c5 100644 --- a/control-plane/subcommand/connect-init/command_ent_test.go +++ b/control-plane/subcommand/connect-init/command_ent_test.go @@ -85,7 +85,7 @@ func TestRun_WithNamespaces(t *testing.T) { // CONSUL_HTTP_ADDR when it processes the command template. flags := []string{"-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-addresses", "exec=echo 127.0.0.1", + "-addresses", "127.0.0.1", "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), "-namespace", c.consulServiceNamespace, diff --git a/control-plane/subcommand/connect-init/command_test.go b/control-plane/subcommand/connect-init/command_test.go index 7219a3916b..e9a8aed09a 100644 --- a/control-plane/subcommand/connect-init/command_test.go +++ b/control-plane/subcommand/connect-init/command_test.go @@ -161,7 +161,7 @@ func TestRun_ConnectServices(t *testing.T) { flags := []string{"-pod-name", testPodName, "-pod-namespace", testPodNamespace, "-service-name", tt.serviceName, - "-addresses", "exec=echo 127.0.0.1", + "-addresses", "127.0.0.1", "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), "-proxy-id-file", proxyFile, @@ -315,7 +315,7 @@ func TestRun_Gateways(t *testing.T) { flags := []string{"-pod-name", testGatewayName, "-pod-namespace", testPodNamespace, "-gateway-kind", tt.gatewayKind, - "-addresses", "exec=echo 127.0.0.1", + "-addresses", "127.0.0.1", "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), "-proxy-id-file", proxyFile, @@ -510,7 +510,7 @@ func TestRun_ConnectServices_Errors(t *testing.T) { serviceRegistrationPollingAttempts: 1, } flags := []string{ - "-addresses", "exec=echo 127.0.0.1", + "-addresses", "127.0.0.1", "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), "-pod-name", testPodName, @@ -665,7 +665,7 @@ func TestRun_RetryServicePolling(t *testing.T) { flags := []string{ "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-addresses", "exec=echo 127.0.0.1", + "-addresses", "127.0.0.1", "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), "-proxy-id-file", proxyFile, @@ -718,7 +718,7 @@ func TestRun_InvalidProxyFile(t *testing.T) { flags := []string{ "-pod-name", testPodName, "-pod-namespace", testPodNamespace, - "-addresses", "exec=echo 127.0.0.1", + "-addresses", "127.0.0.1", "-http-port", strconv.Itoa(serverCfg.Ports.HTTP), "-grpc-port", strconv.Itoa(serverCfg.Ports.GRPC), "-proxy-id-file", randFileName, diff --git a/control-plane/subcommand/controller/command.go b/control-plane/subcommand/controller/command.go index ad07df0172..2d125a49ca 100644 --- a/control-plane/subcommand/controller/command.go +++ b/control-plane/subcommand/controller/command.go @@ -6,17 +6,18 @@ import ( "flag" "fmt" "os" + "os/signal" "sync" + "syscall" "github.com/hashicorp/consul-k8s/control-plane/api/common" "github.com/hashicorp/consul-k8s/control-plane/api/v1alpha1" - "github.com/hashicorp/consul-k8s/control-plane/consul" + connectinject "github.com/hashicorp/consul-k8s/control-plane/connect-inject" "github.com/hashicorp/consul-k8s/control-plane/controller" mutatingwebhookconfiguration 
"github.com/hashicorp/consul-k8s/control-plane/helper/mutating-webhook-configuration" cmdCommon "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-discover" + "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/mitchellh/cli" "go.uber.org/zap/zapcore" "k8s.io/apimachinery/pkg/runtime" @@ -34,13 +35,12 @@ const WebhookCAFilename = "ca.crt" type Command struct { UI cli.Ui - flagSet *flag.FlagSet - httpFlags *flags.HTTPFlags + flagSet *flag.FlagSet + consulFlags *flags.ConsulFlags flagWebhookTLSCertDir string flagEnableLeaderElection bool flagEnableWebhooks bool - flagDatacenter string flagLogLevel string flagLogJSON bool flagResourcePrefix string @@ -53,14 +53,8 @@ type Command struct { flagNSMirroringPrefix string flagCrossNSACLPolicy string - flagServerAddresses []string - flagServerPort uint - flagUseHTTPS bool - once sync.Once help string - - providers map[string]discover.Provider } var ( @@ -79,8 +73,6 @@ func (c *Command) init() { c.flagSet.BoolVar(&c.flagEnableLeaderElection, "enable-leader-election", false, "Enable leader election for controller. "+ "Enabling this will ensure there is only one active controller manager.") - c.flagSet.StringVar(&c.flagDatacenter, "datacenter", "", - "Name of the Consul datacenter the controller is operating in. This is added as metadata on managed custom resources.") c.flagSet.BoolVar(&c.flagEnableNamespaces, "enable-namespaces", false, "[Enterprise Only] Enables Consul Enterprise namespaces, in either a single Consul namespace or mirrored.") c.flagSet.StringVar(&c.flagConsulDestinationNamespace, "consul-destination-namespace", "default", @@ -106,15 +98,9 @@ func (c *Command) init() { "%q, %q, %q, and %q.", zapcore.DebugLevel.String(), zapcore.InfoLevel.String(), zapcore.WarnLevel.String(), zapcore.ErrorLevel.String())) c.flagSet.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") - c.flagSet.Var((*flags.AppendSliceValue)(&c.flagServerAddresses), "server-address", - "The IP, DNS name or the cloud auto-join string of the Consul server(s). If providing IPs or DNS names, may be specified multiple times. "+ - "At least one value is required.") - c.flagSet.UintVar(&c.flagServerPort, "server-port", 8500, "The HTTP or HTTPS port of the Consul server. Defaults to 8500.") - c.flagSet.BoolVar(&c.flagUseHTTPS, "use-https", false, - "Toggle for using HTTPS for all API calls to Consul.") - - c.httpFlags = &flags.HTTPFlags{} - flags.Merge(c.flagSet, c.httpFlags.Flags()) + + c.consulFlags = &flags.ConsulFlags{} + flags.Merge(c.flagSet, c.consulFlags.Flags()) c.help = flags.Usage(help, c.flagSet) } @@ -138,6 +124,37 @@ func (c *Command) Run(args []string) int { ctrl.SetLogger(zapLogger) klog.SetLogger(zapLogger) + // TODO (agentless): find a way to integrate zap logger (via having a generic logger interface in connection manager). + hcLog, err := cmdCommon.NamedLogger(c.flagLogLevel, c.flagLogJSON, "consul-server-connection-manager") + if err != nil { + c.UI.Error(fmt.Sprintf("Error setting up logging: %s", err.Error())) + return 1 + } + + // Create a context to be used by the processes started in this command. 
+ ctx, cancelFunc := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancelFunc() + // Start Consul server Connection manager + serverConnMgrCfg, err := c.consulFlags.ConsulServerConnMgrConfig() + if err != nil { + c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) + return 1 + } + watcher, err := discovery.NewWatcher(ctx, serverConnMgrCfg, hcLog) + if err != nil { + c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) + return 1 + } + + go watcher.Run() + defer watcher.Stop() + + _, err = watcher.State() + if err != nil { + c.UI.Error(fmt.Sprintf("unable to get Consul server addresses from watcher: %s", err)) + return 1 + } + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, Port: 9443, @@ -150,38 +167,10 @@ func (c *Command) Run(args []string) int { return 1 } - cfg := api.DefaultConfig() - if c.flagUseHTTPS { - cfg.Scheme = "https" - } - if len(c.flagServerAddresses) > 0 { - // TODO (ishustava): eventually we will use go-netaddr library which doesn't use hclog, - // and so this additional logger will go away and we'll be able to use zap logger. - hclogger, err := cmdCommon.Logger(c.flagLogLevel, c.flagLogJSON) - if err != nil { - c.UI.Error(fmt.Sprintf("Unable to create logger: %s", err)) - return 1 - } - serverAddresses, err := cmdCommon.GetResolvedServerAddresses(c.flagServerAddresses, c.providers, hclogger) - if err != nil { - c.UI.Error(fmt.Sprintf("Unable to discover any Consul addresses from %q: %s", c.flagServerAddresses[0], err)) - return 1 - } - - serverAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort) - cfg.Address = serverAddr - } - c.httpFlags.MergeOntoConfig(cfg) - consulClient, err := consul.NewClient(cfg, c.httpFlags.ConsulAPITimeout()) - if err != nil { - setupLog.Error(err, "connecting to Consul agent") - return 1 - } - - partitionsEnabled := c.httpFlags.Partition() != "" + partitionsEnabled := c.consulFlags.Partition != "" consulMeta := common.ConsulMeta{ PartitionsEnabled: partitionsEnabled, - Partition: c.httpFlags.Partition(), + Partition: c.consulFlags.Partition, NamespacesEnabled: c.flagEnableNamespaces, DestinationNamespace: c.flagConsulDestinationNamespace, Mirroring: c.flagEnableNSMirroring, @@ -189,8 +178,9 @@ func (c *Command) Run(args []string) int { } configEntryReconciler := &controller.ConfigEntryController{ - ConsulClient: consulClient, - DatacenterName: c.flagDatacenter, + ConsulClientConfig: c.consulFlags.ConsulClientConfig(), + ConsulServerConnMgr: watcher, + DatacenterName: c.consulFlags.Datacenter, EnableConsulNamespaces: c.flagEnableNamespaces, ConsulDestinationNamespace: c.flagConsulDestinationNamespace, EnableNSMirroring: c.flagEnableNSMirroring, @@ -297,76 +287,71 @@ func (c *Command) Run(args []string) int { // annotation in each webhook file. 
mgr.GetWebhookServer().Register("/mutate-v1alpha1-servicedefaults", &webhook.Admission{Handler: &v1alpha1.ServiceDefaultsWebhook{ - Client: mgr.GetClient(), - ConsulClient: consulClient, - Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceDefaults), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceDefaults), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-serviceresolver", &webhook.Admission{Handler: &v1alpha1.ServiceResolverWebhook{ - Client: mgr.GetClient(), - ConsulClient: consulClient, - Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceResolver), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceResolver), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-proxydefaults", &webhook.Admission{Handler: &v1alpha1.ProxyDefaultsWebhook{ - Client: mgr.GetClient(), - ConsulClient: consulClient, - Logger: ctrl.Log.WithName("webhooks").WithName(common.ProxyDefaults), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.ProxyDefaults), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-mesh", &webhook.Admission{Handler: &v1alpha1.MeshWebhook{ - Client: mgr.GetClient(), - ConsulClient: consulClient, - Logger: ctrl.Log.WithName("webhooks").WithName(common.Mesh), + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.Mesh), }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-exportedservices", &webhook.Admission{Handler: &v1alpha1.ExportedServicesWebhook{ - Client: mgr.GetClient(), - ConsulClient: consulClient, - Logger: ctrl.Log.WithName("webhooks").WithName(common.ExportedServices), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.ExportedServices), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-servicerouter", &webhook.Admission{Handler: &v1alpha1.ServiceRouterWebhook{ - Client: mgr.GetClient(), - ConsulClient: consulClient, - Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceRouter), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceRouter), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-servicesplitter", &webhook.Admission{Handler: &v1alpha1.ServiceSplitterWebhook{ - Client: mgr.GetClient(), - ConsulClient: consulClient, - Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceSplitter), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceSplitter), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-serviceintentions", &webhook.Admission{Handler: &v1alpha1.ServiceIntentionsWebhook{ - Client: mgr.GetClient(), - ConsulClient: consulClient, - Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceIntentions), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.ServiceIntentions), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-ingressgateway", &webhook.Admission{Handler: &v1alpha1.IngressGatewayWebhook{ - Client: mgr.GetClient(), - ConsulClient: consulClient, - Logger: ctrl.Log.WithName("webhooks").WithName(common.IngressGateway), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), 
+ Logger: ctrl.Log.WithName("webhooks").WithName(common.IngressGateway), + ConsulMeta: consulMeta, }}) mgr.GetWebhookServer().Register("/mutate-v1alpha1-terminatinggateway", &webhook.Admission{Handler: &v1alpha1.TerminatingGatewayWebhook{ - Client: mgr.GetClient(), - ConsulClient: consulClient, - Logger: ctrl.Log.WithName("webhooks").WithName(common.TerminatingGateway), - ConsulMeta: consulMeta, + Client: mgr.GetClient(), + Logger: ctrl.Log.WithName("webhooks").WithName(common.TerminatingGateway), + ConsulMeta: consulMeta, }}) } // +kubebuilder:scaffold:builder + if err = mgr.AddReadyzCheck("ready", connectinject.ReadinessCheck{CertDir: c.flagWebhookTLSCertDir}.Ready); err != nil { + setupLog.Error(err, "unable to create readiness check", "controller", connectinject.EndpointsController{}) + return 1 + } + if c.flagEnableWebhookCAUpdate { err := c.updateWebhookCABundle() if err != nil { @@ -414,12 +399,12 @@ func (c *Command) validateFlags() error { return errors.New("Invalid arguments: should have no non-flag arguments") } if c.flagEnableWebhooks && c.flagWebhookTLSCertDir == "" { - return errors.New("Invalid arguments: -webhook-tls-cert-dir must be set") + return errors.New("invalid arguments: -webhook-tls-cert-dir must be set") } - if c.flagDatacenter == "" { + if c.consulFlags.Datacenter == "" { return errors.New("Invalid arguments: -datacenter must be set") } - if c.httpFlags.ConsulAPITimeout() <= 0 { + if c.consulFlags.APITimeout <= 0 { return errors.New("-consul-api-timeout must be set to a value greater than 0") } diff --git a/control-plane/subcommand/controller/command_test.go b/control-plane/subcommand/controller/command_test.go index 016299d125..792548f324 100644 --- a/control-plane/subcommand/controller/command_test.go +++ b/control-plane/subcommand/controller/command_test.go @@ -27,12 +27,12 @@ func TestRun_FlagValidation(t *testing.T) { expErr: "-datacenter must be set", }, { - flags: []string{"-webhook-tls-cert-dir", "/foo", "-datacenter", "foo"}, - expErr: "-consul-api-timeout must be set to a value greater than 0", + flags: []string{"-webhook-tls-cert-dir", "/foo", "-datacenter", "foo", "-api-timeout=0s"}, + expErr: "-api-timeout must be set to a value greater than 0", }, { flags: []string{"-webhook-tls-cert-dir", "/foo", "-datacenter", "foo", - "-consul-api-timeout", "5s", "-log-level", "invalid"}, + "-log-level", "invalid"}, expErr: `unknown log level "invalid": unrecognized level: "invalid"`, }, } diff --git a/control-plane/subcommand/flags/consul.go b/control-plane/subcommand/flags/consul.go index c785c009f4..14839e8962 100644 --- a/control-plane/subcommand/flags/consul.go +++ b/control-plane/subcommand/flags/consul.go @@ -28,7 +28,8 @@ const ( CACertPEMEnvVar = "CONSUL_CACERT_PEM" TLSServerNameEnvVar = "CONSUL_TLS_SERVER_NAME" - ACLTokenEnvVar = "CONSUL_ACL_TOKEN" + ACLTokenEnvVar = "CONSUL_ACL_TOKEN" + ACLTokenFileEnvVar = "CONSUL_ACL_TOKEN_FILE" LoginAuthMethodEnvVar = "CONSUL_LOGIN_AUTH_METHOD" LoginBearerTokenFileEnvVar = "CONSUL_LOGIN_BEARER_TOKEN_FILE" @@ -65,7 +66,8 @@ type ConsulTLSFlags struct { type ConsulACLFlags struct { ConsulLogin ConsulLoginFlags - Token string + Token string + TokenFile string } type ConsulLoginFlags struct { @@ -117,7 +119,7 @@ func (f *ConsulFlags) Flags() *flag.FlagSet { fs.StringVar(&f.Addresses, "addresses", os.Getenv(AddressesEnvVar), "Consul server addresses. Can also be provided via CONSUL_ADDRESSES environment variable. "+ "Value can be:\n"+ - "1. 
DNS name (that resolves to servers or DNS name of a load-balancer front of Consul servers); OR\n"+ + "1. DNS name (that resolves to servers or DNS name of a load-balancer front of Consul servers) or an IP address; OR\n"+ "2.'exec='. The executable\n"+ " a) on success - should exit 0 and print to stdout whitespace delimited IP (v4/v6) addresses\n"+ " b) on failure - exit with a non-zero code and optionally print an error message of upto 1024 bytes to stderr.\n"+ @@ -143,6 +145,9 @@ func (f *ConsulFlags) Flags() *flag.FlagSet { fs.StringVar(&f.Token, "token", os.Getenv(ACLTokenEnvVar), "ACL token to use for connection to Consul."+ "This can also be specified via the CONSUL_ACL_TOKEN environment variable.") + fs.StringVar(&f.TokenFile, "token-file", os.Getenv(ACLTokenFileEnvVar), + "ACL token file to use for connection to Consul."+ + "This can also be specified via the CONSUL_ACL_TOKEN_FILE environment variable.") fs.StringVar(&f.ConsulLogin.AuthMethod, "auth-method-name", os.Getenv(LoginAuthMethodEnvVar), "Auth method name to use for login to Consul."+ "This can also be specified via the CONSUL_LOGIN_AUTH_METHOD environment variable.") @@ -209,6 +214,13 @@ func (f *ConsulFlags) ConsulServerConnMgrConfig() (discovery.Config, error) { } else if f.Token != "" { cfg.Credentials.Type = discovery.CredentialsTypeStatic cfg.Credentials.Static.Token = f.Token + } else if f.TokenFile != "" { + token, err := os.ReadFile(f.TokenFile) + if err != nil { + return discovery.Config{}, err + } + cfg.Credentials.Type = discovery.CredentialsTypeStatic + cfg.Credentials.Static.Token = string(token) } return cfg, nil @@ -240,6 +252,8 @@ func (f *ConsulFlags) ConsulClientConfig() *consul.Config { if f.Token != "" { cfg.Token = f.Token + } else if f.TokenFile != "" { + cfg.TokenFile = f.TokenFile } return &consul.Config{ diff --git a/control-plane/subcommand/flags/consul_test.go b/control-plane/subcommand/flags/consul_test.go index 8ff0c260cc..d49a8c947a 100644 --- a/control-plane/subcommand/flags/consul_test.go +++ b/control-plane/subcommand/flags/consul_test.go @@ -32,6 +32,7 @@ func TestConsulFlags_Flags(t *testing.T) { TLSServerNameEnvVar: "server.consul", ACLTokenEnvVar: "test-token", + ACLTokenFileEnvVar: "/path/to/token", LoginAuthMethodEnvVar: "test-auth-method", LoginBearerTokenFileEnvVar: "path/to/token", LoginDatacenterEnvVar: "other-test-dc", @@ -54,7 +55,8 @@ func TestConsulFlags_Flags(t *testing.T) { TLSServerName: "server.consul", }, ConsulACLFlags: ConsulACLFlags{ - Token: "test-token", + Token: "test-token", + TokenFile: "/path/to/token", ConsulLogin: ConsulLoginFlags{ AuthMethod: "test-auth-method", BearerTokenFile: "path/to/token", @@ -190,6 +192,25 @@ func TestConsulFlags_ConsulServerConnMgrConfig(t *testing.T) { }, }, }, + "Static ACL token file": { + flags: ConsulFlags{ + Addresses: "consul.address", + ConsulACLFlags: ConsulACLFlags{ + // This is the content of the token that we will + // write to a temp file and expect the config to have this in its contents + TokenFile: "test-token", + }, + }, + expConfig: discovery.Config{ + Addresses: "consul.address", + Credentials: discovery.Credentials{ + Type: discovery.CredentialsTypeStatic, + Static: discovery.StaticTokenCredential{ + Token: "test-token", + }, + }, + }, + }, } for name, c := range cases { @@ -203,6 +224,15 @@ func TestConsulFlags_ConsulServerConnMgrConfig(t *testing.T) { _, err = tokenFile.WriteString("bearer-token") require.NoError(t, err) c.flags.ConsulLogin.BearerTokenFile = tokenFile.Name() + } else if c.flags.TokenFile != "" { + 
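+				// Write the token contents held in TokenFile to a temp file and
+				// repoint TokenFile at that file so ConsulServerConnMgrConfig can
+				// read it back as static credentials.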
tokenFile, err := os.CreateTemp("", "") + require.NoError(t, err) + t.Cleanup(func() { + _ = os.Remove(tokenFile.Name()) + }) + _, err = tokenFile.WriteString(c.flags.TokenFile) + require.NoError(t, err) + c.flags.TokenFile = tokenFile.Name() } cfg, err := c.flags.ConsulServerConnMgrConfig() require.NoError(t, err) @@ -365,6 +395,17 @@ func TestConsulFlags_ConsulAPIClientConfig(t *testing.T) { Token: "test-token", }, }, + "ACL token file provided": { + flags: ConsulFlags{ + ConsulACLFlags: ConsulACLFlags{ + TokenFile: "/path/to/token", + }, + }, + expConfig: &api.Config{ + Scheme: "http", + TokenFile: "/path/to/token", + }, + }, } for name, c := range cases { diff --git a/control-plane/subcommand/partition-init/command.go b/control-plane/subcommand/partition-init/command.go index f539b4c62a..7ca70b50a7 100644 --- a/control-plane/subcommand/partition-init/command.go +++ b/control-plane/subcommand/partition-init/command.go @@ -11,9 +11,8 @@ import ( "github.com/hashicorp/consul-k8s/control-plane/consul" "github.com/hashicorp/consul-k8s/control-plane/subcommand/common" "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" - k8sflags "github.com/hashicorp/consul-k8s/control-plane/subcommand/flags" + "github.com/hashicorp/consul-server-connection-manager/discovery" "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-discover" "github.com/hashicorp/go-hclog" "github.com/mitchellh/cli" ) @@ -21,16 +20,8 @@ import ( type Command struct { UI cli.Ui - flags *flag.FlagSet - k8s *k8sflags.K8SFlags - http *flags.HTTPFlags - - flagPartitionName string - - // Flags to configure Consul connection - flagServerAddresses []string - flagServerPort uint - flagUseHTTPS bool + flags *flag.FlagSet + consul *flags.ConsulFlags flagLogLevel string flagLogJSON bool @@ -45,21 +36,11 @@ type Command struct { once sync.Once help string - - providers map[string]discover.Provider } func (c *Command) init() { c.flags = flag.NewFlagSet("", flag.ContinueOnError) - c.flags.StringVar(&c.flagPartitionName, "partition-name", "", "The name of the partition being created.") - - c.flags.Var((*flags.AppendSliceValue)(&c.flagServerAddresses), "server-address", - "The IP, DNS name or the cloud auto-join string of the Consul server(s). If providing IPs or DNS names, may be specified multiple times. "+ - "At least one value is required.") - c.flags.UintVar(&c.flagServerPort, "server-port", 8500, "The HTTP or HTTPS port of the Consul server. Defaults to 8500.") - c.flags.BoolVar(&c.flagUseHTTPS, "use-https", false, - "Toggle for using HTTPS for all API calls to Consul.") c.flags.DurationVar(&c.flagTimeout, "timeout", 10*time.Minute, "How long we'll try to bootstrap Partitions for before timing out, e.g. 1ms, 2s, 3m") c.flags.StringVar(&c.flagLogLevel, "log-level", "info", @@ -68,10 +49,8 @@ func (c *Command) init() { c.flags.BoolVar(&c.flagLogJSON, "log-json", false, "Enable or disable JSON output format for logging.") - c.k8s = &k8sflags.K8SFlags{} - c.http = &flags.HTTPFlags{} - flags.Merge(c.flags, c.k8s.Flags()) - flags.Merge(c.flags, c.http.Flags()) + c.consul = &flags.ConsulFlags{} + flags.Merge(c.flags, c.consul.Flags()) c.help = flags.Usage(help, c.flags) // Default retry to 1s. This is exposed for setting in tests. 
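Taken together, the pieces above give partition-init (and the other commands that consume ConsulFlags) a single path from flags to a Consul client. Below is a minimal, hypothetical sketch of that wiring for the static-token-file case, using only the helpers shown in these hunks; the server address and token path are placeholder values, and error handling is reduced to log.Fatal for brevity:

package main

import (
	"context"
	"log"

	"github.com/hashicorp/consul-k8s/control-plane/consul"
	"github.com/hashicorp/consul-k8s/control-plane/subcommand/flags"
	"github.com/hashicorp/consul-server-connection-manager/discovery"
	"github.com/hashicorp/go-hclog"
)

func main() {
	cf := flags.ConsulFlags{
		Addresses:      "consul-server.consul.svc",                          // placeholder address
		ConsulACLFlags: flags.ConsulACLFlags{TokenFile: "/consul/acl/token"}, // placeholder path
	}

	// Reads the token file and returns static credentials for the watcher.
	cfg, err := cf.ConsulServerConnMgrConfig()
	if err != nil {
		log.Fatal(err)
	}

	watcher, err := discovery.NewWatcher(context.Background(), cfg, hclog.NewNullLogger())
	if err != nil {
		log.Fatal(err)
	}
	go watcher.Run()
	defer watcher.Stop()

	// Wait for the watcher to report a server, then build an API client from its state.
	state, err := watcher.State()
	if err != nil {
		log.Fatal(err)
	}
	client, err := consul.NewClientFromConnMgrState(cf.ConsulClientConfig(), state)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // ready for API calls such as client.Partitions().Read(...)
}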
@@ -116,45 +95,52 @@ func (c *Command) Run(args []string) int { return 1 } - serverAddresses, err := common.GetResolvedServerAddresses(c.flagServerAddresses, c.providers, c.log) + // Start Consul server Connection manager + serverConnMgrCfg, err := c.consul.ConsulServerConnMgrConfig() + serverConnMgrCfg.ServerWatchDisabled = true + if err != nil { + c.UI.Error(fmt.Sprintf("unable to create config for consul-server-connection-manager: %s", err)) + return 1 + } + watcher, err := discovery.NewWatcher(c.ctx, serverConnMgrCfg, c.log.Named("consul-server-connection-manager")) if err != nil { - c.UI.Error(fmt.Sprintf("Unable to discover any Consul addresses from %q: %s", c.flagServerAddresses[0], err)) + c.UI.Error(fmt.Sprintf("unable to create Consul server watcher: %s", err)) return 1 } - scheme := "http" - if c.flagUseHTTPS { - scheme = "https" + go watcher.Run() + defer watcher.Stop() + + state, err := watcher.State() + if err != nil { + c.UI.Error(fmt.Sprintf("unable to get Consul server addresses from watcher: %s", err)) + return 1 } - // For all of the next operations we'll need a Consul client. - serverAddr := fmt.Sprintf("%s:%d", serverAddresses[0], c.flagServerPort) - cfg := api.DefaultConfig() - cfg.Address = serverAddr - cfg.Scheme = scheme - c.http.MergeOntoConfig(cfg) - consulClient, err := consul.NewClient(cfg, c.http.ConsulAPITimeout()) + + consulClient, err := consul.NewClientFromConnMgrState(c.consul.ConsulClientConfig(), state) if err != nil { - c.UI.Error(fmt.Sprintf("Error creating Consul client for addr %q: %s", serverAddr, err)) + c.UI.Error(fmt.Sprintf("unable to create Consul client: %s", err)) return 1 } + for { - partition, _, err := consulClient.Partitions().Read(c.ctx, c.flagPartitionName, nil) + partition, _, err := consulClient.Partitions().Read(c.ctx, c.consul.Partition, nil) // The API does not return an error if the Partition does not exist. It returns a nil Partition. if err != nil { - c.log.Error("Error reading Partition from Consul", "name", c.flagPartitionName, "error", err.Error()) + c.log.Error("Error reading Partition from Consul", "name", c.consul.Partition, "error", err.Error()) } else if partition == nil { // Retry Admin Partition creation until it succeeds, or we reach the command timeout. 
_, _, err = consulClient.Partitions().Create(c.ctx, &api.Partition{ - Name: c.flagPartitionName, + Name: c.consul.Partition, Description: "Created by Helm installation", }, nil) if err == nil { - c.log.Info("Successfully created Admin Partition", "name", c.flagPartitionName) + c.log.Info("Successfully created Admin Partition", "name", c.consul.Partition) return 0 } - c.log.Error("Error creating partition", "name", c.flagPartitionName, "error", err.Error()) + c.log.Error("Error creating partition", "name", c.consul.Partition, "error", err.Error()) } else { - c.log.Info("Admin Partition already exists", "name", c.flagPartitionName) + c.log.Info("Admin Partition already exists", "name", c.consul.Partition) return 0 } // Wait on either the retry duration (in which case we continue) or the @@ -164,28 +150,28 @@ func (c *Command) Run(args []string) int { case <-time.After(c.retryDuration): continue case <-c.ctx.Done(): - c.log.Error("Timed out attempting to create partition", "name", c.flagPartitionName) + c.log.Error("Timed out attempting to create partition", "name", c.consul.Partition) return 1 } } } func (c *Command) validateFlags() error { - if len(c.flagServerAddresses) == 0 { - return errors.New("-server-address must be set at least once") + if len(c.consul.Addresses) == 0 { + return errors.New("-addresses must be set") } - if c.flagPartitionName == "" { - return errors.New("-partition-name must be set") + if c.consul.Partition == "" { + return errors.New("-partition must be set") } - if c.http.ConsulAPITimeout() <= 0 { - return errors.New("-consul-api-timeout must be set to a value greater than 0") + if c.consul.APITimeout <= 0 { + return errors.New("-api-timeout must be set to a value greater than 0") } return nil } -const synopsis = "Initialize an Admin Partition on Consul." +const synopsis = "Initialize an Admin Partition in Consul." 
const help = ` Usage: consul-k8s-control-plane partition-init [options] diff --git a/control-plane/subcommand/partition-init/command_ent_test.go b/control-plane/subcommand/partition-init/command_ent_test.go index 1e833430f9..5bb1868b39 100644 --- a/control-plane/subcommand/partition-init/command_ent_test.go +++ b/control-plane/subcommand/partition-init/command_ent_test.go @@ -23,22 +23,21 @@ func TestRun_FlagValidation(t *testing.T) { }{ { flags: nil, - expErr: "-server-address must be set at least once", + expErr: "addresses must be set", }, { - flags: []string{"-server-address", "foo"}, - expErr: "-partition-name must be set", + flags: []string{"-addresses", "foo"}, + expErr: "-partition must be set", }, { flags: []string{ - "-server-address", "foo", "-partition-name", "bar"}, - expErr: "-consul-api-timeout must be set to a value greater than 0", + "-addresses", "foo", "-partition", "bar", "-api-timeout", "0s"}, + expErr: "-api-timeout must be set to a value greater than 0", }, { flags: []string{ - "-server-address", "foo", - "-partition-name", "bar", - "-consul-api-timeout", "5s", + "-addresses", "foo", + "-partition", "bar", "-log-level", "invalid", }, expErr: "unknown log level: invalid", @@ -75,10 +74,10 @@ func TestRun_PartitionCreate(t *testing.T) { } cmd.init() args := []string{ - "-server-address=" + strings.Split(server.HTTPAddr, ":")[0], - "-server-port=" + strings.Split(server.HTTPAddr, ":")[1], - "-partition-name", partitionName, - "-consul-api-timeout", "5s", + "-addresses=" + "127.0.0.1", + "-http-port=" + strings.Split(server.HTTPAddr, ":")[1], + "-grpc-port=" + strings.Split(server.GRPCAddr, ":")[1], + "-partition", partitionName, } responseCode := cmd.Run(args) @@ -114,10 +113,10 @@ func TestRun_PartitionExists(t *testing.T) { } cmd.init() args := []string{ - "-server-address=" + strings.Split(server.HTTPAddr, ":")[0], - "-server-port=" + strings.Split(server.HTTPAddr, ":")[1], - "-partition-name", partitionName, - "-consul-api-timeout", "5s", + "-addresses=" + "127.0.0.1", + "-http-port=" + strings.Split(server.HTTPAddr, ":")[1], + "-grpc-port=" + strings.Split(server.GRPCAddr, ":")[1], + "-partition", partitionName, } responseCode := cmd.Run(args) @@ -143,11 +142,11 @@ func TestRun_ExitsAfterTimeout(t *testing.T) { } cmd.init() args := []string{ - "-server-address=" + strings.Split(server.HTTPAddr, ":")[0], - "-server-port=" + strings.Split(server.HTTPAddr, ":")[1], - "-partition-name", partitionName, + "-addresses=" + "127.0.0.1", + "-http-port=" + strings.Split(server.HTTPAddr, ":")[1], + "-grpc-port=" + strings.Split(server.GRPCAddr, ":")[1], "-timeout", "500ms", - "-consul-api-timeout", "5s", + "-partition", partitionName, } server.Stop() startTime := time.Now() @@ -159,5 +158,3 @@ func TestRun_ExitsAfterTimeout(t *testing.T) { // some buffer time required for the task to run and assignments to occur. require.WithinDuration(t, completeTime, startTime, 1*time.Second) } - -// TODO: Write tests with ACLs enabled diff --git a/control-plane/subcommand/server-acl-init/rules.go b/control-plane/subcommand/server-acl-init/rules.go index 8b2dec7a14..20e4e40e79 100644 --- a/control-plane/subcommand/server-acl-init/rules.go +++ b/control-plane/subcommand/server-acl-init/rules.go @@ -53,6 +53,9 @@ agent_prefix "" { partition_prefix "" { namespace_prefix "" { acl = "write" + service_prefix "" { + policy = "write" + } } }`
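With the added service_prefix block, this fragment of the rendered ACL rules now grants service write access in every namespace of every partition:

partition_prefix "" {
  namespace_prefix "" {
    acl = "write"
    service_prefix "" {
      policy = "write"
    }
  }
}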