Skip to content

Commit

Permalink
WIP
Browse files Browse the repository at this point in the history
  • Loading branch information
justinsb committed Feb 26, 2024
1 parent 13ebb81 commit 5187da1
Show file tree
Hide file tree
Showing 5 changed files with 212 additions and 9 deletions.
19 changes: 19 additions & 0 deletions clusterapi/README.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,22 @@
This is experimental integration with the cluster-api. It is very much not production ready (and currently barely works).

We plug in our own bootstrap provider with the goal of enabling cluster-api nodes to join a kOps cluster.


kops create cluster foo.k8s.local --zones us-east4-a
kops update cluster foo.k8s.local --yes --admin
kops validate cluster --wait=10m

cd cluster-api-provider-gcp
REGISTRY=justinsb make docker-build docker-push
REGISTRY=justinsb make install-management-cluster # Doesn't yet exist in capg

cd kops/clusterapi

k delete machinedeployment --all; k delete gcpmachinetemplate --all

cat examples/manifest.yaml | IMAGE_ID=projects/ubuntu-os-cloud/global/images/family/ubuntu-2204-lts GCP_NODE_MACHINE_TYPE=e2-medium KUBERNETES_VERSION=v1.28.3 WORKER_MACHINE_COUNT=1 GCP_ZONE=us-east4-a GCP_REGION=us-east4 GCP_NETWORK_NAME=foo-k8s-local GCP_SUBNET=us-east4-foo-k8s-local GCP_PROJECT=$(gcloud config get project) CLUSTER_NAME=foo-k8s-local envsubst | kubectl apply --server-side -n kube-system -f -

# IMAGE_ID=projects/debian-cloud/global/images/family/debian-12 doesn't work with user-data (reason not yet understood; needs investigation)

go run . # populate secret
179 changes: 178 additions & 1 deletion clusterapi/bootstrap/controllers/kopsconfig_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,10 @@ limitations under the License.
package controllers

import (
"bytes"
"context"
"fmt"
"sort"

corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
Expand All @@ -27,6 +29,15 @@ import (
"k8s.io/klog/v2"
api "k8s.io/kops/clusterapi/bootstrap/kops/api/v1beta1"
clusterv1 "k8s.io/kops/clusterapi/snapshot/cluster-api/api/v1beta1"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/client/simple/vfsclientset"
"k8s.io/kops/pkg/model/resources"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kops/util/pkg/architectures"
"k8s.io/kops/util/pkg/mirrors"
"k8s.io/kops/util/pkg/vfs"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
Expand Down Expand Up @@ -66,7 +77,10 @@ func (r *KopsConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, err
}

data := []byte{}
data, err := r.buildBootstrapData(ctx)
if err != nil {
return ctrl.Result{}, err
}

if err := r.storeBootstrapData(ctx, obj, data); err != nil {
return ctrl.Result{}, err
Expand Down Expand Up @@ -138,3 +152,166 @@ func (r *KopsConfigReconciler) storeBootstrapData(ctx context.Context, parent *a
// conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition)
return nil
}

// buildBootstrapData renders the nodeup bootstrap script for a worker node.
// It loads the cluster definition from the kOps state store, assembles the
// nodeup configuration (assets, boot config, CA keysets), and returns the
// generated script bytes suitable for storing in the bootstrap data secret.
//
// TODO: the cluster name, state store path and API-server IPs are hard-coded
// while this integration is WIP; they must become dynamic before this is usable.
func (r *KopsConfigReconciler) buildBootstrapData(ctx context.Context) ([]byte, error) {
	// TODO: Make dynamic
	clusterName := "foo.k8s.local"
	clusterStoreBasePath := "gs://kops-state-justinsb-root-20220725"
	apiserverAdditionalIPs := []string{"34.145.198.26"}

	vfsContext := vfs.NewVFSContext()
	basePath, err := vfsContext.BuildVfsPath(clusterStoreBasePath)
	if err != nil {
		return nil, fmt.Errorf("parsing vfs base path: %w", err)
	}

	vfsClientset := vfsclientset.NewVFSClientset(vfsContext, basePath)
	cluster, err := vfsClientset.GetCluster(ctx, clusterName)
	if err != nil {
		return nil, fmt.Errorf("getting cluster %q: %w", clusterName, err)
	}

	// Downstream config building reads KubeAPIServer; make sure it is populated.
	if cluster.Spec.KubeAPIServer == nil {
		cluster.Spec.KubeAPIServer = &kops.KubeAPIServerConfig{}
	}

	// We only build bootstrap data for worker nodes here.
	ig := &kops.InstanceGroup{}
	ig.Spec.Role = kops.InstanceGroupRoleNode

	getAssets := false
	assetBuilder := assets.NewAssetBuilder(vfsContext, cluster.Spec.Assets, cluster.Spec.KubernetesVersion, getAssets)

	// TODO: when EncryptionConfig is enabled, compute the sha256 of the
	// encryptionconfig secret here (see the commented-out reference logic in
	// the original bootstrap script builder).
	encryptionConfigSecretHash := ""

	// Resolve the nodeup binary asset for every supported architecture.
	nodeUpAssets := make(map[architectures.Architecture]*mirrors.MirroredAsset)
	for _, arch := range architectures.GetSupported() {
		asset, err := cloudup.NodeUpAsset(assetBuilder, arch)
		if err != nil {
			return nil, err
		}
		nodeUpAssets[arch] = asset
	}

	// NOTE: named fileAssets (not `assets`) to avoid shadowing the imported
	// assets package, which is still referenced above.
	fileAssets := make(map[architectures.Architecture][]*mirrors.MirroredAsset)
	configBuilder, err := cloudup.NewNodeUpConfigBuilder(cluster, assetBuilder, fileAssets, encryptionConfigSecretHash)
	if err != nil {
		return nil, err
	}

	// The node needs the kubernetes-ca keyset to establish trust with the cluster.
	keysets := make(map[string]*fi.Keyset)

	keystore, err := vfsClientset.KeyStore(cluster)
	if err != nil {
		return nil, err
	}

	for _, keyName := range []string{"kubernetes-ca"} {
		keyset, err := keystore.FindKeyset(ctx, keyName)
		if err != nil {
			return nil, fmt.Errorf("getting keyset %q: %w", keyName, err)
		}

		if keyset == nil {
			return nil, fmt.Errorf("failed to find keyset %q", keyName)
		}

		keysets[keyName] = keyset
	}

	_, bootConfig, err := configBuilder.BuildConfig(ig, apiserverAdditionalIPs, keysets)
	if err != nil {
		return nil, err
	}

	// TODO: set bootConfig.NodeupConfigHash (base64 of the sha256 of the
	// YAML-marshalled nodeup config), as the regular bootstrap script does.

	var nodeupScript resources.NodeUpScript
	nodeupScript.NodeUpAssets = nodeUpAssets
	nodeupScript.BootConfig = bootConfig

	{
		nodeupScript.EnvironmentVariables = func() (string, error) {
			// TODO: populate from the cluster spec (buildEnvironmentVariables);
			// currently empty, so this renders no export statements.
			env := make(map[string]string)

			// Sort keys to have a stable sequence of "export xx=xxx" statements
			keys := make([]string, 0, len(env))
			for k := range env {
				keys = append(keys, k)
			}
			sort.Strings(keys)

			var buf bytes.Buffer
			for _, k := range keys {
				fmt.Fprintf(&buf, "export %s=%s\n", k, env[k])
			}
			return buf.String(), nil
		}

		nodeupScript.ProxyEnv = func() (string, error) {
			// TODO: createProxyEnv(cluster.Spec.Networking.EgressProxy)
			return "", nil
		}
	}

	// TODO: nodeupScript.CompressUserData = fi.ValueOf(b.ig.Spec.CompressUserData)

	// By setting some sysctls early, we avoid broken configurations that prevent nodeup download.
	// See https://github.com/kubernetes/kops/issues/10206 for details.
	// TODO: nodeupScript.SetSysctls = setSysctls()

	nodeupScript.CloudProvider = string(cluster.Spec.GetCloudProvider())

	nodeupScriptResource, err := nodeupScript.Build()
	if err != nil {
		return nil, err
	}

	scriptBytes, err := fi.ResourceAsBytes(nodeupScriptResource)
	if err != nil {
		return nil, err
	}

	return scriptBytes, nil
}
14 changes: 10 additions & 4 deletions clusterapi/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ import (
bootstrapapi "k8s.io/kops/clusterapi/bootstrap/kops/api/v1beta1"
controlplaneapi "k8s.io/kops/clusterapi/controlplane/kops/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/metrics/server"
// +kubebuilder:scaffold:imports
)

Expand Down Expand Up @@ -66,12 +67,17 @@ func run(ctx context.Context) error {
}

kubeConfig := ctrl.GetConfigOrDie()
mgr, err := ctrl.NewManager(kubeConfig, ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddress,
options := ctrl.Options{
Scheme: scheme,
// MetricsBindAddress: metricsAddress,
// LeaderElection: true,
// LeaderElectionID: "kops-clusterapi-leader",
})
}
options.Metrics = server.Options{
BindAddress: metricsAddress,
}
mgr, err := ctrl.NewManager(kubeConfig, options)

if err != nil {
return fmt.Errorf("error starting manager: %w", err)
}
Expand Down
5 changes: 3 additions & 2 deletions pkg/nodeidentity/interfaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,8 @@ type LegacyIdentifier interface {
}

type LegacyInfo struct {
InstanceID string
InstanceGroup string
InstanceID string
InstanceGroup string
// TODO: Remove
InstanceLifecycle string
}
4 changes: 2 additions & 2 deletions upup/pkg/fi/nodeup/command.go
Original file line number Diff line number Diff line change
Expand Up @@ -153,8 +153,8 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
return fmt.Errorf("no instance group defined in nodeup config")
}

if want := bootConfig.NodeupConfigHash; want != "" {
if got := base64.StdEncoding.EncodeToString(nodeupConfigHash[:]); got != want {
if bootConfig.NodeupConfigHash != "" {
if want, got := bootConfig.NodeupConfigHash, base64.StdEncoding.EncodeToString(nodeupConfigHash[:]); got != want {
return fmt.Errorf("nodeup config hash mismatch (was %q, expected %q)", got, want)
}
}
Expand Down

0 comments on commit 5187da1

Please sign in to comment.