diff --git a/.golangci.yml b/.golangci.yml index 600e5928..afb78f62 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -13,6 +13,9 @@ issues: linters: - testpackage - gochecknoglobals + - path: internal/controllers/import_controller(_v3)?\.go + linters: + - dupl - text: var-naming linters: - revive diff --git a/charts/rancher-turtles/templates/deployment.yaml b/charts/rancher-turtles/templates/deployment.yaml index 49132dbc..bc14f62f 100644 --- a/charts/rancher-turtles/templates/deployment.yaml +++ b/charts/rancher-turtles/templates/deployment.yaml @@ -26,7 +26,7 @@ spec: containers: - args: - --leader-elect - - --feature-gates=rancher-kube-secret-patch={{ index .Values "rancherTurtles" "features" "rancher-kubeconfigs" "label"}} + - --feature-gates=managementv3-cluster={{ index .Values "rancherTurtles" "features" "managementv3-cluster" "enabled"}},rancher-kube-secret-patch={{ index .Values "rancherTurtles" "features" "rancher-kubeconfigs" "label"}} {{- range .Values.rancherTurtles.managerArguments }} - {{ . }} {{- end }} diff --git a/charts/rancher-turtles/templates/rancher-turtles-components.yaml b/charts/rancher-turtles/templates/rancher-turtles-components.yaml index de36e160..85fb5e77 100644 --- a/charts/rancher-turtles/templates/rancher-turtles-components.yaml +++ b/charts/rancher-turtles/templates/rancher-turtles-components.yaml @@ -1751,77 +1751,23 @@ rules: - clusters/status verbs: - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - machinepools - verbs: - - create - - delete - get - list - patch - update - watch - apiGroups: - - cluster.x-k8s.io - resources: - - machinepools/finalizers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - machinepools/status - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - machines - - machines/status - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - cluster.x-k8s.io + - coordination.k8s.io resources: - - machinesets + - leases verbs: - create - - delete - get - - list - - patch - update - - watch - apiGroups: - - cluster.x-k8s.io + - infrastructure.cluster.x-k8s.io resources: - - machinesets/finalizers + - '*' verbs: - create - delete @@ -1831,47 +1777,29 @@ rules: - update - watch - apiGroups: - - cluster.x-k8s.io + - management.cattle.io resources: - - machinesets/status + - clusterregistrationtokens + - clusterregistrationtokens/status verbs: - - create - - delete - get - list - - patch - - update - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - create - - get - - update - apiGroups: - - infrastructure.cluster.x-k8s.io + - management.cattle.io resources: - - '*' + - clusters + - clusters/status verbs: - create - delete + - deletecollection - get - list - patch - update - watch -- apiGroups: - - management.cattle.io - resources: - - clusterregistrationtokens - - clusterregistrationtokens/status - verbs: - - get - - list - - watch - - create - apiGroups: - provisioning.cattle.io resources: diff --git a/charts/rancher-turtles/values.yaml b/charts/rancher-turtles/values.yaml index 54597edf..5932df4c 100644 --- a/charts/rancher-turtles/values.yaml +++ b/charts/rancher-turtles/values.yaml @@ -13,6 +13,8 @@ rancherTurtles: kubectlImage: rancher/kubectl rancher-kubeconfigs: label: true + managementv3-cluster: + enabled: false cluster-api-operator: 
enabled: true cert-manager: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 4b47323c..d07f6135 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -37,77 +37,23 @@ rules: - clusters/status verbs: - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - machinepools - verbs: - - create - - delete - get - list - patch - update - watch - apiGroups: - - cluster.x-k8s.io - resources: - - machinepools/finalizers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - machinepools/status - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - machines - - machines/status - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - cluster.x-k8s.io + - coordination.k8s.io resources: - - machinesets + - leases verbs: - create - - delete - get - - list - - patch - update - - watch - apiGroups: - - cluster.x-k8s.io + - infrastructure.cluster.x-k8s.io resources: - - machinesets/finalizers + - '*' verbs: - create - delete @@ -117,47 +63,29 @@ rules: - update - watch - apiGroups: - - cluster.x-k8s.io + - management.cattle.io resources: - - machinesets/status + - clusterregistrationtokens + - clusterregistrationtokens/status verbs: - - create - - delete - get - list - - patch - - update - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - create - - get - - update - apiGroups: - - infrastructure.cluster.x-k8s.io + - management.cattle.io resources: - - '*' + - clusters + - clusters/status verbs: - create - delete + - deletecollection - get - list - patch - update - watch -- apiGroups: - - management.cattle.io - resources: - - clusterregistrationtokens - - clusterregistrationtokens/status - verbs: - - get - - list - - watch - - create - apiGroups: - provisioning.cattle.io resources: diff --git a/feature/feature.go b/feature/feature.go index 35ba4478..c6ab49b4 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -25,6 +25,9 @@ const ( // RancherKubeSecretPatch is used to enable patching of the Rancher v2prov created kubeconfig // secrets so that they can be used with CAPI 1.5.x. RancherKubeSecretPatch featuregate.Feature = "rancher-kube-secret-patch" //nolint:gosec + + // ManagementV3Cluster is used to enable the management.cattle.io/v3 cluster resource. + ManagementV3Cluster featuregate.Feature = "managementv3-cluster" //nolint:gosec ) func init() { @@ -33,4 +36,5 @@ func init() { var defaultGates = map[featuregate.Feature]featuregate.FeatureSpec{ RancherKubeSecretPatch: {Default: false, PreRelease: featuregate.Beta}, + ManagementV3Cluster: {Default: false, PreRelease: featuregate.Beta}, } diff --git a/internal/controllers/helpers.go b/internal/controllers/helpers.go new file mode 100644 index 00000000..9c9cb4ed --- /dev/null +++ b/internal/controllers/helpers.go @@ -0,0 +1,213 @@ +/* +Copyright © 2023 - 2024 SUSE LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "bufio" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + yamlDecoder "k8s.io/apimachinery/pkg/util/yaml" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + utilyaml "sigs.k8s.io/cluster-api/util/yaml" + + managementv3 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/management/v3" + "github.com/rancher-sandbox/rancher-turtles/util" +) + +const ( + importLabelName = "cluster-api.cattle.io/rancher-auto-import" + ownedLabelName = "cluster-api.cattle.io/owned" + capiClusterOwner = "cluster-api.cattle.io/capi-cluster-owner" + capiClusterOwnerNamespace = "cluster-api.cattle.io/capi-cluster-owner-ns" + + defaultRequeueDuration = 1 * time.Minute +) + +func getClusterRegistrationManifest(ctx context.Context, clusterName, namespace string, cl client.Client, + insecureSkipVerify bool, +) (string, error) { + log := log.FromContext(ctx) + + token := &managementv3.ClusterRegistrationToken{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: managementv3.ClusterRegistrationTokenSpec{ + ClusterName: clusterName, + }, + } + err := cl.Get(ctx, client.ObjectKeyFromObject(token), token) + + if client.IgnoreNotFound(err) != nil { + return "", fmt.Errorf("error getting registration token for cluster %s: %w", clusterName, err) + } else if err != nil { + if err := cl.Create(ctx, token); err != nil { + return "", fmt.Errorf("failed to create cluster registration token for cluster %s: %w", clusterName, err) + } + } + + if token.Status.ManifestURL == "" { + return "", nil + } + + manifestData, err := downloadManifest(token.Status.ManifestURL, insecureSkipVerify) + if err != nil { + log.Error(err, "failed downloading import manifest") + return "", err + } + + return manifestData, nil +} + +func namespaceToCapiClusters(ctx context.Context, clusterPredicate predicate.Funcs, cl client.Client) handler.MapFunc { + log := log.FromContext(ctx) + + return func(_ context.Context, o client.Object) []ctrl.Request { + ns, ok := o.(*corev1.Namespace) + if !ok { + log.Error(nil, fmt.Sprintf("Expected a Namespace but got a %T", o)) + return nil + } + + if _, autoImport := util.ShouldImport(ns, importLabelName); !autoImport { + log.V(2).Info("Namespace doesn't have import annotation label with a true value, skipping") + return nil + } + + capiClusters := &clusterv1.ClusterList{} + if err := cl.List(ctx, capiClusters, client.InNamespace(o.GetNamespace())); err != nil { + log.Error(err, "getting capi cluster") + return nil + } + + if len(capiClusters.Items) == 0 { + log.V(2).Info("No CAPI clusters in namespace, no action") + return nil + } + + reqs := []ctrl.Request{} + + for _, cluster := range capiClusters.Items { + cluster := cluster + if !clusterPredicate.Generic(event.GenericEvent{Object: &cluster}) { + continue + } + + reqs = append(reqs, ctrl.Request{ + NamespacedName: client.ObjectKey{ + Namespace: cluster.Namespace, + Name: cluster.Name, + }, + }) + } + + return reqs + } +} + +func downloadManifest(url string, 
insecureSkipVerify bool) (string, error) { + client := &http.Client{Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: insecureSkipVerify, //nolint:gosec + }, + }} + + resp, err := client.Get(url) //nolint:gosec,noctx + if err != nil { + return "", fmt.Errorf("downloading manifest: %w", err) + } + defer resp.Body.Close() + + data, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("reading manifest: %w", err) + } + + return string(data), err +} + +func createImportManifest(ctx context.Context, remoteClient client.Client, in io.Reader) error { + reader := yamlDecoder.NewYAMLReader(bufio.NewReaderSize(in, 4096)) + + for { + raw, err := reader.Read() + if errors.Is(err, io.EOF) { + break + } + + if err != nil { + return err + } + + if err := createRawManifest(ctx, remoteClient, raw); err != nil { + return err + } + } + + return nil +} + +func createRawManifest(ctx context.Context, remoteClient client.Client, bytes []byte) error { + items, err := utilyaml.ToUnstructured(bytes) + if err != nil { + return fmt.Errorf("error unmarshalling bytes or empty object passed: %w", err) + } + + for _, obj := range items { + if err := createObject(ctx, remoteClient, obj.DeepCopy()); err != nil { + return err + } + } + + return nil +} + +func createObject(ctx context.Context, c client.Client, obj client.Object) error { + log := log.FromContext(ctx) + gvk := obj.GetObjectKind().GroupVersionKind() + + err := c.Create(ctx, obj) + if apierrors.IsAlreadyExists(err) { + log.V(4).Info("object already exists in remote cluster", "gvk", gvk, "name", obj.GetName(), "namespace", obj.GetNamespace()) + return nil + } + + if err != nil { + return fmt.Errorf("creating object in remote cluster: %w", err) + } + + log.V(4).Info("object was created", "gvk", gvk, "name", obj.GetName(), "namespace", obj.GetNamespace()) + + return nil +} diff --git a/internal/controllers/import_controller.go b/internal/controllers/import_controller.go index 12b756d3..bde0e5e9 100644 --- a/internal/controllers/import_controller.go +++ b/internal/controllers/import_controller.go @@ -17,22 +17,15 @@ limitations under the License. package controllers import ( - "bufio" "context" - "crypto/tls" - "errors" "fmt" - "io" - "net/http" "strings" - "time" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" errorutils "k8s.io/apimachinery/pkg/util/errors" - yamlDecoder "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -48,9 +41,7 @@ import ( "sigs.k8s.io/cluster-api/controllers/remote" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/predicates" - utilyaml "sigs.k8s.io/cluster-api/util/yaml" - managementv3 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/management/v3" provisioningv1 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/provisioning/v1" "github.com/rancher-sandbox/rancher-turtles/util" turtlesannotations "github.com/rancher-sandbox/rancher-turtles/util/annotations" @@ -58,13 +49,6 @@ import ( turtlespredicates "github.com/rancher-sandbox/rancher-turtles/util/predicates" ) -const ( - importLabelName = "cluster-api.cattle.io/rancher-auto-import" - ownedLabelName = "cluster-api.cattle.io/owned" - - defaultRequeueDuration = 1 * time.Minute -) - // CAPIImportReconciler represents a reconciler for importing CAPI clusters in Rancher. 
type CAPIImportReconciler struct { Client client.Client @@ -103,12 +87,11 @@ func (r *CAPIImportReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma return fmt.Errorf("creating new controller: %w", err) } - // Watch Rancher provisioningv2 clusters + // Watch Rancher provisioningv1 clusters // NOTE: we will import the types from rancher in the future err = c.Watch( source.Kind(mgr.GetCache(), &provisioningv1.Cluster{}), handler.EnqueueRequestsFromMapFunc(r.rancherClusterToCapiCluster(ctx, capiPredicates)), - //&handler.EnqueueRequestForOwner{OwnerType: &clusterv1.Cluster{}}, ) if err != nil { return fmt.Errorf("adding watch for Rancher cluster: %w", err) @@ -117,7 +100,7 @@ func (r *CAPIImportReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma ns := &corev1.Namespace{} err = c.Watch( source.Kind(mgr.GetCache(), ns), - handler.EnqueueRequestsFromMapFunc(r.namespaceToCapiClusters(ctx, capiPredicates)), + handler.EnqueueRequestsFromMapFunc(namespaceToCapiClusters(ctx, capiPredicates, r.Client)), ) if err != nil { @@ -136,14 +119,7 @@ func (r *CAPIImportReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma // +kubebuilder:rbac:groups="",resources=secrets;events;configmaps,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;create;update -// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch;create;update;delete;patch -// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools/status,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools/finalizers,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch;delete;create;update;patch -// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinesets,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinesets/status,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinesets/finalizers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch;update;patch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=provisioning.cattle.io,resources=clusters;clusters/status,verbs=get;list;watch;create;update;delete;patch // +kubebuilder:rbac:groups=management.cattle.io,resources=clusterregistrationtokens;clusterregistrationtokens/status,verbs=get;list;watch @@ -271,7 +247,7 @@ func (r *CAPIImportReconciler) reconcileNormal(ctx context.Context, capiCluster } // get the registration manifest - manifest, err := r.getClusterRegistrationManifest(ctx, rancherCluster.Status.ClusterName, capiCluster.Namespace) + manifest, err := getClusterRegistrationManifest(ctx, rancherCluster.Status.ClusterName, capiCluster.Namespace, r.RancherClient, r.InsecureSkipVerify) if err != nil { return ctrl.Result{}, err } @@ -288,7 +264,7 @@ func (r *CAPIImportReconciler) reconcileNormal(ctx context.Context, capiCluster return ctrl.Result{}, fmt.Errorf("getting remote cluster 
client: %w", err) } - if err := r.createImportManifest(ctx, remoteClient, strings.NewReader(manifest)); err != nil { + if err := createImportManifest(ctx, remoteClient, strings.NewReader(manifest)); err != nil { return ctrl.Result{}, fmt.Errorf("creating import manifest: %w", err) } @@ -297,61 +273,6 @@ func (r *CAPIImportReconciler) reconcileNormal(ctx context.Context, capiCluster return ctrl.Result{}, nil } -func (r *CAPIImportReconciler) reconcileDelete(ctx context.Context, capiCluster *clusterv1.Cluster) (ctrl.Result, error) { - log := log.FromContext(ctx) - log.Info("Reconciling rancher cluster deletion") - - // If the Rancher Cluster was already imported, then annotate the CAPI cluster so that we don't auto-import again. - log.Info(fmt.Sprintf("Rancher cluster is being removed, annotating CAPI cluster %s with %s", - capiCluster.Name, - turtlesannotations.ClusterImportedAnnotation)) - - annotations := capiCluster.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - - annotations[turtlesannotations.ClusterImportedAnnotation] = "true" - capiCluster.SetAnnotations(annotations) - - return ctrl.Result{}, nil -} - -func (r *CAPIImportReconciler) getClusterRegistrationManifest(ctx context.Context, clusterName, namespace string) (string, error) { - log := log.FromContext(ctx) - - token := &managementv3.ClusterRegistrationToken{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - Namespace: namespace, - }, - Spec: managementv3.ClusterRegistrationTokenSpec{ - ClusterName: clusterName, - }, - } - err := r.RancherClient.Get(ctx, client.ObjectKeyFromObject(token), token) - - if client.IgnoreNotFound(err) != nil { - return "", fmt.Errorf("error getting registration token for cluster %s: %w", clusterName, err) - } else if err != nil { - if err := r.RancherClient.Create(ctx, token); err != nil { - return "", fmt.Errorf("failed to create cluster registration token for cluster %s: %w", clusterName, err) - } - } - - if token.Status.ManifestURL == "" { - return "", nil - } - - manifestData, err := r.downloadManifest(token.Status.ManifestURL) - if err != nil { - log.Error(err, "failed downloading import manifest") - return "", err - } - - return manifestData, nil -} - func (r *CAPIImportReconciler) rancherClusterToCapiCluster(ctx context.Context, clusterPredicate predicate.Funcs) handler.MapFunc { log := log.FromContext(ctx) @@ -376,125 +297,22 @@ func (r *CAPIImportReconciler) rancherClusterToCapiCluster(ctx context.Context, } } -func (r *CAPIImportReconciler) namespaceToCapiClusters(ctx context.Context, clusterPredicate predicate.Funcs) handler.MapFunc { - log := log.FromContext(ctx) - - return func(_ context.Context, o client.Object) []ctrl.Request { - ns, ok := o.(*corev1.Namespace) - if !ok { - log.Error(nil, fmt.Sprintf("Expected a Namespace but got a %T", o)) - return nil - } - - _, autoImport := util.ShouldImport(ns, importLabelName) - if !autoImport { - log.V(2).Info("Namespace doesn't have import annotation label with a true value, skipping") - return nil - } - - capiClusters := &clusterv1.ClusterList{} - if err := r.Client.List(ctx, capiClusters, client.InNamespace(o.GetNamespace())); err != nil { - log.Error(err, "getting capi cluster") - return nil - } - - if len(capiClusters.Items) == 0 { - log.V(2).Info("No CAPI clusters in namespace, no action") - return nil - } - - reqs := []ctrl.Request{} - - for _, cluster := range capiClusters.Items { - cluster := cluster - if !clusterPredicate.Generic(event.GenericEvent{Object: &cluster}) { - continue - } - - reqs = 
append(reqs, ctrl.Request{ - NamespacedName: client.ObjectKey{ - Namespace: cluster.Namespace, - Name: cluster.Name, - }, - }) - } - - return reqs - } -} - -func (r *CAPIImportReconciler) downloadManifest(url string) (string, error) { - client := &http.Client{Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: r.InsecureSkipVerify, //nolint:gosec - }, - }} - - resp, err := client.Get(url) //nolint:gosec,noctx - if err != nil { - return "", fmt.Errorf("downloading manifest: %w", err) - } - defer resp.Body.Close() - - data, err := io.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("reading manifest: %w", err) - } - - return string(data), err -} - -func (r *CAPIImportReconciler) createImportManifest(ctx context.Context, remoteClient client.Client, in io.Reader) error { - reader := yamlDecoder.NewYAMLReader(bufio.NewReaderSize(in, 4096)) - - for { - raw, err := reader.Read() - if errors.Is(err, io.EOF) { - break - } - - if err != nil { - return err - } - - if err := r.createRawManifest(ctx, remoteClient, raw); err != nil { - return err - } - } - - return nil -} - -func (r *CAPIImportReconciler) createRawManifest(ctx context.Context, remoteClient client.Client, bytes []byte) error { - items, err := utilyaml.ToUnstructured(bytes) - if err != nil { - return fmt.Errorf("error unmarshalling bytes or empty object passed: %w", err) - } - - for _, obj := range items { - if err := r.createObject(ctx, remoteClient, obj.DeepCopy()); err != nil { - return err - } - } - - return nil -} - -func (r *CAPIImportReconciler) createObject(ctx context.Context, c client.Client, obj client.Object) error { +func (r *CAPIImportReconciler) reconcileDelete(ctx context.Context, capiCluster *clusterv1.Cluster) (ctrl.Result, error) { log := log.FromContext(ctx) - gvk := obj.GetObjectKind().GroupVersionKind() + log.Info("Reconciling rancher cluster deletion") - err := c.Create(ctx, obj) - if apierrors.IsAlreadyExists(err) { - log.V(4).Info("object already exists in remote cluster", "gvk", gvk, "name", obj.GetName(), "namespace", obj.GetNamespace()) - return nil - } + // If the Rancher Cluster was already imported, then annotate the CAPI cluster so that we don't auto-import again. + log.Info(fmt.Sprintf("Rancher cluster is being removed, annotating CAPI cluster %s with %s", + capiCluster.Name, + turtlesannotations.ClusterImportedAnnotation)) - if err != nil { - return fmt.Errorf("creating object in remote cluster: %w", err) + annotations := capiCluster.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} } - log.V(4).Info("object was created", "gvk", gvk, "name", obj.GetName(), "namespace", obj.GetNamespace()) + annotations[turtlesannotations.ClusterImportedAnnotation] = "true" + capiCluster.SetAnnotations(annotations) - return nil + return ctrl.Result{}, nil } diff --git a/internal/controllers/import_controller_test.go b/internal/controllers/import_controller_test.go index b388c346..801e9cc1 100644 --- a/internal/controllers/import_controller_test.go +++ b/internal/controllers/import_controller_test.go @@ -17,11 +17,7 @@ limitations under the License. 
package controllers import ( - "bufio" - "encoding/json" - "errors" "fmt" - "io" "net/http" "net/http/httptest" "strings" @@ -39,7 +35,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - yamlDecoder "k8s.io/apimachinery/pkg/util/yaml" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/remote" "sigs.k8s.io/cluster-api/util/secret" @@ -56,6 +51,7 @@ var _ = Describe("reconcile CAPI Cluster", func() { clusterRegistrationToken *managementv3.ClusterRegistrationToken capiKubeconfigSecret *corev1.Secret clusterName = "generated-rancher-cluster" + sampleTemplate string ) BeforeEach(func() { @@ -68,6 +64,11 @@ var _ = Describe("reconcile CAPI Cluster", func() { } Expect(cl.Update(ctx, ns)).To(Succeed()) + sampleTemplate = setTemplateParams( + testdata.ImportManifest, + map[string]string{"${TEST_CASE_NAME}": "provisioningv1"}, + ) + r = &CAPIImportReconciler{ Client: testEnv, RancherClient: testEnv, // rancher and rancher-turtles deployed in the same cluster @@ -108,7 +109,7 @@ var _ = Describe("reconcile CAPI Cluster", func() { }) AfterEach(func() { - objs, err := manifestToObjects(strings.NewReader(testdata.ImportManifest)) + objs, err := manifestToObjects(strings.NewReader(sampleTemplate)) clientObjs := []client.Object{ capiCluster, rancherCluster, @@ -182,7 +183,7 @@ var _ = Describe("reconcile CAPI Cluster", func() { It("should reconcile a CAPI cluster when rancher cluster exists", func() { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(testdata.ImportManifest)) + w.Write([]byte(sampleTemplate)) })) defer server.Close() @@ -211,7 +212,7 @@ var _ = Describe("reconcile CAPI Cluster", func() { }) g.Expect(err).ToNot(HaveOccurred()) - objs, err := manifestToObjects(strings.NewReader(testdata.ImportManifest)) + objs, err := manifestToObjects(strings.NewReader(sampleTemplate)) g.Expect(err).ToNot(HaveOccurred()) for _, obj := range objs { @@ -359,43 +360,3 @@ var _ = Describe("reconcile CAPI Cluster", func() { }).Should(Succeed()) }) }) - -func manifestToObjects(in io.Reader) ([]runtime.Object, error) { - var result []runtime.Object - - reader := yamlDecoder.NewYAMLReader(bufio.NewReaderSize(in, 4096)) - - for { - raw, err := reader.Read() - if errors.Is(err, io.EOF) { - break - } - - if err != nil { - return nil, err - } - - bytes, err := yamlDecoder.ToJSON(raw) - if err != nil { - return nil, err - } - - check := map[string]interface{}{} - if err := json.Unmarshal(bytes, &check); err != nil { - return nil, err - } - - if len(check) == 0 { - continue - } - - obj, _, err := unstructured.UnstructuredJSONScheme.Decode(bytes, nil, nil) - if err != nil { - return nil, err - } - - result = append(result, obj) - } - - return result, nil -} diff --git a/internal/controllers/import_controller_v3.go b/internal/controllers/import_controller_v3.go new file mode 100644 index 00000000..2f5c37fc --- /dev/null +++ b/internal/controllers/import_controller_v3.go @@ -0,0 +1,388 @@ +/* +Copyright © 2023 - 2024 SUSE LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + errorutils "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/external" + "sigs.k8s.io/cluster-api/controllers/remote" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/predicates" + + managementv3 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/management/v3" + "github.com/rancher-sandbox/rancher-turtles/util" + turtlesannotations "github.com/rancher-sandbox/rancher-turtles/util/annotations" + turtlespredicates "github.com/rancher-sandbox/rancher-turtles/util/predicates" +) + +// CAPIImportManagementV3Reconciler represents a reconciler for importing CAPI clusters in Rancher. +type CAPIImportManagementV3Reconciler struct { + Client client.Client + RancherClient client.Client + recorder record.EventRecorder + WatchFilterValue string + Scheme *runtime.Scheme + InsecureSkipVerify bool + + controller controller.Controller + externalTracker external.ObjectTracker + remoteClientGetter remote.ClusterClientGetter +} + +// SetupWithManager sets up reconciler with manager. +func (r *CAPIImportManagementV3Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + log := log.FromContext(ctx) + + if r.remoteClientGetter == nil { + r.remoteClientGetter = remote.NewClusterClient + } + + capiPredicates := predicates.All(log, + predicates.ResourceHasFilterLabel(log, r.WatchFilterValue), + turtlespredicates.ClusterWithoutImportedAnnotation(log), + turtlespredicates.ClusterWithReadyControlPlane(log), + turtlespredicates.ClusterOrNamespaceWithImportLabel(ctx, log, r.Client, importLabelName), + ) + + c, err := ctrl.NewControllerManagedBy(mgr). + For(&clusterv1.Cluster{}). + WithOptions(options). + WithEventFilter(capiPredicates). 
+ Build(r) + if err != nil { + return fmt.Errorf("creating new controller: %w", err) + } + + // Watch Rancher managementv3 clusters + if err := c.Watch( + source.Kind(mgr.GetCache(), &managementv3.Cluster{}), + handler.EnqueueRequestsFromMapFunc(r.rancherClusterToCapiCluster(ctx, capiPredicates)), + ); err != nil { + return fmt.Errorf("adding watch for Rancher cluster: %w", err) + } + + ns := &corev1.Namespace{} + err = c.Watch( + source.Kind(mgr.GetCache(), ns), + handler.EnqueueRequestsFromMapFunc(namespaceToCapiClusters(ctx, capiPredicates, r.Client)), + ) + + if err != nil { + return fmt.Errorf("adding watch for namespaces: %w", err) + } + + r.recorder = mgr.GetEventRecorderFor("rancher-turtles") + r.controller = c + r.externalTracker = external.ObjectTracker{ + Controller: c, + } + + return nil +} + +// +kubebuilder:rbac:groups="",resources=secrets;events;configmaps,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;create;update +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=management.cattle.io,resources=clusters;clusters/status,verbs=get;list;watch;create;update;delete;deletecollection;patch +// +kubebuilder:rbac:groups=management.cattle.io,resources=clusters;clusterregistrationtokens;clusterregistrationtokens/status,verbs=get;list;watch + +// Reconcile reconciles a CAPI cluster, creating a Rancher cluster if needed and applying the import manifests. +func (r *CAPIImportManagementV3Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, reterr error) { + log := log.FromContext(ctx) + log.Info("Reconciling CAPI cluster") + + capiCluster := &clusterv1.Cluster{} + if err := r.Client.Get(ctx, req.NamespacedName, capiCluster); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{Requeue: true}, nil + } + + return ctrl.Result{Requeue: true}, err + } + + if capiCluster.ObjectMeta.DeletionTimestamp.IsZero() && !turtlesannotations.HasClusterImportAnnotation(capiCluster) && + !controllerutil.ContainsFinalizer(capiCluster, managementv3.CapiClusterFinalizer) { + log.Info("capi cluster is imported, adding finalizer") + controllerutil.AddFinalizer(capiCluster, managementv3.CapiClusterFinalizer) + + if err := r.Client.Update(ctx, capiCluster); err != nil { + return ctrl.Result{}, fmt.Errorf("error adding finalizer: %w", err) + } + } + + log = log.WithValues("cluster", capiCluster.Name) + + // Wait for controlplane to be ready. This should never be false as the predicates + // do the filtering. + if !capiCluster.Status.ControlPlaneReady && !conditions.IsTrue(capiCluster, clusterv1.ControlPlaneReadyCondition) { + log.Info("clusters control plane is not ready, requeue") + return ctrl.Result{RequeueAfter: defaultRequeueDuration}, nil + } + + // Collect errors as an aggregate to return together after all patches have been performed. 
+ var errs []error + + result, err := r.reconcile(ctx, capiCluster) + if err != nil { + errs = append(errs, fmt.Errorf("error reconciling cluster: %w", err)) + } + + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + capiClusterCopy := capiCluster.DeepCopy() + + patchBase := client.MergeFromWithOptions(capiCluster, client.MergeFromWithOptimisticLock{}) + + if err := r.Client.Patch(ctx, capiClusterCopy, patchBase); err != nil { + errs = append(errs, fmt.Errorf("failed to patch cluster: %w", err)) + } + return nil + }); err != nil { + return ctrl.Result{}, err + } + + if len(errs) > 0 { + return ctrl.Result{}, errorutils.NewAggregate(errs) + } + + return result, nil +} + +func (r *CAPIImportManagementV3Reconciler) reconcile(ctx context.Context, capiCluster *clusterv1.Cluster) (ctrl.Result, error) { + log := log.FromContext(ctx) + + rancherCluster := &managementv3.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + capiClusterOwner: capiCluster.Name, + capiClusterOwnerNamespace: capiCluster.Namespace, + }, + }, + } + + rancherClusterList := &managementv3.ClusterList{} + selectors := []client.ListOption{ + client.MatchingLabels{ + capiClusterOwner: capiCluster.Name, + capiClusterOwnerNamespace: capiCluster.Namespace, + ownedLabelName: "", + }, + } + err := r.RancherClient.List(ctx, rancherClusterList, selectors...) + + if client.IgnoreNotFound(err) != nil { + log.Error(err, fmt.Sprintf("Unable to fetch rancher cluster %s", client.ObjectKeyFromObject(rancherCluster))) + return ctrl.Result{Requeue: true}, err + } + + if len(rancherClusterList.Items) != 0 { + if len(rancherClusterList.Items) > 1 { + log.Info("More than one rancher cluster found. Will default to using the first one.") + } + + rancherCluster = &rancherClusterList.Items[0] + } + + if !capiCluster.ObjectMeta.DeletionTimestamp.IsZero() { + if err := r.deleteDependentRancherCluster(ctx, capiCluster); err != nil { + return ctrl.Result{}, fmt.Errorf("error deleting associated managementv3.Cluster resources: %w", err) + } + } + + if !rancherCluster.ObjectMeta.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, capiCluster) + } + + return r.reconcileNormal(ctx, capiCluster, rancherCluster) +} + +func (r *CAPIImportManagementV3Reconciler) reconcileNormal(ctx context.Context, capiCluster *clusterv1.Cluster, + rancherCluster *managementv3.Cluster, +) (ctrl.Result, error) { + log := log.FromContext(ctx) + + err := r.RancherClient.Get(ctx, client.ObjectKeyFromObject(rancherCluster), rancherCluster) + if apierrors.IsNotFound(err) { + shouldImport, err := util.ShouldAutoImport(ctx, log, r.Client, capiCluster, importLabelName) + if err != nil { + return ctrl.Result{}, err + } + + if !shouldImport { + log.Info("not auto importing cluster as namespace or cluster isn't marked auto import") + return ctrl.Result{}, nil + } + + if err := r.RancherClient.Create(ctx, &managementv3.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: capiCluster.Namespace, + GenerateName: "c-", + Labels: map[string]string{ + capiClusterOwner: capiCluster.Name, + capiClusterOwnerNamespace: capiCluster.Namespace, + ownedLabelName: "", + }, + }, + Spec: managementv3.ClusterSpec{ + DisplayName: capiCluster.Name, + Description: "CAPI cluster imported to Rancher", + }, + }); err != nil { + return ctrl.Result{}, fmt.Errorf("error creating rancher cluster: %w", err) + } + + return ctrl.Result{Requeue: true}, nil + } + + if err != nil { + log.Error(err, fmt.Sprintf("Unable to fetch rancher cluster %s", 
client.ObjectKeyFromObject(rancherCluster))) + + return ctrl.Result{}, err + } + + if conditions.IsTrue(rancherCluster, managementv3.ClusterConditionAgentDeployed) { + log.Info("agent already deployed, no action needed") + return ctrl.Result{}, nil + } + + // get the registration manifest + manifest, err := getClusterRegistrationManifest(ctx, rancherCluster.Name, rancherCluster.Name, r.RancherClient, r.InsecureSkipVerify) + if err != nil { + return ctrl.Result{}, err + } + + if manifest == "" { + log.Info("Import manifest URL not set yet, requeue") + return ctrl.Result{Requeue: true}, nil + } + + log.Info("Creating import manifest") + + remoteClient, err := r.remoteClientGetter(ctx, capiCluster.Name, r.Client, client.ObjectKeyFromObject(capiCluster)) + if err != nil { + return ctrl.Result{}, fmt.Errorf("getting remote cluster client: %w", err) + } + + if err := createImportManifest(ctx, remoteClient, strings.NewReader(manifest)); err != nil { + return ctrl.Result{}, fmt.Errorf("creating import manifest: %w", err) + } + + log.Info("Successfully applied import manifest") + + return ctrl.Result{}, nil +} + +func (r *CAPIImportManagementV3Reconciler) rancherClusterToCapiCluster(ctx context.Context, clusterPredicate predicate.Funcs) handler.MapFunc { + log := log.FromContext(ctx) + + return func(_ context.Context, cluster client.Object) []ctrl.Request { + labels := cluster.GetLabels() + if _, ok := labels[capiClusterOwner]; !ok { + log.Error(fmt.Errorf("missing label %s", capiClusterOwner), "getting rancher cluster labels") + return nil + } + + if _, ok := labels[capiClusterOwnerNamespace]; !ok { + log.Error(fmt.Errorf("missing label %s", capiClusterOwnerNamespace), "getting rancher cluster labels") + return nil + } + + capiCluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{ + Name: labels[capiClusterOwner], + Namespace: labels[capiClusterOwnerNamespace], + }} + + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(capiCluster), capiCluster); err != nil { + if !apierrors.IsNotFound(err) { + log.Error(err, "getting capi cluster") + } + + return nil + } + + if !clusterPredicate.Generic(event.GenericEvent{Object: capiCluster}) { + return nil + } + + return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: capiCluster.Namespace, Name: capiCluster.Name}}} + } +} + +func (r *CAPIImportManagementV3Reconciler) reconcileDelete(ctx context.Context, capiCluster *clusterv1.Cluster) (ctrl.Result, error) { + log := log.FromContext(ctx) + log.Info("Reconciling rancher cluster deletion") + + // If the Rancher Cluster was already imported, then annotate the CAPI cluster so that we don't auto-import again. 
+ log.Info(fmt.Sprintf("Rancher cluster is being removed, annotating CAPI cluster %s with %s", + capiCluster.Name, + turtlesannotations.ClusterImportedAnnotation)) + + annotations := capiCluster.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + + annotations[turtlesannotations.ClusterImportedAnnotation] = "true" + capiCluster.SetAnnotations(annotations) + + if controllerutil.ContainsFinalizer(capiCluster, managementv3.CapiClusterFinalizer) { + controllerutil.RemoveFinalizer(capiCluster, managementv3.CapiClusterFinalizer) + + if err := r.Client.Update(ctx, capiCluster); err != nil { + return ctrl.Result{}, fmt.Errorf("error removing finalizer: %w", err) + } + } + + return ctrl.Result{}, nil +} + +func (r *CAPIImportManagementV3Reconciler) deleteDependentRancherCluster(ctx context.Context, capiCluster *clusterv1.Cluster) error { + log := log.FromContext(ctx) + log.Info("capi cluster is being deleted, deleting dependent rancher cluster") + + selectors := []client.DeleteAllOfOption{ + client.MatchingLabels{ + capiClusterOwner: capiCluster.Name, + capiClusterOwnerNamespace: capiCluster.Namespace, + ownedLabelName: "", + }, + } + + return r.RancherClient.DeleteAllOf(ctx, &managementv3.Cluster{}, selectors...) +} diff --git a/internal/controllers/import_controller_v3_test.go b/internal/controllers/import_controller_v3_test.go new file mode 100644 index 00000000..ea7d0d22 --- /dev/null +++ b/internal/controllers/import_controller_v3_test.go @@ -0,0 +1,443 @@ +/* +Copyright © 2023 - 2024 SUSE LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "fmt" + "net/http" + "net/http/httptest" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/rancher-sandbox/rancher-turtles/internal/controllers/testdata" + managementv3 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/management/v3" + "github.com/rancher-sandbox/rancher-turtles/internal/test" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/remote" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/secret" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("reconcile CAPI Cluster", func() { + var ( + r *CAPIImportManagementV3Reconciler + ns *corev1.Namespace + capiCluster *clusterv1.Cluster + rancherClusters *managementv3.ClusterList + rancherCluster *managementv3.Cluster + clusterRegistrationToken *managementv3.ClusterRegistrationToken + capiKubeconfigSecret *corev1.Secret + selectors []client.ListOption + capiClusterName = "generated-rancher-cluster" + sampleTemplate string + ) + BeforeEach(func() { + var err error + + ns, err = testEnv.CreateNamespace(ctx, "commonns") + Expect(err).ToNot(HaveOccurred()) + ns.Labels = map[string]string{ + importLabelName: "true", + } + Expect(cl.Update(ctx, ns)).To(Succeed()) + + sampleTemplate = setTemplateParams( + testdata.ImportManifest, + map[string]string{"${TEST_CASE_NAME}": "mgmtv3"}, + ) + + r = &CAPIImportManagementV3Reconciler{ + Client: cl, + RancherClient: cl, // rancher and rancher-turtles deployed in the same cluster + remoteClientGetter: remote.NewClusterClient, + Scheme: testEnv.GetScheme(), + } + + capiCluster = &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: capiClusterName, + Namespace: ns.Name, + }, + } + + rancherCluster = &managementv3.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: capiCluster.Namespace, + GenerateName: "c-", + Labels: map[string]string{ + capiClusterOwner: capiCluster.Name, + capiClusterOwnerNamespace: capiCluster.Namespace, + ownedLabelName: "", + }, + }, + } + + rancherClusters = &managementv3.ClusterList{} + + selectors = []client.ListOption{ + client.MatchingLabels{ + capiClusterOwner: capiCluster.Name, + capiClusterOwnerNamespace: capiCluster.Namespace, + }, + } + + clusterRegistrationToken = &managementv3.ClusterRegistrationToken{ + ObjectMeta: metav1.ObjectMeta{}, + } + + capiKubeconfigSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-kubeconfig", capiCluster.Name), + Namespace: ns.Name, + }, + Data: map[string][]byte{ + secret.KubeconfigDataName: kubeConfigBytes, + }, + } + }) + + AfterEach(func() { + objs, err := manifestToObjects(strings.NewReader(sampleTemplate)) + clientObjs := []client.Object{ + capiCluster, + rancherCluster, + clusterRegistrationToken, + capiKubeconfigSecret, + } + for _, obj := range objs { + clientObj, ok := obj.(client.Object) + Expect(ok).To(BeTrue()) + clientObjs = append(clientObjs, clientObj) + } + Expect(err).ToNot(HaveOccurred()) + Expect(test.CleanupAndWait(ctx, cl, clientObjs...)).To(Succeed()) + Expect(testEnv.Cleanup(ctx, ns)).To(Succeed()) + for _, cluster := range rancherClusters.Items { + testEnv.Cleanup(ctx, &cluster) + testEnv.Cleanup(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: cluster.Name}, + }, + ) + } + }) + + It("should reconcile a CAPI cluster when control plane not ready", func() { + 
Expect(cl.Create(ctx, capiCluster)).To(Succeed()) + + Eventually(ctx, func(g Gomega) { + res, err := r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: capiCluster.Namespace, + Name: capiCluster.Name, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.RequeueAfter).To(Equal(defaultRequeueDuration)) + }).Should(Succeed()) + }) + + It("should reconcile a CAPI cluster when rancher cluster doesn't exist", func() { + ns.Labels = map[string]string{} + Expect(cl.Update(ctx, ns)).To(Succeed()) + capiCluster.Labels = map[string]string{ + importLabelName: "true", + } + Expect(cl.Create(ctx, capiCluster)).To(Succeed()) + capiCluster.Status.ControlPlaneReady = true + Expect(cl.Status().Update(ctx, capiCluster)).To(Succeed()) + + Eventually(func(g Gomega) { + res, err := r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: capiCluster.Namespace, + Name: capiCluster.Name, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Requeue).To(BeTrue()) + }).Should(Succeed()) + + Eventually(ctx, func(g Gomega) { + g.Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + g.Expect(rancherClusters.Items).To(HaveLen(1)) + }).Should(Succeed()) + Expect(rancherClusters.Items[0].Name).To(ContainSubstring("c-")) + }) + + It("should reconcile a CAPI cluster when rancher cluster doesn't exist and annotation is set on the namespace", func() { + Expect(cl.Create(ctx, capiCluster)).To(Succeed()) + capiCluster.Status.ControlPlaneReady = true + Expect(cl.Status().Update(ctx, capiCluster)).To(Succeed()) + + Eventually(func(g Gomega) { + res, err := r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: capiCluster.Namespace, + Name: capiCluster.Name, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Requeue).To(BeTrue()) + }).Should(Succeed()) + + Eventually(ctx, func(g Gomega) { + g.Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + g.Expect(rancherClusters.Items).To(HaveLen(1)) + }).Should(Succeed()) + Expect(rancherClusters.Items[0].Name).To(ContainSubstring("c-")) + }) + + It("should reconcile a CAPI cluster when rancher cluster exists", func() { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(sampleTemplate)) + })) + defer server.Close() + + Expect(cl.Create(ctx, capiCluster)).To(Succeed()) + capiCluster.Status.ControlPlaneReady = true + Expect(cl.Status().Update(ctx, capiCluster)).To(Succeed()) + + Expect(cl.Create(ctx, capiKubeconfigSecret)).To(Succeed()) + + Expect(cl.Create(ctx, rancherCluster)).To(Succeed()) + + Eventually(ctx, func(g Gomega) { + g.Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + g.Expect(rancherClusters.Items).To(HaveLen(1)) + }).Should(Succeed()) + cluster := rancherClusters.Items[0] + Expect(cluster.Name).To(ContainSubstring("c-")) + + clusterRegistrationToken.Name = cluster.Name + clusterRegistrationToken.Namespace = cluster.Name + _, err := testEnv.CreateNamespaceWithName(ctx, cluster.Name) + Expect(err).ToNot(HaveOccurred()) + Expect(cl.Create(ctx, clusterRegistrationToken)).To(Succeed()) + token := clusterRegistrationToken.DeepCopy() + token.Status.ManifestURL = server.URL + Expect(cl.Status().Update(ctx, token)).To(Succeed()) + + Eventually(ctx, func(g Gomega) { + _, err := r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: capiCluster.Namespace, + 
Name: capiCluster.Name, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + + objs, err := manifestToObjects(strings.NewReader(sampleTemplate)) + g.Expect(err).ToNot(HaveOccurred()) + + for _, obj := range objs { + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + + unstructuredObj := &unstructured.Unstructured{} + unstructuredObj.SetUnstructuredContent(u) + unstructuredObj.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind()) + + g.Expect(cl.Get(ctx, client.ObjectKey{ + Namespace: unstructuredObj.GetNamespace(), + Name: unstructuredObj.GetName(), + }, unstructuredObj)).To(Succeed()) + } + }, 10*time.Second).Should(Succeed()) + }) + + It("should reconcile a CAPI cluster when rancher cluster exists but cluster name not set", func() { + Expect(cl.Create(ctx, capiCluster)).To(Succeed()) + capiCluster.Status.ControlPlaneReady = true + Expect(cl.Status().Update(ctx, capiCluster)).To(Succeed()) + Expect(cl.Create(ctx, rancherCluster)).To(Succeed()) + + Eventually(ctx, func(g Gomega) { + g.Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + g.Expect(rancherClusters.Items).To(HaveLen(1)) + }).Should(Succeed()) + cluster := rancherClusters.Items[0] + Expect(cluster.Name).To(ContainSubstring("c-")) + + _, err := testEnv.CreateNamespaceWithName(ctx, cluster.Name) + Expect(err).ToNot(HaveOccurred()) + + Eventually(ctx, func(g Gomega) { + res, err := r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: capiCluster.Namespace, + Name: capiCluster.Name, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Requeue).To(BeTrue()) + }).Should(Succeed()) + }) + + It("should reconcile a CAPI cluster when rancher cluster exists and agent is deployed", func() { + Expect(cl.Create(ctx, capiCluster)).To(Succeed()) + capiCluster.Status.ControlPlaneReady = true + Expect(cl.Status().Update(ctx, capiCluster)).To(Succeed()) + + Expect(cl.Create(ctx, rancherCluster)).To(Succeed()) + + Eventually(ctx, func(g Gomega) { + g.Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + g.Expect(rancherClusters.Items).To(HaveLen(1)) + }).Should(Succeed()) + cluster := rancherClusters.Items[0] + Expect(cluster.Name).To(ContainSubstring("c-")) + + conditions.Set(&cluster, conditions.TrueCondition(managementv3.ClusterConditionAgentDeployed)) + Expect(conditions.IsTrue(&cluster, managementv3.ClusterConditionAgentDeployed)).To(BeTrue()) + Expect(cl.Status().Update(ctx, &cluster)).To(Succeed()) + + _, err := r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: capiCluster.Namespace, + Name: capiCluster.Name, + }, + }) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should reconcile a CAPI cluster when rancher cluster exists and registration manifests not exist", func() { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte("")) + })) + defer server.Close() + + Expect(cl.Create(ctx, capiCluster)).To(Succeed()) + capiCluster.Status.ControlPlaneReady = true + Expect(cl.Status().Update(ctx, capiCluster)).To(Succeed()) + + Expect(cl.Create(ctx, capiKubeconfigSecret)).To(Succeed()) + + Expect(cl.Create(ctx, rancherCluster)).To(Succeed()) + + Eventually(ctx, func(g Gomega) { + g.Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + g.Expect(rancherClusters.Items).To(HaveLen(1)) + }).Should(Succeed()) + cluster := rancherClusters.Items[0] + 
Expect(cluster.Name).To(ContainSubstring("c-")) + + clusterRegistrationToken.Name = cluster.Name + clusterRegistrationToken.Namespace = cluster.Name + _, err := testEnv.CreateNamespaceWithName(ctx, cluster.Name) + Expect(err).ToNot(HaveOccurred()) + Expect(cl.Create(ctx, clusterRegistrationToken)).To(Succeed()) + token := clusterRegistrationToken.DeepCopy() + token.Status.ManifestURL = server.URL + Expect(cl.Status().Update(ctx, token)).To(Succeed()) + + Eventually(ctx, func(g Gomega) { + res, err := r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: capiCluster.Namespace, + Name: capiCluster.Name, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Requeue).To(BeTrue()) + }).Should(Succeed()) + }) + + It("should reconcile a CAPI cluster when rancher cluster exists and a cluster registration token does not exist", func() { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte("")) + })) + defer server.Close() + + Expect(cl.Create(ctx, capiCluster)).To(Succeed()) + capiCluster.Status.ControlPlaneReady = true + Expect(cl.Status().Update(ctx, capiCluster)).To(Succeed()) + + Expect(cl.Create(ctx, capiKubeconfigSecret)).To(Succeed()) + + Expect(cl.Create(ctx, rancherCluster)).To(Succeed()) + + Eventually(ctx, func(g Gomega) { + g.Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + g.Expect(rancherClusters.Items).To(HaveLen(1)) + }).Should(Succeed()) + cluster := rancherClusters.Items[0] + Expect(cluster.Name).To(ContainSubstring("c-")) + + clusterRegistrationToken.Name = cluster.Name + clusterRegistrationToken.Namespace = cluster.Name + _, err := testEnv.CreateNamespaceWithName(ctx, cluster.Name) + Expect(err).ToNot(HaveOccurred()) + + Eventually(ctx, func(g Gomega) { + res, err := r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: capiCluster.Namespace, + Name: capiCluster.Name, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Requeue).To(BeTrue()) + g.Expect(cl.Get(ctx, client.ObjectKeyFromObject(clusterRegistrationToken), clusterRegistrationToken)).ToNot(HaveOccurred()) + }).Should(Succeed()) + }) + + It("should reconcile a CAPI cluster when rancher cluster exists and registration manifests url is empty", func() { + Expect(cl.Create(ctx, capiCluster)).To(Succeed()) + capiCluster.Status.ControlPlaneReady = true + Expect(cl.Status().Update(ctx, capiCluster)).To(Succeed()) + + Expect(cl.Create(ctx, capiKubeconfigSecret)).To(Succeed()) + + Expect(cl.Create(ctx, rancherCluster)).To(Succeed()) + + Eventually(ctx, func(g Gomega) { + g.Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + g.Expect(rancherClusters.Items).To(HaveLen(1)) + }).Should(Succeed()) + cluster := rancherClusters.Items[0] + Expect(cluster.Name).To(ContainSubstring("c-")) + + clusterRegistrationToken.Name = cluster.Name + clusterRegistrationToken.Namespace = cluster.Name + _, err := testEnv.CreateNamespaceWithName(ctx, cluster.Name) + Expect(err).ToNot(HaveOccurred()) + Expect(cl.Create(ctx, clusterRegistrationToken)).To(Succeed()) + + Eventually(ctx, func(g Gomega) { + res, err := r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: capiCluster.Namespace, + Name: capiCluster.Name, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Requeue).To(BeTrue()) + }).Should(Succeed()) + }) +}) diff --git a/internal/controllers/testdata/import_sample.yaml 
b/internal/controllers/testdata/import_sample.yaml index 2a9f9888..f8169724 100644 --- a/internal/controllers/testdata/import_sample.yaml +++ b/internal/controllers/testdata/import_sample.yaml @@ -30,7 +30,7 @@ subjects: apiVersion: v1 kind: Namespace metadata: - name: cattle-system + name: cattle-system-${TEST_CASE_NAME} --- @@ -38,7 +38,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: cattle - namespace: cattle-system + namespace: cattle-system-${TEST_CASE_NAME} --- @@ -46,13 +46,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cattle-admin-binding - namespace: cattle-system + namespace: cattle-system-${TEST_CASE_NAME} labels: cattle.io/creator: "norman" subjects: - kind: ServiceAccount name: cattle - namespace: cattle-system + namespace: cattle-system-${TEST_CASE_NAME} roleRef: kind: ClusterRole name: cattle-admin @@ -64,7 +64,7 @@ apiVersion: v1 kind: Secret metadata: name: cattle-credentials-12aee23 - namespace: cattle-system + namespace: cattle-system-${TEST_CASE_NAME} type: Opaque data: url: "aHR0cHM6Ly90aGlzaXNhdGVzdA==" @@ -97,7 +97,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: cattle-cluster-agent - namespace: cattle-system + namespace: cattle-system-${TEST_CASE_NAME} annotations: management.cattle.io/scale-available: "2" spec: @@ -213,7 +213,7 @@ apiVersion: v1 kind: Service metadata: name: cattle-cluster-agent - namespace: cattle-system + namespace: cattle-system-${TEST_CASE_NAME} spec: ports: - port: 80 diff --git a/internal/controllers/testutils.go b/internal/controllers/testutils.go new file mode 100644 index 00000000..4a7c45b3 --- /dev/null +++ b/internal/controllers/testutils.go @@ -0,0 +1,77 @@ +/* +Copyright © 2023 - 2024 SUSE LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package controllers
+
+import (
+	"bufio"
+	"encoding/json"
+	"errors"
+	"io"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	yamlDecoder "k8s.io/apimachinery/pkg/util/yaml"
+)
+
+func setTemplateParams(template string, params map[string]string) string {
+	for k, v := range params {
+		template = strings.ReplaceAll(template, k, v)
+	}
+
+	return template
+}
+
+func manifestToObjects(in io.Reader) ([]runtime.Object, error) {
+	var result []runtime.Object
+
+	reader := yamlDecoder.NewYAMLReader(bufio.NewReaderSize(in, 4096))
+
+	for {
+		raw, err := reader.Read()
+		if errors.Is(err, io.EOF) {
+			break
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		bytes, err := yamlDecoder.ToJSON(raw)
+		if err != nil {
+			return nil, err
+		}
+
+		check := map[string]interface{}{}
+		if err := json.Unmarshal(bytes, &check); err != nil {
+			return nil, err
+		}
+
+		if len(check) == 0 {
+			continue
+		}
+
+		obj, _, err := unstructured.UnstructuredJSONScheme.Decode(bytes, nil, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		result = append(result, obj)
+	}
+
+	return result, nil
+}
diff --git a/internal/rancher/management/v3/cluster.go b/internal/rancher/management/v3/cluster.go
new file mode 100644
index 00000000..74d3dd86
--- /dev/null
+++ b/internal/rancher/management/v3/cluster.go
@@ -0,0 +1,77 @@
+/*
+Copyright © 2023 - 2024 SUSE LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v3
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+const (
+	// ClusterConditionAgentDeployed is the condition type for the agent deployed condition.
+	ClusterConditionAgentDeployed clusterv1.ConditionType = "AgentDeployed"
+	// ClusterConditionReady is the condition type for the ready condition.
+	ClusterConditionReady clusterv1.ConditionType = "Ready"
+	// CapiClusterFinalizer is the finalizer applied to capi clusters.
+	CapiClusterFinalizer = "capicluster.turtles.cattle.io"
+)
+
+// Cluster is the struct representing a Rancher Cluster.
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+type Cluster struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ClusterSpec   `json:"spec"`
+	Status ClusterStatus `json:"status,omitempty"`
+}
+
+// ClusterSpec is the struct representing the specification of a Rancher Cluster.
+type ClusterSpec struct {
+	DisplayName        string `json:"displayName,omitempty"`
+	Description        string `json:"description,omitempty"`
+	FleetWorkspaceName string `json:"fleetWorkspaceName,omitempty"`
+}
+
+// ClusterStatus is the struct representing the status of a Rancher Cluster.
+type ClusterStatus struct {
+	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+}
+
+// ClusterList contains a list of Clusters.
+// +kubebuilder:object:root=true +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// GetConditions method to implement capi conditions getter interface. +func (c *Cluster) GetConditions() clusterv1.Conditions { + return c.Status.Conditions +} + +// SetConditions method to implement capi conditions setter interface. +func (c *Cluster) SetConditions(conditions clusterv1.Conditions) { + c.Status.Conditions = conditions +} + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/internal/rancher/management/v3/zz_generated.deepcopy.go b/internal/rancher/management/v3/zz_generated.deepcopy.go index 179e2f7d..d04c69fb 100644 --- a/internal/rancher/management/v3/zz_generated.deepcopy.go +++ b/internal/rancher/management/v3/zz_generated.deepcopy.go @@ -23,8 +23,68 @@ package v3 import ( runtime "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cluster-api/api/v1beta1" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterRegistrationToken) DeepCopyInto(out *ClusterRegistrationToken) { *out = *in @@ -113,3 +173,40 @@ func (in *ClusterRegistrationTokenStatus) DeepCopy() *ClusterRegistrationTokenSt in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. 
+func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} diff --git a/internal/test/helpers/envtest.go b/internal/test/helpers/envtest.go index 743e54b6..dac436e6 100644 --- a/internal/test/helpers/envtest.go +++ b/internal/test/helpers/envtest.go @@ -116,6 +116,20 @@ func (t *TestEnvironment) CreateNamespace(ctx context.Context, generateName stri return ns, nil } +// CreateNamespaceWithName creates a new namespace with a given name. +func (t *TestEnvironment) CreateNamespaceWithName(ctx context.Context, name string) (*corev1.Namespace, error) { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + if err := t.Client.Create(ctx, ns); err != nil { + return nil, err + } + + return ns, nil +} + // NewTestEnvironmentConfiguration creates a new test environment configuration for running tests. func NewTestEnvironmentConfiguration(crdDirectoryPaths ...string) *TestEnvironmentConfiguration { resolvedCrdDirectoryPaths := []string{} diff --git a/main.go b/main.go index 1b61f125..6d1d1849 100644 --- a/main.go +++ b/main.go @@ -196,17 +196,36 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { os.Exit(1) } - if err := (&controllers.CAPIImportReconciler{ - Client: mgr.GetClient(), - RancherClient: rancherClient, - WatchFilterValue: watchFilterValue, - InsecureSkipVerify: insecureSkipVerify, - }).SetupWithManager(ctx, mgr, controller.Options{ - MaxConcurrentReconciles: concurrencyNumber, - CacheSyncTimeout: maxDuration, - }); err != nil { - setupLog.Error(err, "unable to create capi controller") - os.Exit(1) + if feature.Gates.Enabled(feature.ManagementV3Cluster) { + setupLog.Info("enabling CAPI cluster import controller for `management.cattle.io/v3` resources") + + if err := (&controllers.CAPIImportManagementV3Reconciler{ + Client: mgr.GetClient(), + RancherClient: rancherClient, + WatchFilterValue: watchFilterValue, + InsecureSkipVerify: insecureSkipVerify, + }).SetupWithManager(ctx, mgr, controller.Options{ + MaxConcurrentReconciles: concurrencyNumber, + CacheSyncTimeout: maxDuration, + }); err != nil { + setupLog.Error(err, "unable to create capi controller") + os.Exit(1) + } + } else { + setupLog.Info("enabling CAPI cluster import controller for `provisioning.cattle.io/v1` resources") + + if err := (&controllers.CAPIImportReconciler{ + Client: mgr.GetClient(), + RancherClient: rancherClient, + WatchFilterValue: watchFilterValue, + InsecureSkipVerify: insecureSkipVerify, + }).SetupWithManager(ctx, mgr, controller.Options{ + MaxConcurrentReconciles: concurrencyNumber, + CacheSyncTimeout: maxDuration, + }); err != nil { + setupLog.Error(err, "unable to create capi controller") + os.Exit(1) + } } if feature.Gates.Enabled(feature.RancherKubeSecretPatch) { diff --git a/test/e2e/const.go b/test/e2e/const.go index 30b6a0bd..fe70b245 100644 --- 
a/test/e2e/const.go +++ b/test/e2e/const.go @@ -127,4 +127,8 @@ const ( FullTestLabel = "full" DontRunLabel = "dontrun" LocalTestLabel = "local" + + CapiClusterOwnerLabel = "cluster-api.cattle.io/capi-cluster-owner" + CapiClusterOwnerNamespaceLabel = "cluster-api.cattle.io/capi-cluster-owner-ns" + OwnedLabelName = "cluster-api.cattle.io/owned" ) diff --git a/test/e2e/specs/import_gitops.go b/test/e2e/specs/import_gitops.go index e7e9bcbc..3d9b6600 100644 --- a/test/e2e/specs/import_gitops.go +++ b/test/e2e/specs/import_gitops.go @@ -28,6 +28,7 @@ import ( "strconv" . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -274,7 +275,6 @@ func CreateUsingGitOpsSpec(ctx context.Context, inputGetter func() CreateUsingGi }, rancherConnectRes) Expect(rancherConnectRes.Error).NotTo(HaveOccurred(), "Failed getting nodes with Rancher Kubeconfig") Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes return non-zero exit code") - }) AfterEach(func() { diff --git a/test/e2e/specs/import_gitops_mgmtv3.go b/test/e2e/specs/import_gitops_mgmtv3.go new file mode 100644 index 00000000..e9ae85b6 --- /dev/null +++ b/test/e2e/specs/import_gitops_mgmtv3.go @@ -0,0 +1,318 @@ +//go:build e2e +// +build e2e + +/* +Copyright © 2023 - 2024 SUSE LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package specs + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "strconv" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest/komega" + + managementv3 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/management/v3" + "github.com/rancher-sandbox/rancher-turtles/test/e2e" + turtlesframework "github.com/rancher-sandbox/rancher-turtles/test/framework" + "github.com/rancher-sandbox/rancher-turtles/test/testenv" +) + +type CreateMgmtV3UsingGitOpsSpecInput struct { + E2EConfig *clusterctl.E2EConfig + BootstrapClusterProxy framework.ClusterProxy + ClusterctlConfigPath string + ArtifactFolder string + RancherServerURL string + + ClusterctlBinaryPath string + ClusterTemplate []byte + ClusterName string + AdditionalTemplateVariables map[string]string + + CAPIClusterCreateWaitName string + DeleteClusterWaitName string + + // ControlPlaneMachineCount defines the number of control plane machines to be added to the workload cluster. + // If not specified, 1 will be used. + ControlPlaneMachineCount *int + + // WorkerMachineCount defines number of worker machines to be added to the workload cluster. + // If not specified, 1 will be used. 
+	WorkerMachineCount *int
+
+	GitAddr           string
+	GitAuthSecretName string
+
+	SkipCleanup      bool
+	SkipDeletionTest bool
+
+	LabelNamespace bool
+
+	// management.cattle.io specific label names.
+	CapiClusterOwnerLabel          string
+	CapiClusterOwnerNamespaceLabel string
+	OwnedLabelName                 string
+}
+
+// CreateMgmtV3UsingGitOpsSpec implements a spec that will create a cluster via Fleet and test that it
+// automatically imports into Rancher Manager.
+func CreateMgmtV3UsingGitOpsSpec(ctx context.Context, inputGetter func() CreateMgmtV3UsingGitOpsSpecInput) {
+	var (
+		specName              = "creategitops"
+		input                 CreateMgmtV3UsingGitOpsSpecInput
+		namespace             *corev1.Namespace
+		repoName              string
+		cancelWatches         context.CancelFunc
+		capiCluster           *types.NamespacedName
+		rancherKubeconfig     *turtlesframework.RancherGetClusterKubeconfigResult
+		originalKubeconfig    *turtlesframework.RancherGetClusterKubeconfigResult
+		rancherConnectRes     *turtlesframework.RunCommandResult
+		rancherCluster        *managementv3.Cluster
+		capiClusterCreateWait []interface{}
+		deleteClusterWait     []interface{}
+	)
+
+	BeforeEach(func() {
+		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
+		input = inputGetter()
+		Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
+		Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
+		Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
+		Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
+
+		Expect(input.E2EConfig.Variables).To(HaveKey(e2e.KubernetesManagementVersionVar))
+		namespace, cancelWatches = e2e.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
+		repoName = e2e.CreateRepoName(specName)
+
+		capiClusterCreateWait = input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), input.CAPIClusterCreateWaitName)
+		Expect(capiClusterCreateWait).ToNot(BeNil(), "Failed to get wait intervals %s", input.CAPIClusterCreateWaitName)
+
+		deleteClusterWait = input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), input.DeleteClusterWaitName)
+		Expect(deleteClusterWait).ToNot(BeNil(), "Failed to get wait intervals %s", input.DeleteClusterWaitName)
+
+		capiCluster = &types.NamespacedName{
+			Namespace: namespace.Name,
+			Name:      input.ClusterName,
+		}
+
+		rancherKubeconfig = new(turtlesframework.RancherGetClusterKubeconfigResult)
+		originalKubeconfig = new(turtlesframework.RancherGetClusterKubeconfigResult)
+		rancherConnectRes = new(turtlesframework.RunCommandResult)
+
+		komega.SetClient(input.BootstrapClusterProxy.GetClient())
+		komega.SetContext(ctx)
+	})
+
+	It("Should import a cluster using gitops", func() {
+		controlPlaneMachineCount := 1
+		if input.ControlPlaneMachineCount != nil {
+			controlPlaneMachineCount = *input.ControlPlaneMachineCount
+		}
+
+		workerMachineCount := 1
+		if input.WorkerMachineCount != nil {
+			workerMachineCount = *input.WorkerMachineCount
+		}
+
+		if input.LabelNamespace {
+			turtlesframework.AddLabelsToNamespace(ctx, turtlesframework.AddLabelsToNamespaceInput{
+				ClusterProxy: input.BootstrapClusterProxy,
+				Name:         namespace.Name,
+				Labels: map[string]string{
+					"cluster-api.cattle.io/rancher-auto-import": "true",
+				},
+			})
+		}
+
+		By("Create Git repository")
+
+		repoCloneAddr := 
turtlesframework.GiteaCreateRepo(ctx, turtlesframework.GiteaCreateRepoInput{ + ServerAddr: input.GitAddr, + RepoName: repoName, + Username: input.E2EConfig.GetVariable(e2e.GiteaUserNameVar), + Password: input.E2EConfig.GetVariable(e2e.GiteaUserPasswordVar), + }) + repoDir := turtlesframework.GitCloneRepo(ctx, turtlesframework.GitCloneRepoInput{ + Address: repoCloneAddr, + Username: input.E2EConfig.GetVariable(e2e.GiteaUserNameVar), + Password: input.E2EConfig.GetVariable(e2e.GiteaUserPasswordVar), + }) + + By("Create fleet repository structure") + + clustersDir := filepath.Join(repoDir, "clusters") + os.MkdirAll(clustersDir, os.ModePerm) + + additionalVars := map[string]string{ + "CLUSTER_NAME": input.ClusterName, + "WORKER_MACHINE_COUNT": strconv.Itoa(workerMachineCount), + "CONTROL_PLANE_MACHINE_COUNT": strconv.Itoa(controlPlaneMachineCount), + } + for k, v := range input.AdditionalTemplateVariables { + additionalVars[k] = v + } + + clusterPath := filepath.Join(clustersDir, fmt.Sprintf("%s.yaml", input.ClusterName)) + Expect(turtlesframework.ApplyFromTemplate(ctx, turtlesframework.ApplyFromTemplateInput{ + Getter: input.E2EConfig.GetVariable, + Template: input.ClusterTemplate, + OutputFilePath: clusterPath, + AddtionalEnvironmentVariables: additionalVars, + })).To(Succeed()) + + fleetPath := filepath.Join(clustersDir, "fleet.yaml") + turtlesframework.FleetCreateFleetFile(ctx, turtlesframework.FleetCreateFleetFileInput{ + Namespace: namespace.Name, + FilePath: fleetPath, + }) + + By("Committing changes to fleet repo and pushing") + + turtlesframework.GitCommitAndPush(ctx, turtlesframework.GitCommitAndPushInput{ + CloneLocation: repoDir, + Username: input.E2EConfig.GetVariable(e2e.GiteaUserNameVar), + Password: input.E2EConfig.GetVariable(e2e.GiteaUserPasswordVar), + CommitMessage: "ci: add clusters bundle", + }) + + By("Applying GitRepo") + + turtlesframework.FleetCreateGitRepo(ctx, turtlesframework.FleetCreateGitRepoInput{ + Name: repoName, + Namespace: turtlesframework.FleetLocalNamespace, + Branch: turtlesframework.DefaultBranchName, + Repo: repoCloneAddr, + FleetGeneration: 1, + Paths: []string{"clusters"}, + ClientSecretName: input.GitAuthSecretName, + ClusterProxy: input.BootstrapClusterProxy, + }) + + By("Waiting for the CAPI cluster to appear") + capiCluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace.Name, + Name: input.ClusterName, + }} + Eventually( + komega.Get(capiCluster), + input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...). 
+ Should(Succeed(), "Failed to apply CAPI cluster definition to cluster via Fleet") + + By("Waiting for cluster control plane to be Ready") + Eventually(komega.Object(capiCluster), capiClusterCreateWait...).Should(HaveField("Status.ControlPlaneReady", BeTrue())) + + By("Waiting for the CAPI cluster to be connectable") + Eventually(func() error { + remoteClient := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, capiCluster.Namespace, capiCluster.Name).GetClient() + namespaces := &corev1.NamespaceList{} + + return remoteClient.List(ctx, namespaces) + }, capiClusterCreateWait...).Should(Succeed(), "Failed to connect to workload cluster using CAPI kubeconfig") + + By("Storing the original CAPI cluster kubeconfig") + turtlesframework.RancherGetOriginalKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{ + Getter: input.BootstrapClusterProxy.GetClient(), + SecretName: fmt.Sprintf("%s-kubeconfig", capiCluster.Name), + Namespace: capiCluster.Namespace, + WriteToTempFile: true, + }, originalKubeconfig) + + By("Waiting for the rancher cluster record to appear") + rancherClusters := &managementv3.ClusterList{} + selectors := []client.ListOption{ + client.MatchingLabels{ + input.CapiClusterOwnerLabel: capiCluster.Name, + input.CapiClusterOwnerNamespaceLabel: capiCluster.Namespace, + input.OwnedLabelName: "", + }, + } + Eventually(func() bool { + Eventually(komega.List(rancherClusters, selectors...)).Should(Succeed()) + return len(rancherClusters.Items) == 1 + }, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue()) + rancherCluster = &rancherClusters.Items[0] + Eventually(komega.Get(rancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed()) + + By("Waiting for the rancher cluster to have a deployed agent") + Eventually(func() bool { + Eventually(komega.Get(rancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed()) + return conditions.IsTrue(rancherCluster, managementv3.ClusterConditionAgentDeployed) + }, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue()) + + By("Waiting for the rancher cluster to be ready") + Eventually(func() bool { + Eventually(komega.Get(rancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed()) + return conditions.IsTrue(rancherCluster, managementv3.ClusterConditionReady) + }, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue()) + + By("Waiting for the CAPI cluster to be connectable using Rancher kubeconfig") + turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{ + Getter: input.BootstrapClusterProxy.GetClient(), + SecretName: fmt.Sprintf("%s-kubeconfig", rancherCluster.Name), + Namespace: rancherCluster.Spec.FleetWorkspaceName, + RancherServerURL: input.RancherServerURL, + WriteToTempFile: true, + }, rancherKubeconfig) + + turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{ + Command: "kubectl", + Args: []string{ + "--kubeconfig", + rancherKubeconfig.TempFilePath, + "get", + "nodes", + "--insecure-skip-tls-verify", + }, + }, rancherConnectRes) + Expect(rancherConnectRes.Error).NotTo(HaveOccurred(), "Failed getting nodes with Rancher Kubeconfig") + Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes return non-zero exit code") + }) + + 
AfterEach(func() { + err := testenv.CollectArtifacts(ctx, originalKubeconfig.TempFilePath, path.Join(input.ArtifactFolder, input.BootstrapClusterProxy.GetName(), input.ClusterName)) + if err != nil { + fmt.Printf("Failed to collect artifacts for the child cluster: %v\n", err) + } + + By("Deleting GitRepo from Rancher") + turtlesframework.FleetDeleteGitRepo(ctx, turtlesframework.FleetDeleteGitRepoInput{ + Name: repoName, + Namespace: turtlesframework.FleetLocalNamespace, + ClusterProxy: input.BootstrapClusterProxy, + }) + + By("Waiting for the rancher cluster record to be removed") + Eventually(komega.Get(rancherCluster), deleteClusterWait...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be deleted") + + e2e.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, capiCluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + }) +} diff --git a/test/e2e/suites/import-gitops/import_gitops_test.go b/test/e2e/suites/import-gitops/import_gitops_test.go index 602df09a..1c664321 100644 --- a/test/e2e/suites/import-gitops/import_gitops_test.go +++ b/test/e2e/suites/import-gitops/import_gitops_test.go @@ -33,7 +33,6 @@ import ( ) var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.ShortTestLabel, e2e.FullTestLabel), func() { - BeforeEach(func() { SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) SetContext(ctx) @@ -63,7 +62,6 @@ var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionalit }) var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.FullTestLabel), func() { - BeforeEach(func() { komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) komega.SetContext(ctx) @@ -93,7 +91,6 @@ var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality shoul }) var _ = Describe("[Azure] [AKS] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.DontRunLabel), func() { - BeforeEach(func() { SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) SetContext(ctx) @@ -122,7 +119,6 @@ var _ = Describe("[Azure] [AKS] Create and delete CAPI cluster functionality sho }) var _ = Describe("[vSphere] [Kubeadm] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.LocalTestLabel), func() { - BeforeEach(func() { SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) SetContext(ctx) diff --git a/test/e2e/suites/import-gitops/suite_test.go b/test/e2e/suites/import-gitops/suite_test.go index e4f5daf5..e6514093 100644 --- a/test/e2e/suites/import-gitops/suite_test.go +++ b/test/e2e/suites/import-gitops/suite_test.go @@ -24,9 +24,8 @@ import ( "fmt" "os" "path/filepath" - "testing" - "runtime" + "testing" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" diff --git a/test/e2e/suites/managementv3/managementv3_test.go b/test/e2e/suites/managementv3/managementv3_test.go new file mode 100644 index 00000000..d20527f5 --- /dev/null +++ b/test/e2e/suites/managementv3/managementv3_test.go @@ -0,0 +1,125 @@ +//go:build e2e +// +build e2e + +/* +Copyright © 2023 - 2024 SUSE LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managementv3 + +import ( + . "github.com/onsi/ginkgo/v2" + "sigs.k8s.io/controller-runtime/pkg/envtest/komega" + + "k8s.io/utils/ptr" + + "github.com/rancher-sandbox/rancher-turtles/test/e2e" + "github.com/rancher-sandbox/rancher-turtles/test/e2e/specs" +) + +var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.ShortTestLabel, e2e.FullTestLabel), func() { + BeforeEach(func() { + komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetContext(ctx) + }) + + specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { + return specs.CreateMgmtV3UsingGitOpsSpecInput{ + E2EConfig: e2eConfig, + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + ClusterctlConfigPath: flagVals.ConfigPath, + ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, + ArtifactFolder: flagVals.ArtifactFolder, + ClusterTemplate: e2e.CAPIDockerKubeadm, + ClusterName: "highlander-e2e-clusterv3-1", + ControlPlaneMachineCount: ptr.To[int](1), + WorkerMachineCount: ptr.To[int](1), + GitAddr: giteaResult.GitAddress, + GitAuthSecretName: e2e.AuthSecretName, + SkipCleanup: false, + SkipDeletionTest: false, + LabelNamespace: true, + RancherServerURL: hostName, + CAPIClusterCreateWaitName: "wait-rancher", + DeleteClusterWaitName: "wait-controllers", + CapiClusterOwnerLabel: e2e.CapiClusterOwnerLabel, + CapiClusterOwnerNamespaceLabel: e2e.CapiClusterOwnerNamespaceLabel, + OwnedLabelName: e2e.OwnedLabelName, + } + }) +}) + +var _ = Describe("[Azure] [AKS] - [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.DontRunLabel), func() { + BeforeEach(func() { + komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetContext(ctx) + }) + + specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { + return specs.CreateMgmtV3UsingGitOpsSpecInput{ + E2EConfig: e2eConfig, + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + ClusterctlConfigPath: flagVals.ConfigPath, + ArtifactFolder: flagVals.ArtifactFolder, + ClusterTemplate: e2e.CAPIAzureAKSMMP, + ClusterName: "highlander-e2e-clusterv3-2", + ControlPlaneMachineCount: ptr.To[int](1), + WorkerMachineCount: ptr.To[int](1), + GitAddr: giteaResult.GitAddress, + GitAuthSecretName: e2e.AuthSecretName, + SkipCleanup: false, + SkipDeletionTest: false, + LabelNamespace: true, + RancherServerURL: hostName, + CAPIClusterCreateWaitName: "wait-capz-create-cluster", + DeleteClusterWaitName: "wait-aks-delete", + CapiClusterOwnerLabel: e2e.CapiClusterOwnerLabel, + CapiClusterOwnerNamespaceLabel: e2e.CapiClusterOwnerNamespaceLabel, + OwnedLabelName: e2e.OwnedLabelName, + } + }) +}) + +var _ = Describe("[AWS] [EKS] - [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.FullTestLabel), func() { + BeforeEach(func() { + komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetContext(ctx) + }) + + 
specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { + return specs.CreateMgmtV3UsingGitOpsSpecInput{ + E2EConfig: e2eConfig, + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + ClusterctlConfigPath: flagVals.ConfigPath, + ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, + ArtifactFolder: flagVals.ArtifactFolder, + ClusterTemplate: e2e.CAPIAwsEKSMMP, + ClusterName: "highlander-e2e-clusterv3-3", + ControlPlaneMachineCount: ptr.To[int](1), + WorkerMachineCount: ptr.To[int](1), + GitAddr: giteaResult.GitAddress, + GitAuthSecretName: e2e.AuthSecretName, + SkipCleanup: false, + SkipDeletionTest: false, + LabelNamespace: true, + RancherServerURL: hostName, + CAPIClusterCreateWaitName: "wait-capa-create-cluster", + DeleteClusterWaitName: "wait-eks-delete", + CapiClusterOwnerLabel: e2e.CapiClusterOwnerLabel, + CapiClusterOwnerNamespaceLabel: e2e.CapiClusterOwnerNamespaceLabel, + OwnedLabelName: e2e.OwnedLabelName, + } + }) +}) diff --git a/test/e2e/suites/managementv3/suite_test.go b/test/e2e/suites/managementv3/suite_test.go new file mode 100644 index 00000000..42a8c18f --- /dev/null +++ b/test/e2e/suites/managementv3/suite_test.go @@ -0,0 +1,224 @@ +//go:build e2e +// +build e2e + +/* +Copyright © 2023 - 2024 SUSE LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managementv3 + +import ( + "context" + "fmt" + "os" + "path/filepath" + "runtime" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/klog/v2" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/rancher-sandbox/rancher-turtles/test/e2e" + turtlesframework "github.com/rancher-sandbox/rancher-turtles/test/framework" + "github.com/rancher-sandbox/rancher-turtles/test/testenv" +) + +// Test suite flags. +var ( + flagVals *e2e.FlagValues +) + +// Test suite global vars. +var ( + // e2eConfig to be used for this test, read from configPath. + e2eConfig *clusterctl.E2EConfig + + // clusterctlConfigPath to be used for this test, created by generating a clusterctl local repository + // with the providers specified in the configPath. + clusterctlConfigPath string + + // hostName is the host name for the Rancher Manager server. + hostName string + + ctx = context.Background() + + setupClusterResult *testenv.SetupTestClusterResult + giteaResult *testenv.DeployGiteaResult +) + +func init() { + flagVals = &e2e.FlagValues{} + e2e.InitFlags(flagVals) +} + +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + + ctrl.SetLogger(klog.Background()) + + RunSpecs(t, "rancher-turtles-e2e-managementv3") +} + +var _ = BeforeSuite(func() { + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") + Expect(os.MkdirAll(flagVals.ArtifactFolder, 0755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder) + Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. 
helm-binary-path should be an existing file.") + Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.") + + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + + By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) + + hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) + + setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ + UseExistingCluster: flagVals.UseExistingCluster, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: flagVals.ArtifactFolder, + Hostname: hostName, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), + IsolatedMode: flagVals.IsolatedMode, + HelmBinaryPath: flagVals.HelmBinaryPath, + }) + + if flagVals.IsolatedMode { + hostName = setupClusterResult.IsolatedHostName + } + + testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: flagVals.HelmBinaryPath, + HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), + IsolatedMode: flagVals.IsolatedMode, + NginxIngress: e2e.NginxIngress, + NginxIngressNamespace: e2e.NginxIngressNamespace, + IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), + NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), + NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), + NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), + NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), + DefaultIngressClassPatch: e2e.IngressClassPatch, + }) + + testenv.DeployRancher(ctx, testenv.DeployRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: flagVals.HelmBinaryPath, + HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher.yaml"), + InstallCertManager: true, + CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), + CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), + CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), + RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherRepoNameVar), + RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), + RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), + RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), + RancherHost: hostName, + RancherNamespace: e2e.RancherNamespace, + RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), + RancherPatches: [][]byte{e2e.RancherSettingPatch}, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + IsolatedMode: flagVals.IsolatedMode, + RancherIngressConfig: e2e.IngressConfig, + RancherServicePatch: e2e.RancherServicePatch, + Variables: e2eConfig.Variables, + }) + + testenv.DeployRancherTurtles(ctx, testenv.DeployRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: flagVals.HelmBinaryPath, + ChartPath: 
flagVals.ChartPath, + CAPIProvidersYAML: e2e.CapiProviders, + Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Image: fmt.Sprintf("ghcr.io/rancher-sandbox/rancher-turtles-%s", runtime.GOARCH), + Tag: "v0.0.1", + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + AdditionalValues: map[string]string{ + "rancherTurtles.features.managementv3-cluster.enabled": "true", // enable management.cattle.io/v3 controller + }, + }) + + if !shortTestOnly() && !localTestOnly() { + By("Running full tests, deploying additional infrastructure providers") + awsCreds := e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar) + Expect(awsCreds).ToNot(BeEmpty(), "AWS creds required for full test") + + testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + CAPIProvidersSecretsYAML: [][]byte{ + e2e.AWSProviderSecret, + e2e.AzureIdentitySecret, + }, + CAPIProvidersYAML: e2e.FullProviders, + TemplateData: map[string]string{ + "AWSEncodedCredentials": e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar), + }, + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + WaitForDeployments: []testenv.NamespaceName{ + { + Name: "capa-controller-manager", + Namespace: "capa-system", + }, + { + Name: "capz-controller-manager", + Namespace: "capz-system", + }, + }, + }) + } + + giteaResult = testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: flagVals.HelmBinaryPath, + ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), + ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), + ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), + ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), + ValuesFilePath: "../../data/gitea/values.yaml", + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-getservice"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }) +}) + +var _ = AfterSuite(func() { + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ + SetupTestClusterResult: *setupClusterResult, + SkipCleanup: flagVals.SkipCleanup, + ArtifactFolder: flagVals.ArtifactFolder, + }) +}) + +func shortTestOnly() bool { + return GinkgoLabelFilter() == e2e.ShortTestLabel +} + +func localTestOnly() bool { + return GinkgoLabelFilter() == e2e.LocalTestLabel +}
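Note on enabling the new code path: the e2e suite above switches controllers through the flat Helm value `rancherTurtles.features.managementv3-cluster.enabled`, and main.go only wires up CAPIImportManagementV3Reconciler when the managementv3-cluster feature gate is turned on. A minimal values override, sketched below as the nested YAML equivalent of that flat key (illustrative only, not part of this change set), would enable the same behaviour for a regular chart install:

# Illustrative values override: enables the managementv3-cluster feature gate so the
# import controller reconciles management.cattle.io/v3 Clusters rather than
# provisioning.cattle.io/v1 ones.
rancherTurtles:
  features:
    managementv3-cluster:
      enabled: true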