From e3690756fa39bd6549fb6fc75954d5c1dd7d4ff6 Mon Sep 17 00:00:00 2001 From: Carlos Salas Date: Mon, 22 Jan 2024 17:24:55 +0100 Subject: [PATCH] test: add e2e test suite for management.cattle.io controller Signed-off-by: Carlos Salas --- .golangci.yml | 3 + internal/controllers/helpers.go | 6 +- .../controllers/import_controller_test.go | 2 +- ..._controller.go => import_controller_v3.go} | 100 ++++-- ...r_test.go => import_controller_v3_test.go} | 152 +++++++-- .../{helpers_test.go => testutils.go} | 1 + internal/rancher/management/v3/cluster.go | 99 +++++- .../management/v3/zz_generated.deepcopy.go | 24 +- main.go | 8 - test/e2e/const.go | 4 + test/e2e/specs/import_gitops.go | 12 +- test/e2e/specs/import_gitops_mgmtv3.go | 317 ++++++++++++++++++ .../import-gitops/import_gitops_test.go | 4 - test/e2e/suites/import-gitops/suite_test.go | 3 +- .../suites/managementv3/managementv3_test.go | 125 +++++++ test/e2e/suites/managementv3/suite_test.go | 225 +++++++++++++ 16 files changed, 994 insertions(+), 91 deletions(-) rename internal/controllers/{import_management_v3_controller.go => import_controller_v3.go} (78%) rename internal/controllers/{import_management_v3_controller_test.go => import_controller_v3_test.go} (69%) rename internal/controllers/{helpers_test.go => testutils.go} (99%) create mode 100644 test/e2e/specs/import_gitops_mgmtv3.go create mode 100644 test/e2e/suites/managementv3/managementv3_test.go create mode 100644 test/e2e/suites/managementv3/suite_test.go diff --git a/.golangci.yml b/.golangci.yml index 600e59287..afb78f622 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -13,6 +13,9 @@ issues: linters: - testpackage - gochecknoglobals + - path: internal/controllers/import_controller(_v3)?\.go + linters: + - dupl - text: var-naming linters: - revive diff --git a/internal/controllers/helpers.go b/internal/controllers/helpers.go index 68e12d519..51907b2b5 100644 --- a/internal/controllers/helpers.go +++ b/internal/controllers/helpers.go @@ -47,8 +47,10 
@@ import ( ) const ( - importLabelName = "cluster-api.cattle.io/rancher-auto-import" - ownedLabelName = "cluster-api.cattle.io/owned" + importLabelName = "cluster-api.cattle.io/rancher-auto-import" + ownedLabelName = "cluster-api.cattle.io/owned" + capiClusterOwner = "cluster-api.cattle.io/capi-cluster-owner" + capiClusterOwnerNamespace = "cluster-api.cattle.io/capi-cluster-owner-ns" defaultRequeueDuration = 1 * time.Minute ) diff --git a/internal/controllers/import_controller_test.go b/internal/controllers/import_controller_test.go index bc1fe3399..e56379438 100644 --- a/internal/controllers/import_controller_test.go +++ b/internal/controllers/import_controller_test.go @@ -62,7 +62,7 @@ var _ = Describe("reconcile CAPI Cluster", func() { capiCluster = &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", + Name: fmt.Sprintf("test-cluster-%s", testObjectsSuffix), Namespace: testNamespace, }, } diff --git a/internal/controllers/import_management_v3_controller.go b/internal/controllers/import_controller_v3.go similarity index 78% rename from internal/controllers/import_management_v3_controller.go rename to internal/controllers/import_controller_v3.go index ee02142b7..14e6f6b1c 100644 --- a/internal/controllers/import_management_v3_controller.go +++ b/internal/controllers/import_controller_v3.go @@ -42,7 +42,6 @@ import ( managementv3 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/management/v3" "github.com/rancher-sandbox/rancher-turtles/util" - turtlesnaming "github.com/rancher-sandbox/rancher-turtles/util/naming" turtlespredicates "github.com/rancher-sandbox/rancher-turtles/util/predicates" ) @@ -85,7 +84,6 @@ func (r *CAPIImportManagementV3Reconciler) SetupWithManager(ctx context.Context, } // Watch Rancher managementv3 clusters - // NOTE: we will import the types from rancher in the future err = c.Watch( source.Kind(mgr.GetCache(), &managementv3.Cluster{}), handler.EnqueueRequestsFromMapFunc(rancherClusterToCapiCluster(ctx, 
capiPredicates, r.RancherClient)), @@ -175,18 +173,48 @@ func (r *CAPIImportManagementV3Reconciler) Reconcile(ctx context.Context, req ct func (r *CAPIImportManagementV3Reconciler) reconcile(ctx context.Context, capiCluster *clusterv1.Cluster) (ctrl.Result, error) { log := log.FromContext(ctx) - // fetch the rancher cluster - rancherCluster := &managementv3.Cluster{ObjectMeta: metav1.ObjectMeta{ - Namespace: capiCluster.Namespace, - Name: turtlesnaming.Name(capiCluster.Name).ToRancherName(), - }} + // placeholder name/namespace must be set to be able to test this but it won't be used. + rancherCluster := &managementv3.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "c-", + Namespace: capiCluster.Namespace, + Labels: map[string]string{ + capiClusterOwner: capiCluster.Name, + capiClusterOwnerNamespace: capiCluster.Namespace, + }, + }, + } + + rancherClusterList := &managementv3.ClusterList{} + selectors := []client.ListOption{ + client.MatchingLabels{ + capiClusterOwner: capiCluster.Name, + capiClusterOwnerNamespace: capiCluster.Namespace, + ownedLabelName: "", + }, + } + err := r.RancherClient.List(ctx, rancherClusterList, selectors...) - err := r.RancherClient.Get(ctx, client.ObjectKeyFromObject(rancherCluster), rancherCluster) if client.IgnoreNotFound(err) != nil { log.Error(err, fmt.Sprintf("Unable to fetch rancher cluster %s", client.ObjectKeyFromObject(rancherCluster))) return ctrl.Result{Requeue: true}, err } + if len(rancherClusterList.Items) != 0 { + if len(rancherClusterList.Items) > 1 { + log.Info("More than one rancher cluster found. 
Will default to using the first one.") + } + + rancherCluster = &rancherClusterList.Items[0] + } + + if !capiCluster.ObjectMeta.DeletionTimestamp.IsZero() { + err := r.deleteDependentRancherCluster(ctx, capiCluster) + if err != nil { + return ctrl.Result{Requeue: true}, fmt.Errorf("error deleting associated managementv3.Cluster resources: %w", err) + } + } + if !rancherCluster.ObjectMeta.DeletionTimestamp.IsZero() { return reconcileDelete(ctx, capiCluster) } @@ -213,21 +241,17 @@ func (r *CAPIImportManagementV3Reconciler) reconcileNormal(ctx context.Context, if err := r.RancherClient.Create(ctx, &managementv3.Cluster{ ObjectMeta: metav1.ObjectMeta{ - Name: turtlesnaming.Name(capiCluster.Name).ToRancherName(), - Namespace: capiCluster.Namespace, - OwnerReferences: []metav1.OwnerReference{{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: clusterv1.ClusterKind, - Name: capiCluster.Name, - UID: capiCluster.UID, - }}, + Namespace: capiCluster.Namespace, + GenerateName: "c-", Labels: map[string]string{ - ownedLabelName: "", + capiClusterOwner: capiCluster.Name, + capiClusterOwnerNamespace: capiCluster.Namespace, + ownedLabelName: "", }, }, Spec: managementv3.ClusterSpec{ - DisplayName: fmt.Sprintf("c-displayname-%s", capiCluster.Name), - Description: "c-description", + DisplayName: capiCluster.Name, + Description: "CAPI cluster automatically imported to Rancher", }, }); err != nil { return ctrl.Result{}, fmt.Errorf("error creating rancher cluster: %w", err) @@ -242,20 +266,13 @@ func (r *CAPIImportManagementV3Reconciler) reconcileNormal(ctx context.Context, return ctrl.Result{}, err } - if rancherCluster.Status.ClusterName == "" { - log.Info("cluster name not set yet, requeue") - return ctrl.Result{Requeue: true}, nil - } - - log.Info("found cluster name", "name", rancherCluster.Status.ClusterName) - - if rancherCluster.Status.AgentDeployed { + if managementv3.ClusterConditionAgentDeployed.IsTrue(rancherCluster) { log.Info("agent already deployed, no action 
needed") return ctrl.Result{}, nil } // get the registration manifest - manifest, err := getClusterRegistrationManifest(ctx, rancherCluster.Status.ClusterName, capiCluster.Namespace, r.RancherClient, r.InsecureSkipVerify) + manifest, err := getClusterRegistrationManifest(ctx, rancherCluster.Name, rancherCluster.Name, r.RancherClient, r.InsecureSkipVerify) if err != nil { return ctrl.Result{}, err } @@ -280,3 +297,30 @@ func (r *CAPIImportManagementV3Reconciler) reconcileNormal(ctx context.Context, return ctrl.Result{}, nil } + +func (r *CAPIImportManagementV3Reconciler) deleteDependentRancherCluster(ctx context.Context, capiCluster *clusterv1.Cluster) error { + log := log.FromContext(ctx) + + rancherClusters := &managementv3.ClusterList{} + selectors := []client.ListOption{ + client.MatchingLabels{ + capiClusterOwner: capiCluster.Name, + capiClusterOwnerNamespace: capiCluster.Namespace, + }, + } + + err := r.RancherClient.List(ctx, rancherClusters, selectors...) + if err != nil { + log.Error(err, fmt.Sprintf("Unable to fetch rancher cluster owned by capi cluster %s/%s for deletion", capiCluster.Namespace, capiCluster.Name)) + } + + for i := range rancherClusters.Items { + err := r.RancherClient.Delete(ctx, &rancherClusters.Items[i]) + if err != nil { + log.Error(err, fmt.Sprintf("Unable to delete dependent managementv3.Cluster resource %s", client.ObjectKeyFromObject(&rancherClusters.Items[i]))) + return err + } + } + + return nil +} diff --git a/internal/controllers/import_management_v3_controller_test.go b/internal/controllers/import_controller_v3_test.go similarity index 69% rename from internal/controllers/import_management_v3_controller_test.go rename to internal/controllers/import_controller_v3_test.go index 9de0da786..4fe953753 100644 --- a/internal/controllers/import_management_v3_controller_test.go +++ b/internal/controllers/import_controller_v3_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package controllers import ( + "context" "fmt" "net/http" "net/http/httptest" @@ -27,7 +28,6 @@ import ( "github.com/rancher-sandbox/rancher-turtles/internal/controllers/testdata" managementv3 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/management/v3" "github.com/rancher-sandbox/rancher-turtles/internal/test" - turtlesnaming "github.com/rancher-sandbox/rancher-turtles/util/naming" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -44,11 +44,13 @@ var _ = Describe("reconcile CAPI Cluster", func() { var ( r *CAPIImportManagementV3Reconciler capiCluster *clusterv1.Cluster + rancherClusters *managementv3.ClusterList rancherCluster *managementv3.Cluster clusterRegistrationToken *managementv3.ClusterRegistrationToken capiKubeconfigSecret *corev1.Secret - clusterName = "generated-rancher-cluster" + selectors []client.ListOption testObjectsSuffix = "managementv3" + capiClusterName = fmt.Sprintf("test-cluster-%s", testObjectsSuffix) ) BeforeEach(func() { r = &CAPIImportManagementV3Reconciler{ @@ -60,25 +62,36 @@ var _ = Describe("reconcile CAPI Cluster", func() { capiCluster = &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", + Name: capiClusterName, Namespace: testNamespace, }, } rancherCluster = &managementv3.Cluster{ ObjectMeta: metav1.ObjectMeta{ - Name: turtlesnaming.Name(capiCluster.Name).ToRancherName(), - Namespace: testNamespace, + Namespace: capiCluster.Namespace, + GenerateName: "c-", + Labels: map[string]string{ + capiClusterOwner: capiCluster.Name, + capiClusterOwnerNamespace: capiCluster.Namespace, + ownedLabelName: "", + }, }, } - clusterRegistrationToken = &managementv3.ClusterRegistrationToken{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - Namespace: testNamespace, + rancherClusters = &managementv3.ClusterList{} + + selectors = []client.ListOption{ + client.MatchingLabels{ + capiClusterOwner: capiCluster.Name, + 
capiClusterOwnerNamespace: capiCluster.Namespace, }, } + clusterRegistrationToken = &managementv3.ClusterRegistrationToken{ + ObjectMeta: metav1.ObjectMeta{}, + } + capiKubeconfigSecret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-kubeconfig", capiCluster.Name), @@ -91,7 +104,11 @@ var _ = Describe("reconcile CAPI Cluster", func() { }) AfterEach(func() { - template := setTemplateParams(testdata.ImportManifest, map[string]string{"${TEST_CASE_NAME}": testObjectsSuffix}) + template := setTemplateParams( + testdata.ImportManifest, + map[string]string{ + "${TEST_CASE_NAME}": testObjectsSuffix, + }) objs, err := manifestToObjects(strings.NewReader(template)) clientObjs := []client.Object{ capiCluster, @@ -106,6 +123,7 @@ var _ = Describe("reconcile CAPI Cluster", func() { } Expect(err).ToNot(HaveOccurred()) Expect(test.CleanupAndWait(ctx, cl, clientObjs...)).To(Succeed()) + Expect(cleanupTestClusters(ctx)).To(Succeed()) }) It("should reconcile a CAPI cluster when control plane not ready", func() { @@ -138,11 +156,12 @@ var _ = Describe("reconcile CAPI Cluster", func() { Expect(err).ToNot(HaveOccurred()) Expect(res.Requeue).To(BeTrue()) - cluster := &managementv3.Cluster{} - Expect(cl.Get(ctx, client.ObjectKeyFromObject(rancherCluster), cluster)).ToNot(HaveOccurred()) + Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + Expect(rancherClusters.Items).To(HaveLen(1)) + Expect(rancherClusters.Items[0].Name).To(ContainSubstring("c-")) }) - It("should reconcile a CAPI cluster when rancher cluster doesn't exist and annotation is set on the namespace using management.cattle.io/v3", func() { + It("should reconcile a CAPI cluster when rancher cluster doesn't exist and annotation is set on the namespace", func() { Expect(cl.Create(ctx, capiCluster)).To(Succeed()) capiCluster.Status.ControlPlaneReady = true Expect(cl.Status().Update(ctx, capiCluster)).To(Succeed()) @@ -156,12 +175,17 @@ var _ = Describe("reconcile CAPI Cluster", 
func() { Expect(err).ToNot(HaveOccurred()) Expect(res.Requeue).To(BeTrue()) - cluster := &managementv3.Cluster{} - Expect(cl.Get(ctx, client.ObjectKeyFromObject(rancherCluster), cluster)).ToNot(HaveOccurred()) + Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + Expect(rancherClusters.Items).To(HaveLen(1)) + Expect(rancherClusters.Items[0].Name).To(ContainSubstring("c-")) }) It("should reconcile a CAPI cluster when rancher cluster exists", func() { - template := setTemplateParams(testdata.ImportManifest, map[string]string{"${TEST_CASE_NAME}": testObjectsSuffix}) + template := setTemplateParams( + testdata.ImportManifest, + map[string]string{ + "${TEST_CASE_NAME}": testObjectsSuffix, + }) server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Write([]byte(template)) @@ -175,11 +199,15 @@ var _ = Describe("reconcile CAPI Cluster", func() { Expect(cl.Create(ctx, capiKubeconfigSecret)).To(Succeed()) Expect(cl.Create(ctx, rancherCluster)).To(Succeed()) - cluster := &managementv3.Cluster{} - Expect(cl.Get(ctx, client.ObjectKeyFromObject(rancherCluster), cluster)).To(Succeed()) - cluster.Status.ClusterName = clusterName - Expect(cl.Status().Update(ctx, cluster)).To(Succeed()) + Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + Expect(rancherClusters.Items).To(HaveLen(1)) + cluster := rancherClusters.Items[0] + Expect(cluster.Name).To(ContainSubstring("c-")) + + clusterRegistrationToken.Name = cluster.Name + clusterRegistrationToken.Namespace = cluster.Name + Expect(createRegistrationTokenNamespace(ctx, cluster.Name)).To(Succeed()) Expect(cl.Create(ctx, clusterRegistrationToken)).To(Succeed()) token := &managementv3.ClusterRegistrationToken{} Expect(cl.Get(ctx, client.ObjectKeyFromObject(clusterRegistrationToken), token)).To(Succeed()) @@ -218,6 +246,13 @@ var _ = Describe("reconcile CAPI Cluster", func() { Expect(cl.Status().Update(ctx, 
capiCluster)).To(Succeed()) Expect(cl.Create(ctx, rancherCluster)).To(Succeed()) + Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + Expect(rancherClusters.Items).To(HaveLen(1)) + cluster := rancherClusters.Items[0] + Expect(cluster.Name).To(ContainSubstring("c-")) + + Expect(createRegistrationTokenNamespace(ctx, cluster.Name)).To(Succeed()) + res, err := r.Reconcile(ctx, reconcile.Request{ NamespacedName: types.NamespacedName{ Namespace: capiCluster.Namespace, @@ -234,10 +269,19 @@ var _ = Describe("reconcile CAPI Cluster", func() { Expect(cl.Status().Update(ctx, capiCluster)).To(Succeed()) Expect(cl.Create(ctx, rancherCluster)).To(Succeed()) - cluster := &managementv3.Cluster{} - Expect(cl.Get(ctx, client.ObjectKeyFromObject(rancherCluster), cluster)).ToNot(HaveOccurred()) - cluster.Status.AgentDeployed = true - Expect(cl.Status().Update(ctx, cluster)).To(Succeed()) + + Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + Expect(rancherClusters.Items).To(HaveLen(1)) + cluster := rancherClusters.Items[0] + Expect(cluster.Name).To(ContainSubstring("c-")) + + cluster.Status.Conditions = []managementv3.ClusterCondition{ + { + Type: managementv3.ClusterConditionType(managementv3.ClusterConditionAgentDeployed), + Status: corev1.ConditionTrue, + }, + } + Expect(cl.Status().Update(ctx, &cluster)).To(Succeed()) _, err := r.Reconcile(ctx, reconcile.Request{ NamespacedName: types.NamespacedName{ @@ -262,11 +306,15 @@ var _ = Describe("reconcile CAPI Cluster", func() { Expect(cl.Create(ctx, capiKubeconfigSecret)).To(Succeed()) Expect(cl.Create(ctx, rancherCluster)).To(Succeed()) - cluster := &managementv3.Cluster{} - Expect(cl.Get(ctx, client.ObjectKeyFromObject(rancherCluster), cluster)).ToNot(HaveOccurred()) - cluster.Status.ClusterName = clusterName - Expect(cl.Status().Update(ctx, cluster)).To(Succeed()) + Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + 
Expect(rancherClusters.Items).To(HaveLen(1)) + cluster := rancherClusters.Items[0] + Expect(cluster.Name).To(ContainSubstring("c-")) + + clusterRegistrationToken.Name = cluster.Name + clusterRegistrationToken.Namespace = cluster.Name + Expect(createRegistrationTokenNamespace(ctx, cluster.Name)).To(Succeed()) Expect(cl.Create(ctx, clusterRegistrationToken)).To(Succeed()) token := &managementv3.ClusterRegistrationToken{} Expect(cl.Get(ctx, client.ObjectKeyFromObject(clusterRegistrationToken), token)).To(Succeed()) @@ -297,10 +345,15 @@ var _ = Describe("reconcile CAPI Cluster", func() { Expect(cl.Create(ctx, capiKubeconfigSecret)).To(Succeed()) Expect(cl.Create(ctx, rancherCluster)).To(Succeed()) - cluster := &managementv3.Cluster{} - Expect(cl.Get(ctx, client.ObjectKeyFromObject(rancherCluster), cluster)).ToNot(HaveOccurred()) - cluster.Status.ClusterName = clusterName - Expect(cl.Status().Update(ctx, cluster)).To(Succeed()) + + Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + Expect(rancherClusters.Items).To(HaveLen(1)) + cluster := rancherClusters.Items[0] + Expect(cluster.Name).To(ContainSubstring("c-")) + + clusterRegistrationToken.Name = cluster.Name + clusterRegistrationToken.Namespace = cluster.Name + Expect(createRegistrationTokenNamespace(ctx, cluster.Name)).To(Succeed()) res, err := r.Reconcile(ctx, reconcile.Request{ NamespacedName: types.NamespacedName{ @@ -322,11 +375,15 @@ var _ = Describe("reconcile CAPI Cluster", func() { Expect(cl.Create(ctx, capiKubeconfigSecret)).To(Succeed()) Expect(cl.Create(ctx, rancherCluster)).To(Succeed()) - cluster := &managementv3.Cluster{} - Expect(cl.Get(ctx, client.ObjectKeyFromObject(rancherCluster), cluster)).ToNot(HaveOccurred()) - cluster.Status.ClusterName = clusterName - Expect(cl.Status().Update(ctx, cluster)).To(Succeed()) + Expect(cl.List(ctx, rancherClusters, selectors...)).ToNot(HaveOccurred()) + Expect(rancherClusters.Items).To(HaveLen(1)) + cluster := rancherClusters.Items[0] + 
Expect(cluster.Name).To(ContainSubstring("c-")) + + clusterRegistrationToken.Name = cluster.Name + clusterRegistrationToken.Namespace = cluster.Name + Expect(createRegistrationTokenNamespace(ctx, cluster.Name)).To(Succeed()) Expect(cl.Create(ctx, clusterRegistrationToken)).To(Succeed()) res, err := r.Reconcile(ctx, reconcile.Request{ @@ -339,3 +396,26 @@ var _ = Describe("reconcile CAPI Cluster", func() { Expect(res.Requeue).To(BeTrue()) }) }) + +func cleanupTestClusters(ctx context.Context) error { + clusterList := &managementv3.ClusterList{} + if err := cl.List(ctx, clusterList); err != nil { + return err + } + for _, cluster := range clusterList.Items { + o := cluster + if err := cl.Delete(ctx, &o); err != nil { + return err + } + } + return nil +} + +func createRegistrationTokenNamespace(ctx context.Context, clusterName string) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + }, + } + return cl.Create(ctx, ns) +} diff --git a/internal/controllers/helpers_test.go b/internal/controllers/testutils.go similarity index 99% rename from internal/controllers/helpers_test.go rename to internal/controllers/testutils.go index cc8cb0431..4a7c45b3a 100644 --- a/internal/controllers/helpers_test.go +++ b/internal/controllers/testutils.go @@ -32,6 +32,7 @@ func setTemplateParams(template string, params map[string]string) string { for k, v := range params { template = strings.ReplaceAll(template, k, v) } + return template } diff --git a/internal/rancher/management/v3/cluster.go b/internal/rancher/management/v3/cluster.go index caecca664..1ceb82b97 100644 --- a/internal/rancher/management/v3/cluster.go +++ b/internal/rancher/management/v3/cluster.go @@ -17,7 +17,25 @@ limitations under the License. 
package v3 import ( + "reflect" + + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // ClusterConditionAgentDeployed is the condition type for the agent deployed condition. + ClusterConditionAgentDeployed Cond = "AgentDeployed" + // ClusterConditionReady is the condition type for the ready condition. + ClusterConditionReady Cond = "Ready" +) + +type ( + // Cond represents a condition of a Rancher Cluster. + Cond string + // ClusterConditionType represents the type of a condition. + ClusterConditionType string ) // Cluster is the struct representing a Rancher Cluster. @@ -39,9 +57,7 @@ type ClusterSpec struct { // ClusterStatus is the struct representing the status of a Rancher Cluster. type ClusterStatus struct { - ClusterName string `json:"clusterName,omitempty"` - AgentDeployed bool `json:"agentDeployed,omitempty"` - Ready bool `json:"ready,omitempty"` + Conditions []ClusterCondition `json:"conditions,omitempty"` } // ClusterList contains a list of ClusterList. @@ -52,6 +68,83 @@ type ClusterList struct { Items []Cluster `json:"items"` } +// ClusterCondition is the struct representing a condition of a Rancher Cluster. +type ClusterCondition struct { + // Type of cluster condition. + Type ClusterConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status"` + // The last time this condition was updated. + LastUpdateTime string `json:"lastUpdateTime,omitempty"` + // Last time the condition transitioned from one status to another. + LastTransitionTime string `json:"lastTransitionTime,omitempty"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty"` + // Human-readable message indicating details about last transition + Message string `json:"message,omitempty"` +} + +// IsTrue returns true if the condition is true. 
+func (c Cond) IsTrue(obj runtime.Object) bool { + return getStatus(obj, string(c)) == "True" +} + +func getStatus(obj interface{}, condName string) string { + cond := findOrNotCreateCond(obj, condName) + if cond == nil { + return "" + } + + return getFieldValue(*cond, "Status").String() +} + +func findOrNotCreateCond(obj interface{}, condName string) *reflect.Value { + condSlice := getValue(obj, "Status", "Conditions") + return findCond(condSlice, condName) +} + +func findCond(val reflect.Value, name string) *reflect.Value { + for i := 0; i < val.Len(); i++ { + cond := val.Index(i) + typeVal := getFieldValue(cond, "Type") + + if typeVal.String() == name { + return &cond + } + } + + return nil +} + +func getValue(obj interface{}, name ...string) reflect.Value { + if obj == nil { + return reflect.Value{} + } + + v := reflect.ValueOf(obj) + t := v.Type() + + if t.Kind() == reflect.Ptr { + v = v.Elem() + } + + field := v.FieldByName(name[0]) + if len(name) == 1 { + return field + } + + return getFieldValue(field, name[1:]...) +} + +func getFieldValue(v reflect.Value, name ...string) reflect.Value { + field := v.FieldByName(name[0]) + if len(name) == 1 { + return field + } + + return getFieldValue(field, name[1:]...) +} + func init() { SchemeBuilder.Register(&Cluster{}, &ClusterList{}) } diff --git a/internal/rancher/management/v3/zz_generated.deepcopy.go b/internal/rancher/management/v3/zz_generated.deepcopy.go index eefdb9c9b..92f0e6a1b 100644 --- a/internal/rancher/management/v3/zz_generated.deepcopy.go +++ b/internal/rancher/management/v3/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ limitations under the License. package v3 import ( - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -31,7 +31,7 @@ func (in *Cluster) DeepCopyInto(out *Cluster) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. @@ -52,6 +52,21 @@ func (in *Cluster) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. +func (in *ClusterCondition) DeepCopy() *ClusterCondition { + if in == nil { + return nil + } + out := new(ClusterCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterList) DeepCopyInto(out *ClusterList) { *out = *in @@ -191,6 +206,11 @@ func (in *ClusterSpec) DeepCopy() *ClusterSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterCondition, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. diff --git a/main.go b/main.go index 4172c7558..6d1d1849b 100644 --- a/main.go +++ b/main.go @@ -196,14 +196,6 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { os.Exit(1) } - // TODO: set CAPIImportReconciler based on the value of managementV3Cluster? 
- // - management.cattle.io/v3 and provisioning.cattle.io/v1 are not supported together - // - either one or the other will need to be reconciled - // - will use a two-differentiated-controller approach - // - each reconciler will watch its otwn resource of interest - // - use feature.Gates to enable/disable the managementV3Cluster feature - // - if no feature is provided: default to provisioning.cattle.io/v1 - // - current behavior should not be altered if feature.Gates.Enabled(feature.ManagementV3Cluster) { setupLog.Info("enabling CAPI cluster import controller for `management.cattle.io/v3` resources") diff --git a/test/e2e/const.go b/test/e2e/const.go index 30b6a0bd3..fe70b245b 100644 --- a/test/e2e/const.go +++ b/test/e2e/const.go @@ -127,4 +127,8 @@ const ( FullTestLabel = "full" DontRunLabel = "dontrun" LocalTestLabel = "local" + + CapiClusterOwnerLabel = "cluster-api.cattle.io/capi-cluster-owner" + CapiClusterOwnerNamespaceLabel = "cluster-api.cattle.io/capi-cluster-owner-ns" + OwnedLabelName = "cluster-api.cattle.io/owned" ) diff --git a/test/e2e/specs/import_gitops.go b/test/e2e/specs/import_gitops.go index e7e9bcbcc..c1b51fcef 100644 --- a/test/e2e/specs/import_gitops.go +++ b/test/e2e/specs/import_gitops.go @@ -28,6 +28,7 @@ import ( "strconv" . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -241,10 +242,12 @@ func CreateUsingGitOpsSpec(ctx context.Context, inputGetter func() CreateUsingGi }, originalKubeconfig) By("Waiting for the rancher cluster record to appear") - rancherCluster = &provisioningv1.Cluster{ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace.Name, - Name: turtlesnaming.Name(capiCluster.Name).ToRancherName(), - }} + rancherCluster = &provisioningv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace.Name, + Name: turtlesnaming.Name(capiCluster.Name).ToRancherName(), + }, + } Eventually(komega.Get(rancherCluster), input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed()) By("Waiting for the rancher cluster to have a deployed agent") @@ -274,7 +277,6 @@ func CreateUsingGitOpsSpec(ctx context.Context, inputGetter func() CreateUsingGi }, rancherConnectRes) Expect(rancherConnectRes.Error).NotTo(HaveOccurred(), "Failed getting nodes with Rancher Kubeconfig") Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes return non-zero exit code") - }) AfterEach(func() { diff --git a/test/e2e/specs/import_gitops_mgmtv3.go b/test/e2e/specs/import_gitops_mgmtv3.go new file mode 100644 index 000000000..8e04834ff --- /dev/null +++ b/test/e2e/specs/import_gitops_mgmtv3.go @@ -0,0 +1,317 @@ +//go:build e2e +// +build e2e + +/* +Copyright © 2023 - 2024 SUSE LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package specs + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "strconv" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest/komega" + + managementv3 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/management/v3" + "github.com/rancher-sandbox/rancher-turtles/test/e2e" + turtlesframework "github.com/rancher-sandbox/rancher-turtles/test/framework" + "github.com/rancher-sandbox/rancher-turtles/test/testenv" +) + +type CreateMgmtV3UsingGitOpsSpecInput struct { + E2EConfig *clusterctl.E2EConfig + BootstrapClusterProxy framework.ClusterProxy + ClusterctlConfigPath string + ArtifactFolder string + RancherServerURL string + + ClusterctlBinaryPath string + ClusterTemplate []byte + ClusterName string + AdditionalTemplateVariables map[string]string + + CAPIClusterCreateWaitName string + DeleteClusterWaitName string + + // ControlPlaneMachineCount defines the number of control plane machines to be added to the workload cluster. + // If not specified, 1 will be used. + ControlPlaneMachineCount *int + + // WorkerMachineCount defines the number of worker machines to be added to the workload cluster. + // If not specified, 1 will be used. + WorkerMachineCount *int + + GitAddr string + GitAuthSecretName string + + SkipCleanup bool + SkipDeletionTest bool + + LabelNamespace bool + + // management.cattle.io specific + CapiClusterOwnerLabel string + CapiClusterOwnerNamespaceLabel string + OwnedLabelName string +} + +// CreateMgmtV3UsingGitOpsSpec implements a spec that will create a cluster via Fleet and test that it +// automatically imports into Rancher Manager.
+func CreateMgmtV3UsingGitOpsSpec(ctx context.Context, inputGetter func() CreateMgmtV3UsingGitOpsSpecInput) { + var ( + specName = "creategitops" + input CreateMgmtV3UsingGitOpsSpecInput + namespace *corev1.Namespace + repoName string + cancelWatches context.CancelFunc + capiCluster *types.NamespacedName + rancherKubeconfig *turtlesframework.RancherGetClusterKubeconfigResult + originalKubeconfig *turtlesframework.RancherGetClusterKubeconfigResult + rancherConnectRes *turtlesframework.RunCommandResult + rancherCluster *managementv3.Cluster + capiClusterCreateWait []interface{} + deleteClusterWait []interface{} + ) + + BeforeEach(func() { + Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName) + input = inputGetter() + Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName) + Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) + Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. 
input.ArtifactFolder can't be created for %s spec", specName)
+
+		Expect(input.E2EConfig.Variables).To(HaveKey(e2e.KubernetesManagementVersionVar))
+		namespace, cancelWatches = e2e.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
+		repoName = e2e.CreateRepoName(specName)
+
+		capiClusterCreateWait = input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), input.CAPIClusterCreateWaitName)
+		Expect(capiClusterCreateWait).ToNot(BeNil(), "Failed to get wait intervals %s", input.CAPIClusterCreateWaitName)
+
+		deleteClusterWait = input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), input.DeleteClusterWaitName)
+		Expect(deleteClusterWait).ToNot(BeNil(), "Failed to get wait intervals %s", input.DeleteClusterWaitName)
+
+		capiCluster = &types.NamespacedName{
+			Namespace: namespace.Name,
+			Name:      input.ClusterName,
+		}
+
+		rancherKubeconfig = new(turtlesframework.RancherGetClusterKubeconfigResult)
+		originalKubeconfig = new(turtlesframework.RancherGetClusterKubeconfigResult)
+		rancherConnectRes = new(turtlesframework.RunCommandResult)
+
+		komega.SetClient(input.BootstrapClusterProxy.GetClient())
+		komega.SetContext(ctx)
+	})
+
+	It("Should import a cluster using gitops", func() {
+		controlPlaneMachineCount := 1
+		if input.ControlPlaneMachineCount != nil {
+			controlPlaneMachineCount = *input.ControlPlaneMachineCount
+		}
+
+		workerMachineCount := 1
+		if input.WorkerMachineCount != nil {
+			workerMachineCount = *input.WorkerMachineCount
+		}
+
+		if input.LabelNamespace {
+			turtlesframework.AddLabelsToNamespace(ctx, turtlesframework.AddLabelsToNamespaceInput{
+				ClusterProxy: input.BootstrapClusterProxy,
+				Name:         namespace.Name,
+				Labels: map[string]string{
+					"cluster-api.cattle.io/rancher-auto-import": "true",
+				},
+			})
+		}
+
+		By("Create Git repository")
+
+		repoCloneAddr := turtlesframework.GiteaCreateRepo(ctx, turtlesframework.GiteaCreateRepoInput{
+			ServerAddr: input.GitAddr,
+			RepoName:   repoName,
+			Username:   
input.E2EConfig.GetVariable(e2e.GiteaUserNameVar),
+			Password:   input.E2EConfig.GetVariable(e2e.GiteaUserPasswordVar),
+		})
+		repoDir := turtlesframework.GitCloneRepo(ctx, turtlesframework.GitCloneRepoInput{
+			Address:  repoCloneAddr,
+			Username: input.E2EConfig.GetVariable(e2e.GiteaUserNameVar),
+			Password: input.E2EConfig.GetVariable(e2e.GiteaUserPasswordVar),
+		})
+
+		By("Create fleet repository structure")
+
+		clustersDir := filepath.Join(repoDir, "clusters")
+		Expect(os.MkdirAll(clustersDir, os.ModePerm)).To(Succeed(), "Failed to create clusters directory in fleet repo")
+
+		additionalVars := map[string]string{
+			"CLUSTER_NAME":                input.ClusterName,
+			"WORKER_MACHINE_COUNT":        strconv.Itoa(workerMachineCount),
+			"CONTROL_PLANE_MACHINE_COUNT": strconv.Itoa(controlPlaneMachineCount),
+		}
+		for k, v := range input.AdditionalTemplateVariables {
+			additionalVars[k] = v
+		}
+
+		clusterPath := filepath.Join(clustersDir, fmt.Sprintf("%s.yaml", input.ClusterName))
+		Expect(turtlesframework.ApplyFromTemplate(ctx, turtlesframework.ApplyFromTemplateInput{
+			Getter:                        input.E2EConfig.GetVariable,
+			Template:                      input.ClusterTemplate,
+			OutputFilePath:                clusterPath,
+			AddtionalEnvironmentVariables: additionalVars,
+		})).To(Succeed())
+
+		fleetPath := filepath.Join(clustersDir, "fleet.yaml")
+		turtlesframework.FleetCreateFleetFile(ctx, turtlesframework.FleetCreateFleetFileInput{
+			Namespace: namespace.Name,
+			FilePath:  fleetPath,
+		})
+
+		By("Committing changes to fleet repo and pushing")
+
+		turtlesframework.GitCommitAndPush(ctx, turtlesframework.GitCommitAndPushInput{
+			CloneLocation: repoDir,
+			Username:      input.E2EConfig.GetVariable(e2e.GiteaUserNameVar),
+			Password:      input.E2EConfig.GetVariable(e2e.GiteaUserPasswordVar),
+			CommitMessage: "ci: add clusters bundle",
+		})
+
+		By("Applying GitRepo")
+
+		turtlesframework.FleetCreateGitRepo(ctx, turtlesframework.FleetCreateGitRepoInput{
+			Name:            repoName,
+			Namespace:       turtlesframework.FleetLocalNamespace,
+			Branch:          turtlesframework.DefaultBranchName,
+			Repo:            repoCloneAddr,
+			FleetGeneration: 1,
+			Paths:           
[]string{"clusters"},
+			ClientSecretName: input.GitAuthSecretName,
+			ClusterProxy:     input.BootstrapClusterProxy,
+		})
+
+		By("Waiting for the CAPI cluster to appear")
+		capiCluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace.Name,
+			Name:      input.ClusterName,
+		}}
+		Eventually(
+			komega.Get(capiCluster),
+			input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).
+			Should(Succeed(), "Failed to apply CAPI cluster definition to cluster via Fleet")
+
+		By("Waiting for cluster control plane to be Ready")
+		Eventually(komega.Object(capiCluster), capiClusterCreateWait...).Should(HaveField("Status.ControlPlaneReady", BeTrue()))
+
+		By("Waiting for the CAPI cluster to be connectable")
+		Eventually(func() error {
+			remoteClient := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, capiCluster.Namespace, capiCluster.Name).GetClient()
+			namespaces := &corev1.NamespaceList{}
+
+			return remoteClient.List(ctx, namespaces)
+		}, capiClusterCreateWait...).Should(Succeed(), "Failed to connect to workload cluster using CAPI kubeconfig")
+
+		By("Storing the original CAPI cluster kubeconfig")
+		turtlesframework.RancherGetOriginalKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{
+			Getter:          input.BootstrapClusterProxy.GetClient(),
+			SecretName:      fmt.Sprintf("%s-kubeconfig", capiCluster.Name),
+			Namespace:       capiCluster.Namespace,
+			WriteToTempFile: true,
+		}, originalKubeconfig)
+
+		By("Waiting for the rancher cluster record to appear")
+		rancherClusters := &managementv3.ClusterList{}
+		selectors := []client.ListOption{
+			client.MatchingLabels{
+				input.CapiClusterOwnerLabel:          capiCluster.Name,
+				input.CapiClusterOwnerNamespaceLabel: capiCluster.Namespace,
+				input.OwnedLabelName:                 "",
+			},
+		}
+		Eventually(func() bool {
+			if err := komega.List(rancherClusters, selectors...)(); err != nil {
+				return false
+			}
+			return len(rancherClusters.Items) == 1
+		}, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue(), "Expected exactly one Rancher cluster record for the CAPI cluster")
+		rancherCluster = &rancherClusters.Items[0]
+		Eventually(komega.Get(rancherCluster), 
input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed()) + + By("Waiting for the rancher cluster to have a deployed agent") + Eventually(func() bool { + Expect(komega.Get(rancherCluster)).To(Succeed()) + fmt.Println("rancherCluster.Status.Conditions: ", rancherCluster.Status.Conditions) + return managementv3.ClusterConditionAgentDeployed.IsTrue(rancherCluster) + }, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue()) + + By("Waiting for the rancher cluster to be ready") + Eventually(func() bool { + Expect(komega.Get(rancherCluster)).To(Succeed()) + fmt.Println("rancherCluster.Status.Conditions: ", rancherCluster.Status.Conditions) + return managementv3.ClusterConditionReady.IsTrue(rancherCluster) + }, input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(BeTrue()) + + By("Waiting for the CAPI cluster to be connectable using Rancher kubeconfig") + turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{ + Getter: input.BootstrapClusterProxy.GetClient(), + SecretName: fmt.Sprintf("%s-capi-kubeconfig", capiCluster.Name), + Namespace: capiCluster.Namespace, + RancherServerURL: input.RancherServerURL, + WriteToTempFile: true, + }, rancherKubeconfig) + + turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{ + Command: "kubectl", + Args: []string{ + "--kubeconfig", + rancherKubeconfig.TempFilePath, + "get", + "nodes", + "--insecure-skip-tls-verify", + }, + }, rancherConnectRes) + Expect(rancherConnectRes.Error).NotTo(HaveOccurred(), "Failed getting nodes with Rancher Kubeconfig") + Expect(rancherConnectRes.ExitCode).To(Equal(0), "Getting nodes return non-zero exit code") + }) + + AfterEach(func() { + err := testenv.CollectArtifacts(ctx, originalKubeconfig.TempFilePath, path.Join(input.ArtifactFolder, input.BootstrapClusterProxy.GetName(), input.ClusterName)) + if err != nil { 
+ fmt.Printf("Failed to collect artifacts for the child cluster: %v\n", err) + } + + By("Deleting GitRepo from Rancher") + turtlesframework.FleetDeleteGitRepo(ctx, turtlesframework.FleetDeleteGitRepoInput{ + Name: repoName, + Namespace: turtlesframework.FleetLocalNamespace, + ClusterProxy: input.BootstrapClusterProxy, + }) + + By("Waiting for the rancher cluster record to be removed") + Eventually(komega.Get(rancherCluster), deleteClusterWait...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be deleted") + + e2e.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, capiCluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + }) +} diff --git a/test/e2e/suites/import-gitops/import_gitops_test.go b/test/e2e/suites/import-gitops/import_gitops_test.go index 602df09aa..1c6643211 100644 --- a/test/e2e/suites/import-gitops/import_gitops_test.go +++ b/test/e2e/suites/import-gitops/import_gitops_test.go @@ -33,7 +33,6 @@ import ( ) var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.ShortTestLabel, e2e.FullTestLabel), func() { - BeforeEach(func() { SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) SetContext(ctx) @@ -63,7 +62,6 @@ var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionalit }) var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.FullTestLabel), func() { - BeforeEach(func() { komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) komega.SetContext(ctx) @@ -93,7 +91,6 @@ var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality shoul }) var _ = Describe("[Azure] [AKS] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.DontRunLabel), func() { - BeforeEach(func() { 
SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) SetContext(ctx) @@ -122,7 +119,6 @@ var _ = Describe("[Azure] [AKS] Create and delete CAPI cluster functionality sho }) var _ = Describe("[vSphere] [Kubeadm] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.LocalTestLabel), func() { - BeforeEach(func() { SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) SetContext(ctx) diff --git a/test/e2e/suites/import-gitops/suite_test.go b/test/e2e/suites/import-gitops/suite_test.go index e4f5daf56..e65140933 100644 --- a/test/e2e/suites/import-gitops/suite_test.go +++ b/test/e2e/suites/import-gitops/suite_test.go @@ -24,9 +24,8 @@ import ( "fmt" "os" "path/filepath" - "testing" - "runtime" + "testing" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" diff --git a/test/e2e/suites/managementv3/managementv3_test.go b/test/e2e/suites/managementv3/managementv3_test.go new file mode 100644 index 000000000..d383bd36a --- /dev/null +++ b/test/e2e/suites/managementv3/managementv3_test.go @@ -0,0 +1,125 @@ +//go:build e2e +// +build e2e + +/* +Copyright © 2023 - 2024 SUSE LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managementv3 + +import ( + . 
"github.com/onsi/ginkgo/v2" + "sigs.k8s.io/controller-runtime/pkg/envtest/komega" + + "k8s.io/utils/ptr" + + "github.com/rancher-sandbox/rancher-turtles/test/e2e" + "github.com/rancher-sandbox/rancher-turtles/test/e2e/specs" +) + +var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.ShortTestLabel, e2e.FullTestLabel), func() { + BeforeEach(func() { + komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetContext(ctx) + }) + + specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { + return specs.CreateMgmtV3UsingGitOpsSpecInput{ + E2EConfig: e2eConfig, + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + ClusterctlConfigPath: flagVals.ConfigPath, + ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, + ArtifactFolder: flagVals.ArtifactFolder, + ClusterTemplate: e2e.CAPIDockerKubeadm, + ClusterName: "highlander-e2e-clusterv3-1", + ControlPlaneMachineCount: ptr.To[int](1), + WorkerMachineCount: ptr.To[int](1), + GitAddr: giteaResult.GitAddress, + GitAuthSecretName: e2e.AuthSecretName, + SkipCleanup: false, + SkipDeletionTest: false, + LabelNamespace: true, + RancherServerURL: hostName, + CAPIClusterCreateWaitName: "wait-rancher", + DeleteClusterWaitName: "wait-controllers", + CapiClusterOwnerLabel: e2e.CapiClusterOwnerLabel, + CapiClusterOwnerNamespaceLabel: e2e.CapiClusterOwnerNamespaceLabel, + OwnedLabelName: e2e.OwnedLabelName, + } + }) +}) + +var _ = Describe("[Azure] [AKS] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.DontRunLabel), func() { + BeforeEach(func() { + komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetContext(ctx) + }) + + specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { + return specs.CreateMgmtV3UsingGitOpsSpecInput{ + E2EConfig: e2eConfig, + 
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + ClusterctlConfigPath: flagVals.ConfigPath, + ArtifactFolder: flagVals.ArtifactFolder, + ClusterTemplate: e2e.CAPIAzureAKSMMP, + ClusterName: "highlander-e2e-clusterv3-2", + ControlPlaneMachineCount: ptr.To[int](1), + WorkerMachineCount: ptr.To[int](1), + GitAddr: giteaResult.GitAddress, + GitAuthSecretName: e2e.AuthSecretName, + SkipCleanup: false, + SkipDeletionTest: false, + LabelNamespace: true, + RancherServerURL: hostName, + CAPIClusterCreateWaitName: "wait-capz-create-cluster", + DeleteClusterWaitName: "wait-aks-delete", + CapiClusterOwnerLabel: e2e.CapiClusterOwnerLabel, + CapiClusterOwnerNamespaceLabel: e2e.CapiClusterOwnerNamespaceLabel, + OwnedLabelName: e2e.OwnedLabelName, + } + }) +}) + +var _ = Describe("[AWS] [EKS] - [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.FullTestLabel), func() { + BeforeEach(func() { + komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetContext(ctx) + }) + + specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { + return specs.CreateMgmtV3UsingGitOpsSpecInput{ + E2EConfig: e2eConfig, + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + ClusterctlConfigPath: flagVals.ConfigPath, + ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, + ArtifactFolder: flagVals.ArtifactFolder, + ClusterTemplate: e2e.CAPIAwsEKSMMP, + ClusterName: "highlander-e2e-clusterv3-3", + ControlPlaneMachineCount: ptr.To[int](1), + WorkerMachineCount: ptr.To[int](1), + GitAddr: giteaResult.GitAddress, + GitAuthSecretName: e2e.AuthSecretName, + SkipCleanup: false, + SkipDeletionTest: false, + LabelNamespace: true, + RancherServerURL: hostName, + CAPIClusterCreateWaitName: "wait-capa-create-cluster", + DeleteClusterWaitName: "wait-eks-delete", + CapiClusterOwnerLabel: e2e.CapiClusterOwnerLabel, + CapiClusterOwnerNamespaceLabel: 
e2e.CapiClusterOwnerNamespaceLabel, + OwnedLabelName: e2e.OwnedLabelName, + } + }) +}) diff --git a/test/e2e/suites/managementv3/suite_test.go b/test/e2e/suites/managementv3/suite_test.go new file mode 100644 index 000000000..059c8c160 --- /dev/null +++ b/test/e2e/suites/managementv3/suite_test.go @@ -0,0 +1,225 @@ +//go:build e2e +// +build e2e + +/* +Copyright © 2023 - 2024 SUSE LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managementv3 + +import ( + "context" + "fmt" + "os" + "path/filepath" + "runtime" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/klog/v2" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/rancher-sandbox/rancher-turtles/test/e2e" + turtlesframework "github.com/rancher-sandbox/rancher-turtles/test/framework" + "github.com/rancher-sandbox/rancher-turtles/test/testenv" +) + +// Test suite flags. +var ( + flagVals *e2e.FlagValues +) + +// Test suite global vars. +var ( + // e2eConfig to be used for this test, read from configPath. + e2eConfig *clusterctl.E2EConfig + + // clusterctlConfigPath to be used for this test, created by generating a clusterctl local repository + // with the providers specified in the configPath. + clusterctlConfigPath string + + // hostName is the host name for the Rancher Manager server. 
+ hostName string + + ctx = context.Background() + + setupClusterResult *testenv.SetupTestClusterResult + giteaResult *testenv.DeployGiteaResult +) + +func init() { + flagVals = &e2e.FlagValues{} + e2e.InitFlags(flagVals) +} + +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + + ctrl.SetLogger(klog.Background()) + + RunSpecs(t, "rancher-turtles-e2e-managementv3") +} + +var _ = BeforeSuite(func() { + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") + Expect(os.MkdirAll(flagVals.ArtifactFolder, 0755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder) + Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.") + Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.") + + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + + By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) + + hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) + + setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ + UseExistingCluster: flagVals.UseExistingCluster, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: flagVals.ArtifactFolder, + Hostname: hostName, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementLegacyVersionVar), + // KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), + IsolatedMode: flagVals.IsolatedMode, + HelmBinaryPath: flagVals.HelmBinaryPath, + }) + + if flagVals.IsolatedMode { + hostName = 
setupClusterResult.IsolatedHostName + } + + testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: flagVals.HelmBinaryPath, + HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), + IsolatedMode: flagVals.IsolatedMode, + NginxIngress: e2e.NginxIngress, + NginxIngressNamespace: e2e.NginxIngressNamespace, + IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), + NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), + NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), + NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), + NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), + DefaultIngressClassPatch: e2e.IngressClassPatch, + }) + + testenv.DeployRancher(ctx, testenv.DeployRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: flagVals.HelmBinaryPath, + HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher.yaml"), + InstallCertManager: true, + CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), + CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), + CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), + RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherRepoNameVar), + RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), + RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), + RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), + RancherHost: hostName, + RancherNamespace: e2e.RancherNamespace, + RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), + RancherPatches: [][]byte{e2e.RancherSettingPatch}, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + ControllerWaitInterval: 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + IsolatedMode: flagVals.IsolatedMode, + RancherIngressConfig: e2e.IngressConfig, + RancherServicePatch: e2e.RancherServicePatch, + Variables: e2eConfig.Variables, + }) + + testenv.DeployRancherTurtles(ctx, testenv.DeployRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: flagVals.HelmBinaryPath, + ChartPath: flagVals.ChartPath, + CAPIProvidersYAML: e2e.CapiProviders, + Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Image: fmt.Sprintf("ghcr.io/rancher-sandbox/rancher-turtles-%s", runtime.GOARCH), + Tag: "v0.0.1", + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + AdditionalValues: map[string]string{ + "rancherTurtles.features.managementv3-cluster.enabled": "true", // enable management.cattle.io/v3 controller + }, + }) + + if !shortTestOnly() && !localTestOnly() { + By("Running full tests, deploying additional infrastructure providers") + awsCreds := e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar) + Expect(awsCreds).ToNot(BeEmpty(), "AWS creds required for full test") + + testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + CAPIProvidersSecretsYAML: [][]byte{ + e2e.AWSProviderSecret, + e2e.AzureIdentitySecret, + }, + CAPIProvidersYAML: e2e.FullProviders, + TemplateData: map[string]string{ + "AWSEncodedCredentials": e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar), + }, + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + WaitForDeployments: []testenv.NamespaceName{ + { + Name: "capa-controller-manager", + Namespace: "capa-system", + }, + { + Name: "capz-controller-manager", + Namespace: "capz-system", + }, + }, + }) + } + + giteaResult = 
testenv.DeployGitea(ctx, testenv.DeployGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: flagVals.HelmBinaryPath, + ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), + ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), + ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), + ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), + ValuesFilePath: "../../data/gitea/values.yaml", + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-getservice"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }) +}) + +var _ = AfterSuite(func() { + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ + SetupTestClusterResult: *setupClusterResult, + SkipCleanup: flagVals.SkipCleanup, + ArtifactFolder: flagVals.ArtifactFolder, + }) +}) + +func shortTestOnly() bool { + return GinkgoLabelFilter() == e2e.ShortTestLabel +} + +func localTestOnly() bool { + return GinkgoLabelFilter() == e2e.LocalTestLabel +}