fix: e2e tests #213

Open · wants to merge 2 commits into main
35 changes: 35 additions & 0 deletions .github/workflows/ci.yml

@@ -28,6 +28,41 @@ jobs:
 
       - run: make test
 
+  e2e:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: "1.20"
+
+      - name: Install Go dependencies
+        run: |
+          go install github.com/onsi/ginkgo/v2/ginkgo
+          go install sigs.k8s.io/kwok/cmd/{kwok,kwokctl}@v0.3.0
+
+      - name: Build cluster
+        run: CLUSTER_PROVIDER=kwok make dev-up
+
+      - name: Build images
+        run: make images
+
+      - name: Run Kubeadmiral
+        run: |
+          kwokctl get kubeconfig --name kubeadmiral-host > $HOME/.kube/kubeadmiral/kubeadmiral-host.yaml
+          docker create \
+            --network host \
+            --name kubeadmiral-controller-manager \
+            ghcr.io/kubewharf/kubeadmiral-controller-manager:latest \
+            /kubeadmiral-controller-manager --kubeconfig=/etc/kubeconfig --klog-v=4 --cluster-join-timeout=1m
+          docker cp $HOME/.kube/kubeadmiral/kubeadmiral-host.yaml kubeadmiral-controller-manager:/etc/kubeconfig
+          docker start kubeadmiral-controller-manager
+
+      - name: Run tests
+        run: KUBECONFIG=$HOME/.kube/kubeadmiral/kubeadmiral-host.yaml EXTRA_GINKGO_FLAGS="-v" make e2e
+
   lint:
     runs-on: ubuntu-latest
     steps:
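The `e2e` job builds a kwok-backed host cluster, runs the controller-manager image against it over the host network, and then drives the Ginkgo suite through `make e2e`, with `EXTRA_GINKGO_FLAGS="-v"` forwarded to the `ginkgo` CLI for verbose output. For orientation, a minimal spec of the kind that suite contains looks like the sketch below; the suite and label names here are illustrative assumptions, not taken from this PR.

```go
package example

import (
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// A minimal Ginkgo v2 spec; `ginkgo -v` (via EXTRA_GINKGO_FLAGS) prints each
// spec as it runs, and labels allow running subsets via --label-filter.
var _ = ginkgo.Describe("Example Suite", ginkgo.Label("example"), func() {
	ginkgo.It("demonstrates the spec shape", func() {
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})
```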
3 changes: 2 additions & 1 deletion test/e2e/automigration/automigration.go

@@ -52,7 +52,7 @@ var (
 	assertNoAutoMigrationDuration = 20 * time.Second
 )
 
-var _ = ginkgo.Describe("auto migration", autoMigrationTestLabel, func() {
+var _ = ginkgo.Describe("Auto Migration", autoMigrationTestLabel, func() {
 	f := framework.NewFramework("auto-migration", framework.FrameworkOptions{CreateNamespace: true})
 
 	var clusters []*fedcorev1a1.FederatedCluster

@@ -161,6 +161,7 @@ var _ = ginkgo.Describe("auto migration", autoMigrationTestLabel, func() {
 				ctx, dp.Name, metav1.GetOptions{},
 			)
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			g.Expect(clusterDp.Status).ToNot(gomega.BeNil())
 			g.Expect(clusterDp.Status.ReadyReplicas).To(gomega.Equal(replicasPerCluster))
 		}).WithPolling(defaultPollingInterval).Should(gomega.Succeed())
 	})
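The added nil check guards the `ReadyReplicas` dereference on the next line. Note the split between `gomega.Expect` (fails the whole spec immediately) and `g.Expect` (fails only the current polling attempt): these assertions run inside Gomega's `Eventually` with a per-attempt `Gomega` receiver, roughly as in this sketch, where the client and object names are assumptions.

```go
// Poll until the member-cluster Deployment reports the expected ready
// replicas; assertions made through `g` fail only the current attempt,
// so Eventually keeps retrying until its timeout.
gomega.Eventually(func(g gomega.Gomega) {
	clusterDp, err := clusterClient.AppsV1().Deployments(namespace).Get(
		ctx, dp.Name, metav1.GetOptions{},
	)
	gomega.Expect(err).NotTo(gomega.HaveOccurred()) // unexpected errors abort the spec
	g.Expect(clusterDp.Status.ReadyReplicas).To(gomega.Equal(replicasPerCluster))
}).WithPolling(defaultPollingInterval).Should(gomega.Succeed())
```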
3 changes: 1 addition & 2 deletions test/e2e/federatedcluster/clusterdelete.go

@@ -87,9 +87,8 @@ var _ = ginkgo.Describe("Cluster Delete", federatedClusterTestLabels, func() {
 		// 3. service account info deleted from secret
 		secret, err = f.HostKubeClient().CoreV1().Secrets(framework.FedSystemNamespace).Get(ctx, secret.Name, metav1.GetOptions{})
 		gomega.Expect(err).ToNot(gomega.HaveOccurred())
-		token, ca := getServiceAccountInfo(secret)
+		token, _ := getServiceAccountInfo(secret)
 		gomega.Expect(token).To(gomega.BeNil(), "Token data not removed from cluster secret")
-		gomega.Expect(ca).To(gomega.BeNil(), "Token data not removed from cluster secret")
 	}
 
 	ginkgo.Context("Without cascading delete", func() {
3 changes: 1 addition & 2 deletions test/e2e/federatedcluster/clusterjoin.go

@@ -74,9 +74,8 @@ var _ = ginkgo.Describe("Cluster Join", federatedClusterTestLabels, func() {
 		ginkgo.By("Assert cluster secret not updated with service account information")
 		secret, err := f.HostKubeClient().CoreV1().Secrets(framework.FedSystemNamespace).Get(ctx, secret.Name, metav1.GetOptions{})
 		gomega.Expect(err).ToNot(gomega.HaveOccurred())
-		token, ca := getServiceAccountInfo(secret)
+		token, _ := getServiceAccountInfo(secret)
 		gomega.Expect(token).To(gomega.BeNil())
-		gomega.Expect(ca).To(gomega.BeNil())
 	})
 })
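Both cluster-lifecycle tests now discard the CA bundle and assert only on the token, presumably because the CA data is not expected to be removed from the secret. For context only, a plausible shape for the shared helper (hypothetical; the real implementation lives in the test package) reads the standard service-account secret keys:

```go
// Hypothetical sketch of getServiceAccountInfo: service-account secrets
// store the bearer token under "token" and the CA bundle under "ca.crt".
func getServiceAccountInfo(secret *corev1.Secret) (token, ca []byte) {
	return secret.Data[corev1.ServiceAccountTokenKey], secret.Data[corev1.ServiceAccountRootCAKey]
}
```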
62 changes: 55 additions & 7 deletions test/e2e/framework/framework.go

@@ -38,11 +38,14 @@ import (
 	"k8s.io/client-go/dynamic"
 	kubeclient "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/clientcmd"
 
 	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
 	fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned"
+	fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions"
 	"github.com/kubewharf/kubeadmiral/pkg/controllers/common"
+	"github.com/kubewharf/kubeadmiral/pkg/util/informermanager"
 	"github.com/kubewharf/kubeadmiral/test/e2e/framework/clusterprovider"
 )

@@ -83,6 +86,7 @@ var (
 	hostFedClient         fedclient.Interface
 	hostDynamicClient     dynamic.Interface
 	hostDiscoveryClient   discovery.DiscoveryInterface
+	ftcManager            informermanager.FederatedTypeConfigManager
 	clusterKubeClients    sync.Map
 	clusterFedClients     sync.Map
 	clusterDynamicClients sync.Map

@@ -93,7 +97,12 @@ func init() {
 	flag.StringVar(&master, "master", "", "The address of the host Kubernetes cluster.")
 	flag.StringVar(&kubeconfig, "kubeconfig", "", "The path of the kubeconfig for the host Kubernetes cluster.")
 	flag.Float64Var(&kubeAPIQPS, "kube-api-qps", 500, "The maximum QPS from each Kubernetes client.")
-	flag.IntVar(&kubeAPIBurst, "kube-api-burst", 1000, "The maximum burst for throttling requests from each Kubernetes client.")
+	flag.IntVar(
+		&kubeAPIBurst,
+		"kube-api-burst",
+		1000,
+		"The maximum burst for throttling requests from each Kubernetes client.",
+	)
 
 	flag.StringVar(&clusterProvider, "cluster-provider", "kwok", "The cluster provider [kwok,kind] to use.")
 	flag.StringVar(

@@ -102,11 +111,26 @@ func init() {
 		"kindest/node:v1.20.15@sha256:a32bf55309294120616886b5338f95dd98a2f7231519c7dedcec32ba29699394",
 		"The node image to use for creating kind test clusters, it should include the image digest.",
 	)
-	flag.StringVar(&kwokImagePrefix, "kwok-image-prefix", "registry.k8s.io", "The image prefix used by kwok to pull kubernetes images.")
-	flag.StringVar(&kwokKubeVersion, "kwok-kube-version", "v1.20.15", "The kubernetes version to be used for kwok member clusters")
+	flag.StringVar(
+		&kwokImagePrefix,
+		"kwok-image-prefix",
+		"registry.k8s.io",
+		"The image prefix used by kwok to pull kubernetes images.",
+	)
+	flag.StringVar(
+		&kwokKubeVersion,
+		"kwok-kube-version",
+		"v1.20.15",
+		"The kubernetes version to be used for kwok member clusters",
+	)
 
 	flag.BoolVar(&preserveClusters, "preserve-clusters", false, "If set, clusters created during testing are preserved")
-	flag.BoolVar(&preserveNamespace, "preserve-namespaces", false, "If set, namespaces created during testing are preserved")
+	flag.BoolVar(
+		&preserveNamespace,
+		"preserve-namespaces",
+		false,
+		"If set, namespaces created during testing are preserved",
+	)
 }
 
 var _ = ginkgo.SynchronizedBeforeSuite(

@@ -125,7 +149,7 @@
 
 		return bytes
 	},
-	func(data []byte) {
+	func(ctx context.Context, data []byte) {
 		params := []string{}
 		err := json.Unmarshal(data, &params)
 		gomega.Expect(err).ToNot(gomega.HaveOccurred())

@@ -150,6 +174,21 @@
 		hostDiscoveryClient, err = discovery.NewDiscoveryClientForConfig(restConfig)
 		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 
+		fedInformerFactory := fedinformers.NewSharedInformerFactory(hostFedClient, 0)
+		manager := informermanager.NewInformerManager(
+			hostDynamicClient,
+			fedInformerFactory.Core().V1alpha1().FederatedTypeConfigs(),
+			nil,
+		)
+		ftcManager = manager
+
+		fedInformerFactory.Start(ctx.Done())
+		manager.Start(ctx)
+
+		if !cache.WaitForNamedCacheSync("host-informer-manager", ctx.Done(), ftcManager.HasSynced) {
+			ginkgo.Fail("failed to wait for host informer manager cache sync")
+		}
+
 		clusterKubeClients = sync.Map{}
 		clusterFedClients = sync.Map{}
 		clusterDynamicClients = sync.Map{}

@@ -170,7 +209,9 @@
 				defaultClusterWaitTimeout,
 			)
 		default:
-			ginkgo.Fail(fmt.Sprintf("invalid cluster provider, %s or %s accepted", KwokClusterProvider, KindClusterProvider))
+			ginkgo.Fail(
+				fmt.Sprintf("invalid cluster provider, %s or %s accepted", KwokClusterProvider, KindClusterProvider),
+			)
 		}
 	},
 )

@@ -231,12 +272,19 @@ func (*framework) HostDiscoveryClient() discovery.DiscoveryInterface {
 	return hostDiscoveryClient
 }
 
+func (*framework) FTCManager() informermanager.FederatedTypeConfigManager {
+	return ftcManager
+}
+
 func (f *framework) TestNamespace() *corev1.Namespace {
 	gomega.Expect(f.namespace).ToNot(gomega.BeNil(), MessageUnexpectedError)
 	return f.namespace
 }
 
-func (f *framework) NewCluster(ctx context.Context, clusterModifiers ...ClusterModifier) (*fedcorev1a1.FederatedCluster, *corev1.Secret) {
+func (f *framework) NewCluster(
+	ctx context.Context,
+	clusterModifiers ...ClusterModifier,
+) (*fedcorev1a1.FederatedCluster, *corev1.Secret) {
 	clusterName := strings.ToLower(fmt.Sprintf("%s-%s", f.name, rand.String(12)))
 	cluster, secret := f.clusterProvider.NewCluster(ctx, clusterName)
 
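The suite now starts a `FederatedTypeConfig` informer manager during `SynchronizedBeforeSuite`, blocks until its caches sync, and exposes it to tests through the new `FTCManager()` accessor. That lets a test resolve the FTC for an arbitrary GVK instead of hard-coding per-type federated resource names, roughly like this sketch (the Deployment GVK is just an example):

```go
// Look up the FederatedTypeConfig registered for Deployments; `exists`
// is false when no FTC is known for the GVK.
gvk := appsv1.SchemeGroupVersion.WithKind("Deployment")
ftc, exists := f.FTCManager().GetResourceFTC(gvk)
gomega.Expect(exists).To(gomega.BeTrue())

// The FTC name feeds into generated federated object names (see the
// naming.GenerateFederatedObjectName call later in this PR).
name := naming.GenerateFederatedObjectName("my-deployment", ftc.Name)
_ = name
```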
2 changes: 2 additions & 0 deletions test/e2e/framework/interface.go

@@ -26,6 +26,7 @@ import (
 
 	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
 	fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned"
+	"github.com/kubewharf/kubeadmiral/pkg/util/informermanager"
 )
 
 type FrameworkOptions struct {

@@ -37,6 +38,7 @@ type Framework interface {
 	HostFedClient() fedclient.Interface
 	HostDynamicClient() dynamic.Interface
 	HostDiscoveryClient() discovery.DiscoveryInterface
+	FTCManager() informermanager.FederatedTypeConfigManager
 
 	Name() string
 	TestNamespace() *corev1.Namespace
4 changes: 2 additions & 2 deletions test/e2e/framework/policies/propagationpolicy.go

@@ -38,12 +38,12 @@ func PropagationPolicyForClustersWithPlacements(
 		},
 		Spec: fedcorev1a1.PropagationPolicySpec{
 			SchedulingMode: fedcorev1a1.SchedulingModeDuplicate,
-			Placements:     []fedcorev1a1.ClusterReference{},
+			Placements:     []fedcorev1a1.DesiredPlacement{},
 		},
 	}
 
 	for _, c := range clusters {
-		policy.Spec.Placements = append(policy.Spec.Placements, fedcorev1a1.ClusterReference{Cluster: c.Name})
+		policy.Spec.Placements = append(policy.Spec.Placements, fedcorev1a1.DesiredPlacement{Cluster: c.Name})
 	}
 
 	return policy
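This tracks the rename of `ClusterReference` to `DesiredPlacement` in the v1alpha1 types; callers of the helper are unchanged. For reference, the spec it produces is equivalent to the following hand-rolled sketch, with illustrative cluster names:

```go
// A Duplicate-mode policy placing resources on two named member clusters,
// built from the v1alpha1 types shown in the diff above.
spec := fedcorev1a1.PropagationPolicySpec{
	SchedulingMode: fedcorev1a1.SchedulingModeDuplicate,
	Placements: []fedcorev1a1.DesiredPlacement{
		{Cluster: "member-1"},
		{Cluster: "member-2"},
	},
}
```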
8 changes: 4 additions & 4 deletions test/e2e/resourcepropagation/cronjobs.go

@@ -43,7 +43,8 @@ var _ = ginkgo.Describe("CronJob Propagation", func() {
 	resourcePropagationTest(
 		f,
 		&resourcePropagationTestConfig[*batchv1.CronJob]{
-			gvr:           batchv1.SchemeGroupVersion.WithResource("jobs"),
+			gvr:           batchv1.SchemeGroupVersion.WithResource("cronjobs"),
+			gvk:           batchv1.SchemeGroupVersion.WithKind("CronJob"),
 			objectFactory: resources.GetSimpleV1CronJob,
 			clientGetter: func(client kubernetes.Interface, namespace string) resourceClient[*batchv1.CronJob] {
 				return client.BatchV1().CronJobs(namespace)

@@ -56,7 +57,6 @@
 				return resources.IsV1CronJobScheduledOnce(cronjob), nil
 			},
 			statusCollection: &resourceStatusCollectionTestConfig{
-				gvr:  fedtypesv1a1.SchemeGroupVersion.WithResource("federatedcronjobstatuses"),
 				path: "status",
 			},
 		},

@@ -66,7 +66,8 @@
 	resourcePropagationTest(
 		f,
 		&resourcePropagationTestConfig[*batchv1b1.CronJob]{
-			gvr:           batchv1.SchemeGroupVersion.WithResource("jobs"),
+			gvr:           batchv1b1.SchemeGroupVersion.WithResource("cronjobs"),
+			gvk:           batchv1b1.SchemeGroupVersion.WithKind("CronJob"),
 			objectFactory: resources.GetSimpleV1Beta1CronJob,
 			clientGetter: func(client kubernetes.Interface, namespace string) resourceClient[*batchv1b1.CronJob] {
 				return client.BatchV1beta1().CronJobs(namespace)

@@ -79,7 +80,6 @@
 				return resources.IsV1Beta1CronJobScheduledOnce(cronjob), nil
 			},
 			statusCollection: &resourceStatusCollectionTestConfig{
-				gvr:  fedtypesv1a1.SchemeGroupVersion.WithResource("federatedcronjobstatuses"),
 				path: "status",
 			},
 		},
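Two bugs are fixed here: both CronJob configs previously pointed their GVR at the `jobs` resource, and the v1beta1 config additionally derived it from the wrong group version (`batchv1` instead of `batchv1b1`). The GVR names the REST resource (lowercase plural) while the GVK names the type's kind, and the two must describe the same type:

```go
// GVR is what dynamic clients address ("cronjobs"); GVK identifies the kind
// ("CronJob"). After the fix both are derived from the same group/version.
gvr := batchv1.SchemeGroupVersion.WithResource("cronjobs") // batch/v1, resource=cronjobs
gvk := batchv1.SchemeGroupVersion.WithKind("CronJob")      // batch/v1, kind=CronJob
```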
2 changes: 1 addition & 1 deletion test/e2e/resourcepropagation/deployments.go

@@ -34,6 +34,7 @@ var _ = ginkgo.Describe("Deployment Propagation", func() {
 		f,
 		&resourcePropagationTestConfig[*appsv1.Deployment]{
 			gvr:           appsv1.SchemeGroupVersion.WithResource("deployments"),
+			gvk:           appsv1.SchemeGroupVersion.WithKind("Deployment"),
 			objectFactory: resources.GetSimpleDeployment,
 			clientGetter: func(client kubernetes.Interface, namespace string) resourceClient[*appsv1.Deployment] {
 				return client.AppsV1().Deployments(namespace)

@@ -46,7 +47,6 @@
 				return resources.IsDeploymentProgressing(deployment), nil
 			},
 			statusCollection: &resourceStatusCollectionTestConfig{
-				gvr:  fedtypesv1a1.SchemeGroupVersion.WithResource("federateddeploymentstatuses"),
 				path: "status",
 			},
 		},
49 changes: 26 additions & 23 deletions test/e2e/resourcepropagation/framework.go

@@ -31,11 +31,12 @@ import (
 	pkgruntime "k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	jsonutil "k8s.io/apimachinery/pkg/util/json"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 
 	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
-	controllerutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util"
+	"github.com/kubewharf/kubeadmiral/pkg/util/naming"
 	"github.com/kubewharf/kubeadmiral/test/e2e/framework"
 	"github.com/kubewharf/kubeadmiral/test/e2e/framework/policies"
 	"github.com/kubewharf/kubeadmiral/test/e2e/framework/util"

@@ -71,14 +72,13 @@
 }
 
 type resourceStatusCollectionTestConfig struct {
-	// GVR of the federatedstatus.
-	gvr schema.GroupVersionResource
 	// Path to a field in the resource whose value should be collected by status collection.
 	path string
 }
 
 type resourcePropagationTestConfig[T k8sObject] struct {
 	gvr              schema.GroupVersionResource
+	gvk              schema.GroupVersionKind
 	statusCollection *resourceStatusCollectionTestConfig
 	// Returns an object template with the given name.
 	objectFactory func(name string) T

@@ -153,18 +153,17 @@
 	})
 
 	ginkgo.By("Updating the source object", func() {
-		patch := []map[string]interface{}{
-			{
-				"op": "add",
-				// escape the / in annotation key
-				"path":  "/metadata/annotations/" + strings.Replace(resourceUpdateTestAnnotationKey, "/", "~1", 1),
-				"value": resourceUpdateTestAnnotationValue,
+		patch := map[string]interface{}{
+			"metadata": map[string]interface{}{
+				"annotations": map[string]interface{}{
+					resourceUpdateTestAnnotationKey: resourceUpdateTestAnnotationValue,
+				},
 			},
 		}
 		patchBytes, err := json.Marshal(patch)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred(), framework.MessageUnexpectedError)
 
-		object, err = hostClient.Patch(ctx, object.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{})
+		object, err = hostClient.Patch(ctx, object.GetName(), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 		gomega.Expect(err).NotTo(gomega.HaveOccurred(), framework.MessageUnexpectedError)
 	})
@@ -245,29 +244,33 @@
 			actualFieldByCluster[cluster.Name] = actualField
 		}
 
-		fedStatusUns, err := f.HostDynamicClient().Resource(config.statusCollection.gvr).Namespace(object.GetNamespace()).Get(
-			ctx, object.GetName(), metav1.GetOptions{})
+		ftc, exists := f.FTCManager().GetResourceFTC(config.gvk)
+		g.Expect(exists).To(gomega.BeTrue())
+
+		collectedStatusName := naming.GenerateFederatedObjectName(object.GetName(), ftc.Name)
+		fedStatus, err := f.HostFedClient().CoreV1alpha1().CollectedStatuses(object.GetNamespace()).Get(
+			ctx, collectedStatusName, metav1.GetOptions{})
 		if err != nil && apierrors.IsNotFound(err) {
 			// status might not have been created yet, use local g to fail only this attempt
 			g.Expect(err).NotTo(gomega.HaveOccurred(), "Federated status object has not been created")
 		}
 		gomega.Expect(err).NotTo(gomega.HaveOccurred(), framework.MessageUnexpectedError)
 
-		fedStatus := controllerutil.FederatedResource{}
-		err = pkgruntime.DefaultUnstructuredConverter.FromUnstructured(fedStatusUns.Object, &fedStatus)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred(), framework.MessageUnexpectedError)
-
-		g.Expect(fedStatus.ClusterStatus).
+		g.Expect(fedStatus.Clusters).
 			To(gomega.HaveLen(len(actualFieldByCluster)), "Collected status has wrong number of clusters")
-		for _, clusterStatus := range fedStatus.ClusterStatus {
-			actualField, exists := actualFieldByCluster[clusterStatus.ClusterName]
-			g.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("collected from unexpected cluster %s", clusterStatus.ClusterName))
-
-			collectedField, exists, err := unstructured.NestedFieldNoCopy(clusterStatus.CollectedFields, pathSegments...)
+		for _, clusterStatus := range fedStatus.Clusters {
+			actualField, exists := actualFieldByCluster[clusterStatus.Cluster]
+			g.Expect(exists).
+				To(gomega.BeTrue(), fmt.Sprintf("collected from unexpected cluster %s", clusterStatus.Cluster))
+
+			collectedFields := &map[string]interface{}{}
+			err := jsonutil.Unmarshal(clusterStatus.CollectedFields.Raw, collectedFields)
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+			collectedField, exists, err := unstructured.NestedFieldNoCopy(*collectedFields, pathSegments...)
 			gomega.Expect(err).NotTo(gomega.HaveOccurred(), framework.MessageUnexpectedError)
 			g.Expect(exists).To(
 				gomega.BeTrue(),
-				fmt.Sprintf("collected fields does not contain %q for cluster %s", config.statusCollection.path, clusterStatus.ClusterName),
+				fmt.Sprintf("collected fields does not contain %q for cluster %s", config.statusCollection.path, clusterStatus.Cluster),
 			)
 			g.Expect(collectedField).To(gomega.Equal(actualField), "collected and actual fields differ")
 		}
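Status collection likewise moves off the per-type `federated<kind>statuses` resources (the `gvr` fields dropped from the configs above) onto the generic `CollectedStatus` API: the object name is derived from the source object's name plus the FTC name, and each cluster's `CollectedFields` arrives as raw JSON that must be unmarshalled before field lookup. A condensed sketch of that decode step, with variable names assumed:

```go
// CollectedFields is a raw JSON payload; decode it into a generic map,
// then walk it with the unstructured helpers.
collectedFields := map[string]interface{}{}
if err := json.Unmarshal(clusterStatus.CollectedFields.Raw, &collectedFields); err != nil {
	panic(err) // sketch only
}
readyReplicas, found, err := unstructured.NestedFieldNoCopy(collectedFields, "status", "readyReplicas")
if err == nil && found {
	fmt.Println("collected readyReplicas:", readyReplicas)
}
```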