From 620aa2799b6c858079dd43de3047a49944eac906 Mon Sep 17 00:00:00 2001
From: Rahul Sharma
Date: Thu, 12 Dec 2024 01:39:55 +0000
Subject: [PATCH] test

Replace the Ginkgo/Gomega e2e suite and its custom Go framework with
Chainsaw-based tests (one directory per scenario under e2e/test), add a
Dependabot configuration, and update the Makefile to export the management
cluster kubeconfig and run the Chainsaw suites from the e2e-test target.

---
 .github/dependabot.yml | 38 +
 Makefile | 13 +-
 e2e/test/assert-ccm-resources.yaml | 8 +
 e2e/test/ccm_e2e_test.go | 1398 -----------------
 e2e/test/ccm_suite_test.go | 113 --
 e2e/test/framework/cluster.go | 9 -
 e2e/test/framework/framework.go | 90 --
 e2e/test/framework/loadbalancer_suite.go | 86 -
 e2e/test/framework/namespace.go | 26 -
 e2e/test/framework/node.go | 26 -
 e2e/test/framework/pod.go | 56 -
 e2e/test/framework/secret.go | 117 --
 e2e/test/framework/service.go | 137 --
 e2e/test/framework/util.go | 180 ---
 .../fw-use-specified-nb/chainsaw-test.yaml | 124 ++
 .../create-pods-services.yaml | 48 +
 .../chainsaw-test.yaml | 121 ++
 .../create-pods-services.yaml | 49 +
 .../chainsaw-test.yaml | 73 +
 .../create-pods-services.yaml | 49 +
 .../lb-delete-svc-no-nb/chainsaw-test.yaml | 124 ++
 .../create-pods-services.yaml | 49 +
 .../chainsaw-test.yaml | 121 ++
 .../create-pods-services.yaml | 49 +
 .../chainsaw-test.yaml | 73 +
 .../create-pods-services.yaml | 49 +
 .../chainsaw-test.yaml | 64 +
 .../create-pods-services.yaml | 66 +
 .../chainsaw-test.yaml | 66 +
 .../create-pods-services.yaml | 52 +
 .../chainsaw-test.yaml | 65 +
 .../create-pods-services.yaml | 51 +
 .../chainsaw-test.yaml | 64 +
 .../create-pods-services.yaml | 50 +
 .../chainsaw-test.yaml | 106 ++
 .../create-pods-services.yaml | 49 +
 .../chainsaw-test.yaml | 68 +
 .../create-pods-services.yaml | 49 +
 e2e/test/lb-simple/chainsaw-test.yaml | 84 +
 e2e/test/lb-simple/create-pods-services.yaml | 59 +
 e2e/test/lb-single-tls/chainsaw-test.yaml | 92 ++
 .../lb-single-tls/create-pods-services.yaml | 50 +
 .../chainsaw-test.yaml | 67 +
 .../create-pods-services.yaml | 53 +
 .../lb-updated-with-nb-id/chainsaw-test.yaml | 69 +
 .../create-pods-services.yaml | 47 +
 .../lb-with-http-to-https/chainsaw-test.yaml | 90 ++
 .../create-pods-services.yaml | 50 +
 .../chainsaw-test.yaml | 84 +
 .../create-pods-services.yaml | 68 +
 .../lb-with-node-addition/chainsaw-test.yaml | 99 ++
 .../create-pods-services.yaml | 47 +
 .../chainsaw-test.yaml | 112 ++
 .../create-pods-services.yaml | 54 +
 .../chainsaw-test.yaml | 100 ++
 .../create-pods-services.yaml | 54 +
 .../chainsaw-test.yaml | 66 +
 .../create-pods-services.yaml | 54 +
 .../chainsaw-test.yaml | 77 +
 .../create-pods-services.yaml | 54 +
 e2e/test/scripts/get-nb-id.sh | 20 +
 61 files changed, 3286 insertions(+), 2240 deletions(-)
 create mode 100644 .github/dependabot.yml
 create mode 100644 e2e/test/assert-ccm-resources.yaml
 delete mode 100644 e2e/test/ccm_e2e_test.go
 delete mode 100644 e2e/test/ccm_suite_test.go
 delete mode 100644 e2e/test/framework/cluster.go
 delete mode 100644 e2e/test/framework/framework.go
 delete mode 100644 e2e/test/framework/loadbalancer_suite.go
 delete mode 100644 e2e/test/framework/namespace.go
 delete mode 100644 e2e/test/framework/node.go
 delete mode 100644 e2e/test/framework/pod.go
 delete mode 100644 e2e/test/framework/secret.go
 delete mode 100644 e2e/test/framework/service.go
 delete mode 100644 e2e/test/framework/util.go
 create mode 100644 e2e/test/fw-use-specified-nb/chainsaw-test.yaml
 create mode 100644 e2e/test/fw-use-specified-nb/create-pods-services.yaml
 create mode 100644 e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-created-with-new-nb-id/create-pods-services.yaml
 create mode 100644 e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-created-with-specified-nb-id/create-pods-services.yaml
 create mode 100644 e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-delete-svc-no-nb/create-pods-services.yaml
 create mode 100644 e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-delete-svc-use-new-nbid/create-pods-services.yaml
 create mode 100644 e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-delete-svc-use-specified-nb/create-pods-services.yaml
 create mode 100644 e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-hostname-only-ingress/create-pods-services.yaml
 create mode 100644 e2e/test/lb-http-body-health-check/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-http-body-health-check/create-pods-services.yaml
 create mode 100644 e2e/test/lb-http-status-health-check/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-http-status-health-check/create-pods-services.yaml
 create mode 100644 e2e/test/lb-passive-health-check/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-passive-health-check/create-pods-services.yaml
 create mode 100644 e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-preserve-annotation-new-nb-specified/create-pods-services.yaml
 create mode 100644 e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-preserve-annotation-svc-delete/create-pods-services.yaml
 create mode 100644 e2e/test/lb-simple/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-simple/create-pods-services.yaml
 create mode 100644 e2e/test/lb-single-tls/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-single-tls/create-pods-services.yaml
 create mode 100644 e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-tcp-connection-health-check/create-pods-services.yaml
 create mode 100644 e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-updated-with-nb-id/create-pods-services.yaml
 create mode 100644 e2e/test/lb-with-http-to-https/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-with-http-to-https/create-pods-services.yaml
 create mode 100644 e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-with-multiple-http-https-ports/create-pods-services.yaml
 create mode 100644 e2e/test/lb-with-node-addition/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-with-node-addition/create-pods-services.yaml
 create mode 100644 e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-with-proxyprotocol-default-annotation/create-pods-services.yaml
 create mode 100644 e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-with-proxyprotocol-override/create-pods-services.yaml
 create mode 100644 e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-with-proxyprotocol-port-specific/create-pods-services.yaml
 create mode 100644 e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml
 create mode 100644 e2e/test/lb-with-proxyprotocol-set/create-pods-services.yaml
 create mode 100755 e2e/test/scripts/get-nb-id.sh

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..70f9fe7f
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,38 @@
+version: 2
+updates:
+
+# Go - root directory
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    ## group all dependencies with a k8s.io prefix into a single PR.
+    groups:
+      kubernetes:
+        patterns: [ "k8s.io/*", "sigs.k8s.io/*" ]
+      otel:
+        patterns: ["go.opentelemetry.io/*"]
+    commit-message:
+      prefix: ":seedling:"
+    labels:
+      - "dependencies"
+
+# Docker
+  - package-ecosystem: "docker"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    commit-message:
+      prefix: ":seedling:"
+    labels:
+      - "dependencies"
+
+# github-actions
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    commit-message:
+      prefix: ":seedling:"
+    labels:
+      - "dependencies"
diff --git a/Makefile b/Makefile
index 17c5f76c..7d6f6247 100644
--- a/Makefile
+++ b/Makefile
@@ -15,7 +15,7 @@ HELM_VERSION ?= v3.16.3
 #####################################################################
 CLUSTER_NAME ?= ccm-$(shell git rev-parse --short HEAD)
 K8S_VERSION ?= "v1.31.2"
-CAPI_VERSION ?= "v1.6.3"
+CAPI_VERSION ?= "v1.8.5"
 CAAPH_VERSION ?= "v0.2.1"
 CAPL_VERSION ?= "v0.7.1"
 CONTROLPLANE_NODES ?= 1
@@ -24,6 +24,7 @@ LINODE_FIREWALL_ENABLED ?= true
 LINODE_REGION ?= us-lax
 LINODE_OS ?= linode/ubuntu22.04
 KUBECONFIG_PATH ?= $(CURDIR)/test-cluster-kubeconfig.yaml
+MGMT_KUBECONFIG_PATH ?= $(CURDIR)/mgmt-cluster-kubeconfig.yaml
 
 # if the $DEVBOX_PACKAGES_DIR env variable exists that means we are within a devbox shell and can safely
 # use devbox's bin for our tools
@@ -174,8 +175,11 @@ mgmt-cluster:
 		--wait-providers \
 		--wait-provider-timeout 600 \
 		--core cluster-api:$(CAPI_VERSION) \
+		--bootstrap kubeadm:$(CAPI_VERSION) \
+		--control-plane kubeadm:$(CAPI_VERSION) \
 		--addon helm:$(CAAPH_VERSION) \
 		--infrastructure linode-linode:$(CAPL_VERSION)
+	kind get kubeconfig --name=caplccm > $(MGMT_KUBECONFIG_PATH)
 
 .PHONY: cleanup-cluster
 cleanup-cluster:
@@ -186,7 +190,12 @@ cleanup-cluster:
 
 .PHONY: e2e-test
 e2e-test:
-	$(MAKE) -C e2e test LINODE_API_TOKEN=$(LINODE_TOKEN) SUITE_ARGS="--region=$(LINODE_REGION) --use-existing --timeout=5m --kubeconfig=$(KUBECONFIG_PATH) --image=$(IMG) --linode-url https://api.linode.com/"
+	CLUSTER_NAME=$(CLUSTER_NAME) \
+	MGMT_KUBECONFIG=$(MGMT_KUBECONFIG_PATH) \
+	KUBECONFIG=$(KUBECONFIG_PATH) \
+	REGION=$(LINODE_REGION) \
+	LINODE_TOKEN=$(LINODE_TOKEN) \
+	chainsaw test e2e/test --parallel 2
 
 #####################################################################
 # OS / ARCH
diff --git a/e2e/test/assert-ccm-resources.yaml b/e2e/test/assert-ccm-resources.yaml
new file mode 100644
index 00000000..4d7d87d6
--- /dev/null
+++ b/e2e/test/assert-ccm-resources.yaml
@@ -0,0 +1,8 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: ccm-linode
+  namespace: kube-system
+status:
+  numberAvailable: 1
+  numberReady: 1
diff --git a/e2e/test/ccm_e2e_test.go b/e2e/test/ccm_e2e_test.go
deleted file mode 100644
index d5ce41ba..00000000
--- a/e2e/test/ccm_e2e_test.go
+++ /dev/null
@@ -1,1398 +0,0 @@
-package test
-
-import (
-	"context"
-	"e2e_test/test/framework"
-	"fmt"
-	"os/exec"
-	"strconv"
-	"time"
-
-	"k8s.io/apimachinery/pkg/api/errors"
-
-	"github.com/linode/linodego"
-	. "github.com/onsi/ginkgo/v2"
-	. 
"github.com/onsi/gomega" - "github.com/onsi/gomega/types" - core "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/watch" -) - -func EnsuredService() types.GomegaMatcher { - return And( - WithTransform(func(e watch.Event) (string, error) { - event, ok := e.Object.(*core.Event) - if !ok { - return "", fmt.Errorf("failed to poll event") - } - return event.Reason, nil - }, Equal("EnsuredLoadBalancer")), - ) -} - -var _ = Describe("e2e tests", func() { - var ( - err error - f *framework.Invocation - workers []string - ) - - const ( - annLinodeProxyProtocolDeprecated = "service.beta.kubernetes.io/linode-loadbalancer-proxy-protocol" - annLinodeDefaultProxyProtocol = "service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol" - annLinodeDefaultProtocol = "service.beta.kubernetes.io/linode-loadbalancer-default-protocol" - annLinodePortConfigPrefix = "service.beta.kubernetes.io/linode-loadbalancer-port-" - annLinodeLoadBalancerPreserve = "service.beta.kubernetes.io/linode-loadbalancer-preserve" - annLinodeHealthCheckType = "service.beta.kubernetes.io/linode-loadbalancer-check-type" - annLinodeCheckBody = "service.beta.kubernetes.io/linode-loadbalancer-check-body" - annLinodeCheckPath = "service.beta.kubernetes.io/linode-loadbalancer-check-path" - annLinodeHealthCheckInterval = "service.beta.kubernetes.io/linode-loadbalancer-check-interval" - annLinodeHealthCheckTimeout = "service.beta.kubernetes.io/linode-loadbalancer-check-timeout" - annLinodeHealthCheckAttempts = "service.beta.kubernetes.io/linode-loadbalancer-check-attempts" - annLinodeHealthCheckPassive = "service.beta.kubernetes.io/linode-loadbalancer-check-passive" - annLinodeNodeBalancerID = "service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id" - annLinodeHostnameOnlyIngress = "service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress" - ) - - BeforeEach(func() { - f = root.Invoke() - workers, err = f.GetNodeList() - Expect(err).NotTo(HaveOccurred()) - Expect(len(workers)).Should(BeNumerically(">=", 2)) - }) - - createPodWithLabel := func(pods []string, ports []core.ContainerPort, image string, labels map[string]string, selectNode bool) { - for i, pod := range pods { - p := f.LoadBalancer.GetPodObject(pod, image, ports, labels) - if selectNode { - p = f.LoadBalancer.SetNodeSelector(p, workers[i]) - } - Expect(f.LoadBalancer.CreatePod(p)).ToNot(BeNil()) - Eventually(f.LoadBalancer.GetPod).WithArguments(p.ObjectMeta.Name, f.LoadBalancer.Namespace()).Should(HaveField("Status.Phase", Equal(core.PodRunning))) - } - } - - deletePods := func(pods []string) { - for _, pod := range pods { - Expect(f.LoadBalancer.DeletePod(pod)).NotTo(HaveOccurred()) - } - } - - deleteService := func() { - Expect(f.LoadBalancer.DeleteService()).NotTo(HaveOccurred()) - } - - deleteSecret := func(name string) { - Expect(f.LoadBalancer.DeleteSecret(name)).NotTo(HaveOccurred()) - } - - ensureServiceLoadBalancer := func() { - watcher, err := f.LoadBalancer.GetServiceWatcher() - Expect(err).NotTo(HaveOccurred()) - Eventually(watcher.ResultChan()).Should(Receive(EnsuredService())) - } - - ensureServiceWasDeleted := func() { - err := func() error { - _, err := f.LoadBalancer.GetService() - return err - } - Eventually(err).WithTimeout(10 * time.Second).Should(MatchError(errors.IsNotFound, "IsNotFound")) - } - - createServiceWithSelector := func(selector map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) { - Expect(f.LoadBalancer.CreateService(selector, nil, ports, 
isSessionAffinityClientIP)).NotTo(HaveOccurred()) - Eventually(f.LoadBalancer.GetServiceEndpoints).Should(Not(BeEmpty())) - ensureServiceLoadBalancer() - } - - createServiceWithAnnotations := func(labels, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) { - Expect(f.LoadBalancer.CreateService(labels, annotations, ports, isSessionAffinityClientIP)).NotTo(HaveOccurred()) - Eventually(f.LoadBalancer.GetServiceEndpoints).Should(Not(BeEmpty())) - ensureServiceLoadBalancer() - } - - updateServiceWithAnnotations := func(labels, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) { - Expect(f.LoadBalancer.UpdateService(labels, annotations, ports, isSessionAffinityClientIP)).NotTo(HaveOccurred()) - Eventually(f.LoadBalancer.GetServiceEndpoints).Should(Not(BeEmpty())) - ensureServiceLoadBalancer() - } - - deleteNodeBalancer := func(id int) { - Expect(getLinodeClient().DeleteNodeBalancer(context.Background(), id)).NotTo(HaveOccurred()) - } - - createNodeBalancer := func() int { - var nb *linodego.NodeBalancer - nb, err = getLinodeClient().CreateNodeBalancer(context.TODO(), linodego.NodeBalancerCreateOptions{ - Region: region, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(nb).NotTo(BeNil()) - return nb.ID - } - - checkNumberOfWorkerNodes := func(numNodes int) { - Eventually(f.GetNodeList).Should(HaveLen(numNodes)) - } - - checkNumberOfUpNodes := func(numNodes int) { - By("Checking the Number of Up Nodes") - Eventually(f.LoadBalancer.GetNodeBalancerUpNodes).WithArguments(framework.TestServerResourceName).Should(BeNumerically(">=", numNodes)) - } - - checkNodeBalancerExists := func(id int) { - By("Checking if the NodeBalancer exists") - Eventually(getLinodeClient().GetNodeBalancer).WithArguments(context.Background(), id).Should(HaveField("ID", Equal(id))) - } - - checkNodeBalancerNotExists := func(id int) { - Eventually(func() int { - _, err := getLinodeClient().GetNodeBalancer(context.Background(), id) - if err == nil { - return 0 - } - linodeErr, _ := err.(*linodego.Error) - return linodeErr.Code - }).Should(Equal(404)) - } - - type checkArgs struct { - checkType, path, body, interval, timeout, attempts, checkPassive, protocol, proxyProtocol string - checkNodes bool - } - - checkNodeBalancerID := func(service string, expectedID int) { - Eventually(f.LoadBalancer.GetNodeBalancerID).WithArguments(service).Should(Equal(expectedID)) - } - - checkLBStatus := func(service string, hasIP bool) { - Eventually(f.LoadBalancer.GetNodeBalancerFromService).WithArguments(service, hasIP).Should(Not(BeNil())) - } - - checkNodeBalancerConfigForPort := func(port int, args checkArgs) { - By("Getting NodeBalancer Configuration for port " + strconv.Itoa(port)) - var nbConfig *linodego.NodeBalancerConfig - Eventually(func() error { - nbConfig, err = f.LoadBalancer.GetNodeBalancerConfigForPort(framework.TestServerResourceName, port) - return err - }).Should(BeNil()) - - if args.checkType != "" { - By("Checking Health Check Type") - Expect(string(nbConfig.Check)).To(Equal(args.checkType)) - } - - if args.path != "" { - By("Checking Health Check Path") - Expect(nbConfig.CheckPath).To(Equal(args.path)) - } - - if args.body != "" { - By("Checking Health Check Body") - Expect(nbConfig.CheckBody).To(Equal(args.body)) - } - - if args.interval != "" { - By("Checking TCP Connection Health Check Body") - intInterval, err := strconv.Atoi(args.interval) - Expect(err).NotTo(HaveOccurred()) - - Expect(nbConfig.CheckInterval).To(Equal(intInterval)) - } - - if 
args.timeout != "" { - By("Checking TCP Connection Health Check Timeout") - intTimeout, err := strconv.Atoi(args.timeout) - Expect(err).NotTo(HaveOccurred()) - - Expect(nbConfig.CheckTimeout).To(Equal(intTimeout)) - } - - if args.attempts != "" { - By("Checking TCP Connection Health Check Attempts") - intAttempts, err := strconv.Atoi(args.attempts) - Expect(err).NotTo(HaveOccurred()) - - Expect(nbConfig.CheckAttempts).To(Equal(intAttempts)) - } - - if args.checkPassive != "" { - By("Checking for Passive Health Check") - boolCheckPassive, err := strconv.ParseBool(args.checkPassive) - Expect(err).NotTo(HaveOccurred()) - - Expect(nbConfig.CheckPassive).To(Equal(boolCheckPassive)) - } - - if args.protocol != "" { - By("Checking for Protocol") - Expect(string(nbConfig.Protocol)).To(Equal(args.protocol)) - } - - if args.proxyProtocol != "" { - By("Checking for Proxy Protocol") - Expect(string(nbConfig.ProxyProtocol)).To(Equal(args.proxyProtocol)) - } - - if args.checkNodes { - checkNumberOfUpNodes(2) - } - } - - addNewNode := func() { - err := exec.Command("terraform", "apply", "-var", "nodes=3", "-auto-approve").Run() - Expect(err).NotTo(HaveOccurred()) - } - - deleteNewNode := func() { - err := exec.Command("terraform", "apply", "-var", "nodes=2", "-auto-approve").Run() - Expect(err).NotTo(HaveOccurred()) - } - - waitForNodeAddition := func() { - checkNumberOfUpNodes(3) - } - - Describe("Test", func() { - Context("Simple", func() { - Context("Load Balancer", func() { - var ( - pods []string - labels map[string]string - ) - - BeforeEach(func() { - pods = []string{"test-pod-1", "test-pod-2"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - labels = map[string]string{ - "app": "test-loadbalancer", - } - - By("Creating Pods") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, true) - - By("Creating Service") - createServiceWithSelector(labels, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should reach all pods", func() { - By("Checking TCP Response") - var eps []string - Eventually(func() error { - eps, err = f.LoadBalancer.GetLoadBalancerIps() - return err - }).Should(BeNil()) - Eventually(framework.GetResponseFromCurl).WithArguments(eps[0]).Should(ContainSubstring(pods[0])) - Eventually(framework.GetResponseFromCurl).WithArguments(eps[0]).Should(ContainSubstring(pods[1])) - }) - }) - }) - }) - - Describe("Test", func() { - Context("LoadBalancer", func() { - AfterEach(func() { - err := root.Recycle() - Expect(err).NotTo(HaveOccurred()) - }) - - Context("With single TLS port", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - secretName string - ) - BeforeEach(func() { - pods = []string{"test-single-port-pod"} - ports := []core.ContainerPort{ - { - Name: "https", - ContainerPort: 8080, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "https", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - secretName = "tls-secret" - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodePortConfigPrefix + "80": `{ "tls-secret-name": "` + secretName + `" }`, - annLinodeDefaultProtocol: "https", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, 
labels, false) - - By("Creating Secret") - Expect(f.LoadBalancer.CreateTLSSecret("tls-secret")).NotTo(HaveOccurred()) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Secrets") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Deleting the Secret") - deleteSecret(secretName) - }) - - It("should reach the pod via tls", func() { - By("Checking TCP Response") - var eps []string - Eventually(func() error { - eps, err = f.LoadBalancer.GetLoadBalancerIps() - return err - }).Should(BeNil()) - - By("Waiting for Response from the LoadBalancer url: " + eps[0]) - Eventually(framework.WaitForHTTPSResponse).WithArguments(eps[0]).Should(ContainSubstring(pods[0])) - }) - }) - - Context("With Hostname only ingress", func() { - var ( - pods []string - labels map[string]string - servicePorts []core.ServicePort - - annotations = map[string]string{} - ) - - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 80, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-hostname-only-ingress", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, map[string]string{}, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("can update service to only use Hostname in ingress", func() { - By("Checking LB Status has IP") - checkLBStatus(framework.TestServerResourceName, true) - - By("Annotating service with " + annLinodeHostnameOnlyIngress) - updateServiceWithAnnotations(labels, map[string]string{ - annLinodeHostnameOnlyIngress: "true", - }, servicePorts, false) - - By("Checking LB Status does not have IP") - checkLBStatus(framework.TestServerResourceName, false) - }) - - annotations[annLinodeHostnameOnlyIngress] = "true" - - It("can create a service that only uses Hostname in ingress", func() { - By("Creating a service annotated with " + annLinodeHostnameOnlyIngress) - checkLBStatus(framework.TestServerResourceName, true) - }) - }) - - Context("With ProxyProtocol", func() { - var ( - pods []string - labels map[string]string - servicePorts []core.ServicePort - - proxyProtocolV1 = string(linodego.ProxyProtocolV1) - proxyProtocolV2 = string(linodego.ProxyProtocolV2) - proxyProtocolNone = string(linodego.ProxyProtocolNone) - ) - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 80, - }, - { - Name: "http-2", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - { - Name: "http-2", - Port: 8080, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-proxyprotocol", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, map[string]string{}, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("can set 
proxy-protocol on each port", func() { - By("Annotating port 80 with v1 and 8080 with v2") - updateServiceWithAnnotations(labels, map[string]string{ - annLinodePortConfigPrefix + "80": fmt.Sprintf(`{"proxy-protocol": "%s"}`, proxyProtocolV1), - annLinodePortConfigPrefix + "8080": fmt.Sprintf(`{"proxy-protocol": "%s"}`, proxyProtocolV2), - }, servicePorts, false) - - By("Checking NodeBalancerConfig for port 80 should have ProxyProtocol v1") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolV1}) - - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocol v2") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV2}) - }) - - It("should override default proxy-protocol annotation when a port configuration is specified", func() { - By("Annotating a default version of ProxyProtocol v2 and v1 for port 8080") - updateServiceWithAnnotations(labels, map[string]string{ - annLinodeDefaultProxyProtocol: proxyProtocolV2, - annLinodePortConfigPrefix + "8080": fmt.Sprintf(`{"proxy-protocol": "%s"}`, proxyProtocolV1), - }, servicePorts, false) - - By("Checking NodeBalancerConfig for port 80 should have the default ProxyProtocol v2") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolV2}) - - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocol v1") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV1}) - }) - - It("port specific configuration should not effect other ports", func() { - By("Annotating ProxyProtocol v2 on port 8080") - updateServiceWithAnnotations(labels, map[string]string{ - annLinodePortConfigPrefix + "8080": fmt.Sprintf(`{"proxy-protocol": "%s"}`, proxyProtocolV2), - }, servicePorts, false) - - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocolv2") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV2}) - - By("Checking NodeBalancerConfig for port 80 should not have ProxyProtocol enabled") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolNone}) - }) - - It("default annotations can be used to apply ProxyProtocol to all NodeBalancerConfigs", func() { - annotations := make(map[string]string) - - By("By specifying ProxyProtocol v2 using the deprecated annotation " + annLinodeProxyProtocolDeprecated) - annotations[annLinodeProxyProtocolDeprecated] = proxyProtocolV2 - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking NodeBalancerConfig for port 80 should have default ProxyProtocol v2") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolV2}) - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocol v2") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV2}) - - By("specifying ProxyProtocol v1 using the annotation " + annLinodeDefaultProtocol) - annotations[annLinodeDefaultProxyProtocol] = proxyProtocolV1 - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking NodeBalancerConfig for port 80 should have default ProxyProtocol v1") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolV1}) - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocol v1") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV1}) - }) - }) - - Context("With Multiple HTTP and HTTPS Ports", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - secretName1 string - secretName2 string - 
) - BeforeEach(func() { - pods = []string{"tls-multi-port-pod"} - secretName1 = "tls-secret-1" - secretName2 = "tls-secret-2" - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeDefaultProtocol: "https", - annLinodePortConfigPrefix + "80": `{"protocol": "http"}`, - annLinodePortConfigPrefix + "8080": `{"protocol": "http"}`, - annLinodePortConfigPrefix + "443": `{"tls-secret-name": "` + secretName1 + `"}`, - annLinodePortConfigPrefix + "8443": `{"tls-secret-name": "` + secretName2 + `", "protocol": "https"}`, - } - ports := []core.ContainerPort{ - { - Name: "alpha", - ContainerPort: 8080, - }, - { - Name: "beta", - ContainerPort: 8989, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8989), - Protocol: "TCP", - }, - { - Name: "http-2", - Port: 8080, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - { - Name: "https-1", - Port: 443, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - { - Name: "https-2", - Port: 8443, - TargetPort: intstr.FromInt(8989), - Protocol: "TCP", - }, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Secret") - err = f.LoadBalancer.CreateTLSSecret(secretName1) - Expect(err).NotTo(HaveOccurred()) - err = f.LoadBalancer.CreateTLSSecret(secretName2) - Expect(err).NotTo(HaveOccurred()) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Secrets") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Deleting the Secret") - deleteSecret(secretName1) - deleteSecret(secretName2) - }) - - It("should reach the pods", func() { - By("Checking TCP Response") - var eps []string - Eventually(func() error { - eps, err = f.LoadBalancer.GetLoadBalancerIps() - return err - }).Should(BeNil()) - Expect(eps).Should(HaveLen(4)) - - // in order of the spec - http80, http8080, https443, https8443 := eps[0], eps[1], eps[2], eps[3] - Eventually(framework.WaitForHTTPResponse).WithArguments(http80).Should(ContainSubstring(pods[0])) - Eventually(framework.WaitForHTTPResponse).WithArguments(http8080).Should(ContainSubstring(pods[0])) - Eventually(framework.WaitForHTTPSResponse).WithArguments(https443).Should(ContainSubstring(pods[0])) - Eventually(framework.WaitForHTTPSResponse).WithArguments(https8443).Should(ContainSubstring(pods[0])) - }) - }) - - Context("With HTTP updating to have HTTPS", Serial, func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - secretName string - ) - BeforeEach(func() { - pods = []string{"tls-pod"} - secretName = "tls-secret-1" - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeDefaultProtocol: "https", - annLinodePortConfigPrefix + "80": `{"protocol": "http"}`, - } - ports := []core.ContainerPort{ - { - Name: "alpha", - ContainerPort: 8080, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Creating Secret") - err = f.LoadBalancer.CreateTLSSecret(secretName) - Expect(err).NotTo(HaveOccurred()) - - By("Updating the Service") - updateAnnotations := 
map[string]string{ - annLinodeDefaultProtocol: "https", - annLinodePortConfigPrefix + "80": `{"protocol": "http"}`, - annLinodePortConfigPrefix + "443": `{"tls-secret-name": "` + secretName + `", "protocol": "https"}`, - } - updateServicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - { - Name: "https", - Port: 443, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - updateServiceWithAnnotations(labels, updateAnnotations, updateServicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Secrets") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Deleting the Secret") - deleteSecret(secretName) - }) - - It("should reach the pods", func() { - By("Checking TCP Response") - var eps []string - Eventually(func() error { - eps, err = f.LoadBalancer.GetLoadBalancerIps() - return err - }).Should(BeNil()) - Expect(eps).Should(HaveLen(2)) - http80, https443 := eps[0], eps[1] - By("Waiting for Response from the LoadBalancer url: " + http80) - Eventually(framework.WaitForHTTPResponse).WithArguments(http80).Should(ContainSubstring(pods[0])) - - By("Waiting for Response from the LoadBalancer url: " + https443) - Eventually(framework.WaitForHTTPSResponse).WithArguments(https443).Should(ContainSubstring(pods[0])) - }) - }) - - Context("For HTTP body health check", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - - checkType = "http_body" - path = "/" - body = "nginx" - protocol = "http" - ) - BeforeEach(func() { - pods = []string{"test-pod-http-body"} - ports := []core.ContainerPort{ - { - Name: "http", - ContainerPort: 80, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeHealthCheckType: checkType, - annLinodeCheckPath: path, - annLinodeCheckBody: body, - annLinodeDefaultProtocol: protocol, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, "nginx", labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should successfully check the health of 2 nodes", func() { - By("Checking NodeBalancer Configurations") - checkNodeBalancerConfigForPort(80, checkArgs{ - checkType: checkType, - path: path, - body: body, - protocol: protocol, - checkNodes: true, - }) - }) - }) - - Context("Updated with NodeBalancerID", func() { - var ( - pods []string - labels map[string]string - servicePorts []core.ServicePort - - annotations = map[string]string{} - ) - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-nodebalancer-id", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should use the 
specified NodeBalancer", func() { - By("Creating new NodeBalancer") - nbID := createNodeBalancer() - - By("Annotating service with new NodeBalancer ID") - annotations[annLinodeNodeBalancerID] = strconv.Itoa(nbID) - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking the NodeBalancer ID") - checkNodeBalancerID(framework.TestServerResourceName, nbID) - }) - }) - - Context("Created with NodeBalancerID", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - servicePorts []core.ServicePort - - nodeBalancerID int - ) - - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-nodebalancer-id", - } - - By("Creating NodeBalancer") - nodeBalancerID = createNodeBalancer() - - annotations = map[string]string{ - annLinodeNodeBalancerID: strconv.Itoa(nodeBalancerID), - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - err := root.Recycle() - Expect(err).NotTo(HaveOccurred()) - }) - - It("should use the specified NodeBalancer", func() { - By("Checking the NodeBalancerID") - checkNodeBalancerID(framework.TestServerResourceName, nodeBalancerID) - }) - - It("should use the newly specified NodeBalancer ID", func() { - By("Creating new NodeBalancer") - nbID := createNodeBalancer() - - By("Waiting for current NodeBalancer to be ready") - checkNodeBalancerID(framework.TestServerResourceName, nodeBalancerID) - - By("Annotating service with new NodeBalancer ID") - annotations[annLinodeNodeBalancerID] = strconv.Itoa(nbID) - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking the NodeBalancer ID") - checkNodeBalancerID(framework.TestServerResourceName, nbID) - - By("Checking old NodeBalancer was deleted") - checkNodeBalancerNotExists(nodeBalancerID) - }) - }) - - Context("Deleted Service when NodeBalancer not present", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - servicePorts []core.ServicePort - - nodeBalancerID int - ) - - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-nodebalancer-id", - } - - By("Creating NodeBalancer") - nodeBalancerID = createNodeBalancer() - - annotations = map[string]string{ - annLinodeNodeBalancerID: strconv.Itoa(nodeBalancerID), - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - err := root.Recycle() - Expect(err).NotTo(HaveOccurred()) - }) - - It("should use the specified NodeBalancer", func() { - By("Checking the NodeBalancerID") - 
checkNodeBalancerID(framework.TestServerResourceName, nodeBalancerID) - }) - - It("should use the newly specified NodeBalancer ID", func() { - By("Creating new NodeBalancer") - nbID := createNodeBalancer() - - By("Waiting for current NodeBalancer to be ready") - checkNodeBalancerID(framework.TestServerResourceName, nodeBalancerID) - - By("Annotating service with new NodeBalancer ID") - annotations[annLinodeNodeBalancerID] = strconv.Itoa(nbID) - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking the NodeBalancer ID") - checkNodeBalancerID(framework.TestServerResourceName, nbID) - - By("Checking old NodeBalancer was deleted") - checkNodeBalancerNotExists(nodeBalancerID) - }) - - It("should delete the service with no NodeBalancer present", func() { - By("Deleting the NodeBalancer") - deleteNodeBalancer(nodeBalancerID) - - By("Checking old NodeBalancer was deleted") - checkNodeBalancerNotExists(nodeBalancerID) - - By("Deleting the Service") - deleteService() - - By("Checking if the service was deleted") - ensureServiceWasDeleted() - }) - }) - - Context("With Preserve Annotation", func() { - var ( - pods []string - servicePorts []core.ServicePort - labels map[string]string - annotations map[string]string - nodeBalancerID int - ) - - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeLoadBalancerPreserve: "true", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Getting NodeBalancer ID") - nodeBalancerID, err = f.LoadBalancer.GetNodeBalancerID(framework.TestServerResourceName) - Expect(err).NotTo(HaveOccurred()) - }) - - AfterEach(func() { - By("Deleting the NodeBalancer") - deleteNodeBalancer(nodeBalancerID) - - err := root.Recycle() - Expect(err).NotTo(HaveOccurred()) - }) - - It("should preserve the underlying nodebalancer after service deletion", func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Checking if the NodeBalancer exists") - checkNodeBalancerExists(nodeBalancerID) - }) - - It("should preserve the underlying nodebalancer after a new one is specified", func() { - defer func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }() - - By("Creating new NodeBalancer") - newID := createNodeBalancer() - defer func() { - By("Deleting new NodeBalancer") - deleteNodeBalancer(newID) - }() - - By("Annotating service with new NodeBalancer ID") - annotations[annLinodeNodeBalancerID] = strconv.Itoa(newID) - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking the service's NodeBalancer ID") - checkNodeBalancerID(framework.TestServerResourceName, newID) - - By("Checking the old NodeBalancer exists") - checkNodeBalancerExists(nodeBalancerID) - }) - }) - - Context("With Node Addition", func() { - var ( - pods []string - labels map[string]string - ) - - BeforeEach(func() { - Skip("skip until rewritten to drop terraform") - pods = []string{"test-pod-node-add"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - 
}, - } - servicePorts := []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - labels = map[string]string{ - "app": "test-loadbalancer", - } - - By("Creating Pods") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithSelector(labels, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Deleting the Newly Created Nodes") - deleteNewNode() - - By("Waiting for the Node to be removed") - checkNumberOfWorkerNodes(2) - }) - - It("should reach the same pod every time it requests", func() { - By("Adding a New Node") - addNewNode() - - By("Waiting for the Node to be Added to the NodeBalancer") - waitForNodeAddition() - }) - }) - - Context("For TCP Connection health check", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - - checkType = "connection" - interval = "10" - timeout = "5" - attempts = "4" - protocol = "tcp" - ) - BeforeEach(func() { - pods = []string{"test-pod-tcp"} - ports := []core.ContainerPort{ - { - Name: "http", - ContainerPort: 80, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeHealthCheckType: checkType, - annLinodeDefaultProtocol: protocol, - annLinodeHealthCheckInterval: interval, - annLinodeHealthCheckTimeout: timeout, - annLinodeHealthCheckAttempts: attempts, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, "nginx", labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should successfully check the health of 2 nodes", func() { - By("Checking NodeBalancer Configurations") - checkNodeBalancerConfigForPort(80, checkArgs{ - checkType: checkType, - interval: interval, - timeout: timeout, - attempts: attempts, - protocol: protocol, - checkNodes: true, - }) - }) - }) - - Context("For Passive Health Check", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - - checkType = "none" - checkPassive = "true" - ) - BeforeEach(func() { - pods = []string{"test-pod-passive-hc"} - ports := []core.ContainerPort{ - { - Name: "http", - ContainerPort: 80, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeHealthCheckPassive: checkPassive, - annLinodeHealthCheckType: checkType, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, "nginx", labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should successfully check the health of 2 nodes", func() { - By("Checking NodeBalancer Configurations") - checkNodeBalancerConfigForPort(80, checkArgs{ - checkType: checkType, - checkPassive: checkPassive, - checkNodes: true, - }) - }) - }) - - Context("For HTTP Status Health Check", func() { - 
var ( - pods []string - labels map[string]string - annotations map[string]string - - checkType = "http" - path = "/" - ) - BeforeEach(func() { - pods = []string{"test-pod-http-status"} - ports := []core.ContainerPort{ - { - Name: "http", - ContainerPort: 80, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeHealthCheckType: checkType, - annLinodeCheckPath: path, - annLinodeDefaultProtocol: "http", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, "nginx", labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should successfully check the health of 2 nodes", func() { - By("Checking NodeBalancer Configurations") - checkNodeBalancerConfigForPort(80, checkArgs{ - checkType: checkType, - path: path, - checkNodes: true, - }) - }) - }) - }) - }) -}) diff --git a/e2e/test/ccm_suite_test.go b/e2e/test/ccm_suite_test.go deleted file mode 100644 index 8f5c9ca8..00000000 --- a/e2e/test/ccm_suite_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package test - -import ( - "e2e_test/test/framework" - "flag" - "os" - "path/filepath" - "testing" - "time" - - "github.com/linode/linodego" - - "github.com/appscode/go/crypto/rand" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var ( - useExisting = false - reuse = false - clusterName string - region = "us-east" - k8s_version string - linodeURL = "https://api.linode.com" -) - -func init() { - flag.StringVar(&framework.Image, "image", framework.Image, "registry/repository:tag") - flag.StringVar(&framework.ApiToken, "api-token", os.Getenv("LINODE_API_TOKEN"), "linode api token") - flag.BoolVar(&reuse, "reuse", reuse, "Create a cluster and continue to use it") - flag.BoolVar(&useExisting, "use-existing", useExisting, "Use an existing kubernetes cluster") - flag.StringVar(&framework.KubeConfigFile, "kubeconfig", os.Getenv("TEST_KUBECONFIG"), "To use existing cluster provide kubeconfig file") - flag.StringVar(®ion, "region", region, "Region to create load balancers") - flag.StringVar(&k8s_version, "k8s_version", k8s_version, "k8s_version for child cluster") - flag.DurationVar(&framework.Timeout, "timeout", 5*time.Minute, "Timeout for a test to complete successfully") - flag.StringVar(&linodeURL, "linode-url", linodeURL, "The Linode API URL to send requests to") -} - -const ( - TIMEOUT = 5 * time.Minute -) - -var root *framework.Framework - -func TestE2e(t *testing.T) { - RegisterFailHandler(Fail) - SetDefaultEventuallyTimeout(framework.Timeout) - RunSpecs(t, "e2e Suite") -} - -var getLinodeClient = func() *linodego.Client { - linodeClient := linodego.NewClient(nil) - linodeClient.SetToken(framework.ApiToken) - linodeClient.SetBaseURL(linodeURL) - return &linodeClient -} - -var _ = BeforeSuite(func() { - if reuse { - clusterName = "ccm-linode-for-reuse" - } else { - clusterName = rand.WithUniqSuffix("ccm-linode") - } - - dir, err := os.Getwd() - Expect(err).NotTo(HaveOccurred()) - kubeConfigFile := filepath.Join(dir, clusterName+".conf") - - if reuse { - if _, err := os.Stat(kubeConfigFile); !os.IsNotExist(err) { - useExisting = true - framework.KubeConfigFile = kubeConfigFile - } - 
} - - if !useExisting { - err := framework.CreateCluster(clusterName, region, k8s_version) - Expect(err).NotTo(HaveOccurred()) - framework.KubeConfigFile = kubeConfigFile - } - - By("Using kubeconfig from " + framework.KubeConfigFile) - config, err := clientcmd.BuildConfigFromFlags("", framework.KubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - - // Clients - kubeClient := kubernetes.NewForConfigOrDie(config) - linodeClient := getLinodeClient() - - // Framework - root = framework.New(config, kubeClient, *linodeClient) - - By("Using Namespace " + root.Namespace()) - err = root.CreateNamespace() - Expect(err).NotTo(HaveOccurred()) -}) - -var _ = AfterSuite(func() { - if !(useExisting || reuse) { - By("Deleting cluster") - err := framework.DeleteCluster(clusterName) - Expect(err).NotTo(HaveOccurred()) - } else { - By("Deleting Namespace " + root.Namespace()) - err := root.DeleteNamespace() - Expect(err).NotTo(HaveOccurred()) - - By("Not deleting cluster") - } -}) diff --git a/e2e/test/framework/cluster.go b/e2e/test/framework/cluster.go deleted file mode 100644 index e40676d2..00000000 --- a/e2e/test/framework/cluster.go +++ /dev/null @@ -1,9 +0,0 @@ -package framework - -func CreateCluster(cluster, region, k8s_version string) error { - return RunScript("create_cluster.sh", ApiToken, cluster, Image, k8s_version, region) -} - -func DeleteCluster(clusterName string) error { - return RunScript("delete_cluster.sh", clusterName) -} diff --git a/e2e/test/framework/framework.go b/e2e/test/framework/framework.go deleted file mode 100644 index a54491e2..00000000 --- a/e2e/test/framework/framework.go +++ /dev/null @@ -1,90 +0,0 @@ -package framework - -import ( - "fmt" - "time" - - "github.com/appscode/go/crypto/rand" - "github.com/linode/linodego" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -var ( - Image = "linode/linode-cloud-controller-manager:latest" - ApiToken = "" - Timeout time.Duration - - KubeConfigFile = "" - TestServerResourceName = "e2e-test-server-" + rand.Characters(5) -) - -const ( - MaxRetry = 100 - TestServerImage = "appscode/test-server:2.3" -) - -type Framework struct { - restConfig *rest.Config - kubeClient kubernetes.Interface - namespace string - name string - - linodeClient linodego.Client -} - -func generateNamespaceName() string { - return rand.WithUniqSuffix("ccm") -} - -func New( - restConfig *rest.Config, - kubeClient kubernetes.Interface, - linodeClient linodego.Client, -) *Framework { - return &Framework{ - restConfig: restConfig, - kubeClient: kubeClient, - linodeClient: linodeClient, - - name: "cloud-controller-manager", - namespace: generateNamespaceName(), - } -} - -func (f *Framework) Invoke() *Invocation { - r := &rootInvocation{ - Framework: f, - app: rand.WithUniqSuffix("csi-driver-e2e"), - } - return &Invocation{ - rootInvocation: r, - LoadBalancer: &lbInvocation{rootInvocation: r}, - } -} - -func (f *Framework) Recycle() error { - if err := f.DeleteNamespace(); err != nil { - return fmt.Errorf("failed to delete namespace (%s)", f.namespace) - } - - f.namespace = generateNamespaceName() - if err := f.CreateNamespace(); err != nil { - return fmt.Errorf("failed to create namespace (%s)", f.namespace) - } - return nil -} - -type Invocation struct { - *rootInvocation - LoadBalancer *lbInvocation -} - -type rootInvocation struct { - *Framework - app string -} - -type lbInvocation struct { - *rootInvocation -} diff --git a/e2e/test/framework/loadbalancer_suite.go b/e2e/test/framework/loadbalancer_suite.go deleted file mode 100644 index 
d5a6d186..00000000 --- a/e2e/test/framework/loadbalancer_suite.go +++ /dev/null @@ -1,86 +0,0 @@ -package framework - -import ( - "context" - "fmt" - - "github.com/linode/linodego" -) - -func (i *lbInvocation) GetNodeBalancerFromService(svcName string, checkIP bool) (*linodego.NodeBalancer, error) { - ingress, err := i.getServiceIngress(svcName, i.Namespace()) - if err != nil { - return nil, err - } - hostname := ingress[0].Hostname - ip := ingress[0].IP - nbList, errListNodeBalancers := i.linodeClient.ListNodeBalancers(context.Background(), nil) - if errListNodeBalancers != nil { - return nil, fmt.Errorf("Error listingNodeBalancer for hostname %s: %s", hostname, errListNodeBalancers.Error()) - } - - for _, nb := range nbList { - if *nb.Hostname == hostname { - if checkIP { - if *nb.IPv4 == ip { - return &nb, nil - } else { - return nil, fmt.Errorf("IPv4 for Nodebalancer (%s) does not match IP (%s) for service %v", *nb.IPv4, ip, svcName) - } - } - return &nb, nil - } - } - return nil, fmt.Errorf("no NodeBalancer Found for service %v", svcName) -} - -func (i *lbInvocation) GetNodeBalancerID(svcName string) (int, error) { - nb, err := i.GetNodeBalancerFromService(svcName, false) - if err != nil { - return -1, err - } - return nb.ID, nil -} - -func (i *lbInvocation) GetNodeBalancerConfig(svcName string) (*linodego.NodeBalancerConfig, error) { - id, err := i.GetNodeBalancerID(svcName) - if err != nil { - return nil, err - } - nbcList, err := i.linodeClient.ListNodeBalancerConfigs(context.Background(), id, nil) - if err != nil { - return nil, err - } - return &nbcList[0], nil -} - -func (i *lbInvocation) GetNodeBalancerConfigForPort(svcName string, port int) (*linodego.NodeBalancerConfig, error) { - id, err := i.GetNodeBalancerID(svcName) - if err != nil { - return nil, err - } - nbConfigs, err := i.linodeClient.ListNodeBalancerConfigs(context.Background(), id, nil) - if err != nil { - return nil, err - } - - for _, config := range nbConfigs { - if config.Port == port { - return &config, nil - } - } - return nil, fmt.Errorf("NodeBalancerConfig for port %d was not found", port) -} - -func (i *lbInvocation) GetNodeBalancerUpNodes(svcName string) (int, error) { - id, err := i.GetNodeBalancerID(svcName) - if err != nil { - return 0, err - } - nbcList, err := i.linodeClient.ListNodeBalancerConfigs(context.Background(), id, nil) - if err != nil { - return 0, err - } - nb := &nbcList[0] - return nb.NodesStatus.Up, nil -} diff --git a/e2e/test/framework/namespace.go b/e2e/test/framework/namespace.go deleted file mode 100644 index c95207d6..00000000 --- a/e2e/test/framework/namespace.go +++ /dev/null @@ -1,26 +0,0 @@ -package framework - -import ( - "context" - - core "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (f *Framework) Namespace() string { - return f.namespace -} - -func (f *Framework) CreateNamespace() error { - obj := &core.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: f.namespace, - }, - } - _, err := f.kubeClient.CoreV1().Namespaces().Create(context.TODO(), obj, metav1.CreateOptions{}) - return err -} - -func (f *Framework) DeleteNamespace() error { - return f.kubeClient.CoreV1().Namespaces().Delete(context.TODO(), f.namespace, deleteInForeground()) -} diff --git a/e2e/test/framework/node.go b/e2e/test/framework/node.go deleted file mode 100644 index 2ac0ad55..00000000 --- a/e2e/test/framework/node.go +++ /dev/null @@ -1,26 +0,0 @@ -package framework - -import ( - "context" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - masterLabel 
= "node-role.kubernetes.io/master" -) - -func (i *Invocation) GetNodeList() ([]string, error) { - workers := make([]string, 0) - nodes, err := i.kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return nil, err - } - - for _, node := range nodes.Items { - if _, found := node.ObjectMeta.Labels[masterLabel]; !found { - workers = append(workers, node.Name) - } - } - return workers, nil -} diff --git a/e2e/test/framework/pod.go b/e2e/test/framework/pod.go deleted file mode 100644 index 46f307d7..00000000 --- a/e2e/test/framework/pod.go +++ /dev/null @@ -1,56 +0,0 @@ -package framework - -import ( - "context" - - core "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (i *lbInvocation) GetPodObject(podName, image string, ports []core.ContainerPort, labels map[string]string) *core.Pod { - return &core.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: i.Namespace(), - Labels: labels, - }, - Spec: core.PodSpec{ - Containers: []core.Container{ - { - Name: "server", - Image: image, - Env: []core.EnvVar{ - { - Name: "POD_NAME", - ValueFrom: &core.EnvVarSource{ - FieldRef: &core.ObjectFieldSelector{ - FieldPath: "metadata.name", - }, - }, - }, - }, - Ports: ports, - }, - }, - }, - } -} - -func (i *lbInvocation) SetNodeSelector(pod *core.Pod, nodeName string) *core.Pod { - pod.Spec.NodeSelector = map[string]string{ - "kubernetes.io/hostname": nodeName, - } - return pod -} - -func (i *lbInvocation) CreatePod(pod *core.Pod) (*core.Pod, error) { - return i.kubeClient.CoreV1().Pods(i.Namespace()).Create(context.TODO(), pod, metav1.CreateOptions{}) -} - -func (i *lbInvocation) DeletePod(name string) error { - return i.kubeClient.CoreV1().Pods(i.Namespace()).Delete(context.TODO(), name, deleteInForeground()) -} - -func (i *lbInvocation) GetPod(name, ns string) (*core.Pod, error) { - return i.kubeClient.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) -} diff --git a/e2e/test/framework/secret.go b/e2e/test/framework/secret.go deleted file mode 100644 index 1d761aa3..00000000 --- a/e2e/test/framework/secret.go +++ /dev/null @@ -1,117 +0,0 @@ -package framework - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - serverCert = `-----BEGIN CERTIFICATE----- -MIIFvTCCA6WgAwIBAgIUBpS47ArkUC0MXYK3LvXU3eRh/CowDQYJKoZIhvcNAQEL -BQAwUjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM -GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2EwHhcNMjQxMTE1 -MTcxNjI1WhcNMjcxMTE1MTcxNjI1WjByMQswCQYDVQQGEwJVUzELMAkGA1UECAwC -UEExFTATBgNVBAcMDFBoaWxhZGVscGhpYTETMBEGA1UECgwKTGlub2RlIExMQzEU -MBIGA1UECwwLTGlub2RlIFRlc3QxFDASBgNVBAMMC2xpbm9kZS50ZXN0MIICIjAN -BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA27JxXbiT+0aptSvE2uFakJQf+qwT -5mGFCNaQFRBDdxxLTUF6UyglZflT9KdVtJ9kmsyZj9vhFnxszWnoUK1Y/runOYTf -JlNBVp28fO43HrUtaHFCZncvu0C4Tdc09721p2pP5nhgXv8BtZeDAVY/hjSIGgP1 -1WNLSWP2eZn4+q4hr7iUqVqLRYVz5e489b1sEXpCiSDWuq6GWRzvEBquHX0F82mW -84DMfa2TrcG4bw0i0r4nKWcgB3at7sR32DvEPFsFiEreFgNsx7b1KcG+ngzA3ZKL -9bviQKSLjjn48VPoV/w5lT3PYGIAjwu2tbNY8J6dUcni4aHnIwhwBFVb5299eIEC -nccueVExw8LtXBYOUKT4A8doKy3ZBq4B+WY8N0QhE6H8tuLrAl6IUh8rduuvJc38 -+QIDD6IKr58zuest6q0/lNvjruOfUMa+EsBPX795wyDuqL4tUyfySyUyYNXcQ4ip -2nFTBYXoB75jLsXHULhOC+7AbxzWeM76mjeNgKzUJaz+1EUMLYOSsfiYFMlWfoiL -ilf7WMdR3bLHccFAA/Qg3CZETU/B20amYDI/+0TvY1td01gzoUx3UjDPB6mpntgr -DoTISDNAvZgPOt9ebs7AEM6/iHgIQtAnCQULTzQ48i3WZlpPYb2IeWOsNCXiOZPN -+STXedL5M3IUwUcCAwEAAaNrMGkwJwYDVR0RBCAwHoILbGlub2RlLnRlc3SCD3d3 
-dy5saW5vZGUudGVzdDAdBgNVHQ4EFgQUgNqzhL/JpxllvFu18qvlg/usDrEwHwYD -VR0jBBgwFoAUC2AMOf90/zpuQ588rPLfe7EukIUwDQYJKoZIhvcNAQELBQADggIB -AL38v8A0Yfi3Qcr7JtMJ+EOgiHo+W1PW05CAKrswqIZGb9pLwcc46N1ICX4/wItH -DfOmiLHEJ+eEaf07XWy1G+orvqsz6FLh2lfr1cne2DH1udiBXw2VyHDeaighgqTX -rHPcV9lLPcRgQgE8AC2WSn3Rmjd4eU+twlqYcJTLt3cy+TulwXxGBjn7CSmRamRA -AaURnVpsMhw9baINrN6+3zbjw1LKpMO3JfPx9NPw0iUYYbUWFMli2RTEwdR0o9Fu -Om6ogyYHHLTUDv2+cHYY4TKJ0LGz9PGB3iwdGbSSpLadjV7xkFERio5B4o/FedLB -CuECSIoWqjScSrVWjpIpG6b7LVkuDI7ZrZ6Rvkwcv4Zezx5TkynQUw9EezEgGRQf -RiBSKoPGKJfRGiYGNXDjqENX3kxqt5cuVe/Z0czrb+2zOMfaTZwJtp2rrJqckxBh -CK4CXQz2nsfGRW/lyJ1Jyc+ul0obXXhynDBA9dE5woCIwgTCRL9M0ZOHjoQi1tDh -27i0j4YzIvlIDIi6iex/XVZi9mhuRvDR7f7c5RVpHsu38znCLyQetFnwOQOmIVZI -lEUQvU1Jnk+e5+RqvOcZ0ZcLppBa71XjUdYm56mzY1ph04n1VUO4rmaI3wNBETGd -jJ3K7XuBBL/YT+02AzsZR/0fiHLdA9DbLUdhtRs0mb5u ------END CERTIFICATE-----` - serverKey = `-----BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEA27JxXbiT+0aptSvE2uFakJQf+qwT5mGFCNaQFRBDdxxLTUF6 -UyglZflT9KdVtJ9kmsyZj9vhFnxszWnoUK1Y/runOYTfJlNBVp28fO43HrUtaHFC -Zncvu0C4Tdc09721p2pP5nhgXv8BtZeDAVY/hjSIGgP11WNLSWP2eZn4+q4hr7iU -qVqLRYVz5e489b1sEXpCiSDWuq6GWRzvEBquHX0F82mW84DMfa2TrcG4bw0i0r4n -KWcgB3at7sR32DvEPFsFiEreFgNsx7b1KcG+ngzA3ZKL9bviQKSLjjn48VPoV/w5 -lT3PYGIAjwu2tbNY8J6dUcni4aHnIwhwBFVb5299eIECnccueVExw8LtXBYOUKT4 -A8doKy3ZBq4B+WY8N0QhE6H8tuLrAl6IUh8rduuvJc38+QIDD6IKr58zuest6q0/ -lNvjruOfUMa+EsBPX795wyDuqL4tUyfySyUyYNXcQ4ip2nFTBYXoB75jLsXHULhO -C+7AbxzWeM76mjeNgKzUJaz+1EUMLYOSsfiYFMlWfoiLilf7WMdR3bLHccFAA/Qg -3CZETU/B20amYDI/+0TvY1td01gzoUx3UjDPB6mpntgrDoTISDNAvZgPOt9ebs7A -EM6/iHgIQtAnCQULTzQ48i3WZlpPYb2IeWOsNCXiOZPN+STXedL5M3IUwUcCAwEA -AQKCAgBgau3p7cm0K4zrX+wjC2fNr9RhFQgewYm7GT9enyacraQ2oZfnyuSu3j+E -TbQFczaZ4VU7l4ovbifp9qLoVUuLcBux2Kh+j2dLdip0wa8bIPRus9YqVgBys7Kv -JtWuLGn+sV+jjAzvZAcCBR6PhaSXZ5KbqEVJgyxVZzOSpopoqedK0T0dHgmlVy5I -KMhEKP+2o+tzdyAGCfYYQeSBMtRbSLVF4H9JGqukNHttdGlXA3LW/nD9cK7T17f5 -4+uc0I4M1v2UlRbmnlYtSBRMYSUhBAPYuioGjJB9QjmlD7g7YVHE24MCBoBuklQg -c0macL2FzHbKoEmcMIvaCifvHu8X0J5qjZghmi7Zozh/Skg9B4XINdHpX7vX7INZ -A7z2nx5x4xaNPO3hJJJkbpCcpSIEQkuqe8a/GYcn0tTMTqoGXr/OFz+ut1ZzZThs -YL8YWh2SqVOzR8xJE3cR9qd/ISTl1CPrxWyWm3eOZ0WGOKZTzUIN3p8gcDIDucs4 -kXGDCh7tj7EsYWpa0fnEp5n8kupLWPY050aal898xPP4RDNQFx/VdDBfa/PVKKMy -OzXFq801UoOdF9d6FR3p3YS5O0Zd8UILJQui3s2dpY6/BzuWa2ch9PwvEFI8rsT6 -8VxRCEG9gJxA/GSV/ZNU4hH3Tiv7fSG/aED/uUSvI/t7AWgQgQKCAQEA+Xrshwnt -Cp0cDdkHde/0WnT3DUEvYM0tlJY6z1YR5Kx0GL4zR+yhBuTfmgCMsbkNLvHsc3Us -UbwM4OSAD0oHMa6LCYer6fiYWfv4c19gCtLCZhjBPYHSwXGaQxdjiEE4N6J+mnPW -n39DCjXhl//WlatbLkZRbGYnbORfcE2Kx72OAJt2ujp0Jr/Loi1px6KMbKnzhEhy -mI6FPejx1h8KC5xlCq6faUnal1ZvdNc5WkxtZ1YOCzaKbVuGEok3bFK986aSYYlP -AI4SMo0M/Sy/5tlb9CL5H8s4Dbz35CRyKmXYMQYeGtJ/7HTSdrU7qcp4EZTu5RVX -1xtq6S+w4/V3JwKCAQEA4XBDaxw2B5ica9xxTAzzq7H9QtGgtYaBIQmkBVqVvoDs -ywGbe7ueJFY7id2rWdeDB7Nxt6feoTuoyXmA3YYAeUBQZGtLKc3MZfdIFJt6yM1D -6FZyITwo0Zl6ShPxIYsc94BRA7YzmQWaucByrRFLX+y463u2UGqD9s3aPZm921mb -oweIkEQiD2lJNqhx0gRphN+Le+0z7Gh+1ZxI8XikSIkuQ+nvuh5zQA/lqmWr4E9m -EICTP6D5lvJj3EpKZ1pUgHvPEy/fyUq+i7nu0hS394blI6amv2iwmrLhe2NafCHu -+Nux305uO8jqHzEl+l1CvGf0BqNXCM3x5CgLMJW44QKCAQBpmRpc3lqzT2T8h4yc -4wBu+WtI9Pp04uQULLKf6DKStFw/zOIv430VSfNLYEgtQcLOyB/pjwM/ZXWeC5oY -3qDE6rh3RDIESvFRxVGYpBom+qbGSFwjCLyInOlK1K+QkOqWwfUMs1N5F4js3Xmr -uOK/X1Ss9Z6pX2P4t4GeK3Q+r4FXyHYsxWk8rZon/0jy81608ArfRzsaT9keJ2eV -1nWODJjIOLnI+zXHMRLkReVEz2zPfKFdJazaNQ8+8U3AUBWO+EalelUySvBw7Ts+ -Pp7Lu90sLVF9n6sORZo3uyWHxKwJtCkx+T+kep5LGNM0PzsrVfr4hFw19KkAIuug -0dmpAoIBAQCbbix9b+DskdLfJwjSV2e1bC1iYWe9YDQtlBkLO+5cf0VDniMWRz/8 -a5v3LOdUNRt5NsZjypDbd2ejKWuo0BgJgUcsRTF4bBTOBJUk6CHaynNUgC2GLpUy 
-FfBTnLY221QobMbumTOwAEYyZbZrDq56P5sreIs1nIrJohojOJnG31xIJgyI8wDM -wVmiHrcDBtm9q+belaekClPQcUV1fyk9fZ9xYZxQJWhutccyGZFMQVHsdMmRKCqN -YSdqnan44jW6tCIMZ4iSnz8K1TIMlA5W0iGv19nFxKdmsYh26wRa64Z4+/gCL3Af -NiH9SYSWvrAheEauQPXj8yIgnV9BqyjhAoIBAA0NGugiXqloQD4tKFYROZ2rm1kx -IlbC5rVePSeMz59Qty79dODAvGuJxOb/vKOlQqcULfgidpctBdtZJ/oencwOf/49 -e0R5uYpvsxyvAro5OKxk0SD2YSgkdBf8gF5+opG6ZjcBcRk3jp8cdYDTIpViJco5 -IJwbMqoWpJxuilj0imxDNQPPoN6yf3mkD2tyYp2YL9X5bgSB58l1LCBJDdJDC4tR -rrXq0Btn9jpwwW/AJ6mIFWWGQKDpkGhLRHxOOK4dC+XgbkEogDSOlZDOEALLvFI9 -OVIIxvytGW/Qy6AEzsMnsTPUJMyPsktCQ2YI628dytmqXOniZe1QQ2R7dzw= ------END RSA PRIVATE KEY-----` -) - -func (i *lbInvocation) CreateTLSSecret(secretName string) (err error) { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - }, - Data: map[string][]byte{ - corev1.TLSCertKey: []byte(serverCert), - corev1.TLSPrivateKeyKey: []byte(serverKey), - }, - Type: corev1.SecretTypeTLS, - } - - _, err = i.kubeClient.CoreV1().Secrets(i.Namespace()).Create(context.TODO(), secret, metav1.CreateOptions{}) - - return err -} - -func (i *lbInvocation) DeleteSecret(name string) error { - err := i.kubeClient.CoreV1().Secrets(i.Namespace()).Delete(context.TODO(), name, metav1.DeleteOptions{}) - return err -} diff --git a/e2e/test/framework/service.go b/e2e/test/framework/service.go deleted file mode 100644 index e1c1d8be..00000000 --- a/e2e/test/framework/service.go +++ /dev/null @@ -1,137 +0,0 @@ -package framework - -import ( - "context" - "fmt" - - core "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/util/retry" -) - -func (i *lbInvocation) createOrUpdateService(selector, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP, isCreate bool) error { - var sessionAffinity core.ServiceAffinity = "None" - if isSessionAffinityClientIP { - sessionAffinity = "ClientIP" - } - svc := &core.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestServerResourceName, - Namespace: i.Namespace(), - Annotations: annotations, - Labels: map[string]string{ - "app": "test-server-" + i.app, - }, - }, - Spec: core.ServiceSpec{ - Ports: ports, - Selector: selector, - Type: core.ServiceTypeLoadBalancer, - SessionAffinity: sessionAffinity, - }, - } - - service := i.kubeClient.CoreV1().Services(i.Namespace()) - if isCreate { - _, err := service.Create(context.TODO(), svc, metav1.CreateOptions{}) - if err != nil { - return err - } - } else { - if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - options := metav1.GetOptions{} - resource, err := service.Get(context.TODO(), TestServerResourceName, options) - if err != nil { - return err - } - svc.ObjectMeta.ResourceVersion = resource.ResourceVersion - svc.Spec.ClusterIP = resource.Spec.ClusterIP - _, err = service.Update(context.TODO(), svc, metav1.UpdateOptions{}) - return err - }); err != nil { - return err - } - } - return nil -} - -func (i *lbInvocation) GetServiceWatcher() (watch.Interface, error) { - var timeoutSeconds int64 = 30 - watcher, err := i.kubeClient.CoreV1().Events(i.Namespace()).Watch(context.TODO(), metav1.ListOptions{ - FieldSelector: "involvedObject.kind=Service", - Watch: true, - TimeoutSeconds: &timeoutSeconds, - }) - if err != nil { - return nil, err - } - return watcher, nil -} - -func (i *lbInvocation) GetService() (*core.Service, error) { - return i.kubeClient.CoreV1().Services(i.Namespace()).Get(context.TODO(), TestServerResourceName, metav1.GetOptions{}) -} - -func (i *lbInvocation) 
CreateService(selector, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) error { - return i.createOrUpdateService(selector, annotations, ports, isSessionAffinityClientIP, true) -} - -func (i *lbInvocation) UpdateService(selector, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) error { - err := i.deleteEvents() - if err != nil { - return err - } - return i.createOrUpdateService(selector, annotations, ports, isSessionAffinityClientIP, false) -} - -func (i *lbInvocation) DeleteService() error { - return i.kubeClient.CoreV1().Services(i.Namespace()).Delete(context.TODO(), TestServerResourceName, metav1.DeleteOptions{}) -} - -func (i *lbInvocation) GetServiceEndpoints() ([]core.EndpointAddress, error) { - ep, err := i.kubeClient.CoreV1().Endpoints(i.Namespace()).Get(context.TODO(), TestServerResourceName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if len(ep.Subsets) == 0 { - return nil, fmt.Errorf("No service endpoints found for %s", TestServerResourceName) - } - return ep.Subsets[0].Addresses, err -} - -func (i *lbInvocation) deleteEvents() error { - return i.kubeClient.CoreV1().Events(i.Namespace()).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: "involvedObject.kind=Service"}) -} - -func (i *lbInvocation) GetLoadBalancerIps() ([]string, error) { - svc, err := i.kubeClient.CoreV1().Services(i.Namespace()).Get(context.TODO(), TestServerResourceName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - var serverAddr []string - for _, ingress := range svc.Status.LoadBalancer.Ingress { - if len(svc.Spec.Ports) > 0 { - for _, port := range svc.Spec.Ports { - if port.NodePort > 0 { - serverAddr = append(serverAddr, fmt.Sprintf("%s:%d", ingress.IP, port.Port)) - } - } - } - } - if serverAddr == nil { - return nil, fmt.Errorf("failed to get Status.LoadBalancer.Ingress for service %s/%s", TestServerResourceName, i.Namespace()) - } - return serverAddr, nil -} - -func (i *lbInvocation) getServiceIngress(name, namespace string) ([]core.LoadBalancerIngress, error) { - svc, err := i.kubeClient.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if svc.Status.LoadBalancer.Ingress == nil { - return nil, fmt.Errorf("Status.LoadBalancer.Ingress is empty for %s", name) - } - return svc.Status.LoadBalancer.Ingress, nil -} diff --git a/e2e/test/framework/util.go b/e2e/test/framework/util.go deleted file mode 100644 index 31379256..00000000 --- a/e2e/test/framework/util.go +++ /dev/null @@ -1,180 +0,0 @@ -package framework - -import ( - "context" - "crypto/tls" - "crypto/x509" - "io" - "log" - "net" - "net/http" - "os" - "os/exec" - "path" - "strings" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - scriptDirectory = "scripts" - RetryInterval = 5 * time.Second - RetryTimeout = 15 * time.Minute - caCert = `-----BEGIN CERTIFICATE----- -MIIFejCCA2KgAwIBAgIJAN7D2Ju254yUMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxCzAJBgNVBAMMAmNhMB4XDTE5MDQwOTA5MzYxNFoXDTI5 -MDQwNjA5MzYxNFowUjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx -ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2Ew -ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDoTwE1kijjrhCcGXSPyHlf -7NngxPCFuFqVdRvG4DrrdL7YW3iEovAXTbuoyiPpF/U9T5BfDVs2dCEHGlpiOADR -tA/Z5mFbVcefOCBL+rL2sTN2o19U7eimcZjH1xN1L5j2RkYmRAoI+nwG/g5NehOu 
-YM930oPqe3vOYevOHBCebHuKc7zaM31AtKcDG0IjIJ1ZdJy91+rx8Prb+IxTIKZl -Ca/e0e6iZWCPp5kaJyNUGZkjjcRVzFM79xVf34DEuS+N1RZP7EevM0bfHehJfSpU -M6gfsrL9WctD0nGJd2YsH9hLCub2G7emgiV7dvN1R0QW9ijguwZ9aBemiat5AnGs -QHSR+WRijZNjHTWY4DEaTNWecDd2Tz37RNN9Ow8FThERwZVnpji1kcijEg4g7Ppy -9P6tdavjkFVW0xOieInjS/m5Bxj2a44UT1JshNr1M4HGXvqUcCFS4vhytIc05lOv -X20NR+C+RgNy7G14Hz/3+qRo9hlkonyTJAoU++2vgsaNmmhcU6fGgYpARHm1Y675 -pGrgZAcjFcsG84q0dSdr6AeY+6+1UyS6pktBobXIiciSPmseHJ24dRd06OYQMxQ3 -ccOZhZ3cNy8OMT9eUwcjnif36BVmZdCObJexqXq/cSVX3IhhaQhLLfN9ZyGDkxWl -N5ehRMCabgv3mQCDd/9HMwIDAQABo1MwUTAdBgNVHQ4EFgQUC2AMOf90/zpuQ588 -rPLfe7EukIUwHwYDVR0jBBgwFoAUC2AMOf90/zpuQ588rPLfe7EukIUwDwYDVR0T -AQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAHopjHkeIciVtlAyAPEfh/pnf -r91H1aQMPmHisqlveM3Bz9MOIa9a26YO+ZzCPozALxkJIjdp7L3L8Q8CuLmkC4YV -6nHvSLaC/82UGoiRGyjdFh30puqekWMZ62ZrQLpCr0DzOJrarslLM0fONqpjDTWP -8OXyRcnVSbFB1n5XUoviMTTxYOQ3HQe8b3Tt7GO/9w6dWkkSX1Vy4RmzNt7fb9K5 -mxu/n+SVu+2iQX9oEWq2rpvsD3RGnhewCPlZU8NQYKb72K00kEcG/J+WU1IPtkq0 -JaU5TDMMzfp3PMYxCzYD9pdM8J0N0zJac2t9hkx7H83jy/TfLrmDvB6nCK8N3+6j -8In6RwYw4XJ41AWsJpGXBpvYCq5GJjdogEi9IaBXSmtVPYm0NURYbephk+Wg0oyk -ESk4cyWUhYG8mcMyORc8lzOQ79YT6A5QnitTGCVQGTlnNRjevtfhAFEXr9e8UZFq -oWtfEdltH6ElGDpivwuOERAN9v3GoPlifpo1UDElnPJft+C0cRv0YpPwvwJTy1MU -q1op/4Z/7SHzFWTSyRZqvI41AsLImylzfZ0w9U8sogd4pHv30kGc9+LhqrsfLDvK -9XedVoWJx/x3i8BUhVDyd4FyVWHCf9N/6a9HzbFWT8QZTBk5pErTaFiTi5TQxoi7 -ER4ILjvRX7mLWUGhN58= ------END CERTIFICATE-----` - Domain = "linode.test" -) - -func RunScript(script string, args ...string) error { - wd, err := os.Getwd() - if err != nil { - return err - } - - return runCommand(path.Join(wd, scriptDirectory, script), args...) -} - -func runCommand(cmd string, args ...string) error { - c := exec.Command(cmd, args...) - c.Stdout = os.Stdout - c.Stderr = os.Stderr - log.Printf("Running command %q\n", cmd) - return c.Run() -} - -func deleteInForeground() metav1.DeleteOptions { - policy := metav1.DeletePropagationForeground - graceSeconds := int64(0) - return metav1.DeleteOptions{ - PropagationPolicy: &policy, - GracePeriodSeconds: &graceSeconds, - } -} - -func getHTTPSResponse(domain, ip, port string) (string, error) { - rootCAs, _ := x509.SystemCertPool() - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } - - if ok := rootCAs.AppendCertsFromPEM([]byte(caCert)); !ok { - log.Println("No certs appended, using system certs only") - } - - config := &tls.Config{ - RootCAs: rootCAs, - } - - dialer := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - dialContext := func(ctx context.Context, network, addr string) (net.Conn, error) { - if addr == domain+":"+port { - addr = ip + ":" + port - } - return dialer.DialContext(ctx, network, addr) - } - - tr := &http.Transport{ - TLSClientConfig: config, - DialContext: dialContext, - } - client := &http.Client{Transport: tr} - - log.Println("Waiting for response from https://" + ip + ":" + port) - u := "https://" + domain + ":" + port - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return "", err - } - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - bodyBytes, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - bodyString := string(bodyBytes) - - return bodyString, nil -} - -func WaitForHTTPSResponse(link string) (string, error) { - hostPort := strings.Split(link, ":") - host, port := hostPort[0], hostPort[1] - - resp, err := getHTTPSResponse(Domain, host, port) - if err != nil { - return "", err - } - return resp, nil 
-} - -func getHTTPResponse(link string) (bool, string, error) { - resp, err := http.Get("http://" + link) - if err != nil { - return false, "", err - } - defer resp.Body.Close() - - bodyBytes, err := io.ReadAll(resp.Body) - if err != nil { - return false, "", err - } - return resp.StatusCode == 200, string(bodyBytes), nil -} - -func WaitForHTTPResponse(link string) (string, error) { - ok, resp, err := getHTTPResponse(link) - if err != nil { - return "", err - } - if ok { - return resp, nil - } - return "", nil -} - -func GetResponseFromCurl(endpoint string) string { - resp, err := exec.Command("curl", "--max-time", "5", "-s", endpoint).Output() - if err != nil { - return "" - } - return string(resp) -} diff --git a/e2e/test/fw-use-specified-nb/chainsaw-test.yaml b/e2e/test/fw-use-specified-nb/chainsaw-test.yaml new file mode 100644 index 00000000..2cbd659a --- /dev/null +++ b/e2e/test/fw-use-specified-nb/chainsaw-test.yaml @@ -0,0 +1,124 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: fw-use-specified-nb +spec: + bindings: + - name: fwname + value: (join('-', ['ccm-fwtest', env('CLUSTER_NAME')])) + namespace: "fw-use-specified-nb" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create firewall, Create pods and services + try: + - script: + env: + - name: FWLABEL + value: ($fwname) + content: | + set -e + + create_fw=$(curl -s --write-out "%{http_code}\n" --output /dev/null --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/networking/firewalls" \ + --data " + { + \"label\": \"$FWLABEL\", + \"rules\": { + \"inbound\": [{ + \"action\": \"ACCEPT\", + \"label\": \"inbound-rule123\", + \"description\": \"inbound rule123\", + \"ports\": \"4321\", + \"protocol\": \"TCP\", + \"addresses\": { + \"ipv4\": [\"0.0.0.0/0\"] + } + }], + \"inbound_policy\": \"ACCEPT\", + \"outbound_policy\": \"ACCEPT\" + } + } + " + ) + + if [[ $create_fw == "200" ]]; then + echo "fw created" + fi + check: + ($error == null): true + (contains($stdout, 'fw created')): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Annotate service with nodebalancer id + try: + - script: + env: + - name: FWLABEL + value: ($fwname) + content: | + set -e + re='^[0-9]+$' + + fwid=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "X-Filter: {\"label\": \"$FWLABEL\"}" \ + "https://api.linode.com/v4/networking/firewalls" | jq .data[].id) + + if ! 
[[ $fwid =~ $re ]]; then + echo "Firewall id [$fwid] is incorrect, failed to fetch firewall" + exit 1 + fi + + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-firewall-id=$fwid + sleep 5 + + for i in {1..10}; do + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + fwconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/networking/firewalls/$fwid") + + fw_attached_to_nb=$(echo $fwconfig | jq ".entities[] | select(.id == $nbid) | .id == $nbid") + + if [[ $fw_attached_to_nb == "true" ]]; then + echo "Conditions met" + break + fi + + sleep 10 + done + + curl -s -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/networking/firewalls/$fwid" + check: + ($error == null): true + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/fw-use-specified-nb/create-pods-services.yaml b/e2e/test/fw-use-specified-nb/create-pods-services.yaml new file mode 100644 index 00000000..00113a2f --- /dev/null +++ b/e2e/test/fw-use-specified-nb/create-pods-services.yaml @@ -0,0 +1,48 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: fw-use-specified-nb + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: fw-use-specified-nb + template: + metadata: + labels: + app: fw-use-specified-nb + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + name: svc-test + labels: + app: fw-use-specified-nb +spec: + type: LoadBalancer + selector: + app: fw-use-specified-nb + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml b/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml new file mode 100644 index 00000000..df1a0952 --- /dev/null +++ b/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml @@ -0,0 +1,121 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-created-with-new-nb-id +spec: + namespace: "lb-created-with-new-nb-id" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! 
[[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + - name: Create new nodebalancer and update service to use it + try: + - script: + content: | + set -e + + re='^[0-9]+$' + + old_nbid=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + kubectl annotate --overwrite svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid + + for i in {1..10}; do + nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $nbid2 ]]; then + echo "Condition met" + break + fi + sleep 5 + done + + old_nb_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$old_nbid") + + if [[ $old_nb_resp == "404" ]]; then + echo "old nodebalancer not found" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + (contains($stdout, 'old nodebalancer not found')): true diff --git a/e2e/test/lb-created-with-new-nb-id/create-pods-services.yaml b/e2e/test/lb-created-with-new-nb-id/create-pods-services.yaml new file mode 100644 index 00000000..c37615c7 --- /dev/null +++ b/e2e/test/lb-created-with-new-nb-id/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: created-with-new-nb-id + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: created-with-new-nb-id + template: + metadata: + labels: + app: created-with-new-nb-id + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: created-with-new-nb-id +spec: + type: LoadBalancer + selector: + app: created-with-new-nb-id + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git 
a/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml b/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml new file mode 100644 index 00000000..0b77dbe9 --- /dev/null +++ b/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml @@ -0,0 +1,73 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-created-with-specified-nb-id +spec: + namespace: "lb-created-with-specified-nb-id" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true diff --git a/e2e/test/lb-created-with-specified-nb-id/create-pods-services.yaml b/e2e/test/lb-created-with-specified-nb-id/create-pods-services.yaml new file mode 100644 index 00000000..1d286209 --- /dev/null +++ b/e2e/test/lb-created-with-specified-nb-id/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: created-with-specified-nb-id + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: created-with-specified-nb-id + template: + metadata: + labels: + app: created-with-specified-nb-id + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: created-with-specified-nb-id +spec: + type: LoadBalancer + selector: + app: created-with-specified-nb-id + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml b/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml new file mode 100644 index 00000000..723a5d35 --- /dev/null +++ b/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml @@ -0,0 +1,124 @@ +# 
yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-delete-svc-no-nb +spec: + namespace: "lb-delete-svc-no-nb" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + - name: Delete nodebalancer, delete service and make sure its deleted + try: + - script: + content: | + set -e + + re='^[0-9]+$' + nbid=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + + if ! 
[[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect" + exit 1 + fi + + # Delete nodebalancer + delete_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if [[ $delete_resp == "200" ]]; then + echo "nodebalancer deleted" + fi + + # Check to make sure nodebalancer is deleted + old_nb_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if [[ $old_nb_resp == "404" ]]; then + echo "old nodebalancer not found" + fi + + # Delete service and make sure its deleted + kubectl --timeout=60s delete svc svc-test -n $NAMESPACE + + for i in {1..10}; do + if kubectl get svc svc-test -n $NAMESPACE > /dev/null 2>&1; then + sleep 5 + else + echo "service is deleted" + break + fi + done + check: + ($error == null): true + (contains($stdout, 'nodebalancer deleted')): true + (contains($stdout, 'old nodebalancer not found')): true + (contains($stdout, 'service is deleted')): true diff --git a/e2e/test/lb-delete-svc-no-nb/create-pods-services.yaml b/e2e/test/lb-delete-svc-no-nb/create-pods-services.yaml new file mode 100644 index 00000000..55ea60f9 --- /dev/null +++ b/e2e/test/lb-delete-svc-no-nb/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: delete-svc-no-nb + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: delete-svc-no-nb + template: + metadata: + labels: + app: delete-svc-no-nb + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: delete-svc-no-nb +spec: + type: LoadBalancer + selector: + app: delete-svc-no-nb + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml b/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml new file mode 100644 index 00000000..7369d478 --- /dev/null +++ b/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml @@ -0,0 +1,121 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-delete-svc-use-new-nbid +spec: + namespace: "lb-delete-svc-use-new-nbid" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! 
[[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + - name: Create new nodebalancer and update service to use it + try: + - script: + content: | + set -e + + re='^[0-9]+$' + + old_nbid=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + kubectl annotate --overwrite svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid + + for i in {1..10}; do + nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $nbid2 ]]; then + echo "Condition met" + break + fi + sleep 5 + done + + old_nb_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$old_nbid") + + if [[ $old_nb_resp == "404" ]]; then + echo "old nodebalancer not found" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + (contains($stdout, 'old nodebalancer not found')): true diff --git a/e2e/test/lb-delete-svc-use-new-nbid/create-pods-services.yaml b/e2e/test/lb-delete-svc-use-new-nbid/create-pods-services.yaml new file mode 100644 index 00000000..58815cf6 --- /dev/null +++ b/e2e/test/lb-delete-svc-use-new-nbid/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: delete-svc-use-new-nbid + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: delete-svc-use-new-nbid + template: + metadata: + labels: + app: delete-svc-use-new-nbid + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: delete-svc-use-new-nbid +spec: + type: LoadBalancer + selector: + app: delete-svc-use-new-nbid + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git 
a/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml b/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml new file mode 100644 index 00000000..99ceb8e4 --- /dev/null +++ b/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml @@ -0,0 +1,73 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-delete-svc-use-specified-nb +spec: + namespace: "lb-delete-svc-use-specified-nb" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true diff --git a/e2e/test/lb-delete-svc-use-specified-nb/create-pods-services.yaml b/e2e/test/lb-delete-svc-use-specified-nb/create-pods-services.yaml new file mode 100644 index 00000000..87461401 --- /dev/null +++ b/e2e/test/lb-delete-svc-use-specified-nb/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: delete-svc-use-specified-nb + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: delete-svc-use-specified-nb + template: + metadata: + labels: + app: delete-svc-use-specified-nb + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: delete-svc-use-specified-nb +spec: + type: LoadBalancer + selector: + app: delete-svc-use-specified-nb + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml b/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml new file mode 100644 index 00000000..69c7cd0e --- /dev/null +++ b/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml @@ -0,0 +1,64 @@ +# 
yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-hostname-only-ingress +spec: + namespace: "lb-hostname-only-ingress" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that svc-test-1 loadbalancer ingress contains only hostname + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test-1 + status: + (loadBalancer.ingress[0].ip != null): false + (loadBalancer.ingress[0].hostname != null): true + - name: Check that svc-test-2 loadbalancer ingress contains ip + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test-2 + status: + (loadBalancer.ingress[0].ip != null): true + (loadBalancer.ingress[0].hostname != null): true + - name: Annotate service + try: + - script: + content: | + set -e + kubectl annotate svc svc-test-2 -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress=true + check: + ($error == null): true + - name: Check and make sure svc-test-2 ingress only contains hostname + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test-2 + status: + (loadBalancer.ingress[0].ip != null): false + (loadBalancer.ingress[0].hostname != null): true diff --git a/e2e/test/lb-hostname-only-ingress/create-pods-services.yaml b/e2e/test/lb-hostname-only-ingress/create-pods-services.yaml new file mode 100644 index 00000000..59d52fe6 --- /dev/null +++ b/e2e/test/lb-hostname-only-ingress/create-pods-services.yaml @@ -0,0 +1,66 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: hostname-ingress + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: hostname-ingress + template: + metadata: + labels: + app: hostname-ingress + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test-1 + annotations: + service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress: "true" + labels: + app: hostname-ingress +spec: + type: LoadBalancer + selector: + app: hostname-ingress + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test-2 + labels: + app: hostname-ingress +spec: + type: LoadBalancer + selector: + app: hostname-ingress + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-http-body-health-check/chainsaw-test.yaml b/e2e/test/lb-http-body-health-check/chainsaw-test.yaml new file mode 100644 index 00000000..16a2ed30 --- /dev/null +++ b/e2e/test/lb-http-body-health-check/chainsaw-test.yaml @@ -0,0 +1,66 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-http-body-health-check +spec: + namespace: "lb-http-body-health-check" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: 
../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch nodebalancer config for port 80 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)') + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_check=$(echo $nbconfig | jq '.check == "http_body"') + port_80_path=$(echo $nbconfig | jq '.check_path == "/"') + port_80_body=$(echo $nbconfig | jq '.check_body == "nginx"') + port_80_protocol=$(echo $nbconfig | jq '.protocol == "http"') + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') + + if [[ $port_80_check == "true" && $port_80_path == "true" && $port_80_body == "true" && $port_80_protocol == "true" && $port_80_up_nodes == "true" ]]; then + echo "All conditions met" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'All conditions met')): true diff --git a/e2e/test/lb-http-body-health-check/create-pods-services.yaml b/e2e/test/lb-http-body-health-check/create-pods-services.yaml new file mode 100644 index 00000000..1e93bd31 --- /dev/null +++ b/e2e/test/lb-http-body-health-check/create-pods-services.yaml @@ -0,0 +1,52 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: http-body-health-check + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: http-body-health-check + template: + metadata: + labels: + app: http-body-health-check + spec: + containers: + - image: nginx + name: test + ports: + - name: http + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-body: nginx + service.beta.kubernetes.io/linode-loadbalancer-check-path: / + service.beta.kubernetes.io/linode-loadbalancer-check-type: http_body + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: http + name: svc-test + labels: + app: http-body-health-check +spec: + type: LoadBalancer + selector: + app: http-body-health-check + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-http-status-health-check/chainsaw-test.yaml b/e2e/test/lb-http-status-health-check/chainsaw-test.yaml new file mode 100644 index 00000000..fbe259e7 --- /dev/null +++ b/e2e/test/lb-http-status-health-check/chainsaw-test.yaml @@ -0,0 +1,65 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-http-status-health-check +spec: + namespace: "lb-http-status-health-check" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: 
+ file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch nodebalancer config for port 80 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)') + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_check=$(echo $nbconfig | jq '.check == "http"') + port_80_path=$(echo $nbconfig | jq '.check_path == "/"') + port_80_protocol=$(echo $nbconfig | jq '.protocol == "http"') + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') + + if [[ $port_80_check == "true" && $port_80_path == "true" && $port_80_protocol == "true" && $port_80_up_nodes == "true" ]]; then + echo "All conditions met" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'All conditions met')): true diff --git a/e2e/test/lb-http-status-health-check/create-pods-services.yaml b/e2e/test/lb-http-status-health-check/create-pods-services.yaml new file mode 100644 index 00000000..ab76db96 --- /dev/null +++ b/e2e/test/lb-http-status-health-check/create-pods-services.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: http-status-health-check + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: http-status-health-check + template: + metadata: + labels: + app: http-status-health-check + spec: + containers: + - image: nginx + name: test + ports: + - name: http + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-path: "/" + service.beta.kubernetes.io/linode-loadbalancer-check-type: "http" + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" + name: svc-test + labels: + app: http-status-health-check +spec: + type: LoadBalancer + selector: + app: http-status-health-check + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-passive-health-check/chainsaw-test.yaml b/e2e/test/lb-passive-health-check/chainsaw-test.yaml new file mode 100644 index 00000000..1d2ed8e9 --- /dev/null +++ b/e2e/test/lb-passive-health-check/chainsaw-test.yaml @@ -0,0 +1,64 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-passive-health-check +spec: + namespace: "lb-passive-health-check" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service 
+ metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch nodebalancer config for port 80 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)') + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_check=$(echo $nbconfig | jq '.check == "none"') + port_80_passive=$(echo $nbconfig | jq '.check_passive == true') + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') + + if [[ $port_80_check == "true" && $port_80_passive == "true" && $port_80_up_nodes == "true" ]]; then + echo "All conditions met" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'All conditions met')): true diff --git a/e2e/test/lb-passive-health-check/create-pods-services.yaml b/e2e/test/lb-passive-health-check/create-pods-services.yaml new file mode 100644 index 00000000..daf4f6fd --- /dev/null +++ b/e2e/test/lb-passive-health-check/create-pods-services.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: passive-health-check + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: passive-health-check + template: + metadata: + labels: + app: passive-health-check + spec: + containers: + - image: nginx + name: test + ports: + - name: http + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-passive: "true" + service.beta.kubernetes.io/linode-loadbalancer-check-type: none + name: svc-test + labels: + app: passive-health-check +spec: + type: LoadBalancer + selector: + app: passive-health-check + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml b/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml new file mode 100644 index 00000000..d7f2661d --- /dev/null +++ b/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml @@ -0,0 +1,106 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-preserve-annotation-new-nb-specified +spec: + namespace: "lb-preserve-annotation-new-nb-specified" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create resources + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Create new nodebalancer and update service to use it + try: + - script: + content: | + set -e + + re='^[0-9]+$' + + # Get existing nodebalancer id + old_nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + # 
Create new nodebalancer and use it + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid + + for i in {1..10}; do + nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $nbid2 ]]; then + echo "updated nodebalancer used" + break + fi + sleep 5 + done + + # Check old nodebalancer still exists + old_nb_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$old_nbid") + + if [[ $old_nb_resp == "200" ]]; then + echo "old nodebalancer found" + fi + + # cleanup old nodebalancer + delete_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$old_nbid") + + if [[ $delete_resp != "200" ]]; then + echo "failed deleting nodebalancer" + fi + + # cleanup new nodebalancer + delete_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if [[ $delete_resp != "200" ]]; then + echo "failed deleting nodebalancer" + fi + check: + ($error == null): true + (contains($stdout, 'updated nodebalancer used')): true + (contains($stdout, 'old nodebalancer found')): true diff --git a/e2e/test/lb-preserve-annotation-new-nb-specified/create-pods-services.yaml b/e2e/test/lb-preserve-annotation-new-nb-specified/create-pods-services.yaml new file mode 100644 index 00000000..f0b9bc1c --- /dev/null +++ b/e2e/test/lb-preserve-annotation-new-nb-specified/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: preserve-annotation-new-nb-specified + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: preserve-annotation-new-nb-specified + template: + metadata: + labels: + app: preserve-annotation-new-nb-specified + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-preserve: "true" + labels: + app: preserve-annotation-new-nb-specified +spec: + type: LoadBalancer + selector: + app: preserve-annotation-new-nb-specified + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml b/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml new file mode 100644 index 00000000..2e33d401 --- /dev/null +++ b/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml @@ -0,0 +1,68 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 
+apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-preserve-annotation-svc-delete +spec: + namespace: "lb-preserve-annotation-svc-delete" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create resources + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Delete pods, delete service and validate nb still exists + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + kubectl --timeout=60s -n $NAMESPACE delete deploy test + kubectl --timeout=60s -n $NAMESPACE delete svc svc-test + sleep 20 + + get_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X GET \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if [[ $get_resp == "200" ]]; then + echo "nodebalancer exists" + fi + + # cleanup remaining nodebalancer + delete_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if ! [[ $delete_resp == "200" ]]; then + echo "failed deleting nodebalancer" + fi + check: + ($error == null): true + (contains($stdout, 'nodebalancer exists')): true diff --git a/e2e/test/lb-preserve-annotation-svc-delete/create-pods-services.yaml b/e2e/test/lb-preserve-annotation-svc-delete/create-pods-services.yaml new file mode 100644 index 00000000..3888da4a --- /dev/null +++ b/e2e/test/lb-preserve-annotation-svc-delete/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: preserve-annotation-svc-delete + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: preserve-annotation-svc-delete + template: + metadata: + labels: + app: preserve-annotation-svc-delete + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-preserve: "true" + labels: + app: preserve-annotation-svc-delete +spec: + type: LoadBalancer + selector: + app: preserve-annotation-svc-delete + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-simple/chainsaw-test.yaml b/e2e/test/lb-simple/chainsaw-test.yaml new file mode 100644 index 00000000..2661961a --- /dev/null +++ b/e2e/test/lb-simple/chainsaw-test.yaml @@ -0,0 +1,84 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-simple +spec: + namespace: "lb-simple" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + 
kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch loadbalancer ip and check both pods reachable + try: + - script: + content: | + set -e + IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) + + podnames=() + + for i in {1..10}; do + if [[ ${#podnames[@]} -lt 2 ]]; then + output=$(curl -s $IP:80 | jq -e .podName || true) + + if [[ "$output" == *"test-"* ]]; then + unique=true + for name in "${podnames[@]}"; do + if [[ "$name" == "$output" ]]; then + unique=false + break + fi + done + if [[ "$unique" == true ]]; then + podnames+=($output) + fi + fi + else + break + fi + sleep 10 + done + + if [[ ${#podnames[@]} -lt 2 ]]; then + echo "all pods failed to respond" + else + echo "all pods responded" + fi + check: + ($error == null): true + (contains($stdout, 'all pods responded')): true + - name: Delete Pods + try: + - delete: + ref: + apiVersion: v1 + kind: Pod + - name: Delete Service + try: + - delete: + ref: + apiVersion: v1 + kind: Service diff --git a/e2e/test/lb-simple/create-pods-services.yaml b/e2e/test/lb-simple/create-pods-services.yaml new file mode 100644 index 00000000..0f503d9a --- /dev/null +++ b/e2e/test/lb-simple/create-pods-services.yaml @@ -0,0 +1,59 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: lb-simple + name: test +spec: + replicas: 2 + selector: + matchLabels: + app: lb-simple + template: + metadata: + labels: + app: lb-simple + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - lb-simple + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: lb-simple +spec: + type: LoadBalancer + selector: + app: lb-simple + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-single-tls/chainsaw-test.yaml b/e2e/test/lb-single-tls/chainsaw-test.yaml new file mode 100644 index 00000000..a75e4964 --- /dev/null +++ b/e2e/test/lb-single-tls/chainsaw-test.yaml @@ -0,0 +1,92 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-single-tls +spec: + namespace: "lb-single-tls" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create secret + try: + - script: + content: | + set -e + kubectl -n $NAMESPACE create secret tls tls-secret --cert=../certificates/server.crt --key=../certificates/server.key + check: + ($error == null): true + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch loadbalancer ip and check if
pod is reachable + try: + - script: + content: | + set -e + IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) + + podnames=() + + for i in {1..10}; do + if [[ ${#podnames[@]} -lt 1 ]]; then + output=$(curl --resolve linode.test:80:$IP --cacert ../certificates/ca.crt -s https://linode.test:80 | jq -e .podName || true) + + if [[ "$output" == *"test-"* ]]; then + unique=true + for i in "${array[@]}"; do + if [[ "$i" == "$output" ]]; then + unique=false + break + fi + done + if [[ "$unique" == true ]]; then + podnames+=($output) + fi + fi + else + break + fi + sleep 10 + done + + if [[ ${#podnames[@]} -lt 1 ]]; then + echo "all pods failed to respond" + else + echo "all pods responded" + fi + check: + ($error == null): true + (contains($stdout, 'all pods responded')): true + - name: Delete Pods + try: + - delete: + ref: + apiVersion: v1 + kind: Pod + - name: Delete Service + try: + - delete: + ref: + apiVersion: v1 + kind: Service diff --git a/e2e/test/lb-single-tls/create-pods-services.yaml b/e2e/test/lb-single-tls/create-pods-services.yaml new file mode 100644 index 00000000..d749a6b6 --- /dev/null +++ b/e2e/test/lb-single-tls/create-pods-services.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: lb-single-tls + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: lb-single-tls + template: + metadata: + labels: + app: lb-single-tls + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: https + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: https + service.beta.kubernetes.io/linode-loadbalancer-port-80: '{ "tls-secret-name": "tls-secret" }' + labels: + app: lb-single-tls +spec: + type: LoadBalancer + selector: + app: lb-single-tls + ports: + - name: https + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml b/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml new file mode 100644 index 00000000..b6d79a43 --- /dev/null +++ b/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml @@ -0,0 +1,67 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-tcp-connection-health-check +spec: + namespace: "lb-tcp-connection-health-check" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch nodebalancer config for port 80 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | 
jq '.data[] | select(.port == 80)') + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_check=$(echo $nbconfig | jq '.check == "connection"') + port_80_interval=$(echo $nbconfig | jq '.check_interval == 10') + port_80_timeout=$(echo $nbconfig | jq '.check_timeout == 5') + port_80_attempts=$(echo $nbconfig | jq '.check_attempts == 4') + port_80_protocol=$(echo $nbconfig | jq '.protocol == "tcp"') + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') + + if [[ $port_80_check == "true" && $port_80_interval == "true" && $port_80_timeout == "true" && $port_80_attempts == "true" && $port_80_protocol == "true" && $port_80_up_nodes == "true" ]]; then + echo "All conditions met" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'All conditions met')): true diff --git a/e2e/test/lb-tcp-connection-health-check/create-pods-services.yaml b/e2e/test/lb-tcp-connection-health-check/create-pods-services.yaml new file mode 100644 index 00000000..0eae0673 --- /dev/null +++ b/e2e/test/lb-tcp-connection-health-check/create-pods-services.yaml @@ -0,0 +1,53 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: tcp-connection-health-check + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: tcp-connection-health-check + template: + metadata: + labels: + app: tcp-connection-health-check + spec: + containers: + - image: nginx + name: test + ports: + - name: http + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-attempts: "4" + service.beta.kubernetes.io/linode-loadbalancer-check-interval: "10" + service.beta.kubernetes.io/linode-loadbalancer-check-timeout: "5" + service.beta.kubernetes.io/linode-loadbalancer-check-type: connection + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: tcp + name: svc-test + labels: + app: tcp-connection-health-check +spec: + type: LoadBalancer + selector: + app: tcp-connection-health-check + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml b/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml new file mode 100644 index 00000000..c897979b --- /dev/null +++ b/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml @@ -0,0 +1,69 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-updated-with-nb-id +spec: + namespace: "lb-updated-with-nb-id" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Create nodebalancer, annotate svc with nodebalancer id and validate + try: + - script: + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer 
$LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] for label [$LABEL] is incorrect, failed to create nodebalancer" + exit 1 + fi + + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid + sleep 5 + + for i in {1..10}; do + nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $nbid2 ]]; then + echo "Condition met" + break + fi + sleep 10 + done + check: + ($error == null): true + (contains($stdout, 'Condition met')): true diff --git a/e2e/test/lb-updated-with-nb-id/create-pods-services.yaml b/e2e/test/lb-updated-with-nb-id/create-pods-services.yaml new file mode 100644 index 00000000..41b75aab --- /dev/null +++ b/e2e/test/lb-updated-with-nb-id/create-pods-services.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: updated-with-nb-id + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: updated-with-nb-id + template: + metadata: + labels: + app: updated-with-nb-id + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: updated-with-nb-id +spec: + type: LoadBalancer + selector: + app: updated-with-nb-id + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-http-to-https/chainsaw-test.yaml b/e2e/test/lb-with-http-to-https/chainsaw-test.yaml new file mode 100644 index 00000000..d8bd79b9 --- /dev/null +++ b/e2e/test/lb-with-http-to-https/chainsaw-test.yaml @@ -0,0 +1,90 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-http-to-https +spec: + namespace: "lb-with-http-to-https" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Create secrets + try: + - script: + content: | + set -e + kubectl -n $NAMESPACE create secret tls tls-secret-1 --cert=../certificates/server.crt --key=../certificates/server.key + check: + ($error == null): true + - name: Update service to have another annotation and port + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-443='{"tls-secret-name": "tls-secret-1", "protocol": "https"}' + kubectl patch svc svc-test -n $NAMESPACE --type='json' -p='[{"op": "add", "path": "/spec/ports/-", "value": {"name": "https", "port": 443, "targetPort": 8080, "protocol": "TCP"}}]' + sleep 10 + check: + ($error == null): true + - name: Check endpoints + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + (length(subsets[0].ports)): 2 + catch: + - describe:
apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch loadbalancer ip and check if pod reachable on different ports with different protocols + try: + - script: + content: | + set -e + IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) + + for i in {1..10}; do + port_80=$(curl -s $IP:80 | grep "test-") + port_443=$(curl --resolve linode.test:443:$IP --cacert ../certificates/ca.crt -s https://linode.test:443 | grep "test-") + + if [[ -z $port_80 || -z $port_443 ]]; then + sleep 10 + else + echo "all pods responded" + break + fi + done + check: + ($error == null): true + (contains($stdout, 'all pods responded')): true diff --git a/e2e/test/lb-with-http-to-https/create-pods-services.yaml b/e2e/test/lb-with-http-to-https/create-pods-services.yaml new file mode 100644 index 00000000..775db623 --- /dev/null +++ b/e2e/test/lb-with-http-to-https/create-pods-services.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: http-to-https + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: http-to-https + template: + metadata: + labels: + app: http-to-https + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: alpha + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: https + service.beta.kubernetes.io/linode-loadbalancer-port-80: '{"protocol": "http"}' + name: svc-test + labels: + app: http-to-https +spec: + type: LoadBalancer + selector: + app: http-to-https + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml b/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml new file mode 100644 index 00000000..587fbb7a --- /dev/null +++ b/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml @@ -0,0 +1,84 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-multiple-http-https-ports +spec: + namespace: "lb-with-multiple-http-https-ports" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Create secrets + try: + - script: + content: | + set -e + kubectl -n $NAMESPACE create secret tls tls-secret-1 --cert=../certificates/server.crt --key=../certificates/server.key + kubectl -n $NAMESPACE create secret tls tls-secret-2 --cert=../certificates/server.crt --key=../certificates/server.key + sleep 2 + check: + ($error == null): true + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + (length(subsets[0].ports)): 4 + catch: + - describe: + apiVersion: v1 + kind: Pod + - 
describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch loadbalancer ip and check if pod reachable on different ports with different protocols + try: + - script: + content: | + set -e + IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) + + for i in {1..10}; do + port_80=$(curl -s $IP:80 | grep "test-") + port_8080=$(curl -s $IP:8080 | grep "test-") + port_443=$(curl --resolve linode.test:443:$IP --cacert ../certificates/ca.crt -s https://linode.test:443 | grep "test-") + port_8443=$(curl --resolve linode.test:8443:$IP --cacert ../certificates/ca.crt -s https://linode.test:8443 | grep "test-") + + if [[ -z $port_80 || -z $port_8080 || -z $port_443 || -z $port_8443 ]]; then + sleep 15 + else + echo "all pods responded" + break + fi + done + check: + ($error == null): true + (contains($stdout, 'all pods responded')): true diff --git a/e2e/test/lb-with-multiple-http-https-ports/create-pods-services.yaml b/e2e/test/lb-with-multiple-http-https-ports/create-pods-services.yaml new file mode 100644 index 00000000..c29dc014 --- /dev/null +++ b/e2e/test/lb-with-multiple-http-https-ports/create-pods-services.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: multiple-http-https-ports + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: multiple-http-https-ports + template: + metadata: + labels: + app: multiple-http-https-ports + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: alpha + containerPort: 8080 + protocol: TCP + - name: beta + containerPort: 8989 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: https + service.beta.kubernetes.io/linode-loadbalancer-port-80: '{"protocol": "http"}' + service.beta.kubernetes.io/linode-loadbalancer-port-443: '{"tls-secret-name": "tls-secret-1"}' + service.beta.kubernetes.io/linode-loadbalancer-port-8080: '{"protocol": "http"}' + service.beta.kubernetes.io/linode-loadbalancer-port-8443: '{"tls-secret-name": "tls-secret-2", "protocol": "https"}' + name: svc-test + labels: + app: multiple-http-https-ports +spec: + type: LoadBalancer + selector: + app: multiple-http-https-ports + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8989 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + - name: https-1 + protocol: TCP + port: 443 + targetPort: 8080 + - name: https-2 + protocol: TCP + port: 8443 + targetPort: 8989 + sessionAffinity: None diff --git a/e2e/test/lb-with-node-addition/chainsaw-test.yaml b/e2e/test/lb-with-node-addition/chainsaw-test.yaml new file mode 100644 index 00000000..52f136b4 --- /dev/null +++ b/e2e/test/lb-with-node-addition/chainsaw-test.yaml @@ -0,0 +1,99 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-node-addition +spec: + namespace: "lb-with-node-addition" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create resources + try: + - apply: + file: 
create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer has 2 nodes + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[]? | select(.port == 80)') + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber == 2') + + if [[ $port_80_up_nodes == "true" ]]; then + echo "all nodes up" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'all nodes up')): true + - name: Add new node and check nodebalancer gets updated + try: + - script: + content: | + set -e + + current_replicas=$(KUBECONFIG=$MGMT_KUBECONFIG kubectl get machinedeployment ${CLUSTER_NAME}-md-0 -o=jsonpath='{.spec.replicas}') + required_replicas=$((current_replicas + 1)) + KUBECONFIG=$MGMT_KUBECONFIG kubectl patch machinedeployment ${CLUSTER_NAME}-md-0 --type='merge' -p "{\"spec\":{\"replicas\":$required_replicas}}" + + sleep 180 + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[]? 
| select(.port == 80)') + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber == 3') + + if [[ $port_80_up_nodes == "true" ]]; then + echo "all nodes up" + break + fi + sleep 20 + done + + #KUBECONFIG=$MGMT_KUBECONFIG kubectl patch machinedeployment ${CLUSTER_NAME}-md-0 --type='merge' -p "{\"spec\":{\"replicas\":$current_replicas}}" + check: + ($error == null): true + (contains($stdout, 'all nodes up')): true diff --git a/e2e/test/lb-with-node-addition/create-pods-services.yaml b/e2e/test/lb-with-node-addition/create-pods-services.yaml new file mode 100644 index 00000000..39a55b9d --- /dev/null +++ b/e2e/test/lb-with-node-addition/create-pods-services.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: with-node-addition + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: with-node-addition + template: + metadata: + labels: + app: with-node-addition + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: with-node-addition +spec: + type: LoadBalancer + selector: + app: with-node-addition + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml new file mode 100644 index 00000000..e8e07665 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml @@ -0,0 +1,112 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-proxyprotocol-default-annotation +spec: + namespace: "lb-with-proxyprotocol-default-annotation" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Add ProxyProtocol v2 using deprecated annotation + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-proxy-protocol=v2 + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 and 8080 have ProxyProtocol v2 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v2"') + port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') + + if [[ $port_80_v2 == "true" && $port_8080_v2 == "true" ]]; then + echo 
"Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true + - name: Add default annotation for ProxyProtocol v1 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol=v1 + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 and 8080 have ProxyProtocol v1 + try: + - script: + content: | + set -e + + re='^[0-9]+$' + + hostname=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].hostname) + ip=$(echo $hostname | awk -F'.' '{gsub("-", ".", $1); print $1}') + nbid=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "X-Filter: {\"ipv4\": \"$ip\"}" \ + "https://api.linode.com/v4/nodebalancers" | jq .data[].id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, doesn't meet regex requirements" + exit 1 + fi + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v1"') + port_8080_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v1"') + + if [[ $port_80_v1 == "true" && $port_8080_v1 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/lb-with-proxyprotocol-default-annotation/create-pods-services.yaml b/e2e/test/lb-with-proxyprotocol-default-annotation/create-pods-services.yaml new file mode 100644 index 00000000..4ac2edc2 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-default-annotation/create-pods-services.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: proxyprotocol-default-annotation + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: proxyprotocol-default-annotation + template: + metadata: + labels: + app: proxyprotocol-default-annotation + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + - name: http-2 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: proxyprotocol-default-annotation +spec: + type: LoadBalancer + selector: + app: proxyprotocol-default-annotation + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml new file mode 100644 index 00000000..384fdc4a --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml @@ -0,0 +1,100 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-proxyprotocol-override +spec: + namespace: "lb-with-proxyprotocol-override" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - 
apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Annotate service port 80 with v1 and 8080 with v2 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-80='{"proxy-protocol": "v1"}' + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v2"}' + sleep 10 + - name: Check NodeBalancerConfig for port 80 to have ProxyProtocol v1 and port 8080 to have ProxyProtocol v2 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v1"') + port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') + + if [[ $port_80_v1 == "true" && $port_8080_v2 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true + - name: Update service annotation for port 80 to v2 and 8080 with v1 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol=v2 + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-80- + kubectl annotate --overwrite svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v1"}' + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 to have ProxyProtocol v2 and port 8080 to have ProxyProtocol v1 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v2"') + port_8080_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v1"') + + if [[ $port_80_v2 == "true" && $port_8080_v1 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/lb-with-proxyprotocol-override/create-pods-services.yaml b/e2e/test/lb-with-proxyprotocol-override/create-pods-services.yaml new file mode 100644 index 00000000..a6247c4d --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-override/create-pods-services.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: proxyprotocol-override + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: proxyprotocol-override + template: + metadata: + labels: + app: proxyprotocol-override + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + 
protocol: TCP + - name: http-2 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: proxyprotocol-override +spec: + type: LoadBalancer + selector: + app: proxyprotocol-override + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml new file mode 100644 index 00000000..61cc3d25 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml @@ -0,0 +1,66 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-proxyprotocol-port-specific +spec: + namespace: "lb-with-proxyprotocol-port-specific" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Annotate service port 8080 with v2 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v2"}' + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 to not have ProxyProtocol and port 8080 to have ProxyProtocol v2 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_none=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "none"') + port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') + + if [[ $port_80_none == "true" && $port_8080_v2 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/lb-with-proxyprotocol-port-specific/create-pods-services.yaml b/e2e/test/lb-with-proxyprotocol-port-specific/create-pods-services.yaml new file mode 100644 index 00000000..95c0a822 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-port-specific/create-pods-services.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: proxyprotocol-port-specific + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: proxyprotocol-port-specific + template: + metadata: + labels: + app: proxyprotocol-port-specific + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + - name: http-2 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +---
+apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: proxyprotocol-port-specific +spec: + type: LoadBalancer + selector: + app: proxyprotocol-port-specific + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml new file mode 100644 index 00000000..c4a43b2d --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml @@ -0,0 +1,77 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-proxyprotocol-set +spec: + namespace: "lb-with-proxyprotocol-set" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Annotate service port 80 with v1 and 8080 with v2 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-80='{"proxy-protocol": "v1"}' + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v2"}' + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 to have ProxyProtocol v1 and port 8080 to have ProxyProtocol v2 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v1"') + port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') + + if [[ $port_80_v1 == "true" && $port_8080_v2 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/lb-with-proxyprotocol-set/create-pods-services.yaml b/e2e/test/lb-with-proxyprotocol-set/create-pods-services.yaml new file mode 100644 index 00000000..80b96d86 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-set/create-pods-services.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: proxyprotocol-set + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: proxyprotocol-set + template: + metadata: + labels: + app: proxyprotocol-set + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + - name: http-2 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + 
apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: proxyprotocol-set +spec: + type: LoadBalancer + selector: + app: proxyprotocol-set + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/scripts/get-nb-id.sh b/e2e/test/scripts/get-nb-id.sh new file mode 100755 index 00000000..cf8a12b1 --- /dev/null +++ b/e2e/test/scripts/get-nb-id.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -e + +re='^[0-9]+$' + +hostname=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].hostname) +ip=$(echo $hostname | awk -F'.' '{gsub("-", ".", $1); print $1}') +nbid=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "X-Filter: {\"ipv4\": \"$ip\"}" \ + "https://api.linode.com/v4/nodebalancers" | jq .data[].id) + +if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect" + exit 1 +fi + +echo $nbid