diff --git a/.gitignore b/.gitignore index c1844549..9c40695d 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,5 @@ vendor/ hacks/ tmp/ .idea +/cloud-sa.json +/Tiltfile \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 7e1cde81..2addd3cc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -# syntax=docker/dockerfile:1.1-experimental +# syntax=docker/dockerfile:1 # Copyright 2020 The Kubernetes Authors. # @@ -15,7 +15,7 @@ # limitations under the License. # Build the manager binary -ARG GOVER=1.19 +ARG GOVER=1.21 FROM --platform=$BUILDPLATFORM golang:${GOVER} as builder ARG TARGETPLATFORM diff --git a/README.md b/README.md index 6d2d5501..c6639313 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,6 @@ [![Twitter Follow](https://img.shields.io/twitter/follow/equinixmetal.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=equinixmetal&user_id=788180534543339520) ![Equinix Metal Maintained](https://img.shields.io/badge/stability-maintained-green.svg) - `cloud-provider-equinix-metal` is the Kubernetes CCM implementation for Equinix Metal. Read more about the CCM in [the official Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/). This repository is [Maintained](https://github.com/equinix-labs/equinix-labs/blob/main/maintained-statement.md)! @@ -21,10 +20,11 @@ At the current state of Kubernetes, running the CCM requires a few things. Please read through the requirements carefully as they are critical to running the CCM on a Kubernetes cluster. 
### Version + Recommended versions of Equinix Metal CCM based on your Kubernetes version: -* Equinix Metal CCM version v0.0.4 supports Kubernetes version >=v1.10 -* Equinix Metal CCM version v1.0.0+ supports Kubernetes version >=1.15.0 +- Equinix Metal CCM version v0.0.4 supports Kubernetes version >=v1.10 +- Equinix Metal CCM version v1.0.0+ supports Kubernetes version >=1.15.0 ### BGP @@ -41,8 +41,8 @@ If they come from the _public_ address, they will be dropped. There are two ways to get the packets to have the correct source address: -* use BGP software that knows how to set the source address on a packet -* set static routes on your host +- use BGP software that knows how to set the source address on a packet +- set static routes on your host #### BGP Software @@ -60,8 +60,8 @@ If your BGP software does not support using a specific source IP, then you must You need to retrieve the following: -* your private IPv4 upstream gateway address -* your BGP peer addresses +- your private IPv4 upstream gateway address +- your BGP peer addresses Before you can retrieve the information, you must enable BGP at both the Equinix Metal project level, and for each device. You can do this in the Equinix Metal Web UI, API or CLI. CCM ensures these settings on the project and each device. However, @@ -92,8 +92,8 @@ done Control plane binaries in your cluster must start with the correct flags: -* `kubelet`: All kubelets in your cluster **MUST** set the flag `--cloud-provider=external`. This must be done for _every_ kubelet. Note that [k3s](https://k3s.io) sets its own CCM by default. If you want to use the CCM with k3s, you must disable the k3s CCM and enable this one, as `--disable-cloud-controller --kubelet-arg cloud-provider=external`. -* `kube-apiserver` and `kube-controller-manager` must **NOT** set the flag `--cloud-provider`. They then will use no cloud provider natively, leaving room for the Equinix Metal CCM. 
+- `kubelet`: All kubelets in your cluster **MUST** set the flag `--cloud-provider=external`. This must be done for _every_ kubelet. Note that [k3s](https://k3s.io) sets its own CCM by default. If you want to use the CCM with k3s, you must disable the k3s CCM and enable this one, as `--disable-cloud-controller --kubelet-arg cloud-provider=external`. +- `kube-apiserver` and `kube-controller-manager` must **NOT** set the flag `--cloud-provider`. They then will use no cloud provider natively, leaving room for the Equinix Metal CCM. **WARNING**: setting the kubelet flag `--cloud-provider=external` will taint all nodes in a cluster with `node.cloudprovider.kubernetes.io/uninitialized`. The CCM itself will untaint those nodes when it initializes them. @@ -155,10 +155,9 @@ stringData: { "apiKey": "abc123abc123abc123", "projectID": "abc123abc123abc123" - } + } ``` - Then apply the secret, e.g.: ```bash @@ -167,17 +166,18 @@ kubectl apply -f /tmp/secret.yaml` You can confirm that the secret was created with the following: -````bash +```bash $ kubectl -n kube-system get secrets metal-cloud-config NAME TYPE DATA AGE metal-cloud-config Opaque 1 2m -```` +``` ### Deploy CCM To apply the CCM itself, select your release and apply the manifest: Example: + ``` RELEASE=v3.6.2 kubectl apply -f https://github.com/equinix/cloud-provider-equinix-metal/releases/download/${RELEASE}/deployment.yaml @@ -198,9 +198,9 @@ See further in this document under loadbalancing, for details. By default, ccm does minimal logging, relying on the supporting infrastructure from kubernetes. However, it does support optional additional logging levels via the `--v=` flag. 
In general: -* `--v=2`: log most function calls for devices and facilities, when relevant logging the returned values -* `--v=3`: log additional data when logging returned values, usually entire go structs -* `--v=5`: log every function call, including those called very frequently +- `--v=2`: log most function calls for devices and facilities, when relevant logging the returned values +- `--v=3`: log additional data when logging returned values, usually entire go structs +- `--v=5`: log every function call, including those called very frequently ## Configuration @@ -213,29 +213,30 @@ The Equinix Metal CCM has multiple configuration options. These include three di This section lists each configuration option, and whether it can be set by each method. -| Purpose | CLI Flag | Env Var | Secret Field | Default | -| --- | --- | --- | --- | --- | -| Path to config secret | `cloud-config` | | | error | -| API Key | | `METAL_API_KEY` | `apiKey` | error | -| Project ID | | `METAL_PROJECT_ID` | `projectID` | error | -| Metro in which to create LoadBalancer Elastic IPs | | `METAL_METRO_NAME` | `metro` | Service-specific annotation, else error | -| Facility in which to create LoadBalancer Elastic IPs, only if Metro is not set | | `METAL_FACILITY_NAME` | `facility` | Service-specific annotation, else metro | -| Base URL to Equinix API | | | `base-url` | Official Equinix Metal API | -| Load balancer setting | | `METAL_LOAD_BALANCER` | `loadbalancer` | none | -| BGP ASN for cluster nodes when enabling BGP on the project; if the project **already** has BGP enabled, will use the existing BGP local ASN from the project | | `METAL_LOCAL_ASN` | `localASN` | `65000` | -| BGP passphrase to use when enabling BGP on the project; if the project **already** has BGP enabled, will use the existing BGP pass from the project | | `METAL_BGP_PASS` | `bgpPass` | `""` | -| Kubernetes annotation to set node's BGP ASN, `{{n}}` replaced with ordinal index of peer | | `METAL_ANNOTATION_LOCAL_ASN` | 
`annotationLocalASN` | `"metal.equinix.com/bgp-peers-{{n}}-node-asn"` | -| Kubernetes annotation to set BGP peer's ASN, {{n}} replaced with ordinal index of peer | | `METAL_ANNOTATION_PEER_ASN` | `annotationPeerASN` | `"metal.equinix.com/bgp-peers-{{n}}-peer-asn"` | -| Kubernetes annotation to set BGP peer's IPs, {{n}} replaced with ordinal index of peer | | `METAL_ANNOTATION_PEER_IP` | `annotationPeerIP` | `"metal.equinix.com/bgp-peers-{{n}}-peer-ip"` | -| Kubernetes annotation to set source IP for BGP peering, {{n}} replaced with ordinal index of peer | | `METAL_ANNOTATION_SRC_IP` | `annotationSrcIP` | `"metal.equinix.com/bgp-peers-{{n}}-src-ip"` | -| Kubernetes annotation to set BGP MD5 password, base64-encoded (see security warning below) | | `METAL_ANNOTATION_BGP_PASS` | `annotationBGPPass` | `"metal.equinix.com/bgp-peers-{{n}}-bgp-pass"` | -| Kubernetes annotation to set the CIDR for the network range of the private address | | `METAL_ANNOTATION_NETWORK_IPV4_PRIVATE` | `annotationNetworkIPv4Private` | `metal.equinix.com/network-4-private` | -| Kubernetes Service annotation to set EIP metro | | `METAL_ANNOTATION_EIP_METRO` | `annotationEIPMetro` | `"metal.equinix.com/eip-metro"` | -| Kubernetes Service annotation to set EIP facility | | `METAL_ANNOTATION_EIP_FACILITY` | `annotationEIPFacility` | `"metal.equinix.com/eip-facility"` | -| Tag for control plane Elastic IP | | `METAL_EIP_TAG` | `eipTag` | No control plane Elastic IP | -| Kubernetes API server port for Elastic IP | | `METAL_API_SERVER_PORT` | `apiServerPort` | Same as `kube-apiserver` on control plane nodes, same as `0` | -| Filter for cluster nodes on which to enable BGP | | `METAL_BGP_NODE_SELECTOR` | `bgpNodeSelector` | All nodes | -| Use host IP for Control Plane endpoint health checks | | `METAL_EIP_HEALTH_CHECK_USE_HOST_IP` | `eipHealthCheckUseHostIP` | false | +| Purpose | CLI Flag | Env Var | Secret Field | Default | +| 
------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------- | --------------------------------------- | ------------------------------ | ------------------------------------------------------------ | +| Path to config secret | `cloud-config` | | | error | +| API Key | | `METAL_API_KEY` | `apiKey` | error | +| Project ID | | `METAL_PROJECT_ID` | `projectID` | error | +| Metro in which to create LoadBalancer Elastic IPs | | `METAL_METRO_NAME` | `metro` | Service-specific annotation, else error | +| Facility in which to create LoadBalancer Elastic IPs, only if Metro is not set | | `METAL_FACILITY_NAME` | `facility` | Service-specific annotation, else metro | +| Base URL to Equinix API | | | `base-url` | Official Equinix Metal API | +| Load balancer setting | | `METAL_LOAD_BALANCER` | `loadbalancer` | none | +| BGP ASN for cluster nodes when enabling BGP on the project; if the project **already** has BGP enabled, will use the existing BGP local ASN from the project | | `METAL_LOCAL_ASN` | `localASN` | `65000` | +| BGP passphrase to use when enabling BGP on the project; if the project **already** has BGP enabled, will use the existing BGP pass from the project | | `METAL_BGP_PASS` | `bgpPass` | `""` | +| Kubernetes annotation to set node's BGP ASN, `{{n}}` replaced with ordinal index of peer | | `METAL_ANNOTATION_LOCAL_ASN` | `annotationLocalASN` | `"metal.equinix.com/bgp-peers-{{n}}-node-asn"` | +| Kubernetes annotation to set BGP peer's ASN, {{n}} replaced with ordinal index of peer | | `METAL_ANNOTATION_PEER_ASN` | `annotationPeerASN` | `"metal.equinix.com/bgp-peers-{{n}}-peer-asn"` | +| Kubernetes annotation to set BGP peer's IPs, {{n}} replaced with ordinal index of peer | | `METAL_ANNOTATION_PEER_IP` | `annotationPeerIP` | `"metal.equinix.com/bgp-peers-{{n}}-peer-ip"` | +| Kubernetes annotation to set source IP for BGP peering, {{n}} replaced with 
ordinal index of peer | | `METAL_ANNOTATION_SRC_IP` | `annotationSrcIP` | `"metal.equinix.com/bgp-peers-{{n}}-src-ip"` | +| Kubernetes annotation to set BGP MD5 password, base64-encoded (see security warning below) | | `METAL_ANNOTATION_BGP_PASS` | `annotationBGPPass` | `"metal.equinix.com/bgp-peers-{{n}}-bgp-pass"` | +| Kubernetes annotation to set the CIDR for the network range of the private address | | `METAL_ANNOTATION_NETWORK_IPV4_PRIVATE` | `annotationNetworkIPv4Private` | `metal.equinix.com/network-4-private` | +| Kubernetes Service annotation to set EIP metro | | `METAL_ANNOTATION_EIP_METRO` | `annotationEIPMetro` | `"metal.equinix.com/eip-metro"` | +| Kubernetes Service annotation to set EIP facility | | `METAL_ANNOTATION_EIP_FACILITY` | `annotationEIPFacility` | `"metal.equinix.com/eip-facility"` | +| Tag for control plane Elastic IP | | `METAL_EIP_TAG` | `eipTag` | No control plane Elastic IP | +| ID for control plane Equinix Metal Load Balancer | | `METAL_LOAD_BALANCER_ID` | `loadBalancerID` | No control plane Equinix Metal Load Balancer | +| Kubernetes API server port for Elastic IP | | `METAL_API_SERVER_PORT` | `apiServerPort` | Same as `kube-apiserver` on control plane nodes, same as `0` | +| Filter for cluster nodes on which to enable BGP | | `METAL_BGP_NODE_SELECTOR` | `bgpNodeSelector` | All nodes | +| Use host IP for Control Plane endpoint health checks | | `METAL_EIP_HEALTH_CHECK_USE_HOST_IP` | `eipHealthCheckUseHostIP` | false | Security Warning Including your project's BGP password, even base64-encoded, may have security implications. Because Equinix Metal @@ -248,23 +249,24 @@ to system pods that have reasonable need to access them. The Kubernetes CCM for Equinix Metal deploys as a `Deployment` into your cluster with a replica of `1`. 
It provides the following services: -* lists and retrieves instances by ID, returning Equinix Metal servers -* manages load balancers +- lists and retrieves instances by ID, returning Equinix Metal servers +- manages load balancers + +### Service Load Balancers -### Load Balancers +Equinix CCM supports two approaches to load balancing: -Equinix Metal does not offer managed load balancers like [AWS ELB](https://aws.amazon.com/elasticloadbalancing/) -or [GCP Load Balancers](https://cloud.google.com/load-balancing/). Instead, if configured to do so, -Equinix Metal CCM will interface with and configure external bare-metal loadbalancers. +1. If configured to do so, Equinix Metal CCM will interface with and configure external bare-metal load balancers +2. If configured to do so, and if the feature is available on your Equinix Metal account, Equinix Metal CCM will interface with and configure external, managed Equinix Metal Load Balancers (EMLB) -When a load balancer is enabled, the CCM does the following: +When any load balancer is enabled, the CCM does the following: 1. Enable BGP for the project 1. Enable BGP on each node as it comes up 1. Sets ASNs based on configuration or default -1. For each `Service` of `type=LoadBalancer`: - * If you have specified a load balancer IP on `Service.Spec.LoadBalancerIP` (bring your own IP, or BYOIP), do nothing - * If you have not specified a load balancer IP on `Service.Spec.LoadBalancerIP`, get an Equinix Metal Elastic IP and set it on `Service.Spec.LoadBalancerIP`, see below +1. If you are using bare-metal load balancers, then for each `Service` of `type=LoadBalancer`: + - If you have specified a load balancer IP on `Service.Spec.LoadBalancerIP` (bring your own IP, or BYOIP), do nothing + - If you have not specified a load balancer IP on `Service.Spec.LoadBalancerIP`, get an Equinix Metal Elastic IP and set it on `Service.Spec.LoadBalancerIP`, see below 1. 
Pass control to the specific load balancer implementation #### Service Load Balancer IP @@ -303,7 +305,7 @@ CCM will detect that `loadBalancerIP` already was set and not try to create a ne ##### Equinix EIP -If the `Service.Spec.LoadBalancerIP` was *not* set, then CCM will use the Equinix Metal API to request a new, +If the `Service.Spec.LoadBalancerIP` was _not_ set, then CCM will use the Equinix Metal API to request a new, metro- or facility-specific Elastic IP and set it to `Service.Spec.LoadBalancerIP`. The CCM needs to determine where to request the EIP. It does not attempt to figure out where the nodes are, as that can change over time, @@ -323,12 +325,6 @@ are created at a system-wide level, ignoring the annotations. Using these flags and annotations, you can run the CCM on a node in a different metro or facility, or even outside of Equinix Metal entirely. -#### Control Plane LoadBalancer Implementation - -For the control plane nodes, the Equinix Metal CCM uses static Elastic IP assignment, via the Equinix Metal API, to tell the -Equinix Metal network which control plane node should receive the traffic. For more details on the control plane -load-balancer, see [this section](#control-plane-load-balancing). - #### Service LoadBalancer Implementations Loadbalancing is enabled as follows. @@ -339,24 +335,43 @@ Loadbalancing is enabled as follows. 
The value of the loadbalancing configuration is `:///` where: -* `` is the named supported type, of one of those listed below -* `` is any additional detail needed to configure the implementation, details in the description below +- `` is the named supported type, of one of those listed below +- `` is any additional detail needed to configure the implementation, details in the description below For loadbalancing for Kubernetes `Service` of `type=LoadBalancer`, the following implementations are supported: -* [kube-vip](#kube-vip) -* [MetalLB](#metallb) -* [empty](#empty) + +- [Equinix Metal Load Balancer](#EquinixMetalLoadBalancer) +- [kube-vip](#kube-vip) +- [MetalLB](#metallb) +- [empty](#empty) CCM does **not** deploy _any_ load balancers for you. It limits itself to managing the Equinix Metal-specific API calls to support a load balancer, and providing configuration for supported load balancers. +##### Equinix Metal Load Balancer + +Equinix Metal Load Balancer (EMLB) is a beta service that is available to a limited number of Equinix Metal customers that provides managed layer 4 load balancers. + +When the EMLB option is enabled, for user-deployed Kubernetes `Service` of `type=LoadBalancer`, the Equinix Metal CCM: +- creates an Equinix Metal Load Balancer for the service +- creates listener ports on the Equinix Metal Load Balancer for each port on the service +- creates origin pools for each listener port that send traffic to the corresponding NodePorts in your cluster + +To enable EMLB, set the configuration `METAL_LOAD_BALANCER` or config `loadbalancer` to: + +``` +emlb:// +``` + +Where `` is the Equinix metro in which you want CCM to deploy your external load balancers. For example, to deploy your load balancers in Silicon Valley, you would set the configuration to `emlb://sv`. Note that EMLB is available in a limited number of Equinix metros (as of this writing, `sv`, `da`, and `ny`). 
+ ##### kube-vip -**Supported Versions**: +**Supported Versions**: -* Equinix Metal CCM version < v3.3.0 supports kube-vip version < v0.5.11 -* Equinix Metal CCM version >= v3.3.0 supports kube-vip version >= v0.5.11 +- Equinix Metal CCM version < v3.3.0 supports kube-vip version < v0.5.11 +- Equinix Metal CCM version >= v3.3.0 supports kube-vip version >= v0.5.11 When the [kube-vip](https://kube-vip.io) option is enabled, for user-deployed Kubernetes `Service` of `type=LoadBalancer`, the Equinix Metal CCM enables BGP on the project and nodes, assigns an EIP for each such @@ -369,22 +384,22 @@ To enable it, set the configuration `METAL_LOAD_BALANCER` or config `loadbalance kube-vip:// ``` -Directions on using configuring kube-vip in this method are available at the kube-vip [site](https://kube-vip.io/hybrid/daemonset/#equinix-metal-overview-(using-the-%5Bequinix-cloud-provider-equinix-metal%5D(https://github.com/equinix/cloud-provider-equinix-metal))) +Directions on using configuring kube-vip in this method are available at the kube-vip [site]() If `kube-vip` management is enabled, then CCM does the following. 1. Enable BGP on the Equinix Metal project 1. For each node currently in the cluster or added: - * retrieve the node's Equinix Metal ID via the node provider ID - * retrieve the device's BGP configuration: node ASN, peer ASN, peer IPs, source IP - * add the information to appropriate annotations on the node + - retrieve the node's Equinix Metal ID via the node provider ID + - retrieve the device's BGP configuration: node ASN, peer ASN, peer IPs, source IP + - add the information to appropriate annotations on the node 1. 
For each service of `type=LoadBalancer` currently in the cluster or added: - * if an Elastic IP address reservation with the appropriate tags exists, and the `Service` already has that IP address affiliated with it, it is ready; ignore - * if an Elastic IP address reservation with the appropriate tags exists, and the `Service` does not have that IP affiliated with it, add it to the [service spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#servicespec-v1-core) - * if an Elastic IP address reservation with the appropriate tags does not exist, create it and add it to the services spec; see [Equinix EIP][Equinix EIP] to control in which metro or facility the EIP will be created. + - if an Elastic IP address reservation with the appropriate tags exists, and the `Service` already has that IP address affiliated with it, it is ready; ignore + - if an Elastic IP address reservation with the appropriate tags exists, and the `Service` does not have that IP affiliated with it, add it to the [service spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#servicespec-v1-core) + - if an Elastic IP address reservation with the appropriate tags does not exist, create it and add it to the services spec; see [Equinix EIP][Equinix EIP] to control in which metro or facility the EIP will be created. 1. For each service of `type=LoadBalancer` deleted from the cluster: - * find the Elastic IP address from the service spec and remove it - * delete the Elastic IP reservation from Equinix Metal + - find the Elastic IP address from the service spec and remove it + - delete the Elastic IP reservation from Equinix Metal ##### MetalLB @@ -396,7 +411,7 @@ requiring an additional managed service (or hop). BGP route advertisements enabl to route traffic for your services at the Elastic IP to the correct host. **NOTE:** MetalLB 0.13.2+ [uses CRs for configuration](https://metallb.universe.tf/release-notes/#version-0-13-2), and no longer uses a ConfigMap. 
-Currently, the CCM defaults to using a ConfigMap for backwards compatibility. In a future release, the CCM will default to using CRDs with MetalLB. +Currently, the CCM defaults to using a ConfigMap for backwards compatibility. In a future release, the CCM will default to using CRDs with MetalLB. To configure the CCM to integrate with MetalLB <= v0.12.1, follow the instructions in [MetalLB from v0.11.0 to v0.12.1](#metallb-from-v0110-to-v0121). @@ -411,7 +426,7 @@ that are specifically structured to be ignored by metallb. For example: ```yaml - node-selectors: +node-selectors: - match-labels: kubernetes.io/hostname: dc-worker-1 - match-labels: @@ -441,13 +456,13 @@ metallb://// For example: -* `metallb:///metallb-system/config` - enable `MetalLB` management and update the configmap `config` in the namespace `metallb-system` -* `metallb:///foonamespace/myconfig` - - enable `MetalLB` management and update the configmap `myconfig` in the namespace `foonamespae` -* `metallb:///` - enable `MetalLB` management and update the default configmap, i.e. `config` in the namespace `metallb-system` +- `metallb:///metallb-system/config` - enable `MetalLB` management and update the configmap `config` in the namespace `metallb-system` +- `metallb:///foonamespace/myconfig` - enable `MetalLB` management and update the configmap `myconfig` in the namespace `foonamespace` +- `metallb:///` - enable `MetalLB` management and update the default configmap, i.e. `config` in the namespace `metallb-system` -Notice the **three* slashes. In the URL, the namespace and the configmap are in the path. +Notice the **three** slashes. In the URL, the namespace and the configmap are in the path. -By default, the CCM configures MetalLB using a ConfigMap. ConfigMap configuration only works with MetalLB <= v0.12.1. For forward compatibility, you may optionally append `?crdConfiguration=false` to the configuration string in order to explicitly tell the CCM to use a ConfigMap to configure MetalLB. 
In a future release, the CCM will default to using CRDs with MetalLB. +By default, the CCM configures MetalLB using a ConfigMap. ConfigMap configuration only works with MetalLB <= v0.12.1. For forward compatibility, you may optionally append `?crdConfiguration=false` to the configuration string in order to explicitly tell the CCM to use a ConfigMap to configure MetalLB. In a future release, the CCM will default to using CRDs with MetalLB. When enabled, CCM controls the loadbalancer by updating the provided `ConfigMap`. @@ -457,26 +472,26 @@ If `MetalLB` management is enabled, then CCM does the following. 1. If the `ConfigMap` does not exist, do the rest of the behaviours, but do not update the `ConfigMap` 1. Enable BGP on the Equinix Metal project 1. For each node currently in the cluster or added: - * retrieve the node's Equinix Metal ID via the node provider ID - * retrieve the device's BGP configuration: node ASN, peer ASN, peer IPs, source IP - * add them to the metallb `ConfigMap` with a kubernetes selector ensuring that the peer is only for this node + - retrieve the node's Equinix Metal ID via the node provider ID + - retrieve the device's BGP configuration: node ASN, peer ASN, peer IPs, source IP + - add them to the metallb `ConfigMap` with a kubernetes selector ensuring that the peer is only for this node 1. For each node deleted from the cluster: - * remove the node from the MetalLB `ConfigMap` + - remove the node from the MetalLB `ConfigMap` 1. 
For each service of `type=LoadBalancer` currently in the cluster or added: - * if an Elastic IP address reservation with the appropriate tags exists, and the `Service` already has that IP address affiliated with it, it is ready; ignore - * if an Elastic IP address reservation with the appropriate tags exists, and the `Service` does not have that IP affiliated with it, add it to the [service spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#servicespec-v1-core) and ensure it is in the pools of the MetalLB `ConfigMap` with `auto-assign: false` - * if an Elastic IP address reservation with the appropriate tags does not exist, create it and add it to the services spec, and ensure is in the pools of the metallb `ConfigMap` with `auto-assign: false`; see [Equinix EIP][Equinix EIP] to control in which metro or facility the EIP will be created. + - if an Elastic IP address reservation with the appropriate tags exists, and the `Service` already has that IP address affiliated with it, it is ready; ignore + - if an Elastic IP address reservation with the appropriate tags exists, and the `Service` does not have that IP affiliated with it, add it to the [service spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#servicespec-v1-core) and ensure it is in the pools of the MetalLB `ConfigMap` with `auto-assign: false` + - if an Elastic IP address reservation with the appropriate tags does not exist, create it and add it to the services spec, and ensure is in the pools of the metallb `ConfigMap` with `auto-assign: false`; see [Equinix EIP][Equinix EIP] to control in which metro or facility the EIP will be created. 1. 
For each service of `type=LoadBalancer` deleted from the cluster: - * find the Elastic IP address from the service spec and remove it - * remove the IP from the `ConfigMap` - * delete the Elastic IP reservation from Equinix Metal + - find the Elastic IP address from the service spec and remove it + - remove the IP from the `ConfigMap` + - delete the Elastic IP reservation from Equinix Metal CCM itself does **not** deploy the load-balancer or any part of it, including the `ConfigMap`. It only modifies an existing `ConfigMap`. This can be deployed by the administrator separately, using the manifest provided in the releases page, or in any other manner. In order to instruct metallb which IPs to announce and from where, CCM takes direct responsibility for managing the -metallb `ConfigMap`. As described above, this is normally at `metallb-system/config`. +metallb `ConfigMap`. As described above, this is normally at `metallb-system/config`. You **should not** attempt to modify this `ConfigMap` separately, as CCM will modify it with each loop. Modifying it separately is likely to break metallb's functioning. @@ -489,42 +504,42 @@ To enable the CCM to use MetalLB v0.13.2+, you must set the configuration `METAL metallb:///?crdConfiguration=true ``` -Note that the `?crdConfiguration=true` is _required_ in order for the CCM to correctly configure MetalLB v0.13.2+ via CRDs instead of using a ConfigMap. Currently, the CCM defaults to using a ConfigMap for backwards compatibility. In a future release, the CCM will default to using CRDs with MetalLB. +Note that the `?crdConfiguration=true` is _required_ in order for the CCM to correctly configure MetalLB v0.13.2+ via CRDs instead of using a ConfigMap. Currently, the CCM defaults to using a ConfigMap for backwards compatibility. In a future release, the CCM will default to using CRDs with MetalLB. 
For example: -* `metallb:///metallb-system?crdConfiguration=true` - enable `MetalLB` management and update configuration in the namespace `metallb-system` (default) -* `metallb:///foonamespace?crdConfiguration=true` - enable `MetalLB` management and update configuration in the namespace `metallb-system` -* `metallb:///?crdConfiguration=true` - enable `MetalLB` management and update configuration in the default namespace `metallb-system` +- `metallb:///metallb-system?crdConfiguration=true` - enable `MetalLB` management and update configuration in the namespace `metallb-system` (default) +- `metallb:///foonamespace?crdConfiguration=true` - enable `MetalLB` management and update configuration in the namespace `foonamespace` +- `metallb:///?crdConfiguration=true` - enable `MetalLB` management and update configuration in the default namespace `metallb-system` -Notice the **three* slashes. In the URL, the namespace are in the path. +Notice the **three** slashes. In the URL, the namespace is in the path. If `MetalLB` management is enabled, then CCM does the following. 1. Get the appropriate namespace, based on the rules above. 1. Enable BGP on the Equinix Metal project 1. For each node currently in the cluster or added: - * retrieve the node's Equinix Metal ID via the node provider ID - * retrieve the device's BGP configuration: node ASN, peer ASN, peer IPs, source IP - * create a `bgpeers.metallb.io` for each peer IP with a kubernetes selector ensuring that those BGPPeers are only for this node + - retrieve the node's Equinix Metal ID via the node provider ID + - retrieve the device's BGP configuration: node ASN, peer ASN, peer IPs, source IP + - create a `bgpeers.metallb.io` for each peer IP with a kubernetes selector ensuring that those BGPPeers are only for this node 1. For each node deleted from the cluster: - * delete the affiliated BGPeers. + - delete the affiliated BGPeers. 1. 
For each service of `type=LoadBalancer` currently in the cluster or added: - * if an Elastic IP address reservation with the appropriate tags exists, and the `Service` already has that IP address affiliated with it, it is ready; ignore - * if an Elastic IP address reservation with the appropriate tags exists, and the `Service` does not have that IP affiliated with it, add it to the [service spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#servicespec-v1-core) and ensure there is an `ipaddresspools.metallb.io` with `auto-assign: false`, and there is an elegible `bgpadvertisement.metallb.io`. If no bgpadvertisement exists with the appropriate tag ("cloud-provider":"equinix-metal"), a default bgpadvertisement "equinix-metal-bgp-adv" with the ipaddresspool name in the ipAddressPools spec will be created. - * if an Elastic IP address reservation with the appropriate tags does not exist, create it and add it to the services spec, and ensure there is an `ipaddresspools.metallb.io` with `auto-assign: false`, and there is an elegible `bgpadvertisement.metallb.io`. If no bgpadvertisement exists with the appropriate tag ("cloud-provider":"equinix-metal"), a default bgpadvertisement "equinix-metal-bgp-adv" with the ipaddresspool name in the ipAddressPools spec will be created; see [Equinix EIP][Equinix EIP] to control in which metro or facility the EIP will be created. + - if an Elastic IP address reservation with the appropriate tags exists, and the `Service` already has that IP address affiliated with it, it is ready; ignore + - if an Elastic IP address reservation with the appropriate tags exists, and the `Service` does not have that IP affiliated with it, add it to the [service spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#servicespec-v1-core) and ensure there is an `ipaddresspools.metallb.io` with `auto-assign: false`, and there is an elegible `bgpadvertisement.metallb.io`. 
If no bgpadvertisement exists with the appropriate tag ("cloud-provider":"equinix-metal"), a default bgpadvertisement "equinix-metal-bgp-adv" with the ipaddresspool name in the ipAddressPools spec will be created. + - if an Elastic IP address reservation with the appropriate tags does not exist, create it and add it to the services spec, and ensure there is an `ipaddresspools.metallb.io` with `auto-assign: false`, and there is an eligible `bgpadvertisement.metallb.io`. If no bgpadvertisement exists with the appropriate tag ("cloud-provider":"equinix-metal"), a default bgpadvertisement "equinix-metal-bgp-adv" with the ipaddresspool name in the ipAddressPools spec will be created; see [Equinix EIP][Equinix EIP] to control in which metro or facility the EIP will be created. 1. For each service of `type=LoadBalancer` deleted from the cluster: - * find the Elastic IP address from the service spec and remove it - * remove the affiliated `ipaddresspools.metallb.io` - * If there is no other service, delete all CCM managed `bgpeers` and the default `bgpadvertisement` - * delete the Elastic IP reservation from Equinix Metal + - find the Elastic IP address from the service spec and remove it + - remove the affiliated `ipaddresspools.metallb.io` + - If there is no other service, delete all CCM managed `bgpeers` and the default `bgpadvertisement` + - delete the Elastic IP reservation from Equinix Metal **NOTE:** (IP Address sharing)[https://metallb.universe.tf/usage/#ip-address-sharing] is not yet supported in Cloud Provider Equinix Metal. CCM itself does **not** install/deploy the load-balancer and it may exists before enable it. This can be deployed by the administrator separately, using the manifest provided in the releases page, or in any other manner. Not having metallb installed but enabled in the CCM configuration will end up allowing you to continue deploying kubernetes services, but the external ip assignment will remain pending, making it useless. 
In order to instruct metallb which IPs to announce and from where, CCM takes direct responsibility for managing the -metallb configuration. As described above, this is normally at `metallb-system`. Users can create and manage their own `bgpadvertisements.metallb.io` resources for advanced configuration, but they must have the appropriate tag ("cloud-provider":"equinix-metal") to prevent the CCM from creating a default bgpadvertisement. +metallb configuration. As described above, this is normally at `metallb-system`. Users can create and manage their own `bgpadvertisements.metallb.io` resources for advanced configuration, but they must have the appropriate tag ("cloud-provider":"equinix-metal") to prevent the CCM from creating a default bgpadvertisement. You **should not** attempt to modify metallb resources created by the CCM separately, as CCM will modify it with each loop. Modifying it separately is likely to break metallb's functioning. @@ -546,42 +561,57 @@ If `empty` management is enabled, then CCM does the following. 1. Enable BGP on the Equinix Metal project 1. For each node currently in the cluster or added: - * retrieve the node's Equinix Metal ID via the node provider ID - * retrieve the device's BGP configuration: node ASN, peer ASN, peer IPs, source IP - * add the information to appropriate annotations on the node + - retrieve the node's Equinix Metal ID via the node provider ID + - retrieve the device's BGP configuration: node ASN, peer ASN, peer IPs, source IP + - add the information to appropriate annotations on the node 1. 
For each service of `type=LoadBalancer` currently in the cluster or added: - * if an Elastic IP address reservation with the appropriate tags exists, and the `Service` already has that IP address affiliated with it, it is ready; ignore - * if an Elastic IP address reservation with the appropriate tags exists, and the `Service` does not have that IP affiliated with it, add it to the [service spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#servicespec-v1-core) - * if an Elastic IP address reservation with the appropriate tags does not exist, create it and add it to the services spec + - if an Elastic IP address reservation with the appropriate tags exists, and the `Service` already has that IP address affiliated with it, it is ready; ignore + - if an Elastic IP address reservation with the appropriate tags exists, and the `Service` does not have that IP affiliated with it, add it to the [service spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#servicespec-v1-core) + - if an Elastic IP address reservation with the appropriate tags does not exist, create it and add it to the services spec 1. For each service of `type=LoadBalancer` deleted from the cluster: - * find the Elastic IP address from the service spec and remove it - * delete the Elastic IP reservation from Equinix Metal + - find the Elastic IP address from the service spec and remove it + - delete the Elastic IP reservation from Equinix Metal ### Language In order to ease understanding, we use several different terms for an IP address: -* Requested: A dedicated `/32` IP address has been requested for the service from Equinix Metal. It may be returned immediately, or it may need to wait for Equinix Metal intervention. -* Reserved: A dedicated `/32` IP address has been reserved for the service from Equinix Metal. -* Assigned: The dedicated IP address has been marked on the service as `Service.Spec.LoadBalancerIP` as assigned. 
-* Mapped: The dedicated IP address has been added to the metallb `ConfigMap` as available. +- Requested: A dedicated `/32` IP address has been requested for the service from Equinix Metal. It may be returned immediately, or it may need to wait for Equinix Metal intervention. +- Reserved: A dedicated `/32` IP address has been reserved for the service from Equinix Metal. +- Assigned: The dedicated IP address has been marked on the service as `Service.Spec.LoadBalancerIP` as assigned. +- Mapped: The dedicated IP address has been added to the metallb `ConfigMap` as available. From Equinix Metal's perspective, the IP reservation is either Requested or Reserved, but not both. For the load balancer to work, the IP address needs to be all of: Reserved, Assigned, Mapped. ## Control Plane Load Balancing -CCM implements an optional control plane load balancer using an Equinix Metal Elastic IP (EIP) and the Equinix Metal API's -ability to assign that EIP to different devices. +CCM implements an optional control plane load balancer using one of two approaches: + +1. an Equinix Metal Load Balancer +1. an Equinix Metal Elastic IP (EIP) and the Equinix Metal API's ability to assign that EIP to different devices. You have several options for control plane load-balancing: -* CCM managed -* kube-vip managed -* No control plane load-balancing (or at least, none known to CCM) +- CCM managed +- kube-vip managed +- No control plane load-balancing (or at least, none known to CCM) ### CCM Managed +#### Equinix Metal Load Balancer + +If you have configured the CCM to use Equinix Metal Load Balancers (EMLB) for service load balancing, you can also choose to use EMLB for control plane load balancing. To enable control plane load balancing with EMLB: + +1. Create a Load Balancer using the Equinix Metal API or Web UI +1. When starting the CCM + - set the [configuration](#Configuration) for load balancing with EMLB, e.g. 
env var `METAL_LOAD_BALANCER=emlb://`, where `` is the metro in which you want the CCM to create your load balancers + - set the [configuration](#Configuration) for the load balancer ID, e.g. env var `METAL_LOAD_BALANCER_ID=`, where `` is the ID of the Load Balancer you created earlier + +When run with the correct configuration, on startup, CCM will automatically update your Load Balancer to send traffic to your control plane nodes. + +#### Elastic IP Load Balancer + It is a common procedure to use Elastic IP as Control Plane endpoint in order to have a static endpoint that you can use from the outside, or when configuring the advertise address for the kubelet. @@ -591,10 +621,10 @@ To enable CCM to manage the control plane EIP: 1. Create an Elastic IP, using the Equinix Metal API, Web UI or CLI 1. Put an arbitrary but unique tag on the EIP 1. When starting the CCM - * set the [configuration][Configuration] for the control plane EIP tag, e.g. env var `METAL_EIP_TAG=`, where `` is whatever tag you set on the EIP - * (optional) set the port that the EIP should listen on; by default, or when set to `0`, it will use the same port as the `kube-apiserver` on the control plane nodes. This port can also be specified with `METAL_API_SERVER_PORT=.` - * (optional) set the [configuration][Configuration] for using the host IP for control plane endpoint health checks. This is - needed when the EIP is configured as an loopback IP address, such as the case with [CAPP](https://github.com/kubernetes-sigs/cluster-api-provider-packet) + - set the [configuration](#Configuration) for the control plane EIP tag, e.g. env var `METAL_EIP_TAG=`, where `` is whatever tag you set on the EIP + - (optional) set the port that the EIP should listen on; by default, or when set to `0`, it will use the same port as the `kube-apiserver` on the control plane nodes. 
This port can also be specified with `METAL_API_SERVER_PORT=.` + - (optional) set the [configuration](#Configuration) for using the host IP for control plane endpoint health checks. This is + needed when the EIP is configured as a loopback IP address, such as the case with [CAPP](https://github.com/kubernetes-sigs/cluster-api-provider-packet) In [CAPP](https://github.com/kubernetes-sigs/cluster-api-provider-packet) we create one for every cluster for example. Equinix Metal does not provide an as a @@ -624,7 +654,7 @@ The logic will circle over all the available control planes looking for an active api server. As soon as it can find one the Elastic IP will be unassigned and reassigned to the working node. -#### How the Elastic IP Traffic is Routed +##### How the Elastic IP Traffic is Routed Of course, even if the router sends traffic for your Elastic IP (EIP) to a given control plane node, that node needs to know to process the traffic. Rather than require you to @@ -636,23 +666,23 @@ creates an `Endpoints` structure that includes all of the functioning control pl nodes. The CCM does the following on each loop: 1. Reads the Kubernetes-created `default/kubernetes` service to discover: - * what port `kube-apiserver` is listening on from `targetPort` - * all of the endpoints, i.e. control plane nodes where `kube-apiserver` is running + - what port `kube-apiserver` is listening on from `targetPort` + - all of the endpoints, i.e. control plane nodes where `kube-apiserver` is running 1. 
Creates a service named `kube-system/cloud-provider-equinix-metal-kubernetes-external` with the following settings: - * `type=LoadBalancer` - * `spec.loadBalancerIP=` - * `status.loadBalancer.ingress[0].ip=` - * `metadata.annotations["metallb.universe.tf/address-pool"]=disabled-metallb-do-not-use-any-address-pool` - * `spec.ports[0].targetPort=` - * `spec.ports[0].port=` + - `type=LoadBalancer` + - `spec.loadBalancerIP=` + - `status.loadBalancer.ingress[0].ip=` + - `metadata.annotations["metallb.universe.tf/address-pool"]=disabled-metallb-do-not-use-any-address-pool` + - `spec.ports[0].targetPort=` + - `spec.ports[0].port=` 1. Updates the service `kube-system/cloud-provider-equinix-metal-kubernetes-external` to have endpoints identical to those in `default/kubernetes` This has the following effect: -* the annotation prevents metallb from trying to manage the IP -* the name prevents CCM from passing it to the loadbalancer provider address mapping, thus preventing any of them from managing it -* the `spec.loadBalancerIP` and `status.loadBalancer.ingress[0].ip` cause kube-proxy to set up routes on all of the nodes -* the endpoints cause the traffic to be routed to the control plane nodes +- the annotation prevents metallb from trying to manage the IP +- the name prevents CCM from passing it to the loadbalancer provider address mapping, thus preventing any of them from managing it +- the `spec.loadBalancerIP` and `status.loadBalancer.ingress[0].ip` cause kube-proxy to set up routes on all of the nodes +- the endpoints cause the traffic to be routed to the control plane nodes Note that we _wanted_ to just set `externalIPs` on the original `default/kubernetes`, but that would prevent traffic from being routed to it from the control nodes, due to iptables rules. LoadBalancer types allow local traffic. @@ -672,21 +702,21 @@ The CCM does not maintain its own control loop, instead relying on the services On startup, the CCM: 1. 
Implements the [cloud-provider interface](https://pkg.go.dev/k8s.io/cloud-provider#Interface), providing primarily the following API calls: - * `Initialize()` - * `InstancesV2()` - * `LoadBalancer()` + - `Initialize()` + - `InstancesV2()` + - `LoadBalancer()` 1. In `Initialize`: 1. If BGP is configured, enable BGP on the project 1. If EIP control plane management is enabled, create an informer for `Service`, `Node` and `Endpoints`, updating the control plane EIP as needed. The CCM then relies on the cloud-provider control loop to call it: -* whenever a `Node` is added, to get node metadata -* whenever a `Service` of `type=LoadBalancer` is added, removed or updated -* if EIP control plane management is enabled, via shared informers: - * whenever a control plane `Node` is added, removed or updated - * whenever the `default/kubernetes` service is added or updated - * whenever the endpoints behind the `default/kubernetes` service are added, updated or removed +- whenever a `Node` is added, to get node metadata +- whenever a `Service` of `type=LoadBalancer` is added, removed or updated +- if EIP control plane management is enabled, via shared informers: + - whenever a control plane `Node` is added, removed or updated + - whenever the `default/kubernetes` service is added or updated + - whenever the endpoints behind the `default/kubernetes` service are added, updated or removed Further, it relies on the `resync` property of the above to ensure it always is up to date, and did not miss any events. @@ -695,8 +725,8 @@ Further, it relies on the `resync` property of the above to ensure it always is If a loadbalancer is enabled, the CCM enables BGP for the project and enables it by default on all nodes as they come up. It sets the ASNs as follows: -* Node, a.k.a. local, ASN: `65000` -* Peer Router ASN: `65530` +- Node, a.k.a. 
local, ASN: `65000` +- Peer Router ASN: `65530` These are the settings per Equinix Metal's BGP config, see [here](https://github.com/packet-labs/kubernetes-bgp). It is _not_ recommended to override them. However, you can do so, using the options in [Configuration][Configuration]. @@ -708,35 +738,35 @@ Value for node selector should be a valid Kubernetes label selector (e.g. key1=v The Equinix Metal CCM sets Kubernetes annotations on each cluster node. -* Node, or local, ASN, default annotation `metal.equinix.com/bgp-peers-{{n}}-node-asn` -* Peer ASN, default annotation `metal.equinix.com/bgp-peers-{{n}}-peer-asn` -* Peer IP, default annotation `metal.equinix.com/bgp-peers-{{n}}-peer-ip` -* Source IP to use when communicating with peer, default annotation `metal.equinix.com/bgp-peers-{{n}}-src-ip` -* BGP password for peer, default annotation `metal.equinix.com/bgp-peers-{{n}}-bgp-pass` -* CIDR of the private network range in the project which this node is part of, default annotation `metal.equinix.com/network-4-private` +- Node, or local, ASN, default annotation `metal.equinix.com/bgp-peers-{{n}}-node-asn` +- Peer ASN, default annotation `metal.equinix.com/bgp-peers-{{n}}-peer-asn` +- Peer IP, default annotation `metal.equinix.com/bgp-peers-{{n}}-peer-ip` +- Source IP to use when communicating with peer, default annotation `metal.equinix.com/bgp-peers-{{n}}-src-ip` +- BGP password for peer, default annotation `metal.equinix.com/bgp-peers-{{n}}-bgp-pass` +- CIDR of the private network range in the project which this node is part of, default annotation `metal.equinix.com/network-4-private` These annotation names can be overridden, if you so choose, using the options in [Configuration][Configuration]. Note that the annotations for BGP peering are a _pattern_. 
There is one annotation per data point per peer, following the pattern `metal.equinix.com/bgp-peers-{{n}}-`, where: -* `{{n}}` is the number of the peer, **always** starting with `0` -* `` is the relevant information, such as `node-asn` or `peer-ip` +- `{{n}}` is the number of the peer, **always** starting with `0` +- `` is the relevant information, such as `node-asn` or `peer-ip` For example: -* `metal.equinix.com/bgp-peers-0-peer-asn` - ASN of peer 0 -* `metal.equinix.com/bgp-peers-1-peer-asn` - ASN of peer 1 -* `metal.equinix.com/bgp-peers-0-peer-ip` - IP of peer 0 -* `metal.equinix.com/bgp-peers-1-peer-ip` - IP of peer 1 +- `metal.equinix.com/bgp-peers-0-peer-asn` - ASN of peer 0 +- `metal.equinix.com/bgp-peers-1-peer-asn` - ASN of peer 1 +- `metal.equinix.com/bgp-peers-0-peer-ip` - IP of peer 0 +- `metal.equinix.com/bgp-peers-1-peer-ip` - IP of peer 1 ## Elastic IP Configuration If a loadbalancer is enabled, CCM creates an Equinix Metal Elastic IP (EIP) reservation for each `Service` of `type=LoadBalancer`. It tags the Reservation with the following tags: -* `usage="cloud-provider-equinix-metal-auto"` -* `service=""` where `` is the sha256 hash of `/`. We do this so that the name of the service does not leak out to Equinix Metal itself. -* `cluster=` where `` is the UID of the immutable `kube-system` namespace. We do this so that if someone runs two clusters in the same project, and there is one `Service` in each cluster with the same namespace and name, then the two EIPs will not conflict. +- `usage="cloud-provider-equinix-metal-auto"` +- `service=""` where `` is the sha256 hash of `/`. We do this so that the name of the service does not leak out to Equinix Metal itself. +- `cluster=` where `` is the UID of the immutable `kube-system` namespace. We do this so that if someone runs two clusters in the same project, and there is one `Service` in each cluster with the same namespace and name, then the two EIPs will not conflict. 
IP addresses always are created `/32`. diff --git a/dev/Tiltfile b/dev/Tiltfile new file mode 100644 index 00000000..4976a1b0 --- /dev/null +++ b/dev/Tiltfile @@ -0,0 +1,38 @@ +# Specify the registry you wish to store the image in +registry = 'YOUR_REGISTRY_HERE' + +# List the k8s context you wish to run this in +allow_k8s_contexts('YOUR_K8S_CONTEXT_HERE') + +# Specify docker registry you wish to store image in +docker_build(registry + '/cloud-provider-equinix-metal', + context='.', + dockerfile='./Dockerfile', + ignore=['cloud-sa.json','dev/'], +) + +# read in the yaml file and replace the image name with the one we built +deployment = read_yaml_stream('deploy/template/deployment.yaml') +deployment[0]['spec']['template']['spec']['containers'][0]['image'] = registry + '/cloud-provider-equinix-metal' +deployment[0]['spec']['template']['spec']['containers'][0]['env']=[] +deployment[0]['spec']['template']['spec']['containers'][0]['env'].append({"name": "METAL_METRO_NAME","value":"YOUR_METRO_HERE"}) +deployment[0]['spec']['template']['spec']['containers'][0]['env'].append({"name": "METAL_LOAD_BALANCER","value":"YOUR_LOAD_BALANCER_HERE"}) +k8s_yaml(encode_yaml_stream(deployment)) +k8s_resource(workload='cloud-provider-equinix-metal',objects=['cloud-provider-equinix-metal:ServiceAccount:kube-system','cloud-provider-equinix-metal:ClusterRole:default','cloud-provider-equinix-metal:ClusterRoleBinding:default']) +k8s_resource(new_name='metal-cloud-config',objects=['metal-cloud-config:Secret:kube-system']) + +# Load the secret extension +load('ext://secret', 'secret_create_generic') + +# Create the cloud-provider-equinix-metal secret based on the contents of the +# file named cloud-sa.json put the apiKey and projectID in it +# The file should look like this: +# { +# "apiKey":"YOUR_API_KEY", +# "projectID":"YOUR_PROJECT_ID" +# } +secret_create_generic( + 'metal-cloud-config', + 'kube-system', + from_file='cloud-sa.json=./cloud-sa.json' +) \ No newline at end of file diff --git 
a/dev/cloud-sa.json b/dev/cloud-sa.json new file mode 100644 index 00000000..0523008e --- /dev/null +++ b/dev/cloud-sa.json @@ -0,0 +1,4 @@ +{ + "apiKey":"YOUR_API_KEY", + "projectID": "YOUR_PROJECT_ID" +} \ No newline at end of file diff --git a/dev/web-updated.yaml b/dev/web-updated.yaml new file mode 100644 index 00000000..9cf2e94c --- /dev/null +++ b/dev/web-updated.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Service +metadata: + name: web + labels: + app: web +spec: + ports: + - port: 8100 + targetPort: 80 + name: web + selector: + app: web + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: web + labels: + app: web +spec: + selector: + matchLabels: + app: web + template: + metadata: + labels: + app: web + spec: + containers: + - name: web + image: dockersamples/wordsmith-web + ports: + - containerPort: 80 + name: web diff --git a/dev/web-updated2.yaml b/dev/web-updated2.yaml new file mode 100644 index 00000000..c28a0224 --- /dev/null +++ b/dev/web-updated2.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Service +metadata: + name: webby + labels: + app: web +spec: + ports: + - port: 8200 + targetPort: 80 + name: web + selector: + app: web + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: web + labels: + app: web +spec: + selector: + matchLabels: + app: web + template: + metadata: + labels: + app: web + spec: + containers: + - name: web + image: dockersamples/wordsmith-web + ports: + - containerPort: 80 + name: web diff --git a/dev/web.yaml b/dev/web.yaml new file mode 100644 index 00000000..ecad38be --- /dev/null +++ b/dev/web.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Service +metadata: + name: web + labels: + app: web +spec: + ports: + - port: 8080 + targetPort: 80 + name: web + selector: + app: web + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: web + labels: + app: web +spec: + selector: + matchLabels: + app: web + template: + metadata: + labels: + app: 
web + spec: + containers: + - name: web + image: dockersamples/wordsmith-web + ports: + - containerPort: 80 + name: web diff --git a/dev/words.sql b/dev/words.sql new file mode 100644 index 00000000..ec13172c --- /dev/null +++ b/dev/words.sql @@ -0,0 +1,55 @@ +CREATE TABLE nouns (word TEXT NOT NULL); +CREATE TABLE verbs (word TEXT NOT NULL); +CREATE TABLE adjectives (word TEXT NOT NULL); + +INSERT INTO nouns(word) VALUES + ('cloud'), + ('elephant'), + ('gø language'), + ('laptøp'), + ('cøntainer'), + ('micrø-service'), + ('turtle'), + ('whale'), + ('gøpher'), + ('møby døck'), + ('server'), + ('bicycle'), + ('viking'), + ('mermaid'), + ('fjørd'), + ('legø'), + ('flødebolle'), + ('smørrebrød'); + +INSERT INTO verbs(word) VALUES + ('will drink'), + ('smashes'), + ('smøkes'), + ('eats'), + ('walks tøwards'), + ('løves'), + ('helps'), + ('pushes'), + ('debugs'), + ('invites'), + ('hides'), + ('will ship'); + +INSERT INTO adjectives(word) VALUES + ('the exquisite'), + ('a pink'), + ('the røtten'), + ('a red'), + ('the serverless'), + ('a brøken'), + ('a shiny'), + ('the pretty'), + ('the impressive'), + ('an awesøme'), + ('the famøus'), + ('a gigantic'), + ('the gløriøus'), + ('the nørdic'), + ('the welcøming'), + ('the deliciøus'); diff --git a/dev/wordsmith-ingress.yaml b/dev/wordsmith-ingress.yaml new file mode 100644 index 00000000..3ba7a95d --- /dev/null +++ b/dev/wordsmith-ingress.yaml @@ -0,0 +1,34 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: minimal-ingress + annotations: + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/rewrite-target: / +spec: + ingressClassName: nginx + rules: + - http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: websvc + port: + number: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: websvc + labels: + app: web +spec: + ports: + - port: 8080 + targetPort: 80 + name: web + selector: + app: web + type: ClusterIP diff --git a/go.mod 
b/go.mod index 25a0b0a4..c2121745 100644 --- a/go.mod +++ b/go.mod @@ -1,26 +1,28 @@ module github.com/equinix/cloud-provider-equinix-metal -go 1.19 +go 1.21 require ( - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.3.1 github.com/hashicorp/go-retryablehttp v0.7.4 github.com/packethost/packet-api-server v0.0.0-20230223042617-bc7d1539adbb github.com/packethost/packngo v0.30.0 github.com/pallinder/go-randomdata v1.2.0 go.universe.tf/metallb v0.13.7 - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 - k8s.io/api v0.26.4 - k8s.io/apimachinery v0.26.4 - k8s.io/client-go v0.26.4 - k8s.io/cloud-provider v0.26.4 - k8s.io/component-base v0.26.4 + golang.org/x/exp v0.0.0-20231006140011-7918f672742d + golang.org/x/oauth2 v0.13.0 + k8s.io/api v0.26.1 + k8s.io/apimachinery v0.26.1 + k8s.io/client-go v0.26.1 + k8s.io/cloud-provider v0.26.1 + k8s.io/component-base v0.26.1 k8s.io/klog/v2 v2.100.1 sigs.k8s.io/controller-runtime v0.14.6 sigs.k8s.io/yaml v1.3.0 ) require ( + cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect @@ -46,7 +48,7 @@ require ( github.com/go-openapi/swag v0.19.14 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/cel-go v0.12.6 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-cmp v0.5.9 // indirect @@ -89,31 +91,30 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.1.0 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect + golang.org/x/crypto v0.14.0 // indirect + 
golang.org/x/net v0.16.0 // indirect golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/term v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect google.golang.org/grpc v1.49.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.26.1 // indirect - k8s.io/apiserver v0.26.4 // indirect - k8s.io/component-helpers v0.26.4 // indirect - k8s.io/controller-manager v0.26.4 // indirect - k8s.io/kms v0.26.4 // indirect + k8s.io/apiserver v0.26.1 // indirect + k8s.io/component-helpers v0.26.1 // indirect + k8s.io/controller-manager v0.26.1 // indirect + k8s.io/kms v0.26.1 // indirect k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.36 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index 7806c7d5..83373b39 100644 --- a/go.sum +++ b/go.sum @@ -20,6 +20,10 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= 
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -110,6 +114,7 @@ github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2Vvl github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -178,11 +183,13 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M= github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= @@ -213,14 +220,16 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= 
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= @@ -242,6 +251,7 @@ github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7P github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -290,7 +300,9 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.6.0 h1:9t9b9vRUbFq3C4qKFCGkVuq/fIHji802N1nrtkh1mNc= +github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= +github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= 
github.com/packethost/packet-api-server v0.0.0-20230223042617-bc7d1539adbb h1:Tq95IyiReWp+eWNco4KoDNHIALQbNHFg5dD4TELEg9o= github.com/packethost/packet-api-server v0.0.0-20230223042617-bc7d1539adbb/go.mod h1:xX9d7NVrCzFoIFdy5hav33pCMZwiE37ZKgd1XM9qMEY= github.com/packethost/packngo v0.30.0 h1:JVeTwbXXETsLTDQncUbYwIFpkOp/xevXrffM2HrFECI= @@ -337,7 +349,9 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= @@ -361,23 +375,30 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= 
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0= go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6GU8= go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI= +go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4= go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI= go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= go.etcd.io/etcd/pkg/v3 v3.5.5 h1:Ablg7T7OkR+AeeeU32kdVhw/AGDsitkKPl7aW73ssjU= +go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyPdaE= go.etcd.io/etcd/raft/v3 v3.5.5 h1:Ibz6XyZ60OYyRopu73lLM/P+qco3YtlZMOhnXNS051I= +go.etcd.io/etcd/raft/v3 v3.5.5/go.mod h1:76TA48q03g1y1VpTue92jZLr9lIHKUNcYdZOOGyx8rI= go.etcd.io/etcd/server/v3 v3.5.5 h1:jNjYm/9s+f9A9r6+SC4RvNaz6AqixpOvhrFdT0PvIj0= +go.etcd.io/etcd/server/v3 v3.5.5/go.mod h1:rZ95vDw/jrvsbj9XpTqPrTAB9/kzchVdhRirySPkUBc= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -409,6 +430,7 @@ go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= @@ -425,8 +447,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -437,8 +459,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp 
v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -494,8 +516,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -503,8 +525,9 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -563,12 +586,12 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -577,8 +600,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -729,8 +752,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -758,6 +781,7 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -765,28 +789,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.4 h1:qSG2PmtcD23BkYiWfoYAcak870eF/hE7NNYBYavTT94= -k8s.io/api v0.26.4/go.mod 
h1:WwKEXU3R1rgCZ77AYa7DFksd9/BAIKyOmRlbVxgvjCk= +k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= +k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI= k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM= -k8s.io/apimachinery v0.26.4 h1:rZccKdBLg9vP6J09JD+z8Yr99Ce8gk3Lbi9TCx05Jzs= -k8s.io/apimachinery v0.26.4/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= -k8s.io/apiserver v0.26.4 h1:3Oq4mnJv0mzVX7BR/Nod+8KjlELf/3Ljvu9ZWDyLUoA= -k8s.io/apiserver v0.26.4/go.mod h1:yAY3O1vBM4/0OIGAGeWcdfzQvgdwJ188VirLcuSAVnw= -k8s.io/client-go v0.26.4 h1:/7P/IbGBuT73A+G97trf44NTPSNqvuBREpOfdLbHvD4= -k8s.io/client-go v0.26.4/go.mod h1:6qOItWm3EwxJdl/8p5t7FWtWUOwyMdA8N9ekbW4idpI= -k8s.io/cloud-provider v0.26.4 h1:mqN4vhC4mRoMi+ujI92ImkIOuYS7ZS55FvXB10d6Wp4= -k8s.io/cloud-provider v0.26.4/go.mod h1:F9xY0PvBuZDuGIHOM28dNiPLHxQnWfsiUuCSUikHevo= -k8s.io/component-base v0.26.4 h1:Bg2xzyXNKL3eAuiTEu3XE198d6z22ENgFgGQv2GGOUk= -k8s.io/component-base v0.26.4/go.mod h1:lTuWL1Xz/a4e80gmIC3YZG2JCO4xNwtKWHJWeJmsq20= -k8s.io/component-helpers v0.26.4 h1:qbZrh8QmfL+Yn7lWEI/BPrvITGgkBy33djP5Tzsu2hA= -k8s.io/component-helpers v0.26.4/go.mod h1:2Siz5eWmaKu0khASXMTCfJuASZAbCPX9mtjlCe5IWRs= -k8s.io/controller-manager v0.26.4 h1:SeOHV55WKqCa5HQfPHjMpfSPzJNblDvVDzfNgbQlSdQ= -k8s.io/controller-manager v0.26.4/go.mod h1:HJPU8OKTI8YhrtnvpuFdllK1QCQfibhJXDToDzCEsnQ= +k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= +k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= +k8s.io/apiserver v0.26.1 h1:6vmnAqCDO194SVCPU3MU8NcDgSqsUA62tBUSWrFXhsc= +k8s.io/apiserver v0.26.1/go.mod h1:wr75z634Cv+sifswE9HlAo5FQ7UoUauIICRlOE+5dCg= +k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= +k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= 
+k8s.io/cloud-provider v0.26.1 h1:qEZmsGWGptOtVSpeMdTsapHX2BEqIk7rc5MA4caBqE0= +k8s.io/cloud-provider v0.26.1/go.mod h1:6PheIxRySYuRBBxtTUADya8S2rbr18xKi+fhGbLkduc= +k8s.io/component-base v0.26.1 h1:4ahudpeQXHZL5kko+iDHqLj/FSGAEUnSVO0EBbgDd+4= +k8s.io/component-base v0.26.1/go.mod h1:VHrLR0b58oC035w6YQiBSbtsf0ThuSwXP+p5dD/kAWU= +k8s.io/component-helpers v0.26.1 h1:Y5h1OYUJTGyHZlSAsc7mcfNsWF08S/MlrQyF/vn93mU= +k8s.io/component-helpers v0.26.1/go.mod h1:jxNTnHb1axLe93MyVuvKj9T/+f4nxBVrj/xf01/UNFk= +k8s.io/controller-manager v0.26.1 h1:KmwVTmZ61dxUoHI1TQXlfsbmmk1NVZPUTKjtRowRD30= +k8s.io/controller-manager v0.26.1/go.mod h1:2K95SC0wv5qVbXuC5dJnSgU6vM9J+YBgAaJdVijIy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kms v0.26.4 h1:mQ+DeOvgAHC6+heZcozPkEd3rWtP4DVVjo1hLSih9w4= -k8s.io/kms v0.26.4/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg= +k8s.io/kms v0.26.1 h1:JE0n4J4+8/Z+egvXz2BTJeJ9ecsm4ZSLKF7ttVXXm/4= +k8s.io/kms v0.26.1/go.mod h1:ReC1IEGuxgfN+PDCIpR6w8+XMmDE7uJhxcCwMZFdIYc= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= @@ -794,8 +818,8 @@ k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.36 h1:PUuX1qIFv309AT8hF/CdPKDmsG/hn/L8zRX7VvISM3A= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.36/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo= 
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 h1:+xBL5uTc+BkPBwmMi3vYfUJjq+N3K+H6PXeETwf5cPI= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo= sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA= sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= diff --git a/metal/cloud.go b/metal/cloud.go index 15284f3f..d48e5045 100644 --- a/metal/cloud.go +++ b/metal/cloud.go @@ -27,11 +27,12 @@ const ( // cloud implements cloudprovider.Interface type cloud struct { - client *packngo.Client - config Config - instances *instances - loadBalancer *loadBalancers - controlPlaneEndpointManager *controlPlaneEndpointManager + client *packngo.Client + config Config + instances *instances + loadBalancer *loadBalancers + controlPlaneEndpointManager *controlPlaneEndpointManager + controlPlaneLoadBalancerManager *controlPlaneLoadBalancerManager // holds our bgp service handler bgp *bgp } @@ -80,6 +81,10 @@ func (c *cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, if err != nil { klog.Fatalf("could not initialize ControlPlaneEndpointManager: %v", err) } + lbm, err := newControlPlaneLoadBalancerManager(clientset, stop, c.config.ProjectID, c.config.LoadBalancerID, c.config.APIServerPort, c.config.EIPHealthCheckUseHostIP) + if err != nil { + klog.Fatalf("could not initialize ControlPlaneLoadBalancerManager: %v", err) + } bgp, err := newBGP(c.client, clientset, c.config) if err != nil { klog.Fatalf("could not initialize BGP: %v", err) @@ -93,6 +98,7 @@ func (c *cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, c.bgp = bgp c.instances = newInstances(c.client, c.config.ProjectID) c.controlPlaneEndpointManager = epm + c.controlPlaneLoadBalancerManager = lbm klog.Info("Initialize of cloud provider
complete") } diff --git a/metal/config.go b/metal/config.go index 5826a058..3642a548 100644 --- a/metal/config.go +++ b/metal/config.go @@ -31,6 +31,7 @@ const ( envVarAPIServerPort = "METAL_API_SERVER_PORT" envVarBGPNodeSelector = "METAL_BGP_NODE_SELECTOR" envVarEIPHealthCheckUseHostIP = "METAL_EIP_HEALTH_CHECK_USE_HOST_IP" + envVarLoadBalancerID = "METAL_LOAD_BALANCER_ID" ) // Config configuration for a provider, includes authentication token, project ID ID, and optional override URL to talk to a different Equinix Metal API endpoint @@ -55,6 +56,7 @@ type Config struct { APIServerPort int32 `json:"apiServerPort,omitempty"` BGPNodeSelector string `json:"bgpNodeSelector,omitempty"` EIPHealthCheckUseHostIP bool `json:"eipHealthCheckUseHostIP,omitempty"` + LoadBalancerID string `json:"loadBalancerID,omitempty"` } // String converts the Config structure to a string, while masking hidden fields. @@ -79,6 +81,7 @@ func (c Config) Strings() []string { ret = append(ret, fmt.Sprintf("Elastic IP Tag: '%s'", c.EIPTag)) ret = append(ret, fmt.Sprintf("API Server Port: '%d'", c.APIServerPort)) ret = append(ret, fmt.Sprintf("BGP Node Selector: '%s'", c.BGPNodeSelector)) + ret = append(ret, fmt.Sprintf("Load Balancer ID: '%s'", c.LoadBalancerID)) return ret } @@ -165,6 +168,8 @@ func getMetalConfig(providerConfig io.Reader) (Config, error) { config.EIPTag = override(os.Getenv(envVarEIPTag), rawConfig.EIPTag) + config.LoadBalancerID = override(os.Getenv(envVarLoadBalancerID), rawConfig.LoadBalancerID) + apiServer := os.Getenv(envVarAPIServerPort) switch { case apiServer != "": diff --git a/metal/controlplane_load_balancer_manager.go b/metal/controlplane_load_balancer_manager.go new file mode 100644 index 00000000..8bde160b --- /dev/null +++ b/metal/controlplane_load_balancer_manager.go @@ -0,0 +1,211 @@ +package metal + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net/http" + "sync" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
+ "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + v1applyconfig "k8s.io/client-go/applyconfigurations/core/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" +) + +type controlPlaneLoadBalancerManager struct { + apiServerPort int32 // port on which the external load balancer should listen + nodeAPIServerPort int32 // port on which the api server is listening on the control plane nodes + projectID string + loadBalancerID string + httpClient *http.Client + k8sclient kubernetes.Interface + serviceMutex sync.Mutex + endpointsMutex sync.Mutex + controlPlaneSelectors []labels.Selector + useHostIP bool +} + +func newControlPlaneLoadBalancerManager(k8sclient kubernetes.Interface, stop <-chan struct{}, projectID string, loadBalancerID string, apiServerPort int32, useHostIP bool) (*controlPlaneLoadBalancerManager, error) { + klog.V(2).Info("newControlPlaneLoadBalancerManager()") + + if loadBalancerID == "" { + klog.Info("Load balancer ID is not configured, skipping control plane load balancer management") + return nil, nil + } + + m := &controlPlaneLoadBalancerManager{ + httpClient: &http.Client{ + Timeout: time.Second * 5, + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + }, + apiServerPort: apiServerPort, + projectID: projectID, + loadBalancerID: loadBalancerID, + k8sclient: k8sclient, + useHostIP: useHostIP, + } + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-stop + cancel() + }() + + for _, label := range controlPlaneLabels { + req, err := labels.NewRequirement(label, selection.Exists, nil) + if err != nil { + return m, err + } + + m.controlPlaneSelectors = append(m.controlPlaneSelectors, labels.NewSelector().Add(*req)) + } + + sharedInformer := informers.NewSharedInformerFactory(k8sclient, checkLoopTimerSeconds*time.Second) + + if _, err :=
sharedInformer.Core().V1().Endpoints().Informer().AddEventHandler( + cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + e, _ := obj.(*v1.Endpoints) + if e.Namespace != metav1.NamespaceDefault || e.Name != "kubernetes" { + return false + } + + return true + }, + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + k8sEndpoints, _ := obj.(*v1.Endpoints) + klog.Infof("handling add, endpoints: %s/%s", k8sEndpoints.Namespace, k8sEndpoints.Name) + + if err := m.syncEndpoints(ctx, k8sEndpoints); err != nil { + klog.Errorf("failed to sync endpoints from default/kubernetes to %s/%s: %v", externalServiceNamespace, externalServiceName, err) + return + } + }, + UpdateFunc: func(_, obj interface{}) { + k8sEndpoints, _ := obj.(*v1.Endpoints) + klog.Infof("handling update, endpoints: %s/%s", k8sEndpoints.Namespace, k8sEndpoints.Name) + + if err := m.syncEndpoints(ctx, k8sEndpoints); err != nil { + klog.Errorf("failed to sync endpoints from default/kubernetes to %s/%s: %v", externalServiceNamespace, externalServiceName, err) + return + } + }, + }, + }, + ); err != nil { + return m, err + } + + if _, err := sharedInformer.Core().V1().Services().Informer().AddEventHandler( + cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + s, _ := obj.(*v1.Service) + // Filter only service default/kubernetes + if s.Namespace == metav1.NamespaceDefault && s.Name == "kubernetes" { + return true + } + //else + return false + }, + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + k8sService, _ := obj.(*v1.Service) + klog.Infof("handling add, service: %s/%s", k8sService.Namespace, k8sService.Name) + + if err := m.syncService(ctx, k8sService); err != nil { + klog.Errorf("failed to sync service from default/kubernetes to %s/%s: %v", externalServiceNamespace, externalServiceName, err) + return + } + }, + UpdateFunc: func(_, obj interface{}) { + k8sService, _ := obj.(*v1.Service) +
klog.Infof("handling update, service: %s/%s", k8sService.Namespace, k8sService.Name) + + if err := m.syncService(ctx, k8sService); err != nil { + klog.Errorf("failed to sync service from default/kubernetes to %s/%s: %v", externalServiceNamespace, externalServiceName, err) + return + } + }, + }, + }, + ); err != nil { + return m, err + } + + sharedInformer.Start(stop) + sharedInformer.WaitForCacheSync(stop) + + return m, nil +} + +func (m *controlPlaneLoadBalancerManager) syncEndpoints(ctx context.Context, k8sEndpoints *v1.Endpoints) error { + m.endpointsMutex.Lock() + defer m.endpointsMutex.Unlock() + + applyConfig := v1applyconfig.Endpoints(externalServiceName, externalServiceNamespace) + for _, subset := range k8sEndpoints.Subsets { + applyConfig = applyConfig.WithSubsets(EndpointSubsetApplyConfig(subset)) + } + + if _, err := m.k8sclient.CoreV1().Endpoints(externalServiceNamespace).Apply( + ctx, + applyConfig, + metav1.ApplyOptions{FieldManager: emIdentifier}, + ); err != nil { + return fmt.Errorf("failed to apply endpoint %s/%s: %w", externalServiceNamespace, externalServiceName, err) + } + + return nil +} + +func (m *controlPlaneLoadBalancerManager) syncService(ctx context.Context, k8sService *v1.Service) error { + m.serviceMutex.Lock() + defer m.serviceMutex.Unlock() + + // get the target port + existingPorts := k8sService.Spec.Ports + if len(existingPorts) < 1 { + return errors.New("default/kubernetes service does not have any ports defined") + } + + // track which port the kube-apiserver actually is listening on + m.nodeAPIServerPort = existingPorts[0].TargetPort.IntVal + // did we set a specific port, or did we request that it just be left as is? 
+ if m.apiServerPort == 0 { + m.apiServerPort = m.nodeAPIServerPort + } + + annotations := map[string]string{} + annotations["equinix.com/loadbalancerID"] = m.loadBalancerID + + specApplyConfig := v1applyconfig.ServiceSpec().WithType(v1.ServiceTypeLoadBalancer) + + for _, port := range existingPorts { + specApplyConfig = specApplyConfig.WithPorts(ServicePortApplyConfig(port)) + } + + applyConfig := v1applyconfig.Service(externalServiceName, externalServiceNamespace). + WithAnnotations(annotations). + WithSpec(specApplyConfig) + + if _, err := m.k8sclient.CoreV1().Services(externalServiceNamespace).Apply( + ctx, + applyConfig, + metav1.ApplyOptions{FieldManager: emIdentifier}, + ); err != nil { + return fmt.Errorf("failed to apply service %s/%s: %w", externalServiceNamespace, externalServiceName, err) + } + + return nil +} diff --git a/metal/loadbalancers.go b/metal/loadbalancers.go index 4673adf6..bd726039 100644 --- a/metal/loadbalancers.go +++ b/metal/loadbalancers.go @@ -13,6 +13,7 @@ import ( "strings" "github.com/equinix/cloud-provider-equinix-metal/metal/loadbalancers" + "github.com/equinix/cloud-provider-equinix-metal/metal/loadbalancers/emlb" "github.com/equinix/cloud-provider-equinix-metal/metal/loadbalancers/empty" "github.com/equinix/cloud-provider-equinix-metal/metal/loadbalancers/kubevip" "github.com/equinix/cloud-provider-equinix-metal/metal/loadbalancers/metallb" @@ -23,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/labels" k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" + cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" ) @@ -47,6 +49,7 @@ type loadBalancers struct { eipFacilityAnnotation string nodeSelector labels.Selector eipTag string + usesBGP bool } func newLoadBalancers(client *packngo.Client, k8sclient kubernetes.Interface, projectID, metro, facility, config string, localASN int, bgpPass, annotationNetwork, annotationLocalASN, annotationPeerASN, annotationPeerIP, annotationSrcIP, annotationBgpPass, 
eipMetroAnnotation, eipFacilityAnnotation, nodeSelector, eipTag string) (*loadBalancers, error) { @@ -55,7 +58,11 @@ func newLoadBalancers(client *packngo.Client, k8sclient kubernetes.Interface, pr selector, _ = labels.Parse(nodeSelector) } - l := &loadBalancers{client, k8sclient, projectID, metro, facility, "", nil, config, localASN, bgpPass, annotationNetwork, annotationLocalASN, annotationPeerASN, annotationPeerIP, annotationSrcIP, annotationBgpPass, eipMetroAnnotation, eipFacilityAnnotation, selector, eipTag} + // TODO: refactor this and related functions so we can move common code + // for BGP-based load balancers somewhere else + defaultUsesBgp := true + + l := &loadBalancers{client, k8sclient, projectID, metro, facility, "", nil, config, localASN, bgpPass, annotationNetwork, annotationLocalASN, annotationPeerASN, annotationPeerIP, annotationSrcIP, annotationBgpPass, eipMetroAnnotation, eipFacilityAnnotation, selector, eipTag, defaultUsesBgp} // parse the implementor config and see what kind it is - allow for no config if l.implementorConfig == "" { @@ -90,6 +97,11 @@ func newLoadBalancers(client *packngo.Client, k8sclient kubernetes.Interface, pr case "empty": klog.Info("loadbalancer implementation enabled: empty, bgp only") impl = empty.NewLB(k8sclient, lbconfig) + case "emlb": + klog.Info("loadbalancer implementation enabled: emlb") + impl = emlb.NewLB(k8sclient, lbconfig, client.APIKey, projectID) + // TODO remove when common BGP code has been refactored to somewhere else + l.usesBGP = false default: klog.Info("loadbalancer implementation disabled") impl = nil @@ -101,7 +113,8 @@ func newLoadBalancers(client *packngo.Client, k8sclient kubernetes.Interface, pr return l, nil } -// implementation of cloudprovider.LoadBalancer +// validate our implementation of cloudprovider.LoadBalancer +var _ cloudprovider.LoadBalancer = (*loadBalancers)(nil) // GetLoadBalancer returns whether the specified load balancer exists, and // if so, what its status is. 
@@ -114,27 +127,30 @@ func (l *loadBalancers) GetLoadBalancer(ctx context.Context, clusterName string, svcIP := service.Spec.LoadBalancerIP var svcIPCidr string + if l.usesBGP { + // get IP address reservations and check if any exist for this svc + ips, _, err := l.client.ProjectIPs.List(l.project, &packngo.ListOptions{}) + if err != nil { + return nil, false, fmt.Errorf("unable to retrieve IP reservations for project %s: %w", l.project, err) + } - // get IP address reservations and check if they any exists for this svc - ips, _, err := l.client.ProjectIPs.List(l.project, &packngo.ListOptions{}) - if err != nil { - return nil, false, fmt.Errorf("unable to retrieve IP reservations for project %s: %w", l.project, err) - } - - ipReservation := ipReservationByAllTags([]string{svcTag, emTag, clsTag}, ips) + ipReservation := ipReservationByAllTags([]string{svcTag, emTag, clsTag}, ips) - klog.V(2).Infof("GetLoadBalancer(): remove: %s with existing IP assignment %s", svcName, svcIP) + klog.V(2).Infof("GetLoadBalancer(): remove: %s with existing IP assignment %s", svcName, svcIP) - // get the IPs and see if there is anything to clean up - if ipReservation == nil { - return nil, false, nil + // get the IPs and see if there is anything to clean up + if ipReservation == nil { + return nil, false, nil + } + svcIPCidr = fmt.Sprintf("%s/%d", ipReservation.Address, ipReservation.CIDR) + return &v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + {IP: svcIPCidr}, + }, + }, true, nil + } else { + return l.implementor.GetLoadBalancer(ctx, clusterName, service) } - svcIPCidr = fmt.Sprintf("%s/%d", ipReservation.Address, ipReservation.CIDR) - return &v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - {IP: svcIPCidr}, - }, - }, true, nil } // GetLoadBalancerName returns the name of the load balancer.
Implementations must treat the @@ -149,34 +165,40 @@ func (l *loadBalancers) GetLoadBalancerName(ctx context.Context, clusterName str // Implementations must treat the *v1.Service and *v1.Node // parameters as read-only and not modify them. // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager -func (l *loadBalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { +func (l *loadBalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, readOnlyService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { + service := readOnlyService.DeepCopy() klog.V(2).Infof("EnsureLoadBalancer(): add: service %s/%s", service.Namespace, service.Name) - // get IP address reservations and check if they any exists for this svc - ips, _, err := l.client.ProjectIPs.List(l.project, &packngo.ListOptions{}) - if err != nil { - return nil, fmt.Errorf("unable to retrieve IP reservations for project %s: %w", l.project, err) - } var ipCidr string - // handling is completely different if it is the control plane vs a regular service of type=LoadBalancer - if service.Name == externalServiceName && service.Namespace == externalServiceNamespace { - ipCidr, err = l.retrieveIPByTag(ctx, service, ips, l.eipTag) + var err error + + // TODO: Split out most of this to "reconcileLoadBalancer" + // TODO: Split out status checking to a separate function that reconcileLoadBalancer calls + + // For EIP-based (BGP) load balancers, handling is completely different if it is the control plane vs a regular service of type=LoadBalancer + if l.usesBGP && service.Name == externalServiceName && service.Namespace == externalServiceNamespace { + ipCidr, err = l.retrieveIPByTag(ctx, service, l.eipTag) if err != nil { return nil, fmt.Errorf("failed to add service %s: %w", service.Name, err) } + + // get the IP only + ip := strings.SplitN(ipCidr, "/", 2) + + return 
&v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + {IP: ip[0]}, + }, + }, nil } else { - ipCidr, err = l.addService(ctx, service, ips, filterNodes(nodes, l.nodeSelector)) + loadBalancerName := l.GetLoadBalancerName(ctx, clusterName, service) + _, err = l.addService(ctx, service, filterNodes(nodes, l.nodeSelector), loadBalancerName) if err != nil { return nil, fmt.Errorf("failed to add service %s: %w", service.Name, err) } } - // get the IP only - ip := strings.SplitN(ipCidr, "/", 2) - return &v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{ - {IP: ip[0]}, - }, - }, nil + status, _, err := l.GetLoadBalancer(ctx, clusterName, service) + return status, err } // UpdateLoadBalancer updates hosts under the specified load balancer. @@ -187,40 +209,45 @@ func (l *loadBalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri klog.V(2).Infof("UpdateLoadBalancer(): service %s", service.Name) var n []loadbalancers.Node - for _, node := range filterNodes(nodes, l.nodeSelector) { - klog.V(2).Infof("UpdateLoadBalancer(): %s", node.Name) - // get the node provider ID - id := node.Spec.ProviderID - if id == "" { - return fmt.Errorf("no provider ID given for node %s, skipping", node.Name) - } - // ensure BGP is enabled for the node - if err := ensureNodeBGPEnabled(id, l.client); err != nil { - klog.Errorf("could not ensure BGP enabled for node %s: %w", node.Name, err) - continue - } - klog.V(2).Infof("bgp enabled on node %s", node.Name) - // ensure the node has the correct annotations - if err := l.annotateNode(ctx, node); err != nil { - return fmt.Errorf("failed to annotate node %s: %w", node.Name, err) - } - var ( - peer *packngo.BGPNeighbor - err error - ) - if peer, err = getNodeBGPConfig(id, l.client); err != nil || peer == nil { - return fmt.Errorf("could not add metallb node peer address for node %s: %w", node.Name, err) + + // TODO remove this conditional when common BGP code has been refactored to somewhere else + if l.usesBGP { + for _, 
node := range filterNodes(nodes, l.nodeSelector) { + klog.V(2).Infof("UpdateLoadBalancer(): %s", node.Name) + // get the node provider ID + id := node.Spec.ProviderID + if id == "" { + return fmt.Errorf("no provider ID given for node %s, skipping", node.Name) + } + // ensure BGP is enabled for the node + if err := ensureNodeBGPEnabled(id, l.client); err != nil { + klog.Errorf("could not ensure BGP enabled for node %s: %w", node.Name, err) + continue + } + klog.V(2).Infof("bgp enabled on node %s", node.Name) + // ensure the node has the correct annotations + if err := l.annotateNode(ctx, node); err != nil { + return fmt.Errorf("failed to annotate node %s: %w", node.Name, err) + } + var ( + peer *packngo.BGPNeighbor + err error + ) + if peer, err = getNodeBGPConfig(id, l.client); err != nil || peer == nil { + return fmt.Errorf("could not add metallb node peer address for node %s: %w", node.Name, err) + } + n = append(n, loadbalancers.Node{ + Name: node.Name, + LocalASN: peer.CustomerAs, + PeerASN: peer.PeerAs, + SourceIP: peer.CustomerIP, + Peers: peer.PeerIps, + Password: peer.Md5Password, + }) } - n = append(n, loadbalancers.Node{ - Name: node.Name, - LocalASN: peer.CustomerAs, - PeerASN: peer.PeerAs, - SourceIP: peer.CustomerIP, - Peers: peer.PeerIps, - Password: peer.Md5Password, - }) } - return l.implementor.UpdateService(ctx, service.Namespace, service.Name, n) + + return l.implementor.UpdateService(ctx, service.Namespace, service.Name, n, service, nodes) } // EnsureLoadBalancerDeleted deletes the specified load balancer if it @@ -240,38 +267,41 @@ func (l *loadBalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa var svcIPCidr string - // get IP address reservations and check if they any exists for this svc - ips, _, err := l.client.ProjectIPs.List(l.project, &packngo.ListOptions{}) - if err != nil { - return fmt.Errorf("unable to retrieve IP reservations for project %s: %w", l.project, err) - } + if l.usesBGP { + // get IP address reservations 
and check if they any exists for this svc + ips, _, err := l.client.ProjectIPs.List(l.project, &packngo.ListOptions{}) + if err != nil { + return fmt.Errorf("unable to retrieve IP reservations for project %s: %w", l.project, err) + } - ipReservation := ipReservationByAllTags([]string{svcTag, emTag, clsTag}, ips) + ipReservation := ipReservationByAllTags([]string{svcTag, emTag, clsTag}, ips) - klog.V(2).Infof("EnsureLoadBalancerDeleted(): remove: %s with existing IP assignment %s", svcName, svcIP) + klog.V(2).Infof("EnsureLoadBalancerDeleted(): remove: %s with existing IP assignment %s", svcName, svcIP) - // get the IPs and see if there is anything to clean up - if ipReservation == nil { - klog.V(2).Infof("EnsureLoadBalancerDeleted(): remove: no IP reservation found for %s, nothing to delete", svcName) - return nil - } - // delete the reservation - klog.V(2).Infof("EnsureLoadBalancerDeleted(): remove: for %s EIP ID %s", svcName, ipReservation.ID) - if _, err := l.client.ProjectIPs.Remove(ipReservation.ID); err != nil { - return fmt.Errorf("failed to remove IP address reservation %s from project: %w", ipReservation.String(), err) + // get the IPs and see if there is anything to clean up + if ipReservation == nil { + klog.V(2).Infof("EnsureLoadBalancerDeleted(): remove: no IP reservation found for %s, nothing to delete", svcName) + return nil + } + // delete the reservation + klog.V(2).Infof("EnsureLoadBalancerDeleted(): remove: for %s EIP ID %s", svcName, ipReservation.ID) + if _, err := l.client.ProjectIPs.Remove(ipReservation.ID); err != nil { + return fmt.Errorf("failed to remove IP address reservation %s from project: %w", ipReservation.String(), err) + } + // remove it from any implementation-specific parts + svcIPCidr = fmt.Sprintf("%s/%d", ipReservation.Address, ipReservation.CIDR) + klog.V(2).Infof("EnsureLoadBalancerDeleted(): remove: for %s entry %s", svcName, svcIPCidr) } - // remove it from any implementation-specific parts - svcIPCidr = 
fmt.Sprintf("%s/%d", ipReservation.Address, ipReservation.CIDR) - klog.V(2).Infof("EnsureLoadBalancerDeleted(): remove: for %s entry %s", svcName, svcIPCidr) - if err := l.implementor.RemoveService(ctx, service.Namespace, service.Name, svcIPCidr); err != nil { + + if err := l.implementor.RemoveService(ctx, service.Namespace, service.Name, svcIPCidr, service); err != nil { return fmt.Errorf("error removing IP from configmap for %s: %w", svcName, err) } klog.V(2).Infof("EnsureLoadBalancerDeleted(): remove: removed service %s from implementation", svcName) + return nil } // utility funcs - // annotateNode ensure a node has the correct annotations. func (l *loadBalancers) annotateNode(ctx context.Context, node *v1.Node) error { klog.V(2).Infof("annotateNode: %s", node.Name) @@ -361,7 +391,7 @@ func (l *loadBalancers) annotateNode(ctx context.Context, node *v1.Node) error { } // addService add a single service; wraps the implementation -func (l *loadBalancers) addService(ctx context.Context, svc *v1.Service, ips []packngo.IPAddressReservation, nodes []*v1.Node) (string, error) { +func (l *loadBalancers) addService(ctx context.Context, svc *v1.Service, nodes []*v1.Node, loadBalancerName string) (string, error) { svcName := serviceRep(svc) svcTag := serviceTag(svc) svcRegion := serviceAnnotation(svc, l.eipMetroAnnotation) @@ -372,135 +402,150 @@ func (l *loadBalancers) addService(ctx context.Context, svc *v1.Service, ips []p var ( svcIPCidr string err error + n []loadbalancers.Node + ips []packngo.IPAddressReservation ) - ipReservation := ipReservationByAllTags([]string{svcTag, emTag, clsTag}, ips) - klog.V(2).Infof("processing %s with existing IP assignment %s", svcName, svcIP) - // if it already has an IP, no need to get it one - if svcIP == "" { - klog.V(2).Infof("no IP assigned for service %s; searching reservations", svcName) - - // if no IP found, request a new one - if ipReservation == nil { + if l.usesBGP { + // get IP address reservations and check if they any 
exists for this svc + ips, _, err = l.client.ProjectIPs.List(l.project, &packngo.ListOptions{}) + if err != nil { + return "", fmt.Errorf("unable to retrieve IP reservations for project %s: %w", l.project, err) + } + ipReservation := ipReservationByAllTags([]string{svcTag, emTag, clsTag}, ips) + + klog.V(2).Infof("processing %s with existing IP assignment %s", svcName, svcIP) + // if it already has an IP, no need to get it one + if svcIP == "" { + klog.V(2).Infof("no IP assigned for service %s; searching reservations", svcName) + + // if no IP found, request a new one + if ipReservation == nil { + + // if we did not find an IP reserved, create a request + klog.V(2).Infof("no IP assignment found for %s, requesting", svcName) + // create a request + // our logic as to where to create the IP: + // 1. if metro is set globally, use it; else + // 2. if facility is set globally, use it; else + // 3. if Service.Metadata.Labels["topology.kubernetes.io/region"] is set, use it; else + // 4. if Service.Metadata.Labels["topology.kubernetes.io/zone"] is set, use it; else + // 5. 
Return error, cannot set an EIP + facility := l.facility + metro := l.metro + req := packngo.IPReservationRequest{ + Type: "public_ipv4", + Quantity: 1, + Description: ccmIPDescription, + Tags: []string{ + emTag, + svcTag, + clsTag, + }, + FailOnApprovalRequired: true, + } + switch { + case svcRegion != "": + req.Metro = &svcRegion + case svcZone != "": + req.Facility = &svcZone + case metro != "": + req.Metro = &metro + case facility != "": + req.Facility = &facility + default: + return "", errors.New("unable to create load balancer when no IP, region or zone specified, either globally or on service") + } + + ipReservation, _, err = l.client.ProjectIPs.Request(l.project, &req) + if err != nil { + return "", fmt.Errorf("failed to request an IP for the load balancer: %w", err) + } + } - // if we did not find an IP reserved, create a request - klog.V(2).Infof("no IP assignment found for %s, requesting", svcName) - // create a request - // our logic as to where to create the IP: - // 1. if metro is set globally, use it; else - // 2. if facility is set globally, use it; else - // 3. if Service.Metadata.Labels["topology.kubernetes.io/region"] is set, use it; else - // 4. if Service.Metadata.Labels["topology.kubernetes.io/zone"] is set, use it; else - // 5. 
Return error, cannot set an EIP - facility := l.facility - metro := l.metro - req := packngo.IPReservationRequest{ - Type: "public_ipv4", - Quantity: 1, - Description: ccmIPDescription, - Tags: []string{ - emTag, - svcTag, - clsTag, - }, - FailOnApprovalRequired: true, + // if we have no IP from existing or a new reservation, log it and return + if ipReservation == nil { + klog.V(2).Infof("no IP to assign to service %s, will need to wait until it is allocated", svcName) + return "", nil } - switch { - case svcRegion != "": - req.Metro = &svcRegion - case svcZone != "": - req.Facility = &svcZone - case metro != "": - req.Metro = &metro - case facility != "": - req.Facility = &facility - default: - return "", errors.New("unable to create load balancer when no IP, region or zone specified, either globally or on service") + + // we have an IP, either found from existing reservations or a new reservation. + // map and assign it + svcIP = ipReservation.Address + + // assign the IP and save it + klog.V(2).Infof("assigning IP %s to %s", svcIP, svcName) + intf := l.k8sclient.CoreV1().Services(svc.Namespace) + existing, err := intf.Get(ctx, svc.Name, metav1.GetOptions{}) + if err != nil || existing == nil { + klog.V(2).Infof("failed to get latest for service %s: %v", svcName, err) + return "", fmt.Errorf("failed to get latest for service %s: %w", svcName, err) } + existing.Spec.LoadBalancerIP = svcIP - ipReservation, _, err = l.client.ProjectIPs.Request(l.project, &req) + _, err = intf.Update(ctx, existing, metav1.UpdateOptions{}) if err != nil { - return "", fmt.Errorf("failed to request an IP for the load balancer: %w", err) + klog.V(2).Infof("failed to update service %s: %v", svcName, err) + return "", fmt.Errorf("failed to update service %s: %w", svcName, err) } + klog.V(2).Infof("successfully assigned %s update service %s", svcIP, svcName) } - - // if we have no IP from existing or a new reservation, log it and return - if ipReservation == nil { - klog.V(2).Infof("no IP 
to assign to service %s, will need to wait until it is allocated", svcName) - return "", nil - } - - // we have an IP, either found from existing reservations or a new reservation. - // map and assign it - svcIP = ipReservation.Address - - // assign the IP and save it - klog.V(2).Infof("assigning IP %s to %s", svcIP, svcName) - intf := l.k8sclient.CoreV1().Services(svc.Namespace) - existing, err := intf.Get(ctx, svc.Name, metav1.GetOptions{}) - if err != nil || existing == nil { - klog.V(2).Infof("failed to get latest for service %s: %v", svcName, err) - return "", fmt.Errorf("failed to get latest for service %s: %w", svcName, err) - } - existing.Spec.LoadBalancerIP = svcIP - - _, err = intf.Update(ctx, existing, metav1.UpdateOptions{}) - if err != nil { - klog.V(2).Infof("failed to update service %s: %v", svcName, err) - return "", fmt.Errorf("failed to update service %s: %w", svcName, err) - } - klog.V(2).Infof("successfully assigned %s update service %s", svcIP, svcName) - } - // our default CIDR for each address is 32 - cidr := 32 - if ipReservation != nil { - cidr = ipReservation.CIDR - } - svcIPCidr = fmt.Sprintf("%s/%d", svcIP, cidr) - // now need to pass it the nodes - - var n []loadbalancers.Node - for _, node := range nodes { - // get the node provider ID - id := node.Spec.ProviderID - if id == "" { - klog.Errorf("no provider ID given for node %s, skipping", node.Name) - continue - } - // ensure BGP is enabled for the node - if err := ensureNodeBGPEnabled(id, l.client); err != nil { - klog.Errorf("could not ensure BGP enabled for node %s: %w", node.Name, err) - continue + // our default CIDR for each address is 32 + cidr := 32 + if ipReservation != nil { + cidr = ipReservation.CIDR } - klog.V(2).Infof("bgp enabled on node %s", node.Name) - // ensure the node has the correct annotations - if err := l.annotateNode(ctx, node); err != nil { - klog.Errorf("failed to annotate node %s: %w", node.Name, err) - continue - } - peer, err := getNodeBGPConfig(id, 
l.client) - if err != nil || peer == nil { - klog.Errorf("loadbalancers.addService(): could not get node peer address for node %s: %w", node.Name, err) - continue + svcIPCidr = fmt.Sprintf("%s/%d", svcIP, cidr) + // now need to pass it the nodes + + for _, node := range nodes { + // get the node provider ID + id := node.Spec.ProviderID + if id == "" { + klog.Errorf("no provider ID given for node %s, skipping", node.Name) + continue + } + // ensure BGP is enabled for the node + if err := ensureNodeBGPEnabled(id, l.client); err != nil { + klog.Errorf("could not ensure BGP enabled for node %s: %w", node.Name, err) + continue + } + klog.V(2).Infof("bgp enabled on node %s", node.Name) + // ensure the node has the correct annotations + if err := l.annotateNode(ctx, node); err != nil { + klog.Errorf("failed to annotate node %s: %w", node.Name, err) + continue + } + peer, err := getNodeBGPConfig(id, l.client) + if err != nil || peer == nil { + klog.Errorf("loadbalancers.addService(): could not get node peer address for node %s: %w", node.Name, err) + continue + } + n = append(n, loadbalancers.Node{ + Name: node.Name, + LocalASN: peer.CustomerAs, + PeerASN: peer.PeerAs, + SourceIP: peer.CustomerIP, + Peers: peer.PeerIps, + Password: peer.Md5Password, + }) } - n = append(n, loadbalancers.Node{ - Name: node.Name, - LocalASN: peer.CustomerAs, - PeerASN: peer.PeerAs, - SourceIP: peer.CustomerIP, - Peers: peer.PeerIps, - Password: peer.Md5Password, - }) } - return svcIPCidr, l.implementor.AddService(ctx, svc.Namespace, svc.Name, svcIPCidr, n) + return svcIPCidr, l.implementor.AddService(ctx, svc.Namespace, svc.Name, svcIPCidr, n, svc, nodes, loadBalancerName) } -func (l *loadBalancers) retrieveIPByTag(ctx context.Context, svc *v1.Service, ips []packngo.IPAddressReservation, tag string) (string, error) { +func (l *loadBalancers) retrieveIPByTag(ctx context.Context, svc *v1.Service, tag string) (string, error) { svcName := serviceRep(svc) svcIP := svc.Spec.LoadBalancerIP cidr := 
32 + // get IP address reservations and check if they any exists for this svc + ips, _, err := l.client.ProjectIPs.List(l.project, &packngo.ListOptions{}) + if err != nil { + return "", err + } + var svcIPCidr string ipReservation := ipReservationByAllTags([]string{tag}, ips) diff --git a/metal/loadbalancers/emlb/emlb.go b/metal/loadbalancers/emlb/emlb.go new file mode 100644 index 00000000..4b4e3ade --- /dev/null +++ b/metal/loadbalancers/emlb/emlb.go @@ -0,0 +1,145 @@ +// Implementation of Equinix Metal Load Balancer +package emlb + +import ( + "context" + "fmt" + "strings" + + "github.com/equinix/cloud-provider-equinix-metal/metal/loadbalancers" + "github.com/equinix/cloud-provider-equinix-metal/metal/loadbalancers/emlb/infrastructure" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + clientconfig "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +type LB struct { + manager *infrastructure.Manager + k8sclient kubernetes.Interface + client client.Client +} + +const ( + LoadBalancerIDAnnotation = "equinix.com/loadbalancerID" +) + +var _ loadbalancers.LB = (*LB)(nil) + +func NewLB(k8sclient kubernetes.Interface, config, metalAPIKey, projectID string) *LB { + // Parse config for Equinix Metal Load Balancer + // The format is emlb:/// + // An example config using Dallas as the location would look like emlb:///da + // it may have an extra slash at the beginning or end, so get rid of it + metro := strings.TrimPrefix(config, "/") + + // Create a new LB object. + lb := &LB{} + + // Set the manager subobject to have the API key and project id and metro. + lb.manager = infrastructure.NewManager(metalAPIKey, projectID, metro) + + // Pass the k8sclient into the LB object. + lb.k8sclient = k8sclient + + // Set up a new controller-runtime k8s client for LB object. 
+ scheme := runtime.NewScheme() + err := v1.AddToScheme(scheme) + if err != nil { + panic(err) + } + newClient, err := client.New(clientconfig.GetConfigOrDie(), client.Options{Scheme: scheme}) + if err != nil { + panic(err) + } + lb.client = newClient + + return lb +} + +func (l *LB) AddService(ctx context.Context, svcNamespace, svcName, ip string, nodes []loadbalancers.Node, svc *v1.Service, n []*v1.Node, loadBalancerName string) error { + return l.reconcileService(ctx, svc, n, loadBalancerName) +} + +func (l *LB) RemoveService(ctx context.Context, svcNamespace, svcName, ip string, svc *v1.Service) error { + // 1. Gather the properties we need: ID of load balancer + loadBalancerId := svc.Annotations[LoadBalancerIDAnnotation] + + // 2. Delete the infrastructure (do we need to return anything here?) + err := l.manager.DeleteLoadBalancer(ctx, loadBalancerId) + + return err +} + +func (l *LB) UpdateService(ctx context.Context, svcNamespace, svcName string, nodes []loadbalancers.Node, svc *v1.Service, n []*v1.Node) error { + loadBalancerName := "" // TODO should UpdateService accept the load balancer name? 
+ return l.reconcileService(ctx, svc, n, loadBalancerName) +} + +func (l *LB) reconcileService(ctx context.Context, svc *v1.Service, n []*v1.Node, loadBalancerName string) error { + loadBalancerId := svc.Annotations[LoadBalancerIDAnnotation] + + pools := l.convertToPools(svc, n) + + loadBalancer, err := l.manager.ReconcileLoadBalancer(ctx, loadBalancerId, loadBalancerName, pools) + + if err != nil { + return err + } + + patch := client.MergeFrom(svc.DeepCopy()) + + svc.Annotations[LoadBalancerIDAnnotation] = loadBalancer.GetId() + svc.Annotations["equinix.com/loadbalancerMetro"] = l.manager.GetMetro() + + return l.client.Patch(ctx, svc, patch) +} + +func (l *LB) convertToPools(svc *v1.Service, nodes []*v1.Node) infrastructure.Pools { + pools := infrastructure.Pools{} + for _, svcPort := range svc.Spec.Ports { + targets := []infrastructure.Target{} + for _, node := range nodes { + for _, address := range node.Status.Addresses { + if address.Type == v1.NodeExternalIP { + targets = append(targets, infrastructure.Target{ + IP: address.Address, + Port: svcPort.NodePort, + }) + } + } + } + pools[svcPort.Port] = targets + } + + return pools +} +func (l *LB) GetLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service) (*v1.LoadBalancerStatus, bool, error) { + loadBalancerId := svc.Annotations[LoadBalancerIDAnnotation] + + if loadBalancerId != "" { + // TODO probably need to check if err is 404, maybe others? 
+ loadBalancer, err := l.manager.GetLoadBalancer(ctx, loadBalancerId) + + if err != nil { + return nil, false, fmt.Errorf("unable to retrieve load balancer: %w", err) + } + + if loadBalancer != nil { + var ingress []v1.LoadBalancerIngress + for _, ip := range loadBalancer.GetIps() { + ingress = append(ingress, v1.LoadBalancerIngress{ + IP: ip, + }) + } + + loadBalancerStatus := v1.LoadBalancerStatus{ + Ingress: ingress, + } + return &loadBalancerStatus, true, nil + } + } + + return nil, false, nil +} diff --git a/metal/loadbalancers/emlb/infrastructure/manager.go b/metal/loadbalancers/emlb/infrastructure/manager.go new file mode 100644 index 00000000..93a52edc --- /dev/null +++ b/metal/loadbalancers/emlb/infrastructure/manager.go @@ -0,0 +1,247 @@ +package infrastructure + +import ( + "context" + "fmt" + "net/http" + "reflect" + + lbaas "github.com/equinix/cloud-provider-equinix-metal/internal/lbaas/v1" +) + +const ProviderID = "loadpvd-gOB_-byp5ebFo7A3LHv2B" + +var LBMetros = map[string]string{ + "da": "lctnloc--uxs0GLeAELHKV8GxO_AI", + "ny": "lctnloc-Vy-1Qpw31mPi6RJQwVf9A", + "sv": "lctnloc-H5rl2M2VL5dcFmdxhbEKx", +} + +type Pools map[int32][]Target + +type Target struct { + IP string + Port int32 +} + +type Manager struct { + client *lbaas.APIClient + metro string + projectID string + tokenExchanger *TokenExchanger +} + +func NewManager(metalAPIKey, projectID, metro string) *Manager { + manager := &Manager{} + emlbConfig := lbaas.NewConfiguration() + + manager.client = lbaas.NewAPIClient(emlbConfig) + manager.tokenExchanger = &TokenExchanger{ + metalAPIKey: metalAPIKey, + client: manager.client.GetConfig().HTTPClient, + } + manager.projectID = projectID + manager.metro = metro + + return manager +} + +func (m *Manager) GetMetro() string { + return m.metro +} + +// Returns a Load Balancer object given an id +func (m *Manager) GetLoadBalancer(ctx context.Context, id string) (*lbaas.LoadBalancer, error) { + ctx = context.WithValue(ctx, lbaas.ContextOAuth2, 
m.tokenExchanger) + + LoadBalancer, _, err := m.client.LoadBalancersApi.GetLoadBalancer(ctx, id).Execute() + return LoadBalancer, err +} + +// Returns a list of Load Balancer objects in the project +func (m *Manager) GetLoadBalancers(ctx context.Context) (*lbaas.LoadBalancerCollection, error) { + ctx = context.WithValue(ctx, lbaas.ContextOAuth2, m.tokenExchanger) + + LoadBalancers, _, err := m.client.ProjectsApi.ListLoadBalancers(ctx, m.projectID).Execute() + return LoadBalancers, err +} + +func (m *Manager) GetPools(ctx context.Context) (*lbaas.LoadBalancerPoolCollection, error) { + ctx = context.WithValue(ctx, lbaas.ContextOAuth2, m.tokenExchanger) + + LoadBalancerPools, _, err := m.client.ProjectsApi.ListPools(ctx, m.projectID).Execute() + return LoadBalancerPools, err +} + +func (m *Manager) DeleteLoadBalancer(ctx context.Context, id string) error { + ctx = context.WithValue(ctx, lbaas.ContextOAuth2, m.tokenExchanger) + + lb, _, err := m.client.LoadBalancersApi.GetLoadBalancer(ctx, id).Execute() + + if err != nil { + return err + } + + for _, poolGroups := range lb.Pools { + for _, pool := range poolGroups { + _, err := m.client.PoolsApi.DeleteLoadBalancerPool(ctx, pool.GetId()).Execute() + if err != nil { + return err + } + } + + } + + // TODO lb, resp, err := + _, err = m.client.LoadBalancersApi.DeleteLoadBalancer(ctx, id).Execute() + return err +} + +func (m *Manager) ReconcileLoadBalancer(ctx context.Context, id, name string, pools Pools) (*lbaas.LoadBalancer, error) { + ctx = context.WithValue(ctx, lbaas.ContextOAuth2, m.tokenExchanger) + + if id == "" { + locationId, ok := LBMetros[m.metro] + if !ok { + return nil, fmt.Errorf("could not determine load balancer location for metro %v; valid values are %v", m.metro, reflect.ValueOf(LBMetros).MapKeys()) + } + + lbCreateRequest := lbaas.LoadBalancerCreate{ + Name: name, + LocationId: locationId, + ProviderId: ProviderID, + } + + lbCreated, _, err := m.client.ProjectsApi.CreateLoadBalancer(ctx, 
m.projectID).LoadBalancerCreate(lbCreateRequest).Execute() + if err != nil { + return nil, err + } + + id = lbCreated.GetId() + } + + lb, _, err := m.client.LoadBalancersApi.GetLoadBalancer(ctx, id).Execute() + if err != nil { + return nil, err + } + + existingPorts := map[int32]struct{}{} + existingPools := lb.GetPools() + // Update or delete existing targets + for i, port := range lb.GetPorts() { + portNumber := port.GetNumber() + targets, wanted := pools[portNumber] + if wanted { + // We have a pool for this port and we want to keep it + existingPorts[portNumber] = struct{}{} + + for _, existingPool := range existingPools[i] { + existingOrigins, _, err := m.client.PoolsApi.ListLoadBalancerPoolOrigins(ctx, existingPool.GetId()).Execute() + if err != nil { + return nil, err + } + + // TODO: can/should we be more granular here? figure out which to add and which to update? + + // Create new origins for all targets + for j, target := range targets { + _, _, err := m.createOrigin(ctx, existingPool.GetId(), existingPool.GetName(), int32(j), target) + if err != nil { + return nil, err + } + } + + // Delete old origins (some of which may be duplicates of the new ones) + for _, origin := range existingOrigins.GetOrigins() { + _, err := m.client.OriginsApi.DeleteLoadBalancerOrigin(ctx, origin.GetId()).Execute() + if err != nil { + return nil, err + } + } + } + } else { + // We have a pool for this port and we want to get rid of it + for _, existingPool := range existingPools[i] { + _, err := m.client.PoolsApi.DeleteLoadBalancerPool(ctx, existingPool.GetId()).Execute() + if err != nil { + return nil, err + } + } + _, err := m.client.PortsApi.DeleteLoadBalancerPort(ctx, port.GetId()).Execute() + if err != nil { + return nil, err + } + } + } + + // Create ports & pools for new targets + for externalPort, pool := range pools { + if _, exists := existingPorts[externalPort]; !exists { + poolID, err := m.createPool(ctx, getResourceName(lb.GetName(), "pool", externalPort), pool) + 
if err != nil { + return nil, err + } + + createPortRequest := lbaas.LoadBalancerPortCreate{ + Name: getResourceName(lb.GetName(), "port", externalPort), + Number: externalPort, + PoolIds: []string{poolID}, + } + + // TODO do we need the port ID for something? + _, _, err = m.client.PortsApi.CreateLoadBalancerPort(ctx, id).LoadBalancerPortCreate(createPortRequest).Execute() + if err != nil { + return nil, err + } + } + } + + lb, _, err = m.client.LoadBalancersApi.GetLoadBalancer(ctx, id).Execute() + + return lb, err +} + +func (m *Manager) createPool(ctx context.Context, name string, targets []Target) (string, error) { + createPoolRequest := lbaas.LoadBalancerPoolCreate{ + Name: name, + Protocol: lbaas.LoadBalancerPoolCreateProtocol{ + LoadBalancerPoolProtocol: lbaas.LOADBALANCERPOOLPROTOCOL_TCP.Ptr(), + }, + } + + poolCreated, _, err := m.client.ProjectsApi.CreatePool(ctx, m.projectID).LoadBalancerPoolCreate(createPoolRequest).Execute() + + if err != nil { + return "", err + } + + poolID := poolCreated.GetId() + + for i, target := range targets { + // TODO do we need the origin IDs for something? 
+ _, _, err := m.createOrigin(ctx, poolID, name, int32(i), target) + if err != nil { + return "", err + } + } + + return poolID, nil +} + +func (m *Manager) createOrigin(ctx context.Context, poolID, poolName string, number int32, target Target) (*lbaas.ResourceCreatedResponse, *http.Response, error) { + createOriginRequest := lbaas.LoadBalancerPoolOriginCreate{ + Name: getResourceName(poolName, "origin", number), + Target: target.IP, + PortNumber: lbaas.LoadBalancerPoolOriginPortNumber{ + Int32: &target.Port, + }, + Active: true, + PoolId: poolID, + } + return m.client.PoolsApi.CreateLoadBalancerPoolOrigin(ctx, poolID).LoadBalancerPoolOriginCreate(createOriginRequest).Execute() + +} + +func getResourceName(loadBalancerName, resourceType string, number int32) string { + return fmt.Sprintf("%v-%v-%v", loadBalancerName, resourceType, number) +} diff --git a/metal/loadbalancers/emlb/infrastructure/token_exchanger.go b/metal/loadbalancers/emlb/infrastructure/token_exchanger.go new file mode 100644 index 00000000..31fab909 --- /dev/null +++ b/metal/loadbalancers/emlb/infrastructure/token_exchanger.go @@ -0,0 +1,57 @@ +package infrastructure + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +type TokenExchanger struct { + metalAPIKey string + client *http.Client +} + +func (m *TokenExchanger) Token() (*oauth2.Token, error) { + tokenExchangeURL := "https://iam.metalctrl.io/api-keys/exchange" + tokenExchangeRequest, err := http.NewRequest("POST", tokenExchangeURL, nil) + if err != nil { + return nil, err + } + tokenExchangeRequest.Header.Add("Authorization", fmt.Sprintf("Bearer %v", m.metalAPIKey)) + + resp, err := m.client.Do(tokenExchangeRequest) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("token exchange request failed with status %v, body %v", resp.StatusCode, 
string(body)) + } + + token := oauth2.Token{} + err = json.Unmarshal(body, &token) + if err != nil { + // NOTE(review): removed debug fmt.Println calls that printed the + // raw response body and token to stdout on every parse failure; + // wrap the error with context instead. + return nil, fmt.Errorf("parsing token exchange response: %w", err) + } + + expiresIn := token.Extra("expires_in") + expiresInSeconds, ok := expiresIn.(float64) + if ok { + token.Expiry = time.Now().Add(time.Second * time.Duration(expiresInSeconds)) + } + + return &token, nil +} diff --git a/metal/loadbalancers/empty/empty.go b/metal/loadbalancers/empty/empty.go index c63e76af..ccc83492 100644 --- a/metal/loadbalancers/empty/empty.go +++ b/metal/loadbalancers/empty/empty.go @@ -5,23 +5,31 @@ import ( "context" "github.com/equinix/cloud-provider-equinix-metal/metal/loadbalancers" + v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" ) type LB struct{} +var _ loadbalancers.LB = (*LB)(nil) + func NewLB(k8sclient kubernetes.Interface, config string) *LB { return &LB{} } -func (l *LB) AddService(ctx context.Context, svcNamespace, svcName, ip string, nodes []loadbalancers.Node) error { +func (l *LB) AddService(ctx context.Context, svcNamespace, svcName, ip string, nodes []loadbalancers.Node, svc *v1.Service, n []*v1.Node, loadBalancerName string) error { return nil } -func (l *LB) RemoveService(ctx context.Context, svcNamespace, svcName, ip string) error { +func (l *LB) RemoveService(ctx context.Context, svcNamespace, svcName, ip string, svc *v1.Service) error { return nil } -func (l *LB) UpdateService(ctx context.Context, svcNamespace, svcName string, nodes []loadbalancers.Node) error { +func (l *LB) UpdateService(ctx context.Context, svcNamespace, svcName string, nodes []loadbalancers.Node, svc *v1.Service, n []*v1.Node) error { return nil } + +func (l *LB) GetLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service) (*v1.LoadBalancerStatus, bool, error) { + // TODO + return nil, false, nil +} diff --git a/metal/loadbalancers/interface.go b/metal/loadbalancers/interface.go index 887f13db..ea51b6a9 100644 ---
a/metal/loadbalancers/interface.go +++ b/metal/loadbalancers/interface.go @@ -2,13 +2,17 @@ package loadbalancers import ( "context" + + v1 "k8s.io/api/core/v1" ) type LB interface { // AddService add a service with the provided name and IP - AddService(ctx context.Context, svcNamespace, svcName, ip string, nodes []Node) error + AddService(ctx context.Context, svcNamespace, svcName, ip string, nodes []Node, svc *v1.Service, n []*v1.Node, loadBalancerName string) error // RemoveService remove service with the given IP - RemoveService(ctx context.Context, svcNamespace, svcName, ip string) error + RemoveService(ctx context.Context, svcNamespace, svcName, ip string, svc *v1.Service) error // UpdateService ensure that the nodes handled by the service are correct - UpdateService(ctx context.Context, svcNamespace, svcName string, nodes []Node) error + UpdateService(ctx context.Context, svcNamespace, svcName string, nodes []Node, svc *v1.Service, n []*v1.Node) error + // GetLoadBalancer implements cloudprovider.GetLoadBalancer + GetLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service) (*v1.LoadBalancerStatus, bool, error) } diff --git a/metal/loadbalancers/kubevip/kubevip.go b/metal/loadbalancers/kubevip/kubevip.go index 11fdf55d..252f2c40 100644 --- a/metal/loadbalancers/kubevip/kubevip.go +++ b/metal/loadbalancers/kubevip/kubevip.go @@ -5,23 +5,31 @@ import ( "context" "github.com/equinix/cloud-provider-equinix-metal/metal/loadbalancers" + v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" ) type LB struct{} +var _ loadbalancers.LB = (*LB)(nil) + func NewLB(k8sclient kubernetes.Interface, config string) *LB { return &LB{} } -func (l *LB) AddService(ctx context.Context, svcNamespace, svcName, ip string, nodes []loadbalancers.Node) error { +func (l *LB) AddService(ctx context.Context, svcNamespace, svcName, ip string, nodes []loadbalancers.Node, svc *v1.Service, n []*v1.Node, loadBalancerName string) error { return nil } -func (l *LB) RemoveService(ctx 
context.Context, svcNamespace, svcName, ip string) error { +func (l *LB) RemoveService(ctx context.Context, svcNamespace, svcName, ip string, svc *v1.Service) error { return nil } -func (l *LB) UpdateService(ctx context.Context, svcNamespace, svcName string, nodes []loadbalancers.Node) error { +func (l *LB) UpdateService(ctx context.Context, svcNamespace, svcName string, nodes []loadbalancers.Node, svc *v1.Service, n []*v1.Node) error { return nil } + +func (l *LB) GetLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service) (*v1.LoadBalancerStatus, bool, error) { + // TODO + return nil, false, nil +} diff --git a/metal/loadbalancers/metallb/metallb.go b/metal/loadbalancers/metallb/metallb.go index c6a4e704..eb03bb64 100644 --- a/metal/loadbalancers/metallb/metallb.go +++ b/metal/loadbalancers/metallb/metallb.go @@ -9,6 +9,7 @@ import ( "github.com/equinix/cloud-provider-equinix-metal/metal/loadbalancers" metallbv1beta1 "go.universe.tf/metallb/api/v1beta1" + v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" @@ -115,7 +116,7 @@ func NewLB(k8sclient kubernetes.Interface, config string, featureFlags url.Value return lb } -func (l *LB) AddService(ctx context.Context, svcNamespace, svcName, ip string, nodes []loadbalancers.Node) error { +func (l *LB) AddService(ctx context.Context, svcNamespace, svcName, ip string, nodes []loadbalancers.Node, svc *v1.Service, n []*v1.Node, loadBalancerName string) error { config := l.configurer if err := config.Get(ctx); err != nil { return fmt.Errorf("unable to add service: %w", err) @@ -131,7 +132,7 @@ func (l *LB) AddService(ctx context.Context, svcNamespace, svcName, ip string, n return nil } -func (l *LB) RemoveService(ctx context.Context, svcNamespace, svcName, ip string) error { +func (l *LB) RemoveService(ctx context.Context, svcNamespace, svcName, ip string, svc *v1.Service) error { config := l.configurer if err := config.Get(ctx); err != nil { return fmt.Errorf("unable to remove service: %w", 
err) @@ -156,7 +157,7 @@ func (l *LB) RemoveService(ctx context.Context, svcNamespace, svcName, ip string return nil } -func (l *LB) UpdateService(ctx context.Context, svcNamespace, svcName string, nodes []loadbalancers.Node) error { +func (l *LB) UpdateService(ctx context.Context, svcNamespace, svcName string, nodes []loadbalancers.Node, svc *v1.Service, n []*v1.Node) error { // ensure nodes are correct if err := l.updateNodes(ctx, svcNamespace, svcName, nodes); err != nil { return fmt.Errorf("failed to add nodes: %w", err) @@ -164,6 +165,11 @@ func (l *LB) UpdateService(ctx context.Context, svcNamespace, svcName string, no return nil } +func (l *LB) GetLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service) (*v1.LoadBalancerStatus, bool, error) { + // TODO + return nil, false, nil +} + // updateNodes add/delete one or more nodes with the provided name, srcIP, and bgp information func (l *LB) updateNodes(ctx context.Context, svcNamespace, svcName string, nodes []loadbalancers.Node) error { config := l.configurer