diff --git a/pkg/cmd/adm/register_member.go b/pkg/cmd/adm/register_member.go index 7bd6d5a..8a371e0 100644 --- a/pkg/cmd/adm/register_member.go +++ b/pkg/cmd/adm/register_member.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "os" "strings" "time" @@ -28,6 +29,7 @@ import ( "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/kubectl/pkg/scheme" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -60,11 +62,16 @@ type registerMemberArgs struct { hostNamespace string memberNamespace string nameSuffix string - useLetsEncrypt bool + skipTlsVerify *bool waitForReadyTimeout time.Duration } func NewRegisterMemberCmd() *cobra.Command { + return newRegisterMemberCmd(registerMemberCluster) +} + +// newRegisterMemberCmd builds the register member command. +func newRegisterMemberCmd(exec func(*extendedCommandContext, registerMemberArgs, restartFunc) error) *cobra.Command { commandArgs := registerMemberArgs{} cmd := &cobra.Command{ Use: "register-member", @@ -73,7 +80,40 @@ func NewRegisterMemberCmd() *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout) ctx := newExtendedCommandContext(term, client.DefaultNewClientFromRestConfig) - return registerMemberCluster(ctx, commandArgs, restart) + + // handle the deprecated --lets-encrypt flag first. If the new --insecure-skip-tls-verify is used, we use that value + // instead. + // + // Note on the handling of the default values. --lets-encrypt is true by default and corresponds to --insecure-skip-tls-verify=false. + // The default value of --insecure-skip-tls-verify is unset, meaning that we rely on the value inside kubeconfig (which defaults to false). + // + // We set up the handling such that --insecure-skip-tls-verify takes precedence over --lets-encrypt when explicitly set. This is so that + // the clients can decide about the proper value of --insecure-skip-tls-verify when they upgrade. + // + // The behavior is only different when --lets-encrypt is unspecified, --insecure-skip-tls-verify is unspecified and the value + // inside kubeconfig is explicitly true. But that is OK, we can even call that a feature :) + if cmd.Flags().Changed("lets-encrypt") { + val, err := cmd.Flags().GetBool("lets-encrypt") + if err != nil { + return err + } + + commandArgs.skipTlsVerify = pointer.Bool(!val) + } + + // we need special handling for the insecure-skip-tls-verify. If it is not set explicitly on the commandline + // we interpret it as "use the default in the kubeconfig" but we override whatever is in the kubeconfig with + // the provided explicit value. Therefore, we need to distinguish between not set, true and false. 
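// (editor's note: illustrative sketch only, not part of this changeset.)
// The precedence described in the comments above is split between this RunE handler
// (which folds --lets-encrypt and --insecure-skip-tls-verify into commandArgs.skipTlsVerify)
// and generateKubeConfig further down (which falls back to the kubeconfig value when the
// flags leave it nil). The standalone program below condenses the same rules into one
// function; effectiveSkipTLSVerify and its parameters are hypothetical names, not
// identifiers from this changeset.
package main

import "fmt"

// effectiveSkipTLSVerify restates the documented precedence:
//  1. an explicitly set --insecure-skip-tls-verify always wins,
//  2. otherwise an explicitly set --lets-encrypt=X maps to skip-verify = !X,
//  3. otherwise the value from the source kubeconfig is kept.
func effectiveSkipTLSVerify(letsEncrypt, skipTLSVerify *bool, kubeconfigValue bool) bool {
	if skipTLSVerify != nil {
		return *skipTLSVerify
	}
	if letsEncrypt != nil {
		return !*letsEncrypt
	}
	return kubeconfigValue
}

func main() {
	f, t := false, true
	fmt.Println(effectiveSkipTLSVerify(nil, nil, true)) // true:  inherited from the kubeconfig
	fmt.Println(effectiveSkipTLSVerify(&f, nil, true))  // true:  --lets-encrypt=false implies skipping verification
	fmt.Println(effectiveSkipTLSVerify(&f, &f, true))   // false: explicit --insecure-skip-tls-verify=false wins
	fmt.Println(effectiveSkipTLSVerify(&t, nil, true))  // false: --lets-encrypt=true implies verification
}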
+ if cmd.Flags().Changed("insecure-skip-tls-verify") { + val, err := cmd.Flags().GetBool("insecure-skip-tls-verify") + if err != nil { + return err + } + + commandArgs.skipTlsVerify = &val + } + + return exec(ctx, commandArgs, restart) }, } @@ -87,7 +127,8 @@ func NewRegisterMemberCmd() *cobra.Command { flags.MustMarkRequired(cmd, "host-kubeconfig") cmd.Flags().StringVar(&commandArgs.memberKubeConfig, "member-kubeconfig", "", "Path to the kubeconfig file of the member cluster") flags.MustMarkRequired(cmd, "member-kubeconfig") - cmd.Flags().BoolVar(&commandArgs.useLetsEncrypt, "lets-encrypt", true, "Whether to use Let's Encrypt certificates or rely on the cluster certs.") + cmd.Flags().Bool("lets-encrypt", true, "DEPRECATED, use the --insecure-skip-tls-verify flag.") + cmd.Flags().Bool("insecure-skip-tls-verify", false, "If true, the TLS verification errors are ignored during the connection to both host and member. If false, TLS verification is required to succeed. If not specified, the value is inherited from the respective kubeconfig files.") cmd.Flags().StringVar(&commandArgs.nameSuffix, "name-suffix", defaultNameSuffix, "The suffix to append to the member name used when there are multiple members in a single cluster.") cmd.Flags().StringVar(&commandArgs.hostNamespace, "host-ns", defaultHostNs, "The namespace of the host operator in the host cluster.") cmd.Flags().StringVar(&commandArgs.memberNamespace, "member-ns", defaultMemberNs, "The namespace of the member operator in the member cluster.") @@ -151,7 +192,7 @@ func (v *registerMemberValidated) addCluster(ctx *extendedCommandContext, source ctx.Printlnf("The API endpoint of the target cluster: %s", targetClusterDetails.apiEndpoint) // generate a token that will be used for the kubeconfig - sourceTargetRestClient, err := newRestClient(sourceClusterDetails.kubeConfig) + sourceTargetRestClient, err := newRestClient(sourceClusterDetails.kubeConfigPath) if err != nil { return err } @@ -159,17 +200,11 @@ func (v *registerMemberValidated) addCluster(ctx *extendedCommandContext, source if err != nil { return err } - // TODO drop this part together with the --lets-encrypt flag and start loading certificate from the kubeconfig as soon as ToolchainCluster controller supports loading certificates from kubeconfig - var insecureSkipTLSVerify bool - if v.args.useLetsEncrypt { - ctx.Printlnf("using let's encrypt certificate") - insecureSkipTLSVerify = false - } else { - ctx.Printlnf("setting insecure skip tls verification flags") - insecureSkipTLSVerify = true - } // generate the kubeconfig that can be used by target cluster to interact with the source cluster - generatedKubeConfig := generateKubeConfig(token, sourceClusterDetails.apiEndpoint, sourceClusterDetails.namespace, insecureSkipTLSVerify) + generatedKubeConfig, err := generateKubeConfig(token, sourceClusterDetails.namespace, v.args.skipTlsVerify, sourceClusterDetails.kubeConfig) + if err != nil { + return err + } generatedKubeConfigFormatted, err := clientcmd.Write(*generatedKubeConfig) if err != nil { return err @@ -248,8 +283,50 @@ func newRestClient(kubeConfigPath string) (*rest.RESTClient, error) { return restClient, nil } -func generateKubeConfig(token, apiEndpoint, namespace string, insecureSkipTLSVerify bool) *clientcmdapi.Config { - // create apiConfig based on the secret content +func generateKubeConfig(token, namespace string, insecureSkipTLSVerify *bool, sourceKubeConfig *clientcmdapi.Config) (*clientcmdapi.Config, error) { + sourceContext, present := 
sourceKubeConfig.Contexts[sourceKubeConfig.CurrentContext] + if !present { + return nil, errors.New("invalid kubeconfig file: current context not present") + } + sourceCluster, present := sourceKubeConfig.Clusters[sourceContext.Cluster] + if !present { + return nil, errors.New("invalid kubeconfig file: cluster definition not found") + } + sourceAuth, present := sourceKubeConfig.AuthInfos[sourceContext.AuthInfo] + if !present { + // can happen in tests, unlikely in practice :) + // The token auth will work like this though as long as there are no required client certs. + sourceAuth = clientcmdapi.NewAuthInfo() + } + + // let's only set what we need in the auth. If there are any certificate files, we need to copy + // their data into the data fields, because those files are not going to be available on the target + // cluster. + targetAuth := clientcmdapi.NewAuthInfo() + targetAuth.Token = token + if err := loadDataInto(sourceAuth.ClientCertificate, sourceAuth.ClientCertificateData, &targetAuth.ClientCertificateData); err != nil { + return nil, fmt.Errorf("failed to read the data of the client certificate: %w", err) + } + if err := loadDataInto(sourceAuth.ClientKey, sourceAuth.ClientKeyData, &targetAuth.ClientKeyData); err != nil { + return nil, fmt.Errorf("failed to read the data of the client key: %w", err) + } + + targetCluster := clientcmdapi.NewCluster() + targetCluster.Server = sourceCluster.Server + targetCluster.ProxyURL = sourceCluster.ProxyURL + // if there was an explicit value set for the insecureSkipTlsVerify, we use that instead of what's + // in the kubeconfig. + if insecureSkipTLSVerify != nil { + targetCluster.InsecureSkipTLSVerify = *insecureSkipTLSVerify + } else { + targetCluster.InsecureSkipTLSVerify = sourceCluster.InsecureSkipTLSVerify + } + if !targetCluster.InsecureSkipTLSVerify { + if err := loadDataInto(sourceCluster.CertificateAuthority, sourceCluster.CertificateAuthorityData, &targetCluster.CertificateAuthorityData); err != nil { + return nil, fmt.Errorf("failed to read the data of the certificate authority: %w", err) + } + } + return &clientcmdapi.Config{ Contexts: map[string]*clientcmdapi.Context{ "ctx": { @@ -260,17 +337,12 @@ func generateKubeConfig(token, apiEndpoint, namespace string, insecureSkipTLSVer }, CurrentContext: "ctx", Clusters: map[string]*clientcmdapi.Cluster{ - "cluster": { - Server: apiEndpoint, - InsecureSkipTLSVerify: insecureSkipTLSVerify, - }, + "cluster": targetCluster, }, AuthInfos: map[string]*clientcmdapi.AuthInfo{ - "auth": { - Token: token, - }, + "auth": targetAuth, }, - } + }, nil } // waitForToolchainClusterSA waits for the toolchaincluster service account to be present @@ -340,10 +412,11 @@ func getToolchainClustersWithHostname(ctx context.Context, cl runtimeclient.Clie type clusterData struct { client runtimeclient.Client + kubeConfig *clientcmdapi.Config apiEndpoint string namespace string toolchainClusterName string - kubeConfig string + kubeConfigPath string } type registerMemberValidated struct { @@ -354,8 +427,7 @@ type registerMemberValidated struct { errors []string } -func getApiEndpointAndClient(ctx *extendedCommandContext, kubeConfigPath string) (apiEndpoint string, cl runtimeclient.Client, err error) { - var kubeConfig *clientcmdapi.Config +func getApiEndpointAndClient(ctx *extendedCommandContext, kubeConfigPath string) (apiEndpoint string, cl runtimeclient.Client, kubeConfig *clientcmdapi.Config, err error) { var clientConfig *rest.Config kubeConfig, err = clientcmd.LoadFromFile(kubeConfigPath) @@ -376,12 +448,12 @@ 
func getApiEndpointAndClient(ctx *extendedCommandContext, kubeConfigPath string) } func validateArgs(ctx *extendedCommandContext, args registerMemberArgs) (*registerMemberValidated, error) { - hostApiEndpoint, hostClusterClient, err := getApiEndpointAndClient(ctx, args.hostKubeConfig) + hostApiEndpoint, hostClusterClient, hostKubeConfig, err := getApiEndpointAndClient(ctx, args.hostKubeConfig) if err != nil { return nil, err } - memberApiEndpoint, memberClusterClient, err := getApiEndpointAndClient(ctx, args.memberKubeConfig) + memberApiEndpoint, memberClusterClient, memberKubeConfig, err := getApiEndpointAndClient(ctx, args.memberKubeConfig) if err != nil { return nil, err } @@ -431,17 +503,19 @@ func validateArgs(ctx *extendedCommandContext, args registerMemberArgs) (*regist args: args, hostClusterData: clusterData{ client: hostClusterClient, + kubeConfig: hostKubeConfig, apiEndpoint: hostApiEndpoint, namespace: args.hostNamespace, toolchainClusterName: hostToolchainClusterName, - kubeConfig: args.hostKubeConfig, + kubeConfigPath: args.hostKubeConfig, }, memberClusterData: clusterData{ client: memberClusterClient, + kubeConfig: memberKubeConfig, apiEndpoint: memberApiEndpoint, namespace: args.memberNamespace, toolchainClusterName: memberToolchainClusterName, - kubeConfig: args.memberKubeConfig, + kubeConfigPath: args.memberKubeConfig, }, warnings: warnings, errors: errors, @@ -524,7 +598,7 @@ func (v *registerMemberValidated) getRegMemConfigFlagsAndClient(_ *clicontext.Co kubeConfigFlags.Namespace = &v.hostClusterData.namespace kubeConfigFlags.APIServer = &v.hostClusterData.apiEndpoint - kubeConfigFlags.KubeConfig = &v.hostClusterData.kubeConfig + kubeConfigFlags.KubeConfig = &v.hostClusterData.kubeConfigPath return kubeConfigFlags, v.hostClusterData.client, nil } @@ -537,3 +611,16 @@ func findToolchainClusterForMember(allToolchainClusters []toolchainv1alpha1.Tool } return nil } + +func loadDataInto(path string, source []byte, target *[]byte) error { + if path != "" && len(source) == 0 { + data, err := os.ReadFile(path) + if err != nil { + return err + } + *target = data + } else { + *target = source + } + return nil +} diff --git a/pkg/cmd/adm/register_member_test.go b/pkg/cmd/adm/register_member_test.go index 3ba43f1..0f8f336 100644 --- a/pkg/cmd/adm/register_member_test.go +++ b/pkg/cmd/adm/register_member_test.go @@ -3,6 +3,7 @@ package adm import ( "context" "fmt" + "os" "path/filepath" "strings" "testing" @@ -20,11 +21,14 @@ import ( "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd/api" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/util/homedir" + "k8s.io/utils/pointer" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -56,6 +60,52 @@ func TestRegisterMember(t *testing.T) { memberToolchainClusterName, err := utils.GetToolchainClusterName(string(configuration.Member), "https://cool-server.com", "") require.NoError(t, err) + t.Run("commandline parsing", func(t *testing.T) { + testWithArgs := func(t *testing.T, args []string) registerMemberArgs { + t.Helper() + + var parsedArgs *registerMemberArgs + cmd := newRegisterMemberCmd(func(_ *extendedCommandContext, parsed registerMemberArgs, _ restartFunc) error { + parsedArgs = &parsed + return nil + }) + + cmd.SetErr(&strings.Builder{}) + cmd.SetOut(&strings.Builder{}) + 
cmd.SetArgs(args) + + require.NoError(t, cmd.Execute()) + require.NotNil(t, parsedArgs) + + return *parsedArgs + } + + t.Run("insecureSkipTlsVerify not specified", func(t *testing.T) { + args := testWithArgs(t, []string{"--host-kubeconfig=h", "--member-kubeconfig", "m"}) + assert.Nil(t, args.skipTlsVerify) + assert.Equal(t, "h", args.hostKubeConfig) + assert.Equal(t, "m", args.memberKubeConfig) + }) + + t.Run("insecureSkipTlsVerify false", func(t *testing.T) { + args := testWithArgs(t, []string{"--host-kubeconfig=h", "--member-kubeconfig", "m", "--insecure-skip-tls-verify=false"}) + require.NotNil(t, args.skipTlsVerify) + assert.False(t, *args.skipTlsVerify) + }) + + t.Run("insecureSkipTlsVerify true", func(t *testing.T) { + args := testWithArgs(t, []string{"--host-kubeconfig=h", "--member-kubeconfig", "m", "--insecure-skip-tls-verify"}) + require.NotNil(t, args.skipTlsVerify) + assert.True(t, *args.skipTlsVerify) + }) + + t.Run("lets-encrypt false", func(t *testing.T) { + args := testWithArgs(t, []string{"--host-kubeconfig=h", "--member-kubeconfig", "m", "--lets-encrypt=false"}) + require.NotNil(t, args.skipTlsVerify) + assert.True(t, *args.skipTlsVerify) + }) + }) + t.Run("produces valid example SPC", func(t *testing.T) { // given term := NewFakeTerminalWithResponse("Y") @@ -172,7 +222,7 @@ func TestRegisterMember(t *testing.T) { assert.Contains(t, term.Output(), "kind: SpaceProvisionerConfig") }) - t.Run("single toolchain in cluster with --lets-encrypt", func(t *testing.T) { + t.Run("single toolchain in cluster with --insecure-skip-tls-verify", func(t *testing.T) { // given term := NewFakeTerminalWithResponse("Y") newClient, fakeClient := newFakeClientsFromRestConfig(t, &toolchainClusterMemberSa, &toolchainClusterHostSa) @@ -516,7 +566,155 @@ func TestRegisterMember(t *testing.T) { require.NoError(t, err) assert.Equal(t, 1, called) }) +} + +func TestCreateKubeConfig(t *testing.T) { + t.Run("--insecureSkipTlsVerify", func(t *testing.T) { + t.Run("overrides true with false", func(t *testing.T) { + // given + hostKubeconfigSecure := HostKubeConfig() + hostKubeconfigSecure.Clusters["host"].InsecureSkipTLSVerify = true + + // when + config, err := generateKubeConfig("token", "ns", pointer.Bool(false), hostKubeconfigSecure) + require.NoError(t, err) + + // then + assert.False(t, config.Clusters["cluster"].InsecureSkipTLSVerify) + }) + t.Run("overrides false with true", func(t *testing.T) { + // given + hostKubeconfigSecure := HostKubeConfig() + hostKubeconfigSecure.Clusters["host"].InsecureSkipTLSVerify = false + + // when + config, err := generateKubeConfig("token", "ns", pointer.Bool(true), hostKubeconfigSecure) + require.NoError(t, err) + + // then + assert.True(t, config.Clusters["cluster"].InsecureSkipTLSVerify) + }) + t.Run("leaves true from kubeconfig when undefined", func(t *testing.T) { + // given + hostKubeconfigSecure := HostKubeConfig() + hostKubeconfigSecure.Clusters["host"].InsecureSkipTLSVerify = true + + // when + config, err := generateKubeConfig("token", "ns", nil, hostKubeconfigSecure) + require.NoError(t, err) + + // then + assert.True(t, config.Clusters["cluster"].InsecureSkipTLSVerify) + }) + t.Run("leaves false from kubeconfig when undefined", func(t *testing.T) { + // given + hostKubeconfigSecure := HostKubeConfig() + hostKubeconfigSecure.Clusters["host"].InsecureSkipTLSVerify = false + + // when + config, err := generateKubeConfig("token", "ns", nil, hostKubeconfigSecure) + require.NoError(t, err) + + // then + assert.False(t, 
config.Clusters["cluster"].InsecureSkipTLSVerify) + }) + }) + t.Run("other auth methods cleared", func(t *testing.T) { + // given + kubeConfig := HostKubeConfig() + + auth := &clientcmdapi.AuthInfo{ + ClientCertificate: "client-certificate", + ClientCertificateData: []byte("client-certificate-data"), + ClientKey: "client-key", + ClientKeyData: []byte("client-key-data"), + Token: "", + TokenFile: "token-file", + Impersonate: "root", + ImpersonateUID: "1", + ImpersonateGroups: []string{"root"}, + ImpersonateUserExtra: map[string][]string{}, + Username: "johndoe", + Password: "123456", + AuthProvider: &clientcmdapi.AuthProviderConfig{ + Name: "gimme-root", + Config: map[string]string{}, + }, + Exec: &clientcmdapi.ExecConfig{}, + Extensions: map[string]runtime.Object{}, + } + kubeConfig.AuthInfos[kubeConfig.Contexts[kubeConfig.CurrentContext].AuthInfo] = auth + + // when + config, err := generateKubeConfig("token", "ns", nil, kubeConfig) + require.NoError(t, err) + + // then + generatedAuth := config.AuthInfos[config.Contexts[config.CurrentContext].AuthInfo] + + assert.Equal(t, []byte("client-certificate-data"), generatedAuth.ClientCertificateData) + assert.Equal(t, []byte("client-key-data"), generatedAuth.ClientKeyData) + assert.Equal(t, "token", generatedAuth.Token) + assert.Empty(t, generatedAuth.ClientKey) + assert.Empty(t, generatedAuth.ClientCertificate) + assert.Empty(t, generatedAuth.TokenFile) + assert.Empty(t, generatedAuth.Impersonate) + assert.Empty(t, generatedAuth.ImpersonateUID) + assert.Empty(t, generatedAuth.ImpersonateGroups) + assert.Empty(t, generatedAuth.ImpersonateUserExtra) + assert.Empty(t, generatedAuth.Username) + assert.Empty(t, generatedAuth.Password) + assert.Nil(t, generatedAuth.AuthProvider) + assert.Nil(t, generatedAuth.Exec) + }) + + t.Run("namespace overridden", func(t *testing.T) { + // given + kubeConfig := HostKubeConfig() + require.Equal(t, "toolchain-host-operator", kubeConfig.Contexts[kubeConfig.CurrentContext].Namespace) + + // when + config, err := generateKubeConfig("token", "ns", nil, kubeConfig) + require.NoError(t, err) + + // then + generatedContext := config.Contexts[config.CurrentContext] + + assert.Equal(t, "ns", generatedContext.Namespace) + }) + + t.Run("reads referenced files in kubeconfig to appropriate data fields", func(t *testing.T) { + // given + f, err := os.CreateTemp("", "ref-test") + require.NoError(t, err) + require.NoError(t, os.WriteFile(f.Name(), []byte("data"), 0)) + defer os.Remove(f.Name()) + + kubeConfig := HostKubeConfig() + kubeConfig.Clusters["host"].CertificateAuthority = f.Name() + kubeConfig.AuthInfos["auth"] = clientcmdapi.NewAuthInfo() + kubeConfig.AuthInfos["auth"].ClientCertificate = f.Name() + kubeConfig.AuthInfos["auth"].ClientKey = f.Name() + + kubeConfig.Contexts[kubeConfig.CurrentContext].AuthInfo = "auth" + + // when + config, err := generateKubeConfig("token", "ns", nil, kubeConfig) + require.NoError(t, err) + + // then + context := config.Contexts[config.CurrentContext] + generatedCluster := config.Clusters[context.Cluster] + generatedAuth := config.AuthInfos[context.AuthInfo] + + assert.Equal(t, []byte("data"), generatedCluster.CertificateAuthorityData) + assert.Empty(t, generatedCluster.CertificateAuthority) + assert.Equal(t, []byte("data"), generatedAuth.ClientKeyData) + assert.Empty(t, generatedAuth.ClientKey) + assert.Equal(t, []byte("data"), generatedAuth.ClientCertificateData) + assert.Empty(t, generatedAuth.ClientCertificate) + }) } func mockCreateToolchainClusterInNamespaceWithReadyCondition(t 
*testing.T, fakeClient *test.FakeClient, namespace string) { @@ -575,7 +773,7 @@ func verifyToolchainClusterSecret(t *testing.T, fakeClient *test.FakeClient, saN require.NoError(t, err) require.False(t, api.IsConfigEmpty(apiConfig)) assert.Equal(t, "https://cool-server.com", apiConfig.Clusters["cluster"].Server) - assert.True(t, apiConfig.Clusters["cluster"].InsecureSkipTLSVerify) // by default the insecure flag is being set + assert.False(t, apiConfig.Clusters["cluster"].InsecureSkipTLSVerify) // by default the insecure flag is not being set assert.Equal(t, "cluster", apiConfig.Contexts["ctx"].Cluster) assert.Equal(t, ctxNamespace, apiConfig.Contexts["ctx"].Namespace) assert.NotEmpty(t, apiConfig.AuthInfos["auth"].Token) @@ -618,20 +816,20 @@ func extractExampleSPCFromOutput(t *testing.T, output string) toolchainv1alpha1. return spc } -func newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig string, useLetsEncrypt bool) registerMemberArgs { +func newRegisterMemberArgsWith(hostKubeconfig, memberKubeconfig string, skipTlsVerify bool) registerMemberArgs { args := defaultRegisterMemberArgs() args.hostKubeConfig = hostKubeconfig args.memberKubeConfig = memberKubeconfig - args.useLetsEncrypt = useLetsEncrypt + args.skipTlsVerify = &skipTlsVerify args.waitForReadyTimeout = 1 * time.Second return args } -func newRegisterMemberArgsWithSuffix(hostKubeconfig, memberKubeconfig string, useLetsEncrypt bool, nameSuffix string) registerMemberArgs { +func newRegisterMemberArgsWithSuffix(hostKubeconfig, memberKubeconfig string, skipTlsVerify bool, nameSuffix string) registerMemberArgs { args := defaultRegisterMemberArgs() args.hostKubeConfig = hostKubeconfig args.memberKubeConfig = memberKubeconfig - args.useLetsEncrypt = useLetsEncrypt + args.skipTlsVerify = &skipTlsVerify args.nameSuffix = nameSuffix return args } @@ -649,7 +847,7 @@ func defaultRegisterMemberArgs() registerMemberArgs { args.memberKubeConfig = defaultKubeConfigPath args.hostNamespace = "toolchain-host-operator" args.memberNamespace = "toolchain-member-operator" - args.useLetsEncrypt = true + args.skipTlsVerify = pointer.Bool(true) return args } diff --git a/pkg/test/config.go b/pkg/test/config.go index 30fa7e1..6286958 100644 --- a/pkg/test/config.go +++ b/pkg/test/config.go @@ -165,6 +165,11 @@ func SetFileConfig(t *testing.T, clusterDefs ...ClusterDefinitionWithName) { func PersistKubeConfigFile(t *testing.T, config *clientcmdapi.Config) string { tmpFile, err := os.CreateTemp(os.TempDir(), "kubeconfig-*.yaml") require.NoError(t, err) + + t.Cleanup(func() { + require.NoError(t, os.Remove(tmpFile.Name())) + }) + // it is important to use clientcmd.WriteToFile instead of just YAML marshalling, // because clientcmd uses custom encoders and decoders for the config object. 
require.NoError(t, clientcmd.WriteToFile(*config, tmpFile.Name())) diff --git a/resources/roles/host.yaml b/resources/roles/host.yaml index bfe9fda..192dd0b 100644 --- a/resources/roles/host.yaml +++ b/resources/roles/host.yaml @@ -12,16 +12,24 @@ objects: provider: ksctl rules: - apiGroups: - - apps + - "apps" resources: - - deployments + - "deployments" verbs: - "get" - "list" - "patch" - "update" - "watch" + - apiGroups: + - "" + resources: + - "pods" + verbs: - "delete" + - "get" + - "list" + - "watch" - kind: Role apiVersion: rbac.authorization.k8s.io/v1 @@ -287,3 +295,22 @@ objects: - "get" - "list" - "delete" + - apiGroups: + - "apps" + resources: + - "deployments" + verbs: + - "get" + - "list" + - "patch" + - "update" + - "watch" + - apiGroups: + - "" + resources: + - "pods" + verbs: + - "delete" + - "get" + - "list" + - "watch" diff --git a/resources/roles/member.yaml b/resources/roles/member.yaml index 735212d..70bdef2 100644 --- a/resources/roles/member.yaml +++ b/resources/roles/member.yaml @@ -12,16 +12,24 @@ objects: provider: ksctl rules: - apiGroups: - - apps + - "apps" resources: - - deployments + - "deployments" verbs: - "get" - "list" - "patch" - "update" - "watch" + - apiGroups: + - "" + resources: + - "pods" + verbs: - "delete" + - "get" + - "list" + - "watch" - kind: Role apiVersion: rbac.authorization.k8s.io/v1