From 057b1bcb90d7df205b1d2880167a5fa94c2248c7 Mon Sep 17 00:00:00 2001
From: Matous Jobanek
Date: Mon, 18 Mar 2024 10:32:50 +0100
Subject: [PATCH] rename sandbox-config.yaml to kubesaw-admins.yaml (#9)

---
 pkg/assets/assets.go                     |   6 +-
 pkg/assets/assets_test.go                |  20 +-
 pkg/assets/sandbox_config.go             |   2 +-
 pkg/cmd/adm/generate_cli_configs.go      |  46 ++--
 pkg/cmd/adm/generate_cli_configs_test.go |  36 +--
 pkg/cmd/adm/setup.go                     |  32 +--
 pkg/cmd/adm/setup_cluster.go             |   4 +-
 pkg/cmd/adm/setup_cluster_test.go        |  16 +-
 pkg/cmd/adm/setup_mock_test.go           |  14 +-
 pkg/cmd/adm/setup_permissions_test.go    |   4 +-
 pkg/cmd/adm/setup_roles_manager_test.go  |   6 +-
 pkg/cmd/adm/setup_test.go                |  26 +--
 pkg/test/environment_config.go           |   4 +-
 .../kubesaw-admins.yaml                  | 216 ++++++++++++++++++
 14 files changed, 324 insertions(+), 108 deletions(-)
 create mode 100644 test-resources/dummy.openshiftapps.com/kubesaw-admins.yaml

diff --git a/pkg/assets/assets.go b/pkg/assets/assets.go
index e58fb99..73049c0 100644
--- a/pkg/assets/assets.go
+++ b/pkg/assets/assets.go
@@ -31,12 +31,12 @@ func init() {
 
 type FilenameMatcher func(string) bool
 
-func GetSandboxEnvironmentConfig(sandboxConfigFile string) (*SandboxEnvironmentConfig, error) {
-	content, err := os.ReadFile(sandboxConfigFile)
+func GetKubeSawAdminsConfig(kubeSawAdminsFile string) (*KubeSawAdmins, error) {
+	content, err := os.ReadFile(kubeSawAdminsFile)
 	if err != nil {
 		return nil, err
 	}
-	config := &SandboxEnvironmentConfig{}
+	config := &KubeSawAdmins{}
 	if err := yaml.Unmarshal(content, config); err != nil {
 		return nil, err
 	}
diff --git a/pkg/assets/assets_test.go b/pkg/assets/assets_test.go
index e0b341a..90a0bce 100644
--- a/pkg/assets/assets_test.go
+++ b/pkg/assets/assets_test.go
@@ -92,32 +92,32 @@ func TestGetRoles(t *testing.T) {
 	}
 }
 
-func TestGetSandboxEnvironmentConfig(t *testing.T) {
+func TestGetKubeSawAdmins(t *testing.T) {
 	// given
 	require.NoError(t, client.AddToScheme())
 
 	// when
-	sandboxEnvConfig, err := assets.GetSandboxEnvironmentConfig("../../test-resources/dummy.openshiftapps.com/sandbox-config.yaml")
+	kubeSawAdmins, err := assets.GetKubeSawAdminsConfig("../../test-resources/dummy.openshiftapps.com/kubesaw-admins.yaml")
 
 	// then
 	require.NoError(t, err)
-	assert.NotEmpty(t, sandboxEnvConfig)
-	assert.NotEmpty(t, sandboxEnvConfig.Clusters.Host.API)
-	assert.NotEmpty(t, sandboxEnvConfig.Clusters.Members)
+	assert.NotEmpty(t, kubeSawAdmins)
+	assert.NotEmpty(t, kubeSawAdmins.Clusters.Host.API)
+	assert.NotEmpty(t, kubeSawAdmins.Clusters.Members)
 
-	for _, member := range sandboxEnvConfig.Clusters.Members {
+	for _, member := range kubeSawAdmins.Clusters.Members {
 		assert.NotEmpty(t, member.Name)
 		assert.NotEmpty(t, member.API)
 	}
-	assert.NotEmpty(t, sandboxEnvConfig.ServiceAccounts)
-	for _, sa := range sandboxEnvConfig.ServiceAccounts {
+	assert.NotEmpty(t, kubeSawAdmins.ServiceAccounts)
+	for _, sa := range kubeSawAdmins.ServiceAccounts {
 		assert.NotEmpty(t, sa.Name)
 		verifyNamespacePermissions(t, sa.Name, sa.PermissionsPerClusterType)
 	}
-	assert.NotEmpty(t, sandboxEnvConfig.Users)
-	for _, user := range sandboxEnvConfig.Users {
+	assert.NotEmpty(t, kubeSawAdmins.Users)
+	for _, user := range kubeSawAdmins.Users {
 		assert.NotEmpty(t, user.Name)
 		assert.NotEmpty(t, user.ID)
 		verifyNamespacePermissions(t, user.Name, user.PermissionsPerClusterType)
diff --git a/pkg/assets/sandbox_config.go b/pkg/assets/sandbox_config.go
index 222886f..9003010 100644
--- a/pkg/assets/sandbox_config.go
+++ b/pkg/assets/sandbox_config.go
@@ -1,6 +1,6 @@
 package assets
 
-type SandboxEnvironmentConfig struct {
+type KubeSawAdmins struct {
 	Clusters        Clusters         `yaml:"clusters"`
 	ServiceAccounts []ServiceAccount `yaml:"serviceAccounts"`
 	Users           []User           `yaml:"users"`
diff --git a/pkg/cmd/adm/generate_cli_configs.go b/pkg/cmd/adm/generate_cli_configs.go
index d0667dd..934755b 100644
--- a/pkg/cmd/adm/generate_cli_configs.go
+++ b/pkg/cmd/adm/generate_cli_configs.go
@@ -27,7 +27,7 @@ import (
 )
 
 type generateFlags struct {
-	sandboxConfigFile, outDir string
+	kubeSawAdminsFile, outDir string
 	dev                       bool
 	kubeconfigs               []string
 }
@@ -35,17 +35,17 @@
 func NewGenerateCliConfigsCmd() *cobra.Command {
 	f := generateFlags{}
 	command := &cobra.Command{
-		Use:   "generate-cli-configs --sandbox-config=",
+		Use:   "generate-cli-configs --kubesaw-admins=",
 		Short: "Generate ksctl.yaml files",
-		Long:  `Generate ksctl.yaml files, that is used by ksctl, for every ServiceAccount defined in the given sandbox-config.yaml file`,
+		Long:  `Generate ksctl.yaml files, which are used by ksctl, for every ServiceAccount defined in the given kubesaw-admins.yaml file`,
 		Args:  cobra.ExactArgs(0),
 		RunE: func(cmd *cobra.Command, _ []string) error {
 			term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout)
 			return generate(term, f, runtimeclient.New, DefaultNewExternalClientFromConfig)
 		},
 	}
 
-	command.Flags().StringVarP(&f.sandboxConfigFile, "sandbox-config", "c", "", "Use the given sandbox config file")
-	flags.MustMarkRequired(command, "sandbox-config")
+	command.Flags().StringVarP(&f.kubeSawAdminsFile, "kubesaw-admins", "c", "", "Use the given kubesaw-admins.yaml file")
+	flags.MustMarkRequired(command, "kubesaw-admins")
 	command.Flags().BoolVarP(&f.dev, "dev", "d", false, "If running in a dev cluster")
 
 	configDirPath := fmt.Sprintf("%s/src/github.com/kubesaw/ksctl/out/config", os.Getenv("GOPATH"))
@@ -80,25 +80,25 @@ func generate(term ioutils.Terminal, flags generateFlags, newClient NewClientFro
 		return err
 	}
 
-	// Get the unmarshalled version of sandbox-config.yaml
-	sandboxEnvConfig, err := assets.GetSandboxEnvironmentConfig(flags.sandboxConfigFile)
+	// Get the unmarshalled version of kubesaw-admins.yaml
+	kubeSawAdmins, err := assets.GetKubeSawAdminsConfig(flags.kubeSawAdminsFile)
 	if err != nil {
-		return errs.Wrapf(err, "unable get sandbox-config.yaml file from %s", flags.sandboxConfigFile)
+		return errs.Wrapf(err, "unable to get kubesaw-admins.yaml file from %s", flags.kubeSawAdminsFile)
 	}
 	ctx := &generateContext{
-		Terminal:         term,
-		newClient:        newClient,
-		newRESTClient:    newExternalClient,
-		sandboxEnvConfig: sandboxEnvConfig,
-		kubeconfigPaths:  flags.kubeconfigs,
+		Terminal:        term,
+		newClient:       newClient,
+		newRESTClient:   newExternalClient,
+		kubeSawAdmins:   kubeSawAdmins,
+		kubeconfigPaths: flags.kubeconfigs,
 	}
 
 	// sandboxUserConfigsPerName contains all sandboxUserConfig objects that will be marshalled to ksctl.yaml files
 	sandboxUserConfigsPerName := map[string]configuration.SandboxUserConfig{}
 
-	// use host API either from the sandbox-config.yaml or from kubeconfig if --dev flag was used
-	hostSpec := sandboxEnvConfig.Clusters.Host
+	// use host API either from the kubesaw-admins.yaml or from kubeconfig if --dev flag was used
+	hostSpec := kubeSawAdmins.Clusters.Host
 	if flags.dev {
 		term.Printlnf("Using kubeconfig located at '%s' for retrieving the host cluster information...", flags.kubeconfigs[0])
 		kubeconfig, err := clientcmd.BuildConfigFromFlags("", flags.kubeconfigs[0])
@@ -113,10 +113,10 @@
 		return err
 	}
 
-	// and then based on the data from sandbox-config.yaml files generate also all members
-	for _, member := range sandboxEnvConfig.Clusters.Members {
+	// and then, based on the data from the kubesaw-admins.yaml file, generate the configs for all members as well
+	for _, member := range kubeSawAdmins.Clusters.Members {
 
-		// use either the member API from sandbox-config.yaml file or use the same as API as for host if --dev flag was used
+		// use either the member API from the kubesaw-admins.yaml file or the same API as for the host if the --dev flag was used
 		memberSpec := member.ClusterConfig
 		if flags.dev {
 			memberSpec.API = hostSpec.API
@@ -159,10 +159,10 @@ func writeSandboxUserConfigs(term ioutils.Terminal, configDirPath string, sandbo
 
 type generateContext struct {
 	ioutils.Terminal
-	newClient        NewClientFromConfigFunc
-	newRESTClient    NewRESTClientFromConfigFunc
-	sandboxEnvConfig *assets.SandboxEnvironmentConfig
-	kubeconfigPaths  []string
+	newClient       NewClientFromConfigFunc
+	newRESTClient   NewRESTClientFromConfigFunc
+	kubeSawAdmins   *assets.KubeSawAdmins
+	kubeconfigPaths []string
 }
 
 // contains tokens mapped by SA name
@@ -185,7 +185,7 @@ func generateForCluster(ctx *generateContext, clusterType configuration.ClusterT
 
 	tokenPerSAName := tokenPerSA{}
 
-	for _, sa := range ctx.sandboxEnvConfig.ServiceAccounts {
+	for _, sa := range ctx.kubeSawAdmins.ServiceAccounts {
 		for saClusterType := range sa.PermissionsPerClusterType {
 			if saClusterType != clusterType.String() {
 				continue
diff --git a/pkg/cmd/adm/generate_cli_configs_test.go b/pkg/cmd/adm/generate_cli_configs_test.go
index d17a3f5..47921e4 100644
--- a/pkg/cmd/adm/generate_cli_configs_test.go
+++ b/pkg/cmd/adm/generate_cli_configs_test.go
@@ -30,7 +30,7 @@ import (
 func TestGenerateCliConfigs(t *testing.T) {
 	// given
 	require.NoError(t, client.AddToScheme())
-	sandboxEnvConfig := NewSandboxEnvironmentConfig(
+	kubeSawAdmins := NewKubeSawAdmins(
 		Clusters(HostServerAPI).
 			AddMember("member1", Member1ServerAPI).
AddMember("member2", Member2ServerAPI), @@ -43,7 +43,7 @@ func TestGenerateCliConfigs(t *testing.T) { MemberRoleBindings("toolchain-member-operator", Role("restart=restart-deployment"), ClusterRole("restart=edit")))), Users()) - sandboxEnvConfigContent, err := yaml.Marshal(sandboxEnvConfig) + kubeSawAdminsContent, err := yaml.Marshal(kubeSawAdmins) require.NoError(t, err) kubeconfigFiles := createKubeconfigFiles(t, sandboxKubeconfigContent, sandboxKubeconfigContentMember2) @@ -61,9 +61,9 @@ func TestGenerateCliConfigs(t *testing.T) { ) t.Cleanup(gock.OffAll) - configFile := createSandboxConfigFile(t, "sandbox.host.openshiftapps.com", sandboxEnvConfigContent) + configFile := createKubeSawAdminsFile(t, "kubesaw.host.openshiftapps.com", kubeSawAdminsContent) - _, newClient, newExternalClient := newFakeClientFuncs(t, sandboxEnvConfig.Clusters) + _, newClient, newExternalClient := newFakeClientFuncs(t, kubeSawAdmins.Clusters) term := NewFakeTerminalWithResponse("Y") term.Tee(os.Stdout) @@ -72,7 +72,7 @@ func TestGenerateCliConfigs(t *testing.T) { // given tempDir, err := os.MkdirTemp("", "sandbox-sre-out-") require.NoError(t, err) - flags := generateFlags{kubeconfigs: kubeconfigFiles, sandboxConfigFile: configFile, outDir: tempDir} + flags := generateFlags{kubeconfigs: kubeconfigFiles, kubeSawAdminsFile: configFile, outDir: tempDir} // when err = generate(term, flags, newClient, newExternalClient) @@ -85,7 +85,7 @@ func TestGenerateCliConfigs(t *testing.T) { t.Run("when there SAs are defined for host cluster only", func(t *testing.T) { // given - saInHostOnly := NewSandboxEnvironmentConfig( + saInHostOnly := NewKubeSawAdmins( Clusters(HostServerAPI). AddMember("member1", Member1ServerAPI). AddMember("member2", Member2ServerAPI), @@ -95,12 +95,12 @@ func TestGenerateCliConfigs(t *testing.T) { Sa("bob", "", HostRoleBindings("toolchain-host-operator", Role("restart=restart-deployment"), ClusterRole("restart=edit")))), Users()) - sandboxEnvConfigContent, err := yaml.Marshal(saInHostOnly) + kubeSawAdminsContent, err := yaml.Marshal(saInHostOnly) require.NoError(t, err) - configFile := createSandboxConfigFile(t, "sandbox.host.openshiftapps.com", sandboxEnvConfigContent) + configFile := createKubeSawAdminsFile(t, "kubesaw.host.openshiftapps.com", kubeSawAdminsContent) tempDir, err := os.MkdirTemp("", "sandbox-sre-out-") require.NoError(t, err) - flags := generateFlags{kubeconfigs: kubeconfigFiles, sandboxConfigFile: configFile, outDir: tempDir} + flags := generateFlags{kubeconfigs: kubeconfigFiles, kubeSawAdminsFile: configFile, outDir: tempDir} // when err = generate(term, flags, newClient, newExternalClient) @@ -120,7 +120,7 @@ func TestGenerateCliConfigs(t *testing.T) { tempDir, err := os.MkdirTemp("", "sandbox-sre-out-") require.NoError(t, err) kubeconfigFiles := createKubeconfigFiles(t, sandboxKubeconfigContent) - flags := generateFlags{kubeconfigs: kubeconfigFiles, sandboxConfigFile: configFile, outDir: tempDir, dev: true} + flags := generateFlags{kubeconfigs: kubeconfigFiles, kubeSawAdminsFile: configFile, outDir: tempDir, dev: true} // when err = generate(term, flags, newClient, newExternalClient) @@ -153,25 +153,25 @@ func TestGenerateCliConfigs(t *testing.T) { require.ErrorContains(t, err, "could not setup client from any of the provided kubeconfig files") }) - t.Run("wrong sandbox-config.yaml file path", func(t *testing.T) { + t.Run("wrong kubesaw-admins.yaml file path", func(t *testing.T) { // given tempDir, err := os.MkdirTemp("", "sandbox-sre-out-") require.NoError(t, err) - flags := 
generateFlags{kubeconfigs: kubeconfigFiles, sandboxConfigFile: "does/not/exist", outDir: tempDir} + flags := generateFlags{kubeconfigs: kubeconfigFiles, kubeSawAdminsFile: "does/not/exist", outDir: tempDir} // when err = generate(term, flags, newClient, newExternalClient) // then require.Error(t, err) - require.ErrorContains(t, err, "unable get sandbox-config.yaml file from does/not/exist") + require.ErrorContains(t, err, "unable get kubesaw-admins.yaml file from does/not/exist") }) t.Run("wrong kubeconfig file path", func(t *testing.T) { // given tempDir, err := os.MkdirTemp("", "sandbox-sre-out-") require.NoError(t, err) - flags := generateFlags{kubeconfigs: []string{"does/not/exist"}, sandboxConfigFile: configFile, outDir: tempDir} + flags := generateFlags{kubeconfigs: []string{"does/not/exist"}, kubeSawAdminsFile: configFile, outDir: tempDir} // when err = generate(term, flags, newClient, newExternalClient) @@ -183,18 +183,18 @@ func TestGenerateCliConfigs(t *testing.T) { t.Run("when token call is not mocked for SA", func(t *testing.T) { // given - saInHostOnly := NewSandboxEnvironmentConfig( + saInHostOnly := NewKubeSawAdmins( Clusters(HostServerAPI), ServiceAccounts( Sa("notmocked", "", HostRoleBindings("toolchain-host-operator", Role("install-operator"), ClusterRole("admin")))), Users()) - sandboxEnvConfigContent, err := yaml.Marshal(saInHostOnly) + kubeSawAdminsContent, err := yaml.Marshal(saInHostOnly) require.NoError(t, err) - configFile := createSandboxConfigFile(t, "sandbox.host.openshiftapps.com", sandboxEnvConfigContent) + configFile := createKubeSawAdminsFile(t, "sandbox.host.openshiftapps.com", kubeSawAdminsContent) tempDir, err := os.MkdirTemp("", "sandbox-sre-out-") require.NoError(t, err) - flags := generateFlags{kubeconfigs: kubeconfigFiles, sandboxConfigFile: configFile, outDir: tempDir} + flags := generateFlags{kubeconfigs: kubeconfigFiles, kubeSawAdminsFile: configFile, outDir: tempDir} // when err = generate(term, flags, newClient, newExternalClient) diff --git a/pkg/cmd/adm/setup.go b/pkg/cmd/adm/setup.go index adcac2b..709bfb2 100644 --- a/pkg/cmd/adm/setup.go +++ b/pkg/cmd/adm/setup.go @@ -15,31 +15,31 @@ import ( ) type setupFlags struct { - sandboxConfigFile, outDir, hostRootDir, memberRootDir string + kubeSawAdminsFile, outDir, hostRootDir, memberRootDir string singleCluster bool } func NewSetupCmd() *cobra.Command { f := setupFlags{} command := &cobra.Command{ - Use: "setup --sandbox-config= --out-dir ", - Example: `ksctl adm setup ./path/to/sandbox.openshiftapps.com/sandbox-config.yaml --out-dir ./components/auth/devsandbox-production -ksctl adm setup ./path/to/sandbox-stage.openshiftapps.com/sandbox-config.yaml --out-dir ./components/auth/devsandbox-staging -s`, + Use: "setup --kubesaw-admins= --out-dir ", + Example: `ksctl adm setup ./path/to/kubesaw.openshiftapps.com/kubesaw-admins.yaml --out-dir ./components/auth/kubesaw-production +ksctl adm setup ./path/to/kubesaw-stage.openshiftapps.com/kubesaw-admins.yaml --out-dir ./components/auth/kubesaw-staging -s`, Short: "Generates user-management manifests", - Long: `Reads the sandbox-config.yaml file and based on the content it generates user-management RBAC and manifests.`, + Long: `Reads the kubesaw-admins.yaml file and based on the content it generates user-management RBAC and manifests.`, Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, _ []string) error { term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout) return Setup(term, resources.Resources, f) }, } - 
command.Flags().StringVarP(&f.sandboxConfigFile, "sandbox-config", "c", "", "Use the given sandbox config file") + command.Flags().StringVarP(&f.kubeSawAdminsFile, "kubesaw-admins", "c", "", "Use the given sandbox config file") command.Flags().StringVarP(&f.outDir, "out-dir", "o", "", "Directory where generated manifests should be stored") command.Flags().BoolVarP(&f.singleCluster, "single-cluster", "s", false, "If host and member are deployed to the same cluster") command.Flags().StringVar(&f.hostRootDir, "host-root-dir", "host", "The root directory name for host manifests") command.Flags().StringVar(&f.memberRootDir, "member-root-dir", "member", "The root directory name for member manifests") - flags.MustMarkRequired(command, "sandbox-config") + flags.MustMarkRequired(command, "kubesaw-admins") flags.MustMarkRequired(command, "out-dir") return command @@ -55,20 +55,20 @@ func Setup(term ioutils.Terminal, files assets.FS, flags setupFlags) error { } flags.outDir = abs - // Get the unmarshalled version of sandbox-config.yaml - sandboxEnvConfig, err := assets.GetSandboxEnvironmentConfig(flags.sandboxConfigFile) + // Get the unmarshalled version of kubesaw-admins.yaml + kubeSawAdmins, err := assets.GetKubeSawAdminsConfig(flags.kubeSawAdminsFile) if err != nil { - return errs.Wrapf(err, "unable get sandbox-config.yaml file from %s", flags.sandboxConfigFile) + return errs.Wrapf(err, "unable get kubesaw-admins.yaml file from %s", flags.kubeSawAdminsFile) } err = os.RemoveAll(flags.outDir) if err != nil { return err } ctx := &setupContext{ - Terminal: term, - sandboxEnvConfig: sandboxEnvConfig, - setupFlags: flags, - files: files, + Terminal: term, + kubeSawAdmins: kubeSawAdmins, + setupFlags: flags, + files: files, } objsCache := objectsCache{} if err := ensureCluster(ctx, configuration.Host, objsCache); err != nil { @@ -83,8 +83,8 @@ func Setup(term ioutils.Terminal, files assets.FS, flags setupFlags) error { type setupContext struct { ioutils.Terminal setupFlags - sandboxEnvConfig *assets.SandboxEnvironmentConfig - files assets.FS + kubeSawAdmins *assets.KubeSawAdmins + files assets.FS } func ensureCluster(ctx *setupContext, clusterType configuration.ClusterType, cache objectsCache) error { diff --git a/pkg/cmd/adm/setup_cluster.go b/pkg/cmd/adm/setup_cluster.go index 57731ae..9eec5fb 100644 --- a/pkg/cmd/adm/setup_cluster.go +++ b/pkg/cmd/adm/setup_cluster.go @@ -13,7 +13,7 @@ type clusterContext struct { // It generates SA and roles & roleBindings for them func ensureServiceAccounts(ctx *clusterContext, objsCache objectsCache) error { ctx.Printlnf("-> Ensuring ServiceAccounts and its RoleBindings...") - for _, sa := range ctx.sandboxEnvConfig.ServiceAccounts { + for _, sa := range ctx.kubeSawAdmins.ServiceAccounts { // by default, it should use the sandbox sre namespace. 
 		// namespace is not defined) so that the ensureServiceAccount method can resolve it based on
 		// the cluster type it is being applied in
 		saNamespace := ""
@@ -41,7 +41,7 @@ func ensureServiceAccounts(ctx *clusterContext, objsCache objectsCache) error {
 
 func ensureUsers(ctx *clusterContext, objsCache objectsCache) error {
 	ctx.Printlnf("-> Ensuring Users and their RoleBindings...")
-	for _, user := range ctx.sandboxEnvConfig.Users {
+	for _, user := range ctx.kubeSawAdmins.Users {
 
 		permissions := &permissionsManager{
 			objectsCache: objsCache,
diff --git a/pkg/cmd/adm/setup_cluster_test.go b/pkg/cmd/adm/setup_cluster_test.go
index 3868908..1660305 100644
--- a/pkg/cmd/adm/setup_cluster_test.go
+++ b/pkg/cmd/adm/setup_cluster_test.go
@@ -15,7 +15,7 @@ import (
 func TestEnsureServiceAccounts(t *testing.T) {
 	t.Run("create permissions for SA base names", func(t *testing.T) {
 		// given
-		sandboxEnvConfig := newSandboxEnvironmentConfigWithDefaultClusterAndNamespaces(
+		kubeSawAdmins := newKubeSawAdminsWithDefaultClusters(
 			ServiceAccounts(
 				Sa("john", "",
 					permissionsForAllNamespaces...),
 				Sa("bob", "",
 					HostRoleBindings("toolchain-host-operator", Role("restart-deployment"), ClusterRole("view")),
 					MemberRoleBindings("toolchain-member-operator", Role("restart-deployment"), ClusterRole("view")))),
 			[]assets.User{})
-		ctx := newSetupContextWithDefaultFiles(t, sandboxEnvConfig)
+		ctx := newSetupContextWithDefaultFiles(t, kubeSawAdmins)
 		cache := objectsCache{}
 
 		for _, clusterType := range configuration.ClusterTypes {
@@ -68,12 +68,12 @@ func TestEnsureServiceAccounts(t *testing.T) {
 
 	t.Run("create SA with the fixed name, in the given namespace, ClusterRoleBinding set, and don't gather the token", func(t *testing.T) {
 		// given
-		sandboxEnvConfig := newSandboxEnvironmentConfigWithDefaultClusterAndNamespaces(
+		kubeSawAdmins := newKubeSawAdminsWithDefaultClusters(
 			ServiceAccounts(
 				Sa("john", "openshift-customer-monitoring",
 					HostRoleBindings("toolchain-host-operator", Role("install-operator"), ClusterRole("view")),
 					HostClusterRoleBindings("cluster-monitoring-view"))),
 			Users())
-		ctx := newSetupContextWithDefaultFiles(t, sandboxEnvConfig)
+		ctx := newSetupContextWithDefaultFiles(t, kubeSawAdmins)
 		clusterCtx := newFakeClusterContext(ctx, configuration.Host)
 		t.Cleanup(gock.OffAll)
 		cache := objectsCache{}
@@ -95,7 +95,7 @@ func TestEnsureServiceAccounts(t *testing.T) {
 func TestUsers(t *testing.T) {
 	t.Run("ensure users", func(t *testing.T) {
 		// given
-		sandboxEnvConfig := newSandboxEnvironmentConfigWithDefaultClusterAndNamespaces(
+		kubeSawAdmins := newKubeSawAdminsWithDefaultClusters(
 			ServiceAccounts(),
 			Users(
 				User("john-user", []string{"12345"}, "crtadmins",
 					HostRoleBindings("toolchain-host-operator", Role("restart-deployment"), ClusterRole("view")),
 					HostClusterRoleBindings("cluster-monitoring-view"),
 					MemberRoleBindings("toolchain-member-operator", Role("restart-deployment"), ClusterRole("view")),
 					MemberClusterRoleBindings("cluster-monitoring-view"))))
-		ctx := newSetupContextWithDefaultFiles(t, sandboxEnvConfig)
+		ctx := newSetupContextWithDefaultFiles(t, kubeSawAdmins)
 		cache := objectsCache{}
 
 		for _, clusterType := range configuration.ClusterTypes {
@@ -153,8 +153,8 @@ func TestUsers(t *testing.T) {
 	})
 }
 
-func newSandboxEnvironmentConfigWithDefaultClusterAndNamespaces(serviceAccounts []assets.ServiceAccount, users []assets.User) *assets.SandboxEnvironmentConfig {
-	return NewSandboxEnvironmentConfig(
+func newKubeSawAdminsWithDefaultClusters(serviceAccounts []assets.ServiceAccount, users []assets.User) *assets.KubeSawAdmins {
+	return NewKubeSawAdmins(
 		Clusters(HostServerAPI).AddMember("member-1", Member1ServerAPI),
 		serviceAccounts,
 		users)
diff --git a/pkg/cmd/adm/setup_mock_test.go b/pkg/cmd/adm/setup_mock_test.go
index 47d0490..fba566f 100644
--- a/pkg/cmd/adm/setup_mock_test.go
+++ b/pkg/cmd/adm/setup_mock_test.go
@@ -32,10 +32,10 @@ func newDefaultFiles(t *testing.T, fakeFiles ...test.FakeFileCreator) assets.FS
 	return files
 }
 
-func createSandboxConfigFile(t *testing.T, dirPrefix string, content []byte) string { //nolint:unparam
+func createKubeSawAdminsFile(t *testing.T, dirPrefix string, content []byte) string { //nolint:unparam
 	configTempDir, err := os.MkdirTemp("", dirPrefix+"-")
 	require.NoError(t, err)
-	configFile := fmt.Sprintf("%s/sandbox-config.yaml", configTempDir)
+	configFile := fmt.Sprintf("%s/kubesaw-admins.yaml", configTempDir)
 	err = os.WriteFile(configFile, content, 0600)
 	require.NoError(t, err)
 	return configFile
@@ -43,20 +43,20 @@ func createSandboxConfigFile(t *testing.T, dirPrefix string, content []byte) str
 
 // setupContext part
 
-func newSetupContextWithDefaultFiles(t *testing.T, config *assets.SandboxEnvironmentConfig) *setupContext { //nolint:unparam
+func newSetupContextWithDefaultFiles(t *testing.T, config *assets.KubeSawAdmins) *setupContext { //nolint:unparam
 	return newSetupContext(t, config, newDefaultFiles(t))
 }
 
-func newSetupContext(t *testing.T, config *assets.SandboxEnvironmentConfig, files assets.FS) *setupContext {
+func newSetupContext(t *testing.T, config *assets.KubeSawAdmins, files assets.FS) *setupContext {
 	fakeTerminal := test.NewFakeTerminal()
 	fakeTerminal.Tee(os.Stdout)
 	require.NoError(t, client.AddToScheme())
 	temp, err := os.MkdirTemp("", "cli-tests-")
 	require.NoError(t, err)
 	return &setupContext{
-		Terminal:         fakeTerminal,
-		sandboxEnvConfig: config,
-		files:            files,
+		Terminal:      fakeTerminal,
+		kubeSawAdmins: config,
+		files:         files,
 		setupFlags: setupFlags{
 			outDir:        temp,
 			memberRootDir: "member",
diff --git a/pkg/cmd/adm/setup_permissions_test.go b/pkg/cmd/adm/setup_permissions_test.go
index 5f41338..7e94112 100644
--- a/pkg/cmd/adm/setup_permissions_test.go
+++ b/pkg/cmd/adm/setup_permissions_test.go
@@ -23,7 +23,7 @@ var permissionsForAllNamespaces = []PermissionsPerClusterTypeModifier{
 
 func TestEnsurePermissionsInNamespaces(t *testing.T) {
 	// given
-	config := newSandboxEnvironmentConfigWithDefaultClusterAndNamespaces([]assets.ServiceAccount{}, []assets.User{})
+	config := newKubeSawAdminsWithDefaultClusters([]assets.ServiceAccount{}, []assets.User{})
 
 	t.Run("create permissions", func(t *testing.T) {
 		// given
@@ -195,7 +195,7 @@ func TestEnsureGroupsForUser(t *testing.T) {
 	})
 }
 
-func newPermissionsManager(t *testing.T, clusterType configuration.ClusterType, config *assets.SandboxEnvironmentConfig) (permissionsManager, *clusterContext) { // nolint:unparam
+func newPermissionsManager(t *testing.T, clusterType configuration.ClusterType, config *assets.KubeSawAdmins) (permissionsManager, *clusterContext) { // nolint:unparam
 	ctx := newSetupContextWithDefaultFiles(t, config)
 	clusterCtx := newFakeClusterContext(ctx, clusterType)
 	cache := objectsCache{}
diff --git a/pkg/cmd/adm/setup_roles_manager_test.go b/pkg/cmd/adm/setup_roles_manager_test.go
index e1dc095..0d7e809 100644
--- a/pkg/cmd/adm/setup_roles_manager_test.go
+++ b/pkg/cmd/adm/setup_roles_manager_test.go
@@ -26,7 +26,7 @@ func TestGetRole(t *testing.T) {
 	files := NewFakeFiles(t,
 		FakeTemplate("setup/roles/host.yaml", installOperatorRole),
 		FakeTemplate("setup/roles/member.yaml", restartDeploymentRole, registerClusterRole))
ctx := newSetupContext(t, &assets.SandboxEnvironmentConfig{}, files) + ctx := newSetupContext(t, &assets.KubeSawAdmins{}, files) t.Run("for host cluster type", func(t *testing.T) { // given @@ -94,7 +94,7 @@ func TestEnsureRole(t *testing.T) { t.Run("create install-operator role for host", func(t *testing.T) { // given - ctx := newSetupContext(t, &assets.SandboxEnvironmentConfig{}, files) + ctx := newSetupContext(t, &assets.KubeSawAdmins{}, files) hostCtx := newFakeClusterContext(ctx, configuration.Host) memberCtx := newFakeClusterContext(ctx, configuration.Member) cache := objectsCache{} @@ -155,7 +155,7 @@ func TestEnsureRole(t *testing.T) { t.Run("create restart-deployment role for member", func(t *testing.T) { // given - ctx := newSetupContext(t, &assets.SandboxEnvironmentConfig{}, files) + ctx := newSetupContext(t, &assets.KubeSawAdmins{}, files) memberCtx := newFakeClusterContext(ctx, configuration.Member) cache := objectsCache{} diff --git a/pkg/cmd/adm/setup_test.go b/pkg/cmd/adm/setup_test.go index 86e1f2a..7e2f2bf 100644 --- a/pkg/cmd/adm/setup_test.go +++ b/pkg/cmd/adm/setup_test.go @@ -23,7 +23,7 @@ import ( func TestSetup(t *testing.T) { // given require.NoError(t, client.AddToScheme()) - sandboxEnvConfig := NewSandboxEnvironmentConfig( + kubeSawAdmins := NewKubeSawAdmins( Clusters(HostServerAPI). AddMember("member1", Member1ServerAPI). AddMember("member2", Member2ServerAPI), @@ -42,10 +42,10 @@ func TestSetup(t *testing.T) { HostRoleBindings("toolchain-host-operator", Role("restart-deployment"), ClusterRole("admin")), MemberRoleBindings("toolchain-member-operator", Role("restart-deployment"), ClusterRole("admin"))))) - sandboxEnvConfigContent, err := yaml.Marshal(sandboxEnvConfig) + kubeSawAdminsContent, err := yaml.Marshal(kubeSawAdmins) require.NoError(t, err) - configFile := createSandboxConfigFile(t, "sandbox.host.openshiftapps.com", sandboxEnvConfigContent) + configFile := createKubeSawAdminsFile(t, "kubesaw.host.openshiftapps.com", kubeSawAdminsContent) files := newDefaultFiles(t) t.Run("all created", func(t *testing.T) { @@ -54,7 +54,7 @@ func TestSetup(t *testing.T) { require.NoError(t, err) term := NewFakeTerminalWithResponse("Y") term.Tee(os.Stdout) - flags := newSetupFlags(outDir(outTempDir), sandboxConfigFile(configFile)) + flags := newSetupFlags(outDir(outTempDir), kubeSawAdminsFile(configFile)) // when err = Setup(term, files, flags) @@ -70,7 +70,7 @@ func TestSetup(t *testing.T) { require.NoError(t, err) term := NewFakeTerminalWithResponse("Y") term.Tee(os.Stdout) - flags := newSetupFlags(outDir(outTempDir), sandboxConfigFile(configFile), singleCluster()) + flags := newSetupFlags(outDir(outTempDir), kubeSawAdminsFile(configFile), singleCluster()) // when err = Setup(term, files, flags) @@ -86,7 +86,7 @@ func TestSetup(t *testing.T) { require.NoError(t, err) term := NewFakeTerminalWithResponse("Y") term.Tee(os.Stdout) - flags := newSetupFlags(outDir(outTempDir), sandboxConfigFile(configFile), hostRootDir("host-cluster")) + flags := newSetupFlags(outDir(outTempDir), kubeSawAdminsFile(configFile), hostRootDir("host-cluster")) // when err = Setup(term, files, flags) @@ -102,7 +102,7 @@ func TestSetup(t *testing.T) { require.NoError(t, err) term := NewFakeTerminalWithResponse("Y") term.Tee(os.Stdout) - flags := newSetupFlags(outDir(outTempDir), sandboxConfigFile(configFile), memberRootDir("member-clusters")) + flags := newSetupFlags(outDir(outTempDir), kubeSawAdminsFile(configFile), memberRootDir("member-clusters")) // when err = Setup(term, files, flags) @@ -119,7 
 		storeDummySA(t, outTempDir)
 		term := NewFakeTerminalWithResponse("Y")
 		term.Tee(os.Stdout)
-		flags := newSetupFlags(outDir(outTempDir), sandboxConfigFile(configFile))
+		flags := newSetupFlags(outDir(outTempDir), kubeSawAdminsFile(configFile))
 
 		// when
 		err = Setup(term, files, flags)
@@ -134,7 +134,7 @@ func TestSetup(t *testing.T) {
 		outTempDir := filepath.Join(os.TempDir(), fmt.Sprintf("setup-cli-test-%s", uuid.NewV4().String()))
 		term := NewFakeTerminalWithResponse("Y")
 		term.Tee(os.Stdout)
-		flags := newSetupFlags(outDir(outTempDir), sandboxConfigFile(configFile))
+		flags := newSetupFlags(outDir(outTempDir), kubeSawAdminsFile(configFile))
 
 		// when
 		err = Setup(term, files, flags)
@@ -144,13 +144,13 @@ func TestSetup(t *testing.T) {
 		verifyFiles(t, flags)
 	})
 
-	t.Run("fails for non-existing sandbox-config.yaml file", func(t *testing.T) {
+	t.Run("fails for non-existing kubesaw-admins.yaml file", func(t *testing.T) {
 		// given
 		outTempDir, err := os.MkdirTemp("", "setup-cli-test-")
 		require.NoError(t, err)
 		term := NewFakeTerminalWithResponse("Y")
 		term.Tee(os.Stdout)
-		flags := newSetupFlags(outDir(outTempDir), sandboxConfigFile("does/not/exist"))
+		flags := newSetupFlags(outDir(outTempDir), kubeSawAdminsFile("does/not/exist"))
 
 		// when
 		err = Setup(term, files, flags)
@@ -320,9 +320,9 @@ func newSetupFlags(setupFlagsOptions ...setupFlagsOption) setupFlags {
 	return flags
 }
 
-func sandboxConfigFile(configName string) setupFlagsOption {
+func kubeSawAdminsFile(configName string) setupFlagsOption {
 	return func(flags *setupFlags) {
-		flags.sandboxConfigFile = configName
+		flags.kubeSawAdminsFile = configName
 	}
 }
diff --git a/pkg/test/environment_config.go b/pkg/test/environment_config.go
index b56525c..e612122 100644
--- a/pkg/test/environment_config.go
+++ b/pkg/test/environment_config.go
@@ -5,8 +5,8 @@ import (
 	"github.com/kubesaw/ksctl/pkg/configuration"
 )
 
-func NewSandboxEnvironmentConfig(addClusters ClustersCreator, serviceAccounts []assets.ServiceAccount, users []assets.User) *assets.SandboxEnvironmentConfig {
-	sc := &assets.SandboxEnvironmentConfig{
+func NewKubeSawAdmins(addClusters ClustersCreator, serviceAccounts []assets.ServiceAccount, users []assets.User) *assets.KubeSawAdmins {
+	sc := &assets.KubeSawAdmins{
 		ServiceAccounts: serviceAccounts,
 		Users:           users,
 	}
diff --git a/test-resources/dummy.openshiftapps.com/kubesaw-admins.yaml b/test-resources/dummy.openshiftapps.com/kubesaw-admins.yaml
new file mode 100644
index 0000000..3851f00
--- /dev/null
+++ b/test-resources/dummy.openshiftapps.com/kubesaw-admins.yaml
@@ -0,0 +1,216 @@
+clusters:
+  host:
+    api: https://api.dummy-host.openshiftapps.com:6443
+  members:
+  - api: https://api.dummy-m1.openshiftapps.com:6443
+    name: member-1
+  - api: https://api.dummy-m2.openshiftapps.com:6443
+    name: member-2
+  - api: https://api.dummy-m3.openshiftapps.com:6443
+    name: member-3
+
+serviceAccounts:
+
+- name: first-admin
+  host:
+    roleBindings:
+    - namespace: toolchain-host-operator
+      roles:
+      - install-operator
+      - restart-deployment
+      - approve-user
+      - view-secrets
+      - deactivate-user
+      - ban-user
+      - promote-user
+      - disable-user
+      - retarget-user
+      - gdpr-delete
+      - create-social-event
+      - add-space-users
+      clusterRoles:
+      - edit
+      - view
+    - namespace: openshift-customer-monitoring
+      roles:
+      - install-operator
+      - view-secrets
+      - configure-monitoring
+      clusterRoles:
+      - edit
+    - namespace: openshift-logging
+      roles:
+      - install-operator
+      clusterRoles:
+      - edit
+  member:
+    roleBindings:
+    - namespace: toolchain-member-operator
+      roles:
+      - install-operator
+      - restart-deployment
+      - view-secrets
+      clusterRoles:
+      - edit
+      - view
+    - namespace: openshift-customer-monitoring
+      roles:
+      - install-operator
+      - view-secrets
+      - configure-monitoring
+      clusterRoles:
+      - edit
+    - namespace: openshift-logging
+      roles:
+      - install-operator
+      clusterRoles:
+      - edit
+    - namespace: openshift-config-managed
+      roles:
+      - configure-monitoring
+      clusterRoles:
+      - edit
+    clusterRoleBindings:
+      clusterRoles:
+      - manage-console-resources
+
+- name: second-admin
+  host:
+    roleBindings:
+    - namespace: toolchain-host-operator
+      roles:
+      - approve-user
+      - view-secrets
+      - deactivate-user
+      - ban-user
+      - promote-user
+      - disable-user
+      - retarget-user
+      - gdpr-delete
+      - restart-deployment
+      - create-social-event
+      - add-space-users
+      clusterRoles:
+      - view
+  member:
+    roleBindings:
+    - namespace: toolchain-member-operator
+      roles:
+      - restart-deployment
+      - view-secrets
+      clusterRoles:
+      - view
+
+- name: viewer
+  host:
+    roleBindings:
+    - namespace: toolchain-host-operator
+      clusterRoles:
+      - view
+  member:
+    roleBindings:
+    - namespace: toolchain-member-operator
+      clusterRoles:
+      - view
+
+users:
+- name: standard-user-admin
+  id:
+  - 123456
+  - abc1234
+  groups:
+  - crtadmin-users-view
+  - inspect-pods
+  host:
+    roleBindings:
+    - namespace: toolchain-host-operator
+      roles:
+      - edit-secrets
+      clusterRoles:
+      - view
+    - namespace: openshift-customer-monitoring
+      roles:
+      - install-operator
+      - view-secrets
+      - configure-monitoring
+      clusterRoles:
+      - edit
+    - namespace: openshift-logging
+      clusterRoles:
+      - view
+    - namespace: sandbox-sre-host
+      roles:
+      - view-secrets
+      clusterRoles:
+      - view
+  member:
+    roleBindings:
+    - namespace: toolchain-member-operator
+      roles:
+      - edit-secrets
+      clusterRoles:
+      - view
+    - namespace: crw
+      roles:
+      - view-secrets
+      clusterRoles:
+      - view
+    - namespace: openshift-customer-monitoring
+      roles:
+      - install-operator
+      - view-secrets
+      - configure-monitoring
+      clusterRoles:
+      - edit
+    - namespace: openshift-logging
+      clusterRoles:
+      - view
+    - namespace: sandbox-sre-member
+      roles:
+      - view-secrets
+      clusterRoles:
+      - view
+
+- name: standard-user-viewer
+  id:
+  - 987654
+  groups:
+  - crtadmin-users-view
+  - kubesaw-team
+  host:
+    roleBindings:
+    - namespace: toolchain-host-operator
+      clusterRoles:
+      - view
+  member:
+    roleBindings:
+    - namespace: toolchain-member-operator
+      clusterRoles:
+      - view
+
+- name: other-component-admin
+  id:
+  - 561234287
+  - f:528d:some-admin
+  member:
+    roleBindings:
+    - namespace: some-component
+      roles:
+      - approve-operator-update
+      clusterRoles:
+      - edit
+    clusterRoleBindings:
+      clusterRoles:
+      - list-operators-group
+
+- name: other-component-viewer
+  id:
+  - 5412345
+  member:
+    roleBindings:
+    - namespace: first-component
+      clusterRoles:
+      - view
+    - namespace: second-component
+      clusterRoles:
+      - view
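
Not part of the patch itself, but a quick way to sanity-check the rename end to end: the minimal sketch below loads a kubesaw-admins.yaml file through the renamed GetKubeSawAdminsConfig function and prints the cluster topology it found. The package-main wrapper and the hard-coded path are illustrative assumptions only; the function name and the KubeSawAdmins fields come straight from this diff.

package main

import (
	"fmt"
	"log"

	"github.com/kubesaw/ksctl/pkg/assets"
)

func main() {
	// Load and unmarshal the admins config (formerly sandbox-config.yaml).
	kubeSawAdmins, err := assets.GetKubeSawAdminsConfig("test-resources/dummy.openshiftapps.com/kubesaw-admins.yaml")
	if err != nil {
		log.Fatalf("unable to get kubesaw-admins.yaml file: %v", err)
	}

	// The top-level sections mirror the KubeSawAdmins struct: clusters, serviceAccounts, users.
	fmt.Println("host API:", kubeSawAdmins.Clusters.Host.API)
	for _, member := range kubeSawAdmins.Clusters.Members {
		fmt.Printf("member %s -> %s\n", member.Name, member.API)
	}
	fmt.Printf("%d service accounts, %d users defined\n", len(kubeSawAdmins.ServiceAccounts), len(kubeSawAdmins.Users))
}

Run against the dummy test resource added by this patch, it should list the host plus member-1 through member-3, three service accounts, and five users.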