From f8747d18d1613a765707a1b6e819f3e5c3fd6614 Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Fri, 29 Sep 2023 12:16:18 +0100 Subject: [PATCH 01/36] refresh grant lists (#2746) --- catalog/resource_grants.go | 15 +++++++++++++-- docs/resources/grants.md | 39 +++++++++++++++++++++++++++++++++----- 2 files changed, 47 insertions(+), 7 deletions(-) diff --git a/catalog/resource_grants.go b/catalog/resource_grants.go index 93240af1d8..e44d144998 100644 --- a/catalog/resource_grants.go +++ b/catalog/resource_grants.go @@ -177,11 +177,13 @@ var mapping = securableMapping{ // v1.0 "ALL_PRIVILEGES": true, + "APPLY_TAG": true, "BROWSE": true, }, "view": { - "SELECT": true, - "BROWSE": true, + "SELECT": true, + "APPLY_TAG": true, + "BROWSE": true, }, "catalog": { "CREATE": true, @@ -189,6 +191,7 @@ var mapping = securableMapping{ // v1.0 "ALL_PRIVILEGES": true, + "APPLY_TAG": true, "USE_CATALOG": true, "USE_SCHEMA": true, "CREATE_SCHEMA": true, @@ -211,6 +214,7 @@ var mapping = securableMapping{ // v1.0 "ALL_PRIVILEGES": true, + "APPLY_TAG": true, "USE_SCHEMA": true, "CREATE_TABLE": true, "CREATE_FUNCTION": true, @@ -250,12 +254,14 @@ var mapping = securableMapping{ "metastore": { // v1.0 "CREATE_CATALOG": true, + "CREATE_CLEAN_ROOM": true, "CREATE_CONNECTION": true, "CREATE_EXTERNAL_LOCATION": true, "CREATE_STORAGE_CREDENTIAL": true, "CREATE_SHARE": true, "CREATE_RECIPIENT": true, "CREATE_PROVIDER": true, + "MANAGE_ALLOWLIST": true, "USE_CONNECTION": true, "USE_PROVIDER": true, "USE_SHARE": true, @@ -267,6 +273,11 @@ var mapping = securableMapping{ "ALL_PRIVILEGES": true, "EXECUTE": true, }, + "model": { + "ALL_PRIVILEGES": true, + "APPLY_TAG": true, + "EXECUTE": true, + }, "materialized_view": { "ALL_PRIVILEGES": true, "SELECT": true, diff --git a/docs/resources/grants.md b/docs/resources/grants.md index 9660cf8d9d..f42b3d30b2 100644 --- a/docs/resources/grants.md +++ b/docs/resources/grants.md @@ -29,7 +29,7 @@ Unlike the [SQL specification](https://docs.databricks.com/sql/language-manual/s ## Metastore grants -You can grant `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SHARE`, `SET_SHARE_PERMISSION`, `USE_MARKETPLACE_ASSETS`, `USE_CONNECTION`, `USE_PROVIDER`, `USE_RECIPIENT` and `USE_SHARE` privileges to [databricks_metastore](metastore.md) id specified in `metastore` attribute. +You can grant `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SHARE`, `MANAGE_ALLOWLIST`, `SET_SHARE_PERMISSION`, `USE_MARKETPLACE_ASSETS`, `USE_CONNECTION`, `USE_PROVIDER`, `USE_RECIPIENT` and `USE_SHARE` privileges to [databricks_metastore](metastore.md) id specified in `metastore` attribute. ```hcl resource "databricks_grants" "sandbox" { @@ -47,7 +47,7 @@ resource "databricks_grants" "sandbox" { ## Catalog grants -You can grant `ALL_PRIVILEGES`, `CREATE_SCHEMA`, `USE_CATALOG` privileges to [databricks_catalog](catalog.md) specified in the `catalog` attribute. You can also grant `CREATE_FUNCTION`, `CREATE_TABLE`, `CREATE_VOLUME`, `EXECUTE`, `MODIFY`, `REFRESH`, `SELECT`, `READ_VOLUME`, `WRITE_VOLUME` and `USE_SCHEMA` at the catalog level to apply them to the pertinent current and future securable objects within the catalog: +You can grant `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE_CONNECTION`, `CREATE_SCHEMA`, `USE_CATALOG` privileges to [databricks_catalog](catalog.md) specified in the `catalog` attribute. 
You can also grant `CREATE_FUNCTION`, `CREATE_TABLE`, `CREATE_VOLUME`, `EXECUTE`, `MODIFY`, `REFRESH`, `SELECT`, `READ_VOLUME`, `WRITE_VOLUME` and `USE_SCHEMA` at the catalog level to apply them to the pertinent current and future securable objects within the catalog: ```hcl resource "databricks_catalog" "sandbox" { @@ -78,7 +78,7 @@ resource "databricks_grants" "sandbox" { ## Schema grants -You can grant `ALL_PRIVILEGES`, `CREATE_FUNCTION`, `CREATE_TABLE`, `CREATE_VOLUME` and `USE_SCHEMA` privileges to [_`catalog.schema`_](schema.md) specified in the `schema` attribute. You can also grant `EXECUTE`, `MODIFY`, `REFRESH`, `SELECT`, `READ_VOLUME`, `WRITE_VOLUME` at the schema level to apply them to the pertinent current and future securable objects within the schema: +You can grant `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE_FUNCTION`, `CREATE_TABLE`, `CREATE_VOLUME` and `USE_SCHEMA` privileges to [_`catalog.schema`_](schema.md) specified in the `schema` attribute. You can also grant `EXECUTE`, `MODIFY`, `REFRESH`, `SELECT`, `READ_VOLUME`, `WRITE_VOLUME` at the schema level to apply them to the pertinent current and future securable objects within the schema: ```hcl resource "databricks_schema" "things" { @@ -101,7 +101,7 @@ resource "databricks_grants" "things" { ## Table grants -You can grant `ALL_PRIVILEGES`, `SELECT` and `MODIFY` privileges to [_`catalog.schema.table`_](tables.md) specified in the `table` attribute. +You can grant `ALL_PRIVILEGES`, `APPLY_TAG`, `SELECT` and `MODIFY` privileges to [_`catalog.schema.table`_](tables.md) specified in the `table` attribute. ```hcl resource "databricks_grants" "customers" { @@ -139,7 +139,7 @@ resource "databricks_grants" "things" { ## View grants -You can grant `ALL_PRIVILEGES` and `SELECT` privileges to [_`catalog.schema.view`_](views.md) specified in `table` attribute. +You can grant `ALL_PRIVILEGES`, `APPLY_TAG` and `SELECT` privileges to [_`catalog.schema.view`_](views.md) specified in `table` attribute. 
```hcl resource "databricks_grants" "customer360" { @@ -237,6 +237,35 @@ resource "databricks_grants" "some" { } ``` +## Connection grants + +You can grant `ALL_PRIVILEGES`, `USE_CONNECTION` and `CREATE_FOREIGN_CATALOG` to [databricks_connection](connection.md) specified in `foreign_connection` attribute: + +```hcl +resource "databricks_connection" "mysql" { + name = "mysql_connection" + connection_type = "MYSQL" + comment = "this is a connection to mysql db" + options = { + host = "test.mysql.database.azure.com" + port = "3306" + user = "user" + password = "password" + } + properties = { + purpose = "testing" + } +} + +resource "databricks_grants" "some" { + foreign_connection = databricks_connection.mysql.name + grant { + principal = "Data Engineers" + privileges = ["CREATE_FOREIGN_CATALOG", "USE_CONNECTION"] + } +} +``` + ## Delta Sharing share grants You can grant `SELECT` to [databricks_recipient](recipient.md) on [databricks_share](share.md) name specified in `share` attribute: From 0eff50f6e8243ed6cedfa43116556dd20a4156d0 Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Sun, 1 Oct 2023 17:04:03 +0100 Subject: [PATCH 02/36] add `databricks_system_schema` resource (#2606) * add `databricks_system_schema` resource * doc note * refactor * fix tests --- catalog/resource_system_schema.go | 113 ++++++++ catalog/resource_system_schema_test.go | 297 ++++++++++++++++++++++ docs/resources/system_schema.md | 41 +++ internal/acceptance/system_schema_test.go | 14 + provider/provider.go | 1 + 5 files changed, 466 insertions(+) create mode 100644 catalog/resource_system_schema.go create mode 100644 catalog/resource_system_schema_test.go create mode 100644 docs/resources/system_schema.md create mode 100644 internal/acceptance/system_schema_test.go diff --git a/catalog/resource_system_schema.go b/catalog/resource_system_schema.go new file mode 100644 index 0000000000..9d0e9b3be1 --- /dev/null +++ b/catalog/resource_system_schema.go @@ -0,0 +1,113 @@ +package catalog + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceSystemSchema() *schema.Resource { + systemSchema := common.StructToSchema(catalog.SystemSchemaInfo{}, func(m map[string]*schema.Schema) map[string]*schema.Schema { + m["metastore_id"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + m["state"].Computed = true + return m + }) + pi := common.NewPairID("metastore_id", "schema").Schema( + func(m map[string]*schema.Schema) map[string]*schema.Schema { + return systemSchema + }) + createOrUpdate := func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + o, n := d.GetChange("schema") + old, okOld := o.(string) + new, okNew := n.(string) + if !okNew || !okOld { + return fmt.Errorf("internal type casting error") + } + log.Printf("[DEBUG] Old system schema: %s, new: %s", old, new) + w, err := c.WorkspaceClient() + if err != nil { + return err + } + metastoreSummary, err := w.Metastores.Summary(ctx) + if err != nil { + return err + } + //enable new schema + err = w.SystemSchemas.Enable(ctx, catalog.EnableRequest{ + MetastoreId: metastoreSummary.MetastoreId, + SchemaName: catalog.EnableSchemaName(new), + }) + //ignore "schema already exists" error + if err != nil && !strings.Contains(err.Error(), "already exists") { + return err + } + //disable old 
schemas if needed + if old != "" { + err = w.SystemSchemas.Disable(ctx, catalog.DisableRequest{ + MetastoreId: metastoreSummary.MetastoreId, + SchemaName: catalog.DisableSchemaName(old), + }) + if err != nil { + return err + } + } + d.Set("metastore_id", metastoreSummary.MetastoreId) + pi.Pack(d) + return nil + } + return common.Resource{ + Schema: systemSchema, + Create: createOrUpdate, + Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + _, schemaName, err := pi.Unpack(d) + if err != nil { + return err + } + w, err := c.WorkspaceClient() + if err != nil { + return err + } + metastoreSummary, err := w.Metastores.Summary(ctx) + if err != nil { + return err + } + systemSchemaInfo, err := w.SystemSchemas.ListByMetastoreId(ctx, metastoreSummary.MetastoreId) + if err != nil { + return err + } + for _, schema := range systemSchemaInfo.Schemas { + if schema.Schema == schemaName { + return common.StructToData(schema, systemSchema, d) + } + } + return nil + }, + Update: createOrUpdate, + Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + _, schemaName, err := pi.Unpack(d) + if err != nil { + return err + } + w, err := c.WorkspaceClient() + if err != nil { + return err + } + metastoreSummary, err := w.Metastores.Summary(ctx) + if err != nil { + return err + } + return w.SystemSchemas.Disable(ctx, catalog.DisableRequest{ + MetastoreId: metastoreSummary.MetastoreId, + SchemaName: catalog.DisableSchemaName(schemaName), + }) + }, + }.ToResource() +} diff --git a/catalog/resource_system_schema_test.go b/catalog/resource_system_schema_test.go new file mode 100644 index 0000000000..baaeb02f65 --- /dev/null +++ b/catalog/resource_system_schema_test.go @@ -0,0 +1,297 @@ +package catalog + +import ( + "net/http" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/assert" +) + +func TestSystemSchemaCreate(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastore_summary", + Response: catalog.GetMetastoreSummaryResponse{ + MetastoreId: "abc", + }, + }, + { + Method: http.MethodPut, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas/access", + Status: 200, + }, + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastore_summary", + Response: catalog.GetMetastoreSummaryResponse{ + MetastoreId: "abc", + }, + }, + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas?", + Response: catalog.ListSystemSchemasResponse{ + Schemas: []catalog.SystemSchemaInfo{ + { + Schema: "access", + State: catalog.SystemSchemaInfoStateEnableCompleted, + }, + { + Schema: "billing", + State: catalog.SystemSchemaInfoStateEnableCompleted, + }, + }, + }, + }, + }, + Resource: ResourceSystemSchema(), + HCL: `schema = "access"`, + Create: true, + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "abc|access", d.Id()) +} + +func TestSystemSchemaCreate_Error(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastore_summary", + Response: catalog.GetMetastoreSummaryResponse{ + MetastoreId: "abc", + }, + }, + { + Method: http.MethodPut, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas/access", + Response: 
apierr.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourceSystemSchema(), + HCL: `schema = "access"`, + Create: true, + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Internal error happened") + assert.Equal(t, "", d.Id(), "Id should be empty for error creates") +} + +func TestSystemSchemaUpdate(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastore_summary", + Response: catalog.GetMetastoreSummaryResponse{ + MetastoreId: "abc", + }, + }, + { + Method: http.MethodPut, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas/access", + Status: 200, + }, + { + Method: http.MethodDelete, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas/information_schema?", + Status: 200, + }, + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastore_summary", + Response: catalog.GetMetastoreSummaryResponse{ + MetastoreId: "abc", + }, + }, + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas?", + Response: catalog.ListSystemSchemasResponse{ + Schemas: []catalog.SystemSchemaInfo{ + { + Schema: "access", + State: catalog.SystemSchemaInfoStateEnableCompleted, + }, + { + Schema: "billing", + State: catalog.SystemSchemaInfoStateEnableCompleted, + }, + }, + }, + }, + }, + Resource: ResourceSystemSchema(), + InstanceState: map[string]string{ + "schema": "information_schema", + }, + HCL: `schema = "access"`, + Update: true, + ID: "abc|information_schema", + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "abc|access", d.Id()) + assert.Equal(t, "access", d.Get("schema")) +} + +func TestSystemSchemaUpdate_Error(t *testing.T) { + _, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastore_summary", + Response: catalog.GetMetastoreSummaryResponse{ + MetastoreId: "abc", + }, + }, + { + Method: http.MethodPut, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas/access", + Response: apierr.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourceSystemSchema(), + InstanceState: map[string]string{ + "schema": "information_schema", + }, + HCL: `schema = "access"`, + Update: true, + ID: "abc|information_schema", + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Internal error happened") +} + +func TestSystemSchemaRead(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastore_summary", + Response: catalog.GetMetastoreSummaryResponse{ + MetastoreId: "abc", + }, + }, + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas?", + Response: catalog.ListSystemSchemasResponse{ + Schemas: []catalog.SystemSchemaInfo{ + { + Schema: "access", + State: catalog.SystemSchemaInfoStateEnableCompleted, + }, + { + Schema: "billing", + State: catalog.SystemSchemaInfoStateEnableCompleted, + }, + }, + }, + }, + }, + Resource: ResourceSystemSchema(), + Read: true, + ID: "abc|access", + }.ApplyAndExpectData(t, map[string]any{ + "schema": "access", + "state": string(catalog.SystemSchemaInfoStateEnableCompleted), + }) +} + +func TestSystemSchemaRead_Error(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: 
"/api/2.1/unity-catalog/metastore_summary", + Response: catalog.GetMetastoreSummaryResponse{ + MetastoreId: "abc", + }, + }, + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas?", + Response: apierr.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourceSystemSchema(), + Read: true, + ID: "abc|access", + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Internal error happened") + assert.Equal(t, "abc|access", d.Id(), "Id should not be empty for error reads") +} + +func TestSystemSchemaDelete(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastore_summary", + Response: catalog.GetMetastoreSummaryResponse{ + MetastoreId: "abc", + }, + }, + { + Method: http.MethodDelete, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas/access?", + Status: 200, + }, + { + Method: http.MethodDelete, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas/billing?", + Status: 200, + }, + }, + HCL: `schema = "access"`, + Resource: ResourceSystemSchema(), + Delete: true, + ID: "abc|access", + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "abc|access", d.Id()) +} + +func TestSystemSchemaDelete_Error(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastore_summary", + Response: catalog.GetMetastoreSummaryResponse{ + MetastoreId: "abc", + }, + }, + { + Method: http.MethodDelete, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas/access?", + Response: apierr.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourceSystemSchema(), + HCL: `schema = "access"`, + Delete: true, + ID: "abc|access", + }.Apply(t) + qa.AssertErrorStartsWith(t, err, "Internal error happened") + assert.Equal(t, "abc|access", d.Id()) +} diff --git a/docs/resources/system_schema.md b/docs/resources/system_schema.md new file mode 100644 index 0000000000..286c812151 --- /dev/null +++ b/docs/resources/system_schema.md @@ -0,0 +1,41 @@ +--- +subcategory: "Unity Catalog" +--- +# databricks_system_schema Resource + +-> **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). + +-> **Notes** + Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. + +Manages system tables enablement. System tables are a Databricks-hosted analytical store of your account’s operational data. System tables can be used for historical observability across your account. System tables must be enabled by an account admin. + +## Example Usage + +Enable the system schema `access` + +```hcl +resource "databricks_system_schema" "this" { + schema = "access" +} +``` + +## Argument Reference + +The following arguments are available: + +* `schema` - (Required) Full name of the system schema. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `state` - The current state of enablement for the system schema. 
+ +## Import + +This resource can be imported by the metastore id and schema name + +```bash +terraform import databricks_system_schema.this | +``` diff --git a/internal/acceptance/system_schema_test.go b/internal/acceptance/system_schema_test.go new file mode 100644 index 0000000000..aff46e4daa --- /dev/null +++ b/internal/acceptance/system_schema_test.go @@ -0,0 +1,14 @@ +package acceptance + +import ( + "testing" +) + +func TestUcAccResourceSystemSchema(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: ` + resource "databricks_system_schema" "this" { + schema = "access" + }`, + }) +} diff --git a/provider/provider.go b/provider/provider.go index 105787ed9c..a3fabfb459 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -153,6 +153,7 @@ func DatabricksProvider() *schema.Provider { "databricks_sql_visualization": sql.ResourceSqlVisualization(), "databricks_sql_widget": sql.ResourceSqlWidget(), "databricks_storage_credential": catalog.ResourceStorageCredential(), + "databricks_system_schema": catalog.ResourceSystemSchema(), "databricks_table": catalog.ResourceTable(), "databricks_token": tokens.ResourceToken(), "databricks_user": scim.ResourceUser(), From 9ef8366f40ad952a2b9bf7640d0883e107578016 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Mon, 2 Oct 2023 14:26:51 +0200 Subject: [PATCH 03/36] Added logging package and fixed issue with API calls not being shown in DEBUG or lower log levels (#2747) * add logger * fix format strings * - * - * - * - * - * - * - --- logger/logger.go | 65 +++++++++++++++++++++++++++++++++++++++++++ logger/logger_test.go | 19 +++++++++++++ provider/provider.go | 2 ++ 3 files changed, 86 insertions(+) create mode 100644 logger/logger.go create mode 100644 logger/logger_test.go diff --git a/logger/logger.go b/logger/logger.go new file mode 100644 index 0000000000..a713c5f906 --- /dev/null +++ b/logger/logger.go @@ -0,0 +1,65 @@ +package logger + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/logger" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +type TfLogger struct { + Name string +} + +// This function is always enabled because TfLogger implements the Logger interface from Go SDK and there we check +// if the logging is enabled based on level (which default to Info). +// This however isn't possible here since tflog isn't enabled / disabled based on log level. +// Omitting is done internally through the `ShouldOmit` method that filters based on logger configurations. 
+func (tfLogger *TfLogger) Enabled(_ context.Context, _ logger.Level) bool { + return true +} + +func (tfLogger *TfLogger) Tracef(ctx context.Context, format string, v ...any) { + if tfLogger == nil { + tflog.Trace(ctx, fmt.Sprintf(format, v...), nil) + } else { + tflog.SubsystemTrace(ctx, tfLogger.Name, fmt.Sprintf(format, v...), nil) + } +} + +func (tfLogger *TfLogger) Debugf(ctx context.Context, format string, v ...any) { + if tfLogger == nil { + tflog.Debug(ctx, fmt.Sprintf(format, v...), nil) + } else { + tflog.SubsystemDebug(ctx, tfLogger.Name, fmt.Sprintf(format, v...), nil) + } +} + +func (tfLogger *TfLogger) Infof(ctx context.Context, format string, v ...any) { + if tfLogger == nil { + tflog.Info(ctx, fmt.Sprintf(format, v...), nil) + } else { + tflog.SubsystemInfo(ctx, tfLogger.Name, fmt.Sprintf(format, v...), nil) + } +} + +func (tfLogger *TfLogger) Warnf(ctx context.Context, format string, v ...any) { + if tfLogger == nil { + tflog.Warn(ctx, fmt.Sprintf(format, v...), nil) + } else { + tflog.SubsystemWarn(ctx, tfLogger.Name, fmt.Sprintf(format, v...), nil) + } +} + +func (tfLogger *TfLogger) Errorf(ctx context.Context, format string, v ...any) { + if tfLogger == nil { + tflog.Error(ctx, fmt.Sprintf(format, v...), nil) + } else { + tflog.SubsystemError(ctx, tfLogger.Name, fmt.Sprintf(format, v...), nil) + } +} + +func SetLogger() { + logger.DefaultLogger = &TfLogger{} +} diff --git a/logger/logger_test.go b/logger/logger_test.go new file mode 100644 index 0000000000..970077c9f6 --- /dev/null +++ b/logger/logger_test.go @@ -0,0 +1,19 @@ +package logger + +import ( + "context" + "testing" + + goLogger "github.com/databricks/databricks-sdk-go/logger" + "github.com/stretchr/testify/assert" +) + +func TestTfLogger_Enabled(t *testing.T) { + l := &TfLogger{} + assert.True(t, l.Enabled(context.Background(), goLogger.LevelInfo)) +} + +func TestSetLogger(t *testing.T) { + SetLogger() + assert.IsType(t, &TfLogger{}, goLogger.DefaultLogger) +} diff --git a/provider/provider.go b/provider/provider.go index a3fabfb459..f9d89b2082 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -21,6 +21,7 @@ import ( "github.com/databricks/terraform-provider-databricks/commands" "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/jobs" + tflogger "github.com/databricks/terraform-provider-databricks/logger" "github.com/databricks/terraform-provider-databricks/mlflow" "github.com/databricks/terraform-provider-databricks/mws" "github.com/databricks/terraform-provider-databricks/permissions" @@ -169,6 +170,7 @@ func DatabricksProvider() *schema.Provider { if p.TerraformVersion != "" { useragent.WithUserAgentExtra("terraform", p.TerraformVersion) } + tflogger.SetLogger() return configureDatabricksClient(ctx, d) } common.AddContextToAllResources(p, "databricks") From f1f50359496962be0f6a8fb5f1867ccbf3702e53 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Tue, 3 Oct 2023 12:36:53 +0200 Subject: [PATCH 04/36] [DOC] Updated `databricks_grants` examples for `databricks_external_location` (#2735) * [DOC] Updated `databricks_grants` examples for `databricks_external_location` Also updated missing grants This fixes #2653 * Add more examples of references to resources --- docs/resources/external_location.md | 4 ++-- docs/resources/grants.md | 16 ++++++++++++++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/docs/resources/external_location.md b/docs/resources/external_location.md index a6b01c7a57..8f1514a04e 100644 --- 
a/docs/resources/external_location.md +++ b/docs/resources/external_location.md @@ -32,7 +32,7 @@ resource "databricks_grants" "some" { external_location = databricks_external_location.some.id grant { principal = "Data Engineers" - privileges = ["CREATE_TABLE", "READ_FILES"] + privileges = ["CREATE_EXTERNAL_TABLE", "READ_FILES"] } } ``` @@ -69,7 +69,7 @@ resource "databricks_grants" "some" { external_location = databricks_external_location.some.id grant { principal = "Data Engineers" - privileges = ["CREATE_TABLE", "READ_FILES"] + privileges = ["CREATE_EXTERNAL_TABLE", "READ_FILES"] } } ``` diff --git a/docs/resources/grants.md b/docs/resources/grants.md index f42b3d30b2..1a8daacc0d 100644 --- a/docs/resources/grants.md +++ b/docs/resources/grants.md @@ -196,7 +196,7 @@ resource "databricks_grants" "volume" { ## Storage credential grants -You can grant `ALL_PRIVILEGES`, `CREATE_EXTERNAL_TABLE`, `READ_FILES` and `WRITE_FILES` privileges to [databricks_storage_credential](storage_credential.md) id specified in `storage_credential` attribute: +You can grant `ALL_PRIVILEGES`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `READ_FILES` and `WRITE_FILES` privileges to [databricks_storage_credential](storage_credential.md) id specified in `storage_credential` attribute: ```hcl resource "databricks_storage_credential" "external" { @@ -216,7 +216,7 @@ resource "databricks_grants" "external_creds" { } ``` -## Storage location grants +## External location grants You can grant `ALL_PRIVILEGES`, `CREATE_EXTERNAL_TABLE`, `CREATE_MANAGED_STORAGE`, `CREATE EXTERNAL VOLUME`, `READ_FILES` and `WRITE_FILES` privileges to [databricks_external_location](external_location.md) id specified in `external_location` attribute: @@ -234,6 +234,18 @@ resource "databricks_grants" "some" { principal = "Data Engineers" privileges = ["CREATE_TABLE", "READ_FILES"] } + grant { + principal = databricks_service_principal.my_sp.application_id + privileges = ["USE_SCHEMA", "MODIFY"] + } + grant { + principal = databricks_group.my_group.display_name + privileges = ["USE_SCHEMA", "MODIFY"] + } + grant { + principal = databricks_group.my_user.user_name + privileges = ["USE_SCHEMA", "MODIFY"] + } } ``` From 1590897f6c94db92094cc5c93668170e84e01c64 Mon Sep 17 00:00:00 2001 From: Krishna Swaroop K Date: Tue, 3 Oct 2023 16:32:43 +0200 Subject: [PATCH 05/36] Add enabled field for queueing (#2741) * add enabled field for queueing * nit * use struct from go sdk * update tests --- jobs/resource_job.go | 5 +---- jobs/resource_job_test.go | 12 +++++++++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/jobs/resource_job.go b/jobs/resource_job.go index 98fa1af67e..f6d8afd8d0 100644 --- a/jobs/resource_job.go +++ b/jobs/resource_job.go @@ -231,9 +231,6 @@ type ContinuousConf struct { PauseStatus string `json:"pause_status,omitempty" tf:"default:UNPAUSED"` } -type Queue struct { -} - type JobRunAs struct { UserName string `json:"user_name,omitempty"` ServicePrincipalName string `json:"service_principal_name,omitempty"` @@ -291,7 +288,7 @@ type JobSettings struct { WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty" tf:"suppress_diff"` NotificationSettings *jobs.JobNotificationSettings `json:"notification_settings,omitempty"` Tags map[string]string `json:"tags,omitempty"` - Queue *Queue `json:"queue,omitempty"` + Queue *jobs.QueueSettings `json:"queue,omitempty"` RunAs *JobRunAs `json:"run_as,omitempty" tf:"suppress_diff"` Health *JobHealth `json:"health,omitempty"` Parameters 
[]JobParameterDefinition `json:"parameters,omitempty" tf:"alias:parameter"` diff --git a/jobs/resource_job_test.go b/jobs/resource_job_test.go index 0b43ac8e15..aa57d16cf0 100644 --- a/jobs/resource_job_test.go +++ b/jobs/resource_job_test.go @@ -48,7 +48,9 @@ func TestResourceJobCreate(t *testing.T) { MinRetryIntervalMillis: 5000, RetryOnTimeout: true, MaxConcurrentRuns: 1, - Queue: &Queue{}, + Queue: &jobs.QueueSettings{ + Enabled: true, + }, RunAs: &JobRunAs{ UserName: "user@mail.com", }, @@ -86,7 +88,9 @@ func TestResourceJobCreate(t *testing.T) { TimezoneID: "America/Los_Angeles", PauseStatus: "PAUSED", }, - Queue: &Queue{}, + Queue: &jobs.QueueSettings{ + Enabled: true, + }, RunAs: &JobRunAs{ UserName: "user@mail.com", }, @@ -116,7 +120,9 @@ func TestResourceJobCreate(t *testing.T) { library { jar = "dbfs://ff/gg/hh.jar" } - queue {} + queue { + enabled = true + } run_as { user_name = "user@mail.com" }`, From 173d0c0e601d870f368e11f9829cbee451ee3189 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Oct 2023 10:13:53 +0200 Subject: [PATCH 06/36] Bump github.com/databricks/databricks-sdk-go from 0.21.0 to 0.22.0 (#2761) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.21.0 to 0.22.0. - [Release notes](https://github.com/databricks/databricks-sdk-go/releases) - [Changelog](https://github.com/databricks/databricks-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/databricks/databricks-sdk-go/compare/v0.21.0...v0.22.0) --- updated-dependencies: - dependency-name: github.com/databricks/databricks-sdk-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 215480fbd1..dda0390323 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/databricks/terraform-provider-databricks go 1.19 require ( - github.com/databricks/databricks-sdk-go v0.21.0 + github.com/databricks/databricks-sdk-go v0.22.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 diff --git a/go.sum b/go.sum index b9e7a935c7..fa14369b13 100644 --- a/go.sum +++ b/go.sum @@ -21,8 +21,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/databricks/databricks-sdk-go v0.21.0 h1:d/T4oljsbvyrO3GQzUfkkWHckw/Z9qe1gL+/KMzRVn8= -github.com/databricks/databricks-sdk-go v0.21.0/go.mod h1:COiklTN3IdieazXcs4TnMou5GQFwIM7uhMGrz7nEAAk= +github.com/databricks/databricks-sdk-go v0.22.0 h1:CIwNZcOV7wYZmRLl1NWA+07f2j6H9h5L6MhR5O/4dRw= +github.com/databricks/databricks-sdk-go v0.22.0/go.mod h1:COiklTN3IdieazXcs4TnMou5GQFwIM7uhMGrz7nEAAk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 3098694a7919fe9ec09e3ae3de5c493284dc1ad6 Mon Sep 17 
00:00:00 2001 From: Pieter Noordhuis Date: Wed, 4 Oct 2023 10:54:15 +0200 Subject: [PATCH 07/36] GitHub Actions workflow to compute provider schema diff (#2740) * wip * wip * wip * wip * wip * wip * wip * wip * Name * Continue on error --- .github/workflows/schema.yml | 145 +++++++++++++++++++++++++++++++++++ 1 file changed, 145 insertions(+) create mode 100644 .github/workflows/schema.yml diff --git a/.github/workflows/schema.yml b/.github/workflows/schema.yml new file mode 100644 index 0000000000..af32bdd3de --- /dev/null +++ b/.github/workflows/schema.yml @@ -0,0 +1,145 @@ +name: Provider schema + +on: + pull_request: + types: [opened, synchronize] + + workflow_dispatch: + inputs: + base: + description: 'Base ref' + default: 'master' + required: true + head: + description: 'Head ref' + default: 'master' + required: true + +jobs: + compute_current: + name: "Generate current" + runs-on: ubuntu-latest + + steps: + - if: github.event_name == 'pull_request' + name: Checkout + uses: actions/checkout@v4 + with: + # Checkout main branch to generate schema for current release + ref: master + + - if: github.event_name == 'workflow_dispatch' + name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.base }} + + - name: 'Setup Go' + uses: actions/setup-go@v4 + with: + go-version: 1.21.x + + - name: 'Setup Terraform' + uses: hashicorp/setup-terraform@v2 + with: + terraform_wrapper: false + + - run: make install + + - name: Generate provider schema + shell: bash + run: | + set -ex + cd /tmp + cat > main.tf < provider.json + + - name: 'Upload provider schema' + uses: actions/upload-artifact@v3 + with: + name: schema-current + path: /tmp/provider.json + retention-days: 1 + + compute_new: + name: "Generate new" + runs-on: ubuntu-latest + + steps: + - if: github.event_name == 'pull_request' + name: Checkout + uses: actions/checkout@v4 + + - if: github.event_name == 'workflow_dispatch' + name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.head }} + + - name: 'Setup Go' + uses: actions/setup-go@v4 + with: + go-version: 1.21.x + + - name: 'Setup Terraform' + uses: hashicorp/setup-terraform@v2 + with: + terraform_wrapper: false + + - run: make install + + - name: Generate provider schema + shell: bash + run: | + set -ex + cd /tmp + cat > main.tf < provider.json + + - name: 'Upload provider schema' + uses: actions/upload-artifact@v3 + with: + name: schema-new + path: /tmp/provider.json + retention-days: 1 + + diff: + needs: [compute_current, compute_new] + + name: "Compute diff" + runs-on: ubuntu-latest + + steps: + - name: 'Setup Go' + uses: actions/setup-go@v4 + with: + go-version: 1.21.x + cache: false + + - run: go install github.com/josephburnett/jd@latest + + - name: 'Download provider schemas' + uses: actions/download-artifact@v3 + + - run: ls -l schema*/* + + - run: jd -color schema-current/provider.json schema-new/provider.json + continue-on-error: true From 5a13e1b2ea74b9577931d9afff0a60f2350daa43 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Wed, 4 Oct 2023 11:10:54 +0200 Subject: [PATCH 08/36] Mask sensitive field (#2755) * Mask sensitive field * Add test --- mws/resource_mws_workspaces.go | 20 +++++++++++++++----- mws/resource_mws_workspaces_test.go | 16 ++++++++++++++++ 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/mws/resource_mws_workspaces.go b/mws/resource_mws_workspaces.go index 617e78e719..9f1a6ea7fc 100644 --- a/mws/resource_mws_workspaces.go +++ b/mws/resource_mws_workspaces.go @@ -340,10 +340,20 @@ func (a 
WorkspacesAPI) List(mwsAcctID string) ([]Workspace, error) { } type Token struct { - LifetimeSeconds int32 `json:"lifetime_seconds,omitempty" tf:"default:2592000"` - Comment string `json:"comment,omitempty" tf:"default:Terraform PAT"` - TokenID string `json:"token_id,omitempty" tf:"computed"` - TokenValue string `json:"token_value,omitempty" tf:"computed,sensitive"` + LifetimeSeconds int32 `json:"lifetime_seconds,omitempty" tf:"default:2592000"` + Comment string `json:"comment,omitempty" tf:"default:Terraform PAT"` + TokenID string `json:"token_id,omitempty" tf:"computed"` + TokenValue SensitiveString `json:"token_value,omitempty" tf:"computed,sensitive"` +} + +type SensitiveString string + +func (s SensitiveString) GoString() string { + return "****" +} + +func (s SensitiveString) String() string { + return "****" } // ephemeral entity to use with StructToData() @@ -370,7 +380,7 @@ func CreateTokenIfNeeded(workspacesAPI WorkspacesAPI, return fmt.Errorf("cannot create token: %w", err) } wsToken.Token.TokenID = token.TokenInfo.TokenID - wsToken.Token.TokenValue = token.TokenValue + wsToken.Token.TokenValue = SensitiveString(token.TokenValue) return common.StructToData(wsToken, workspaceSchema, d) } diff --git a/mws/resource_mws_workspaces_test.go b/mws/resource_mws_workspaces_test.go index a612a6565f..bb4e120579 100644 --- a/mws/resource_mws_workspaces_test.go +++ b/mws/resource_mws_workspaces_test.go @@ -2,6 +2,7 @@ package mws import ( "context" + "fmt" "testing" "time" @@ -1658,3 +1659,18 @@ func TestResourceWorkspaceCreateGcpManagedVPC(t *testing.T) { Create: true, }.ApplyNoError(t) } + +func TestSensitiveDataInLogs(t *testing.T) { + tk := Token{ + Comment: "comment", + LifetimeSeconds: 123, + TokenID: "tokenID", + TokenValue: "sensitive", + } + assert.Contains(t, fmt.Sprintf("%v", tk), "comment") + assert.Contains(t, fmt.Sprintf("%#v", tk), "comment") + assert.Contains(t, fmt.Sprintf("%+v", tk), "comment") + assert.NotContains(t, fmt.Sprintf("%v", tk), "sensitive") + assert.NotContains(t, fmt.Sprintf("%#v", tk), "sensitive") + assert.NotContains(t, fmt.Sprintf("%+v", tk), "sensitive") +} From 4935eb671cb137eb691f8f68261b26a7d42ef91e Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 4 Oct 2023 13:10:57 +0200 Subject: [PATCH 09/36] Export: parallel export of resources (#2742) * Preparing for parallel execution - adding mutexes, isolating code, etc. * First version of parallelized export * Add caching of users & service principals * make per resource concurrency configurable * Allow to ignore specific directories when doing the listing * don't ignore not running jobs * fix tests in `context_test` * Fix rest of tests * another test fix * Document parameters controlling parallel export * Add `-noformat` option to control formatting of exported files * Fix panic when reading SQL Alert If query is deleted then Query object is set to `nil`, and when we're trying to get Query ID from it, panic arise. 
* fixing flacky test * move log entry to TRACE * Address review comments * Port fix for a panic --- docs/guides/experimental-exporter.md | 42 ++-- exporter/command.go | 1 + exporter/context.go | 327 +++++++++++++++++---------- exporter/context_test.go | 81 +++++-- exporter/exporter_test.go | 12 +- exporter/importables.go | 9 +- exporter/importables_test.go | 45 ++-- exporter/model.go | 141 +++++++++++- exporter/util.go | 125 +++++++--- exporter/util_test.go | 27 ++- sql/resource_sql_alerts.go | 47 ++-- workspace/resource_notebook.go | 39 +++- workspace/resource_notebook_test.go | 2 +- 13 files changed, 647 insertions(+), 251 deletions(-) diff --git a/docs/guides/experimental-exporter.md b/docs/guides/experimental-exporter.md index 7f108fd7d3..4066433bc8 100644 --- a/docs/guides/experimental-exporter.md +++ b/docs/guides/experimental-exporter.md @@ -5,9 +5,9 @@ page_title: "Experimental resource exporter" -> **Note** This tooling is experimental and provided as is. It has an evolving interface, which may change or be removed in future versions of the provider. --> **Note** Use the same user who did the exporting to import the exported templates. Otherwise it could cause changes in the jobs ownership. +-> **Note** Use the same user who did the exporting to import the exported templates. Otherwise, it could cause changes in the ownership of the objects. -Generates `*.tf` files for Databricks resources as well as `import.sh` to run import state. Available as part of provider binary. The only possible way to authenticate is through [environment variables](../index.md#Environment-variables). It's best used when you need to quickly export Terraform configuration for an existing Databricks workspace. After generating configuration, we strongly recommend manually review all created files. +Generates `*.tf` files for Databricks resources as well as `import.sh` that is used to import objects into the Terraform state. Available as part of provider binary. The only possible way to authenticate is through [environment variables](../index.md#Environment-variables). It's best used when you need to quickly export Terraform configuration for an existing Databricks workspace. After generating the configuration, we strongly recommend manually reviewing all created files. ## Example Usage @@ -33,32 +33,33 @@ export DATABRICKS_TOKEN=... All arguments are optional and they tune what code is being generated. -* `-directory` - Path to directory, where `*.tf` and `import.sh` files would be written. By default it's set to the current working directory. +* `-directory` - Path to a directory, where `*.tf` and `import.sh` files would be written. By default, it's set to the current working directory. * `-module` - Name of module in Terraform state, that would affect reference resolution and prefixes for generated commands in `import.sh`. -* `-last-active-days` - Items older than `-last-active-days` won't be imported. By default the value is set to 3650 (10 years). Has an effect on listing [databricks_cluster](../resources/cluster.md) and [databricks_job](../resources/job.md) resources. -* `-services` - Comma-separated list of services to import. By default all services are imported. +* `-last-active-days` - Items older than `-last-active-days` won't be imported. By default, the value is set to 3650 (10 years). Has an effect on listing [databricks_cluster](../resources/cluster.md) and [databricks_job](../resources/job.md) resources. +* `-services` - Comma-separated list of services to import. 
By default, all services are imported. * `-listing` - Comma-separated list of services to be listed and further passed on for importing. `-services` parameter controls which transitive dependencies will be processed. We recommend limiting with `-listing` more often, than with `-services`. -* `-match` - Match resource names during listing operation. This filter applies to all resources that are getting listed, so if you want to import all dependencies of just one cluster, specify `-match=autoscaling -listing=compute`. By default it is empty, which matches everything. +* `-match` - Match resource names during listing operation. This filter applies to all resources that are getting listed, so if you want to import all dependencies of just one cluster, specify `-match=autoscaling -listing=compute`. By default, it is empty, which matches everything. * `-mounts` - List DBFS mount points, which is an extremely slow operation and would not trigger unless explicitly specified. -* `-generateProviderDeclaration` - flag that toggles generation of `databricks.tf` file with declaration of the Databricks Terraform provider that is necessary for Terraform versions since Terraform 0.13 (disabled by default). -* `-prefix` - optional prefix that will be added to the name of all exported resources - that's useful for exporting resources multiple workspaces for merging into a single one. +* `-generateProviderDeclaration` - the flag that toggles the generation of `databricks.tf` file with the declaration of the Databricks Terraform provider that is necessary for Terraform versions since Terraform 0.13 (disabled by default). +* `-prefix` - optional prefix that will be added to the name of all exported resources - that's useful for exporting resources from multiple workspaces for merging into a single one. * `-skip-interactive` - optionally run in a non-interactive mode. * `-includeUserDomains` - optionally include domain name into generated resource name for `databricks_user` resource. -* `-importAllUsers` - optionally include all users and service principals even if they only part of the `users` group. -* `-incremental` - experimental option for incremental export of modified resources and merging with existing resources. *Please note that only limited set of resources (notebooks, SQL queries/dashboards/alerts, ...) provides information about last modified date - all other resources will be re-exported again! Also, it's not possible to detect deletion of the resources, so you will need to do periodic full export if resources are deleted!* **Requires** `-updated-since` option if no `exporter-run-stats.json` file exists in the output directory. -* `-updated-since` - timestamp (in ISO8601 format supported by Go language) for exporting of resources modified since a giving timestamp. I.e. `2023-07-24T00:00:00Z`. If not specified, exporter will try to load last run timestamp from the `exporter-run-stats.json` file generated during the export, and use it. -* `-notebooksFormat` - optional format for exporting of notebooks. Supported values are `SOURCE` (default), `DBC`, `JUPYTER`. This could be used to export of notebooks with embedded dashboards. +* `-importAllUsers` - optionally include all users and service principals even if they are only part of the `users` group. +* `-incremental` - experimental option for incremental export of modified resources and merging with existing resources. *Please note that only a limited set of resources (notebooks, SQL queries/dashboards/alerts, ...) 
provides information about the last modified date - all other resources will be re-exported again! Also, it's not possible to detect the deletion of the resources, so you will need to do periodic full export if resources are deleted!* **Requires** `-updated-since` option if no `exporter-run-stats.json` file exists in the output directory. +* `-updated-since` - timestamp (in ISO8601 format supported by Go language) for exporting of resources modified since a given timestamp. I.e. `2023-07-24T00:00:00Z`. If not specified, the exporter will try to load the last run timestamp from the `exporter-run-stats.json` file generated during the export, and use it. +* `-notebooksFormat` - optional format for exporting of notebooks. Supported values are `SOURCE` (default), `DBC`, `JUPYTER`. This could be used to export notebooks with embedded dashboards. +* `-noformat` - optionally disable the execution of `terraform fmt` on the exported files (enabled by default). ## Services -Services are just logical groups of resources used for filtering and organization in files written in `-directory`. All resources are globally sorted by their resource name, which technically allows you to use generated files for compliance purposes. Nevertheless, managing the entire Databricks workspace with Terraform is the prefered way. With the exception of notebooks and possibly libraries, which may have their own CI/CD processes. +Services are just logical groups of resources used for filtering and organization in files written in `-directory`. All resources are globally sorted by their resource name, which technically allows you to use generated files for compliance purposes. Nevertheless, managing the entire Databricks workspace with Terraform is the preferred way. With the exception of notebooks and possibly libraries, which may have their own CI/CD processes. * `access` - [databricks_permissions](../resources/permissions.md), [databricks_instance_profile](../resources/instance_profile.md) and [databricks_ip_access_list](../resources/ip_access_list.md). * `compute` - **listing** [databricks_cluster](../resources/cluster.md). Includes [cluster policies](../resources/cluster_policy.md). * `directories` - **listing** [databricks_directory](../resources/directory.md) * `dlt` - **listing** [databricks_pipeline](../resources/pipeline.md) * `groups` - [databricks_group](../data-sources/group.md) with [membership](../resources/group_member.md) and [data access](../resources/group_instance_profile.md). -* `jobs` - **listing** [databricks_job](../resources/job.md). Usually there are more automated jobs than interactive clusters, so they get their own file in this tool's output. +* `jobs` - **listing** [databricks_job](../resources/job.md). Usually, there are more automated jobs than interactive clusters, so they get their own file in this tool's output. * `mlflow-webhooks` - **listing** [databricks_mlflow_webhook](../resources/mlflow_webhook.md). * `model-serving` - **listing** [databricks_model_serving](../resources/model_serving.md). * `mounts` - **listing** works only in combination with `-mounts` command-line option. 
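As a concrete illustration of how the listing and service filters above combine, a hypothetical run that exports only jobs and clusters plus the groups and secrets they reference might look like the following. This is a sketch only — the binary name follows the Example Usage section earlier in this guide, and the match value and output directory are placeholders:

```sh
./terraform-provider-databricks exporter -skip-interactive \
  -listing=jobs,compute \
  -services=jobs,compute,groups,secrets \
  -match=prod \
  -directory=/tmp/databricks-export
```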
@@ -72,12 +73,21 @@ Services are just logical groups of resources used for filtering and organizatio * `sql-endpoints` - **listing** [databricks_sql_endpoint](../resources/sql_endpoint.md) along with [databricks_sql_global_config](../resources/sql_global_config.md) * `sql-queries` - **listing** [databricks_sql_query](../resources/sql_query.md) * `storage` - any referenced [databricks_dbfs_file](../resources/dbfs_file.md) will be downloaded locally and properly arranged into terraform state. -* `users` - [databricks_user](../resources/user.md) and [databricks_service_principal](../resources/service_principal.md) are written to their own file, simply because of their amount. If you use SCIM provisioning, the only use-case for importing `users` service is to migrate workspaces. +* `users` - [databricks_user](../resources/user.md) and [databricks_service_principal](../resources/service_principal.md) are written to their own file, simply because of their amount. If you use SCIM provisioning, the only use case for importing `users` service is to migrate workspaces. * `workspace` - [databricks_workspace_conf](../resources/workspace_conf.md) and [databricks_global_init_script](../resources/global_init_script.md) ## Secrets -For security reasons, [databricks_secret](../resources/secret.md) cannot contain actual plaintext secrets. Importer will create a variable in `vars.tf`, that would have the same name as secret. You are supposed to [fill in the value of the secret](https://blog.gruntwork.io/a-comprehensive-guide-to-managing-secrets-in-your-terraform-code-1d586955ace1#0e7d) after that. +For security reasons, [databricks_secret](../resources/secret.md) cannot contain actual plaintext secrets. Importer will create a variable in `vars.tf`, that would have the same name as the secret. You are supposed to [fill in the value of the secret](https://blog.gruntwork.io/a-comprehensive-guide-to-managing-secrets-in-your-terraform-code-1d586955ace1#0e7d) after that. + +## Parallel execution + +To speed up export, Terraform Exporter performs many operations, such as listing & actual data exporting, in parallel using Goroutines. There are built-in defaults controlling the parallelism, but it's also possible to tune some parameters using environment variables specific to the exporter: + +* `EXPORTER_WS_LIST_PARALLELISM` (default: `5`) controls how many Goroutines are used to perform parallel listing of Databricks Workspace objects (notebooks, directories, workspace files, ...). +* `EXPORTER_DIRECTORIES_CHANNEL_SIZE` (default: `100000`) controls the capacity of the channel that is used when listing workspace objects. Please make sure that this value is big enough (bigger than the number of directories in the workspace, default value should be ok for most cases), otherwise, there is a chance of deadlock. +* `EXPORTER_PARALLELISM_NNN` - number of Goroutines used to process resources of a specific type (replace `NNN` with the exact resource name, for example, `EXPORTER_PARALLELISM_databricks_notebook=10` sets the number of Goroutines for `databricks_notebook` resource to `10`). Defaults for some resources are defined by the `goroutinesNumber` map in `exporter/context.go`, or equal to `2` if there is no value there. 
*Don't increase default values too much to avoid REST API throttling!* + ## Support Matrix diff --git a/exporter/command.go b/exporter/command.go index 8f06d695e3..2ab77a1d21 100644 --- a/exporter/command.go +++ b/exporter/command.go @@ -104,6 +104,7 @@ func Run(args ...string) error { flags.Int64Var(&ic.lastActiveDays, "last-active-days", 3650, "Items with older than activity specified won't be imported.") flags.BoolVar(&ic.incremental, "incremental", false, "Incremental export of the data. Requires -updated-since parameter") + flags.BoolVar(&ic.noFormat, "noformat", false, "Don't run `terraform fmt` on exported files") flags.StringVar(&ic.updatedSinceStr, "updated-since", "", "Include only resources updated since a given timestamp (in ISO8601 format, i.e. 2023-07-01T00:00:00Z)") flags.BoolVar(&ic.debug, "debug", false, "Print extra debug information.") diff --git a/exporter/context.go b/exporter/context.go index 12234b0be0..6c1b3369a0 100644 --- a/exporter/context.go +++ b/exporter/context.go @@ -14,6 +14,7 @@ import ( "sort" "strconv" "strings" + "sync" "time" "github.com/databricks/terraform-provider-databricks/commands" @@ -26,7 +27,6 @@ import ( "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/zclconf/go-cty/cty" ) @@ -51,47 +51,82 @@ import ( +--------------------+ +-----------------+ +-----------------+ */ +type resourceChannel chan *resource + type importContext struct { - Module string - Context context.Context - Client *common.DatabricksClient - State stateApproximation - Importables map[string]importable - Resources map[string]*schema.Resource - Scope importedResources - Files map[string]*hclwrite.File - Directory string - importing map[string]bool - nameFixes []regexFix - hclFixes []regexFix - allUsers []scim.User - allGroups []scim.Group - mountMap map[string]mount - variables map[string]string - testEmits map[string]bool - sqlDatasources map[string]string - workspaceConfKeys map[string]any - allDirectories []workspace.ObjectStatus - allWorkspaceObjects []workspace.ObjectStatus + // not modified/used only in single thread + Module string + Context context.Context + Client *common.DatabricksClient + Importables map[string]importable + Resources map[string]*schema.Resource + Files map[string]*hclwrite.File + Directory string + nameFixes []regexFix + hclFixes []regexFix + variables map[string]string + workspaceConfKeys map[string]any + + channels map[string]resourceChannel + + // mutable resources + State stateApproximation + Scope importedResources + // command-line resources (immutable, or set by the single thread) includeUserDomains bool importAllUsers bool debug bool incremental bool mounts bool + noFormat bool services string listing string match string lastActiveDays int64 lastActiveMs int64 - updatedSinceStr string - updatedSinceMs int64 generateDeclaration bool meAdmin bool prefix string accountLevel bool shImports map[string]bool notebooksFormat string + updatedSinceStr string + updatedSinceMs int64 + + waitGroup *sync.WaitGroup + + // TODO: protect by mutex? 
+ mountMap map[string]mount + + // + testEmits map[string]bool + testEmitsMutex sync.Mutex + + // + allGroups []scim.Group + groupsMutex sync.Mutex + + // + allUsers map[string]scim.User + usersMutex sync.RWMutex + + // + allSps map[string]scim.User + spsMutex sync.RWMutex + + // + importing map[string]bool + importingMutex sync.RWMutex + + // + sqlDatasources map[string]string + sqlDatasourcesMutex sync.Mutex + + // workspace-related objects & corresponding mutex + allDirectories []workspace.ObjectStatus + allWorkspaceObjects []workspace.ObjectStatus + wsObjectsMutex sync.RWMutex } type mount struct { @@ -125,6 +160,34 @@ var workspaceConfKeys = map[string]any{ "enableDeprecatedGlobalInitScripts": false, } +const ( + defaultChannelSize = 100000 + defaultNumRoutines = 2 + envVariablePrefix = "EXPORTER_PARALLELISM_" +) + +// increased concurrency limits, could be also overridden via environment variables with name: envVariablePrefix + resource type +var goroutinesNumber = map[string]int{ + "databricks_notebook": 7, + "databricks_directory": 5, + "databricks_workspace_file": 5, + "databricks_dbfs_file": 3, + "databricks_user": 1, + "databricks_service_principal": 1, + "databricks_sql_dashboard": 3, + "databricks_sql_query": 5, + "databricks_sql_alert": 2, + "databricks_permissions": 10, +} + +func makeResourcesChannels(p *schema.Provider) map[string]resourceChannel { + channels := make(map[string]resourceChannel, len(p.ResourcesMap)) + for r := range p.ResourcesMap { + channels[r] = make(resourceChannel, defaultChannelSize) + } + return channels +} + func newImportContext(c *common.DatabricksClient) *importContext { p := provider.DatabricksProvider() p.TerraformVersion = "exporter" @@ -136,6 +199,7 @@ func newImportContext(c *common.DatabricksClient) *importContext { c *common.DatabricksClient) common.CommandExecutor { return commands.NewCommandsAPI(ctx, c) }) + return &importContext{ Client: c, Context: ctx, @@ -143,18 +207,21 @@ func newImportContext(c *common.DatabricksClient) *importContext { Importables: resourcesMap, Resources: p.ResourcesMap, Files: map[string]*hclwrite.File{}, - Scope: []*resource{}, + Scope: importedResources{}, importing: map[string]bool{}, nameFixes: nameFixes, hclFixes: []regexFix{ // Be careful with that! 
it may break working code }, - allUsers: []scim.User{}, variables: map[string]string{}, allDirectories: []workspace.ObjectStatus{}, allWorkspaceObjects: []workspace.ObjectStatus{}, workspaceConfKeys: workspaceConfKeys, shImports: make(map[string]bool), notebooksFormat: "SOURCE", + allUsers: map[string]scim.User{}, + allSps: map[string]scim.User{}, + waitGroup: &sync.WaitGroup{}, + channels: makeResourcesChannels(p), } } @@ -198,6 +265,8 @@ func (ic *importContext) Run() error { ic.updatedSinceStr) } ic.updatedSinceStr = tm.UTC().Format(time.RFC3339) + tm, _ = time.Parse(time.RFC3339, ic.updatedSinceStr) + ic.updatedSinceMs = tm.UnixMilli() } log.Printf("[INFO] Importing %s module into %s directory Databricks resources of %s services", @@ -238,27 +307,44 @@ func (ic *importContext) Run() error { } } } + // Concurrent execution part + if ic.waitGroup == nil { + ic.waitGroup = &sync.WaitGroup{} + } + // Start goroutines for each resource type + ic.startImportChannels() - for resourceName, ir := range ic.Importables { + // Start listing of objects + for rnLoop, irLoop := range ic.Importables { + resourceName := rnLoop + ir := irLoop if ir.List == nil { continue } if !strings.Contains(ic.listing, ir.Service) { - log.Printf("[DEBUG] %s (%s service) is not part of listing", - resourceName, ir.Service) + log.Printf("[DEBUG] %s (%s service) is not part of listing", resourceName, ir.Service) continue } if ic.accountLevel && !ir.AccountLevel { log.Printf("[DEBUG] %s (%s service) is not account level", resourceName, ir.Service) continue } - if err := ir.List(ic); err != nil { - log.Printf("[ERROR] %s (%s service) listing failed: %s", - resourceName, ir.Service, err) - continue - } + ic.waitGroup.Add(1) + go func() { + if err := ir.List(ic); err != nil { + log.Printf("[ERROR] %s (%s service) listing failed: %s", resourceName, ir.Service, err) + } + log.Printf("[DEBUG] Finished listing for service %s", resourceName) + ic.waitGroup.Done() + }() } - if len(ic.Scope) == 0 { + + ic.waitGroup.Wait() + // close channels + ic.closeImportChannels() + + // This should be single threaded... + if ic.Scope.Len() == 0 { return fmt.Errorf("no resources to import") } shFileName := fmt.Sprintf("%s/import.sh", ic.Directory) @@ -338,29 +424,67 @@ func (ic *importContext) Run() error { return err } - cmd := exec.CommandContext(context.Background(), "terraform", "fmt") - cmd.Dir = ic.Directory - err = cmd.Run() - if err != nil { - return err - } // if stats, err := os.Create(statsFileName); err == nil { defer stats.Close() statsData := map[string]any{ "startTime": startTime.UTC().Format(time.RFC3339), "duration": fmt.Sprintf("%f sec", time.Since(startTime).Seconds()), - "exportedObjects": len(ic.Scope), + "exportedObjects": ic.Scope.Len(), } statsBytes, _ := json.Marshal(statsData) if _, err = stats.Write(statsBytes); err != nil { return err } } + + if !ic.noFormat { + // format generated source code + cmd := exec.CommandContext(context.Background(), "terraform", "fmt") + cmd.Dir = ic.Directory + err = cmd.Run() + if err != nil { + log.Printf("[ERROR] problems when formatting the generated code: %v", err) + return err + } + } log.Printf("[INFO] Done. 
Please edit the files and roll out new environment.") return nil } +func (ic *importContext) startImportChannels() { + for rt, c := range ic.channels { + ch := c + resourceType := rt + + numRoutines, exists := goroutinesNumber[resourceType] + if !exists { + numRoutines = defaultNumRoutines + } + numRoutines = getEnvAsInt(envVariablePrefix+resourceType, numRoutines) + + for i := 0; i < numRoutines; i++ { + num := i + go func() { + log.Printf("[DEBUG] Starting goroutine %d for resource %s", num, resourceType) + for r := range ch { + log.Printf("[DEBUG] channel for %s, channel size=%d got %v", resourceType, len(ch), r) + if r != nil { + r.ImportResource(ic) + } + } + }() + } + } +} + +func (ic *importContext) closeImportChannels() { + for rt, ch := range ic.channels { + log.Printf("[DEBUG] Closing channel for resource %s", rt) + close(ch) + } +} + func generateBlockFullName(block *hclwrite.Block) string { return block.Type() + "_" + strings.Join(block.Labels(), "_") } @@ -455,10 +579,10 @@ func (ic *importContext) generateVariables() error { } func (ic *importContext) generateHclForResources(sh *os.File) { - sort.Sort(ic.Scope) - scopeSize := len(ic.Scope) + resources := ic.Scope.Sorted() + scopeSize := ic.Scope.Len() log.Printf("[INFO] Generating configuration for %d resources", scopeSize) - for i, r := range ic.Scope { + for i, r := range resources { ir := ic.Importables[r.Resource] f, ok := ic.Files[ir.Service] if !ok { @@ -505,8 +629,9 @@ func (ic *importContext) MatchesName(n string) bool { return strings.Contains(strings.ToLower(n), strings.ToLower(ic.match)) } +// this will run single threaded func (ic *importContext) Find(r *resource, pick string, ref reference) (string, hcl.Traversal) { - for _, sr := range ic.State.Resources { + for _, sr := range ic.State.Resources() { if sr.Type != r.Resource { continue } @@ -520,6 +645,7 @@ func (ic *importContext) Find(r *resource, pick string, ref reference) (string, res := ref.Regexp.FindStringSubmatch(r.Value) if len(res) < 2 { log.Printf("[WARN] no match for regexp: %v in string %s", ref.Regexp, r.Value) + continue } matchValue = res[1] } @@ -569,31 +695,33 @@ func (ic *importContext) Has(r *resource) bool { return ic.HasInState(r, false) } +func (ic *importContext) isImporting(s string) (bool, bool) { + ic.importingMutex.RLocker().Lock() + defer ic.importingMutex.RLocker().Unlock() + v, visiting := ic.importing[s] + return v, visiting +} + // This function checks if resource exist. 
onlyAdded flag enforces that true is returned only if it was added with Add() func (ic *importContext) HasInState(r *resource, onlyAdded bool) bool { - if v, visiting := ic.importing[r.String()]; visiting && (v || !onlyAdded) { + v, visiting := ic.isImporting(r.String()) + if visiting && (v || !onlyAdded) { return true } - k, v := r.MatchPair() - for _, sr := range ic.State.Resources { - if sr.Type != r.Resource { - continue - } - for _, i := range sr.Instances { - tv, ok := i.Attributes[k].(string) - if ok && tv == v { - return true - } - } - } - return false + return ic.State.Has(r) +} + +func (ic *importContext) setImportingState(s string, state bool) { + ic.importingMutex.Lock() + defer ic.importingMutex.Unlock() + ic.importing[s] = state } func (ic *importContext) Add(r *resource) { if ic.HasInState(r, true) { // resource must exist and already marked as added return } - ic.importing[r.String()] = true // mark resource as added + ic.setImportingState(r.String(), true) // mark resource as added state := r.Data.State() if state == nil { log.Printf("[ERROR] state is nil for %s", r) @@ -609,7 +737,7 @@ func (ic *importContext) Add(r *resource) { r.Mode = "managed" } inst.Attributes["id"] = r.ID - ic.State.Resources = append(ic.State.Resources, resourceApproximation{ + ic.State.Append(resourceApproximation{ Mode: r.Mode, Module: ic.Module, Type: r.Resource, @@ -617,7 +745,7 @@ func (ic *importContext) Add(r *resource) { Instances: []instanceApproximation{inst}, }) // in single-threaded scenario scope is toposorted - ic.Scope = append(ic.Scope, r) + ic.Scope.Append(r) } func (ic *importContext) regexFix(s string, fixes []regexFix) string { @@ -661,74 +789,43 @@ func (ic *importContext) Emit(r *resource) { } if ic.testEmits != nil { log.Printf("[INFO] %s is emitted in test mode", r) + ic.testEmitsMutex.Lock() ic.testEmits[r.String()] = true + ic.testEmitsMutex.Unlock() return } - ic.importing[r.String()] = false // we're starting to add a new resource - pr, ok := ic.Resources[r.Resource] + ic.setImportingState(r.String(), false) // we're starting to add a new resource + ir, ok := ic.Importables[r.Resource] if !ok { - log.Printf("[ERROR] %s is not available in provider", r) + log.Printf("[ERROR] %s is not available for import", r) return } - ir, ok := ic.Importables[r.Resource] + _, ok = ic.Resources[r.Resource] if !ok { - log.Printf("[ERROR] %s is not available for import", r) + log.Printf("[ERROR] %s is not available in provider", r) return } + if ic.accountLevel && !ir.AccountLevel { - log.Printf("[DEBUG] %s (%s service) is not part of the account level export", - r.Resource, ir.Service) + log.Printf("[DEBUG] %s (%s service) is not part of the account level export", r.Resource, ir.Service) return - } + // TODO: add similar condition for checking workspace-level objects only. After new ACLs import is merged + + // TODO: split services into slice? 
if !strings.Contains(ic.services, ir.Service) { - log.Printf("[DEBUG] %s (%s service) is not part of the import", - r.Resource, ir.Service) + log.Printf("[DEBUG] %s (%s service) is not part of the import", r.Resource, ir.Service) return } - if r.ID == "" { - if ir.Search == nil { - log.Printf("[ERROR] Searching %s is not available", r) - return - } - if err := ir.Search(ic, r); err != nil { - log.Printf("[ERROR] Cannot search for a resource %s: %v", err, r) - return - } - if r.ID == "" { - log.Printf("[INFO] Cannot find %s", r) - return - } - } - if r.Data == nil { - // empty data with resource schema - r.Data = pr.Data(&terraform.InstanceState{ - Attributes: map[string]string{}, - ID: r.ID, - }) - r.Data.MarkNewResource() - resource := strings.ReplaceAll(r.Resource, "databricks_", "") - ctx := context.WithValue(ic.Context, common.ResourceName, resource) - apiVersion := ic.Importables[r.Resource].ApiVersion - if apiVersion != "" { - ctx = context.WithValue(ctx, common.Api, apiVersion) - } - if dia := pr.ReadContext(ctx, r.Data, ic.Client); dia != nil { - log.Printf("[ERROR] Error reading %s#%s: %v", r.Resource, r.ID, dia) - return - } - if r.Data.Id() == "" { - r.Data.SetId(r.ID) - } - } - r.Name = ic.ResourceName(r) - if ir.Import != nil { - if err := ir.Import(ic, r); err != nil { - log.Printf("[ERROR] Failed custom import of %s: %s", r, err) - return - } + // from here, it should be done by the goroutine... send resource into the channel + ch, exists := ic.channels[r.Resource] + if exists { + log.Printf("[TRACE] increasing counter & sending to the channel for resource %s", r.Resource) + ic.waitGroup.Add(1) + ch <- r + } else { + log.Printf("[WARN] Can't find channel for resource %s", r.Resource) } - ic.Add(r) } func (ic *importContext) getTraversalTokens(ref reference, value string) hclwrite.Tokens { diff --git a/exporter/context_test.go b/exporter/context_test.go index d3550e0acb..8a7fe86093 100644 --- a/exporter/context_test.go +++ b/exporter/context_test.go @@ -3,6 +3,7 @@ package exporter import ( "fmt" "os" + "sync" "testing" "github.com/databricks/terraform-provider-databricks/qa" @@ -21,7 +22,7 @@ func TestMatchesName(t *testing.T) { func TestImportContextFindSkips(t *testing.T) { _, traversal := (&importContext{ State: stateApproximation{ - Resources: []resourceApproximation{ + resources: []resourceApproximation{ { Type: "a", Instances: []instanceApproximation{ @@ -45,7 +46,7 @@ func TestImportContextFindSkips(t *testing.T) { func TestImportContextHas(t *testing.T) { assert.True(t, (&importContext{ State: stateApproximation{ - Resources: []resourceApproximation{ + resources: []resourceApproximation{ { Type: "a", Instances: []instanceApproximation{ @@ -92,7 +93,8 @@ func TestEmitNoImportable(t *testing.T) { } func TestEmitNoSearchAvail(t *testing.T) { - (&importContext{ + ch := make(resourceChannel) + ic := &importContext{ importing: map[string]bool{}, Resources: map[string]*schema.Resource{ "a": {}, @@ -102,17 +104,30 @@ func TestEmitNoSearchAvail(t *testing.T) { Service: "e", }, }, - services: "e", - }).Emit(&resource{ + services: "e", + waitGroup: &sync.WaitGroup{}, + channels: map[string]resourceChannel{ + "a": ch, + }, + } + go func() { + for r := range ch { + r.ImportResource(ic) + } + }() + ic.Emit(&resource{ Resource: "a", Attribute: "b", Value: "d", Name: "c", }) + ic.waitGroup.Wait() + close(ch) } func TestEmitNoSearchFails(t *testing.T) { - (&importContext{ + ch := make(resourceChannel, 10) + ic := &importContext{ importing: map[string]bool{}, Resources: 
map[string]*schema.Resource{ "a": {}, @@ -125,17 +140,30 @@ func TestEmitNoSearchFails(t *testing.T) { }, }, }, - services: "e", - }).Emit(&resource{ + services: "e", + waitGroup: &sync.WaitGroup{}, + channels: map[string]resourceChannel{ + "a": ch, + }, + } + go func() { + for r := range ch { + r.ImportResource(ic) + } + }() + ic.Emit(&resource{ Resource: "a", Attribute: "b", Value: "d", Name: "c", }) + ic.waitGroup.Wait() + close(ch) } func TestEmitNoSearchNoId(t *testing.T) { - (&importContext{ + ch := make(resourceChannel, 10) + ic := &importContext{ importing: map[string]bool{}, Resources: map[string]*schema.Resource{ "a": {}, @@ -148,17 +176,30 @@ func TestEmitNoSearchNoId(t *testing.T) { }, }, }, - services: "e", - }).Emit(&resource{ + services: "e", + waitGroup: &sync.WaitGroup{}, + channels: map[string]resourceChannel{ + "a": ch, + }, + } + go func() { + for r := range ch { + r.ImportResource(ic) + } + }() + ic.Emit(&resource{ Resource: "a", Attribute: "b", Value: "d", Name: "c", }) + ic.waitGroup.Wait() + close(ch) } func TestEmitNoSearchSucceedsImportFails(t *testing.T) { - (&importContext{ + ch := make(resourceChannel, 10) + ic := &importContext{ importing: map[string]bool{}, Resources: map[string]*schema.Resource{ "a": {}, @@ -175,14 +216,26 @@ func TestEmitNoSearchSucceedsImportFails(t *testing.T) { }, }, }, - services: "e", - }).Emit(&resource{ + services: "e", + waitGroup: &sync.WaitGroup{}, + channels: map[string]resourceChannel{ + "a": ch, + }, + } + go func() { + for r := range ch { + r.ImportResource(ic) + } + }() + ic.Emit(&resource{ Data: &schema.ResourceData{}, Resource: "a", Attribute: "b", Value: "d", Name: "c", }) + ic.waitGroup.Wait() + close(ch) } func TestLoadingLastRun(t *testing.T) { diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index 85d2c56909..24faf303a2 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -204,9 +204,10 @@ func TestImportingMounts(t *testing.T) { err := ic.Importables["databricks_mount"].List(ic) assert.NoError(t, err) - for i := 0; i < len(ic.Scope); i++ { + resources := ic.Scope.Sorted() + for i := range resources { err = ic.Importables["databricks_mount"].Body(ic, - hclwrite.NewEmptyFile().Body(), ic.Scope[i]) + hclwrite.NewEmptyFile().Body(), resources[i]) assert.NoError(t, err) } }) @@ -752,6 +753,7 @@ func TestImportingClusters(t *testing.T) { }, }, func(ctx context.Context, client *common.DatabricksClient) { + os.Setenv("EXPORTER_PARALLELISM_databricks_cluster", "1") tmpDir := fmt.Sprintf("/tmp/tf-%s", qa.RandomName()) defer os.RemoveAll(tmpDir) @@ -967,7 +969,8 @@ func TestImportingJobs_JobList(t *testing.T) { err := ic.Importables["databricks_job"].List(ic) assert.NoError(t, err) - for _, res := range ic.Scope { + resources := ic.Scope.Sorted() + for _, res := range resources { if res.Resource != "databricks_job" { continue } @@ -1216,7 +1219,8 @@ func TestImportingJobs_JobListMultiTask(t *testing.T) { err := ic.Importables["databricks_job"].List(ic) assert.NoError(t, err) - for _, res := range ic.Scope { + resources := ic.Scope.Sorted() + for _, res := range resources { if res.Resource != "databricks_job" { continue } diff --git a/exporter/importables.go b/exporter/importables.go index 2e5ba01f9d..70cfb78140 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -768,6 +768,7 @@ var resourcesMap map[string]importable = map[string]importable{ } return nameNormalizationRegex.ReplaceAllString(strings.Split(s, "@")[0], "_") + "_" + d.Id() }, + // TODO: we need to add List 
operation here as well Search: func(ic *importContext, r *resource) error { u, err := ic.findUserByName(r.Value) if err != nil { @@ -805,6 +806,7 @@ var resourcesMap map[string]importable = map[string]importable{ } return name + "_" + d.Id() }, + // TODO: we need to add List operation here as well Search: func(ic *importContext, r *resource) error { u, err := ic.findSpnByAppID(r.Value) if err != nil { @@ -1102,19 +1104,18 @@ var resourcesMap map[string]importable = map[string]importable{ return nil }, List: func(ic *importContext) error { - // TODO: Should we use parallel listing instead? - repoList, err := repos.NewReposAPI(ic.Context, ic.Client).ListAll() + objList, err := repos.NewReposAPI(ic.Context, ic.Client).ListAll() if err != nil { return err } - for offset, repo := range repoList { + for offset, repo := range objList { if repo.Url != "" { ic.Emit(&resource{ Resource: "databricks_repo", ID: fmt.Sprintf("%d", repo.ID), }) } - log.Printf("[INFO] Scanned %d of %d repos", offset+1, len(repoList)) + log.Printf("[INFO] Scanned %d of %d repos", offset+1, len(objList)) } return nil }, diff --git a/exporter/importables_test.go b/exporter/importables_test.go index 30dbdf73e6..9a1fdb1fc9 100644 --- a/exporter/importables_test.go +++ b/exporter/importables_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "os" + "sync" "testing" "github.com/databricks/databricks-sdk-go/apierr" @@ -37,6 +38,10 @@ func importContextForTest() *importContext { Files: map[string]*hclwrite.File{}, testEmits: map[string]bool{}, nameFixes: nameFixes, + waitGroup: &sync.WaitGroup{}, + allUsers: map[string]scim.User{}, + allSps: map[string]scim.User{}, + channels: makeResourcesChannels(p), } } @@ -362,30 +367,6 @@ func TestJobListNoNameMatch(t *testing.T) { assert.Equal(t, 0, len(ic.testEmits)) } -func TestJobList_FailGetRuns(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/jobs/runs/list?completed_only=true&job_id=1&limit=1", - Status: 404, - Response: apierr.NotFound("nope"), - }, - }, func(ctx context.Context, client *common.DatabricksClient) { - ic := importContextForTest() - ic.Client = client - ic.Context = ctx - ic.importJobs([]jobs.Job{ - { - JobID: 1, - Settings: &jobs.JobSettings{ - Name: "abc", - }, - }, - }) - assert.Equal(t, 0, len(ic.testEmits)) - }) -} - func TestClusterPolicyWrongDef(t *testing.T) { d := policies.ResourceClusterPolicy().TestResourceData() d.Set("name", "abc") @@ -763,6 +744,7 @@ func testGenerate(t *testing.T, fixtures []qa.HTTPFixture, services string, cb f ic.importing = map[string]bool{} ic.variables = map[string]string{} ic.services = services + ic.startImportChannels() cb(ic) }) } @@ -806,6 +788,8 @@ func TestNotebookGeneration(t *testing.T) { ic.notebooksFormat = "SOURCE" err := resourcesMap["databricks_notebook"].List(ic) assert.NoError(t, err) + ic.waitGroup.Wait() + ic.closeImportChannels() ic.generateHclForResources(nil) assert.Equal(t, commands.TrimLeadingWhitespace(` resource "databricks_notebook" "first_second_123" { @@ -854,6 +838,8 @@ func TestNotebookGenerationJupyter(t *testing.T) { ic.notebooksFormat = "JUPYTER" err := resourcesMap["databricks_notebook"].List(ic) assert.NoError(t, err) + ic.waitGroup.Wait() + ic.closeImportChannels() ic.generateHclForResources(nil) assert.Equal(t, commands.TrimLeadingWhitespace(` resource "databricks_notebook" "first_second_123" { @@ -902,6 +888,8 @@ func TestDirectoryGeneration(t *testing.T) { err := resourcesMap["databricks_directory"].List(ic) assert.NoError(t, err) + 
ic.waitGroup.Wait() + ic.closeImportChannels() ic.generateHclForResources(nil) assert.Equal(t, commands.TrimLeadingWhitespace(` resource "databricks_directory" "first_1234" { @@ -928,6 +916,8 @@ func TestGlobalInitScriptGen(t *testing.T) { ID: "a", }) + ic.waitGroup.Wait() + ic.closeImportChannels() ic.generateHclForResources(nil) assert.Equal(t, commands.TrimLeadingWhitespace(` resource "databricks_global_init_script" "new_importing_things" { @@ -958,6 +948,8 @@ func TestSecretGen(t *testing.T) { ID: "a|||b", }) + ic.waitGroup.Wait() + ic.closeImportChannels() ic.generateHclForResources(nil) assert.Equal(t, commands.TrimLeadingWhitespace(` resource "databricks_secret" "a_b_eb2980a5a2" { @@ -991,6 +983,8 @@ func TestDbfsFileGen(t *testing.T) { ID: "a", }) + ic.waitGroup.Wait() + ic.closeImportChannels() ic.generateHclForResources(nil) assert.Equal(t, commands.TrimLeadingWhitespace(` resource "databricks_dbfs_file" "_0cc175b9c0f1b6a831c399e269772661_a" { @@ -1066,7 +1060,10 @@ func TestIncrementalListDLT(t *testing.T) { ic.Context = ctx ic.incremental = true ic.updatedSinceStr = "2023-07-24T00:00:00Z" + ic.updatedSinceMs = 1690156700000 err := resourcesMap["databricks_pipeline"].List(ic) + ic.waitGroup.Wait() + ic.closeImportChannels() assert.NoError(t, err) assert.Equal(t, 1, len(ic.testEmits)) }) diff --git a/exporter/model.go b/exporter/model.go index 948653db9d..a3c93efe3d 100644 --- a/exporter/model.go +++ b/exporter/model.go @@ -1,13 +1,19 @@ package exporter import ( + "context" "fmt" + "log" "regexp" + "sort" + "strings" + "sync" "github.com/databricks/terraform-provider-databricks/common" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) type regexFix struct { @@ -30,7 +36,42 @@ type resourceApproximation struct { } type stateApproximation struct { - Resources []resourceApproximation `json:"resources"` + mutex sync.RWMutex + // TODO: use map by type -> should speedup Has function? + resources []resourceApproximation +} + +// TODO: check if it's used directly by multiple threads? 
+func (s *stateApproximation) Resources() []resourceApproximation { + s.mutex.RLocker().Lock() + defer s.mutex.RLocker().Unlock() + c := make([]resourceApproximation, len(s.resources)) + copy(c, s.resources) + return c +} + +func (s *stateApproximation) Has(r *resource) bool { + s.mutex.RLocker().Lock() + defer s.mutex.RLocker().Unlock() + k, v := r.MatchPair() + for _, sr := range s.resources { + if sr.Type != r.Resource { + continue + } + for _, i := range sr.Instances { + tv, ok := i.Attributes[k].(string) + if ok && tv == v { + return true + } + } + } + return false +} + +func (s *stateApproximation) Append(ra resourceApproximation) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.resources = append(s.resources, ra) } type importable struct { @@ -134,14 +175,104 @@ func (r *resource) ImportCommand(ic *importContext) string { return fmt.Sprintf(`terraform import %s%s.%s "%s"`, m, r.Resource, r.Name, r.ID) } -type importedResources []*resource +func (r *resource) ImportResource(ic *importContext) { + defer ic.waitGroup.Done() + pr, ok := ic.Resources[r.Resource] + if !ok { + log.Printf("[ERROR] %s is not available in provider", r) + return + } + ir, ok := ic.Importables[r.Resource] + if !ok { + log.Printf("[ERROR] %s is not available for import", r) + return + } + if ic.HasInState(r, true) { + log.Printf("[DEBUG] %s already imported", r) + return + } + + if r.ID == "" { + if ir.Search == nil { + log.Printf("[ERROR] Searching %s is not available", r) + return + } + if err := ir.Search(ic, r); err != nil { + log.Printf("[ERROR] Cannot search for a resource %s: %v", err, r) + return + } + if r.ID == "" { + log.Printf("[INFO] Cannot find %s", r) + return + } + } + if r.Data == nil { + // empty data with resource schema + r.Data = pr.Data(&terraform.InstanceState{ + Attributes: map[string]string{}, + ID: r.ID, + }) + r.Data.MarkNewResource() + resource := strings.ReplaceAll(r.Resource, "databricks_", "") + ctx := context.WithValue(ic.Context, common.ResourceName, resource) + apiVersion := ic.Importables[r.Resource].ApiVersion + if apiVersion != "" { + ctx = context.WithValue(ctx, common.Api, apiVersion) + } + if dia := pr.ReadContext(ctx, r.Data, ic.Client); dia != nil { + log.Printf("[ERROR] Error reading %s#%s: %v", r.Resource, r.ID, dia) + return + } + if r.Data.Id() == "" { + r.Data.SetId(r.ID) + } + } + r.Name = ic.ResourceName(r) + if ir.Import != nil { + if err := ir.Import(ic, r); err != nil { + log.Printf("[ERROR] Failed custom import of %s: %s", r, err) + return + } + } + ic.Add(r) +} + +// TODO: split resources into a map of resource type -> list of resources (guarded by RW locks) +type resourcesList []*resource -func (a importedResources) Len() int { +type importedResources struct { + resources resourcesList + mutex sync.RWMutex +} + +func (a *importedResources) Append(r *resource) { + defer a.mutex.Unlock() + a.mutex.Lock() + a.resources = append(a.resources, r) +} + +func (a *importedResources) Len() int { + defer a.mutex.RLocker().Unlock() + a.mutex.RLocker().Lock() + return len(a.resources) +} + +func (a resourcesList) Len() int { return len(a) } -func (a importedResources) Swap(i, j int) { + +func (a resourcesList) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a importedResources) Less(i, j int) bool { +func (a resourcesList) Less(i, j int) bool { return a[i].Name < a[j].Name } + +func (a *importedResources) Sorted() []*resource { + defer a.mutex.Unlock() + a.mutex.Lock() + c := make(resourcesList, len(a.resources)) + copy(c, a.resources) + sort.Sort(c) + return c +} diff 
--git a/exporter/util.go b/exporter/util.go index e9a7316a20..324cbda4b0 100644 --- a/exporter/util.go +++ b/exporter/util.go @@ -22,6 +22,7 @@ import ( "github.com/databricks/terraform-provider-databricks/sql" "github.com/databricks/terraform-provider-databricks/storage" "github.com/databricks/terraform-provider-databricks/workspace" + "golang.org/x/exp/slices" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -149,21 +150,42 @@ func (ic *importContext) emitNotebookOrRepo(path string) { func (ic *importContext) getAllDirectories() []workspace.ObjectStatus { if len(ic.allDirectories) == 0 { objects := ic.getAllWorkspaceObjects() - for _, v := range objects { - if v.ObjectType == workspace.Directory { - ic.allDirectories = append(ic.allDirectories, v) + ic.wsObjectsMutex.Lock() + defer ic.wsObjectsMutex.Unlock() + if len(ic.allDirectories) == 0 { + for _, v := range objects { + if v.ObjectType == workspace.Directory { + ic.allDirectories = append(ic.allDirectories, v) + } } } } return ic.allDirectories } +// TODO: Ignore databricks_automl as well? +var directoriesToIgnore = []string{".ide", ".bundle", "__pycache__"} + +func excludeAuxiliaryDirectories(v workspace.ObjectStatus) bool { + if v.ObjectType != workspace.Directory { + return true + } + parts := strings.Split(v.Path, "/") + result := len(parts) > 1 && slices.Contains[[]string, string](directoriesToIgnore, parts[len(parts)-1]) + if result { + log.Printf("[DEBUG] Ignoring directory %s", v.Path) + } + return !result +} + func (ic *importContext) getAllWorkspaceObjects() []workspace.ObjectStatus { + ic.wsObjectsMutex.Lock() + defer ic.wsObjectsMutex.Unlock() if len(ic.allWorkspaceObjects) == 0 { t1 := time.Now() log.Printf("[DEBUG] %v. Starting to list all workspace objects", t1.Local().Format(time.RFC3339)) notebooksAPI := workspace.NewNotebooksAPI(ic.Context, ic.Client) - ic.allWorkspaceObjects, _ = notebooksAPI.ListParallel("/", true) + ic.allWorkspaceObjects, _ = notebooksAPI.ListParallel("/", excludeAuxiliaryDirectories) t2 := time.Now() log.Printf("[DEBUG] %v. Finished listing of all workspace objects. %d objects in total. 
%v seconds", t2.Local().Format(time.RFC3339), len(ic.allWorkspaceObjects), t2.Sub(t1).Seconds()) @@ -239,7 +261,9 @@ func (ic *importContext) importClusterLibraries(d *schema.ResourceData, s map[st } func (ic *importContext) cacheGroups() error { - if len(ic.allGroups) == 0 { + ic.groupsMutex.Lock() + defer ic.groupsMutex.Unlock() + if ic.allGroups == nil { log.Printf("[INFO] Caching groups in memory ...") groupsAPI := scim.NewGroupsAPI(ic.Context, ic.Client) g, err := groupsAPI.Filter("") @@ -252,7 +276,25 @@ func (ic *importContext) cacheGroups() error { return nil } +const ( + nonExistingUserOrSp = "__USER_OR_SPN_DOES_NOT_EXIST__" +) + func (ic *importContext) findUserByName(name string) (u scim.User, err error) { + log.Printf("[DEBUG] Looking for user %s", name) + ic.usersMutex.RLocker().Lock() + user, exists := ic.allUsers[name] + ic.usersMutex.RLocker().Unlock() + if exists { + if user.UserName == nonExistingUserOrSp { + log.Printf("[DEBUG] non-existing user %s is found in the cache", name) + err = fmt.Errorf("user %s not found", name) + } else { + log.Printf("[DEBUG] existing user %s is found in the cache", name) + u = user + } + return + } a := scim.NewUsersAPI(ic.Context, ic.Client) users, err := a.Filter(fmt.Sprintf("userName eq '%s'", name), false) if err != nil { @@ -260,13 +302,31 @@ func (ic *importContext) findUserByName(name string) (u scim.User, err error) { } if len(users) == 0 { err = fmt.Errorf("user %s not found", name) - return + u = scim.User{UserName: nonExistingUserOrSp} + } else { + u = users[0] } - u = users[0] + ic.usersMutex.Lock() + ic.allUsers[name] = u + ic.usersMutex.Unlock() return } func (ic *importContext) findSpnByAppID(applicationID string) (u scim.User, err error) { + log.Printf("[DEBUG] Looking for SP %s", applicationID) + ic.spsMutex.RLocker().Lock() + sp, exists := ic.allSps[applicationID] + ic.spsMutex.RLocker().Unlock() + if exists { + if sp.ApplicationID == nonExistingUserOrSp { + log.Printf("[DEBUG] non-existing SP %s is found in the cache", applicationID) + err = fmt.Errorf("user %s not found", applicationID) + } else { + log.Printf("[DEBUG] existing SP %s is found in the cache", applicationID) + u = sp + } + return + } a := scim.NewServicePrincipalsAPI(ic.Context, ic.Client) users, err := a.Filter(fmt.Sprintf("applicationId eq '%s'", strings.ReplaceAll(applicationID, "'", "")), false) if err != nil { @@ -274,9 +334,14 @@ func (ic *importContext) findSpnByAppID(applicationID string) (u scim.User, err } if len(users) == 0 { err = fmt.Errorf("service principal %s not found", applicationID) - return + u = scim.User{ApplicationID: nonExistingUserOrSp} + } else { + u = users[0] } - u = users[0] + ic.usersMutex.Lock() + ic.allSps[applicationID] = u + ic.usersMutex.Unlock() + return } @@ -327,6 +392,8 @@ func dbsqlListObjects(ic *importContext, path string) (events []map[string]any, } func (ic *importContext) getSqlDataSources() (map[string]string, error) { + ic.sqlDatasourcesMutex.Lock() + defer ic.sqlDatasourcesMutex.Unlock() if ic.sqlDatasources == nil { var dss []sql.DataSource err := ic.Client.Get(ic.Context, "/preview/sql/data_sources", nil, &dss) @@ -474,36 +541,12 @@ func eitherString(a any, b any) string { } func (ic *importContext) importJobs(l []jobs.Job) { - nowSeconds := time.Now().Unix() - a := jobs.NewJobsAPI(ic.Context, ic.Client) - starterAfter := (nowSeconds - (ic.lastActiveDays * 24 * 60 * 60)) * 1000 i := 0 for offset, job := range l { if !ic.MatchesName(job.Settings.Name) { log.Printf("[INFO] Job name %s doesn't match selection 
%s", job.Settings.Name, ic.match) continue } - if ic.lastActiveDays != 3650 { - rl, err := a.RunsList(jobs.JobRunsListRequest{ - JobID: job.JobID, - CompletedOnly: true, - Limit: 1, - }) - if err != nil { - log.Printf("[WARN] Failed to get runs: %s", err) - continue - } - if len(rl.Runs) == 0 { - log.Printf("[INFO] Job %#v (%d) did never run. Skipping", job.Settings.Name, job.JobID) - continue - } - if rl.Runs[0].StartTime < starterAfter { - log.Printf("[INFO] Job %#v (%d) didn't run for %d days. Skipping", - job.Settings.Name, job.JobID, - (nowSeconds*1000-rl.Runs[0].StartTime)/24*60*60/1000) - continue - } - } ic.Emit(&resource{ Resource: "databricks_job", ID: job.ID(), @@ -620,6 +663,7 @@ func wsObjectGetModifiedAt(obs workspace.ObjectStatus) int64 { func createListWorkspaceObjectsFunc(objType string, resourceType string, objName string) func(ic *importContext) error { return func(ic *importContext) error { + // TODO: can we pass a visitor here, that will emit corresponding object earlier? objectsList := ic.getAllWorkspaceObjects() updatedSinceMs := ic.getUpdatedSinceMs() for offset, object := range objectsList { @@ -664,9 +708,16 @@ func (ic *importContext) getUpdatedSinceStr() string { } func (ic *importContext) getUpdatedSinceMs() int64 { - if ic.updatedSinceMs == 0 { - tm, _ := time.Parse(time.RFC3339, ic.updatedSinceStr) - ic.updatedSinceMs = tm.UnixMilli() - } return ic.updatedSinceMs } + +func getEnvAsInt(envName string, defaultValue int) int { + if val, exists := os.LookupEnv(envName); exists { + parsedVal, err := strconv.Atoi(val) + if err == nil { + return parsedVal + } + log.Printf("[ERROR] Can't parse value '%s' of environment variable '%s'", val, envName) + } + return defaultValue +} diff --git a/exporter/util_test.go b/exporter/util_test.go index fe7f7e0385..a3cff51f75 100644 --- a/exporter/util_test.go +++ b/exporter/util_test.go @@ -1,9 +1,11 @@ package exporter import ( + "os" "testing" "github.com/databricks/terraform-provider-databricks/clusters" + "github.com/databricks/terraform-provider-databricks/workspace" "github.com/stretchr/testify/assert" ) @@ -86,7 +88,6 @@ func TestEmitNotebookOrRepo(t *testing.T) { } func TestIsUserOrServicePrincipalDirectory(t *testing.T) { - ic := importContextForTest() result_false_partslength_more_than_3 := ic.IsUserOrServicePrincipalDirectory("/Users/user@domain.com/abc", "/Users") assert.False(t, result_false_partslength_more_than_3) @@ -115,3 +116,27 @@ func TestIsUserOrServicePrincipalDirectory(t *testing.T) { result_true_sp_directory := ic.IsUserOrServicePrincipalDirectory("/Users/0e561119-c5a0-4f29-b246-5a953adb9575", "/Users") assert.True(t, result_true_sp_directory) } + +func TestGetEnvAsInt(t *testing.T) { + os.Setenv("a", "10") + assert.Equal(t, 10, getEnvAsInt("a", 1)) + // + os.Setenv("a", "abc") + assert.Equal(t, 1, getEnvAsInt("a", 1)) + // + assert.Equal(t, 1, getEnvAsInt("b", 1)) +} + +func TestExcludeAuxiliaryDirectories(t *testing.T) { + assert.True(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "", ObjectType: workspace.Directory})) + assert.True(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{ObjectType: workspace.File})) + assert.True(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "/Users/user@domain.com/abc", + ObjectType: workspace.Directory})) + // should be ignored + assert.False(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "/Users/user@domain.com/.ide", + ObjectType: workspace.Directory})) + assert.False(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: 
"/Shared/.bundle", + ObjectType: workspace.Directory})) + assert.False(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "/Users/user@domain.com/abc/__pycache__", + ObjectType: workspace.Directory})) +} diff --git a/sql/resource_sql_alerts.go b/sql/resource_sql_alerts.go index 62a535acdd..a7fe04b215 100644 --- a/sql/resource_sql_alerts.go +++ b/sql/resource_sql_alerts.go @@ -3,6 +3,7 @@ package sql import ( "context" "fmt" + "log" "strconv" "github.com/databricks/databricks-sdk-go/service/sql" @@ -72,29 +73,38 @@ func (a *AlertEntity) toEditAlertApiObject(s map[string]*schema.Schema, data *sc func (a *AlertEntity) fromAPIObject(apiAlert *sql.Alert, s map[string]*schema.Schema, data *schema.ResourceData) error { a.Name = apiAlert.Name a.Parent = apiAlert.Parent - a.QueryId = apiAlert.Query.Id + if apiAlert.Query != nil { + a.QueryId = apiAlert.Query.Id + } else { + log.Printf("[WARN] Query object is nil in alert '%s' (id: %s) ", apiAlert.Name, apiAlert.Id) + } a.Rearm = apiAlert.Rearm a.CreatedAt = apiAlert.CreatedAt a.UpdatedAt = apiAlert.UpdatedAt - a.Options = &AlertOptions{ - Column: apiAlert.Options.Column, - Op: apiAlert.Options.Op, - Muted: apiAlert.Options.Muted, - CustomBody: apiAlert.Options.CustomBody, - CustomSubject: apiAlert.Options.CustomSubject, - } + if apiAlert.Options != nil { + a.Options = &AlertOptions{ + Column: apiAlert.Options.Column, + Op: apiAlert.Options.Op, + Muted: apiAlert.Options.Muted, + CustomBody: apiAlert.Options.CustomBody, + CustomSubject: apiAlert.Options.CustomSubject, + } - // value can be a string or a float64 - unfortunately this can't be encoded in OpenAPI yet - switch value := apiAlert.Options.Value.(type) { - case string: - a.Options.Value = value - case float64: - a.Options.Value = strconv.FormatFloat(value, 'f', 0, 64) - case bool: - a.Options.Value = strconv.FormatBool(value) - default: - return fmt.Errorf("unexpected type for value: %T", value) + // value can be a string or a float64 - unfortunately this can't be encoded in OpenAPI yet + switch value := apiAlert.Options.Value.(type) { + case string: + a.Options.Value = value + case float64: + a.Options.Value = strconv.FormatFloat(value, 'f', 0, 64) + case bool: + a.Options.Value = strconv.FormatBool(value) + default: + return fmt.Errorf("unexpected type for value: %T", value) + } + } else { + log.Printf("[WARN] Options object is nil in alert '%s' (id: %s) ", apiAlert.Name, apiAlert.Id) + a.Options = &AlertOptions{} } return common.StructToData(a, s, data) @@ -132,6 +142,7 @@ func ResourceSqlAlert() *schema.Resource { } apiAlert, err := w.Alerts.GetByAlertId(ctx, data.Id()) if err != nil { + log.Printf("[WARN] error getting alert by ID: %v", err) return err } var a AlertEntity diff --git a/workspace/resource_notebook.go b/workspace/resource_notebook.go index 44b5c3fea1..422180db7a 100644 --- a/workspace/resource_notebook.go +++ b/workspace/resource_notebook.go @@ -163,8 +163,8 @@ type directoryInfo struct { const ( directoryListingMaxAttempts = 3 envVarListParallelism = "EXPORTER_WS_LIST_PARALLELISM" - envVarDirectoryChannelSize = "EXPORTER_CHANNEL_SIZE" - defaultWorkersPoolSize = 5 + envVarDirectoryChannelSize = "EXPORTER_DIRECTORIES_CHANNEL_SIZE" + defaultWorkersPoolSize = 10 defaultDirectoryChannelSize = 100000 ) @@ -173,7 +173,7 @@ func getFormattedNowTime() string { } func (a NotebooksAPI) recursiveAddPathsParallel(directory directoryInfo, dirChannel chan directoryInfo, - answer *syncAnswer, wg *sync.WaitGroup) { + answer *syncAnswer, wg *sync.WaitGroup, shouldIncludeDir 
func(ObjectStatus) bool) { defer wg.Done() notebookInfoList, err := a.list(directory.Path) if err != nil { @@ -183,16 +183,27 @@ func (a NotebooksAPI) recursiveAddPathsParallel(directory directoryInfo, dirChan dirChannel <- directoryInfo{Path: directory.Path, Attempts: directory.Attempts + 1} } } - answer.append(notebookInfoList) + + newList := make([]ObjectStatus, 0, len(notebookInfoList)) + directories := make([]ObjectStatus, 0, len(notebookInfoList)) for _, v := range notebookInfoList { if v.ObjectType == Directory { - wg.Add(1) - log.Printf("[DEBUG] %s: putting directory '%s' into channel. Channel size: %d", - getFormattedNowTime(), v.Path, len(dirChannel)) - dirChannel <- directoryInfo{Path: v.Path} - time.Sleep(15 * time.Millisecond) + if shouldIncludeDir(v) { + newList = append(newList, v) + directories = append(directories, v) + } + } else { + newList = append(newList, v) } } + answer.append(newList) + for _, v := range directories { + wg.Add(1) + log.Printf("[DEBUG] %s: putting directory '%s' into channel. Channel size: %d", + getFormattedNowTime(), v.Path, len(dirChannel)) + dirChannel <- directoryInfo{Path: v.Path} + time.Sleep(15 * time.Millisecond) + } } func getEnvAsInt(envName string, defaultValue int) int { @@ -205,10 +216,14 @@ func getEnvAsInt(envName string, defaultValue int) int { return defaultValue } -func (a NotebooksAPI) ListParallel(path string, recursive bool) ([]ObjectStatus, error) { +func (a NotebooksAPI) ListParallel(path string, shouldIncludeDir func(ObjectStatus) bool) ([]ObjectStatus, error) { var answer syncAnswer wg := &sync.WaitGroup{} + if shouldIncludeDir == nil { + shouldIncludeDir = func(ObjectStatus) bool { return true } + } + numWorkers := getEnvAsInt(envVarListParallelism, defaultWorkersPoolSize) channelSize := getEnvAsInt(envVarDirectoryChannelSize, defaultDirectoryChannelSize) dirChannel := make(chan directoryInfo, channelSize) @@ -218,14 +233,14 @@ func (a NotebooksAPI) ListParallel(path string, recursive bool) ([]ObjectStatus, log.Printf("[DEBUG] %s: starting go routine %d", getFormattedNowTime(), t) for directory := range dirChannel { log.Printf("[DEBUG] %s: processing directory %s", getFormattedNowTime(), directory.Path) - a.recursiveAddPathsParallel(directory, dirChannel, &answer, wg) + a.recursiveAddPathsParallel(directory, dirChannel, &answer, wg, shouldIncludeDir) } }() } log.Printf("[DEBUG] %s: pushing initial path to channel", getFormattedNowTime()) wg.Add(1) - a.recursiveAddPathsParallel(directoryInfo{Path: path}, dirChannel, &answer, wg) + a.recursiveAddPathsParallel(directoryInfo{Path: path}, dirChannel, &answer, wg, shouldIncludeDir) log.Printf("[DEBUG] %s: starting to wait", getFormattedNowTime()) wg.Wait() log.Printf("[DEBUG] %s: closing the directory channel", getFormattedNowTime()) diff --git a/workspace/resource_notebook_test.go b/workspace/resource_notebook_test.go index e9b493056e..b72efd1763 100644 --- a/workspace/resource_notebook_test.go +++ b/workspace/resource_notebook_test.go @@ -436,7 +436,7 @@ func TestParallelListing(t *testing.T) { os.Setenv("EXPORTER_CHANNEL_SIZE", "100") ctx := context.Background() api := NewNotebooksAPI(ctx, client) - objects, err := api.ListParallel("/", true) + objects, err := api.ListParallel("/", nil) require.NoError(t, err) require.Equal(t, 4, len(objects)) From b00148b3097846e21350393e0fab21a144a02c48 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 4 Oct 2023 13:26:46 +0200 Subject: [PATCH 10/36] Use `terraform-field-dev` as code owner instead of `field-dev-ecosystem` (#2718) * Use 
`terraform-field-dev` as code owner instead of `field-dev-ecosystem`

* Update CODEOWNERS

Co-authored-by: Serge Smertin <259697+nfx@users.noreply.github.com>

---------

Co-authored-by: Serge Smertin <259697+nfx@users.noreply.github.com>
---
 CODEOWNERS | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CODEOWNERS b/CODEOWNERS
index b8828dd615..970299e9a5 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1 +1 @@
-* @databricks/field-dev-ecosystem @databricks/eng-plat-auto-exp-reviewers
+* @databricks/field-dev-terraform @databricks/eng-plat-auto-exp-reviewers

From 364512f1abf157c6da7610fd710a7d31337b28ca Mon Sep 17 00:00:00 2001
From: Alex Ott
Date: Wed, 4 Oct 2023 13:39:15 +0200
Subject: [PATCH 11/36] Add `dashboard_filters_enabled` attribute to
 `databricks_sql_dashboard` resource (#2725)

When `dashboard_filters_enabled` is set to `true`, filters are shown at the SQL dashboard level. In this case, the `databricks_sql_widget` should have the following block to use them:

```
parameter {
  type   = "dashboard-level"
  name   = "Limit"
  map_to = "Limit"
}
```
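
On the dashboard side, only the new flag needs to be set (a minimal sketch; the resource name and dashboard name are illustrative):

```hcl
resource "databricks_sql_dashboard" "d1" {
  name                      = "Sales dashboard"
  dashboard_filters_enabled = true
}
```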
This fixes #2172
---
 sql/api/dashboard.go          | 15 ++++++++-------
 sql/resource_sql_dashboard.go | 13 ++++++++-----
 2 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/sql/api/dashboard.go b/sql/api/dashboard.go
index 8e9cf4f0d6..58ea7ed5c7 100644
--- a/sql/api/dashboard.go
+++ b/sql/api/dashboard.go
@@ -4,11 +4,12 @@ import "encoding/json"
 
 // Dashboard ...
 type Dashboard struct {
-	ID        string            `json:"id"`
-	Name      string            `json:"name"`
-	Tags      []string          `json:"tags,omitempty"`
-	Widgets   []json.RawMessage `json:"widgets,omitempty"`
-	Parent    string            `json:"parent,omitempty"`
-	CreatedAt string            `json:"created_at,omitempty"`
-	UpdatedAt string            `json:"updated_at,omitempty"`
+	ID                      string            `json:"id"`
+	Name                    string            `json:"name"`
+	Tags                    []string          `json:"tags,omitempty"`
+	Widgets                 []json.RawMessage `json:"widgets,omitempty"`
+	Parent                  string            `json:"parent,omitempty"`
+	CreatedAt               string            `json:"created_at,omitempty"`
+	UpdatedAt               string            `json:"updated_at,omitempty"`
+	DashboardFiltersEnabled bool              `json:"dashboard_filters_enabled,omitempty"`
 }
diff --git a/sql/resource_sql_dashboard.go b/sql/resource_sql_dashboard.go
index 8cdaa736c4..f6e3124aec 100644
--- a/sql/resource_sql_dashboard.go
+++ b/sql/resource_sql_dashboard.go
@@ -11,11 +11,12 @@ import (
 
 // DashboardEntity defines the parameters that can be set in the resource.
 type DashboardEntity struct {
-	Name      string   `json:"name"`
-	Tags      []string `json:"tags,omitempty"`
-	Parent    string   `json:"parent,omitempty" tf:"suppress_diff,force_new"`
-	CreatedAt string   `json:"created_at,omitempty" tf:"computed"`
-	UpdatedAt string   `json:"updated_at,omitempty" tf:"computed"`
+	Name                    string   `json:"name"`
+	Tags                    []string `json:"tags,omitempty"`
+	Parent                  string   `json:"parent,omitempty" tf:"suppress_diff,force_new"`
+	CreatedAt               string   `json:"created_at,omitempty" tf:"computed"`
+	UpdatedAt               string   `json:"updated_at,omitempty" tf:"computed"`
+	DashboardFiltersEnabled bool     `json:"dashboard_filters_enabled,omitempty"`
 }
 
 func (d *DashboardEntity) toAPIObject(schema map[string]*schema.Schema, data *schema.ResourceData) (*api.Dashboard, error) {
@@ -28,6 +29,7 @@ func (d *DashboardEntity) toAPIObject(schema map[string]*schema.Schema, data *sc
 	ad.Name = d.Name
 	ad.Tags = append([]string{}, d.Tags...)
 	ad.Parent = d.Parent
+	ad.DashboardFiltersEnabled = d.DashboardFiltersEnabled
 
 	return &ad, nil
 }
@@ -39,6 +41,7 @@ func (d *DashboardEntity) fromAPIObject(ad *api.Dashboard, schema map[string]*sc
 	d.Parent = ad.Parent
 	d.UpdatedAt = ad.UpdatedAt
 	d.CreatedAt = ad.CreatedAt
+	d.DashboardFiltersEnabled = ad.DashboardFiltersEnabled
 
 	// Pass to ResourceData.
 	if err := common.StructToData(*d, schema, data); err != nil {

From f7739a56e9988886b8972f124c720a664518afb6 Mon Sep 17 00:00:00 2001
From: Alex Ott
Date: Fri, 6 Oct 2023 08:23:08 +0200
Subject: [PATCH 12/36] Add `empty_result_state` attribute to the
 `databricks_sql_alert` resource (#2724)

This fixes #2662
---
 docs/resources/sql_alert.md |  1 +
 sql/resource_sql_alerts.go  | 42 ++++++++++++++++++++++++-------------
 2 files changed, 28 insertions(+), 15 deletions(-)

diff --git a/docs/resources/sql_alert.md b/docs/resources/sql_alert.md
index edd918eef7..4f512948c0 100644
--- a/docs/resources/sql_alert.md
+++ b/docs/resources/sql_alert.md
@@ -50,6 +50,7 @@ The following arguments are available:
 * `custom_body` - (Optional, String) Custom body of alert notification, if it exists. See [Alerts API reference](https://docs.databricks.com/sql/user/alerts/index.html) for custom templating instructions.
 * `parent` - (Optional, String) The identifier of the workspace folder containing the alert. The default is the user's home folder. The folder identifier is formatted as `folder/`.
 * `rearm` - (Optional, Integer) Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, the alert will never be triggered again.
+* `empty_result_state` - (Optional, String) State that the alert evaluates to when the query result is empty. Currently supported values are `unknown`, `triggered`, `ok` - check the [API documentation](https://docs.databricks.com/api/workspace/alerts/create) for the full list of supported values.
 
 ## Related Resources
 
diff --git a/sql/resource_sql_alerts.go b/sql/resource_sql_alerts.go
index a7fe04b215..6304863012 100644
--- a/sql/resource_sql_alerts.go
+++ b/sql/resource_sql_alerts.go
@@ -13,12 +13,13 @@ import (
 )
 
 type AlertOptions struct {
-	Column        string `json:"column"`
-	Op            string `json:"op"`
-	Value         string `json:"value"`
-	Muted         bool   `json:"muted,omitempty"`
-	CustomBody    string `json:"custom_body,omitempty"`
-	CustomSubject string `json:"custom_subject,omitempty"`
+	Column           string `json:"column"`
+	Op               string `json:"op"`
+	Value            string `json:"value"`
+	Muted            bool   `json:"muted,omitempty"`
+	CustomBody       string `json:"custom_body,omitempty"`
+	CustomSubject    string `json:"custom_subject,omitempty"`
+	EmptyResultState string `json:"empty_result_state,omitempty"`
 }
 
 type AlertEntity struct {
@@ -47,14 +48,18 @@ func (a *AlertEntity) toCreateAlertApiObject(s map[string]*schema.Schema, data *
 		Op:     a.Options.Op,
 		Value:  a.Options.Value,
 	}
-
-	return ca, nil
+	// This is a workaround for a Go SDK problem; it will be fixed there.
+ var err error + if a.Options.EmptyResultState != "" { + err = ca.Options.EmptyResultState.Set(a.Options.EmptyResultState) + } + return ca, err } func (a *AlertEntity) toEditAlertApiObject(s map[string]*schema.Schema, data *schema.ResourceData) (sql.EditAlert, error) { common.DataToStructPointer(data, s, a) - return sql.EditAlert{ + ea := sql.EditAlert{ AlertId: data.Id(), Name: a.Name, Options: sql.AlertOptions{ @@ -67,7 +72,13 @@ func (a *AlertEntity) toEditAlertApiObject(s map[string]*schema.Schema, data *sc }, QueryId: a.QueryId, Rearm: a.Rearm, - }, nil + } + + var err error + if a.Options.EmptyResultState != "" { + err = ea.Options.EmptyResultState.Set(a.Options.EmptyResultState) + } + return ea, err } func (a *AlertEntity) fromAPIObject(apiAlert *sql.Alert, s map[string]*schema.Schema, data *schema.ResourceData) error { @@ -84,11 +95,12 @@ func (a *AlertEntity) fromAPIObject(apiAlert *sql.Alert, s map[string]*schema.Sc if apiAlert.Options != nil { a.Options = &AlertOptions{ - Column: apiAlert.Options.Column, - Op: apiAlert.Options.Op, - Muted: apiAlert.Options.Muted, - CustomBody: apiAlert.Options.CustomBody, - CustomSubject: apiAlert.Options.CustomSubject, + Column: apiAlert.Options.Column, + Op: apiAlert.Options.Op, + Muted: apiAlert.Options.Muted, + CustomBody: apiAlert.Options.CustomBody, + CustomSubject: apiAlert.Options.CustomSubject, + EmptyResultState: apiAlert.Options.EmptyResultState.String(), } // value can be a string or a float64 - unfortunately this can't be encoded in OpenAPI yet From d03041d2e9c174ec8b99b5040b093a137b4e28be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 7 Oct 2023 09:37:59 +0200 Subject: [PATCH 13/36] Bump github.com/hashicorp/hcl/v2 from 2.18.0 to 2.18.1 (#2776) Bumps [github.com/hashicorp/hcl/v2](https://github.com/hashicorp/hcl) from 2.18.0 to 2.18.1. - [Release notes](https://github.com/hashicorp/hcl/releases) - [Changelog](https://github.com/hashicorp/hcl/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/hcl/compare/v2.18.0...v2.18.1) --- updated-dependencies: - dependency-name: github.com/hashicorp/hcl/v2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index dda0390323..272dabb53b 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 - github.com/hashicorp/hcl/v2 v2.18.0 + github.com/hashicorp/hcl/v2 v2.18.1 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0 github.com/stretchr/testify v1.8.4 diff --git a/go.sum b/go.sum index fa14369b13..da707548e7 100644 --- a/go.sum +++ b/go.sum @@ -103,8 +103,8 @@ github.com/hashicorp/hc-install v0.6.0 h1:fDHnU7JNFNSQebVKYhHZ0va1bC6SrPQ8fpebsv github.com/hashicorp/hc-install v0.6.0/go.mod h1:10I912u3nntx9Umo1VAeYPUUuehk0aRQJYpMwbX5wQA= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.18.0 h1:wYnG7Lt31t2zYkcquwgKo6MWXzRUDIeIVU5naZwHLl8= -github.com/hashicorp/hcl/v2 v2.18.0/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= +github.com/hashicorp/hcl/v2 v2.18.1 h1:6nxnOJFku1EuSawSD81fuviYUV8DxFr3fp2dUi3ZYSo= +github.com/hashicorp/hcl/v2 v2.18.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81SpgVtZNNtFSM= From b932c399fca073203b2e19182f2bbfabe8314087 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 7 Oct 2023 09:38:12 +0200 Subject: [PATCH 14/36] Bump github.com/zclconf/go-cty from 1.14.0 to 1.14.1 (#2777) Bumps [github.com/zclconf/go-cty](https://github.com/zclconf/go-cty) from 1.14.0 to 1.14.1. - [Release notes](https://github.com/zclconf/go-cty/releases) - [Changelog](https://github.com/zclconf/go-cty/blob/main/CHANGELOG.md) - [Commits](https://github.com/zclconf/go-cty/compare/v1.14.0...v1.14.1) --- updated-dependencies: - dependency-name: github.com/zclconf/go-cty dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index 272dabb53b..2f02150a18 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,7 @@ require (
 	github.com/hashicorp/terraform-plugin-log v0.9.0
 	github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0
 	github.com/stretchr/testify v1.8.4
-	github.com/zclconf/go-cty v1.14.0
+	github.com/zclconf/go-cty v1.14.1
 	golang.org/x/exp v0.0.0-20230905200255-921286631fa9
 	golang.org/x/mod v0.12.0
 )
diff --git a/go.sum b/go.sum
index da707548e7..c3866daa77 100644
--- a/go.sum
+++ b/go.sum
@@ -178,8 +178,8 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh
 github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
 github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/zclconf/go-cty v1.14.0 h1:/Xrd39K7DXbHzlisFP9c4pHao4yyf+/Ug9LEz+Y/yhc=
-github.com/zclconf/go-cty v1.14.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA=
+github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=

From 97aa8869f7b074db69da795354365683950890d1 Mon Sep 17 00:00:00 2001
From: Alex Ott
Date: Mon, 9 Oct 2023 16:37:26 +0200
Subject: [PATCH 15/36] Exporter: fix the logic for omitting some fields (#2774)

`application_id` was ignored on Azure as well, although it's required there,
because the corresponding field is marked as `computed`. Also, make
`display_name` required for AWS/GCP.
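
With this change, the generated HCL for a service principal ends up looking roughly like this (an illustrative sketch, not actual exporter output; the IDs and names are made up):

```hcl
# Azure: display_name is omitted only when it merely repeats application_id
resource "databricks_service_principal" "azure_sp" {
  application_id = "00000000-1111-2222-3333-444444444444"
}

# AWS/GCP: application_id is generated by the platform, so display_name is emitted instead
resource "databricks_service_principal" "aws_sp" {
  display_name = "automation-sp"
}
```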
---
 exporter/importables.go | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/exporter/importables.go b/exporter/importables.go
index 70cfb78140..13fa429a30 100644
--- a/exporter/importables.go
+++ b/exporter/importables.go
@@ -816,13 +816,19 @@ var resourcesMap map[string]importable = map[string]importable{
 			return nil
 		},
 		ShouldOmitField: func(ic *importContext, pathString string, as *schema.Schema, d *schema.ResourceData) bool {
+			if pathString == "display_name" {
+				if ic.Client.IsAzure() {
+					applicationID := d.Get("application_id").(string)
+					displayName := d.Get("display_name").(string)
+					return applicationID == displayName
+				}
+				return false
+			}
 			// application_id should be provided only on Azure
-			if pathString == "display_name" && ic.Client.IsAzure() {
-				applicationID := d.Get("application_id").(string)
-				displayName := d.Get("display_name").(string)
-				return applicationID == displayName
+			if pathString == "application_id" {
+				return !ic.Client.IsAzure()
 			}
-			return (pathString == "application_id" && !ic.Client.IsAzure()) || defaultShouldOmitFieldFunc(ic, pathString, as, d)
+			return defaultShouldOmitFieldFunc(ic, pathString, as, d)
 		},
 		Import: func(ic *importContext, r *resource) error {
 			applicationID := r.Data.Get("application_id").(string)

From 1601f7420dd563040dffca4277f1b141591e18bc Mon Sep 17 00:00:00 2001
From: Alex Ott
Date: Wed, 11 Oct 2023 10:12:58 +0200
Subject: [PATCH 16/36] Fix documentation for `databricks_schema` about default
 value for `storage_root` (#2790)

---
 docs/resources/schema.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/resources/schema.md b/docs/resources/schema.md
index ab03aeee3d..ab6e83e9b7 100644
--- a/docs/resources/schema.md
+++ b/docs/resources/schema.md
@@ -35,7 +35,7 @@ The following arguments are required:
 
 * `name` - Name of Schema relative to parent catalog. Change forces creation of a new resource.
 * `catalog_name` - Name of parent catalog. Change forces creation of a new resource.
-* `storage_root` - (Optional) Managed location of the schema. Location in cloud storage where data for managed tables will be stored. If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
+* `storage_root` - (Optional) Managed location of the schema. Location in cloud storage where data for managed tables will be stored. If not specified, the location will default to the catalog root location. Change forces creation of a new resource.
 * `owner` - (Optional) Username/groupname/sp application_id of the schema owner.
 * `comment` - (Optional) User-supplied free-form text.
 * `properties` - (Optional) Extensible Schema properties.

From 98c1bd9e87770f17c72a95e83c7f3e53868356da Mon Sep 17 00:00:00 2001
From: Alex Ott
Date: Wed, 11 Oct 2023 13:04:33 +0200
Subject: [PATCH 17/36] Clarify possible values for `principal` attribute of
 `databricks_secret_acl` (#2772)

This fixes #2767
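
For example, granting a service principal read-only access to a scope could look like this (a short sketch; the resource names are illustrative):

```hcl
resource "databricks_secret_acl" "spn_can_read" {
  scope      = databricks_secret_scope.app.name
  principal  = databricks_service_principal.automation.application_id
  permission = "READ"
}
```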
+This way, data scientists can read the Publishing API key that is synchronized from, for example, Azure Key Vault. ```hcl resource "databricks_group" "ds" { @@ -37,8 +37,11 @@ resource "databricks_secret" "publishing_api" { The following arguments are required: * `scope` - (Required) name of the scope -* `principal` - (Required) name of the principals. It can be `users` for all users or name or `display_name` of [databricks_group](group.md) -* `permission` - (Required) `READ`, `WRITE` or `MANAGE`. +* `principal` - (Required) principal's identifier. It can be: + * `user_name` attribute of [databricks_user](user.md). + * `display_name` attribute of [databricks_group](group.md). Use `users` to allow access for all workspace users. + * `application_id` attribute of [databricks_service_principal](service_principal.md). +* `permission` - (Required) `READ`, `WRITE` or `MANAGE`. ## Import From c1cda2de98a4a960ff86d7106ed58d92bad4470d Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Thu, 12 Oct 2023 08:52:58 +0200 Subject: [PATCH 18/36] Remove deprecation warning from `cluster_mount_info` in `databricks_cluster`, but mark it as experimental (#2787) Adjusting it again by the request of cluster controls team who got a feedback from the field team. --- clusters/resource_cluster.go | 2 -- docs/resources/cluster.md | 4 +++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clusters/resource_cluster.go b/clusters/resource_cluster.go index 34fe6e1e14..46a84608f3 100644 --- a/clusters/resource_cluster.go +++ b/clusters/resource_cluster.go @@ -91,8 +91,6 @@ func resourceClusterSchema() map[string]*schema.Schema { s["runtime_engine"].ValidateFunc = validation.StringInSlice([]string{"PHOTON", "STANDARD"}, false) - s["cluster_mount_info"].Deprecated = "cluster_mount_info block is deprecated due the Clusters API changes." - s["is_pinned"] = &schema.Schema{ Type: schema.TypeBool, Optional: true, diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index 60069c3e5a..99e79118da 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -473,7 +473,9 @@ resource "databricks_cluster" "this" { } ``` -## cluster_mount_info blocks (deprecated) +## cluster_mount_info blocks (experimental) + +-> **Note** The underlying API is experimental and may change in the future. It's possible to mount NFS (Network File System) resources into the Spark containers inside the cluster. You can specify one or more `cluster_mount_info` blocks describing the mount. This block has following attributes: From 618232ff50378238b2fe9a334d4e3ad446ef84d7 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Thu, 12 Oct 2023 09:51:35 +0200 Subject: [PATCH 19/36] Force recreation of UC Volume when `volume_type` and `storage_location` are changed (#2734) The [UC Volumes update API](https://docs.databricks.com/api/workspace/volumes/update) allows updates only of specific attributes, so we need to force recreation of the volume when `volume_type` or `storage_location` are modified. 
This fixes #2692 --- catalog/resource_volume.go | 4 +- catalog/resource_volume_test.go | 65 ++++++++++++++++++++++++++++++++- docs/resources/volume.md | 4 +- 3 files changed, 68 insertions(+), 5 deletions(-) diff --git a/catalog/resource_volume.go b/catalog/resource_volume.go index e4dfa3fd11..6111c577ca 100644 --- a/catalog/resource_volume.go +++ b/catalog/resource_volume.go @@ -24,8 +24,8 @@ type VolumeInfo struct { // The identifier of the user who owns the volume Owner string `json:"owner,omitempty" tf:"computed"` // The storage location on the cloud - StorageLocation string `json:"storage_location,omitempty"` - VolumeType catalog.VolumeType `json:"volume_type"` + StorageLocation string `json:"storage_location,omitempty" tf:"force_new"` + VolumeType catalog.VolumeType `json:"volume_type" tf:"force_new"` } func ResourceVolume() *schema.Resource { diff --git a/catalog/resource_volume_test.go b/catalog/resource_volume_test.go index a164895df2..ecbdc06e95 100644 --- a/catalog/resource_volume_test.go +++ b/catalog/resource_volume_test.go @@ -332,6 +332,7 @@ func TestVolumesUpdate(t *testing.T) { InstanceState: map[string]string{ "catalog_name": "testCatalogName", "schema_name": "testSchemaName", + "volume_type": "testVolumeType", }, ID: "testCatalogName.testSchemaName.testName", HCL: ` @@ -351,7 +352,7 @@ func TestVolumesUpdate(t *testing.T) { assert.Equal(t, "This is a new test comment.", d.Get("comment")) } -func TestVolumesUpdateForceNew(t *testing.T) { +func TestVolumesUpdateForceNewOnCatalog(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ { @@ -407,6 +408,67 @@ func TestVolumesUpdateForceNew(t *testing.T) { assert.Equal(t, "This is a new test comment.", d.Get("comment")) } +func TestVolumesUpdateForceNewOnVolumeType(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/volumes/testCatalogName.testSchemaName.testName?", + Response: catalog.VolumeInfo{ + Name: "testNameNew", + VolumeType: catalog.VolumeType("testVolumeTypeNew"), + CatalogName: "testCatalogName", + SchemaName: "testSchemaName", + Comment: "This is a new test comment.", + FullName: "testCatalogName.testSchemaName.testNameNew", + Owner: "testOwnerNew", + }, + }, + { + Method: http.MethodPatch, + Resource: "/api/2.1/unity-catalog/volumes/testCatalogName.testSchemaName.testName", + ExpectedRequest: catalog.UpdateVolumeRequestContent{ + Name: "testName", + Comment: "This is a new test comment.", + Owner: "testOwnerNew", + }, + Response: catalog.VolumeInfo{ + Name: "testNameNew", + VolumeType: catalog.VolumeType("testVolumeTypeNew"), + CatalogName: "testCatalogName", + SchemaName: "testSchemaName", + Comment: "This is a new test comment.", + FullName: "testCatalogName.testSchemaName.testName", + Owner: "testOwnerNew", + }, + }, + }, + Resource: ResourceVolume(), + RequiresNew: true, + Update: true, + ID: "testCatalogName.testSchemaName.testName", + InstanceState: map[string]string{ + "catalog_name": "testCatalogName", + "schema_name": "testSchemaName", + "volume_type": "testVolumeType", + }, + HCL: ` + name = "testName" + volume_type = "testVolumeTypeNew" + catalog_name = "testCatalogName" + schema_name = "testSchemaName" + comment = "This is a new test comment." 
+ owner = "testOwnerNew" + `, + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "testNameNew", d.Get("name")) + assert.Equal(t, "testVolumeTypeNew", d.Get("volume_type")) + assert.Equal(t, "testCatalogName", d.Get("catalog_name")) + assert.Equal(t, "testSchemaName", d.Get("schema_name")) + assert.Equal(t, "This is a new test comment.", d.Get("comment")) +} + func TestVolumeUpdate_Error(t *testing.T) { _, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ @@ -430,6 +492,7 @@ func TestVolumeUpdate_Error(t *testing.T) { InstanceState: map[string]string{ "catalog_name": "testCatalogName", "schema_name": "testSchemaName", + "volume_type": "testVolumeType", }, ID: "testCatalogName.testSchemaName.testName", HCL: ` diff --git a/docs/resources/volume.md b/docs/resources/volume.md index caab4c5a3a..e91555df14 100644 --- a/docs/resources/volume.md +++ b/docs/resources/volume.md @@ -82,9 +82,9 @@ The following arguments are supported: * `name` - Name of the Volume * `catalog_name` - Name of parent Catalog. Change forces creation of a new resource. * `schema_name` - Name of parent Schema relative to parent Catalog. Change forces creation of a new resource. -* `volume_type` - Volume type. `EXTERNAL` or `MANAGED`. +* `volume_type` - Volume type. `EXTERNAL` or `MANAGED`. Change forces creation of a new resource. * `owner` - (Optional) Name of the volume owner. -* `storage_location` - (Optional) Path inside an External Location. Only used for `EXTERNAL` Volumes. +* `storage_location` - (Optional) Path inside an External Location. Only used for `EXTERNAL` Volumes. Change forces creation of a new resource. * `comment` - (Optional) Free-form text. ## Attribute Reference From 8e785edfb456cafa51f361819e8d3f942526260c Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Thu, 12 Oct 2023 16:31:43 +0200 Subject: [PATCH 20/36] Mark attributes in the `run_as` block in `databricks_job` as `ExactlyOneOf` (#2784) * Mark attributes in the `run_as` block in `databricks_job` as `ExactlyOneOf` This prevents error when user specifies both at the same time, or don't specify any of them in the existing `run_as` block. * Update jobs/resource_job.go Co-authored-by: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> --------- Co-authored-by: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> --- docs/resources/job.md | 4 ++-- jobs/resource_job.go | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/resources/job.md b/docs/resources/job.md index c18f7ac167..a1d67ce2d2 100644 --- a/docs/resources/job.md +++ b/docs/resources/job.md @@ -146,12 +146,12 @@ resource "databricks_job" "this" { ### run_as Configuration Block The `run_as` block allows specifying the user or the service principal that the job runs as. If not specified, the job runs as the user or service -principal that created the job. +principal that created the job. Only one of `user_name` or `service_principal_name` can be specified. * `user_name` - (Optional) The email of an active workspace user. Non-admin users can only set this field to their own email. * `service_principal_name` - (Optional) The application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role. 
-Example
+Example:
 
 ```hcl
 resource "databricks_job" "this" {
diff --git a/jobs/resource_job.go b/jobs/resource_job.go
index f6d8afd8d0..7d4ed08614 100644
--- a/jobs/resource_job.go
+++ b/jobs/resource_job.go
@@ -690,6 +690,12 @@ var jobSchema = common.StructToSchema(JobSettings{},
 		s["schedule"].ConflictsWith = []string{"continuous", "trigger"}
 		s["continuous"].ConflictsWith = []string{"schedule", "trigger"}
 		s["trigger"].ConflictsWith = []string{"schedule", "continuous"}
+
+		// we need to have only one of user name vs service principal in the run_as block
+		run_as_eoo := []string{"run_as.0.user_name", "run_as.0.service_principal_name"}
+		common.MustSchemaPath(s, "run_as", "user_name").ExactlyOneOf = run_as_eoo
+		common.MustSchemaPath(s, "run_as", "service_principal_name").ExactlyOneOf = run_as_eoo
+
 		return s
 	})

From a9f0ed6068cb56032cdf56e00c28e8bad4480fe9 Mon Sep 17 00:00:00 2001
From: Alex Ott
Date: Fri, 13 Oct 2023 13:57:28 +0200
Subject: [PATCH 21/36] Suppress diff for `user_name` in `databricks_user`
 when the changes are only in character case (#2786)

* Suppress diff for `user_name` in `databricks_user` when the changes are
only in character case

The identity management team rolled out changes for AWS workspaces that
force user names to be lower-cased by default (to match the behavior on GCP
& Azure). These changes cause permanent configuration drift because
`user_name` is marked as `force_new`. To mitigate this problem, a
diff-suppress function was added that ignores changes in case.

* Add integration test

* Clarify in the docs that the name will be lower-cased

---
 docs/resources/user.md           |  2 +-
 internal/acceptance/user_test.go | 18 +++++++++++++++++-
 scim/resource_user.go            |  1 +
 scim/resource_user_test.go       |  7 +++++++
 4 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/docs/resources/user.md b/docs/resources/user.md
index 58050b2d80..09aba93c56 100644
--- a/docs/resources/user.md
+++ b/docs/resources/user.md
@@ -87,7 +87,7 @@ resource "databricks_user" "account_user" {
 
 The following arguments are available:
 
-* `user_name` - (Required) This is the username of the given user and will be their form of access and identity.
+* `user_name` - (Required) This is the username of the given user and will be their form of access and identity. The provided username will be converted to lower case if it contains upper case characters.
 * `display_name` - (Optional) This is an alias for the username that can be the full name of the user.
 * `external_id` - (Optional) ID of the user in an external identity provider.
 * `allow_cluster_create` - (Optional) Allow the user to have [cluster](cluster.md) create privileges. Defaults to false. More fine grained permissions could be assigned with [databricks_permissions](permissions.md#Cluster-usage) and `cluster_id` argument. Everyone without `allow_cluster_create` argument set, but with [permission to use](permissions.md#Cluster-Policy-usage) Cluster Policy would be able to create clusters, but within boundaries of that specific policy.
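The suppress function wired up above (`common.EqualFoldDiffSuppress`) is referenced but not shown in this patch. A minimal sketch of what such a case-insensitive suppressor plausibly looks like, assuming the terraform-plugin-sdk v2 `SchemaDiffSuppressFunc` signature:

```go
package common

import (
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// EqualFoldDiffSuppress treats the stored and configured values as equal when
// they differ only in letter case, so a configured "User@Example.com" does not
// drift against the lower-cased "user@example.com" returned by the backend.
func EqualFoldDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
	return strings.EqualFold(old, new)
}

// Compile-time check that the sketch matches the SDK's expected signature.
var _ schema.SchemaDiffSuppressFunc = EqualFoldDiffSuppress
```

Because `user_name` remains `force_new`, suppressing case-only differences is what prevents the destroy-and-recreate plan described in the commit message.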
diff --git a/internal/acceptance/user_test.go b/internal/acceptance/user_test.go
index a21bf15399..ed18450bdf 100644
--- a/internal/acceptance/user_test.go
+++ b/internal/acceptance/user_test.go
@@ -138,7 +138,8 @@ func TestAccUserResource(t *testing.T) {
 		user_name    = "tf-derde+{var.RANDOM}@example.com"
 		display_name = "Derde {var.RANDOM}"
 		allow_instance_pool_create = true
-	}`
+	}
+	`
 	workspaceLevel(t, step{
 		Template: differentUsers,
 		Check: resource.ComposeTestCheckFunc(
@@ -153,3 +154,18 @@ func TestAccUserResource(t *testing.T) {
 		Template: differentUsers,
 	})
 }
+
+func TestAccUserResourceCaseInsensitive(t *testing.T) {
+	username := "CSTF-" + qa.RandomEmail()
+	csUser := `resource "databricks_user" "first" {
+		user_name = "` + username + `"
+	}`
+	workspaceLevel(t, step{
+		Template: csUser,
+		Check: resource.ComposeTestCheckFunc(
+			resource.TestCheckResourceAttr("databricks_user.first", "user_name", strings.ToLower(username)),
+		),
+	}, step{
+		Template: csUser,
+	})
+}
diff --git a/scim/resource_user.go b/scim/resource_user.go
index 5a46d81711..9586225d0c 100644
--- a/scim/resource_user.go
+++ b/scim/resource_user.go
@@ -29,6 +29,7 @@ func ResourceUser() *schema.Resource {
 	userSchema := common.StructToSchema(entity{},
 		func(m map[string]*schema.Schema) map[string]*schema.Schema {
 			addEntitlementsToSchema(&m)
+			m["user_name"].DiffSuppressFunc = common.EqualFoldDiffSuppress
 			m["active"].Default = true
 			m["force"] = &schema.Schema{
 				Type:     schema.TypeBool,
 				Optional: true,
diff --git a/scim/resource_user_test.go b/scim/resource_user_test.go
index f513ca13d2..eb0fcfb21a 100644
--- a/scim/resource_user_test.go
+++ b/scim/resource_user_test.go
@@ -727,3 +727,10 @@ func TestCreateForceOverwriteFindsAndSetsAccID(t *testing.T) {
 		assert.Equal(t, "abc", d.Id())
 	})
 }
+
+func TestUserResource_UserNameDiffSuppress(t *testing.T) {
+	jr := ResourceUser()
+	scs := jr.Schema["user_name"]
+	assert.True(t, scs.DiffSuppressFunc("user_name", "abcdef@example.com", "AbcDef@example.com", nil))
+	assert.False(t, scs.DiffSuppressFunc("user_name", "abcdef@example.com", "abcdef2@example.com", nil))
+}
From e320aa11fc4ef982eaa62234b95e91815f2fb3bc Mon Sep 17 00:00:00 2001
From: Alex Ott
Date: Fri, 13 Oct 2023 14:54:28 +0200
Subject: [PATCH 22/36] Don't rely on having `@` to check if it's user or SP
 (#2765)

* Fix: don't rely on having `@` to check if it's a user or SP

There are some installations where the user name isn't equal to an email
address - for them, our detection of user name vs. SP ID is broken, leading
to problems such as the generation of erroneous `run_as` blocks in job
definitions.

This PR fixes this by validating the given string as a UUID: if it matches,
it is used as the SP ID; otherwise it is treated as a user name.

Changes affect the following:

* `databricks_job` API - besides validation, it also doesn't fill the `RunAs` structure on read when the creator user name equals the run-as user name.
* `databricks_current_user` data source - should fix generation of `acl_principal_id` * Update jobs/data_job_test.go Co-authored-by: Miles Yucht * Add integration test --------- Co-authored-by: Miles Yucht --- common/util.go | 13 +++++ common/util_test.go | 12 +++++ exporter/importables.go | 1 - exporter/util.go | 8 ++- internal/acceptance/job_test.go | 63 +++++++++++++++++++++++ jobs/data_job_test.go | 91 +++++++++++++++++++++++++++++++++ jobs/resource_job.go | 10 ++-- scim/data_current_user.go | 6 +-- 8 files changed, 192 insertions(+), 12 deletions(-) create mode 100644 common/util.go create mode 100644 common/util_test.go diff --git a/common/util.go b/common/util.go new file mode 100644 index 0000000000..cf6c3b4ed2 --- /dev/null +++ b/common/util.go @@ -0,0 +1,13 @@ +package common + +import ( + "regexp" +) + +var ( + uuidRegex = regexp.MustCompile(`^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`) +) + +func StringIsUUID(s string) bool { + return uuidRegex.MatchString(s) +} diff --git a/common/util_test.go b/common/util_test.go new file mode 100644 index 0000000000..08aa81a086 --- /dev/null +++ b/common/util_test.go @@ -0,0 +1,12 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStringIsUUID(t *testing.T) { + assert.True(t, StringIsUUID("3f670caf-9a4b-4479-8143-1a0878da8f57")) + assert.False(t, StringIsUUID("abc")) +} diff --git a/exporter/importables.go b/exporter/importables.go index 13fa429a30..51ab96df71 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -42,7 +42,6 @@ var ( nameNormalizationRegex = regexp.MustCompile(`\W+`) jobClustersRegex = regexp.MustCompile(`^((job_cluster|task)\.[0-9]+\.new_cluster\.[0-9]+\.)`) dltClusterRegex = regexp.MustCompile(`^(cluster\.[0-9]+\.)`) - uuidRegex = regexp.MustCompile(`^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`) predefinedClusterPolicies = []string{"Personal Compute", "Job Compute", "Power User Compute", "Shared Compute"} secretPathRegex = regexp.MustCompile(`^\{\{secrets\/([^\/]+)\/([^}]+)\}\}$`) sqlParentRegexp = regexp.MustCompile(`^folders/(\d+)$`) diff --git a/exporter/util.go b/exporter/util.go index 324cbda4b0..0ef21cc353 100644 --- a/exporter/util.go +++ b/exporter/util.go @@ -96,13 +96,15 @@ func (ic *importContext) emitUserOrServicePrincipal(userOrSPName string) { if userOrSPName == "" { return } + // TODO: think about another way of checking for a user. ideally we need to check against the + // list of users/SPs obtained via SCIM API - this will be done in the refactoring requested by the SCIM team if strings.Contains(userOrSPName, "@") { ic.Emit(&resource{ Resource: "databricks_user", Attribute: "user_name", Value: userOrSPName, }) - } else if uuidRegex.MatchString(userOrSPName) { + } else if common.StringIsUUID(userOrSPName) { ic.Emit(&resource{ Resource: "databricks_service_principal", Attribute: "application_id", @@ -126,8 +128,10 @@ func (ic *importContext) IsUserOrServicePrincipalDirectory(path, prefix string) } parts := strings.SplitN(path, "/", 4) if len(parts) == 3 || (len(parts) == 4 && parts[3] == "") { + // TODO: think about another way of checking for a user. 
ideally we need to check against the + // list of users/SPs obtained via SCIM API - this will be done in the refactoring requested by the SCIM team userOrSPName := parts[2] - return strings.Contains(userOrSPName, "@") || uuidRegex.MatchString(userOrSPName) + return strings.Contains(userOrSPName, "@") || common.StringIsUUID(userOrSPName) } return false } diff --git a/internal/acceptance/job_test.go b/internal/acceptance/job_test.go index 916051d685..3467d194f8 100644 --- a/internal/acceptance/job_test.go +++ b/internal/acceptance/job_test.go @@ -10,6 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/terraform-provider-databricks/common" + "github.com/databricks/terraform-provider-databricks/qa" "github.com/stretchr/testify/assert" ) @@ -216,3 +217,65 @@ func TestAccJobControlRunState(t *testing.T) { Check: resourceCheck("databricks_job.this", waitForRunToStart), }) } + +func runAsTemplate(runAs string) string { + return ` + data "databricks_current_user" "me" {} + data "databricks_spark_version" "latest" {} + data "databricks_node_type" "smallest" { + local_disk = true + } + + resource "databricks_notebook" "this" { + path = "${data.databricks_current_user.me.home}/Terraform{var.RANDOM}" + language = "PYTHON" + content_base64 = base64encode(<<-EOT + # created from ${abspath(path.module)} + display(spark.range(10)) + EOT + ) + } + + resource "databricks_job" "this" { + name = "{var.RANDOM}" + + job_cluster { + job_cluster_key = "j" + new_cluster { + num_workers = 20 + spark_version = data.databricks_spark_version.latest.id + node_type_id = data.databricks_node_type.smallest.id + } + } + + task { + task_key = "c" + job_cluster_key = "j" + notebook_task { + notebook_path = databricks_notebook.this.path + } + } + + run_as { + ` + runAs + ` + } + }` +} + +func TestAccJobRunAsUser(t *testing.T) { + workspaceLevel(t, step{ + Template: ` + resource "databricks_user" "this" { + user_name = "` + qa.RandomEmail() + `" + } + ` + runAsTemplate(`user_name = databricks_user.this.user_name`), + }) +} + +func TestAccJobRunAsServicePrincipal(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "ucws") + spId := GetEnvOrSkipTest(t, "ACCOUNT_LEVEL_SERVICE_PRINCIPAL_ID") + unityWorkspaceLevel(t, step{ + Template: runAsTemplate(`service_principal_name = "` + spId + `"`), + }) +} diff --git a/jobs/data_job_test.go b/jobs/data_job_test.go index 32323070be..89ca70bb22 100755 --- a/jobs/data_job_test.go +++ b/jobs/data_job_test.go @@ -84,6 +84,97 @@ func TestDataSourceQueryableJobMatchesId(t *testing.T) { }) } +func TestDataSourceQueryableJobRunAsSP(t *testing.T) { + spID := "3f670caf-9a4b-4479-8143-1a0878da8f57" + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/jobs/get?job_id=234", + Response: Job{ + JobID: 234, + Settings: &JobSettings{ + Name: "Second", + }, + CreatorUserName: "user@domain.com", + RunAsUserName: spID, + }, + }, + }, + Resource: DataSourceJob(), + Read: true, + New: true, + NonWritable: true, + HCL: `job_id = "234"`, + ID: "234", + }.ApplyAndExpectData(t, map[string]any{ + "job_id": "234", + "id": "234", + "job_settings.0.settings.0.name": "Second", + "job_settings.0.settings.0.run_as.0.service_principal_name": spID, + }) +} + +func TestDataSourceQueryableJobRunAsSameUser(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/jobs/get?job_id=234", + Response: Job{ + JobID: 234, + Settings: &JobSettings{ + Name: 
"Second", + }, + CreatorUserName: "user@domain.com", + RunAsUserName: "user@domain.com", + }, + }, + }, + Resource: DataSourceJob(), + Read: true, + New: true, + NonWritable: true, + HCL: `job_id = "234"`, + ID: "234", + }.ApplyAndExpectData(t, map[string]any{ + "job_id": "234", + "id": "234", + "job_settings.0.settings.0.name": "Second", + "job_settings.0.settings.0.run_as.0": map[string]any{}, + }) +} + +func TestDataSourceQueryableJobRunAsAnotherUser(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/jobs/get?job_id=234", + Response: Job{ + JobID: 234, + Settings: &JobSettings{ + Name: "Second", + }, + CreatorUserName: "user1@domain.com", + RunAsUserName: "user2@domain.com", + }, + }, + }, + Resource: DataSourceJob(), + Read: true, + New: true, + NonWritable: true, + HCL: `job_id = "234"`, + ID: "234", + }.ApplyAndExpectData(t, map[string]any{ + "job_id": "234", + "id": "234", + "job_settings.0.settings.0.name": "Second", + "job_settings.0.settings.0.run_as.0.user_name": "user2@domain.com", + }) +} + func TestDataSourceQueryableJobMatchesName(t *testing.T) { qa.ResourceFixture{ Fixtures: commonFixtures("First"), diff --git a/jobs/resource_job.go b/jobs/resource_job.go index 7d4ed08614..c0be2bc311 100644 --- a/jobs/resource_job.go +++ b/jobs/resource_job.go @@ -572,16 +572,14 @@ func (a JobsAPI) Read(id string) (job Job, err error) { job.Settings.sortWebhooksByID() } - if job.RunAsUserName != "" && job.Settings != nil { - userNameIsEmail := strings.Contains(job.RunAsUserName, "@") - - if userNameIsEmail { + if job.Settings != nil && job.RunAsUserName != "" && job.RunAsUserName != job.CreatorUserName { + if common.StringIsUUID(job.RunAsUserName) { job.Settings.RunAs = &JobRunAs{ - UserName: job.RunAsUserName, + ServicePrincipalName: job.RunAsUserName, } } else { job.Settings.RunAs = &JobRunAs{ - ServicePrincipalName: job.RunAsUserName, + UserName: job.RunAsUserName, } } } diff --git a/scim/data_current_user.go b/scim/data_current_user.go index bbbf77e1b0..fdaa4917ed 100644 --- a/scim/data_current_user.go +++ b/scim/data_current_user.go @@ -59,10 +59,10 @@ func DataSourceCurrentUser() *schema.Resource { d.Set("user_name", me.UserName) d.Set("home", fmt.Sprintf("/Users/%s", me.UserName)) d.Set("repos", fmt.Sprintf("/Repos/%s", me.UserName)) - if strings.Contains(me.UserName, "@") { - d.Set("acl_principal_id", fmt.Sprintf("users/%s", me.UserName)) - } else { + if common.StringIsUUID(me.UserName) { d.Set("acl_principal_id", fmt.Sprintf("servicePrincipals/%s", me.UserName)) + } else { + d.Set("acl_principal_id", fmt.Sprintf("users/%s", me.UserName)) } d.Set("external_id", me.ExternalId) splits := strings.Split(me.UserName, "@") From d5266eca27a6bc6ec626a9a701d3f92196b3b583 Mon Sep 17 00:00:00 2001 From: Arpit Jasapara <87999496+arpitjasa-db@users.noreply.github.com> Date: Fri, 13 Oct 2023 06:33:41 -0700 Subject: [PATCH 23/36] Added `databricks_registered_model` resource (#2771) * Added databricks_registered_model resource * Applied comments * Update grants * Update tests * Update update * Apply comments Signed-off-by: Arpit Jasapara * Update internal/acceptance/registered_model_test.go * update deprecation comment --------- Signed-off-by: Arpit Jasapara Co-authored-by: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Co-authored-by: Miles Yucht --- catalog/resource_grants.go | 3 + catalog/resource_registered_model.go | 72 ++++++ catalog/resource_registered_model_test.go | 237 +++++++++++++++++++ docs/index.md | 8 +- 
docs/resources/grants.md | 18 ++ docs/resources/mlflow_experiment.md | 3 +- docs/resources/mlflow_model.md | 3 + docs/resources/model_serving.md | 3 +- docs/resources/registered_model.md | 53 +++++ internal/acceptance/registered_model_test.go | 48 ++++ provider/provider.go | 1 + 11 files changed, 444 insertions(+), 5 deletions(-) create mode 100644 catalog/resource_registered_model.go create mode 100644 catalog/resource_registered_model_test.go create mode 100644 docs/resources/registered_model.md create mode 100644 internal/acceptance/registered_model_test.go diff --git a/catalog/resource_grants.go b/catalog/resource_grants.go index e44d144998..2718962b86 100644 --- a/catalog/resource_grants.go +++ b/catalog/resource_grants.go @@ -105,6 +105,9 @@ func getPermissionEndpoint(securable, name string) string { if securable == "foreign_connection" { return fmt.Sprintf("/unity-catalog/permissions/connection/%s", name) } + if securable == "model" { + return fmt.Sprintf("/unity-catalog/permissions/function/%s", name) + } return fmt.Sprintf("/unity-catalog/permissions/%s/%s", securable, name) } diff --git a/catalog/resource_registered_model.go b/catalog/resource_registered_model.go new file mode 100644 index 0000000000..99e3a67531 --- /dev/null +++ b/catalog/resource_registered_model.go @@ -0,0 +1,72 @@ +package catalog + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceRegisteredModel() *schema.Resource { + s := common.StructToSchema( + catalog.CreateRegisteredModelRequest{}, + func(m map[string]*schema.Schema) map[string]*schema.Schema { + m["name"].ForceNew = true + m["catalog_name"].ForceNew = true + m["schema_name"].ForceNew = true + m["storage_location"].ForceNew = true + m["storage_location"].Computed = true + + return m + }) + + return common.Resource{ + Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + var m catalog.CreateRegisteredModelRequest + common.DataToStructPointer(d, s, &m) + model, err := w.RegisteredModels.Create(ctx, m) + if err != nil { + return err + } + d.SetId(model.FullName) + return nil + }, + Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + model, err := w.RegisteredModels.GetByFullName(ctx, d.Id()) + if err != nil { + return err + } + return common.StructToData(*model, s, d) + }, + Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + var u catalog.UpdateRegisteredModelRequest + common.DataToStructPointer(d, s, &u) + u.FullName = d.Id() + _, err = w.RegisteredModels.Update(ctx, u) + return err + }, + Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + return w.RegisteredModels.DeleteByFullName(ctx, d.Id()) + }, + StateUpgraders: []schema.StateUpgrader{}, + Schema: s, + SchemaVersion: 0, + }.ToResource() +} diff --git a/catalog/resource_registered_model_test.go b/catalog/resource_registered_model_test.go new file mode 100644 index 0000000000..129bb12d7e --- /dev/null +++ b/catalog/resource_registered_model_test.go @@ -0,0 +1,237 @@ +package catalog + +import 
( + "net/http" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/qa" +) + +func TestRegisteredModelCornerCases(t *testing.T) { + qa.ResourceCornerCases(t, ResourceRegisteredModel()) +} + +func TestRegisteredModelCreate(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodPost, + Resource: "/api/2.1/unity-catalog/models", + ExpectedRequest: catalog.CreateRegisteredModelRequest{ + Name: "model", + CatalogName: "catalog", + SchemaName: "schema", + Comment: "comment", + }, + Response: catalog.RegisteredModelInfo{ + Name: "model", + CatalogName: "catalog", + SchemaName: "schema", + FullName: "catalog.schema.model", + Comment: "comment", + }, + }, + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/models/catalog.schema.model?", + Response: catalog.RegisteredModelInfo{ + Name: "model", + CatalogName: "catalog", + SchemaName: "schema", + FullName: "catalog.schema.model", + Comment: "comment", + }, + }, + }, + Resource: ResourceRegisteredModel(), + HCL: ` + name = "model" + catalog_name = "catalog" + schema_name = "schema" + comment = "comment" + `, + Create: true, + }.ApplyAndExpectData(t, + map[string]any{ + "id": "catalog.schema.model", + }, + ) +} + +func TestRegisteredModelCreate_Error(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodPost, + Resource: "/api/2.1/unity-catalog/models", + Response: apierr.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourceRegisteredModel(), + Create: true, + }.ExpectError(t, "Internal error happened") +} + +func TestRegisteredModelRead(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/models/catalog.schema.model?", + Response: catalog.RegisteredModelInfo{ + Name: "model", + CatalogName: "catalog", + SchemaName: "schema", + FullName: "catalog.schema.model", + Comment: "comment", + }, + }, + }, + Resource: ResourceRegisteredModel(), + Read: true, + ID: "catalog.schema.model", + }.ApplyAndExpectData(t, + map[string]any{ + "id": "catalog.schema.model", + }, + ) +} + +func TestRegisteredModelRead_Error(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/models/catalog.schema.model?", + Response: apierr.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourceRegisteredModel(), + Read: true, + ID: "catalog.schema.model", + }.ExpectError(t, "Internal error happened") +} + +func TestRegisteredModelUpdate(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodPatch, + Resource: "/api/2.1/unity-catalog/models/catalog.schema.model", + ExpectedRequest: catalog.UpdateRegisteredModelRequest{ + FullName: "catalog.schema.model", + Comment: "new comment", + Name: "model", + }, + Response: catalog.RegisteredModelInfo{ + Name: "model", + CatalogName: "catalog", + SchemaName: "schema", + FullName: "catalog.schema.model", + Comment: "new comment", + }, + }, + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/models/catalog.schema.model?", + Response: catalog.RegisteredModelInfo{ + Name: "model", + CatalogName: "catalog", + SchemaName: "schema", + FullName: "catalog.schema.model", + 
Comment: "new comment", + }, + }, + }, + Resource: ResourceRegisteredModel(), + Update: true, + ID: "catalog.schema.model", + InstanceState: map[string]string{ + "name": "model", + "catalog_name": "catalog", + "schema_name": "schema", + "comment": "comment", + }, + HCL: ` + name = "model" + catalog_name = "catalog" + schema_name = "schema" + comment = "new comment" + `, + }.ApplyNoError(t) +} + +func TestRegisteredModelUpdate_Error(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodPatch, + Resource: "/api/2.1/unity-catalog/models/catalog.schema.model", + Response: apierr.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourceRegisteredModel(), + Update: true, + ID: "catalog.schema.model", + InstanceState: map[string]string{ + "name": "model", + "catalog_name": "catalog", + "schema_name": "schema", + "comment": "comment", + }, + HCL: ` + name = "model" + catalog_name = "catalog" + schema_name = "schema" + comment = "new comment" + `, + }.ExpectError(t, "Internal error happened") +} + +func TestRegisteredModelDelete(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodDelete, + Resource: "/api/2.1/unity-catalog/models/catalog.schema.model?", + Response: "", + }, + }, + Resource: ResourceRegisteredModel(), + Delete: true, + ID: "catalog.schema.model", + }.ApplyNoError(t) +} + +func TestRegisteredModelDelete_Error(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodDelete, + Resource: "/api/2.1/unity-catalog/models/catalog.schema.model?", + Response: apierr.APIErrorBody{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }, + Status: 400, + }, + }, + Resource: ResourceRegisteredModel(), + Delete: true, + ID: "catalog.schema.model", + }.ExpectError(t, "Internal error happened") +} diff --git a/docs/index.md b/docs/index.md index 5c75d44c2b..1d27679666 100644 --- a/docs/index.md +++ b/docs/index.md @@ -52,10 +52,12 @@ Databricks SQL * Manage [dashboards](resources/sql_dashboard.md) and their [widgets](resources/sql_widget.md). * Provide [global configuration for all SQL warehouses](docs/resources/sql_global_config.md) -MLFlow +Machine Learning -* Create [MLFlow models](resources/mlflow_model.md). -* Create [MLFlow experiments](resources/mlflow_experiment.md). +* Create [models in Unity Catalog](resources/registered_model.md). +* Create [MLflow experiments](resources/mlflow_experiment.md). +* Create [models in the workspace model registry](resources/mlflow_model.md). +* Create [model serving endpoints](resources/model_serving.md). ## Example Usage diff --git a/docs/resources/grants.md b/docs/resources/grants.md index 1a8daacc0d..c04872fcf9 100644 --- a/docs/resources/grants.md +++ b/docs/resources/grants.md @@ -194,6 +194,24 @@ resource "databricks_grants" "volume" { } ``` +## Registered model grants + +You can grant `ALL_PRIVILEGES`, `APPLY_TAG`, and `EXECUTE` privileges to [_`catalog.schema.model`_](registered_model.md) specified in the `model` attribute. 
+ +```hcl +resource "databricks_grants" "customers" { + model = "main.reporting.customer_model" + grant { + principal = "Data Engineers" + privileges = ["APPLY_TAG", "EXECUTE"] + } + grant { + principal = "Data Analysts" + privileges = ["EXECUTE"] + } +} +``` + ## Storage credential grants You can grant `ALL_PRIVILEGES`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `READ_FILES` and `WRITE_FILES` privileges to [databricks_storage_credential](storage_credential.md) id specified in `storage_credential` attribute: diff --git a/docs/resources/mlflow_experiment.md b/docs/resources/mlflow_experiment.md index 66352041e0..f4d71a170d 100644 --- a/docs/resources/mlflow_experiment.md +++ b/docs/resources/mlflow_experiment.md @@ -41,9 +41,10 @@ $ terraform import databricks_mlflow_experiment.this The following resources are often used in the same context: +* [databricks_registered_model](registered_model.md) to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. * [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_directory](directory.md) to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). -* [databricks_mlflow_model](mlflow_model.md) to create [MLflow models](https://docs.databricks.com/applications/mlflow/models.html) in Databricks. +* [databricks_mlflow_model](mlflow_model.md) to create models in the [workspace model registry](https://docs.databricks.com/en/mlflow/model-registry.html) in Databricks. * [databricks_notebook](notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). * [databricks_notebook](../data-sources/notebook.md) data to export a notebook from Databricks Workspace. * [databricks_repo](repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). diff --git a/docs/resources/mlflow_model.md b/docs/resources/mlflow_model.md index 0d00843996..55c505f68d 100644 --- a/docs/resources/mlflow_model.md +++ b/docs/resources/mlflow_model.md @@ -5,6 +5,8 @@ subcategory: "MLflow" This resource allows you to create [MLflow models](https://docs.databricks.com/applications/mlflow/models.html) in Databricks. +**Note** This documentation covers the Workspace Model Registry. Databricks recommends using [Models in Unity Catalog](registered_model.md). Models in Unity Catalog provides centralized model governance, cross-workspace access, lineage, and deployment. + ## Example Usage ```hcl @@ -48,6 +50,7 @@ $ terraform import databricks_mlflow_model.this The following resources are often used in the same context: +* [databricks_registered_model](registered_model.md) to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. * [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_model_serving](model_serving.md) to serve this model on a Databricks serving endpoint. * [databricks_directory](directory.md) to manage directories in [Databricks Workspace](https://docs.databricks.com/workspace/workspace-objects.html). 
diff --git a/docs/resources/model_serving.md b/docs/resources/model_serving.md index 722fa9fa4b..d9a0a6cc9c 100644 --- a/docs/resources/model_serving.md +++ b/docs/resources/model_serving.md @@ -103,9 +103,10 @@ $ terraform import databricks_model_serving.this The following resources are often used in the same context: +* [databricks_registered_model](registered_model.md) to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. * [End to end workspace management](../guides/workspace-management.md) guide. * [databricks_directory](directory.md) to manage directories in [Databricks Workspace](https://docs.databricks.com/workspace/workspace-objects.html). -* [databricks_mlflow_model](mlflow_model.md) to create [MLflow models](https://docs.databricks.com/applications/mlflow/models.html) in Databricks. +* [databricks_mlflow_model](mlflow_model.md) to create models in the [workspace model registry](https://docs.databricks.com/en/mlflow/model-registry.html) in Databricks. * [databricks_notebook](notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html). * [databricks_notebook](../data-sources/notebook.md) data to export a notebook from Databricks Workspace. * [databricks_repo](repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html). diff --git a/docs/resources/registered_model.md b/docs/resources/registered_model.md new file mode 100644 index 0000000000..4e6c3345f3 --- /dev/null +++ b/docs/resources/registered_model.md @@ -0,0 +1,53 @@ +--- +subcategory: "Unity Catalog" +--- +# databricks_registered_model Resource + +This resource allows you to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. + +## Example Usage + +```hcl +resource "databricks_registered_model" "this" { + name = "my_model" + catalog_name = "main" + schema_name = "default" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the registered model. +* `catalog_name` - (Required) The name of the catalog where the schema and the registered model reside. +* `schema_name` - (Required) The name of the schema where the registered model resides. +* `comment` - The comment attached to the registered model. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - Equal to the full name of the model (`catalog_name.schema_name.name`) and used to identify the model uniquely across the metastore. + +## Access Control + +* [databricks_grants](grants.md#registered-model-grants) can be used to grant principals `ALL_PRIVILEGES`, `APPLY_TAG`, and `EXECUTE` privileges. + +## Import + +The registered model resource can be imported using the full (3-level) name of the model. + +```bash +$ terraform import databricks_registered_model.this +``` + +## Related Resources + +The following resources are often used in the same context: + +* [databricks_model_serving](model_serving.md) to serve this model on a Databricks serving endpoint. +* [databricks_mlflow_experiment](mlflow_experiment.md) to manage [MLflow experiments](https://docs.databricks.com/data/data-sources/mlflow-experiment.html) in Databricks. +* [databricks_table](tables.md) data to manage tables within Unity Catalog. +* [databricks_schema](schemas.md) data to manage schemas within Unity Catalog. +* [databricks_catalog](catalogs.md) data to manage catalogs within Unity Catalog. 
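For comparison, the Unity Catalog calls this resource wraps can also be driven directly with the Databricks Go SDK. A minimal sketch using the same `w.RegisteredModels` methods as `catalog/resource_registered_model.go` above (the catalog, schema, and model names are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Create the model; the Terraform resource uses the returned three-level
	// full name (catalog.schema.name) as its ID.
	model, err := w.RegisteredModels.Create(ctx, catalog.CreateRegisteredModelRequest{
		Name:        "my_model",
		CatalogName: "main",
		SchemaName:  "default",
		Comment:     "created via the SDK for illustration",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(model.FullName) // e.g. "main.default.my_model"

	// Read and delete by the same full name, mirroring the resource's
	// Read and Delete implementations.
	if _, err := w.RegisteredModels.GetByFullName(ctx, model.FullName); err != nil {
		panic(err)
	}
	if err := w.RegisteredModels.DeleteByFullName(ctx, model.FullName); err != nil {
		panic(err)
	}
}
```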
diff --git a/internal/acceptance/registered_model_test.go b/internal/acceptance/registered_model_test.go new file mode 100644 index 0000000000..98dbbd31d7 --- /dev/null +++ b/internal/acceptance/registered_model_test.go @@ -0,0 +1,48 @@ +package acceptance + +import ( + "testing" +) + +func TestUcAccRegisteredModel(t *testing.T) { + unityWorkspaceLevel(t, + step{ + Template: ` + resource "databricks_registered_model" "model" { + name = "terraform-test-registered-model-{var.STICKY_RANDOM}" + catalog_name = "main" + schema_name = "default" + } + + resource "databricks_grants" "model_grants" { + model = databricks_registered_model.model.id + + grant { + principal = "account users" + privileges = ["EXECUTE"] + } + } + `, + }, + step{ + Template: ` + resource "databricks_registered_model" "model" { + name = "terraform-test-registered-model-{var.STICKY_RANDOM}" + catalog_name = "main" + schema_name = "default" + comment = "new comment" + } + `, + }, + step{ + Template: ` + resource "databricks_registered_model" "model" { + name = "terraform-test-registered-model-update-{var.STICKY_RANDOM}" + catalog_name = "main" + schema_name = "default" + comment = "new comment" + } + `, + }, + ) +} diff --git a/provider/provider.go b/provider/provider.go index f9d89b2082..9dabc4f1fd 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -135,6 +135,7 @@ func DatabricksProvider() *schema.Provider { "databricks_pipeline": pipelines.ResourcePipeline(), "databricks_provider": catalog.ResourceProvider(), "databricks_recipient": catalog.ResourceRecipient(), + "databricks_registered_model": catalog.ResourceRegisteredModel(), "databricks_repo": repos.ResourceRepo(), "databricks_schema": catalog.ResourceSchema(), "databricks_secret": secrets.ResourceSecret(), From 8a038ed6a3788dc5a1f3f23700970e1a2a91cea9 Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Fri, 13 Oct 2023 17:56:02 +0100 Subject: [PATCH 24/36] fix metastore read and add test (#2795) --- catalog/resource_metastore.go | 2 +- catalog/resource_metastore_test.go | 94 ++++++++++++++++++++++++++++++ 2 files changed, 95 insertions(+), 1 deletion(-) diff --git a/catalog/resource_metastore.go b/catalog/resource_metastore.go index 6cd07adb72..5d9f7b79ec 100644 --- a/catalog/resource_metastore.go +++ b/catalog/resource_metastore.go @@ -95,7 +95,7 @@ func ResourceMetastore() *schema.Resource { if err != nil { return err } - return common.StructToData(mi, s, d) + return common.StructToData(mi.MetastoreInfo, s, d) }, func(w *databricks.WorkspaceClient) error { mi, err := w.Metastores.GetById(ctx, d.Id()) if err != nil { diff --git a/catalog/resource_metastore_test.go b/catalog/resource_metastore_test.go index 6229fecfdd..5ca1c46445 100644 --- a/catalog/resource_metastore_test.go +++ b/catalog/resource_metastore_test.go @@ -3,6 +3,7 @@ package catalog import ( "testing" + "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/terraform-provider-databricks/qa" ) @@ -532,3 +533,96 @@ func TestUpdateAccountMetastore_DeltaSharingScopeOnly(t *testing.T) { `, }.ApplyNoError(t) } + +func TestReadAccountMetastore(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/accounts/100/metastores/abc?", + Response: catalog.AccountsMetastoreInfo{ + MetastoreInfo: &catalog.MetastoreInfo{ + StorageRoot: "s3://b/abc", + Name: "a", + Region: "us-east1", + }, + }, + }, + }, + Resource: ResourceMetastore(), + 
AccountID: "100", + ID: "abc", + Read: true, + New: true, + }.ApplyAndExpectData(t, + map[string]any{ + "id": "abc", + "storage_root": "s3://b/abc", + "name": "a", + "region": "us-east1", + }) +} + +func TestReadAccountMetastore_Error(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/accounts/100/metastores/abc?", + Response: apierr.APIErrorBody{ + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + Message: "Metastore with the given ID could not be found.", + }, + Status: 404, + }, + }, + Resource: ResourceMetastore(), + AccountID: "100", + ID: "abc", + Read: true, + }.ExpectError(t, "resource is not expected to be removed") +} + +func TestReadMetastore(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/metastores/abc?", + Response: catalog.MetastoreInfo{ + StorageRoot: "s3://b/abc", + Name: "a", + }, + }, + }, + Resource: ResourceMetastore(), + ID: "abc", + Read: true, + New: true, + }.ApplyAndExpectData(t, + map[string]any{ + "id": "abc", + "storage_root": "s3://b/abc", + "name": "a", + }) +} + +func TestReadMetastore_Error(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/metastores/abc?", + Response: apierr.APIErrorBody{ + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + Message: "Metastore with the given ID could not be found.", + }, + Status: 404, + }, + }, + Resource: ResourceMetastore(), + ID: "abc", + Read: true, + New: true, + }.ExpectError(t, "resource is not expected to be removed") +} From 31da6b58a46d596781a6c025f7e80b1bd49e2dab Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Fri, 13 Oct 2023 18:46:24 +0100 Subject: [PATCH 25/36] Bump github.com/databricks/databricks-sdk-go from 0.22.0 to 0.23.0 (#2794) * upgrade go sdk * fix test * fix * fix model serving test * suppress diff * add suppress diff * revert --- catalog/resource_metastore_data_access.go | 10 +-- .../resource_metastore_data_access_test.go | 6 +- catalog/resource_storage_credential.go | 22 +++--- docs/resources/model_serving.md | 3 +- exporter/exporter_test.go | 4 +- go.mod | 32 ++++---- go.sum | 74 +++++++++---------- serving/resource_model_serving.go | 8 ++ 8 files changed, 84 insertions(+), 75 deletions(-) diff --git a/catalog/resource_metastore_data_access.go b/catalog/resource_metastore_data_access.go index 9f8dadcc68..5d00ac1608 100644 --- a/catalog/resource_metastore_data_access.go +++ b/catalog/resource_metastore_data_access.go @@ -142,8 +142,8 @@ func ResourceMetastoreDataAccess() *schema.Resource { return c.AccountOrWorkspaceRequest(func(acc *databricks.AccountClient) error { var storageCredential *catalog.AccountsStorageCredentialInfo storageCredential, err = acc.StorageCredentials.Get(ctx, catalog.GetAccountStorageCredentialRequest{ - MetastoreId: metastoreId, - Name: dacName, + MetastoreId: metastoreId, + StorageCredentialName: dacName, }) if err != nil { return err @@ -179,9 +179,9 @@ func ResourceMetastoreDataAccess() *schema.Resource { } return c.AccountOrWorkspaceRequest(func(acc *databricks.AccountClient) error { return acc.StorageCredentials.Delete(ctx, catalog.DeleteAccountStorageCredentialRequest{ - MetastoreId: metastoreId, - Name: dacName, - Force: force, + MetastoreId: metastoreId, + StorageCredentialName: dacName, + Force: force, }) }, func(w *databricks.WorkspaceClient) error { return w.StorageCredentials.Delete(ctx, catalog.DeleteStorageCredentialRequest{ 
diff --git a/catalog/resource_metastore_data_access_test.go b/catalog/resource_metastore_data_access_test.go index 335be92948..25a9231c1f 100644 --- a/catalog/resource_metastore_data_access_test.go +++ b/catalog/resource_metastore_data_access_test.go @@ -209,7 +209,7 @@ func TestCreateAccountDacWithAws(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/accounts/100/metastores/abc/storage-credentials/?", + Resource: "/api/2.0/accounts/100/metastores/abc/storage-credentials/bcd?", Response: catalog.StorageCredentialInfo{ Name: "bcd", AwsIamRole: &catalog.AwsIamRole{ @@ -274,7 +274,7 @@ func TestCreateAccountDacWithAzMI(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/accounts/100/metastores/abc/storage-credentials/?", + Resource: "/api/2.0/accounts/100/metastores/abc/storage-credentials/bcd?", Response: catalog.StorageCredentialInfo{ Name: "bcd", AzureManagedIdentity: &catalog.AzureManagedIdentity{ @@ -340,7 +340,7 @@ func TestCreateAccountDacWithDbGcpSA(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/accounts/100/metastores/abc/storage-credentials/?", + Resource: "/api/2.0/accounts/100/metastores/abc/storage-credentials/bcd?", Response: catalog.StorageCredentialInfo{ Name: "bcd", DatabricksGcpServiceAccount: &catalog.DatabricksGcpServiceAccountResponse{ diff --git a/catalog/resource_storage_credential.go b/catalog/resource_storage_credential.go index 4392ceb40f..a5a96bd017 100644 --- a/catalog/resource_storage_credential.go +++ b/catalog/resource_storage_credential.go @@ -70,9 +70,9 @@ func ResourceStorageCredential() *schema.Resource { return nil } _, err = acc.StorageCredentials.Update(ctx, catalog.AccountsUpdateStorageCredential{ - CredentialInfo: &update, - MetastoreId: metastoreId, - Name: storageCredential.CredentialInfo.Id, + CredentialInfo: &update, + MetastoreId: metastoreId, + StorageCredentialName: storageCredential.CredentialInfo.Id, }) if err != nil { return err @@ -101,8 +101,8 @@ func ResourceStorageCredential() *schema.Resource { return c.AccountOrWorkspaceRequest(func(acc *databricks.AccountClient) error { storageCredential, err := acc.StorageCredentials.Get(ctx, catalog.GetAccountStorageCredentialRequest{ - MetastoreId: d.Get("metastore_id").(string), - Name: d.Id(), + MetastoreId: d.Get("metastore_id").(string), + StorageCredentialName: d.Id(), }) if err != nil { return err @@ -122,9 +122,9 @@ func ResourceStorageCredential() *schema.Resource { return c.AccountOrWorkspaceRequest(func(acc *databricks.AccountClient) error { _, err := acc.StorageCredentials.Update(ctx, catalog.AccountsUpdateStorageCredential{ - CredentialInfo: &update, - MetastoreId: d.Get("metastore_id").(string), - Name: d.Id(), + CredentialInfo: &update, + MetastoreId: d.Get("metastore_id").(string), + StorageCredentialName: d.Id(), }) if err != nil { return err @@ -142,9 +142,9 @@ func ResourceStorageCredential() *schema.Resource { force := d.Get("force_destroy").(bool) return c.AccountOrWorkspaceRequest(func(acc *databricks.AccountClient) error { return acc.StorageCredentials.Delete(ctx, catalog.DeleteAccountStorageCredentialRequest{ - Force: force, - Name: d.Id(), - MetastoreId: d.Get("metastore_id").(string), + Force: force, + StorageCredentialName: d.Id(), + MetastoreId: d.Get("metastore_id").(string), }) }, func(w *databricks.WorkspaceClient) error { return w.StorageCredentials.Delete(ctx, catalog.DeleteStorageCredentialRequest{ diff --git a/docs/resources/model_serving.md b/docs/resources/model_serving.md index d9a0a6cc9c..375cb3c33f 100644 --- 
a/docs/resources/model_serving.md +++ b/docs/resources/model_serving.md @@ -58,6 +58,7 @@ The following arguments are supported: * `model_version` - (Required) The version of the model in Databricks Model Registry to be served. * `workload_size` - (Required) The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). * `scale_to_zero_enabled` - Whether the compute resources for the served model should scale down to zero. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0. The default value is `true`. +* `workload_type` - The workload type of the served model. The workload type selects which type of compute to use in the endpoint. For deep learning workloads, GPU acceleration is available by selecting workload types like `GPU_SMALL` and others. See documentation for all options. The default value is `CPU`. * `environment_vars` - (Optional) a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. * `instance_profile_arn` - (Optional) ARN of the instance profile that the served model will use to access AWS resources. @@ -96,7 +97,7 @@ timeouts { The model serving resource can be imported using the name of the endpoint. ```bash -$ terraform import databricks_model_serving.this +terraform import databricks_model_serving.this ``` ## Related Resources diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index 24faf303a2..f4d01f35ee 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -1494,14 +1494,14 @@ func TestImportingIPAccessLists(t *testing.T) { Method: "GET", Resource: "/api/2.0/ip-access-lists/123?", Response: settings.GetIpAccessListResponse{ - IpAccessLists: []settings.IpAccessListInfo{resp}, + IpAccessList: &resp, }, }, { Method: "GET", Resource: "/api/2.0/ip-access-lists/124?", Response: settings.GetIpAccessListResponse{ - IpAccessLists: []settings.IpAccessListInfo{resp2}, + IpAccessList: &resp2, }, }, { diff --git a/go.mod b/go.mod index 2f02150a18..5bdf061b18 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/databricks/terraform-provider-databricks go 1.19 require ( - github.com/databricks/databricks-sdk-go v0.22.0 + github.com/databricks/databricks-sdk-go v0.23.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 @@ -12,14 +12,14 @@ require ( github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0 github.com/stretchr/testify v1.8.4 github.com/zclconf/go-cty v1.14.1 - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 - golang.org/x/mod v0.12.0 + golang.org/x/exp v0.0.0-20231006140011-7918f672742d + golang.org/x/mod v0.13.0 ) require ( cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/cloudflare/circl v1.3.3 
// indirect @@ -27,19 +27,19 @@ require ( github.com/fatih/color v1.15.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.7 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.5.1 // indirect + github.com/hashicorp/go-plugin v1.5.2 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.6.0 // indirect + github.com/hashicorp/hc-install v0.6.1 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.19.0 // indirect github.com/hashicorp/terraform-json v0.17.1 // indirect @@ -57,19 +57,19 @@ require ( github.com/oklog/run v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect - github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + github.com/vmihailenco/msgpack/v5 v5.4.0 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.13.0 // indirect - golang.org/x/net v0.15.0 // indirect - golang.org/x/oauth2 v0.12.0 // indirect - golang.org/x/sys v0.12.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect - google.golang.org/api v0.140.0 // indirect + google.golang.org/api v0.146.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230911183012-2d3300fd4832 // indirect - google.golang.org/grpc v1.58.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect + google.golang.org/grpc v1.58.3 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index c3866daa77..0af3626495 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2Aawl dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX7IL/m9Y5LO+KQYv+t1CQOiFe6+SV2J7bE= +github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/acomagu/bufpipe v1.0.4 
h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= @@ -21,8 +21,9 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/databricks/databricks-sdk-go v0.22.0 h1:CIwNZcOV7wYZmRLl1NWA+07f2j6H9h5L6MhR5O/4dRw= -github.com/databricks/databricks-sdk-go v0.22.0/go.mod h1:COiklTN3IdieazXcs4TnMou5GQFwIM7uhMGrz7nEAAk= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/databricks/databricks-sdk-go v0.23.0 h1:rdLMA7cDUPJiCSMyuUSufzDDmugqyp79SNiY/vc7kMI= +github.com/databricks/databricks-sdk-go v0.23.0/go.mod h1:a6rErRNh5bz+IJbO07nwW70iGyvtWidy1p/S5thepXI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -35,8 +36,8 @@ github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYF github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= -github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= -github.com/go-git/go-git/v5 v5.8.1 h1:Zo79E4p7TRk0xoRgMq0RShiTHGKcKI4+DI6BfJc/Q+A= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= @@ -67,16 +68,16 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= 
-github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= -github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= @@ -92,15 +93,15 @@ github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+ github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.5.1 h1:oGm7cWBaYIp3lJpx1RUEfLWophprE2EV/KUeqBYo+6k= -github.com/hashicorp/go-plugin v1.5.1/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= +github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= +github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.0 h1:fDHnU7JNFNSQebVKYhHZ0va1bC6SrPQ8fpebsvNr2w4= -github.com/hashicorp/hc-install v0.6.0/go.mod h1:10I912u3nntx9Umo1VAeYPUUuehk0aRQJYpMwbX5wQA= +github.com/hashicorp/hc-install v0.6.1 h1:IGxShH7AVhPaSuSJpKtVi/EFORNjO+OYVJJrAtGG2mY= +github.com/hashicorp/hc-install v0.6.1/go.mod h1:0fW3jpg+wraYSnFDJ6Rlie3RvLf1bIqVIkzoon4KoVE= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl/v2 v2.18.1 h1:6nxnOJFku1EuSawSD81fuviYUV8DxFr3fp2dUi3ZYSo= @@ -162,7 +163,6 @@ github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJC github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -172,8 +172,8 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= 
github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= -github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/msgpack/v5 v5.4.0 h1:hRM0digJwyR6vll33NNAwCFguy5JuBD6jxDmQP3l608= +github.com/vmihailenco/msgpack/v5 v5.4.0/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= @@ -187,18 +187,18 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -211,11 +211,11 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.15.0 
h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= -golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -240,14 +240,14 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -268,11 +268,11 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.140.0 h1:CaXNdYOH5oQQI7l6iKTHHiMTdxZca4/02hRg2U8c2hM= -google.golang.org/api v0.140.0/go.mod h1:aGbCiFgtwb2P6badchFbSBUurV6oR5d50Af4iNJtDdI= +google.golang.org/api v0.146.0 h1:9aBYT4vQXt9dhCuLNfwfd3zpwu8atg0yPkjBymwSrOM= +google.golang.org/api 
v0.146.0/go.mod h1:OARJqIfoYjXJj4C1AiBSXYZt03qsoz8FQYU6fBEfrHM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -280,15 +280,15 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230911183012-2d3300fd4832 h1:o4LtQxebKIJ4vkzyhtD2rfUNZ20Zf0ik5YVP5E7G7VE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230911183012-2d3300fd4832/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o= -google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/serving/resource_model_serving.go b/serving/resource_model_serving.go index 6bac3b434f..c717e2cbf9 100644 --- a/serving/resource_model_serving.go +++ b/serving/resource_model_serving.go @@ -22,6 +22,14 @@ func ResourceModelServing() *schema.Resource { common.MustSchemaPath(m, "config", "served_models", "scale_to_zero_enabled").Default = true common.MustSchemaPath(m, "config", "served_models", "name").Computed = true + common.MustSchemaPath(m, "config", "served_models", "workload_type").Default = "CPU" + // if GPU serving is not enabled, workload_type will always be empty + common.MustSchemaPath(m, "config", "served_models", "workload_type").DiffSuppressFunc = func(k, old, new string, d *schema.ResourceData) bool { + if old == "" || new == "" { + return true + } + return false + } common.MustSchemaPath(m, "config", "traffic_config").Computed = true m["serving_endpoint_id"] = &schema.Schema{ From b2e48b016d71f15e7dc1c60654ae6da9d099512d Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Sat, 14 Oct 2023 12:39:49 +0100 Subject: [PATCH 26/36] suppress diff (#2799) --- 
docs/resources/sql_query.md | 23 +++++++++++++++++++++++ sql/resource_sql_query.go | 2 +- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/docs/resources/sql_query.md b/docs/resources/sql_query.md index 54e3325f2e..95e74c102a 100644 --- a/docs/resources/sql_query.md +++ b/docs/resources/sql_query.md @@ -88,6 +88,29 @@ resource "databricks_permissions" "q1" { } ``` +## Argument Reference + +The following arguments are supported: + +* `data_source_id` - Data source ID of a [SQL warehouse](sql_endpoint.md). +* `query` - The text of the query to be run. +* `name` - The title of this query that appears in list views, widget headings, and on the query page. +* `parent` - The identifier of the workspace folder containing the object. +* `description` - General description that conveys additional information about this query, such as usage notes. +* `run_as_role` - The role under which the query runs. Possible values are `viewer` and `owner`. + +### `parameter` configuration block + +For each parameter definition, the following attributes are supported: + +* `title` - The text displayed in a parameter picking widget. +* `name` - The literal parameter marker that appears between double curly braces in the query text. +Parameters can have several different types. The type is specified using one of the following configuration blocks: `text`, `number`, `enum`, `query`, `date`, `datetime`, `datetimesec`, `date_range`, `datetime_range`, `datetimesec_range`. + +For the `text`, `number`, `date`, `datetime`, and `datetimesec` blocks: + +* `value` - The default value for this parameter. + ## Import You can import a `databricks_sql_query` resource with an ID like the following: diff --git a/sql/resource_sql_query.go b/sql/resource_sql_query.go index 7f65010f57..65f25c9e0e 100644 --- a/sql/resource_sql_query.go +++ b/sql/resource_sql_query.go @@ -24,7 +24,7 @@ type QueryEntity struct { Schedule *QuerySchedule `json:"schedule,omitempty"` Tags []string `json:"tags,omitempty"` Parameter []QueryParameter `json:"parameter,omitempty"` - RunAsRole string `json:"run_as_role,omitempty"` + RunAsRole string `json:"run_as_role,omitempty" tf:"suppress_diff"` Parent string `json:"parent,omitempty" tf:"suppress_diff,force_new"` CreatedAt string `json:"created_at,omitempty" tf:"computed"` UpdatedAt string `json:"updated_at,omitempty" tf:"computed"` From d14bbe87babe6facf845cffc1668ceff805e4258 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Sat, 14 Oct 2023 13:45:40 +0200 Subject: [PATCH 27/36] Exporter: improve exporting of `databricks_cluster_policy` resource (#2680) This includes: - Listing of all cluster policies, not only those that are referenced in jobs/clusters. - Emit secret scopes when they are referenced in Spark Conf or environment variables. Also fixes generation of non-existent secret scopes, which happens when we emit a non-existent secret scope (for example, a dangling reference in a cluster policy or an old job configuration). This fixes #2664 --- docs/guides/experimental-exporter.md | 21 +++---- exporter/exporter_test.go | 33 +++++++++++++ exporter/importables.go | 46 +++++++++++++++++- exporter/importables_test.go | 55 ++++++++++++++++++++++ exporter/test-data/get-cluster-policy.json | 2 +- 5 files changed, 144 insertions(+), 13 deletions(-) diff --git a/docs/guides/experimental-exporter.md b/docs/guides/experimental-exporter.md index 4066433bc8..472d5311c1 100644 --- a/docs/guides/experimental-exporter.md +++ b/docs/guides/experimental-exporter.md @@ -55,26 +55,27 @@
Services are just logical groups of resources used for filtering and organization in files written in `-directory`. All resources are globally sorted by their resource name, which technically allows you to use generated files for compliance purposes. Nevertheless, managing the entire Databricks workspace with Terraform is the preferred way, with the exception of notebooks and possibly libraries, which may have their own CI/CD processes. * `access` - [databricks_permissions](../resources/permissions.md), [databricks_instance_profile](../resources/instance_profile.md) and [databricks_ip_access_list](../resources/ip_access_list.md). -* `compute` - **listing** [databricks_cluster](../resources/cluster.md). Includes [cluster policies](../resources/cluster_policy.md). -* `directories` - **listing** [databricks_directory](../resources/directory.md) -* `dlt` - **listing** [databricks_pipeline](../resources/pipeline.md) +* `compute` - **listing** [databricks_cluster](../resources/cluster.md). +* `directories` - **listing** [databricks_directory](../resources/directory.md). +* `dlt` - **listing** [databricks_pipeline](../resources/pipeline.md). * `groups` - [databricks_group](../data-sources/group.md) with [membership](../resources/group_member.md) and [data access](../resources/group_instance_profile.md). * `jobs` - **listing** [databricks_job](../resources/job.md). Usually, there are more automated jobs than interactive clusters, so they get their own file in this tool's output. * `mlflow-webhooks` - **listing** [databricks_mlflow_webhook](../resources/mlflow_webhook.md). * `model-serving` - **listing** [databricks_model_serving](../resources/model_serving.md). * `mounts` - **listing** works only in combination with the `-mounts` command-line option. -* `notebooks` - **listing** [databricks_notebook](../resources/notebook.md) and [databricks_workspace_file](../resources/workspace_file.md) +* `notebooks` - **listing** [databricks_notebook](../resources/notebook.md) and [databricks_workspace_file](../resources/workspace_file.md). +* `policies` - **listing** [databricks_cluster_policy](../resources/cluster_policy.md). * `pools` - **listing** [instance pools](../resources/instance_pool.md). -* `repos` - **listing** [databricks_repo](../resources/repo.md) +* `repos` - **listing** [databricks_repo](../resources/repo.md). * `secrets` - **listing** [databricks_secret_scope](../resources/secret_scope.md) along with [keys](../resources/secret.md) and [ACLs](../resources/secret_acl.md). * `sql-alerts` - **listing** [databricks_sql_alert](../resources/sql_alert.md). -* `sql-dashboards` - **listing** [databricks_sql_dashboard](../resources/sql_dashboard.md) along with associated [databricks_sql_widget](../resources/sql_widget.md) and [databricks_sql_visualization](../resources/sql_visualization.md) * `sql-dashboards` - **listing** [databricks_sql_dashboard](../resources/sql_dashboard.md) along with associated [databricks_sql_widget](../resources/sql_widget.md) and [databricks_sql_visualization](../resources/sql_visualization.md).
+* `sql-endpoints` - **listing** [databricks_sql_endpoint](../resources/sql_endpoint.md) along with [databricks_sql_global_config](../resources/sql_global_config.md). +* `sql-queries` - **listing** [databricks_sql_query](../resources/sql_query.md). * `storage` - any referenced [databricks_dbfs_file](../resources/dbfs_file.md) will be downloaded locally and properly arranged into terraform state. -* `users` - [databricks_user](../resources/user.md) and [databricks_service_principal](../resources/service_principal.md) are written to their own file, simply because of their amount. If you use SCIM provisioning, the only use case for importing `users` service is to migrate workspaces. -* `workspace` - [databricks_workspace_conf](../resources/workspace_conf.md) and [databricks_global_init_script](../resources/global_init_script.md) +* `users` - [databricks_user](../resources/user.md) and [databricks_service_principal](../resources/service_principal.md) are written to their own file, simply because of their amount. If you use SCIM provisioning, the only use-case for importing `users` service is to migrate workspaces. +* `workspace` - [databricks_workspace_conf](../resources/workspace_conf.md) and [databricks_global_init_script](../resources/global_init_script.md). ## Secrets diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index f4d01f35ee..83c05c4d55 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -233,6 +233,13 @@ var emptyPipelines = qa.HTTPFixture{ Response: pipelines.PipelineListResponse{}, } +var emptyClusterPolicies = qa.HTTPFixture{ + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.0/policies/clusters/list?", + Response: compute.ListPoliciesResponse{}, +} + var emptyMlflowWebhooks = qa.HTTPFixture{ Method: "GET", ReuseRequest: true, @@ -354,6 +361,7 @@ func TestImportingUsersGroupsSecretScopes(t *testing.T) { emptySqlQueries, emptySqlAlerts, emptyPipelines, + emptyClusterPolicies, emptyWorkspaceConf, allKnownWorkspaceConfs, dummyWorkspaceConf, @@ -555,6 +563,7 @@ func TestImportingNoResourcesError(t *testing.T) { emptyMlflowWebhooks, emptyWorkspaceConf, emptyInstancePools, + emptyClusterPolicies, dummyWorkspaceConf, { Method: "GET", @@ -751,6 +760,30 @@ func TestImportingClusters(t *testing.T) { ReuseRequest: true, Response: getJSONObject("test-data/get-job-14-permissions.json"), }, + { + Method: "GET", + Resource: "/api/2.0/secrets/list?scope=some-kv-scope", + ReuseRequest: true, + Response: getJSONObject("test-data/secret-scopes-list-scope-response.json"), + }, + { + Method: "GET", + Resource: "/api/2.0/secrets/acls/list?scope=some-kv-scope", + ReuseRequest: true, + Response: getJSONObject("test-data/secret-scopes-list-scope-acls-response.json"), + }, + { + Method: "GET", + Resource: "/api/2.0/secrets/acls/get?principal=test%40test.com&scope=some-kv-scope", + ReuseRequest: true, + Response: getJSONObject("test-data/secret-scopes-get-principal-response.json"), + }, + { + Method: "GET", + Resource: "/api/2.0/secrets/scopes/list", + ReuseRequest: true, + Response: getJSONObject("test-data/secret-scopes-response.json"), + }, }, func(ctx context.Context, client *common.DatabricksClient) { os.Setenv("EXPORTER_PARALLELISM_databricks_cluster", "1") diff --git a/exporter/importables.go b/exporter/importables.go index 51ab96df71..bd9c22482d 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -13,6 +13,7 @@ import ( "golang.org/x/exp/slices" + "github.com/databricks/databricks-sdk-go/service/compute" 
"github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/settings" "github.com/databricks/terraform-provider-databricks/clusters" @@ -551,10 +552,38 @@ var resourcesMap map[string]importable = map[string]importable{ }, }, "databricks_cluster_policy": { - Service: "compute", + Service: "policies", Name: func(ic *importContext, d *schema.ResourceData) string { return d.Get("name").(string) }, + List: func(ic *importContext) error { + w, err := ic.Client.WorkspaceClient() + if err != nil { + return err + } + policies, err := w.ClusterPolicies.ListAll(ic.Context, compute.ListClusterPoliciesRequest{}) + if err != nil { + return err + } + for offset, policy := range policies { + log.Printf("[INFO] Scanning %d: %v", offset+1, policy) + if slices.Contains(predefinedClusterPolicies, policy.Name) { + continue + } + if !ic.MatchesName(policy.Name) { + log.Printf("[DEBUG] Policy %s doesn't match %s filter", policy.Name, ic.match) + continue + } + ic.Emit(&resource{ + Resource: "databricks_cluster_policy", + ID: policy.PolicyId, + }) + if offset%10 == 0 { + log.Printf("[INFO] Scanned %d of %d cluster policies", offset+1, len(policies)) + } + } + return nil + }, Import: func(ic *importContext, r *resource) error { if ic.meAdmin { ic.Emit(&resource{ @@ -573,7 +602,8 @@ var resourcesMap map[string]importable = map[string]importable{ defaultValue, dok := policy["defaultValue"] typ := policy["type"] if !vok && !dok { - log.Printf("[INFO] Skipping policy element as it doesn't have both value and defaultValue") + log.Printf("[DEBUG] Skipping policy element as it doesn't have both value and defaultValue. k='%v', policy='%v'", + k, policy) continue } if k == "aws_attributes.instance_profile_arn" { @@ -599,6 +629,15 @@ var resourcesMap map[string]importable = map[string]importable{ ID: eitherString(value, defaultValue), }) } + if typ == "fixed" && (strings.HasPrefix(k, "spark_conf.") || strings.HasPrefix(k, "spark_env_vars.")) { + either := eitherString(value, defaultValue) + if res := secretPathRegex.FindStringSubmatch(either); res != nil { + ic.Emit(&resource{ + Resource: "databricks_secret_scope", + ID: res[1], + }) + } + } } policyName := r.Data.Get("name").(string) if slices.Contains(predefinedClusterPolicies, policyName) { @@ -946,6 +985,9 @@ var resourcesMap map[string]importable = map[string]importable{ } return nil }, + Ignore: func(ic *importContext, r *resource) bool { + return r.Data.Get("name").(string) == "" + }, }, "databricks_secret": { Service: "secrets", diff --git a/exporter/importables_test.go b/exporter/importables_test.go index 9a1fdb1fc9..9284d8ac4a 100644 --- a/exporter/importables_test.go +++ b/exporter/importables_test.go @@ -626,6 +626,61 @@ func TestSecretScopeListNoNameMatch(t *testing.T) { }) } +func TestPoliciesListing(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.0/policies/clusters/list?", + Response: compute.ListPoliciesResponse{ + Policies: []compute.Policy{ + { + Name: "Personal Compute", + PolicyId: "123", + }, + { + Name: "abcd", + PolicyId: "456", + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTest() + ic.Client = client + ic.Context = ctx + err := resourcesMap["databricks_cluster_policy"].List(ic) + assert.NoError(t, err) + assert.Equal(t, 1, len(ic.testEmits)) + }) +} + +func TestPoliciesListNoNameMatch(t *testing.T) { + qa.HTTPFixturesApply(t, []qa.HTTPFixture{ + { + Method: "GET", + Resource: 
"/api/2.0/policies/clusters/list?", + Response: compute.ListPoliciesResponse{ + Policies: []compute.Policy{ + { + Name: "Personal Compute", + }, + { + Name: "abcd", + }, + }, + }, + }, + }, func(ctx context.Context, client *common.DatabricksClient) { + ic := importContextForTest() + ic.Client = client + ic.Context = ctx + ic.match = "bcd" + err := resourcesMap["databricks_cluster_policy"].List(ic) + assert.NoError(t, err) + assert.Equal(t, 0, len(ic.testEmits)) + }) +} + func TestAwsS3MountProfile(t *testing.T) { ic := importContextForTest() ic.mounts = true diff --git a/exporter/test-data/get-cluster-policy.json b/exporter/test-data/get-cluster-policy.json index 6b0a8b7ff7..693927c510 100644 --- a/exporter/test-data/get-cluster-policy.json +++ b/exporter/test-data/get-cluster-policy.json @@ -1,6 +1,6 @@ { "created_at_timestamp": 1606308550000, - "definition": "{\"aws_attributes.instance_profile_arn\":{\"hidden\":true,\"type\":\"fixed\",\"value\":\"arn:aws:iam::12345:instance-profile/shard-s3-access\"},\"instance_pool_id\":{\"hidden\":true,\"type\":\"fixed\",\"value\":\"pool1\"},\"autoscale.max_workers\":{\"defaultValue\":2,\"maxValue\":5,\"type\":\"range\"}}", + "definition": "{\"aws_attributes.instance_profile_arn\":{\"hidden\":true,\"type\":\"fixed\",\"value\":\"arn:aws:iam::12345:instance-profile/shard-s3-access\"},\"instance_pool_id\":{\"hidden\":true,\"type\":\"fixed\",\"value\":\"pool1\"},\"spark_conf.abc\":{\"hidden\":true,\"type\":\"fixed\",\"value\":\"{{secrets/some-kv-scope/secret}}\"},\"autoscale.max_workers\":{\"defaultValue\":2,\"maxValue\":5,\"type\":\"range\"}}", "name": "users cluster policy", "policy_id": "123" } From aa9783d44c776c50efd6c36490c89e5d6911ba45 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Mon, 16 Oct 2023 10:49:37 +0200 Subject: [PATCH 28/36] Improve Provider Logging (#2801) * Improve test logging * Fix logging in TF provider * fix * fix lint --- internal/acceptance/init_test.go | 41 ++++---------------------------- logger/logger.go | 3 ++- 2 files changed, 6 insertions(+), 38 deletions(-) diff --git a/internal/acceptance/init_test.go b/internal/acceptance/init_test.go index e9008bc773..b2ce5571f4 100644 --- a/internal/acceptance/init_test.go +++ b/internal/acceptance/init_test.go @@ -20,6 +20,7 @@ import ( "github.com/databricks/databricks-sdk-go/logger" "github.com/databricks/terraform-provider-databricks/commands" "github.com/databricks/terraform-provider-databricks/common" + dbproviderlogger "github.com/databricks/terraform-provider-databricks/logger" "github.com/databricks/terraform-provider-databricks/provider" "github.com/databricks/terraform-provider-databricks/qa" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -30,12 +31,8 @@ import ( func init() { rand.Seed(time.Now().UnixMicro()) databricks.WithProduct("tf-integration-tests", common.Version()) - if isInDebug() { - // Terraform SDK v2 intercepts default logger - // that Go SDK SimpleLogger is using, so we have - // to re-implement one again. 
- logger.DefaultLogger = stdErrLogger{} - } + os.Setenv("TF_LOG", "DEBUG") + dbproviderlogger.SetLogger() } func workspaceLevel(t *testing.T, steps ...step) { @@ -180,8 +177,8 @@ func run(t *testing.T, steps []step) { ts := []resource.TestStep{} ctx := context.Background() - stepConfig := "" for i, s := range steps { + stepConfig := "" if s.Template != "" { stepConfig = environmentTemplate(t, s.Template, vars) } @@ -381,33 +378,3 @@ func loadDebugEnvIfRunsFromIDE(t *testing.T, key string) { os.Setenv(k, v) } } - -type stdErrLogger struct { - traceEnabled bool -} - -func (l stdErrLogger) Enabled(_ context.Context, level logger.Level) bool { - return true -} - -func (l stdErrLogger) Tracef(_ context.Context, format string, v ...interface{}) { - if l.traceEnabled { - fmt.Fprintf(os.Stderr, "[TRACE] "+format+"\n", v...) - } -} - -func (l stdErrLogger) Debugf(_ context.Context, format string, v ...interface{}) { - fmt.Fprintf(os.Stderr, "\n[DEBUG] "+format+"\n", v...) -} - -func (l stdErrLogger) Infof(_ context.Context, format string, v ...interface{}) { - fmt.Fprintf(os.Stderr, "\n[INFO] "+format+"\n", v...) -} - -func (l stdErrLogger) Warnf(_ context.Context, format string, v ...interface{}) { - fmt.Fprintf(os.Stderr, "\n[WARN] "+format+"\n", v...) -} - -func (l stdErrLogger) Errorf(_ context.Context, format string, v ...interface{}) { - fmt.Fprintf(os.Stderr, "[ERROR] "+format+"\n", v...) -} diff --git a/logger/logger.go b/logger/logger.go index a713c5f906..6d94476fa2 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -61,5 +61,6 @@ func (tfLogger *TfLogger) Errorf(ctx context.Context, format string, v ...any) { } func SetLogger() { - logger.DefaultLogger = &TfLogger{} + var tfLogger *TfLogger + logger.DefaultLogger = tfLogger } From 9f3658dd9b08e2c7a1dd5ec9e0af28aa8f3f8616 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:06:22 +0200 Subject: [PATCH 29/36] Release v1.28.0 (#2793) * Release v1.28.0 * Release v1.28.0 * Release v1.28.0 * Release v1.28.0 --- CHANGELOG.md | 38 ++++++++++++++++++++++++++++++++++++++ common/version.go | 2 +- go.mod | 6 +++--- go.sum | 14 +++++++------- 4 files changed, 49 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a7ec0965b..dfdb3130ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,43 @@ # Version changelog +## 1.28.0 +* Added `dashboard_filters_enabled` attribute to [databricks_sql_dashboard](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/sql_dashboard) resource ([#2725](https://github.com/databricks/terraform-provider-databricks/pull/2725)). + * Added `empty_result_state` attribute to the [databricks_sql_alert](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/sql_alert) resource ([#2724](https://github.com/databricks/terraform-provider-databricks/pull/2724)). + * Added enabled field for queueing ([#2741](https://github.com/databricks/terraform-provider-databricks/pull/2741)). + * Added [databricks_registered_model](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/registered_model) resource ([#2771](https://github.com/databricks/terraform-provider-databricks/pull/2771)). + * Added logging package and fixed issue with API calls not being shown in DEBUG or lower log levels ([#2747](https://github.com/databricks/terraform-provider-databricks/pull/2747)). 
+ * Added [databricks_system_schema](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/system_schema) resource ([#2606](https://github.com/databricks/terraform-provider-databricks/pull/2606)). + * Don't rely on having `@` to check if it's a user or SP ([#2765](https://github.com/databricks/terraform-provider-databricks/pull/2765)). + * Forced recreation of UC Volume when `volume_type` and `storage_location` are changed ([#2734](https://github.com/databricks/terraform-provider-databricks/pull/2734)). + * Improved Provider Logging ([#2801](https://github.com/databricks/terraform-provider-databricks/pull/2801)). + * Marked attributes in the `run_as` block in [databricks_job](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/job) as `ExactlyOneOf` ([#2784](https://github.com/databricks/terraform-provider-databricks/pull/2784)). + * Masked sensitive field ([#2755](https://github.com/databricks/terraform-provider-databricks/pull/2755)). + * Removed deprecation warning from `cluster_mount_info` in [databricks_cluster](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster), but marked it as experimental ([#2787](https://github.com/databricks/terraform-provider-databricks/pull/2787)). + * Suppress diff for `user_name` in [databricks_user](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/user) when the changes are only in character case ([#2786](https://github.com/databricks/terraform-provider-databricks/pull/2786)). + * Refresh grant lists ([#2746](https://github.com/databricks/terraform-provider-databricks/pull/2746)). + * Fixed `run_as_role` drift for the `databricks_sql_query` resource ([#2799](https://github.com/databricks/terraform-provider-databricks/pull/2799)). + * Fixed metastore read and added a test ([#2795](https://github.com/databricks/terraform-provider-databricks/pull/2795)). + +Exporter: + * Exporter: fixed the logic for omitting some fields ([#2774](https://github.com/databricks/terraform-provider-databricks/pull/2774)). + * Exporter: improved exporting of the [databricks_cluster_policy](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster_policy) resource ([#2680](https://github.com/databricks/terraform-provider-databricks/pull/2680)). + * Exporter: parallel export of resources ([#2742](https://github.com/databricks/terraform-provider-databricks/pull/2742)). + +Documentation: + * Updated [databricks_grants](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/grants) examples for [databricks_external_location](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/external_location) ([#2735](https://github.com/databricks/terraform-provider-databricks/pull/2735)). + * Fixed documentation for [databricks_schema](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/schema) about the default value for `storage_root` ([#2790](https://github.com/databricks/terraform-provider-databricks/pull/2790)). + * Clarified possible values for the `principal` attribute of [databricks_secret_acl](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/secret_acl) ([#2772](https://github.com/databricks/terraform-provider-databricks/pull/2772)). + +Other Changes: + * Bumped databricks-sdk-go dependency to 0.21.0 ([#2738](https://github.com/databricks/terraform-provider-databricks/pull/2738)). 
+ * Bumped github.com/databricks/databricks-sdk-go from 0.21.0 to 0.22.0 ([#2761](https://github.com/databricks/terraform-provider-databricks/pull/2761)). + * Bumped github.com/databricks/databricks-sdk-go from 0.22.0 to 0.23.0 ([#2794](https://github.com/databricks/terraform-provider-databricks/pull/2794)). + * Bumped github.com/hashicorp/hcl/v2 from 2.18.0 to 2.18.1 ([#2776](https://github.com/databricks/terraform-provider-databricks/pull/2776)). + * Bumped github.com/zclconf/go-cty from 1.14.0 to 1.14.1 ([#2777](https://github.com/databricks/terraform-provider-databricks/pull/2777)). + * Used `terraform-field-dev` as code owner instead of `field-dev-ecosystem` ([#2718](https://github.com/databricks/terraform-provider-databricks/pull/2718)). + * GitHub Actions workflow to compute provider schema diff ([#2740](https://github.com/databricks/terraform-provider-databricks/pull/2740)). + + ## 1.27.0 * Fixed [databricks_permissions](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/permissions) resource for correct permissions update. ([#2719](https://github.com/databricks/terraform-provider-databricks/pull/2719)). * Added `owner` & `force_destroy` to [databricks_metastore_data_access](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/metastore_data_access) ([#2713](https://github.com/databricks/terraform-provider-databricks/pull/2713)). diff --git a/common/version.go b/common/version.go index cd12afac75..295e0895bc 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.27.0" + version = "1.28.0" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider diff --git a/go.mod b/go.mod index 5bdf061b18..89ba880f93 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( ) require ( - cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go/compute v1.23.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect github.com/agext/levenshtein v1.2.3 // indirect @@ -66,9 +66,9 @@ require ( golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect - google.golang.org/api v0.146.0 // indirect + google.golang.org/api v0.147.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a // indirect google.golang.org/grpc v1.58.3 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 0af3626495..191092b7f4 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.1 h1:V97tBoDaZHb6leicZ1G6DLK2BAaZLJ/7+9BB/En3hR0= +cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= dario.cat/mergo v1.0.0 
h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= @@ -221,7 +221,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -271,8 +271,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.146.0 h1:9aBYT4vQXt9dhCuLNfwfd3zpwu8atg0yPkjBymwSrOM= -google.golang.org/api v0.146.0/go.mod h1:OARJqIfoYjXJj4C1AiBSXYZt03qsoz8FQYU6fBEfrHM= +google.golang.org/api v0.147.0 h1:Can3FaQo9LlVqxJCodNmeZW/ib3/qKAY3rFeXiHo5gc= +google.golang.org/api v0.147.0/go.mod h1:pQ/9j83DcmPd/5C9e2nFOdjjNkDZ1G+zkbK2uvdkJMs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -280,8 +280,8 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a h1:a2MQQVoTo96JC9PMGtGBymLp7+/RzpFc2yX/9WfFg1c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= From af25041fe318c5a5beab5848f0b85e8f3afb6158 Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Wed, 18 Oct 2023 12:40:49 +0100 Subject: [PATCH 30/36] fix storage credential read (#2804) --- catalog/resource_metastore_data_access.go | 4 ++- .../resource_metastore_data_access_test.go | 36 ++++++++++++------- catalog/resource_storage_credential.go | 2 +- 3 files changed, 
27 insertions(+), 15 deletions(-) diff --git a/catalog/resource_metastore_data_access.go b/catalog/resource_metastore_data_access.go index 5d00ac1608..f83ee18494 100644 --- a/catalog/resource_metastore_data_access.go +++ b/catalog/resource_metastore_data_access.go @@ -42,6 +42,8 @@ func adjustDataAccessSchema(m map[string]*schema.Schema) map[string]*schema.Sche m["gcp_service_account_key"].DiffSuppressFunc = SuppressGcpSAKeyDiff common.MustSchemaPath(m, "azure_managed_identity", "credential_id").Computed = true + common.MustSchemaPath(m, "databricks_gcp_service_account", "email").Computed = true + common.MustSchemaPath(m, "databricks_gcp_service_account", "credential_id").Computed = true m["force_destroy"] = &schema.Schema{ Type: schema.TypeBool, @@ -155,7 +157,7 @@ func ResourceMetastoreDataAccess() *schema.Resource { } isDefault := metastore.StorageRootCredentialName == dacName d.Set("is_default", isDefault) - return common.StructToData(storageCredential, dacSchema, d) + return common.StructToData(storageCredential.CredentialInfo, dacSchema, d) }, func(w *databricks.WorkspaceClient) error { var storageCredential *catalog.StorageCredentialInfo storageCredential, err = w.StorageCredentials.GetByName(ctx, dacName) diff --git a/catalog/resource_metastore_data_access_test.go b/catalog/resource_metastore_data_access_test.go index 25a9231c1f..fc8d2b6539 100644 --- a/catalog/resource_metastore_data_access_test.go +++ b/catalog/resource_metastore_data_access_test.go @@ -210,10 +210,12 @@ func TestCreateAccountDacWithAws(t *testing.T) { { Method: "GET", Resource: "/api/2.0/accounts/100/metastores/abc/storage-credentials/bcd?", - Response: catalog.StorageCredentialInfo{ - Name: "bcd", - AwsIamRole: &catalog.AwsIamRole{ - RoleArn: "def", + Response: catalog.AccountsStorageCredentialInfo{ + CredentialInfo: &catalog.StorageCredentialInfo{ + Name: "bcd", + AwsIamRole: &catalog.AwsIamRole{ + RoleArn: "def", + }, }, }, }, @@ -275,10 +277,12 @@ func TestCreateAccountDacWithAzMI(t *testing.T) { { Method: "GET", Resource: "/api/2.0/accounts/100/metastores/abc/storage-credentials/bcd?", - Response: catalog.StorageCredentialInfo{ - Name: "bcd", - AzureManagedIdentity: &catalog.AzureManagedIdentity{ - AccessConnectorId: "def", + Response: catalog.AccountsStorageCredentialInfo{ + CredentialInfo: &catalog.StorageCredentialInfo{ + Name: "bcd", + AzureManagedIdentity: &catalog.AzureManagedIdentity{ + AccessConnectorId: "def", + }, }, }, }, @@ -341,10 +345,12 @@ func TestCreateAccountDacWithDbGcpSA(t *testing.T) { { Method: "GET", Resource: "/api/2.0/accounts/100/metastores/abc/storage-credentials/bcd?", - Response: catalog.StorageCredentialInfo{ - Name: "bcd", - DatabricksGcpServiceAccount: &catalog.DatabricksGcpServiceAccountResponse{ - Email: "a@example.com", + Response: catalog.AccountsStorageCredentialInfo{ + CredentialInfo: &catalog.StorageCredentialInfo{ + Name: "bcd", + DatabricksGcpServiceAccount: &catalog.DatabricksGcpServiceAccountResponse{ + Email: "a@example.com", + }, }, }, }, @@ -367,5 +373,9 @@ func TestCreateAccountDacWithDbGcpSA(t *testing.T) { is_default = true databricks_gcp_service_account {} `, - }.ApplyNoError(t) + }.ApplyAndExpectData(t, + map[string]any{ + "databricks_gcp_service_account.#": 1, + "databricks_gcp_service_account.0.email": "a@example.com", + }) } diff --git a/catalog/resource_storage_credential.go b/catalog/resource_storage_credential.go index a5a96bd017..cad7de9875 100644 --- a/catalog/resource_storage_credential.go +++ b/catalog/resource_storage_credential.go @@ -107,7 
+107,7 @@ func ResourceStorageCredential() *schema.Resource { if err != nil { return err } - return common.StructToData(storageCredential, storageCredentialSchema, d) + return common.StructToData(storageCredential.CredentialInfo, storageCredentialSchema, d) }, func(w *databricks.WorkspaceClient) error { storageCredential, err := w.StorageCredentials.GetByName(ctx, d.Id()) if err != nil { From 6b898aab69e2a064f581621cf1465bad0d2b535c Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Wed, 18 Oct 2023 16:47:36 +0200 Subject: [PATCH 31/36] Release v1.28.1 (#2814) * Release v1.28.1 * Release v1.28.1 --- CHANGELOG.md | 4 ++++ common/version.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dfdb3130ca..0a54c33d07 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Version changelog +## 1.28.1 + * Fixed read method for `databricks_storage_credential` resource ([#2804](https://github.com/databricks/terraform-provider-databricks/pull/2804)). + + ## 1.28.0 * Added `dashboard_filters_enabled` attribute to [databricks_sql_dashboard](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/sql_dashboard) resource ([#2725](https://github.com/databricks/terraform-provider-databricks/pull/2725)). * Added `empty_result_state` attribute to the [databricks_sql_alert](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/sql_alert) resource ([#2724](https://github.com/databricks/terraform-provider-databricks/pull/2724)). diff --git a/common/version.go b/common/version.go index 295e0895bc..8919faa1ac 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.28.0" + version = "1.28.1" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider From cb1c468e15a16da9356c1a412e32f392911ac31f Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Wed, 18 Oct 2023 16:52:59 +0100 Subject: [PATCH 32/36] Fixed `databricks_catalog` `isolation_mode` (#2805) * fix update for catalog isolation mode * fix test * switch bindings API --- catalog/resource_catalog.go | 38 ++++++++++++++++----- catalog/resource_catalog_test.go | 24 ++++++++++---- internal/acceptance/catalog_test.go | 51 +++++++++++++++++++++++++++++ 3 files changed, 97 insertions(+), 16 deletions(-) create mode 100644 internal/acceptance/catalog_test.go diff --git a/catalog/resource_catalog.go b/catalog/resource_catalog.go index 9fa92b72d2..e0e924ad1f 100644 --- a/catalog/resource_catalog.go +++ b/catalog/resource_catalog.go @@ -93,15 +93,17 @@ func ResourceCatalog() *schema.Resource { if err != nil { return err } - _, err = w.WorkspaceBindings.Update(ctx, catalog.UpdateWorkspaceBindings{ - Name: ci.Name, - AssignWorkspaces: []int64{currentMetastoreAssignment.WorkspaceId}, + _, err = w.WorkspaceBindings.UpdateBindings(ctx, catalog.UpdateWorkspaceBindingsParameters{ + SecurableName: ci.Name, + SecurableType: "catalog", + Add: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + WorkspaceId: currentMetastoreAssignment.WorkspaceId, + }, + }, }) - if err != nil { - return err - } - - return nil + return err }, Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { w, err := c.WorkspaceClient() @@ -131,7 +133,25 @@ func 
ResourceCatalog() *schema.Resource { // So if we don't update the field then the requests would be made to old Name which doesn't exists. d.SetId(ci.Name) - return nil + if d.Get("isolation_mode") != "ISOLATED" { + return nil + } + // Bind the current workspace if the catalog is isolated, otherwise the read will fail + currentMetastoreAssignment, err := w.Metastores.Current(ctx) + if err != nil { + return err + } + _, err = w.WorkspaceBindings.UpdateBindings(ctx, catalog.UpdateWorkspaceBindingsParameters{ + SecurableName: ci.Name, + SecurableType: "catalog", + Add: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + WorkspaceId: currentMetastoreAssignment.WorkspaceId, + }, + }, + }) + return err }, Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { w, err := c.WorkspaceClient() diff --git a/catalog/resource_catalog_test.go b/catalog/resource_catalog_test.go index 1acd969c3f..f9909ae9e0 100644 --- a/catalog/resource_catalog_test.go +++ b/catalog/resource_catalog_test.go @@ -518,14 +518,24 @@ func TestCatalogCreateIsolated(t *testing.T) { }, { Method: "PATCH", - Resource: "/api/2.1/unity-catalog/workspace-bindings/catalogs/a", - ExpectedRequest: catalog.UpdateWorkspaceBindings{ - Name: "a", - AssignWorkspaces: []int64{123456789101112}, + Resource: "/api/2.1/unity-catalog/bindings/catalog/a", + ExpectedRequest: catalog.UpdateWorkspaceBindingsParameters{ + SecurableName: "a", + SecurableType: "catalog", + Add: []catalog.WorkspaceBinding{ + { + WorkspaceId: int64(123456789101112), + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + }, + }, }, - - Response: catalog.CurrentWorkspaceBindings{ - Workspaces: []int64{123456789101112}, + Response: catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + WorkspaceId: int64(123456789101112), + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + }, + }, }, }, { diff --git a/internal/acceptance/catalog_test.go b/internal/acceptance/catalog_test.go new file mode 100644 index 0000000000..fb9a75ec96 --- /dev/null +++ b/internal/acceptance/catalog_test.go @@ -0,0 +1,51 @@ +package acceptance + +import ( + "testing" +) + +func TestUcAccCatalog(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: ` + resource "databricks_catalog" "sandbox" { + name = "sandbox{var.RANDOM}" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + }`, + }) +} + +func TestUcAccCatalogIsolated(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: ` + resource "databricks_catalog" "sandbox" { + name = "sandbox{var.STICKY_RANDOM}" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + }`, + }, step{ + Template: ` + resource "databricks_catalog" "sandbox" { + name = "sandbox{var.STICKY_RANDOM}" + isolation_mode = "ISOLATED" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + }`, + }, step{ + Template: ` + resource "databricks_catalog" "sandbox" { + name = "sandbox{var.STICKY_RANDOM}" + isolation_mode = "OPEN" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + }`, + }) +} From 31e57c5139d00be2fafca414483b59be8d80216e Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 18 Oct 2023 19:52:31 +0200 Subject: [PATCH 33/36] Add `account_id` parameter to provider blocks in the guides (#2817) This fixes #2809 --- docs/guides/aws-e2-firewall-hub-and-spoke.md | 
1 + docs/guides/aws-e2-firewall-workspace.md | 1 + docs/guides/aws-private-link-workspace.md | 1 + docs/guides/aws-workspace.md | 1 + docs/guides/unity-catalog-azure.md | 5 +++-- docs/guides/unity-catalog-gcp.md | 5 +++-- 6 files changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/guides/aws-e2-firewall-hub-and-spoke.md b/docs/guides/aws-e2-firewall-hub-and-spoke.md index a8fd78f487..252170528b 100644 --- a/docs/guides/aws-e2-firewall-hub-and-spoke.md +++ b/docs/guides/aws-e2-firewall-hub-and-spoke.md @@ -108,6 +108,7 @@ provider "aws" { provider "databricks" { alias = "mws" host = "https://accounts.cloud.databricks.com" + account_id = var.databricks_account_id client_id = var.client_id client_secret = var.client_secret } diff --git a/docs/guides/aws-e2-firewall-workspace.md b/docs/guides/aws-e2-firewall-workspace.md index 2286091c17..9945c00c4f 100644 --- a/docs/guides/aws-e2-firewall-workspace.md +++ b/docs/guides/aws-e2-firewall-workspace.md @@ -106,6 +106,7 @@ provider "aws" { provider "databricks" { alias = "mws" host = "https://accounts.cloud.databricks.com" + account_id = var.databricks_account_id client_id = var.client_id client_secret = var.client_secret } diff --git a/docs/guides/aws-private-link-workspace.md b/docs/guides/aws-private-link-workspace.md index 16d34f2531..e3f6ec2b52 100644 --- a/docs/guides/aws-private-link-workspace.md +++ b/docs/guides/aws-private-link-workspace.md @@ -59,6 +59,7 @@ provider "aws" { provider "databricks" { alias = "mws" host = "https://accounts.cloud.databricks.com" + account_id = var.databricks_account_id client_id = var.client_id client_secret = var.client_secret } diff --git a/docs/guides/aws-workspace.md b/docs/guides/aws-workspace.md index 68e441d17a..d33d0dabd8 100644 --- a/docs/guides/aws-workspace.md +++ b/docs/guides/aws-workspace.md @@ -71,6 +71,7 @@ provider "aws" { provider "databricks" { alias = "mws" host = "https://accounts.cloud.databricks.com" + account_id = var.databricks_account_id client_id = var.client_id client_secret = var.client_secret } diff --git a/docs/guides/unity-catalog-azure.md b/docs/guides/unity-catalog-azure.md index 9d5de66708..ce93433cda 100644 --- a/docs/guides/unity-catalog-azure.md +++ b/docs/guides/unity-catalog-azure.md @@ -79,8 +79,9 @@ provider "databricks" { } provider "databricks" { - alias = "accounts" - host = "https://accounts.azuredatabricks.net" + alias = "accounts" + host = "https://accounts.azuredatabricks.net" + account_id = var.databricks_account_id } ``` diff --git a/docs/guides/unity-catalog-gcp.md b/docs/guides/unity-catalog-gcp.md index 243445dedf..6bfcab5b5a 100644 --- a/docs/guides/unity-catalog-gcp.md +++ b/docs/guides/unity-catalog-gcp.md @@ -76,8 +76,9 @@ provider "databricks" { } provider "databricks" { - alias = "accounts" - host = "https://accounts.gcp.databricks.com" + alias = "accounts" + host = "https://accounts.gcp.databricks.com" + account_id = var.databricks_account_id } ``` From 9176bc4174de8a10f95cbc94d5173544e2e79242 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Wed, 18 Oct 2023 22:19:33 +0200 Subject: [PATCH 34/36] Enable Cluster Integration test (#2815) --- internal/acceptance/cluster_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/acceptance/cluster_test.go b/internal/acceptance/cluster_test.go index adbf5bf884..3cfe25b944 100644 --- a/internal/acceptance/cluster_test.go +++ b/internal/acceptance/cluster_test.go @@ -5,7 +5,6 @@ import ( ) func 
TestAccClusterResource_CreateClusterWithLibraries(t *testing.T) { - t.Skip("Waiting for maintenance release for fix") workspaceLevel(t, step{ Template: `data "databricks_spark_version" "latest" { } From 075190191fab8b4619b633d2b6796833320160a5 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Thu, 19 Oct 2023 09:53:56 +0200 Subject: [PATCH 35/36] Exporter: clarify behaviour of services not marked with listing (#2785) --- docs/guides/experimental-exporter.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/guides/experimental-exporter.md b/docs/guides/experimental-exporter.md index 472d5311c1..63422fa905 100644 --- a/docs/guides/experimental-exporter.md +++ b/docs/guides/experimental-exporter.md @@ -52,13 +52,16 @@ All arguments are optional and they tune what code is being generated. ## Services -Services are just logical groups of resources used for filtering and organization in files written in `-directory`. All resources are globally sorted by their resource name, which technically allows you to use generated files for compliance purposes. Nevertheless, managing the entire Databricks workspace with Terraform is the preferred way. With the exception of notebooks and possibly libraries, which may have their own CI/CD processes. +Services are just logical groups of resources used for filtering and organization in files written in `-directory`. All resources are globally sorted by their resource name, which technically allows you to use generated files for compliance purposes. Nevertheless, managing the entire Databricks workspace with Terraform is the preferred way. With the exception of notebooks and possibly libraries, which may have their own CI/CD processes. + +-> **Note** + Please note that for services not marked with **listing** we'll export resources only if they are referenced from other resources. * `access` - [databricks_permissions](../resources/permissions.md), [databricks_instance_profile](../resources/instance_profile.md) and [databricks_ip_access_list](../resources/ip_access_list.md). * `compute` - **listing** [databricks_cluster](../resources/cluster.md). * `directories` - **listing** [databricks_directory](../resources/directory.md). * `dlt` - **listing** [databricks_pipeline](../resources/pipeline.md). -* `groups` - [databricks_group](../data-sources/group.md) with [membership](../resources/group_member.md) and [data access](../resources/group_instance_profile.md). +* `groups` - **listing** [databricks_group](../data-sources/group.md) with [membership](../resources/group_member.md) and [data access](../resources/group_instance_profile.md). * `jobs` - **listing** [databricks_job](../resources/job.md). Usually, there are more automated jobs than interactive clusters, so they get their own file in this tool's output. * `mlflow-webhooks` - **listing** [databricks_mlflow_webhook](../resources/mlflow_webhook.md). * `model-serving` - **listing** [databricks_model_serving](../resources/model_serving.md). @@ -70,10 +73,9 @@ Services are just logical groups of resources used for filtering and organizatio * `secrets` - **listing** [databricks_secret_scope](../resources/secret_scope.md) along with [keys](../resources/secret.md) and [ACLs](../resources/secret_acl.md). * `sql-alerts` - **listing** [databricks_sql_alert](../resources/sql_alert.md). 
* `sql-dashboards` - **listing** [databricks_sql_dashboard](../resources/sql_dashboard.md) along with associated [databricks_sql_widget](../resources/sql_widget.md) and [databricks_sql_visualization](../resources/sql_visualization.md). -* `sql-dashboards` - **listing** [databricks_sql_dashboard](../resources/sql_dashboard.md) along with associated [databricks_sql_widget](../resources/sql_widget.md) and [databricks_sql_visualization](../resources/sql_visualization.md). * `sql-endpoints` - **listing** [databricks_sql_endpoint](../resources/sql_endpoint.md) along with [databricks_sql_global_config](../resources/sql_global_config.md). * `sql-queries` - **listing** [databricks_sql_query](../resources/sql_query.md). -* `storage` - any referenced [databricks_dbfs_file](../resources/dbfs_file.md) will be downloaded locally and properly arranged into terraform state. +* `storage` - only [databricks_dbfs_file](../resources/dbfs_file.md) referenced in other resources (libraries, init scripts, ...) will be downloaded locally and properly arranged into terraform state. * `users` - [databricks_user](../resources/user.md) and [databricks_service_principal](../resources/service_principal.md) are written to their own file, simply because of their amount. If you use SCIM provisioning, the only use-case for importing `users` service is to migrate workspaces. * `workspace` - [databricks_workspace_conf](../resources/workspace_conf.md) and [databricks_global_init_script](../resources/global_init_script.md). From 8a3089cf00df296fc68b98665ff42b2de67dc396 Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Thu, 19 Oct 2023 09:09:59 +0100 Subject: [PATCH 36/36] Refactor `databricks_recipient` to Go SDK (#2757) * refactor `databricks_recipient` * update doc * add suppress_diff * add sharing namespace * fix namespace * doc feedback --- catalog/resource_recipient_test.go | 80 ---------- catalog/resource_share_test.go | 6 +- docs/resources/recipient.md | 12 +- provider/provider.go | 3 +- {catalog => sharing}/resource_recipient.go | 69 ++++----- sharing/resource_recipient_test.go | 162 +++++++++++++++++++++ 6 files changed, 199 insertions(+), 133 deletions(-) delete mode 100644 catalog/resource_recipient_test.go rename {catalog => sharing}/resource_recipient.go (62%) create mode 100644 sharing/resource_recipient_test.go diff --git a/catalog/resource_recipient_test.go b/catalog/resource_recipient_test.go deleted file mode 100644 index 7447c04a29..0000000000 --- a/catalog/resource_recipient_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package catalog - -import ( - "testing" - - "github.com/databricks/terraform-provider-databricks/qa" -) - -func TestRecipientCornerCases(t *testing.T) { - qa.ResourceCornerCases(t, ResourceRecipient()) -} - -func TestCreateRecipient(t *testing.T) { - qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - { - Method: "POST", - Resource: "/api/2.1/unity-catalog/recipients", - ExpectedRequest: RecipientInfo{ - Name: "a", - Comment: "b", - SharingCode: "c", - AuthenticationType: "TOKEN", - Tokens: nil, - IpAccessList: &IpAccessList{ - AllowedIpAddresses: []string{"0.0.0.0/0"}, - }, - }, - Response: RecipientInfo{ - Name: "a", - }, - }, - { - Method: "GET", - Resource: "/api/2.1/unity-catalog/recipients/a", - Response: RecipientInfo{ - Name: "a", - Comment: "b", - SharingCode: "c", - AuthenticationType: "TOKEN", - Tokens: nil, - IpAccessList: &IpAccessList{ - AllowedIpAddresses: []string{"0.0.0.0/0"}, - }, - }, - }, - }, - Resource: ResourceRecipient(), - Create: 
true, - HCL: ` - name = "a" - comment = "b" - authentication_type = "TOKEN" - sharing_code = "c" - ip_access_list { - allowed_ip_addresses = ["0.0.0.0/0"] - } - `, - }.ApplyNoError(t) -} - -func TestCreateRecipient_InvalidAuthType(t *testing.T) { - qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{}, - Resource: ResourceRecipient(), - Create: true, - HCL: ` - name = "a" - comment = "b" - authentication_type = "temp" - sharing_code = "c" - ip_access_list { - allowed_ip_addresses = ["0.0.0.0/0"] - } - `, - }.ExpectError(t, "invalid config supplied. "+ - "[authentication_type] expected authentication_type "+ - "to be one of [TOKEN DATABRICKS], got temp") - -} diff --git a/catalog/resource_share_test.go b/catalog/resource_share_test.go index 9b19e067eb..f081a52f21 100644 --- a/catalog/resource_share_test.go +++ b/catalog/resource_share_test.go @@ -185,7 +185,7 @@ func TestCreateShare(t *testing.T) { ExpectedRequest: ShareInfo{ Name: "a", }, - Response: RecipientInfo{ + Response: ShareInfo{ Name: "a", }, }, @@ -213,7 +213,7 @@ func TestCreateShare(t *testing.T) { }, }, }, - Response: RecipientInfo{ + Response: ShareInfo{ Name: "a", }, }, @@ -462,7 +462,7 @@ func TestCreateShareButPatchFails(t *testing.T) { ExpectedRequest: ShareInfo{ Name: "a", }, - Response: RecipientInfo{ + Response: ShareInfo{ Name: "a", }, }, diff --git a/docs/resources/recipient.md b/docs/resources/recipient.md index 24298e8cd8..99fbbfb27d 100644 --- a/docs/resources/recipient.md +++ b/docs/resources/recipient.md @@ -67,13 +67,14 @@ The following arguments are required: * `name` - Name of recipient. Change forces creation of a new resource. * `comment` - (Optional) Description about the recipient. * `sharing_code` - (Optional) The one-time sharing code provided by the data recipient. +* `owner` - (Optional) Username/groupname/sp application_id of the recipient owner. * `authentication_type` - (Optional) The delta sharing authentication type. Valid values are `TOKEN` and `DATABRICKS`. -* `data_recipient_global_metastore_id` - Required when authentication_type is DATABRICKS. -* `ip_access_list` - (Optional) The one-time sharing code provided by the data recipient. +* `data_recipient_global_metastore_id` - Required when `authentication_type` is `DATABRICKS`. +* `ip_access_list` - (Optional) Recipient IP access list. ### Ip Access List Argument -Only one `ip_access_list` blocks is allowed in a recipient. It conflicts with authentication type DATABRICKS. +Only one `ip_access_list` block is allowed in a recipient. It conflicts with authentication type `DATABRICKS`. ```hcl ip_access_list { @@ -99,11 +100,6 @@ In addition to all arguments above, the following attributes are exported: * `expiration_time` - Expiration timestamp of the token in epoch milliseconds. * `updated_at` - Time at which this recipient Token was updated, in epoch milliseconds. * `updated_by` - Username of recipient Token updater. - -## Attribute Reference - -In addition to all arguments above, the following attributes are exported: - * `id` - ID of this recipient - same as the `name`. 
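For illustration, a Databricks-to-Databricks recipient combines the arguments above; the following is a minimal sketch, with a placeholder global metastore ID:

```hcl
resource "databricks_recipient" "db2db" {
  name                = "example-recipient"
  comment             = "managed by terraform"
  authentication_type = "DATABRICKS"

  # Placeholder: the recipient metastore's global ID, in <cloud>:<region>:<uuid> form.
  data_recipient_global_metastore_id = "aws:us-west-2:00000000-0000-0000-0000-000000000000"
}
```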
## Related Resources diff --git a/provider/provider.go b/provider/provider.go index 9dabc4f1fd..0eeb974c59 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -32,6 +32,7 @@ import ( "github.com/databricks/terraform-provider-databricks/scim" "github.com/databricks/terraform-provider-databricks/secrets" "github.com/databricks/terraform-provider-databricks/serving" + "github.com/databricks/terraform-provider-databricks/sharing" "github.com/databricks/terraform-provider-databricks/sql" "github.com/databricks/terraform-provider-databricks/storage" "github.com/databricks/terraform-provider-databricks/tokens" @@ -134,7 +135,7 @@ func DatabricksProvider() *schema.Provider { "databricks_permissions": permissions.ResourcePermissions(), "databricks_pipeline": pipelines.ResourcePipeline(), "databricks_provider": catalog.ResourceProvider(), - "databricks_recipient": catalog.ResourceRecipient(), + "databricks_recipient": sharing.ResourceRecipient(), "databricks_registered_model": catalog.ResourceRegisteredModel(), "databricks_repo": repos.ResourceRepo(), "databricks_schema": catalog.ResourceSchema(), diff --git a/catalog/resource_recipient.go b/sharing/resource_recipient.go similarity index 62% rename from catalog/resource_recipient.go rename to sharing/resource_recipient.go index 419728038f..a16ff3f36f 100644 --- a/catalog/resource_recipient.go +++ b/sharing/resource_recipient.go @@ -1,22 +1,14 @@ -package catalog +package sharing import ( "context" + "github.com/databricks/databricks-sdk-go/service/sharing" "github.com/databricks/terraform-provider-databricks/common" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -type RecipientsAPI struct { - client *common.DatabricksClient - context context.Context -} - -func NewRecipientsAPI(ctx context.Context, m any) RecipientsAPI { - return RecipientsAPI{m.(*common.DatabricksClient), context.WithValue(ctx, common.Api, common.API_2_1)} -} - type Token struct { Id string `json:"id,omitempty" tf:"computed"` CreatedAt int64 `json:"created_at,omitempty" tf:"computed"` @@ -37,33 +29,11 @@ type RecipientInfo struct { SharingCode string `json:"sharing_code,omitempty" tf:"sensitive,force_new,suppress_diff"` AuthenticationType string `json:"authentication_type" tf:"force_new"` Tokens []Token `json:"tokens,omitempty" tf:"computed"` + Owner string `json:"owner,omitempty" tf:"suppress_diff"` DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty" tf:"force_new,conflicts:ip_access_list"` IpAccessList *IpAccessList `json:"ip_access_list,omitempty"` } -type Recipients struct { - Recipients []RecipientInfo `json:"recipients"` -} - -func (a RecipientsAPI) createRecipient(ci *RecipientInfo) error { - return a.client.Post(a.context, "/unity-catalog/recipients", ci, ci) -} - -func (a RecipientsAPI) getRecipient(name string) (ci RecipientInfo, err error) { - err = a.client.Get(a.context, "/unity-catalog/recipients/"+name, nil, &ci) - return -} - -func (a RecipientsAPI) deleteRecipient(name string) error { - return a.client.Delete(a.context, "/unity-catalog/recipients/"+name, nil) -} - -func (a RecipientsAPI) updateRecipient(ci *RecipientInfo) error { - patch := map[string]any{"comment": ci.Comment, "ip_access_list": ci.IpAccessList} - - return a.client.Patch(a.context, "/unity-catalog/recipients/"+ci.Name, patch) -} - func ResourceRecipient() *schema.Resource { recipientSchema := common.StructToSchema(RecipientInfo{}, func(m map[string]*schema.Schema) 
map[string]*schema.Schema { m["authentication_type"].ValidateFunc = validation.StringInSlice([]string{"TOKEN", "DATABRICKS"}, false) @@ -72,28 +42,45 @@ func ResourceRecipient() *schema.Resource { return common.Resource{ Schema: recipientSchema, Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - var ri RecipientInfo - common.DataToStructPointer(d, recipientSchema, &ri) - if err := NewRecipientsAPI(ctx, c).createRecipient(&ri); err != nil { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + var createRecipientRequest sharing.CreateRecipient + common.DataToStructPointer(d, recipientSchema, &createRecipientRequest) + ri, err := w.Recipients.Create(ctx, createRecipientRequest) + if err != nil { return err } d.SetId(ri.Name) return nil }, Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - ri, err := NewRecipientsAPI(ctx, c).getRecipient(d.Id()) + w, err := c.WorkspaceClient() + if err != nil { + return err + } + ri, err := w.Recipients.GetByName(ctx, d.Id()) if err != nil { return err } return common.StructToData(ri, recipientSchema, d) }, Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - var ri RecipientInfo - common.DataToStructPointer(d, recipientSchema, &ri) - return NewRecipientsAPI(ctx, c).updateRecipient(&ri) + w, err := c.WorkspaceClient() + if err != nil { + return err + } + var updateRecipientRequest sharing.UpdateRecipient + common.DataToStructPointer(d, recipientSchema, &updateRecipientRequest) + return w.Recipients.Update(ctx, updateRecipientRequest) }, Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - return NewRecipientsAPI(ctx, c).deleteRecipient(d.Id()) + w, err := c.WorkspaceClient() + if err != nil { + return err + } + return w.Recipients.DeleteByName(ctx, d.Id()) }, }.ToResource() } diff --git a/sharing/resource_recipient_test.go b/sharing/resource_recipient_test.go new file mode 100644 index 0000000000..3fb7ff1f56 --- /dev/null +++ b/sharing/resource_recipient_test.go @@ -0,0 +1,162 @@ +package sharing + +import ( + "net/http" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/sharing" + "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/assert" +) + +func TestRecipientCornerCases(t *testing.T) { + qa.ResourceCornerCases(t, ResourceRecipient()) +} + +func TestCreateRecipient(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodPost, + Resource: "/api/2.1/unity-catalog/recipients", + ExpectedRequest: sharing.CreateRecipient{ + Name: "a", + Comment: "b", + SharingCode: "c", + AuthenticationType: "TOKEN", + Owner: "InitialOwner", + IpAccessList: &sharing.IpAccessList{ + AllowedIpAddresses: []string{"0.0.0.0/0"}, + }, + }, + Response: RecipientInfo{ + Name: "a", + }, + }, + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/recipients/a?", + Response: sharing.RecipientInfo{ + Name: "a", + Comment: "b", + SharingCode: "c", + AuthenticationType: "TOKEN", + Owner: "InitialOwner", + Tokens: nil, + IpAccessList: &sharing.IpAccessList{ + AllowedIpAddresses: []string{"0.0.0.0/0"}, + }, + }, + }, + }, + Resource: ResourceRecipient(), + Create: true, + HCL: ` + name = "a" + comment = "b" + authentication_type = "TOKEN" + sharing_code = "c" + owner = "InitialOwner" + ip_access_list { + allowed_ip_addresses = ["0.0.0.0/0"] + } + `, 
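+		// Apply (below) runs the resource's Create against the POST fixture,
+		// then the follow-up read against the GET fixture, and returns the
+		// resulting state for the assertions that follow.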
+ }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "a", d.Get("name")) + assert.Equal(t, "InitialOwner", d.Get("owner")) + assert.Equal(t, "TOKEN", d.Get("authentication_type")) + assert.Equal(t, "b", d.Get("comment")) +} + +func TestCreateRecipient_InvalidAuthType(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{}, + Resource: ResourceRecipient(), + Create: true, + HCL: ` + name = "a" + comment = "b" + authentication_type = "temp" + sharing_code = "c" + ip_access_list { + allowed_ip_addresses = ["0.0.0.0/0"] + } + `, + }.ExpectError(t, "invalid config supplied. "+ + "[authentication_type] expected authentication_type "+ + "to be one of [TOKEN DATABRICKS], got temp") + +} + +func TestReadRecipient(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/recipients/a?", + Response: sharing.RecipientInfo{ + Name: "a", + Comment: "b", + SharingCode: "c", + AuthenticationType: "TOKEN", + Tokens: nil, + IpAccessList: &sharing.IpAccessList{ + AllowedIpAddresses: []string{"0.0.0.0/0"}, + }, + }, + }, + }, + Resource: ResourceRecipient(), + Read: true, + ID: "a", + HCL: ` + name = "a" + comment = "b" + authentication_type = "TOKEN" + sharing_code = "c" + ip_access_list { + allowed_ip_addresses = ["0.0.0.0/0"] + } + `, + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "a", d.Get("name")) + assert.Equal(t, "b", d.Get("comment")) +} + +func TestDeleteRecipient(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodDelete, + Resource: "/api/2.1/unity-catalog/recipients/testRecipient?", + }, + }, + Resource: ResourceRecipient(), + Delete: true, + ID: "testRecipient", + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "testRecipient", d.Id()) +} + +func TestDeleteRecipientError(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodDelete, + Resource: "/api/2.1/unity-catalog/recipients/testRecipient?", + Response: apierr.APIErrorBody{ + ErrorCode: "INVALID_STATE", + Message: "Something went wrong", + }, + Status: 400, + }, + }, + Resource: ResourceRecipient(), + Delete: true, + ID: "testRecipient", + }.ExpectError(t, "Something went wrong") +}
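For readers tracing the migration, the new resource delegates to the Go SDK's `sharing` service. The following is a minimal, self-contained sketch of the same client calls used by the Create/Read/Update/Delete hooks above, assuming ambient Databricks authentication and a placeholder recipient name:

```go
package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/sharing"
)

func main() {
	ctx := context.Background()
	// Resolves credentials from the environment/config, much as the
	// provider does via c.WorkspaceClient().
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Mirrors the resource's Create hook: a token-authenticated recipient.
	created, err := w.Recipients.Create(ctx, sharing.CreateRecipient{
		Name:               "example-recipient", // placeholder
		AuthenticationType: "TOKEN",
		Comment:            "created via the Go SDK",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Mirrors the Read hook.
	if _, err := w.Recipients.GetByName(ctx, created.Name); err != nil {
		log.Fatal(err)
	}

	// Mirrors the Update hook.
	if err := w.Recipients.Update(ctx, sharing.UpdateRecipient{
		Name:    created.Name,
		Comment: "updated via the Go SDK",
	}); err != nil {
		log.Fatal(err)
	}

	// Mirrors the Delete hook.
	if err := w.Recipients.DeleteByName(ctx, created.Name); err != nil {
		log.Fatal(err)
	}
}
```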
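Relatedly, the workspace-binding behaviour introduced for `ISOLATED` catalogs at the top of this section is driven purely by configuration; a minimal sketch of the Terraform shape it supports (names are placeholders):

```hcl
resource "databricks_catalog" "sandbox" {
  name           = "sandbox"
  isolation_mode = "ISOLATED"
  comment        = "this catalog is managed by terraform"
}
```

With `isolation_mode = "ISOLATED"`, the provider's Create now also binds the catalog to the current workspace with a read-write binding, so the immediate follow-up read of the catalog does not fail.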