From b814ca01b40dec2e2ac306b16032bd5258e516ec Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Thu, 7 Nov 2024 04:49:23 -0500 Subject: [PATCH 01/10] [Fix] Always fill `cluster_name` in `databricks_cluster` data source (#4197) ## Changes ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- clusters/data_cluster.go | 1 + clusters/data_cluster_test.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/clusters/data_cluster.go b/clusters/data_cluster.go index 73ae4a1e19..aee0503619 100644 --- a/clusters/data_cluster.go +++ b/clusters/data_cluster.go @@ -46,6 +46,7 @@ func DataSourceCluster() common.Resource { } data.Id = data.ClusterInfo.ClusterId data.ClusterId = data.ClusterInfo.ClusterId + data.Name = data.ClusterInfo.ClusterName return nil }) diff --git a/clusters/data_cluster_test.go b/clusters/data_cluster_test.go index cd20edec0d..f7744c2ba2 100644 --- a/clusters/data_cluster_test.go +++ b/clusters/data_cluster_test.go @@ -37,6 +37,7 @@ func TestClusterDataByID(t *testing.T) { "cluster_info.0.node_type_id": "i3.xlarge", "cluster_info.0.autoscale.0.max_workers": 4, "cluster_info.0.state": "RUNNING", + "cluster_name": "Shared Autoscaling", }) } @@ -68,6 +69,7 @@ func TestClusterDataByName(t *testing.T) { "cluster_info.0.node_type_id": "i3.xlarge", "cluster_info.0.autoscale.0.max_workers": 4, "cluster_info.0.state": "RUNNING", + "cluster_id": "abc", }) } From 80514f54b75e36253518da20ec7118d33d71f2ad Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Thu, 7 Nov 2024 14:04:05 +0100 Subject: [PATCH 02/10] [Internal] Update to latest OpenAPI spec and bump Go SDK (#4199) ## Changes Update to latest OpenAPI spec and Bump go sdk. 
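For illustration only, here is a minimal sketch of how one of the newly generated models, `pipelines_tf.RestartWindow`, could be populated from code inside this module. Field names and types follow the generated struct in the diff below; the package layout, the `main` wrapper, and the concrete values are assumptions and are not part of this change.

```go
package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/internal/service/pipelines_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	// RestartWindow is one of the structs regenerated by this codegen update
	// (see internal/service/pipelines_tf/model.go below). The values here are
	// illustrative; the day-of-week string is an assumed example value.
	window := pipelines_tf.RestartWindow{
		DaysOfWeek: types.StringValue("MONDAY"),
		StartHour:  types.Int64Value(5),
		TimeZoneId: types.StringValue("UTC"),
	}
	fmt.Printf("restart window starts at hour %d\n", window.StartHour.ValueInt64())
}
```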
## Tests - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- .codegen/_openapi_sha | 2 +- go.mod | 2 +- go.sum | 4 +- internal/service/apps_tf/model.go | 39 ++- internal/service/catalog_tf/model.go | 343 +++++++++++++++++++++- internal/service/compute_tf/model.go | 2 +- internal/service/dashboards_tf/model.go | 107 +++++++ internal/service/jobs_tf/model.go | 3 + internal/service/pipelines_tf/model.go | 59 +++- internal/service/provisioning_tf/model.go | 3 + internal/service/sharing_tf/model.go | 285 ------------------ 11 files changed, 535 insertions(+), 314 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index ecf041814d..5f4b508602 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -25b2478e5a18c888f0d423249abde5499dc58424 \ No newline at end of file +d25296d2f4aa7bd6195c816fdf82e0f960f775da \ No newline at end of file diff --git a/go.mod b/go.mod index e449f753c8..87e265f72e 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/databricks/terraform-provider-databricks go 1.22 require ( - github.com/databricks/databricks-sdk-go v0.50.0 + github.com/databricks/databricks-sdk-go v0.51.0 github.com/golang-jwt/jwt/v4 v4.5.1 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 diff --git a/go.sum b/go.sum index 6f416791a4..2fe2fa4ab3 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.50.0 h1:Zl4uBhYMT5z6aDojCQJPT2zCYjjfqxBQSQn8uLTphpo= -github.com/databricks/databricks-sdk-go v0.50.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.51.0 h1:tcvB9TID3oUl0O8npccB5c+33tarBiYMBFbq4U4AB6M= +github.com/databricks/databricks-sdk-go v0.51.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index b5a602ba1f..d52106d4a8 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -144,8 +144,6 @@ func (newState *AppAccessControlResponse) SyncEffectiveFieldsDuringRead(existing } type AppDeployment struct { - // The name of the app. - AppName types.String `tfsdk:"-"` // The creation time of the deployment. Formatted timestamp in ISO 6801. CreateTime types.String `tfsdk:"create_time" tf:"optional"` EffectiveCreateTime types.String `tfsdk:"effective_create_time" tf:"computed,optional"` @@ -400,6 +398,30 @@ func (newState *ComputeStatus) SyncEffectiveFieldsDuringRead(existingState Compu } } +// Create an app deployment +type CreateAppDeploymentRequest struct { + AppDeployment []AppDeployment `tfsdk:"app_deployment" tf:"optional,object"` + // The name of the app. 
+ AppName types.String `tfsdk:"-"` +} + +func (newState *CreateAppDeploymentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAppDeploymentRequest) { +} + +func (newState *CreateAppDeploymentRequest) SyncEffectiveFieldsDuringRead(existingState CreateAppDeploymentRequest) { +} + +// Create an app +type CreateAppRequest struct { + App []App `tfsdk:"app" tf:"optional,object"` +} + +func (newState *CreateAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAppRequest) { +} + +func (newState *CreateAppRequest) SyncEffectiveFieldsDuringRead(existingState CreateAppRequest) { +} + // Delete an app type DeleteAppRequest struct { // The name of the app. @@ -551,3 +573,16 @@ func (newState *StopAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan Sto func (newState *StopAppRequest) SyncEffectiveFieldsDuringRead(existingState StopAppRequest) { } + +// Update an app +type UpdateAppRequest struct { + App []App `tfsdk:"app" tf:"optional,object"` + // The name of the app. + Name types.String `tfsdk:"-"` +} + +func (newState *UpdateAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAppRequest) { +} + +func (newState *UpdateAppRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAppRequest) { +} diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index caf38f865c..a712a00dad 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -181,6 +181,25 @@ func (newState *AwsCredentials) SyncEffectiveFieldsDuringCreateOrUpdate(plan Aws func (newState *AwsCredentials) SyncEffectiveFieldsDuringRead(existingState AwsCredentials) { } +// The AWS IAM role configuration +type AwsIamRole struct { + // The external ID used in role assumption to prevent the confused deputy + // problem. + ExternalId types.String `tfsdk:"external_id" tf:"optional"` + // The Amazon Resource Name (ARN) of the AWS IAM role used to vend temporary + // credentials. + RoleArn types.String `tfsdk:"role_arn" tf:"optional"` + // The Amazon Resource Name (ARN) of the AWS IAM user managed by Databricks. + // This is the identity that is going to assume the AWS IAM role. + UnityCatalogIamArn types.String `tfsdk:"unity_catalog_iam_arn" tf:"optional"` +} + +func (newState *AwsIamRole) SyncEffectiveFieldsDuringCreateOrUpdate(plan AwsIamRole) { +} + +func (newState *AwsIamRole) SyncEffectiveFieldsDuringRead(existingState AwsIamRole) { +} + type AwsIamRoleRequest struct { // The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access. RoleArn types.String `tfsdk:"role_arn" tf:""` @@ -209,6 +228,47 @@ func (newState *AwsIamRoleResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan func (newState *AwsIamRoleResponse) SyncEffectiveFieldsDuringRead(existingState AwsIamRoleResponse) { } +// Azure Active Directory token, essentially the Oauth token for Azure Service +// Principal or Managed Identity. Read more at +// https://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token +type AzureActiveDirectoryToken struct { + // Opaque token that contains claims that you can use in Azure Active + // Directory to access cloud services. + AadToken types.String `tfsdk:"aad_token" tf:"optional"` +} + +func (newState *AzureActiveDirectoryToken) SyncEffectiveFieldsDuringCreateOrUpdate(plan AzureActiveDirectoryToken) { +} + +func (newState *AzureActiveDirectoryToken) SyncEffectiveFieldsDuringRead(existingState AzureActiveDirectoryToken) { +} + +// The Azure managed identity configuration. 
+type AzureManagedIdentity struct { + // The Azure resource ID of the Azure Databricks Access Connector. Use the + // format + // `/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.Databricks/accessConnectors/{connector-name}`. + AccessConnectorId types.String `tfsdk:"access_connector_id" tf:"optional"` + // The Databricks internal ID that represents this managed identity. This + // field is only used to persist the credential_id once it is fetched from + // the credentials manager - as we only use the protobuf serializer to store + // credentials, this ID gets persisted to the database. . + CredentialId types.String `tfsdk:"credential_id" tf:"optional"` + // The Azure resource ID of the managed identity. Use the format, + // `/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}` + // This is only available for user-assgined identities. For system-assigned + // identities, the access_connector_id is used to identify the identity. If + // this field is not provided, then we assume the AzureManagedIdentity is + // using the system-assigned identity. + ManagedIdentityId types.String `tfsdk:"managed_identity_id" tf:"optional"` +} + +func (newState *AzureManagedIdentity) SyncEffectiveFieldsDuringCreateOrUpdate(plan AzureManagedIdentity) { +} + +func (newState *AzureManagedIdentity) SyncEffectiveFieldsDuringRead(existingState AzureManagedIdentity) { +} + type AzureManagedIdentityRequest struct { // The Azure resource ID of the Azure Databricks Access Connector. Use the // format @@ -550,6 +610,29 @@ func (newState *CreateConnection) SyncEffectiveFieldsDuringCreateOrUpdate(plan C func (newState *CreateConnection) SyncEffectiveFieldsDuringRead(existingState CreateConnection) { } +type CreateCredentialRequest struct { + // The AWS IAM role configuration + AwsIamRole []AwsIamRole `tfsdk:"aws_iam_role" tf:"optional,object"` + // The Azure managed identity configuration. + AzureManagedIdentity []AzureManagedIdentity `tfsdk:"azure_managed_identity" tf:"optional,object"` + // Comment associated with the credential. + Comment types.String `tfsdk:"comment" tf:"optional"` + // The credential name. The name must be unique among storage and service + // credentials within the metastore. + Name types.String `tfsdk:"name" tf:"optional"` + // Indicates the purpose of the credential. + Purpose types.String `tfsdk:"purpose" tf:"optional"` + // Optional. Supplying true to this argument skips validation of the created + // set of credentials. + SkipValidation types.Bool `tfsdk:"skip_validation" tf:"optional"` +} + +func (newState *CreateCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCredentialRequest) { +} + +func (newState *CreateCredentialRequest) SyncEffectiveFieldsDuringRead(existingState CreateCredentialRequest) { +} + type CreateExternalLocation struct { // The AWS access point to use when accesing s3 for this external location. AccessPoint types.String `tfsdk:"access_point" tf:"optional"` @@ -728,6 +811,18 @@ func (newState *CreateMonitor) SyncEffectiveFieldsDuringCreateOrUpdate(plan Crea func (newState *CreateMonitor) SyncEffectiveFieldsDuringRead(existingState CreateMonitor) { } +// Create an Online Table +type CreateOnlineTableRequest struct { + // Online Table information. 
+ Table []OnlineTable `tfsdk:"table" tf:"optional,object"` +} + +func (newState *CreateOnlineTableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateOnlineTableRequest) { +} + +func (newState *CreateOnlineTableRequest) SyncEffectiveFieldsDuringRead(existingState CreateOnlineTableRequest) { +} + type CreateRegisteredModelRequest struct { // The name of the catalog where the schema and the registered model reside CatalogName types.String `tfsdk:"catalog_name" tf:""` @@ -840,6 +935,58 @@ func (newState *CreateVolumeRequestContent) SyncEffectiveFieldsDuringCreateOrUpd func (newState *CreateVolumeRequestContent) SyncEffectiveFieldsDuringRead(existingState CreateVolumeRequestContent) { } +type CredentialInfo struct { + // The AWS IAM role configuration + AwsIamRole []AwsIamRole `tfsdk:"aws_iam_role" tf:"optional,object"` + // The Azure managed identity configuration. + AzureManagedIdentity []AzureManagedIdentity `tfsdk:"azure_managed_identity" tf:"optional,object"` + // Comment associated with the credential. + Comment types.String `tfsdk:"comment" tf:"optional"` + // Time at which this credential was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` + // Username of credential creator. + CreatedBy types.String `tfsdk:"created_by" tf:"optional"` + // The full name of the credential. + FullName types.String `tfsdk:"full_name" tf:"optional"` + // The unique identifier of the credential. + Id types.String `tfsdk:"id" tf:"optional"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode types.String `tfsdk:"isolation_mode" tf:"optional"` + // Unique identifier of the parent metastore. + MetastoreId types.String `tfsdk:"metastore_id" tf:"optional"` + // The credential name. The name must be unique among storage and service + // credentials within the metastore. + Name types.String `tfsdk:"name" tf:"optional"` + // Username of current owner of credential. + Owner types.String `tfsdk:"owner" tf:"optional"` + // Indicates the purpose of the credential. + Purpose types.String `tfsdk:"purpose" tf:"optional"` + // Time at which this credential was last modified, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` + // Username of user who last modified the credential. + UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` +} + +func (newState *CredentialInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CredentialInfo) { +} + +func (newState *CredentialInfo) SyncEffectiveFieldsDuringRead(existingState CredentialInfo) { +} + +type CredentialValidationResult struct { + // Error message would exist when the result does not equal to **PASS**. + Message types.String `tfsdk:"message" tf:"optional"` + // The results of the tested operation. + Result types.String `tfsdk:"result" tf:"optional"` +} + +func (newState *CredentialValidationResult) SyncEffectiveFieldsDuringCreateOrUpdate(plan CredentialValidationResult) { +} + +func (newState *CredentialValidationResult) SyncEffectiveFieldsDuringRead(existingState CredentialValidationResult) { +} + // Currently assigned workspaces type CurrentWorkspaceBindings struct { // A list of workspace IDs. 
@@ -969,6 +1116,29 @@ func (newState *DeleteConnectionRequest) SyncEffectiveFieldsDuringCreateOrUpdate func (newState *DeleteConnectionRequest) SyncEffectiveFieldsDuringRead(existingState DeleteConnectionRequest) { } +// Delete a credential +type DeleteCredentialRequest struct { + // Force deletion even if there are dependent services. + Force types.Bool `tfsdk:"-"` + // Name of the credential. + NameArg types.String `tfsdk:"-"` +} + +func (newState *DeleteCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCredentialRequest) { +} + +func (newState *DeleteCredentialRequest) SyncEffectiveFieldsDuringRead(existingState DeleteCredentialRequest) { +} + +type DeleteCredentialResponse struct { +} + +func (newState *DeleteCredentialResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCredentialResponse) { +} + +func (newState *DeleteCredentialResponse) SyncEffectiveFieldsDuringRead(existingState DeleteCredentialResponse) { +} + // Delete an external location type DeleteExternalLocationRequest struct { // Force deletion even if there are dependent external tables or mounts. @@ -1339,8 +1509,7 @@ type ExternalLocationInfo struct { // When fallback mode is enabled, the access to the location falls back to // cluster credentials if UC credentials are not sufficient. Fallback types.Bool `tfsdk:"fallback" tf:"optional"` - // Whether the current securable is accessible from all workspaces or a - // specific set of workspaces. + IsolationMode types.String `tfsdk:"isolation_mode" tf:"optional"` // Unique identifier of metastore hosting the external location. MetastoreId types.String `tfsdk:"metastore_id" tf:"optional"` @@ -1548,6 +1717,34 @@ func (newState *GcpOauthToken) SyncEffectiveFieldsDuringCreateOrUpdate(plan GcpO func (newState *GcpOauthToken) SyncEffectiveFieldsDuringRead(existingState GcpOauthToken) { } +// Options to customize the requested temporary credential +type GenerateTemporaryServiceCredentialAzureOptions struct { + // The resources to which the temporary Azure credential should apply. These + // resources are the scopes that are passed to the token provider (see + // https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential?view=azure-python) + Resources []types.String `tfsdk:"resources" tf:"optional"` +} + +func (newState *GenerateTemporaryServiceCredentialAzureOptions) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenerateTemporaryServiceCredentialAzureOptions) { +} + +func (newState *GenerateTemporaryServiceCredentialAzureOptions) SyncEffectiveFieldsDuringRead(existingState GenerateTemporaryServiceCredentialAzureOptions) { +} + +type GenerateTemporaryServiceCredentialRequest struct { + // Options to customize the requested temporary credential + AzureOptions []GenerateTemporaryServiceCredentialAzureOptions `tfsdk:"azure_options" tf:"optional,object"` + // The name of the service credential used to generate a temporary + // credential + CredentialName types.String `tfsdk:"credential_name" tf:"optional"` +} + +func (newState *GenerateTemporaryServiceCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenerateTemporaryServiceCredentialRequest) { +} + +func (newState *GenerateTemporaryServiceCredentialRequest) SyncEffectiveFieldsDuringRead(existingState GenerateTemporaryServiceCredentialRequest) { +} + type GenerateTemporaryTableCredentialRequest struct { // The operation performed against the table data, either READ or // READ_WRITE. 
If READ_WRITE is specified, the credentials returned will @@ -1567,6 +1764,10 @@ type GenerateTemporaryTableCredentialResponse struct { // AWS temporary credentials for API authentication. Read more at // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. AwsTempCredentials []AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional,object"` + // Azure Active Directory token, essentially the Oauth token for Azure + // Service Principal or Managed Identity. Read more at + // https://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token + AzureAad []AzureActiveDirectoryToken `tfsdk:"azure_aad" tf:"optional,object"` // Azure temporary credentials for API authentication. Read more at // https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas AzureUserDelegationSas []AzureUserDelegationSas `tfsdk:"azure_user_delegation_sas" tf:"optional,object"` @@ -1706,6 +1907,18 @@ func (newState *GetConnectionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(pl func (newState *GetConnectionRequest) SyncEffectiveFieldsDuringRead(existingState GetConnectionRequest) { } +// Get a credential +type GetCredentialRequest struct { + // Name of the credential. + NameArg types.String `tfsdk:"-"` +} + +func (newState *GetCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCredentialRequest) { +} + +func (newState *GetCredentialRequest) SyncEffectiveFieldsDuringRead(existingState GetCredentialRequest) { +} + // Get effective permissions type GetEffectiveRequest struct { // Full name of securable. @@ -2116,6 +2329,40 @@ func (newState *ListConnectionsResponse) SyncEffectiveFieldsDuringCreateOrUpdate func (newState *ListConnectionsResponse) SyncEffectiveFieldsDuringRead(existingState ListConnectionsResponse) { } +// List credentials +type ListCredentialsRequest struct { + // Maximum number of credentials to return. - If not set, the default max + // page size is used. - When set to a value greater than 0, the page length + // is the minimum of this value and a server-configured value. - When set to + // 0, the page length is set to a server-configured value (recommended). - + // When set to a value less than 0, an invalid parameter error is returned. + MaxResults types.Int64 `tfsdk:"-"` + // Opaque token to retrieve the next page of results. + PageToken types.String `tfsdk:"-"` + // Return only credentials for the specified purpose. + Purpose types.String `tfsdk:"-"` +} + +func (newState *ListCredentialsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCredentialsRequest) { +} + +func (newState *ListCredentialsRequest) SyncEffectiveFieldsDuringRead(existingState ListCredentialsRequest) { +} + +type ListCredentialsResponse struct { + Credentials []CredentialInfo `tfsdk:"credentials" tf:"optional"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). 
+ NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` +} + +func (newState *ListCredentialsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCredentialsResponse) { +} + +func (newState *ListCredentialsResponse) SyncEffectiveFieldsDuringRead(existingState ListCredentialsResponse) { +} + // List external locations type ListExternalLocationsRequest struct { // Whether to include external locations in the response for which the @@ -2515,6 +2762,9 @@ type ListTablesRequest struct { OmitColumns types.Bool `tfsdk:"-"` // Whether to omit the properties of the table from the response or not. OmitProperties types.Bool `tfsdk:"-"` + // Whether to omit the username of the table (e.g. owner, updated_by, + // created_by) from the response or not. + OmitUsername types.Bool `tfsdk:"-"` // Opaque token to send for the next page of results (pagination). PageToken types.String `tfsdk:"-"` // Parent schema of tables. @@ -3445,10 +3695,11 @@ type StorageCredentialInfo struct { CreatedBy types.String `tfsdk:"created_by" tf:"optional"` // The Databricks managed GCP service account configuration. DatabricksGcpServiceAccount []DatabricksGcpServiceAccountResponse `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` + // The full name of the credential. + FullName types.String `tfsdk:"full_name" tf:"optional"` // The unique identifier of the credential. Id types.String `tfsdk:"id" tf:"optional"` - // Whether the current securable is accessible from all workspaces or a - // specific set of workspaces. + IsolationMode types.String `tfsdk:"isolation_mode" tf:"optional"` // Unique identifier of parent metastore. MetastoreId types.String `tfsdk:"metastore_id" tf:"optional"` @@ -3642,6 +3893,25 @@ func (newState *TableSummary) SyncEffectiveFieldsDuringCreateOrUpdate(plan Table func (newState *TableSummary) SyncEffectiveFieldsDuringRead(existingState TableSummary) { } +type TemporaryCredentials struct { + // AWS temporary credentials for API authentication. Read more at + // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. + AwsTempCredentials []AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional,object"` + // Azure Active Directory token, essentially the Oauth token for Azure + // Service Principal or Managed Identity. Read more at + // https://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token + AzureAad []AzureActiveDirectoryToken `tfsdk:"azure_aad" tf:"optional,object"` + // Server time when the credential will expire, in epoch milliseconds. The + // API client is advised to cache the credential given this expiration time. + ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` +} + +func (newState *TemporaryCredentials) SyncEffectiveFieldsDuringCreateOrUpdate(plan TemporaryCredentials) { +} + +func (newState *TemporaryCredentials) SyncEffectiveFieldsDuringRead(existingState TemporaryCredentials) { +} + // Detailed status of an online table. Shown if the online table is in the // ONLINE_TRIGGERED_UPDATE or the ONLINE_NO_PENDING_UPDATE state. type TriggeredUpdateStatus struct { @@ -3736,6 +4006,35 @@ func (newState *UpdateConnection) SyncEffectiveFieldsDuringCreateOrUpdate(plan U func (newState *UpdateConnection) SyncEffectiveFieldsDuringRead(existingState UpdateConnection) { } +type UpdateCredentialRequest struct { + // The AWS IAM role configuration + AwsIamRole []AwsIamRole `tfsdk:"aws_iam_role" tf:"optional,object"` + // The Azure managed identity configuration. 
+ AzureManagedIdentity []AzureManagedIdentity `tfsdk:"azure_managed_identity" tf:"optional,object"` + // Comment associated with the credential. + Comment types.String `tfsdk:"comment" tf:"optional"` + // Force update even if there are dependent services. + Force types.Bool `tfsdk:"force" tf:"optional"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode types.String `tfsdk:"isolation_mode" tf:"optional"` + // Name of the credential. + NameArg types.String `tfsdk:"-"` + // New name of credential. + NewName types.String `tfsdk:"new_name" tf:"optional"` + // Username of current owner of credential. + Owner types.String `tfsdk:"owner" tf:"optional"` + // Supply true to this argument to skip validation of the updated + // credential. + SkipValidation types.Bool `tfsdk:"skip_validation" tf:"optional"` +} + +func (newState *UpdateCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCredentialRequest) { +} + +func (newState *UpdateCredentialRequest) SyncEffectiveFieldsDuringRead(existingState UpdateCredentialRequest) { +} + type UpdateExternalLocation struct { // The AWS access point to use when accesing s3 for this external location. AccessPoint types.String `tfsdk:"access_point" tf:"optional"` @@ -3752,8 +4051,7 @@ type UpdateExternalLocation struct { // Force update even if changing url invalidates dependent external tables // or mounts. Force types.Bool `tfsdk:"force" tf:"optional"` - // Whether the current securable is accessible from all workspaces or a - // specific set of workspaces. + IsolationMode types.String `tfsdk:"isolation_mode" tf:"optional"` // Name of the external location. Name types.String `tfsdk:"-"` @@ -3970,8 +4268,7 @@ type UpdateStorageCredential struct { // Force update even if there are dependent external locations or external // tables. Force types.Bool `tfsdk:"force" tf:"optional"` - // Whether the current securable is accessible from all workspaces or a - // specific set of workspaces. + IsolationMode types.String `tfsdk:"isolation_mode" tf:"optional"` // Name of the storage credential. Name types.String `tfsdk:"-"` @@ -4055,6 +4352,36 @@ func (newState *UpdateWorkspaceBindingsParameters) SyncEffectiveFieldsDuringCrea func (newState *UpdateWorkspaceBindingsParameters) SyncEffectiveFieldsDuringRead(existingState UpdateWorkspaceBindingsParameters) { } +type ValidateCredentialRequest struct { + // The AWS IAM role configuration + AwsIamRole []AwsIamRole `tfsdk:"aws_iam_role" tf:"optional,object"` + // The Azure managed identity configuration. + AzureManagedIdentity []AzureManagedIdentity `tfsdk:"azure_managed_identity" tf:"optional,object"` + // Required. The name of an existing credential or long-lived cloud + // credential to validate. + CredentialName types.String `tfsdk:"credential_name" tf:"optional"` + // The purpose of the credential. This should only be used when the + // credential is specified. + Purpose types.String `tfsdk:"purpose" tf:"optional"` +} + +func (newState *ValidateCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ValidateCredentialRequest) { +} + +func (newState *ValidateCredentialRequest) SyncEffectiveFieldsDuringRead(existingState ValidateCredentialRequest) { +} + +type ValidateCredentialResponse struct { + // The results of the validation check. 
+ Results []CredentialValidationResult `tfsdk:"results" tf:"optional"` +} + +func (newState *ValidateCredentialResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ValidateCredentialResponse) { +} + +func (newState *ValidateCredentialResponse) SyncEffectiveFieldsDuringRead(existingState ValidateCredentialResponse) { +} + type ValidateStorageCredential struct { // The AWS IAM role configuration. AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional,object"` diff --git a/internal/service/compute_tf/model.go b/internal/service/compute_tf/model.go index 653cfec24f..d1e67f00bb 100755 --- a/internal/service/compute_tf/model.go +++ b/internal/service/compute_tf/model.go @@ -1747,7 +1747,7 @@ type EditCluster struct { // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"` - // ID of the cluser + // ID of the cluster ClusterId types.String `tfsdk:"cluster_id" tf:""` // The configuration for delivering spark logs to a long-term storage // destination. Two kinds of destinations (dbfs and s3) are supported. Only diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index 2066f6a422..c49167cac7 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -15,6 +15,68 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) +// Create dashboard +type CreateDashboardRequest struct { + Dashboard []Dashboard `tfsdk:"dashboard" tf:"optional,object"` +} + +func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateDashboardRequest) { +} + +func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState CreateDashboardRequest) { +} + +// Create dashboard schedule +type CreateScheduleRequest struct { + // UUID identifying the dashboard to which the schedule belongs. + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` + + Schedule []Schedule `tfsdk:"schedule" tf:"optional,object"` +} + +func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateScheduleRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId +} + +func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState CreateScheduleRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } +} + +// Create schedule subscription +type CreateSubscriptionRequest struct { + // UUID identifying the dashboard to which the subscription belongs. + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` + // UUID identifying the schedule to which the subscription belongs. 
+ ScheduleId types.String `tfsdk:"-"` + EffectiveScheduleId types.String `tfsdk:"-"` + + Subscription []Subscription `tfsdk:"subscription" tf:"optional,object"` +} + +func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateSubscriptionRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId +} + +func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState CreateSubscriptionRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } +} + type CronSchedule struct { // A cron expression using quartz syntax. EX: `0 0 8 * * ?` represents // everyday at 8am. See [Cron Trigger] for details. @@ -803,6 +865,8 @@ type Schedule struct { // A timestamp indicating when the schedule was last updated. UpdateTime types.String `tfsdk:"update_time" tf:"optional"` EffectiveUpdateTime types.String `tfsdk:"effective_update_time" tf:"computed,optional"` + // The warehouse id to run the dashboard with for the schedule. + WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } func (newState *Schedule) SyncEffectiveFieldsDuringCreateOrUpdate(plan Schedule) { @@ -1025,3 +1089,46 @@ func (newState *UnpublishDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpd func (newState *UnpublishDashboardResponse) SyncEffectiveFieldsDuringRead(existingState UnpublishDashboardResponse) { } + +// Update dashboard +type UpdateDashboardRequest struct { + Dashboard []Dashboard `tfsdk:"dashboard" tf:"optional,object"` + // UUID identifying the dashboard. + DashboardId types.String `tfsdk:"-"` +} + +func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateDashboardRequest) { +} + +func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState UpdateDashboardRequest) { +} + +// Update dashboard schedule +type UpdateScheduleRequest struct { + // UUID identifying the dashboard to which the schedule belongs. + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` + + Schedule []Schedule `tfsdk:"schedule" tf:"optional,object"` + // UUID identifying the schedule. 
+ ScheduleId types.String `tfsdk:"-"` + EffectiveScheduleId types.String `tfsdk:"-"` +} + +func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateScheduleRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId +} + +func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState UpdateScheduleRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } +} diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index fe3918dabd..d4629abf94 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -2321,6 +2321,9 @@ type RunNow struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` + // A list of task keys to run inside of the job. If this field is not + // provided, all tasks in the job will be run. + Only []types.String `tfsdk:"only" tf:"optional"` // Controls whether the pipeline should perform a full refresh PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional,object"` diff --git a/internal/service/pipelines_tf/model.go b/internal/service/pipelines_tf/model.go index 8adcfa0bfa..56f0b9d192 100755 --- a/internal/service/pipelines_tf/model.go +++ b/internal/service/pipelines_tf/model.go @@ -45,7 +45,7 @@ type CreatePipeline struct { Edition types.String `tfsdk:"edition" tf:"optional"` // Filters on which Pipeline packages to include in the deployed graph. Filters []Filters `tfsdk:"filters" tf:"optional,object"` - // The definition of a gateway pipeline to support CDC. + // The definition of a gateway pipeline to support change data capture. GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional,object"` // Unique identifier for this pipeline. Id types.String `tfsdk:"id" tf:"optional"` @@ -60,6 +60,8 @@ type CreatePipeline struct { Notifications []Notifications `tfsdk:"notifications" tf:"optional"` // Whether Photon is enabled for this pipeline. Photon types.Bool `tfsdk:"photon" tf:"optional"` + // Restart window of this pipeline. + RestartWindow []RestartWindow `tfsdk:"restart_window" tf:"optional,object"` // The default schema (database) where tables are read from or published to. // The presence of this field implies that the pipeline is in direct // publishing mode. @@ -173,7 +175,7 @@ type EditPipeline struct { ExpectedLastModified types.Int64 `tfsdk:"expected_last_modified" tf:"optional"` // Filters on which Pipeline packages to include in the deployed graph. Filters []Filters `tfsdk:"filters" tf:"optional,object"` - // The definition of a gateway pipeline to support CDC. + // The definition of a gateway pipeline to support change data capture. GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional,object"` // Unique identifier for this pipeline. 
Id types.String `tfsdk:"id" tf:"optional"` @@ -190,6 +192,8 @@ type EditPipeline struct { Photon types.Bool `tfsdk:"photon" tf:"optional"` // Unique identifier for this pipeline. PipelineId types.String `tfsdk:"pipeline_id" tf:"optional"` + // Restart window of this pipeline. + RestartWindow []RestartWindow `tfsdk:"restart_window" tf:"optional,object"` // The default schema (database) where tables are read from or published to. // The presence of this field implies that the pipeline is in direct // publishing mode. @@ -365,11 +369,11 @@ func (newState *GetUpdateResponse) SyncEffectiveFieldsDuringRead(existingState G } type IngestionConfig struct { - // Select tables from a specific source report. + // Select a specific source report. Report []ReportSpec `tfsdk:"report" tf:"optional,object"` - // Select tables from a specific source schema. + // Select all tables from a specific source schema. Schema []SchemaSpec `tfsdk:"schema" tf:"optional,object"` - // Select tables from a specific source table. + // Select a specific source table. Table []TableSpec `tfsdk:"table" tf:"optional,object"` } @@ -380,9 +384,13 @@ func (newState *IngestionConfig) SyncEffectiveFieldsDuringRead(existingState Ing } type IngestionGatewayPipelineDefinition struct { - // Immutable. The Unity Catalog connection this gateway pipeline uses to - // communicate with the source. + // [Deprecated, use connection_name instead] Immutable. The Unity Catalog + // connection that this gateway pipeline uses to communicate with the + // source. ConnectionId types.String `tfsdk:"connection_id" tf:"optional"` + // Immutable. The Unity Catalog connection that this gateway pipeline uses + // to communicate with the source. + ConnectionName types.String `tfsdk:"connection_name" tf:"optional"` // Required, Immutable. The name of the catalog for the gateway pipeline's // storage location. GatewayStorageCatalog types.String `tfsdk:"gateway_storage_catalog" tf:"optional"` @@ -403,13 +411,13 @@ func (newState *IngestionGatewayPipelineDefinition) SyncEffectiveFieldsDuringRea } type IngestionPipelineDefinition struct { - // Immutable. The Unity Catalog connection this ingestion pipeline uses to - // communicate with the source. Specify either ingestion_gateway_id or - // connection_name. + // Immutable. The Unity Catalog connection that this ingestion pipeline uses + // to communicate with the source. This is used with connectors for + // applications like Salesforce, Workday, and so on. ConnectionName types.String `tfsdk:"connection_name" tf:"optional"` - // Immutable. Identifier for the ingestion gateway used by this ingestion - // pipeline to communicate with the source. Specify either - // ingestion_gateway_id or connection_name. + // Immutable. Identifier for the gateway that is used by this ingestion + // pipeline to communicate with the source database. This is used with + // connectors to databases like SQL Server. IngestionGatewayId types.String `tfsdk:"ingestion_gateway_id" tf:"optional"` // Required. Settings specifying tables to replicate and the destination for // the replicated tables. @@ -934,7 +942,7 @@ type PipelineSpec struct { Edition types.String `tfsdk:"edition" tf:"optional"` // Filters on which Pipeline packages to include in the deployed graph. Filters []Filters `tfsdk:"filters" tf:"optional,object"` - // The definition of a gateway pipeline to support CDC. + // The definition of a gateway pipeline to support change data capture. 
GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional,object"` // Unique identifier for this pipeline. Id types.String `tfsdk:"id" tf:"optional"` @@ -949,6 +957,8 @@ type PipelineSpec struct { Notifications []Notifications `tfsdk:"notifications" tf:"optional"` // Whether Photon is enabled for this pipeline. Photon types.Bool `tfsdk:"photon" tf:"optional"` + // Restart window of this pipeline. + RestartWindow []RestartWindow `tfsdk:"restart_window" tf:"optional,object"` // The default schema (database) where tables are read from or published to. // The presence of this field implies that the pipeline is in direct // publishing mode. @@ -1032,6 +1042,27 @@ func (newState *ReportSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan ReportS func (newState *ReportSpec) SyncEffectiveFieldsDuringRead(existingState ReportSpec) { } +type RestartWindow struct { + // Days of week in which the restart is allowed to happen (within a + // five-hour window starting at start_hour). If not specified all days of + // the week will be used. + DaysOfWeek types.String `tfsdk:"days_of_week" tf:"optional"` + // An integer between 0 and 23 denoting the start hour for the restart + // window in the 24-hour day. Continuous pipeline restart is triggered only + // within a five-hour window starting at this hour. + StartHour types.Int64 `tfsdk:"start_hour" tf:""` + // Time zone id of restart window. See + // https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html + // for details. If not specified, UTC will be used. + TimeZoneId types.String `tfsdk:"time_zone_id" tf:"optional"` +} + +func (newState *RestartWindow) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestartWindow) { +} + +func (newState *RestartWindow) SyncEffectiveFieldsDuringRead(existingState RestartWindow) { +} + type SchemaSpec struct { // Required. Destination catalog to store tables. DestinationCatalog types.String `tfsdk:"destination_catalog" tf:"optional"` diff --git a/internal/service/provisioning_tf/model.go b/internal/service/provisioning_tf/model.go index 188e8f48df..49b5d02e78 100755 --- a/internal/service/provisioning_tf/model.go +++ b/internal/service/provisioning_tf/model.go @@ -947,6 +947,9 @@ type UpdateWorkspaceRequest struct { // switch from a Databricks-managed VPC to a customer-managed VPC by // updating the workspace to add a network configuration ID. NetworkId types.String `tfsdk:"network_id" tf:"optional"` + // The ID of the workspace's private access settings configuration object. + // This parameter is available only for updating failed workspaces. + PrivateAccessSettingsId types.String `tfsdk:"private_access_settings_id" tf:"optional"` // The ID of the workspace's storage configuration object. This parameter is // available only for updating failed workspaces. StorageConfigurationId types.String `tfsdk:"storage_configuration_id" tf:"optional"` diff --git a/internal/service/sharing_tf/model.go b/internal/service/sharing_tf/model.go index 0192deeaaa..6bde086372 100755 --- a/internal/service/sharing_tf/model.go +++ b/internal/service/sharing_tf/model.go @@ -15,214 +15,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -type CentralCleanRoomInfo struct { - // All assets from all collaborators that are available in the clean room. - // Only one of table_info or notebook_info will be filled in. - CleanRoomAssets []CleanRoomAssetInfo `tfsdk:"clean_room_assets" tf:"optional"` - // All collaborators who are in the clean room. 
- Collaborators []CleanRoomCollaboratorInfo `tfsdk:"collaborators" tf:"optional"` - // The collaborator who created the clean room. - Creator []CleanRoomCollaboratorInfo `tfsdk:"creator" tf:"optional,object"` - // The cloud where clean room tasks will be run. - StationCloud types.String `tfsdk:"station_cloud" tf:"optional"` - // The region where clean room tasks will be run. - StationRegion types.String `tfsdk:"station_region" tf:"optional"` -} - -func (newState *CentralCleanRoomInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CentralCleanRoomInfo) { -} - -func (newState *CentralCleanRoomInfo) SyncEffectiveFieldsDuringRead(existingState CentralCleanRoomInfo) { -} - -type CleanRoomAssetInfo struct { - // Time at which this asset was added, in epoch milliseconds. - AddedAt types.Int64 `tfsdk:"added_at" tf:"optional"` - // Details about the notebook asset. - NotebookInfo []CleanRoomNotebookInfo `tfsdk:"notebook_info" tf:"optional,object"` - // The collaborator who owns the asset. - Owner []CleanRoomCollaboratorInfo `tfsdk:"owner" tf:"optional,object"` - // Details about the table asset. - TableInfo []CleanRoomTableInfo `tfsdk:"table_info" tf:"optional,object"` - // Time at which this asset was updated, in epoch milliseconds. - UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` -} - -func (newState *CleanRoomAssetInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomAssetInfo) { -} - -func (newState *CleanRoomAssetInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomAssetInfo) { -} - -type CleanRoomCatalog struct { - // Name of the catalog in the clean room station. Empty for notebooks. - CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` - // The details of the shared notebook files. - NotebookFiles []SharedDataObject `tfsdk:"notebook_files" tf:"optional"` - // The details of the shared tables. - Tables []SharedDataObject `tfsdk:"tables" tf:"optional"` -} - -func (newState *CleanRoomCatalog) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomCatalog) { -} - -func (newState *CleanRoomCatalog) SyncEffectiveFieldsDuringRead(existingState CleanRoomCatalog) { -} - -type CleanRoomCatalogUpdate struct { - // The name of the catalog to update assets. - CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` - // The updates to the assets in the catalog. - Updates []SharedDataObjectUpdate `tfsdk:"updates" tf:"optional,object"` -} - -func (newState *CleanRoomCatalogUpdate) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomCatalogUpdate) { -} - -func (newState *CleanRoomCatalogUpdate) SyncEffectiveFieldsDuringRead(existingState CleanRoomCatalogUpdate) { -} - -type CleanRoomCollaboratorInfo struct { - // The global Unity Catalog metastore id of the collaborator. Also known as - // the sharing identifier. The identifier is of format - // __cloud__:__region__:__metastore-uuid__. - GlobalMetastoreId types.String `tfsdk:"global_metastore_id" tf:"optional"` - // The organization name of the collaborator. This is configured in the - // metastore for Delta Sharing and is used to identify the organization to - // other collaborators. - OrganizationName types.String `tfsdk:"organization_name" tf:"optional"` -} - -func (newState *CleanRoomCollaboratorInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomCollaboratorInfo) { -} - -func (newState *CleanRoomCollaboratorInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomCollaboratorInfo) { -} - -type CleanRoomInfo struct { - // User-provided free-form text description. 
- Comment types.String `tfsdk:"comment" tf:"optional"` - // Time at which this clean room was created, in epoch milliseconds. - CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` - // Username of clean room creator. - CreatedBy types.String `tfsdk:"created_by" tf:"optional"` - // Catalog aliases shared by the current collaborator with asset details. - LocalCatalogs []CleanRoomCatalog `tfsdk:"local_catalogs" tf:"optional"` - // Name of the clean room. - Name types.String `tfsdk:"name" tf:"optional"` - // Username of current owner of clean room. - Owner types.String `tfsdk:"owner" tf:"optional"` - // Central clean room details. - RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"optional,object"` - // Time at which this clean room was updated, in epoch milliseconds. - UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` - // Username of clean room updater. - UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` -} - -func (newState *CleanRoomInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomInfo) { -} - -func (newState *CleanRoomInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomInfo) { -} - -type CleanRoomNotebookInfo struct { - // The base64 representation of the notebook content in HTML. - NotebookContent types.String `tfsdk:"notebook_content" tf:"optional"` - // The name of the notebook. - NotebookName types.String `tfsdk:"notebook_name" tf:"optional"` -} - -func (newState *CleanRoomNotebookInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomNotebookInfo) { -} - -func (newState *CleanRoomNotebookInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomNotebookInfo) { -} - -type CleanRoomTableInfo struct { - // Name of parent catalog. - CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` - // The array of __ColumnInfo__ definitions of the table's columns. - Columns []ColumnInfo `tfsdk:"columns" tf:"optional"` - // Full name of table, in form of - // __catalog_name__.__schema_name__.__table_name__ - FullName types.String `tfsdk:"full_name" tf:"optional"` - // Name of table, relative to parent schema. - Name types.String `tfsdk:"name" tf:"optional"` - // Name of parent schema relative to its parent catalog. - SchemaName types.String `tfsdk:"schema_name" tf:"optional"` -} - -func (newState *CleanRoomTableInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomTableInfo) { -} - -func (newState *CleanRoomTableInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomTableInfo) { -} - -type ColumnInfo struct { - // User-provided free-form text description. - Comment types.String `tfsdk:"comment" tf:"optional"` - - Mask []ColumnMask `tfsdk:"mask" tf:"optional,object"` - // Name of Column. - Name types.String `tfsdk:"name" tf:"optional"` - // Whether field may be Null (default: true). - Nullable types.Bool `tfsdk:"nullable" tf:"optional"` - // Partition index for column. - PartitionIndex types.Int64 `tfsdk:"partition_index" tf:"optional"` - // Ordinal position of column (starting at position 0). - Position types.Int64 `tfsdk:"position" tf:"optional"` - // Format of IntervalType. - TypeIntervalType types.String `tfsdk:"type_interval_type" tf:"optional"` - // Full data type specification, JSON-serialized. - TypeJson types.String `tfsdk:"type_json" tf:"optional"` - // Name of type (INT, STRUCT, MAP, etc.). - TypeName types.String `tfsdk:"type_name" tf:"optional"` - // Digits of precision; required for DecimalTypes. 
- TypePrecision types.Int64 `tfsdk:"type_precision" tf:"optional"` - // Digits to right of decimal; Required for DecimalTypes. - TypeScale types.Int64 `tfsdk:"type_scale" tf:"optional"` - // Full data type specification as SQL/catalogString text. - TypeText types.String `tfsdk:"type_text" tf:"optional"` -} - -func (newState *ColumnInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ColumnInfo) { -} - -func (newState *ColumnInfo) SyncEffectiveFieldsDuringRead(existingState ColumnInfo) { -} - -type ColumnMask struct { - // The full name of the column mask SQL UDF. - FunctionName types.String `tfsdk:"function_name" tf:"optional"` - // The list of additional table columns to be passed as input to the column - // mask function. The first arg of the mask function should be of the type - // of the column being masked and the types of the rest of the args should - // match the types of columns in 'using_column_names'. - UsingColumnNames []types.String `tfsdk:"using_column_names" tf:"optional"` -} - -func (newState *ColumnMask) SyncEffectiveFieldsDuringCreateOrUpdate(plan ColumnMask) { -} - -func (newState *ColumnMask) SyncEffectiveFieldsDuringRead(existingState ColumnMask) { -} - -type CreateCleanRoom struct { - // User-provided free-form text description. - Comment types.String `tfsdk:"comment" tf:"optional"` - // Name of the clean room. - Name types.String `tfsdk:"name" tf:""` - // Central clean room details. - RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"object"` -} - -func (newState *CreateCleanRoom) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCleanRoom) { -} - -func (newState *CreateCleanRoom) SyncEffectiveFieldsDuringRead(existingState CreateCleanRoom) { -} - type CreateProvider struct { // The delta sharing authentication type. AuthenticationType types.String `tfsdk:"authentication_type" tf:""` @@ -287,18 +79,6 @@ func (newState *CreateShare) SyncEffectiveFieldsDuringCreateOrUpdate(plan Create func (newState *CreateShare) SyncEffectiveFieldsDuringRead(existingState CreateShare) { } -// Delete a clean room -type DeleteCleanRoomRequest struct { - // The name of the clean room. - Name types.String `tfsdk:"-"` -} - -func (newState *DeleteCleanRoomRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCleanRoomRequest) { -} - -func (newState *DeleteCleanRoomRequest) SyncEffectiveFieldsDuringRead(existingState DeleteCleanRoomRequest) { -} - // Delete a provider type DeleteProviderRequest struct { // Name of the provider. @@ -365,20 +145,6 @@ func (newState *GetActivationUrlInfoResponse) SyncEffectiveFieldsDuringCreateOrU func (newState *GetActivationUrlInfoResponse) SyncEffectiveFieldsDuringRead(existingState GetActivationUrlInfoResponse) { } -// Get a clean room -type GetCleanRoomRequest struct { - // Whether to include remote details (central) on the clean room. - IncludeRemoteDetails types.Bool `tfsdk:"-"` - // The name of the clean room. - Name types.String `tfsdk:"-"` -} - -func (newState *GetCleanRoomRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCleanRoomRequest) { -} - -func (newState *GetCleanRoomRequest) SyncEffectiveFieldsDuringRead(existingState GetCleanRoomRequest) { -} - // Get a provider type GetProviderRequest struct { // Name of the provider. 
@@ -443,40 +209,6 @@ func (newState *IpAccessList) SyncEffectiveFieldsDuringCreateOrUpdate(plan IpAcc func (newState *IpAccessList) SyncEffectiveFieldsDuringRead(existingState IpAccessList) { } -// List clean rooms -type ListCleanRoomsRequest struct { - // Maximum number of clean rooms to return. If not set, all the clean rooms - // are returned (not recommended). - when set to a value greater than 0, the - // page length is the minimum of this value and a server configured value; - - // when set to 0, the page length is set to a server configured value - // (recommended); - when set to a value less than 0, an invalid parameter - // error is returned; - MaxResults types.Int64 `tfsdk:"-"` - // Opaque pagination token to go to next page based on previous query. - PageToken types.String `tfsdk:"-"` -} - -func (newState *ListCleanRoomsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCleanRoomsRequest) { -} - -func (newState *ListCleanRoomsRequest) SyncEffectiveFieldsDuringRead(existingState ListCleanRoomsRequest) { -} - -type ListCleanRoomsResponse struct { - // An array of clean rooms. Remote details (central) are not included. - CleanRooms []CleanRoomInfo `tfsdk:"clean_rooms" tf:"optional"` - // Opaque token to retrieve the next page of results. Absent if there are no - // more pages. __page_token__ should be set to this value for the next - // request (for the next page of results). - NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` -} - -func (newState *ListCleanRoomsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCleanRoomsResponse) { -} - -func (newState *ListCleanRoomsResponse) SyncEffectiveFieldsDuringRead(existingState ListCleanRoomsResponse) { -} - type ListProviderSharesResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -1032,23 +764,6 @@ func (newState *SharedDataObjectUpdate) SyncEffectiveFieldsDuringCreateOrUpdate( func (newState *SharedDataObjectUpdate) SyncEffectiveFieldsDuringRead(existingState SharedDataObjectUpdate) { } -type UpdateCleanRoom struct { - // Array of shared data object updates. - CatalogUpdates []CleanRoomCatalogUpdate `tfsdk:"catalog_updates" tf:"optional"` - // User-provided free-form text description. - Comment types.String `tfsdk:"comment" tf:"optional"` - // The name of the clean room. - Name types.String `tfsdk:"-"` - // Username of current owner of clean room. - Owner types.String `tfsdk:"owner" tf:"optional"` -} - -func (newState *UpdateCleanRoom) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCleanRoom) { -} - -func (newState *UpdateCleanRoom) SyncEffectiveFieldsDuringRead(existingState UpdateCleanRoom) { -} - type UpdatePermissionsResponse struct { } From 7ddbeab4529a271a0c03fca248a4898e6145bb6b Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Fri, 8 Nov 2024 03:49:09 -0500 Subject: [PATCH 03/10] [Fix] Upload content `databricks_workspace_file` using raw format (#4200) ## Changes This fixes a problem with uploading zip-based files with the `databricks_workspace_file` resource. 
## Tests - [x] `make test` run locally - [ ] ~relevant change in `docs/` folder~ - [x] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [x] using Go SDK --- internal/acceptance/permissions_test.go | 30 ++++++++++++++------- internal/acceptance/workspace_file_test.go | 9 +++++++ workspace/acceptance/testdata/zipfile.zip | Bin 0 -> 171 bytes workspace/resource_workspace_file.go | 4 +-- workspace/resource_workspace_file_test.go | 16 +++++------ 5 files changed, 39 insertions(+), 20 deletions(-) create mode 100644 workspace/acceptance/testdata/zipfile.zip diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go index 325bc398fe..0fdd5553b8 100644 --- a/internal/acceptance/permissions_test.go +++ b/internal/acceptance/permissions_test.go @@ -466,17 +466,22 @@ func TestAccPermissions_WorkspaceFile_Path(t *testing.T) { } resource "databricks_workspace_file" "this" { source = "{var.CWD}/../../storage/testdata/tf-test-python.py" - path = "${databricks_directory.this.path}/test_notebook" + path = "${databricks_directory.this.path}/test_ws_file" }` WorkspaceLevel(t, Step{ - Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", groupPermissions("CAN_RUN")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", + groupPermissions("CAN_RUN")), }, Step{ - Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", + currentPrincipalPermission(t, "CAN_MANAGE"), + allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), }, Step{ // The current user can be removed from permissions since they inherit permissions from the directory they created. 
- Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", + allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), }, Step{ - Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", currentPrincipalPermission(t, "CAN_READ")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", + currentPrincipalPermission(t, "CAN_READ")), ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for file, allowed levels: CAN_MANAGE"), }) } @@ -489,17 +494,22 @@ func TestAccPermissions_WorkspaceFile_Id(t *testing.T) { } resource "databricks_workspace_file" "this" { source = "{var.CWD}/../../storage/testdata/tf-test-python.py" - path = "${databricks_directory.this.path}/test_notebook" + path = "${databricks_directory.this.path}/test_ws_file" }` WorkspaceLevel(t, Step{ - Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", groupPermissions("CAN_RUN")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", + groupPermissions("CAN_RUN")), }, Step{ - Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", + currentPrincipalPermission(t, "CAN_MANAGE"), + allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), }, Step{ // The current user can be removed from permissions since they inherit permissions from the directory they created. 
- Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", + allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), }, Step{ - Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", currentPrincipalPermission(t, "CAN_READ")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", + currentPrincipalPermission(t, "CAN_READ")), ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for file, allowed levels: CAN_MANAGE"), }) } diff --git a/internal/acceptance/workspace_file_test.go b/internal/acceptance/workspace_file_test.go index 9a9a57c8e0..f0b8b27b45 100644 --- a/internal/acceptance/workspace_file_test.go +++ b/internal/acceptance/workspace_file_test.go @@ -27,6 +27,15 @@ func TestAccWorkspaceFileEmptyFile(t *testing.T) { }) } +func TestAccWorkspaceFileZipFile(t *testing.T) { + WorkspaceLevel(t, Step{ + Template: `resource "databricks_workspace_file" "zipfile" { + source = "{var.CWD}/../../workspace/acceptance/testdata/zipfile.zip" + path = "/Shared/provider-test/zipfile_{var.RANDOM}.zip" + }`, + }) +} + func TestAccWorkspaceFileBase64(t *testing.T) { WorkspaceLevel(t, Step{ Template: `resource "databricks_workspace_file" "this2" { diff --git a/workspace/acceptance/testdata/zipfile.zip b/workspace/acceptance/testdata/zipfile.zip new file mode 100644 index 0000000000000000000000000000000000000000..2be8cd176157ab85f01d3d5ee6ef14b32422b05b GIT binary patch literal 171 zcmWIWW@h1H0D-xQ>5)}c`EQwlY!K#TkYUJ3&B@8vE2$_64dG;9-nl?09fV6OxEUB( zzA-W|u!sN^W@K^&cr!A|G2=2v0;q_A0jQi|Nh64bWGXAfR5TL Date: Fri, 8 Nov 2024 19:51:21 +0530 Subject: [PATCH 04/10] [Release] Release v1.58.0 (#4202) ### Bug Fixes * Always fill `cluster_name` in `databricks_cluster` data source ([#4197](https://github.com/databricks/terraform-provider-databricks/pull/4197)). * Suppress equal fold diff for DLT pipeline resource ([#4196](https://github.com/databricks/terraform-provider-databricks/pull/4196)). * Upload content `databricks_workspace_file` using raw format ([#4200](https://github.com/databricks/terraform-provider-databricks/pull/4200)). ### Internal Changes * Update to latest OpenAPI spec and bump Go SDK ([#4199](https://github.com/databricks/terraform-provider-databricks/pull/4199)). ### Dependency Updates * Bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 ([#4191](https://github.com/databricks/terraform-provider-databricks/pull/4191)). --- CHANGELOG.md | 19 +++++++++++++++++++ common/version.go | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d2d4139a7d..a9e6a67691 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Version changelog +## [Release] Release v1.58.0 + +### Bug Fixes + + * Always fill `cluster_name` in `databricks_cluster` data source ([#4197](https://github.com/databricks/terraform-provider-databricks/pull/4197)). + * Suppress equal fold diff for DLT pipeline resource ([#4196](https://github.com/databricks/terraform-provider-databricks/pull/4196)). + * Upload content `databricks_workspace_file` using raw format ([#4200](https://github.com/databricks/terraform-provider-databricks/pull/4200)). 
+ + +### Internal Changes + + * Update to latest OpenAPI spec and bump Go SDK ([#4199](https://github.com/databricks/terraform-provider-databricks/pull/4199)). + + +### Dependency Updates + + * Bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 ([#4191](https://github.com/databricks/terraform-provider-databricks/pull/4191)). + + ## [Release] Release v1.57.0 ### New Features and Improvements diff --git a/common/version.go b/common/version.go index b8959caac0..75ecf50a60 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.57.0" + version = "1.58.0" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider From ae65156df2b2e7fd683703e660055b0e15163860 Mon Sep 17 00:00:00 2001 From: Vibhor Nanavati Date: Tue, 12 Nov 2024 05:05:59 -0800 Subject: [PATCH 05/10] [Doc] Update "Databricks Workspace Creator" permissions on gcp-workspace.md (#4201) ## Changes Match the corresponding (upcoming) changes to https://docs.gcp.databricks.com/en/admin/cloud-configurations/gcp/permissions.html#required-permissions-for-the-workspace-creator ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/guides/gcp-workspace.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/guides/gcp-workspace.md b/docs/guides/gcp-workspace.md index e7d2044a12..0f0456c8c2 100644 --- a/docs/guides/gcp-workspace.md +++ b/docs/guides/gcp-workspace.md @@ -57,6 +57,8 @@ resource "google_project_iam_custom_role" "workspace_creator" { permissions = [ "iam.serviceAccounts.getIamPolicy", "iam.serviceAccounts.setIamPolicy", + "iam.serviceAccounts.create", + "iam.serviceAccounts.get", "iam.roles.create", "iam.roles.delete", "iam.roles.get", @@ -68,8 +70,13 @@ resource "google_project_iam_custom_role" "workspace_creator" { "serviceusage.services.list", "serviceusage.services.enable", "compute.networks.get", + "compute.networks.updatePolicy", "compute.projects.get", "compute.subnetworks.get", + "compute.subnetworks.getIamPolicy", + "compute.subnetworks.setIamPolicy", + "compute.firewalls.get", + "compute.firewalls.create", ] } From d4e461cb09d7480ede5c621cc1c9c82f520be233 Mon Sep 17 00:00:00 2001 From: Ashen Gunaratne Date: Tue, 12 Nov 2024 18:45:43 +0530 Subject: [PATCH 06/10] [Feature] Add support partitions in policy data sources (#4181) ## Changes - Resolves https://github.com/databricks/terraform-provider-databricks/issues/4054 - Resolves https://github.com/databricks/terraform-provider-databricks/issues/4152 - Add optional argument `aws_partition` to all aws policy data sources to allow usage in all aws partitions ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [ ] using Go SDK --- aws/constants.go | 17 +++++ aws/data_aws_assume_role_policy.go | 26 ++++++-- aws/data_aws_assume_role_policy_test.go | 49 ++++++++++++++ aws/data_aws_bucket_policy.go | 25 +++++-- aws/data_aws_bucket_policy_test.go | 16 +++++ aws/data_aws_crossaccount_policy.go | 51 ++++++++------- aws/data_aws_crossaccount_policy_test.go | 29 +++++++++ ...ta_aws_unity_catalog_assume_role_policy.go | 14 +++- ...s_unity_catalog_assume_role_policy_test.go | 65 +++++++++++++++++++ aws/data_aws_unity_catalog_policy.go | 17 +++-- 
aws/data_aws_unity_catalog_policy_test.go | 58 +++++++++++++++++ docs/data-sources/aws_assume_role_policy.md | 1 + docs/data-sources/aws_bucket_policy.md | 1 + docs/data-sources/aws_crossaccount_policy.md | 1 + .../aws_unity_catalog_assume_role_policy.md | 3 +- docs/data-sources/aws_unity_catalog_policy.md | 1 + 16 files changed, 330 insertions(+), 44 deletions(-) create mode 100644 aws/constants.go diff --git a/aws/constants.go b/aws/constants.go new file mode 100644 index 0000000000..36d9f84ea1 --- /dev/null +++ b/aws/constants.go @@ -0,0 +1,17 @@ +package aws + +var AwsConfig = map[string]map[string]string{ + "aws": { + "accountId": "414351767826", + "logDeliveryIamArn": "arn:aws:iam::414351767826:role/SaasUsageDeliveryRole-prod-IAMRole-3PLHICCRR1TK", + "unityCatalogueIamArn": "arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL", + }, + "aws-us-gov": { + "accountId": "044793339203", + "logDeliveryIamArn": "arn:aws-us-gov:iam::044793339203:role/SaasUsageDeliveryRole-prod-aws-gov-IAMRole-L4QM0RCHYQ1G", + "unityCatalogueIamArn": "arn:aws-us-gov:iam::044793339203:role/unity-catalog-prod-UCMasterRole-1QRFA8SGY15OJ", + }, +} + +var AwsPartitions = []string{"aws", "aws-us-gov"} +var AwsPartitionsValidationError = "aws_partition must be either 'aws' or 'aws-us-gov'" diff --git a/aws/data_aws_assume_role_policy.go b/aws/data_aws_assume_role_policy.go index 576321d819..1cbbed669d 100644 --- a/aws/data_aws_assume_role_policy.go +++ b/aws/data_aws_assume_role_policy.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/terraform-provider-databricks/common" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) type awsIamPolicy struct { @@ -31,6 +32,13 @@ func DataAwsAssumeRolePolicy() common.Resource { return common.Resource{ Read: func(ctx context.Context, d *schema.ResourceData, m *common.DatabricksClient) error { externalID := d.Get("external_id").(string) + awsPartition := d.Get("aws_partition").(string) + databricksAwsAccountId := d.Get("databricks_account_id").(string) + + if databricksAwsAccountId == "" { + databricksAwsAccountId = AwsConfig[awsPartition]["accountId"] + } + policy := awsIamPolicy{ Version: "2012-10-17", Statements: []*awsIamPolicyStatement{ @@ -43,16 +51,14 @@ func DataAwsAssumeRolePolicy() common.Resource { }, }, Principal: map[string]string{ - "AWS": fmt.Sprintf("arn:aws:iam::%s:root", d.Get("databricks_account_id").(string)), + "AWS": fmt.Sprintf("arn:%s:iam::%s:root", awsPartition, databricksAwsAccountId), }, }, }, } if v, ok := d.GetOk("for_log_delivery"); ok { if v.(bool) { - // this is production UsageDelivery IAM role, that is considered a constant - logDeliveryARN := "arn:aws:iam::414351767826:role/SaasUsageDeliveryRole-prod-IAMRole-3PLHICCRR1TK" - policy.Statements[0].Principal["AWS"] = logDeliveryARN + policy.Statements[0].Principal["AWS"] = AwsConfig[awsPartition]["logDeliveryIamArn"] } } policyJSON, err := json.MarshalIndent(policy, "", " ") @@ -65,10 +71,16 @@ func DataAwsAssumeRolePolicy() common.Resource { return nil }, Schema: map[string]*schema.Schema{ + "aws_partition": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(AwsPartitions, false), + Default: "aws", + }, "databricks_account_id": { - Type: schema.TypeString, - Default: "414351767826", - Optional: true, + Type: schema.TypeString, + Optional: true, + Deprecated: "databricks_account_id will be will be removed in the next major release.", }, "for_log_delivery": { 
Type: schema.TypeBool, diff --git a/aws/data_aws_assume_role_policy_test.go b/aws/data_aws_assume_role_policy_test.go index 7322660420..f4a1fa1998 100644 --- a/aws/data_aws_assume_role_policy_test.go +++ b/aws/data_aws_assume_role_policy_test.go @@ -19,3 +19,52 @@ func TestDataAwsAssumeRolePolicy(t *testing.T) { j := d.Get("json") assert.Lenf(t, j, 299, "Strange length for policy: %s", j) } + +func TestDataAwsAssumeRolePolicyGov(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsAssumeRolePolicy(), + NonWritable: true, + ID: ".", + HCL: ` + aws_partition = "aws-us-gov" + external_id = "abc" + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json") + assert.Lenf(t, j, 306, "Strange length for policy: %s", j) +} + +func TestDataAwsAssumeRolePolicyLogDelivery(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsAssumeRolePolicy(), + NonWritable: true, + ID: ".", + HCL: ` + external_id = "abc" + for_log_delivery = true + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json") + assert.Lenf(t, j, 347, "Strange length for policy: %s", j) +} + +func TestDataAwsAssumeRolePolicyLogDeliveryGov(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsAssumeRolePolicy(), + NonWritable: true, + ID: ".", + HCL: ` + aws_partition = "aws-us-gov" + external_id = "abc" + for_log_delivery = true + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json") + assert.Lenf(t, j, 362, "Strange length for policy: %s", j) +} diff --git a/aws/data_aws_bucket_policy.go b/aws/data_aws_bucket_policy.go index b1cc42a8f4..dc8394d85e 100644 --- a/aws/data_aws_bucket_policy.go +++ b/aws/data_aws_bucket_policy.go @@ -16,6 +16,13 @@ func DataAwsBucketPolicy() common.Resource { return common.Resource{ Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { bucket := d.Get("bucket").(string) + awsPartition := d.Get("aws_partition").(string) + databricksAwsAccountId := AwsConfig[awsPartition]["accountId"] + + if databricksAwsAccountId == "" { + databricksAwsAccountId = AwsConfig[awsPartition]["accountId"] + } + policy := awsIamPolicy{ Version: "2012-10-17", Statements: []*awsIamPolicyStatement{ @@ -30,11 +37,11 @@ func DataAwsBucketPolicy() common.Resource { "s3:GetBucketLocation", }, Resources: []string{ - fmt.Sprintf("arn:aws:s3:::%s/*", bucket), - fmt.Sprintf("arn:aws:s3:::%s", bucket), + fmt.Sprintf("arn:%s:s3:::%s/*", awsPartition, bucket), + fmt.Sprintf("arn:%s:s3:::%s", awsPartition, bucket), }, Principal: map[string]string{ - "AWS": fmt.Sprintf("arn:aws:iam::%s:root", d.Get("databricks_account_id").(string)), + "AWS": fmt.Sprintf("arn:%s:iam::%s:root", awsPartition, databricksAwsAccountId), }, }, }, @@ -60,10 +67,16 @@ func DataAwsBucketPolicy() common.Resource { return nil }, Schema: map[string]*schema.Schema{ + "aws_partition": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(AwsPartitions, false), + Default: "aws", + }, "databricks_account_id": { - Type: schema.TypeString, - Default: "414351767826", - Optional: true, + Type: schema.TypeString, + Optional: true, + Deprecated: "databricks_account_id will be will be removed in the next major release.", }, "databricks_e2_account_id": { Type: schema.TypeString, diff --git a/aws/data_aws_bucket_policy_test.go b/aws/data_aws_bucket_policy_test.go index 75f3a13645..5ec6c763b2 100644 --- a/aws/data_aws_bucket_policy_test.go +++ b/aws/data_aws_bucket_policy_test.go @@ -53,3 +53,19 @@ func 
TestDataAwsBucketPolicyConfusedDeputyProblem(t *testing.T) { j := d.Get("json") assert.Lenf(t, j, 575, "Strange length for policy: %s", j) } + +func TestDataAwsBucketPolicyPartitionGov(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsBucketPolicy(), + NonWritable: true, + ID: ".", + HCL: ` + bucket = "abc" + aws_partition = "aws-us-gov" + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json") + assert.Lenf(t, j, 461, "Strange length for policy: %s", j) +} diff --git a/aws/data_aws_crossaccount_policy.go b/aws/data_aws_crossaccount_policy.go index a5da5d9365..46ff5a6b9e 100644 --- a/aws/data_aws_crossaccount_policy.go +++ b/aws/data_aws_crossaccount_policy.go @@ -3,6 +3,7 @@ package aws import ( "context" "encoding/json" + "errors" "fmt" "regexp" "slices" @@ -17,11 +18,16 @@ func DataAwsCrossaccountPolicy() common.Resource { PassRole []string `json:"pass_roles,omitempty"` JSON string `json:"json" tf:"computed"` AwsAccountId string `json:"aws_account_id,omitempty"` + AwsPartition string `json:"aws_partition,omitempty" tf:"default:aws"` VpcId string `json:"vpc_id,omitempty"` Region string `json:"region,omitempty"` SecurityGroupId string `json:"security_group_id,omitempty"` } return common.NoClientData(func(ctx context.Context, data *AwsCrossAccountPolicy) error { + if !slices.Contains(AwsPartitions, data.AwsPartition) { + return errors.New(AwsPartitionsValidationError) + } + if !slices.Contains([]string{"managed", "customer", "restricted"}, data.PolicyType) { return fmt.Errorf("policy_type must be either 'managed', 'customer' or 'restricted'") } @@ -145,7 +151,7 @@ func DataAwsCrossaccountPolicy() common.Resource { "iam:CreateServiceLinkedRole", "iam:PutRolePolicy", }, - Resources: "arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot", + Resources: fmt.Sprintf("arn:%s:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot", data.AwsPartition), Condition: map[string]map[string]string{ "StringLike": { "iam:AWSServiceName": "spot.amazonaws.com", @@ -168,6 +174,7 @@ func DataAwsCrossaccountPolicy() common.Resource { if data.PolicyType == "restricted" { region := data.Region aws_account_id := data.AwsAccountId + awsPartition := data.AwsPartition vpc_id := data.VpcId security_group_id := data.SecurityGroupId policy.Statements = append(policy.Statements, @@ -179,7 +186,7 @@ func DataAwsCrossaccountPolicy() common.Resource { "ec2:DisassociateIamInstanceProfile", "ec2:ReplaceIamInstanceProfileAssociation", }, - Resources: fmt.Sprintf("arn:aws:ec2:%s:%s:instance/*", region, aws_account_id), + Resources: fmt.Sprintf("arn:%s:ec2:%s:%s:instance/*", awsPartition, region, aws_account_id), Condition: map[string]map[string]string{ "StringEquals": { "ec2:ResourceTag/Vendor": "Databricks", @@ -191,8 +198,8 @@ func DataAwsCrossaccountPolicy() common.Resource { Effect: "Allow", Actions: "ec2:RunInstances", Resources: []string{ - fmt.Sprintf("arn:aws:ec2:%s:%s:volume/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:instance/*", region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:volume/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:instance/*", awsPartition, region, aws_account_id), }, Condition: map[string]map[string]string{ "StringEquals": { @@ -204,7 +211,7 @@ func DataAwsCrossaccountPolicy() common.Resource { Sid: "AllowEc2RunInstanceImagePerTag", Effect: "Allow", Actions: "ec2:RunInstances", - Resources: fmt.Sprintf("arn:aws:ec2:%s:%s:image/*", region, aws_account_id), + 
Resources: fmt.Sprintf("arn:%s:ec2:%s:%s:image/*", awsPartition, region, aws_account_id), Condition: map[string]map[string]string{ "StringEquals": { "aws:ResourceTag/Vendor": "Databricks", @@ -216,13 +223,13 @@ func DataAwsCrossaccountPolicy() common.Resource { Effect: "Allow", Actions: "ec2:RunInstances", Resources: []string{ - fmt.Sprintf("arn:aws:ec2:%s:%s:network-interface/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:subnet/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:security-group/*", region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:network-interface/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:subnet/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:security-group/*", awsPartition, region, aws_account_id), }, Condition: map[string]map[string]string{ "StringEquals": { - "ec2:vpc": fmt.Sprintf("arn:aws:ec2:%s:%s:vpc/%s", region, aws_account_id, vpc_id), + "ec2:vpc": fmt.Sprintf("arn:%s:ec2:%s:%s:vpc/%s", awsPartition, region, aws_account_id, vpc_id), }, }, }, @@ -231,19 +238,19 @@ func DataAwsCrossaccountPolicy() common.Resource { Effect: "Allow", Actions: "ec2:RunInstances", NotResources: []string{ - fmt.Sprintf("arn:aws:ec2:%s:%s:image/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:network-interface/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:subnet/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:security-group/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:volume/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:instance/*", region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:image/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:network-interface/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:subnet/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:security-group/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:volume/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:instance/*", awsPartition, region, aws_account_id), }, }, &awsIamPolicyStatement{ Sid: "EC2TerminateInstancesTag", Effect: "Allow", Actions: "ec2:TerminateInstances", - Resources: fmt.Sprintf("arn:aws:ec2:%s:%s:instance/*", region, aws_account_id), + Resources: fmt.Sprintf("arn:%s:ec2:%s:%s:instance/*", awsPartition, region, aws_account_id), Condition: map[string]map[string]string{ "StringEquals": { "ec2:ResourceTag/Vendor": "Databricks", @@ -258,8 +265,8 @@ func DataAwsCrossaccountPolicy() common.Resource { "ec2:DetachVolume", }, Resources: []string{ - fmt.Sprintf("arn:aws:ec2:%s:%s:instance/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:volume/*", region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:instance/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:volume/*", awsPartition, region, aws_account_id), }, Condition: map[string]map[string]string{ "StringEquals": { @@ -271,7 +278,7 @@ func DataAwsCrossaccountPolicy() common.Resource { Sid: "EC2CreateVolumeByTag", Effect: "Allow", Actions: "ec2:CreateVolume", - Resources: fmt.Sprintf("arn:aws:ec2:%s:%s:volume/*", region, aws_account_id), + Resources: fmt.Sprintf("arn:%s:ec2:%s:%s:volume/*", awsPartition, region, aws_account_id), Condition: map[string]map[string]string{ "StringEquals": { "aws:RequestTag/Vendor": "Databricks", @@ -283,7 +290,7 @@ func DataAwsCrossaccountPolicy() common.Resource { Effect: "Allow", Actions: 
"ec2:DeleteVolume", Resources: []string{ - fmt.Sprintf("arn:aws:ec2:%s:%s:volume/*", region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:volume/*", awsPartition, region, aws_account_id), }, Condition: map[string]map[string]string{ "StringEquals": { @@ -300,10 +307,10 @@ func DataAwsCrossaccountPolicy() common.Resource { "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress", }, - Resources: fmt.Sprintf("arn:aws:ec2:%s:%s:security-group/%s", region, aws_account_id, security_group_id), + Resources: fmt.Sprintf("arn:%s:ec2:%s:%s:security-group/%s", awsPartition, region, aws_account_id, security_group_id), Condition: map[string]map[string]string{ "StringEquals": { - "ec2:vpc": fmt.Sprintf("arn:aws:ec2:%s:%s:vpc/%s", region, aws_account_id, vpc_id), + "ec2:vpc": fmt.Sprintf("arn:%s:ec2:%s:%s:vpc/%s", awsPartition, region, aws_account_id, vpc_id), }, }, }, diff --git a/aws/data_aws_crossaccount_policy_test.go b/aws/data_aws_crossaccount_policy_test.go index 177cb166e9..6832807be5 100644 --- a/aws/data_aws_crossaccount_policy_test.go +++ b/aws/data_aws_crossaccount_policy_test.go @@ -530,6 +530,25 @@ func TestDataAwsCrossAccountRestrictedPolicy(t *testing.T) { assert.Lenf(t, j, 5725, "Strange length for policy: %s", j) } +func TestDataAwsCrossAccountRestrictedPolicyPartitionGov(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: ` + policy_type = "restricted" + aws_account_id = "123456789012" + aws_partition = "aws-us-gov" + vpc_id = "vpc-12345678" + region = "us-west-2" + security_group_id = "sg-12345678"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json") + assert.Lenf(t, j, 5879, "Strange length for policy: %s", j) +} + func TestDataAwsCrossAccountInvalidPolicy(t *testing.T) { qa.ResourceFixture{ Read: true, @@ -552,6 +571,16 @@ func TestDataAwsCrossAccountInvalidAccountId(t *testing.T) { }.ExpectError(t, "aws_account_id must be a 12 digit number") } +func TestDataAwsCrossAccountInvalidPartition(t *testing.T) { + qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `aws_partition = "something"`, + ID: ".", + }.ExpectError(t, AwsPartitionsValidationError) +} + func TestDataAwsCrossAccountInvalidVpcId(t *testing.T) { qa.ResourceFixture{ Read: true, diff --git a/aws/data_aws_unity_catalog_assume_role_policy.go b/aws/data_aws_unity_catalog_assume_role_policy.go index d4706bdca5..a90ab98505 100644 --- a/aws/data_aws_unity_catalog_assume_role_policy.go +++ b/aws/data_aws_unity_catalog_assume_role_policy.go @@ -3,7 +3,9 @@ package aws import ( "context" "encoding/json" + "errors" "fmt" + "slices" "github.com/databricks/terraform-provider-databricks/common" ) @@ -14,13 +16,19 @@ func DataAwsUnityCatalogAssumeRolePolicy() common.Resource { UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty" tf:"computed"` ExternalId string `json:"external_id"` AwsAccountId string `json:"aws_account_id"` + AwsPartition string `json:"aws_partition,omitempty" tf:"default:aws"` JSON string `json:"json" tf:"computed"` Id string `json:"id" tf:"computed"` } return common.NoClientData(func(ctx context.Context, data *AwsUcAssumeRolePolicy) error { + if !slices.Contains(AwsPartitions, data.AwsPartition) { + return errors.New(AwsPartitionsValidationError) + } + if data.UnityCatalogIamArn == "" { - data.UnityCatalogIamArn = "arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL" + data.UnityCatalogIamArn = 
AwsConfig[data.AwsPartition]["unityCatalogueIamArn"] } + policy := awsIamPolicy{ Version: "2012-10-17", Statements: []*awsIamPolicyStatement{ @@ -43,11 +51,11 @@ func DataAwsUnityCatalogAssumeRolePolicy() common.Resource { Actions: "sts:AssumeRole", Condition: map[string]map[string]string{ "ArnLike": { - "aws:PrincipalArn": fmt.Sprintf("arn:aws:iam::%s:role/%s", data.AwsAccountId, data.RoleName), + "aws:PrincipalArn": fmt.Sprintf("arn:%s:iam::%s:role/%s", data.AwsPartition, data.AwsAccountId, data.RoleName), }, }, Principal: map[string]string{ - "AWS": fmt.Sprintf("arn:aws:iam::%s:root", data.AwsAccountId), + "AWS": fmt.Sprintf("arn:%s:iam::%s:root", data.AwsPartition, data.AwsAccountId), }, }, }, diff --git a/aws/data_aws_unity_catalog_assume_role_policy_test.go b/aws/data_aws_unity_catalog_assume_role_policy_test.go index 30c1d89f2d..4f2da8932e 100644 --- a/aws/data_aws_unity_catalog_assume_role_policy_test.go +++ b/aws/data_aws_unity_catalog_assume_role_policy_test.go @@ -103,3 +103,68 @@ func TestDataAwsUnityCatalogAssumeRolePolicyWithoutUcArn(t *testing.T) { }` compareJSON(t, j, p) } + +func TestDataAwsUnityCatalogAssumeRolePolicyGovWithoutUcArn(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsUnityCatalogAssumeRolePolicy(), + NonWritable: true, + ID: ".", + HCL: ` + aws_account_id = "123456789098" + aws_partition = "aws-us-gov" + role_name = "databricks-role" + external_id = "12345" + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json").(string) + p := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "UnityCatalogAssumeRole", + "Effect": "Allow", + "Action": "sts:AssumeRole", + "Principal": { + "AWS": "arn:aws-us-gov:iam::044793339203:role/unity-catalog-prod-UCMasterRole-1QRFA8SGY15OJ" + }, + "Condition": { + "StringEquals": { + "sts:ExternalId": "12345" + } + } + }, + { + "Sid": "ExplicitSelfRoleAssumption", + "Effect": "Allow", + "Action": "sts:AssumeRole", + "Principal": { + "AWS": "arn:aws-us-gov:iam::123456789098:root" + }, + "Condition": { + "ArnLike": { + "aws:PrincipalArn": "arn:aws-us-gov:iam::123456789098:role/databricks-role" + } + } + } + ] + }` + compareJSON(t, j, p) +} + +func TestDataAwsUnityCatalogAssumeRolePolicyInvalidPartition(t *testing.T) { + qa.ResourceFixture{ + Read: true, + Resource: DataAwsUnityCatalogAssumeRolePolicy(), + NonWritable: true, + ID: ".", + HCL: ` + aws_account_id = "123456789098" + aws_partition = "something" + role_name = "databricks-role" + unity_catalog_iam_arn = "arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL" + external_id = "12345" + `, + }.ExpectError(t, AwsPartitionsValidationError) +} diff --git a/aws/data_aws_unity_catalog_policy.go b/aws/data_aws_unity_catalog_policy.go index d332b84348..5dbc565b38 100644 --- a/aws/data_aws_unity_catalog_policy.go +++ b/aws/data_aws_unity_catalog_policy.go @@ -15,6 +15,7 @@ import ( func generateReadContext(ctx context.Context, d *schema.ResourceData, m *common.DatabricksClient) error { bucket := d.Get("bucket_name").(string) awsAccountId := d.Get("aws_account_id").(string) + awsPartition := d.Get("aws_partition").(string) roleName := d.Get("role_name").(string) policy := awsIamPolicy{ Version: "2012-10-17", @@ -29,8 +30,8 @@ func generateReadContext(ctx context.Context, d *schema.ResourceData, m *common. 
"s3:GetBucketLocation", }, Resources: []string{ - fmt.Sprintf("arn:aws:s3:::%s/*", bucket), - fmt.Sprintf("arn:aws:s3:::%s", bucket), + fmt.Sprintf("arn:%s:s3:::%s/*", awsPartition, bucket), + fmt.Sprintf("arn:%s:s3:::%s", awsPartition, bucket), }, }, { @@ -39,14 +40,14 @@ func generateReadContext(ctx context.Context, d *schema.ResourceData, m *common. "sts:AssumeRole", }, Resources: []string{ - fmt.Sprintf("arn:aws:iam::%s:role/%s", awsAccountId, roleName), + fmt.Sprintf("arn:%s:iam::%s:role/%s", awsPartition, awsAccountId, roleName), }, }, }, } if kmsKey, ok := d.GetOk("kms_name"); ok { - kmsArn := fmt.Sprintf("arn:aws:kms:%s", kmsKey) - if strings.HasPrefix(kmsKey.(string), "arn:aws") { + kmsArn := fmt.Sprintf("arn:%s:kms:%s", awsPartition, kmsKey) + if strings.HasPrefix(kmsKey.(string), fmt.Sprintf("arn:%s", awsPartition)) { kmsArn = kmsKey.(string) } policy.Statements = append(policy.Statements, &awsIamPolicyStatement{ @@ -92,6 +93,12 @@ func validateSchema() map[string]*schema.Schema { Type: schema.TypeString, Required: true, }, + "aws_partition": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(AwsPartitions, false), + Default: "aws", + }, "json": { Type: schema.TypeString, Computed: true, diff --git a/aws/data_aws_unity_catalog_policy_test.go b/aws/data_aws_unity_catalog_policy_test.go index 28a45a4f16..6ca159e290 100644 --- a/aws/data_aws_unity_catalog_policy_test.go +++ b/aws/data_aws_unity_catalog_policy_test.go @@ -167,6 +167,64 @@ func TestDataAwsUnityCatalogPolicyWithoutKMS(t *testing.T) { compareJSON(t, j, p) } +func TestDataAwsUnityCatalogPolicyPartionGov(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsUnityCatalogPolicy(), + NonWritable: true, + ID: ".", + HCL: ` + aws_account_id = "123456789098" + aws_partition = "aws-us-gov" + bucket_name = "databricks-bucket" + role_name = "databricks-role" + kms_name = "databricks-kms" + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json").(string) + p := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucket", + "s3:GetBucketLocation" + ], + "Resource": [ + "arn:aws-us-gov:s3:::databricks-bucket/*", + "arn:aws-us-gov:s3:::databricks-bucket" + ] + }, + { + "Effect": "Allow", + "Action": [ + "sts:AssumeRole" + ], + "Resource": [ + "arn:aws-us-gov:iam::123456789098:role/databricks-role" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:Encrypt", + "kms:GenerateDataKey*" + ], + "Resource": [ + "arn:aws-us-gov:kms:databricks-kms" + ] + } + ] + }` + compareJSON(t, j, p) +} + func compareJSON(t *testing.T, json1 string, json2 string) { var i1 interface{} var i2 interface{} diff --git a/docs/data-sources/aws_assume_role_policy.md b/docs/data-sources/aws_assume_role_policy.md index 73d6fb0e11..d46f3520b7 100644 --- a/docs/data-sources/aws_assume_role_policy.md +++ b/docs/data-sources/aws_assume_role_policy.md @@ -49,6 +49,7 @@ resource "databricks_mws_credentials" "this" { ## Argument Reference * `external_id` (Required) Account Id that could be found in the top right corner of [Accounts Console](https://accounts.cloud.databricks.com/). +* `aws_partition` - (Optional) AWS partition. The options are `aws` or `aws-us-gov`. Defaults to `aws` * `for_log_delivery` (Optional) Either or not this assume role policy should be created for usage log delivery. Defaults to false. 
## Attribute Reference diff --git a/docs/data-sources/aws_bucket_policy.md b/docs/data-sources/aws_bucket_policy.md index e42949e06a..f2e99edc2a 100644 --- a/docs/data-sources/aws_bucket_policy.md +++ b/docs/data-sources/aws_bucket_policy.md @@ -75,6 +75,7 @@ resource "aws_s3_bucket_policy" "ds" { ## Argument Reference * `bucket` - (Required) AWS S3 Bucket name for which to generate the policy document. +* `aws_partition` - (Optional) AWS partition. The options are `aws` or `aws-us-gov`. Defaults to `aws` * `full_access_role` - (Optional) Data access role that can have full access for this bucket * `databricks_e2_account_id` - (Optional) Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket diff --git a/docs/data-sources/aws_crossaccount_policy.md b/docs/data-sources/aws_crossaccount_policy.md index 715cf59b15..883bd1b490 100644 --- a/docs/data-sources/aws_crossaccount_policy.md +++ b/docs/data-sources/aws_crossaccount_policy.md @@ -21,6 +21,7 @@ data "databricks_aws_crossaccount_policy" "this" {} * `pass_roles` (Optional) (List) List of Data IAM role ARNs that are explicitly granted `iam:PassRole` action. The below arguments are only valid for `restricted` policy type * `aws_account_id` — Your AWS account ID, which is a number. +* `aws_partition` - (Optional) AWS partition. The options are `aws` or `aws-us-gov`. Defaults to `aws` * `vpc_id` — ID of the AWS VPC where you want to launch workspaces. * `region` — AWS Region name for your VPC deployment, for example `us-west-2`. * `security_group_id` — ID of your AWS security group. When you add a security group restriction, you cannot reuse the cross-account IAM role or reference a credentials ID (`credentials_id`) for any other workspaces. For those other workspaces, you must create separate roles, policies, and credentials objects. diff --git a/docs/data-sources/aws_unity_catalog_assume_role_policy.md b/docs/data-sources/aws_unity_catalog_assume_role_policy.md index c5f66ddec9..1619855ca9 100644 --- a/docs/data-sources/aws_unity_catalog_assume_role_policy.md +++ b/docs/data-sources/aws_unity_catalog_assume_role_policy.md @@ -38,9 +38,10 @@ resource "aws_iam_role" "metastore_data_access" { ## Argument Reference * `aws_account_id` (Required) The Account ID of the current AWS account (not your Databricks account). +* `aws_partition` - (Optional) AWS partition. The options are `aws` or `aws-us-gov`. Defaults to `aws` * `external_id` (Required) The [storage credential](../resources/storage_credential.md) external id. * `role_name` (Required) The name of the AWS IAM role to be created for Unity Catalog. -* `unity_catalog_iam_arn` (Optional) The Databricks Unity Catalog IAM Role ARN. Defaults to `arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL` +* `unity_catalog_iam_arn` (Optional) The Databricks Unity Catalog IAM Role ARN. 
Defaults to `arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL` on standard AWS partition selection and `arn:aws-us-gov:iam::044793339203:role/unity-catalog-prod-UCMasterRole-1QRFA8SGY15OJ` on GovCloud partition selection ## Attribute Reference diff --git a/docs/data-sources/aws_unity_catalog_policy.md b/docs/data-sources/aws_unity_catalog_policy.md index 3804b1d5fa..2e65039d57 100644 --- a/docs/data-sources/aws_unity_catalog_policy.md +++ b/docs/data-sources/aws_unity_catalog_policy.md @@ -38,6 +38,7 @@ resource "aws_iam_role" "metastore_data_access" { ## Argument Reference * `aws_account_id` (Required) The Account ID of the current AWS account (not your Databricks account). +* `aws_partition` - (Optional) AWS partition. The options are `aws` or `aws-us-gov`. Defaults to `aws` * `bucket_name` (Required) The name of the S3 bucket used as root storage location for [managed tables](https://docs.databricks.com/data-governance/unity-catalog/index.html#managed-table) in Unity Catalog. * `role_name` (Required) The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). * `kms_name` (Optional) If encryption is enabled, provide the ARN of the KMS key that encrypts the S3 bucket contents. If encryption is disabled, do not provide this argument. From 9178630bda6a4163c6248758263f607c8b9a2ee5 Mon Sep 17 00:00:00 2001 From: Brandon Grams <21249739+bgrams@users.noreply.github.com> Date: Tue, 12 Nov 2024 07:22:09 -0600 Subject: [PATCH 07/10] [Fix] Use cluster list API to determine pinned cluster status (#4203) ## Changes Modify `setPinnedStatus` to use the clusters list API internally for determining the pinning status of a cluster. The existing implementation using the cluster events API subjects the resource to drift as events expire after a period of time. Closes #3616 ## Tests * Coverage added to `TestResourceClusterCreate` and `TestResourceClusterCreatePinned`. * Fixtures modified to mock the necessary API calls in all other relevant tests. 
* `TestAccClusterResource_PinAndUnpin` acceptance test added - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- clusters/resource_cluster.go | 27 +-- clusters/resource_cluster_test.go | 320 ++++------------------------ exporter/exporter_test.go | 44 +--- internal/acceptance/cluster_test.go | 22 +- 4 files changed, 81 insertions(+), 332 deletions(-) diff --git a/clusters/resource_cluster.go b/clusters/resource_cluster.go index 3c03502023..a6100ae071 100644 --- a/clusters/resource_cluster.go +++ b/clusters/resource_cluster.go @@ -513,20 +513,23 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, c *commo } func setPinnedStatus(ctx context.Context, d *schema.ResourceData, clusterAPI compute.ClustersInterface) error { - events, err := clusterAPI.EventsAll(ctx, compute.GetEvents{ - ClusterId: d.Id(), - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, + clusterDetails := clusterAPI.List(ctx, compute.ListClustersRequest{ + FilterBy: &compute.ListClustersFilterBy{ + IsPinned: true, + }, + PageSize: 100, // pinned cluster limit - just get all of them }) - if err != nil { - return err - } - pinnedEvent := compute.EventTypeUnpinned - if len(events) > 0 { - pinnedEvent = events[0].Type + + for clusterDetails.HasNext(ctx) { + detail, err := clusterDetails.Next(ctx) + if err != nil { + return err + } + if detail.ClusterId == d.Id() { + return d.Set("is_pinned", true) + } } - return d.Set("is_pinned", pinnedEvent == compute.EventTypePinned) + return d.Set("is_pinned", false) } func resourceClusterRead(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { diff --git a/clusters/resource_cluster_test.go b/clusters/resource_cluster_test.go index 240b62cb4e..1ef37126d7 100644 --- a/clusters/resource_cluster_test.go +++ b/clusters/resource_cluster_test.go @@ -12,9 +12,18 @@ import ( "github.com/stretchr/testify/require" ) +var nothingPinned = qa.HTTPFixture{ + Method: "GET", + Resource: "/api/2.1/clusters/list?filter_by.is_pinned=true&page_size=100", + Response: compute.ListClustersResponse{ + Clusters: []compute.ClusterDetails{}, + }, +} + func TestResourceClusterCreate(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "POST", Resource: "/api/2.1/clusters/create", @@ -44,20 +53,6 @@ func TestResourceClusterCreate(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "GET", Resource: "/api/2.0/libraries/cluster-status?cluster_id=abc", @@ -79,6 +74,7 @@ func TestResourceClusterCreate(t *testing.T) { }.Apply(t) assert.NoError(t, err) assert.Equal(t, "abc", d.Id()) + assert.Equal(t, false, d.Get("is_pinned")) } func TestResourceClusterCreatePinned(t *testing.T) { @@ -128,24 +124,18 @@ func TestResourceClusterCreatePinned(t *testing.T) { }, }, { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: 
[]compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{ - { - ClusterId: "abc", - Timestamp: int64(123), - Type: compute.EventTypePinned, - Details: &compute.EventDetails{}, - }, - }, - TotalCount: 1, + Method: "GET", + Resource: "/api/2.1/clusters/list?filter_by.is_pinned=true&page_size=100", + Response: compute.ListClustersResponse{ + Clusters: []compute.ClusterDetails{{ + ClusterId: "abc", + NumWorkers: 100, + ClusterName: "Shared Autoscaling", + SparkVersion: "7.1-scala12", + NodeTypeId: "i3.xlarge", + AutoterminationMinutes: 15, + State: compute.StateRunning, + }}, }, }, }, @@ -162,6 +152,7 @@ func TestResourceClusterCreatePinned(t *testing.T) { }.Apply(t) assert.NoError(t, err) assert.Equal(t, "abc", d.Id()) + assert.Equal(t, true, d.Get("is_pinned")) } func TestResourceClusterCreateErrorFollowedByDeletion(t *testing.T) { @@ -278,6 +269,7 @@ func TestResourceClusterCreateErrorFollowedByDeletionError(t *testing.T) { func TestResourceClusterCreate_WithLibraries(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "POST", Resource: "/api/2.1/clusters/create", @@ -306,20 +298,6 @@ func TestResourceClusterCreate_WithLibraries(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.0/libraries/install", @@ -474,6 +452,7 @@ func TestResourceClusterCreate_WithLibraries(t *testing.T) { func TestResourceClusterCreatePhoton(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "POST", Resource: "/api/2.1/clusters/create", @@ -505,20 +484,6 @@ func TestResourceClusterCreatePhoton(t *testing.T) { RuntimeEngine: "PHOTON", }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "GET", Resource: "/api/2.0/libraries/cluster-status?cluster_id=abc", @@ -546,6 +511,7 @@ func TestResourceClusterCreatePhoton(t *testing.T) { func TestResourceClusterCreateNoWait_WithLibraries(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "POST", Resource: "/api/2.1/clusters/create", @@ -573,20 +539,6 @@ func TestResourceClusterCreateNoWait_WithLibraries(t *testing.T) { State: compute.StateUnknown, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.0/libraries/install", @@ -638,6 +590,7 @@ func TestResourceClusterCreateNoWait_WithLibraries(t *testing.T) { func TestResourceClusterCreateNoWait(t *testing.T) { d, err := 
qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "POST", Resource: "/api/2.1/clusters/create", @@ -667,20 +620,6 @@ func TestResourceClusterCreateNoWait(t *testing.T) { State: compute.StateUnknown, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, }, Create: true, Resource: ResourceCluster(), @@ -728,6 +667,7 @@ func TestResourceClusterCreate_Error(t *testing.T) { func TestResourceClusterRead(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -744,20 +684,6 @@ func TestResourceClusterRead(t *testing.T) { }, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, }, Resource: ResourceCluster(), Read: true, @@ -825,6 +751,7 @@ func TestResourceClusterRead_Error(t *testing.T) { func TestResourceClusterUpdate_ResizeForAutoscalingToNumWorkersCluster(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -842,20 +769,6 @@ func TestResourceClusterUpdate_ResizeForAutoscalingToNumWorkersCluster(t *testin State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/resize", @@ -892,6 +805,7 @@ func TestResourceClusterUpdate_ResizeForAutoscalingToNumWorkersCluster(t *testin func TestResourceClusterUpdate_ResizeForNumWorkersToAutoscalingCluster(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -906,20 +820,6 @@ func TestResourceClusterUpdate_ResizeForNumWorkersToAutoscalingCluster(t *testin State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/resize", @@ -959,6 +859,7 @@ func TestResourceClusterUpdate_ResizeForNumWorkersToAutoscalingCluster(t *testin func TestResourceClusterUpdate_EditNumWorkersWhenClusterTerminated(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -973,20 +874,6 @@ func TestResourceClusterUpdate_EditNumWorkersWhenClusterTerminated(t *testing.T) State: compute.StateTerminated, 
}, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/edit", @@ -1023,6 +910,7 @@ func TestResourceClusterUpdate_EditNumWorkersWhenClusterTerminated(t *testing.T) func TestResourceClusterUpdate_ResizeAutoscale(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -1049,20 +937,6 @@ func TestResourceClusterUpdate_ResizeAutoscale(t *testing.T) { }, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, }, ID: "abc", Update: true, @@ -1094,6 +968,7 @@ func TestResourceClusterUpdate_ResizeAutoscale(t *testing.T) { func TestResourceClusterUpdate_ResizeNumWorkers(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -1108,20 +983,6 @@ func TestResourceClusterUpdate_ResizeNumWorkers(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/resize", @@ -1154,6 +1015,7 @@ func TestResourceClusterUpdate_ResizeNumWorkers(t *testing.T) { func TestResourceClusterUpdate(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -1168,20 +1030,6 @@ func TestResourceClusterUpdate(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/start", @@ -1234,6 +1082,7 @@ func TestResourceClusterUpdate(t *testing.T) { func TestResourceClusterUpdate_WhileScaling(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -1248,20 +1097,6 @@ func TestResourceClusterUpdate_WhileScaling(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - 
TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/start", @@ -1330,6 +1165,7 @@ func TestResourceClusterUpdate_WhileScaling(t *testing.T) { func TestResourceClusterUpdateWithPinned(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -1344,20 +1180,6 @@ func TestResourceClusterUpdateWithPinned(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/start", @@ -1446,6 +1268,7 @@ func TestResourceClusterUpdate_LibrariesChangeOnTerminatedCluster(t *testing.T) } d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, terminated, // 1 of ... { Method: "POST", @@ -1492,20 +1315,6 @@ func TestResourceClusterUpdate_LibrariesChangeOnTerminatedCluster(t *testing.T) State: compute.StateTerminated, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { // start cluster before libs install Method: "POST", Resource: "/api/2.1/clusters/start", @@ -1613,6 +1422,7 @@ func TestResourceClusterUpdate_Error(t *testing.T) { func TestResourceClusterUpdate_AutoAz(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -1632,20 +1442,6 @@ func TestResourceClusterUpdate_AutoAz(t *testing.T) { }, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/start", @@ -1762,6 +1558,7 @@ func TestResourceClusterDelete_Error(t *testing.T) { func TestResourceClusterCreate_SingleNode(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "POST", Resource: "/api/2.1/clusters/create", @@ -1785,20 +1582,6 @@ func TestResourceClusterCreate_SingleNode(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "GET", ReuseRequest: true, @@ -1962,6 +1745,7 @@ func TestReadOnStoppedClusterWithLibrariesDoesNotFail(t *testing.T) { qa.ResourceFixture{ Resource: ResourceCluster(), Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=foo", @@ -1969,10 +1753,6 @@ func 
TestReadOnStoppedClusterWithLibrariesDoesNotFail(t *testing.T) { State: compute.StateTerminated, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - }, { Method: "GET", ReuseRequest: true, @@ -2007,10 +1787,6 @@ func TestRefreshOnRunningClusterWithFailedLibraryUninstallsIt(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - }, { Method: "GET", Resource: "/api/2.0/libraries/cluster-status?cluster_id=foo", @@ -2045,6 +1821,7 @@ func TestRefreshOnRunningClusterWithFailedLibraryUninstallsIt(t *testing.T) { }, }, }, + nothingPinned, }, Read: true, ID: "foo", @@ -2054,6 +1831,7 @@ func TestRefreshOnRunningClusterWithFailedLibraryUninstallsIt(t *testing.T) { func TestResourceClusterUpdate_LocalSsdCount(t *testing.T) { _, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -2071,20 +1849,6 @@ func TestResourceClusterUpdate_LocalSsdCount(t *testing.T) { }, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/edit", diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index ad485b9557..120538efd1 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -836,9 +836,12 @@ func TestImportingClusters(t *testing.T) { ReuseRequest: true, }, { - Method: "POST", - Resource: "/api/2.1/clusters/events", - Response: compute.GetEvents{}, + Method: "GET", + Resource: "/api/2.1/clusters/list?filter_by.is_pinned=true&page_size=100", + Response: compute.ListClustersResponse{ + Clusters: []compute.ClusterDetails{}, + }, + ReuseRequest: true, }, { Method: "GET", @@ -868,30 +871,6 @@ func TestImportingClusters(t *testing.T) { Resource: "/api/2.1/clusters/get?cluster_id=test2", Response: getJSONObject("test-data/get-cluster-test2-response.json"), }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "test2", - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - Limit: 1, - }, - Response: compute.EventDetails{}, - ReuseRequest: true, - }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "test1", - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - Limit: 1, - }, - Response: compute.EventDetails{}, - ReuseRequest: true, - }, { Method: "GET", Resource: "/api/2.0/libraries/cluster-status?cluster_id=test2", @@ -917,17 +896,6 @@ func TestImportingClusters(t *testing.T) { Resource: "/api/2.1/clusters/get?cluster_id=awscluster", Response: getJSONObject("test-data/get-cluster-awscluster-response.json"), }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "awscluster", - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - Limit: 1, - }, - Response: compute.EventDetails{}, - }, { Method: "GET", Resource: "/api/2.0/libraries/cluster-status?cluster_id=awscluster", diff --git 
a/internal/acceptance/cluster_test.go b/internal/acceptance/cluster_test.go index f399eece3f..bc1c4023d9 100644 --- a/internal/acceptance/cluster_test.go +++ b/internal/acceptance/cluster_test.go @@ -51,7 +51,7 @@ func TestAccClusterResource_CreateClusterWithLibraries(t *testing.T) { }) } -func singleNodeClusterTemplate(autoTerminationMinutes string) string { +func singleNodeClusterTemplate(autoTerminationMinutes string, isPinned bool) string { return fmt.Sprintf(` data "databricks_spark_version" "latest" { } @@ -61,6 +61,7 @@ func singleNodeClusterTemplate(autoTerminationMinutes string) string { instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" num_workers = 0 autotermination_minutes = %s + is_pinned = %t spark_conf = { "spark.databricks.cluster.profile" = "singleNode" "spark.master" = "local[*]" @@ -69,14 +70,14 @@ func singleNodeClusterTemplate(autoTerminationMinutes string) string { "ResourceClass" = "SingleNode" } } - `, autoTerminationMinutes) + `, autoTerminationMinutes, isPinned) } func TestAccClusterResource_CreateSingleNodeCluster(t *testing.T) { WorkspaceLevel(t, Step{ - Template: singleNodeClusterTemplate("10"), + Template: singleNodeClusterTemplate("10", false), }, Step{ - Template: singleNodeClusterTemplate("20"), + Template: singleNodeClusterTemplate("20", false), }) } @@ -176,6 +177,19 @@ func TestAccClusterResource_WorkloadType(t *testing.T) { }) } +func TestAccClusterResource_PinAndUnpin(t *testing.T) { + WorkspaceLevel(t, Step{ + Template: singleNodeClusterTemplate("10", false), + Check: resource.TestCheckResourceAttr("databricks_cluster.this", "is_pinned", "false"), + }, Step{ + Template: singleNodeClusterTemplate("10", true), + Check: resource.TestCheckResourceAttr("databricks_cluster.this", "is_pinned", "true"), + }, Step{ + Template: singleNodeClusterTemplate("10", false), + Check: resource.TestCheckResourceAttr("databricks_cluster.this", "is_pinned", "false"), + }) +} + func testAccClusterResourceWorkloadTypeTemplate(workloadType string) string { return fmt.Sprintf(` data "databricks_spark_version" "latest" {} From 2b381b07a2148ee4871e83ee321479da7a79cec2 Mon Sep 17 00:00:00 2001 From: ryan-gord-db <60911136+ryan-gord-db@users.noreply.github.com> Date: Tue, 12 Nov 2024 14:07:05 -0500 Subject: [PATCH 08/10] [Doc] Clarify workspace provider config (#4208) ## Changes Specifying the `account_id` field within a workspace provider block may cause an error when creating workspace resources (e.g. [#3495](https://github.com/databricks/terraform-provider-databricks/issues/3495)). This change explicitly calls out this misconfiguration in the docs. ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/guides/troubleshooting.md | 17 +++++++++++++++++ docs/index.md | 2 +- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/docs/guides/troubleshooting.md b/docs/guides/troubleshooting.md index dadd4a51c9..f5ed88c3ad 100644 --- a/docs/guides/troubleshooting.md +++ b/docs/guides/troubleshooting.md @@ -228,3 +228,20 @@ There could be different reasons for this error: ### Provider "registry.terraform.io/databricks/databricks" planned an invalid value for ...: planned value ... for a non-computed attribute. Starting with version v1.51.0, the Terraform provider for Databricks supports `terraform` versions 1.1.5 and later. Older versions of `terraform`, such as v0.15.5, are known to erroneously generate this error. 
Check the version of `terraform` that you're using by running `terraform version` and upgrade it if necessary. + +### Error: cannot create ....: invalid Databricks Account configuration + +`....` is the descriptive name of a resource such as `access control rule set`. The error occurs when creating a workspace resource with a provider containing the `account_id` argument e.g.: + +```hcl +provider "databricks" { + host = "https://.cloud.databricks.com" + client_id = "..." + client_secret = "..." + + # This line is the problem + account_id = "..." +} +``` + +Remove the `account_id` argument from the workspace provider to resolve the error. diff --git a/docs/index.md b/docs/index.md index 05081f57ff..f9e14b935b 100644 --- a/docs/index.md +++ b/docs/index.md @@ -237,7 +237,7 @@ Alternatively, you can provide this value as an environment variable `DATABRICKS * `config_file` - (optional) Location of the Databricks CLI credentials file created by `databricks configure --token` command (~/.databrickscfg by default). Check [Databricks CLI documentation](https://docs.databricks.com/dev-tools/cli/index.html#set-up-authentication) for more details. The provider uses configuration file credentials when you don't specify host/token/azure attributes. Alternatively, you can provide this value as an environment variable `DATABRICKS_CONFIG_FILE`. This field defaults to `~/.databrickscfg`. * `profile` - (optional) Connection profile specified within ~/.databrickscfg. Please check [connection profiles section](https://docs.databricks.com/dev-tools/cli/index.html#connection-profiles) for more details. This field defaults to `DEFAULT`. -* `account_id` - (optional for workspace-level operations, but required for account-level) Account Id that could be found in the top right corner of [Accounts Console](https://accounts.cloud.databricks.com/). Alternatively, you can provide this value as an environment variable `DATABRICKS_ACCOUNT_ID`. Only has effect when `host = "https://accounts.cloud.databricks.com/"`, and is currently used to provision account admins via [databricks_user](resources/user.md). In the future releases of the provider this property will also be used specify account for `databricks_mws_*` resources as well. +* `account_id` - (required for account-level operations) Account ID found in the top right corner of [Accounts Console](https://accounts.cloud.databricks.com/). Alternatively, you can provide this value as an environment variable `DATABRICKS_ACCOUNT_ID`. Only has effect when `host = "https://accounts.cloud.databricks.com/"`, and is currently used to provision account admins via [databricks_user](resources/user.md). **Note: do NOT use in the workspace-level provider to avoid `...invalid Databricks Account configuration` errors**. * `auth_type` - (optional) enforce specific auth type to be used in very rare cases, where a single Terraform state manages Databricks workspaces on more than one cloud and `more than one authorization method configured` error is a false positive. Valid values are `pat`, `basic`, `oauth-m2m`, `azure-client-secret`, `azure-msi`, `azure-cli`, `github-oidc-azure`, `google-credentials`, and `google-id`. 
## Special configurations for Azure From e3b25617d9c9357eccc10fba2eacbd26ac3e7118 Mon Sep 17 00:00:00 2001 From: zgcalebp <142928130+zgcalebp@users.noreply.github.com> Date: Wed, 13 Nov 2024 05:15:05 -0500 Subject: [PATCH 09/10] [Feature] Update databricks_permissions resource to support vector-search-endpoints (#4209) ## Changes Databricks permissions API has been updated to support `vector-search-endpoints`. This corresponding change to the permissions_definitions would enable the use of the `databricks_permissions` resource to manage ACLs for Vector Search Endpoints leveraging the existing APIs under the hood. Example CLI call confirming support: ``` databricks permissions get vector-search-endpoints {endpoint-id} --debug 18:56:25 INFO start pid=77800 version=0.224.1 args="databricks, permissions, get, vector-search-endpoints, {endpoint-id}, --debug" 18:56:25 INFO Ignoring pat auth, because databricks-cli is preferred pid=77800 sdk=true 18:56:25 INFO Ignoring basic auth, because databricks-cli is preferred pid=77800 sdk=true 18:56:25 INFO Ignoring oauth-m2m auth, because databricks-cli is preferred pid=77800 sdk=true 18:56:25 INFO Refreshed OAuth token from Databricks CLI, expires on 2024-11-11 19:00:53.515729 -0500 EST pid=77800 sdk=true 18:56:25 DEBUG Using Databricks CLI authentication with Databricks OAuth tokens pid=77800 sdk=true 18:56:25 INFO Refreshed OAuth token from Databricks CLI, expires on 2024-11-11 19:00:53.515729 -0500 EST pid=77800 sdk=true 18:56:26 DEBUG GET /api/2.0/permissions/vector-search-endpoints/{endpoint-id} < HTTP/2.0 200 OK < { < "access_control_list": [ .... ``` ## Tests - [X] `make test` run locally - [X] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [X] using Go SDK --------- Co-authored-by: Alex Ott --- docs/resources/permissions.md | 30 +++++++++++++++++++++++ internal/acceptance/permissions_test.go | 23 +++++++++++++++++ internal/acceptance/vector_search_test.go | 11 ++++++++- permissions/permission_definitions.go | 11 +++++++++ permissions/resource_permissions_test.go | 2 +- 5 files changed, 75 insertions(+), 2 deletions(-) diff --git a/docs/resources/permissions.md b/docs/resources/permissions.md index 868e2aa835..9696df577e 100644 --- a/docs/resources/permissions.md +++ b/docs/resources/permissions.md @@ -639,6 +639,35 @@ resource "databricks_permissions" "ml_serving_usage" { } ``` +## Mosaic AI Vector Search usage + +Valid permission levels for [databricks_vector_search_endpoint](vector_search_endpoint.md) are: `CAN_USE` and `CAN_MANAGE`. + +```hcl +resource "databricks_vector_search_endpoint" "this" { + name = "vector-search-test" + endpoint_type = "STANDARD" +} + +resource "databricks_group" "eng" { + display_name = "Engineering" +} + +resource "databricks_permissions" "vector_search_endpoint_usage" { + vector_search_endpoint_id = databricks_vector_search_endpoint.this.endpoint_id + + access_control { + group_name = "users" + permission_level = "CAN_USE" + } + + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_MANAGE" + } +} +``` + ## Passwords usage By default on AWS deployments, all admin users can sign in to Databricks using either SSO or their username and password, and all API users can authenticate to the Databricks REST APIs using their username and password. 
As an admin, you [can limit](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#optional-configure-password-access-control) admin users’ and API users’ ability to authenticate with their username and password by configuring `CAN_USE` permissions using password access control. @@ -895,6 +924,7 @@ Exactly one of the following arguments is required: - `experiment_id` - [MLflow experiment](mlflow_experiment.md) id - `registered_model_id` - [MLflow registered model](mlflow_model.md) id - `serving_endpoint_id` - [Model Serving](model_serving.md) endpoint id. +- `vector_search_endpoint_id` - [Vector Search](vector_search_endpoint.md) endpoint id. - `authorization` - either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission). - `sql_endpoint_id` - [SQL warehouse](sql_endpoint.md) id - `sql_dashboard_id` - [SQL dashboard](sql_dashboard.md) id diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go index 0fdd5553b8..20dfb564d0 100644 --- a/internal/acceptance/permissions_test.go +++ b/internal/acceptance/permissions_test.go @@ -866,6 +866,29 @@ func TestAccPermissions_ServingEndpoint(t *testing.T) { }) } +// AlexOtt: Temporary disable as it takes too long to create a new vector search endpoint +// Testing is done in the `vector_search_test.go` +// func TestAccPermissions_VectorSearchEndpoint(t *testing.T) { +// loadDebugEnvIfRunsFromIDE(t, "workspace") +// if isGcp(t) { +// skipf(t)("Vector Search endpoints are not supported on GCP") +// } +// endpointTemplate := ` +// resource "databricks_vector_search_endpoint" "endpoint" { +// name = "{var.STICKY_RANDOM}" +// endpoint_type = "STANDARD" +// } +// ` +// WorkspaceLevel(t, Step{ +// Template: endpointTemplate + makePermissionsTestStage("vector_search_endpoint_id", "databricks_vector_search_endpoint.endpoint.endpoint_id", groupPermissions("CAN_USE")), +// }, Step{ +// Template: endpointTemplate + makePermissionsTestStage("vector_search_endpoint_id", "databricks_vector_search_endpoint.endpoint.endpoint_id", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_USE")), +// }, Step{ +// Template: endpointTemplate + makePermissionsTestStage("vector_search_endpoint_id", "databricks_vector_search_endpoint.endpoint.endpoint_id", currentPrincipalPermission(t, "CAN_USE"), groupPermissions("CAN_USE")), +// ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for mlflowExperiment, allowed levels: CAN_MANAGE"), +// }) +// } + func TestAccPermissions_Alert(t *testing.T) { loadDebugEnvIfRunsFromIDE(t, "workspace") alertTemplate := ` diff --git a/internal/acceptance/vector_search_test.go b/internal/acceptance/vector_search_test.go index 2442d0fa05..890f36ca34 100644 --- a/internal/acceptance/vector_search_test.go +++ b/internal/acceptance/vector_search_test.go @@ -20,7 +20,16 @@ func TestUcAccVectorSearchEndpoint(t *testing.T) { resource "databricks_vector_search_endpoint" "this" { name = "%s" endpoint_type = "STANDARD" - } + } + + resource "databricks_permissions" "this" { + vector_search_endpoint_id = databricks_vector_search_endpoint.this.endpoint_id + + access_control { + group_name = "users" + permission_level = "CAN_USE" + } + } `, name), }, ) diff --git a/permissions/permission_definitions.go b/permissions/permission_definitions.go index 48e6d7a56f..398b032a64 
100644 --- a/permissions/permission_definitions.go +++ b/permissions/permission_definitions.go @@ -732,5 +732,16 @@ func allResourcePermissions() []resourcePermissions { updateAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, deleteAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, }, + { + field: "vector_search_endpoint_id", + objectType: "vector-search-endpoints", + requestObjectType: "vector-search-endpoints", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_USE": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + deleteAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + }, } } diff --git a/permissions/resource_permissions_test.go b/permissions/resource_permissions_test.go index 7019ae5c56..983b59fbc1 100644 --- a/permissions/resource_permissions_test.go +++ b/permissions/resource_permissions_test.go @@ -593,7 +593,7 @@ func TestResourcePermissionsCreate_invalid(t *testing.T) { qa.ResourceFixture{ Resource: ResourcePermissions(), Create: true, - }.ExpectError(t, "at least one type of resource identifier must be set; allowed fields: authorization, cluster_id, cluster_policy_id, dashboard_id, directory_id, directory_path, experiment_id, instance_pool_id, job_id, notebook_id, notebook_path, pipeline_id, registered_model_id, repo_id, repo_path, serving_endpoint_id, sql_alert_id, sql_dashboard_id, sql_endpoint_id, sql_query_id, workspace_file_id, workspace_file_path") + }.ExpectError(t, "at least one type of resource identifier must be set; allowed fields: authorization, cluster_id, cluster_policy_id, dashboard_id, directory_id, directory_path, experiment_id, instance_pool_id, job_id, notebook_id, notebook_path, pipeline_id, registered_model_id, repo_id, repo_path, serving_endpoint_id, sql_alert_id, sql_dashboard_id, sql_endpoint_id, sql_query_id, vector_search_endpoint_id, workspace_file_id, workspace_file_path") } func TestResourcePermissionsCreate_no_access_control(t *testing.T) { From 8f68baadac798ba407cee32b9912447604916c3e Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 13 Nov 2024 17:22:18 +0530 Subject: [PATCH 10/10] [Fix] Remove single-node validation from jobs clusters (#4216) ## Changes Fixes https://github.com/databricks/cli/issues/1896. Introducing this validation has caused a regression for both DABs and TF customers. This PR removes the validation for job clusters. ## Tests Unit tests. --- clusters/clusters_api.go | 16 ------ clusters/resource_cluster.go | 2 - jobs/jobs_api_go_sdk.go | 18 +++--- jobs/resource_job.go | 13 ----- jobs/resource_job_test.go | 104 +++++++---------------------------- 5 files changed, 28 insertions(+), 125 deletions(-) diff --git a/clusters/clusters_api.go b/clusters/clusters_api.go index 6a08a4a608..308016bda1 100644 --- a/clusters/clusters_api.go +++ b/clusters/clusters_api.go @@ -434,22 +434,6 @@ type Cluster struct { ClusterMounts []MountInfo `json:"cluster_mount_infos,omitempty" tf:"alias:cluster_mount_info"` } -// TODO: Remove this once all the resources using clusters are migrated to Go SDK. -// They would then be using Validate(cluster compute.CreateCluster) defined in resource_cluster.go that is a duplicate of this method but uses Go SDK. 
-func (cluster Cluster) Validate() error { - // TODO: rewrite with CustomizeDiff - if cluster.NumWorkers > 0 || cluster.Autoscale != nil { - return nil - } - profile := cluster.SparkConf["spark.databricks.cluster.profile"] - master := cluster.SparkConf["spark.master"] - resourceClass := cluster.CustomTags["ResourceClass"] - if profile == "singleNode" && strings.HasPrefix(master, "local") && resourceClass == "SingleNode" { - return nil - } - return errors.New(numWorkerErr) -} - // TODO: Remove this once all the resources using clusters are migrated to Go SDK. // They would then be using ModifyRequestOnInstancePool(cluster *compute.CreateCluster) defined in resource_cluster.go that is a duplicate of this method but uses Go SDK. // ModifyRequestOnInstancePool helps remove all request fields that should not be submitted when instance pool is selected. diff --git a/clusters/resource_cluster.go b/clusters/resource_cluster.go index a6100ae071..28672e2962 100644 --- a/clusters/resource_cluster.go +++ b/clusters/resource_cluster.go @@ -130,8 +130,6 @@ func ZoneDiffSuppress(k, old, new string, d *schema.ResourceData) bool { return false } -// This method is a duplicate of Validate() in clusters/clusters_api.go that uses Go SDK. -// Long term, Validate() in clusters_api.go will be removed once all the resources using clusters are migrated to Go SDK. func Validate(cluster any) error { var profile, master, resourceClass string switch c := cluster.(type) { diff --git a/jobs/jobs_api_go_sdk.go b/jobs/jobs_api_go_sdk.go index 6051bafae5..15ac33dac7 100644 --- a/jobs/jobs_api_go_sdk.go +++ b/jobs/jobs_api_go_sdk.go @@ -156,12 +156,8 @@ func (c controlRunStateLifecycleManagerGoSdk) OnUpdate(ctx context.Context) erro return StopActiveRun(jobID, c.d.Timeout(schema.TimeoutUpdate), w, ctx) } -func updateAndValidateJobClusterSpec(clusterSpec *compute.ClusterSpec, d *schema.ResourceData) error { - err := clusters.Validate(*clusterSpec) - if err != nil { - return err - } - err = clusters.ModifyRequestOnInstancePool(clusterSpec) +func updateJobClusterSpec(clusterSpec *compute.ClusterSpec, d *schema.ResourceData) error { + err := clusters.ModifyRequestOnInstancePool(clusterSpec) if err != nil { return err } @@ -178,21 +174,21 @@ func updateAndValidateJobClusterSpec(clusterSpec *compute.ClusterSpec, d *schema func prepareJobSettingsForUpdateGoSdk(d *schema.ResourceData, js *JobSettingsResource) error { if js.NewCluster != nil { - err := updateAndValidateJobClusterSpec(js.NewCluster, d) + err := updateJobClusterSpec(js.NewCluster, d) if err != nil { return err } } for _, task := range js.Tasks { if task.NewCluster != nil { - err := updateAndValidateJobClusterSpec(task.NewCluster, d) + err := updateJobClusterSpec(task.NewCluster, d) if err != nil { return err } } } for i := range js.JobClusters { - err := updateAndValidateJobClusterSpec(&js.JobClusters[i].NewCluster, d) + err := updateJobClusterSpec(&js.JobClusters[i].NewCluster, d) if err != nil { return err } @@ -205,14 +201,14 @@ func prepareJobSettingsForCreateGoSdk(d *schema.ResourceData, jc *JobCreateStruc // Before the go-sdk migration, the field `num_workers` was required, so we always sent it. 
for _, task := range jc.Tasks { if task.NewCluster != nil { - err := updateAndValidateJobClusterSpec(task.NewCluster, d) + err := updateJobClusterSpec(task.NewCluster, d) if err != nil { return err } } } for i := range jc.JobClusters { - err := updateAndValidateJobClusterSpec(&jc.JobClusters[i].NewCluster, d) + err := updateJobClusterSpec(&jc.JobClusters[i].NewCluster, d) if err != nil { return err } diff --git a/jobs/resource_job.go b/jobs/resource_job.go index be2b982a79..e619ceac49 100644 --- a/jobs/resource_job.go +++ b/jobs/resource_job.go @@ -1068,19 +1068,6 @@ func ResourceJob() common.Resource { return fmt.Errorf("`control_run_state` must be specified only with `max_concurrent_runs = 1`") } } - for _, task := range js.Tasks { - if task.NewCluster == nil { - continue - } - if err := clusters.Validate(*task.NewCluster); err != nil { - return fmt.Errorf("task %s invalid: %w", task.TaskKey, err) - } - } - if js.NewCluster != nil { - if err := clusters.Validate(*js.NewCluster); err != nil { - return fmt.Errorf("invalid job cluster: %w", err) - } - } return nil }, Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { diff --git a/jobs/resource_job_test.go b/jobs/resource_job_test.go index 75a780c00a..17a8abc08a 100644 --- a/jobs/resource_job_test.go +++ b/jobs/resource_job_test.go @@ -823,6 +823,14 @@ func TestResourceJobCreate_JobClusters(t *testing.T) { NotebookPath: "/Stuff", }, }, + { + TaskKey: "c", + NewCluster: &clusters.Cluster{ + SparkVersion: "d", + NodeTypeID: "e", + NumWorkers: 0, + }, + }, }, MaxConcurrentRuns: 1, JobClusters: []JobCluster{ @@ -839,7 +847,7 @@ func TestResourceJobCreate_JobClusters(t *testing.T) { NewCluster: &clusters.Cluster{ SparkVersion: "x", NodeTypeID: "y", - NumWorkers: 9, + NumWorkers: 0, }, }, }, @@ -883,7 +891,7 @@ func TestResourceJobCreate_JobClusters(t *testing.T) { job_cluster { job_cluster_key = "k" new_cluster { - num_workers = 9 + num_workers = 0 spark_version = "x" node_type_id = "y" } @@ -910,7 +918,17 @@ func TestResourceJobCreate_JobClusters(t *testing.T) { notebook_task { notebook_path = "/Stuff" } - }`, + } + + task { + task_key = "c" + new_cluster { + spark_version = "d" + node_type_id = "e" + num_workers = 0 + } + } + `, }.Apply(t) assert.NoError(t, err) assert.Equal(t, "17", d.Id()) @@ -2031,48 +2049,6 @@ func TestResourceJobCreateFromGitSourceWithoutProviderFail(t *testing.T) { }.ExpectError(t, "git source is not empty but Git Provider is not specified and cannot be guessed by url &{GitBranch: GitCommit: GitProvider: GitSnapshot: GitTag:0.4.8 GitUrl:https://custom.git.hosting.com/databricks/terraform-provider-databricks JobSource: ForceSendFields:[]}") } -func TestResourceJobCreateSingleNode_Fail(t *testing.T) { - _, err := qa.ResourceFixture{ - Create: true, - Resource: ResourceJob(), - HCL: `new_cluster { - num_workers = 0 - spark_version = "7.3.x-scala2.12" - node_type_id = "Standard_DS3_v2" - } - max_concurrent_runs = 1 - max_retries = 3 - min_retry_interval_millis = 5000 - name = "Featurizer" - retry_on_timeout = true - - spark_jar_task { - main_class_name = "com.labs.BarMain" - } - library { - jar = "dbfs://aa/bb/cc.jar" - } - library { - jar = "dbfs://ff/gg/hh.jar" - }`, - }.Apply(t) - assert.ErrorContains(t, err, `num_workers may be 0 only for single-node clusters. 
To create a single node -cluster please include the following configuration in your cluster configuration: - - spark_conf = { - "spark.databricks.cluster.profile" : "singleNode" - "spark.master" : "local[*]" - } - - custom_tags = { - "ResourceClass" = "SingleNode" - } - -Please note that the Databricks Terraform provider cannot detect if the above configuration -is defined in a policy used by the cluster. Please define this in the cluster configuration -itself to create a single node cluster.`) -} - func TestResourceJobRead(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ @@ -2938,44 +2914,6 @@ func TestResourceJobDelete(t *testing.T) { assert.Equal(t, "789", d.Id()) } -func TestResourceJobUpdate_FailNumWorkersZero(t *testing.T) { - _, err := qa.ResourceFixture{ - ID: "789", - Update: true, - Resource: ResourceJob(), - HCL: `new_cluster { - num_workers = 0 - spark_version = "7.3.x-scala2.12" - node_type_id = "Standard_DS3_v2" - } - max_concurrent_runs = 1 - max_retries = 3 - min_retry_interval_millis = 5000 - name = "Featurizer New" - retry_on_timeout = true - - spark_jar_task { - main_class_name = "com.labs.BarMain" - parameters = ["--cleanup", "full"] - }`, - }.Apply(t) - assert.ErrorContains(t, err, `num_workers may be 0 only for single-node clusters. To create a single node -cluster please include the following configuration in your cluster configuration: - - spark_conf = { - "spark.databricks.cluster.profile" : "singleNode" - "spark.master" : "local[*]" - } - - custom_tags = { - "ResourceClass" = "SingleNode" - } - -Please note that the Databricks Terraform provider cannot detect if the above configuration -is defined in a policy used by the cluster. Please define this in the cluster configuration -itself to create a single node cluster.`) -} - func TestJobsAPIList(t *testing.T) { qa.HTTPFixturesApply(t, []qa.HTTPFixture{ {