From 8d6e92d5555752893be40f64097ac9e206dec295 Mon Sep 17 00:00:00 2001
From: Vuong
Date: Thu, 20 Jun 2024 17:18:44 +0100
Subject: [PATCH 1/5] refactored `databricks_spark_version` and `databricks_zones`

---
 clusters/data_spark_version.go      |  36 +++--
 clusters/data_spark_version_test.go | 224 +++++++++++++++-------------
 clusters/data_zones.go              |  41 ++---
 clusters/data_zones_test.go         |  30 ++--
 common/resource.go                  |  38 +++--
 5 files changed, 193 insertions(+), 176 deletions(-)

diff --git a/clusters/data_spark_version.go b/clusters/data_spark_version.go
index 3b58fd4634..2b5e3c2fe1 100644
--- a/clusters/data_spark_version.go
+++ b/clusters/data_spark_version.go
@@ -7,6 +7,8 @@ import (
 	"sort"
 	"strings"
 
+	"github.com/databricks/databricks-sdk-go"
+	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/terraform-provider-databricks/common"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"golang.org/x/mod/semver"
@@ -125,25 +127,21 @@ func (a ClustersAPI) LatestSparkVersionOrDefault(svr SparkVersionRequest) string
 
 // DataSourceSparkVersion returns DBR version matching to the specification
 func DataSourceSparkVersion() common.Resource {
-	s := common.StructToSchema(SparkVersionRequest{}, func(
-		s map[string]*schema.Schema) map[string]*schema.Schema {
-
-		s["photon"].Deprecated = "Specify runtime_engine=\"PHOTON\" in the cluster configuration"
-		s["graviton"].Deprecated = "Not required anymore - it's automatically enabled on the Graviton-based node types"
+	return common.WorkspaceDataWithCustomizeFunc(func(ctx context.Context, data *compute.SparkVersionRequest, w *databricks.WorkspaceClient) error {
+		data.Id = ""
+		version, err := w.Clusters.SelectSparkVersion(ctx, *data)
+		if err != nil {
+			return err
+		}
+		data.Id = version
+		return nil
+	}, func(s map[string]*schema.Schema) map[string]*schema.Schema {
+		common.CustomizeSchemaPath(s, "photon").SetDeprecated("Specify runtime_engine=\"PHOTON\" in the cluster configuration")
+		common.CustomizeSchemaPath(s).AddNewField("graviton", &schema.Schema{
+			Type:       schema.TypeBool,
+			Optional:   true,
+			Deprecated: "Not required anymore - it's automatically enabled on the Graviton-based node types",
+		})
 		return s
 	})
-
-	return common.Resource{
-		Schema: s,
-		Read: func(ctx context.Context, d *schema.ResourceData, m *common.DatabricksClient) error {
-			var this SparkVersionRequest
-			common.DataToStructPointer(d, s, &this)
-			version, err := NewClustersAPI(ctx, m).LatestSparkVersion(this)
-			if err != nil {
-				return err
-			}
-			d.SetId(version)
-			return nil
-		},
-	}
 }
diff --git a/clusters/data_spark_version_test.go b/clusters/data_spark_version_test.go
index ce229859c4..c850e17840 100644
--- a/clusters/data_spark_version_test.go
+++ b/clusters/data_spark_version_test.go
@@ -1,83 +1,47 @@
 package clusters
 
 import (
+	"fmt"
 	"strings"
 	"testing"
 
+	"github.com/databricks/databricks-sdk-go/experimental/mocks"
+	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/terraform-provider-databricks/qa"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 )
 
-func commonFixtures() []qa.HTTPFixture {
-	return []qa.HTTPFixture{
-		{
-			Method:   "GET",
-			Resource: "/api/2.0/clusters/spark-versions",
-			Response: SparkVersionsList{
-				SparkVersions: []SparkVersion{
-					{
-						Version:     "7.1.x-scala2.12",
-						Description: "7.1 (includes Apache Spark 3.0.0, Scala 2.12)",
-					},
-					{
-						Version:     "7.1.x-gpu-ml-scala2.12",
-						Description: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)",
-					},
-					{
-						Version:     "apache-spark-2.4.x-scala2.11",
-						Description: "Light 2.4 (includes Apache Spark 2.4, Scala 2.11)",
-					},
-					{
-						Version:     "5.5.x-cpu-esr-ml-scala2.11",
-						Description: "5.5 Extended Support ML (includes Apache Spark 2.4.3, Scala 2.11)",
-					},
-					{
-						Version:     "7.3.x-hls-scala2.12",
-						Description: "7.3 LTS Genomics (includes Apache Spark 3.0.1, Scala 2.12)",
-					},
-					{
-						Version:     "6.4.x-scala2.11",
-						Description: "6.4 (includes Apache Spark 2.4.5, Scala 2.11)",
-					},
-					{
-						Version:     "7.3.x-scala2.12",
-						Description: "7.3 LTS (includes Apache Spark 3.0.1, Scala 2.12)",
-					},
-					{
-						Version:     "7.4.x-scala2.12",
-						Description: "7.4 (includes Apache Spark 3.0.1, Scala 2.12)",
-					},
-					{
-						Version:     "7.5.x-scala2.12",
-						Description: "7.5 Beta (includes Apache Spark 3.0.1, Scala 2.12)",
-					},
-					{
-						Version:     "8.3.x-photon-scala2.12",
-						Description: "8.3 Photon (includes Apache Spark 3.1.1, Scala 2.12)",
-					},
-				},
-			},
-		},
-	}
-}
-
 func TestSparkVersionLatest(t *testing.T) {
-	d, err := qa.ResourceFixture{
-		Fixtures: commonFixtures(),
+	qa.ResourceFixture{
+		MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) {
+			e := m.GetMockClustersAPI().EXPECT()
+			e.SelectSparkVersion(mock.Anything, compute.SparkVersionRequest{
+				Latest: true,
+				Scala:  "2.12",
+			}).Return("7.4.x-scala2.12", nil)
+		},
 		Read:        true,
 		Resource:    DataSourceSparkVersion(),
 		NonWritable: true,
 		State:       map[string]any{},
 		ID:          ".",
-	}.Apply(t)
-	assert.NoError(t, err)
-	assert.Equal(t, "7.4.x-scala2.12", d.Id())
+	}.ApplyAndExpectData(t, map[string]any{
+		"id": "7.4.x-scala2.12",
+	})
 }
 
 func TestSparkVersionLTS(t *testing.T) {
-	d, err := qa.ResourceFixture{
-		Fixtures: commonFixtures(),
+	qa.ResourceFixture{
+		MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) {
+			e := m.GetMockClustersAPI().EXPECT()
+			e.SelectSparkVersion(mock.Anything, compute.SparkVersionRequest{
+				Latest:          true,
+				Scala:           "2.12",
+				LongTermSupport: true,
+			}).Return("7.3.x-scala2.12", nil)
+		},
 		Read:        true,
 		Resource:    DataSourceSparkVersion(),
 		NonWritable: true,
@@ -85,14 +49,22 @@ func TestSparkVersionLTS(t *testing.T) {
 			"long_term_support": true,
 		},
 		ID: ".",
-	}.Apply(t)
-	assert.NoError(t, err)
-	assert.Equal(t, "7.3.x-scala2.12", d.Id())
+	}.ApplyAndExpectData(t, map[string]any{
+		"id": "7.3.x-scala2.12",
+	})
 }
 
 func TestSparkVersionESR(t *testing.T) {
-	d, err := qa.ResourceFixture{
-		Fixtures: commonFixtures(),
+	qa.ResourceFixture{
+		MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) {
+			e := m.GetMockClustersAPI().EXPECT()
+			e.SelectSparkVersion(mock.Anything, compute.SparkVersionRequest{
+				Latest:          true,
+				Scala:           "2.11",
+				LongTermSupport: true,
+				ML:              true,
+			}).Return("5.5.x-cpu-esr-ml-scala2.11", nil)
+		},
 		Read:        true,
 		Resource:    DataSourceSparkVersion(),
 		NonWritable: true,
 		State: map[string]any{
 			"scala":             "2.11",
 			"long_term_support": true,
 			"ml":                true,
 		},
 		ID: ".",
-	}.Apply(t)
-	assert.NoError(t, err)
-	assert.Equal(t, "5.5.x-cpu-esr-ml-scala2.11", d.Id())
+	}.ApplyAndExpectData(t, map[string]any{
+		"id": "5.5.x-cpu-esr-ml-scala2.11",
+	})
 }
 
 func TestSparkVersionGpuMl(t *testing.T) {
-	d, err := qa.ResourceFixture{
-		Fixtures: commonFixtures(),
+	qa.ResourceFixture{
+		MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) {
+			e := m.GetMockClustersAPI().EXPECT()
+			e.SelectSparkVersion(mock.Anything, compute.SparkVersionRequest{
+				Latest: true,
+				Scala:  "2.12",
+				GPU:    true,
+				ML:     true,
+			}).Return("7.1.x-gpu-ml-scala2.12", nil)
+		},
 		Read:        true,
 		Resource:    DataSourceSparkVersion(),
 		NonWritable: true,
@@ -118,14 +98,21 @@ func TestSparkVersionGpuMl(t *testing.T) {
 			"ml": true,
 		},
 		ID: ".",
-	}.Apply(t)
-	assert.NoError(t, err)
-	assert.Equal(t, "7.1.x-gpu-ml-scala2.12", d.Id())
+	}.ApplyAndExpectData(t, map[string]any{
+		"id": "7.1.x-gpu-ml-scala2.12",
+	})
 }
 
 func TestSparkVersionGenomics(t *testing.T) {
-	d, err := qa.ResourceFixture{
-		Fixtures: commonFixtures(),
+	qa.ResourceFixture{
+		MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) {
+			e := m.GetMockClustersAPI().EXPECT()
+			e.SelectSparkVersion(mock.Anything, compute.SparkVersionRequest{
+				Latest:   true,
+				Scala:    "2.12",
+				Genomics: true,
+			}).Return("7.3.x-hls-scala2.12", nil)
+		},
 		Read:        true,
 		Resource:    DataSourceSparkVersion(),
 		NonWritable: true,
@@ -133,14 +120,21 @@ func TestSparkVersionGenomics(t *testing.T) {
 			"genomics": true,
 		},
 		ID: ".",
-	}.Apply(t)
-	assert.NoError(t, err)
-	assert.Equal(t, "7.3.x-hls-scala2.12", d.Id())
+	}.ApplyAndExpectData(t, map[string]any{
+		"id": "7.3.x-hls-scala2.12",
+	})
 }
 
 func TestSparkVersion300(t *testing.T) {
-	d, err := qa.ResourceFixture{
-		Fixtures: commonFixtures(),
+	qa.ResourceFixture{
+		MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) {
+			e := m.GetMockClustersAPI().EXPECT()
+			e.SelectSparkVersion(mock.Anything, compute.SparkVersionRequest{
+				Latest:       true,
+				Scala:        "2.12",
+				SparkVersion: "3.0.0",
+			}).Return("7.1.x-scala2.12", nil)
+		},
 		Read:        true,
 		Resource:    DataSourceSparkVersion(),
 		NonWritable: true,
@@ -148,14 +142,21 @@ func TestSparkVersion300(t *testing.T) {
 			"spark_version": "3.0.0",
 		},
 		ID: ".",
-	}.Apply(t)
-	assert.NoError(t, err)
-	assert.Equal(t, "7.1.x-scala2.12", d.Id())
+	}.ApplyAndExpectData(t, map[string]any{
+		"id": "7.1.x-scala2.12",
+	})
 }
 
 func TestSparkVersionBeta(t *testing.T) {
-	d, err := qa.ResourceFixture{
-		Fixtures: commonFixtures(),
+	qa.ResourceFixture{
+		MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) {
+			e := m.GetMockClustersAPI().EXPECT()
+			e.SelectSparkVersion(mock.Anything, compute.SparkVersionRequest{
+				Latest: true,
+				Scala:  "2.12",
+				Beta:   true,
+			}).Return("7.5.x-scala2.12", nil)
+		},
 		Read:        true,
 		Resource:    DataSourceSparkVersion(),
 		NonWritable: true,
@@ -163,14 +164,21 @@ func TestSparkVersionBeta(t *testing.T) {
 			"beta": true,
 		},
 		ID: ".",
-	}.Apply(t)
-	assert.NoError(t, err)
-	assert.Equal(t, "7.5.x-scala2.12", d.Id())
+	}.ApplyAndExpectData(t, map[string]any{
+		"id": "7.5.x-scala2.12",
+	})
 }
 
 func TestSparkVersionPhoton(t *testing.T) {
-	d, err := qa.ResourceFixture{
-		Fixtures: commonFixtures(),
+	qa.ResourceFixture{
+		MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) {
+			e := m.GetMockClustersAPI().EXPECT()
+			e.SelectSparkVersion(mock.Anything, compute.SparkVersionRequest{
+				Latest: true,
+				Scala:  "2.12",
+				Photon: true,
+			}).Return("8.3.x-photon-scala2.12", nil)
+		},
 		Read:        true,
 		Resource:    DataSourceSparkVersion(),
 		NonWritable: true,
@@ -178,14 +186,22 @@ func TestSparkVersionPhoton(t *testing.T) {
 			"photon": true,
 		},
 		ID: ".",
-	}.Apply(t)
-	assert.NoError(t, err)
-	assert.Equal(t, "8.3.x-photon-scala2.12", d.Id())
+	}.ApplyAndExpectData(t, map[string]any{
+		"id": "8.3.x-photon-scala2.12",
+	})
 }
 
 func TestSparkVersionErrorNoResults(t *testing.T) {
-	_, err := qa.ResourceFixture{
-		Fixtures: commonFixtures(),
+	qa.ResourceFixture{
+		MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) {
+			e := m.GetMockClustersAPI().EXPECT()
+			e.SelectSparkVersion(mock.Anything, compute.SparkVersionRequest{
+				Latest:          true,
+				Scala:           "2.12",
+				Beta:            true,
+				LongTermSupport: true,
+			}).Return("", fmt.Errorf("spark versions query returned no results. Please change your search criteria and try again"))
+		},
 		Read:        true,
 		Resource:    DataSourceSparkVersion(),
 		NonWritable: true,
@@ -194,14 +210,18 @@ func TestSparkVersionErrorNoResults(t *testing.T) {
 			"long_term_support": true,
 		},
 		ID: ".",
-	}.Apply(t)
-	assert.Error(t, err)
-	assert.Equal(t, true, strings.Contains(err.Error(), "query returned no results"))
+	}.ExpectError(t, "spark versions query returned no results. Please change your search criteria and try again")
 }
 
 func TestSparkVersionErrorMultipleResults(t *testing.T) {
-	_, err := qa.ResourceFixture{
-		Fixtures: commonFixtures(),
+	qa.ResourceFixture{
+		MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) {
+			e := m.GetMockClustersAPI().EXPECT()
+			e.SelectSparkVersion(mock.Anything, compute.SparkVersionRequest{
+				Latest: false,
+				Scala:  "2.12",
+			}).Return("", fmt.Errorf("spark versions query returned multiple results. Please change your search criteria and try again"))
+		},
 		Read:        true,
 		Resource:    DataSourceSparkVersion(),
 		NonWritable: true,
@@ -209,9 +229,7 @@ func TestSparkVersionErrorMultipleResults(t *testing.T) {
 			"latest": false,
 		},
 		ID: ".",
-	}.Apply(t)
-	assert.Error(t, err)
-	assert.Equal(t, true, strings.Contains(err.Error(), "query returned multiple results"))
+	}.ExpectError(t, "spark versions query returned multiple results. Please change your search criteria and try again")
 }
 
 func TestSparkVersionErrorBadAnswer(t *testing.T) {
diff --git a/clusters/data_zones.go b/clusters/data_zones.go
index c10620ce90..888c6775a5 100644
--- a/clusters/data_zones.go
+++ b/clusters/data_zones.go
@@ -3,35 +3,24 @@ package clusters
 
 import (
 	"context"
 
+	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/terraform-provider-databricks/common"
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 // DataSourceClusterZones ...
 func DataSourceClusterZones() common.Resource {
-	return common.Resource{
-		Read: func(ctx context.Context, d *schema.ResourceData, m *common.DatabricksClient) error {
-			zonesInfo, err := NewClustersAPI(ctx, m).ListZones()
-			if err != nil {
-				return err
-			}
-			d.SetId(zonesInfo.DefaultZone)
-			d.Set("default_zone", zonesInfo.DefaultZone)
-			d.Set("zones", zonesInfo.Zones)
-			return nil
-		},
-		Schema: map[string]*schema.Schema{
-			"default_zone": {
-				Type:     schema.TypeString,
-				Computed: true,
-				ForceNew: true,
-			},
-			"zones": {
-				Type:     schema.TypeList,
-				Computed: true,
-				Elem:     &schema.Schema{Type: schema.TypeString},
-				ForceNew: true,
-			},
-		},
-	}
+	return common.WorkspaceData(func(ctx context.Context, data *struct {
+		Id          string   `json:"id,omitempty" tf:"computed"`
+		DefaultZone string   `json:"default_zone,omitempty" tf:"computed"`
+		Zones       []string `json:"zones,omitempty" tf:"computed"`
+	}, w *databricks.WorkspaceClient) error {
+		zonesInfo, err := w.Clusters.ListZones(ctx)
+		if err != nil {
+			return err
+		}
+		data.Id = zonesInfo.DefaultZone
+		data.DefaultZone = zonesInfo.DefaultZone
+		data.Zones = zonesInfo.Zones
+		return nil
+	})
 }
diff --git a/clusters/data_zones_test.go b/clusters/data_zones_test.go
index 958e62c27f..21e6dece76 100644
--- a/clusters/data_zones_test.go
+++ b/clusters/data_zones_test.go
@@ -1,24 +1,24 @@
 package clusters
 
 import (
+	"fmt"
 	"testing"
 
-	"github.com/databricks/databricks-sdk-go/apierr"
+	"github.com/databricks/databricks-sdk-go/experimental/mocks"
+	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/terraform-provider-databricks/qa"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
 )
 
 func TestZones(t *testing.T) {
 	d, err := qa.ResourceFixture{
-		Fixtures: []qa.HTTPFixture{
-			{
-				Method:   "GET",
-				Resource: "/api/2.0/clusters/list-zones",
-				Response: ZonesInfo{
-					DefaultZone: "a",
-					Zones:       []string{"a", "b"},
-				},
-			},
+		MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) {
+			e := m.GetMockClustersAPI().EXPECT()
+			e.ListZones(mock.Anything).Return(&compute.ListAvailableZonesResponse{
+				DefaultZone: "a",
+				Zones:       []string{"a", "b"},
+			}, nil)
 		},
 		Read:     true,
 		Resource: DataSourceClusterZones(),
@@ -32,13 +32,9 @@ func TestZones(t *testing.T) {
 
 func TestZones_404(t *testing.T) {
 	qa.ResourceFixture{
-		Fixtures: []qa.HTTPFixture{
-			{
-				Method:   "GET",
-				Resource: "/api/2.0/clusters/list-zones",
-				Status:   404,
-				Response: apierr.NotFound("missing"),
-			},
+		MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) {
+			e := m.GetMockClustersAPI().EXPECT()
+			e.ListZones(mock.Anything).Return(&compute.ListAvailableZonesResponse{}, fmt.Errorf("missing"))
 		},
 		Read:     true,
 		Resource: DataSourceClusterZones(),
diff --git a/common/resource.go b/common/resource.go
index 4ae0bef2e0..cb1c4d16bf 100644
--- a/common/resource.go
+++ b/common/resource.go
@@ -250,21 +250,21 @@ func DataResource(sc any, read func(context.Context, any, *DatabricksClient) err
 func WorkspaceData[T any](read func(context.Context, *T, *databricks.WorkspaceClient) error) Resource {
 	return genericDatabricksData((*DatabricksClient).WorkspaceClient, func(ctx context.Context, s struct{}, t *T, wc *databricks.WorkspaceClient) error {
 		return read(ctx, t, wc)
-	}, false)
+	}, false, NoCustomize)
 }
 
 // WorkspaceDataWithParams defines a data source that can be used to read data from the workspace API.
 // It differs from WorkspaceData in that it separates the definition of the computed fields (the resource type)
 // from the definition of the user-supplied parameters.
 //
 // The first type parameter is the type of the resource. This can be a type directly from the SDK, or a custom
 // type defined in the provider that embeds the SDK type.
 //
 // The second type parameter is the type representing parameters that a user may provide to the data source. These
 // are the attributes that the user can specify in the data source configuration, but are not part of the resource
 // type. If there are no extra attributes, this should be `struct{}`. If there are any fields with the same JSON
 // name as fields in the resource type, these fields will override the values from the resource type.
 //
 // The single argument is a function that will be called to read the data from the workspace API, returning the
 // value of the resource type. The function should return an error if the data cannot be read or the resource cannot
 // be found.
@@ -292,7 +292,21 @@ func WorkspaceDataWithParams[T, P any](read func(context.Context, P, *databricks
 	}
 	*s = *res
 	return nil
-	}, true)
+	}, true, NoCustomize)
 }
 
+// WorkspaceDataWithCustomizeFunc defines a data source that can be used to read data from the workspace API.
+// It differs from WorkspaceData in that it allows the schema to be customized further using a
+// customizeSchemaFunc function.
+//
+// The additional argument is a function that will be called to customize the schema of the data source.
+func WorkspaceDataWithCustomizeFunc[T any](
+	read func(context.Context, *T, *databricks.WorkspaceClient) error,
+	customizeSchemaFunc func(map[string]*schema.Schema) map[string]*schema.Schema) Resource {
+	return genericDatabricksData((*DatabricksClient).WorkspaceClient, func(ctx context.Context, s struct{}, t *T, wc *databricks.WorkspaceClient) error {
+		return read(ctx, t, wc)
+	}, false, customizeSchemaFunc)
+}
+
 // AccountData is a generic way to define account data resources in Terraform provider.
@@ -309,7 +323,7 @@ func WorkspaceDataWithParams[T, P any](read func(context.Context, P, *databricks
 func AccountData[T any](read func(context.Context, *T, *databricks.AccountClient) error) Resource {
 	return genericDatabricksData((*DatabricksClient).AccountClient, func(ctx context.Context, s struct{}, t *T, ac *databricks.AccountClient) error {
 		return read(ctx, t, ac)
-	}, false)
+	}, false, NoCustomize)
 }
 
 // AccountDataWithParams defines a data source that can be used to read data from the account API.
@@ -351,7 +365,7 @@ func AccountDataWithParams[T, P any](read func(context.Context, P, *databricks.A
 	}
 	*s = *res
 	return nil
-	}, true)
+	}, true, NoCustomize)
 }
 
 // genericDatabricksData is a generic and common way to define both account and workspace data and calls their respective clients.
@@ -362,7 +376,8 @@ func AccountDataWithParams[T, P any](read func(context.Context, P, *databricks.A
 func genericDatabricksData[T, P, C any](
 	getClient func(*DatabricksClient) (C, error),
 	read func(context.Context, P, *T, C) error,
-	hasOther bool) Resource {
+	hasOther bool,
+	customizeSchemaFunc func(map[string]*schema.Schema) map[string]*schema.Schema) Resource {
 	var dummy T
 	var other P
 	otherFields := StructToSchema(other, NoCustomize)
@@ -387,7 +402,8 @@ func genericDatabricksData[T, P, C any](
 			v.Computed = true
 			v.Required = false
 		}
-		return m
+		// allow further customization of the generated schema
+		return customizeSchemaFunc(m)
 	})
 	return Resource{
 		Schema: s,
@@ -455,5 +471,5 @@ func NoClientData[T any](read func(context.Context, *T) error) Resource {
 	return genericDatabricksData(func(*DatabricksClient) (any, error) { return nil, nil },
 		func(ctx context.Context, s struct{}, t *T, ac any) error {
 			return read(ctx, t)
-	}, false)
+	}, false, NoCustomize)
 }

From 57b73d8f2a27b2adeb2f502a37530bd872bbaaaa Mon Sep 17 00:00:00 2001
From: Vuong
Date: Thu, 20 Jun 2024 18:02:29 +0100
Subject: [PATCH 2/5] clean up spark versions methods

---
 access/resource_sql_permissions.go      |   2 +-
 access/resource_sql_permissions_test.go |  16 +--
 catalog/resource_sql_table.go           |   2 +-
 catalog/resource_sql_table_test.go      |  12 +--
 clusters/clusters_api.go                |  28 +++--
 clusters/clusters_api_sdk.go            |   9 ++
 clusters/clusters_api_test.go           | 135 ++----------------
 clusters/data_spark_version.go          | 116 --------------------
 exporter/exporter_test.go               |   6 +-
 storage/mounts.go                       |   9 +-
 storage/mounts_test.go                  |   8 +-
 storage/resource_mount_test.go          |   8 +-
 12 files changed, 71 insertions(+), 280 deletions(-)

diff --git a/access/resource_sql_permissions.go b/access/resource_sql_permissions.go
index 360220219a..432fdf0bdd 100644
--- a/access/resource_sql_permissions.go
+++ b/access/resource_sql_permissions.go
@@ -272,7 +272,7 @@ func (ta *SqlPermissions) initCluster(ctx context.Context, d *schema.ResourceDat
 }
 
 func (ta *SqlPermissions) getOrCreateCluster(clustersAPI clusters.ClustersAPI) (string, error) {
-	sparkVersion := clustersAPI.LatestSparkVersionOrDefault(clusters.SparkVersionRequest{
+	sparkVersion := clusters.LatestSparkVersionOrDefault(clustersAPI.Context(), clustersAPI.WorkspaceClient(), compute.SparkVersionRequest{
 		Latest: true,
 	})
 	nodeType := clustersAPI.GetSmallestNodeType(compute.NodeTypeRequest{LocalDisk: true})
diff --git a/access/resource_sql_permissions_test.go b/access/resource_sql_permissions_test.go
index 6e75fd109e..17a864d7e2 100644
--- a/access/resource_sql_permissions_test.go
+++ b/access/resource_sql_permissions_test.go
@@ -185,11 +185,11 @@ var createHighConcurrencyCluster = []qa.HTTPFixture{
 		Method:       "GET",
 		ReuseRequest: true,
 		Resource:     "/api/2.0/clusters/spark-versions",
-		Response: clusters.SparkVersionsList{
-			SparkVersions: []clusters.SparkVersion{
+		Response: compute.GetSparkVersionsResponse{
+			Versions: []compute.SparkVersion{
 				{
-					Version:     "7.1.x-cpu-ml-scala2.12",
-					Description: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)",
+					Key:  "7.1.x-cpu-ml-scala2.12",
+					Name: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)",
 				},
 			},
 		},
@@ -262,11 +262,11 @@ var createSharedCluster = []qa.HTTPFixture{
 		Method:       "GET",
 		ReuseRequest: true,
 		Resource:     "/api/2.0/clusters/spark-versions",
-		Response: clusters.SparkVersionsList{
-			SparkVersions: []clusters.SparkVersion{
+		Response: compute.GetSparkVersionsResponse{
+			Versions: []compute.SparkVersion{
 				{
-					Version:     "7.1.x-cpu-ml-scala2.12",
-					Description: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)",
+					Key:  "7.1.x-cpu-ml-scala2.12",
+					Name: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)",
 				},
 			},
 		},
diff --git a/catalog/resource_sql_table.go b/catalog/resource_sql_table.go
index db4ee7ff46..0076181d7b 100644
--- a/catalog/resource_sql_table.go
+++ b/catalog/resource_sql_table.go
@@ -159,7 +159,7 @@ func (ti *SqlTableInfo) initCluster(ctx context.Context, d *schema.ResourceData,
 }
 
 func (ti *SqlTableInfo) getOrCreateCluster(clusterName string, clustersAPI clusters.ClustersAPI) (string, error) {
-	sparkVersion := clustersAPI.LatestSparkVersionOrDefault(clusters.SparkVersionRequest{
+	sparkVersion := clusters.LatestSparkVersionOrDefault(clustersAPI.Context(), clustersAPI.WorkspaceClient(), compute.SparkVersionRequest{
 		Latest: true,
 	})
 	nodeType := clustersAPI.GetSmallestNodeType(compute.NodeTypeRequest{LocalDisk: true})
diff --git a/catalog/resource_sql_table_test.go b/catalog/resource_sql_table_test.go
index db26ccab51..58d4b0ddd6 100644
--- a/catalog/resource_sql_table_test.go
+++ b/catalog/resource_sql_table_test.go
@@ -1248,15 +1248,11 @@ var baseClusterFixture = []qa.HTTPFixture{
 		Method:       "GET",
 		ReuseRequest: true,
 		Resource:     "/api/2.0/clusters/spark-versions",
-		Response: clusters.SparkVersionsList{
-			SparkVersions: []clusters.SparkVersion{
+		Response: compute.GetSparkVersionsResponse{
+			Versions: []compute.SparkVersion{
 				{
-					Version:     "7.1.x-cpu-ml-scala2.12",
-					Description: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)",
-				},
-				{
-					Version:     "7.3.x-scala2.12",
-					Description: "7.3 LTS (includes Apache Spark 3.0.1, Scala 2.12)",
+					Key:  "7.1.x-cpu-ml-scala2.12",
+					Name: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)",
 				},
 			},
 		},
diff --git a/clusters/clusters_api.go b/clusters/clusters_api.go
index 3434d3fdae..dd4708aec1 100644
--- a/clusters/clusters_api.go
+++ b/clusters/clusters_api.go
@@ -9,6 +9,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/apierr"
 	"github.com/databricks/databricks-sdk-go/service/compute"
 
@@ -574,6 +575,19 @@ type ClustersAPI struct {
 	context context.Context
 }
 
+// Temporary function to be used until all resources are migrated to Go SDK
+// Create a workspace client
+func (a ClustersAPI) WorkspaceClient() *databricks.WorkspaceClient {
+	client, _ := a.client.WorkspaceClient()
+	return client
+}
+
+// Temporary function to be used until all resources are migrated to Go SDK
+// Return a context
+func (a ClustersAPI) Context() context.Context {
+	return a.context
+}
+
 // Create creates a new Spark cluster and waits till it's running
 func (a ClustersAPI) Create(cluster Cluster) (info ClusterInfo, err error) {
 	var ci ClusterID
@@ -867,6 +881,7 @@ var getOrCreateClusterMutex sync.Mutex
 
 // GetOrCreateRunningCluster creates an autoterminating cluster if it doesn't exist
 func (a ClustersAPI) GetOrCreateRunningCluster(name string, custom ...Cluster) (c ClusterInfo, err error) {
+	w, err := a.client.WorkspaceClient()
 	getOrCreateClusterMutex.Lock()
 	defer getOrCreateClusterMutex.Unlock()
 
@@ -900,13 +915,14 @@ func (a ClustersAPI) GetOrCreateRunningCluster(name string, custom ...Cluster) (
 		LocalDisk: true,
 	})
 	log.Printf("[INFO] Creating an autoterminating cluster with node type %s", smallestNodeType)
+	latestVersion, _ := w.Clusters.SelectSparkVersion(a.context, compute.SparkVersionRequest{
+		Latest:          true,
+		LongTermSupport: true,
+	})
 	r := Cluster{
-		NumWorkers:  1,
-		ClusterName: name,
-		SparkVersion: a.LatestSparkVersionOrDefault(SparkVersionRequest{
-			Latest:          true,
-			LongTermSupport: true,
-		}),
+		NumWorkers:   1,
+		ClusterName:  name,
+		SparkVersion: latestVersion,
 		NodeTypeID:             smallestNodeType,
 		AutoterminationMinutes: 10,
 	}
diff --git a/clusters/clusters_api_sdk.go b/clusters/clusters_api_sdk.go
index 388f1b80b5..a1c4b91f2a 100644
--- a/clusters/clusters_api_sdk.go
+++ b/clusters/clusters_api_sdk.go
@@ -35,3 +35,12 @@ func StartClusterAndGetInfo(ctx context.Context, w *databricks.WorkspaceClient,
 	}
 	return w.Clusters.StartByClusterIdAndWait(ctx, clusterID)
 }
+
+// LatestSparkVersionOrDefault returns Spark version matching the definition, or default in case of error
+func LatestSparkVersionOrDefault(ctx context.Context, w *databricks.WorkspaceClient, svr compute.SparkVersionRequest) string {
+	version, err := w.Clusters.SelectSparkVersion(ctx, svr)
+	if err != nil {
+		return "7.3.x-scala2.12"
+	}
+	return version
+}
diff --git a/clusters/clusters_api_test.go b/clusters/clusters_api_test.go
index 441d7397d5..32f8d3f407 100644
--- a/clusters/clusters_api_test.go
+++ b/clusters/clusters_api_test.go
@@ -6,7 +6,7 @@ import (
 	"fmt"
 
 	// "reflect"
-	"strings"
+
 	"testing"
 
 	"github.com/databricks/databricks-sdk-go/apierr"
@@ -28,23 +28,23 @@ func TestGetOrCreateRunningCluster_AzureAuth(t *testing.T) {
 			Method:       "GET",
 			ReuseRequest: true,
 			Resource:     "/api/2.0/clusters/spark-versions",
-			Response: SparkVersionsList{
-				SparkVersions: []SparkVersion{
+			Response: compute.GetSparkVersionsResponse{
+				Versions: []compute.SparkVersion{
 					{
-						Version:     "7.1.x-cpu-ml-scala2.12",
-						Description: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)",
+						Key:  "7.1.x-cpu-ml-scala2.12",
+						Name: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)",
 					},
 					{
-						Version:     "apache-spark-2.4.x-scala2.11",
-						Description: "Light 2.4 (includes Apache Spark 2.4, Scala 2.11)",
+						Key:  "apache-spark-2.4.x-scala2.11",
+						Name: "Light 2.4 (includes Apache Spark 2.4, Scala 2.11)",
 					},
 					{
-						Version:     "7.3.x-scala2.12",
-						Description: "7.3 LTS (includes Apache Spark 3.0.1, Scala 2.12)",
+						Key:  "7.3.x-scala2.12",
+						Name: "7.3 LTS (includes Apache Spark 3.0.1, Scala 2.12)",
 					},
 					{
-						Version:     "6.4.x-scala2.11",
-						Description: "6.4 (includes Apache Spark 2.4.5, Scala 2.11)",
+						Key:  "6.4.x-scala2.11",
+						Name: "6.4 (includes Apache Spark 2.4.5, Scala 2.11)",
 					},
 				},
 			},
@@ -1016,119 +1016,6 @@ func TestEventsEmptyResult(t *testing.T) {
 	assert.Equal(t, len(clusterEvents), 0)
 }
 
-func TestListSparkVersions(t *testing.T) {
-	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
-		{
-			Method:   "GET",
-			Resource: "/api/2.0/clusters/spark-versions",
-			Response: SparkVersionsList{
-				SparkVersions: []SparkVersion{
-					{
-						Version:     "7.1.x-cpu-ml-scala2.12",
-						Description: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)",
-					},
-					{
-						Version:     "apache-spark-2.4.x-scala2.11",
-						Description: "Light 2.4 (includes Apache Spark 2.4, Scala 2.11)",
-					},
-					{
-						Version:     "7.3.x-hls-scala2.12",
-						Description: "7.3 LTS Genomics (includes Apache Spark 3.0.1, Scala 2.12)",
-					},
-					{
-						Version:     "6.4.x-scala2.11",
-						Description: "6.4 (includes Apache Spark 2.4.5, Scala 2.11)",
-					},
-				},
-			},
-		},
-	})
-	defer server.Close()
-	require.NoError(t, err)
-
-	ctx := context.Background()
-	sparkVersions, err := NewClustersAPI(ctx, client).ListSparkVersions()
-	require.NoError(t, err)
-	require.Equal(t, 4, len(sparkVersions.SparkVersions))
-	require.Equal(t, "6.4.x-scala2.11", sparkVersions.SparkVersions[3].Version)
-}
-
-func TestListSparkVersionsWithError(t *testing.T) {
-	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
-		{
-			Method:   "GET",
-			Resource: "/api/2.0/clusters/spark-versions",
- Response: "{garbage....", - }, - }) - defer server.Close() - require.NoError(t, err) - - ctx := context.Background() - _, err = NewClustersAPI(ctx, client).ListSparkVersions() - require.Error(t, err) - require.Equal(t, true, strings.Contains(err.Error(), "invalid character 'g' looking")) -} - -func TestGetLatestSparkVersion(t *testing.T) { - versions := SparkVersionsList{ - SparkVersions: []SparkVersion{ - { - Version: "7.1.x-cpu-ml-scala2.12", - Description: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)", - }, - { - Version: "apache-spark-2.4.x-scala2.11", - Description: "Light 2.4 (includes Apache Spark 2.4, Scala 2.11)", - }, - { - Version: "7.3.x-hls-scala2.12", - Description: "7.3 LTS Genomics (includes Apache Spark 3.0.1, Scala 2.12)", - }, - { - Version: "6.4.x-scala2.11", - Description: "6.4 (includes Apache Spark 2.4.5, Scala 2.11)", - }, - { - Version: "7.3.x-scala2.12", - Description: "7.3 LTS (includes Apache Spark 3.0.1, Scala 2.12)", - }, - { - Version: "7.4.x-scala2.12", - Description: "7.4 (includes Apache Spark 3.0.1, Scala 2.12)", - }, - { - Version: "7.1.x-scala2.12", - Description: "7.1 (includes Apache Spark 3.0.0, Scala 2.12)", - }, - }, - } - - version, err := versions.LatestSparkVersion(SparkVersionRequest{Scala: "2.12", Latest: true}) - require.NoError(t, err) - require.Equal(t, "7.4.x-scala2.12", version) - - version, err = versions.LatestSparkVersion(SparkVersionRequest{Scala: "2.12", LongTermSupport: true, Latest: true}) - require.NoError(t, err) - require.Equal(t, "7.3.x-scala2.12", version) - - version, err = versions.LatestSparkVersion(SparkVersionRequest{Scala: "2.12", Latest: true, SparkVersion: "3.0.0"}) - require.NoError(t, err) - require.Equal(t, "7.1.x-scala2.12", version) - - _, err = versions.LatestSparkVersion(SparkVersionRequest{Scala: "2.12"}) - require.Error(t, err) - require.Equal(t, true, strings.Contains(err.Error(), "query returned multiple results")) - - _, err = versions.LatestSparkVersion(SparkVersionRequest{Scala: "2.12", ML: true, Genomics: true}) - require.Error(t, err) - require.Equal(t, true, strings.Contains(err.Error(), "query returned no results")) - - _, err = versions.LatestSparkVersion(SparkVersionRequest{Scala: "2.12", SparkVersion: "3.10"}) - require.Error(t, err) - require.Equal(t, true, strings.Contains(err.Error(), "query returned no results")) -} - func TestClusterState_CanReach(t *testing.T) { tests := []struct { from ClusterState diff --git a/clusters/data_spark_version.go b/clusters/data_spark_version.go index 2b5e3c2fe1..dfe1541795 100644 --- a/clusters/data_spark_version.go +++ b/clusters/data_spark_version.go @@ -2,129 +2,13 @@ package clusters import ( "context" - "fmt" - "regexp" - "sort" - "strings" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/terraform-provider-databricks/common" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "golang.org/x/mod/semver" ) -// SparkVersion - contains information about specific version -type SparkVersion struct { - Version string `json:"key"` - Description string `json:"name"` -} - -// SparkVersionsList - returns a list of all currently supported Spark Versions -// https://docs.databricks.com/dev-tools/api/latest/clusters.html#runtime-versions -type SparkVersionsList struct { - SparkVersions []SparkVersion `json:"versions"` -} - -// SparkVersionRequest - filtering request -type SparkVersionRequest struct { - LongTermSupport bool `json:"long_term_support,omitempty"` - Beta bool 
`json:"beta,omitempty" tf:"conflicts:long_term_support"` - Latest bool `json:"latest,omitempty" tf:"default:true"` - ML bool `json:"ml,omitempty"` - Genomics bool `json:"genomics,omitempty"` - GPU bool `json:"gpu,omitempty"` - Scala string `json:"scala,omitempty" tf:"default:2.12"` - SparkVersion string `json:"spark_version,omitempty"` - Photon bool `json:"photon,omitempty"` - Graviton bool `json:"graviton,omitempty"` -} - -// ListSparkVersions returns smallest (or default) node type id given the criteria -func (a ClustersAPI) ListSparkVersions() (SparkVersionsList, error) { - var sparkVersions SparkVersionsList - err := a.client.Get(a.context, "/clusters/spark-versions", nil, &sparkVersions) - return sparkVersions, err -} - -type sparkVersionsType []string - -func (s sparkVersionsType) Len() int { - return len(s) -} -func (s sparkVersionsType) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -var dbrVersionRegex = regexp.MustCompile(`^(\d+\.\d+)\.x-.*`) - -func extractDbrVersions(s string) string { - m := dbrVersionRegex.FindStringSubmatch(s) - if len(m) > 1 { - return m[1] - } - return s -} - -func (s sparkVersionsType) Less(i, j int) bool { - return semver.Compare("v"+extractDbrVersions(s[i]), "v"+extractDbrVersions(s[j])) > 0 -} - -// LatestSparkVersion returns latest version matching the request parameters -func (sparkVersions SparkVersionsList) LatestSparkVersion(req SparkVersionRequest) (string, error) { - var versions []string - - for _, version := range sparkVersions.SparkVersions { - if strings.Contains(version.Version, "-scala"+req.Scala) { - matches := ((!strings.Contains(version.Version, "apache-spark-")) && - (strings.Contains(version.Version, "-ml-") == req.ML) && - (strings.Contains(version.Version, "-hls-") == req.Genomics) && - (strings.Contains(version.Version, "-gpu-") == req.GPU) && - (strings.Contains(version.Version, "-photon-") == req.Photon) && - (strings.Contains(version.Version, "-aarch64-") == req.Graviton) && - (strings.Contains(version.Description, "Beta") == req.Beta)) - if matches && req.LongTermSupport { - matches = (matches && (strings.Contains(version.Description, "LTS") || strings.Contains(version.Version, "-esr-"))) - } - if matches && len(req.SparkVersion) > 0 { - matches = (matches && strings.Contains(version.Description, "Apache Spark "+req.SparkVersion)) - } - if matches { - versions = append(versions, version.Version) - } - } - } - if len(versions) < 1 { - return "", fmt.Errorf("spark versions query returned no results. Please change your search criteria and try again") - } else if len(versions) > 1 { - if req.Latest { - sort.Sort(sparkVersionsType(versions)) - } else { - return "", fmt.Errorf("spark versions query returned multiple results. 
Please change your search criteria and try again") - } - } - - return versions[0], nil -} - -// LatestSparkVersion returns latest version matching the request parameters -func (a ClustersAPI) LatestSparkVersion(svr SparkVersionRequest) (string, error) { - sparkVersions, err := a.ListSparkVersions() - if err != nil { - return "", err - } - return sparkVersions.LatestSparkVersion(svr) -} - -// LatestSparkVersionOrDefault returns Spark version matching the definition, or default in case of error -func (a ClustersAPI) LatestSparkVersionOrDefault(svr SparkVersionRequest) string { - version, err := a.LatestSparkVersion(svr) - if err != nil { - return "7.3.x-scala2.12" - } - return version -} - // DataSourceSparkVersion returns DBR version matching to the specification func DataSourceSparkVersion() common.Resource { return common.WorkspaceDataWithCustomizeFunc(func(ctx context.Context, data *compute.SparkVersionRequest, w *databricks.WorkspaceClient) error { diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index 174a766409..f605bd762f 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -180,10 +180,10 @@ func TestImportingMounts(t *testing.T) { Method: "GET", ReuseRequest: true, Resource: "/api/2.0/clusters/spark-versions", - Response: clusters.SparkVersionsList{ - SparkVersions: []clusters.SparkVersion{ + Response: compute.GetSparkVersionsResponse{ + Versions: []compute.SparkVersion{ { - Version: "Foo LTS", + Key: "Foo LTS", }, }, }, diff --git a/storage/mounts.go b/storage/mounts.go index 8531eacc03..1dcf66dd60 100644 --- a/storage/mounts.go +++ b/storage/mounts.go @@ -137,11 +137,10 @@ func getCommonClusterObject(clustersAPI clusters.ClustersAPI, clusterName string return clusters.Cluster{ NumWorkers: 0, ClusterName: clusterName, - SparkVersion: clustersAPI.LatestSparkVersionOrDefault( - clusters.SparkVersionRequest{ - Latest: true, - LongTermSupport: true, - }), + SparkVersion: clusters.LatestSparkVersionOrDefault(clustersAPI.Context(), clustersAPI.WorkspaceClient(), compute.SparkVersionRequest{ + Latest: true, + LongTermSupport: true, + }), NodeTypeID: clustersAPI.GetSmallestNodeType( compute.NodeTypeRequest{ LocalDisk: true, diff --git a/storage/mounts_test.go b/storage/mounts_test.go index 401fed07ca..9d8a4a2521 100644 --- a/storage/mounts_test.go +++ b/storage/mounts_test.go @@ -174,11 +174,11 @@ func TestDeletedMountClusterRecreates(t *testing.T) { Method: "GET", ReuseRequest: true, Resource: "/api/2.0/clusters/spark-versions", - Response: clusters.SparkVersionsList{ - SparkVersions: []clusters.SparkVersion{ + Response: compute.GetSparkVersionsResponse{ + Versions: []compute.SparkVersion{ { - Version: "7.1.x-cpu-ml-scala2.12", - Description: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)", + Key: "7.1.x-cpu-ml-scala2.12", + Name: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)", }, }, }, diff --git a/storage/resource_mount_test.go b/storage/resource_mount_test.go index 58318fd01c..24bf5ca37c 100644 --- a/storage/resource_mount_test.go +++ b/storage/resource_mount_test.go @@ -21,11 +21,11 @@ import ( // Test interface compliance via compile time error var _ Mount = (*S3IamMount)(nil) -var sparkVersionsResponse = clusters.SparkVersionsList{ - SparkVersions: []clusters.SparkVersion{ +var sparkVersionsResponse = compute.GetSparkVersionsResponse{ + Versions: []compute.SparkVersion{ { - Version: "7.3.x-scala2.12", - Description: "7.3 LTS (includes Apache Spark 3.0.1, Scala 2.12)", + Key: "7.3.x-scala2.12", + Name: "7.3 LTS (includes Apache Spark 
3.0.1, Scala 2.12)", }, }, } From dd78ab8e81b8f27a016d7c9e9940c98ae38e98ed Mon Sep 17 00:00:00 2001 From: Vuong Date: Thu, 20 Jun 2024 18:14:58 +0100 Subject: [PATCH 3/5] tidy up --- catalog/resource_sql_table_test.go | 4 ++++ clusters/clusters_api.go | 14 ++++++-------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/catalog/resource_sql_table_test.go b/catalog/resource_sql_table_test.go index 58d4b0ddd6..fdf3fd4162 100644 --- a/catalog/resource_sql_table_test.go +++ b/catalog/resource_sql_table_test.go @@ -1254,6 +1254,10 @@ var baseClusterFixture = []qa.HTTPFixture{ Key: "7.1.x-cpu-ml-scala2.12", Name: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)", }, + { + Key: "7.3.x-scala2.12", + Name: "7.3 LTS (includes Apache Spark 3.0.1, Scala 2.12)", + }, }, }, }, diff --git a/clusters/clusters_api.go b/clusters/clusters_api.go index dd4708aec1..1b7192390d 100644 --- a/clusters/clusters_api.go +++ b/clusters/clusters_api.go @@ -881,7 +881,6 @@ var getOrCreateClusterMutex sync.Mutex // GetOrCreateRunningCluster creates an autoterminating cluster if it doesn't exist func (a ClustersAPI) GetOrCreateRunningCluster(name string, custom ...Cluster) (c ClusterInfo, err error) { - w, err := a.client.WorkspaceClient() getOrCreateClusterMutex.Lock() defer getOrCreateClusterMutex.Unlock() @@ -915,14 +914,13 @@ func (a ClustersAPI) GetOrCreateRunningCluster(name string, custom ...Cluster) ( LocalDisk: true, }) log.Printf("[INFO] Creating an autoterminating cluster with node type %s", smallestNodeType) - latestVersion, _ := w.Clusters.SelectSparkVersion(a.context, compute.SparkVersionRequest{ - Latest: true, - LongTermSupport: true, - }) r := Cluster{ - NumWorkers: 1, - ClusterName: name, - SparkVersion: latestVersion, + NumWorkers: 1, + ClusterName: name, + SparkVersion: LatestSparkVersionOrDefault(a.Context(), a.WorkspaceClient(), compute.SparkVersionRequest{ + Latest: true, + LongTermSupport: true, + }), NodeTypeID: smallestNodeType, AutoterminationMinutes: 10, } From f45211a570e61b48aeeef1ba3dbe050775ae0941 Mon Sep 17 00:00:00 2001 From: Vuong Date: Tue, 2 Jul 2024 17:02:49 +0100 Subject: [PATCH 4/5] upgrade to 11.3.x --- access/resource_sql_permissions_test.go | 4 ++-- clusters/clusters_api_sdk.go | 2 +- storage/mounts_test.go | 2 +- storage/s3_test.go | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/access/resource_sql_permissions_test.go b/access/resource_sql_permissions_test.go index 17a864d7e2..f09dec2aba 100644 --- a/access/resource_sql_permissions_test.go +++ b/access/resource_sql_permissions_test.go @@ -222,7 +222,7 @@ var createHighConcurrencyCluster = []qa.HTTPFixture{ AutoterminationMinutes: 10, ClusterName: "terraform-table-acl", NodeTypeID: "Standard_F4s", - SparkVersion: "7.3.x-scala2.12", + SparkVersion: "11.3.x-scala2.12", CustomTags: map[string]string{ "ResourceClass": "SingleNode", }, @@ -299,7 +299,7 @@ var createSharedCluster = []qa.HTTPFixture{ AutoterminationMinutes: 10, ClusterName: "terraform-table-acl", NodeTypeID: "Standard_F4s", - SparkVersion: "7.3.x-scala2.12", + SparkVersion: "11.3.x-scala2.12", CustomTags: map[string]string{ "ResourceClass": "SingleNode", }, diff --git a/clusters/clusters_api_sdk.go b/clusters/clusters_api_sdk.go index a1c4b91f2a..8682a58231 100644 --- a/clusters/clusters_api_sdk.go +++ b/clusters/clusters_api_sdk.go @@ -40,7 +40,7 @@ func StartClusterAndGetInfo(ctx context.Context, w *databricks.WorkspaceClient, func LatestSparkVersionOrDefault(ctx context.Context, w *databricks.WorkspaceClient, svr 
compute.SparkVersionRequest) string { version, err := w.Clusters.SelectSparkVersion(ctx, svr) if err != nil { - return "7.3.x-scala2.12" + return "11.3.x-scala2.12" } return version } diff --git a/storage/mounts_test.go b/storage/mounts_test.go index 9d8a4a2521..a829e50469 100644 --- a/storage/mounts_test.go +++ b/storage/mounts_test.go @@ -212,7 +212,7 @@ func TestDeletedMountClusterRecreates(t *testing.T) { AutoterminationMinutes: 10, ClusterName: "terraform-mount", NodeTypeID: "Standard_F4s", - SparkVersion: "7.3.x-scala2.12", + SparkVersion: "11.3.x-scala2.12", CustomTags: map[string]string{ "ResourceClass": "SingleNode", }, diff --git a/storage/s3_test.go b/storage/s3_test.go index 9356f85aff..38fa1bb86f 100644 --- a/storage/s3_test.go +++ b/storage/s3_test.go @@ -62,7 +62,7 @@ func TestPreprocessS3MountOnDeletedClusterWorks(t *testing.T) { "ResourceClass": "SingleNode", }, ClusterName: "terraform-mount-s3-access", - SparkVersion: "7.3.x-scala2.12", + SparkVersion: "11.3.x-scala2.12", NumWorkers: 0, NodeTypeID: "i3.xlarge", AwsAttributes: &clusters.AwsAttributes{ From 53b1fb14252562a9839126ede045a10d10658ec2 Mon Sep 17 00:00:00 2001 From: Vuong Date: Tue, 2 Jul 2024 18:07:44 +0100 Subject: [PATCH 5/5] fix test --- storage/gs_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/gs_test.go b/storage/gs_test.go index 18ef9bd3fb..4d090cd0f4 100644 --- a/storage/gs_test.go +++ b/storage/gs_test.go @@ -65,7 +65,7 @@ func TestCreateOrValidateClusterForGoogleStorage_WorksOnDeletedCluster(t *testin GcpAttributes: &clusters.GcpAttributes{ GoogleServiceAccount: "service-account", }, - SparkVersion: "7.3.x-scala2.12", + SparkVersion: "11.3.x-scala2.12", NumWorkers: 0, NodeTypeID: "i3.xlarge", AutoterminationMinutes: 10,
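
Usage sketch (illustrative, not part of the patches above): after this series, a data source that needs schema tweaks beyond what `tf` struct tags can express is wired through the new `common.WorkspaceDataWithCustomizeFunc` helper, following the same shape as the refactored `DataSourceSparkVersion` and `DataSourceClusterZones`. In the sketch below, `DataSourceExample` and its fields are hypothetical names chosen only to show the call shape; `CustomizeSchemaPath` and `SetDeprecated` are the helpers already used in PATCH 1/5.

package clusters

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/terraform-provider-databricks/common"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// DataSourceExample is a hypothetical data source illustrating the helper:
// the first callback reads data through the Go SDK workspace client, the
// second amends the generated schema (deprecations, extra fields, etc.).
func DataSourceExample() common.Resource {
	return common.WorkspaceDataWithCustomizeFunc(
		func(ctx context.Context, data *struct {
			Id   string `json:"id,omitempty" tf:"computed"`
			Name string `json:"name,omitempty"`
		}, w *databricks.WorkspaceClient) error {
			// A real data source would resolve the ID via w (the workspace
			// API); echoing the name back keeps the sketch self-contained.
			data.Id = data.Name
			return nil
		},
		func(s map[string]*schema.Schema) map[string]*schema.Schema {
			// Schema adjustments that struct tags cannot express.
			common.CustomizeSchemaPath(s, "name").SetDeprecated("illustrative deprecation message")
			return s
		})
}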