diff --git a/apps/v2/impl.go b/apps/v2/impl.go index 34772c88..2dc174da 100755 --- a/apps/v2/impl.go +++ b/apps/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just apps API methods +// unexported type that holds implementations of just Apps API methods type appsImpl struct { client *client.DatabricksClient } diff --git a/billing/v2/impl.go b/billing/v2/impl.go index 4f9633cf..de7761de 100755 --- a/billing/v2/impl.go +++ b/billing/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just billable_usage API methods +// unexported type that holds implementations of just BillableUsage API methods type billableUsageImpl struct { client *client.DatabricksClient } @@ -24,7 +24,7 @@ func (a *billableUsageImpl) Download(ctx context.Context, request DownloadReques return &downloadResponse, err } -// unexported type that holds implementations of just budgets API methods +// unexported type that holds implementations of just Budgets API methods type budgetsImpl struct { client *client.DatabricksClient } @@ -76,7 +76,7 @@ func (a *budgetsImpl) Update(ctx context.Context, request UpdateBudgetConfigurat return &updateBudgetConfigurationResponse, err } -// unexported type that holds implementations of just log_delivery API methods +// unexported type that holds implementations of just LogDelivery API methods type logDeliveryImpl struct { client *client.DatabricksClient } @@ -119,7 +119,7 @@ func (a *logDeliveryImpl) PatchStatus(ctx context.Context, request UpdateLogDeli return err } -// unexported type that holds implementations of just usage_dashboards API methods +// unexported type that holds implementations of just UsageDashboards API methods type usageDashboardsImpl struct { client *client.DatabricksClient } diff --git a/catalog/v2/impl.go b/catalog/v2/impl.go index c269bf14..d50bbe16 100755 --- a/catalog/v2/impl.go +++ b/catalog/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just account_metastore_assignments API methods +// unexported type that holds implementations of just AccountMetastoreAssignments API methods type accountMetastoreAssignmentsImpl struct { client *client.DatabricksClient } @@ -19,8 +19,8 @@ func (a *accountMetastoreAssignmentsImpl) Create(ctx context.Context, request Ac var createResponse CreateResponse path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v/metastores/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.MetastoreId) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createResponse) return err } @@ -62,7 +62,7 @@ func (a *accountMetastoreAssignmentsImpl) Update(ctx context.Context, request Ac return err } -// unexported type that holds implementations of just account_metastores API methods +// unexported type that holds implementations of just AccountMetastores API methods type accountMetastoresImpl struct { client *client.DatabricksClient } @@ -114,7 +114,7 @@ func (a *accountMetastoresImpl) Update(ctx context.Context, request AccountsUpda return &accountsMetastoreInfo, err } -// unexported type that holds implementations of just account_storage_credentials API methods +// 
unexported type that holds implementations of just AccountStorageCredentials API methods type accountStorageCredentialsImpl struct { client *client.DatabricksClient } @@ -123,8 +123,8 @@ func (a *accountStorageCredentialsImpl) Create(ctx context.Context, request Acco var accountsStorageCredentialInfo AccountsStorageCredentialInfo path := fmt.Sprintf("/api/2.0/accounts/%v/metastores/%v/storage-credentials", a.client.ConfiguredAccountID(), request.MetastoreId) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &accountsStorageCredentialInfo) return &accountsStorageCredentialInfo, err } @@ -166,7 +166,7 @@ func (a *accountStorageCredentialsImpl) Update(ctx context.Context, request Acco return &accountsStorageCredentialInfo, err } -// unexported type that holds implementations of just artifact_allowlists API methods +// unexported type that holds implementations of just ArtifactAllowlists API methods type artifactAllowlistsImpl struct { client *client.DatabricksClient } @@ -190,7 +190,7 @@ func (a *artifactAllowlistsImpl) Update(ctx context.Context, request SetArtifact return &artifactAllowlistInfo, err } -// unexported type that holds implementations of just catalogs API methods +// unexported type that holds implementations of just Catalogs API methods type catalogsImpl struct { client *client.DatabricksClient } @@ -242,7 +242,7 @@ func (a *catalogsImpl) Update(ctx context.Context, request UpdateCatalog) (*Cata return &catalogInfo, err } -// unexported type that holds implementations of just connections API methods +// unexported type that holds implementations of just Connections API methods type connectionsImpl struct { client *client.DatabricksClient } @@ -294,7 +294,7 @@ func (a *connectionsImpl) Update(ctx context.Context, request UpdateConnection) return &connectionInfo, err } -// unexported type that holds implementations of just credentials API methods +// unexported type that holds implementations of just Credentials API methods type credentialsImpl struct { client *client.DatabricksClient } @@ -366,7 +366,7 @@ func (a *credentialsImpl) ValidateCredential(ctx context.Context, request Valida return &validateCredentialResponse, err } -// unexported type that holds implementations of just external_locations API methods +// unexported type that holds implementations of just ExternalLocations API methods type externalLocationsImpl struct { client *client.DatabricksClient } @@ -375,8 +375,8 @@ func (a *externalLocationsImpl) Create(ctx context.Context, request CreateExtern var externalLocationInfo ExternalLocationInfo path := "/api/2.1/unity-catalog/external-locations" headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &externalLocationInfo) return &externalLocationInfo, err } @@ -418,7 +418,7 @@ func (a *externalLocationsImpl) Update(ctx context.Context, request UpdateExtern return &externalLocationInfo, err } -// unexported type that holds implementations of just functions API methods +// unexported type that holds implementations of just Functions API methods type functionsImpl struct { client *client.DatabricksClient } @@ -470,7 +470,7 @@ func (a *functionsImpl) Update(ctx context.Context, request UpdateFunction) (*Fu return 
&functionInfo, err } -// unexported type that holds implementations of just grants API methods +// unexported type that holds implementations of just Grants API methods type grantsImpl struct { client *client.DatabricksClient } @@ -503,7 +503,7 @@ func (a *grantsImpl) Update(ctx context.Context, request UpdatePermissions) (*Pe return &permissionsList, err } -// unexported type that holds implementations of just metastores API methods +// unexported type that holds implementations of just Metastores API methods type metastoresImpl struct { client *client.DatabricksClient } @@ -512,8 +512,8 @@ func (a *metastoresImpl) Assign(ctx context.Context, request CreateMetastoreAssi var assignResponse AssignResponse path := fmt.Sprintf("/api/2.1/unity-catalog/workspaces/%v/metastore", request.WorkspaceId) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, request, &assignResponse) return err } @@ -522,8 +522,8 @@ func (a *metastoresImpl) Create(ctx context.Context, request CreateMetastore) (* var metastoreInfo MetastoreInfo path := "/api/2.1/unity-catalog/metastores" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &metastoreInfo) return &metastoreInfo, err } @@ -602,7 +602,7 @@ func (a *metastoresImpl) UpdateAssignment(ctx context.Context, request UpdateMet return err } -// unexported type that holds implementations of just model_versions API methods +// unexported type that holds implementations of just ModelVersions API methods type modelVersionsImpl struct { client *client.DatabricksClient } @@ -652,7 +652,7 @@ func (a *modelVersionsImpl) Update(ctx context.Context, request UpdateModelVersi return &modelVersionInfo, err } -// unexported type that holds implementations of just online_tables API methods +// unexported type that holds implementations of just OnlineTables API methods type onlineTablesImpl struct { client *client.DatabricksClient } @@ -685,7 +685,7 @@ func (a *onlineTablesImpl) Get(ctx context.Context, request GetOnlineTableReques return &onlineTable, err } -// unexported type that holds implementations of just quality_monitors API methods +// unexported type that holds implementations of just QualityMonitors API methods type qualityMonitorsImpl struct { client *client.DatabricksClient } @@ -747,8 +747,8 @@ func (a *qualityMonitorsImpl) RegenerateDashboard(ctx context.Context, request R var regenerateDashboardResponse RegenerateDashboardResponse path := fmt.Sprintf("/api/2.1/quality-monitoring/tables/%v/monitor/dashboard", request.TableName) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &regenerateDashboardResponse) return &regenerateDashboardResponse, err } @@ -772,7 +772,7 @@ func (a *qualityMonitorsImpl) Update(ctx context.Context, request UpdateMonitor) return &monitorInfo, err } -// unexported type that holds implementations of just registered_models API methods +// unexported type that holds implementations of just RegisteredModels API methods type registeredModelsImpl struct { client *client.DatabricksClient } @@ -841,7 +841,7 @@ func (a 
*registeredModelsImpl) Update(ctx context.Context, request UpdateRegiste return &registeredModelInfo, err } -// unexported type that holds implementations of just resource_quotas API methods +// unexported type that holds implementations of just ResourceQuotas API methods type resourceQuotasImpl struct { client *client.DatabricksClient } @@ -864,7 +864,7 @@ func (a *resourceQuotasImpl) ListQuotas(ctx context.Context, request ListQuotasR return &listQuotasResponse, err } -// unexported type that holds implementations of just schemas API methods +// unexported type that holds implementations of just Schemas API methods type schemasImpl struct { client *client.DatabricksClient } @@ -916,7 +916,7 @@ func (a *schemasImpl) Update(ctx context.Context, request UpdateSchema) (*Schema return &schemaInfo, err } -// unexported type that holds implementations of just storage_credentials API methods +// unexported type that holds implementations of just StorageCredentials API methods type storageCredentialsImpl struct { client *client.DatabricksClient } @@ -962,8 +962,8 @@ func (a *storageCredentialsImpl) Update(ctx context.Context, request UpdateStora var storageCredentialInfo StorageCredentialInfo path := fmt.Sprintf("/api/2.1/unity-catalog/storage-credentials/%v", request.Name) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &storageCredentialInfo) return &storageCredentialInfo, err } @@ -972,13 +972,13 @@ func (a *storageCredentialsImpl) Validate(ctx context.Context, request ValidateS var validateStorageCredentialResponse ValidateStorageCredentialResponse path := "/api/2.1/unity-catalog/validate-storage-credentials" headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &validateStorageCredentialResponse) return &validateStorageCredentialResponse, err } -// unexported type that holds implementations of just system_schemas API methods +// unexported type that holds implementations of just SystemSchemas API methods type systemSchemasImpl struct { client *client.DatabricksClient } @@ -1010,7 +1010,7 @@ func (a *systemSchemasImpl) List(ctx context.Context, request ListSystemSchemasR return &listSystemSchemasResponse, err } -// unexported type that holds implementations of just table_constraints API methods +// unexported type that holds implementations of just TableConstraints API methods type tableConstraintsImpl struct { client *client.DatabricksClient } @@ -1034,7 +1034,7 @@ func (a *tableConstraintsImpl) Delete(ctx context.Context, request DeleteTableCo return err } -// unexported type that holds implementations of just tables API methods +// unexported type that holds implementations of just Tables API methods type tablesImpl struct { client *client.DatabricksClient } @@ -1094,7 +1094,7 @@ func (a *tablesImpl) Update(ctx context.Context, request UpdateTableRequest) err return err } -// unexported type that holds implementations of just temporary_table_credentials API methods +// unexported type that holds implementations of just TemporaryTableCredentials API methods type temporaryTableCredentialsImpl struct { client *client.DatabricksClient } @@ -1109,7 +1109,7 @@ func (a *temporaryTableCredentialsImpl) GenerateTemporaryTableCredentials(ctx co return 
&generateTemporaryTableCredentialResponse, err } -// unexported type that holds implementations of just volumes API methods +// unexported type that holds implementations of just Volumes API methods type volumesImpl struct { client *client.DatabricksClient } @@ -1118,8 +1118,8 @@ func (a *volumesImpl) Create(ctx context.Context, request CreateVolumeRequestCon var volumeInfo VolumeInfo path := "/api/2.1/unity-catalog/volumes" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &volumeInfo) return &volumeInfo, err } @@ -1160,7 +1160,7 @@ func (a *volumesImpl) Update(ctx context.Context, request UpdateVolumeRequestCon return &volumeInfo, err } -// unexported type that holds implementations of just workspace_bindings API methods +// unexported type that holds implementations of just WorkspaceBindings API methods type workspaceBindingsImpl struct { client *client.DatabricksClient } @@ -1197,8 +1197,8 @@ func (a *workspaceBindingsImpl) UpdateBindings(ctx context.Context, request Upda var workspaceBindingsResponse WorkspaceBindingsResponse path := fmt.Sprintf("/api/2.1/unity-catalog/bindings/%v/%v", request.SecurableType, request.SecurableName) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &workspaceBindingsResponse) return &workspaceBindingsResponse, err } diff --git a/cleanrooms/v2/impl.go b/cleanrooms/v2/impl.go index 11461d6c..451ce463 100755 --- a/cleanrooms/v2/impl.go +++ b/cleanrooms/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just clean_room_assets API methods +// unexported type that holds implementations of just CleanRoomAssets API methods type cleanRoomAssetsImpl struct { client *client.DatabricksClient } @@ -62,7 +62,7 @@ func (a *cleanRoomAssetsImpl) Update(ctx context.Context, request UpdateCleanRoo return &cleanRoomAsset, err } -// unexported type that holds implementations of just clean_room_task_runs API methods +// unexported type that holds implementations of just CleanRoomTaskRuns API methods type cleanRoomTaskRunsImpl struct { client *client.DatabricksClient } @@ -76,7 +76,7 @@ func (a *cleanRoomTaskRunsImpl) List(ctx context.Context, request ListCleanRoomN return &listCleanRoomNotebookTaskRunsResponse, err } -// unexported type that holds implementations of just clean_rooms API methods +// unexported type that holds implementations of just CleanRooms API methods type cleanRoomsImpl struct { client *client.DatabricksClient } @@ -132,8 +132,8 @@ func (a *cleanRoomsImpl) Update(ctx context.Context, request UpdateCleanRoomRequ var cleanRoom CleanRoom path := fmt.Sprintf("/api/2.0/clean-rooms/%v", request.Name) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &cleanRoom) return &cleanRoom, err } diff --git a/compute/v2/impl.go b/compute/v2/impl.go index e89c3ec8..ae0777f4 100755 --- a/compute/v2/impl.go +++ b/compute/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type 
that holds implementations of just cluster_policies API methods +// unexported type that holds implementations of just ClusterPolicies API methods type clusterPoliciesImpl struct { client *client.DatabricksClient } @@ -19,8 +19,8 @@ func (a *clusterPoliciesImpl) Create(ctx context.Context, request CreatePolicy) var createPolicyResponse CreatePolicyResponse path := "/api/2.0/policies/clusters/create" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createPolicyResponse) return &createPolicyResponse, err } @@ -101,7 +101,7 @@ func (a *clusterPoliciesImpl) UpdatePermissions(ctx context.Context, request Clu return &clusterPolicyPermissions, err } -// unexported type that holds implementations of just clusters API methods +// unexported type that holds implementations of just Clusters API methods type clustersImpl struct { client *client.DatabricksClient } @@ -120,8 +120,8 @@ func (a *clustersImpl) Create(ctx context.Context, request CreateCluster) (*Crea var createClusterResponse CreateClusterResponse path := "/api/2.1/clusters/create" headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createClusterResponse) return &createClusterResponse, err } @@ -254,8 +254,8 @@ func (a *clustersImpl) SetPermissions(ctx context.Context, request ClusterPermis var clusterPermissions ClusterPermissions path := fmt.Sprintf("/api/2.0/permissions/clusters/%v", request.ClusterId) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, request, &clusterPermissions) return &clusterPermissions, err } @@ -309,7 +309,7 @@ func (a *clustersImpl) UpdatePermissions(ctx context.Context, request ClusterPer return &clusterPermissions, err } -// unexported type that holds implementations of just command_execution API methods +// unexported type that holds implementations of just CommandExecution API methods type commandExecutionImpl struct { client *client.DatabricksClient } @@ -356,8 +356,8 @@ func (a *commandExecutionImpl) Destroy(ctx context.Context, request DestroyConte var destroyResponse DestroyResponse path := "/api/1.2/contexts/destroy" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &destroyResponse) return err } @@ -372,7 +372,7 @@ func (a *commandExecutionImpl) Execute(ctx context.Context, request Command) (*C return &created, err } -// unexported type that holds implementations of just global_init_scripts API methods +// unexported type that holds implementations of just GlobalInitScripts API methods type globalInitScriptsImpl struct { client *client.DatabricksClient } @@ -381,8 +381,8 @@ func (a *globalInitScriptsImpl) Create(ctx context.Context, request GlobalInitSc var createResponse CreateResponse path := "/api/2.0/global-init-scripts" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := 
a.client.Do(ctx, http.MethodPost, path, headers, request, &createResponse) return &createResponse, err } @@ -422,7 +422,7 @@ func (a *globalInitScriptsImpl) Update(ctx context.Context, request GlobalInitSc return err } -// unexported type that holds implementations of just instance_pools API methods +// unexported type that holds implementations of just InstancePools API methods type instancePoolsImpl struct { client *client.DatabricksClient } @@ -513,7 +513,7 @@ func (a *instancePoolsImpl) UpdatePermissions(ctx context.Context, request Insta return &instancePoolPermissions, err } -// unexported type that holds implementations of just instance_profiles API methods +// unexported type that holds implementations of just InstanceProfiles API methods type instanceProfilesImpl struct { client *client.DatabricksClient } @@ -522,8 +522,8 @@ func (a *instanceProfilesImpl) Add(ctx context.Context, request AddInstanceProfi var addResponse AddResponse path := "/api/2.0/instance-profiles/add" headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &addResponse) return err } @@ -551,13 +551,13 @@ func (a *instanceProfilesImpl) Remove(ctx context.Context, request RemoveInstanc var removeResponse RemoveResponse path := "/api/2.0/instance-profiles/remove" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &removeResponse) return err } -// unexported type that holds implementations of just libraries API methods +// unexported type that holds implementations of just Libraries API methods type librariesImpl struct { client *client.DatabricksClient } @@ -600,7 +600,7 @@ func (a *librariesImpl) Uninstall(ctx context.Context, request UninstallLibrarie return err } -// unexported type that holds implementations of just policy_compliance_for_clusters API methods +// unexported type that holds implementations of just PolicyComplianceForClusters API methods type policyComplianceForClustersImpl struct { client *client.DatabricksClient } @@ -633,7 +633,7 @@ func (a *policyComplianceForClustersImpl) ListCompliance(ctx context.Context, re return &listClusterCompliancesResponse, err } -// unexported type that holds implementations of just policy_families API methods +// unexported type that holds implementations of just PolicyFamilies API methods type policyFamiliesImpl struct { client *client.DatabricksClient } diff --git a/dashboards/v2/impl.go b/dashboards/v2/impl.go index c3a9238d..917f339a 100755 --- a/dashboards/v2/impl.go +++ b/dashboards/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just genie API methods +// unexported type that holds implementations of just Genie API methods type genieImpl struct { client *client.DatabricksClient } @@ -56,13 +56,13 @@ func (a *genieImpl) StartConversation(ctx context.Context, request GenieStartCon var genieStartConversationResponse GenieStartConversationResponse path := fmt.Sprintf("/api/2.0/genie/spaces/%v/start-conversation", request.SpaceId) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := 
a.client.Do(ctx, http.MethodPost, path, headers, request, &genieStartConversationResponse) return &genieStartConversationResponse, err } -// unexported type that holds implementations of just lakeview API methods +// unexported type that holds implementations of just Lakeview API methods type lakeviewImpl struct { client *client.DatabricksClient } @@ -182,8 +182,8 @@ func (a *lakeviewImpl) Migrate(ctx context.Context, request MigrateDashboardRequ var dashboard Dashboard path := "/api/2.0/lakeview/dashboards/migrate" headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &dashboard) return &dashboard, err } diff --git a/files/v2/impl.go b/files/v2/impl.go index ce3ffba5..4f0df2c3 100755 --- a/files/v2/impl.go +++ b/files/v2/impl.go @@ -11,7 +11,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -// unexported type that holds implementations of just dbfs API methods +// unexported type that holds implementations of just Dbfs API methods type dbfsImpl struct { client *client.DatabricksClient } @@ -78,8 +78,8 @@ func (a *dbfsImpl) Mkdirs(ctx context.Context, request MkDirs) error { var mkDirsResponse MkDirsResponse path := "/api/2.0/dbfs/mkdirs" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &mkDirsResponse) return err } @@ -88,8 +88,8 @@ func (a *dbfsImpl) Move(ctx context.Context, request Move) error { var moveResponse MoveResponse path := "/api/2.0/dbfs/move" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &moveResponse) return err } @@ -113,7 +113,7 @@ func (a *dbfsImpl) Read(ctx context.Context, request ReadDbfsRequest) (*ReadResp return &readResponse, err } -// unexported type that holds implementations of just files API methods +// unexported type that holds implementations of just Files API methods type filesImpl struct { client *client.DatabricksClient } diff --git a/iam/v2/client.go b/iam/v2/client.go new file mode 100644 index 00000000..a2e78a8d --- /dev/null +++ b/iam/v2/client.go @@ -0,0 +1,48 @@ +package iam + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type PermissionsClient struct { + cfg *config.Config + apiClient *httpclient.ApiClient + + permissions PermissionsInterface +} + +func NewPermissionsClientFromConfig(c ...*config.Config) (*PermissionsClient, error) { + var cfg *config.Config + if len(c) == 1 { + // first config + cfg = (*config.Config)(c[0]) + } else { + // default config + cfg = &config.Config{} + } + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("account config for a workspace client is not supported") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &PermissionsClient{ + cfg: cfg, + apiClient: apiClient, + 
permissions: NewPermissions(databricksClient), + }, nil +} diff --git a/iam/v2/impl.go b/iam/v2/impl.go index 24ac2c6b..cfb996e4 100755 --- a/iam/v2/impl.go +++ b/iam/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just access_control API methods +// unexported type that holds implementations of just AccessControl API methods type accessControlImpl struct { client *client.DatabricksClient } @@ -24,7 +24,7 @@ func (a *accessControlImpl) CheckPolicy(ctx context.Context, request CheckPolicy return &checkPolicyResponse, err } -// unexported type that holds implementations of just account_access_control API methods +// unexported type that holds implementations of just AccountAccessControl API methods type accountAccessControlImpl struct { client *client.DatabricksClient } @@ -57,7 +57,7 @@ func (a *accountAccessControlImpl) UpdateRuleSet(ctx context.Context, request Up return &ruleSetResponse, err } -// unexported type that holds implementations of just account_access_control_proxy API methods +// unexported type that holds implementations of just AccountAccessControlProxy API methods type accountAccessControlProxyImpl struct { client *client.DatabricksClient } @@ -84,13 +84,13 @@ func (a *accountAccessControlProxyImpl) UpdateRuleSet(ctx context.Context, reque var ruleSetResponse RuleSetResponse path := "/api/2.0/preview/accounts/access-control/rule-sets" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, request, &ruleSetResponse) return &ruleSetResponse, err } -// unexported type that holds implementations of just account_groups API methods +// unexported type that holds implementations of just AccountGroups API methods type accountGroupsImpl struct { client *client.DatabricksClient } @@ -99,8 +99,8 @@ func (a *accountGroupsImpl) Create(ctx context.Context, request Group) (*Group, var group Group path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Groups", a.client.ConfiguredAccountID()) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &group) return &group, err } @@ -151,7 +151,7 @@ func (a *accountGroupsImpl) Update(ctx context.Context, request Group) error { return err } -// unexported type that holds implementations of just account_service_principals API methods +// unexported type that holds implementations of just AccountServicePrincipals API methods type accountServicePrincipalsImpl struct { client *client.DatabricksClient } @@ -212,7 +212,7 @@ func (a *accountServicePrincipalsImpl) Update(ctx context.Context, request Servi return err } -// unexported type that holds implementations of just account_users API methods +// unexported type that holds implementations of just AccountUsers API methods type accountUsersImpl struct { client *client.DatabricksClient } @@ -221,8 +221,8 @@ func (a *accountUsersImpl) Create(ctx context.Context, request User) (*User, err var user User path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Users", a.client.ConfiguredAccountID()) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := 
a.client.Do(ctx, http.MethodPost, path, headers, request, &user) return &user, err } @@ -273,7 +273,7 @@ func (a *accountUsersImpl) Update(ctx context.Context, request User) error { return err } -// unexported type that holds implementations of just current_user API methods +// unexported type that holds implementations of just CurrentUser API methods type currentUserImpl struct { client *client.DatabricksClient } @@ -287,7 +287,7 @@ func (a *currentUserImpl) Me(ctx context.Context) (*User, error) { return &user, err } -// unexported type that holds implementations of just groups API methods +// unexported type that holds implementations of just Groups API methods type groupsImpl struct { client *client.DatabricksClient } @@ -342,13 +342,13 @@ func (a *groupsImpl) Update(ctx context.Context, request Group) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/preview/scim/v2/Groups/%v", request.Id) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateResponse) return err } -// unexported type that holds implementations of just permission_migration API methods +// unexported type that holds implementations of just PermissionMigration API methods type permissionMigrationImpl struct { client *client.DatabricksClient } @@ -363,7 +363,7 @@ func (a *permissionMigrationImpl) MigratePermissions(ctx context.Context, reques return &migratePermissionsResponse, err } -// unexported type that holds implementations of just permissions API methods +// unexported type that holds implementations of just Permissions API methods type permissionsImpl struct { client *client.DatabricksClient } @@ -406,7 +406,7 @@ func (a *permissionsImpl) Update(ctx context.Context, request PermissionsRequest return &objectPermissions, err } -// unexported type that holds implementations of just service_principals API methods +// unexported type that holds implementations of just ServicePrincipals API methods type servicePrincipalsImpl struct { client *client.DatabricksClient } @@ -451,8 +451,8 @@ func (a *servicePrincipalsImpl) Patch(ctx context.Context, request PartialUpdate var patchResponse PatchResponse path := fmt.Sprintf("/api/2.0/preview/scim/v2/ServicePrincipals/%v", request.Id) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &patchResponse) return err } @@ -467,7 +467,7 @@ func (a *servicePrincipalsImpl) Update(ctx context.Context, request ServicePrinc return err } -// unexported type that holds implementations of just users API methods +// unexported type that holds implementations of just Users API methods type usersImpl struct { client *client.DatabricksClient } @@ -566,7 +566,7 @@ func (a *usersImpl) UpdatePermissions(ctx context.Context, request PasswordPermi return &passwordPermissions, err } -// unexported type that holds implementations of just workspace_assignment API methods +// unexported type that holds implementations of just WorkspaceAssignment API methods type workspaceAssignmentImpl struct { client *client.DatabricksClient } diff --git a/jobs/v2/impl.go b/jobs/v2/impl.go index cb2f6cc1..ea2dca0c 100755 --- a/jobs/v2/impl.go +++ b/jobs/v2/impl.go @@ -10,7 +10,7 @@ import ( 
"github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just jobs API methods +// unexported type that holds implementations of just Jobs API methods type jobsImpl struct { client *client.DatabricksClient } @@ -141,8 +141,8 @@ func (a *jobsImpl) RepairRun(ctx context.Context, request RepairRun) (*RepairRun var repairRunResponse RepairRunResponse path := "/api/2.1/jobs/runs/repair" headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &repairRunResponse) return &repairRunResponse, err } @@ -171,8 +171,8 @@ func (a *jobsImpl) SetPermissions(ctx context.Context, request JobPermissionsReq var jobPermissions JobPermissions path := fmt.Sprintf("/api/2.0/permissions/jobs/%v", request.JobId) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, request, &jobPermissions) return &jobPermissions, err } @@ -201,13 +201,13 @@ func (a *jobsImpl) UpdatePermissions(ctx context.Context, request JobPermissions var jobPermissions JobPermissions path := fmt.Sprintf("/api/2.0/permissions/jobs/%v", request.JobId) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &jobPermissions) return &jobPermissions, err } -// unexported type that holds implementations of just policy_compliance_for_jobs API methods +// unexported type that holds implementations of just PolicyComplianceForJobs API methods type policyComplianceForJobsImpl struct { client *client.DatabricksClient } diff --git a/jobs/v2/model.go b/jobs/v2/model.go index 8a3ba304..00588e86 100755 --- a/jobs/v2/model.go +++ b/jobs/v2/model.go @@ -8,231 +8,6 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/marshal" ) -type Adlsgen2Info struct { - // abfss destination, e.g. - // `abfss://@.dfs.core.windows.net/`. - Destination string `json:"destination"` -} - -type AutoScale struct { - // The maximum number of workers to which the cluster can scale up when - // overloaded. Note that `max_workers` must be strictly greater than - // `min_workers`. - MaxWorkers int `json:"max_workers,omitempty"` - // The minimum number of workers to which the cluster can scale down when - // underutilized. It is also the initial number of workers the cluster will - // have after creation. - MinWorkers int `json:"min_workers,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *AutoScale) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s AutoScale) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - -type AwsAttributes struct { - // Availability type used for all subsequent nodes past the - // `first_on_demand` ones. - // - // Note: If `first_on_demand` is zero, this availability type will be used - // for the entire cluster. - Availability AwsAvailability `json:"availability,omitempty"` - // The number of volumes launched for each instance. Users can choose up to - // 10 volumes. This feature is only enabled for supported node types. Legacy - // node types cannot specify custom EBS volumes. 
For node types with no - // instance store, at least one EBS volume needs to be specified; otherwise, - // cluster creation will fail. - // - // These EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc. Instance - // store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc. - // - // If EBS volumes are attached, Databricks will configure Spark to use only - // the EBS volumes for scratch storage because heterogenously sized scratch - // devices can lead to inefficient disk utilization. If no EBS volumes are - // attached, Databricks will configure Spark to use instance store volumes. - // - // Please note that if EBS volumes are specified, then the Spark - // configuration `spark.local.dir` will be overridden. - EbsVolumeCount int `json:"ebs_volume_count,omitempty"` - // If using gp3 volumes, what IOPS to use for the disk. If this is not set, - // the maximum performance of a gp2 volume with the same volume size will be - // used. - EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` - // The size of each EBS volume (in GiB) launched for each instance. For - // general purpose SSD, this value must be within the range 100 - 4096. For - // throughput optimized HDD, this value must be within the range 500 - 4096. - EbsVolumeSize int `json:"ebs_volume_size,omitempty"` - // If using gp3 volumes, what throughput to use for the disk. If this is not - // set, the maximum performance of a gp2 volume with the same volume size - // will be used. - EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` - // The type of EBS volumes that will be launched with this cluster. - EbsVolumeType EbsVolumeType `json:"ebs_volume_type,omitempty"` - // The first `first_on_demand` nodes of the cluster will be placed on - // on-demand instances. If this value is greater than 0, the cluster driver - // node in particular will be placed on an on-demand instance. If this value - // is greater than or equal to the current cluster size, all nodes will be - // placed on on-demand instances. If this value is less than the current - // cluster size, `first_on_demand` nodes will be placed on on-demand - // instances and the remainder will be placed on `availability` instances. - // Note that this value does not affect cluster size and cannot currently be - // mutated over the lifetime of a cluster. - FirstOnDemand int `json:"first_on_demand,omitempty"` - // Nodes for this cluster will only be placed on AWS instances with this - // instance profile. If ommitted, nodes will be placed on instances without - // an IAM instance profile. The instance profile must have previously been - // added to the Databricks environment by an account administrator. - // - // This feature may only be available to certain customer plans. - // - // If this field is ommitted, we will pull in the default from the conf if - // it exists. - InstanceProfileArn string `json:"instance_profile_arn,omitempty"` - // The bid price for AWS spot instances, as a percentage of the - // corresponding instance type's on-demand price. For example, if this field - // is set to 50, and the cluster needs a new `r3.xlarge` spot instance, then - // the bid price is half of the price of on-demand `r3.xlarge` instances. - // Similarly, if this field is set to 200, the bid price is twice the price - // of on-demand `r3.xlarge` instances. If not specified, the default value - // is 100. When spot instances are requested for this cluster, only spot - // instances whose bid price percentage matches this field will be - // considered. 
Note that, for safety, we enforce this field to be no more - // than 10000. - // - // The default value and documentation here should be kept consistent with - // CommonConf.defaultSpotBidPricePercent and - // CommonConf.maxSpotBidPricePercent. - SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"` - // Identifier for the availability zone/datacenter in which the cluster - // resides. This string will be of a form like "us-west-2a". The provided - // availability zone must be in the same region as the Databricks - // deployment. For example, "us-west-2a" is not a valid zone id if the - // Databricks deployment resides in the "us-east-1" region. This is an - // optional field at cluster creation, and if not specified, a default zone - // will be used. If the zone specified is "auto", will try to place cluster - // in a zone with high availability, and will retry placement in a different - // AZ if there is not enough capacity. The list of available zones as well - // as the default value can be found by using the `List Zones` method. - ZoneId string `json:"zone_id,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *AwsAttributes) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s AwsAttributes) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - -// Availability type used for all subsequent nodes past the `first_on_demand` -// ones. -// -// Note: If `first_on_demand` is zero, this availability type will be used for -// the entire cluster. -type AwsAvailability string - -const AwsAvailabilityOnDemand AwsAvailability = `ON_DEMAND` - -const AwsAvailabilitySpot AwsAvailability = `SPOT` - -const AwsAvailabilitySpotWithFallback AwsAvailability = `SPOT_WITH_FALLBACK` - -// String representation for [fmt.Print] -func (f *AwsAvailability) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *AwsAvailability) Set(v string) error { - switch v { - case `ON_DEMAND`, `SPOT`, `SPOT_WITH_FALLBACK`: - *f = AwsAvailability(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "ON_DEMAND", "SPOT", "SPOT_WITH_FALLBACK"`, v) - } -} - -// Type always returns AwsAvailability to satisfy [pflag.Value] interface -func (f *AwsAvailability) Type() string { - return "AwsAvailability" -} - -type AzureAttributes struct { - // Availability type used for all subsequent nodes past the - // `first_on_demand` ones. Note: If `first_on_demand` is zero (which only - // happens on pool clusters), this availability type will be used for the - // entire cluster. - Availability AzureAvailability `json:"availability,omitempty"` - // The first `first_on_demand` nodes of the cluster will be placed on - // on-demand instances. This value should be greater than 0, to make sure - // the cluster driver node is placed on an on-demand instance. If this value - // is greater than or equal to the current cluster size, all nodes will be - // placed on on-demand instances. If this value is less than the current - // cluster size, `first_on_demand` nodes will be placed on on-demand - // instances and the remainder will be placed on `availability` instances. - // Note that this value does not affect cluster size and cannot currently be - // mutated over the lifetime of a cluster. 
- FirstOnDemand int `json:"first_on_demand,omitempty"` - // Defines values necessary to configure and run Azure Log Analytics agent - LogAnalyticsInfo *LogAnalyticsInfo `json:"log_analytics_info,omitempty"` - // The max bid price to be used for Azure spot instances. The Max price for - // the bid cannot be higher than the on-demand price of the instance. If not - // specified, the default value is -1, which specifies that the instance - // cannot be evicted on the basis of price, and only on the basis of - // availability. Further, the value should > 0 or -1. - SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *AzureAttributes) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s AzureAttributes) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - -// Availability type used for all subsequent nodes past the `first_on_demand` -// ones. Note: If `first_on_demand` is zero (which only happens on pool -// clusters), this availability type will be used for the entire cluster. -type AzureAvailability string - -const AzureAvailabilityOnDemandAzure AzureAvailability = `ON_DEMAND_AZURE` - -const AzureAvailabilitySpotAzure AzureAvailability = `SPOT_AZURE` - -const AzureAvailabilitySpotWithFallbackAzure AzureAvailability = `SPOT_WITH_FALLBACK_AZURE` - -// String representation for [fmt.Print] -func (f *AzureAvailability) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *AzureAvailability) Set(v string) error { - switch v { - case `ON_DEMAND_AZURE`, `SPOT_AZURE`, `SPOT_WITH_FALLBACK_AZURE`: - *f = AzureAvailability(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_AZURE", "SPOT_AZURE", "SPOT_WITH_FALLBACK_AZURE"`, v) - } -} - -// Type always returns AzureAvailability to satisfy [pflag.Value] interface -func (f *AzureAvailability) Type() string { - return "AzureAvailability" -} - type BaseJob struct { // The time at which this job was created in epoch milliseconds // (milliseconds since 1/1/1970 UTC). @@ -588,23 +363,6 @@ type CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput struct { OutputSchemaInfo *OutputSchemaInfo `json:"output_schema_info,omitempty"` } -type ClientsTypes struct { - // With jobs set, the cluster can be used for jobs - Jobs bool `json:"jobs,omitempty"` - // With notebooks set, this cluster can be used for notebooks - Notebooks bool `json:"notebooks,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *ClientsTypes) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s ClientsTypes) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - type ClusterInstance struct { // The canonical identifier for the cluster used by a run. This field is // always available for runs on existing clusters. For runs on new clusters, @@ -636,208 +394,21 @@ func (s ClusterInstance) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type ClusterLogConf struct { - // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : - // "dbfs:/home/cluster_log" } }` - Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"` - // destination and either the region or endpoint need to be provided. e.g. 
- // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : - // "us-west-2" } }` Cluster iam role is used to access s3, please make sure - // the cluster iam role in `instance_profile_arn` has permission to write - // data to the s3 destination. - S3 *S3StorageInfo `json:"s3,omitempty"` -} - type ClusterSpec struct { - // When set to true, fixed and default values from the policy will be used - // for fields that are omitted. When set to false, only fixed values from - // the policy will be applied. - ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` - // Parameters needed in order to automatically scale clusters up and down - // based on load. Note: autoscaling works best with DB runtime versions 3.0 - // or later. - Autoscale *AutoScale `json:"autoscale,omitempty"` - // Automatically terminates the cluster after it is inactive for this time - // in minutes. If not set, this cluster will not be automatically - // terminated. If specified, the threshold must be between 10 and 10000 - // minutes. Users can also set this value to 0 to explicitly disable - // automatic termination. - AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` - // Attributes related to clusters running on Amazon Web Services. If not - // specified at cluster creation, a set of default values will be used. - AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` - // Attributes related to clusters running on Microsoft Azure. If not - // specified at cluster creation, a set of default values will be used. - AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` - // The configuration for delivering spark logs to a long-term storage - // destination. Two kinds of destinations (dbfs and s3) are supported. Only - // one destination can be specified for one cluster. If the conf is given, - // the logs will be delivered to the destination every `5 mins`. The - // destination of driver logs is `$destination/$clusterId/driver`, while the - // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` - // Cluster name requested by the user. This doesn't have to be unique. If - // not specified at creation, the cluster name will be an empty string. - ClusterName string `json:"cluster_name,omitempty"` - // Additional tags for cluster resources. Databricks will tag all cluster - // resources (e.g., AWS instances and EBS volumes) with these tags in - // addition to `default_tags`. Notes: - // - // - Currently, Databricks allows at most 45 custom tags - // - // - Clusters can only reuse cloud resources if the resources' tags are a - // subset of the cluster tags - CustomTags map[string]string `json:"custom_tags,omitempty"` - // Data security mode decides what data governance model to use when - // accessing data from a cluster. - // - // The following modes can only be used with `kind`. * - // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate - // access mode depending on your compute configuration. * - // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * - // `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`. - // - // The following modes can be used regardless of `kind`. * `NONE`: No - // security isolation for multiple users sharing the cluster. Data - // governance features are not available in this mode. 
* `SINGLE_USER`: A
- // secure cluster that can only be exclusively used by a single user
- // specified in `single_user_name`. Most programming languages, cluster
- // features and data governance features are available in this mode. *
- // `USER_ISOLATION`: A secure cluster that can be shared by multiple users.
- // Cluster users are fully isolated so that they cannot see each other's
- // data and credentials. Most data governance features are supported in this
- // mode. But programming languages and cluster features might be limited.
- //
- // The following modes are deprecated starting with Databricks Runtime 15.0
- // and will be removed for future Databricks Runtime versions:
- //
- // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table
- // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating
- // from legacy Passthrough on high concurrency clusters. *
- // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This
- // mode provides a way that doesn’t have UC nor passthrough enabled.
- DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"`
-
- DockerImage *DockerImage `json:"docker_image,omitempty"`
- // The optional ID of the instance pool for the driver of the cluster
- // belongs. The pool cluster uses the instance pool with id
- // (instance_pool_id) if the driver pool is not assigned.
- DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"`
- // The node type of the Spark driver. Note that this field is optional; if
- // unset, the driver node type will be set as the same value as
- // `node_type_id` defined above.
- DriverNodeTypeId string `json:"driver_node_type_id,omitempty"`
- // Autoscaling Local Storage: when enabled, this cluster will dynamically
- // acquire additional disk space when its Spark workers are running low on
- // disk space. This feature requires specific AWS permissions to function
- // correctly - refer to the User Guide for more details.
- EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
- // Whether to enable LUKS on cluster VMs' local disks
- EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
// If existing_cluster_id, the ID of an existing cluster that is used for
// all runs. When running jobs or tasks on an existing cluster, you may need
// to manually restart the cluster if it stops responding. We suggest
// running jobs and tasks on new clusters for greater reliability
ExistingClusterId string `json:"existing_cluster_id,omitempty"`
- // Attributes related to clusters running on Google Cloud Platform. If not
- // specified at cluster creation, a set of default values will be used.
- GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"`
- // The configuration for storing init scripts. Any number of destinations
- // can be specified. The scripts are executed sequentially in the order
- // provided. If `cluster_log_conf` is specified, init script logs are sent
- // to `//init_scripts`.
- InitScripts []InitScriptInfo `json:"init_scripts,omitempty"`
- // The optional ID of the instance pool to which the cluster belongs.
- InstancePoolId string `json:"instance_pool_id,omitempty"`
- // This field can only be used with `kind`.
- //
- // When set to true, Databricks will automatically set single node related
- // `custom_tags`, `spark_conf`, and `num_workers`
- IsSingleNode bool `json:"is_single_node,omitempty"`
// If job_cluster_key, this task is executed reusing the cluster specified
// in `job.settings.job_clusters`.
JobClusterKey string `json:"job_cluster_key,omitempty"`
- // The kind of compute described by this compute specification.
- //
- // Depending on `kind`, different validations and default values will be
- // applied.
- //
- // The first usage of this value is for the simple cluster form where it
- // sets `kind = CLASSIC_PREVIEW`.
- Kind Kind `json:"kind,omitempty"`
// An optional list of libraries to be installed on the cluster. The default
// value is an empty list.
Libraries []Library `json:"libraries,omitempty"`
// If new_cluster, a description of a new cluster that is created for each
// run.
NewCluster *ClusterSpec `json:"new_cluster,omitempty"`
- // This field encodes, through a single value, the resources available to
- // each of the Spark nodes in this cluster. For example, the Spark nodes can
- // be provisioned and optimized for memory or compute intensive workloads. A
- // list of available node types can be retrieved by using the
- // :method:clusters/listNodeTypes API call.
- NodeTypeId string `json:"node_type_id,omitempty"`
- // Number of worker nodes that this cluster should have. A cluster has one
- // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1
- // Spark nodes.
- //
- // Note: When reading the properties of a cluster, this field reflects the
- // desired number of workers rather than the actual current number of
- // workers. For instance, if a cluster is resized from 5 to 10 workers, this
- // field will immediately be updated to reflect the target size of 10
- // workers, whereas the workers listed in `spark_info` will gradually
- // increase from 5 to 10 as the new nodes are provisioned.
- NumWorkers int `json:"num_workers,omitempty"`
- // The ID of the cluster policy used to create the cluster if applicable.
- PolicyId string `json:"policy_id,omitempty"`
- // Determines the cluster's runtime engine, either standard or Photon.
- //
- // This field is not compatible with legacy `spark_version` values that
- // contain `-photon-`. Remove `-photon-` from the `spark_version` and set
- // `runtime_engine` to `PHOTON`.
- //
- // If left unspecified, the runtime engine defaults to standard unless the
- // spark_version contains -photon-, in which case Photon will be used.
- RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"`
- // Single user name if data_security_mode is `SINGLE_USER`
- SingleUserName string `json:"single_user_name,omitempty"`
- // An object containing a set of optional, user-specified Spark
- // configuration key-value pairs. Users can also pass in a string of extra
- // JVM options to the driver and the executors via
- // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`
- // respectively.
- SparkConf map[string]string `json:"spark_conf,omitempty"`
- // An object containing a set of optional, user-specified environment
- // variable key-value pairs. Please note that key-value pair of the form
- // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the
- // driver and workers.
- //
- // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we
- // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the
- // example below. This ensures that all default databricks managed
- // environmental variables are included as well.
- //
- // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m",
- // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS":
- // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}`
- SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
- // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of
- // available Spark versions can be retrieved by using the
- // :method:clusters/sparkVersions API call.
- SparkVersion string `json:"spark_version,omitempty"`
- // SSH public key contents that will be added to each Spark node in this
- // cluster. The corresponding private keys can be used to login with the
- // user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
- SshPublicKeys []string `json:"ssh_public_keys,omitempty"`
- // This field can only be used with `kind`.
- //
- // `effective_spark_version` is determined by `spark_version` (DBR release),
- // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or
- // not.
- UseMlRuntime bool `json:"use_ml_runtime,omitempty"`
-
- WorkloadType *WorkloadType `json:"workload_type,omitempty"`
ForceSendFields []string `json:"-"`
}
@@ -1099,102 +670,6 @@ type CronSchedule struct {
TimezoneId string `json:"timezone_id"`
}
-// Data security mode decides what data governance model to use when accessing
-// data from a cluster.
-//
-// The following modes can only be used with `kind`. *
-// `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access
-// mode depending on your compute configuration. *
-// `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. *
-// `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.
-//
-// The following modes can be used regardless of `kind`. * `NONE`: No security
-// isolation for multiple users sharing the cluster. Data governance features
-// are not available in this mode. * `SINGLE_USER`: A secure cluster that can
-// only be exclusively used by a single user specified in `single_user_name`.
-// Most programming languages, cluster features and data governance features are
-// available in this mode. * `USER_ISOLATION`: A secure cluster that can be
-// shared by multiple users. Cluster users are fully isolated so that they
-// cannot see each other's data and credentials. Most data governance features
-// are supported in this mode. But programming languages and cluster features
-// might be limited.
-//
-// The following modes are deprecated starting with Databricks Runtime 15.0 and
-// will be removed for future Databricks Runtime versions:
-//
-// * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL
-// clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating from
-// legacy Passthrough on high concurrency clusters. * `LEGACY_SINGLE_USER`: This
-// mode is for users migrating from legacy Passthrough on standard clusters. *
-// `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have
-// UC nor passthrough enabled.
-type DataSecurityMode string
-
-// will choose the most appropriate access mode depending on your
-// compute configuration.
-const DataSecurityModeDataSecurityModeAuto DataSecurityMode = `DATA_SECURITY_MODE_AUTO`
-
-// Alias for `SINGLE_USER`.
-const DataSecurityModeDataSecurityModeDedicated DataSecurityMode = `DATA_SECURITY_MODE_DEDICATED`
-
-// Alias for `USER_ISOLATION`.
-const DataSecurityModeDataSecurityModeStandard DataSecurityMode = `DATA_SECURITY_MODE_STANDARD`
-
-// This mode is for users migrating from legacy Passthrough on high concurrency
-// clusters.
-const DataSecurityModeLegacyPassthrough DataSecurityMode = `LEGACY_PASSTHROUGH`
-
-// This mode is for users migrating from legacy Passthrough on standard
-// clusters.
-const DataSecurityModeLegacySingleUser DataSecurityMode = `LEGACY_SINGLE_USER`
-
-// This mode provides a way that doesn’t have UC nor passthrough enabled.
-const DataSecurityModeLegacySingleUserStandard DataSecurityMode = `LEGACY_SINGLE_USER_STANDARD`
-
-// This mode is for users migrating from legacy Table ACL clusters.
-const DataSecurityModeLegacyTableAcl DataSecurityMode = `LEGACY_TABLE_ACL`
-
-// No security isolation for multiple users sharing the cluster. Data governance
-// features are not available in this mode.
-const DataSecurityModeNone DataSecurityMode = `NONE`
-
-// A secure cluster that can only be exclusively used by a single user specified
-// in `single_user_name`. Most programming languages, cluster features and data
-// governance features are available in this mode.
-const DataSecurityModeSingleUser DataSecurityMode = `SINGLE_USER`
-
-// A secure cluster that can be shared by multiple users. Cluster users are
-// fully isolated so that they cannot see each other's data and credentials.
-// Most data governance features are supported in this mode. But programming
-// languages and cluster features might be limited.
-const DataSecurityModeUserIsolation DataSecurityMode = `USER_ISOLATION`
-
-// String representation for [fmt.Print]
-func (f *DataSecurityMode) String() string {
- return string(*f)
-}
-
-// Set raw string value and validate it against allowed values
-func (f *DataSecurityMode) Set(v string) error {
- switch v {
- case `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, `DATA_SECURITY_MODE_STANDARD`, `LEGACY_PASSTHROUGH`, `LEGACY_SINGLE_USER`, `LEGACY_SINGLE_USER_STANDARD`, `LEGACY_TABLE_ACL`, `NONE`, `SINGLE_USER`, `USER_ISOLATION`:
- *f = DataSecurityMode(v)
- return nil
- default:
- return fmt.Errorf(`value "%s" is not one of "DATA_SECURITY_MODE_AUTO", "DATA_SECURITY_MODE_DEDICATED", "DATA_SECURITY_MODE_STANDARD", "LEGACY_PASSTHROUGH", "LEGACY_SINGLE_USER", "LEGACY_SINGLE_USER_STANDARD", "LEGACY_TABLE_ACL", "NONE", "SINGLE_USER", "USER_ISOLATION"`, v)
- }
-}
-
-// Type always returns DataSecurityMode to satisfy [pflag.Value] interface
-func (f *DataSecurityMode) Type() string {
- return "DataSecurityMode"
-}
-
-type DbfsStorageInfo struct {
- // dbfs destination, e.g. `dbfs:/my/path`
- Destination string `json:"destination"`
-}
-
type DbtOutput struct {
// An optional map of headers to send when retrieving the artifact from the
// `artifacts_link`.
@@ -1278,67 +753,6 @@ type DeleteRun struct {
type DeleteRunResponse struct {
}

-type DockerBasicAuth struct {
- // Password of the user
- Password string `json:"password,omitempty"`
- // Name of the user
- Username string `json:"username,omitempty"`
-
- ForceSendFields []string `json:"-"`
-}
-
-func (s *DockerBasicAuth) UnmarshalJSON(b []byte) error {
- return marshal.Unmarshal(b, s)
-}
-
-func (s DockerBasicAuth) MarshalJSON() ([]byte, error) {
- return marshal.Marshal(s)
-}
-
-type DockerImage struct {
- BasicAuth *DockerBasicAuth `json:"basic_auth,omitempty"`
- // URL of the docker image.
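For readers unfamiliar with the shape being removed here, a minimal sketch of building a DockerImage with registry credentials; it assumes the DockerImage and DockerBasicAuth definitions from the removed lines remain available elsewhere, and the helper name and values are illustrative only:

// Illustrative sketch; exampleDockerImage is a hypothetical helper.
func exampleDockerImage() *DockerImage {
	return &DockerImage{
		Url: "my-registry.example.com/images/runtime:latest", // hypothetical image URL
		BasicAuth: &DockerBasicAuth{
			Username: "token",       // name of the user
			Password: "placeholder", // never hard-code real credentials
		},
	}
}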
- Url string `json:"url,omitempty"`
-
- ForceSendFields []string `json:"-"`
-}
-
-func (s *DockerImage) UnmarshalJSON(b []byte) error {
- return marshal.Unmarshal(b, s)
-}
-
-func (s DockerImage) MarshalJSON() ([]byte, error) {
- return marshal.Marshal(s)
-}
-
-// The type of EBS volumes that will be launched with this cluster.
-type EbsVolumeType string
-
-const EbsVolumeTypeGeneralPurposeSsd EbsVolumeType = `GENERAL_PURPOSE_SSD`
-
-const EbsVolumeTypeThroughputOptimizedHdd EbsVolumeType = `THROUGHPUT_OPTIMIZED_HDD`
-
-// String representation for [fmt.Print]
-func (f *EbsVolumeType) String() string {
- return string(*f)
-}
-
-// Set raw string value and validate it against allowed values
-func (f *EbsVolumeType) Set(v string) error {
- switch v {
- case `GENERAL_PURPOSE_SSD`, `THROUGHPUT_OPTIMIZED_HDD`:
- *f = EbsVolumeType(v)
- return nil
- default:
- return fmt.Errorf(`value "%s" is not one of "GENERAL_PURPOSE_SSD", "THROUGHPUT_OPTIMIZED_HDD"`, v)
- }
-}
-
-// Type always returns EbsVolumeType to satisfy [pflag.Value] interface
-func (f *EbsVolumeType) Type() string {
- return "EbsVolumeType"
-}
-
// Represents a change to the job cluster's settings that would be required for
// the job clusters to become compliant with their policies.
type EnforcePolicyComplianceForJobResponseJobClusterSettingsChange struct {
@@ -1575,87 +989,6 @@ func (f *Format) Type() string {
return "Format"
}
-type GcpAttributes struct {
- // This field determines whether the instance pool will contain preemptible
- // VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs
- // if the former is unavailable.
- Availability GcpAvailability `json:"availability,omitempty"`
- // boot disk size in GB
- BootDiskSize int `json:"boot_disk_size,omitempty"`
- // If provided, the cluster will impersonate the google service account when
- // accessing gcloud services (like GCS). The google service account must
- // have previously been added to the Databricks environment by an account
- // administrator.
- GoogleServiceAccount string `json:"google_service_account,omitempty"`
- // If provided, each node (workers and driver) in the cluster will have this
- // number of local SSDs attached. Each local SSD is 375GB in size. Refer to
- // [GCP documentation] for the supported number of local SSDs for each
- // instance type.
- //
- // [GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds
- LocalSsdCount int `json:"local_ssd_count,omitempty"`
- // This field determines whether the spark executors will be scheduled to
- // run on preemptible VMs (when set to true) versus standard compute engine
- // VMs (when set to false; default). Note: Soon to be deprecated, use the
- // availability field instead.
- UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"`
- // Identifier for the availability zone in which the cluster resides. This
- // can be one of the following: - "HA" => High availability, spread nodes
- // across availability zones for a Databricks deployment region [default] -
- // "AUTO" => Databricks picks an availability zone to schedule the cluster
- // on. - A GCP availability zone => Pick One of the available zones for
- // (machine type + region) from
- // https://cloud.google.com/compute/docs/regions-zones.
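A minimal sketch of how the GcpAttributes fields documented above compose, assuming the GcpAttributes and GcpAvailability definitions shown in these removed lines; the helper name and values are illustrative:

// Illustrative sketch; exampleGcpAttributes is a hypothetical helper.
func exampleGcpAttributes() *GcpAttributes {
	return &GcpAttributes{
		// Preemptible VMs, falling back to on-demand if unavailable.
		Availability:  GcpAvailabilityPreemptibleWithFallbackGcp,
		BootDiskSize:  100,    // boot disk size in GB
		LocalSsdCount: 1,      // each local SSD is 375GB
		ZoneId:        "AUTO", // let Databricks pick a zone in the region
	}
}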
- ZoneId string `json:"zone_id,omitempty"`
-
- ForceSendFields []string `json:"-"`
-}
-
-func (s *GcpAttributes) UnmarshalJSON(b []byte) error {
- return marshal.Unmarshal(b, s)
-}
-
-func (s GcpAttributes) MarshalJSON() ([]byte, error) {
- return marshal.Marshal(s)
-}
-
-// This field determines whether the instance pool will contain preemptible VMs,
-// on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the
-// former is unavailable.
-type GcpAvailability string
-
-const GcpAvailabilityOnDemandGcp GcpAvailability = `ON_DEMAND_GCP`
-
-const GcpAvailabilityPreemptibleGcp GcpAvailability = `PREEMPTIBLE_GCP`
-
-const GcpAvailabilityPreemptibleWithFallbackGcp GcpAvailability = `PREEMPTIBLE_WITH_FALLBACK_GCP`
-
-// String representation for [fmt.Print]
-func (f *GcpAvailability) String() string {
- return string(*f)
-}
-
-// Set raw string value and validate it against allowed values
-func (f *GcpAvailability) Set(v string) error {
- switch v {
- case `ON_DEMAND_GCP`, `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP`:
- *f = GcpAvailability(v)
- return nil
- default:
- return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_GCP", "PREEMPTIBLE_GCP", "PREEMPTIBLE_WITH_FALLBACK_GCP"`, v)
- }
-}
-
-// Type always returns GcpAvailability to satisfy [pflag.Value] interface
-func (f *GcpAvailability) Type() string {
- return "GcpAvailability"
-}
-
-type GcsStorageInfo struct {
- // GCS destination/URI, e.g. `gs://my-bucket/some-prefix`
- Destination string `json:"destination"`
-}
-
// Get job permission levels
type GetJobPermissionLevelsRequest struct {
// The job for which to get or manage permissions.
@@ -1854,34 +1187,6 @@ func (s GitSource) MarshalJSON() ([]byte, error) {
return marshal.Marshal(s)
}
-type InitScriptInfo struct {
- // destination needs to be provided. e.g. `{ "abfss" : { "destination" :
- // "abfss://@.dfs.core.windows.net/"
- // } }
- Abfss *Adlsgen2Info `json:"abfss,omitempty"`
- // destination needs to be provided. e.g. `{ "dbfs" : { "destination" :
- // "dbfs:/home/cluster_log" } }`
- Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"`
- // destination needs to be provided. e.g. `{ "file" : { "destination" :
- // "file:/my/local/file.sh" } }`
- File *LocalFileInfo `json:"file,omitempty"`
- // destination needs to be provided. e.g. `{ "gcs": { "destination":
- // "gs://my-bucket/file.sh" } }`
- Gcs *GcsStorageInfo `json:"gcs,omitempty"`
- // destination and either the region or endpoint need to be provided. e.g.
- // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" :
- // "us-west-2" } }` Cluster iam role is used to access s3, please make sure
- // the cluster iam role in `instance_profile_arn` has permission to write
- // data to the s3 destination.
- S3 *S3StorageInfo `json:"s3,omitempty"`
- // destination needs to be provided. e.g. `{ "volumes" : { "destination" :
- // "/Volumes/my-init.sh" } }`
- Volumes *VolumesStorageInfo `json:"volumes,omitempty"`
- // destination needs to be provided. e.g. `{ "workspace" : { "destination" :
- // "/Users/user1@databricks.com/my-init.sh" } }`
- Workspace *WorkspaceStorageInfo `json:"workspace,omitempty"`
-}
-
// Job was retrieved successfully.
type Job struct {
// The time at which this job was created in epoch milliseconds
@@ -2584,38 +1889,6 @@ type JobsHealthRules struct {
Rules []JobsHealthRule `json:"rules,omitempty"`
}
-// The kind of compute described by this compute specification.
-//
-// Depending on `kind`, different validations and default values will be
-// applied.
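Each InitScriptInfo entry (removed above) sets exactly one destination; a minimal sketch assuming the storage-info types defined alongside it in this diff, with the helper name hypothetical and the destinations echoing the doc examples:

// Illustrative sketch; scripts execute sequentially in the order provided.
func exampleInitScripts() []InitScriptInfo {
	return []InitScriptInfo{
		{Workspace: &WorkspaceStorageInfo{Destination: "/Users/user1@databricks.com/my-init.sh"}},
		{Volumes: &VolumesStorageInfo{Destination: "/Volumes/my-init.sh"}},
		// For s3, a region (or endpoint) is also required.
		{S3: &S3StorageInfo{Destination: "s3://cluster_log_bucket/prefix", Region: "us-west-2"}},
	}
}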
-//
-// The first usage of this value is for the simple cluster form where it sets
-// `kind = CLASSIC_PREVIEW`.
-type Kind string
-
-const KindClassicPreview Kind = `CLASSIC_PREVIEW`
-
-// String representation for [fmt.Print]
-func (f *Kind) String() string {
- return string(*f)
-}
-
-// Set raw string value and validate it against allowed values
-func (f *Kind) Set(v string) error {
- switch v {
- case `CLASSIC_PREVIEW`:
- *f = Kind(v)
- return nil
- default:
- return fmt.Errorf(`value "%s" is not one of "CLASSIC_PREVIEW"`, v)
- }
-}
-
-// Type always returns Kind to satisfy [pflag.Value] interface
-func (f *Kind) Type() string {
- return "Kind"
-}
-
type Library struct {
// Specification of a CRAN library to be installed as part of the library
Cran *RCranLibrary `json:"cran,omitempty"`
@@ -2840,28 +2113,6 @@ func (s ListRunsResponse) MarshalJSON() ([]byte, error) {
return marshal.Marshal(s)
}
-type LocalFileInfo struct {
- // local file destination, e.g. `file:/my/local/file.sh`
- Destination string `json:"destination"`
-}
-
-type LogAnalyticsInfo struct {
- //
- LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"`
- //
- LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"`
-
- ForceSendFields []string `json:"-"`
-}
-
-func (s *LogAnalyticsInfo) UnmarshalJSON(b []byte) error {
- return marshal.Unmarshal(b, s)
-}
-
-func (s LogAnalyticsInfo) MarshalJSON() ([]byte, error) {
- return marshal.Marshal(s)
-}
-
type MavenLibrary struct {
// Gradle-style maven coordinates. For example: "org.jsoup:jsoup:1.7.2".
Coordinates string `json:"coordinates"`
@@ -4546,86 +3797,6 @@ func (f *RunType) Type() string {
return "RunType"
}
-// Determines the cluster's runtime engine, either standard or Photon.
-//
-// This field is not compatible with legacy `spark_version` values that contain
-// `-photon-`. Remove `-photon-` from the `spark_version` and set
-// `runtime_engine` to `PHOTON`.
-//
-// If left unspecified, the runtime engine defaults to standard unless the
-// spark_version contains -photon-, in which case Photon will be used.
-type RuntimeEngine string
-
-const RuntimeEngineNull RuntimeEngine = `NULL`
-
-const RuntimeEnginePhoton RuntimeEngine = `PHOTON`
-
-const RuntimeEngineStandard RuntimeEngine = `STANDARD`
-
-// String representation for [fmt.Print]
-func (f *RuntimeEngine) String() string {
- return string(*f)
-}
-
-// Set raw string value and validate it against allowed values
-func (f *RuntimeEngine) Set(v string) error {
- switch v {
- case `NULL`, `PHOTON`, `STANDARD`:
- *f = RuntimeEngine(v)
- return nil
- default:
- return fmt.Errorf(`value "%s" is not one of "NULL", "PHOTON", "STANDARD"`, v)
- }
-}
-
-// Type always returns RuntimeEngine to satisfy [pflag.Value] interface
-func (f *RuntimeEngine) Type() string {
- return "RuntimeEngine"
-}
-
-type S3StorageInfo struct {
- // (Optional) Set canned access control list for the logs, e.g.
- // `bucket-owner-full-control`. If `canned_acl` is set, please make sure the
- // cluster iam role has `s3:PutObjectAcl` permission on the destination
- // bucket and prefix. The full list of possible canned acl can be found at
- // http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.
- // Please also note that by default only the object owner gets full
- // controls. If you are using cross account role for writing data, you may
- // want to set `bucket-owner-full-control` to make bucket owner able to read
- // the logs.
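The enum types removed above (DataSecurityMode, EbsVolumeType, GcpAvailability, Kind, RuntimeEngine) all follow the same String/Set/Type pattern, which is what lets them satisfy the [pflag.Value] interface and back a validated command-line flag. A minimal sketch of that contract, assuming the removed definitions are in scope and "fmt" is imported:

// Illustrative sketch of the pflag.Value contract these enums satisfy.
func exampleEnumFlag() {
	var engine RuntimeEngine
	if err := engine.Set("PHOTON"); err != nil {
		// Set rejects anything outside the allowed values, e.g.
		// value "TURBO" is not one of "NULL", "PHOTON", "STANDARD"
		panic(err)
	}
	fmt.Println(engine.Type(), engine.String()) // RuntimeEngine PHOTON
}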
- CannedAcl string `json:"canned_acl,omitempty"`
- // S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be
- // delivered using cluster iam role, please make sure you set cluster iam
- // role and the role has write access to the destination. Please also note
- // that you cannot use AWS keys to deliver logs.
- Destination string `json:"destination"`
- // (Optional) Flag to enable server side encryption, `false` by default.
- EnableEncryption bool `json:"enable_encryption,omitempty"`
- // (Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It
- // will be used only when encryption is enabled and the default type is
- // `sse-s3`.
- EncryptionType string `json:"encryption_type,omitempty"`
- // S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or
- // endpoint needs to be set. If both are set, endpoint will be used.
- Endpoint string `json:"endpoint,omitempty"`
- // (Optional) Kms key which will be used if encryption is enabled and
- // encryption type is set to `sse-kms`.
- KmsKey string `json:"kms_key,omitempty"`
- // S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set.
- // If both are set, endpoint will be used.
- Region string `json:"region,omitempty"`
-
- ForceSendFields []string `json:"-"`
-}
-
-func (s *S3StorageInfo) UnmarshalJSON(b []byte) error {
- return marshal.Unmarshal(b, s)
-}
-
-func (s S3StorageInfo) MarshalJSON() ([]byte, error) {
- return marshal.Marshal(s)
-}
-
// Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file
// will be retrieved\ from the local Databricks workspace. When set to `GIT`,
// the SQL file will be retrieved from a Git repository defined in `git_source`.
@@ -5954,11 +5125,6 @@ func (f *ViewsToExport) Type() string {
return "ViewsToExport"
}
-type VolumesStorageInfo struct {
- // Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`
- Destination string `json:"destination"`
-}
-
type Webhook struct {
Id string `json:"id"`
}
@@ -5989,14 +5155,3 @@ type WebhookNotifications struct {
// the `on_success` property.
OnSuccess []Webhook `json:"on_success,omitempty"`
}
-
-type WorkloadType struct {
- // defined what type of clients can use the cluster. E.g. Notebooks, Jobs
- Clients ClientsTypes `json:"clients"`
-}
-
-type WorkspaceStorageInfo struct {
- // workspace files destination, e.g.
- // `/Users/user1@databricks.com/my-init.sh` - Destination string `json:"destination"` -} diff --git a/marketplace/v2/impl.go b/marketplace/v2/impl.go index 10a964e6..49ae6d60 100755 --- a/marketplace/v2/impl.go +++ b/marketplace/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just consumer_fulfillments API methods +// unexported type that holds implementations of just ConsumerFulfillments API methods type consumerFulfillmentsImpl struct { client *client.DatabricksClient } @@ -33,7 +33,7 @@ func (a *consumerFulfillmentsImpl) List(ctx context.Context, request ListFulfill return &listFulfillmentsResponse, err } -// unexported type that holds implementations of just consumer_installations API methods +// unexported type that holds implementations of just ConsumerInstallations API methods type consumerInstallationsImpl struct { client *client.DatabricksClient } @@ -79,13 +79,13 @@ func (a *consumerInstallationsImpl) Update(ctx context.Context, request UpdateIn var updateInstallationResponse UpdateInstallationResponse path := fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v/installations/%v", request.ListingId, request.InstallationId) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateInstallationResponse) return &updateInstallationResponse, err } -// unexported type that holds implementations of just consumer_listings API methods +// unexported type that holds implementations of just ConsumerListings API methods type consumerListingsImpl struct { client *client.DatabricksClient } @@ -126,7 +126,7 @@ func (a *consumerListingsImpl) Search(ctx context.Context, request SearchListing return &searchListingsResponse, err } -// unexported type that holds implementations of just consumer_personalization_requests API methods +// unexported type that holds implementations of just ConsumerPersonalizationRequests API methods type consumerPersonalizationRequestsImpl struct { client *client.DatabricksClient } @@ -159,7 +159,7 @@ func (a *consumerPersonalizationRequestsImpl) List(ctx context.Context, request return &listAllPersonalizationRequestsResponse, err } -// unexported type that holds implementations of just consumer_providers API methods +// unexported type that holds implementations of just ConsumerProviders API methods type consumerProvidersImpl struct { client *client.DatabricksClient } @@ -191,7 +191,7 @@ func (a *consumerProvidersImpl) List(ctx context.Context, request ListProvidersR return &listProvidersResponse, err } -// unexported type that holds implementations of just provider_exchange_filters API methods +// unexported type that holds implementations of just ProviderExchangeFilters API methods type providerExchangeFiltersImpl struct { client *client.DatabricksClient } @@ -200,8 +200,8 @@ func (a *providerExchangeFiltersImpl) Create(ctx context.Context, request Create var createExchangeFilterResponse CreateExchangeFilterResponse path := "/api/2.0/marketplace-exchange/filters" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createExchangeFilterResponse) return &createExchangeFilterResponse, err } @@ -234,7 +234,7 @@ 
func (a *providerExchangeFiltersImpl) Update(ctx context.Context, request Update return &updateExchangeFilterResponse, err } -// unexported type that holds implementations of just provider_exchanges API methods +// unexported type that holds implementations of just ProviderExchanges API methods type providerExchangesImpl struct { client *client.DatabricksClient } @@ -317,13 +317,13 @@ func (a *providerExchangesImpl) Update(ctx context.Context, request UpdateExchan var updateExchangeResponse UpdateExchangeResponse path := fmt.Sprintf("/api/2.0/marketplace-exchange/exchanges/%v", request.Id) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateExchangeResponse) return &updateExchangeResponse, err } -// unexported type that holds implementations of just provider_files API methods +// unexported type that holds implementations of just ProviderFiles API methods type providerFilesImpl struct { client *client.DatabricksClient } @@ -365,7 +365,7 @@ func (a *providerFilesImpl) List(ctx context.Context, request ListFilesRequest) return &listFilesResponse, err } -// unexported type that holds implementations of just provider_listings API methods +// unexported type that holds implementations of just ProviderListings API methods type providerListingsImpl struct { client *client.DatabricksClient } @@ -417,7 +417,7 @@ func (a *providerListingsImpl) Update(ctx context.Context, request UpdateListing return &updateListingResponse, err } -// unexported type that holds implementations of just provider_personalization_requests API methods +// unexported type that holds implementations of just ProviderPersonalizationRequests API methods type providerPersonalizationRequestsImpl struct { client *client.DatabricksClient } @@ -441,7 +441,7 @@ func (a *providerPersonalizationRequestsImpl) Update(ctx context.Context, reques return &updatePersonalizationRequestResponse, err } -// unexported type that holds implementations of just provider_provider_analytics_dashboards API methods +// unexported type that holds implementations of just ProviderProviderAnalyticsDashboards API methods type providerProviderAnalyticsDashboardsImpl struct { client *client.DatabricksClient } @@ -483,7 +483,7 @@ func (a *providerProviderAnalyticsDashboardsImpl) Update(ctx context.Context, re return &updateProviderAnalyticsDashboardResponse, err } -// unexported type that holds implementations of just provider_providers API methods +// unexported type that holds implementations of just ProviderProviders API methods type providerProvidersImpl struct { client *client.DatabricksClient } @@ -529,8 +529,8 @@ func (a *providerProvidersImpl) Update(ctx context.Context, request UpdateProvid var updateProviderResponse UpdateProviderResponse path := fmt.Sprintf("/api/2.0/marketplace-provider/providers/%v", request.Id) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateProviderResponse) return &updateProviderResponse, err } diff --git a/ml/v2/impl.go b/ml/v2/impl.go index 3c69d6aa..e0f844b4 100755 --- a/ml/v2/impl.go +++ b/ml/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just experiments API 
methods +// unexported type that holds implementations of just Experiments API methods type experimentsImpl struct { client *client.DatabricksClient } @@ -29,8 +29,8 @@ func (a *experimentsImpl) CreateRun(ctx context.Context, request CreateRun) (*Cr var createRunResponse CreateRunResponse path := "/api/2.0/mlflow/runs/create" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createRunResponse) return &createRunResponse, err } @@ -69,8 +69,8 @@ func (a *experimentsImpl) DeleteTag(ctx context.Context, request DeleteTag) erro var deleteTagResponse DeleteTagResponse path := "/api/2.0/mlflow/runs/delete-tag" headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteTagResponse) return err } @@ -241,8 +241,8 @@ func (a *experimentsImpl) SearchRuns(ctx context.Context, request SearchRuns) (* var searchRunsResponse SearchRunsResponse path := "/api/2.0/mlflow/runs/search" headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &searchRunsResponse) return &searchRunsResponse, err } @@ -291,8 +291,8 @@ func (a *experimentsImpl) UpdatePermissions(ctx context.Context, request Experim var experimentPermissions ExperimentPermissions path := fmt.Sprintf("/api/2.0/permissions/experiments/%v", request.ExperimentId) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &experimentPermissions) return &experimentPermissions, err } @@ -307,7 +307,7 @@ func (a *experimentsImpl) UpdateRun(ctx context.Context, request UpdateRun) (*Up return &updateRunResponse, err } -// unexported type that holds implementations of just model_registry API methods +// unexported type that holds implementations of just ModelRegistry API methods type modelRegistryImpl struct { client *client.DatabricksClient } @@ -336,8 +336,8 @@ func (a *modelRegistryImpl) CreateModel(ctx context.Context, request CreateModel var createModelResponse CreateModelResponse path := "/api/2.0/mlflow/registered-models/create" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createModelResponse) return &createModelResponse, err } @@ -439,8 +439,8 @@ func (a *modelRegistryImpl) GetLatestVersions(ctx context.Context, request GetLa var getLatestVersionsResponse GetLatestVersionsResponse path := "/api/2.0/mlflow/registered-models/get-latest-versions" headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &getLatestVersionsResponse) return &getLatestVersionsResponse, err } @@ -599,8 +599,8 @@ func (a *modelRegistryImpl) TransitionStage(ctx context.Context, request Transit var transitionStageResponse 
TransitionStageResponse path := "/api/2.0/mlflow/databricks/model-versions/transition-stage" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &transitionStageResponse) return &transitionStageResponse, err } diff --git a/oauth2/v2/impl.go b/oauth2/v2/impl.go index 448de41b..65419737 100755 --- a/oauth2/v2/impl.go +++ b/oauth2/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just account_federation_policy API methods +// unexported type that holds implementations of just AccountFederationPolicy API methods type accountFederationPolicyImpl struct { client *client.DatabricksClient } @@ -19,8 +19,8 @@ func (a *accountFederationPolicyImpl) Create(ctx context.Context, request Create var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/federationPolicies", a.client.ConfiguredAccountID()) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request.Policy, &federationPolicy) return &federationPolicy, err } @@ -62,7 +62,7 @@ func (a *accountFederationPolicyImpl) Update(ctx context.Context, request Update return &federationPolicy, err } -// unexported type that holds implementations of just custom_app_integration API methods +// unexported type that holds implementations of just CustomAppIntegration API methods type customAppIntegrationImpl struct { client *client.DatabricksClient } @@ -114,7 +114,7 @@ func (a *customAppIntegrationImpl) Update(ctx context.Context, request UpdateCus return err } -// unexported type that holds implementations of just o_auth_published_apps API methods +// unexported type that holds implementations of just OAuthPublishedApps API methods type oAuthPublishedAppsImpl struct { client *client.DatabricksClient } @@ -128,7 +128,7 @@ func (a *oAuthPublishedAppsImpl) List(ctx context.Context, request ListOAuthPubl return &getPublishedAppsOutput, err } -// unexported type that holds implementations of just published_app_integration API methods +// unexported type that holds implementations of just PublishedAppIntegration API methods type publishedAppIntegrationImpl struct { client *client.DatabricksClient } @@ -180,7 +180,7 @@ func (a *publishedAppIntegrationImpl) Update(ctx context.Context, request Update return err } -// unexported type that holds implementations of just service_principal_federation_policy API methods +// unexported type that holds implementations of just ServicePrincipalFederationPolicy API methods type servicePrincipalFederationPolicyImpl struct { client *client.DatabricksClient } @@ -189,8 +189,8 @@ func (a *servicePrincipalFederationPolicyImpl) Create(ctx context.Context, reque var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/federationPolicies", a.client.ConfiguredAccountID(), request.ServicePrincipalId) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request.Policy, &federationPolicy) return &federationPolicy, err } @@ -226,13 +226,13 @@ func (a 
*servicePrincipalFederationPolicyImpl) Update(ctx context.Context, reque var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.PolicyId) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request.Policy, &federationPolicy) return &federationPolicy, err } -// unexported type that holds implementations of just service_principal_secrets API methods +// unexported type that holds implementations of just ServicePrincipalSecrets API methods type servicePrincipalSecretsImpl struct { client *client.DatabricksClient } diff --git a/pipelines/v2/impl.go b/pipelines/v2/impl.go index d1f5a32a..335530cd 100755 --- a/pipelines/v2/impl.go +++ b/pipelines/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just pipelines API methods +// unexported type that holds implementations of just Pipelines API methods type pipelinesImpl struct { client *client.DatabricksClient } @@ -111,8 +111,8 @@ func (a *pipelinesImpl) StartUpdate(ctx context.Context, request StartUpdate) (* var startUpdateResponse StartUpdateResponse path := fmt.Sprintf("/api/2.0/pipelines/%v/updates", request.PipelineId) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &startUpdateResponse) return &startUpdateResponse, err } @@ -140,8 +140,8 @@ func (a *pipelinesImpl) UpdatePermissions(ctx context.Context, request PipelineP var pipelinePermissions PipelinePermissions path := fmt.Sprintf("/api/2.0/permissions/pipelines/%v", request.PipelineId) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &pipelinePermissions) return &pipelinePermissions, err } diff --git a/provisioning/v2/impl.go b/provisioning/v2/impl.go index 96139d9a..1a538d2f 100755 --- a/provisioning/v2/impl.go +++ b/provisioning/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just credentials API methods +// unexported type that holds implementations of just Credentials API methods type credentialsImpl struct { client *client.DatabricksClient } @@ -52,7 +52,7 @@ func (a *credentialsImpl) List(ctx context.Context) ([]Credential, error) { return credentialList, err } -// unexported type that holds implementations of just encryption_keys API methods +// unexported type that holds implementations of just EncryptionKeys API methods type encryptionKeysImpl struct { client *client.DatabricksClient } @@ -94,7 +94,7 @@ func (a *encryptionKeysImpl) List(ctx context.Context) ([]CustomerManagedKey, er return customerManagedKeyList, err } -// unexported type that holds implementations of just networks API methods +// unexported type that holds implementations of just Networks API methods type networksImpl struct { client *client.DatabricksClient } @@ -136,7 +136,7 @@ func (a *networksImpl) List(ctx context.Context) ([]Network, error) { return 
networkList, err } -// unexported type that holds implementations of just private_access API methods +// unexported type that holds implementations of just PrivateAccess API methods type privateAccessImpl struct { client *client.DatabricksClient } @@ -145,8 +145,8 @@ func (a *privateAccessImpl) Create(ctx context.Context, request UpsertPrivateAcc var privateAccessSettings PrivateAccessSettings path := fmt.Sprintf("/api/2.0/accounts/%v/private-access-settings", a.client.ConfiguredAccountID()) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &privateAccessSettings) return &privateAccessSettings, err } @@ -188,7 +188,7 @@ func (a *privateAccessImpl) Replace(ctx context.Context, request UpsertPrivateAc return err } -// unexported type that holds implementations of just storage API methods +// unexported type that holds implementations of just Storage API methods type storageImpl struct { client *client.DatabricksClient } @@ -197,8 +197,8 @@ func (a *storageImpl) Create(ctx context.Context, request CreateStorageConfigura var storageConfiguration StorageConfiguration path := fmt.Sprintf("/api/2.0/accounts/%v/storage-configurations", a.client.ConfiguredAccountID()) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &storageConfiguration) return &storageConfiguration, err } @@ -230,7 +230,7 @@ func (a *storageImpl) List(ctx context.Context) ([]StorageConfiguration, error) return storageConfigurationList, err } -// unexported type that holds implementations of just vpc_endpoints API methods +// unexported type that holds implementations of just VpcEndpoints API methods type vpcEndpointsImpl struct { client *client.DatabricksClient } @@ -272,7 +272,7 @@ func (a *vpcEndpointsImpl) List(ctx context.Context) ([]VpcEndpoint, error) { return vpcEndpointList, err } -// unexported type that holds implementations of just workspaces API methods +// unexported type that holds implementations of just Workspaces API methods type workspacesImpl struct { client *client.DatabricksClient } diff --git a/serving/v2/impl.go b/serving/v2/impl.go index 82b97a78..2acb806b 100755 --- a/serving/v2/impl.go +++ b/serving/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just serving_endpoints API methods +// unexported type that holds implementations of just ServingEndpoints API methods type servingEndpointsImpl struct { client *client.DatabricksClient } @@ -130,8 +130,8 @@ func (a *servingEndpointsImpl) Put(ctx context.Context, request PutRequest) (*Pu var putResponse PutResponse path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/rate-limits", request.Name) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, request, &putResponse) return &putResponse, err } @@ -170,8 +170,8 @@ func (a *servingEndpointsImpl) UpdateConfig(ctx context.Context, request Endpoin var servingEndpointDetailed ServingEndpointDetailed path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/config", request.Name) headers := 
make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, request, &servingEndpointDetailed) return &servingEndpointDetailed, err } @@ -186,7 +186,7 @@ func (a *servingEndpointsImpl) UpdatePermissions(ctx context.Context, request Se return &servingEndpointPermissions, err } -// unexported type that holds implementations of just serving_endpoints_data_plane API methods +// unexported type that holds implementations of just ServingEndpointsDataPlane API methods type servingEndpointsDataPlaneImpl struct { client *client.DatabricksClient } diff --git a/settings/v2/impl.go b/settings/v2/impl.go index a9de6c1c..0790162e 100755 --- a/settings/v2/impl.go +++ b/settings/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just account_ip_access_lists API methods +// unexported type that holds implementations of just AccountIpAccessLists API methods type accountIpAccessListsImpl struct { client *client.DatabricksClient } @@ -56,8 +56,8 @@ func (a *accountIpAccessListsImpl) Replace(ctx context.Context, request ReplaceI var replaceResponse ReplaceResponse path := fmt.Sprintf("/api/2.0/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, request, &replaceResponse) return err } @@ -66,18 +66,18 @@ func (a *accountIpAccessListsImpl) Update(ctx context.Context, request UpdateIpA var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateResponse) return err } -// unexported type that holds implementations of just account_settings API methods +// unexported type that holds implementations of just AccountSettings API methods type accountSettingsImpl struct { client *client.DatabricksClient } -// unexported type that holds implementations of just aibi_dashboard_embedding_access_policy API methods +// unexported type that holds implementations of just AibiDashboardEmbeddingAccessPolicy API methods type aibiDashboardEmbeddingAccessPolicyImpl struct { client *client.DatabricksClient } @@ -104,13 +104,13 @@ func (a *aibiDashboardEmbeddingAccessPolicyImpl) Update(ctx context.Context, req var aibiDashboardEmbeddingAccessPolicySetting AibiDashboardEmbeddingAccessPolicySetting path := "/api/2.0/settings/types/aibi_dash_embed_ws_acc_policy/names/default" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &aibiDashboardEmbeddingAccessPolicySetting) return &aibiDashboardEmbeddingAccessPolicySetting, err } -// unexported type that holds implementations of just aibi_dashboard_embedding_approved_domains API methods +// unexported type that holds implementations of just AibiDashboardEmbeddingApprovedDomains API methods 
type aibiDashboardEmbeddingApprovedDomainsImpl struct { client *client.DatabricksClient } @@ -143,7 +143,7 @@ func (a *aibiDashboardEmbeddingApprovedDomainsImpl) Update(ctx context.Context, return &aibiDashboardEmbeddingApprovedDomainsSetting, err } -// unexported type that holds implementations of just automatic_cluster_update API methods +// unexported type that holds implementations of just AutomaticClusterUpdate API methods type automaticClusterUpdateImpl struct { client *client.DatabricksClient } @@ -167,7 +167,7 @@ func (a *automaticClusterUpdateImpl) Update(ctx context.Context, request UpdateA return &automaticClusterUpdateSetting, err } -// unexported type that holds implementations of just compliance_security_profile API methods +// unexported type that holds implementations of just ComplianceSecurityProfile API methods type complianceSecurityProfileImpl struct { client *client.DatabricksClient } @@ -185,13 +185,13 @@ func (a *complianceSecurityProfileImpl) Update(ctx context.Context, request Upda var complianceSecurityProfileSetting ComplianceSecurityProfileSetting path := "/api/2.0/settings/types/shield_csp_enablement_ws_db/names/default" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &complianceSecurityProfileSetting) return &complianceSecurityProfileSetting, err } -// unexported type that holds implementations of just credentials_manager API methods +// unexported type that holds implementations of just CredentialsManager API methods type credentialsManagerImpl struct { client *client.DatabricksClient } @@ -206,7 +206,7 @@ func (a *credentialsManagerImpl) ExchangeToken(ctx context.Context, request Exch return &exchangeTokenResponse, err } -// unexported type that holds implementations of just csp_enablement_account API methods +// unexported type that holds implementations of just CspEnablementAccount API methods type cspEnablementAccountImpl struct { client *client.DatabricksClient } @@ -230,7 +230,7 @@ func (a *cspEnablementAccountImpl) Update(ctx context.Context, request UpdateCsp return &cspEnablementAccountSetting, err } -// unexported type that holds implementations of just default_namespace API methods +// unexported type that holds implementations of just DefaultNamespace API methods type defaultNamespaceImpl struct { client *client.DatabricksClient } @@ -263,7 +263,7 @@ func (a *defaultNamespaceImpl) Update(ctx context.Context, request UpdateDefault return &defaultNamespaceSetting, err } -// unexported type that holds implementations of just disable_legacy_access API methods +// unexported type that holds implementations of just DisableLegacyAccess API methods type disableLegacyAccessImpl struct { client *client.DatabricksClient } @@ -296,7 +296,7 @@ func (a *disableLegacyAccessImpl) Update(ctx context.Context, request UpdateDisa return &disableLegacyAccess, err } -// unexported type that holds implementations of just disable_legacy_dbfs API methods +// unexported type that holds implementations of just DisableLegacyDbfs API methods type disableLegacyDbfsImpl struct { client *client.DatabricksClient } @@ -323,13 +323,13 @@ func (a *disableLegacyDbfsImpl) Update(ctx context.Context, request UpdateDisabl var disableLegacyDbfs DisableLegacyDbfs path := "/api/2.0/settings/types/disable_legacy_dbfs/names/default" headers := make(map[string]string) - headers["Content-Type"] = 
"application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &disableLegacyDbfs) return &disableLegacyDbfs, err } -// unexported type that holds implementations of just disable_legacy_features API methods +// unexported type that holds implementations of just DisableLegacyFeatures API methods type disableLegacyFeaturesImpl struct { client *client.DatabricksClient } @@ -362,7 +362,7 @@ func (a *disableLegacyFeaturesImpl) Update(ctx context.Context, request UpdateDi return &disableLegacyFeatures, err } -// unexported type that holds implementations of just enhanced_security_monitoring API methods +// unexported type that holds implementations of just EnhancedSecurityMonitoring API methods type enhancedSecurityMonitoringImpl struct { client *client.DatabricksClient } @@ -386,7 +386,7 @@ func (a *enhancedSecurityMonitoringImpl) Update(ctx context.Context, request Upd return &enhancedSecurityMonitoringSetting, err } -// unexported type that holds implementations of just esm_enablement_account API methods +// unexported type that holds implementations of just EsmEnablementAccount API methods type esmEnablementAccountImpl struct { client *client.DatabricksClient } @@ -410,7 +410,7 @@ func (a *esmEnablementAccountImpl) Update(ctx context.Context, request UpdateEsm return &esmEnablementAccountSetting, err } -// unexported type that holds implementations of just ip_access_lists API methods +// unexported type that holds implementations of just IpAccessLists API methods type ipAccessListsImpl struct { client *client.DatabricksClient } @@ -466,13 +466,13 @@ func (a *ipAccessListsImpl) Update(ctx context.Context, request UpdateIpAccessLi var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/ip-access-lists/%v", request.IpAccessListId) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateResponse) return err } -// unexported type that holds implementations of just network_connectivity API methods +// unexported type that holds implementations of just NetworkConnectivity API methods type networkConnectivityImpl struct { client *client.DatabricksClient } @@ -551,7 +551,7 @@ func (a *networkConnectivityImpl) ListPrivateEndpointRules(ctx context.Context, return &listNccAzurePrivateEndpointRulesResponse, err } -// unexported type that holds implementations of just notification_destinations API methods +// unexported type that holds implementations of just NotificationDestinations API methods type notificationDestinationsImpl struct { client *client.DatabricksClient } @@ -603,7 +603,7 @@ func (a *notificationDestinationsImpl) Update(ctx context.Context, request Updat return ¬ificationDestination, err } -// unexported type that holds implementations of just personal_compute API methods +// unexported type that holds implementations of just PersonalCompute API methods type personalComputeImpl struct { client *client.DatabricksClient } @@ -636,7 +636,7 @@ func (a *personalComputeImpl) Update(ctx context.Context, request UpdatePersonal return &personalComputeSetting, err } -// unexported type that holds implementations of just restrict_workspace_admins API methods +// unexported type that holds implementations of just RestrictWorkspaceAdmins API methods type restrictWorkspaceAdminsImpl struct { client 
*client.DatabricksClient } @@ -669,12 +669,12 @@ func (a *restrictWorkspaceAdminsImpl) Update(ctx context.Context, request Update return &restrictWorkspaceAdminsSetting, err } -// unexported type that holds implementations of just settings API methods +// unexported type that holds implementations of just Settings API methods type settingsImpl struct { client *client.DatabricksClient } -// unexported type that holds implementations of just token_management API methods +// unexported type that holds implementations of just TokenManagement API methods type tokenManagementImpl struct { client *client.DatabricksClient } @@ -683,8 +683,8 @@ func (a *tokenManagementImpl) CreateOboToken(ctx context.Context, request Create var createOboTokenResponse CreateOboTokenResponse path := "/api/2.0/token-management/on-behalf-of/tokens" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createOboTokenResponse) return &createOboTokenResponse, err } @@ -754,7 +754,7 @@ func (a *tokenManagementImpl) UpdatePermissions(ctx context.Context, request Tok return &tokenPermissions, err } -// unexported type that holds implementations of just tokens API methods +// unexported type that holds implementations of just Tokens API methods type tokensImpl struct { client *client.DatabricksClient } @@ -773,8 +773,8 @@ func (a *tokensImpl) Delete(ctx context.Context, request RevokeTokenRequest) err var revokeTokenResponse RevokeTokenResponse path := "/api/2.0/token/delete" headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &revokeTokenResponse) return err } @@ -788,7 +788,7 @@ func (a *tokensImpl) List(ctx context.Context) (*ListPublicTokensResponse, error return &listPublicTokensResponse, err } -// unexported type that holds implementations of just workspace_conf API methods +// unexported type that holds implementations of just WorkspaceConf API methods type workspaceConfImpl struct { client *client.DatabricksClient } diff --git a/sharing/v2/impl.go b/sharing/v2/impl.go index b17078f3..0919f164 100755 --- a/sharing/v2/impl.go +++ b/sharing/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just providers API methods +// unexported type that holds implementations of just Providers API methods type providersImpl struct { client *client.DatabricksClient } @@ -19,8 +19,8 @@ func (a *providersImpl) Create(ctx context.Context, request CreateProvider) (*Pr var providerInfo ProviderInfo path := "/api/2.1/unity-catalog/providers" headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, request, &providerInfo) return &providerInfo, err } @@ -65,13 +65,13 @@ func (a *providersImpl) Update(ctx context.Context, request UpdateProvider) (*Pr var providerInfo ProviderInfo path := fmt.Sprintf("/api/2.1/unity-catalog/providers/%v", request.Name) headers := make(map[string]string) - headers["Content-Type"] = "application/json" headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" err := 
a.client.Do(ctx, http.MethodPatch, path, headers, request, &providerInfo) return &providerInfo, err } -// unexported type that holds implementations of just recipient_activation API methods +// unexported type that holds implementations of just RecipientActivation API methods type recipientActivationImpl struct { client *client.DatabricksClient } @@ -94,7 +94,7 @@ func (a *recipientActivationImpl) RetrieveToken(ctx context.Context, request Ret return &retrieveTokenResponse, err } -// unexported type that holds implementations of just recipients API methods +// unexported type that holds implementations of just Recipients API methods type recipientsImpl struct { client *client.DatabricksClient } @@ -165,7 +165,7 @@ func (a *recipientsImpl) Update(ctx context.Context, request UpdateRecipient) (* return &recipientInfo, err } -// unexported type that holds implementations of just shares API methods +// unexported type that holds implementations of just Shares API methods type sharesImpl struct { client *client.DatabricksClient } diff --git a/sql/v2/impl.go b/sql/v2/impl.go index 530441ff..5b14b191 100755 --- a/sql/v2/impl.go +++ b/sql/v2/impl.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just alerts API methods +// unexported type that holds implementations of just Alerts API methods type alertsImpl struct { client *client.DatabricksClient } @@ -62,7 +62,7 @@ func (a *alertsImpl) Update(ctx context.Context, request UpdateAlertRequest) (*A return &alert, err } -// unexported type that holds implementations of just alerts_legacy API methods +// unexported type that holds implementations of just AlertsLegacy API methods type alertsLegacyImpl struct { client *client.DatabricksClient } @@ -114,7 +114,7 @@ func (a *alertsLegacyImpl) Update(ctx context.Context, request EditAlert) error return err } -// unexported type that holds implementations of just dashboard_widgets API methods +// unexported type that holds implementations of just DashboardWidgets API methods type dashboardWidgetsImpl struct { client *client.DatabricksClient } @@ -148,7 +148,7 @@ func (a *dashboardWidgetsImpl) Update(ctx context.Context, request CreateWidget) return &widget, err } -// unexported type that holds implementations of just dashboards API methods +// unexported type that holds implementations of just Dashboards API methods type dashboardsImpl struct { client *client.DatabricksClient } @@ -209,7 +209,7 @@ func (a *dashboardsImpl) Update(ctx context.Context, request DashboardEditConten return &dashboard, err } -// unexported type that holds implementations of just data_sources API methods +// unexported type that holds implementations of just DataSources API methods type dataSourcesImpl struct { client *client.DatabricksClient } @@ -223,7 +223,7 @@ func (a *dataSourcesImpl) List(ctx context.Context) ([]DataSource, error) { return dataSourceList, err } -// unexported type that holds implementations of just dbsql_permissions API methods +// unexported type that holds implementations of just DbsqlPermissions API methods type dbsqlPermissionsImpl struct { client *client.DatabricksClient } @@ -257,7 +257,7 @@ func (a *dbsqlPermissionsImpl) TransferOwnership(ctx context.Context, request Tr return &success, err } -// unexported type that holds implementations of just queries API methods +// unexported type that holds implementations of just Queries API methods type queriesImpl struct { client *client.DatabricksClient } @@ -318,7 
diff --git a/sql/v2/impl.go b/sql/v2/impl.go
index 530441ff..5b14b191 100755
--- a/sql/v2/impl.go
+++ b/sql/v2/impl.go
@@ -10,7 +10,7 @@ import (
 	"github.com/databricks/databricks-sdk-go/databricks/client"
 )
 
-// unexported type that holds implementations of just alerts API methods
+// unexported type that holds implementations of just Alerts API methods
 type alertsImpl struct {
 	client *client.DatabricksClient
 }
@@ -62,7 +62,7 @@ func (a *alertsImpl) Update(ctx context.Context, request UpdateAlertRequest) (*A
 	return &alert, err
 }
 
-// unexported type that holds implementations of just alerts_legacy API methods
+// unexported type that holds implementations of just AlertsLegacy API methods
 type alertsLegacyImpl struct {
 	client *client.DatabricksClient
 }
@@ -114,7 +114,7 @@ func (a *alertsLegacyImpl) Update(ctx context.Context, request EditAlert) error
 	return err
 }
 
-// unexported type that holds implementations of just dashboard_widgets API methods
+// unexported type that holds implementations of just DashboardWidgets API methods
 type dashboardWidgetsImpl struct {
 	client *client.DatabricksClient
 }
@@ -148,7 +148,7 @@ func (a *dashboardWidgetsImpl) Update(ctx context.Context, request CreateWidget)
 	return &widget, err
 }
 
-// unexported type that holds implementations of just dashboards API methods
+// unexported type that holds implementations of just Dashboards API methods
 type dashboardsImpl struct {
 	client *client.DatabricksClient
 }
@@ -209,7 +209,7 @@ func (a *dashboardsImpl) Update(ctx context.Context, request DashboardEditConten
 	return &dashboard, err
 }
 
-// unexported type that holds implementations of just data_sources API methods
+// unexported type that holds implementations of just DataSources API methods
 type dataSourcesImpl struct {
 	client *client.DatabricksClient
 }
@@ -223,7 +223,7 @@ func (a *dataSourcesImpl) List(ctx context.Context) ([]DataSource, error) {
 	return dataSourceList, err
 }
 
-// unexported type that holds implementations of just dbsql_permissions API methods
+// unexported type that holds implementations of just DbsqlPermissions API methods
 type dbsqlPermissionsImpl struct {
 	client *client.DatabricksClient
 }
@@ -257,7 +257,7 @@ func (a *dbsqlPermissionsImpl) TransferOwnership(ctx context.Context, request Tr
 	return &success, err
 }
 
-// unexported type that holds implementations of just queries API methods
+// unexported type that holds implementations of just Queries API methods
 type queriesImpl struct {
 	client *client.DatabricksClient
 }
@@ -318,7 +318,7 @@ func (a *queriesImpl) Update(ctx context.Context, request UpdateQueryRequest) (*
 	return &query, err
 }
 
-// unexported type that holds implementations of just queries_legacy API methods
+// unexported type that holds implementations of just QueriesLegacy API methods
 type queriesLegacyImpl struct {
 	client *client.DatabricksClient
 }
@@ -327,8 +327,8 @@ func (a *queriesLegacyImpl) Create(ctx context.Context, request QueryPostContent
 	var legacyQuery LegacyQuery
 	path := "/api/2.0/preview/sql/queries"
 	headers := make(map[string]string)
-	headers["Accept"] = "application/json"
 	headers["Content-Type"] = "application/json"
+	headers["Accept"] = "application/json"
 	err := a.client.Do(ctx, http.MethodPost, path, headers, request, &legacyQuery)
 	return &legacyQuery, err
 }
@@ -373,13 +373,13 @@ func (a *queriesLegacyImpl) Update(ctx context.Context, request QueryEditContent
 	var legacyQuery LegacyQuery
 	path := fmt.Sprintf("/api/2.0/preview/sql/queries/%v", request.QueryId)
 	headers := make(map[string]string)
-	headers["Accept"] = "application/json"
 	headers["Content-Type"] = "application/json"
+	headers["Accept"] = "application/json"
 	err := a.client.Do(ctx, http.MethodPost, path, headers, request, &legacyQuery)
 	return &legacyQuery, err
 }
 
-// unexported type that holds implementations of just query_history API methods
+// unexported type that holds implementations of just QueryHistory API methods
 type queryHistoryImpl struct {
 	client *client.DatabricksClient
 }
@@ -393,7 +393,7 @@ func (a *queryHistoryImpl) List(ctx context.Context, request ListQueryHistoryReq
 	return &listQueriesResponse, err
 }
 
-// unexported type that holds implementations of just query_visualizations API methods
+// unexported type that holds implementations of just QueryVisualizations API methods
 type queryVisualizationsImpl struct {
 	client *client.DatabricksClient
 }
@@ -427,7 +427,7 @@ func (a *queryVisualizationsImpl) Update(ctx context.Context, request UpdateVisu
 	return &visualization, err
 }
 
-// unexported type that holds implementations of just query_visualizations_legacy API methods
+// unexported type that holds implementations of just QueryVisualizationsLegacy API methods
 type queryVisualizationsLegacyImpl struct {
 	client *client.DatabricksClient
 }
@@ -455,13 +455,13 @@ func (a *queryVisualizationsLegacyImpl) Update(ctx context.Context, request Lega
 	var legacyVisualization LegacyVisualization
 	path := fmt.Sprintf("/api/2.0/preview/sql/visualizations/%v", request.Id)
 	headers := make(map[string]string)
-	headers["Accept"] = "application/json"
 	headers["Content-Type"] = "application/json"
+	headers["Accept"] = "application/json"
 	err := a.client.Do(ctx, http.MethodPost, path, headers, request, &legacyVisualization)
 	return &legacyVisualization, err
 }
 
-// unexported type that holds implementations of just statement_execution API methods
+// unexported type that holds implementations of just StatementExecution API methods
 type statementExecutionImpl struct {
 	client *client.DatabricksClient
 }
@@ -502,7 +502,7 @@ func (a *statementExecutionImpl) GetStatementResultChunkN(ctx context.Context, r
 	return &resultData, err
 }
 
-// unexported type that holds implementations of just warehouses API methods
+// unexported type that holds implementations of just Warehouses API methods
 type warehousesImpl struct {
 	client *client.DatabricksClient
 }
@@ -530,8 +530,8 @@ func (a *warehousesImpl) Edit(ctx context.Context, request EditWarehouseRequest)
 	var editWarehouseResponse EditWarehouseResponse
 	path := fmt.Sprintf("/api/2.0/sql/warehouses/%v/edit", request.Id)
 	headers := make(map[string]string)
-	headers["Content-Type"] = "application/json"
 	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
 	err := a.client.Do(ctx, http.MethodPost, path, headers, request, &editWarehouseResponse)
 	return err
 }
@@ -595,8 +595,8 @@ func (a *warehousesImpl) SetWorkspaceWarehouseConfig(ctx context.Context, reques
 	var setWorkspaceWarehouseConfigResponse SetWorkspaceWarehouseConfigResponse
 	path := "/api/2.0/sql/config/warehouses"
 	headers := make(map[string]string)
-	headers["Accept"] = "application/json"
 	headers["Content-Type"] = "application/json"
+	headers["Accept"] = "application/json"
 	err := a.client.Do(ctx, http.MethodPut, path, headers, request, &setWorkspaceWarehouseConfigResponse)
 	return err
 }
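For orientation, a hedged sketch of wiring one of the unexported impls above to a client from inside this package. The dataSourcesImpl type and its List method appear verbatim in the hunks above; client.New and config.Config mirror the top-level databricks-sdk-go packages and are assumptions relative to this patch:

// Package-internal sketch; external callers would go through the exported
// service clients rather than the unexported impl types.
func listDataSources(ctx context.Context) ([]DataSource, error) {
	c, err := client.New(&config.Config{}) // assumption: credentials resolved from the environment
	if err != nil {
		return nil, err
	}
	api := dataSourcesImpl{client: c}
	return api.List(ctx)
}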
diff --git a/vectorsearch/v2/impl.go b/vectorsearch/v2/impl.go
index fa84f551..6db8e7d9 100755
--- a/vectorsearch/v2/impl.go
+++ b/vectorsearch/v2/impl.go
@@ -10,7 +10,7 @@ import (
 	"github.com/databricks/databricks-sdk-go/databricks/client"
 )
 
-// unexported type that holds implementations of just vector_search_endpoints API methods
+// unexported type that holds implementations of just VectorSearchEndpoints API methods
 type vectorSearchEndpointsImpl struct {
 	client *client.DatabricksClient
 }
@@ -51,7 +51,7 @@ func (a *vectorSearchEndpointsImpl) ListEndpoints(ctx context.Context, request L
 	return &listEndpointResponse, err
 }
 
-// unexported type that holds implementations of just vector_search_indexes API methods
+// unexported type that holds implementations of just VectorSearchIndexes API methods
 type vectorSearchIndexesImpl struct {
 	client *client.DatabricksClient
 }
@@ -106,8 +106,8 @@ func (a *vectorSearchIndexesImpl) QueryIndex(ctx context.Context, request QueryV
 	var queryVectorIndexResponse QueryVectorIndexResponse
	path := fmt.Sprintf("/api/2.0/vector-search/indexes/%v/query", request.IndexName)
 	headers := make(map[string]string)
-	headers["Content-Type"] = "application/json"
 	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
 	err := a.client.Do(ctx, http.MethodPost, path, headers, request, &queryVectorIndexResponse)
 	return &queryVectorIndexResponse, err
 }
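A hedged usage sketch for the QueryIndex hunk above. The hunk header truncates the request type at "QueryV"; QueryVectorIndexRequest and any field other than IndexName (which the hunk demonstrably interpolates into the URL) are assumptions, and the index name is a placeholder:

// Sketch only: api is a *vectorSearchIndexesImpl wired to a DatabricksClient.
func queryIndex(ctx context.Context, api *vectorSearchIndexesImpl) error {
	resp, err := api.QueryIndex(ctx, QueryVectorIndexRequest{IndexName: "catalog.schema.my_index"})
	if err != nil {
		return err
	}
	_ = resp // e.g. iterate result rows here
	return nil
}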
diff --git a/workspace/v2/impl.go b/workspace/v2/impl.go
index 05f4e881..bb17a856 100755
--- a/workspace/v2/impl.go
+++ b/workspace/v2/impl.go
@@ -10,7 +10,7 @@ import (
 	"github.com/databricks/databricks-sdk-go/databricks/client"
 )
 
-// unexported type that holds implementations of just git_credentials API methods
+// unexported type that holds implementations of just GitCredentials API methods
 type gitCredentialsImpl struct {
 	client *client.DatabricksClient
 }
@@ -62,7 +62,7 @@ func (a *gitCredentialsImpl) Update(ctx context.Context, request UpdateCredentia
 	return err
 }
 
-// unexported type that holds implementations of just repos API methods
+// unexported type that holds implementations of just Repos API methods
 type reposImpl struct {
 	client *client.DatabricksClient
 }
@@ -71,8 +71,8 @@ func (a *reposImpl) Create(ctx context.Context, request CreateRepoRequest) (*Cre
 	var createRepoResponse CreateRepoResponse
 	path := "/api/2.0/repos"
 	headers := make(map[string]string)
-	headers["Content-Type"] = "application/json"
 	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
 	err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createRepoResponse)
 	return &createRepoResponse, err
 }
@@ -136,8 +136,8 @@ func (a *reposImpl) Update(ctx context.Context, request UpdateRepoRequest) error
 	var updateRepoResponse UpdateRepoResponse
 	path := fmt.Sprintf("/api/2.0/repos/%v", request.RepoId)
 	headers := make(map[string]string)
-	headers["Accept"] = "application/json"
 	headers["Content-Type"] = "application/json"
+	headers["Accept"] = "application/json"
 	err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateRepoResponse)
 	return err
 }
@@ -146,13 +146,13 @@ func (a *reposImpl) UpdatePermissions(ctx context.Context, request RepoPermissio
 	var repoPermissions RepoPermissions
 	path := fmt.Sprintf("/api/2.0/permissions/repos/%v", request.RepoId)
 	headers := make(map[string]string)
-	headers["Content-Type"] = "application/json"
 	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
 	err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &repoPermissions)
 	return &repoPermissions, err
 }
 
-// unexported type that holds implementations of just secrets API methods
+// unexported type that holds implementations of just Secrets API methods
 type secretsImpl struct {
 	client *client.DatabricksClient
 }
@@ -246,8 +246,8 @@ func (a *secretsImpl) PutAcl(ctx context.Context, request PutAcl) error {
 	var putAclResponse PutAclResponse
 	path := "/api/2.0/secrets/acls/put"
 	headers := make(map[string]string)
-	headers["Accept"] = "application/json"
 	headers["Content-Type"] = "application/json"
+	headers["Accept"] = "application/json"
 	err := a.client.Do(ctx, http.MethodPost, path, headers, request, &putAclResponse)
 	return err
 }
@@ -262,7 +262,7 @@ func (a *secretsImpl) PutSecret(ctx context.Context, request PutSecret) error {
 	return err
 }
 
-// unexported type that holds implementations of just workspace API methods
+// unexported type that holds implementations of just Workspace API methods
 type workspaceImpl struct {
 	client *client.DatabricksClient
 }
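Finally, a hedged sketch of the PutAcl call shown above; note the generated method decodes the response into putAclResponse internally but surfaces only the error. The field names on PutAcl (Scope, Principal, Permission) follow the public secrets REST API and are assumptions relative to this patch:

// Sketch only: api is a *secretsImpl wired to a DatabricksClient;
// the scope and principal values are placeholders.
func grantRead(ctx context.Context, api *secretsImpl) error {
	return api.PutAcl(ctx, PutAcl{
		Scope:      "my-scope",
		Principal:  "data-scientists",
		Permission: "READ", // assumption: string-based permission enum
	})
}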