From c94293f93832c57f9d8417995ed5a8d02c55716a Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Wed, 18 Sep 2024 01:03:08 +0200 Subject: [PATCH] [Internal] Update SDK to latest OpenAPI spec --- .codegen/_openapi_sha | 2 +- .gitattributes | 57 ----- experimental/mocks/mock_account_client.go | 11 + experimental/mocks/mock_workspace_client.go | 20 ++ service/apps/api.go | 208 ++++++++++++---- service/apps/impl.go | 44 ++-- service/apps/interface.go | 6 +- service/apps/model.go | 128 +++++++--- service/catalog/api.go | 41 ++- service/catalog/impl.go | 15 ++ service/catalog/interface.go | 28 +++ service/catalog/model.go | 166 ++++++++++++- service/compute/api.go | 12 + service/compute/interface.go | 6 + service/compute/model.go | 70 ++++-- service/iam/model.go | 6 +- service/jobs/model.go | 52 ++-- service/pipelines/model.go | 8 + service/pkg.go | 9 + service/serving/api.go | 11 +- service/serving/impl.go | 10 + service/serving/interface.go | 11 +- service/serving/model.go | 245 +++++++++++++++++- service/settings/api.go | 121 ++++++++- service/settings/impl.go | 66 +++++ service/settings/interface.go | 49 ++++ service/settings/model.go | 216 +++++++++++++++- service/sql/api.go | 4 +- service/sql/interface.go | 4 +- service/sql/model.go | 261 +------------------- workspace_client.go | 25 +- 31 files changed, 1437 insertions(+), 475 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 4ceeab3d3..9071ff665 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -d05898328669a3f8ab0c2ecee37db2673d3ea3f7 \ No newline at end of file +0800eba6e1cda544158740020bfc59137efc28ff \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 1747db9ca..1a29ba803 100644 --- a/.gitattributes +++ b/.gitattributes @@ -133,39 +133,17 @@ service/apps/impl.go linguist-generated=true service/apps/interface.go linguist-generated=true service/apps/model.go linguist-generated=true service/billing/api.go linguist-generated=true 
-service/billing/billable_usage_usage_test.go linguist-generated=true -service/billing/budgets_usage_test.go linguist-generated=true service/billing/impl.go linguist-generated=true service/billing/interface.go linguist-generated=true -service/billing/log_delivery_usage_test.go linguist-generated=true service/billing/model.go linguist-generated=true service/catalog/api.go linguist-generated=true -service/catalog/catalogs_usage_test.go linguist-generated=true -service/catalog/connections_usage_test.go linguist-generated=true -service/catalog/external_locations_usage_test.go linguist-generated=true -service/catalog/grants_usage_test.go linguist-generated=true service/catalog/impl.go linguist-generated=true service/catalog/interface.go linguist-generated=true -service/catalog/metastore_assignments_usage_test.go linguist-generated=true -service/catalog/metastores_usage_test.go linguist-generated=true service/catalog/model.go linguist-generated=true -service/catalog/schemas_usage_test.go linguist-generated=true -service/catalog/storage_credentials_usage_test.go linguist-generated=true -service/catalog/tables_usage_test.go linguist-generated=true -service/catalog/volumes_usage_test.go linguist-generated=true -service/catalog/workspace_bindings_usage_test.go linguist-generated=true service/compute/api.go linguist-generated=true -service/compute/cluster_policies_usage_test.go linguist-generated=true -service/compute/clusters_usage_test.go linguist-generated=true -service/compute/command_execution_usage_test.go linguist-generated=true -service/compute/global_init_scripts_usage_test.go linguist-generated=true service/compute/impl.go linguist-generated=true -service/compute/instance_pools_usage_test.go linguist-generated=true -service/compute/instance_profiles_usage_test.go linguist-generated=true service/compute/interface.go linguist-generated=true -service/compute/libraries_usage_test.go linguist-generated=true service/compute/model.go linguist-generated=true 
-service/compute/policy_families_usage_test.go linguist-generated=true service/dashboards/api.go linguist-generated=true service/dashboards/impl.go linguist-generated=true service/dashboards/interface.go linguist-generated=true @@ -175,30 +153,21 @@ service/files/impl.go linguist-generated=true service/files/interface.go linguist-generated=true service/files/model.go linguist-generated=true service/iam/api.go linguist-generated=true -service/iam/current_user_usage_test.go linguist-generated=true -service/iam/groups_usage_test.go linguist-generated=true service/iam/impl.go linguist-generated=true service/iam/interface.go linguist-generated=true service/iam/model.go linguist-generated=true -service/iam/permissions_usage_test.go linguist-generated=true -service/iam/service_principals_usage_test.go linguist-generated=true -service/iam/users_usage_test.go linguist-generated=true -service/iam/workspace_assignment_usage_test.go linguist-generated=true service/jobs/api.go linguist-generated=true service/jobs/impl.go linguist-generated=true service/jobs/interface.go linguist-generated=true -service/jobs/jobs_usage_test.go linguist-generated=true service/jobs/model.go linguist-generated=true service/marketplace/api.go linguist-generated=true service/marketplace/impl.go linguist-generated=true service/marketplace/interface.go linguist-generated=true service/marketplace/model.go linguist-generated=true service/ml/api.go linguist-generated=true -service/ml/experiments_usage_test.go linguist-generated=true service/ml/impl.go linguist-generated=true service/ml/interface.go linguist-generated=true service/ml/model.go linguist-generated=true -service/ml/model_registry_usage_test.go linguist-generated=true service/oauth2/api.go linguist-generated=true service/oauth2/impl.go linguist-generated=true service/oauth2/interface.go linguist-generated=true @@ -207,19 +176,11 @@ service/pipelines/api.go linguist-generated=true service/pipelines/impl.go linguist-generated=true 
service/pipelines/interface.go linguist-generated=true service/pipelines/model.go linguist-generated=true -service/pipelines/pipelines_usage_test.go linguist-generated=true service/pkg.go linguist-generated=true service/provisioning/api.go linguist-generated=true -service/provisioning/credentials_usage_test.go linguist-generated=true -service/provisioning/encryption_keys_usage_test.go linguist-generated=true service/provisioning/impl.go linguist-generated=true service/provisioning/interface.go linguist-generated=true service/provisioning/model.go linguist-generated=true -service/provisioning/networks_usage_test.go linguist-generated=true -service/provisioning/private_access_usage_test.go linguist-generated=true -service/provisioning/storage_usage_test.go linguist-generated=true -service/provisioning/vpc_endpoints_usage_test.go linguist-generated=true -service/provisioning/workspaces_usage_test.go linguist-generated=true service/serving/api.go linguist-generated=true service/serving/impl.go linguist-generated=true service/serving/interface.go linguist-generated=true @@ -227,39 +188,21 @@ service/serving/model.go linguist-generated=true service/settings/api.go linguist-generated=true service/settings/impl.go linguist-generated=true service/settings/interface.go linguist-generated=true -service/settings/ip_access_lists_usage_test.go linguist-generated=true service/settings/model.go linguist-generated=true -service/settings/token_management_usage_test.go linguist-generated=true -service/settings/tokens_usage_test.go linguist-generated=true -service/settings/workspace_conf_usage_test.go linguist-generated=true service/sharing/api.go linguist-generated=true service/sharing/impl.go linguist-generated=true service/sharing/interface.go linguist-generated=true service/sharing/model.go linguist-generated=true -service/sharing/providers_usage_test.go linguist-generated=true -service/sharing/recipients_usage_test.go linguist-generated=true -service/sharing/shares_usage_test.go 
linguist-generated=true -service/sql/alerts_usage_test.go linguist-generated=true service/sql/api.go linguist-generated=true -service/sql/dashboards_usage_test.go linguist-generated=true -service/sql/data_sources_usage_test.go linguist-generated=true service/sql/impl.go linguist-generated=true service/sql/interface.go linguist-generated=true service/sql/model.go linguist-generated=true -service/sql/queries_usage_test.go linguist-generated=true -service/sql/query_history_usage_test.go linguist-generated=true -service/sql/statement_execution_usage_test.go linguist-generated=true -service/sql/warehouses_usage_test.go linguist-generated=true service/vectorsearch/api.go linguist-generated=true service/vectorsearch/impl.go linguist-generated=true service/vectorsearch/interface.go linguist-generated=true service/vectorsearch/model.go linguist-generated=true service/workspace/api.go linguist-generated=true -service/workspace/git_credentials_usage_test.go linguist-generated=true service/workspace/impl.go linguist-generated=true service/workspace/interface.go linguist-generated=true service/workspace/model.go linguist-generated=true -service/workspace/repos_usage_test.go linguist-generated=true -service/workspace/secrets_usage_test.go linguist-generated=true -service/workspace/workspace_usage_test.go linguist-generated=true workspace_client.go linguist-generated=true diff --git a/experimental/mocks/mock_account_client.go b/experimental/mocks/mock_account_client.go index 3bf7cbeb6..3e5024c2c 100755 --- a/experimental/mocks/mock_account_client.go +++ b/experimental/mocks/mock_account_client.go @@ -64,6 +64,9 @@ func NewMockAccountClient(t interface { mockCspEnablementAccount := settings.NewMockCspEnablementAccountInterface(t) mockAccountSettingsAPI.On("CspEnablementAccount").Return(mockCspEnablementAccount).Maybe() + mockDisableLegacyFeatures := settings.NewMockDisableLegacyFeaturesInterface(t) + 
mockAccountSettingsAPI.On("DisableLegacyFeatures").Return(mockDisableLegacyFeatures).Maybe() + mockEsmEnablementAccount := settings.NewMockEsmEnablementAccountInterface(t) mockAccountSettingsAPI.On("EsmEnablementAccount").Return(mockEsmEnablementAccount).Maybe() @@ -81,6 +84,14 @@ func (m *MockAccountClient) GetMockCspEnablementAccountAPI() *settings.MockCspEn return api } +func (m *MockAccountClient) GetMockDisableLegacyFeaturesAPI() *settings.MockDisableLegacyFeaturesInterface { + api, ok := m.GetMockAccountSettingsAPI().DisableLegacyFeatures().(*settings.MockDisableLegacyFeaturesInterface) + if !ok { + panic(fmt.Sprintf("expected DisableLegacyFeatures to be *settings.MockDisableLegacyFeaturesInterface, actual was %T", m.GetMockAccountSettingsAPI().DisableLegacyFeatures())) + } + return api +} + func (m *MockAccountClient) GetMockEsmEnablementAccountAPI() *settings.MockEsmEnablementAccountInterface { api, ok := m.GetMockAccountSettingsAPI().EsmEnablementAccount().(*settings.MockEsmEnablementAccountInterface) if !ok { diff --git a/experimental/mocks/mock_workspace_client.go b/experimental/mocks/mock_workspace_client.go index 1e3c4077c..cfe516b91 100755 --- a/experimental/mocks/mock_workspace_client.go +++ b/experimental/mocks/mock_workspace_client.go @@ -120,6 +120,7 @@ func NewMockWorkspaceClient(t interface { SystemSchemas: catalog.NewMockSystemSchemasInterface(t), TableConstraints: catalog.NewMockTableConstraintsInterface(t), Tables: catalog.NewMockTablesInterface(t), + TemporaryTableCredentials: catalog.NewMockTemporaryTableCredentialsInterface(t), TokenManagement: settings.NewMockTokenManagementInterface(t), Tokens: settings.NewMockTokensInterface(t), Users: iam.NewMockUsersInterface(t), @@ -144,6 +145,9 @@ func NewMockWorkspaceClient(t interface { mockdefaultNamespace := settings.NewMockDefaultNamespaceInterface(t) mocksettingsAPI.On("DefaultNamespace").Return(mockdefaultNamespace).Maybe() + mockdisableLegacyAccess := 
settings.NewMockDisableLegacyAccessInterface(t) + mocksettingsAPI.On("DisableLegacyAccess").Return(mockdisableLegacyAccess).Maybe() + mockenhancedSecurityMonitoring := settings.NewMockEnhancedSecurityMonitoringInterface(t) mocksettingsAPI.On("EnhancedSecurityMonitoring").Return(mockenhancedSecurityMonitoring).Maybe() @@ -177,6 +181,14 @@ func (m *MockWorkspaceClient) GetMockDefaultNamespaceAPI() *settings.MockDefault return api } +func (m *MockWorkspaceClient) GetMockDisableLegacyAccessAPI() *settings.MockDisableLegacyAccessInterface { + api, ok := m.GetMockSettingsAPI().DisableLegacyAccess().(*settings.MockDisableLegacyAccessInterface) + if !ok { + panic(fmt.Sprintf("expected DisableLegacyAccess to be *settings.MockDisableLegacyAccessInterface, actual was %T", m.GetMockSettingsAPI().DisableLegacyAccess())) + } + return api +} + func (m *MockWorkspaceClient) GetMockEnhancedSecurityMonitoringAPI() *settings.MockEnhancedSecurityMonitoringInterface { api, ok := m.GetMockSettingsAPI().EnhancedSecurityMonitoring().(*settings.MockEnhancedSecurityMonitoringInterface) if !ok { @@ -833,6 +845,14 @@ func (m *MockWorkspaceClient) GetMockTablesAPI() *catalog.MockTablesInterface { return api } +func (m *MockWorkspaceClient) GetMockTemporaryTableCredentialsAPI() *catalog.MockTemporaryTableCredentialsInterface { + api, ok := m.WorkspaceClient.TemporaryTableCredentials.(*catalog.MockTemporaryTableCredentialsInterface) + if !ok { + panic(fmt.Sprintf("expected TemporaryTableCredentials to be *catalog.MockTemporaryTableCredentialsInterface, actual was %T", m.WorkspaceClient.TemporaryTableCredentials)) + } + return api +} + func (m *MockWorkspaceClient) GetMockTokenManagementAPI() *settings.MockTokenManagementInterface { api, ok := m.WorkspaceClient.TokenManagement.(*settings.MockTokenManagementInterface) if !ok { diff --git a/service/apps/api.go b/service/apps/api.go index c5985426b..ebc868dac 100755 --- a/service/apps/api.go +++ b/service/apps/api.go @@ -16,8 +16,12 @@ import ( type 
AppsInterface interface { - // WaitGetAppIdle repeatedly calls [AppsAPI.Get] and waits to reach IDLE state - WaitGetAppIdle(ctx context.Context, name string, + // WaitGetAppActive repeatedly calls [AppsAPI.Get] and waits to reach ACTIVE state + WaitGetAppActive(ctx context.Context, name string, + timeout time.Duration, callback func(*App)) (*App, error) + + // WaitGetAppStopped repeatedly calls [AppsAPI.Get] and waits to reach STOPPED state + WaitGetAppStopped(ctx context.Context, name string, timeout time.Duration, callback func(*App)) (*App, error) // WaitGetDeploymentAppSucceeded repeatedly calls [AppsAPI.GetDeployment] and waits to reach SUCCEEDED state @@ -27,25 +31,25 @@ type AppsInterface interface { // Create an app. // // Creates a new app. - Create(ctx context.Context, createAppRequest CreateAppRequest) (*WaitGetAppIdle[App], error) + Create(ctx context.Context, createAppRequest CreateAppRequest) (*WaitGetAppActive[App], error) - // Calls [AppsAPIInterface.Create] and waits to reach IDLE state + // Calls [AppsAPIInterface.Create] and waits to reach ACTIVE state // // You can override the default timeout of 20 minutes by calling adding // retries.Timeout[App](60*time.Minute) functional option. // - // Deprecated: use [AppsAPIInterface.Create].Get() or [AppsAPIInterface.WaitGetAppIdle] + // Deprecated: use [AppsAPIInterface.Create].Get() or [AppsAPIInterface.WaitGetAppActive] CreateAndWait(ctx context.Context, createAppRequest CreateAppRequest, options ...retries.Option[App]) (*App, error) // Delete an app. // // Deletes an app. - Delete(ctx context.Context, request DeleteAppRequest) error + Delete(ctx context.Context, request DeleteAppRequest) (*App, error) // Delete an app. // // Deletes an app. - DeleteByName(ctx context.Context, name string) error + DeleteByName(ctx context.Context, name string) (*App, error) // Create an app deployment. // @@ -146,20 +150,28 @@ type AppsInterface interface { // Start an app. 
// // Start the last active deployment of the app in the workspace. - Start(ctx context.Context, startAppRequest StartAppRequest) (*WaitGetDeploymentAppSucceeded[AppDeployment], error) + Start(ctx context.Context, startAppRequest StartAppRequest) (*WaitGetAppActive[App], error) - // Calls [AppsAPIInterface.Start] and waits to reach SUCCEEDED state + // Calls [AppsAPIInterface.Start] and waits to reach ACTIVE state // // You can override the default timeout of 20 minutes by calling adding - // retries.Timeout[AppDeployment](60*time.Minute) functional option. + // retries.Timeout[App](60*time.Minute) functional option. // - // Deprecated: use [AppsAPIInterface.Start].Get() or [AppsAPIInterface.WaitGetDeploymentAppSucceeded] - StartAndWait(ctx context.Context, startAppRequest StartAppRequest, options ...retries.Option[AppDeployment]) (*AppDeployment, error) + // Deprecated: use [AppsAPIInterface.Start].Get() or [AppsAPIInterface.WaitGetAppActive] + StartAndWait(ctx context.Context, startAppRequest StartAppRequest, options ...retries.Option[App]) (*App, error) // Stop an app. // // Stops the active deployment of the app in the workspace. - Stop(ctx context.Context, request StopAppRequest) error + Stop(ctx context.Context, stopAppRequest StopAppRequest) (*WaitGetAppStopped[App], error) + + // Calls [AppsAPIInterface.Stop] and waits to reach STOPPED state + // + // You can override the default timeout of 20 minutes by calling adding + // retries.Timeout[App](60*time.Minute) functional option. + // + // Deprecated: use [AppsAPIInterface.Stop].Get() or [AppsAPIInterface.WaitGetAppStopped] + StopAndWait(ctx context.Context, stopAppRequest StopAppRequest, options ...retries.Option[App]) (*App, error) // Update an app. 
// @@ -188,8 +200,65 @@ type AppsAPI struct { appsImpl } -// WaitGetAppIdle repeatedly calls [AppsAPI.Get] and waits to reach IDLE state -func (a *AppsAPI) WaitGetAppIdle(ctx context.Context, name string, +// WaitGetAppActive repeatedly calls [AppsAPI.Get] and waits to reach ACTIVE state +func (a *AppsAPI) WaitGetAppActive(ctx context.Context, name string, + timeout time.Duration, callback func(*App)) (*App, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "long-running") + return retries.Poll[App](ctx, timeout, func() (*App, *retries.Err) { + app, err := a.Get(ctx, GetAppRequest{ + Name: name, + }) + if err != nil { + return nil, retries.Halt(err) + } + if callback != nil { + callback(app) + } + status := app.ComputeStatus.State + statusMessage := fmt.Sprintf("current status: %s", status) + if app.ComputeStatus != nil { + statusMessage = app.ComputeStatus.Message + } + switch status { + case ComputeStateActive: // target state + return app, nil + case ComputeStateError: + err := fmt.Errorf("failed to reach %s, got %s: %s", + ComputeStateActive, status, statusMessage) + return nil, retries.Halt(err) + default: + return nil, retries.Continues(statusMessage) + } + }) +} + +// WaitGetAppActive is a wrapper that calls [AppsAPI.WaitGetAppActive] and waits to reach ACTIVE state. +type WaitGetAppActive[R any] struct { + Response *R + Name string `json:"name"` + Poll func(time.Duration, func(*App)) (*App, error) + callback func(*App) + timeout time.Duration +} + +// OnProgress invokes a callback every time it polls for the status update. +func (w *WaitGetAppActive[R]) OnProgress(callback func(*App)) *WaitGetAppActive[R] { + w.callback = callback + return w +} + +// Get the App with the default timeout of 20 minutes. +func (w *WaitGetAppActive[R]) Get() (*App, error) { + return w.Poll(w.timeout, w.callback) +} + +// Get the App with custom timeout. 
+func (w *WaitGetAppActive[R]) GetWithTimeout(timeout time.Duration) (*App, error) { + return w.Poll(timeout, w.callback) +} + +// WaitGetAppStopped repeatedly calls [AppsAPI.Get] and waits to reach STOPPED state +func (a *AppsAPI) WaitGetAppStopped(ctx context.Context, name string, timeout time.Duration, callback func(*App)) (*App, error) { ctx = useragent.InContext(ctx, "sdk-feature", "long-running") return retries.Poll[App](ctx, timeout, func() (*App, *retries.Err) { @@ -202,17 +271,17 @@ func (a *AppsAPI) WaitGetAppIdle(ctx context.Context, name string, if callback != nil { callback(app) } - status := app.Status.State + status := app.ComputeStatus.State statusMessage := fmt.Sprintf("current status: %s", status) - if app.Status != nil { - statusMessage = app.Status.Message + if app.ComputeStatus != nil { + statusMessage = app.ComputeStatus.Message } switch status { - case AppStateIdle: // target state + case ComputeStateStopped: // target state return app, nil - case AppStateError: + case ComputeStateError: err := fmt.Errorf("failed to reach %s, got %s: %s", - AppStateIdle, status, statusMessage) + ComputeStateStopped, status, statusMessage) return nil, retries.Halt(err) default: return nil, retries.Continues(statusMessage) @@ -220,8 +289,8 @@ func (a *AppsAPI) WaitGetAppIdle(ctx context.Context, name string, }) } -// WaitGetAppIdle is a wrapper that calls [AppsAPI.WaitGetAppIdle] and waits to reach IDLE state. -type WaitGetAppIdle[R any] struct { +// WaitGetAppStopped is a wrapper that calls [AppsAPI.WaitGetAppStopped] and waits to reach STOPPED state. +type WaitGetAppStopped[R any] struct { Response *R Name string `json:"name"` Poll func(time.Duration, func(*App)) (*App, error) @@ -230,18 +299,18 @@ type WaitGetAppIdle[R any] struct { } // OnProgress invokes a callback every time it polls for the status update. 
-func (w *WaitGetAppIdle[R]) OnProgress(callback func(*App)) *WaitGetAppIdle[R] { +func (w *WaitGetAppStopped[R]) OnProgress(callback func(*App)) *WaitGetAppStopped[R] { w.callback = callback return w } // Get the App with the default timeout of 20 minutes. -func (w *WaitGetAppIdle[R]) Get() (*App, error) { +func (w *WaitGetAppStopped[R]) Get() (*App, error) { return w.Poll(w.timeout, w.callback) } // Get the App with custom timeout. -func (w *WaitGetAppIdle[R]) GetWithTimeout(timeout time.Duration) (*App, error) { +func (w *WaitGetAppStopped[R]) GetWithTimeout(timeout time.Duration) (*App, error) { return w.Poll(timeout, w.callback) } @@ -307,28 +376,28 @@ func (w *WaitGetDeploymentAppSucceeded[R]) GetWithTimeout(timeout time.Duration) // Create an app. // // Creates a new app. -func (a *AppsAPI) Create(ctx context.Context, createAppRequest CreateAppRequest) (*WaitGetAppIdle[App], error) { +func (a *AppsAPI) Create(ctx context.Context, createAppRequest CreateAppRequest) (*WaitGetAppActive[App], error) { app, err := a.appsImpl.Create(ctx, createAppRequest) if err != nil { return nil, err } - return &WaitGetAppIdle[App]{ + return &WaitGetAppActive[App]{ Response: app, Name: app.Name, Poll: func(timeout time.Duration, callback func(*App)) (*App, error) { - return a.WaitGetAppIdle(ctx, app.Name, timeout, callback) + return a.WaitGetAppActive(ctx, app.Name, timeout, callback) }, timeout: 20 * time.Minute, callback: nil, }, nil } -// Calls [AppsAPI.Create] and waits to reach IDLE state +// Calls [AppsAPI.Create] and waits to reach ACTIVE state // // You can override the default timeout of 20 minutes by calling adding // retries.Timeout[App](60*time.Minute) functional option. 
// -// Deprecated: use [AppsAPI.Create].Get() or [AppsAPI.WaitGetAppIdle] +// Deprecated: use [AppsAPI.Create].Get() or [AppsAPI.WaitGetAppActive] func (a *AppsAPI) CreateAndWait(ctx context.Context, createAppRequest CreateAppRequest, options ...retries.Option[App]) (*App, error) { wait, err := a.Create(ctx, createAppRequest) if err != nil { @@ -353,7 +422,7 @@ func (a *AppsAPI) CreateAndWait(ctx context.Context, createAppRequest CreateAppR // Delete an app. // // Deletes an app. -func (a *AppsAPI) DeleteByName(ctx context.Context, name string) error { +func (a *AppsAPI) DeleteByName(ctx context.Context, name string) (*App, error) { return a.appsImpl.Delete(ctx, DeleteAppRequest{ Name: name, }) @@ -535,42 +604,87 @@ func (a *AppsAPI) ListDeploymentsByAppName(ctx context.Context, appName string) // Start an app. // // Start the last active deployment of the app in the workspace. -func (a *AppsAPI) Start(ctx context.Context, startAppRequest StartAppRequest) (*WaitGetDeploymentAppSucceeded[AppDeployment], error) { - appDeployment, err := a.appsImpl.Start(ctx, startAppRequest) +func (a *AppsAPI) Start(ctx context.Context, startAppRequest StartAppRequest) (*WaitGetAppActive[App], error) { + app, err := a.appsImpl.Start(ctx, startAppRequest) if err != nil { return nil, err } - return &WaitGetDeploymentAppSucceeded[AppDeployment]{ - Response: appDeployment, - AppName: startAppRequest.Name, - DeploymentId: appDeployment.DeploymentId, - Poll: func(timeout time.Duration, callback func(*AppDeployment)) (*AppDeployment, error) { - return a.WaitGetDeploymentAppSucceeded(ctx, startAppRequest.Name, appDeployment.DeploymentId, timeout, callback) + return &WaitGetAppActive[App]{ + Response: app, + Name: app.Name, + Poll: func(timeout time.Duration, callback func(*App)) (*App, error) { + return a.WaitGetAppActive(ctx, app.Name, timeout, callback) }, timeout: 20 * time.Minute, callback: nil, }, nil } -// Calls [AppsAPI.Start] and waits to reach SUCCEEDED state +// Calls 
[AppsAPI.Start] and waits to reach ACTIVE state // // You can override the default timeout of 20 minutes by calling adding -// retries.Timeout[AppDeployment](60*time.Minute) functional option. +// retries.Timeout[App](60*time.Minute) functional option. // -// Deprecated: use [AppsAPI.Start].Get() or [AppsAPI.WaitGetDeploymentAppSucceeded] -func (a *AppsAPI) StartAndWait(ctx context.Context, startAppRequest StartAppRequest, options ...retries.Option[AppDeployment]) (*AppDeployment, error) { +// Deprecated: use [AppsAPI.Start].Get() or [AppsAPI.WaitGetAppActive] +func (a *AppsAPI) StartAndWait(ctx context.Context, startAppRequest StartAppRequest, options ...retries.Option[App]) (*App, error) { wait, err := a.Start(ctx, startAppRequest) if err != nil { return nil, err } - tmp := &retries.Info[AppDeployment]{Timeout: 20 * time.Minute} + tmp := &retries.Info[App]{Timeout: 20 * time.Minute} for _, o := range options { o(tmp) } wait.timeout = tmp.Timeout - wait.callback = func(info *AppDeployment) { + wait.callback = func(info *App) { for _, o := range options { - o(&retries.Info[AppDeployment]{ + o(&retries.Info[App]{ + Info: info, + Timeout: wait.timeout, + }) + } + } + return wait.Get() +} + +// Stop an app. +// +// Stops the active deployment of the app in the workspace. +func (a *AppsAPI) Stop(ctx context.Context, stopAppRequest StopAppRequest) (*WaitGetAppStopped[App], error) { + app, err := a.appsImpl.Stop(ctx, stopAppRequest) + if err != nil { + return nil, err + } + return &WaitGetAppStopped[App]{ + Response: app, + Name: app.Name, + Poll: func(timeout time.Duration, callback func(*App)) (*App, error) { + return a.WaitGetAppStopped(ctx, app.Name, timeout, callback) + }, + timeout: 20 * time.Minute, + callback: nil, + }, nil +} + +// Calls [AppsAPI.Stop] and waits to reach STOPPED state +// +// You can override the default timeout of 20 minutes by calling adding +// retries.Timeout[App](60*time.Minute) functional option. 
+// +// Deprecated: use [AppsAPI.Stop].Get() or [AppsAPI.WaitGetAppStopped] +func (a *AppsAPI) StopAndWait(ctx context.Context, stopAppRequest StopAppRequest, options ...retries.Option[App]) (*App, error) { + wait, err := a.Stop(ctx, stopAppRequest) + if err != nil { + return nil, err + } + tmp := &retries.Info[App]{Timeout: 20 * time.Minute} + for _, o := range options { + o(tmp) + } + wait.timeout = tmp.Timeout + wait.callback = func(info *App) { + for _, o := range options { + o(&retries.Info[App]{ Info: info, Timeout: wait.timeout, }) diff --git a/service/apps/impl.go b/service/apps/impl.go index 2f70a3b4f..fe8d2d6c2 100755 --- a/service/apps/impl.go +++ b/service/apps/impl.go @@ -17,7 +17,7 @@ type appsImpl struct { func (a *appsImpl) Create(ctx context.Context, request CreateAppRequest) (*App, error) { var app App - path := "/api/2.0/preview/apps" + path := "/api/2.0/apps" headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" @@ -25,18 +25,18 @@ func (a *appsImpl) Create(ctx context.Context, request CreateAppRequest) (*App, return &app, err } -func (a *appsImpl) Delete(ctx context.Context, request DeleteAppRequest) error { - var deleteResponse DeleteResponse - path := fmt.Sprintf("/api/2.0/preview/apps/%v", request.Name) +func (a *appsImpl) Delete(ctx context.Context, request DeleteAppRequest) (*App, error) { + var app App + path := fmt.Sprintf("/api/2.0/apps/%v", request.Name) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) - return err + err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &app) + return &app, err } func (a *appsImpl) Deploy(ctx context.Context, request CreateAppDeploymentRequest) (*AppDeployment, error) { var appDeployment AppDeployment - path := fmt.Sprintf("/api/2.0/preview/apps/%v/deployments", request.AppName) + path := 
fmt.Sprintf("/api/2.0/apps/%v/deployments", request.AppName) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" @@ -46,7 +46,7 @@ func (a *appsImpl) Deploy(ctx context.Context, request CreateAppDeploymentReques func (a *appsImpl) Get(ctx context.Context, request GetAppRequest) (*App, error) { var app App - path := fmt.Sprintf("/api/2.0/preview/apps/%v", request.Name) + path := fmt.Sprintf("/api/2.0/apps/%v", request.Name) headers := make(map[string]string) headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodGet, path, headers, request, &app) @@ -55,7 +55,7 @@ func (a *appsImpl) Get(ctx context.Context, request GetAppRequest) (*App, error) func (a *appsImpl) GetDeployment(ctx context.Context, request GetAppDeploymentRequest) (*AppDeployment, error) { var appDeployment AppDeployment - path := fmt.Sprintf("/api/2.0/preview/apps/%v/deployments/%v", request.AppName, request.DeploymentId) + path := fmt.Sprintf("/api/2.0/apps/%v/deployments/%v", request.AppName, request.DeploymentId) headers := make(map[string]string) headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodGet, path, headers, request, &appDeployment) @@ -82,7 +82,7 @@ func (a *appsImpl) GetPermissions(ctx context.Context, request GetAppPermissions func (a *appsImpl) List(ctx context.Context, request ListAppsRequest) (*ListAppsResponse, error) { var listAppsResponse ListAppsResponse - path := "/api/2.0/preview/apps" + path := "/api/2.0/apps" headers := make(map[string]string) headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listAppsResponse) @@ -91,7 +91,7 @@ func (a *appsImpl) List(ctx context.Context, request ListAppsRequest) (*ListApps func (a *appsImpl) ListDeployments(ctx context.Context, request ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error) { var listAppDeploymentsResponse ListAppDeploymentsResponse - path := 
fmt.Sprintf("/api/2.0/preview/apps/%v/deployments", request.AppName) + path := fmt.Sprintf("/api/2.0/apps/%v/deployments", request.AppName) headers := make(map[string]string) headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listAppDeploymentsResponse) @@ -108,29 +108,29 @@ func (a *appsImpl) SetPermissions(ctx context.Context, request AppPermissionsReq return &appPermissions, err } -func (a *appsImpl) Start(ctx context.Context, request StartAppRequest) (*AppDeployment, error) { - var appDeployment AppDeployment - path := fmt.Sprintf("/api/2.0/preview/apps/%v/start", request.Name) +func (a *appsImpl) Start(ctx context.Context, request StartAppRequest) (*App, error) { + var app App + path := fmt.Sprintf("/api/2.0/apps/%v/start", request.Name) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &appDeployment) - return &appDeployment, err + err := a.client.Do(ctx, http.MethodPost, path, headers, request, &app) + return &app, err } -func (a *appsImpl) Stop(ctx context.Context, request StopAppRequest) error { - var stopAppResponse StopAppResponse - path := fmt.Sprintf("/api/2.0/preview/apps/%v/stop", request.Name) +func (a *appsImpl) Stop(ctx context.Context, request StopAppRequest) (*App, error) { + var app App + path := fmt.Sprintf("/api/2.0/apps/%v/stop", request.Name) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &stopAppResponse) - return err + err := a.client.Do(ctx, http.MethodPost, path, headers, request, &app) + return &app, err } func (a *appsImpl) Update(ctx context.Context, request UpdateAppRequest) (*App, error) { var app App - path := fmt.Sprintf("/api/2.0/preview/apps/%v", request.Name) + path := fmt.Sprintf("/api/2.0/apps/%v", 
request.Name) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" diff --git a/service/apps/interface.go b/service/apps/interface.go index 95967598a..df88eec6a 100755 --- a/service/apps/interface.go +++ b/service/apps/interface.go @@ -19,7 +19,7 @@ type AppsService interface { // Delete an app. // // Deletes an app. - Delete(ctx context.Context, request DeleteAppRequest) error + Delete(ctx context.Context, request DeleteAppRequest) (*App, error) // Create an app deployment. // @@ -71,12 +71,12 @@ type AppsService interface { // Start an app. // // Start the last active deployment of the app in the workspace. - Start(ctx context.Context, request StartAppRequest) (*AppDeployment, error) + Start(ctx context.Context, request StartAppRequest) (*App, error) // Stop an app. // // Stops the active deployment of the app in the workspace. - Stop(ctx context.Context, request StopAppRequest) error + Stop(ctx context.Context, request StopAppRequest) (*App, error) // Update an app. // diff --git a/service/apps/model.go b/service/apps/model.go index 23698eac3..f01f43cd5 100755 --- a/service/apps/model.go +++ b/service/apps/model.go @@ -11,6 +11,10 @@ import ( type App struct { // The active deployment of the app. ActiveDeployment *AppDeployment `json:"active_deployment,omitempty"` + + AppStatus *ApplicationStatus `json:"app_status,omitempty"` + + ComputeStatus *ComputeStatus `json:"compute_status,omitempty"` // The creation time of the app. Formatted timestamp in ISO 6801. CreateTime string `json:"create_time,omitempty"` // The email of the user that created the app. @@ -26,8 +30,6 @@ type App struct { ServicePrincipalId int64 `json:"service_principal_id,omitempty"` ServicePrincipalName string `json:"service_principal_name,omitempty"` - - Status *AppStatus `json:"status,omitempty"` // The update time of the app. Formatted timestamp in ISO 6801. 
UpdateTime string `json:"update_time,omitempty"` // The email of the user that last updated the app. @@ -108,7 +110,7 @@ type AppDeployment struct { // the app in the workspace during deployment creation, whereas the latter // provides a system generated stable snapshotted source code path used by // the deployment. - SourceCodePath string `json:"source_code_path"` + SourceCodePath string `json:"source_code_path,omitempty"` // Status and status message of the deployment Status *AppDeploymentStatus `json:"status,omitempty"` // The update time of the deployment. Formatted timestamp in ISO 6801. @@ -170,12 +172,12 @@ func (f *AppDeploymentMode) Type() string { type AppDeploymentState string +const AppDeploymentStateCancelled AppDeploymentState = `CANCELLED` + const AppDeploymentStateFailed AppDeploymentState = `FAILED` const AppDeploymentStateInProgress AppDeploymentState = `IN_PROGRESS` -const AppDeploymentStateStopped AppDeploymentState = `STOPPED` - const AppDeploymentStateSucceeded AppDeploymentState = `SUCCEEDED` // String representation for [fmt.Print] @@ -186,11 +188,11 @@ func (f *AppDeploymentState) String() string { // Set raw string value and validate it against allowed values func (f *AppDeploymentState) Set(v string) error { switch v { - case `FAILED`, `IN_PROGRESS`, `STOPPED`, `SUCCEEDED`: + case `CANCELLED`, `FAILED`, `IN_PROGRESS`, `SUCCEEDED`: *f = AppDeploymentState(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "FAILED", "IN_PROGRESS", "STOPPED", "SUCCEEDED"`, v) + return fmt.Errorf(`value "%s" is not one of "CANCELLED", "FAILED", "IN_PROGRESS", "SUCCEEDED"`, v) } } @@ -302,63 +304,113 @@ type AppPermissionsRequest struct { AppName string `json:"-" url:"-"` } -type AppState string +type ApplicationState string + +const ApplicationStateCrashed ApplicationState = `CRASHED` + +const ApplicationStateDeploying ApplicationState = `DEPLOYING` + +const ApplicationStateRunning ApplicationState = `RUNNING` + +const 
ApplicationStateUnavailable ApplicationState = `UNAVAILABLE` + +// String representation for [fmt.Print] +func (f *ApplicationState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ApplicationState) Set(v string) error { + switch v { + case `CRASHED`, `DEPLOYING`, `RUNNING`, `UNAVAILABLE`: + *f = ApplicationState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CRASHED", "DEPLOYING", "RUNNING", "UNAVAILABLE"`, v) + } +} + +// Type always returns ApplicationState to satisfy [pflag.Value] interface +func (f *ApplicationState) Type() string { + return "ApplicationState" +} + +type ApplicationStatus struct { + // Application status message + Message string `json:"message,omitempty"` + // State of the application. + State ApplicationState `json:"state,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ApplicationStatus) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} -const AppStateCreating AppState = `CREATING` +func (s ApplicationStatus) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ComputeState string + +const ComputeStateActive ComputeState = `ACTIVE` -const AppStateDeleted AppState = `DELETED` +const ComputeStateDeleting ComputeState = `DELETING` -const AppStateDeleting AppState = `DELETING` +const ComputeStateError ComputeState = `ERROR` -const AppStateError AppState = `ERROR` +const ComputeStateStarting ComputeState = `STARTING` -const AppStateIdle AppState = `IDLE` +const ComputeStateStopped ComputeState = `STOPPED` -const AppStateRunning AppState = `RUNNING` +const ComputeStateStopping ComputeState = `STOPPING` -const AppStateStarting AppState = `STARTING` +const ComputeStateUpdating ComputeState = `UPDATING` // String representation for [fmt.Print] -func (f *AppState) String() string { +func (f *ComputeState) String() string { return string(*f) } // Set raw string value and validate it against allowed values 
-func (f *AppState) Set(v string) error { +func (f *ComputeState) Set(v string) error { switch v { - case `CREATING`, `DELETED`, `DELETING`, `ERROR`, `IDLE`, `RUNNING`, `STARTING`: - *f = AppState(v) + case `ACTIVE`, `DELETING`, `ERROR`, `STARTING`, `STOPPED`, `STOPPING`, `UPDATING`: + *f = ComputeState(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "CREATING", "DELETED", "DELETING", "ERROR", "IDLE", "RUNNING", "STARTING"`, v) + return fmt.Errorf(`value "%s" is not one of "ACTIVE", "DELETING", "ERROR", "STARTING", "STOPPED", "STOPPING", "UPDATING"`, v) } } -// Type always returns AppState to satisfy [pflag.Value] interface -func (f *AppState) Type() string { - return "AppState" +// Type always returns ComputeState to satisfy [pflag.Value] interface +func (f *ComputeState) Type() string { + return "ComputeState" } -type AppStatus struct { - // Message corresponding with the app state. +type ComputeStatus struct { + // Compute status message Message string `json:"message,omitempty"` - // State of the app. - State AppState `json:"state,omitempty"` + // State of the app compute. + State ComputeState `json:"state,omitempty"` ForceSendFields []string `json:"-"` } -func (s *AppStatus) UnmarshalJSON(b []byte) error { +func (s *ComputeStatus) UnmarshalJSON(b []byte) error { return marshal.Unmarshal(b, s) } -func (s AppStatus) MarshalJSON() ([]byte, error) { +func (s ComputeStatus) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } type CreateAppDeploymentRequest struct { // The name of the app. AppName string `json:"-" url:"-"` + // The unique id of the deployment. + DeploymentId string `json:"deployment_id,omitempty"` // The mode of which the deployment will manage the source code. 
Mode AppDeploymentMode `json:"mode,omitempty"` // The workspace file system path of the source code used to create the app @@ -368,7 +420,17 @@ type CreateAppDeploymentRequest struct { // the app in the workspace during deployment creation, whereas the latter // provides a system generated stable snapshotted source code path used by // the deployment. - SourceCodePath string `json:"source_code_path"` + SourceCodePath string `json:"source_code_path,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateAppDeploymentRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateAppDeploymentRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type CreateAppRequest struct { @@ -395,9 +457,6 @@ type DeleteAppRequest struct { Name string `json:"-" url:"-"` } -type DeleteResponse struct { -} - // Get an app deployment type GetAppDeploymentRequest struct { // The name of the app. @@ -512,9 +571,6 @@ type StopAppRequest struct { Name string `json:"-" url:"-"` } -type StopAppResponse struct { -} - type UpdateAppRequest struct { // The description of the app. Description string `json:"description,omitempty"` diff --git a/service/catalog/api.go b/service/catalog/api.go index 15e78f103..ccb1068b3 100755 --- a/service/catalog/api.go +++ b/service/catalog/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Account Metastore Assignments, Account Metastores, Account Storage Credentials, Artifact Allowlists, Catalogs, Connections, External Locations, Functions, Grants, Metastores, Model Versions, Online Tables, Quality Monitors, Registered Models, Resource Quotas, Schemas, Storage Credentials, System Schemas, Table Constraints, Tables, Volumes, Workspace Bindings, etc. 
+// These APIs allow you to manage Account Metastore Assignments, Account Metastores, Account Storage Credentials, Artifact Allowlists, Catalogs, Connections, External Locations, Functions, Grants, Metastores, Model Versions, Online Tables, Quality Monitors, Registered Models, Resource Quotas, Schemas, Storage Credentials, System Schemas, Table Constraints, Tables, Temporary Table Credentials, Volumes, Workspace Bindings, etc. package catalog import ( @@ -3634,6 +3634,45 @@ func (a *TablesAPI) ListSummariesAll(ctx context.Context, request ListSummariesR return listing.ToSlice[TableSummary](ctx, iterator) } +type TemporaryTableCredentialsInterface interface { + + // Generate a temporary table credential. + // + // Get a short-lived credential for directly accessing the table data on cloud + // storage. The metastore must have external_access_enabled flag set to true + // (default false). The caller must have EXTERNAL_USE_SCHEMA privilege on the + // parent schema and this privilege can only be granted by catalog owners. + GenerateTemporaryTableCredentials(ctx context.Context, request GenerateTemporaryTableCredentialRequest) (*GenerateTemporaryTableCredentialResponse, error) +} + +func NewTemporaryTableCredentials(client *client.DatabricksClient) *TemporaryTableCredentialsAPI { + return &TemporaryTableCredentialsAPI{ + temporaryTableCredentialsImpl: temporaryTableCredentialsImpl{ + client: client, + }, + } +} + +// Temporary Table Credentials refer to short-lived, downscoped credentials used +// to access cloud storage locationswhere table data is stored in Databricks. +// These credentials are employed to provide secure and time-limitedaccess to +// data in cloud environments such as AWS, Azure, and Google Cloud. 
Each cloud +// provider has its own typeof credentials: AWS uses temporary session tokens +// via AWS Security Token Service (STS), Azure utilizesShared Access Signatures +// (SAS) for its data storage services, and Google Cloud supports temporary +// credentialsthrough OAuth 2.0.Temporary table credentials ensure that data +// access is limited in scope and duration, reducing the risk ofunauthorized +// access or misuse. To use the temporary table credentials API, a metastore +// admin needs to enable the external_access_enabled flag (off by default) at +// the metastore level, and user needs to be granted the EXTERNAL USE SCHEMA +// permission at the schema level by catalog admin. Note that EXTERNAL USE +// SCHEMA is a schema level permission that can only be granted by catalog admin +// explicitly and is not included in schema ownership or ALL PRIVILEGES on the +// schema for security reason. +type TemporaryTableCredentialsAPI struct { + temporaryTableCredentialsImpl +} + type VolumesInterface interface { // Create a Volume. 
diff --git a/service/catalog/impl.go b/service/catalog/impl.go index 6a5297d38..4055d4f12 100755 --- a/service/catalog/impl.go +++ b/service/catalog/impl.go @@ -1022,6 +1022,21 @@ func (a *tablesImpl) Update(ctx context.Context, request UpdateTableRequest) err return err } +// unexported type that holds implementations of just TemporaryTableCredentials API methods +type temporaryTableCredentialsImpl struct { + client *client.DatabricksClient +} + +func (a *temporaryTableCredentialsImpl) GenerateTemporaryTableCredentials(ctx context.Context, request GenerateTemporaryTableCredentialRequest) (*GenerateTemporaryTableCredentialResponse, error) { + var generateTemporaryTableCredentialResponse GenerateTemporaryTableCredentialResponse + path := "/api/2.0/unity-catalog/temporary-table-credentials" + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, request, &generateTemporaryTableCredentialResponse) + return &generateTemporaryTableCredentialResponse, err +} + // unexported type that holds implementations of just Volumes API methods type volumesImpl struct { client *client.DatabricksClient diff --git a/service/catalog/interface.go b/service/catalog/interface.go index c274cc2a8..5466721d9 100755 --- a/service/catalog/interface.go +++ b/service/catalog/interface.go @@ -1140,6 +1140,34 @@ type TablesService interface { Update(ctx context.Context, request UpdateTableRequest) error } +// Temporary Table Credentials refer to short-lived, downscoped credentials used +// to access cloud storage locationswhere table data is stored in Databricks. +// These credentials are employed to provide secure and time-limitedaccess to +// data in cloud environments such as AWS, Azure, and Google Cloud. 
Each cloud +// provider has its own typeof credentials: AWS uses temporary session tokens +// via AWS Security Token Service (STS), Azure utilizesShared Access Signatures +// (SAS) for its data storage services, and Google Cloud supports temporary +// credentialsthrough OAuth 2.0.Temporary table credentials ensure that data +// access is limited in scope and duration, reducing the risk ofunauthorized +// access or misuse. To use the temporary table credentials API, a metastore +// admin needs to enable the external_access_enabled flag (off by default) at +// the metastore level, and user needs to be granted the EXTERNAL USE SCHEMA +// permission at the schema level by catalog admin. Note that EXTERNAL USE +// SCHEMA is a schema level permission that can only be granted by catalog admin +// explicitly and is not included in schema ownership or ALL PRIVILEGES on the +// schema for security reason. +type TemporaryTableCredentialsService interface { + + // Generate a temporary table credential. + // + // Get a short-lived credential for directly accessing the table data on + // cloud storage. The metastore must have external_access_enabled flag set + // to true (default false). The caller must have EXTERNAL_USE_SCHEMA + // privilege on the parent schema and this privilege can only be granted by + // catalog owners. + GenerateTemporaryTableCredentials(ctx context.Context, request GenerateTemporaryTableCredentialRequest) (*GenerateTemporaryTableCredentialResponse, error) +} + // Volumes are a Unity Catalog (UC) capability for accessing, storing, // governing, organizing and processing files. 
Use cases include running machine // learning on unstructured data such as image, audio, video, or PDF files, diff --git a/service/catalog/model.go b/service/catalog/model.go index 64bb4e9a1..cbb1ce341 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -122,6 +122,31 @@ func (f *ArtifactType) Type() string { type AssignResponse struct { } +// AWS temporary credentials for API authentication. Read more at +// https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. +type AwsCredentials struct { + // The access key ID that identifies the temporary credentials. + AccessKeyId string `json:"access_key_id,omitempty"` + // The Amazon Resource Name (ARN) of the S3 access point for temporary + // credentials related the external location. + AccessPoint string `json:"access_point,omitempty"` + // The secret access key that can be used to sign AWS API requests. + SecretAccessKey string `json:"secret_access_key,omitempty"` + // The token that users must pass to AWS API to use the temporary + // credentials. + SessionToken string `json:"session_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AwsCredentials) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AwsCredentials) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type AwsIamRoleRequest struct { // The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access. RoleArn string `json:"role_arn"` @@ -209,6 +234,23 @@ type AzureServicePrincipal struct { DirectoryId string `json:"directory_id"` } +// Azure temporary credentials for API authentication. 
Read more at +// https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas +type AzureUserDelegationSas struct { + // The signed URI (SAS Token) used to access blob services for a given path + SasToken string `json:"sas_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AzureUserDelegationSas) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AzureUserDelegationSas) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Cancel refresh type CancelRefreshRequest struct { // ID of the refresh. @@ -815,7 +857,7 @@ type CreateFunction struct { // JSON-serialized key-value pair map, encoded (escaped) as a string. Properties string `json:"properties,omitempty"` // Table function return parameters. - ReturnParams FunctionParameterInfos `json:"return_params"` + ReturnParams *FunctionParameterInfos `json:"return_params,omitempty"` // Function language. When **EXTERNAL** is used, the language of the routine // function should be specified in the __external_language__ field, and the // __return_params__ of the function cannot be used (as **TABLE** return @@ -825,7 +867,7 @@ type CreateFunction struct { // Function body. RoutineDefinition string `json:"routine_definition"` // Function dependencies. - RoutineDependencies DependencyList `json:"routine_dependencies"` + RoutineDependencies *DependencyList `json:"routine_dependencies,omitempty"` // Name of parent schema relative to its parent catalog. SchemaName string `json:"schema_name"` // Function security type. @@ -2105,6 +2147,72 @@ func (f *FunctionParameterType) Type() string { return "FunctionParameterType" } +// GCP temporary credentials for API authentication. 
Read more at +// https://developers.google.com/identity/protocols/oauth2/service-account +type GcpOauthToken struct { + OauthToken string `json:"oauth_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GcpOauthToken) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GcpOauthToken) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GenerateTemporaryTableCredentialRequest struct { + // The operation performed against the table data, either READ or + // READ_WRITE. If READ_WRITE is specified, the credentials returned will + // have write permissions, otherwise, it will be read only. + Operation TableOperation `json:"operation,omitempty"` + // UUID of the table to read or write. + TableId string `json:"table_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GenerateTemporaryTableCredentialRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenerateTemporaryTableCredentialRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GenerateTemporaryTableCredentialResponse struct { + // AWS temporary credentials for API authentication. Read more at + // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. + AwsTempCredentials *AwsCredentials `json:"aws_temp_credentials,omitempty"` + // Azure temporary credentials for API authentication. Read more at + // https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas + AzureUserDelegationSas *AzureUserDelegationSas `json:"azure_user_delegation_sas,omitempty"` + // Server time when the credential will expire, in unix epoch milliseconds + // since January 1, 1970 at 00:00:00 UTC. The API client is advised to cache + // the credential given this expiration time. + ExpirationTime int64 `json:"expiration_time,omitempty"` + // GCP temporary credentials for API authentication. 
Read more at + // https://developers.google.com/identity/protocols/oauth2/service-account + GcpOauthToken *GcpOauthToken `json:"gcp_oauth_token,omitempty"` + // R2 temporary credentials for API authentication. Read more at + // https://developers.cloudflare.com/r2/api/s3/tokens/. + R2TempCredentials *R2Credentials `json:"r2_temp_credentials,omitempty"` + // The URL of the storage path accessible by the temporary credential. + Url string `json:"url,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GenerateTemporaryTableCredentialResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenerateTemporaryTableCredentialResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Gets the metastore assignment for a workspace type GetAccountMetastoreAssignmentRequest struct { // Workspace ID. @@ -2336,6 +2444,9 @@ type GetMetastoreSummaryResponse struct { DeltaSharingRecipientTokenLifetimeInSeconds int64 `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"` // The scope of Delta Sharing enabled for the metastore. DeltaSharingScope GetMetastoreSummaryResponseDeltaSharingScope `json:"delta_sharing_scope,omitempty"` + // Whether to allow non-DBR clients to directly access entities under the + // metastore. + ExternalAccessEnabled bool `json:"external_access_enabled,omitempty"` // Globally unique metastore ID across clouds and regions, of the form // `cloud:region:metastore_id`. GlobalMetastoreId string `json:"global_metastore_id,omitempty"` @@ -3282,6 +3393,9 @@ type MetastoreInfo struct { DeltaSharingRecipientTokenLifetimeInSeconds int64 `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"` // The scope of Delta Sharing enabled for the metastore. DeltaSharingScope MetastoreInfoDeltaSharingScope `json:"delta_sharing_scope,omitempty"` + // Whether to allow non-DBR clients to directly access entities under the + // metastore. 
+ ExternalAccessEnabled bool `json:"external_access_enabled,omitempty"` // Globally unique metastore ID across clouds and regions, of the form // `cloud:region:metastore_id`. GlobalMetastoreId string `json:"global_metastore_id,omitempty"` @@ -4263,6 +4377,27 @@ func (s QuotaInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// R2 temporary credentials for API authentication. Read more at +// https://developers.cloudflare.com/r2/api/s3/tokens/. +type R2Credentials struct { + // The access key ID that identifies the temporary credentials. + AccessKeyId string `json:"access_key_id,omitempty"` + // The secret access key associated with the access key. + SecretAccessKey string `json:"secret_access_key,omitempty"` + // The generated JWT that users must pass to use the temporary credentials. + SessionToken string `json:"session_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *R2Credentials) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s R2Credentials) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get a Volume type ReadVolumeRequest struct { // Whether to include volumes in the response for which the principal can @@ -4783,6 +4918,33 @@ func (s TableInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type TableOperation string + +const TableOperationRead TableOperation = `READ` + +const TableOperationReadWrite TableOperation = `READ_WRITE` + +// String representation for [fmt.Print] +func (f *TableOperation) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TableOperation) Set(v string) error { + switch v { + case `READ`, `READ_WRITE`: + *f = TableOperation(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "READ", "READ_WRITE"`, v) + } +} + +// Type always returns TableOperation to satisfy [pflag.Value] interface +func (f *TableOperation) Type() string { + return 
"TableOperation" +} + type TableRowFilter struct { // The full name of the row filter SQL UDF. FunctionName string `json:"function_name"` diff --git a/service/compute/api.go b/service/compute/api.go index 72827aac3..458f0f527 100755 --- a/service/compute/api.go +++ b/service/compute/api.go @@ -306,6 +306,12 @@ type ClustersInterface interface { // If Databricks acquires at least 85% of the requested on-demand nodes, cluster // creation will succeed. Otherwise the cluster will terminate with an // informative error message. + // + // Rather than authoring the cluster's JSON definition from scratch, Databricks + // recommends filling out the [create compute UI] and then copying the generated + // JSON definition from the UI. + // + // [create compute UI]: https://docs.databricks.com/compute/configure.html Create(ctx context.Context, createCluster CreateCluster) (*WaitGetClusterRunning[CreateClusterResponse], error) // Calls [ClustersAPIInterface.Create] and waits to reach RUNNING state @@ -773,6 +779,12 @@ func (w *WaitGetClusterTerminated[R]) GetWithTimeout(timeout time.Duration) (*Cl // If Databricks acquires at least 85% of the requested on-demand nodes, cluster // creation will succeed. Otherwise the cluster will terminate with an // informative error message. +// +// Rather than authoring the cluster's JSON definition from scratch, Databricks +// recommends filling out the [create compute UI] and then copying the generated +// JSON definition from the UI. 
+// +// [create compute UI]: https://docs.databricks.com/compute/configure.html func (a *ClustersAPI) Create(ctx context.Context, createCluster CreateCluster) (*WaitGetClusterRunning[CreateClusterResponse], error) { createClusterResponse, err := a.clustersImpl.Create(ctx, createCluster) if err != nil { diff --git a/service/compute/interface.go b/service/compute/interface.go index cdd3135d3..92b5d9af8 100755 --- a/service/compute/interface.go +++ b/service/compute/interface.go @@ -130,6 +130,12 @@ type ClustersService interface { // If Databricks acquires at least 85% of the requested on-demand nodes, // cluster creation will succeed. Otherwise the cluster will terminate with // an informative error message. + // + // Rather than authoring the cluster's JSON definition from scratch, + // Databricks recommends filling out the [create compute UI] and then + // copying the generated JSON definition from the UI. + // + // [create compute UI]: https://docs.databricks.com/compute/configure.html Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error) // Terminate cluster. diff --git a/service/compute/model.go b/service/compute/model.go index a7d045066..7ce20880f 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -493,8 +493,14 @@ type ClusterAttributes struct { NodeTypeId string `json:"node_type_id,omitempty"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId string `json:"policy_id,omitempty"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. 
+ // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName string `json:"single_user_name,omitempty"` @@ -713,8 +719,14 @@ type ClusterDetails struct { NumWorkers int `json:"num_workers,omitempty"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId string `json:"policy_id,omitempty"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName string `json:"single_user_name,omitempty"` @@ -1257,8 +1269,14 @@ type ClusterSpec struct { NumWorkers int `json:"num_workers,omitempty"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId string `json:"policy_id,omitempty"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. 
+ // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName string `json:"single_user_name,omitempty"` @@ -1557,8 +1575,14 @@ type CreateCluster struct { NumWorkers int `json:"num_workers,omitempty"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId string `json:"policy_id,omitempty"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName string `json:"single_user_name,omitempty"` @@ -2254,8 +2278,14 @@ type EditCluster struct { NumWorkers int `json:"num_workers,omitempty"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId string `json:"policy_id,omitempty"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. 
+ // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName string `json:"single_user_name,omitempty"` @@ -4619,8 +4649,14 @@ func (s Results) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Decides which runtime engine to be use, e.g. Standard vs. Photon. If -// unspecified, the runtime engine is inferred from spark_version. +// Determines the cluster's runtime engine, either standard or Photon. +// +// This field is not compatible with legacy `spark_version` values that contain +// `-photon-`. Remove `-photon-` from the `spark_version` and set +// `runtime_engine` to `PHOTON`. +// +// If left unspecified, the runtime engine defaults to standard unless the +// spark_version contains -photon-, in which case Photon will be used. type RuntimeEngine string const RuntimeEngineNull RuntimeEngine = `NULL` @@ -5175,8 +5211,14 @@ type UpdateClusterResource struct { NumWorkers int `json:"num_workers,omitempty"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId string `json:"policy_id,omitempty"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. 
RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName string `json:"single_user_name,omitempty"` diff --git a/service/iam/model.go b/service/iam/model.go index 57fc0eb45..208e33df1 100755 --- a/service/iam/model.go +++ b/service/iam/model.go @@ -330,7 +330,7 @@ type Group struct { Groups []ComplexValue `json:"groups,omitempty"` // Databricks group ID - Id string `json:"id,omitempty" url:"-"` + Id string `json:"id,omitempty"` Members []ComplexValue `json:"members,omitempty"` // Container for the group identifier. Workspace local versus account. @@ -1237,7 +1237,7 @@ type ServicePrincipal struct { Groups []ComplexValue `json:"groups,omitempty"` // Databricks service principal ID. - Id string `json:"id,omitempty"` + Id string `json:"id,omitempty" url:"-"` // Corresponds to AWS instance profile/arn role. Roles []ComplexValue `json:"roles,omitempty"` // The schema of the List response. @@ -1326,7 +1326,7 @@ type User struct { Groups []ComplexValue `json:"groups,omitempty"` // Databricks user ID. This is automatically set by Databricks. Any value // provided by the client will be ignored. - Id string `json:"id,omitempty"` + Id string `json:"id,omitempty" url:"-"` Name *Name `json:"name,omitempty"` // Corresponds to AWS instance profile/arn role. diff --git a/service/jobs/model.go b/service/jobs/model.go index a29a64117..2ac304cf0 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -379,7 +379,11 @@ type CreateJob struct { // begin or complete as well as when this job is deleted. EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` // A list of task execution environment specifications that can be - // referenced by tasks of this job. + // referenced by serverless tasks of this job. An environment is required to + // be present for serverless tasks. For serverless notebook tasks, the + // environment is accessible in the notebook environment panel. 
For other + // serverless tasks, the task environment is required to be specified using + // environment_key in the task settings. Environments []JobEnvironment `json:"environments,omitempty"` // Used to tell what is the format of the job. This field is ignored in // Create/Update/Reset calls. When using the Jobs API 2.1 this value is @@ -425,12 +429,12 @@ type CreateJob struct { Parameters []JobParameterDefinition `json:"parameters,omitempty"` // The queue settings of the job. Queue *QueueSettings `json:"queue,omitempty"` - // Write-only setting, available only in Create/Update/Reset and Submit - // calls. Specifies the user or service principal that the job runs as. If - // not specified, the job runs as the user who created the job. + // Write-only setting. Specifies the user, service principal or group that + // the job/pipeline runs as. If not specified, the job/pipeline runs as the + // user who created the job/pipeline. // - // Only `user_name` or `service_principal_name` can be specified. If both - // are specified, an error is thrown. + // Exactly one of `user_name`, `service_principal_name`, `group_name` should + // be specified. If not, an error is thrown. RunAs *JobRunAs `json:"run_as,omitempty"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI @@ -1361,12 +1365,12 @@ type JobPermissionsRequest struct { JobId string `json:"-" url:"-"` } -// Write-only setting, available only in Create/Update/Reset and Submit calls. -// Specifies the user or service principal that the job runs as. If not -// specified, the job runs as the user who created the job. +// Write-only setting. Specifies the user, service principal or group that the +// job/pipeline runs as. If not specified, the job/pipeline runs as the user who +// created the job/pipeline. // -// Only `user_name` or `service_principal_name` can be specified. If both are -// specified, an error is thrown. 
+// Exactly one of `user_name`, `service_principal_name`, `group_name` should be +// specified. If not, an error is thrown. type JobRunAs struct { // Application ID of an active service principal. Setting this field // requires the `servicePrincipal/user` role. @@ -1405,7 +1409,11 @@ type JobSettings struct { // begin or complete as well as when this job is deleted. EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` // A list of task execution environment specifications that can be - // referenced by tasks of this job. + // referenced by serverless tasks of this job. An environment is required to + // be present for serverless tasks. For serverless notebook tasks, the + // environment is accessible in the notebook environment panel. For other + // serverless tasks, the task environment is required to be specified using + // environment_key in the task settings. Environments []JobEnvironment `json:"environments,omitempty"` // Used to tell what is the format of the job. This field is ignored in // Create/Update/Reset calls. When using the Jobs API 2.1 this value is @@ -1451,12 +1459,12 @@ type JobSettings struct { Parameters []JobParameterDefinition `json:"parameters,omitempty"` // The queue settings of the job. Queue *QueueSettings `json:"queue,omitempty"` - // Write-only setting, available only in Create/Update/Reset and Submit - // calls. Specifies the user or service principal that the job runs as. If - // not specified, the job runs as the user who created the job. + // Write-only setting. Specifies the user, service principal or group that + // the job/pipeline runs as. If not specified, the job/pipeline runs as the + // user who created the job/pipeline. // - // Only `user_name` or `service_principal_name` can be specified. If both - // are specified, an error is thrown. + // Exactly one of `user_name`, `service_principal_name`, `group_name` should + // be specified. If not, an error is thrown. 
RunAs *JobRunAs `json:"run_as,omitempty"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI @@ -3092,12 +3100,16 @@ type RunParameters struct { // completed successfully with some failures; leaf tasks were successful. * // `UPSTREAM_FAILED`: The run was skipped because of an upstream failure. * // `UPSTREAM_CANCELED`: The run was skipped because an upstream task was -// canceled. +// canceled. * `DISABLED`: The run was skipped because it was disabled +// explicitly by the user. type RunResultState string // The run was canceled at user request. const RunResultStateCanceled RunResultState = `CANCELED` +// The run was skipped because it was disabled explicitly by the user. +const RunResultStateDisabled RunResultState = `DISABLED` + // The run was skipped because the necessary conditions were not met. const RunResultStateExcluded RunResultState = `EXCLUDED` @@ -3131,11 +3143,11 @@ func (f *RunResultState) String() string { // Set raw string value and validate it against allowed values func (f *RunResultState) Set(v string) error { switch v { - case `CANCELED`, `EXCLUDED`, `FAILED`, `MAXIMUM_CONCURRENT_RUNS_REACHED`, `SUCCESS`, `SUCCESS_WITH_FAILURES`, `TIMEDOUT`, `UPSTREAM_CANCELED`, `UPSTREAM_FAILED`: + case `CANCELED`, `DISABLED`, `EXCLUDED`, `FAILED`, `MAXIMUM_CONCURRENT_RUNS_REACHED`, `SUCCESS`, `SUCCESS_WITH_FAILURES`, `TIMEDOUT`, `UPSTREAM_CANCELED`, `UPSTREAM_FAILED`: *f = RunResultState(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "CANCELED", "EXCLUDED", "FAILED", "MAXIMUM_CONCURRENT_RUNS_REACHED", "SUCCESS", "SUCCESS_WITH_FAILURES", "TIMEDOUT", "UPSTREAM_CANCELED", "UPSTREAM_FAILED"`, v) + return fmt.Errorf(`value "%s" is not one of "CANCELED", "DISABLED", "EXCLUDED", "FAILED", "MAXIMUM_CONCURRENT_RUNS_REACHED", "SUCCESS", "SUCCESS_WITH_FAILURES", "TIMEDOUT", "UPSTREAM_CANCELED", "UPSTREAM_FAILED"`, v) } } diff --git 
a/service/pipelines/model.go b/service/pipelines/model.go index 067608e41..d0a077718 100755 --- a/service/pipelines/model.go +++ b/service/pipelines/model.go @@ -13,6 +13,8 @@ type CreatePipeline struct { // If false, deployment will fail if name conflicts with that of another // pipeline. AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"` + // Budget policy of this pipeline. + BudgetPolicyId string `json:"budget_policy_id,omitempty"` // A catalog in Unity Catalog to publish data from this pipeline to. If // `target` is specified, tables in this pipeline are published to a // `target` schema inside `catalog` (for example, @@ -164,6 +166,8 @@ type EditPipeline struct { // If false, deployment will fail if name has changed and conflicts the name // of another pipeline. AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"` + // Budget policy of this pipeline. + BudgetPolicyId string `json:"budget_policy_id,omitempty"` // A catalog in Unity Catalog to publish data from this pipeline to. If // `target` is specified, tables in this pipeline are published to a // `target` schema inside `catalog` (for example, @@ -332,6 +336,8 @@ type GetPipelineResponse struct { ClusterId string `json:"cluster_id,omitempty"` // The username of the pipeline creator. CreatorUserName string `json:"creator_user_name,omitempty"` + // Serverless budget policy ID of this pipeline. + EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` // The health of a pipeline. Health GetPipelineResponseHealth `json:"health,omitempty"` // The last time the pipeline settings were modified or created. @@ -1087,6 +1093,8 @@ type PipelinePermissionsRequest struct { } type PipelineSpec struct { + // Budget policy of this pipeline. + BudgetPolicyId string `json:"budget_policy_id,omitempty"` // A catalog in Unity Catalog to publish data from this pipeline to. 
If // `target` is specified, tables in this pipeline are published to a // `target` schema inside `catalog` (for example, diff --git a/service/pkg.go b/service/pkg.go index 90179296d..476f14e16 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -62,6 +62,10 @@ // // - [settings.DefaultNamespaceAPI]: The default namespace setting API allows users to configure the default namespace for a Databricks workspace. // +// - [settings.DisableLegacyAccessAPI]: 'Disabling legacy access' has the following impacts: 1. +// +// - [settings.DisableLegacyFeaturesAPI]: Disable legacy features for new Databricks workspaces. +// // - [provisioning.EncryptionKeysAPI]: These APIs manage encryption key configurations for this workspace (optional). // // - [settings.EnhancedSecurityMonitoringAPI]: Controls whether enhanced security monitoring is enabled for the current workspace. @@ -216,6 +220,8 @@ // // - [catalog.TablesAPI]: A table resides in the third layer of Unity Catalog’s three-level namespace. // +// - [catalog.TemporaryTableCredentialsAPI]: Temporary Table Credentials refer to short-lived, downscoped credentials used to access cloud storage locationswhere table data is stored in Databricks. +// // - [settings.TokenManagementAPI]: Enables administrators to get all tokens and delete tokens for other users. // // - [settings.TokensAPI]: The Token API allows you to create, list, and revoke tokens that can be used to authenticate and access Databricks REST APIs. 
@@ -307,6 +313,8 @@ var ( _ *files.DbfsAPI = nil _ *sql.DbsqlPermissionsAPI = nil _ *settings.DefaultNamespaceAPI = nil + _ *settings.DisableLegacyAccessAPI = nil + _ *settings.DisableLegacyFeaturesAPI = nil _ *provisioning.EncryptionKeysAPI = nil _ *settings.EnhancedSecurityMonitoringAPI = nil _ *settings.EsmEnablementAccountAPI = nil @@ -384,6 +392,7 @@ var ( _ *catalog.SystemSchemasAPI = nil _ *catalog.TableConstraintsAPI = nil _ *catalog.TablesAPI = nil + _ *catalog.TemporaryTableCredentialsAPI = nil _ *settings.TokenManagementAPI = nil _ *settings.TokensAPI = nil _ *billing.UsageDashboardsAPI = nil diff --git a/service/serving/api.go b/service/serving/api.go index 0b9e37ab5..3f73673e7 100755 --- a/service/serving/api.go +++ b/service/serving/api.go @@ -134,10 +134,17 @@ type ServingEndpointsInterface interface { // Update rate limits of a serving endpoint. // - // Used to update the rate limits of a serving endpoint. NOTE: only external and - // foundation model endpoints are supported as of now. + // Used to update the rate limits of a serving endpoint. NOTE: Only foundation + // model endpoints are currently supported. For external models, use AI Gateway + // to manage rate limits. Put(ctx context.Context, request PutRequest) (*PutResponse, error) + // Update AI Gateway of a serving endpoint. + // + // Used to update the AI Gateway of a serving endpoint. NOTE: Only external + // model endpoints are currently supported. + PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error) + // Query a serving endpoint. 
Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) diff --git a/service/serving/impl.go b/service/serving/impl.go index 812ae7f6f..0b211b214 100755 --- a/service/serving/impl.go +++ b/service/serving/impl.go @@ -131,6 +131,16 @@ func (a *servingEndpointsImpl) Put(ctx context.Context, request PutRequest) (*Pu return &putResponse, err } +func (a *servingEndpointsImpl) PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error) { + var putAiGatewayResponse PutAiGatewayResponse + path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/ai-gateway", request.Name) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, request, &putAiGatewayResponse) + return &putAiGatewayResponse, err +} + func (a *servingEndpointsImpl) Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) { var queryEndpointResponse QueryEndpointResponse path := fmt.Sprintf("/serving-endpoints/%v/invocations", request.Name) diff --git a/service/serving/interface.go b/service/serving/interface.go index 1f3185047..4f60ee2e7 100755 --- a/service/serving/interface.go +++ b/service/serving/interface.go @@ -79,10 +79,17 @@ type ServingEndpointsService interface { // Update rate limits of a serving endpoint. // - // Used to update the rate limits of a serving endpoint. NOTE: only external - // and foundation model endpoints are supported as of now. + // Used to update the rate limits of a serving endpoint. NOTE: Only + // foundation model endpoints are currently supported. For external models, + // use AI Gateway to manage rate limits. Put(ctx context.Context, request PutRequest) (*PutResponse, error) + // Update AI Gateway of a serving endpoint. + // + // Used to update the AI Gateway of a serving endpoint. NOTE: Only external + // model endpoints are currently supported. 
+ PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error) + // Query a serving endpoint. Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) diff --git a/service/serving/model.go b/service/serving/model.go index 6d63ed8cb..6ac7eb49d 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -33,6 +33,202 @@ func (s Ai21LabsConfig) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type AiGatewayConfig struct { + // Configuration for AI Guardrails to prevent unwanted data and unsafe data + // in requests and responses. + Guardrails *AiGatewayGuardrails `json:"guardrails,omitempty"` + // Configuration for payload logging using inference tables. Use these + // tables to monitor and audit data being sent to and received from model + // APIs and to improve model quality. + InferenceTableConfig *AiGatewayInferenceTableConfig `json:"inference_table_config,omitempty"` + // Configuration for rate limits which can be set to limit endpoint traffic. + RateLimits []AiGatewayRateLimit `json:"rate_limits,omitempty"` + // Configuration to enable usage tracking using system tables. These tables + // allow you to monitor operational usage on endpoints and their associated + // costs. + UsageTrackingConfig *AiGatewayUsageTrackingConfig `json:"usage_tracking_config,omitempty"` +} + +type AiGatewayGuardrailParameters struct { + // List of invalid keywords. AI guardrail uses keyword or string matching to + // decide if the keyword exists in the request or response content. + InvalidKeywords []string `json:"invalid_keywords,omitempty"` + // Configuration for guardrail PII filter. + Pii *AiGatewayGuardrailPiiBehavior `json:"pii,omitempty"` + // Indicates whether the safety filter is enabled. + Safety bool `json:"safety,omitempty"` + // The list of allowed topics. Given a chat request, this guardrail flags + // the request if its topic is not in the allowed topics. 
+ ValidTopics []string `json:"valid_topics,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AiGatewayGuardrailParameters) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AiGatewayGuardrailParameters) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AiGatewayGuardrailPiiBehavior struct { + // Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' + // is set for the input guardrail and the request contains PII, the request + // is not sent to the model server and 400 status code is returned; if + // 'BLOCK' is set for the output guardrail and the model response contains + // PII, the PII info in the response is redacted and 400 status code is + // returned. + Behavior AiGatewayGuardrailPiiBehaviorBehavior `json:"behavior"` +} + +// Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is +// set for the input guardrail and the request contains PII, the request is not +// sent to the model server and 400 status code is returned; if 'BLOCK' is set +// for the output guardrail and the model response contains PII, the PII info in +// the response is redacted and 400 status code is returned. 
+type AiGatewayGuardrailPiiBehaviorBehavior string + +const AiGatewayGuardrailPiiBehaviorBehaviorBlock AiGatewayGuardrailPiiBehaviorBehavior = `BLOCK` + +const AiGatewayGuardrailPiiBehaviorBehaviorNone AiGatewayGuardrailPiiBehaviorBehavior = `NONE` + +// String representation for [fmt.Print] +func (f *AiGatewayGuardrailPiiBehaviorBehavior) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AiGatewayGuardrailPiiBehaviorBehavior) Set(v string) error { + switch v { + case `BLOCK`, `NONE`: + *f = AiGatewayGuardrailPiiBehaviorBehavior(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BLOCK", "NONE"`, v) + } +} + +// Type always returns AiGatewayGuardrailPiiBehaviorBehavior to satisfy [pflag.Value] interface +func (f *AiGatewayGuardrailPiiBehaviorBehavior) Type() string { + return "AiGatewayGuardrailPiiBehaviorBehavior" +} + +type AiGatewayGuardrails struct { + // Configuration for input guardrail filters. + Input *AiGatewayGuardrailParameters `json:"input,omitempty"` + // Configuration for output guardrail filters. + Output *AiGatewayGuardrailParameters `json:"output,omitempty"` +} + +type AiGatewayInferenceTableConfig struct { + // The name of the catalog in Unity Catalog. Required when enabling + // inference tables. NOTE: On update, you have to disable inference table + // first in order to change the catalog name. + CatalogName string `json:"catalog_name,omitempty"` + // Indicates whether the inference table is enabled. + Enabled bool `json:"enabled,omitempty"` + // The name of the schema in Unity Catalog. Required when enabling inference + // tables. NOTE: On update, you have to disable inference table first in + // order to change the schema name. + SchemaName string `json:"schema_name,omitempty"` + // The prefix of the table in Unity Catalog. NOTE: On update, you have to + // disable inference table first in order to change the prefix name. 
+ TableNamePrefix string `json:"table_name_prefix,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AiGatewayInferenceTableConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AiGatewayInferenceTableConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AiGatewayRateLimit struct { + // Used to specify how many calls are allowed for a key within the + // renewal_period. + Calls int `json:"calls"` + // Key field for a rate limit. Currently, only 'user' and 'endpoint' are + // supported, with 'endpoint' being the default if not specified. + Key AiGatewayRateLimitKey `json:"key,omitempty"` + // Renewal period field for a rate limit. Currently, only 'minute' is + // supported. + RenewalPeriod AiGatewayRateLimitRenewalPeriod `json:"renewal_period"` +} + +// Key field for a rate limit. Currently, only 'user' and 'endpoint' are +// supported, with 'endpoint' being the default if not specified. +type AiGatewayRateLimitKey string + +const AiGatewayRateLimitKeyEndpoint AiGatewayRateLimitKey = `endpoint` + +const AiGatewayRateLimitKeyUser AiGatewayRateLimitKey = `user` + +// String representation for [fmt.Print] +func (f *AiGatewayRateLimitKey) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AiGatewayRateLimitKey) Set(v string) error { + switch v { + case `endpoint`, `user`: + *f = AiGatewayRateLimitKey(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "endpoint", "user"`, v) + } +} + +// Type always returns AiGatewayRateLimitKey to satisfy [pflag.Value] interface +func (f *AiGatewayRateLimitKey) Type() string { + return "AiGatewayRateLimitKey" +} + +// Renewal period field for a rate limit. Currently, only 'minute' is supported. 
+type AiGatewayRateLimitRenewalPeriod string + +const AiGatewayRateLimitRenewalPeriodMinute AiGatewayRateLimitRenewalPeriod = `minute` + +// String representation for [fmt.Print] +func (f *AiGatewayRateLimitRenewalPeriod) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AiGatewayRateLimitRenewalPeriod) Set(v string) error { + switch v { + case `minute`: + *f = AiGatewayRateLimitRenewalPeriod(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "minute"`, v) + } +} + +// Type always returns AiGatewayRateLimitRenewalPeriod to satisfy [pflag.Value] interface +func (f *AiGatewayRateLimitRenewalPeriod) Type() string { + return "AiGatewayRateLimitRenewalPeriod" +} + +type AiGatewayUsageTrackingConfig struct { + // Whether to enable usage tracking. + Enabled bool `json:"enabled,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AiGatewayUsageTrackingConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AiGatewayUsageTrackingConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type AmazonBedrockConfig struct { // The Databricks secret key reference for an AWS access key ID with // permissions to interact with Bedrock services. If you prefer to paste @@ -273,14 +469,17 @@ func (s CohereConfig) MarshalJSON() ([]byte, error) { } type CreateServingEndpoint struct { + // The AI Gateway configuration for the serving endpoint. NOTE: only + // external model endpoints are supported as of now. + AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` // The core config of the serving endpoint. Config EndpointCoreConfigInput `json:"config"` // The name of the serving endpoint. This field is required and must be // unique across a Databricks workspace. An endpoint name can consist of // alphanumeric characters, dashes, and underscores. Name string `json:"name"` - // Rate limits to be applied to the serving endpoint. 
NOTE: only external - // and foundation model endpoints are supported as of now. + // Rate limits to be applied to the serving endpoint. NOTE: this field is + // deprecated, please use AI Gateway to manage rate limits. RateLimits []RateLimit `json:"rate_limits,omitempty"` // Enable route optimization for the serving endpoint. RouteOptimized bool `json:"route_optimized,omitempty"` @@ -901,6 +1100,42 @@ func (s PayloadTable) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Update AI Gateway of a serving endpoint +type PutAiGatewayRequest struct { + // Configuration for AI Guardrails to prevent unwanted data and unsafe data + // in requests and responses. + Guardrails *AiGatewayGuardrails `json:"guardrails,omitempty"` + // Configuration for payload logging using inference tables. Use these + // tables to monitor and audit data being sent to and received from model + // APIs and to improve model quality. + InferenceTableConfig *AiGatewayInferenceTableConfig `json:"inference_table_config,omitempty"` + // The name of the serving endpoint whose AI Gateway is being updated. This + // field is required. + Name string `json:"-" url:"-"` + // Configuration for rate limits which can be set to limit endpoint traffic. + RateLimits []AiGatewayRateLimit `json:"rate_limits,omitempty"` + // Configuration to enable usage tracking using system tables. These tables + // allow you to monitor operational usage on endpoints and their associated + // costs. + UsageTrackingConfig *AiGatewayUsageTrackingConfig `json:"usage_tracking_config,omitempty"` +} + +type PutAiGatewayResponse struct { + // Configuration for AI Guardrails to prevent unwanted data and unsafe data + // in requests and responses. + Guardrails *AiGatewayGuardrails `json:"guardrails,omitempty"` + // Configuration for payload logging using inference tables. Use these + // tables to monitor and audit data being sent to and received from model + // APIs and to improve model quality . 
+ InferenceTableConfig *AiGatewayInferenceTableConfig `json:"inference_table_config,omitempty"` + // Configuration for rate limits which can be set to limit endpoint traffic. + RateLimits []AiGatewayRateLimit `json:"rate_limits,omitempty"` + // Configuration to enable usage tracking using system tables. These tables + // allow you to monitor operational usage on endpoints and their associated + // costs. + UsageTrackingConfig *AiGatewayUsageTrackingConfig `json:"usage_tracking_config,omitempty"` +} + // Update rate limits of a serving endpoint type PutRequest struct { // The name of the serving endpoint whose rate limits are being updated. @@ -1592,6 +1827,9 @@ type ServerLogsResponse struct { } type ServingEndpoint struct { + // The AI Gateway configuration for the serving endpoint. NOTE: Only + // external model endpoints are currently supported. + AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` // The config that is currently being served by the endpoint. Config *EndpointCoreConfigSummary `json:"config,omitempty"` // The timestamp when the endpoint was created in Unix time. @@ -1668,6 +1906,9 @@ func (s ServingEndpointAccessControlResponse) MarshalJSON() ([]byte, error) { } type ServingEndpointDetailed struct { + // The AI Gateway configuration for the serving endpoint. NOTE: Only + // external model endpoints are currently supported. + AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` // The config that is currently being served by the endpoint. Config *EndpointCoreConfigOutput `json:"config,omitempty"` // The timestamp when the endpoint was created in Unix time. diff --git a/service/settings/api.go b/service/settings/api.go index 2933e5764..20e7bf902 100755 --- a/service/settings/api.go +++ b/service/settings/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
-// These APIs allow you to manage Account Ip Access Lists, Account Settings, Automatic Cluster Update, Compliance Security Profile, Credentials Manager, Csp Enablement Account, Default Namespace, Enhanced Security Monitoring, Esm Enablement Account, Ip Access Lists, Network Connectivity, Notification Destinations, Personal Compute, Restrict Workspace Admins, Settings, Token Management, Tokens, Workspace Conf, etc. +// These APIs allow you to manage Account Ip Access Lists, Account Settings, Automatic Cluster Update, Compliance Security Profile, Credentials Manager, Csp Enablement Account, Default Namespace, Disable Legacy Access, Disable Legacy Features, Enhanced Security Monitoring, Esm Enablement Account, Ip Access Lists, Network Connectivity, Notification Destinations, Personal Compute, Restrict Workspace Admins, Settings, Token Management, Tokens, Workspace Conf, etc. package settings import ( @@ -269,6 +269,14 @@ type AccountSettingsInterface interface { // compliance security profile enabled by default. CspEnablementAccount() CspEnablementAccountInterface + // Disable legacy features for new Databricks workspaces. + // + // For newly created workspaces: 1. Disables the use of DBFS root and + // mounts. 2. Hive Metastore will not be provisioned. 3. Disables the use of + // ‘No-isolation clusters’. 4. Disables Databricks Runtime versions + // prior to 13.3LTS. + DisableLegacyFeatures() DisableLegacyFeaturesInterface + // The enhanced security monitoring setting at the account level controls // whether to enable the feature on new workspaces. By default, this // account-level setting is disabled for new workspaces. 
After workspace @@ -297,6 +305,8 @@ func NewAccountSettings(client *client.DatabricksClient) *AccountSettingsAPI { cspEnablementAccount: NewCspEnablementAccount(client), + disableLegacyFeatures: NewDisableLegacyFeatures(client), + esmEnablementAccount: NewEsmEnablementAccount(client), personalCompute: NewPersonalCompute(client), @@ -317,6 +327,14 @@ type AccountSettingsAPI struct { // compliance security profile enabled by default. cspEnablementAccount CspEnablementAccountInterface + // Disable legacy features for new Databricks workspaces. + // + // For newly created workspaces: 1. Disables the use of DBFS root and + // mounts. 2. Hive Metastore will not be provisioned. 3. Disables the use of + // ‘No-isolation clusters’. 4. Disables Databricks Runtime versions + // prior to 13.3LTS. + disableLegacyFeatures DisableLegacyFeaturesInterface + // The enhanced security monitoring setting at the account level controls // whether to enable the feature on new workspaces. By default, this // account-level setting is disabled for new workspaces. After workspace @@ -341,6 +359,10 @@ func (a *AccountSettingsAPI) CspEnablementAccount() CspEnablementAccountInterfac return a.cspEnablementAccount } +func (a *AccountSettingsAPI) DisableLegacyFeatures() DisableLegacyFeaturesInterface { + return a.disableLegacyFeatures +} + func (a *AccountSettingsAPI) EsmEnablementAccount() EsmEnablementAccountInterface { return a.esmEnablementAccount } @@ -522,6 +544,79 @@ type DefaultNamespaceAPI struct { defaultNamespaceImpl } +type DisableLegacyAccessInterface interface { + + // Delete Legacy Access Disablement Status. + // + // Deletes legacy access disablement status. + Delete(ctx context.Context, request DeleteDisableLegacyAccessRequest) (*DeleteDisableLegacyAccessResponse, error) + + // Retrieve Legacy Access Disablement Status. + // + // Retrieves legacy access disablement Status. 
+ Get(ctx context.Context, request GetDisableLegacyAccessRequest) (*DisableLegacyAccess, error) + + // Update Legacy Access Disablement Status. + // + // Updates legacy access disablement status. + Update(ctx context.Context, request UpdateDisableLegacyAccessRequest) (*DisableLegacyAccess, error) +} + +func NewDisableLegacyAccess(client *client.DatabricksClient) *DisableLegacyAccessAPI { + return &DisableLegacyAccessAPI{ + disableLegacyAccessImpl: disableLegacyAccessImpl{ + client: client, + }, + } +} + +// 'Disabling legacy access' has the following impacts: +// +// 1. Disables direct access to the Hive Metastore. However, you can still +// access Hive Metastore through HMS Federation. 2. Disables Fallback Mode (docs +// link) on any External Location access from the workspace. 3. Alters DBFS path +// access to use External Location permissions in place of legacy credentials. +// 4. Enforces Unity Catalog access on all path based access. +type DisableLegacyAccessAPI struct { + disableLegacyAccessImpl +} + +type DisableLegacyFeaturesInterface interface { + + // Delete the disable legacy features setting. + // + // Deletes the disable legacy features setting. + Delete(ctx context.Context, request DeleteDisableLegacyFeaturesRequest) (*DeleteDisableLegacyFeaturesResponse, error) + + // Get the disable legacy features setting. + // + // Gets the value of the disable legacy features setting. + Get(ctx context.Context, request GetDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) + + // Update the disable legacy features setting. + // + // Updates the value of the disable legacy features setting. 
+ Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) +} + +func NewDisableLegacyFeatures(client *client.DatabricksClient) *DisableLegacyFeaturesAPI { + return &DisableLegacyFeaturesAPI{ + disableLegacyFeaturesImpl: disableLegacyFeaturesImpl{ + client: client, + }, + } +} + +// Disable legacy features for new Databricks workspaces. +// +// For newly created workspaces: 1. Disables the use of DBFS root and mounts. 2. +// Hive Metastore will not be provisioned. 3. Disables the use of +// ‘No-isolation clusters’. 4. Disables Databricks Runtime versions prior to +// 13.3LTS. +type DisableLegacyFeaturesAPI struct { + disableLegacyFeaturesImpl +} + type EnhancedSecurityMonitoringInterface interface { // Get the enhanced security monitoring setting. @@ -1326,6 +1421,15 @@ type SettingsInterface interface { // Catalog-enabled compute. DefaultNamespace() DefaultNamespaceInterface + // 'Disabling legacy access' has the following impacts: + // + // 1. Disables direct access to the Hive Metastore. However, you can still + // access Hive Metastore through HMS Federation. 2. Disables Fallback Mode + // (docs link) on any External Location access from the workspace. 3. Alters + // DBFS path access to use External Location permissions in place of legacy + // credentials. 4. Enforces Unity Catalog access on all path based access. + DisableLegacyAccess() DisableLegacyAccessInterface + // Controls whether enhanced security monitoring is enabled for the current // workspace. If the compliance security profile is enabled, this is // automatically enabled. By default, it is disabled. 
However, if the @@ -1363,6 +1467,8 @@ func NewSettings(client *client.DatabricksClient) *SettingsAPI { defaultNamespace: NewDefaultNamespace(client), + disableLegacyAccess: NewDisableLegacyAccess(client), + enhancedSecurityMonitoring: NewEnhancedSecurityMonitoring(client), restrictWorkspaceAdmins: NewRestrictWorkspaceAdmins(client), @@ -1400,6 +1506,15 @@ type SettingsAPI struct { // Catalog-enabled compute. defaultNamespace DefaultNamespaceInterface + // 'Disabling legacy access' has the following impacts: + // + // 1. Disables direct access to the Hive Metastore. However, you can still + // access Hive Metastore through HMS Federation. 2. Disables Fallback Mode + // (docs link) on any External Location access from the workspace. 3. Alters + // DBFS path access to use External Location permissions in place of legacy + // credentials. 4. Enforces Unity Catalog access on all path based access. + disableLegacyAccess DisableLegacyAccessInterface + // Controls whether enhanced security monitoring is enabled for the current // workspace. If the compliance security profile is enabled, this is // automatically enabled. By default, it is disabled. 
However, if the @@ -1437,6 +1552,10 @@ func (a *SettingsAPI) DefaultNamespace() DefaultNamespaceInterface { return a.defaultNamespace } +func (a *SettingsAPI) DisableLegacyAccess() DisableLegacyAccessInterface { + return a.disableLegacyAccess +} + func (a *SettingsAPI) EnhancedSecurityMonitoring() EnhancedSecurityMonitoringInterface { return a.enhancedSecurityMonitoring } diff --git a/service/settings/impl.go b/service/settings/impl.go index e0d71f46d..6ae3c3650 100755 --- a/service/settings/impl.go +++ b/service/settings/impl.go @@ -197,6 +197,72 @@ func (a *defaultNamespaceImpl) Update(ctx context.Context, request UpdateDefault return &defaultNamespaceSetting, err } +// unexported type that holds implementations of just DisableLegacyAccess API methods +type disableLegacyAccessImpl struct { + client *client.DatabricksClient +} + +func (a *disableLegacyAccessImpl) Delete(ctx context.Context, request DeleteDisableLegacyAccessRequest) (*DeleteDisableLegacyAccessResponse, error) { + var deleteDisableLegacyAccessResponse DeleteDisableLegacyAccessResponse + path := "/api/2.0/settings/types/disable_legacy_access/names/default" + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteDisableLegacyAccessResponse) + return &deleteDisableLegacyAccessResponse, err +} + +func (a *disableLegacyAccessImpl) Get(ctx context.Context, request GetDisableLegacyAccessRequest) (*DisableLegacyAccess, error) { + var disableLegacyAccess DisableLegacyAccess + path := "/api/2.0/settings/types/disable_legacy_access/names/default" + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, request, &disableLegacyAccess) + return &disableLegacyAccess, err +} + +func (a *disableLegacyAccessImpl) Update(ctx context.Context, request UpdateDisableLegacyAccessRequest) (*DisableLegacyAccess, error) { + var disableLegacyAccess 
DisableLegacyAccess + path := "/api/2.0/settings/types/disable_legacy_access/names/default" + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &disableLegacyAccess) + return &disableLegacyAccess, err +} + +// unexported type that holds implementations of just DisableLegacyFeatures API methods +type disableLegacyFeaturesImpl struct { + client *client.DatabricksClient +} + +func (a *disableLegacyFeaturesImpl) Delete(ctx context.Context, request DeleteDisableLegacyFeaturesRequest) (*DeleteDisableLegacyFeaturesResponse, error) { + var deleteDisableLegacyFeaturesResponse DeleteDisableLegacyFeaturesResponse + path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/disable_legacy_features/names/default", a.client.ConfiguredAccountID()) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteDisableLegacyFeaturesResponse) + return &deleteDisableLegacyFeaturesResponse, err +} + +func (a *disableLegacyFeaturesImpl) Get(ctx context.Context, request GetDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) { + var disableLegacyFeatures DisableLegacyFeatures + path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/disable_legacy_features/names/default", a.client.ConfiguredAccountID()) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, request, &disableLegacyFeatures) + return &disableLegacyFeatures, err +} + +func (a *disableLegacyFeaturesImpl) Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) { + var disableLegacyFeatures DisableLegacyFeatures + path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/disable_legacy_features/names/default", a.client.ConfiguredAccountID()) + headers := 
make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &disableLegacyFeatures) + return &disableLegacyFeatures, err +} + // unexported type that holds implementations of just EnhancedSecurityMonitoring API methods type enhancedSecurityMonitoringImpl struct { client *client.DatabricksClient diff --git a/service/settings/interface.go b/service/settings/interface.go index cd4a02fc6..843f15bef 100755 --- a/service/settings/interface.go +++ b/service/settings/interface.go @@ -224,6 +224,55 @@ type DefaultNamespaceService interface { Update(ctx context.Context, request UpdateDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error) } +// 'Disabling legacy access' has the following impacts: +// +// 1. Disables direct access to the Hive Metastore. However, you can still +// access Hive Metastore through HMS Federation. 2. Disables Fallback Mode (docs +// link) on any External Location access from the workspace. 3. Alters DBFS path +// access to use External Location permissions in place of legacy credentials. +// 4. Enforces Unity Catalog access on all path based access. +type DisableLegacyAccessService interface { + + // Delete Legacy Access Disablement Status. + // + // Deletes legacy access disablement status. + Delete(ctx context.Context, request DeleteDisableLegacyAccessRequest) (*DeleteDisableLegacyAccessResponse, error) + + // Retrieve Legacy Access Disablement Status. + // + // Retrieves legacy access disablement Status. + Get(ctx context.Context, request GetDisableLegacyAccessRequest) (*DisableLegacyAccess, error) + + // Update Legacy Access Disablement Status. + // + // Updates legacy access disablement status. + Update(ctx context.Context, request UpdateDisableLegacyAccessRequest) (*DisableLegacyAccess, error) +} + +// Disable legacy features for new Databricks workspaces. +// +// For newly created workspaces: 1. 
Disables the use of DBFS root and mounts. 2. +// Hive Metastore will not be provisioned. 3. Disables the use of +// ‘No-isolation clusters’. 4. Disables Databricks Runtime versions prior to +// 13.3LTS. +type DisableLegacyFeaturesService interface { + + // Delete the disable legacy features setting. + // + // Deletes the disable legacy features setting. + Delete(ctx context.Context, request DeleteDisableLegacyFeaturesRequest) (*DeleteDisableLegacyFeaturesResponse, error) + + // Get the disable legacy features setting. + // + // Gets the value of the disable legacy features setting. + Get(ctx context.Context, request GetDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) + + // Update the disable legacy features setting. + // + // Updates the value of the disable legacy features setting. + Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) +} + // Controls whether enhanced security monitoring is enabled for the current // workspace. If the compliance security profile is enabled, this is // automatically enabled. By default, it is disabled. 
However, if the compliance diff --git a/service/settings/model.go b/service/settings/model.go index 8b5eaa41c..e41358a98 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -36,6 +36,20 @@ func (s AutomaticClusterUpdateSetting) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type BooleanMessage struct { + Value bool `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *BooleanMessage) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s BooleanMessage) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type ClusterAutoRestartMessage struct { CanToggle bool `json:"can_toggle,omitempty"` @@ -588,6 +602,74 @@ type DeleteDefaultNamespaceSettingResponse struct { Etag string `json:"etag"` } +// Delete Legacy Access Disablement Status +type DeleteDisableLegacyAccessRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteDisableLegacyAccessRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteDisableLegacyAccessRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The etag is returned. +type DeleteDisableLegacyAccessResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. 
This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"etag"` +} + +// Delete the disable legacy features setting +type DeleteDisableLegacyFeaturesRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteDisableLegacyFeaturesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteDisableLegacyFeaturesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The etag is returned. +type DeleteDisableLegacyFeaturesResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. 
That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"etag"` +} + // Delete access list type DeleteIpAccessListRequest struct { // The ID for the corresponding IP access list @@ -726,6 +808,62 @@ func (f *DestinationType) Type() string { return "DestinationType" } +type DisableLegacyAccess struct { + DisableLegacyAccess BooleanMessage `json:"disable_legacy_access"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DisableLegacyAccess) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DisableLegacyAccess) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DisableLegacyFeatures struct { + DisableLegacyFeatures BooleanMessage `json:"disable_legacy_features"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. 
It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DisableLegacyFeatures) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DisableLegacyFeatures) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type EmailConfig struct { // Email addresses to notify. Addresses []string `json:"addresses,omitempty"` @@ -987,6 +1125,50 @@ func (s GetDefaultNamespaceSettingRequest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Retrieve Legacy Access Disablement Status +type GetDisableLegacyAccessRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. 
+ Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetDisableLegacyAccessRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetDisableLegacyAccessRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the disable legacy features setting +type GetDisableLegacyFeaturesRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetDisableLegacyFeaturesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetDisableLegacyFeaturesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get the enhanced security monitoring setting type GetEnhancedSecurityMonitoringSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -2036,6 +2218,8 @@ type TokenPermissionsRequest struct { // supported. 
type TokenType string +const TokenTypeArclightAzureExchangeToken TokenType = `ARCLIGHT_AZURE_EXCHANGE_TOKEN` + const TokenTypeAzureActiveDirectoryToken TokenType = `AZURE_ACTIVE_DIRECTORY_TOKEN` // String representation for [fmt.Print] @@ -2046,11 +2230,11 @@ func (f *TokenType) String() string { // Set raw string value and validate it against allowed values func (f *TokenType) Set(v string) error { switch v { - case `AZURE_ACTIVE_DIRECTORY_TOKEN`: + case `ARCLIGHT_AZURE_EXCHANGE_TOKEN`, `AZURE_ACTIVE_DIRECTORY_TOKEN`: *f = TokenType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "AZURE_ACTIVE_DIRECTORY_TOKEN"`, v) + return fmt.Errorf(`value "%s" is not one of "ARCLIGHT_AZURE_EXCHANGE_TOKEN", "AZURE_ACTIVE_DIRECTORY_TOKEN"`, v) } } @@ -2123,6 +2307,34 @@ type UpdateDefaultNamespaceSettingRequest struct { Setting DefaultNamespaceSetting `json:"setting"` } +// Details required to update a setting. +type UpdateDisableLegacyAccessRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask string `json:"field_mask"` + + Setting DisableLegacyAccess `json:"setting"` +} + +// Details required to update a setting. +type UpdateDisableLegacyFeaturesRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. 
To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask string `json:"field_mask"` + + Setting DisableLegacyFeatures `json:"setting"` +} + // Details required to update a setting. type UpdateEnhancedSecurityMonitoringSettingRequest struct { // This should always be set to true for Settings API. Added for AIP diff --git a/service/sql/api.go b/service/sql/api.go index 675b27bce..66c9d2323 100755 --- a/service/sql/api.go +++ b/service/sql/api.go @@ -1797,7 +1797,9 @@ func NewStatementExecution(client *client.DatabricksClient) *StatementExecutionA // network latency from caller to service. - The system will auto-close a // statement after one hour if the client stops polling and thus you must poll // at least once an hour. - The results are only available for one hour after -// success; polling does not extend this. +// success; polling does not extend this. - The SQL Execution API must be used +// for the entire lifecycle of the statement. For example, you cannot use the +// Jobs API to execute the command, and then the SQL Execution API to cancel it. // // [Apache Arrow Columnar]: https://arrow.apache.org/overview/ // [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html diff --git a/service/sql/interface.go b/service/sql/interface.go index 9d654260b..01105543f 100755 --- a/service/sql/interface.go +++ b/service/sql/interface.go @@ -578,7 +578,9 @@ type QueryVisualizationsLegacyService interface { // network latency from caller to service. - The system will auto-close a // statement after one hour if the client stops polling and thus you must poll // at least once an hour. - The results are only available for one hour after -// success; polling does not extend this. +// success; polling does not extend this. - The SQL Execution API must be used +// for the entire lifecycle of the statement. 
For example, you cannot use the +// Jobs API to execute the command, and then the SQL Execution API to cancel it. // // [Apache Arrow Columnar]: https://arrow.apache.org/overview/ // [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html diff --git a/service/sql/model.go b/service/sql/model.go index fd359bc96..25cf2e795 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -398,6 +398,8 @@ const ChannelNameChannelNamePreview ChannelName = `CHANNEL_NAME_PREVIEW` const ChannelNameChannelNamePrevious ChannelName = `CHANNEL_NAME_PREVIOUS` +const ChannelNameChannelNameUnspecified ChannelName = `CHANNEL_NAME_UNSPECIFIED` + // String representation for [fmt.Print] func (f *ChannelName) String() string { return string(*f) @@ -406,11 +408,11 @@ func (f *ChannelName) String() string { // Set raw string value and validate it against allowed values func (f *ChannelName) Set(v string) error { switch v { - case `CHANNEL_NAME_CURRENT`, `CHANNEL_NAME_CUSTOM`, `CHANNEL_NAME_PREVIEW`, `CHANNEL_NAME_PREVIOUS`: + case `CHANNEL_NAME_CURRENT`, `CHANNEL_NAME_CUSTOM`, `CHANNEL_NAME_PREVIEW`, `CHANNEL_NAME_PREVIOUS`, `CHANNEL_NAME_UNSPECIFIED`: *f = ChannelName(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "CHANNEL_NAME_CURRENT", "CHANNEL_NAME_CUSTOM", "CHANNEL_NAME_PREVIEW", "CHANNEL_NAME_PREVIOUS"`, v) + return fmt.Errorf(`value "%s" is not one of "CHANNEL_NAME_CURRENT", "CHANNEL_NAME_CUSTOM", "CHANNEL_NAME_PREVIEW", "CHANNEL_NAME_PREVIOUS", "CHANNEL_NAME_UNSPECIFIED"`, v) } } @@ -419,25 +421,6 @@ func (f *ChannelName) Type() string { return "ChannelName" } -// Client code that triggered the request -type ClientCallContext struct { - // File name that contains the last line that triggered the request. - FileName *EncodedText `json:"file_name,omitempty"` - // Last line number within a file or notebook cell that triggered the - // request. 
- LineNumber int `json:"line_number,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *ClientCallContext) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s ClientCallContext) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - type ColumnInfo struct { // The name of the column. Name string `json:"name,omitempty"` @@ -1378,51 +1361,6 @@ type EditWarehouseResponse struct { type Empty struct { } -type EncodedText struct { - // Carry text data in different form. - Encoding EncodedTextEncoding `json:"encoding,omitempty"` - // text data - Text string `json:"text,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *EncodedText) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s EncodedText) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - -// Carry text data in different form. -type EncodedTextEncoding string - -const EncodedTextEncodingBase64 EncodedTextEncoding = `BASE64` - -const EncodedTextEncodingPlain EncodedTextEncoding = `PLAIN` - -// String representation for [fmt.Print] -func (f *EncodedTextEncoding) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *EncodedTextEncoding) Set(v string) error { - switch v { - case `BASE64`, `PLAIN`: - *f = EncodedTextEncoding(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "BASE64", "PLAIN"`, v) - } -} - -// Type always returns EncodedTextEncoding to satisfy [pflag.Value] interface -func (f *EncodedTextEncoding) Type() string { - return "EncodedTextEncoding" -} - type EndpointConfPair struct { Key string `json:"key,omitempty"` @@ -3163,8 +3101,6 @@ type QueryInfo struct { QueryEndTimeMs int64 `json:"query_end_time_ms,omitempty"` // The query ID. QueryId string `json:"query_id,omitempty"` - - QuerySource *QuerySource `json:"query_source,omitempty"` // The time the query started. 
QueryStartTimeMs int64 `json:"query_start_time_ms,omitempty"` // The text of the query. @@ -3384,190 +3320,6 @@ func (s QueryPostContent) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type QuerySource struct { - // UUID - AlertId string `json:"alert_id,omitempty"` - // Client code that triggered the request - ClientCallContext *ClientCallContext `json:"client_call_context,omitempty"` - // Id associated with a notebook cell - CommandId string `json:"command_id,omitempty"` - // Id associated with a notebook run or execution - CommandRunId string `json:"command_run_id,omitempty"` - // UUID - DashboardId string `json:"dashboard_id,omitempty"` - // UUID for Lakeview Dashboards, separate from DBSQL Dashboards - // (dashboard_id) - DashboardV3Id string `json:"dashboard_v3_id,omitempty"` - - DriverInfo *QuerySourceDriverInfo `json:"driver_info,omitempty"` - // Spark service that received and processed the query - EntryPoint QuerySourceEntryPoint `json:"entry_point,omitempty"` - // UUID for Genie space - GenieSpaceId string `json:"genie_space_id,omitempty"` - - IsCloudFetch bool `json:"is_cloud_fetch,omitempty"` - - IsDatabricksSqlExecApi bool `json:"is_databricks_sql_exec_api,omitempty"` - - JobId string `json:"job_id,omitempty"` - // With background compute, jobs can be managed by different internal teams. 
- // When not specified, not a background compute job When specified and the - // value is not JOBS, it is a background compute job - JobManagedBy QuerySourceJobManager `json:"job_managed_by,omitempty"` - - NotebookId string `json:"notebook_id,omitempty"` - // String provided by a customer that'll help them identify the query - QueryTags string `json:"query_tags,omitempty"` - // Id associated with a job run or execution - RunId string `json:"run_id,omitempty"` - // Id associated with a notebook cell run or execution - RunnableCommandId string `json:"runnable_command_id,omitempty"` - - ScheduledBy QuerySourceTrigger `json:"scheduled_by,omitempty"` - - ServerlessChannelInfo *ServerlessChannelInfo `json:"serverless_channel_info,omitempty"` - // UUID - SourceQueryId string `json:"source_query_id,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *QuerySource) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s QuerySource) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - -type QuerySourceDriverInfo struct { - BiToolEntry string `json:"bi_tool_entry,omitempty"` - - DriverName string `json:"driver_name,omitempty"` - - SimbaBrandingVendor string `json:"simba_branding_vendor,omitempty"` - - VersionNumber string `json:"version_number,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *QuerySourceDriverInfo) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s QuerySourceDriverInfo) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - -// Spark service that received and processed the query -type QuerySourceEntryPoint string - -const QuerySourceEntryPointDlt QuerySourceEntryPoint = `DLT` - -const QuerySourceEntryPointSparkConnect QuerySourceEntryPoint = `SPARK_CONNECT` - -const QuerySourceEntryPointThriftServer QuerySourceEntryPoint = `THRIFT_SERVER` - -// String representation for [fmt.Print] -func (f *QuerySourceEntryPoint) String() string { - return string(*f) -} 
- -// Set raw string value and validate it against allowed values -func (f *QuerySourceEntryPoint) Set(v string) error { - switch v { - case `DLT`, `SPARK_CONNECT`, `THRIFT_SERVER`: - *f = QuerySourceEntryPoint(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "DLT", "SPARK_CONNECT", "THRIFT_SERVER"`, v) - } -} - -// Type always returns QuerySourceEntryPoint to satisfy [pflag.Value] interface -func (f *QuerySourceEntryPoint) Type() string { - return "QuerySourceEntryPoint" -} - -// Copied from elastic-spark-common/api/messages/manager.proto with enum values -// changed by 1 to accommodate JOB_MANAGER_UNSPECIFIED -type QuerySourceJobManager string - -const QuerySourceJobManagerAppSystemTable QuerySourceJobManager = `APP_SYSTEM_TABLE` - -const QuerySourceJobManagerAutoml QuerySourceJobManager = `AUTOML` - -const QuerySourceJobManagerAutoMaintenance QuerySourceJobManager = `AUTO_MAINTENANCE` - -const QuerySourceJobManagerCleanRooms QuerySourceJobManager = `CLEAN_ROOMS` - -const QuerySourceJobManagerDataMonitoring QuerySourceJobManager = `DATA_MONITORING` - -const QuerySourceJobManagerDataSharing QuerySourceJobManager = `DATA_SHARING` - -const QuerySourceJobManagerEncryption QuerySourceJobManager = `ENCRYPTION` - -const QuerySourceJobManagerFabricCrawler QuerySourceJobManager = `FABRIC_CRAWLER` - -const QuerySourceJobManagerJobs QuerySourceJobManager = `JOBS` - -const QuerySourceJobManagerLakeview QuerySourceJobManager = `LAKEVIEW` - -const QuerySourceJobManagerManagedRag QuerySourceJobManager = `MANAGED_RAG` - -const QuerySourceJobManagerScheduledMvRefresh QuerySourceJobManager = `SCHEDULED_MV_REFRESH` - -const QuerySourceJobManagerTesting QuerySourceJobManager = `TESTING` - -// String representation for [fmt.Print] -func (f *QuerySourceJobManager) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *QuerySourceJobManager) Set(v string) error { - switch v { - case 
`APP_SYSTEM_TABLE`, `AUTOML`, `AUTO_MAINTENANCE`, `CLEAN_ROOMS`, `DATA_MONITORING`, `DATA_SHARING`, `ENCRYPTION`, `FABRIC_CRAWLER`, `JOBS`, `LAKEVIEW`, `MANAGED_RAG`, `SCHEDULED_MV_REFRESH`, `TESTING`: - *f = QuerySourceJobManager(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "APP_SYSTEM_TABLE", "AUTOML", "AUTO_MAINTENANCE", "CLEAN_ROOMS", "DATA_MONITORING", "DATA_SHARING", "ENCRYPTION", "FABRIC_CRAWLER", "JOBS", "LAKEVIEW", "MANAGED_RAG", "SCHEDULED_MV_REFRESH", "TESTING"`, v) - } -} - -// Type always returns QuerySourceJobManager to satisfy [pflag.Value] interface -func (f *QuerySourceJobManager) Type() string { - return "QuerySourceJobManager" -} - -type QuerySourceTrigger string - -const QuerySourceTriggerManual QuerySourceTrigger = `MANUAL` - -const QuerySourceTriggerScheduled QuerySourceTrigger = `SCHEDULED` - -// String representation for [fmt.Print] -func (f *QuerySourceTrigger) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *QuerySourceTrigger) Set(v string) error { - switch v { - case `MANUAL`, `SCHEDULED`: - *f = QuerySourceTrigger(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "MANUAL", "SCHEDULED"`, v) - } -} - -// Type always returns QuerySourceTrigger to satisfy [pflag.Value] interface -func (f *QuerySourceTrigger) Type() string { - return "QuerySourceTrigger" -} - type QueryStatementType string const QueryStatementTypeAlter QueryStatementType = `ALTER` @@ -3836,11 +3588,6 @@ func (f *RunAsRole) Type() string { return "RunAsRole" } -type ServerlessChannelInfo struct { - // Name of the Channel - Name ChannelName `json:"name,omitempty"` -} - type ServiceError struct { ErrorCode ServiceErrorCode `json:"error_code,omitempty"` // A brief summary of the error condition. 
diff --git a/workspace_client.go b/workspace_client.go index a0ac02900..a16249f6b 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -933,7 +933,10 @@ type WorkspaceClient struct { // caller delays and network latency from caller to service. - The system // will auto-close a statement after one hour if the client stops polling // and thus you must poll at least once an hour. - The results are only - // available for one hour after success; polling does not extend this. + // available for one hour after success; polling does not extend this. - The + // SQL Execution API must be used for the entire lifecycle of the statement. + // For example, you cannot use the Jobs API to execute the command, and then + // the SQL Execution API to cancel it. // // [Apache Arrow Columnar]: https://arrow.apache.org/overview/ // [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html @@ -987,6 +990,25 @@ type WorkspaceClient struct { // is a particular kind of table (rather than a managed or external table). Tables catalog.TablesInterface + // Temporary Table Credentials refer to short-lived, downscoped credentials + // used to access cloud storage locations where table data is stored in + // Databricks. These credentials are employed to provide secure and + // time-limited access to data in cloud environments such as AWS, Azure, and + // Google Cloud. Each cloud provider has its own type of credentials: AWS + // uses temporary session tokens via AWS Security Token Service (STS), Azure + // utilizes Shared Access Signatures (SAS) for its data storage services, and + // Google Cloud supports temporary credentials through OAuth 2.0. Temporary + // table credentials ensure that data access is limited in scope and + // duration, reducing the risk of unauthorized access or misuse. 
To use the + // temporary table credentials API, a metastore admin needs to enable the + // external_access_enabled flag (off by default) at the metastore level, and + // the user needs to be granted the EXTERNAL USE SCHEMA permission at the schema + // level by the catalog admin. Note that EXTERNAL USE SCHEMA is a schema level + // permission that can only be granted by the catalog admin explicitly and is + // not included in schema ownership or ALL PRIVILEGES on the schema for + // security reasons. + TemporaryTableCredentials catalog.TemporaryTableCredentialsInterface + + // Enables administrators to get all tokens and delete tokens for other + // users. Admins can either get every token, get a specific token by ID, or + // get all tokens for a particular user. @@ -1191,6 +1213,7 @@ func NewWorkspaceClient(c ...*Config) (*WorkspaceClient, error) { SystemSchemas: catalog.NewSystemSchemas(databricksClient), TableConstraints: catalog.NewTableConstraints(databricksClient), Tables: catalog.NewTables(databricksClient), + TemporaryTableCredentials: catalog.NewTemporaryTableCredentials(databricksClient), TokenManagement: settings.NewTokenManagement(databricksClient), Tokens: settings.NewTokens(databricksClient), Users: iam.NewUsers(databricksClient),