diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 8b01a2422..4ceeab3d3 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -3eae49b444cac5a0118a3503e5b7ecef7f96527a \ No newline at end of file +d05898328669a3f8ab0c2ecee37db2673d3ea3f7 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 720b73d08..1747db9ca 100644 --- a/.gitattributes +++ b/.gitattributes @@ -23,6 +23,7 @@ experimental/mocks/service/catalog/mock_model_versions_interface.go linguist-gen experimental/mocks/service/catalog/mock_online_tables_interface.go linguist-generated=true experimental/mocks/service/catalog/mock_quality_monitors_interface.go linguist-generated=true experimental/mocks/service/catalog/mock_registered_models_interface.go linguist-generated=true +experimental/mocks/service/catalog/mock_resource_quotas_interface.go linguist-generated=true experimental/mocks/service/catalog/mock_schemas_interface.go linguist-generated=true experimental/mocks/service/catalog/mock_storage_credentials_interface.go linguist-generated=true experimental/mocks/service/catalog/mock_system_schemas_interface.go linguist-generated=true @@ -37,6 +38,7 @@ experimental/mocks/service/compute/mock_global_init_scripts_interface.go linguis experimental/mocks/service/compute/mock_instance_pools_interface.go linguist-generated=true experimental/mocks/service/compute/mock_instance_profiles_interface.go linguist-generated=true experimental/mocks/service/compute/mock_libraries_interface.go linguist-generated=true +experimental/mocks/service/compute/mock_policy_compliance_for_clusters_interface.go linguist-generated=true experimental/mocks/service/compute/mock_policy_families_interface.go linguist-generated=true experimental/mocks/service/dashboards/mock_genie_interface.go linguist-generated=true experimental/mocks/service/dashboards/mock_lakeview_interface.go linguist-generated=true @@ -55,6 +57,7 @@ 
experimental/mocks/service/iam/mock_service_principals_interface.go linguist-gen experimental/mocks/service/iam/mock_users_interface.go linguist-generated=true experimental/mocks/service/iam/mock_workspace_assignment_interface.go linguist-generated=true experimental/mocks/service/jobs/mock_jobs_interface.go linguist-generated=true +experimental/mocks/service/jobs/mock_policy_compliance_for_jobs_interface.go linguist-generated=true experimental/mocks/service/marketplace/mock_consumer_fulfillments_interface.go linguist-generated=true experimental/mocks/service/marketplace/mock_consumer_installations_interface.go linguist-generated=true experimental/mocks/service/marketplace/mock_consumer_listings_interface.go linguist-generated=true diff --git a/CHANGELOG.md b/CHANGELOG.md index f0deb4543..1c388690b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,39 @@ # Version changelog +## [Release] Release v0.46.0 + +### Bug Fixes + + * Fail fast when authenticating if host is not configured ([#1033](https://github.com/databricks/databricks-sdk-go/pull/1033)). + * Improve non-JSON error handling ([#1031](https://github.com/databricks/databricks-sdk-go/pull/1031)). + + +### Internal Changes + + * Add TestAccCreateOboTokenOnAws to flaky test list ([#1029](https://github.com/databricks/databricks-sdk-go/pull/1029)). + * Add workflows manage integration tests checks ([#1032](https://github.com/databricks/databricks-sdk-go/pull/1032)). + * Fix TestMwsAccWorkspaces cleanup ([#1028](https://github.com/databricks/databricks-sdk-go/pull/1028)). + * Improve integration test comment ([#1035](https://github.com/databricks/databricks-sdk-go/pull/1035)). + * Temporary ignore Metastore test failures ([#1027](https://github.com/databricks/databricks-sdk-go/pull/1027)). + * Update test to support new accounts ([#1026](https://github.com/databricks/databricks-sdk-go/pull/1026)). + * Use statuses instead of checks ([#1036](https://github.com/databricks/databricks-sdk-go/pull/1036)). 
+ + +### API Changes: + + * Added `RegenerateDashboard` method for [w.QualityMonitors](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#QualityMonitorsAPI) workspace-level service. + * Added [catalog.RegenerateDashboardRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#RegenerateDashboardRequest) and [catalog.RegenerateDashboardResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#RegenerateDashboardResponse). + * Added [jobs.QueueDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#QueueDetails), [jobs.QueueDetailsCodeCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#QueueDetailsCodeCode), [jobs.RunLifecycleStateV2State](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunLifecycleStateV2State), [jobs.RunStatus](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunStatus), [jobs.TerminationCodeCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#TerminationCodeCode), [jobs.TerminationDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#TerminationDetails) and [jobs.TerminationTypeType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#TerminationTypeType). + * Added `Status` field for [jobs.BaseRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#BaseRun). + * Added `Status` field for [jobs.RepairHistoryItem](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RepairHistoryItem). + * Added `Status` field for [jobs.Run](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Run). + * Added `Status` field for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask). 
+ * Added `MaxProvisionedThroughput` and `MinProvisionedThroughput` fields for [serving.ServedModelInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInput). + * Added `ColumnsToSync` field for [vectorsearch.DeltaSyncVectorIndexSpecRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#DeltaSyncVectorIndexSpecRequest). + * Changed `WorkloadSize` field for [serving.ServedModelInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInput) to no longer be required. + +OpenAPI SHA: d05898328669a3f8ab0c2ecee37db2673d3ea3f7, Date: 2024-09-04 + ## [Release] Release v0.45.0 ### Bug Fixes diff --git a/experimental/mocks/service/catalog/mock_quality_monitors_interface.go b/experimental/mocks/service/catalog/mock_quality_monitors_interface.go index 25e4226b9..bd884c703 100644 --- a/experimental/mocks/service/catalog/mock_quality_monitors_interface.go +++ b/experimental/mocks/service/catalog/mock_quality_monitors_interface.go @@ -578,6 +578,65 @@ func (_c *MockQualityMonitorsInterface_ListRefreshesByTableName_Call) RunAndRetu return _c } +// RegenerateDashboard provides a mock function with given fields: ctx, request +func (_m *MockQualityMonitorsInterface) RegenerateDashboard(ctx context.Context, request catalog.RegenerateDashboardRequest) (*catalog.RegenerateDashboardResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for RegenerateDashboard") + } + + var r0 *catalog.RegenerateDashboardResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, catalog.RegenerateDashboardRequest) (*catalog.RegenerateDashboardResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, catalog.RegenerateDashboardRequest) *catalog.RegenerateDashboardResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*catalog.RegenerateDashboardResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, catalog.RegenerateDashboardRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQualityMonitorsInterface_RegenerateDashboard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RegenerateDashboard' +type MockQualityMonitorsInterface_RegenerateDashboard_Call struct { + *mock.Call +} + +// RegenerateDashboard is a helper method to define mock.On call +// - ctx context.Context +// - request catalog.RegenerateDashboardRequest +func (_e *MockQualityMonitorsInterface_Expecter) RegenerateDashboard(ctx interface{}, request interface{}) *MockQualityMonitorsInterface_RegenerateDashboard_Call { + return &MockQualityMonitorsInterface_RegenerateDashboard_Call{Call: _e.mock.On("RegenerateDashboard", ctx, request)} +} + +func (_c *MockQualityMonitorsInterface_RegenerateDashboard_Call) Run(run func(ctx context.Context, request catalog.RegenerateDashboardRequest)) *MockQualityMonitorsInterface_RegenerateDashboard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(catalog.RegenerateDashboardRequest)) + }) + return _c +} + +func (_c *MockQualityMonitorsInterface_RegenerateDashboard_Call) Return(_a0 *catalog.RegenerateDashboardResponse, _a1 error) *MockQualityMonitorsInterface_RegenerateDashboard_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQualityMonitorsInterface_RegenerateDashboard_Call) RunAndReturn(run func(context.Context, catalog.RegenerateDashboardRequest) (*catalog.RegenerateDashboardResponse, error)) *MockQualityMonitorsInterface_RegenerateDashboard_Call { + _c.Call.Return(run) + return _c +} + // RunRefresh provides a mock function with given fields: ctx, request func (_m *MockQualityMonitorsInterface) RunRefresh(ctx context.Context, request catalog.RunRefreshRequest) (*catalog.MonitorRefreshInfo, error) { ret 
:= _m.Called(ctx, request) diff --git a/service/billing/billable_usage_usage_test.go b/service/billing/billable_usage_usage_test.go index c7364adb2..c5691bda9 100755 --- a/service/billing/billable_usage_usage_test.go +++ b/service/billing/billable_usage_usage_test.go @@ -19,8 +19,8 @@ func ExampleBillableUsageAPI_Download_usageDownload() { } resp, err := a.BillableUsage.Download(ctx, billing.DownloadRequest{ - StartMonth: "2023-01", - EndMonth: "2023-02", + StartMonth: "2024-08", + EndMonth: "2024-09", }) if err != nil { panic(err) diff --git a/service/catalog/api.go b/service/catalog/api.go index 8fb88cd9d..15e78f103 100755 --- a/service/catalog/api.go +++ b/service/catalog/api.go @@ -2005,6 +2005,21 @@ type QualityMonitorsInterface interface { // created. ListRefreshesByTableName(ctx context.Context, tableName string) (*MonitorRefreshListResponse, error) + // Regenerate a monitoring dashboard. + // + // Regenerates the monitoring dashboard for the specified table. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + // owner of the table + // + // The call must be made from the workspace where the monitor was created. The + // dashboard will be regenerated in the assets directory that was specified when + // the monitor was created. + RegenerateDashboard(ctx context.Context, request RegenerateDashboardRequest) (*RegenerateDashboardResponse, error) + // Queue a metric refresh for a monitor. // // Queues a metric refresh on the monitor for the specified table. 
The refresh diff --git a/service/catalog/impl.go b/service/catalog/impl.go index 424002717..6a5297d38 100755 --- a/service/catalog/impl.go +++ b/service/catalog/impl.go @@ -671,6 +671,16 @@ func (a *qualityMonitorsImpl) ListRefreshes(ctx context.Context, request ListRef return &monitorRefreshListResponse, err } +func (a *qualityMonitorsImpl) RegenerateDashboard(ctx context.Context, request RegenerateDashboardRequest) (*RegenerateDashboardResponse, error) { + var regenerateDashboardResponse RegenerateDashboardResponse + path := fmt.Sprintf("/api/2.1/quality-monitoring/tables/%v/monitor/dashboard", request.TableName) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, request, ®enerateDashboardResponse) + return ®enerateDashboardResponse, err +} + func (a *qualityMonitorsImpl) RunRefresh(ctx context.Context, request RunRefreshRequest) (*MonitorRefreshInfo, error) { var monitorRefreshInfo MonitorRefreshInfo path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v/monitor/refreshes", request.TableName) diff --git a/service/catalog/interface.go b/service/catalog/interface.go index cd1ceef8a..c274cc2a8 100755 --- a/service/catalog/interface.go +++ b/service/catalog/interface.go @@ -670,6 +670,21 @@ type QualityMonitorsService interface { // was created. ListRefreshes(ctx context.Context, request ListRefreshesRequest) (*MonitorRefreshListResponse, error) + // Regenerate a monitoring dashboard. + // + // Regenerates the monitoring dashboard for the specified table. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. + // have **USE_CATALOG** on the table's parent catalog and be an owner of the + // table's parent schema 3. 
have the following permissions: - + // **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the + // table's parent schema - be an owner of the table + // + // The call must be made from the workspace where the monitor was created. + // The dashboard will be regenerated in the assets directory that was + // specified when the monitor was created. + RegenerateDashboard(ctx context.Context, request RegenerateDashboardRequest) (*RegenerateDashboardResponse, error) + // Queue a metric refresh for a monitor. // // Queues a metric refresh on the monitor for the specified table. The diff --git a/service/catalog/model.go b/service/catalog/model.go index d94f95682..64bb4e9a1 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -989,7 +989,9 @@ func (s CreateMetastore) MarshalJSON() ([]byte, error) { } type CreateMetastoreAssignment struct { - // The name of the default catalog in the metastore. + // The name of the default catalog in the metastore. This field is + // deprecated. Please use "Default Namespace API" to configure the default + // catalog for a Databricks workspace. DefaultCatalogName string `json:"default_catalog_name"` // The unique ID of the metastore. MetastoreId string `json:"metastore_id"` @@ -4280,6 +4282,41 @@ func (s ReadVolumeRequest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type RegenerateDashboardRequest struct { + // Full name of the table. + TableName string `json:"-" url:"-"` + // Optional argument to specify the warehouse for dashboard regeneration. If + // not specified, the first running warehouse will be used. 
+ WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RegenerateDashboardRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegenerateDashboardRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RegenerateDashboardResponse struct { + // Id of the regenerated monitoring dashboard. + DashboardId string `json:"dashboard_id,omitempty"` + // The directory where the regenerated dashboard is stored. + ParentFolder string `json:"parent_folder,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RegenerateDashboardResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegenerateDashboardResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Registered model alias. type RegisteredModelAlias struct { // Name of the alias, e.g. 'champion' or 'latest_stable' @@ -5022,7 +5059,9 @@ func (s UpdateMetastore) MarshalJSON() ([]byte, error) { } type UpdateMetastoreAssignment struct { - // The name of the default catalog for the metastore. + // The name of the default catalog in the metastore. This field is + // deprecated. Please use "Default Namespace API" to configure the default + // catalog for a Databricks workspace. DefaultCatalogName string `json:"default_catalog_name,omitempty"` + // The unique ID of the metastore. MetastoreId string `json:"metastore_id,omitempty"` diff --git a/service/iam/model.go b/service/iam/model.go index 5cfdc0755..57fc0eb45 100755 --- a/service/iam/model.go +++ b/service/iam/model.go @@ -212,9 +212,10 @@ type GetPermissionRequest struct { // The id of the request object. RequestObjectId string `json:"-" url:"-"` // The type of the request object. 
Can be one of the following: alerts, - // authorization, clusters, cluster-policies, dbsql-dashboards, directories, - // experiments, files, instance-pools, jobs, notebooks, pipelines, queries, - // registered-models, repos, serving-endpoints, or warehouses. + // authorization, clusters, cluster-policies, dashboards, dbsql-dashboards, + // directories, experiments, files, instance-pools, jobs, notebooks, + // pipelines, queries, registered-models, repos, serving-endpoints, or + // warehouses. RequestObjectType string `json:"-" url:"-"` } @@ -1136,9 +1137,10 @@ type PermissionsRequest struct { // The id of the request object. RequestObjectId string `json:"-" url:"-"` // The type of the request object. Can be one of the following: alerts, - // authorization, clusters, cluster-policies, dbsql-dashboards, directories, - // experiments, files, instance-pools, jobs, notebooks, pipelines, queries, - // registered-models, repos, serving-endpoints, or warehouses. + // authorization, clusters, cluster-policies, dashboards, dbsql-dashboards, + // directories, experiments, files, instance-pools, jobs, notebooks, + // pipelines, queries, registered-models, repos, serving-endpoints, or + // warehouses. RequestObjectType string `json:"-" url:"-"` } @@ -1235,7 +1237,7 @@ type ServicePrincipal struct { Groups []ComplexValue `json:"groups,omitempty"` // Databricks service principal ID. - Id string `json:"id,omitempty" url:"-"` + Id string `json:"id,omitempty"` // Corresponds to AWS instance profile/arn role. Roles []ComplexValue `json:"roles,omitempty"` // The schema of the List response. diff --git a/service/jobs/model.go b/service/jobs/model.go index 160c7055e..a29a64117 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -141,8 +141,10 @@ type BaseRun struct { // task starts executing, for example, if the job is scheduled to run on a // new cluster, this is the time the cluster creation call is issued. 
StartTime int64 `json:"start_time,omitempty"` - // The current state of the run. + // Deprecated. Please use the `status` field instead. State *RunState `json:"state,omitempty"` + // The current status of the run + Status *RunStatus `json:"status,omitempty"` // The list of tasks performed by the run. Each task has its own `run_id` // which you can use to call `JobsGetOutput` to retrieve the run resutls. Tasks []RunTask `json:"tasks,omitempty"` @@ -2014,6 +2016,67 @@ type PythonWheelTask struct { Parameters []string `json:"parameters,omitempty"` } +type QueueDetails struct { + // The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run + // was queued due to reaching the workspace limit of active task runs. * + // `MAX_CONCURRENT_RUNS_REACHED`: The run was queued due to reaching the + // per-job limit of concurrent job runs. * + // `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`: The run was queued due to reaching + // the workspace limit of active run job tasks. + Code QueueDetailsCodeCode `json:"code,omitempty"` + // A descriptive message with the queuing details. This field is + // unstructured, and its exact format is subject to change. + Message string `json:"message,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueueDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueueDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run was +// queued due to reaching the workspace limit of active task runs. * +// `MAX_CONCURRENT_RUNS_REACHED`: The run was queued due to reaching the per-job +// limit of concurrent job runs. * `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`: The run +// was queued due to reaching the workspace limit of active run job tasks. +type QueueDetailsCodeCode string + +// The run was queued due to reaching the workspace limit of active task runs. 
+const QueueDetailsCodeCodeActiveRunsLimitReached QueueDetailsCodeCode = `ACTIVE_RUNS_LIMIT_REACHED` + +// The run was queued due to reaching the workspace limit of active run job +// tasks. +const QueueDetailsCodeCodeActiveRunJobTasksLimitReached QueueDetailsCodeCode = `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED` + +// The run was queued due to reaching the per-job limit of concurrent job runs. +const QueueDetailsCodeCodeMaxConcurrentRunsReached QueueDetailsCodeCode = `MAX_CONCURRENT_RUNS_REACHED` + +// String representation for [fmt.Print] +func (f *QueueDetailsCodeCode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *QueueDetailsCodeCode) Set(v string) error { + switch v { + case `ACTIVE_RUNS_LIMIT_REACHED`, `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`, `MAX_CONCURRENT_RUNS_REACHED`: + *f = QueueDetailsCodeCode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE_RUNS_LIMIT_REACHED", "ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED", "MAX_CONCURRENT_RUNS_REACHED"`, v) + } +} + +// Type always returns QueueDetailsCodeCode to satisfy [pflag.Value] interface +func (f *QueueDetailsCodeCode) Type() string { + return "QueueDetailsCodeCode" +} + type QueueSettings struct { // If true, enable queueing for the job. This is a required field. Enabled bool `json:"enabled"` @@ -2027,8 +2090,10 @@ type RepairHistoryItem struct { Id int64 `json:"id,omitempty"` // The start time of the (repaired) run. StartTime int64 `json:"start_time,omitempty"` - // The current state of the run. + // Deprecated. Please use the `status` field instead. State *RunState `json:"state,omitempty"` + // The current status of the run + Status *RunStatus `json:"status,omitempty"` // The run IDs of the task runs that ran as part of this repair history // item. 
TaskRunIds []int64 `json:"task_run_ids,omitempty"` @@ -2400,8 +2465,10 @@ type Run struct { // task starts executing, for example, if the job is scheduled to run on a // new cluster, this is the time the cluster creation call is issued. StartTime int64 `json:"start_time,omitempty"` - // The current state of the run. + // Deprecated. Please use the `status` field instead. State *RunState `json:"state,omitempty"` + // The current status of the run + Status *RunStatus `json:"status,omitempty"` // The list of tasks performed by the run. Each task has its own `run_id` // which you can use to call `JobsGetOutput` to retrieve the run resutls. Tasks []RunTask `json:"tasks,omitempty"` @@ -2712,6 +2779,42 @@ func (f *RunLifeCycleState) Type() string { return "RunLifeCycleState" } +// The current state of the run. +type RunLifecycleStateV2State string + +const RunLifecycleStateV2StateBlocked RunLifecycleStateV2State = `BLOCKED` + +const RunLifecycleStateV2StatePending RunLifecycleStateV2State = `PENDING` + +const RunLifecycleStateV2StateQueued RunLifecycleStateV2State = `QUEUED` + +const RunLifecycleStateV2StateRunning RunLifecycleStateV2State = `RUNNING` + +const RunLifecycleStateV2StateTerminated RunLifecycleStateV2State = `TERMINATED` + +const RunLifecycleStateV2StateTerminating RunLifecycleStateV2State = `TERMINATING` + +// String representation for [fmt.Print] +func (f *RunLifecycleStateV2State) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RunLifecycleStateV2State) Set(v string) error { + switch v { + case `BLOCKED`, `PENDING`, `QUEUED`, `RUNNING`, `TERMINATED`, `TERMINATING`: + *f = RunLifecycleStateV2State(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BLOCKED", "PENDING", "QUEUED", "RUNNING", "TERMINATED", "TERMINATING"`, v) + } +} + +// Type always returns RunLifecycleStateV2State to satisfy [pflag.Value] interface +func (f *RunLifecycleStateV2State) Type() string { + 
return "RunLifecycleStateV2State" +} + type RunNow struct { // An array of commands to execute for jobs with the dbt task, for example // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt @@ -3069,6 +3172,17 @@ func (s RunState) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// The current status of the run +type RunStatus struct { + // If the run was queued, details about the reason for queuing the run. + QueueDetails *QueueDetails `json:"queue_details,omitempty"` + // The current state of the run. + State RunLifecycleStateV2State `json:"state,omitempty"` + // If the run is in a TERMINATING or TERMINATED state, details about the + // reason for terminating the run. + TerminationDetails *TerminationDetails `json:"termination_details,omitempty"` +} + // Used when outputting a child run, in GetRun or ListRuns. type RunTask struct { // The sequence number of this run attempt for a triggered job run. The @@ -3214,8 +3328,10 @@ type RunTask struct { // task starts executing, for example, if the job is scheduled to run on a // new cluster, this is the time the cluster creation call is issued. StartTime int64 `json:"start_time,omitempty"` - // The current state of the run. + // Deprecated. Please use the `status` field instead. State *RunState `json:"state,omitempty"` + // The current status of the run + Status *RunStatus `json:"status,omitempty"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be @@ -4117,6 +4233,274 @@ func (s TaskNotificationSettings) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// The code indicates why the run was terminated. Additional codes might be +// introduced in future releases. * `SUCCESS`: The run was completed +// successfully. 
* `CANCELED`: The run was canceled during execution by the +// Databricks platform; for example, if the maximum run duration was exceeded. * +// `SKIPPED`: Run was never executed, for example, if the upstream task run +// failed, the dependency type condition was not met, or there were no material +// tasks to execute. * `INTERNAL_ERROR`: The run encountered an unexpected +// error. Refer to the state message for further details. * `DRIVER_ERROR`: The +// run encountered an error while communicating with the Spark Driver. * +// `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state +// message for further details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to +// complete the checkout due to an error when communicating with the third party +// service. * `INVALID_CLUSTER_REQUEST`: The run failed because it issued an +// invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The +// workspace has reached the quota for the maximum number of concurrent active +// runs. Consider scheduling the runs over a larger time frame. * +// `FEATURE_DISABLED`: The run failed because it tried to access a feature +// unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number +// of cluster creation, start, and upsize requests have exceeded the allotted +// rate limit. Consider spreading the run execution over a larger time frame. * +// `STORAGE_ACCESS_ERROR`: The run failed due to an error when accessing the +// customer blob storage. Refer to the state message for further details. * +// `RUN_EXECUTION_ERROR`: The run was completed with task failures. For more +// details, refer to the state message or run output. * `UNAUTHORIZED_ERROR`: +// The run failed due to a permission issue while accessing a resource. Refer to +// the state message for further details. * `LIBRARY_INSTALLATION_ERROR`: The +// run failed while installing the user-requested library. Refer to the state +// message for further details. 
The causes might include, but are not limited +// to: The provided library is invalid, there are insufficient permissions to +// install the library, and so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The +// scheduled run exceeds the limit of maximum concurrent runs set for the job. * +// `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has +// already reached the maximum number of contexts it is configured to create. +// See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution +// does not exist. Refer to the state message for further details. * +// `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid configuration. +// Refer to the state message for further details. * `CLOUD_FAILURE`: The run +// failed due to a cloud provider issue. Refer to the state message for further +// details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching +// the job level queue size limit. +// +// [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now +type TerminationCodeCode string + +// The run was canceled during execution by the platform; for +// example, if the maximum run duration was exceeded. +const TerminationCodeCodeCanceled TerminationCodeCode = `CANCELED` + +// The run failed due to a cloud provider issue. Refer to the state message for +// further details. +const TerminationCodeCodeCloudFailure TerminationCodeCode = `CLOUD_FAILURE` + +// The run failed due to a cluster error. Refer to the state message for further +// details. +const TerminationCodeCodeClusterError TerminationCodeCode = `CLUSTER_ERROR` + +// The number of cluster creation, start, and upsize requests have exceeded the +// allotted rate limit. Consider spreading the run execution over a larger time +// frame. +const TerminationCodeCodeClusterRequestLimitExceeded TerminationCodeCode = `CLUSTER_REQUEST_LIMIT_EXCEEDED` + +// The run encountered an error while communicating with the Spark Driver. 
+const TerminationCodeCodeDriverError TerminationCodeCode = `DRIVER_ERROR` + +// The run failed because it tried to access a feature unavailable for the +// workspace. +const TerminationCodeCodeFeatureDisabled TerminationCodeCode = `FEATURE_DISABLED` + +// The run encountered an unexpected error. Refer to the state message for +// further details. +const TerminationCodeCodeInternalError TerminationCodeCode = `INTERNAL_ERROR` + +// The run failed because it issued an invalid request to start the cluster. +const TerminationCodeCodeInvalidClusterRequest TerminationCodeCode = `INVALID_CLUSTER_REQUEST` + +// The run failed due to an invalid configuration. Refer to the state message +// for further details. +const TerminationCodeCodeInvalidRunConfiguration TerminationCodeCode = `INVALID_RUN_CONFIGURATION` + +// The run failed while installing the user-requested library. Refer to the +// state message for further details. The causes might include, but are not +// limited to: The provided library is invalid, there are insufficient +// permissions to install the library, and so forth. +const TerminationCodeCodeLibraryInstallationError TerminationCodeCode = `LIBRARY_INSTALLATION_ERROR` + +// The scheduled run exceeds the limit of maximum concurrent runs set for the +// job. +const TerminationCodeCodeMaxConcurrentRunsExceeded TerminationCodeCode = `MAX_CONCURRENT_RUNS_EXCEEDED` + +// The run was skipped due to reaching the job level queue size limit. +const TerminationCodeCodeMaxJobQueueSizeExceeded TerminationCodeCode = `MAX_JOB_QUEUE_SIZE_EXCEEDED` + +// The run is scheduled on a cluster that has already reached the maximum number +// of contexts it is configured to create. See: [Link]. 
+// +// [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now +const TerminationCodeCodeMaxSparkContextsExceeded TerminationCodeCode = `MAX_SPARK_CONTEXTS_EXCEEDED` + +// Failed to complete the checkout due to an error when communicating with the +// third party service. +const TerminationCodeCodeRepositoryCheckoutFailed TerminationCodeCode = `REPOSITORY_CHECKOUT_FAILED` + +// A resource necessary for run execution does not exist. Refer to the state +// message for further details. +const TerminationCodeCodeResourceNotFound TerminationCodeCode = `RESOURCE_NOT_FOUND` + +// The run was completed with task failures. For more details, refer to the +// state message or run output. +const TerminationCodeCodeRunExecutionError TerminationCodeCode = `RUN_EXECUTION_ERROR` + +// Run was never executed, for example, if the upstream task run failed, the +// dependency type condition was not met, or there were no material tasks to +// execute. +const TerminationCodeCodeSkipped TerminationCodeCode = `SKIPPED` + +// The run failed due to an error when accessing the customer blob storage. +// Refer to the state message for further details. +const TerminationCodeCodeStorageAccessError TerminationCodeCode = `STORAGE_ACCESS_ERROR` + +// The run was completed successfully. +const TerminationCodeCodeSuccess TerminationCodeCode = `SUCCESS` + +// The run failed due to a permission issue while accessing a resource. Refer to +// the state message for further details. +const TerminationCodeCodeUnauthorizedError TerminationCodeCode = `UNAUTHORIZED_ERROR` + +// The workspace has reached the quota for the maximum number of concurrent +// active runs. Consider scheduling the runs over a larger time frame. 
+const TerminationCodeCodeWorkspaceRunLimitExceeded TerminationCodeCode = `WORKSPACE_RUN_LIMIT_EXCEEDED` + +// String representation for [fmt.Print] +func (f *TerminationCodeCode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TerminationCodeCode) Set(v string) error { + switch v { + case `CANCELED`, `CLOUD_FAILURE`, `CLUSTER_ERROR`, `CLUSTER_REQUEST_LIMIT_EXCEEDED`, `DRIVER_ERROR`, `FEATURE_DISABLED`, `INTERNAL_ERROR`, `INVALID_CLUSTER_REQUEST`, `INVALID_RUN_CONFIGURATION`, `LIBRARY_INSTALLATION_ERROR`, `MAX_CONCURRENT_RUNS_EXCEEDED`, `MAX_JOB_QUEUE_SIZE_EXCEEDED`, `MAX_SPARK_CONTEXTS_EXCEEDED`, `REPOSITORY_CHECKOUT_FAILED`, `RESOURCE_NOT_FOUND`, `RUN_EXECUTION_ERROR`, `SKIPPED`, `STORAGE_ACCESS_ERROR`, `SUCCESS`, `UNAUTHORIZED_ERROR`, `WORKSPACE_RUN_LIMIT_EXCEEDED`: + *f = TerminationCodeCode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "CLOUD_FAILURE", "CLUSTER_ERROR", "CLUSTER_REQUEST_LIMIT_EXCEEDED", "DRIVER_ERROR", "FEATURE_DISABLED", "INTERNAL_ERROR", "INVALID_CLUSTER_REQUEST", "INVALID_RUN_CONFIGURATION", "LIBRARY_INSTALLATION_ERROR", "MAX_CONCURRENT_RUNS_EXCEEDED", "MAX_JOB_QUEUE_SIZE_EXCEEDED", "MAX_SPARK_CONTEXTS_EXCEEDED", "REPOSITORY_CHECKOUT_FAILED", "RESOURCE_NOT_FOUND", "RUN_EXECUTION_ERROR", "SKIPPED", "STORAGE_ACCESS_ERROR", "SUCCESS", "UNAUTHORIZED_ERROR", "WORKSPACE_RUN_LIMIT_EXCEEDED"`, v) + } +} + +// Type always returns TerminationCodeCode to satisfy [pflag.Value] interface +func (f *TerminationCodeCode) Type() string { + return "TerminationCodeCode" +} + +type TerminationDetails struct { + // The code indicates why the run was terminated. Additional codes might be + // introduced in future releases. * `SUCCESS`: The run was completed + // successfully. * `CANCELED`: The run was canceled during execution by the + // Databricks platform; for example, if the maximum run duration was + // exceeded. 
* `SKIPPED`: Run was never executed, for example, if the + // upstream task run failed, the dependency type condition was not met, or + // there were no material tasks to execute. * `INTERNAL_ERROR`: The run + // encountered an unexpected error. Refer to the state message for further + // details. * `DRIVER_ERROR`: The run encountered an error while + // communicating with the Spark Driver. * `CLUSTER_ERROR`: The run failed + // due to a cluster error. Refer to the state message for further details. * + // `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an + // error when communicating with the third party service. * + // `INVALID_CLUSTER_REQUEST`: The run failed because it issued an invalid + // request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The + // workspace has reached the quota for the maximum number of concurrent + // active runs. Consider scheduling the runs over a larger time frame. * + // `FEATURE_DISABLED`: The run failed because it tried to access a feature + // unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The + // number of cluster creation, start, and upsize requests have exceeded the + // allotted rate limit. Consider spreading the run execution over a larger + // time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when + // accessing the customer blob storage. Refer to the state message for + // further details. * `RUN_EXECUTION_ERROR`: The run was completed with task + // failures. For more details, refer to the state message or run output. * + // `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while + // accessing a resource. Refer to the state message for further details. * + // `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the + // user-requested library. Refer to the state message for further details. 
+ // The causes might include, but are not limited to: The provided library is + // invalid, there are insufficient permissions to install the library, and + // so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the + // limit of maximum concurrent runs set for the job. * + // `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has + // already reached the maximum number of contexts it is configured to + // create. See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run + // execution does not exist. Refer to the state message for further details. + // * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid + // configuration. Refer to the state message for further details. * + // `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to + // the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: + // The run was skipped due to reaching the job level queue size limit. + // + // [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now + Code TerminationCodeCode `json:"code,omitempty"` + // A descriptive message with the termination details. This field is + // unstructured and the format might change. + Message string `json:"message,omitempty"` + // * `SUCCESS`: The run terminated without any issues * `INTERNAL_ERROR`: An + // error occurred in the Databricks platform. Please look at the [status + // page] or contact support if the issue persists. * `CLIENT_ERROR`: The run + // was terminated because of an error caused by user input or the job + // configuration. * `CLOUD_FAILURE`: The run was terminated because of an + // issue with your cloud provider. 
+ // + // [status page]: https://status.databricks.com/ + Type TerminationTypeType `json:"type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TerminationDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TerminationDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// * `SUCCESS`: The run terminated without any issues * `INTERNAL_ERROR`: An +// error occurred in the Databricks platform. Please look at the [status page] +// or contact support if the issue persists. * `CLIENT_ERROR`: The run was +// terminated because of an error caused by user input or the job configuration. +// * `CLOUD_FAILURE`: The run was terminated because of an issue with your cloud +// provider. +// +// [status page]: https://status.databricks.com/ +type TerminationTypeType string + +// The run was terminated because of an error caused by user input or the job +// configuration. +const TerminationTypeTypeClientError TerminationTypeType = `CLIENT_ERROR` + +// The run was terminated because of an issue with your cloud provider. +const TerminationTypeTypeCloudFailure TerminationTypeType = `CLOUD_FAILURE` + +// An error occurred in the Databricks platform. Please look at the [status +// page] or contact support if the issue persists.
+// +// [status page]: https://status.databricks.com/ +const TerminationTypeTypeInternalError TerminationTypeType = `INTERNAL_ERROR` + +// The run terminated without any issues +const TerminationTypeTypeSuccess TerminationTypeType = `SUCCESS` + +// String representation for [fmt.Print] +func (f *TerminationTypeType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TerminationTypeType) Set(v string) error { + switch v { + case `CLIENT_ERROR`, `CLOUD_FAILURE`, `INTERNAL_ERROR`, `SUCCESS`: + *f = TerminationTypeType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLIENT_ERROR", "CLOUD_FAILURE", "INTERNAL_ERROR", "SUCCESS"`, v) + } +} + +// Type always returns TerminationTypeType to satisfy [pflag.Value] interface +func (f *TerminationTypeType) Type() string { + return "TerminationTypeType" +} + // Additional details about what triggered the run type TriggerInfo struct { // The run id of the Run Job task run diff --git a/service/ml/api.go b/service/ml/api.go index 1c427ba47..0b5818059 100755 --- a/service/ml/api.go +++ b/service/ml/api.go @@ -124,7 +124,12 @@ type ExperimentsInterface interface { // Get all artifacts. // // List artifacts for a run. Takes an optional `artifact_path` prefix. If it is - // specified, the response contains only artifacts with the specified prefix.", + // specified, the response contains only artifacts with the specified prefix. + // This API does not support pagination when listing artifacts in UC Volumes. A + // maximum of 1000 artifacts will be retrieved for UC Volumes. Please call + // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC + // Volumes, which supports pagination. See [List directory contents | Files + // API](/api/workspace/files/listdirectorycontents). // // This method is generated by Databricks SDK Code Generator. 
ListArtifacts(ctx context.Context, request ListArtifactsRequest) listing.Iterator[FileInfo] @@ -132,7 +137,12 @@ type ExperimentsInterface interface { // Get all artifacts. // // List artifacts for a run. Takes an optional `artifact_path` prefix. If it is - // specified, the response contains only artifacts with the specified prefix.", + // specified, the response contains only artifacts with the specified prefix. + // This API does not support pagination when listing artifacts in UC Volumes. A + // maximum of 1000 artifacts will be retrieved for UC Volumes. Please call + // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC + // Volumes, which supports pagination. See [List directory contents | Files + // API](/api/workspace/files/listdirectorycontents). // // This method is generated by Databricks SDK Code Generator. ListArtifactsAll(ctx context.Context, request ListArtifactsRequest) ([]FileInfo, error) @@ -392,7 +402,12 @@ func (a *ExperimentsAPI) GetPermissionsByExperimentId(ctx context.Context, exper // Get all artifacts. // // List artifacts for a run. Takes an optional `artifact_path` prefix. If it is -// specified, the response contains only artifacts with the specified prefix.", +// specified, the response contains only artifacts with the specified prefix. +// This API does not support pagination when listing artifacts in UC Volumes. A +// maximum of 1000 artifacts will be retrieved for UC Volumes. Please call +// `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC +// Volumes, which supports pagination. See [List directory contents | Files +// API](/api/workspace/files/listdirectorycontents). // // This method is generated by Databricks SDK Code Generator. func (a *ExperimentsAPI) ListArtifacts(ctx context.Context, request ListArtifactsRequest) listing.Iterator[FileInfo] { @@ -422,7 +437,12 @@ func (a *ExperimentsAPI) ListArtifacts(ctx context.Context, request ListArtifact // Get all artifacts. 
// // List artifacts for a run. Takes an optional `artifact_path` prefix. If it is -// specified, the response contains only artifacts with the specified prefix.", +// specified, the response contains only artifacts with the specified prefix. +// This API does not support pagination when listing artifacts in UC Volumes. A +// maximum of 1000 artifacts will be retrieved for UC Volumes. Please call +// `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC +// Volumes, which supports pagination. See [List directory contents | Files +// API](/api/workspace/files/listdirectorycontents). // // This method is generated by Databricks SDK Code Generator. func (a *ExperimentsAPI) ListArtifactsAll(ctx context.Context, request ListArtifactsRequest) ([]FileInfo, error) { diff --git a/service/ml/interface.go b/service/ml/interface.go index c102c8697..f85cf28c1 100755 --- a/service/ml/interface.go +++ b/service/ml/interface.go @@ -114,7 +114,11 @@ type ExperimentsService interface { // // List artifacts for a run. Takes an optional `artifact_path` prefix. If it // is specified, the response contains only artifacts with the specified - // prefix.", + // prefix. This API does not support pagination when listing artifacts in UC + // Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes. + // Please call `/api/2.0/fs/directories{directory_path}` for listing + // artifacts in UC Volumes, which supports pagination. See [List directory + // contents | Files API](/api/workspace/files/listdirectorycontents). // // Use ListArtifactsAll() to get all FileInfo instances, which will iterate over every result page. 
ListArtifacts(ctx context.Context, request ListArtifactsRequest) (*ListArtifactsResponse, error) diff --git a/service/ml/model.go b/service/ml/model.go index 95d0c0341..bf9d039a4 100755 --- a/service/ml/model.go +++ b/service/ml/model.go @@ -1239,7 +1239,12 @@ func (s JobSpecWithoutSecret) MarshalJSON() ([]byte, error) { // Get all artifacts type ListArtifactsRequest struct { - // Token indicating the page of artifact results to fetch + // Token indicating the page of artifact results to fetch. `page_token` is + // not supported when listing artifacts in UC Volumes. A maximum of 1000 + // artifacts will be retrieved for UC Volumes. Please call + // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC + // Volumes, which supports pagination. See [List directory contents | Files + // API](/api/workspace/files/listdirectorycontents). PageToken string `json:"-" url:"page_token,omitempty"` // Filter artifacts matching this path (a relative path from the root // artifact directory). diff --git a/service/serving/model.go b/service/serving/model.go index f481113c0..6d63ed8cb 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -1313,6 +1313,10 @@ type ServedModelInput struct { // ARN of the instance profile that the served model will use to access AWS // resources. InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + // The maximum tokens per second that the endpoint can scale up to. + MaxProvisionedThroughput int `json:"max_provisioned_throughput,omitempty"` + // The minimum tokens per second that the endpoint can scale down to. + MinProvisionedThroughput int `json:"min_provisioned_throughput,omitempty"` // The name of the model in Databricks Model Registry to be served or if the // model resides in Unity Catalog, the full name of model, in the form of // __catalog_name__.__schema_name__.__model_name__. 
@@ -1335,7 +1339,7 @@ type ServedModelInput struct { // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 // provisioned concurrency). If scale-to-zero is enabled, the lower bound of // the provisioned concurrency for each workload size will be 0. - WorkloadSize ServedModelInputWorkloadSize `json:"workload_size"` + WorkloadSize ServedModelInputWorkloadSize `json:"workload_size,omitempty"` // The workload type of the served model. The workload type selects which // type of compute to use in the endpoint. The default value for this // parameter is "CPU". For deep learning workloads, GPU acceleration is diff --git a/service/vectorsearch/model.go b/service/vectorsearch/model.go index d1267893e..48335536a 100755 --- a/service/vectorsearch/model.go +++ b/service/vectorsearch/model.go @@ -141,6 +141,11 @@ type DeleteIndexResponse struct { } type DeltaSyncVectorIndexSpecRequest struct { + // [Optional] Select the columns to sync with the vector index. If you leave + // this field blank, all columns from the source table are synced with the + // index. The primary key column and embedding source column or embedding + // vector column are always synced. + ColumnsToSync []string `json:"columns_to_sync,omitempty"` // The columns that contain the embedding source. EmbeddingSourceColumns []EmbeddingSourceColumn `json:"embedding_source_columns,omitempty"` // The columns that contain the embedding vectors. The format should be diff --git a/version/version.go b/version/version.go index ae8b31890..9577deb45 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.45.0" +const Version = "0.46.0"