diff --git a/acceptance/bundle/help/bundle-generate-pipeline/output.txt b/acceptance/bundle/help/bundle-generate-pipeline/output.txt index 7d0db9a098..c24f552ed4 100644 --- a/acceptance/bundle/help/bundle-generate-pipeline/output.txt +++ b/acceptance/bundle/help/bundle-generate-pipeline/output.txt @@ -1,6 +1,6 @@ >>> [CLI] bundle generate pipeline --help -Generate bundle configuration for an existing Delta Live Tables pipeline. +Generate bundle configuration for an existing Spark Declarative Pipeline. This command downloads an existing Lakeflow Spark Declarative Pipeline's configuration and any associated notebooks, creating bundle files that you can use to deploy the pipeline to other diff --git a/acceptance/bundle/help/bundle-open/output.txt b/acceptance/bundle/help/bundle-open/output.txt index 568908f937..8c5f25db3c 100644 --- a/acceptance/bundle/help/bundle-open/output.txt +++ b/acceptance/bundle/help/bundle-open/output.txt @@ -4,7 +4,7 @@ Open a deployed bundle resource in the Databricks workspace. Examples: databricks bundle open # Prompts to select a resource to open - databricks bundle open my_job # Open specific job in Workflows UI + databricks bundle open my_job # Open specific job in Jobs UI databricks bundle open my_dashboard # Open dashboard in browser Use after deployment to quickly navigate to your resources in the workspace. 
diff --git a/acceptance/bundle/paths/invalid_pipeline_globs/databricks.yml b/acceptance/bundle/paths/invalid_pipeline_globs/databricks.yml index 5ed46e048a..d80b8aebb5 100644 --- a/acceptance/bundle/paths/invalid_pipeline_globs/databricks.yml +++ b/acceptance/bundle/paths/invalid_pipeline_globs/databricks.yml @@ -9,5 +9,5 @@ resources: variables: notebook_dir: - description: Directory with DLT notebooks + description: Directory with SDP notebooks default: non-existent diff --git a/acceptance/bundle/paths/pipeline_expected_file_got_notebook/databricks.yml b/acceptance/bundle/paths/pipeline_expected_file_got_notebook/databricks.yml index 7d176f0cd5..4fcdf53e03 100644 --- a/acceptance/bundle/paths/pipeline_expected_file_got_notebook/databricks.yml +++ b/acceptance/bundle/paths/pipeline_expected_file_got_notebook/databricks.yml @@ -6,5 +6,5 @@ include: variables: notebook_dir: - description: Directory with DLT notebooks + description: Directory with SDP notebooks default: notebooks diff --git a/acceptance/bundle/paths/pipeline_globs/root/databricks.yml b/acceptance/bundle/paths/pipeline_globs/root/databricks.yml index a2b3f77698..843bd92348 100644 --- a/acceptance/bundle/paths/pipeline_globs/root/databricks.yml +++ b/acceptance/bundle/paths/pipeline_globs/root/databricks.yml @@ -6,8 +6,8 @@ include: variables: notebook_dir: - description: Directory with DLT notebooks + description: Directory with SDP notebooks default: notebooks file_dir: - description: Directory with DLT files + description: Directory with SDP files default: files diff --git a/acceptance/bundle/run_as/pipelines_legacy/output.txt b/acceptance/bundle/run_as/pipelines_legacy/output.txt index 654d5eab11..1462caf861 100644 --- a/acceptance/bundle/run_as/pipelines_legacy/output.txt +++ b/acceptance/bundle/run_as/pipelines_legacy/output.txt @@ -1,6 +1,6 @@ >>> [CLI] bundle validate -o json -Warning: You are using the legacy mode of run_as. 
The support for this mode is experimental and might be removed in a future release of the CLI. In order to run the DLT pipelines in your DAB as the run_as user this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC. +Warning: You are using the legacy mode of run_as. The support for this mode is experimental and might be removed in a future release of the CLI. In order to run the Spark Declarative Pipelines in your DAB as the run_as user this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC. at experimental.use_legacy_run_as in databricks.yml:8:22 diff --git a/acceptance/bundle/telemetry/deploy-experimental/output.txt b/acceptance/bundle/telemetry/deploy-experimental/output.txt index d96e688b0a..a65a9b2e8a 100644 --- a/acceptance/bundle/telemetry/deploy-experimental/output.txt +++ b/acceptance/bundle/telemetry/deploy-experimental/output.txt @@ -1,6 +1,6 @@ >>> [CLI] bundle deploy -Warning: You are using the legacy mode of run_as. The support for this mode is experimental and might be removed in a future release of the CLI. In order to run the DLT pipelines in your DAB as the run_as user this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC. +Warning: You are using the legacy mode of run_as. The support for this mode is experimental and might be removed in a future release of the CLI. 
In order to run the Spark Declarative Pipelines in your DAB as the run_as user this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC. at experimental.use_legacy_run_as in databricks.yml:5:22 diff --git a/acceptance/bundle/templates/dbt-sql/output/my_dbt_sql/README.md b/acceptance/bundle/templates/dbt-sql/output/my_dbt_sql/README.md index 6fd15788a5..1793627340 100644 --- a/acceptance/bundle/templates/dbt-sql/output/my_dbt_sql/README.md +++ b/acceptance/bundle/templates/dbt-sql/output/my_dbt_sql/README.md @@ -102,7 +102,7 @@ on CI/CD setup. ## Manually deploying to Databricks with Declarative Automation Bundles Declarative Automation Bundles can be used to deploy to Databricks and to execute -dbt commands as a job using Databricks Workflows. See +dbt commands as a job using Databricks Jobs. See https://docs.databricks.com/dev-tools/bundles/index.html to learn more. Use the Databricks CLI to deploy a development copy of this project to a workspace: @@ -117,7 +117,7 @@ is optional here.) This deploys everything that's defined for this project. For example, the default template would deploy a job called `[dev yourname] my_dbt_sql_job` to your workspace. -You can find that job by opening your workpace and clicking on **Workflows**. +You can find that job by opening your workpace and clicking on **Jobs**. You can also deploy to your production target directly from the command-line. The warehouse, catalog, and schema for that target are configured in `dbt_profiles/profiles.yml`. 
diff --git a/acceptance/bundle/templates/default-scala/output/my_default_scala/README.md b/acceptance/bundle/templates/default-scala/output/my_default_scala/README.md index 9bc393514c..7a02014993 100644 --- a/acceptance/bundle/templates/default-scala/output/my_default_scala/README.md +++ b/acceptance/bundle/templates/default-scala/output/my_default_scala/README.md @@ -21,7 +21,7 @@ The 'my_default_scala' project was generated by using the default-scala template This deploys everything that's defined for this project. For example, the default template would deploy a job called `[dev yourname] my_default_scala_job` to your workspace. - You can find that job by opening your workspace and clicking on **Workflows**. + You can find that job by opening your workspace and clicking on **Jobs**. 4. Similarly, to deploy a production copy, type: ``` diff --git a/acceptance/bundle/templates/default-sql/output/my_default_sql/README.md b/acceptance/bundle/templates/default-sql/output/my_default_sql/README.md index 9d915327db..5d55c2cf74 100644 --- a/acceptance/bundle/templates/default-sql/output/my_default_sql/README.md +++ b/acceptance/bundle/templates/default-sql/output/my_default_sql/README.md @@ -21,7 +21,7 @@ The 'my_default_sql' project was generated by using the default-sql template. This deploys everything that's defined for this project. For example, the default template would deploy a job called `[dev yourname] my_default_sql_job` to your workspace. - You can find that job by opening your workpace and clicking on **Workflows**. + You can find that job by opening your workpace and clicking on **Jobs**. 4. 
Similarly, to deploy a production copy, type: ``` diff --git a/acceptance/bundle/templates/default-sql/output/my_default_sql/src/orders_daily.sql b/acceptance/bundle/templates/default-sql/output/my_default_sql/src/orders_daily.sql index ea7b80b54f..27bf1eed46 100644 --- a/acceptance/bundle/templates/default-sql/output/my_default_sql/src/orders_daily.sql +++ b/acceptance/bundle/templates/default-sql/output/my_default_sql/src/orders_daily.sql @@ -1,4 +1,4 @@ --- This query is executed using Databricks Workflows (see resources/my_default_sql_sql.job.yml) +-- This query is executed using Databricks Jobs (see resources/my_default_sql_sql.job.yml) USE CATALOG {{catalog}}; USE IDENTIFIER({{schema}}); diff --git a/acceptance/bundle/templates/default-sql/output/my_default_sql/src/orders_raw.sql b/acceptance/bundle/templates/default-sql/output/my_default_sql/src/orders_raw.sql index 79b1354cf4..d0d1afa660 100644 --- a/acceptance/bundle/templates/default-sql/output/my_default_sql/src/orders_raw.sql +++ b/acceptance/bundle/templates/default-sql/output/my_default_sql/src/orders_raw.sql @@ -1,4 +1,4 @@ --- This query is executed using Databricks Workflows (see resources/my_default_sql_sql.job.yml) +-- This query is executed using Databricks Jobs (see resources/my_default_sql_sql.job.yml) -- -- The streaming table below ingests all JSON files in /databricks-datasets/retail-org/sales_orders/ -- See also https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-create-streaming-table.html diff --git a/acceptance/bundle/templates/experimental-jobs-as-code/input.json b/acceptance/bundle/templates/experimental-jobs-as-code/input.json index 5c5fcfc385..b67fd93769 100644 --- a/acceptance/bundle/templates/experimental-jobs-as-code/input.json +++ b/acceptance/bundle/templates/experimental-jobs-as-code/input.json @@ -2,5 +2,5 @@ "project_name": "my_jobs_as_code", "include_notebook": "yes", "include_python": "yes", - "include_dlt": "yes" + "include_sdp": "yes" } diff --git 
a/acceptance/bundle/templates/experimental-jobs-as-code/output.txt b/acceptance/bundle/templates/experimental-jobs-as-code/output.txt index 089a5c53a4..19931a2f73 100644 --- a/acceptance/bundle/templates/experimental-jobs-as-code/output.txt +++ b/acceptance/bundle/templates/experimental-jobs-as-code/output.txt @@ -95,7 +95,7 @@ Warning: Ignoring Databricks CLI version constraint for development build. Requi "libraries": [ { "notebook": { - "path": "/Workspace/Users/[USERNAME]/.bundle/my_jobs_as_code/dev/files/src/dlt_pipeline" + "path": "/Workspace/Users/[USERNAME]/.bundle/my_jobs_as_code/dev/files/src/sdp_pipeline" } } ], diff --git a/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/README.md b/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/README.md index 6bfac07da0..1eddbfdc8e 100644 --- a/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/README.md +++ b/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/README.md @@ -40,7 +40,7 @@ The 'my_jobs_as_code' project was generated by using the "Jobs as code" template This deploys everything that's defined for this project. For example, the default template would deploy a job called `[dev yourname] my_jobs_as_code_job` to your workspace. - You can find that job by opening your workspace and clicking on **Workflows**. + You can find that job by opening your workspace and clicking on **Jobs**. 3. 
Similarly, to deploy a production copy, type: ``` diff --git a/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/pyproject.toml b/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/pyproject.toml index 4478dace35..06b7cde899 100644 --- a/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/pyproject.toml +++ b/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/pyproject.toml @@ -32,7 +32,7 @@ where = ["src"] dev-dependencies = [ "databricks-bundles==x.y.z", - ## Add code completion support for DLT + ## Add code completion support for Spark Declarative Pipelines # "databricks-dlt", ## databricks-connect can be used to run parts of this project locally. diff --git a/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/resources/my_jobs_as_code_pipeline.py b/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/resources/my_jobs_as_code_pipeline.py index 9d83e573a9..5e86c5c232 100644 --- a/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/resources/my_jobs_as_code_pipeline.py +++ b/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/resources/my_jobs_as_code_pipeline.py @@ -9,7 +9,7 @@ "libraries": [ { "notebook": { - "path": "src/dlt_pipeline.ipynb", + "path": "src/sdp_pipeline.ipynb", }, }, ], diff --git a/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/src/notebook.ipynb b/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/src/notebook.ipynb index 227c7cc558..247706b44f 100644 --- a/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/src/notebook.ipynb +++ b/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/src/notebook.ipynb @@ -14,7 +14,7 @@ "source": [ "# Default notebook\n", "\n", - "This default notebook is executed using Databricks Workflows as defined in 
resources/my_jobs_as_code.job.yml." + "This default notebook is executed using Databricks Jobs as defined in resources/my_jobs_as_code.job.yml." ] }, { diff --git a/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/src/dlt_pipeline.ipynb b/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/src/sdp_pipeline.ipynb similarity index 96% rename from acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/src/dlt_pipeline.ipynb rename to acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/src/sdp_pipeline.ipynb index d651c00422..eec7c3f2da 100644 --- a/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/src/dlt_pipeline.ipynb +++ b/acceptance/bundle/templates/experimental-jobs-as-code/output/my_jobs_as_code/src/sdp_pipeline.ipynb @@ -12,7 +12,7 @@ } }, "source": [ - "# DLT pipeline\n", + "# SDP pipeline\n", "\n", "This Lakeflow Spark Declarative Pipeline definition is executed using a pipeline defined in resources/my_jobs_as_code.pipeline.yml." ] @@ -72,7 +72,7 @@ "notebookMetadata": { "pythonIndentUnit": 2 }, - "notebookName": "dlt_pipeline", + "notebookName": "sdp_pipeline", "widgets": {} }, "kernelspec": { diff --git a/bundle/config/mutator/resourcemutator/capture_uc_dependencies.go b/bundle/config/mutator/resourcemutator/capture_uc_dependencies.go index 92d22333e7..61c2fed259 100644 --- a/bundle/config/mutator/resourcemutator/capture_uc_dependencies.go +++ b/bundle/config/mutator/resourcemutator/capture_uc_dependencies.go @@ -12,7 +12,7 @@ import ( type captureUCDependencies struct{} -// If a user defines a UC schema in the bundle, they can refer to it in DLT pipelines, +// If a user defines a UC schema in the bundle, they can refer to it in SDP pipelines, // UC Volumes, Registered Models, Quality Monitors, or Model Serving Endpoints using the // `${resources.schemas..name}` syntax. 
Using this syntax allows TF to capture // the deploy time dependency this resource has on the schema and deploy changes to the @@ -110,7 +110,7 @@ func (m *captureUCDependencies) Apply(ctx context.Context, b *bundle.Bundle) dia if p == nil { continue } - // "schema" and "target" have the same semantics in the DLT API but are mutually + // "schema" and "target" have the same semantics in the SDP API but are mutually // exclusive i.e. only one can be set at a time. p.Schema = resolveSchema(b, p.Catalog, p.Schema) p.Target = resolveSchema(b, p.Catalog, p.Target) diff --git a/bundle/config/mutator/resourcemutator/run_as.go b/bundle/config/mutator/resourcemutator/run_as.go index 7360048213..074b6d07db 100644 --- a/bundle/config/mutator/resourcemutator/run_as.go +++ b/bundle/config/mutator/resourcemutator/run_as.go @@ -178,7 +178,7 @@ func setRunAsForAlerts(b *bundle.Bundle) { } } -// Legacy behavior of run_as for DLT pipelines. Available under the experimental.use_run_as_legacy flag. +// Legacy behavior of run_as for SDP pipelines. Available under the experimental.use_run_as_legacy flag. // Only available to unblock customers stuck due to breaking changes in https://github.com/databricks/cli/pull/1233 func setPipelineOwnersToRunAsIdentity(b *bundle.Bundle) { runAs := b.Config.RunAs @@ -228,7 +228,7 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { return diag.Diagnostics{ { Severity: diag.Warning, - Summary: "You are using the legacy mode of run_as. The support for this mode is experimental and might be removed in a future release of the CLI. In order to run the DLT pipelines in your DAB as the run_as user this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC.", + Summary: "You are using the legacy mode of run_as. 
The support for this mode is experimental and might be removed in a future release of the CLI. In order to run the Spark Declarative Pipelines in your DAB as the run_as user this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC.", Paths: []dyn.Path{dyn.MustPathFromString("experimental.use_legacy_run_as")}, Locations: b.Config.GetLocations("experimental.use_legacy_run_as"), }, diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml index 459d2c19f6..dedd29b198 100644 --- a/bundle/internal/schema/annotations.yml +++ b/bundle/internal/schema/annotations.yml @@ -327,9 +327,9 @@ github.com/databricks/cli/bundle/config.Root: ``` "run_as": "description": |- - The identity to use when running Declarative Automation Bundles workflows. + The identity to use when running Declarative Automation Bundles resources. "markdown_description": |- - The identity to use when running Declarative Automation Bundles workflows. See [\_](/dev-tools/bundles/run-as.md). + The identity to use when running Declarative Automation Bundles resources. See [\_](/dev-tools/bundles/run-as.md). "scripts": "description": |- PLACEHOLDER @@ -420,7 +420,7 @@ github.com/databricks/cli/bundle/config.Workspace: The Databricks account ID. "artifact_path": "description": |- - The artifact path to use within the workspace for both deployments and workflow runs + The artifact path to use within the workspace for both deployments and job runs "auth_type": "description": |- The authentication type. 
@@ -450,7 +450,7 @@ github.com/databricks/cli/bundle/config.Workspace: Experimental feature flag to indicate if the host is a unified host "file_path": "description": |- - The file path to use within the workspace for both deployments and workflow runs + The file path to use within the workspace for both deployments and job runs "google_service_account": "description": |- The Google service account name diff --git a/bundle/internal/schema/annotations_openapi_overrides.yml b/bundle/internal/schema/annotations_openapi_overrides.yml index 611289083e..921c35e55a 100644 --- a/bundle/internal/schema/annotations_openapi_overrides.yml +++ b/bundle/internal/schema/annotations_openapi_overrides.yml @@ -328,7 +328,7 @@ github.com/databricks/cli/bundle/config/resources.ModelServingEndpoint: github.com/databricks/cli/bundle/config/resources.Pipeline: "_": "markdown_description": |- - The pipeline resource allows you to create Delta Live Tables [pipelines](/api/workspace/pipelines/create). For information about pipelines, see [_](/dlt/index.md). For a tutorial that uses the Declarative Automation Bundles template to create a pipeline, see [_](/dev-tools/bundles/pipelines-tutorial.md). + The pipeline resource allows you to create Spark Declarative [Pipelines](/api/workspace/pipelines/create). For information about pipelines, see [_](/dlt/index.md). For a tutorial that uses the Declarative Automation Bundles template to create a pipeline, see [_](/dev-tools/bundles/pipelines-tutorial.md). "markdown_examples": |- The following example defines a pipeline with the resource key `hello-pipeline`: @@ -454,7 +454,7 @@ github.com/databricks/cli/bundle/config/resources.RegisteredModel: github.com/databricks/cli/bundle/config/resources.Schema: "_": "markdown_description": |- - The schema resource type allows you to define Unity Catalog [schemas](/api/workspace/schemas/create) for tables and other assets in your workflows and pipelines created as part of a bundle. 
A schema, different from other resource types, has the following limitations: + The schema resource type allows you to define Unity Catalog [schemas](/api/workspace/schemas/create) for tables and other assets in your jobs and pipelines created as part of a bundle. A schema, different from other resource types, has the following limitations: - The owner of a schema resource is always the deployment user, and cannot be changed. If `run_as` is specified in the bundle, it will be ignored by operations on the schema. - Only fields supported by the corresponding [Schemas object create API](/api/workspace/schemas/create) are available for the schema resource. For example, `enable_predictive_optimization` is not supported as it is only available on the [update API](/api/workspace/schemas/update). diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 110ab75731..25ae76ec60 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -36,12 +36,12 @@ func approvalForDeploy(ctx context.Context, b *bundle.Bundle, plan *deployplan.P types := []deployplan.ActionType{deployplan.Recreate, deployplan.Delete} schemaActions := filterGroup(actions, "schemas", types...) - dltActions := filterGroup(actions, "pipelines", types...) + pipelineActions := filterGroup(actions, "pipelines", types...) volumeActions := filterGroup(actions, "volumes", types...) dashboardActions := filterGroup(actions, "dashboards", types...) // We don't need to display any prompts in this case. - if len(schemaActions) == 0 && len(dltActions) == 0 && len(volumeActions) == 0 && len(dashboardActions) == 0 { + if len(schemaActions) == 0 && len(pipelineActions) == 0 && len(volumeActions) == 0 && len(dashboardActions) == 0 { return true, nil } @@ -56,10 +56,10 @@ func approvalForDeploy(ctx context.Context, b *bundle.Bundle, plan *deployplan.P } } - // One or more DLT pipelines is being recreated. - if len(dltActions) != 0 { + // One or more SDP pipelines is being recreated. 
+ if len(pipelineActions) != 0 { cmdio.LogString(ctx, deleteOrRecreatePipelineMessage) - for _, action := range dltActions { + for _, action := range pipelineActions { cmdio.Log(ctx, action) } } diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 993adec793..9284467c8f 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -1322,7 +1322,7 @@ } }, "additionalProperties": false, - "markdownDescription": "The pipeline resource allows you to create Delta Live Tables [pipelines](https://docs.databricks.com/api/workspace/pipelines/create). For information about pipelines, see [link](https://docs.databricks.com/dlt/index.html). For a tutorial that uses the Declarative Automation Bundles template to create a pipeline, see [link](https://docs.databricks.com/dev-tools/bundles/pipelines-tutorial.html)." + "markdownDescription": "The pipeline resource allows you to create Spark Declarative [Pipelines](https://docs.databricks.com/api/workspace/pipelines/create). For information about pipelines, see [link](https://docs.databricks.com/dlt/index.html). For a tutorial that uses the Declarative Automation Bundles template to create a pipeline, see [link](https://docs.databricks.com/dev-tools/bundles/pipelines-tutorial.html)." }, { "type": "string", @@ -1696,7 +1696,7 @@ "catalog_name", "name" ], - "markdownDescription": "The schema resource type allows you to define Unity Catalog [schemas](https://docs.databricks.com/api/workspace/schemas/create) for tables and other assets in your workflows and pipelines created as part of a bundle. A schema, different from other resource types, has the following limitations:\n\n- The owner of a schema resource is always the deployment user, and cannot be changed. 
If `run_as` is specified in the bundle, it will be ignored by operations on the schema.\n- Only fields supported by the corresponding [Schemas object create API](https://docs.databricks.com/api/workspace/schemas/create) are available for the schema resource. For example, `enable_predictive_optimization` is not supported as it is only available on the [update API](https://docs.databricks.com/api/workspace/schemas/update)." + "markdownDescription": "The schema resource type allows you to define Unity Catalog [schemas](https://docs.databricks.com/api/workspace/schemas/create) for tables and other assets in your jobs and pipelines created as part of a bundle. A schema, different from other resource types, has the following limitations:\n\n- The owner of a schema resource is always the deployment user, and cannot be changed. If `run_as` is specified in the bundle, it will be ignored by operations on the schema.\n- Only fields supported by the corresponding [Schemas object create API](https://docs.databricks.com/api/workspace/schemas/create) are available for the schema resource. For example, `enable_predictive_optimization` is not supported as it is only available on the [update API](https://docs.databricks.com/api/workspace/schemas/update)." 
}, { "type": "string", @@ -2654,7 +2654,7 @@ "$ref": "#/$defs/string" }, "artifact_path": { - "description": "The artifact path to use within the workspace for both deployments and workflow runs", + "description": "The artifact path to use within the workspace for both deployments and job runs", "$ref": "#/$defs/string" }, "auth_type": { @@ -2694,7 +2694,7 @@ "$ref": "#/$defs/bool" }, "file_path": { - "description": "The file path to use within the workspace for both deployments and workflow runs", + "description": "The file path to use within the workspace for both deployments and job runs", "$ref": "#/$defs/string" }, "google_service_account": { @@ -11907,9 +11907,9 @@ "markdownDescription": "A Map that defines the resources for the bundle, where each key is the name of the resource, and the value is a Map that defines the resource. For more information about Declarative Automation Bundles supported resources, and resource definition reference, see [link](https://docs.databricks.com/dev-tools/bundles/resources.html).\n\n```yaml\nresources:\n \u003cresource-type\u003e:\n \u003cresource-name\u003e:\n \u003cresource-field-name\u003e: \u003cresource-field-value\u003e\n```" }, "run_as": { - "description": "The identity to use when running Declarative Automation Bundles workflows.", + "description": "The identity to use when running Declarative Automation Bundles resources.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.JobRunAs", - "markdownDescription": "The identity to use when running Declarative Automation Bundles workflows. See [link](https://docs.databricks.com/dev-tools/bundles/run-as.html)." + "markdownDescription": "The identity to use when running Declarative Automation Bundles resources. See [link](https://docs.databricks.com/dev-tools/bundles/run-as.html)." 
}, "scripts": { "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config.Script" diff --git a/cmd/bundle/generate/pipeline.go b/cmd/bundle/generate/pipeline.go index dd422d7808..51d780014b 100644 --- a/cmd/bundle/generate/pipeline.go +++ b/cmd/bundle/generate/pipeline.go @@ -30,7 +30,7 @@ func NewGeneratePipelineCommand() *cobra.Command { cmd := &cobra.Command{ Use: "pipeline", Short: "Generate bundle configuration for a pipeline", - Long: `Generate bundle configuration for an existing Delta Live Tables pipeline. + Long: `Generate bundle configuration for an existing Spark Declarative Pipeline. This command downloads an existing Lakeflow Spark Declarative Pipeline's configuration and any associated notebooks, creating bundle files that you can use to deploy the pipeline to other diff --git a/cmd/bundle/open.go b/cmd/bundle/open.go index 483f5edff5..e7fa960c3d 100644 --- a/cmd/bundle/open.go +++ b/cmd/bundle/open.go @@ -57,7 +57,7 @@ func newOpenCommand() *cobra.Command { Examples: databricks bundle open # Prompts to select a resource to open - databricks bundle open my_job # Open specific job in Workflows UI + databricks bundle open my_job # Open specific job in Jobs UI databricks bundle open my_dashboard # Open dashboard in browser Use after deployment to quickly navigate to your resources in the workspace.`, diff --git a/cmd/workspace/permissions/overrides.go b/cmd/workspace/permissions/overrides.go index f5efce48ee..b8f15e6276 100644 --- a/cmd/workspace/permissions/overrides.go +++ b/cmd/workspace/permissions/overrides.go @@ -15,9 +15,9 @@ func cmdOverride(cmd *cobra.Command) { * **[Cluster policy permissions](:service:clusterpolicies)** — Manage which users can use cluster policies. - * **[Delta Live Tables pipeline permissions](:service:pipelines)** — Manage - which users can view, manage, run, cancel, or own a Delta Live Tables - pipeline. 
+ * **[Spark Declarative Pipeline permissions](:service:pipelines)** — Manage + which users can view, manage, run, cancel, or own a Spark Declarative + Pipeline. * **[Job permissions](:service:jobs)** — Manage which users can view, manage, trigger, cancel, or own a job. diff --git a/libs/template/templates/dbt-sql/README.md b/libs/template/templates/dbt-sql/README.md index 0ddce68ed3..0acd6e719f 100644 --- a/libs/template/templates/dbt-sql/README.md +++ b/libs/template/templates/dbt-sql/README.md @@ -3,7 +3,7 @@ This folder provides a template for using dbt-core with Declarative Automation Bundles. It leverages dbt-core for local development and relies on Declarative Automation Bundles for deployment (either manually or with CI/CD). In production, -dbt is executed using Databricks Workflows. +dbt is executed using Databricks Jobs. * Learn more about the dbt and its standard project structure here: https://docs.getdbt.com/docs/build/projects. * Learn more about Declarative Automation Bundles here: https://docs.databricks.com/en/dev-tools/bundles/index.html diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl index 683bde99cc..1a98bc1917 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl @@ -102,7 +102,7 @@ on CI/CD setup. ## Manually deploying to Databricks with Declarative Automation Bundles Declarative Automation Bundles can be used to deploy to Databricks and to execute -dbt commands as a job using Databricks Workflows. See +dbt commands as a job using Databricks Jobs. See https://docs.databricks.com/dev-tools/bundles/index.html to learn more. Use the Databricks CLI to deploy a development copy of this project to a workspace: @@ -117,7 +117,7 @@ is optional here.) This deploys everything that's defined for this project. 
For example, the default template would deploy a job called `[dev yourname] {{.project_name}}_job` to your workspace. -You can find that job by opening your workpace and clicking on **Workflows**. +You can find that job by opening your workpace and clicking on **Jobs**. You can also deploy to your production target directly from the command-line. The warehouse, catalog, and schema for that target are configured in `dbt_profiles/profiles.yml`. diff --git a/libs/template/templates/default-scala/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/default-scala/template/{{.project_name}}/README.md.tmpl index cc4be2586c..b0d5520713 100644 --- a/libs/template/templates/default-scala/template/{{.project_name}}/README.md.tmpl +++ b/libs/template/templates/default-scala/template/{{.project_name}}/README.md.tmpl @@ -21,7 +21,7 @@ The '{{.project_name}}' project was generated by using the default-scala templat This deploys everything that's defined for this project. For example, the default template would deploy a job called `[dev yourname] {{.project_name}}_job` to your workspace. - You can find that job by opening your workspace and clicking on **Workflows**. + You can find that job by opening your workspace and clicking on **Jobs**. 4. Similarly, to deploy a production copy, type: ``` diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/README.md.tmpl index 1377874bf7..8cb1308aab 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/README.md.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/README.md.tmpl @@ -21,7 +21,7 @@ The '{{.project_name}}' project was generated by using the default-sql template. This deploys everything that's defined for this project. For example, the default template would deploy a job called `[dev yourname] {{.project_name}}_job` to your workspace. 
- You can find that job by opening your workpace and clicking on **Workflows**. + You can find that job by opening your workspace and clicking on **Jobs**. 4. Similarly, to deploy a production copy, type: ``` diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl index 444ae4e033..913f030f33 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl @@ -1,4 +1,4 @@ --- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql.job.yml) +-- This query is executed using Databricks Jobs (see resources/{{.project_name}}_sql.job.yml) USE CATALOG {{"{{"}}catalog{{"}}"}}; USE IDENTIFIER({{"{{"}}schema{{"}}"}}); diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl index 80f6773cb3..f95e11e20a 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl @@ -1,4 +1,4 @@ --- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql.job.yml) +-- This query is executed using Databricks Jobs (see resources/{{.project_name}}_sql.job.yml) -- -- The streaming table below ingests all JSON files in /databricks-datasets/retail-org/sales_orders/ -- See also https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-create-streaming-table.html diff --git a/libs/template/templates/experimental-jobs-as-code/databricks_template_schema.json b/libs/template/templates/experimental-jobs-as-code/databricks_template_schema.json index 574ce59259..c523f61397 100644 --- 
a/libs/template/templates/experimental-jobs-as-code/databricks_template_schema.json +++ b/libs/template/templates/experimental-jobs-as-code/databricks_template_schema.json @@ -16,11 +16,11 @@ "description": "Include a stub (sample) notebook in '{{.project_name}}{{path_separator}}src'", "order": 2 }, - "include_dlt": { + "include_sdp": { "type": "string", "default": "yes", "enum": ["yes", "no"], - "description": "Include a stub (sample) Delta Live Tables pipeline in '{{.project_name}}{{path_separator}}src'", + "description": "Include a stub (sample) Spark Declarative Pipeline in '{{.project_name}}{{path_separator}}src'", "order": 3 }, "include_python": { diff --git a/libs/template/templates/experimental-jobs-as-code/template/__preamble.tmpl b/libs/template/templates/experimental-jobs-as-code/template/__preamble.tmpl index bd284b0252..d2bbe23e2d 100644 --- a/libs/template/templates/experimental-jobs-as-code/template/__preamble.tmpl +++ b/libs/template/templates/experimental-jobs-as-code/template/__preamble.tmpl @@ -4,7 +4,7 @@ This file only contains template directives; it is skipped for the actual output {{skip "__preamble"}} -{{$notDLT := not (eq .include_dlt "yes")}} +{{$notSDP := not (eq .include_sdp "yes")}} {{$notNotebook := not (eq .include_notebook "yes")}} {{$notPython := not (eq .include_python "yes")}} @@ -13,8 +13,8 @@ This file only contains template directives; it is skipped for the actual output {{skip "{{.project_name}}/tests/main_test.py"}} {{end}} -{{if $notDLT}} - {{skip "{{.project_name}}/src/dlt_pipeline.ipynb"}} +{{if $notSDP}} + {{skip "{{.project_name}}/src/sdp_pipeline.ipynb"}} {{skip "{{.project_name}}/resources/{{.project_name}}_pipeline.py"}} {{end}} @@ -22,7 +22,7 @@ This file only contains template directives; it is skipped for the actual output {{skip "{{.project_name}}/src/notebook.ipynb"}} {{end}} -{{if (and $notDLT $notNotebook $notPython)}} +{{if (and $notSDP $notNotebook $notPython)}} {{skip 
"{{.project_name}}/resources/{{.project_name}}_job.py"}} {{else}} {{skip "{{.project_name}}/resources/.gitkeep"}} diff --git a/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/README.md.tmpl index 37e7040846..6e399d0162 100644 --- a/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/README.md.tmpl +++ b/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/README.md.tmpl @@ -42,7 +42,7 @@ The '{{.project_name}}' project was generated by using the "Jobs as code" templa This deploys everything that's defined for this project. For example, the default template would deploy a job called `[dev yourname] {{.project_name}}_job` to your workspace. - You can find that job by opening your workspace and clicking on **Workflows**. + You can find that job by opening your workspace and clicking on **Jobs**. 3. Similarly, to deploy a production copy, type: ``` diff --git a/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/pyproject.toml.tmpl b/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/pyproject.toml.tmpl index 4cb0e6d9ee..63b28b0994 100644 --- a/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/pyproject.toml.tmpl +++ b/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/pyproject.toml.tmpl @@ -41,7 +41,7 @@ py-modules = [] dev-dependencies = [ "databricks-bundles=={{template "latest_databricks_bundles_version"}}", - ## Add code completion support for DLT + ## Add code completion support for Spark Declarative Pipelines # "databricks-dlt", ## databricks-connect can be used to run parts of this project locally. 
diff --git a/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/resources/{{.project_name}}_job.py.tmpl b/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/resources/{{.project_name}}_job.py.tmpl index ff554c45c5..e0f3322fcf 100644 --- a/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/resources/{{.project_name}}_job.py.tmpl +++ b/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/resources/{{.project_name}}_job.py.tmpl @@ -1,11 +1,11 @@ -{{$include_dlt := "no" -}} +{{$include_sdp := "no" -}} from databricks.bundles.jobs import Job """ The main job for {{.project_name}}. -{{- /* Clarify what this job is for for DLT-only users. */}} -{{if and (eq $include_dlt "yes") (and (eq .include_notebook "no") (eq .include_python "no")) -}} +{{- /* Clarify what this job is for, for SDP-only users. */}} +{{if and (eq $include_sdp "yes") (and (eq .include_notebook "no") (eq .include_python "no")) -}} This job runs {{.project_name}}_pipeline on a schedule. {{end -}} """ @@ -37,7 +37,7 @@ This job runs {{.project_name}}_pipeline on a schedule. }, }, {{- end -}} - {{- if (eq $include_dlt "yes") -}} + {{- if (eq $include_sdp "yes") -}} {{- "\n " -}} { "task_key": "refresh_pipeline", @@ -58,7 +58,7 @@ This job runs {{.project_name}}_pipeline on a schedule. 
{{- "\n " -}} { "task_key": "main_task", - {{- if (eq $include_dlt "yes") }} + {{- if (eq $include_sdp "yes") }} "depends_on": [ { "task_key": "refresh_pipeline", diff --git a/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/resources/{{.project_name}}_pipeline.py.tmpl b/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/resources/{{.project_name}}_pipeline.py.tmpl index c8579ae659..73c30ab543 100644 --- a/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/resources/{{.project_name}}_pipeline.py.tmpl +++ b/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/resources/{{.project_name}}_pipeline.py.tmpl @@ -13,7 +13,7 @@ from databricks.bundles.pipelines import Pipeline "libraries": [ { "notebook": { - "path": "src/dlt_pipeline.ipynb", + "path": "src/sdp_pipeline.ipynb", }, }, ], diff --git a/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/src/notebook.ipynb.tmpl b/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/src/notebook.ipynb.tmpl index 6782a053ba..fbc12f872e 100644 --- a/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/src/notebook.ipynb.tmpl +++ b/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/src/notebook.ipynb.tmpl @@ -14,7 +14,7 @@ "source": [ "# Default notebook\n", "\n", - "This default notebook is executed using Databricks Workflows as defined in resources/{{.project_name}}.job.yml." + "This default notebook is executed using Databricks Jobs as defined in resources/{{.project_name}}.job.yml." 
] }, { diff --git a/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl b/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/src/sdp_pipeline.ipynb.tmpl similarity index 97% rename from libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl rename to libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/src/sdp_pipeline.ipynb.tmpl index 62c4fb1f12..5e70f5549c 100644 --- a/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl +++ b/libs/template/templates/experimental-jobs-as-code/template/{{.project_name}}/src/sdp_pipeline.ipynb.tmpl @@ -12,7 +12,7 @@ } }, "source": [ - "# DLT pipeline\n", + "# SDP pipeline\n", "\n", "This Lakeflow Spark Declarative Pipeline definition is executed using a pipeline defined in resources/{{.project_name}}.pipeline.yml." ] @@ -86,7 +86,7 @@ "notebookMetadata": { "pythonIndentUnit": 2 }, - "notebookName": "dlt_pipeline", + "notebookName": "sdp_pipeline", "widgets": {} }, "kernelspec": {