From 020f86bed5bd089804de21ebd93ec6208c5cdf44 Mon Sep 17 00:00:00 2001 From: Kay Qiu Date: Tue, 2 Sep 2025 12:51:09 +0800 Subject: [PATCH 1/2] rename data pipeline --- .../manifest.json | 4 ++-- .../manifest.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/templates/AI-Develop RAG pipeline using SQL database in Fabric/manifest.json b/templates/AI-Develop RAG pipeline using SQL database in Fabric/manifest.json index 9a8f4a52..cf0a015a 100644 --- a/templates/AI-Develop RAG pipeline using SQL database in Fabric/manifest.json +++ b/templates/AI-Develop RAG pipeline using SQL database in Fabric/manifest.json @@ -1,6 +1,6 @@ { "name": "AI-Develop RAG pipeline using SQL database in Fabric", - "description":"Use this Retrieval Augmented Generation (RAG) data pipeline template to get your data ready in SQL Database in fabric for building Generative AI and Agentic AI applications. \n\n Triggered by Azure Blob Storage events, the pipeline copies the file to the Lakehouse, extracts the content from the file, chunks the content, redacts any PII information, generates embeddings, and stores the chunks and embeddings in SQL Database in Fabric.\n\n As a part of configuring the pipeline, you will be required to provide values for predefined variables such as \"apiKey\", \"cognitiveServiceEndpoint\", \"openAIEndpoint\", \"openAIKey\" etc., by selecting the pipeline canvas and navigating to the variables menu. Additionally, the pipeline configuration will also depend on Python Notebook and UserDataFunction. \n\nThe source files and documentation for this pipeline can can be found at:\n https://github.com/Azure-Samples/fabric-sqldb-ai-ragpipeline", + "description":"Use this Retrieval Augmented Generation (RAG) pipeline template to get your data ready in SQL Database in fabric for building Generative AI and Agentic AI applications. 
\n\n Triggered by Azure Blob Storage events, the pipeline copies the file to the Lakehouse, extracts the content from the file, chunks the content, redacts any PII information, generates embeddings, and stores the chunks and embeddings in SQL Database in Fabric.\n\n As a part of configuring the pipeline, you will be required to provide values for predefined variables such as \"apiKey\", \"cognitiveServiceEndpoint\", \"openAIEndpoint\", \"openAIKey\" etc., by selecting the pipeline canvas and navigating to the variables menu. Additionally, the pipeline configuration will also depend on Python Notebook and UserDataFunction. \n\nThe source files and documentation for this pipeline can be found at:\n https://github.com/Azure-Samples/fabric-sqldb-ai-ragpipeline", "image": "Notebookazureblob_to_lakehouseFunctionsExtract TextIf ConditionText ExtractionResultsTrue+FalseTextExtractio...TextExtractio...+FunctionsGenerate ChunksFunctionsRedact PII DataIf ConditionPII Reaction ResultsTrue+FalseRedactionFailure...TextRedactio...+FunctionsGenerateEmbeddingsIf ConditionGenerateEmbeddings...True+FalseGenerateEmbeddi...GenerateEmbeddi...+FunctionsCreate DatabaseObjectsFunctionsSave Data", "icons": [ "TridentNotebook", @@ -19,7 +19,7 @@ "author": "sqlgenai@microsoft.com", "annotations": ["AI", "RAG", "SQL Database", "Embedding", "Chunking","Vector", "Redact PII","Azure Blob Storage"], "services": ["SQL Database in Fabric", "Azure Blob Storage"], - "categories": ["AI", "SQL Database", "Data Pipeline"], + "categories": ["AI", "SQL Database", "Pipeline"], "scope": ["PBI"], "documentation" : "https://github.com/Azure-Samples/fabric-sqldb-ai-ragpipeline" } \ No newline at end of file diff --git a/templates/Copy data from sample data WWI to Datawarehouse/manifest.json b/templates/Copy data from sample data WWI to Datawarehouse/manifest.json index 72ff6686..be2c15ee 100644 --- a/templates/Copy data from sample data WWI to Datawarehouse/manifest.json +++ b/templates/Copy data from 
sample data WWI to Datawarehouse/manifest.json @@ -1,6 +1,6 @@ { "name": "Copy data from sample data to Warehouse", - "description": "Use this template to copy data from sample data (Retail Data Model from Wide World Importers) to your Fabric Warehouse.\n\nThis template is pre-connected to sample data (Retail Data Model from Wide World Importers) on the source side, which allows you to successfully build and run your pipeline in 5 minutes. \n\nIf you want to copy data from other sample data supported in data pipelines, please use the Copy assistant to create your pipeline.", + "description": "Use this template to copy data from sample data (Retail Data Model from Wide World Importers) to your Fabric Warehouse.\n\nThis template is pre-connected to sample data (Retail Data Model from Wide World Importers) on the source side, which allows you to successfully build and run your pipeline in 5 minutes. \n\nIf you want to copy data from other sample data supported in pipelines, please use the Copy assistant to create your pipeline.", "image": "Copy dataCopy_d5n", "icons": ["Copy"], "requires": { From 6f9d565863c58c74681cc338b579d1d583b87eb6 Mon Sep 17 00:00:00 2001 From: Kay Qiu Date: Tue, 2 Sep 2025 13:34:40 +0800 Subject: [PATCH 2/2] rename data pipeline --- .../AI-Develop RAG pipeline using SQL database in Fabric.json | 2 +- .../manifest.json | 4 ++-- .../manifest.json | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/templates/AI-Develop RAG pipeline using SQL database in Fabric/AI-Develop RAG pipeline using SQL database in Fabric.json b/templates/AI-Develop RAG pipeline using SQL database in Fabric/AI-Develop RAG pipeline using SQL database in Fabric.json index 65b5a529..717fffa4 100644 --- a/templates/AI-Develop RAG pipeline using SQL database in Fabric/AI-Develop RAG pipeline using SQL database in Fabric.json +++ b/templates/AI-Develop RAG pipeline using SQL database in Fabric/AI-Develop RAG pipeline using SQL database in Fabric.json @@ -10,7 
+10,7 @@ "resources": [ { "name": "AI-Develop RAG pipeline using SQL database in Fabric", - "description": "This Retrieval Augmented Generation (RAG) data pipeline will get your data ready for building Generative AI and Agentic AI applications. Triggered by Azure Blob Storage events, the pipeline copies the file to the Lakehouse, extracts the content from the file, chunks the content, redacts any PII information, generates embeddings, and stores the chunks and embeddings in SQL Database in Fabric. Documentation: https://github.com/Azure-Samples/fabric-sqldb-ai-ragpipeline", + "description": "This Retrieval Augmented Generation (RAG) pipeline will get your data ready for building Generative AI and Agentic AI applications. Triggered by Azure Blob Storage events, the pipeline copies the file to the Lakehouse, extracts the content from the file, chunks the content, redacts any PII information, generates embeddings, and stores the chunks and embeddings in SQL Database in Fabric. Documentation: https://github.com/Azure-Samples/fabric-sqldb-ai-ragpipeline", "type": "pipelines", "apiVersion": "2018-06-01", "properties": { diff --git a/templates/AI-Develop RAG pipeline using SQL database in Fabric/manifest.json b/templates/AI-Develop RAG pipeline using SQL database in Fabric/manifest.json index 9a8f4a52..cf0a015a 100644 --- a/templates/AI-Develop RAG pipeline using SQL database in Fabric/manifest.json +++ b/templates/AI-Develop RAG pipeline using SQL database in Fabric/manifest.json @@ -1,6 +1,6 @@ { "name": "AI-Develop RAG pipeline using SQL database in Fabric", - "description":"Use this Retrieval Augmented Generation (RAG) data pipeline template to get your data ready in SQL Database in fabric for building Generative AI and Agentic AI applications. 
\n\n Triggered by Azure Blob Storage events, the pipeline copies the file to the Lakehouse, extracts the content from the file, chunks the content, redacts any PII information, generates embeddings, and stores the chunks and embeddings in SQL Database in Fabric.\n\n As a part of configuring the pipeline, you will be required to provide values for predefined variables such as \"apiKey\", \"cognitiveServiceEndpoint\", \"openAIEndpoint\", \"openAIKey\" etc., by selecting the pipeline canvas and navigating to the variables menu. Additionally, the pipeline configuration will also depend on Python Notebook and UserDataFunction. \n\nThe source files and documentation for this pipeline can can be found at:\n https://github.com/Azure-Samples/fabric-sqldb-ai-ragpipeline", + "description":"Use this Retrieval Augmented Generation (RAG) pipeline template to get your data ready in SQL Database in fabric for building Generative AI and Agentic AI applications. \n\n Triggered by Azure Blob Storage events, the pipeline copies the file to the Lakehouse, extracts the content from the file, chunks the content, redacts any PII information, generates embeddings, and stores the chunks and embeddings in SQL Database in Fabric.\n\n As a part of configuring the pipeline, you will be required to provide values for predefined variables such as \"apiKey\", \"cognitiveServiceEndpoint\", \"openAIEndpoint\", \"openAIKey\" etc., by selecting the pipeline canvas and navigating to the variables menu. Additionally, the pipeline configuration will also depend on Python Notebook and UserDataFunction. 
\n\nThe source files and documentation for this pipeline can be found at:\n https://github.com/Azure-Samples/fabric-sqldb-ai-ragpipeline", "image": "Notebookazureblob_to_lakehouseFunctionsExtract TextIf ConditionText ExtractionResultsTrue+FalseTextExtractio...TextExtractio...+FunctionsGenerate ChunksFunctionsRedact PII DataIf ConditionPII Reaction ResultsTrue+FalseRedactionFailure...TextRedactio...+FunctionsGenerateEmbeddingsIf ConditionGenerateEmbeddings...True+FalseGenerateEmbeddi...GenerateEmbeddi...+FunctionsCreate DatabaseObjectsFunctionsSave Data", "icons": [ "TridentNotebook", @@ -19,7 +19,7 @@ "author": "sqlgenai@microsoft.com", "annotations": ["AI", "RAG", "SQL Database", "Embedding", "Chunking","Vector", "Redact PII","Azure Blob Storage"], "services": ["SQL Database in Fabric", "Azure Blob Storage"], - "categories": ["AI", "SQL Database", "Data Pipeline"], + "categories": ["AI", "SQL Database", "Pipeline"], "scope": ["PBI"], "documentation" : "https://github.com/Azure-Samples/fabric-sqldb-ai-ragpipeline" } \ No newline at end of file diff --git a/templates/Copy data from sample data WWI to Datawarehouse/manifest.json b/templates/Copy data from sample data WWI to Datawarehouse/manifest.json index 72ff6686..be2c15ee 100644 --- a/templates/Copy data from sample data WWI to Datawarehouse/manifest.json +++ b/templates/Copy data from sample data WWI to Datawarehouse/manifest.json @@ -1,6 +1,6 @@ { "name": "Copy data from sample data to Warehouse", - "description": "Use this template to copy data from sample data (Retail Data Model from Wide World Importers) to your Fabric Warehouse.\n\nThis template is pre-connected to sample data (Retail Data Model from Wide World Importers) on the source side, which allows you to successfully build and run your pipeline in 5 minutes. 
\n\nIf you want to copy data from other sample data supported in data pipelines, please use the Copy assistant to create your pipeline.", + "description": "Use this template to copy data from sample data (Retail Data Model from Wide World Importers) to your Fabric Warehouse.\n\nThis template is pre-connected to sample data (Retail Data Model from Wide World Importers) on the source side, which allows you to successfully build and run your pipeline in 5 minutes. \n\nIf you want to copy data from other sample data supported in pipelines, please use the Copy assistant to create your pipeline.", "image": "Copy dataCopy_d5n", "icons": ["Copy"], "requires": {