diff --git a/docs.json b/docs.json
index 47a1b692..e42e4f80 100644
--- a/docs.json
+++ b/docs.json
@@ -68,7 +68,9 @@
"group": "Application Orchestration",
"pages": [
"en/guides/application-orchestrate/readme",
+ "en/guides/application-orchestrate/creating-an-application",
"en/guides/application-orchestrate/chatbot-application",
+ "en/guides/application-orchestrate/multiple-llms-debugging",
"en/guides/application-orchestrate/text-generator",
"en/guides/application-orchestrate/agent",
{
@@ -90,6 +92,8 @@
"group": "Node Description",
"pages": [
"en/guides/workflow/node/start",
+ "en/guides/workflow/node/end",
+ "en/guides/workflow/node/answer",
"en/guides/workflow/node/llm",
"en/guides/workflow/node/knowledge-retrieval",
"en/guides/workflow/node/question-classifier",
@@ -105,8 +109,6 @@
"en/guides/workflow/node/http-request",
"en/guides/workflow/node/agent",
"en/guides/workflow/node/tools",
- "en/guides/workflow/node/end",
- "en/guides/workflow/node/answer",
"en/guides/workflow/node/loop"
]
},
@@ -175,10 +177,10 @@
},
{
"group": "Publishing",
- "pages": [
- {
- "group": "Publish as a Web App",
+ "pages": [ {
+ "group": "Publish as a Single-page Web App",
"pages": [
+ "en/guides/application-publishing/launch-your-webapp-quickly/README",
"en/guides/application-publishing/launch-your-webapp-quickly/web-app-settings",
"en/guides/application-publishing/launch-your-webapp-quickly/text-generator",
"en/guides/application-publishing/launch-your-webapp-quickly/conversation-application"
@@ -204,8 +206,32 @@
{
"group": "Integrate External Ops Tools",
"pages": [
+ "en/guides/monitoring/integrate-external-ops-tools/README",
+ "en/guides/monitoring/integrate-external-ops-tools/integrate-langsmith",
"en/guides/monitoring/integrate-external-ops-tools/integrate-langfuse",
- "en/guides/monitoring/integrate-external-ops-tools/integrate-langsmith"
+ "en/guides/monitoring/integrate-external-ops-tools/integrate-opik"
+ ]
+ }
+ ]
+ },
+ {
+ "group": "Extensions",
+ "pages": [
+ {
+ "group": "API-Based Extension",
+ "pages": [
+ "en/guides/extension/api-based-extension/README",
+ "en/guides/extension/api-based-extension/external-data-tool",
+ "en/guides/extension/api-based-extension/cloudflare-workers",
+ "en/guides/extension/api-based-extension/moderation"
+ ]
+ },
+ {
+ "group": "Code-Based Extension",
+ "pages": [
+ "en/guides/extension/code-based-extension/README",
+ "en/guides/extension/code-based-extension/external-data-tool",
+ "en/guides/extension/code-based-extension/moderation"
]
}
]
@@ -213,6 +239,7 @@
{
"group": "Collaboration",
"pages": [
+ "en/guides/workspace/README",
"en/guides/workspace/app",
"en/guides/workspace/invite-and-manage-members"
]
@@ -352,7 +379,7 @@
"group": "Development",
"pages": [
{
- "group": "DifySandbox",
+ "group": "Backend",
"pages": [
"en/development/backend/sandbox/README",
"en/development/backend/sandbox/contribution"
@@ -389,7 +416,6 @@
"en/learn-more/use-cases/integrate-deepseek-to-build-an-ai-app",
"en/learn-more/use-cases/private-ai-ollama-deepseek-dify",
"en/learn-more/use-cases/build-an-notion-ai-assistant",
- "en/learn-more/use-cases/building-an-ai-thesis-slack-bot",
"en/learn-more/use-cases/create-a-midjourney-prompt-bot-with-dify",
"en/learn-more/use-cases/create-an-ai-chatbot-with-business-data-in-minutes",
"en/learn-more/use-cases/how-to-integrate-dify-chatbot-to-your-wix-website",
@@ -432,8 +458,8 @@
"group": "User Agreement",
"pages": [
"en/policies/agreement/README",
- "https://dify.ai/terms",
- "https://dify.ai/privacy",
+ "en/policies/agreement/tos",
+ "en/policies/agreement/privacy",
"en/policies/agreement/get-compliance-report"
]
}
@@ -928,8 +954,8 @@
"group": "用户协议",
"pages": [
"zh-hans/policies/agreement/README",
- "https://dify.ai/terms",
- "https://dify.ai/privacy",
+ "zh-hans/policies/agreement/tos",
+ "zh-hans/policies/agreement/privacy",
"zh-hans/policies/agreement/get-compliance-report"
]
}
@@ -1133,7 +1159,8 @@
"group": "外部Opsツール統合",
"pages": [
"ja-jp/guides/monitoring/integrate-external-ops-tools/integrate-langfuse",
- "ja-jp/guides/monitoring/integrate-external-ops-tools/integrate-langsmith"
+ "ja-jp/guides/monitoring/integrate-external-ops-tools/integrate-langsmith",
+ "ja-jp/guides/monitoring/integrate-external-ops-tools/integrate-opik"
]
}
]
@@ -1385,8 +1412,8 @@
"group": "ユーザ規約",
"pages": [
"ja-jp/policies/agreement/README",
- "https://dify.ai/terms",
- "https://dify.ai/privacy",
+ "ja-jp/policies/agreement/tos",
+ "ja-jp/policies/agreement/privacy",
"ja-jp/policies/agreement/get-compliance-report"
]
}
@@ -1399,17 +1426,29 @@
]
},
"redirects": [
+ {
+ "source": "plugins/schema-definition/model",
+ "destination": "en/plugins/schema-definition/model/model-designing-rules"
+ },
+ {
+ "source": "plugins/schema-definition",
+ "destination": "en/plugins/schema-definition/manifest"
+ },
+ {
+ "source": "guides/application-orchestrate/app-toolkits",
+      "destination": "en/guides/application-orchestrate/app-toolkits/readme"
+ },
{
"source": "ja-jp/plugins/plugin-auto-publish-pr",
- "destination": "ja-jp/plugins/publish-plugins/plugin-auto-publish-pr.mdx"
+ "destination": "ja-jp/plugins/publish-plugins/plugin-auto-publish-pr"
},
{
"source": "zh-hans/plugins/plugin-auto-publish-pr",
- "destination": "zh-hans/plugins/publish-plugins/plugin-auto-publish-pr.mdx"
+ "destination": "zh-hans/plugins/publish-plugins/plugin-auto-publish-pr"
},
{
"source": "en/plugins/plugin-auto-publish-pr",
- "destination": "en/plugins/publish-plugins/plugin-auto-publish-pr.mdx"
+ "destination": "en/plugins/publish-plugins/plugin-auto-publish-pr"
},
{
"source": "zh-hans/getting-started/install-self-hosted",
@@ -2292,16 +2331,16 @@
"destination": "/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/node"
},
{
- "source": "/plugins/best-practice/readme",
- "destination": "/en/plugins/best-practice/readme"
+ "source": "/plugins/best-practice",
+ "destination": "/en/plugins/best-practice/README"
},
{
"source": "/plugins/best-practice/develop-a-slack-bot-plugin",
"destination": "/en/plugins/best-practice/develop-a-slack-bot-plugin"
},
{
- "source": "/plugins/publish-plugins/readme",
- "destination": "/en/plugins/publish-plugins/readme"
+ "source": "/plugins/publish-plugins",
+ "destination": "/en/plugins/publish-plugins/README"
},
{
"source": "/plugins/publish-plugins/publish-to-dify-marketplace/readme",
@@ -2328,8 +2367,8 @@
"destination": "/en/plugins/faq"
},
{
- "source": "/development/backend/readme",
- "destination": "/en/development/backend/readme"
+ "source": "/development/backend",
+ "destination": "/en/development/backend/sandbox/README"
},
{
"source": "/development/backend/sandbox/readme",
@@ -2340,8 +2379,8 @@
"destination": "/en/development/backend/sandbox/contribution"
},
{
- "source": "/development/models-integration/readme",
- "destination": "/en/development/models-integration/readme"
+ "source": "/development/models-integration",
+ "destination": "/en/development/models-integration/hugging-face"
},
{
"source": "/development/models-integration/hugging-face",
@@ -2380,16 +2419,16 @@
"destination": "/en/development/models-integration/aws-bedrock-deepseek"
},
{
- "source": "/development/migration/readme",
- "destination": "/en/development/migration/readme"
+ "source": "/development/migration",
+ "destination": "/en/development/migration/migrate-to-v1"
},
{
"source": "/development/migration/migrate-to-v1",
"destination": "/en/development/migration/migrate-to-v1"
},
{
- "source": "/learn-more/use-cases/readme",
- "destination": "/en/learn-more/use-cases/readme"
+ "source": "/learn-more/use-cases",
+ "destination": "/en/learn-more/use-cases/integrate-deepseek-to-build-an-ai-app"
},
{
"source": "/learn-more/use-cases/integrate-deepseek-to-build-an-ai-app",
@@ -2428,16 +2467,16 @@
"destination": "/en/learn-more/use-cases/building-an-ai-thesis-slack-bot"
},
{
- "source": "/learn-more/extended-reading/readme",
- "destination": "/en/learn-more/extended-reading/readme"
+ "source": "/learn-more/extended-reading",
+ "destination": "/en/learn-more/extended-reading/what-is-llmops"
},
{
"source": "/learn-more/extended-reading/what-is-llmops",
"destination": "/en/learn-more/extended-reading/what-is-llmops"
},
{
- "source": "/learn-more/extended-reading/retrieval-augment/readme",
- "destination": "/en/learn-more/extended-reading/retrieval-augment/readme"
+ "source": "/learn-more/extended-reading/retrieval-augment",
+ "destination": "/en/learn-more/extended-reading/retrieval-augment/README"
},
{
"source": "/learn-more/extended-reading/retrieval-augment/hybrid-search",
@@ -2456,8 +2495,8 @@
"destination": "/en/learn-more/extended-reading/how-to-use-json-schema-in-dify"
},
{
- "source": "/learn-more/faq/readme",
- "destination": "/en/learn-more/faq/readme"
+ "source": "/learn-more/faq",
+ "destination": "/en/learn-more/faq/install-faq"
},
{
"source": "/learn-more/faq/install-faq",
@@ -2485,7 +2524,7 @@
},
{
"source": "/features/workflow",
- "destination": "/en/features/workflow"
+ "destination": "/en/guides/workflow/README"
}
],
"navbar": {
@@ -2504,7 +2543,7 @@
"footer": {
"socials": {
"x": "https://x.com/dify_ai",
- "github": "https://github.com/langgenius/dify-docs",
+ "github": "https://github.com/langgenius/dify-docs-mintlify",
"linkedin": "https://www.linkedin.com/company/langgenius"
}
}
diff --git a/en/SUMMARY.md b/en/SUMMARY.md
index 2d08be43..23149302 100644
--- a/en/SUMMARY.md
+++ b/en/SUMMARY.md
@@ -2,248 +2,248 @@
## Getting Started
-* [Welcome to Dify](README.md)
- * [Features and Specifications](getting-started/readme/features-and-specifications.md)
- * [List of Model Providers](getting-started/readme/model-providers.md)
-* [Dify Community](getting-started/install-self-hosted/README.md)
- * [Deploy with Docker Compose](getting-started/install-self-hosted/docker-compose.md)
- * [Start with Local Source Code](getting-started/install-self-hosted/local-source-code.md)
- * [Deploy with aaPanel](getting-started/install-self-hosted/bt-panel.md)
- * [Start Frontend Docker Container Separately](getting-started/install-self-hosted/start-the-frontend-docker-container.md)
- * [Environment Variables Explanation](getting-started/install-self-hosted/environments.md)
- * [FAQs](getting-started/install-self-hosted/faqs.md)
-* [Dify Cloud](getting-started/cloud.md)
-* [Dify Premium on AWS](getting-started/dify-premium-on-aws.md)
+* [Welcome to Dify](README)
+ * [Features and Specifications](getting-started/readme/features-and-specifications)
+ * [List of Model Providers](getting-started/readme/model-providers)
+* [Dify Community](getting-started/install-self-hosted/README)
+ * [Deploy with Docker Compose](getting-started/install-self-hosted/docker-compose)
+ * [Start with Local Source Code](getting-started/install-self-hosted/local-source-code)
+ * [Deploy with aaPanel](getting-started/install-self-hosted/bt-panel)
+ * [Start Frontend Docker Container Separately](getting-started/install-self-hosted/start-the-frontend-docker-container)
+ * [Environment Variables Explanation](getting-started/install-self-hosted/environments)
+ * [FAQs](getting-started/install-self-hosted/faqs)
+* [Dify Cloud](getting-started/cloud)
+* [Dify Premium on AWS](getting-started/dify-premium-on-aws)
## Guides
-* [Model](guides/model-configuration/README.md)
- * [Add New Provider](guides/model-configuration/new-provider.md)
- * [Predefined Model Integration](guides/model-configuration/predefined-model.md)
- * [Custom Model Integration](guides/model-configuration/customizable-model.md)
- * [Interfaces](guides/model-configuration/interfaces.md)
- * [Schema](guides/model-configuration/schema.md)
- * [Load Balancing](guides/model-configuration/load-balancing.md)
-* [Application Orchestration](guides/application-orchestrate/README.md)
- * [Create Application](guides/application-orchestrate/creating-an-application.md)
- * [Chatbot Application](guides/application-orchestrate/chatbot-application.md)
- * [Multiple Model Debugging](guides/application-orchestrate/multiple-llms-debugging.md)
- * [Agent](guides/application-orchestrate/agent.md)
- * [Application Toolkits](guides/application-orchestrate/app-toolkits/README.md)
- * [Moderation Tool](guides/application-orchestrate/app-toolkits/moderation-tool.md)
-* [Workflow](guides/workflow/README.md)
- * [Key Concepts](guides/workflow/key-concepts.md)
- * [Variables](guides/workflow/variables.md)
- * [Node Description](guides/workflow/node/README.md)
- * [Start](guides/workflow/node/start.md)
- * [End](guides/workflow/node/end.md)
- * [Answer](guides/workflow/node/answer.md)
- * [LLM](guides/workflow/node/llm.md)
- * [Knowledge Retrieval](guides/workflow/node/knowledge-retrieval.md)
- * [Question Classifier](guides/workflow/node/question-classifier.md)
- * [Conditional Branch IF/ELSE](guides/workflow/node/ifelse.md)
- * [Code Execution](guides/workflow/node/code.md)
- * [Template](guides/workflow/node/template.md)
- * [Doc Extractor](guides/workflow/node/doc-extractor.md)
- * [List Operator](guides/workflow/node/list-operator.md)
- * [Variable Aggregator](guides/workflow/node/variable-aggregator.md)
- * [Variable Assigner](guides/workflow/node/variable-assigner.md)
- * [Iteration](guides/workflow/node/iteration.md)
- * [Parameter Extraction](guides/workflow/node/parameter-extractor.md)
- * [HTTP Request](guides/workflow/node/http-request.md)
- * [Agent](guides/workflow/node/agent.md)
- * [Tools](guides/workflow/node/tools.md)
- * [Loop](guides/workflow/node/loop.md)
- * [Shortcut Key](guides/workflow/shortcut-key.md)
- * [Orchestrate Node](guides/workflow/orchestrate-node.md)
- * [File Upload](guides/workflow/file-upload.md)
- * [Error Handling](guides/workflow/error-handling/README.md)
- * [Predefined Error Handling Logic](guides/workflow/error-handling/predefined-error-handling-logic.md)
- * [Error Type](guides/workflow/error-handling/error-type.md)
- * [Additional Features](guides/workflow/additional-features.md)
- * [Debug and Preview](guides/workflow/debug-and-preview/README.md)
- * [Preview and Run](guides/workflow/debug-and-preview/yu-lan-yu-yun-hang.md)
- * [Step Run](guides/workflow/debug-and-preview/step-run.md)
- * [Conversation/Run Logs](guides/workflow/debug-and-preview/log.md)
- * [Checklist](guides/workflow/debug-and-preview/checklist.md)
- * [Run History](guides/workflow/debug-and-preview/history.md)
- * [Application Publishing](guides/workflow/publish.md)
- * [Bulletin: Image Upload Replaced by File Upload](guides/workflow/bulletin.md)
-* [Knowledge](guides/knowledge-base/README.md)
- * [Create Knowledge](guides/knowledge-base/create-knowledge-and-upload-documents.md)
- * [1. Import Text Data](guides/knowledge-base/create-knowledge-and-upload-documents/1.-import-text-data/README.md)
- * [1.1 Import Data from Notion](guides/knowledge-base/create-knowledge-and-upload-documents/1.-import-text-data/1.1-import-data-from-notion.md)
- * [1.2 Import Data from Website](guides/knowledge-base/sync-from-website.md)
- * [2. Choose a Chunk Mode](guides/knowledge-base/create-knowledge-and-upload-documents/2.-choose-a-chunk-mode.md)
- * [3. Select the Indexing Method and Retrieval Setting](guides/knowledge-base/create-knowledge-and-upload-documents/3.-select-the-indexing-method-and-retrieval-setting.md)
- * [Manage Knowledge](guides/knowledge-base/knowledge-and-documents-maintenance.md)
- * [Maintain Documents](guides/knowledge-base/knowledge-and-documents-maintenance/maintain-knowledge-documents.md)
- * [Maintain Knowledge via API](guides/knowledge-base/knowledge-and-documents-maintenance/maintain-dataset-via-api.md)
- * [Metadata](guides/knowledge-base/metadata.md)
- * [Integrate Knowledge Base within Application](guides/knowledge-base/integrate-knowledge-within-application.md)
- * [Retrieval Test / Citation and Attributions](guides/knowledge-base/retrieval-test-and-citation.md)
- * [Knowledge Request Rate Limit](guides/knowledge-base/knowledge-request-rate-limit.md)
- * [Connect to an External Knowledge Base](guides/knowledge-base/connect-external-knowledge.md)
- * [External Knowledge API](guides/knowledge-base/external-knowledge-api-documentation.md)
-* [Tools](guides/tools/README.md)
- * [Quick Tool Integration](guides/tools/quick-tool-integration.md)
- * [Advanced Tool Integration](guides/tools/advanced-tool-integration.md)
- * [Tool Configuration](guides/tools/tool-configuration/README.md)
- * [Google](guides/tools/tool-configuration/google.md)
- * [Bing](guides/tools/tool-configuration/bing.md)
- * [SearchApi](guides/tools/tool-configuration/searchapi.md)
- * [StableDiffusion](guides/tools/tool-configuration/stable-diffusion.md)
- * [Dall-e](guides/tools/tool-configuration/dall-e.md)
- * [Perplexity Search](guides/tools/tool-configuration/perplexity.md)
- * [AlphaVantage](guides/tools/tool-configuration/alphavantage.md)
- * [Youtube](guides/tools/tool-configuration/youtube.md)
- * [SearXNG](guides/tools/tool-configuration/searxng.md)
- * [Serper](guides/tools/tool-configuration/serper.md)
- * [SiliconFlow (Flux AI Supported)](guides/tools/tool-configuration/siliconflow.md)
- * [ComfyUI](guides/tools/tool-configuration/comfyui.md)
-* [Publishing](guides/application-publishing/README.md)
- * [Publish as a Single-page Web App](guides/application-publishing/launch-your-webapp-quickly/README.md)
- * [Web App Settings](guides/application-publishing/launch-your-webapp-quickly/web-app-settings.md)
- * [Text Generator Application](guides/application-publishing/launch-your-webapp-quickly/text-generator.md)
- * [Conversation Application](guides/application-publishing/launch-your-webapp-quickly/conversation-application.md)
- * [Embedding In Websites](guides/application-publishing/embedding-in-websites.md)
- * [Developing with APIs](guides/application-publishing/developing-with-apis.md)
- * [Re-develop Based on Frontend Templates](guides/application-publishing/based-on-frontend-templates.md)
-* [Annotation](guides/annotation/README.md)
- * [Logs and Annotation](guides/annotation/logs.md)
- * [Annotation Reply](guides/annotation/annotation-reply.md)
-* [Monitoring](guides/monitoring/README.md)
- * [Data Analysis](guides/monitoring/analysis.md)
- * [Integrate External Ops Tools](guides/monitoring/integrate-external-ops-tools/README.md)
- * [Integrate LangSmith](guides/monitoring/integrate-external-ops-tools/integrate-langsmith.md)
- * [Integrate Langfuse](guides/monitoring/integrate-external-ops-tools/integrate-langfuse.md)
- * [Integrate Opik](guides/monitoring/integrate-external-ops-tools/integrate-opik.md)
-* [Extension](guides/extension/README.md)
- * [API-Based Extension](guides/extension/api-based-extension/README.md)
- * [External Data Tool](guides/extension/api-based-extension/external-data-tool.md)
- * [Deploy API Tools with Cloudflare Workers](guides/extension/api-based-extension/cloudflare-workers.md)
- * [Moderation](guides/extension/api-based-extension/moderation.md)
- * [Code-Based Extension](guides/extension/code-based-extension/README.md)
- * [External Data Tool](guides/extension/code-based-extension/external-data-tool.md)
- * [Moderation](guides/extension/code-based-extension/moderation.md)
-* [Collaboration](guides/workspace/README.md)
- * [Discover](guides/workspace/app.md)
- * [Invite and Manage Members](guides/workspace/invite-and-manage-members.md)
-* [Management](guides/management/README.md)
- * [App Management](guides/management/app-management.md)
- * [Team Members Management](guides/management/team-members-management.md)
- * [Personal Account Management](guides/management/personal-account-management.md)
- * [Subscription Management](guides/management/subscription-management.md)
- * [Version Control](guides/management/version-control.md)
+* [Model](guides/model-configuration/README)
+ * [Add New Provider](guides/model-configuration/new-provider)
+ * [Predefined Model Integration](guides/model-configuration/predefined-model)
+ * [Custom Model Integration](guides/model-configuration/customizable-model)
+ * [Interfaces](guides/model-configuration/interfaces)
+ * [Schema](guides/model-configuration/schema)
+ * [Load Balancing](guides/model-configuration/load-balancing)
+* [Application Orchestration](guides/application-orchestrate/README)
+ * [Create Application](guides/application-orchestrate/creating-an-application)
+ * [Chatbot Application](guides/application-orchestrate/chatbot-application)
+ * [Multiple Model Debugging](guides/application-orchestrate/multiple-llms-debugging)
+ * [Agent](guides/application-orchestrate/agent)
+ * [Application Toolkits](guides/application-orchestrate/app-toolkits/README)
+ * [Moderation Tool](guides/application-orchestrate/app-toolkits/moderation-tool)
+* [Workflow](guides/workflow/README)
+ * [Key Concepts](guides/workflow/key-concepts)
+ * [Variables](guides/workflow/variables)
+ * [Node Description](guides/workflow/node/README)
+ * [Start](guides/workflow/node/start)
+ * [End](guides/workflow/node/end)
+ * [Answer](guides/workflow/node/answer)
+ * [LLM](guides/workflow/node/llm)
+ * [Knowledge Retrieval](guides/workflow/node/knowledge-retrieval)
+ * [Question Classifier](guides/workflow/node/question-classifier)
+ * [Conditional Branch IF/ELSE](guides/workflow/node/ifelse)
+ * [Code Execution](guides/workflow/node/code)
+ * [Template](guides/workflow/node/template)
+ * [Doc Extractor](guides/workflow/node/doc-extractor)
+ * [List Operator](guides/workflow/node/list-operator)
+ * [Variable Aggregator](guides/workflow/node/variable-aggregator)
+ * [Variable Assigner](guides/workflow/node/variable-assigner)
+ * [Iteration](guides/workflow/node/iteration)
+ * [Parameter Extraction](guides/workflow/node/parameter-extractor)
+ * [HTTP Request](guides/workflow/node/http-request)
+ * [Agent](guides/workflow/node/agent)
+ * [Tools](guides/workflow/node/tools)
+ * [Loop](guides/workflow/node/loop)
+ * [Shortcut Key](guides/workflow/shortcut-key)
+ * [Orchestrate Node](guides/workflow/orchestrate-node)
+ * [File Upload](guides/workflow/file-upload)
+ * [Error Handling](guides/workflow/error-handling/README)
+ * [Predefined Error Handling Logic](guides/workflow/error-handling/predefined-error-handling-logic)
+ * [Error Type](guides/workflow/error-handling/error-type)
+ * [Additional Features](guides/workflow/additional-features)
+ * [Debug and Preview](guides/workflow/debug-and-preview/README)
+ * [Preview and Run](guides/workflow/debug-and-preview/yu-lan-yu-yun-hang)
+ * [Step Run](guides/workflow/debug-and-preview/step-run)
+ * [Conversation/Run Logs](guides/workflow/debug-and-preview/log)
+ * [Checklist](guides/workflow/debug-and-preview/checklist)
+ * [Run History](guides/workflow/debug-and-preview/history)
+ * [Application Publishing](guides/workflow/publish)
+ * [Bulletin: Image Upload Replaced by File Upload](guides/workflow/bulletin)
+* [Knowledge](guides/knowledge-base/README)
+ * [Create Knowledge](guides/knowledge-base/create-knowledge-and-upload-documents)
+ * [1. Import Text Data](guides/knowledge-base/create-knowledge-and-upload-documents/1.-import-text-data/README)
+ * [1.1 Import Data from Notion](guides/knowledge-base/create-knowledge-and-upload-documents/1.-import-text-data/1.1-import-data-from-notion)
+ * [1.2 Import Data from Website](guides/knowledge-base/sync-from-website)
+ * [2. Choose a Chunk Mode](guides/knowledge-base/create-knowledge-and-upload-documents/2.-choose-a-chunk-mode)
+ * [3. Select the Indexing Method and Retrieval Setting](guides/knowledge-base/create-knowledge-and-upload-documents/3.-select-the-indexing-method-and-retrieval-setting)
+ * [Manage Knowledge](guides/knowledge-base/knowledge-and-documents-maintenance)
+ * [Maintain Documents](guides/knowledge-base/knowledge-and-documents-maintenance/maintain-knowledge-documents)
+ * [Maintain Knowledge via API](guides/knowledge-base/knowledge-and-documents-maintenance/maintain-dataset-via-api)
+ * [Metadata](guides/knowledge-base/metadata)
+ * [Integrate Knowledge Base within Application](guides/knowledge-base/integrate-knowledge-within-application)
+ * [Retrieval Test / Citation and Attributions](guides/knowledge-base/retrieval-test-and-citation)
+ * [Knowledge Request Rate Limit](guides/knowledge-base/knowledge-request-rate-limit)
+ * [Connect to an External Knowledge Base](guides/knowledge-base/connect-external-knowledge)
+ * [External Knowledge API](guides/knowledge-base/external-knowledge-api-documentation)
+* [Tools](guides/tools/README)
+ * [Quick Tool Integration](guides/tools/quick-tool-integration)
+ * [Advanced Tool Integration](guides/tools/advanced-tool-integration)
+ * [Tool Configuration](guides/tools/tool-configuration/README)
+ * [Google](guides/tools/tool-configuration/google)
+ * [Bing](guides/tools/tool-configuration/bing)
+ * [SearchApi](guides/tools/tool-configuration/searchapi)
+ * [StableDiffusion](guides/tools/tool-configuration/stable-diffusion)
+ * [Dall-e](guides/tools/tool-configuration/dall-e)
+ * [Perplexity Search](guides/tools/tool-configuration/perplexity)
+ * [AlphaVantage](guides/tools/tool-configuration/alphavantage)
+ * [Youtube](guides/tools/tool-configuration/youtube)
+ * [SearXNG](guides/tools/tool-configuration/searxng)
+ * [Serper](guides/tools/tool-configuration/serper)
+ * [SiliconFlow (Flux AI Supported)](guides/tools/tool-configuration/siliconflow)
+ * [ComfyUI](guides/tools/tool-configuration/comfyui)
+* [Publishing](guides/application-publishing/README)
+ * [Publish as a Single-page Web App](guides/application-publishing/launch-your-webapp-quickly/README)
+ * [Web App Settings](guides/application-publishing/launch-your-webapp-quickly/web-app-settings)
+ * [Text Generator Application](guides/application-publishing/launch-your-webapp-quickly/text-generator)
+ * [Conversation Application](guides/application-publishing/launch-your-webapp-quickly/conversation-application)
+ * [Embedding In Websites](guides/application-publishing/embedding-in-websites)
+ * [Developing with APIs](guides/application-publishing/developing-with-apis)
+ * [Re-develop Based on Frontend Templates](guides/application-publishing/based-on-frontend-templates)
+* [Annotation](guides/annotation/README)
+ * [Logs and Annotation](guides/annotation/logs)
+ * [Annotation Reply](guides/annotation/annotation-reply)
+* [Monitoring](guides/monitoring/README)
+ * [Data Analysis](guides/monitoring/analysis)
+ * [Integrate External Ops Tools](guides/monitoring/integrate-external-ops-tools/README)
+ * [Integrate LangSmith](guides/monitoring/integrate-external-ops-tools/integrate-langsmith)
+ * [Integrate Langfuse](guides/monitoring/integrate-external-ops-tools/integrate-langfuse)
+ * [Integrate Opik](guides/monitoring/integrate-external-ops-tools/integrate-opik)
+* [Extension](guides/extension/README)
+ * [API-Based Extension](guides/extension/api-based-extension/README)
+ * [External Data Tool](guides/extension/api-based-extension/external-data-tool)
+ * [Deploy API Tools with Cloudflare Workers](guides/extension/api-based-extension/cloudflare-workers)
+ * [Moderation](guides/extension/api-based-extension/moderation)
+ * [Code-Based Extension](guides/extension/code-based-extension/README)
+ * [External Data Tool](guides/extension/code-based-extension/external-data-tool)
+ * [Moderation](guides/extension/code-based-extension/moderation)
+* [Collaboration](guides/workspace/README)
+ * [Discover](guides/workspace/app)
+ * [Invite and Manage Members](guides/workspace/invite-and-manage-members)
+* [Management](guides/management/README)
+ * [App Management](guides/management/app-management)
+ * [Team Members Management](guides/management/team-members-management)
+ * [Personal Account Management](guides/management/personal-account-management)
+ * [Subscription Management](guides/management/subscription-management)
+ * [Version Control](guides/management/version-control)
## Workshop
-* [Basic](workshop/basic/README.md)
- * [How to Build an AI Image Generation App](workshop/basic/build-ai-image-generation-app.md)
-* [Intermediate](workshop/intermediate/README.md)
- * [Build An Article Reader Using File Upload](workshop/intermediate/article-reader.md)
- * [Building a Smart Customer Service Bot Using a Knowledge Base](workshop/intermediate/customer-service-bot.md)
- * [Generating analysis of Twitter account using Chatflow Agent](workshop/intermediate/twitter-chatflow.md)
+* [Basic](workshop/basic/README)
+ * [How to Build an AI Image Generation App](workshop/basic/build-ai-image-generation-app)
+* [Intermediate](workshop/intermediate/README)
+ * [Build An Article Reader Using File Upload](workshop/intermediate/article-reader)
+ * [Building a Smart Customer Service Bot Using a Knowledge Base](workshop/intermediate/customer-service-bot)
+ * [Generating analysis of Twitter account using Chatflow Agent](workshop/intermediate/twitter-chatflow)
## Community
-* [Seek Support](community/support.md)
-* [Become a Contributor](community/contribution.md)
-* [Contributing to Dify Documentation](community/docs-contribution.md)
+* [Seek Support](community/support)
+* [Become a Contributor](community/contribution)
+* [Contributing to Dify Documentation](community/docs-contribution)
## Plugins
-* [Introduction](plugins/introduction.md)
-* [Quick Start](plugins/quick-start/README.md)
- * [Install and Use Plugins](plugins/quick-start/install-plugins.md)
- * [Develop Plugins](plugins/quick-start/develop-plugins/README.md)
- * [Initialize Development Tools](plugins/quick-start/develop-plugins/initialize-development-tools.md)
- * [Tool Plugin](plugins/quick-start/develop-plugins/tool-plugin.md)
- * [Model Plugin](plugins/quick-start/develop-plugins/model-plugin/README.md)
- * [Create Model Providers](plugins/quick-start/develop-plugins/model-plugin/create-model-providers.md)
- * [Integrate the Predefined Model](plugins/quick-start/develop-plugins/model-plugin/predefined-model.md)
- * [Integrate the Customizable Model](plugins/quick-start/develop-plugins/model-plugin/customizable-model.md)
- * [Agent Strategy Plugin](plugins/quick-start/develop-plugins/agent-strategy-plugin.md)
- * [Extension Plugin](plugins/quick-start/develop-plugins/extension-plugin.md)
- * [Bundle](plugins/quick-start/develop-plugins/bundle.md)
- * [Debug Plugin](plugins/quick-start/debug-plugin.md)
-* [Manage Plugins](plugins/manage-plugins.md)
-* [Schema Specification](plugins/schema-definition/README.md)
- * [Manifest](plugins/schema-definition/manifest.md)
- * [Endpoint](plugins/schema-definition/endpoint.md)
- * [Tool](plugins/schema-definition/tool.md)
- * [Agent](plugins/schema-definition/agent.md)
- * [Model](plugins/schema-definition/model/README.md)
- * [Model Designing Rules](plugins/schema-definition/model/model-designing-rules.md)
- * [Model Schema](plugins/schema-definition/model/model-schema.md)
- * [General Specifications](plugins/schema-definition/general-specifications.md)
- * [Persistent Storage](plugins/schema-definition/persistent-storage.md)
- * [Reverse Invocation of the Dify Service](plugins/schema-definition/reverse-invocation-of-the-dify-service/README.md)
- * [App](plugins/schema-definition/reverse-invocation-of-the-dify-service/app.md)
- * [Model](plugins/schema-definition/reverse-invocation-of-the-dify-service/model.md)
- * [Tool](plugins/schema-definition/reverse-invocation-of-the-dify-service/tool.md)
- * [Node](plugins/schema-definition/reverse-invocation-of-the-dify-service/node.md)
-* [Best Practice](plugins/best-practice/README.md)
- * [Develop a Slack Bot Plugin](plugins/best-practice/develop-a-slack-bot-plugin.md)
-* [Publish Plugins](plugins/publish-plugins/README.md)
- * [Publish to Dify Marketplace](plugins/publish-plugins/publish-to-dify-marketplace/README.md)
- * [Plugin Developer Guidelines](plugins/publish-plugins/publish-to-dify-marketplace/plugin-developer-guidelines.md)
- * [Plugin Privacy Protection Guidelines](plugins/publish-plugins/publish-to-dify-marketplace/plugin-privacy-protection-guidelines.md)
- * [Publish to Your Personal GitHub Repository](plugins/publish-plugins/publish-plugin-on-personal-github-repo.md)
- * [Package the Plugin File and Publish it](plugins/publish-plugins/package-plugin-file-and-publish.md)
-* [FAQ](plugins/faq.md)
+* [Introduction](plugins/introduction)
+* [Quick Start](plugins/quick-start/README)
+ * [Install and Use Plugins](plugins/quick-start/install-plugins)
+ * [Develop Plugins](plugins/quick-start/develop-plugins/README)
+ * [Initialize Development Tools](plugins/quick-start/develop-plugins/initialize-development-tools)
+ * [Tool Plugin](plugins/quick-start/develop-plugins/tool-plugin)
+ * [Model Plugin](plugins/quick-start/develop-plugins/model-plugin/README)
+ * [Create Model Providers](plugins/quick-start/develop-plugins/model-plugin/create-model-providers)
+ * [Integrate the Predefined Model](plugins/quick-start/develop-plugins/model-plugin/predefined-model)
+ * [Integrate the Customizable Model](plugins/quick-start/develop-plugins/model-plugin/customizable-model)
+ * [Agent Strategy Plugin](plugins/quick-start/develop-plugins/agent-strategy-plugin)
+ * [Extension Plugin](plugins/quick-start/develop-plugins/extension-plugin)
+ * [Bundle](plugins/quick-start/develop-plugins/bundle)
+ * [Debug Plugin](plugins/quick-start/debug-plugin)
+* [Manage Plugins](plugins/manage-plugins)
+* [Schema Specification](plugins/schema-definition/README)
+ * [Manifest](plugins/schema-definition/manifest)
+ * [Endpoint](plugins/schema-definition/endpoint)
+ * [Tool](plugins/schema-definition/tool)
+ * [Agent](plugins/schema-definition/agent)
+ * [Model](plugins/schema-definition/model/README)
+ * [Model Designing Rules](plugins/schema-definition/model/model-designing-rules)
+ * [Model Schema](plugins/schema-definition/model/model-schema)
+ * [General Specifications](plugins/schema-definition/general-specifications)
+ * [Persistent Storage](plugins/schema-definition/persistent-storage)
+ * [Reverse Invocation of the Dify Service](plugins/schema-definition/reverse-invocation-of-the-dify-service/README)
+ * [App](plugins/schema-definition/reverse-invocation-of-the-dify-service/app)
+ * [Model](plugins/schema-definition/reverse-invocation-of-the-dify-service/model)
+ * [Tool](plugins/schema-definition/reverse-invocation-of-the-dify-service/tool)
+ * [Node](plugins/schema-definition/reverse-invocation-of-the-dify-service/node)
+* [Best Practice](plugins/best-practice/README)
+ * [Develop a Slack Bot Plugin](plugins/best-practice/develop-a-slack-bot-plugin)
+* [Publish Plugins](plugins/publish-plugins/README)
+ * [Publish to Dify Marketplace](plugins/publish-plugins/publish-to-dify-marketplace/README)
+ * [Plugin Developer Guidelines](plugins/publish-plugins/publish-to-dify-marketplace/plugin-developer-guidelines)
+ * [Plugin Privacy Protection Guidelines](plugins/publish-plugins/publish-to-dify-marketplace/plugin-privacy-protection-guidelines)
+ * [Publish to Your Personal GitHub Repository](plugins/publish-plugins/publish-plugin-on-personal-github-repo)
+ * [Package the Plugin File and Publish it](plugins/publish-plugins/package-plugin-file-and-publish)
+* [FAQ](plugins/faq)
## Development
-* [Backend](development/backend/README.md)
- * [DifySandbox](development/backend/sandbox/README.md)
- * [Contribution Guide](development/backend/sandbox/contribution.md)
-* [Models Integration](development/models-integration/README.md)
- * [Integrate Open Source Models from Hugging Face](development/models-integration/hugging-face.md)
- * [Integrate Open Source Models from Replicate](development/models-integration/replicate.md)
- * [Integrate Local Models Deployed by Xinference](development/models-integration/xinference.md)
- * [Integrate Local Models Deployed by OpenLLM](development/models-integration/openllm.md)
- * [Integrate Local Models Deployed by LocalAI](development/models-integration/localai.md)
- * [Integrate Local Models Deployed by Ollama](development/models-integration/ollama.md)
- * [Integrate Models on LiteLLM Proxy](development/models-integration/litellm.md)
- * [Integrating with GPUStack for Local Model Deployment](development/models-integration/gpustack.md)
- * [Integrating AWS Bedrock Models (DeepSeek)](development/models-integration/aws-bedrock-deepseek.md)
-* [Migration](development/migration/README.md)
- * [Migrating Community Edition to v1.0.0](development/migration/migrate-to-v1.md)
+* [Backend](development/backend/README)
+ * [DifySandbox](development/backend/sandbox/README)
+ * [Contribution Guide](development/backend/sandbox/contribution)
+* [Models Integration](development/models-integration/README)
+ * [Integrate Open Source Models from Hugging Face](development/models-integration/hugging-face)
+ * [Integrate Open Source Models from Replicate](development/models-integration/replicate)
+ * [Integrate Local Models Deployed by Xinference](development/models-integration/xinference)
+ * [Integrate Local Models Deployed by OpenLLM](development/models-integration/openllm)
+ * [Integrate Local Models Deployed by LocalAI](development/models-integration/localai)
+ * [Integrate Local Models Deployed by Ollama](development/models-integration/ollama)
+ * [Integrate Models on LiteLLM Proxy](development/models-integration/litellm)
+ * [Integrating with GPUStack for Local Model Deployment](development/models-integration/gpustack)
+ * [Integrating AWS Bedrock Models (DeepSeek)](development/models-integration/aws-bedrock-deepseek)
+* [Migration](development/migration/README)
+ * [Migrating Community Edition to v1.0.0](development/migration/migrate-to-v1)
## Learn More
-* [Use Cases](learn-more/use-cases/README.md)
- * [DeepSeek & Dify Integration Guide: Building AI Applications with Multi-Turn Reasoning](learn-more/use-cases/integrate-deepseek-to-build-an-ai-app.md)
- * [Private Deployment of Ollama + DeepSeek + Dify: Build Your Own AI Assistant](learn-more/use-cases/private-ai-ollama-deepseek-dify.md)
- * [Build a Notion AI Assistant](learn-more/use-cases/build-an-notion-ai-assistant.md)
- * [Create a MidJourney Prompt Bot with Dify](learn-more/use-cases/create-a-midjourney-prompt-bot-with-dify.md)
- * [Create an AI Chatbot with Business Data in Minutes](learn-more/use-cases/create-an-ai-chatbot-with-business-data-in-minutes.md)
- * [Integrating Dify Chatbot into Your Wix Website](learn-more/use-cases/how-to-integrate-dify-chatbot-to-your-wix-website.md)
- * [How to connect with AWS Bedrock Knowledge Base?](learn-more/use-cases/how-to-connect-aws-bedrock.md)
- * [Building the Dify Scheduler](learn-more/use-cases/dify-schedule.md)
- * [Building an AI Thesis Slack Bot on Dify](learn-more/use-cases/building-an-ai-thesis-slack-bot.md)
-* [Extended Reading](learn-more/extended-reading/README.md)
- * [What is LLMOps?](learn-more/extended-reading/what-is-llmops.md)
- * [Retrieval-Augmented Generation (RAG)](learn-more/extended-reading/retrieval-augment/README.md)
- * [Hybrid Search](learn-more/extended-reading/retrieval-augment/hybrid-search.md)
- * [Re-ranking](learn-more/extended-reading/retrieval-augment/rerank.md)
- * [Retrieval Modes](learn-more/extended-reading/retrieval-augment/retrieval.md)
- * [How to Use JSON Schema Output in Dify?](learn-more/extended-reading/how-to-use-json-schema-in-dify.md)
-* [FAQ](learn-more/faq/README.md)
- * [Self-Host](learn-more/faq/install-faq.md)
- * [LLM Configuration and Usage](learn-more/faq/use-llms-faq.md)
- * [Plugins](learn-more/faq/plugins.md)
+* [Use Cases](learn-more/use-cases/README)
+ * [DeepSeek & Dify Integration Guide: Building AI Applications with Multi-Turn Reasoning](learn-more/use-cases/integrate-deepseek-to-build-an-ai-app)
+ * [Private Deployment of Ollama + DeepSeek + Dify: Build Your Own AI Assistant](learn-more/use-cases/private-ai-ollama-deepseek-dify)
+ * [Build a Notion AI Assistant](learn-more/use-cases/build-an-notion-ai-assistant)
+ * [Create a MidJourney Prompt Bot with Dify](learn-more/use-cases/create-a-midjourney-prompt-bot-with-dify)
+ * [Create an AI Chatbot with Business Data in Minutes](learn-more/use-cases/create-an-ai-chatbot-with-business-data-in-minutes)
+ * [Integrating Dify Chatbot into Your Wix Website](learn-more/use-cases/how-to-integrate-dify-chatbot-to-your-wix-website)
+ * [How to connect with AWS Bedrock Knowledge Base?](learn-more/use-cases/how-to-connect-aws-bedrock)
+ * [Building the Dify Scheduler](learn-more/use-cases/dify-schedule)
+ * [Building an AI Thesis Slack Bot on Dify](learn-more/use-cases/building-an-ai-thesis-slack-bot)
+* [Extended Reading](learn-more/extended-reading/README)
+ * [What is LLMOps?](learn-more/extended-reading/what-is-llmops)
+ * [Retrieval-Augmented Generation (RAG)](learn-more/extended-reading/retrieval-augment/README)
+ * [Hybrid Search](learn-more/extended-reading/retrieval-augment/hybrid-search)
+ * [Re-ranking](learn-more/extended-reading/retrieval-augment/rerank)
+ * [Retrieval Modes](learn-more/extended-reading/retrieval-augment/retrieval)
+ * [How to Use JSON Schema Output in Dify?](learn-more/extended-reading/how-to-use-json-schema-in-dify)
+* [FAQ](learn-more/faq/README)
+ * [Self-Host](learn-more/faq/install-faq)
+ * [LLM Configuration and Usage](learn-more/faq/use-llms-faq)
+ * [Plugins](learn-more/faq/plugins)
## Policies
-* [Open Source License](policies/open-source.md)
-* [User Agreement](policies/agreement/README.md)
+* [Open Source License](policies/open-source)
+* [User Agreement](policies/agreement/README)
* [Terms of Service](https://dify.ai/terms)
* [Privacy Policy](https://dify.ai/privacy)
- * [Get Compliance Report](policies/agreement/get-compliance-report.md)
+ * [Get Compliance Report](policies/agreement/get-compliance-report)
## Features
-* [Workflow](features/workflow.md)
+* [Workflow](features/workflow)
diff --git a/en/community/contribution.mdx b/en/community/contribution.mdx
index db2a5d67..93d371af 100644
--- a/en/community/contribution.mdx
+++ b/en/community/contribution.mdx
@@ -1,5 +1,5 @@
---
-title: "Contributing"
+title: Become a Contributor
---
So you're looking to contribute to Dify - that's awesome, we can't wait to see what you do. As a startup with limited headcount and funding, we have grand ambitions to design the most intuitive workflow for building and managing LLM applications. Any help from the community counts, truly.
@@ -8,7 +8,7 @@ We need to be nimble and ship fast given where we are, but we also want to make
This guide, like Dify itself, is a constant work in progress. We highly appreciate your understanding if at times it lags behind the actual project, and welcome any feedback for us to improve.
-In terms of licensing, please take a minute to read our short [License and Contributor Agreement](https://github.com/langgenius/dify/blob/main/LICENSE). The community also adheres to the [code of conduct](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).
+In terms of licensing, please take a minute to read our short [License and Contributor Agreement](https://github.com/langgenius/dify/blob/main/LICENSE). The community also adheres to the [code of conduct](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).
### Before you jump in
@@ -79,7 +79,7 @@ Dify requires the following dependencies to build, make sure they're installed o
#### 4. Installations
-Dify is composed of a backend and a frontend. Navigate to the backend directory by `cd api/`, then follow the [Backend README](https://github.com/langgenius/dify/blob/main/api/README.md) to install it. In a separate terminal, navigate to the frontend directory by `cd web/`, then follow the [Frontend README](https://github.com/langgenius/dify/blob/main/web/README.md) to install.
+Dify is composed of a backend and a frontend. Navigate to the backend directory by `cd api/`, then follow the [Backend README](https://github.com/langgenius/dify/blob/main/api/README.md) to install it. In a separate terminal, navigate to the frontend directory by `cd web/`, then follow the [Frontend README](https://github.com/langgenius/dify/blob/main/web/README.md) to install.
Check the [installation FAQ](https://docs.dify.ai/learn-more/faq/install-faq) for a list of common issues and steps to troubleshoot.
@@ -89,9 +89,9 @@ To validate your set up, head over to http://localhost:3000 (the default, or you
### Developing
-If you are adding a model provider,[this guide](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/README.md) is for you.
+If you are adding a model provider, [this guide](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/README.md) is for you.
-If you are adding tools used in Agent Assistants and Workflows, [this guide](https://github.com/langgenius/dify/blob/main/api/core/tools/README.md) is for you.
+If you are adding tools used in Agent Assistants and Workflows, [this guide](https://github.com/langgenius/dify/blob/main/api/core/tools/README.md) is for you.
> **Note** : If you want to contribute to a new tool, please make sure you've left your contact information on the tool's 'YAML' file, and submitted a corresponding docs PR in the [Dify-docs](https://github.com/langgenius/dify-docs/tree/main/en/guides/tools/tool-configuration) repository.
@@ -153,7 +153,7 @@ The website is bootstrapped on [Next.js](https://nextjs.org/) boilerplate in Typ
At last, time to open a pull request (PR) to our repo. For major features, we first merge them into the `deploy/dev` branch for testing, before they go into the `main` branch. If you run into issues like merge conflicts or don't know how to open a pull request, check out [GitHub's pull request tutorial](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests).
-And that's it\! Once your PR is merged, you will be featured as a contributor in our [README](https://github.com/langgenius/dify/blob/main/README.md).
+And that's it\! Once your PR is merged, you will be featured as a contributor in our [README](https://github.com/langgenius/dify/blob/main/README.md).
### Getting Help
diff --git a/en/community/docs-contribution.mdx b/en/community/docs-contribution.mdx
index 5e14f0f9..f415e6bb 100644
--- a/en/community/docs-contribution.mdx
+++ b/en/community/docs-contribution.mdx
@@ -3,7 +3,7 @@ title: Contributing to Dify Documentation
---
-Dify documentation is an [open-source project](https://github.com/langgenius/dify-docs), and we welcome contributions. Whether you've spotted an issue while reading the docs or you're keen to contribute your own content, we encourage you to submit an issue or initiate a pull request on GitHub. We'll address your PR promptly.
+Dify documentation is an [open-source project](https://github.com/langgenius/dify-docs-mintlify), and we welcome contributions. Whether you've spotted an issue while reading the docs or you're keen to contribute your own content, we encourage you to submit an issue or initiate a pull request on GitHub. We'll address your PR promptly.
## How to Contribute
diff --git a/en/development/models-integration/localai.mdx b/en/development/models-integration/localai.mdx
index 55547a4a..a87800c5 100644
--- a/en/development/models-integration/localai.mdx
+++ b/en/development/models-integration/localai.mdx
@@ -13,7 +13,7 @@ Dify allows integration with LocalAI for local deployment of large language mode
You can refer to the official [Getting Started](https://localai.io/basics/getting_started/) guide for deployment, or quickly integrate following the steps below:
-(These steps are derived from [LocalAI Data query example](https://github.com/go-skynet/LocalAI/blob/master/examples/langchain-chroma/README.md))
+(These steps are derived from [LocalAI Data query example](https://github.com/go-skynet/LocalAI/blob/master/examples/langchain-chroma/README.md))
1. First, clone the LocalAI code repository and navigate to the specified directory.
diff --git a/en/development/models-integration/ollama.mdx b/en/development/models-integration/ollama.mdx
index 0f8ddd8b..9e474e8a 100644
--- a/en/development/models-integration/ollama.mdx
+++ b/en/development/models-integration/ollama.mdx
@@ -2,8 +2,9 @@
title: Integrate Local Models Deployed by Ollama
---
-
-.png>)
+
+
+
[Ollama](https://github.com/jmorganca/ollama) is a cross-platform inference framework client (MacOS, Windows, Linux) designed for seamless deployment of large language models (LLMs) such as Llama 2, Mistral, Llava, and more. With its one-click setup, Ollama enables local execution of LLMs, providing enhanced data privacy and security by keeping your data on your own machine.
diff --git a/en/getting-started/dify-for-education.mdx b/en/getting-started/dify-for-education.mdx
index b80dbb68..541efca3 100644
--- a/en/getting-started/dify-for-education.mdx
+++ b/en/getting-started/dify-for-education.mdx
@@ -98,11 +98,11 @@ Limitations:
### Update Email
-Email `` to change your email address.
+Email [support@dify.ai](mailto:support@dify.ai) to change your email address.
### Update School Information
-- **For verified accounts**: Email `` to modify school details.
+- **For verified accounts**: Email [support@dify.ai](mailto:support@dify.ai) to modify school details.
- **For expired verification**: Enter new school information during re-verification.
@@ -138,11 +138,11 @@ Your educational verification application may be rejected or revoked in the foll
4. Educational privilege misuse
5. False institutional information
-To appeal a rejected/revoked verification, please email `` for assistance.
+To appeal a rejected/revoked verification, please email [support@dify.ai](mailto:support@dify.ai) for assistance.
- **Will my educational verification remain valid after I graduate or leave school?**
-Your verification remains valid while your educational email is active. Email `` for assistance if your educational email expires.
+Your verification remains valid while your educational email is active. Email [support@dify.ai](mailto:support@dify.ai) for assistance if your educational email expires.
- **I don’t have an educational email. How can I apply for educational verification?**
diff --git a/en/getting-started/install-self-hosted/aa-panel.mdx b/en/getting-started/install-self-hosted/aa-panel.mdx
index 32c826f7..b6f5e981 100644
--- a/en/getting-started/install-self-hosted/aa-panel.mdx
+++ b/en/getting-started/install-self-hosted/aa-panel.mdx
@@ -34,7 +34,7 @@ title: Deploy with aaPanel
2. The first time you will be prompteINLINE_CODE_P`Docker Compose` the `Docker` and `Docker Compose` services, click Install Now. If it is already installed, please ignore it.
-3INLINE_COD`One-Click Install`e ins`install` is complete, find `Dify` in `One-Click Install` and click `install`
+3. Once the installation is complete, find `Dify` in `One-Click Install` and click `install`
4. configure basic information such as the domain name, ports to complete the installation
> \[!IMPORTANT]
@@ -48,8 +48,11 @@ title: Deploy with aaPanel
- Allow external access: If you nee`8088`ct access through `IP+Port`, please check. If you have set up a domain name, please do not check here.
- Port: De`Docker`0 `8088`, can be modified by yourself
+6. After submission, the panel will automatically initialize the application, which will take about `1-3` minutes. It can be accessed after the initialization is completed.
-6. After submission, the panel will automatically initialize the application, which will take about `1-3` minutes. It can be accessed after the initializa`Docker`1p the admin account:
+### Access Dify
+
+Access administrator initialization page to set up the admin account:
```bash
# If you have set domain
@@ -65,11 +68,6 @@ Dify web interface address:
# If you have set domain
http://yourdomain/
-# If you choose to access through `IP+Port`
-http://your_server_ip```bash
-# If you have set domain
-http://yourdomain/
-
# If you choose to access through `IP+Port`
http://your_server_ip:8088/
-```
\ No newline at end of file
+```
diff --git a/en/getting-started/install-self-hosted/docker-compose.mdx b/en/getting-started/install-self-hosted/docker-compose.mdx
index 1b03c8bc..1b657e75 100644
--- a/en/getting-started/install-self-hosted/docker-compose.mdx
+++ b/en/getting-started/install-self-hosted/docker-compose.mdx
@@ -41,7 +41,7 @@ title: Deploy with Docker Compose
> \[!IMPORTANT]
>
-> Dify 0.6.12 has introduced significant enhancements to Docker Compose deployment, designed to improve your setup and update experience. For more information, read the [README.md](https://github.com/langgenius/dify/blob/main/docker/README.md).
+> Dify 0.6.12 has introduced significant enhancements to Docker Compose deployment, designed to improve your setup and update experience. For more information, read the [README.md](https://github.com/langgenius/dify/blob/main/docker/README.md).
### Clone Dify
diff --git a/en/getting-started/install-self-hosted/environments.mdx b/en/getting-started/install-self-hosted/environments.mdx
index 4725abe4..276cd25e 100644
--- a/en/getting-started/install-self-hosted/environments.mdx
+++ b/en/getting-started/install-self-hosted/environments.mdx
@@ -413,6 +413,10 @@ Used to store uploaded data set files, team/tenant encryption keys,`https://cons
For example: `http://unstructured:8000/general/v0/general`
+- TOP_K_MAX_VALUE
+
+ The maximum top-k value of RAG, default 10.
+
#### Multi-modal Configuration
- MULTIMODAL_SENDINLINE_CODE_PLACEHO`https://app.dify.ai`736ORMA`https://app.dify.ai`8The format o`https://app.dify.ai`9age sent when the multi-modal model is input`https://udify.app/`0fault `https://udify.app/`1ase`https://udify.app/`2al `url`. The delay of the call in `url` mode will be lower than that in `base64` mode. It is generally recommended to use the more compatible `base64` mode. If configured as `url`, you need to configure `FILES_URL` as an externally accessible address so that the multi-modal model can access the image.
@@ -585,7 +589,7 @@ Used to set the browser policy for session cookies used for identity verificatio
### Chunk Length Configuration
-#### MAXIMUM_CHUNK_TOKEN_LENGTH
+#### INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH
Configuration for document chunk length. It is used to control the size of text segments when processing long documents. Default: 500. Maximum: 4000.
diff --git a/en/getting-started/install-self-hosted/readme.mdx b/en/getting-started/install-self-hosted/readme.mdx
index b0add896..0ccc1ccd 100644
--- a/en/getting-started/install-self-hosted/readme.mdx
+++ b/en/getting-started/install-self-hosted/readme.mdx
@@ -12,4 +12,4 @@ Dify, an open-source project on [GitHub](https://github.com/langgenius/dify), ca
To maintain code quality, we require all code contributions - even from those with direct commit access - to be submitted as pull requests. These must be reviewed and approved by the core development team before merging.
-We welcome contributions from everyone! If you're interested in helping, please check our [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) for more information on getting started.
+We welcome contributions from everyone! If you're interested in helping, please check our [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) for more information on getting started.
diff --git a/en/getting-started/install-self-hosted/start-the-frontend-docker-container.mdx b/en/getting-started/install-self-hosted/start-the-frontend-docker-container.mdx
index 9b9625e3..c9336cb0 100644
--- a/en/getting-started/install-self-hosted/start-the-frontend-docker-container.mdx
+++ b/en/getting-started/install-self-hosted/start-the-frontend-docker-container.mdx
@@ -1,8 +1,7 @@
---
-title: Start the frontend Docker container separately
+title: Start Frontend Docker Container Separately
---
-
When developing the backend separately, you may only need to start the backend service from source code without building and launching the frontend locally. In this case, you can directly start the frontend service by pulling the Docker image and running the container. Here are the specific steps:
#### Pull the Docker image for the frontend service from DockerHub:
diff --git a/en/getting-started/readme/features-and-specifications.mdx b/en/getting-started/readme/features-and-specifications.mdx
index 9fdf9854..deaea297 100644
--- a/en/getting-started/readme/features-and-specifications.mdx
+++ b/en/getting-started/readme/features-and-specifications.mdx
@@ -22,7 +22,7 @@ We adopt transparent policies around product specifications to ensure decisions
Open Source License
-
Apache License 2.0 with commercial licensing
+
[Apache License 2.0 with commercial licensing](../../policies/open-source)
Official R&D Team
@@ -30,7 +30,7 @@ We adopt transparent policies around product specifications to ensure decisions
Community Contributors
-
Over 290 people (As of Q2 2024)
+
Over [290](https://ossinsight.io/analyze/langgenius/dify#overview) people (As of Q2 2024)
Backend Technology
diff --git a/en/guides/application-orchestrate/agent.mdx b/en/guides/application-orchestrate/agent.mdx
index 329c5cdc..7fc1480a 100644
--- a/en/guides/application-orchestrate/agent.mdx
+++ b/en/guides/application-orchestrate/agent.mdx
@@ -50,6 +50,14 @@ You can set up a conversation opener and initial questions for your Agent Assist

+## Uploading Documentation File
+
+Some LLMs now natively support file processing, such as [Claude 3.5 Sonnet](https://docs.anthropic.com/en/docs/build-with-claude/pdf-support) and [Gemini 1.5 Pro](https://ai.google.dev/api/files). You can check the LLMs' websites for details on their file upload capabilities.
+
+Select an LLM that supports file reading and enable the "Documentation" feature. This enables the Chatbot to recognize files without complex configurations.
+
+
+
## Debugging and Preview
After orchestrating the intelligent assistant, you can debug and preview it before publishing as an application to verify the assistant's task completion effectiveness.
diff --git a/en/guides/application-orchestrate/chatbot-application.mdx b/en/guides/application-orchestrate/chatbot-application.mdx
index 00077365..70da5543 100644
--- a/en/guides/application-orchestrate/chatbot-application.mdx
+++ b/en/guides/application-orchestrate/chatbot-application.mdx
@@ -2,29 +2,29 @@
title: Chatbot Application
---
-Conversation applications use a one-question-one-answer mode to have a continuous conversation with the user.
+Chatbot applications use a one-question-one-answer mode to have a continuous conversation with the user.
### Applicable scenarios
-Conversation applications can be used in fields such as customer service, online education, healthcare, financial services, etc. These applications can help organizations improve work efficiency, reduce labor costs, and provide a better user experience.
+Chatbot applications can be used in fields such as customer service, online education, healthcare, financial services, etc. These applications can help organizations improve work efficiency, reduce labor costs, and provide a better user experience.
### How to compose
-Conversation applications supports: prompts, variables, context, opening remarks, and suggestions for the next question.
+Chatbot applications support: prompts, variables, context, opening remarks, and suggestions for the next question.
-Here, we use a interviewer application as an example to introduce the way to compose a conversation applications.
+Here, we use an interviewer application as an example to introduce the way to compose a chatbot application.
#### Step 1 Create an application
-Click the "Create Application" button on the homepage to create an application. Fill in the application name, and select **"Chat App"** as the application type.
+Click the "Create Application" button on the homepage to create an application. Fill in the application name, and select **"Chatbot"**.
-
+
#### Step 2: Compose the Application
After the application is successfully created, it will automatically redirect to the application overview page. Click on the button on the left menu: **"Orchestrate"** to compose the application.
-
+
**2.1 Fill in Prompts**
@@ -40,42 +40,52 @@ For a better experience, we will add an opening dialogue: `"Hello, {{name}}. I'm
To add the opening dialogue, click the "Add Feature" button in the upper left corner, and enable the "Conversation remarkers" feature:
-
+
And then edit the opening remarks:
-
+
**2.2 Adding Context**
-If an application wants to generate content based on private contextual conversations, it can use our [knowledge](/en/guides/knowledge-base/readme) feature. Click the "Add" button in the context to add a knowledge base.
+If an application wants to generate content based on private contextual conversations, it can use our [knowledge](../knowledge-base/) feature. Click the "Add" button in the context to add a knowledge base.
-
+
-**2.3 Debugging**
+**2.3 Uploading Documentation File**
+
+Some LLMs now natively support file processing, such as [Claude 3.5 Sonnet](https://docs.anthropic.com/en/docs/build-with-claude/pdf-support) and [Gemini 1.5 Pro](https://ai.google.dev/api/files). You can check the LLMs' websites for details on their file upload capabilities.
+
+Select an LLM that supports file reading and enable the "Documentation" feature. This enables the Chatbot to recognize files without complex configurations.
+
+
+
+**2.4 Debugging**
Enter user inputs on the right side and check the respond content.
-
+
If the results are not satisfactory, you can adjust the prompts and model parameters. Click on the model name in the upper right corner to set the parameters of the model:
-
+
-**Debugging with multiple models:**
+**Multiple Model Debugging:**
-If debugging with a single model feels inefficient, you can utilize the **Debug as Multiple Models** feature to batch-test the models' response effectiveness.
-
-
-
-Supports adding up to 4 LLMs at the same time.
-
-
-
-> ⚠️ When using the multi-model debugging feature, if only some large models are visible, it is because other large models' keys have not been added yet. You can manually add multiple models' keys in ["Add New Provider"](/en/guides/model-configuration/new-provider).
+If the LLM’s response is unsatisfactory, you can refine the prompt or switch to different underlying models for comparison. To simultaneously observe how multiple models respond to the same question, see [Multiple Model Debugging](./multiple-llms-debugging).
-**2.4 Publish App**
+**2.5 Publish App**
-After debugging your application, click the **"Publish"** button in the top right corner to create a standalone AI application. In addition to experiencing the application via a public URL, you can also perform secondary development based on APIs, embed it into websites, and more. For details, please refer to [Publishing](/en/guides/application-publishing/launch-your-webapp-quickly/web-app-settings).
+After debugging your application, click the **"Publish"** button in the top right corner to create a standalone AI application. In addition to experiencing the application via a public URL, you can also perform secondary development based on APIs, embed it into websites, and more. For details, please refer to [Publishing](https://docs.dify.ai/guides/application-publishing).
-If you want to customize the application that you share, you can Fork our open source [WebApp template](https://github.com/langgenius/webapp-conversation). Based on the template, you can modify the application to meet your specific needs and style requirements.
\ No newline at end of file
+If you want to customize the application that you share, you can Fork our open source [WebApp template](https://github.com/langgenius/webapp-conversation). Based on the template, you can modify the application to meet your specific needs and style requirements.
+
+### FAQ
+
+**How to add a third-party tool within the chatbot?**
+
+The chatbot app does not support adding third-party tools. You can add third-party tools within your [agent](../application-orchestrate/agent).
+
+**How to use metadata filtering when creating a Chatbot?**
+
+For instructions, see **Metadata Filtering > Chatbot** in *[Integrate Knowledge Base within Application](https://docs.dify.ai/guides/knowledge-base/integrate-knowledge-within-application)*.
diff --git a/en/guides/application-orchestrate/creating-an-application.mdx b/en/guides/application-orchestrate/creating-an-application.mdx
index be1b7dbe..713a1ccd 100644
--- a/en/guides/application-orchestrate/creating-an-application.mdx
+++ b/en/guides/application-orchestrate/creating-an-application.mdx
@@ -28,6 +28,13 @@ If you need to create a blank application on Dify, you can select "Studio" from
When creating an application for the first time, you might need to first understand the [basic concepts](./#application_type) of the five different types of applications on Dify: Chatbot, Text Generator, Agent, Chatflow and Workflow.
+
+
When selecting a specific application type, you can customize it by providing a name, choosing an appropriate icon(or uploading your favorite image as an icon), and writing a clear and concise description of its purpose. These details will help team members easily understand and use the application in the future.

diff --git a/en/guides/application-orchestrate/multiple-llms-debugging.mdx b/en/guides/application-orchestrate/multiple-llms-debugging.mdx
new file mode 100644
index 00000000..2d3c6877
--- /dev/null
+++ b/en/guides/application-orchestrate/multiple-llms-debugging.mdx
@@ -0,0 +1,25 @@
+---
+title: Multiple Model Debugging
+---
+
+The **Chatbot** application supports the **“Debug as Multiple Models”** feature, allowing you to simultaneously compare how different models respond to the same question.
+
+
+
+You can add up to **4** large language models at once.
+
+
+
+During debugging, if you find a model that performs well, click **“Debug as Single Model”** to switch to a dedicated preview window for the model.
+
+
+
+## FAQ
+
+### 1. Why aren't additional models visible when adding LLMs?
+
+Go to [“Model Providers”](/en/guides/model-configuration), and follow the instructions to add the keys for multiple models manually.
+
+### 2. How to exit Multiple Model Debugging mode?
+
+Select any model and click **“Debug as Single Model”** to exit multiple model debugging mode.
diff --git a/en/guides/application-orchestrate/readme.mdx b/en/guides/application-orchestrate/readme.mdx
index d7ddeb22..644690d4 100644
--- a/en/guides/application-orchestrate/readme.mdx
+++ b/en/guides/application-orchestrate/readme.mdx
@@ -1,5 +1,5 @@
---
-title: Introduction
+title: Application Orchestration
---
In Dify, an "application" refers to a practical scenario application built on large language models like GPT. By creating an application, you can apply intelligent AI technology to specific needs. It encompasses both the engineering paradigm for developing AI applications and the specific deliverables.
diff --git a/en/guides/application-publishing/based-on-frontend-templates.mdx b/en/guides/application-publishing/based-on-frontend-templates.mdx
index 77bf9a39..ae95fd7e 100644
--- a/en/guides/application-publishing/based-on-frontend-templates.mdx
+++ b/en/guides/application-publishing/based-on-frontend-templates.mdx
@@ -1,8 +1,7 @@
---
-title: Based on WebApp Template
+title: Re-develop Based on Frontend Templates
---
-
If developers are developing new products from scratch or in the product prototype design phase, you can quickly launch AI sites using Dify. At the same time, Dify hopes that developers can fully freely create different forms of front-end applications. For this reason, we provide:
* **SDK** for quick access to the Dify API in various languages
diff --git a/en/guides/application-publishing/launch-your-webapp-quickly/README.mdx b/en/guides/application-publishing/launch-your-webapp-quickly/README.mdx
index 4d60fc05..383fb405 100644
--- a/en/guides/application-publishing/launch-your-webapp-quickly/README.mdx
+++ b/en/guides/application-publishing/launch-your-webapp-quickly/README.mdx
@@ -2,7 +2,6 @@
title: Publish as a Single-page Web App
---
-
One of the benefits of creating AI applications with Dify is that you can publish a Single-page AI web app accessible to all users on the internet within minutes.
* If you're using the self-hosted open-source version, the application will run on your server
diff --git a/en/guides/application-publishing/launch-your-webapp-quickly/conversation-application.mdx b/en/guides/application-publishing/launch-your-webapp-quickly/conversation-application.mdx
index e82afc6b..5c986603 100644
--- a/en/guides/application-publishing/launch-your-webapp-quickly/conversation-application.mdx
+++ b/en/guides/application-publishing/launch-your-webapp-quickly/conversation-application.mdx
@@ -26,7 +26,9 @@ Fill in the necessary details and click the "Start Conversation" button to begin
Click the "New Conversation" button to start a new conversation. Hover over a conversation to pin or delete it.
-
+
+
+
### Conversation Opener
diff --git a/en/guides/application-publishing/launch-your-webapp-quickly/web-app-settings.mdx b/en/guides/application-publishing/launch-your-webapp-quickly/web-app-settings.mdx
index 57512a61..c08f87e3 100644
--- a/en/guides/application-publishing/launch-your-webapp-quickly/web-app-settings.mdx
+++ b/en/guides/application-publishing/launch-your-webapp-quickly/web-app-settings.mdx
@@ -1,8 +1,7 @@
---
-title: Overview
+title: Web App Settings
---
-
Web applications are designed for application users. When an application developer creates an application on Dify, a corresponding web application is generated. Users of the web application can use it without logging in. The web application is adapted for various device sizes: PC, tablet, and mobile.
The content of the web application aligns with the configuration of the published application. When the application's configuration is modified and the "Publish" button is clicked on the prompt orchestration page, the web application's content will be updated according to the current configuration of the application.
diff --git a/en/guides/extension/api-based-extension/README.mdx b/en/guides/extension/api-based-extension/README.mdx
index bb6235b2..5b4e28e5 100644
--- a/en/guides/extension/api-based-extension/README.mdx
+++ b/en/guides/extension/api-based-extension/README.mdx
@@ -2,7 +2,6 @@
title: API-Based Extension
---
-
Developers can extend module capabilities through the API extension module. Currently supported module extensions include:
* `moderation`
@@ -38,14 +37,15 @@ POST {Your-API-Endpoint}
`Authorization`
-
Bearer {api\_key}
+
Bearer `{api_key}`
The API Key is transmitted as a token. You need to parse the `api_key` and verify if it matches the provided API Key to ensure API security.
+
### Request Body
-```JSON
+```BASH
{
"point": string, // Extension point, different modules may contain multiple extension points
"params": {
@@ -56,7 +56,7 @@ POST {Your-API-Endpoint}
### API Response
-```JSON
+```BASH
{
... // For the content returned by the API, see the specific module's design specifications for different extension points.
}
@@ -69,14 +69,14 @@ When configuring API-based Extension in Dify, Dify will send a request to the AP
### Header
-```JSON
+```BASH
Content-Type: application/json
Authorization: Bearer {api_key}
```
### Request Body
-```JSON
+```BASH
{
"point": "ping"
}
@@ -84,13 +84,12 @@ Authorization: Bearer {api_key}
### Expected API response
-```JSON
+```BASH
{
"result": "pong"
}
```
-\\
## For Example
@@ -102,14 +101,14 @@ Here we take the external data tool as an example, where the scenario is to retr
### **Header**
-```JSON
+```BASH
Content-Type: application/json
Authorization: Bearer 123456
```
### **Request Body**
-```JSON
+```BASH
{
"point": "app.external_data_tool.query",
"params": {
@@ -125,7 +124,7 @@ Authorization: Bearer 123456
### **API Response**
-```JSON
+```BASH
{
"result": "City: London\nTemperature: 10°C\nRealFeel®: 8°C\nAir Quality: Poor\nWind Direction: ENE\nWind Speed: 8 km/h\nWind Gusts: 14 km/h\nPrecipitation: Light rain"
}
@@ -137,14 +136,16 @@ The code is based on the Python FastAPI framework.
#### **Install dependencies.**
-
pip install 'fastapi[all]' uvicorn
-
+```bash
+pip install 'fastapi[all]' uvicorn
+```
#### Write code according to the interface specifications.
-
+
#### Launch the API service.
The default port is 8000. The complete address of the API is: `http://127.0.0.1:8000/api/dify/receive`with the configured API Key '123456'.
-
uvicorn main:app --reload --host 0.0.0.0
-
+```bash
+uvicorn main:app --reload --host 0.0.0.0
+```
#### Configure this API in Dify.
@@ -221,7 +223,7 @@ The default port is 8000. The complete address of the API is: `http://127.0.0.1:
When debugging the App, Dify will request the configured API and send the following content (example):
-```JSON
+```BASH
{
"point": "app.external_data_tool.query",
"params": {
@@ -237,7 +239,7 @@ When debugging the App, Dify will request the configured API and send the follow
API Response:
-```JSON
+```BASH
{
"result": "City: London\nTemperature: 10°C\nRealFeel®: 8°C\nAir Quality: Poor\nWind Direction: ENE\nWind Speed: 8 km/h\nWind Gusts: 14 km/h\nPrecipitation: Light rain"
}
@@ -249,11 +251,11 @@ Since Dify's cloud version can't access internal network API services, you can u
1. Visit the Ngrok official website at [https://ngrok.com](https://ngrok.com/), register, and download the Ngrok file.
-
+
2. After downloading, go to the download directory. Unzip the package and run the initialization script as instructed:
-```
+```bash
$ unzip /path/to/ngrok.zip
$ ./ngrok config add-authtoken 你的Token
```
@@ -264,7 +266,7 @@ $ ./ngrok config add-authtoken 你的Token
Run the following command to start:
-```
+```bash
$ ./ngrok http [port number]
```
@@ -282,4 +284,4 @@ Now, this API endpoint is accessible publicly. You can configure this endpoint i
We recommend that you use Cloudflare Workers to deploy your API extension, because Cloudflare Workers can easily provide a public address and can be used for free.
-[cloudflare-workers](cloudflare-workers)
+[Deploy API Tools with Cloudflare Workers](cloudflare-workers)
diff --git a/en/guides/extension/api-based-extension/cloudflare-workers.mdx b/en/guides/extension/api-based-extension/cloudflare-workers.mdx
index 5dade47d..cb92a107 100644
--- a/en/guides/extension/api-based-extension/cloudflare-workers.mdx
+++ b/en/guides/extension/api-based-extension/cloudflare-workers.mdx
@@ -7,7 +7,7 @@ title: Deploy API Tools with Cloudflare Workers
Since the Dify API Extension requires a publicly accessible internet address as an API Endpoint, we need to deploy our API extension to a public internet address. Here, we use Cloudflare Workers for deploying our API extension.
-We clone the [Example GitHub Repository](https://github.com/crazywoola/dify-extension-workers), which contains a simple API extension. We can modify this as a base.
+Clone the [Example GitHub Repository](https://github.com/crazywoola/dify-extension-workers), which contains a simple API extension. We can modify this as a base.
```bash
git clone https://github.com/crazywoola/dify-extension-workers.git
@@ -59,6 +59,32 @@ alt=""
/>
+Additionally, you can use the `npm run dev` command to deploy locally for testing.
+
+```bash
+npm install
+npm run dev
+```
+
+Related output:
+
+```bash
+$ npm run dev
+> dev
+> wrangler dev src/index.ts
+
+ ⛅️ wrangler 3.99.0
+-------------------
+
+Your worker has access to the following bindings:
+- Vars:
+ - TOKEN: "ban****ool"
+⎔ Starting local server...
+[wrangler:inf] Ready on http://localhost:58445
+```
+
+After this, you can use tools like Postman to debug the local interface.
+
## Other Logic TL;DR
### About Bearer Auth
diff --git a/en/guides/extension/api-based-extension/external-data-tool.mdx b/en/guides/extension/api-based-extension/external-data-tool.mdx
index 3f6da5c1..dfea1091 100644
--- a/en/guides/extension/api-based-extension/external-data-tool.mdx
+++ b/en/guides/extension/api-based-extension/external-data-tool.mdx
@@ -2,7 +2,6 @@
title: External Data Tools
---
-
External data tools are used to fetch additional data from external sources after the end user submits data, and then assemble this data into prompts as additional context information for the LLM. Dify provides a default tool for external API calls, check [External Data Tool](https://docs.dify.ai/guides/knowledge-base/external-data-tool) for details.
For developers deploying Dify locally, to meet more customized needs or to avoid developing an additional API Server, you can directly insert custom external data tool logic in the form of a plugin based on the Dify service. After extending custom tools, your custom tool options will be added to the dropdown list of tool types, and team members can use these custom tools to fetch external data.
@@ -17,7 +16,7 @@ Here is an example of extending an external data tool for `Weather Search`, with
4. Preview the frontend interface
5. Debug the extension
-### 1. **Initialize the Directory**
+### 1. Initialize the Directory
To add a custom type `Weather Search`, you need to create the relevant directory and files under `api/core/external_data_tool`.
@@ -32,7 +31,7 @@ To add a custom type `Weather Search`, you need to create the relevant directory
└── schema.json
```
-### 2. **Add Frontend Component Specifications**
+### 2. Add Frontend Component Specifications
* `schema.json`, which defines the frontend component specifications, detailed in [.](./ "mention")
@@ -79,7 +78,7 @@ To add a custom type `Weather Search`, you need to create the relevant directory
`weather_search.py` code template, where you can implement the specific business logic.
-Note: The class variable `name` must be the custom type name, consistent with the directory and file name, and must be unique.
+ The class variable `name` must be the custom type name, consistent with the directory and file name, and must be unique.
```python
@@ -130,17 +129,12 @@ class WeatherSearch(ExternalDataTool):
return f'Weather in {city} is 0°C'
```
-
-
-### 4. **Debug the Extension**
+### 4. Debug the Extension
Now, you can select the custom `Weather Search` external data tool extension type in the Dify application orchestration interface for debugging.
-## Implementation Class Template
+#### Implementation Class Template
```python
from typing import Optional
@@ -181,7 +175,7 @@ class WeatherSearch(ExternalDataTool):
### Detailed Introduction to Implementation Class Development
-### def validate_config
+#### def validate_config
`schema.json` form validation method, called when the user clicks "Publish" to save the configuration.
@@ -193,4 +187,4 @@ class WeatherSearch(ExternalDataTool):
User-defined data query implementation, the returned result will be replaced into the specified variable.
* `inputs`: Variables passed by the end user
-* `query`: Current conversation input content from the end user, a fixed parameter for conversational applications.
\ No newline at end of file
+* `query`: Current conversation input content from the end user, a fixed parameter for conversational applications.
diff --git a/en/guides/extension/api-based-extension/moderation-extension.mdx b/en/guides/extension/api-based-extension/moderation-extension.mdx
index 1a7eb9e7..9fb59d68 100644
--- a/en/guides/extension/api-based-extension/moderation-extension.mdx
+++ b/en/guides/extension/api-based-extension/moderation-extension.mdx
@@ -2,7 +2,6 @@
title: Moderation
---
-
This module is used to review the content input by end-users and the output from LLMs within the application, divided into two types of extension points.
Please read [.](./ "mention") to complete the development and integration of basic API service capabilities.
diff --git a/en/guides/extension/api-based-extension/moderation.mdx b/en/guides/extension/api-based-extension/moderation.mdx
index 5839fdd3..724e1eab 100644
--- a/en/guides/extension/api-based-extension/moderation.mdx
+++ b/en/guides/extension/api-based-extension/moderation.mdx
@@ -2,7 +2,6 @@
title: Sensitive Content Moderation
---
-
This module is used to review the content input by end-users and the output content of the LLM within the application. It is divided into two types of extension points.
### Extension Points
@@ -17,7 +16,7 @@ This module is used to review the content input by end-users and the output cont
#### Request Body
-```
+```json
{
"point": "app.moderation.input", // Extension point type, fixed as app.moderation.input here
"params": {
diff --git a/en/guides/extension/code-based-extension/external-data-tool.mdx b/en/guides/extension/code-based-extension/external-data-tool.mdx
index 1d06366f..66fd52d6 100644
--- a/en/guides/extension/code-based-extension/external-data-tool.mdx
+++ b/en/guides/extension/code-based-extension/external-data-tool.mdx
@@ -2,7 +2,6 @@
title: External Data Tools
---
-
External data tools are used to fetch additional data from external sources after the end user submits data, and then assemble this data into prompts as additional context information for the LLM. Dify provides a default tool for external API calls, check [api-based-extension](/en/guides/extension/api-based-extension) for details.
For developers deploying Dify locally, to meet more customized needs or to avoid developing an additional API Server, you can directly insert custom external data tool logic in the form of a plugin based on the Dify service. After extending custom tools, your custom tool options will be added to the dropdown list of tool types, and team members can use these custom tools to fetch external data.
@@ -34,7 +33,7 @@ To add a custom type `Weather Search`, you need to create the relevant directory
### 2. **Add Frontend Component Specifications**
-* `schema.json`, which defines the frontend component specifications, detailed in [Code Based Extension](.)
+* `schema.json`, which defines the frontend component specifications, detailed in [.](./ "mention")
```json
{
@@ -78,9 +77,9 @@ To add a custom type `Weather Search`, you need to create the relevant directory
`weather_search.py` code template, where you can implement the specific business logic.
-
+
Note: The class variable `name` must be the custom type name, consistent with the directory and file name, and must be unique.
-
+
```python
from typing import Optional
@@ -130,12 +129,6 @@ class WeatherSearch(ExternalDataTool):
return f'Weather in {city} is 0°C'
```
-
-
-
-
### 4. **Debug the Extension**
Now, you can select the custom `Weather Search` external data tool extension type in the Dify application orchestration interface for debugging.
@@ -193,4 +186,4 @@ class WeatherSearch(ExternalDataTool):
User-defined data query implementation, the returned result will be replaced into the specified variable.
* `inputs`: Variables passed by the end user
-* `query`: Current conversation input content from the end user, a fixed parameter for conversational applications.
\ No newline at end of file
+* `query`: Current conversation input content from the end user, a fixed parameter for conversational applications.
diff --git a/en/guides/extension/code-based-extension/moderation.mdx b/en/guides/extension/code-based-extension/moderation.mdx
index 7a16678b..6aae34de 100644
--- a/en/guides/extension/code-based-extension/moderation.mdx
+++ b/en/guides/extension/code-based-extension/moderation.mdx
@@ -2,7 +2,6 @@
title: Sensitive Content Moderation
---
-
In addition to the system's built-in content moderation types, Dify also supports user-defined content moderation rules. This method is suitable for developers customizing their own private deployments. For instance, in an enterprise internal customer service setup, it may be required that users, while querying or customer service agents while responding, not only avoid entering words related to violence, sex, and illegal activities but also avoid specific terms forbidden by the enterprise or violating internally established moderation logic. Developers can extend custom content moderation rules at the code level in a private deployment of Dify.
## Quick Start
@@ -19,7 +18,7 @@ Here is an example of extending a `Cloud Service` content moderation type, with
To add a custom type `Cloud Service`, create the relevant directories and files under the `api/core/moderation` directory.
-```Plain
+```plaintext
.
└── api
└── core
@@ -32,7 +31,7 @@ To add a custom type `Cloud Service`, create the relevant directories and files
### 2. Add Frontend Component Specifications
-* `schema.json`: This file defines the frontend component specifications. For details, see [.](./ "mention").
+* `schema.json`: This file defines the frontend component specifications.
```json
{
@@ -106,9 +105,9 @@ To add a custom type `Cloud Service`, create the relevant directories and files
`cloud_service.py` code template where you can implement specific business logic.
-
+
Note: The class variable name must be the same as the custom type name, matching the directory and file names, and must be unique.
-
+
```python
from core.moderation.base import Moderation, ModerationAction, ModerationInputsResult, ModerationOutputsResult
@@ -205,12 +204,6 @@ class CloudServiceModeration(Moderation):
return False
```
-
-
-
-
### 4. Debug the Extension
At this point, you can select the custom `Cloud Service` content moderation extension type for debugging in the Dify application orchestration interface.
@@ -278,7 +271,7 @@ class CloudServiceModeration(Moderation):
The `schema.json` form validation method is called when the user clicks "Publish" to save the configuration.
* `config` form parameters
- * `{{variable}}` custom variable of the form
+ * `{variable}` custom variable of the form
* `inputs_config` input moderation preset response
* `enabled` whether it is enabled
* `preset_response` input preset response
@@ -314,4 +307,5 @@ Output validation function
* `direct_output`: directly output the preset response
* `overridden`: override the passed variable values
* `preset_response`: preset response (returned only when action=direct_output)
- * `text`: overridden content of the LLM response (returned only when action=overridden).
\ No newline at end of file
+ * `text`: overridden content of the LLM response (returned only when action=overridden).
+
diff --git a/en/guides/knowledge-base/api-documentation/external-knowledge-api-documentation.mdx b/en/guides/knowledge-base/api-documentation/external-knowledge-api-documentation.mdx
index 431b18ae..279b6a3e 100644
--- a/en/guides/knowledge-base/api-documentation/external-knowledge-api-documentation.mdx
+++ b/en/guides/knowledge-base/api-documentation/external-knowledge-api-documentation.mdx
@@ -164,12 +164,5 @@ HTTP Status Code: 500
You can learn how to develop external knowledge base plugins through the following video tutorial using LlamaCloud as an example:
-
For more information about how it works, please refer to the plugin's [GitHub repository](https://github.com/langgenius/dify-official-plugins/tree/main/extensions/llamacloud).
diff --git a/en/guides/knowledge-base/api-documentation/external-knowledge-api.mdx b/en/guides/knowledge-base/api-documentation/external-knowledge-api.mdx
index 11f4f944..279b6a3e 100644
--- a/en/guides/knowledge-base/api-documentation/external-knowledge-api.mdx
+++ b/en/guides/knowledge-base/api-documentation/external-knowledge-api.mdx
@@ -1,415 +1,168 @@
---
-title: Chatbot
-version: 'English'
+title: External Knowledge API
---
-## Overview
-Chat applications support session persistence, allowing previous chat history to be used as context for responses. This can be applicable for chatbots, customer service AI, etc.
+## Endpoint
-## Base URL
```
-https://api.dify.ai/v1
+POST /retrieval
```
-## Authentication
-The Service API uses API-Key authentication.
+## Header
-> **Important**: Strongly recommend storing your API Key on the server-side, not shared or stored on the client-side, to avoid possible API-Key leakage that can lead to serious consequences.
+This API is used to connect to a knowledge base that is independent of Dify and maintained by developers. For more details, please refer to [Connecting to an External Knowledge Base](https://docs.dify.ai/guides/knowledge-base/connect-external-knowledge-base). You can use `API-Key` in the `Authorization` HTTP Header to verify permissions. The authentication logic is defined by you in the retrieval API, as shown below:
-For all API requests, include your API Key in the Authorization HTTP Header:
```
Authorization: Bearer {API_KEY}
```
-## API Endpoints
+## Request Body Elements
-### Send Chat Message
-`POST /chat-messages`
+The request accepts the following data in JSON format.
-Send a request to the chat application.
+| Property | Required | Type | Description | Example value |
+|----------|----------|------|-------------|---------------|
+| knowledge_id | TRUE | string | Your knowledge's unique ID | AAA-BBB-CCC |
+| query | TRUE | string | User's query | What is Dify? |
+| retrieval_setting | TRUE | object | Knowledge's retrieval parameters | See below |
+| metadata_condition | FALSE | Object | Metadata filtering conditions | See below |
-#### Request Body
+The `retrieval_setting` property is an object containing the following keys:
-- **query** (string) Required
- - User Input / Question Content
+| Property | Required | Type | Description | Example value |
+|----------|----------|------|-------------|---------------|
+| top_k | TRUE | int | Maximum number of retrieved results | 5 |
+| score_threshold | TRUE | float | The score limit of relevance of the result to the query, scope: 0~1 | 0.5 |
-- **inputs** (object) Required
- - Allows the entry of various variable values defined by the App
- - Contains multiple key/value pairs, each key corresponding to a specific variable
- - At least one key/value pair required
+The `metadata_condition` property is an object containing the following keys:
-- **response_mode** (string) Required
- - Modes supported:
- - `streaming`: Streaming mode (recommended), implements typewriter-like output through SSE
- - `blocking`: Blocking mode, returns result after execution completes
- - Note: Due to Cloudflare restrictions, requests will timeout after 100 seconds
- - Note: blocking mode is not supported in Agent Assistant mode
+| Attribute | Required | Type | Description | Example Value |
+|-----------|----------|------|-------------|--------------|
+| logical_operator | No | String | Logical operator, values can be `and` or `or`, default is `and` | and |
+| conditions | Yes | Array (Object) | List of conditions | See below |
-- **user** (string) Required
- - User identifier for retrieval and statistics
- - Should be uniquely defined within the application
+Each object in the `conditions` array contains the following keys:
-- **conversation_id** (string) Optional
- - Conversation ID to continue based on previous chat records
+| Attribute | Required | Type | Description | Example Value |
+|-----------|----------|------|-------------|--------------|
+| name | Yes | Array (String) | Names of the metadata to filter | `["category", "tag"]` |
+| comparison_operator | Yes | String | Comparison operator | `contains` |
+| value | No | String | Comparison value, can be omitted when the operator is `empty`, `not empty`, `null`, or `not null` | `"AI"` |
-- **files** (array[object]) Optional
- - File list for image input with text understanding
- - Available only when model supports Vision capability
- - Properties:
- - `type` (string): Supported type: image
- - `transfer_method` (string): 'remote_url' or 'local_file'
- - `url` (string): Image URL (for remote_url)
- - `upload_file_id` (string): Uploaded file ID (for local_file)
+Supported `comparison_operator` operators:
-- **auto_generate_name** (bool) Optional
- - Auto-generate title, default is false
- - Can achieve async title generation via conversation rename API
+- `contains`: Contains a certain value
+- `not contains`: Does not contain a certain value
+- `start with`: Starts with a certain value
+- `end with`: Ends with a certain value
+- `is`: Equals a certain value
+- `is not`: Does not equal a certain value
+- `empty`: Is empty
+- `not empty`: Is not empty
+- `=`: Equals
+- `≠`: Not equal
+- `>`: Greater than
+- `<`: Less than
+- `≥`: Greater than or equal to
+- `≤`: Less than or equal to
+- `before`: Before a certain date
+- `after`: After a certain date
-#### Response Types
+## Request Syntax
-**Blocking Mode Response (ChatCompletionResponse)**
-Returns complete App result with Content-Type: application/json
-
-```typescript
-interface ChatCompletionResponse {
- message_id: string;
- conversation_id: string;
- mode: string;
- answer: string;
- metadata: {
- usage: Usage;
- retriever_resources: RetrieverResource[];
- };
- created_at: number;
-}
-```
-
-**Streaming Mode Response (ChunkChatCompletionResponse)**
-Returns stream chunks with Content-Type: text/event-stream
-
-Events:
-- `message`: LLM text chunk event
-- `agent_message`: LLM text chunk event (Agent Assistant mode)
-- `agent_thought`: Agent reasoning process
-- `message_file`: New file created by tool
-- `message_end`: Stream end event
-- `message_replace`: Content replacement event
-- `workflow_started`: Workflow execution start
-- `node_started`: Node execution start
-- `node_finished`: Node execution completion
-- `workflow_finished`: Workflow execution completion
-- `parallel_branch_started`: Parallel branch start
-- `parallel_branch_finished`: Parallel branch completion
-- `iteration_started`: Iteration start
-- `iteration_next`: Next iteration
-- `iteration_completed`: Iteration completion
-- `error`: Exception event
-- `ping`: Keep-alive event (every 10s)
-
-### File Upload
-`POST /files/upload`
-
-Upload files (currently only images) for multimodal understanding.
-
-#### Supported Formats
-- png
-- jpg
-- jpeg
-- webp
-- gif
-
-#### Request Body
-Requires multipart/form-data:
-- `file` (File) Required
-- `user` (string) Required
-
-#### Response
-```typescript
-interface FileUploadResponse {
- id: string;
- name: string;
- size: number;
- extension: string;
- mime_type: string;
- created_by: string;
- created_at: number;
-}
-```
-
-### Stop Generate
-`POST /chat-messages/:task_id/stop`
-
-Stop ongoing generation (streaming mode only).
-
-#### Request Body
-- `user` (string) Required
-
-#### Response
```json
+POST /retrieval HTTP/1.1
+-- header
+Content-Type: application/json
+Authorization: Bearer your-api-key
+-- data
{
- "result": "success"
+ "knowledge_id": "your-knowledge-id",
+ "query": "your question",
+ "retrieval_setting":{
+ "top_k": 2,
+ "score_threshold": 0.5
+ }
}
```
-### Message Feedback
-`POST /messages/:message_id/feedbacks`
+## Response Elements
-Submit user feedback on messages.
+If the action is successful, the service sends back an HTTP 200 response.
-#### Request Body
-- `rating` (string) Required: "like" | "dislike" | null
-- `user` (string) Required
+The following data is returned in JSON format by the service.
+
+| Property | Required | Type | Description | Example value |
+|----------|----------|------|-------------|---------------|
+| records | TRUE | List[Object] | A list of records from querying the knowledge base. | See below |
+
+The `records` property is a list object containing the following keys:
+
+| Property | Required | Type | Description | Example value |
+|----------|----------|------|-------------|---------------|
+| content | TRUE | string | Contains a chunk of text from a data source in the knowledge base. | Dify: The Innovation Engine for GenAI Applications |
+| score | TRUE | float | The score of relevance of the result to the query, scope: 0~1 | 0.5 |
+| title | TRUE | string | Document title | Dify Introduction |
+| metadata | FALSE | json | Contains metadata attributes and their values for the document in the data source. | See example |
+
+## Response Syntax
-#### Response
```json
+HTTP/1.1 200
+Content-type: application/json
{
- "result": "success"
+ "records": [{
+ "metadata": {
+ "path": "s3://dify/knowledge.txt",
+ "description": "dify knowledge document"
+ },
+ "score": 0.98,
+ "title": "knowledge.txt",
+ "content": "This is the document for external knowledge."
+ },
+ {
+ "metadata": {
+ "path": "s3://dify/introduce.txt",
+ "description": "dify introduce"
+ },
+ "score": 0.66,
+ "title": "introduce.txt",
+ "content": "The Innovation Engine for GenAI Applications"
+ }
+ ]
}
```
-### Get Conversation History
-`GET /messages`
+## Errors
-Returns historical chat records in reverse chronological order.
+If the action fails, the service sends back the following error information in JSON format:
-#### Query Parameters
-- `conversation_id` (string) Required
-- `user` (string) Required
-- `first_id` (string) Optional: First chat record ID
-- `limit` (int) Optional: Default 20
+| Property | Required | Type | Description | Example value |
+|----------|----------|------|-------------|---------------|
+| error_code | TRUE | int | Error code | 1001 |
+| error_msg | TRUE | string | The description of API exception | Invalid Authorization header format. Expected 'Bearer ' format. |
-#### Response
-```typescript
-interface ConversationHistory {
- data: Array<{
- id: string;
- conversation_id: string;
- inputs: Record;
- query: string;
- message_files: Array<{
- id: string;
- type: string;
- url: string;
- belongs_to: 'user' | 'assistant';
- }>;
- agent_thoughts: Array<{
- id: string;
- message_id: string;
- position: number;
- thought: string;
- observation: string;
- tool: string;
- tool_input: string;
- created_at: number;
- message_files: string[];
- }>;
- answer: string;
- created_at: number;
- feedback?: {
- rating: 'like' | 'dislike';
- };
- retriever_resources: RetrieverResource[];
- }>;
- has_more: boolean;
- limit: number;
-}
-```
+The `error_code` property has the following types:
-### Get Conversations
-`GET /conversations`
+| Code | Description |
+|------|-------------|
+| 1001 | Invalid Authorization header format. |
+| 1002 | Authorization failed |
+| 2001 | The knowledge does not exist |
-Retrieve conversation list for current user.
+### HTTP Status Codes
-#### Query Parameters
-- `user` (string) Required
-- `last_id` (string) Optional
-- `limit` (int) Optional: Default 20
-- `pinned` (boolean) Optional
+**AccessDeniedException**
+The request is denied because of missing access permissions. Check your permissions and retry your request.
+HTTP Status Code: 403
-#### Response
-```typescript
-interface ConversationList {
- data: Array<{
- id: string;
- name: string;
- inputs: Record;
- introduction: string;
- created_at: number;
- }>;
- has_more: boolean;
- limit: number;
-}
-```
+**InternalServerException**
+An internal server error occurred. Retry your request.
+HTTP Status Code: 500
-### Delete Conversation
-`DELETE /conversations/:conversation_id`
+## Development Example
-Delete a conversation.
+You can learn how to develop external knowledge base plugins through the following video tutorial using LlamaCloud as an example:
-#### Request Body
-- `user` (string) Required
-#### Response
-```json
-{
- "result": "success"
-}
-```
-
-### Rename Conversation
-`POST /conversations/{conversation_id}/name`
-
-#### Request Body
-- `name` (string) Optional
-- `auto_generate` (boolean) Optional: Default false
-- `user` (string) Required
-
-#### Response
-```typescript
-interface RenamedConversation {
- id: string;
- name: string;
- inputs: Record;
- introduction: string;
- created_at: number;
-}
-```
-
-### Speech to Text
-`POST /audio-to-text`
-
-Convert audio to text.
-
-#### Request Body (multipart/form-data)
-- `file` (File) Required
- - Supported formats: mp3, mp4, mpeg, mpga, m4a, wav, webm
- - Size limit: 15MB
-- `user` (string) Required
-
-#### Response
-```typescript
-interface AudioToTextResponse {
- text: string;
-}
-```
-
-### Get Application Parameters
-`GET /parameters`
-
-Retrieve application configuration and settings.
-
-#### Query Parameters
-- `user` (string) Required
-
-#### Response
-```typescript
-interface ApplicationParameters {
- opening_statement: string;
- suggested_questions_after_answer: {
- enabled: boolean;
- };
- speech_to_text: {
- enabled: boolean;
- };
- retriever_resource: {
- enabled: boolean;
- };
- annotation_reply: {
- enabled: boolean;
- };
- user_input_form: Array<{
- 'text-input' | 'paragraph' | 'select': {
- label: string;
- variable: string;
- required: boolean;
- default: string;
- options?: string[];
- };
- }>;
- file_upload: {
- image: {
- enabled: boolean;
- number_limits: number;
- transfer_methods: string[];
- };
- };
- system_parameters: {
- image_file_size_limit: string;
- };
-}
-```
-
-### Get Application Meta Information
-`GET /meta`
-
-Retrieve tool icons and metadata.
-
-#### Query Parameters
-- `user` (string) Required
-
-#### Response
-```typescript
-interface ApplicationMeta {
- tool_icons: Record;
-}
-```
-
-## Type Definitions
-
-### Usage
-```typescript
-interface Usage {
- prompt_tokens: number;
- prompt_unit_price: string;
- prompt_price_unit: string;
- prompt_price: string;
- completion_tokens: number;
- completion_unit_price: string;
- completion_price_unit: string;
- completion_price: string;
- total_tokens: number;
- total_price: string;
- currency: string;
- latency: number;
-}
-```
-
-### RetrieverResource
-```typescript
-interface RetrieverResource {
- position: number;
- content: string;
- score: string;
- dataset_id: string;
- dataset_name: string;
- document_id: string;
- document_name: string;
- segment_id: string;
-}
-```
-
-## Error Codes
-
-Common error codes you may encounter:
-- 404: Conversation does not exist
-- 400: invalid_param - Abnormal parameter input
-- 400: app_unavailable - App configuration unavailable
-- 400: provider_not_initialize - No available model credential configuration
-- 400: provider_quota_exceeded - Model invocation quota insufficient
-- 400: model_currently_not_support - Current model unavailable
-- 400: completion_request_error - Text generation failed
-- 500: Internal server error
-
-For file uploads:
-- 400: no_file_uploaded - File must be provided
-- 400: too_many_files - Only one file accepted
-- 400: unsupported_preview - File does not support preview
-- 400: unsupported_estimate - File does not support estimation
-- 413: file_too_large - File is too large
-- 415: unsupported_file_type - Unsupported extension
-- 503: s3_connection_failed - Unable to connect to S3
-- 503: s3_permission_denied - No permission for S3
-- 503: s3_file_too_large - Exceeds S3 size limit
\ No newline at end of file
+For more information about how it works, please refer to the plugin's [GitHub repository](https://github.com/langgenius/dify-official-plugins/tree/main/extensions/llamacloud).
diff --git a/en/guides/knowledge-base/connect-external-knowledge-base.mdx b/en/guides/knowledge-base/connect-external-knowledge-base.mdx
index 1cedfee0..3f2c83fc 100644
--- a/en/guides/knowledge-base/connect-external-knowledge-base.mdx
+++ b/en/guides/knowledge-base/connect-external-knowledge-base.mdx
@@ -43,7 +43,7 @@ With the LlamaCloud plugin, you can directly use LlamaCloud's powerful retrieval
For more information about how it works, please refer to the plugin's [GitHub repository](https://github.com/langgenius/dify-official-plugins/tree/main/extensions/llamacloud).
-##### Video Tutorial
+#### Video Tutorial
The following video demonstrates in detail how to use the LlamaCloud plugin to connect to external knowledge bases:
diff --git a/en/guides/knowledge-base/create-knowledge-and-upload-documents/import-content-data/readme.mdx b/en/guides/knowledge-base/create-knowledge-and-upload-documents/import-content-data/readme.mdx
index ed4dda72..f0b3ef93 100644
--- a/en/guides/knowledge-base/create-knowledge-and-upload-documents/import-content-data/readme.mdx
+++ b/en/guides/knowledge-base/create-knowledge-and-upload-documents/import-content-data/readme.mdx
@@ -21,11 +21,11 @@ Drag and drop or select files to upload. The number of files allowed for **batch
When creating a **Knowledge**, you can import data from online sources. The knowledge supports the following two types of online data:
-
+
Learn how to import data from Notion
-
+
Learn how to sync data from websites
diff --git a/en/guides/knowledge-base/create-knowledge-and-upload-documents/import-content-data/sync-from-notion.mdx b/en/guides/knowledge-base/create-knowledge-and-upload-documents/import-content-data/sync-from-notion.mdx
index 0ad9c5d5..82f9f042 100644
--- a/en/guides/knowledge-base/create-knowledge-and-upload-documents/import-content-data/sync-from-notion.mdx
+++ b/en/guides/knowledge-base/create-knowledge-and-upload-documents/import-content-data/sync-from-notion.mdx
@@ -15,25 +15,27 @@ Dify datasets support importing from Notion and setting up **synchronization** s
After completing the authorization verification, go to the create dataset page, click **Sync from Notion Content**, and select the authorized pages you need to import.
-### Segmentation and Cleaning
+
-Next, choose your **segmentation settings** and **indexing method**, then **Save and Process**. Wait for Dify to process this data for you, which typically requires token consumption in the LLM provider. Dify supports importing not only standard page types but also aggregates and saves page properties under the database type.
+### Chunking and Cleaning
-_**Please note: Images and files are not currently supported for import, and tabular data will be converted to text display.**_
+Next, choose a [chunking mode](#chunking-and-cleaning) and [indexing method](/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods) for your knowledge base, then save it and wait for the automatic processing. Dify not only supports importing standard Notion pages but can also consolidate and save page attributes from database-type pages.
-
+_**Note: images and files cannot be imported, and data from tables will be converted to text.**_
+
+
### Synchronizing Notion Data
-If your Notion content is modified, you can directly click **Sync** in the Dify dataset **Document List Page** to perform a one-click data synchronization. This step requires token consumption.
+If your Notion content has been updated, you can sync the changes by clicking the **Sync** button for the corresponding page in the document list of your knowledge base. Syncing involves an embedding process, which will consume tokens from your embedding model.

### Integration Configuration Method for Community Edition Notion
-Notion integration can be done in two ways: **internal integration** and **public integration**. You can configure them as needed in Dify. For specific differences between the two integration methods, please refer to [Notion Official Documentation](https://developers.notion.com/docs/authorization).
+Notion offers two integration options: **internal integration** and **public integration**. For more details on the differences between these two methods, please refer to the [official Notion documentation](https://developers.notion.com/docs/authorization).
-### 1. **Using Internal Integration**
+#### 1. Using Internal Integration
First, create an integration in the integration settings page [Create Integration](https://www.notion.so/my-integrations). By default, all integrations start as internal integrations; internal integrations will be associated with the workspace you choose, so you need to be the workspace owner to create an integration.
@@ -45,13 +47,16 @@ Click the **New integration** button. The type is **Internal** by default (canno
After creating the integration, you can update its settings as needed under the Capabilities tab and click the **Show** button under Secrets to copy the secrets.
+
+
After copying, go back to the Dify source code, and configure the relevant environment variables in the **.env** file. The environment variables are as follows:
-**NOTION_INTEGRATION_TYPE**=internal or **NOTION_INTEGRATION_TYPE**=public
+```
+NOTION_INTEGRATION_TYPE=internal or NOTION_INTEGRATION_TYPE=public
+NOTION_INTERNAL_SECRET=your-internal-secret
+```
-**NOTION_INTERNAL_SECRET**=your-internal-secret
-
-### 2. **Using Public Integration**
+#### 2. Using Public Integration
**You need to upgrade the internal integration to a public integration.** Navigate to the Distribution page of the integration, and toggle the switch to make the integration public. When switching to the public setting, you need to fill in additional information in the Organization Information form below, including your company name, website, and redirect URL, then click the **Submit** button.
@@ -63,8 +68,10 @@ After successfully making the integration public on the integration settings pag
Go back to the Dify source code, and configure the relevant environment variables in the **.env** file. The environment variables are as follows:
-* **NOTION_INTEGRATION_TYPE**=public
-* **NOTION_CLIENT_SECRET**=your-client-secret
-* **NOTION_CLIENT_ID**=your-client-id
+```
+NOTION_INTEGRATION_TYPE=public
+NOTION_CLIENT_SECRET=your-client-secret
+NOTION_CLIENT_ID=your-client-id
+```
After configuration, you can operate the Notion data import and synchronization functions in the dataset.
diff --git a/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods.mdx b/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods.mdx
index 82cd146a..cc4c5e4a 100644
--- a/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods.mdx
+++ b/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods.mdx
@@ -25,8 +25,8 @@ After selecting **High Quality** mode, the indexing method for the knowledge bas

-
-
+### Enable Q&A Mode (Optional, Community Edition Only)
+
When this mode is enabled, the system segments the uploaded text and automatically generates Q\&A pairs for each segment after summarizing its content.
Compared with the common **Q to P** strategy (user questions matched with text paragraphs), the Q\&A mode uses a **Q to Q** strategy (questions matched with questions).
@@ -41,7 +41,7 @@ When a user asks a question, the system identifies the most similar question and

-
+
@@ -52,6 +52,7 @@ If the performance of the economical indexing method does not meet your expectat

+
## Setting the Retrieval Setting
@@ -152,6 +153,11 @@ The **"Weight Settings"** and **"Rerank Model"** settings support the following
In **Economical Indexing** mode, only the inverted index approach is available. An inverted index is a data structure designed for fast keyword retrieval within documents, commonly used in online search engines. Inverted indexing supports only the **TopK** setting.
**TopK:** Determines how many text chunks, deemed most similar to the user’s query, are retrieved. It also automatically adjusts the number of chunks based on the chosen model’s context window. The default value is **3**, and higher numbers will recall more text chunks.
+
+
+
+
+
diff --git a/en/guides/knowledge-base/external-knowledge-api.mdx b/en/guides/knowledge-base/external-knowledge-api.mdx
index 8c822144..2a682aa6 100644
--- a/en/guides/knowledge-base/external-knowledge-api.mdx
+++ b/en/guides/knowledge-base/external-knowledge-api.mdx
@@ -1,5 +1,5 @@
---
-title: 'External Knowledge API'
+title: External Knowledge API
---
## Endpoint
@@ -140,7 +140,7 @@ If the action fails, the service sends back the following error information in J
| Property | Required | Type | Description | Example value |
|----------|----------|------|-------------|---------------|
| error_code | TRUE | int | Error code | 1001 |
-| error_msg | TRUE | string | The description of API exception | Invalid Authorization header format. Expected 'Bearer ' format. |
+| error_msg | TRUE | string | The description of API exception | Invalid Authorization header format. Expected 'Bearer ``' format. |
The `error_code` property has the following types:
@@ -164,6 +164,5 @@ HTTP Status Code: 500
You can learn how to develop external knowledge base plugins through the following video tutorial using LlamaCloud as an example:
-{% embed url="https://www.youtube.com/embed/FaOzKZRS-2E" %}
For more information about how it works, please refer to the plugin's [GitHub repository](https://github.com/langgenius/dify-official-plugins/tree/main/extensions/llamacloud).
diff --git a/en/guides/knowledge-base/knowledge-and-documents-maintenance/introduction.mdx b/en/guides/knowledge-base/knowledge-and-documents-maintenance/introduction.mdx
index aa5c99dd..e36ae0ea 100644
--- a/en/guides/knowledge-base/knowledge-and-documents-maintenance/introduction.mdx
+++ b/en/guides/knowledge-base/knowledge-and-documents-maintenance/introduction.mdx
@@ -18,8 +18,9 @@ Here, you can modify the knowledge base's name, description, permissions, indexi
* **"Only Me"**: Restricts access to the knowledge base owner.
* **"All team members"**: Grants access to every member of the team.
* **"Partial team members"**: Allows selective access to specific team members.
+
+ Users without appropriate permissions cannot access the knowledge base. When granting access to team members (Options 2 or 3), authorized users are granted full permissions, including the ability to view, edit, and delete knowledge base content.
- Users without appropriate permissions cannot access the knowledge base. When granting access to team members (Options 2 or 3), authorized users are granted full permissions, including the ability to view, edit, and delete knowledge base content.
* **Indexing Mode**: For detailed explanations, please refer to the [documentation](/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods).
* **Embedding Model**: Allows you to modify the embedding model for the knowledge base. Changing the embedding model will re-embed all documents in the knowledge base, and the original embeddings will be deleted.
* **Retrieval Settings**: For detailed explanations, please refer to the [documentation](/en/learn-more/extended-reading/retrieval-augment/retrieval).
diff --git a/en/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-knowledge-documents.mdx b/en/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-knowledge-documents.mdx
index aaedcd7a..0cea0f8b 100644
--- a/en/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-knowledge-documents.mdx
+++ b/en/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-knowledge-documents.mdx
@@ -1,5 +1,5 @@
---
-title: Knowledge Base and Document Maintenance
+title: Maintain Documents
---
## Manage Documentations in the Knowledge Base
@@ -70,7 +70,7 @@ Different [chunking modes](/en/guides/knowledge-base/create-knowledge-and-upload
**Parent-child Mode**
- In[Parent-child](/en/guides/knowledge-base/create-knowledge-and-upload-documents/chunking-and-cleaning-text#parent-child-mode) mode, content is divided into parent chunks and child chunks.
+ In [Parent-child](/en/guides/knowledge-base/create-knowledge-and-upload-documents/chunking-and-cleaning-text#parent-child-mode) mode, content is divided into parent chunks and child chunks.
* **Parent chunks**
@@ -235,8 +235,6 @@ Go to **Chunk Settings**, adjust the settings, and click **Save & Process** to s
### Metadata
-In addition to capturing metadata (e.g., title, URL, keywords, or a web page description) from various source documents, metadata is also used as structured fields during the chunk retrieval process for filtering or displaying citation sources.
-
-
+For more details on metadata, see [_Metadata_](/en/guides/knowledge-base/metadata).
***
\ No newline at end of file
diff --git a/en/guides/knowledge-base/metadata.mdx b/en/guides/knowledge-base/metadata.mdx
index 738de16a..18006939 100644
--- a/en/guides/knowledge-base/metadata.mdx
+++ b/en/guides/knowledge-base/metadata.mdx
@@ -19,6 +19,7 @@ This guide aims to help you understand metadata and effectively manage your know
@@ -40,6 +41,7 @@ alt="metadata_field"
@@ -148,6 +150,7 @@ To create a new metadata field:
@@ -170,6 +173,7 @@ To edit a metadata field:
@@ -214,6 +218,7 @@ To add metadata in bulk:
@@ -222,6 +227,7 @@ To add metadata in bulk:
@@ -232,6 +238,7 @@ To add metadata in bulk:
@@ -240,6 +247,7 @@ To add metadata in bulk:
@@ -248,6 +256,7 @@ To add metadata in bulk:
@@ -256,6 +265,7 @@ To add metadata in bulk:
@@ -274,6 +284,7 @@ To update metadata in bulk:
@@ -282,6 +293,7 @@ To update metadata in bulk:
@@ -292,6 +304,7 @@ To update metadata in bulk:
@@ -308,6 +321,7 @@ Use **Apply to All Documents** to control changes:
@@ -330,6 +344,7 @@ On the document details page, click **Start labeling** to begin editing.
To add a single document's metadata fields and values:
1. Click **+Add Metadata** to:
+

- Create new fields via **+New Metadata**.
diff --git a/en/guides/knowledge-base/retrieval-test-and-citation.mdx b/en/guides/knowledge-base/retrieval-test-and-citation.mdx
index d20e633b..2fec4b82 100644
--- a/en/guides/knowledge-base/retrieval-test-and-citation.mdx
+++ b/en/guides/knowledge-base/retrieval-test-and-citation.mdx
@@ -57,13 +57,13 @@ If you want to permanently modify the retrieval method for the knowledge base, g
### 2. Citation and Attribution
-When testing the knowledge base effect within the application, you can go to **Workspace -- Add Feature -- Citation and Attribution** to enable the citation attribution feature.应用内的“上下文”添加知识库后,可以在 **“添加功能”** 内开启 **“引用与归属”**。在应用内输入问题后,若涉及已关联的知识库文档,将标注内容的引用来源。你可以通过此方式检查知识库所召回的内容分段是否符合预期。
+When testing the knowledge base effect within the application, you can go to **Workspace -- Add Feature -- Citation and Attribution** to enable the citation attribution feature.

After enabling the feature, when the large language model responds to a question by citing content from the knowledge base, you can view specific citation paragraph information below the response content, including **original segment text, segment number, matching degree**, etc. Clicking **Link to Knowledge** above the cited segment allows quick access to the segment list in the knowledge base, facilitating developers in debugging and editing.
-
+
### View Linked Applications in the Knowledge Base
diff --git a/en/guides/management/personal-account-management.mdx b/en/guides/management/personal-account-management.mdx
index 511a2d0c..f0934e47 100644
--- a/en/guides/management/personal-account-management.mdx
+++ b/en/guides/management/personal-account-management.mdx
@@ -53,7 +53,7 @@ To change the display language, click on your avatar in the upper right corner o
* Indonesian
* Ukrainian (Ukraine)
-Dify welcomes community volunteers to contribute additional language versions. Visit the [GitHub repository](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) to contribute!
+Dify welcomes community volunteers to contribute additional language versions. Visit the [GitHub repository](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) to contribute!
### View Apps Linked to Your Account
diff --git a/en/guides/management/version-control.mdx b/en/guides/management/version-control.mdx
index beea11bb..ceaa29c4 100644
--- a/en/guides/management/version-control.mdx
+++ b/en/guides/management/version-control.mdx
@@ -15,6 +15,7 @@ This article explains how to manage versions in Dify's Chatflow and Workflow.
@@ -25,6 +26,7 @@ This article explains how to manage versions in Dify's Chatflow and Workflow.
@@ -33,6 +35,7 @@ This article explains how to manage versions in Dify's Chatflow and Workflow.
@@ -41,6 +44,7 @@ This article explains how to manage versions in Dify's Chatflow and Workflow.
diff --git a/en/guides/model-configuration/customizable-model.mdx b/en/guides/model-configuration/customizable-model.mdx
index b712dcdd..5b193a3d 100644
--- a/en/guides/model-configuration/customizable-model.mdx
+++ b/en/guides/model-configuration/customizable-model.mdx
@@ -2,6 +2,9 @@
title: Custom Model Integration
---
+
+ "Models" have been fully integrated into a "Plugin" ecosystem. For detailed development instructions on model plugins, please refer to [Plugin Development](/en/plugins/quick-start/develop-plugins/model-plugin/README). The following content has been archived.
+
### Introduction
@@ -300,4 +303,4 @@ In `llm.py`, create a Xinference LLM class, which we will name `XinferenceAILarg
"""
```
-For an explanation of interface methods, see: [Interfaces](https://github.com/langgenius/dify/blob/main/api/core/model\_runtime/docs/en\_US/interfaces.md). For specific implementations, refer to: [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model\_providers/anthropic/llm/llm.py).
+For an explanation of interface methods, see: [Interfaces](https://github.com/langgenius/dify/blob/main/api/core/model\_runtime/docs/en\_US/interfaces.md). For specific implementations, refer to: [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model\_providers/anthropic/llm/llm.py).
diff --git a/en/guides/model-configuration/interfaces.mdx b/en/guides/model-configuration/interfaces.mdx
index 23f7728f..a26002bb 100644
--- a/en/guides/model-configuration/interfaces.mdx
+++ b/en/guides/model-configuration/interfaces.mdx
@@ -1,7 +1,10 @@
---
-title: Interface Methods
+title: Interface
---
+
+ "Models" have been fully integrated into a "Plugin" ecosystem. For detailed development instructions on model plugins, please refer to [Plugin Development](/en/plugins/quick-start/develop-plugins/model-plugin/README). The following content has been archived.
+
This section describes the interface methods and parameter explanations that need to be implemented by providers and various model types.
diff --git a/en/guides/model-configuration/load-balancing.mdx b/en/guides/model-configuration/load-balancing.mdx
index b4f87252..03c6f3ff 100644
--- a/en/guides/model-configuration/load-balancing.mdx
+++ b/en/guides/model-configuration/load-balancing.mdx
@@ -2,7 +2,6 @@
title: Load Balancing
---
-
Model rate limits are restrictions imposed by model providers on the number of times users or customers can access API services within a specified time frame. These limits help prevent API abuse or misuse, ensure fair access for all users, and control the overall load on the infrastructure.
In enterprise-level large-scale model API calls, high concurrent requests can exceed rate limits and affect user access. Load balancing can distribute API requests across multiple API endpoints, ensuring all users receive the fastest response and the highest model invocation throughput, thereby ensuring stable business operations.
diff --git a/en/guides/model-configuration/new-provider.mdx b/en/guides/model-configuration/new-provider.mdx
index 2cecf721..aab68976 100644
--- a/en/guides/model-configuration/new-provider.mdx
+++ b/en/guides/model-configuration/new-provider.mdx
@@ -2,6 +2,9 @@
title: Adding a New Provider
---
+
+ "Models" have been fully integrated into a "Plugin" ecosystem. For detailed development instructions on model plugins, please refer to [Plugin Development](/en/plugins/quick-start/develop-plugins/model-plugin/README). The following content has been archived.
+
### Provider Configuration Methods
@@ -33,7 +36,7 @@ These three configuration methods **can coexist**, meaning a provider can suppor
Adding a new provider mainly involves several steps. Here is a brief outline to give you an overall understanding. Detailed steps will be introduced below.
-* Create a provider YAML file and write it according to the [Provider Schema](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema.md).
+* Create a provider YAML file and write it according to the [Provider Schema](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema.md).
* Create provider code and implement a `class`.
* Create corresponding model type `modules` under the provider `module`, such as `llm` or `text_embedding`.
* Create same-named code files under the corresponding model `module`, such as `llm.py`, and implement a `class`.
@@ -82,7 +85,7 @@ provider_credential_schema: # Provider credential rules, since Anthropic only s
en_US: Enter your API URL
```
-If the connected provider offers customizable models, such as `OpenAI` which provides fine-tuned models, we need to add [`model_credential_schema`](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema.md). Taking `OpenAI` as an example:
+If the connected provider offers customizable models, such as `OpenAI` which provides fine-tuned models, we need to add [`model_credential_schema`](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema.md). Taking `OpenAI` as an example:
```yaml
model_credential_schema:
@@ -122,7 +125,7 @@ model_credential_schema:
en_US: Enter your API Base
```
-You can also refer to the [YAML configuration information](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema.md) in the directories of other providers under the `model_providers` directory.
+You can also refer to the [YAML configuration information](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema.md) in the directories of other providers under the `model_providers` directory.
**Implement Provider Code**
@@ -159,11 +162,11 @@ You can also reserve the `validate_provider_credentials` implementation first an
**Adding Models**
-[**Adding Predefined Models**](https://docs.dify.ai/v/zh-hans/guides/model-configuration/predefined-model)**👈🏻**
+[**Adding Predefined Models**](/en/guides/model-configuration/predefined-model)**👈🏻**
For predefined models, we can connect them by simply defining a YAML file and implementing the calling code.
-[**Adding Custom Models**](https://docs.dify.ai/v/zh-hans/guides/model-configuration/customizable-model) **👈🏻**
+[**Adding Custom Models**](/en/guides/model-configuration/customizable-model) **👈🏻**
For custom models, we only need to implement the calling code to connect them, but the parameters they handle may be more complex.
diff --git a/en/guides/model-configuration/predefined-model.mdx b/en/guides/model-configuration/predefined-model.mdx
index 5b1b0161..d9044da2 100644
--- a/en/guides/model-configuration/predefined-model.mdx
+++ b/en/guides/model-configuration/predefined-model.mdx
@@ -2,6 +2,9 @@
title: Predefined Model Integration
---
+
+ "Models" have been fully integrated into a "Plugin" ecosystem. For detailed development instructions on model plugins, please refer to [Plugin Development](/en/plugins/quick-start/develop-plugins/model-plugin/README). The following content has been archived.
+
After completing the supplier integration, the next step is to integrate the models under the supplier.
@@ -65,7 +68,7 @@ pricing: # Pricing information
It is recommended to prepare all model configurations before starting the implementation of the model code.
-Similarly, you can refer to the YAML configuration information in the directories of other suppliers under the `model_providers` directory. The complete YAML rules can be found in: Schema[^1].
+Similarly, you can refer to the YAML configuration information in the directories of other suppliers under the `model_providers` directory. The complete YAML rules can be found in: [Schema](#provider).
#### Implementing Model Invocation Code
@@ -169,9 +172,9 @@ Create an Anthropic LLM class in `llm.py`, which we will name `AnthropicLargeLan
"""
```
-For interface method descriptions, see: [Interfaces](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/interfaces.md), and for specific implementation, refer to: [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model_providers/anthropic/llm/llm.py).
+For interface method descriptions, see: [Interfaces](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/interfaces.md), and for specific implementation, refer to: [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model_providers/anthropic/llm/llm.py).
-[^1]: #### Provider
+#### Provider
* `provider` (string) Supplier identifier, e.g., `openai`
* `label` (object) Supplier display name, i18n, can be set in `en_US` English and `zh_Hans` Chinese
diff --git a/en/guides/model-configuration/readme.mdx b/en/guides/model-configuration/readme.mdx
index 6f9c1219..e95d10a0 100644
--- a/en/guides/model-configuration/readme.mdx
+++ b/en/guides/model-configuration/readme.mdx
@@ -3,6 +3,9 @@ title: Model
description: Learn about the Different Models Supported by Dify.
---
+
+ "Models" have been fully integrated into a "Plugin" ecosystem. For detailed development instructions on model plugins, please refer to [Plugin Development](/en/plugins/quick-start/develop-plugins/model-plugin/README). The following content has been archived.
+
Dify is a development platform for AI application based on LLM Apps, when you are using Dify for the first time, you need to go to **Settings --> Model Providers** to add and configure the LLM you are going to use.
diff --git a/en/guides/model-configuration/schema.mdx b/en/guides/model-configuration/schema.mdx
index 984f2ede..575589e7 100644
--- a/en/guides/model-configuration/schema.mdx
+++ b/en/guides/model-configuration/schema.mdx
@@ -1,7 +1,10 @@
---
-title: Configuration Rules
+title: Schema
---
+
+ "Models" have been fully integrated into a "Plugin" ecosystem. For detailed development instructions on model plugins, please refer to [Plugin Development](/en/plugins/quick-start/develop-plugins/model-plugin/README). The following content has been archived.
+
- Provider rules are based on the [Provider](#Provider) entity.
- Model rules are based on the [AIModelEntity](#AIModelEntity) entity.
diff --git a/en/guides/monitoring/README.mdx b/en/guides/monitoring/README.mdx
index cc19eccb..49a24311 100644
--- a/en/guides/monitoring/README.mdx
+++ b/en/guides/monitoring/README.mdx
@@ -1,8 +1,7 @@
---
-title: Introduction
+title: Monitoring
---
-
You can monitor and track the performance of your application in a production environment within the **Overview** section. In the data analytics dashboard, you can analyze various metrics such as usage costs, latency, user feedback, and performance in the production environment. By continuously debugging and iterating, you can continually improve your application.
-
+
diff --git a/en/guides/monitoring/integrate-external-ops-tools/integrate-langfuse.mdx b/en/guides/monitoring/integrate-external-ops-tools/integrate-langfuse.mdx
index e705972b..d49c04d2 100644
--- a/en/guides/monitoring/integrate-external-ops-tools/integrate-langfuse.mdx
+++ b/en/guides/monitoring/integrate-external-ops-tools/integrate-langfuse.mdx
@@ -2,13 +2,12 @@
title: Integrate Langfuse
---
-
### What is Langfuse
Langfuse is an open-source LLM engineering platform that helps teams collaborate on debugging, analyzing, and iterating their applications.
-Introduction to Langfuse: [https://langfuse.com/](https://langfuse.com/)
+Introduction to Langfuse: [https://langfuse.com/](https://langfuse.com/)
***
@@ -18,9 +17,7 @@ Introduction to Langfuse: [https://langfuse.com/](https://langfuse.com/)
1. Register and log in to Langfuse on the [official website](https://langfuse.com/)
2. Create a project in Langfuse. After logging in, click **New** on the homepage to create your own project. The **project** will be used to associate with **applications** in Dify for data monitoring.
-
-
-
-
+
+
+
***
diff --git a/en/guides/workflow/additional-features.mdx b/en/guides/workflow/additional-features.mdx
index de1acad5..89ab5369 100644
--- a/en/guides/workflow/additional-features.mdx
+++ b/en/guides/workflow/additional-features.mdx
@@ -9,8 +9,8 @@ Click the **"Features"** button in the upper right corner of the application to
@@ -23,8 +23,8 @@ Workflow type applications only support the **"Image Upload"** feature. When ena
diff --git a/en/guides/workflow/bulletin.mdx b/en/guides/workflow/bulletin.mdx
index 3956ee16..c334992a 100644
--- a/en/guides/workflow/bulletin.mdx
+++ b/en/guides/workflow/bulletin.mdx
@@ -37,7 +37,7 @@ We no longer recommend using the standalone “Image Upload” feature and inste
* **Chatflow Applications**
-If you have already created Chatflow applications with the “Image Upload” feature enabled and activated the Vision feature in the LLM node, the system will automatically switch the feature, and it will not affect the application’s image upload capability. If you need to update and republish the application, select the file variable in the Vision variable selection box of the LLM node, clear the item from the checklist, and republish the application.\
+If you have already created Chatflow applications with the “Image Upload” feature enabled and activated the Vision feature in the LLM node, the system will automatically switch the feature, and it will not affect the application’s image upload capability. If you need to update and republish the application, select the file variable in the Vision variable selection box of the LLM node, clear the item from the checklist, and republish the application.

@@ -65,7 +65,7 @@ If you wish to add the “Image Upload” feature to a Chatflow application, ref
Existing Workflow applications will not be affected, but please complete the manual migration before the official deprecation.
-If you wish to enable the “Image Upload” feature for a Workflow application, add a file variable in the [Start](/en/guides/workflow/node/start) node. Then, reference this file variable in subsequent node instead of using the `sys.files` variable.\
+If you wish to enable the “Image Upload” feature for a Workflow application, add a file variable in the [Start](/en/guides/workflow/node/start) node. Then, reference this file variable in subsequent nodes instead of using the `sys.files` variable.
### FAQs:
diff --git a/en/guides/workflow/error-handling/README.mdx b/en/guides/workflow/error-handling/README.mdx
index fc237ddd..6f478637 100644
--- a/en/guides/workflow/error-handling/README.mdx
+++ b/en/guides/workflow/error-handling/README.mdx
@@ -1,5 +1,5 @@
---
-title: Introduction
+title: Error Handling
---
Workflow applications often comprise multiple interconnected nodes operating in sequence. When an error occurs—such as an API request failure, an LLM output issue, or an unexpected exception—it can disrupt the entire process. Such disruptions force developers to spend significant time troubleshooting, especially in workflows with complex node dependencies.
@@ -28,10 +28,10 @@ For instance, a code node on the alternative path can split the content into sma
The following four types of nodes have added error-handling feature. Click on the title to read the detailed documents:
-* [LLM](/en/guides/workflow/nodes/llm)
-* [HTTP](/en/guides/workflow/nodes/http-request)
-* [Code](/en/guides/workflow/nodes/code)
-* [Tools](/en/guides/workflow/nodes/tools)
+* [LLM](/en/guides/workflow/node/llm)
+* [HTTP](/en/guides/workflow/node/http-request)
+* [Code](/en/guides/workflow/node/code)
+* [Tools](/en/guides/workflow/node/tools)
**Retry on Failure**
diff --git a/en/guides/workflow/error-handling/error-type.mdx b/en/guides/workflow/error-handling/error-type.mdx
index 4f8bf31e..02979ffd 100644
--- a/en/guides/workflow/error-handling/error-type.mdx
+++ b/en/guides/workflow/error-handling/error-type.mdx
@@ -32,7 +32,9 @@ This article summarizes the potential exceptions and corresponding error types t
2. Start the sandbox service
3. Verify proxy settings
-
+
+
+
3. **Depth Limit Error (DepthLimitError)**
@@ -105,9 +107,10 @@ The following 3 errors commonly occur during runtime:
An error that occurs during tool execution itself, such as when reaching the target API's request limit.
+
+
+
-
- 
2. **Tool Parameter Error (ToolParameterError)**
An error occurs when the configured tool node parameters are invalid, such as passing parameters that don't match the tool node's defined parameters.
diff --git a/en/guides/workflow/error-handling/predefined-error-handling-logic.mdx b/en/guides/workflow/error-handling/predefined-error-handling-logic.mdx
index d55c77c8..d99bfda4 100644
--- a/en/guides/workflow/error-handling/predefined-error-handling-logic.mdx
+++ b/en/guides/workflow/error-handling/predefined-error-handling-logic.mdx
@@ -5,13 +5,13 @@ title: Predefined Error Handling Logic
Here are four types of nodes that provide with predefined logic for handling unexpected situations:
-• [LLM](/en/guides/workflow/nodes/llm)
+• [LLM](/en/guides/workflow/node/llm)
-• [HTTP](/en/guides/workflow/nodes/http-request)
+• [HTTP](/en/guides/workflow/node/http-request)
-• [Code](/en/guides/workflow/nodes/code)
+• [Code](/en/guides/workflow/node/code)
-• [Tool](/en/guides/workflow/nodes/tools)
+• [Tool](/en/guides/workflow/node/tools)
The error handling feature provides three predefined options:
@@ -40,7 +40,9 @@ When a node fails to execute, the workflow automatically uses the developer’s
The predefined default value’s data type must match the node’s output variable type. For example, if the output variable of a code node is set to the data type `array[number]`, the default value must also be of the `array[number]` data type.
-
+
+
+
### Logic: Fail Branch
diff --git a/en/guides/workflow/file-upload.mdx b/en/guides/workflow/file-upload.mdx
index a39210b3..bec1c464 100644
--- a/en/guides/workflow/file-upload.mdx
+++ b/en/guides/workflow/file-upload.mdx
@@ -2,9 +2,146 @@
title: File Upload
---
-[前面的内容保持不变...]
+Compared to chat text, document files can contain vast amounts of information, such as academic reports and legal contracts. However, Large Language Models (LLMs) are inherently limited to processing only text or images, making it challenging to extract the rich contextual information within these files. As a result, application users often resort to manually copying and pasting large amounts of information to converse with LLMs, significantly increasing unnecessary operational overhead.
-
+The file upload feature addresses this limitation by allowing files to be uploaded, parsed, referenced, and downloaded as File variables within workflow applications. **This empowers developers to easily construct complex workflows capable of understanding and processing various media types, including images, audio, and video.**
+
+## Application Scenarios
+
+1. **Document Analysis**: Upload academic research reports for LLMs to quickly summarize key points and answer related questions based on the file content.
+2. **Code Review**: Developers can upload code files to receive optimization suggestions and bug detection.
+3. **Learning Assistance**: Students can upload assignments or study materials for personalized explanations and guidance.
+4. **Legal Aid**: Upload complete contract texts for LLMs to assist in reviewing clauses and identifying potential risks.
+
+## Difference Between File Upload and Knowledge Base
+
+Both file upload and knowledge base provide additional contextual information for LLMs, but they differ significantly in usage scenarios and functionality:
+
+1. **Information Source**:
+ * File Upload: Allows end-users to dynamically upload files during conversations, providing immediate, personalized contextual information.
+ * Knowledge Base: Pre-set and managed by application developers, containing a relatively fixed set of information.
+2. **Usage Flexibility**:
+ * File Upload: More flexible, users can upload different types of files based on specific needs.
+ * Knowledge Base: Content is relatively fixed but can be reused across multiple sessions.
+3. **Information Processing**:
+ * File Upload: Requires document extractors or other tools to convert file content into text that LLMs can understand.
+ * Knowledge Base: Usually pre-processed and indexed, ready for direct retrieval.
+4. **Application Scenarios**:
+ * File Upload: Suitable for scenarios that need to process user-specific documents, such as document analysis, personalized learning assistance, etc.
+ * Knowledge Base: Suitable for scenarios that require access to a large amount of preset information, such as customer service, product inquiries, etc.
+5. **Data Persistence**:
+ * File Upload: Typically for temporary use, not stored long-term in the system.
+ * Knowledge Base: Exists as a long-term part of the application, can be continuously updated and maintained.
+
+## Quick Start: Building a Chatflow / Workflow Application with File Upload Feature
+
+Dify supports file uploads in both [Chatflow](key-concepts) and [Workflow](key-concepts#chatflow-and-workflow) type applications, processing them through variables for LLMs. Application developers can refer to the following methods to enable file upload functionality:
+
+* In Workflow applications:
+ * Add file variables in the ["Start Node"](node/start)
+* In Chatflow applications:
+ * Enable file upload in ["Additional Features"](additional-features) to allow direct file uploads in the chat window
+ * Add file variables in the ["Start Node"](node/start)
+ * Note: These two methods can be configured simultaneously and are independent of each other. The file upload settings in additional features (including upload method and quantity limit) do not affect the file variables in the start node. For example, if you only want to create file variables through the start node, you don't need to enable the file upload feature in additional features.
+
+These two methods provide flexible file upload options for applications to meet the needs of different scenarios.
+
+**File Types**
+
+`File` variables and `array[file]` variables support the following file types and formats:
+
+| File Type | Supported Formats |
+|-------------|--------------------------------------------------------------------|
+| Documents | TXT, MARKDOWN, PDF, HTML, XLSX, XLS, DOCX, CSV, EML, MSG, PPTX, PPT, XML, EPUB. |
+| Images | JPG, JPEG, PNG, GIF, WEBP, SVG. |
+| Audio | MP3, M4A, WAV, WEBM, AMR. |
+| Video | MP4, MOV, MPEG, MPGA. |
+| Others | Custom file extension support |
+
+#### Method 1: Using an LLM with File Processing Capabilities
+
+Some LLMs, such as [Claude 3.5 Sonnet](https://docs.anthropic.com/en/docs/build-with-claude/pdf-support), now support direct processing and analysis of file content, enabling the use of file variables in the LLM node's prompts.
+
+> To prevent potential issues, application developers should verify the supported file types on the LLM's official website before utilizing the file variable.
+
+1. Click to create a Chatflow or Workflow application.
+2. Add an LLM node and select an LLM with file processing capabilities.
+3. Add a file variable in the start node.
+4. Enter the file variable in the system prompt of the LLM node.
+5. Complete the setup.
+
+
+
+**Method 2: Enable File Upload in Application Chat Box (Chatflow Only)**
+
+1. Click the **"Features"** button in the upper right corner of the Chatflow application to add more functionality to the application. After enabling this feature, application users can upload and update files at any time during the application dialogue. A maximum of 10 files can be uploaded simultaneously, with a size limit of 15MB per file.
+
+
+
+Enabling this feature does not grant LLMs the ability to directly read files. A **Document Extractor** is still needed to parse documents into text for LLM comprehension.
+
+* For audio files, models like `gpt-4o-audio-preview` that support multimodal input can process audio directly without additional extractors.
+* For video and other file types, there are currently no corresponding extractors. Application developers need to [integrate external tools](../extension/api-based-extension/external-data-tool) for processing.
+
+2. Add a Document Extractor node, and select the `sys.files` variable in the input variables.
+3. Add an LLM node and select the output variable of the Document Extractor node in the system prompt.
+4. Add an "Answer" node at the end, filling in the output variable of the LLM node.
+
+
+
+Once enabled, users can upload files and engage in conversations in the dialogue box. However, with this method, the LLM application does not have the ability to remember file contents, and files need to be uploaded for each conversation.
+
+
+
+If you want the LLM to remember file contents during conversations, please refer to Method 3.
+
+**Method 3: Enable File Upload by Adding File Variables**
+
+**1. Add File Variables in the "Start" Node**
+
+Add input fields in the application's "Start" node, choosing either **"Single File"** or **"File List"** as the field type for the variable.
+
+
+
+* **Single File**
+
+ Allows the application user to upload only one file.
+* **File List**
+
+ Allows the application user to batch upload multiple files at once.
+
+> For ease of operation, we will use a single file variable as an example.
+
+**File Parsing**
+
+There are two main ways to use file variables:
+
+1. Using tool nodes to convert file content:
+ * For document-type files, you can use the "Document Extractor" node to convert file content into text form.
+ * This method is suitable for cases where file content needs to be parsed into a format that the model can understand (such as string, array\[string], etc.).
+2. Using file variables directly in LLM nodes:
+ * For certain types of files (such as images), you can use file variables directly in LLM nodes.
+ * For example, for file variables of image type, you can enable the vision feature in the LLM node and then directly reference the corresponding file variable in the variable selector.
+
+The choice between these methods depends on the file type and your specific requirements. Next, we will detail the specific steps for both methods.
+
+**2. Add Document Extractor Node**
+
+After uploading, files are stored in single file variables, which LLMs cannot directly read. Therefore, a **"Document Extractor"** node needs to be added first to extract content from uploaded document files and send it to the LLM node for information processing.
+
+Use the file variable from the "Start" node as the input variable for the **"Document Extractor"** node.
+
+
+
+Fill in the output variable of the "Document Extractor" node in the system prompt of the LLM node.
+
+
After completing these settings, application users can paste file URLs or upload local files in the WebApp, then interact with the LLM about the document content. Users can replace files at any time during the conversation, and the LLM will obtain the latest file content.
@@ -18,7 +155,7 @@ For certain file types (such as images), file variables can be directly used wit
Below is an example configuration:
-
+
It's important to note that when directly using file variables in LLM node, the developers need to ensure that the file variable contains only image files; otherwise, errors may occur. If users might upload different types of files, we need to use list operator node for filtering files.
@@ -26,7 +163,7 @@ It's important to note that when directly using file variables in LLM node, the
Placing file variables in answer nodes or end nodes will provide a file download card in the conversation box when the application reaches that node. Clicking the card allows for file download.
-
+
## Advanced Usage
diff --git a/en/guides/workflow/key-concepts.mdx b/en/guides/workflow/key-concepts.mdx
index 2cd7b6fa..1b29d97c 100644
--- a/en/guides/workflow/key-concepts.mdx
+++ b/en/guides/workflow/key-concepts.mdx
@@ -2,10 +2,9 @@
title: Key Concepts
---
+### Nodes
-### node
-
-**node are the key components of a workflow**. By connecting node with different functionalities, you can execute a series of operations within the workflow.
+**Nodes are the key components of a workflow**. By connecting nodes with different functionalities, you can execute a series of operations within the workflow.
For core workflow node, please refer to [Block Description](/en/guides/workflow/node).
diff --git a/en/guides/workflow/node/answer.mdx b/en/guides/workflow/node/answer.mdx
index 4d6e0246..784718c3 100644
--- a/en/guides/workflow/node/answer.mdx
+++ b/en/guides/workflow/node/answer.mdx
@@ -2,7 +2,6 @@
title: Answer
---
-
Defining Reply Content in a Chatflow Process. In a text editor, you have the flexibility to determine the reply format. This includes crafting a fixed block of text, utilizing output variables from preceding steps as the reply content, or merging custom text with variables for the response.
Answer node can be seamlessly integrated at any point to dynamically deliver content into the dialogue responses. This setup supports a live-editing configuration mode, allowing for both text and image content to be arranged together. The configurations include:
@@ -13,19 +12,15 @@ Answer node can be seamlessly integrated at any point to dynamically deliver con
Example 1: Output plain text.
-
-
-
-
diff --git a/en/guides/workflow/node/code.mdx b/en/guides/workflow/node/code.mdx
index cbc64847..76940572 100644
--- a/en/guides/workflow/node/code.mdx
+++ b/en/guides/workflow/node/code.mdx
@@ -16,7 +16,7 @@ The code node supports running Python/NodeJS code to perform data transformation
This node significantly enhances the flexibility for developers, allowing them to embed custom Python or JavaScript scripts within the workflow and manipulate variables in ways that preset nodes cannot achieve. Through configuration options, you can specify the required input and output variables and write the corresponding execution code:
-
+
## Configuration
diff --git a/en/guides/workflow/node/end.mdx b/en/guides/workflow/node/end.mdx
index 9464d29d..344325d8 100644
--- a/en/guides/workflow/node/end.mdx
+++ b/en/guides/workflow/node/end.mdx
@@ -19,7 +19,7 @@ End nodes are not supported within Chatflow.
### 2 Scenarios
-In the following [long story generation workflow](/en/guides/workflow/nodes/iteration#example-2-long-article-iterative-generation-another-scheduling-method), the variable `Output` declared by the end node is the output of the upstream code node. This means the workflow will end after the Code node completes execution and will output the execution result of Code.
+In the following [long story generation workflow](/en/guides/workflow/node/iteration#example-2-long-article-iterative-generation-another-scheduling-method), the variable `Output` declared by the end node is the output of the upstream code node. This means the workflow will end after the Code node completes execution and will output the execution result of Code.

diff --git a/en/guides/workflow/node/http-request.mdx b/en/guides/workflow/node/http-request.mdx
index 18e7785c..b8b9ca82 100644
--- a/en/guides/workflow/node/http-request.mdx
+++ b/en/guides/workflow/node/http-request.mdx
@@ -18,7 +18,9 @@ This node supports common HTTP request methods:
You can configure various aspects of the HTTP request, including URL, request headers, query parameters, request body content, and authentication information.
-
+
+
+
***
diff --git a/en/guides/workflow/node/ifelse.mdx b/en/guides/workflow/node/ifelse.mdx
index 4c8783e3..24132f49 100644
--- a/en/guides/workflow/node/ifelse.mdx
+++ b/en/guides/workflow/node/ifelse.mdx
@@ -2,7 +2,6 @@
title: Conditional Branch IF/ELSE
---
-
### Definition
Allows you to split the workflow into two branches based on if/else conditions.
@@ -44,4 +43,4 @@ Taking the above **Text Summary Workflow** as an example:
For complex condition judgments, you can set multiple condition judgments and configure **AND** or **OR** between conditions to take the **intersection** or **union** of the conditions, respectively.
-
+
diff --git a/en/guides/workflow/node/iteration.mdx b/en/guides/workflow/node/iteration.mdx
index dce81269..5bfd52ee 100644
--- a/en/guides/workflow/node/iteration.mdx
+++ b/en/guides/workflow/node/iteration.mdx
@@ -23,19 +23,7 @@ An iteration node consists of three core components: **Input Variables**, **Iter
**Output Variables:** Outputs only array variables (`Array[List]`).
-
-
-
-
-
+
### Scenarios
@@ -53,15 +41,21 @@ alt=""
1. Configure the story title (title) and outline (outline) in the **Start Node**.
-
+
+
+
2. Use a **Generate Subtitles and Outlines Node** to convert the story title and outline into complete text.
-
+
+
+
3. Use a **Extract Subtitles and Outlines Node** to convert the story text into an array (Array) structure. The parameter to extract is `sections`, and the parameter type is `Array[Object]`.
-
+
+
+
The effectiveness of parameter extraction is influenced by the model's inference capability and the instructions given. Using a model with stronger inference capabilities and adding examples in the **instructions** can improve the parameter extraction results.
@@ -73,7 +67,9 @@ The effectiveness of parameter extraction is influenced by the model's inference
Configure the input variables `GenerateOverallOutline/output` and `Iteration/item` in the LLM Node.
-
+
+
+
Built-in variables for iteration: `items[object]` and `index[number]`.
@@ -163,7 +159,9 @@ The output variable of the iteration node is in array format and cannot be direc
**Convert Using a Code Node**
-
+
+
+
CODE Example:
@@ -177,7 +175,9 @@ def main(articleSections: list):
**Convert Using a Template Node**
-
+
+
+
CODE Example:
diff --git a/en/guides/workflow/node/llm.mdx b/en/guides/workflow/node/llm.mdx
index dca136ac..0e55fbab 100644
--- a/en/guides/workflow/node/llm.mdx
+++ b/en/guides/workflow/node/llm.mdx
@@ -110,7 +110,7 @@ The main parameter terms are explained as follows:
If you do not understand what these parameters are, you can choose to load presets and select from the three presets: Creative, Balanced, and Precise.
-
+
***
@@ -213,7 +213,7 @@ Click **Add Field** and set parameters below:
1. Click the AI Generate icon, select a model (like GPT-4o), and describe what you need:
- > “I need a JSON Schema for user profiles with username (string), age (number), and interests (array).”
+ > "I need a JSON Schema for user profiles with username (string), age (number), and interests (array)."
2. Click **Generate** to create a schema:
@@ -286,7 +286,7 @@ Click **Add Field** and set parameters below:
1. Click the AI Generate icon, select a model (like GPT-4o), and describe what you need:
- > “I need a JSON Schema for user profiles with username (string), age (number), and interests (array).”
+ > "I need a JSON Schema for user profiles with username (string), age (number), and interests (array)."
2. Click **Generate** to create a schema:
@@ -325,7 +325,7 @@ To enable workflow applications to read "[Knowledge Base](../../knowledge-base/)
2. Fill in the **output variable** `result` of the knowledge retrieval node into the **context variable** of the LLM node;
3. Insert the **context variable** into the application prompt to give the LLM the ability to read text within the knowledge base.
-.png)
+
The `result` variable output by the Knowledge Retrieval Node also includes segmented reference information. You can view the source of information through the **Citation and Attribution** feature.
@@ -341,9 +341,9 @@ To enable workflow applications to read document contents, such as building a Ch
* Add a document extractor node upstream of the LLM node, using the file variable as an input variable;
* Fill in the **output variable** `text` of the document extractor node into the prompt of the LLM node.
-For more information, please refer to [File Upload](../file-upload.md).
+For more information, please refer to [File Upload](../file-upload).
-.png)
+
* **Error Handling**
@@ -358,40 +358,16 @@ For more information about exception handling methods, please refer to the [Erro
* **Structured Outputs**
-**Scenario**: You are building a user feedback analysis workflow on Dify. The LLM node needs to read user reviews and return standardized ratings and comments in a consistent format for downstream nodes to process.
+**Case: Customer Information Intake Form**
-**Solution**: In your workflow’s LLM node, you can use the JSON Schema Editor to define a structured format. This ensures the LLM generates results in a strict predefined format instead of unstructured text.
+Watch the following video to learn how to use JSON Schema Editor to collect customer information:
-1. Define your output structure in the JSON Schema Editor:
-
-```json
-{
- "type": "object",
- "properties": {
- "rating": {
- "type": "integer",
- "description": "user's rating"
- },
- "comment": {
- "type": "string",
- "description": "user's comments"
- }
- },
- "required": ["rating", "comment"]
-}
-```
-
-2. Test it with a review in your Start node:
-
- > “This product is excellent!”
-
-3. The LLM node will return a clean, structured response:
-
-```json
-{
- "structured_output": {
- "comment": "This product is excellent!",
- "rating": 5
- }
-}
-```
+
diff --git a/en/guides/workflow/node/parameter-extractor.mdx b/en/guides/workflow/node/parameter-extractor.mdx
index 8fdef194..ae30e13e 100644
--- a/en/guides/workflow/node/parameter-extractor.mdx
+++ b/en/guides/workflow/node/parameter-extractor.mdx
@@ -25,7 +25,7 @@ In this example: The Arxiv paper retrieval tool requires **paper author** or **p

-1. **Extracting structured data and using the** [**HTTP Request**](/en/guides/workflow/nodes/http-request), which can request any accessible URL, suitable for obtaining external retrieval results, webhooks, generating images, and other scenarios.
+3. **Extracting structured data and using the** [**HTTP Request**](/en/guides/workflow/node/http-request), which can request any accessible URL, suitable for obtaining external retrieval results, webhooks, generating images, and other scenarios.
***
diff --git a/en/guides/workflow/node/template.mdx b/en/guides/workflow/node/template.mdx
index b6fbc922..d137fc09 100644
--- a/en/guides/workflow/node/template.mdx
+++ b/en/guides/workflow/node/template.mdx
@@ -4,12 +4,11 @@ title: Template
Template lets you dynamically format and combine variables from previous nodes into a single text-based output using Jinja2, a powerful templating syntax for Python. It's useful for combining data from multiple sources into a specific structure required by subsequent nodes. The simple example below shows how to assemble an article by piecing together various previous outputs:
-
+
Beyond naive use cases, you can create more complex templates as per Jinja's [documentation](https://jinja.palletsprojects.com/en/3.1.x/templates/) for a variety of tasks. Here's one template that structures retrieved chunks and their relevant metadata from a knowledge retrieval node into a formatted markdown:
```Plain
-{% raw %}
{% for item in chunks %}
### Chunk {{ loop.index }}.
### Similarity: {{ item.metadata.score | default('N/A') }}
@@ -21,7 +20,6 @@ Beyond naive use cases, you can create more complex templates as per Jinja's [do
---
{% endfor %}
-{% endraw %}
```

diff --git a/en/guides/workflow/node/tools.mdx b/en/guides/workflow/node/tools.mdx
index 4bd2d5ca..ce311bd7 100644
--- a/en/guides/workflow/node/tools.mdx
+++ b/en/guides/workflow/node/tools.mdx
@@ -17,18 +17,11 @@ If built-in tools do not meet your needs, you can create custom tools in the **D
You can also orchestrate a more complex workflow and publish it as a tool.
-
-
-
-
+
+
+
+
+
Configuring a tool node generally involves two steps:
diff --git a/en/guides/workflow/node/variable-assigner.mdx b/en/guides/workflow/node/variable-assigner.mdx
index 70a8a0cb..031d04ca 100644
--- a/en/guides/workflow/node/variable-assigner.mdx
+++ b/en/guides/workflow/node/variable-assigner.mdx
@@ -158,26 +158,36 @@ The data type of the target variable determines its operation method. Below are
1. Target variable data type: `String`
• **Overwrite**: Directly overwrite the target variable with the source variable.
+
• **Clear**: Clear the contents of the selected target variable.
+
• **Set**: Manually assign a value without requiring a source variable.
2. Target variable data type: `Number`
• **Overwrite**: Directly overwrite the target variable with the source variable.
+
• **Clear**: Clear the contents of the selected target variable.
+
• **Set**: Manually assign a value without requiring a source variable.
+
• **Arithmetic**: Perform addition, subtraction, multiplication, or division on the target variable.
3. Target variable data type: `Object`
• **Overwrite**: Directly overwrite the target variable with the source variable.
+
• **Clear**: Clear the contents of the selected target variable.
+
• **Set**: Manually assign a value without requiring a source variable.
4. Target variable data type: `Array`
• **Overwrite**: Directly overwrite the target variable with the source variable.
+
• **Clear**: Clear the contents of the selected target variable.
+
• **Append**: Add a new element to the array in the target variable.
+
• **Extend**: Add a new array to the target variable, effectively adding multiple elements at once.
diff --git a/en/guides/workspace/README.mdx b/en/guides/workspace/README.mdx
index afd62c6a..e845d1cc 100644
--- a/en/guides/workspace/README.mdx
+++ b/en/guides/workspace/README.mdx
@@ -3,7 +3,7 @@ title: Collaboration
---
-Dify is a multi-user platform where workspaces are the basic units of team collaboration. Members of a workspace can create and edit applications and knowledge bases, and can also directly use public applications created by other team members in the Discover area.
+Dify is a multi-user platform where workspaces are the basic units of team collaboration. Members of a workspace can create and edit applications and knowledge bases, and can also directly use public applications created by other team members in the [Discover](https://docs.dify.ai/guides/workspace/app) area.
### Login Methods
diff --git a/en/learn-more/extended-reading/retrieval-augment/hybrid-search.mdx b/en/learn-more/extended-reading/retrieval-augment/hybrid-search.mdx
index 6366cc46..7aad80ac 100644
--- a/en/learn-more/extended-reading/retrieval-augment/hybrid-search.mdx
+++ b/en/learn-more/extended-reading/retrieval-augment/hybrid-search.mdx
@@ -42,8 +42,6 @@ Different retrieval systems excel at finding various subtle relationships betwee
Definition: Generating query embeddings and querying the text segments most similar to their vector representations.
-
-
**TopK:** Used to filter the text fragments most similar to the user's query. The system will dynamically adjust the number of fragments based on the context window size of the selected model. The default value is 3.
**Score Threshold:** Used to set the similarity threshold for filtering text fragments, i.e., only recalling text fragments that exceed the set score. The system's default is to turn off this setting, meaning it does not filter the similarity values of recalled text fragments. When enabled, the default value is 0.5.
@@ -54,8 +52,6 @@ Definition: Generating query embeddings and querying the text segments most simi
Definition: Indexing all words in the document, allowing users to query any word and return text fragments containing those words.
-
-
**TopK:** Used to filter the text fragments most similar to the user's query. The system will dynamically adjust the number of fragments based on the context window size of the selected model. The default value is 3.
**Rerank Model:** After configuring the Rerank model's API key on the "Model Providers" page, you can enable the "Rerank Model" in the retrieval settings. The system will perform semantic re-ranking on the recalled document results after full-text retrieval to optimize the ranking results. When the Rerank model is set, the TopK and Score Threshold settings only take effect in the Rerank step.
@@ -86,4 +82,3 @@ Modify the retrieval mode of an existing dataset by entering the "Dataset -> Sel
Modify the retrieval mode when creating an application by entering the "Prompt Arrangement -> Context -> Select Dataset -> Settings" page.
-
diff --git a/en/learn-more/faq/install-faq.mdx b/en/learn-more/faq/install-faq.mdx
index e7ea3a04..474e7c65 100644
--- a/en/learn-more/faq/install-faq.mdx
+++ b/en/learn-more/faq/install-faq.mdx
@@ -64,7 +64,7 @@ This might be due to switching the domain/URL, causing cross-domain issues betwe
`APP_API_URL:` Backend URL for the WebApp API.
`APP_WEB_URL:` URL for the WebApp.
-For more information, please refer to: [Environment Variables](../../getting-started/install-self-hosted/environments.md)
+For more information, please refer to: [Environment Variables](../../getting-started/install-self-hosted/environments)
### 5. How to upgrade the version after deployment?
diff --git a/en/learn-more/use-cases/how-to-connect-aws-bedrock.mdx b/en/learn-more/use-cases/how-to-connect-aws-bedrock.mdx
index ef9b5e27..8bd2b69f 100644
--- a/en/learn-more/use-cases/how-to-connect-aws-bedrock.mdx
+++ b/en/learn-more/use-cases/how-to-connect-aws-bedrock.mdx
@@ -19,7 +19,7 @@ Visit [AWS Bedrock](https://aws.amazon.com/bedrock/) and create the Knowledge Ba
### 2. Build the Backend API Service
-The Dify platform cannot directly connect to AWS Bedrock Knowledge Base. The developer needs to refer to Dify's [API definition](../../guides/knowledge-base/external-knowledge-api-documentation.md) on external knowledge base connection, manually create the backend API service, and establish a connection with AWS Bedrock. Please refer to the specific architecture diagram:
+The Dify platform cannot directly connect to AWS Bedrock Knowledge Base. The developer needs to refer to Dify's [API definition](../../guides/knowledge-base/external-knowledge-api-documentation) on external knowledge base connection, manually create the backend API service, and establish a connection with AWS Bedrock. Please refer to the specific architecture diagram:

diff --git a/en/link-check-report-error.md b/en/link-check-report-error.md
index dc9c30cb..6f928f99 100644
--- a/en/link-check-report-error.md
+++ b/en/link-check-report-error.md
@@ -5,7 +5,7 @@
## 来自 community/contribution.md
-* [Become a Contributor](/community/contribution) ✅ | [code of conduct](https://github.com/langgenius/.github/blob/main/CODE\_OF\_CONDUCT.md) ❌ | [this guide](https://github.com/langgenius/dify/blob/main/api/core/model\_runtime/README.md) ❌ | [this guide](https://github.com/langgenius/dify/blob/main/api/core/tools/README.md) ❌
+* [Become a Contributor](/community/contribution) ✅ | [code of conduct](https://github.com/langgenius/.github/blob/main/CODE\_OF\_CONDUCT) ❌ | [this guide](https://github.com/langgenius/dify/blob/main/api/core/model\_runtime/README) ❌ | [this guide](https://github.com/langgenius/dify/blob/main/api/core/tools/README) ❌
## 来自 development/models-integration/hugging-face.md
@@ -17,7 +17,7 @@
## 来自 development/models-integration/localai.md
-* [Integrate Local Models Deployed by LocalAI](./development/models-integration/localai) ✅ | [LocalAI Data query example](https://github.com/go-skynet/LocalAI/blob/master/examples/langchain-chroma/README.md) ❌
+* [Integrate Local Models Deployed by LocalAI](./development/models-integration/localai) ✅ | [LocalAI Data query example](https://github.com/go-skynet/LocalAI/blob/master/examples/langchain-chroma/README) ❌
## 来自 development/models-integration/ollama.md
@@ -57,7 +57,7 @@
## 来自 guides/model-configuration/customizable-model.md
-* [Custom Model Integration](./guides/model-configuration/customizable-model) ✅ | [Interfaces](https://github.com/langgenius/dify/blob/main/api/core/model\_runtime/docs/en\_US/interfaces.md) ❌ | [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model\_providers/anthropic/llm/llm.py) ❌
+* [Custom Model Integration](./guides/model-configuration/customizable-model) ✅ | [Interfaces](https://github.com/langgenius/dify/blob/main/api/core/model\_runtime/docs/en\_US/interfaces) ❌ | [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model\_providers/anthropic/llm/llm.py) ❌
## 来自 guides/model-configuration/new-provider.md
diff --git a/en/link-check-report.md b/en/link-check-report.md
index 2e291a79..acd10bfc 100644
--- a/en/link-check-report.md
+++ b/en/link-check-report.md
@@ -9,8 +9,8 @@
* [Welcome to Dify](./README) ✅ | [**Quick Start**](https://docs.dify.ai/application/creating-an-application) ✅ | [**self-deploy Dify** ](https://docs.dify.ai/getting-started/install-self-hosted) ✅ | [**integrate open source models**](https://docs.dify.ai/advanced/model-configuration) ✅ | [**specifications and roadmap**](https://docs.dify.ai/getting-started/readme/features-and-specifications) ✅ | [**Star us on GitHub**](https://github.com/langgenius/dify) ✅
* [Features and Specifications](./getting-started/readme/features-and-specifications) ✅
* [List of Model Providers](./getting-started/readme/model-providers) ✅ | [here](https://github.com/langgenius/dify/discussions/categories/ideas) ✅ | [contribution.md](../../community/contribution.md "mention") ❌
-* [Dify Community](./getting-started/install-self-hosted/README) ✅ | [GitHub](https://github.com/langgenius/dify) ✅ | [Docker Compose Deployment](https://docs.dify.ai/getting-started/install-self-hosted/docker-compose) ✅ | [Local Source Code Start](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code) ✅ | [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) ✅
- * [Deploy with Docker Compose](./getting-started/install-self-hosted/docker-compose) ✅ | [Docker Desktop installation guide for Mac](https://docs.docker.com/desktop/mac/install/) ✅ | [Docker installation guide](https://docs.docker.com/engine/install/) ✅ | [the Docker Compose installation guide](https://docs.docker.com/compose/install/) ✅ | [Docker Desktop installation guide for using the WSL 2 backend on Windows.](https://docs.docker.com/desktop/windows/install/#wsl-2-backend) ✅ | [README.md](https://github.com/langgenius/dify/blob/main/docker/README.md) ✅ | [Docker documentation](https://docs.docker.com/compose/install/) ✅ | [FAQs](./faqs) ✅
+* [Dify Community](./getting-started/install-self-hosted/README) ✅ | [GitHub](https://github.com/langgenius/dify) ✅ | [Docker Compose Deployment](https://docs.dify.ai/getting-started/install-self-hosted/docker-compose) ✅ | [Local Source Code Start](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code) ✅ | [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING) ✅
+ * [Deploy with Docker Compose](./getting-started/install-self-hosted/docker-compose) ✅ | [Docker Desktop installation guide for Mac](https://docs.docker.com/desktop/mac/install/) ✅ | [Docker installation guide](https://docs.docker.com/engine/install/) ✅ | [the Docker Compose installation guide](https://docs.docker.com/compose/install/) ✅ | [Docker Desktop installation guide for using the WSL 2 backend on Windows.](https://docs.docker.com/desktop/windows/install/#wsl-2-backend) ✅ | [README.md](https://github.com/langgenius/dify/blob/main/docker/README) ✅ | [Docker documentation](https://docs.docker.com/compose/install/) ✅ | [FAQs](./faqs) ✅
* [Start with Local Source Code](./getting-started/install-self-hosted/local-source-code) ✅ | [Docker Desktop installation guide for Mac](https://docs.docker.com/desktop/mac/install/) ✅ | [Docker installation guide](https://docs.docker.com/engine/install/) ✅ | [the Docker Compose installation guide](https://docs.docker.com/compose/install/) ✅ | [Docker Desktop installation guide for using the WSL 2 backend on Windows.](https://docs.docker.com/desktop/windows/install/#wsl-2-backend) ✅ | [Link](https://docs.dify.ai/getting-started/install-self-hosted/install-faq#id-14.-what-to-do-if-this-error-occurs-in-text-to-speech) ✅ | [pyenv](https://github.com/pyenv/pyenv) ✅ | [Poetry](https://python-poetry.org/docs/) ✅ | [Node.js v18.x (LTS)](http://nodejs.org/) ✅ | [NPM version 8.x.x](https://www.npmjs.com/) ✅ | [Yarn](https://yarnpkg.com/) ✅ | [https://nodejs.org/en/download](https://nodejs.org/en/download) ✅ | [http://127.0.0.1:3000](http://127.0.0.1:3000/) ✅
* [Deploy with aaPanel](./getting-started/install-self-hosted/bt-panel) ✅ | [aaPanel installation guide](https://www.aapanel.com/new/download.html#install) ✅
* [Start Frontend Docker Container Separately](./getting-started/install-self-hosted/start-the-frontend-docker-container) ✅ | [http://127.0.0.1:3000](http://127.0.0.1:3000/) ✅
@@ -22,9 +22,9 @@
## Guides
* [Model](./guides/model-configuration/README) ✅ | [OpenAI](https://platform.openai.com/account/api-keys) ✅ | [Azure OpenAI Service](https://azure.microsoft.com/en-us/products/ai-services/openai-service/) ❌ | [Anthropic](https://console.anthropic.com/account/keys) ✅ | [iFLYTEK SPARK](https://www.xfyun.cn/solutions/xinghuoAPI) ✅ | [WENXINYIYAN](https://console.bce.baidu.com/qianfan/ais/console/applicationConsole/application) ✅ | [TONGYI](https://dashscope.console.aliyun.com/api-key\_management?spm=a2c4g.11186623.0.0.3bbc424dxZms9k) ✅ | [Minimax](https://api.minimax.chat/user-center/basic-information/interface-key) ✅ | [Ollama](https://docs.dify.ai/tutorials/model-configuration/ollama) ✅ | [LocalAI](https://github.com/mudler/LocalAI) ✅ | [GPUStack](https://github.com/gpustack/gpustack) ✅ | [Jina Embeddings](https://jina.ai/embeddings/) ✅ | [**Rerank Models**](https://docs.dify.ai/advanced/retrieval-augment/rerank) ✅ | [Jina Reranker](https://jina.ai/reranker) ✅ | [PKCS1\_OAEP](https://pycryptodome.readthedocs.io/en/latest/src/cipher/oaep.html) ✅ | [Hugging Face](../../development/models-integration/hugging-face) ✅ | [Replicate](../../development/models-integration/replicate) ✅ | [Xinference](../../development/models-integration/xinference) ✅ | [OpenLLM](../../development/models-integration/openllm) ✅
- * [Add New Provider](./guides/model-configuration/new-provider) ✅ | [Provider Schema](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema.md) ✅ | [AI Model Entity](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema.md#aimodelentity) ✅ | [`model_credential_schema`](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema.md) ✅ | [YAML configuration information](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema.md) ✅ | [AnthropicProvider](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/model_providers/anthropic/anthropic.py) ❌ | [**Adding Predefined Models**](https://docs.dify.ai/v/zh-hans/guides/model-configuration/predefined-model) ✅ | [**Adding Custom Models**](https://docs.dify.ai/v/zh-hans/guides/model-configuration/customizable-model) ✅
- * [Predefined Model Integration](./guides/model-configuration/predefined-model) ✅ | [Interfaces](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/interfaces.md) ✅ | [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model_providers/anthropic/llm/llm.py) ❌
- * [Custom Model Integration](./guides/model-configuration/customizable-model) ✅ | [Interfaces](https://github.com/langgenius/dify/blob/main/api/core/model\_runtime/docs/en\_US/interfaces.md) ❌ | [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model\_providers/anthropic/llm/llm.py) ❌
+ * [Add New Provider](./guides/model-configuration/new-provider) ✅ | [Provider Schema](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema) ✅ | [AI Model Entity](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema.md#aimodelentity) ✅ | [`model_credential_schema`](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema) ✅ | [YAML configuration information](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/schema) ✅ | [AnthropicProvider](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/model_providers/anthropic/anthropic.py) ❌ | [**Adding Predefined Models**](https://docs.dify.ai/v/zh-hans/guides/model-configuration/predefined-model) ✅ | [**Adding Custom Models**](https://docs.dify.ai/v/zh-hans/guides/model-configuration/customizable-model) ✅
+ * [Predefined Model Integration](./guides/model-configuration/predefined-model) ✅ | [Interfaces](https://github.com/langgenius/dify/blob/main/api/core/model_runtime/docs/en_US/interfaces) ✅ | [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model_providers/anthropic/llm/llm.py) ❌
+ * [Custom Model Integration](./guides/model-configuration/customizable-model) ✅ | [Interfaces](https://github.com/langgenius/dify/blob/main/api/core/model\_runtime/docs/en\_US/interfaces) ❌ | [llm.py](https://github.com/langgenius/dify-runtime/blob/main/lib/model\_providers/anthropic/llm/llm.py) ❌
* [Interfaces](./guides/model-configuration/interfaces) ✅ | [[PromptMessage](#PromptMessage) ✅ | [UserPromptMessage](#UserPromptMessage) ✅ | [SystemPromptMessage](#SystemPromptMessage) ✅ | [UserPromptMessage](#UserPromptMessage) ✅ | [AssistantPromptMessage](#AssistantPromptMessage) ✅ | [ToolPromptMessage](#ToolPromptMessage) ✅ | [[PromptMessageTool](#PromptMessageTool) ✅ | [[LLMResultChunk](#LLMResultChunk) ✅ | [LLMResult](#LLMResult) ✅ | [[LLMResultChunk](#LLMResultChunk) ✅ | [LLMResult](#LLMResult) ✅ | [TextEmbeddingResult](#TextEmbeddingResult) ✅ | [RerankResult](#RerankResult) ✅
* [Schema](./guides/model-configuration/schema) ✅ | [Provider](#Provider) ✅ | [AIModelEntity](#AIModelEntity) ✅ | [[ModelType](#ModelType) ✅ | [[ConfigurateMethod](#ConfigurateMethod) ✅ | [ProviderCredentialSchema](#ProviderCredentialSchema) ✅ | [ModelCredentialSchema](#ModelCredentialSchema) ✅ | [ModelType](#ModelType) ✅ | [[ModelFeature](#ModelFeature) ✅ | [LLMMode](#LLMMode) ✅ | [[ParameterRule](#ParameterRule) ✅ | [PriceConfig](#PriceConfig) ✅ | [[CredentialFormSchema](#CredentialFormSchema) ✅ | [[CredentialFormSchema](#CredentialFormSchema) ✅ | [FormType](#FormType) ✅ | [[FormOption](#FormOption) ✅ | [[FormShowOnObject](#FormShowOnObject) ✅ | [[FormShowOnObject](#FormShowOnObject) ✅
* [Load Balancing](./guides/model-configuration/load-balancing) ✅ | [subscribing to SaaS paid services](../../getting-started/cloud) ✅
@@ -89,7 +89,7 @@
* [Knowledge Request Rate Limit](./guides/knowledge-base/knowledge-request-rate-limit) ✅
* [Connect to an External Knowledge Base](./guides/knowledge-base/connect-external-knowledge) ✅ | [AWS Bedrock](https://aws.amazon.com/bedrock/) ✅ | [External Knowledge Base API](./external-knowledge-api-documentation) ✅ | [External Knowledge API](./external-knowledge-api-documentation) ✅ | [External Knowledge API](./external-knowledge-api-documentation) ✅ | [how-to-connect-aws-bedrock.md](../../learn-more/use-cases/how-to-connect-aws-bedrock.md "mention") ❌
* [External Knowledge API](./guides/knowledge-base/external-knowledge-api-documentation) ✅ | [Connecting to an External Knowledge Base](https://docs.dify.ai/guides/knowledge-base/connect-external-knowledge-base) ✅
-* [Tools](./guides/tools/README) ✅ | [documentation](https://docs.dify.ai/plugins/introduction) ✅ | [Dify Development Contribution Documentation](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) ✅ | [official documentation](https://swagger.io/specification/) ✅ | [dify-tools-worker](https://github.com/crazywoola/dify-tools-worker) ✅
+* [Tools](./guides/tools/README) ✅ | [documentation](https://docs.dify.ai/plugins/introduction) ✅ | [Dify Development Contribution Documentation](https://github.com/langgenius/dify/blob/main/CONTRIBUTING) ✅ | [official documentation](https://swagger.io/specification/) ✅ | [dify-tools-worker](https://github.com/crazywoola/dify-tools-worker) ✅
* [Quick Tool Integration](./guides/tools/quick-tool-integration) ✅ | [Develop Plugins](https://docs.dify.ai/plugins/quick-start/develop-plugins) ✅
* [Advanced Tool Integration](./guides/tools/advanced-tool-integration) ✅ | [Develop Plugins](https://docs.dify.ai/plugins/quick-start/develop-plugins) ✅ | [Quick Integration](https://docs.dify.ai/tutorials/quick-tool-integration) ✅
* [Tool Configuration](./guides/tools/tool-configuration/README) ✅ | [Install and Use Plugins](https://docs.dify.ai/plugins/quick-start/install-plugins) ✅ | [StableDiffusion](./stable-diffusion) ✅ | [SearXNG](./searxng) ✅
@@ -136,7 +136,7 @@
* [Management](./guides/management/README) ✅
* [App Management](./guides/management/app-management) ✅ | [Tool](../workflow/node/tools) ✅ | [Upgrade Dify](https://docs.dify.ai/getting-started/install-self-hosted/docker-compose#upgrade-dify) ✅
* [Team Members Management](./guides/management/team-members-management) ✅ | [environment variables](https://docs.dify.ai/getting-started/install-self-hosted/environments) ✅
- * [Personal Account Management](./guides/management/personal-account-management) ✅ | [GitHub repository](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) ✅
+ * [Personal Account Management](./guides/management/personal-account-management) ✅ | [GitHub repository](https://github.com/langgenius/dify/blob/main/CONTRIBUTING) ✅
* [Subscription Management](./guides/management/subscription-management) ✅ | [Marked replies](https://docs.dify.ai/guides/biao-zhu/logs) ✅ | [Dify pricing](https://dify.ai/pricing) ✅
* [Version Control](./guides/management/version-control) ✅
@@ -152,7 +152,7 @@
## Community
* [Seek Support](./community/support) ✅ | [GitHub](https://github.com/langgenius/dify) ✅ | [Discord community](https://discord.gg/8Tpq4AcN9c) ✅ | [Reddit](https://www.reddit.com/r/difyai/) ✅ | [hello@dify.ai](mailto:hello@dify.ai) ✅
-* [Become a Contributor](./community/contribution) ✅ | [License and Contributor Agreement](https://github.com/langgenius/dify/blob/main/LICENSE) ✅ | [code of conduct](https://github.com/langgenius/.github/blob/main/CODE\_OF\_CONDUCT.md) ❌ | [Find](https://github.com/langgenius/dify/issues?q=is:issue+is:closed) ✅ | [open](https://github.com/langgenius/dify/issues/new/choose) ✅ | [@perzeusss](https://github.com/perzeuss) ✅ | [Feature Request Copilot](https://udify.app/chat/MK2kVSnw1gakVwMX) ✅ | [@yeuoly](https://github.com/Yeuoly) ✅ | [@jyong](https://github.com/JohnJyong) ✅ | [@GarfieldDai](https://github.com/GarfieldDai) ✅ | [@iamjoel](https://github.com/iamjoel) ✅ | [@zxhlyh](https://github.com/zxhlyh) ✅ | [@guchenhe](https://github.com/guchenhe) ✅ | [@crazywoola](https://github.com/crazywoola) ✅ | [@takatost](https://github.com/takatost) ✅ | [community feedback board](https://github.com/langgenius/dify/discussions/categories/ideas) ✅ | [Docker](https://www.docker.com/) ✅ | [Docker Compose](https://docs.docker.com/compose/install/) ✅ | [Node.js v18.x (LTS)](http://nodejs.org) ✅ | [npm](https://www.npmjs.com/) ✅ | [Yarn](https://yarnpkg.com/) ✅ | [Python](https://www.python.org/) ✅ | [Backend README](https://github.com/langgenius/dify/blob/main/api/README.md) ✅ | [Frontend README](https://github.com/langgenius/dify/blob/main/web/README.md) ✅ | [installation FAQ](https://docs.dify.ai/learn-more/faq/install-faq) ✅ | [http://localhost:3000](http://localhost:3000) ✅ | [this guide](https://github.com/langgenius/dify/blob/main/api/core/model\_runtime/README.md) ❌ | [this guide](https://github.com/langgenius/dify/blob/main/api/core/tools/README.md) ❌ | [Dify-docs](https://github.com/langgenius/dify-docs/tree/main/en/guides/tools/tool-configuration) ✅ | [Flask](https://flask.palletsprojects.com/en/3.0.x/) ✅ | [SQLAlchemy](https://www.sqlalchemy.org/) ✅ | [Celery](https://docs.celeryq.dev/en/stable/getting-started/introduction.html) ✅ | [Next.js](https://nextjs.org/) ✅ | 
[Tailwind CSS](https://tailwindcss.com/) ✅ | [React-i18next](https://react.i18next.com/) ✅ | [GitHub's pull request tutorial](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests) ✅ | [README](https://github.com/langgenius/dify/blob/main/README.md) ✅ | [Discord](https://discord.com/invite/8Tpq4AcN9c) ✅
+* [Become a Contributor](./community/contribution) ✅ | [License and Contributor Agreement](https://github.com/langgenius/dify/blob/main/LICENSE) ✅ | [code of conduct](https://github.com/langgenius/.github/blob/main/CODE\_OF\_CONDUCT) ❌ | [Find](https://github.com/langgenius/dify/issues?q=is:issue+is:closed) ✅ | [open](https://github.com/langgenius/dify/issues/new/choose) ✅ | [@perzeusss](https://github.com/perzeuss) ✅ | [Feature Request Copilot](https://udify.app/chat/MK2kVSnw1gakVwMX) ✅ | [@yeuoly](https://github.com/Yeuoly) ✅ | [@jyong](https://github.com/JohnJyong) ✅ | [@GarfieldDai](https://github.com/GarfieldDai) ✅ | [@iamjoel](https://github.com/iamjoel) ✅ | [@zxhlyh](https://github.com/zxhlyh) ✅ | [@guchenhe](https://github.com/guchenhe) ✅ | [@crazywoola](https://github.com/crazywoola) ✅ | [@takatost](https://github.com/takatost) ✅ | [community feedback board](https://github.com/langgenius/dify/discussions/categories/ideas) ✅ | [Docker](https://www.docker.com/) ✅ | [Docker Compose](https://docs.docker.com/compose/install/) ✅ | [Node.js v18.x (LTS)](http://nodejs.org) ✅ | [npm](https://www.npmjs.com/) ✅ | [Yarn](https://yarnpkg.com/) ✅ | [Python](https://www.python.org/) ✅ | [Backend README](https://github.com/langgenius/dify/blob/main/api/README) ✅ | [Frontend README](https://github.com/langgenius/dify/blob/main/web/README) ✅ | [installation FAQ](https://docs.dify.ai/learn-more/faq/install-faq) ✅ | [http://localhost:3000](http://localhost:3000) ✅ | [this guide](https://github.com/langgenius/dify/blob/main/api/core/model\_runtime/README) ❌ | [this guide](https://github.com/langgenius/dify/blob/main/api/core/tools/README) ❌ | [Dify-docs](https://github.com/langgenius/dify-docs/tree/main/en/guides/tools/tool-configuration) ✅ | [Flask](https://flask.palletsprojects.com/en/3.0.x/) ✅ | [SQLAlchemy](https://www.sqlalchemy.org/) ✅ | [Celery](https://docs.celeryq.dev/en/stable/getting-started/introduction.html) ✅ | [Next.js](https://nextjs.org/) ✅ | [Tailwind 
CSS](https://tailwindcss.com/) ✅ | [React-i18next](https://react.i18next.com/) ✅ | [GitHub's pull request tutorial](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests) ✅ | [README](https://github.com/langgenius/dify/blob/main/README) ✅ | [Discord](https://discord.com/invite/8Tpq4AcN9c) ✅
* [Contributing to Dify Documentation](./community/docs-contribution) ✅ | [open-source project](https://github.com/langgenius/dify-docs) ✅ | [Issues page](https://github.com/langgenius/dify-docs/issues) ✅ | [Discord](https://discord.com/invite/8Tpq4AcN9c) ✅
## Plugins
@@ -207,7 +207,7 @@
* [Integrate Open Source Models from Replicate](./development/models-integration/replicate) ✅ | [Language models](https://replicate.com/collections/language-models) ✅ | [Embedding models](https://replicate.com/collections/embedding-models) ✅ | [registered address](https://replicate.com/signin?next=/docs) ✅ | [get address](https://replicate.com/signin?next=/docs) ✅ | [Language models](https://replicate.com/collections/language-models) ✅ | [Embedding models](https://replicate.com/collections/embedding-models) ✅
* [Integrate Local Models Deployed by Xinference](./development/models-integration/xinference) ✅ | [Xorbits inference](https://github.com/xorbitsai/inference) ✅ | [local deployment](https://github.com/xorbitsai/inference/blob/main/README.md#local) ✅ | [distributed deployment](https://github.com/xorbitsai/inference/blob/main/README.md#distributed) ✅ | [Xinference built-in models](https://inference.readthedocs.io/en/latest/models/builtin/index.html) ✅ | [Xinference builtin models](https://github.com/xorbitsai/inference/blob/main/README.md#builtin-models) ✅ | [Xorbits Inference](https://github.com/xorbitsai/inference) ✅
* [Integrate Local Models Deployed by OpenLLM](./development/models-integration/openllm) ✅ | [OpenLLM](https://github.com/bentoml/OpenLLM) ✅ | [Supported Model List](https://github.com/bentoml/OpenLLM#-supported-models) ✅ | [OpenLLM](https://github.com/bentoml/OpenLLM) ✅
- * [Integrate Local Models Deployed by LocalAI](./development/models-integration/localai) ✅ | [LocalAI](https://github.com/go-skynet/LocalAI) ✅ | [Getting Started](https://localai.io/basics/getting_started/) ✅ | [LocalAI Data query example](https://github.com/go-skynet/LocalAI/blob/master/examples/langchain-chroma/README.md) ❌
+ * [Integrate Local Models Deployed by LocalAI](./development/models-integration/localai) ✅ | [LocalAI](https://github.com/go-skynet/LocalAI) ✅ | [Getting Started](https://localai.io/basics/getting_started/) ✅ | [LocalAI Data query example](https://github.com/go-skynet/LocalAI/blob/master/examples/langchain-chroma/README) ❌
* [Integrate Local Models Deployed by Ollama](./development/models-integration/ollama) ✅ | [ollama](<../../.gitbook/assets/ollama (1) ❌ | [Ollama](https://github.com/jmorganca/ollama) ✅ | [https://ollama.com/download](https://ollama.com/download) ✅ | [Ollama Models](https://ollama.com/library) ✅ | [FAQ](#faq) ✅
* [Integrate Models on LiteLLM Proxy](./development/models-integration/litellm) ✅ | [LiteLLM Proxy](https://github.com/BerriAI/litellm) ✅ | [Detailed docs on how to setup litellm config - here](https://docs.litellm.ai/docs/proxy/configs) ✅ | [LiteLLM](https://github.com/BerriAI/litellm) ✅ | [LiteLLM Proxy Server](https://docs.litellm.ai/docs/simple\_proxy) ❌
* [Integrating with GPUStack for Local Model Deployment](./development/models-integration/gpustack) ✅ | [GPUStack](https://github.com/gpustack/gpustack) ✅ | [Documentation](https://docs.gpustack.ai) ✅ | [Github Repo](https://github.com/gpustack/gpustack) ✅
diff --git a/en/plugins/best-practice/README.mdx b/en/plugins/best-practice/README.mdx
index e2299b17..3eacfaa8 100644
--- a/en/plugins/best-practice/README.mdx
+++ b/en/plugins/best-practice/README.mdx
@@ -5,7 +5,7 @@ title: Best Practice
For more comprehensive best practices on developing plugins with advanced functionality, please refer to the extended plugin development guidelines.
-
- develop-a-slack-bot-plugin.md
+
+ Gain a solid understanding of how to build a Slack Bot that’s powered by AI—one that can respond to user questions right inside Slack.
diff --git a/en/plugins/best-practice/develop-a-slack-bot-plugin.mdx b/en/plugins/best-practice/develop-a-slack-bot-plugin.mdx
index cb4f3548..503da49d 100644
--- a/en/plugins/best-practice/develop-a-slack-bot-plugin.mdx
+++ b/en/plugins/best-practice/develop-a-slack-bot-plugin.mdx
@@ -36,7 +36,7 @@ Slack is an open, real-time communication platform with a robust API. Among its
### Prerequisites
-- **Dify plugin developing tool**: For more information, see [Initializing the Development Tool](../tool-initialization.mdx).
+- **Dify plugin developing tool**: For more information, see [Initializing the Development Tool](/en/plugins/quick-start/develop-plugins/initialize-development-tools).
- **Python environment (version ≥ 3.12)**: Refer to this [Python Installation Tutorial](https://pythontest.com/python/installing-python-3-11/) or ask an LLM for a complete setup guide.
- Create a Slack App and Get an OAuth Token
@@ -70,7 +70,7 @@ dify plugin init
Follow the prompts to provide basic project info. Select the `extension` template, and grant both `Apps` and `Endpoints` permissions.
-For additional details on reverse-invoking Dify services within a plugin, see [Reverse Invocation: App](../../../api-documentation/fan-xiang-diao-yong-dify-fu-wu/app.md).
+For additional details on reverse-invoking Dify services within a plugin, see [Reverse Invocation: App](../../../api-documentation/fan-xiang-diao-yong-dify-fu-wu/app).

@@ -320,15 +320,14 @@ For a complete Dify plugin project example, visit the [GitHub repository](https:
If you want to explore more about plugin development, check the following:
**Quick Starts:**
-- [Develop an Extension Plugin](../extension.md)
+- [Develop an Extension Plugin](../extension)
- [Develop a Model Plugin](../model/)
-- [Bundle Plugins: Packaging Multiple Plugins](../bundle.md)
+- [Bundle Plugins: Packaging Multiple Plugins](../bundle)
**Plugin Interface Docs:**
-- [Manifest](../../../api-documentation/manifest.md) structure
-- [Endpoint](../../../api-documentation/endpoint.md) definitions
+- [Manifest](../../../api-documentation/manifest) structure
+- [Endpoint](../../../api-documentation/endpoint) definitions
- [Reverse-Calling Dify Services](../../../api-documentation/fan-xiang-diao-yong-dify-fu-wu/)
-- [Tools](../../../api-documentation/tool.md)
+- [Tools](../../../api-documentation/tool)
- [Models](../../../api-documentation/model/)
-```
diff --git a/en/plugins/best-practice/how-to-use-mcp-zapier.mdx b/en/plugins/best-practice/how-to-use-mcp-zapier.mdx
index 376e8251..caea75ee 100644
--- a/en/plugins/best-practice/how-to-use-mcp-zapier.mdx
+++ b/en/plugins/best-practice/how-to-use-mcp-zapier.mdx
@@ -1,5 +1,5 @@
---
-title: Dify MCP Plugin Guide | One-Click Connection to Zapier and Automated Email Sending
+title: One-Click Connection to Zapier and Automated Email Sending
---
**This article will help you:**
diff --git a/en/plugins/faq.mdx b/en/plugins/faq.mdx
index 762ab10d..d61e1c5c 100644
--- a/en/plugins/faq.mdx
+++ b/en/plugins/faq.mdx
@@ -26,3 +26,6 @@ docker compose up -d
Once this field is added, the Dify platform will allow the installation of all plugins that are not listed (and thus not verified) in the Dify Marketplace.
**Note**: For security reasons, always install plugins from unknown sources in a test or sandbox environment first. Confirm their safety before deploying to the production environment.
+
+If you want stricter control over which plugins can be installed, refer to [Signing Plugins for Third-Party Signature Verification](./publish-plugins/signing-plugins-for-third-party-signature-verification).
+
diff --git a/en/plugins/publish-plugins/publish-to-dify-marketplace/README.mdx b/en/plugins/publish-plugins/publish-to-dify-marketplace/README.mdx
index bbe39220..da456c2a 100644
--- a/en/plugins/publish-plugins/publish-to-dify-marketplace/README.mdx
+++ b/en/plugins/publish-plugins/publish-to-dify-marketplace/README.mdx
@@ -10,8 +10,8 @@ Follow these steps to submit your plugin as a Pull Request (PR) in the [GitHub r
To publish your plugin on the Dify Marketplace, follow these steps:
-1. Develop and test your plugin according to the [Plugin Developer Guidelines](plugin-developer-guidelines.md).
-2. Write a [Plugin Privacy Policy](plugin-privacy-protection-guidelines.md) for your plugin in line with Dify’s privacy policy requirements. In your plugin’s [Manifest](../../schema-definition/manifest.md) file, include the file path or URL for this privacy policy.
+1. Develop and test your plugin according to the [Plugin Developer Guidelines](plugin-developer-guidelines).
+2. Write a [Plugin Privacy Policy](plugin-privacy-protection-guidelines) for your plugin in line with Dify’s privacy policy requirements. In your plugin’s [Manifest](../../schema-definition/manifest) file, include the file path or URL for this privacy policy.
3. Package your plugin for distribution.
4. Fork the [Dify Plugins](https://github.com/langgenius/dify-plugins) repository on GitHub.
5. Create an organization directory under the repository’s main structure, then create a subdirectory named after your plugin. Place your plugin’s source code and the packaged `.pkg` file in that subdirectory.
@@ -25,7 +25,7 @@ Here is the general plugin review process:
**Note**:
-The Contributor Agreement mentioned above refers to [Plugin Developer Guidelines](plugin-developer-guidelines.md).
+The Contributor Agreement mentioned above refers to [Plugin Developer Guidelines](plugin-developer-guidelines).
***
@@ -68,7 +68,7 @@ Respond proactively to reviewer questions and feedback.
**3. Plugin Functionality and Relevance**
-* Test plugins according to [Plugin Development Guidelines](plugin-developer-guidelines.md).
+* Test plugins according to [Plugin Development Guidelines](plugin-developer-guidelines).
* Ensure the plugin's purpose is reasonable within the Dify ecosystem.
[Dify.AI](https://dify.ai/) reserves the right to accept or reject plugin submissions.
diff --git a/en/plugins/publish-plugins/publish-to-dify-marketplace/plugin-developer-guidelines.mdx b/en/plugins/publish-plugins/publish-to-dify-marketplace/plugin-developer-guidelines.mdx
index 88611251..ddb0dff3 100644
--- a/en/plugins/publish-plugins/publish-to-dify-marketplace/plugin-developer-guidelines.mdx
+++ b/en/plugins/publish-plugins/publish-to-dify-marketplace/plugin-developer-guidelines.mdx
@@ -12,7 +12,7 @@ title: Plugin Developer Guidelines
* Setup and usage instructions
* Required codes, APIs, credentials, or other information needed to connect the plugin to services
* Ensure collected user information is only used for service connectivity and plugin improvements.
-* Prepare the plugin's privacy policy file or URL according to the [Plugin Privacy Protection Guidelines](plugin-developer-guidelines.md).
+* Prepare the plugin's privacy policy file or URL according to the [Plugin Privacy Protection Guidelines](plugin-privacy-protection-guidelines).
2. **Validate Plugin Value Proposition**
diff --git a/en/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx b/en/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx
index 252214fe..7eec8007 100644
--- a/en/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx
+++ b/en/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx
@@ -11,7 +11,7 @@ Below, you'll see how to develop a plugin that supports **Function Calling** to
- Dify plugin scaffolding tool
- Python environment (version ≥ 3.12)
-For details on preparing the plugin development tool, see [Initializing the Development Tool](initialize-development-tools.md).
+For details on preparing the plugin development tool, see [Initializing the Development Tool](initialize-development-tools).
**Tip**: Run `dify version` in your terminal to confirm that the scaffolding tool is installed.
@@ -252,9 +252,7 @@ features:
After modifying the plugin configuration and restarting, you will see the **Memory** toggle. Click the toggle button on the right to enable memory.
-
-
-
+
Once enabled, you can adjust the memory window size using the slider, which determines how many previous conversation turns the model can “remember”.
@@ -1134,9 +1132,9 @@ class BasicAgentAgentStrategy(AgentStrategy):
### 3. Debugging the Plugin
-After finalizing the plugin's declaration file and implementation code, run `python -m main` in the plugin directory to restart it. Next, confirm the plugin runs correctly. Dify offers remote debugging—go to ["Plugin Management"](https://console-plugin.dify.dev/plugins) to obtain your debug key and remote server address.
+After finalizing the plugin's declaration file and implementation code, run `python -m main` in the plugin directory to restart it. Next, confirm the plugin runs correctly. Dify offers remote debugging—go to ["Plugin Management"](https://cloud.dify.ai/plugins) to obtain your debug key and remote server address.
-
+
Back in your plugin project, copy `.env.example` to `.env` and insert the relevant remote server and debug key info.
@@ -1147,7 +1145,6 @@ REMOTE_INSTALL_PORT=5003
REMOTE_INSTALL_KEY=****-****-****-****-****
```
-
Then run:
```bash
@@ -1156,8 +1153,6 @@ python -m main
You'll see the plugin installed in your Workspace, and team members can also access it.
-