From 6f82ad304b37550c35ec252b7d5e28a449e74b34 Mon Sep 17 00:00:00 2001
From: AllenWriter
Date: Wed, 23 Apr 2025 10:38:13 +0800
Subject: [PATCH] Docs: sync docs 4.22
---
conversion.log | 7 +
docs.json | 50 +-
en/community/contribution.mdx | 2 +-
.../models-integration/gpustack.mdx | 11 +-
en/getting-started/cloud.mdx | 2 +-
en/getting-started/dify-for-education.mdx | 167 +++
en/getting-started/dify-premium.mdx | 4 +
.../install-self-hosted/environments.mdx | 9 +-
.../install-self-hosted/faqs.mdx | 22 +-
.../install-self-hosted/local-source-code.mdx | 3 +-
en/getting-started/readme/model-providers.mdx | 8 +
.../creating-an-application.mdx | 2 +-
.../launch-your-webapp-quickly/README.mdx | 2 +-
.../external-knowledge-api-documentation.mdx | 53 +-
.../connect-external-knowledge-base.mdx | 34 +-
.../knowledge-base/external-knowledge-api.mdx | 15 +-
.../maintain-dataset-via-api.mdx | 4 +-
.../sync-from-website.mdx | 4 +-
.../knowledge-request-rate-limit.mdx | 42 +
en/guides/management/version-control.mdx | 8 +-
en/guides/workflow/node/agent.mdx | 9 +-
en/guides/workflow/node/loop.mdx | 78 +-
en/guides/workflow/node/template.mdx | 35 +-
en/guides/workflow/nodes/README.mdx | 10 -
en/guides/workflow/nodes/agent.mdx | 78 --
en/guides/workflow/nodes/answer.mdx | 31 -
en/guides/workflow/nodes/code.mdx | 122 --
en/guides/workflow/nodes/doc-extractor.mdx | 65 -
en/guides/workflow/nodes/end.mdx | 32 -
en/guides/workflow/nodes/http-request.mdx | 65 -
en/guides/workflow/nodes/ifelse.mdx | 47 -
en/guides/workflow/nodes/iteration.mdx | 186 ---
.../workflow/nodes/knowledge-retrieval.mdx | 41 -
en/guides/workflow/nodes/list-operator.mdx | 80 --
en/guides/workflow/nodes/llm.mdx | 177 ---
en/guides/workflow/nodes/loop.mdx | 85 --
.../workflow/nodes/parameter-extractor.mdx | 61 -
.../workflow/nodes/question-classifier.mdx | 57 -
en/guides/workflow/nodes/start.mdx | 150 ---
en/guides/workflow/nodes/template.mdx | 32 -
en/guides/workflow/nodes/tools.mdx | 64 -
.../workflow/nodes/variable-aggregator.mdx | 42 -
.../workflow/nodes/variable-assigner.mdx | 183 ---
en/guides/workflow/shortcut-key.mdx | 12 +-
.../build-an-notion-ai-assistant copy.mdx | 167 +++
.../building-an-ai-thesis-slack-bot.mdx | 1 -
...-chatbot-with-business-data-in-minutes.mdx | 2 +-
.../private-ai-ollama-deepseek-dify.mdx | 4 +-
.../best-practice/how-to-use-mcp-zapier.mdx | 129 ++
en/plugins/introduction.mdx | 4 +-
en/plugins/publish-plugins/README.mdx | 5 +-
.../package-plugin-file-and-publish.mdx | 1 -
.../plugin-auto-publish-pr.mdx | 298 +++++
...for-third-party-signature-verification.mdx | 96 ++
en/plugins/quick-start/debug-plugin.mdx | 2 +-
.../quick-start/develop-plugins/README.mdx | 2 +-
.../develop-plugins/agent-strategy-plugin.mdx | 201 ++-
.../quick-start/develop-plugins/bundle.mdx | 2 +-
.../model-plugin/customizable-model.mdx | 4 +-
.../develop-plugins/tool-plugin.mdx | 2 +-
en/plugins/quick-start/install-plugins.mdx | 14 +-
en/plugins/schema-definition/manifest.mdx | 6 +-
.../models-integration/gpustack.mdx | 9 +-
ja-jp/getting-started/cloud.mdx | 34 +-
ja-jp/getting-started/dify-for-education.mdx | 171 +++
ja-jp/getting-started/dify-premium.mdx | 4 +
.../install-self-hosted/environments.mdx | 9 +-
.../install-self-hosted/faq.mdx | 22 +-
.../install-self-hosted/local-source-code.mdx | 4 +-
.../readme/model-providers.mdx | 10 +-
.../app-toolkits/moderation-tool.mdx | 9 +-
.../external-knowledge-api-documentation.mdx | 16 +
.../connect-external-knowledge-base.mdx | 34 +
.../connect-external-knowledge-base.mdx.bak | 123 --
.../readme.mdx | 4 +-
ja-jp/guides/knowledge-base/faq.mdx.bak | 20 -
...grate-knowledge-within-application.mdx.bak | 204 ----
...nowledge-and-documents-maintenance.mdx.bak | 134 --
.../introduction.mdx | 48 -
.../introduction.mdx.bak | 48 -
.../maintain-dataset-via-api.mdx | 222 ++--
.../maintain-knowledge-documents.mdx.bak | 133 --
.../readme.mdx | 96 ++
.../knowledge-request-rate-limit.mdx | 40 +
ja-jp/guides/knowledge-base/metadata.mdx | 527 ++++----
ja-jp/guides/knowledge-base/metadata.mdx.bak | 409 -------
ja-jp/guides/knowledge-base/readme.mdx.bak | 39 -
.../retrieval-test-and-citation.md | 68 ++
.../retrieval-test-and-citation.mdx | 11 +-
.../workflow/additional-feature.mdx.bak | 110 --
.../workflow/application-publishing.mdx.bak | 25 -
ja-jp/guides/workflow/bulletin.mdx.bak | 131 --
ja-jp/guides/workflow/concepts.mdx.bak | 34 -
ja-jp/guides/workflow/file-upload.mdx.bak | 201 ---
ja-jp/guides/workflow/node/agent.mdx | 12 +-
ja-jp/guides/workflow/node/code.mdx.bak | 94 --
.../workflow/node/doc-extractor.mdx.bak | 67 -
ja-jp/guides/workflow/node/end.mdx.bak | 30 -
.../workflow/node/knowledge-retrieval.mdx.bak | 59 -
.../workflow/node/list-operator.mdx.bak | 93 --
ja-jp/guides/workflow/node/llm.mdx.bak | 156 ---
ja-jp/guides/workflow/node/loop.mdx | 94 +-
ja-jp/guides/workflow/node/loop.mdx.bak | 135 ++
.../workflow/node/parameter-extractor.mdx.bak | 73 --
.../workflow/node/question-classifier.mdx.bak | 65 -
ja-jp/guides/workflow/node/start.mdx.bak | 88 --
ja-jp/guides/workflow/node/template.mdx | 48 +-
ja-jp/guides/workflow/node/tools.mdx.bak | 32 -
.../workflow/node/variable-assigner.mdx.bak | 169 ---
ja-jp/guides/workflow/nodes/README.mdx | 7 -
ja-jp/guides/workflow/nodes/agent.mdx | 99 --
ja-jp/guides/workflow/nodes/answer.mdx | 19 -
ja-jp/guides/workflow/nodes/code.mdx | 94 --
ja-jp/guides/workflow/nodes/code.mdx.bak | 94 --
ja-jp/guides/workflow/nodes/doc-extractor.mdx | 67 -
.../workflow/nodes/doc-extractor.mdx.bak | 67 -
ja-jp/guides/workflow/nodes/end.mdx | 30 -
ja-jp/guides/workflow/nodes/end.mdx.bak | 30 -
ja-jp/guides/workflow/nodes/http-request.mdx | 36 -
ja-jp/guides/workflow/nodes/ifelse.mdx | 55 -
ja-jp/guides/workflow/nodes/iteration.mdx | 189 ---
.../workflow/nodes/knowledge-retrieval.mdx | 59 -
.../nodes/knowledge-retrieval.mdx.bak | 59 -
ja-jp/guides/workflow/nodes/list-operator.mdx | 93 --
.../workflow/nodes/list-operator.mdx.bak | 93 --
ja-jp/guides/workflow/nodes/llm.mdx | 156 ---
ja-jp/guides/workflow/nodes/llm.mdx.bak | 156 ---
ja-jp/guides/workflow/nodes/loop.mdx | 91 --
.../workflow/nodes/parameter-extractor.mdx | 73 --
.../nodes/parameter-extractor.mdx.bak | 73 --
.../workflow/nodes/question-classifier.mdx | 65 -
.../nodes/question-classifier.mdx.bak | 65 -
ja-jp/guides/workflow/nodes/start.mdx | 88 --
ja-jp/guides/workflow/nodes/start.mdx.bak | 88 --
ja-jp/guides/workflow/nodes/template.mdx | 48 -
ja-jp/guides/workflow/nodes/tools.mdx | 32 -
ja-jp/guides/workflow/nodes/tools.mdx.bak | 32 -
.../workflow/nodes/variable-aggregator.mdx | 45 -
.../workflow/nodes/variable-assigner.mdx | 169 ---
.../workflow/nodes/variable-assigner.mdx.bak | 169 ---
ja-jp/guides/workflow/publish.mdx.bak | 24 -
ja-jp/guides/workflow/variables.mdx.bak | 186 ---
.../building-an-ai-thesis-slack-bot.mdx | 1 -
.../develop-a-slack-bot-plugin.mdx | 1 -
.../develop-a-slack-bot-plugin.mdx.bak | 331 -----
.../best-practice/how-to-use-mcp-zapier.mdx | 127 ++
ja-jp/plugins/publish-plugins/README.mdx | 2 +
ja-jp/plugins/publish-plugins/README.mdx.bak | 67 -
.../package-plugin-file-and-publish.mdx.bak | 43 -
.../plugin-auto-publish-pr.mdx | 296 +++++
...ish-plugin-on-personal-github-repo.mdx.bak | 87 --
...for-third-party-signature-verification.mdx | 96 ++
ja-jp/plugins/quick-start/debug-plugin.mdx | 6 +-
.../develop-plugins/agent-strategy-plugin.mdx | 176 ++-
.../agent-strategy-plugin.mdx.bak | 1082 -----------------
.../quick-start/develop-plugins/bundle.mdx | 2 +-
.../model-plugin/customizable-model.mdx | 4 +-
.../develop-plugins/tool-plugin.mdx | 2 +-
ja-jp/plugins/schema-definition/manifest.mdx | 6 +-
.../schema-definition/manifest.mdx.bak | 2 +-
scripts/convert_gitbook_to_mintlify.py | 79 ++
scripts/high-light-convert-4.22.py | 133 ++
scripts/high-light-convert.py | 170 +++
zh-hans/community/contribution.mdx | 2 +-
.../development/backend/sandbox/README.mdx | 2 +-
.../development/migration/migrate-to-v1.mdx | 15 +-
.../models-integration/gpustack.mdx | 11 +-
zh-hans/getting-started/cloud.mdx | 33 +-
.../getting-started/dify-for-education.mdx | 173 +++
zh-hans/getting-started/dify-premium.mdx | 4 +
.../install-self-hosted/environments.mdx | 12 +-
.../install-self-hosted/faq.mdx | 24 +-
.../install-self-hosted/local-source-code.mdx | 4 +-
.../readme/model-providers.mdx | 8 +
.../guides/annotation/annotation-reply.mdx | 2 +-
.../guides/application-orchestrate/agent.mdx | 1 -
.../chatbot-application.mdx | 2 +-
.../creating-an-application.mdx | 4 +-
.../guides/application-orchestrate/readme.mdx | 2 +-
.../based-on-frontend-templates.mdx | 8 +-
.../developing-with-apis.mdx | 2 +-
.../text-generator.mdx | 4 +-
.../external-knowledge-api-documentation.mdx | 16 +-
.../connect-external-knowledge-base.mdx | 28 +-
.../maintain-dataset-via-api.mdx | 4 +-
zh-hans/guides/management/version-control.mdx | 2 +-
.../guides/monitoring/annotation-reply.mdx | 2 +-
.../api-based/cloudflare-workers.mdx | 29 +-
.../tools/extensions/api-based/moderation.mdx | 116 +-
.../code-based/external-data-tool.mdx | 4 +-
.../extensions/code-based/moderation.mdx | 4 +-
.../tools/tool-configuration/searxng.mdx | 31 +-
zh-hans/guides/workflow/file-upload.mdx | 8 -
zh-hans/guides/workflow/node/agent.mdx | 8 +
zh-hans/guides/workflow/node/llm.mdx | 2 +-
zh-hans/guides/workflow/node/loop.mdx | 92 +-
.../workflow/node/parameter-extractor.mdx | 2 +-
zh-hans/guides/workflow/node/template.mdx | 31 +-
zh-hans/guides/workflow/nodes/README.mdx | 7 -
zh-hans/guides/workflow/nodes/agent.mdx | 73 --
zh-hans/guides/workflow/nodes/answer.mdx | 21 -
zh-hans/guides/workflow/nodes/code.mdx | 122 --
.../guides/workflow/nodes/doc-extractor.mdx | 65 -
zh-hans/guides/workflow/nodes/end.mdx | 31 -
.../guides/workflow/nodes/http-request.mdx | 63 -
zh-hans/guides/workflow/nodes/ifelse.mdx | 50 -
zh-hans/guides/workflow/nodes/iteration.mdx | 175 ---
.../workflow/nodes/knowledge-retrieval.mdx | 53 -
.../guides/workflow/nodes/list-operator.mdx | 88 --
zh-hans/guides/workflow/nodes/llm.mdx | 158 ---
zh-hans/guides/workflow/nodes/loop.mdx | 89 --
.../workflow/nodes/parameter-extractor.mdx | 66 -
.../workflow/nodes/question-classifier.mdx | 58 -
zh-hans/guides/workflow/nodes/start.mdx | 70 --
zh-hans/guides/workflow/nodes/template.mdx | 48 -
zh-hans/guides/workflow/nodes/tools.mdx | 56 -
.../workflow/nodes/variable-aggregator.mdx | 45 -
.../workflow/nodes/variable-assigner.mdx | 167 ---
zh-hans/guides/workflow/shortcut-key.mdx | 2 +-
zh-hans/guides/workspace/app.mdx | 8 +-
zh-hans/guides/workspace/readme.mdx | 14 +-
.../prompt-engineering-1/README.mdx | 9 +-
.../prompt-engineering-template.mdx | 14 +-
zh-hans/learn-more/faq/plugins.mdx | 2 +
.../building-an-ai-thesis-slack-bot.mdx | 136 +++
...ious-im-platforms-by-using-langbot.mdx.mdx | 105 ++
.../learn-more/use-cases/dify-on-dingtalk.mdx | 6 +-
.../learn-more/use-cases/dify-on-wechat.mdx | 204 ++--
.../best-practice/how-to-use-mcp-zapier.mdx | 129 ++
zh-hans/plugins/publish-plugins/README.mdx | 2 +
.../package-plugin-file-and-publish.mdx | 1 -
.../plugin-auto-publish-pr.mdx | 297 +++++
...ish-plugin-on-personal-github-repo.mdx.bak | 92 --
...for-third-party-signature-verification.mdx | 100 ++
zh-hans/plugins/quick-start/debug-plugin.mdx | 4 +-
.../develop-plugins/agent-strategy-plugin.mdx | 174 ++-
.../model-plugin/customizable-model.mdx | 2 +-
.../develop-plugins/tool-plugin.mdx | 4 +-
.../plugins/schema-definition/manifest.mdx | 6 +-
.../model.mdx | 20 +-
zh-hans/url-report/link-check-report.md | 2 +-
241 files changed, 5218 insertions(+), 10971 deletions(-)
create mode 100644 en/getting-started/dify-for-education.mdx
create mode 100644 en/guides/knowledge-base/knowledge-request-rate-limit.mdx
delete mode 100644 en/guides/workflow/nodes/README.mdx
delete mode 100644 en/guides/workflow/nodes/agent.mdx
delete mode 100644 en/guides/workflow/nodes/answer.mdx
delete mode 100644 en/guides/workflow/nodes/code.mdx
delete mode 100644 en/guides/workflow/nodes/doc-extractor.mdx
delete mode 100644 en/guides/workflow/nodes/end.mdx
delete mode 100644 en/guides/workflow/nodes/http-request.mdx
delete mode 100644 en/guides/workflow/nodes/ifelse.mdx
delete mode 100644 en/guides/workflow/nodes/iteration.mdx
delete mode 100644 en/guides/workflow/nodes/knowledge-retrieval.mdx
delete mode 100644 en/guides/workflow/nodes/list-operator.mdx
delete mode 100644 en/guides/workflow/nodes/llm.mdx
delete mode 100644 en/guides/workflow/nodes/loop.mdx
delete mode 100644 en/guides/workflow/nodes/parameter-extractor.mdx
delete mode 100644 en/guides/workflow/nodes/question-classifier.mdx
delete mode 100644 en/guides/workflow/nodes/start.mdx
delete mode 100644 en/guides/workflow/nodes/template.mdx
delete mode 100644 en/guides/workflow/nodes/tools.mdx
delete mode 100644 en/guides/workflow/nodes/variable-aggregator.mdx
delete mode 100644 en/guides/workflow/nodes/variable-assigner.mdx
create mode 100644 en/learn-more/use-cases/build-an-notion-ai-assistant copy.mdx
create mode 100644 en/plugins/best-practice/how-to-use-mcp-zapier.mdx
create mode 100644 en/plugins/publish-plugins/plugin-auto-publish-pr.mdx
create mode 100644 en/plugins/publish-plugins/signing-plugins-for-third-party-signature-verification.mdx
create mode 100644 ja-jp/getting-started/dify-for-education.mdx
delete mode 100644 ja-jp/guides/knowledge-base/connect-external-knowledge-base.mdx.bak
delete mode 100644 ja-jp/guides/knowledge-base/faq.mdx.bak
delete mode 100644 ja-jp/guides/knowledge-base/integrate-knowledge-within-application.mdx.bak
delete mode 100644 ja-jp/guides/knowledge-base/knowledge-and-documents-maintenance.mdx.bak
delete mode 100644 ja-jp/guides/knowledge-base/knowledge-and-documents-maintenance/introduction.mdx
delete mode 100644 ja-jp/guides/knowledge-base/knowledge-and-documents-maintenance/introduction.mdx.bak
delete mode 100644 ja-jp/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-knowledge-documents.mdx.bak
create mode 100644 ja-jp/guides/knowledge-base/knowledge-and-documents-maintenance/readme.mdx
create mode 100644 ja-jp/guides/knowledge-base/knowledge-request-rate-limit.mdx
delete mode 100644 ja-jp/guides/knowledge-base/metadata.mdx.bak
delete mode 100644 ja-jp/guides/knowledge-base/readme.mdx.bak
create mode 100644 ja-jp/guides/knowledge-base/retrieval-test-and-citation.md
delete mode 100644 ja-jp/guides/workflow/additional-feature.mdx.bak
delete mode 100644 ja-jp/guides/workflow/application-publishing.mdx.bak
delete mode 100644 ja-jp/guides/workflow/bulletin.mdx.bak
delete mode 100644 ja-jp/guides/workflow/concepts.mdx.bak
delete mode 100644 ja-jp/guides/workflow/file-upload.mdx.bak
delete mode 100644 ja-jp/guides/workflow/node/code.mdx.bak
delete mode 100644 ja-jp/guides/workflow/node/doc-extractor.mdx.bak
delete mode 100644 ja-jp/guides/workflow/node/end.mdx.bak
delete mode 100644 ja-jp/guides/workflow/node/knowledge-retrieval.mdx.bak
delete mode 100644 ja-jp/guides/workflow/node/list-operator.mdx.bak
delete mode 100644 ja-jp/guides/workflow/node/llm.mdx.bak
create mode 100644 ja-jp/guides/workflow/node/loop.mdx.bak
delete mode 100644 ja-jp/guides/workflow/node/parameter-extractor.mdx.bak
delete mode 100644 ja-jp/guides/workflow/node/question-classifier.mdx.bak
delete mode 100644 ja-jp/guides/workflow/node/start.mdx.bak
delete mode 100644 ja-jp/guides/workflow/node/tools.mdx.bak
delete mode 100644 ja-jp/guides/workflow/node/variable-assigner.mdx.bak
delete mode 100644 ja-jp/guides/workflow/nodes/README.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/agent.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/answer.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/code.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/code.mdx.bak
delete mode 100644 ja-jp/guides/workflow/nodes/doc-extractor.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/doc-extractor.mdx.bak
delete mode 100644 ja-jp/guides/workflow/nodes/end.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/end.mdx.bak
delete mode 100644 ja-jp/guides/workflow/nodes/http-request.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/ifelse.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/iteration.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/knowledge-retrieval.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/knowledge-retrieval.mdx.bak
delete mode 100644 ja-jp/guides/workflow/nodes/list-operator.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/list-operator.mdx.bak
delete mode 100644 ja-jp/guides/workflow/nodes/llm.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/llm.mdx.bak
delete mode 100644 ja-jp/guides/workflow/nodes/loop.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/parameter-extractor.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/parameter-extractor.mdx.bak
delete mode 100644 ja-jp/guides/workflow/nodes/question-classifier.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/question-classifier.mdx.bak
delete mode 100644 ja-jp/guides/workflow/nodes/start.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/start.mdx.bak
delete mode 100644 ja-jp/guides/workflow/nodes/template.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/tools.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/tools.mdx.bak
delete mode 100644 ja-jp/guides/workflow/nodes/variable-aggregator.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/variable-assigner.mdx
delete mode 100644 ja-jp/guides/workflow/nodes/variable-assigner.mdx.bak
delete mode 100644 ja-jp/guides/workflow/publish.mdx.bak
delete mode 100644 ja-jp/guides/workflow/variables.mdx.bak
delete mode 100644 ja-jp/plugins/best-practice/develop-a-slack-bot-plugin.mdx.bak
create mode 100644 ja-jp/plugins/best-practice/how-to-use-mcp-zapier.mdx
delete mode 100644 ja-jp/plugins/publish-plugins/README.mdx.bak
delete mode 100644 ja-jp/plugins/publish-plugins/package-plugin-file-and-publish.mdx.bak
create mode 100644 ja-jp/plugins/publish-plugins/plugin-auto-publish-pr.mdx
delete mode 100644 ja-jp/plugins/publish-plugins/publish-plugin-on-personal-github-repo.mdx.bak
create mode 100644 ja-jp/plugins/publish-plugins/signing-plugins-for-third-party-signature-verification.mdx
delete mode 100644 ja-jp/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx.bak
create mode 100644 scripts/convert_gitbook_to_mintlify.py
create mode 100644 scripts/high-light-convert-4.22.py
create mode 100644 scripts/high-light-convert.py
create mode 100644 zh-hans/getting-started/dify-for-education.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/README.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/agent.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/answer.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/code.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/doc-extractor.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/end.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/http-request.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/ifelse.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/iteration.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/knowledge-retrieval.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/list-operator.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/llm.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/loop.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/parameter-extractor.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/question-classifier.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/start.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/template.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/tools.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/variable-aggregator.mdx
delete mode 100644 zh-hans/guides/workflow/nodes/variable-assigner.mdx
create mode 100644 zh-hans/learn-more/use-cases/building-an-ai-thesis-slack-bot.mdx
create mode 100644 zh-hans/learn-more/use-cases/connect-dify-to-various-im-platforms-by-using-langbot.mdx.mdx
create mode 100644 zh-hans/plugins/best-practice/how-to-use-mcp-zapier.mdx
create mode 100644 zh-hans/plugins/publish-plugins/plugin-auto-publish-pr.mdx
delete mode 100644 zh-hans/plugins/publish-plugins/publish-plugin-on-personal-github-repo.mdx.bak
create mode 100644 zh-hans/plugins/publish-plugins/signing-plugins-for-third-party-signature-verification.mdx
diff --git a/conversion.log b/conversion.log
index d8a08540..1f2a26c9 100644
--- a/conversion.log
+++ b/conversion.log
@@ -1,3 +1,10 @@
2025-03-31 11:58:54,955 - md-to-mdx - INFO - 处理文件: zh-hans/guides/monitoring/integrate-external-ops-tools/integrate-opik.md
2025-03-31 11:58:54,964 - md-to-mdx - INFO - 转换完成: zh-hans/guides/monitoring/integrate-external-ops-tools/integrate-opik.mdx
2025-03-31 11:58:54,964 - md-to-mdx - INFO - 已删除源文件: zh-hans/guides/monitoring/integrate-external-ops-tools/integrate-opik.md
+2025-04-22 13:56:11,988 - md-to-mdx - ERROR - 无效的输入路径: ja-jp/guides/knowledge-base/metadata.mdx
+2025-04-22 13:57:34,348 - md-to-mdx - INFO - 处理文件: ja-jp/guides/knowledge-base/metadata.md
+2025-04-22 13:57:34,372 - md-to-mdx - INFO - 转换完成: ja-jp/guides/knowledge-base/metadata.mdx
+2025-04-22 13:57:34,372 - md-to-mdx - INFO - 已删除源文件: ja-jp/guides/knowledge-base/metadata.md
+2025-04-22 14:03:25,635 - md-to-mdx - INFO - 处理文件: ja-jp/guides/knowledge-base/knowledge-and-documents-maintenance/readme.md
+2025-04-22 14:03:25,646 - md-to-mdx - INFO - 转换完成: ja-jp/guides/knowledge-base/knowledge-and-documents-maintenance/readme.mdx
+2025-04-22 14:03:25,647 - md-to-mdx - INFO - 已删除源文件: ja-jp/guides/knowledge-base/knowledge-and-documents-maintenance/readme.md
diff --git a/docs.json b/docs.json
index 623e581d..ced4ba93 100644
--- a/docs.json
+++ b/docs.json
@@ -45,7 +45,8 @@
]
},
"en/getting-started/cloud",
- "en/getting-started/dify-premium"
+ "en/getting-started/dify-premium",
+ "en/getting-started/dify-for-education"
]
},
{
@@ -166,6 +167,7 @@
"en/guides/knowledge-base/metadata",
"en/guides/knowledge-base/integrate-knowledge-within-application",
"en/guides/knowledge-base/retrieval-test-and-citation",
+ "en/guides/knowledge-base/knowledge-request-rate-limit",
"en/guides/knowledge-base/connect-external-knowledge-base",
"en/guides/knowledge-base/external-knowledge-api"
]
@@ -320,13 +322,15 @@
"group": "Best Practice",
"pages": [
"en/plugins/best-practice/README",
- "en/plugins/best-practice/develop-a-slack-bot-plugin"
+ "en/plugins/best-practice/develop-a-slack-bot-plugin",
+ "en/plugins/best-practice/how-to-use-mcp-zapier"
]
},
{
"group": "Publish Plugins",
"pages": [
"en/plugins/publish-plugins/README",
+ "en/plugins/publish-plugins/plugin-auto-publish-pr",
{
"group": "Publish to Dify Marketplace",
"pages": [
@@ -336,7 +340,8 @@
]
},
"en/plugins/publish-plugins/publish-plugin-on-personal-github-repo",
- "en/plugins/publish-plugins/package-plugin-file-and-publish"
+ "en/plugins/publish-plugins/package-plugin-file-and-publish",
+ "en/plugins/publish-plugins/signing-plugins-for-third-party-signature-verification"
]
},
"en/plugins/faq"
@@ -383,6 +388,7 @@
"en/learn-more/use-cases/integrate-deepseek-to-build-an-ai-app",
"en/learn-more/use-cases/private-ai-ollama-deepseek-dify",
"en/learn-more/use-cases/build-an-notion-ai-assistant",
+ "en/learn-more/use-cases/building-an-ai-thesis-slack-bot",
"en/learn-more/use-cases/create-a-midjourney-prompt-bot-with-dify",
"en/learn-more/use-cases/create-an-ai-chatbot-with-business-data-in-minutes",
"en/learn-more/use-cases/how-to-integrate-dify-chatbot-to-your-wix-website",
@@ -467,7 +473,8 @@
"zh-hans/getting-started/install-self-hosted/faq"
]
},
- "zh-hans/getting-started/dify-premium"
+ "zh-hans/getting-started/dify-premium",
+ "zh-hans/getting-started/dify-for-education"
]
},
{
@@ -800,13 +807,15 @@
{
"group": "最佳实践",
"pages": [
- "zh-hans/plugins/best-practice/develop-a-slack-bot-plugin"
+ "zh-hans/plugins/best-practice/develop-a-slack-bot-plugin",
+ "zh-hans/plugins/best-practice/how-to-use-mcp-zapier"
]
},
{
"group": "发布插件",
"pages": [
"zh-hans/plugins/publish-plugins/README",
+ "zh-hans/plugins/publish-plugins/plugin-auto-publish-pr",
{
"group": "发布至 Dify Marketplace",
"pages": [
@@ -816,7 +825,8 @@
]
},
"zh-hans/plugins/publish-plugins/publish-plugin-on-personal-github-repo",
- "zh-hans/plugins/publish-plugins/package-plugin-file-and-publish"
+ "zh-hans/plugins/publish-plugins/package-plugin-file-and-publish",
+ "zh-hans/plugins/publish-plugins/signing-plugins-for-third-party-signature-verification"
]
},
"zh-hans/plugins/faq"
@@ -865,6 +875,8 @@
"zh-hans/learn-more/use-cases/train-a-qa-chatbot-that-belongs-to-you",
"zh-hans/learn-more/use-cases/create-a-midjoureny-prompt-word-robot-with-zero-code",
"zh-hans/learn-more/use-cases/build-an-notion-ai-assistant",
+ "zh-hans/learn-more/use-cases/building-an-ai-thesis-slack-bot",
+ "zh-hans/learn-more/use-cases/connect-dify-to-various-im-platforms-by-using-langbot",
"zh-hans/learn-more/use-cases/create-an-ai-chatbot-with-business-data-in-minutes",
"zh-hans/learn-more/use-cases/practical-implementation-of-building-llm-applications-using-a-full-set-of-open-source-tools",
"zh-hans/learn-more/use-cases/dify-on-wechat",
@@ -955,7 +967,8 @@
"ja-jp/getting-started/install-self-hosted/faq"
]
},
- "ja-jp/getting-started/dify-premium"
+ "ja-jp/getting-started/dify-premium",
+ "ja-jp/getting-started/dify-for-education"
]
},
{
@@ -1072,7 +1085,7 @@
{
"group": "ナレッジベースの管理",
"pages": [
- "ja-jp/guides/knowledge-base/knowledge-and-documents-maintenance/introduction",
+ "ja-jp/guides/knowledge-base/knowledge-and-documents-maintenance/readme",
"ja-jp/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-knowledge-documents",
"ja-jp/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-dataset-via-api"
]
@@ -1080,6 +1093,7 @@
"ja-jp/guides/knowledge-base/metadata",
"ja-jp/guides/knowledge-base/integrate-knowledge-within-application",
"ja-jp/guides/knowledge-base/retrieval-test-and-citation",
+ "ja-jp/guides/knowledge-base/knowledge-request-rate-limit",
"ja-jp/guides/knowledge-base/connect-external-knowledge-base",
"ja-jp/guides/knowledge-base/api-documentation/external-knowledge-api-documentation"
]
@@ -1255,13 +1269,15 @@
"group": "ベストプラクティス",
"pages": [
"ja-jp/plugins/best-practice/README",
- "ja-jp/plugins/best-practice/develop-a-slack-bot-plugin"
+ "ja-jp/plugins/best-practice/develop-a-slack-bot-plugin",
+ "ja-jp/plugins/best-practice/how-to-use-mcp-zapier"
]
},
{
"group": "プラグインの公開",
"pages": [
"ja-jp/plugins/publish-plugins/README",
+ "ja-jp/plugins/publish-plugins/plugin-auto-publish-pr",
{
"group": "Difyマーケットプレイスへの公開",
"pages": [
@@ -1271,7 +1287,8 @@
]
},
"ja-jp/plugins/publish-plugins/publish-plugin-on-personal-github-repo",
- "ja-jp/plugins/publish-plugins/package-plugin-file-and-publish"
+ "ja-jp/plugins/publish-plugins/package-plugin-file-and-publish",
+ "ja-jp/plugins/publish-plugins/signing-plugins-for-third-party-signature-verification"
]
},
"ja-jp/plugins/faq"
@@ -1319,6 +1336,7 @@
"ja-jp/learn-more/use-cases/private-ai-ollama-deepseek-dify",
"ja-jp/learn-more/use-cases/create-a-midjoureny-prompt-word-robot-with-zero-code",
"ja-jp/learn-more/use-cases/build-an-notion-ai-assistant",
+ "ja-jp/learn-more/use-cases/building-an-ai-thesis-slack-bot",
"ja-jp/learn-more/use-cases/create-an-ai-chatbot-with-business-data-in-minutes",
"ja-jp/learn-more/use-cases/how-to-integrate-dify-chatbot-to-your-wix-website",
"ja-jp/learn-more/use-cases/how-to-connect-aws-bedrock",
@@ -1377,6 +1395,18 @@
]
},
"redirects": [
+ {
+ "source": "ja-jp/plugins/plugin-auto-publish-pr",
+ "destination": "ja-jp/plugins/publish-plugins/plugin-auto-publish-pr.mdx"
+ },
+ {
+ "source": "zh-hans/plugins/plugin-auto-publish-pr",
+ "destination": "zh-hans/plugins/publish-plugins/plugin-auto-publish-pr.mdx"
+ },
+ {
+ "source": "en/plugins/plugin-auto-publish-pr",
+ "destination": "en/plugins/publish-plugins/plugin-auto-publish-pr.mdx"
+ },
{
"source": "zh-hans/getting-started/install-self-hosted",
"destination": "zh-hans/getting-started/install-self-hosted/readme"
diff --git a/en/community/contribution.mdx b/en/community/contribution.mdx
index 1f93b839..db2a5d67 100644
--- a/en/community/contribution.mdx
+++ b/en/community/contribution.mdx
@@ -75,7 +75,7 @@ Dify requires the following dependencies to build, make sure they're installed o
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org)
- [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/)
-- [Python](https://www.python.org/) version 3.10.x
+- [Python](https://www.python.org/) version 3.11.x
#### 4. Installations
diff --git a/en/development/models-integration/gpustack.mdx b/en/development/models-integration/gpustack.mdx
index c6f1b973..0ad2577c 100644
--- a/en/development/models-integration/gpustack.mdx
+++ b/en/development/models-integration/gpustack.mdx
@@ -1,11 +1,10 @@
---
-title: Integrating with GPUStack for Local Model Deployment
+title: Integrate Local Models Deployed by GPUStack
---
+[GPUStack](https://github.com/gpustack/gpustack) is an open-source GPU cluster manager for running AI models.
-[GPUStack](https://github.com/gpustack/gpustack) is an open-source GPU cluster manager for running large language models(LLMs).
-
-Dify allows integration with GPUStack for local deployment of large language model inference, embedding and reranking capabilities.
+Dify allows integration with GPUStack for local deployment of large language model inference, embedding, reranking, speech-to-text and text-to-speech capabilities.
## Deploying GPUStack
@@ -39,7 +38,7 @@ Using a LLM hosted on GPUStack as an example:
3. Click `Save` to deploy the model.
-
+
## Create an API Key
@@ -63,6 +62,6 @@ Using a LLM hosted on GPUStack as an example:
Click "Save" to use the model in the application.
-
+
For more information about GPUStack, please refer to [Github Repo](https://github.com/gpustack/gpustack).
diff --git a/en/getting-started/cloud.mdx b/en/getting-started/cloud.mdx
index 98038d96..d56eaca2 100644
--- a/en/getting-started/cloud.mdx
+++ b/en/getting-started/cloud.mdx
@@ -30,4 +30,4 @@ A: In Dify Cloud, we anonymize application data to ensure privacy and reduce enc
**Q: What is the process for deleting my account and all associated data from Dify Cloud?**
-A: If you decide to delete your account and remove all associated data from Dify Cloud, you can simply send a request to our support team at support@dify.ai. We are committed to respecting your privacy and data rights, and upon request, we will erase all your data from our systems, adhering to data protection regulations.
+A: If you decide to delete your account and remove all associated data from Dify Cloud, you can visit [Account](https://cloud.dify.ai/account) or click on the avatar in the upper right corner, then click "Account". You will see a "Delete account" button; please click it and follow the instructions. We are committed to respecting your privacy and data rights, and upon request, we will erase all your data from our systems, adhering to data protection regulations.
diff --git a/en/getting-started/dify-for-education.mdx b/en/getting-started/dify-for-education.mdx
new file mode 100644
index 00000000..b80dbb68
--- /dev/null
+++ b/en/getting-started/dify-for-education.mdx
@@ -0,0 +1,167 @@
+---
+title: Dify for Education
+---
+
+## Overview
+
+**Dify for Education** offers discounted access to Dify for students, teachers, and educational institutions.
+
+## Value
+
+- **For Students**: Get discounted access for coursework or AI projects.
+
+- **For Teachers**: Use Dify as a teaching tool for AI application development.
+
+- **For Educational Institutions & Schools**: Manage AI resources and enhance education.
+
+## Benefits
+
+Dify for Education offers **a 50% off coupon for the Dify Professional subscription plan**.
+
+
+More exclusive educational benefits will be added in the future.
+
+
+## How to Apply for Dify for Education
+
+### Prerequisites
+
+To qualify for Dify for Education, you must meet **all** of the following requirements:
+
+1. Be 18 or older.
+2. Be a current student, teacher, or educational staff member.
+3. Have a valid educational email address.
+
+### Steps
+
+1. **Create a Dify Account**
+
+Before applying for educational verification, [register a Dify account](https://cloud.dify.ai/signin) with your educational email.
+
+2. **Apply for Educational Verification**
+
+ 1. From your profile menu (top-right), select **Settings**.
+
+ 2. On the Settings page, click **Billing > Get Education Verified**.
+
+ 
+
+ 3. Enter your school’s full name *(no abbreviations)*.
+
+ 4. Select your role.
+
+ 5. Accept the *Terms & Agreements* and click **Submit**.
+
+ 
+
+ 6. Once verified, you will receive a Dify Education Coupon.
+
+## How to Check Dify Educational Verification Status
+
+Click profile picture (top-right) and look for `Edu` in subscription status.
+
+## How to Use Dify Education Coupon
+
+### Dify Education Coupon
+
+Your account will receive a Dify Education coupon after verification. Use this coupon to get discounts on the Dify Professional subscription plan.
+
+
+Discount: **50%**
+
+Validity: **12 months**
+
+Limitations:
+- Coupons must be used within the validity period; **expired coupons are invalid**.
+- Educational verification can only be applied for **once** per year, and only **one coupon** can be obtained per application.
+- Coupons can only be applied to workspaces where you are the Workspace Owner.
+
+
+### Use Education Coupon
+
+1. On the pricing page, select the Dify Professional plan.
+
+- Monthly payment: Click **Get Started** to go to the payment page.
+- Annual payment: Toggle **Annual Billing** and click **Get Started** to go to the payment page.
+
+2. On the payment page, your promo code applies automatically. Check the discount and discounted price.
+
+
+
+3. After confirming order details, select a payment method.
+
+4. Complete the payment to finalize subscription.
+
+5. Once subscribed, your subscription status will be updated to `Pro(Edu)`.
+
+## How to Update My Educational Information
+
+### Update Email
+
+Email `support@dify.ai` to change your email address.
+
+### Update School Information
+
+- **For verified accounts**: Email `support@dify.ai` to modify school details.
+
+- **For expired verification**: Enter new school information during re-verification.
+
+## How to Renew Dify Educational Verification
+
+Follow the steps in **Apply for Educational Verification** to renew verification.
+
+## FAQ
+
+- **How can I learn about Dify?**
+
+1. Read the [Dify official documentation](https://docs.dify.ai/) for guides.
+
+2. Join the [Dify Discord community](https://discord.com/invite/FngNHpbcY7) to share your experiences with developers.
+
+- **How much do Dify plans cost?**
+
+See [pricing page](https://dify.ai/pricing) for more information.
+
+- **Who cannot apply for educational verification?**
+
+1. Users under 18 years old.
+
+2. Users registered with personal emails (e.g., Gmail).
+
+- **Why was my educational verification rejected/revoked? What should I do if my application is rejected/revoked?**
+
+Your educational verification application may be rejected or revoked in the following situations:
+
+1. Did not accept ***Terms & Agreements*** when applying for educational verification
+2. Non-educational email registration
+3. Fraudulent email or information
+4. Educational privilege misuse
+5. False institutional information
+
+To appeal a rejected/revoked verification, please email `support@dify.ai` for assistance.
+
+- **Will my educational verification remain valid after I graduate or leave school?**
+
+Your verification remains valid while your educational email is active. Email `support@dify.ai` for assistance if your educational email expires.
+
+- **I don’t have an educational email. How can I apply for educational verification?**
+
+Educational verification requires a valid institutional email address. Personal emails are not eligible.
+
+- **I had a subscription that expired. Can I still use the promo code?**
+
+Yes. If your Stripe payment info is saved, the promo code will automatically apply when you resubscribe.
+
+- **What happens to my plan after educational verification is approved?**
+
+For Professional Plan users, the promo code applies at your next billing period. For other plans’ users, the promo code is saved and applies when you change it to Professional.
+
+- **What happens if I cancel my professional subscription plan after educational verification but before my next billing date?**
+
+Your plan continues until the billing period ends, but the promo code expires permanently.
+
+- **I canceled my subscription and then applied for Dify for Education. Can I still use the promo code?**
+
+Yes, but timing matters:
+1. Your current subscription must expire before resubscribing for the promo code to apply.
+2. The promo code will not apply if you renew or change plans before your subscription expires.
diff --git a/en/getting-started/dify-premium.mdx b/en/getting-started/dify-premium.mdx
index 36bb3346..3eeac16e 100644
--- a/en/getting-started/dify-premium.mdx
+++ b/en/getting-started/dify-premium.mdx
@@ -109,3 +109,7 @@ Just like self-hosted deploy, you may modify the environment variables under `.e
docker-compose down
docker-compose -f docker-compose.yaml -f docker-compose.override.yaml up -d
```
+
+## WebApp Logo & Branding Customization
+
+You can customize branding in **Customization** under Settings: enable **Remove Powered by Dify** and upload your own logo.
diff --git a/en/getting-started/install-self-hosted/environments.mdx b/en/getting-started/install-self-hosted/environments.mdx
index 22821a2d..4725abe4 100644
--- a/en/getting-started/install-self-hosted/environments.mdx
+++ b/en/getting-started/install-self-hosted/environments.mdx
@@ -1,7 +1,14 @@
---
-title: Environments
+title: Environments
---
+
+This document may be outdated. Please refer to the latest configuration files:
+
+- [docker-compose.yaml](https://github.com/langgenius/dify/blob/5f8d20b5b2bb51f19547467167b18d9c0f6ffbb8/docker/docker-compose.yaml)
+
+- [.env.example](https://github.com/langgenius/dify/blob/5f8d20b5b2bb51f19547467167b18d9c0f6ffbb8/docker/.env.example)
+
### Common Variables
diff --git a/en/getting-started/install-self-hosted/faqs.mdx b/en/getting-started/install-self-hosted/faqs.mdx
index 43dbe48b..5ac717d8 100644
--- a/en/getting-started/install-self-hosted/faqs.mdx
+++ b/en/getting-started/install-self-hosted/faqs.mdx
@@ -2,7 +2,6 @@
title: FAQs
---
-
### 1. Not receiving reset password emails
You need to configure the `Mail` parameters in the `.env` file. For detailed instructions, please refer to ["Environment Variables Explanation: Mail-related configuration"](/en/getting-started/install-self-hosted/environments#mail-related-configuration).
@@ -67,5 +66,24 @@ EXPOSE_NGINX_PORT=80
EXPOSE_NGINX_SSL_PORT=443
```
+For other self-hosting issues, please check this document: [Self-Host Related](/en/learn-more/faq/install-faq).
+
+### 6. How to resolve database connection errors in docker-api-1?
+
+**Issue Details**: When accessing `http://localhost`, you may encounter an `Internal Server Error`; and the following message might appear in the `docker-api-1` logs:
+
+```bash
+FATAL: no pg_hba.conf entry for host "172.19.0.7", user "postgres", database "dify", no encryption
+```
+
+**Solution**: Update the `/var/lib/postgresql/data/pgdata/pg_hba.conf` file inside the db container to allow connections from the network segment mentioned in the error message. For example:
+
+```bash
+docker exec -it docker-db-1 sh -c "echo 'host all all 172.19.0.0/16 trust' >> /var/lib/postgresql/data/pgdata/pg_hba.conf"
+docker-compose restart
+```
+
+### 7. How to change the file size limit for knowledge base uploads?
+
+Modify the `UPLOAD_FILE_SIZE_LIMIT` parameter in the `.env` file to adjust the default limit. Additionally, you should also sync the `NGINX_CLIENT_MAX_BODY_SIZE` parameter value to avoid potential issues.
-Other self-host issue please check this document [Self-Host Related](/en/learn-more/faq/install-faq)。
\ No newline at end of file
diff --git a/en/getting-started/install-self-hosted/local-source-code.mdx b/en/getting-started/install-self-hosted/local-source-code.mdx
index 218346b0..ee0471b0 100644
--- a/en/getting-started/install-self-hosted/local-source-code.mdx
+++ b/en/getting-started/install-self-hosted/local-source-code.mdx
@@ -221,7 +221,8 @@ Please visit [https://nodejs.org/en/download](https://nodejs.org/en/download) an
2. Install the dependencies.
```
- npm install
+ npm i -g pnpm
+ pnpm install
```
3. Configure the environment variables. Create a file named .env.local in the current directory and copy the contents from .env.example. Modify the values of these environment variables according to your requirements:
diff --git a/en/getting-started/readme/model-providers.mdx b/en/getting-started/readme/model-providers.mdx
index e2d29804..d5a1751d 100644
--- a/en/getting-started/readme/model-providers.mdx
+++ b/en/getting-started/readme/model-providers.mdx
@@ -384,6 +384,14 @@ Dify supports the below model providers out-of-box:
+
+
GPUStack
+
✔️(🔧️)(👓)
+
✔️
+
✔️
+
✔️
+
✔️
+
diff --git a/en/guides/application-orchestrate/creating-an-application.mdx b/en/guides/application-orchestrate/creating-an-application.mdx
index 6c0e14e7..be1b7dbe 100644
--- a/en/guides/application-orchestrate/creating-an-application.mdx
+++ b/en/guides/application-orchestrate/creating-an-application.mdx
@@ -26,7 +26,7 @@ If you need to create a blank application on Dify, you can select "Studio" from

-When creating an application for the first time, you might need to first understand the [basic concepts](./#application_type) of the four different types of applications on Dify: Chatbot, Text Generator, Agent, Chatflow and Workflow.
+When creating an application for the first time, you might need to first understand the [basic concepts](./#application_type) of the five different types of applications on Dify: Chatbot, Text Generator, Agent, Chatflow and Workflow.
When selecting a specific application type, you can customize it by providing a name, choosing an appropriate icon(or uploading your favorite image as an icon), and writing a clear and concise description of its purpose. These details will help team members easily understand and use the application in the future.
diff --git a/en/guides/application-publishing/launch-your-webapp-quickly/README.mdx b/en/guides/application-publishing/launch-your-webapp-quickly/README.mdx
index d0c55f0f..4d60fc05 100644
--- a/en/guides/application-publishing/launch-your-webapp-quickly/README.mdx
+++ b/en/guides/application-publishing/launch-your-webapp-quickly/README.mdx
@@ -42,4 +42,4 @@ Currently, Web App supports multiple languages: English, Simplified Chinese, Tra
### Embedding Your AI Site
-You can also integrate Dify Web App into your own web project, blog, or any other web page. For more details, please take a refer to [Embedding In Websites](/en/guides/application-publishing/embedding-in-websites).
+You can also integrate Dify Web App into your own web project, blog, or any other web page. For more details, please refer to [Embedding In Websites](/en/guides/application-publishing/embedding-in-websites).
diff --git a/en/guides/knowledge-base/api-documentation/external-knowledge-api-documentation.mdx b/en/guides/knowledge-base/api-documentation/external-knowledge-api-documentation.mdx
index f555e5f9..431b18ae 100644
--- a/en/guides/knowledge-base/api-documentation/external-knowledge-api-documentation.mdx
+++ b/en/guides/knowledge-base/api-documentation/external-knowledge-api-documentation.mdx
@@ -10,7 +10,7 @@ POST /retrieval
## Header
-This API is used to connect to a knowledge base that is independent of the Dify and maintained by developers. For more details, please refer to [Connecting to an External Knowledge Base](/en/guides/knowledge-base/connect-external-knowledge-base). You can use `API-Key` in the `Authorization` HTTP Header to verify permissions. The authentication logic is defined by you in the retrieval API, as shown below:
+This API is used to connect to a knowledge base that is independent of the Dify and maintained by developers. For more details, please refer to [Connecting to an External Knowledge Base](https://docs.dify.ai/guides/knowledge-base/connect-external-knowledge-base). You can use `API-Key` in the `Authorization` HTTP Header to verify permissions. The authentication logic is defined by you in the retrieval API, as shown below:
```
Authorization: Bearer {API_KEY}
@@ -25,6 +25,7 @@ The request accepts the following data in JSON format.
| knowledge_id | TRUE | string | Your knowledge's unique ID | AAA-BBB-CCC |
| query | TRUE | string | User's query | What is Dify? |
| retrieval_setting | TRUE | object | Knowledge's retrieval parameters | See below |
+| metadata_condition | FALSE | Object | Original array filtering | See below |
The `retrieval_setting` property is an object containing the following keys:
@@ -33,6 +34,40 @@ The `retrieval_setting` property is an object containing the following keys:
| top_k | TRUE | int | Maximum number of retrieved results | 5 |
| score_threshold | TRUE | float | The score limit of relevance of the result to the query, scope: 0~1 | 0.5 |
+The `metadata_condition` property is an object containing the following keys:
+
+| Attribute | Required | Type | Description | Example Value |
+|-----------|----------|------|-------------|--------------|
+| logical_operator | No | String | Logical operator, values can be `and` or `or`, default is `and` | and |
+| conditions | Yes | Array (Object) | List of conditions | See below |
+
+Each object in the `conditions` array contains the following keys:
+
+| Attribute | Required | Type | Description | Example Value |
+|-----------|----------|------|-------------|--------------|
+| name | Yes | Array (String) | Names of the metadata to filter | `["category", "tag"]` |
+| comparison_operator | Yes | String | Comparison operator | `contains` |
+| value | No | String | Comparison value, can be omitted when the operator is `empty`, `not empty`, `null`, or `not null` | `"AI"` |
+
+Supported `comparison_operator` operators:
+
+- `contains`: Contains a certain value
+- `not contains`: Does not contain a certain value
+- `start with`: Starts with a certain value
+- `end with`: Ends with a certain value
+- `is`: Equals a certain value
+- `is not`: Does not equal a certain value
+- `empty`: Is empty
+- `not empty`: Is not empty
+- `=`: Equals
+- `≠`: Not equal
+- `>`: Greater than
+- `<`: Less than
+- `≥`: Greater than or equal to
+- `≤`: Less than or equal to
+- `before`: Before a certain date
+- `after`: After a certain date
+
## Request Syntax
```json
@@ -105,7 +140,7 @@ If the action fails, the service sends back the following error information in J
| Property | Required | Type | Description | Example value |
|----------|----------|------|-------------|---------------|
| error_code | TRUE | int | Error code | 1001 |
-| error_msg | TRUE | string | The description of API exception | Invalid Authorization header format. Expected 'Bearer ``' format. |
+| error_msg | TRUE | string | The description of API exception | Invalid Authorization header format. Expected 'Bearer ' format. |
The `error_code` property has the following types:
@@ -124,3 +159,17 @@ HTTP Status Code: 403
**InternalServerException**
An internal server error occurred. Retry your request.
HTTP Status Code: 500
+
+## Development Example
+
+You can learn how to develop external knowledge base plugins through the following video tutorial using LlamaCloud as an example:
+
+
+
+For more information about how it works, please refer to the plugin's [GitHub repository](https://github.com/langgenius/dify-official-plugins/tree/main/extensions/llamacloud).
diff --git a/en/guides/knowledge-base/connect-external-knowledge-base.mdx b/en/guides/knowledge-base/connect-external-knowledge-base.mdx
index cb4aae2e..1cedfee0 100644
--- a/en/guides/knowledge-base/connect-external-knowledge-base.mdx
+++ b/en/guides/knowledge-base/connect-external-knowledge-base.mdx
@@ -21,9 +21,39 @@ The **Connect to External Knowledge Base** feature enables integration between t
-## Connection Example
+### Connection Examples
-[how-to-connect-aws-bedrock?](/en/learn-more/use-cases/how-to-connect-aws-bedrock)
+#### AWS Bedrock
+
+[How to connect with AWS Bedrock Knowledge Base](../../learn-more/use-cases/how-to-connect-aws-bedrock.md "mention")
+
+#### LlamaCloud
+
+Dify provides an official LlamaCloud plugin that helps you quickly connect to LlamaCloud knowledge bases.
+
+##### Plugin Installation
+
+1. Visit the Dify [Marketplace](https://marketplace.dify.ai/) and search for `LlamaCloud`
+2. Install and configure the LlamaCloud plugin according to the instructions
+3. Enable the plugin in the Dify platform
+4. Fill in the LlamaCloud API key and other necessary information following the plugin configuration wizard
+5. After configuration is complete, you can see the connected external knowledge base in your knowledge base list
+
+With the LlamaCloud plugin, you can directly use LlamaCloud's powerful retrieval capabilities in the Dify platform without writing custom APIs.
+
+For more information about how it works, please refer to the plugin's [GitHub repository](https://github.com/langgenius/dify-official-plugins/tree/main/extensions/llamacloud).
+
+##### Video Tutorial
+
+The following video demonstrates in detail how to use the LlamaCloud plugin to connect to external knowledge bases:
+
+
## FAQ
diff --git a/en/guides/knowledge-base/external-knowledge-api.mdx b/en/guides/knowledge-base/external-knowledge-api.mdx
index 6be576d4..8c822144 100644
--- a/en/guides/knowledge-base/external-knowledge-api.mdx
+++ b/en/guides/knowledge-base/external-knowledge-api.mdx
@@ -1,6 +1,5 @@
---
title: 'External Knowledge API'
-version: 'English'
---
## Endpoint
@@ -26,7 +25,7 @@ The request accepts the following data in JSON format.
| knowledge_id | TRUE | string | Your knowledge's unique ID | AAA-BBB-CCC |
| query | TRUE | string | User's query | What is Dify? |
| retrieval_setting | TRUE | object | Knowledge's retrieval parameters | See below |
-| metadata_condition | Yes | Object | Original array filtering | See below |
+| metadata_condition | FALSE | Object | Original array filtering | See below |
The `retrieval_setting` property is an object containing the following keys:
@@ -50,7 +49,7 @@ Each object in the `conditions` array contains the following keys:
| comparison_operator | Yes | String | Comparison operator | `contains` |
| value | No | String | Comparison value, can be omitted when the operator is `empty`, `not empty`, `null`, or `not null` | `"AI"` |
-Supported `comparison_operator` Operators
+Supported `comparison_operator` operators:
- `contains`: Contains a certain value
- `not contains`: Does not contain a certain value
@@ -141,7 +140,7 @@ If the action fails, the service sends back the following error information in J
| Property | Required | Type | Description | Example value |
|----------|----------|------|-------------|---------------|
| error_code | TRUE | int | Error code | 1001 |
-| error_msg | TRUE | string | The description of API exception | Invalid Authorization header format. Expected 'Bearer ``' format. |
+| error_msg | TRUE | string | The description of API exception | Invalid Authorization header format. Expected 'Bearer ' format. |
The `error_code` property has the following types:
@@ -160,3 +159,11 @@ HTTP Status Code: 403
**InternalServerException**
An internal server error occurred. Retry your request.
HTTP Status Code: 500
+
+## Development Example
+
+You can learn how to develop external knowledge base plugins through the following video tutorial using LlamaCloud as an example:
+
+{% embed url="https://www.youtube.com/embed/FaOzKZRS-2E" %}
+
+For more information about how it works, please refer to the plugin's [GitHub repository](https://github.com/langgenius/dify-official-plugins/tree/main/extensions/llamacloud).
diff --git a/en/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-dataset-via-api.mdx b/en/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-dataset-via-api.mdx
index 021f1774..6edcc5d9 100644
--- a/en/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-dataset-via-api.mdx
+++ b/en/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-dataset-via-api.mdx
@@ -709,7 +709,7 @@ Response example:
Request example:
```bash
-curl --location --request DELETE 'https://api.dify.ai/v1/datasets/{dataset_id}/document/metadata/{metadata_id}' \
+curl --location --request DELETE 'https://api.dify.ai/v1/datasets/{dataset_id}/metadata/{metadata_id}' \
--header 'Authorization: Bearer {api_key}'
```
@@ -724,7 +724,7 @@ Response example:
Request example:
```bash
-curl --location --request DELETE 'https://api.dify.ai/v1/datasets/{dataset_id}/document/metadata/built-in/{action}' \
+curl --location --request DELETE 'https://api.dify.ai/v1/datasets/{dataset_id}/metadata/built-in/{action}' \
--header 'Authorization: Bearer {api_key}'
```
diff --git a/en/guides/knowledge-base/knowledge-base-creation/sync-from-website.mdx b/en/guides/knowledge-base/knowledge-base-creation/sync-from-website.mdx
index dc232a60..30de4fca 100644
--- a/en/guides/knowledge-base/knowledge-base-creation/sync-from-website.mdx
+++ b/en/guides/knowledge-base/knowledge-base-creation/sync-from-website.mdx
@@ -53,7 +53,7 @@ After importing the parsed text from the webpage, it is stored in the knowledge
### Jina Reader
-#### 1. Configuring Jina Reader Credentials
+#### 1. Configuring Jina Reader Credentials
Click on the avatar in the upper right corner, then go to the **DataSource** page, and click the **Configure** button next to Jina Reader.
@@ -67,7 +67,7 @@ Log in to the [Jina Reader website](https://jina.ai/reader/), complete registrat
-#### 2. Using Jina Reader to Crawl Web Content
+#### 2. Using Jina Reader to Crawl Web Content
On the knowledge base creation page, select Sync from website, choose Jina Reader as the provider, and enter the target URL to be crawled.
diff --git a/en/guides/knowledge-base/knowledge-request-rate-limit.mdx b/en/guides/knowledge-base/knowledge-request-rate-limit.mdx
new file mode 100644
index 00000000..b98131dc
--- /dev/null
+++ b/en/guides/knowledge-base/knowledge-request-rate-limit.mdx
@@ -0,0 +1,42 @@
+---
+title: Knowledge Request Rate Limit
+---
+
+We will implement Knowledge Request Rate Limits starting **February 24, 2025**, to ensure optimal service performance and a better knowledge experience for all users.
+
+## What is Knowledge Request Rate Limit?
+
+Knowledge Request Rate Limit refers to the maximum number of actions that a workspace can perform in the knowledge base within one minute. These actions include creating datasets, managing documents, and running queries in apps or workflows.
+
+## Limitations of Different Subscription Versions
+
+Knowledge Request Rate Limit will vary by subscription level:
+
+- **Sandbox**: 10/min
+- **Professional**: 100/min
+- **Team**: 1,000/min
+
+For example, if a Sandbox user performs 10 hit tests within one minute, their workspace will be temporarily unable to perform the restricted actions during the following minute.
+
+## Which Actions will be Limited by Knowledge Request Rate Limit when I Perform them?
+
+When you perform the following actions, you will be limited by the frequency of Knowledge Base requests:
+
+1. Creating empty datasets
+2. Deleting datasets
+3. Updating dataset settings
+4. Uploading documents
+5. Deleting documents
+6. Updating documents
+7. Disabling documents
+8. Enabling documents
+9. Archiving documents
+10. Restoring archived documents
+11. Pausing document processing
+12. Resuming document processing
+13. Adding segments
+14. Deleting segments
+15. Updating segments
+16. Bulk importing segments
+17. Performing hit tests
+18. Querying the knowledge in apps or workflows (*Note: Multi-path recalls count as a single request.*)
diff --git a/en/guides/management/version-control.mdx b/en/guides/management/version-control.mdx
index b6a4f66c..beea11bb 100644
--- a/en/guides/management/version-control.mdx
+++ b/en/guides/management/version-control.mdx
@@ -15,7 +15,7 @@ This article explains how to manage versions in Dify's Chatflow and Workflow.
- **Published Version**: Any version released to production.
@@ -25,7 +25,7 @@ This article explains how to manage versions in Dify's Chatflow and Workflow.
- **Previous Version**: Any older published version.
@@ -33,7 +33,7 @@ This article explains how to manage versions in Dify's Chatflow and Workflow.
- **Restore**: Return to any earlier version of your application.
@@ -41,7 +41,7 @@ This article explains how to manage versions in Dify's Chatflow and Workflow.
## Features
diff --git a/en/guides/workflow/node/agent.mdx b/en/guides/workflow/node/agent.mdx
index c94c9bf4..4fb802de 100644
--- a/en/guides/workflow/node/agent.mdx
+++ b/en/guides/workflow/node/agent.mdx
@@ -2,7 +2,6 @@
title: Agent
---
-
## Definition
An Agent Node is a component in Dify Chatflow/Workflow that enables autonomous tool invocation. By integrating different Agent reasoning strategies, LLMs can dynamically select and execute tools at runtime, thereby performing multi-step reasoning.
@@ -76,3 +75,11 @@ After choosing the Agent strategy, the configuration panel will display the rele
During execution, the Agent node generates detailed logs. You can see overall node execution information—including inputs and outputs, token usage, time spent, and status. Click Details to view the output from each round of Agent strategy execution.

+
+## Memory
+
+Enabling the **Memory** toggle empowers the Agent to retain and recall conversation context. By adjusting the **Window Size**, you can control how many previous conversation messages the Agent can “remember”. This allows the Agent to understand and reference earlier exchanges, delivering coherent and contextually relevant responses that enhance multi-turn dialogue experiences.
+
+For example, when users employ pronouns (such as “it”, “this”, or “they”) in subsequent messages, an Agent with Memory enabled can understand what these pronouns refer to from previous context without requiring users to restate complete information.
+
+
diff --git a/en/guides/workflow/node/loop.mdx b/en/guides/workflow/node/loop.mdx
index 1b131773..6b9383de 100644
--- a/en/guides/workflow/node/loop.mdx
+++ b/en/guides/workflow/node/loop.mdx
@@ -30,7 +30,7 @@ A **Loop** node executes repetitive tasks that depend on previous iteration resu
-## Configuration
+## Configurations
@@ -51,35 +51,83 @@ A **Loop** node executes repetitive tasks that depend on previous iteration resu
Upper limit on iterations to prevent infinite loops
10, 100, 1000
+
+
Loop Variables
+
Values that persist across iterations and remain accessible to nodes after the loop completes
+
A counter x < 50 increases by 1 each iteration. Use it for calculations inside the loop, then access its final value in later workflow steps.
+
+
+
Exit Loop Node
+
Immediately ends the loop when reached
+
Caps execution at 10 iterations, regardless of other conditions.
+
-
+
+The loop can be terminated by either the **Exit Loop Node** or the **Loop Termination Condition**. When either condition is met, the loop will immediately exit.
-## Usage Example
+If no exit conditions are specified, the loop will continue executing (similar to `while (true)`) until it reaches the **Maximum Loop Count**.
+
-**Goal: Generate random numbers (1-100) until a value below 50 appears.**
+## Example 1: Basic Loop
+
+**Goal: Generate random numbers between 1-100 until getting a number less than 50.**
**Steps**:
-1. Use `node` to generate a random number between 1-100.
+1. Set up a **Loop** node by configuring its **Loop Termination Condition** to trigger when the **Template** node returns `done`.
-2. Use `if` to evaluate the number:
+2. Set up a **Code** node that generates random integers between `1` and `100`.
- - If < 50: Output `done` and terminate loop.
+3. Set up an **IF/ELSE** node with the following logic:
- - If ≥ 50: Continue loop and generate another random number.
+ - For numbers ≥ 50: Output the `Current Number` and continue the loop
-3. Set the exit criterion to random_number < 50.
+ - For numbers < 50: Output the `Final Number`, and then use the **Template** node to return `done`
-4. Loop ends when a number below 50 appears.
+4. The workflow terminates automatically once a number below `50` is generated.
-
+
-## Planned Enhancements
+
-**Future releases will include:**
+## Example 2: Advanced Loop (with Variables and Exit Node)
- - Loop variables: Store and reference values across iterations for improved state management and conditional logic.
+**Goal: Design a workflow that generates a poem through four iterative refinements, with each version building upon the previous one.**
- - `break` node: Terminate loops from within the execution path, enabling more sophisticated control flow patterns.
+**Steps:**
+
+1. Set up a **Loop** node with those **Loop Variables**:
+
+ - num: A counter starting at 0, incrementing by 1 per iteration
+
+ - verse: A text variable initialized with `I haven't started creating yet`
+
+2. Set up an **IF/ELSE** node that evaluates the iteration count:
+
+ - When num > 3: Proceed to the **Exit Loop** node
+
+ - When num ≤ 3: Proceed to the **LLM** node
+
+3. Set up an **LLM** node to generate poems.
+
+
+Example Prompt:
+
+You are a European literary figure who can create poetic verses based on `sys.query`.
+
+`verse` is your last creation. You can progress based on your previous work.
+
+
+The first iteration begins with the initial verse value `I haven't started creating yet`. Each subsequent iteration builds upon the previous output, with the new poem replacing the verse variable’s content.
+
+4. Set up a **Variable Assigner** node to manage state:
+
+ - Increment num by 1 after each iteration
+
+ - Update verse with the newly generated poem
+
+5. When executed, the workflow will produce four versions of your poem, each iteration building upon its previous output.
+
+
diff --git a/en/guides/workflow/node/template.mdx b/en/guides/workflow/node/template.mdx
index 816b1d14..b6fbc922 100644
--- a/en/guides/workflow/node/template.mdx
+++ b/en/guides/workflow/node/template.mdx
@@ -2,10 +2,9 @@
title: Template
---
-
Template lets you dynamically format and combine variables from previous nodes into a single text-based output using Jinja2, a powerful templating syntax for Python. It's useful for combining data from multiple sources into a specific structure required by subsequent nodes. The simple example below shows how to assemble an article by piecing together various previous outputs:
-
+
Beyond naive use cases, you can create more complex templates as per Jinja's [documentation](https://jinja.palletsprojects.com/en/3.1.x/templates/) for a variety of tasks. Here's one template that structures retrieved chunks and their relevant metadata from a knowledge retrieval node into a formatted markdown:
@@ -25,8 +24,38 @@ Beyond naive use cases, you can create more complex templates as per Jinja's [do
{% endraw %}
```
-
+
This template node can then be used within a Chatflow to return intermediate outputs to the end user, before a LLM response is initiated.
> The `Answer` node in a Chatflow is non-terminal. It can be inserted anywhere to output responses at multiple points within the flow.
+
+Example: Support for rendering HTML forms:
+
+```html
+
+```
+
+
diff --git a/en/guides/workflow/nodes/README.mdx b/en/guides/workflow/nodes/README.mdx
deleted file mode 100644
index 267108f2..00000000
--- a/en/guides/workflow/nodes/README.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Node Description
----
-
-
-**Nodes are the key components of a workflow**, enabling the execution of a series of operations by connecting nodes with different functionalities.
-
-### Core Nodes
-
-
A Loop node executes repetitive tasks that depend on previous iteration results until exit conditions are met or the maximum loop count is reached.
diff --git a/en/guides/workflow/nodes/agent.mdx b/en/guides/workflow/nodes/agent.mdx
deleted file mode 100644
index c94c9bf4..00000000
--- a/en/guides/workflow/nodes/agent.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: Agent
----
-
-
-## Definition
-
-An Agent Node is a component in Dify Chatflow/Workflow that enables autonomous tool invocation. By integrating different Agent reasoning strategies, LLMs can dynamically select and execute tools at runtime, thereby performing multi-step reasoning.
-
-## Configuration Steps
-
-### Add the Node
-
-In the Dify Chatflow/Workflow editor, drag the Agent node from the components panel onto the canvas.
-
-
-
-### Select an Agent Strategy
-
-In the node configuration panel, click Agent Strategy.
-
-
-
-From the dropdown menu, select the desired Agent reasoning strategy. Dify provides two built-in strategies, **Function Calling and ReAct**, which can be installed from the **Marketplace → Agent Strategies category**.
-
-
-
-#### 1. Function Calling
-
-Function Calling maps user commands to predefined functions or tools. The LLM first identifies user intent, then decides which function to call and extracts the required parameters. Its core mechanism involves explicitly calling external functions or tools.
-
-Pros:
-
-**• Precision:** For well-defined tasks, it can call the corresponding tool directly without requiring complex reasoning.
-
-**• Easier external feature integration:** Various external APIs or tools can be wrapped into functions for the model to call.
-
-**• Structured output:** The model outputs structured information about function calls, facilitating processing by downstream nodes.
-
-
-
-#### 2. ReAct (Reason + Act)
-
-ReAct enables the Agent to alternate between reasoning and taking action: the LLM first thinks about the current state and goal, then selects and calls the appropriate tool. The tool’s output in turn informs the LLM’s next step of reasoning and action. This cycle continues until the problem is resolved.
-
-Pros:
-
-**• Effective external information use:** It can leverage external tools to retrieve information and handle tasks that the model alone cannot accomplish.
-
-**• Improved explainability:** Because reasoning and actions are interwoven, there is a certain level of traceability in the Agent’s thought process.
-
-**• Wide applicability:** Suitable for scenarios that require external knowledge or need to perform specific actions, such as Q\&A, information retrieval, and task execution.
-
-
-
-Developers can contribute Agent strategy plugins to the public [repository](https://github.com/langgenius/dify-plugins). After review, these plugins will be listed in the Marketplace for others to install.
-
-### Configure Node Parameters
-
-After choosing the Agent strategy, the configuration panel will display the relevant options. For the Function Calling and ReAct strategies that ship with Dify, the available configuration items include:
-
-1. **Model:** Select the large language model that drives the Agent.
-2. **Tools List:** The approach to using tools is defined by the Agent strategy. Click + to add and configure tools the Agent can call.
- * Search: Select an installed tool plugin from the dropdown.
- * Authorization: Provide API keys and other credentials to enable the tool.
- * Tool Description and Parameter Settings: Provide a description to help the LLM understand when and why to use the tool, and configure any functional parameters.
-3. **Instruction**: Define the Agent’s task goals and context. Jinja syntax is supported to reference upstream node variables.
-4. **Query**: Receives user input.
-5. **Maximum Iterations:** Set the maximum number of execution steps for the Agent.
-6. **Output Variables:** Indicates the data structure output by the node.
-
-
-
-## Logs
-
-During execution, the Agent node generates detailed logs. You can see overall node execution information—including inputs and outputs, token usage, time spent, and status. Click Details to view the output from each round of Agent strategy execution.
-
-
diff --git a/en/guides/workflow/nodes/answer.mdx b/en/guides/workflow/nodes/answer.mdx
deleted file mode 100644
index 4d6e0246..00000000
--- a/en/guides/workflow/nodes/answer.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: Answer
----
-
-
-Defining Reply Content in a Chatflow Process. In a text editor, you have the flexibility to determine the reply format. This includes crafting a fixed block of text, utilizing output variables from preceding steps as the reply content, or merging custom text with variables for the response.
-
-Answer node can be seamlessly integrated at any point to dynamically deliver content into the dialogue responses. This setup supports a live-editing configuration mode, allowing for both text and image content to be arranged together. The configurations include:
-
-1. Outputting the reply content from a Language Model (LLM) node.
-2. Outputting generated images.
-3. Outputting plain text.
-
-Example 1: Output plain text.
-
-
-
-
-
diff --git a/en/guides/workflow/nodes/code.mdx b/en/guides/workflow/nodes/code.mdx
deleted file mode 100644
index cbc64847..00000000
--- a/en/guides/workflow/nodes/code.mdx
+++ /dev/null
@@ -1,122 +0,0 @@
----
-title: Code Execution
----
-
-
-## Table of Contents
-
-* [Introduction](#introduction)
-* [Usage Scenarios](#usage-scenarios)
-* [Local Deployment](#local-deployment)
-* [Security Policies](#security-policies)
-
-## Introduction
-
-The code node supports running Python/NodeJS code to perform data transformations within a workflow. It can simplify your workflow and is suitable for scenarios such as arithmetic operations, JSON transformations, text processing, and more.
-
-This node significantly enhances the flexibility for developers, allowing them to embed custom Python or JavaScript scripts within the workflow and manipulate variables in ways that preset nodes cannot achieve. Through configuration options, you can specify the required input and output variables and write the corresponding execution code:
-
-
-
-## Configuration
-
-If you need to use variables from other nodes in the code node, you must define the variable names in the `input variables` and reference these variables. You can refer to [Variable References](/en/guides/workflow/variables).
-
-## Usage Scenarios
-
-Using the code node, you can perform the following common operations:
-
-### Structured Data Processing
-
-In workflows, you often have to deal with unstructured data processing, such as parsing, extracting, and transforming JSON strings. A typical example is data processing from an HTTP node. In common API return structures, data may be nested within multiple layers of JSON objects, and you need to extract certain fields. The code node can help you perform these operations. Here is a simple example that extracts the `data.name` field from a JSON string returned by an HTTP node:
-
-```python
-def main(http_response: str) -> str:
- import json
- data = json.loads(http_response)
- return {
- # Note to declare 'result' in the output variables
- 'result': data['data']['name']
- }
-```
-
-### Mathematical Calculations
-
-When you need to perform complex mathematical calculations in a workflow, you can also use the code node. For example, calculating a complex mathematical formula or performing some statistical analysis on data. Here is a simple example that calculates the variance of an array:
-
-```python
-def main(x: list) -> float:
- return {
- # Note to declare 'result' in the output variables
- 'result': sum([(i - sum(x) / len(x)) ** 2 for i in x]) / len(x)
- }
-```
-
-### Data Concatenation
-
-Sometimes, you may need to concatenate multiple data sources, such as multiple knowledge retrievals, data searches, API calls, etc. The code node can help you integrate these data sources together. Here is a simple example that merges data from two knowledge bases:
-
-```python
-def main(knowledge1: list, knowledge2: list) -> list:
- return {
- # Note to declare 'result' in the output variables
- 'result': knowledge1 + knowledge2
- }
-```
-
-## Local Deployment
-
-If you are a local deployment user, you need to start a sandbox service to ensure that malicious code is not executed. This service requires the use of Docker. You can find specific information about the sandbox service [here](https://github.com/langgenius/dify/tree/main/docker/docker-compose.middleware.yaml). You can also start the service directly via `docker-compose`:
-
-```bash
-docker-compose -f docker-compose.middleware.yaml up -d
-```
-
-## Security Policies
-
-Both Python and JavaScript execution environments are strictly isolated (sandboxed) to ensure security. This means that developers cannot use functions that consume large amounts of system resources or may pose security risks, such as direct file system access, making network requests, or executing operating system-level commands. These limitations ensure the safe execution of the code while avoiding excessive consumption of system resources.
-
-### Advanced Features
-
-**Retry on Failure**
-
-For some exceptions that occur in the node, it is usually sufficient to retry the node again. When the error retry function is enabled, the node will automatically retry according to the preset strategy when an error occurs. You can adjust the maximum number of retries and the interval between each retry to set the retry strategy.
-
-- The maximum number of retries is 10
-- The maximum retry interval is 5000 ms
-
-
-
-**Error Handling**
-
-When processing information, code nodes may encounter code execution exceptions. Developers can follow these steps to configure fail branches, enabling contingency plans when nodes encounter exceptions, thus avoiding workflow interruptions.
-
-1. Enable "Error Handling" in the code node
-2. Select and configure an error handling strategy
-
-
-
-For more information about exception handling approaches, please refer to [Error Handling](/en/guides/workflow/error-handling).
-
-### FAQ
-
-**Why can't I save the code it in the code node?**
-
-Please check if the code contains potentially dangerous behaviors. For example:
-
-```python
-def main() -> dict:
- return {
- "result": open("/etc/passwd").read(),
- }
-```
-
-This code snippet has the following issues:
-
-* **Unauthorized file access:** The code attempts to read the "/etc/passwd" file, which is a critical system file in Unix/Linux systems that stores user account information.
-* **Sensitive information disclosure:** The "/etc/passwd" file contains important information about system users, such as usernames, user IDs, group IDs, home directory paths, etc. Direct access could lead to information leakage.
-
-Dangerous code will be automatically blocked by Cloudflare WAF. You can check if it's been blocked by looking at the "Network" tab in your browser's "Web Developer Tools".
-
-
-
diff --git a/en/guides/workflow/nodes/doc-extractor.mdx b/en/guides/workflow/nodes/doc-extractor.mdx
deleted file mode 100644
index 9336f83c..00000000
--- a/en/guides/workflow/nodes/doc-extractor.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
----
-title: Doc Extractor
----
-
-
-#### Definition
-
-LLMs cannot directly read or interpret document contents. Therefore, it's necessary to parse and read information from user-uploaded documents through a document extractor node, convert it to text, and then pass the content to the LLM to process the file contents.
-
-#### Application Scenarios
-
-* Building LLM applications that can interact with files, such as ChatPDF or ChatWord;
-* Analyzing and examining the contents of user-uploaded files;
-
-#### Node Functionality
-
-The document extractor node can be understood as an information processing center. It recognizes and reads files in the input variables, extracts information, and converts it into string-type output variables for downstream nodes to call.
-
-
-
-The document extractor node structure is divided into input variables and output variables.
-
-**Input Variables**
-
-The document extractor only accepts variables with the following data structures:
-
-* `File`, a single file
-* `Array[File]`, multiple files
-
-The document extractor can only extract information from document-type files, such as the contents of TXT, Markdown, PDF, HTML, DOCX format files. It cannot process image, audio, video, or other file formats.
-
-**Output Variables**
-
-The output variable is fixed and named as text. The type of output variable depends on the input variable:
-
-* If the input variable is `File`, the output variable is `string`
-* If the input variable is `Array[File]`, the output variable is `array[string]`
-
-> Array variables generally need to be used in conjunction with list operation nodes. For details, please refer to [List Operator](/en/guides/workflow/nodes/list-operator).
-
-#### Configuration Example
-
-In a typical file interaction Q\&A scenario, the document extractor can serve as a preliminary step for the LLM node, extracting file information from the application and passing it to the downstream LLM node to answer user questions about the file.
-
-This section will introduce the usage of the document extractor node through a typical ChatPDF example workflow template.
-
-
-
-**Configuration Process:**
-
-1. Enable file upload for the application. Add a **single file variable** in the "Start" node and name it `pdf`.
-2. Add a document extractor node and select the `pdf` variable in the input variables.
-3. Add an LLM node and select the output variable of the document extractor node in the system prompt. The LLM can read the contents of the file through this output variable.
-
-
-
-Configure the end node by selecting the output variable of the LLM node in the end node.
-
-
-
-After configuration, the application will have file upload functionality, allowing users to upload PDF files and engage in conversation.
-
-
-For how to upload files in chat conversations and interact with the LLM, please refer to [Additional Features](/en/guides/workflow/additional-features).
-
diff --git a/en/guides/workflow/nodes/end.mdx b/en/guides/workflow/nodes/end.mdx
deleted file mode 100644
index 9464d29d..00000000
--- a/en/guides/workflow/nodes/end.mdx
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: End
----
-
-
-### 1 Definition
-
-Define the final output content of a workflow. Every workflow needs at least one end node after complete execution to output the final result.
-
-The end node is a termination point in the process; no further nodes can be added after it. In a workflow application, results are only output when the end node is reached. If there are conditional branches in the process, multiple end nodes need to be defined.
-
-The end node must declare one or more output variables, which can reference any upstream node's output variables.
-
-
-End nodes are not supported within Chatflow.
-
-
-***
-
-### 2 Scenarios
-
-In the following [long story generation workflow](/en/guides/workflow/nodes/iteration#example-2-long-article-iterative-generation-another-scheduling-method), the variable `Output` declared by the end node is the output of the upstream code node. This means the workflow will end after the Code node completes execution and will output the execution result of Code.
-
-
-
-**Single Path Execution Example:**
-
-
-
-**Multi-Path Execution Example:**
-
-
diff --git a/en/guides/workflow/nodes/http-request.mdx b/en/guides/workflow/nodes/http-request.mdx
deleted file mode 100644
index 18e7785c..00000000
--- a/en/guides/workflow/nodes/http-request.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
----
-title: HTTP Request
----
-
-
-### Definition
-
-Allows sending server requests via the HTTP protocol, suitable for scenarios such as retrieving external data, webhooks, generating images, and downloading files. It enables you to send customized HTTP requests to specified web addresses, achieving interconnectivity with various external services.
-
-This node supports common HTTP request methods:
-
-* **GET**: Used to request the server to send a specific resource.
-* **POST**: Used to submit data to the server, typically for submitting forms or uploading files.
-* **HEAD**: Similar to GET requests, but the server only returns the response headers without the resource body.
-* **PATCH**: Used to apply partial modifications to a resource.
-* **PUT**: Used to upload resources to the server, typically for updating an existing resource or creating a new one.
-* **DELETE**: Used to request the server to delete a specified resource.
-
-You can configure various aspects of the HTTP request, including URL, request headers, query parameters, request body content, and authentication information.
-
-
-
-***
-
-### Scenarios
-
-* **Send Application Interaction Content to a Specific Server**
-
-One practical feature of this node is the ability to dynamically insert variables into different parts of the request based on the scenario. For example, when handling customer feedback requests, you can embed variables such as username or customer ID, feedback content, etc., into the request to customize automated reply messages or fetch specific customer information and send related resources to a designated server.
-
-
-
-The return values of an HTTP request include the response body, status code, response headers, and files. Notably, if the response contains a file, this node can automatically save the file for use in subsequent steps of the workflow. This design not only improves processing efficiency but also makes handling responses with files straightforward and direct.
-
-* **Send File**
-
-You can use an HTTP PUT request to send files from the application to other API services. In the request body, you can select the file variable within the `binary`. This method is commonly used in scenarios such as file transfer, document storage, or media processing.
-
-Example: Suppose you are developing a document management application and need to send a user-uploaded PDF file to a third-party service. You can use an HTTP request node to pass the file variable.
-
-Here is a configuration example:
-
-
-
-### Advanced Features
-
-**Retry on Failure**
-
-For some exceptions that occur in the node, it is usually sufficient to retry the node again. When the error retry function is enabled, the node will automatically retry according to the preset strategy when an error occurs. You can adjust the maximum number of retries and the interval between each retry to set the retry strategy.
-
-- The maximum number of retries is 10
-- The maximum retry interval is 5000 ms
-
-
-
-**Error Handling**
-
-When processing information, HTTP nodes may encounter exceptional situations such as network request timeouts or request limits. Application developers can follow these steps to configure fail branches, enabling contingency plans when nodes encounter exceptions and avoiding workflow interruptions.
-
-1. Enable "Error Handling" in the HTTP node
-2. Select and configure an error handling strategy
-
-For more information about exception handling approaches, please refer to [Error Handling](/en/guides/workflow/error-handling).
-
-
\ No newline at end of file
diff --git a/en/guides/workflow/nodes/ifelse.mdx b/en/guides/workflow/nodes/ifelse.mdx
deleted file mode 100644
index 4c8783e3..00000000
--- a/en/guides/workflow/nodes/ifelse.mdx
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: Conditional Branch IF/ELSE
----
-
-
-### Definition
-
-Allows you to split the workflow into two branches based on if/else conditions.
-
-A conditional branching node has three parts:
-
-* IF Condition: Select a variable, set the condition, and specify the value that satisfies the condition.
-* IF condition evaluates to `True`, execute the IF path.
-* IF condition evaluates to `False`, execute the ELSE path.
-* If the ELIF condition evaluates to `True`, execute the ELIF path;
-* If the ELIF condition evaluates to `False`, continue to evaluate the next ELIF path or execute the final ELSE path.
-
-**Condition Types**
-
-* Contains
-* Not contains
-* Starts with
-* Ends with
-* Is
-* Is not
-* Is empty
-* Is not empty
-
-***
-
-### Scenario
-
-
-
-Taking the above **Text Summary Workflow** as an example:
-
-* IF Condition: Select the `summarystyle` variable from the start node, with the condition **Contains** `technical`.
-* IF condition evaluates to `True`, follow the IF path by querying technology-related knowledge through the knowledge retrieval node, then respond via the LLM node (as shown in the upper half of the diagram);
-* IF condition evaluates to `False`, but an `ELIF` condition is added, where the input for the `summarystyle` variable **does not include** `technology`, yet the `ELIF` condition includes `science`, check if the condition in `ELIF` is `True`, then execute the steps defined within that path;
-* If the condition within `ELIF` is `False`, meaning the input variable contains neither `technology` nor `science`, continue to evaluate the next `ELIF` condition or execute the final `ELSE` path;
-* IF condition evaluates to `False`, i.e., the `summarystyle` variable input **does not contain** `technical`, execute the ELSE path, responding via the LLM2 node (lower part of the diagram).
-
-**Multiple Condition Judgments**
-
-For complex condition judgments, you can set multiple condition judgments and configure **AND** or **OR** between conditions to take the **intersection** or **union** of the conditions, respectively.
-
-
diff --git a/en/guides/workflow/nodes/iteration.mdx b/en/guides/workflow/nodes/iteration.mdx
deleted file mode 100644
index dce81269..00000000
--- a/en/guides/workflow/nodes/iteration.mdx
+++ /dev/null
@@ -1,186 +0,0 @@
----
-title: Iteration
----
-
-
-### Definition
-
-Sequentially performs the same operations on array elements until all results are outputted, functioning as a task batch processor. Iteration nodes typically work in conjunction with array variables.
-
-For example, when processing long text translations, inputting all content directly into an LLM node may reach the single conversation limit. To address the issue, upstream nodes first split the long text into multiple chunks, then use iteration nodes to perform batch translations, thus avoiding the message limit of a single LLM conversation.
-
-***
-
-### Functional Description
-
-Using iteration nodes requires input values to be formatted as list objects. The node sequentially processes all elements in the array variable from the iteration start node, applying identical processing steps to each element. Each processing cycle is called an iteration, culminating in the final output.
-
-An iteration node consists of three core components: **Input Variables**, **Iteration Workflow**, and **Output Variables**.
-
-**Input Variables:** Accepts only Array type data.
-
-**Iteration Workflow:** Supports multiple workflow nodes to orchestrate task sequences within the iteration node.
-
-**Output Variables:** Outputs only array variables (`Array[List]`).
-
-
-
-
-
-
-
-### Scenarios
-
-#### **Example 1: Long Article Iteration Generator**
-
-
-
-1. Enter the story title and outline in the **Start Node**.
-2. Use a **Generate Subtitles and Outlines Node** to use LLM to generate the complete content from user input.
-3. Use a **Extract Subtitles and Outlines Node** to convert the complete content into an array format.
-4. Use an **Iteration Node** to wrap an **LLM Node** and generate content for each chapter through multiple iterations.
-5. Add a **Direct Answer Node** inside the iteration node to achieve streaming output after each iteration.
-
-**Detailed Configuration Steps**
-
-1. Configure the story title (title) and outline (outline) in the **Start Node**.
-
-
-
-2. Use a **Generate Subtitles and Outlines Node** to convert the story title and outline into complete text.
-
-
-
-3. Use a **Extract Subtitles and Outlines Node** to convert the story text into an array (Array) structure. The parameter to extract is `sections`, and the parameter type is `Array[Object]`.
-
-
-
-
-The effectiveness of parameter extraction is influenced by the model's inference capability and the instructions given. Using a model with stronger inference capabilities and adding examples in the **instructions** can improve the parameter extraction results.
-
-
-4. Use the array-formatted story outline as the input for the iteration node and process it within the iteration node using an **LLM Node**.
-
-
-
-Configure the input variables `GenerateOverallOutline/output` and `Iteration/item` in the LLM Node.
-
-
-
-
-Built-in variables for iteration: `items[object]` and `index[number]`.
-
-`items[object]` represents the input item for each iteration;
-
-`index[number]` represents the current iteration round;
-
-
-5. Configure a **Direct Reply Node** inside the iteration node to achieve streaming output after each iteration.
-
-
-
-6. Complete debugging and preview.
-
-
-
-#### **Example 2: Long Article Iteration Generator (Another Arrangement)**
-
-
-
-* Enter the story title and outline in the **Start Node**.
-* Use an **LLM Node** to generate subheadings and corresponding content for the article.
-* Use a **Code Node** to convert the complete content into an array format.
-* Use an **Iteration Node** to wrap an **LLM Node** and generate content for each chapter through multiple iterations.
-* Use a **Template Conversion** Node to convert the string array output from the iteration node back to a string.
-* Finally, add a **Direct Reply Node** to directly output the converted string.
-
-***
-
-### Advanced Feature
-
-#### Parallel Mode
-
-The iteration node supports parallel processing, improving execution efficiency when enabled.
-
-
-
-Below illustrates the comparison between parallel and sequential execution in the iteration node.
-
-
-
-Parallel mode supports up to 10 concurrent iterations. When processing more than 10 tasks, the first 10 elements execute simultaneously, with remaining tasks processed after the completion of the initial batch.
-
-
-Avoid placing Direct Answer, Variable Assignment, or Tool nodes within the iteration node to prevent potential errors.
-
-
-* **Error response method**
-
-Iteration nodes process multiple tasks and may encounter errors during element processing. To prevent a single error from interrupting all tasks, configure the **Error Response Method**:
-
-* **Terminated**: Terminates the iteration node and outputs error messages when an exception is detected.
-* **Continue on error**: Ignores error messages and continues processing remaining elements. The output contains successful results with null values for errors.
-* **Remove abnormal output**: Ignores error messages and continues processing remaining elements. The output contains only successful results.
-
-Input and output variables maintain a one-to-one correspondence. For example:
-
-* Input: \[1, 2, 3]
-* Output: \[result-1, result-2, result-3]
-
-Error handling examples:
-
-* With **Continue on error**: \[result-1, null, result-3]
-* With **Remove abnormal output**: \[result-1, result-3]
-
-***
-
-### Reference
-
-#### How to Obtain Array-Formatted Content
-
-Array variables can be generated via the following nodes as iteration node inputs:
-
-* [Code Node](/en/guides/workflow/nodes/code)
-* [Parameter Extraction](/en/guides/workflow/nodes/parameter-extractor)
-* [Knowledge Base Retrieval](/en/guides/workflow/nodes/knowledge-retrieval)
-* [Iteration](/en/guides/workflow/nodes/iteration)
-* [Tools](/en/guides/workflow/nodes/tools)
-* [HTTP Request](/en/guides/workflow/nodes/http-request)
-
-***
-
-#### How to Convert an Array to Text
-
-The output variable of the iteration node is in array format and cannot be directly output. You can use a simple step to convert the array back to text.
-
-**Convert Using a Code Node**
-
-
-
-CODE Example:
-
-```python
-def main(articleSections: list):
- data = articleSections
- return {
- "result": "/n".join(data)
- }
-```
-
-**Convert Using a Template Node**
-
-
-
-CODE Example:
-
-```django
-{{ articleSections | join("/n") }}
-```
diff --git a/en/guides/workflow/nodes/knowledge-retrieval.mdx b/en/guides/workflow/nodes/knowledge-retrieval.mdx
deleted file mode 100644
index 28f67a53..00000000
--- a/en/guides/workflow/nodes/knowledge-retrieval.mdx
+++ /dev/null
@@ -1,41 +0,0 @@
----
-title: Knowledge Retrieval
----
-
-
-The Knowledge Base Retrieval Node is designed to query text content related to user questions from the Dify Knowledge Base, which can then be used as context for subsequent answers by the Large Language Model (LLM).
-
-
-
-Configuring the Knowledge Base Retrieval Node involves four main steps:
-
-1. **Selecting the Query Variable**
-2. **Choosing the Knowledge Base for Query**
-3. **Applying Metadata Filtering**
-4. **Configuring the Retrieval Strategy**
-
-**Selecting the Query Variable**
-
-In knowledge base retrieval scenarios, the query variable typically represents the user's input question. In the "Start" node of conversational applications, the system pre-sets "sys.query" as the user input variable. This variable can be used to query the knowledge base for text chunks most closely related to the user's question. The maximum query content sent to the knowledge base is 200 characters.
-
-**Choosing the Knowledge Base for Query**
-
-Within the knowledge base retrieval node, you can add an existing knowledge base from Dify. For instructions on creating a knowledge base within Dify, please refer to the knowledge base [help documentation](/en/guides/knowledge-base/create-knowledge-and-upload-documents).
-
-**Applying Metadata Filtering**
-
-Use **Metadata Filtering** to refine document search in your knowledge base. For details, see **Metadata Filtering** in *[Integrate Knowledge Base within Application](/en/guides/knowledge-base/integrate-knowledge-within-application)*.
-
-**Configuring the Retrieval Strategy**
-
-It's possible to modify the indexing strategy and retrieval mode for an individual knowledge base within the node. For a detailed explanation of these settings, refer to the knowledge base [help documentation](/en/guides/knowledge-base/retrieval-test-and-citation).
-
-
-
-Dify offers two recall strategies for different knowledge base retrieval scenarios: "N-to-1 Recall" and "Multi-way Recall". In the N-to-1 mode, knowledge base queries are executed through function calling, requiring the selection of a system reasoning model. In the multi-way recall mode, a Rerank model needs to be configured for result re-ranking. For a detailed explanation of these two recall strategies, refer to the retrieval mode explanation in the [help documentation](/en/guides/knowledge-base/create-knowledge-and-upload-documents#id-5-indexing-methods).
-
-
diff --git a/en/guides/workflow/nodes/list-operator.mdx b/en/guides/workflow/nodes/list-operator.mdx
deleted file mode 100644
index d5067187..00000000
--- a/en/guides/workflow/nodes/list-operator.mdx
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: List Operator
----
-
-
-File list variables support simultaneous uploading of multiple file types such as document files, images, audio, and video files. When application users upload files, all files are stored in the same `Array[File]` array-type variable, which **is not conducive to subsequent individual file processing.**
-
-> The `Array` data type means that the actual value of the variable could be \[1.mp3, 2.png, 3.doc]. LLMs only support reading single values such as image files or text content as input variables and cannot directly read array variables.
-
-#### Node Functionality
-
-The list operator can filter and extract attributes such as file format type, file name, and size, passing different format files to corresponding processing nodes to achieve precise control over different file processing flows.
-
-For example, in an application that allows users to upload both document files and image files simultaneously, different files need to be sorted through the **list operation node**, with different files being handled by different processes.
-
-
-
-List operation nodes are generally used to extract information from array variables, converting them into variable types that can be accepted by downstream nodes through setting conditions. Its structure is divided into input variables, filter conditions, sorting, taking the first N items, and output variables.
-
-
-
-**Input Variables**
-
-The list operation node only accepts variables with the following data structures:
-
-* Array\[string]
-* Array\[number]
-* Array\[file]
-
-**Filter Conditions**
-
-Process arrays in input variables by adding filter conditions. Sort out all array variables that meet the conditions from the array, which can be understood as filtering the attributes of variables.
-
-Example: Files may contain multiple dimensions of attributes, such as file name, file type, file size, etc. Filter conditions allow setting screening conditions to select and extract specific files from array variables.
-
-Supports extracting the following variables:
-
-* type: File category, including image, document, audio, and video types
-* size: File size
-* name: File name
-* url: Refers to files uploaded by application users via URL, can fill in the complete URL for filtering
-* extension: File extension
-* mime\_type: [MIME types](https://datatracker.ietf.org/doc/html/rfc2046) are standardized strings used to identify file content types. Example: "text/html" indicates an HTML document.
-* transfer\_method: File upload method, divided into local upload or upload via URL
-
-**Sorting**
-
-Provides the ability to sort arrays in input variables, supporting sorting based on file attributes.
-
-* Ascending order(ASC): Default sorting option, sorted from small to large. For letters and text, sorted in alphabetical order (A - Z)
-* Descending order(DESC): Sorted from large to small, for letters and text, sorted in reverse alphabetical order (Z - A)
-
-This option is often used in conjunction with first\_record and last\_record in output variables.
-
-**Take First N Items**
-
-You can choose a value between 1-20, used to select the first n items of the array variable.
-
-**Output Variables**
-
-Array elements that meet all filter conditions. Filter conditions, sorting, and limitations can be enabled separately. If enabled simultaneously, array elements that meet all conditions are returned.
-
-* Result: Filtering result, data type is array variable. If the array contains only 1 file, the output variable contains only 1 array element;
-* first\_record: The first element of the filtered array, i.e., result\[0];
-* last\_record: The last element of the filtered array, i.e., result\[array.length-1].
-
-***
-
-#### Configuration Example
-
-In file interaction Q\&A scenarios, application users may upload document files or image files simultaneously. LLMs only support the ability to recognize image files and do not support reading document files. At this time, the List Operation node is needed to preprocess the array of file variables and send different file types to corresponding processing nodes. The orchestration steps are as follows:
-
-1. Enable the [Features](/en/guides/workflow/additional-features) function and check both "Images" and "Document" types in the file types.
-2. Add two list operation nodes, setting to extract image and document variables respectively in the "List Operator" conditions.
-3. Extract document file variables and pass them to the "Doc Extractor" node; extract image file variables and pass them to the "LLM" node.
-4. Add a "Answer" node at the end, filling in the output variable of the LLM node.
-
-
-
-After the application user uploads both document files and images, document files are automatically diverted to the doc extractor node, and image files are automatically diverted to the LLM node to achieve joint processing of mixed files.
diff --git a/en/guides/workflow/nodes/llm.mdx b/en/guides/workflow/nodes/llm.mdx
deleted file mode 100644
index 7c6278c1..00000000
--- a/en/guides/workflow/nodes/llm.mdx
+++ /dev/null
@@ -1,177 +0,0 @@
----
-title: LLM
----
-
-### Definition
-
-Invokes the capabilities of large language models to process information input by users in the "Start" node (natural language, uploaded files, or images) and provide effective response information.
-
-
-
-***
-
-### Scenarios
-
-LLM is the core node of Chatflow/Workflow, utilizing the conversational/generative/classification/processing capabilities of large language models to handle a wide range of tasks based on given prompts and can be used in different stages of workflows.
-
-* **Intent Recognition**: In customer service scenarios, identifying and classifying user inquiries to guide downstream processes.
-* **Text Generation**: In content creation scenarios, generating relevant text based on themes and keywords.
-* **Content Classification**: In email batch processing scenarios, automatically categorizing emails, such as inquiries/complaints/spam.
-* **Text Conversion**: In translation scenarios, translating user-provided text into a specified language.
-* **Code Generation**: In programming assistance scenarios, generating specific business code or writing test cases based on user requirements.
-* **RAG**: In knowledge base Q\&A scenarios, reorganizing retrieved relevant knowledge to respond to user questions.
-* **Image Understanding**: Using multimodal models with vision capabilities to understand and answer questions about the information within images.
-* **File Analysis**: In file processing scenarios, use LLMs to recognize and analyze the information contained within files.
-
-By selecting the appropriate model and writing prompts, you can build powerful and reliable solutions within Chatflow/Workflow.
-
-***
-
-### How to Configure
-
-
-
-**Configuration Steps:**
-
-1. **Select a Model**: Dify supports major global models, including OpenAI's GPT series, Anthropic's Claude series, and Google's Gemini series. Choosing a model depends on its inference capability, cost, response speed, context window, etc. You need to select a suitable model based on the scenario requirements and task type.
-2. **Configure Model Parameters**: Model parameters control the generation results, such as temperature, TopP, maximum tokens, response format, etc. To facilitate selection, the system provides three preset parameter sets: Creative, Balanced, and Precise.
-3. **Write Prompts**: The LLM node offers an easy-to-use prompt composition page. Selecting a chat model or completion model will display different prompt composition structures.
-4. **Advanced Settings**: You can enable memory, set memory windows, and use the Jinja-2 template language for more complex prompts.
-
-
-If you are using Dify for the first time, you need to complete the [model configuration](/en/guides/model-configuration) in **System Settings-Model Providers** before selecting a model in the LLM node.
-
-
-#### **Writing Prompts**
-
-In the LLM node, you can customize the model input prompts. If you select a chat model, you can customize the System/User/Assistant sections.
-
-**Prompt Generator**
-
-If you're struggling to come up with effective system prompts (System), you can use the Prompt Generator to quickly create prompts suitable for your specific business scenarios, leveraging AI capabilities.
-
-
-
-In the prompt editor, you can call out the **variable insertion menu** by typing `/` or `{` to insert **special variable blocks** or **upstream node variables** into the prompt as context content.
-
-
-
-***
-
-### Explanation of Special Variables
-
-**Context Variables**
-
-Context variables are a special type of variable defined within the LLM node, used to insert externally retrieved text content into the prompt.
-
-
-
-In common knowledge base Q\&A applications, the downstream node of knowledge retrieval is typically the LLM node. The **output variable** `result` of knowledge retrieval needs to be configured in the **context variable** within the LLM node for association and assignment. After association, inserting the **context variable** at the appropriate position in the prompt can incorporate the externally retrieved knowledge into the prompt.
-
-This variable can be used not only as external knowledge introduced into the prompt context for LLM responses but also supports the application's [citation and attribution](/en/guides/knowledge-base/retrieval-test-and-citation#id-2-citation-and-attribution) feature due to its data structure containing segment reference information.
-
-
-If the context variable is associated with a common variable from an upstream node, such as a string type variable from the start node, the context variable can still be used as external knowledge, but the **citation and attribution** feature will be disabled.
-
-
-**File Variables**
-
-Some LLMs, such as [Claude 3.5 Sonnet](https://docs.anthropic.com/en/docs/build-with-claude/pdf-support), now support direct processing of file content, enabling the use of file variables in prompts. To prevent potential issues, application developers should verify the supported file types on the LLM's official website before utilizing the file variable.
-
-
-
-> Refer to [File Upload](/en/guides/workflow/file-upload) for guidance on building a Chatflow/Workflow application with file upload functionality.
-
-**Conversation History**
-
-To achieve conversational memory in text completion models (e.g., gpt-3.5-turbo-Instruct), Dify designed the conversation history variable in the original [Prompt Expert Mode (discontinued)](/en/learn-more/extended-reading/prompt-engineering/prompt-engineering-1). This variable is carried over to the LLM node in Chatflow, used to insert chat history between the AI and the user into the prompt, helping the LLM understand the context of the conversation.
-
-
-The conversation history variable is not widely used and can only be inserted when selecting text completion models in Chatflow.
-
-
-
-
-**Model Parameters**
-
-The parameters of the model affect the output of the model. Different models have different parameters. The following figure shows the parameter list for `gpt-4`.
-
-
-
-The main parameter terms are explained as follows:
-
-**Temperature**: Usually a value between 0-1, it controls randomness. The closer the temperature is to 0, the more certain and repetitive the results; the closer it is to 1, the more random the results.
-
-**Top P**: Controls the diversity of the results. The model selects from candidate words based on probability, ensuring that the cumulative probability does not exceed the preset threshold P.
-
-**Presence Penalty**: Used to reduce the repetitive generation of the same entity or information by imposing penalties on content that has already been generated, making the model inclined to generate new or different content. As the parameter value increases, greater penalties are applied in subsequent generations to content that has already been generated, lowering the likelihood of repeating content.
-
-**Frequency Penalty**: Imposes penalties on words or phrases that appear too frequently by reducing their probability of generation. With an increase in parameter value, greater penalties are imposed on frequently occurring words or phrases. Higher parameter values reduce the frequency of these words, thereby increasing the lexical diversity of the text.
-
-If you do not understand what these parameters are, you can choose to load presets and select from the three presets: Creative, Balanced, and Precise.
-
-
-
-***
-
-### Advanced Features
-
-**Memory**: When enabled, each input to the intent classifier will include chat history from the conversation to help the LLM understand the context and improve question comprehension in interactive dialogues.
-
-**Memory Window**: When the memory window is closed, the system dynamically filters the amount of chat history passed based on the model's context window; when open, users can precisely control the amount of chat history passed (in terms of numbers).
-
-**Conversation Role Name Settings**: Due to differences in model training stages, different models adhere to role name instructions differently, such as Human/Assistant, Human/AI, Human/Assistant, etc. To adapt to the prompt response effects of multiple models, the system provides conversation role name settings. Modifying the role name will change the role prefix in the conversation history.
-
-**Jinja-2 Templates**: The LLM prompt editor supports Jinja-2 template language, allowing you to leverage this powerful Python template language for lightweight data transformation and logical processing. Refer to the [official documentation](https://jinja.palletsprojects.com/en/3.1.x/templates/).
-
-**Retry on Failure**: For some exceptions that occur in the node, it is usually sufficient to retry the node again. When the error retry function is enabled, the node will automatically retry according to the preset strategy when an error occurs. You can adjust the maximum number of retries and the interval between each retry to set the retry strategy.
-
-- The maximum number of retries is 10
-- The maximum retry interval is 5000 ms
-
-
-
-**Error Handling**: Provides diverse node error handling strategies that can throw error messages when the current node fails without interrupting the main process, or continue completing tasks through backup paths. For detailed information, please refer to the [Error Handling](/en/guides/workflow/error-handling).
-
-***
-
-#### Use Cases
-
-* **Reading Knowledge Base Content**
-
-To enable workflow applications to read [Knowledge Base](/en/guides/knowledge-base) content, such as building an intelligent customer service application, please follow these steps:
-
-1. Add a knowledge base retrieval node upstream of the LLM node;
-2. Fill in the **output variable** `result` of the knowledge retrieval node into the **context variable** of the LLM node;
-3. Insert the **context variable** into the application prompt to give the LLM the ability to read text within the knowledge base.
-
-
-
-The `result` variable output by the Knowledge Retrieval Node also includes segmented reference information. You can view the source of information through the **Citation and Attribution** feature.
-
-
-Regular variables from upstream nodes can also be filled into context variables, such as string-type variables from the start node, but the **Citation and Attribution** feature will be ineffective.
-
-
-* **Reading Document Files**
-
-To enable workflow applications to read document contents, such as building a ChatPDF application, you can follow these steps:
-
-* Add a file variable in the "Start" node;
-* Add a document extractor node upstream of the LLM node, using the file variable as an input variable;
-* Fill in the **output variable** `text` of the document extractor node into the prompt of the LLM node.
-
-For more information, please refer to [File Upload](/en/guides/workflow/file-upload).
-
-
-
-* **Error Handling**
-
-When processing information, LLM nodes may encounter errors such as input text exceeding token limits or missing key parameters. Developers can follow these steps to configure exception branches, enabling contingency plans when node errors occur to avoid interrupting the entire flow:
-
-1. Enable "Error Handling" in the LLM node
-2. Select and configure an error handling strategy
-
-
-
-For more information about exception handling methods, please refer to the [Error Handling](/en/guides/workflow/error-handling).
diff --git a/en/guides/workflow/nodes/loop.mdx b/en/guides/workflow/nodes/loop.mdx
deleted file mode 100644
index 1b131773..00000000
--- a/en/guides/workflow/nodes/loop.mdx
+++ /dev/null
@@ -1,85 +0,0 @@
----
-title: Loop
----
-
-## What is Loop Node?
-
-A **Loop** node executes repetitive tasks that depend on previous iteration results until exit conditions are met or the maximum loop count is reached.
-
-## Loop vs. Iteration
-
-
-
-
-
Type
-
Dependencies
-
Use Cases
-
-
-
-
-
Loop
-
Each iteration depends on previous results
-
Recursive operations, optimization problems
-
-
-
Iteration
-
Iterations execute independently
-
Batch processing, parallel data handling
-
-
-
-
-## Configuration
-
-
-
-
-
Parameter
-
Description
-
Example
-
-
-
-
-
Loop Termination Condition
-
Expression that determines when to exit the loop
-
x < 50, error_rate < 0.01
-
-
-
Maximum Loop Count
-
Upper limit on iterations to prevent infinite loops
-
10, 100, 1000
-
-
-
-
-
-
-## Usage Example
-
-**Goal: Generate random numbers (1-100) until a value below 50 appears.**
-
-**Steps**:
-
-1. Use `node` to generate a random number between 1-100.
-
-2. Use `if` to evaluate the number:
-
- - If < 50: Output `done` and terminate loop.
-
- - If ≥ 50: Continue loop and generate another random number.
-
-3. Set the exit criterion to random_number < 50.
-
-4. Loop ends when a number below 50 appears.
-
-
-
-## Planned Enhancements
-
-**Future releases will include:**
-
- - Loop variables: Store and reference values across iterations for improved state management and conditional logic.
-
- - `break` node: Terminate loops from within the execution path, enabling more sophisticated control flow patterns.
diff --git a/en/guides/workflow/nodes/parameter-extractor.mdx b/en/guides/workflow/nodes/parameter-extractor.mdx
deleted file mode 100644
index 8fdef194..00000000
--- a/en/guides/workflow/nodes/parameter-extractor.mdx
+++ /dev/null
@@ -1,61 +0,0 @@
----
-title: Parameter Extraction
----
-
-
-### 1 Definition
-
-Utilize LLM to infer and extract structured parameters from natural language for subsequent tool invocation or HTTP requests.
-
-Dify workflows provide a rich selection of [tools](/en/tools), most of which require structured parameters as input. The parameter extractor can convert user natural language into parameters recognizable by these tools, facilitating tool invocation.
-
-Some nodes within the workflow require specific data formats as inputs, such as the [iteration](/en/guides/workflow/nodes/iteration#definition) node, which requires an array format. The parameter extractor can conveniently achieve [structured parameter conversion](/en/guides/workflow/nodes/iteration#example-1-long-article-iteration-generator).
-
-***
-
-### 2 Scenarios
-
-1. **Extracting key parameters required by tools from natural language**, such as building a simple conversational Arxiv paper retrieval application.
-
-In this example: The Arxiv paper retrieval tool requires **paper author** or **paper ID** as input parameters. The parameter extractor extracts the paper ID **2405.10739** from the query "What is the content of this paper: 2405.10739" and uses it as the tool parameter for precise querying.
-
-
-
-2. **Converting text to structured data**, such as in the long story iteration generation application, where it serves as a pre-step for the [iteration node](/en/guides/workflow/nodes/iteration), converting chapter content in text format to an array format, facilitating multi-round generation processing by the iteration node.
-
-
-
-1. **Extracting structured data and using the** [**HTTP Request**](/en/guides/workflow/nodes/http-request), which can request any accessible URL, suitable for obtaining external retrieval results, webhooks, generating images, and other scenarios.
-
-***
-
-### 3 How to Configure
-
-**Configuration Steps**
-
-1. Select the input variable, usually the variable input for parameter extraction.
-2. Choose the model, as the parameter extractor relies on the LLM's inference and structured generation capabilities.
-3. Define the parameters to extract, which can be manually added or **quickly imported from existing tools**.
-4. Write instructions, where providing examples can help the LLM improve the effectiveness and stability of extracting complex parameters.
-
-**Advanced Settings**
-
-**Inference Mode**
-
-Some models support two inference modes, achieving parameter extraction through function/tool calls or pure prompt methods, with differences in instruction compliance. For instance, some models may perform better in prompt inference if function calling is less effective.
-
-* Function Call/Tool Call
-* Prompt
-
-**Memory**
-
-When memory is enabled, each input to the question classifier will include the chat history in the conversation to help the LLM understand the context and improve question comprehension during interactive dialogues.
-
-**Output Variables**
-
-* Extracted defined variables
-* Node built-in variables
-
-`__is_success Number` Extraction success status, with a value of 1 for success and 0 for failure.
-
-`__reason String` Extraction error reason
diff --git a/en/guides/workflow/nodes/question-classifier.mdx b/en/guides/workflow/nodes/question-classifier.mdx
deleted file mode 100644
index ab8378f9..00000000
--- a/en/guides/workflow/nodes/question-classifier.mdx
+++ /dev/null
@@ -1,57 +0,0 @@
----
-title: Question Classifier
----
-
-
-### 1. Definition
-
-By defining classification descriptions, the issue classifier can infer and match user inputs to the corresponding categories and output the classification results.
-
-***
-
-### 2. Scenarios
-
-Common use cases include **customer service conversation intent classification, product review classification, and bulk email classification**.
-
-In a typical product customer service Q\&A scenario, the issue classifier can serve as a preliminary step before knowledge base retrieval. It classifies the user's input question, directing it to different downstream knowledge base queries to accurately respond to the user's question.
-
-The following diagram is an example workflow template for a product customer service scenario:
-
-
-
-In this scenario, we set up three classification labels/descriptions:
-
-* Category 1: **Questions related to after-sales service**
-* Category 2: **Questions related to product usage**
-* Category 3: **Other questions**
-
-When users input different questions, the issue classifier will automatically classify them based on the set classification labels/descriptions:
-
-* "**How to set up contacts on iPhone 14?**" —> "**Questions related to product usage**"
-* "**What is the warranty period?**" —> "**Questions related to after-sales service**"
-* "**How's the weather today?**" —> "**Other questions**"
-
-***
-
-### 3. How to Configure
-
-
-
-**Configuration Steps:**
-
-1. **Select Input Variable**: This refers to the content to be classified, usually the user's question in a customer service Q\&A scenario, e.g., `sys.query`.
-2. **Choose Inference Model**: The issue classifier leverages the natural language classification and inference capabilities of large language models. Selecting an appropriate model can enhance classification effectiveness.
-3. **Write Classification Labels/Descriptions**: You can manually add multiple classifications by writing keywords or descriptive statements for each category, helping the large language model better understand the classification criteria.
-4. **Choose Corresponding Downstream Nodes**: After classification, the issue classification node can direct the flow to different paths based on the relationship between the classification and downstream nodes.
-
-#### Advanced Settings:
-
-**Instructions**: In **Advanced Settings - Instructions**, you can add supplementary instructions, such as more detailed classification criteria, to enhance the classifier's capabilities.
-
-**Memory**: When enabled, each input to the issue classifier will include chat history from the conversation to help the LLM understand the context and improve question comprehension in interactive dialogues.
-
-**Memory Window**: When the memory window is closed, the system dynamically filters the amount of chat history passed based on the model's context window; when open, users can precisely control the amount of chat history passed (in terms of numbers).
-
-**Output Variable**:
-
-`class_name` stores the classification output label. You can reference this classification result in downstream nodes when needed.
diff --git a/en/guides/workflow/nodes/start.mdx b/en/guides/workflow/nodes/start.mdx
deleted file mode 100644
index e2761a0c..00000000
--- a/en/guides/workflow/nodes/start.mdx
+++ /dev/null
@@ -1,150 +0,0 @@
----
-title: Start
----
-
-### Definition
-
-The **“Start”** node is a critical preset node in the Chatflow / Workflow application. It provides essential initial information, such as user input and [uploaded files](/en/guides/workflow/file-upload), to support the normal flow of the application and subsequent workflow nodes.
-
-### Configuring the Node
-
-On the Start node's settings page, you'll find two sections: **"Input Fields"** and preset **System Variables**.
-
-
-
-### Input Field
-
-Input field is configured by application developers to prompt users for additional information.
-
-For example, in a weekly report application, users might be required to provide background information such as name, work date range, and work details in a specific format. This preliminary information helps the LLM generate higher quality responses.
-
-Six types of input variables are supported, all of which can be set as required:
-
-* **Text:** Short text, filled in by the user, with a maximum length of 256 characters.
-* **Paragraph:** Long text, allowing users to input longer content.
-* **Select:** Fixed options set by the developer; users can only select from preset options and cannot input custom content.
-* **Number:** Only allows numerical input.
-* **Single File:** Allows users to upload a single file. Supports document types, images, audio, video, and other file types. Users can upload locally or paste a file URL. For detailed usage, refer to File Upload.
-* **File List:** Allows users to batch upload files. Supports document types, images, audio, video, and other file types. Users can upload locally or paste file URLs. For detailed usage, refer to File Upload.
-
-
-Dify's built-in document extractor node can only process certain document formats. For processing images, audio, or video files, refer to External Data Tools to set up corresponding file processing nodes.
-
-
-Once configured, users will be guided to provide necessary information to the LLM before using the application. More information will help to improve the LLM's question-answering efficiency.
-
-### System Variables
-
-System variables are preset system-level parameters in Chatflow / Workflow applications that can be globally accessed by other nodes in the application. They are typically used in advanced development scenarios, such as building multi-turn dialogue applications, collecting application logs and monitoring data, or recording usage behavior across different applications and users.
-
-**Workflow**
-
-Workflow application provides the following system variables:
-
-
-
-
-
Variable Name
-
Data Type
-
Description
-
Notes
-
-
-
-
-
sys.files [LEGACY]
-
Array[File]
-
File parameter, stores images uploaded by users when initially using the application
-
Image upload feature needs to be enabled in the "Features" section at the top right of the application orchestration page
-
-
-
sys.user_id
-
String
-
User ID, a unique identifier automatically assigned to each user when using the workflow application, used to distinguish different conversation users
-
-
-
-
sys.app_id
-
String
-
Application ID, a unique identifier assigned to each Workflow application by the system, used to distinguish different applications and record basic information of the current application
-
For users with development capabilities, this parameter can be used to differentiate and locate different Workflow applications
-
-
-
sys.workflow_id
-
String
-
Workflow ID, used to record all node information contained in the current Workflow application
-
For users with development capabilities, this parameter can be used to track and record node information within the Workflow
-
-
-
sys.workflow_run_id
-
String
-
Workflow application run ID, used to record the running status of the Workflow application
-
For users with development capabilities, this parameter can be used to track the application's run history
-
-
-
-
-**Chatflow**
-
-Chatflow application provides the following system variables:
-
-
-
-
-
Variable Name
-
Data Type
-
Description
-
Notes
-
-
-
-
-
sys.query
-
String
-
The initial content input by the user in the dialogue box
-
-
-
-
sys.files
-
Array[File]
-
Images uploaded by the user in the dialogue box
-
Image upload feature needs to be enabled in the "Features" section at the top right of the application orchestration page
-
-
-
sys.dialogue_count
-
Number
-
The number of dialogue turns during user interaction with the Chatflow application. Automatically increments by 1 after each turn. Can be used with if-else nodes to create rich branching logic. For example, at the Xth turn of dialogue, review the conversation history and provide analysis
-
-
-
-
sys.conversation_id
-
String
-
Unique identifier for the dialogue interaction session, grouping all related messages into the same conversation, ensuring the LLM continues the dialogue on the same topic and context
-
-
-
-
sys.user_id
-
String
-
Unique identifier assigned to each application user, used to distinguish different conversation users
-
-
-
-
sys.app_id
-
String
-
Application ID, a unique identifier assigned to each Workflow application by the system, used to distinguish different applications and record basic information of the current application
-
For users with development capabilities, this parameter can be used to differentiate and locate different Workflow applications
-
-
-
sys.workflow_id
-
String
-
Workflow ID, used to record all node information contained in the current Workflow application
-
For users with development capabilities, this parameter can be used to track and record node information within the Workflow
-
-
-
sys.workflow_run_id
-
String
-
Workflow application run ID, used to record the running status of the Workflow application
-
For users with development capabilities, this parameter can be used to track the application's run history
-
-
-
diff --git a/en/guides/workflow/nodes/template.mdx b/en/guides/workflow/nodes/template.mdx
deleted file mode 100644
index 816b1d14..00000000
--- a/en/guides/workflow/nodes/template.mdx
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: Template
----
-
-
-Template lets you dynamically format and combine variables from previous nodes into a single text-based output using Jinja2, a powerful templating syntax for Python. It's useful for combining data from multiple sources into a specific structure required by subsequent nodes. The simple example below shows how to assemble an article by piecing together various previous outputs:
-
-
-
-Beyond naive use cases, you can create more complex templates as per Jinja's [documentation](https://jinja.palletsprojects.com/en/3.1.x/templates/) for a variety of tasks. Here's one template that structures retrieved chunks and their relevant metadata from a knowledge retrieval node into a formatted markdown:
-
-```Plain
-{% raw %}
-{% for item in chunks %}
-### Chunk {{ loop.index }}.
-### Similarity: {{ item.metadata.score | default('N/A') }}
-
-#### {{ item.title }}
-
-##### Content
-{{ item.content | replace('\n', '\n\n') }}
-
----
-{% endfor %}
-{% endraw %}
-```
-
-
-
-This template node can then be used within a Chatflow to return intermediate outputs to the end user, before a LLM response is initiated.
-
-> The `Answer` node in a Chatflow is non-terminal. It can be inserted anywhere to output responses at multiple points within the flow.
diff --git a/en/guides/workflow/nodes/tools.mdx b/en/guides/workflow/nodes/tools.mdx
deleted file mode 100644
index 4bd2d5ca..00000000
--- a/en/guides/workflow/nodes/tools.mdx
+++ /dev/null
@@ -1,64 +0,0 @@
----
-title: Tools
----
-
-
-The workflow provides a rich selection of tools, categorized into three types:
-
-* **Built-in Tools**: Tools provided by Dify.
-* **Custom Tools**: Tools imported or configured via the OpenAPI/Swagger standard format.
-* **Workflows**: Workflows that have been published as tools.
-
-## Add and Use the Tool Node
-
-Before using built-in tools, you may need to **authorize** the tools.
-
-If built-in tools do not meet your needs, you can create custom tools in the **Dify menu navigation -- Tools** section.
-
-You can also orchestrate a more complex workflow and publish it as a tool.
-
-
-
-
-
-
-Configuring a tool node generally involves two steps:
-
-1. Authorizing the tool/creating a custom tool/publishing a workflow as a tool.
-2. Configuring the tool's input and parameters.
-
-For more information on how to create custom tools and configure them, please refer to the [Tool Configuration Guide](/en/guides/tools).
-
-### Advanced Features
-
-**Retry on Failure**
-
-For some exceptions that occur in the node, it is usually sufficient to retry the node again. When the error retry function is enabled, the node will automatically retry according to the preset strategy when an error occurs. You can adjust the maximum number of retries and the interval between each retry to set the retry strategy.
-
-- The maximum number of retries is 10
-- The maximum retry interval is 5000 ms
-
-
-
-**Error Handling**
-
-Tool nodes may encounter errors during information processing that could interrupt the workflow. Developers can follow these steps to configure fail branches, enabling contingency plans when nodes encounter exceptions, avoiding workflow interruptions.
-
-1. Enable "Error Handling" in the tool node
-2. Select and configure an error-handling strategy
-
-
-
-For more information about exception handling approaches, please refer to [Error Handling](/en/guides/workflow/error-handling).
-
-## Publishing Workflow Applications as Tools
-
-Workflow applications can be published as tools and used by nodes in other workflows. For information about creating custom tools and tool configuration, please refer to the [Tool Configuration Guide](/en/guides/tools).
diff --git a/en/guides/workflow/nodes/variable-aggregator.mdx b/en/guides/workflow/nodes/variable-aggregator.mdx
deleted file mode 100644
index 2d02fff3..00000000
--- a/en/guides/workflow/nodes/variable-aggregator.mdx
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: Variable Aggregator
----
-
-
-### 1 Definition
-
-Aggregate variables from multiple branches into a single variable to achieve unified configuration for downstream nodes.
-
-The variable aggregation node (formerly the variable assignment node) is a key node in the workflow. It is responsible for integrating the output results from different branches, ensuring that regardless of which branch is executed, its results can be referenced and accessed through a unified variable. This is particularly useful in multi-branch scenarios, as it maps variables with the same function from different branches into a single output variable, avoiding the need for repeated definitions in downstream nodes.
-
-***
-
-### 2 Scenarios
-
-Through variable aggregation, you can aggregate multiple outputs, such as from issue classification or conditional branching, into a single output for use and manipulation by downstream nodes, simplifying data flow management.
-
-**Multi-Branch Aggregation after Issue Classification**
-
-Without variable aggregation, the branches of Classification 1 and Classification 2, after different knowledge base retrievals, would require repeated definitions for downstream LLM and direct response nodes.
-
-
-
-By adding variable aggregation, the outputs of the two knowledge retrieval nodes can be aggregated into a single variable.
-
-
-
-**Multi-Branch Aggregation after IF/ELSE Conditional Branching**
-
-
-
-### 3 Format Requirements
-
-The variable aggregator supports aggregating various data types, including strings (`String`), numbers (`Number`), objects (`Object`), and arrays (`Array`).
-
-**The variable aggregator can only aggregate variables of the same data type**. If the first variable added to the variable aggregation node is of the `String` data type, subsequent connections will automatically filter and allow only `String` type variables to be added.
-
-**Aggregation Grouping**
-
-Starting from version v0.6.10, aggregation grouping is supported.
-
-When aggregation grouping is enabled, the variable aggregator can aggregate multiple groups of variables, with each group requiring the same data type for aggregation.
diff --git a/en/guides/workflow/nodes/variable-assigner.mdx b/en/guides/workflow/nodes/variable-assigner.mdx
deleted file mode 100644
index 70a8a0cb..00000000
--- a/en/guides/workflow/nodes/variable-assigner.mdx
+++ /dev/null
@@ -1,183 +0,0 @@
----
-title: Variable Assigner
----
-
-
-### Definition
-
-The variable assigner node is used to assign values to writable variables. Currently supported writable variables include:
-
-* [conversation variables](/en/guides/workflow/key-concepts#conversation-variables).
-
-Usage: Through the variable assigner node, you can assign workflow variables to conversation variables for temporary storage, which can be continuously referenced in subsequent conversations.
-
-
-
-***
-
-### Usage Scenario Examples
-
-Using the variable assigner node, you can write context from the conversation process, files uploaded to the dialog box, and user preference information into conversation variables. These stored variables can then be referenced in subsequent conversations to direct different processing flows or formulate responses.
-
-**Scenario 1**
-
-**Automatically judge and extract content**, store history in the conversation, record important user information through the session variable array within the conversation, and use this history content to personalize responses in subsequent chats.
-
-Example: After the conversation starts, LLM will automatically determine whether the user's input contains facts, preferences, or chat history that need to be remembered. If it has, LLM will first extract and store those information, then use it as context to respond. If there is no new information to remember, LLM will directly use the previously relevant memories to answer questions.
-
-
-
-**Configuration process:**
-
-1. **Set Conversation Variables:**
- * First, set up a Conversation Variables array `memories`, of type array\[object] to store user information, preferences, and chat history.
-2. **Determine and Extract Memories:**
- * Add a Conditional Branching node, using LLM to determine whether the user's input contains new information that needs to be remembered.
- * If there's new information, follow the upper branch and use an LLM node to extract this information.
- * If there's no new information, go down the branch and directly use existing memories to answer.
-3. **Variable Assignment/Writing:**
- * In the upper branch, use the variable assigner node to append the newly extracted information to the `memories` array.
- * Use the escape function to convert the text string output by LLM into a format suitable for storage in an array\[object].
-4. **Variable Reading and Usage:**
- * In subsequent LLM nodes, convert the contents of the `memories` array to a string and insert it into the prompt of LLM as context.
- * Use these memories to generate personalized responses.
-
-The code for the node in the upper diagram is as follows:
-
-1. Escape the string to object
-
-```python
-import json
-
-def main(arg1: str) -> object:
- try:
- # Parse the input JSON string
- input_data = json.loads(arg1)
-
- # Extract the memory object
- memory = input_data.get("memory", {})
-
- # Construct the return object
- result = {
- "facts": memory.get("facts", []),
- "preferences": memory.get("preferences", []),
- "memories": memory.get("memories", [])
- }
-
- return {
- "mem": result
- }
- except json.JSONDecodeError:
- return {
- "result": "Error: Invalid JSON string"
- }
- except Exception as e:
- return {
- "result": f"Error: {str(e)}"
- }
-```
-
-2. Escape object as string
-
-```python
-import json
-
-def main(arg1: list) -> str:
- try:
- # Assume arg1[0] is the dictionary we need to process
- context = arg1[0] if arg1 else {}
-
- # Construct the memory object
- memory = {"memory": context}
-
- # Convert the object to a JSON string
- json_str = json.dumps(memory, ensure_ascii=False, indent=2)
-
- # Wrap the JSON string in tags
- result = f"{json_str}"
-
- return {
- "result": result
- }
- except Exception as e:
- return {
- "result": f"Error: {str(e)}"
- }
-```
-
-**Scenario 2**
-
-**Recording initial user preferences input**: Remember the user's language preference input during the conversation and continue to use this language for responses in subsequent chatting.
-
-Example: Before the chatting, the user specifies "English" in the `language` input box. This language will be written to the conversation variable, and the LLM will reference this information when responding, continuing to use "English" in subsequent conversations.
-
-
-
-**Configuration Guide:**
-
-**Set conversation variable**: First, set a conversation variable `language`. Add a condition judgment node at the beginning of the conversation flow to check if the `language` variable is empty.
-
-**Variable writing/assignment**: At the start of the first round of chatting, if the `language` variable is empty, use an LLM node to extract the user's input language, then use a variable assigner node to write this language type into the conversation variable `language`.
-
-**Variable reading**: In subsequent conversation rounds, the `language` variable has stored the user's language preference. The LLM node references the language variable to respond using the user's preferred language type.
-
-**Scenario 3**
-
-**Assisting with Checklist checks**: Record user inputs within the conversation using conversation variables, update the contents of the Checklist, and check for missing items in subsequent conversations.
-
-Example: After starting the conversation, the LLM will ask the user to input items related to the Checklist in the chatting box. Once the user mentions content from the Checklist, it will be updated and stored in the Conversation Variable. The LLM will remind the user to continue supplementing missing items after each round of dialogue.
-
-
-
-**Configuration Process:**
-
-* **Set conversation variable:** First, set a conversation variable `ai_checklist`, and reference this variable within the LLM as context for checking.
-* **variable assigner/writing**: During each round of dialogue, check the value in `ai_checklist` within the LLM node and compare it with user input. If the user provides new information, update the Checklist and write the output content to `ai_checklist` using the variable assigner node.
-* **Variable reading:** Read the value in `ai_checklist` and compare it with user input in each round of dialogue until all checklist items are completed.
-
-***
-
-### Using the Variable Assigner Node
-
-Click the `+` icon on the right side of the node and select the **“Variable Assignment”** node. Configure the target variables and their corresponding source variables. This node allows you to assign values to multiple variables simultaneously.
-
-
-
-**Setting Variables:**
-
-Variable: Select the variable to be assigned, i.e., specify the target conversation variable that needs to be assigned.
-
-Set Variable: Select the variable to assign, i.e., specify the source variable that needs to be converted.
-
-The variable assignment logic illustrated in the image above assigns the user’s language preference, specified on the initial page `Start/language`, to the system-level conversation variable `language`.
-
-### **Operation Modes for Specifying Variables**
-
-The data type of the target variable determines its operation method. Below are the operation modes for different variable types:
-
-1. Target variable data type: `String`
-
- • **Overwrite**: Directly overwrite the target variable with the source variable.
- • **Clear**: Clear the contents of the selected target variable.
- • **Set**: Manually assign a value without requiring a source variable.
-
-2. Target variable data type: `Number`
-
- • **Overwrite**: Directly overwrite the target variable with the source variable.
- • **Clear**: Clear the contents of the selected target variable.
- • **Set**: Manually assign a value without requiring a source variable.
- • **Arithmetic**: Perform addition, subtraction, multiplication, or division on the target variable.
-
-3. Target variable data type: `Object`
-
- • **Overwrite**: Directly overwrite the target variable with the source variable.
- • **Clear**: Clear the contents of the selected target variable.
- • **Set**: Manually assign a value without requiring a source variable.
-
-4. Target variable data type: `Array`
-
- • **Overwrite**: Directly overwrite the target variable with the source variable.
- • **Clear**: Clear the contents of the selected target variable.
- • **Append**: Add a new element to the array in the target variable.
- • **Extend**: Add a new array to the target variable, effectively adding multiple elements at once.
-
diff --git a/en/guides/workflow/shortcut-key.mdx b/en/guides/workflow/shortcut-key.mdx
index 27552d06..19a30dbb 100644
--- a/en/guides/workflow/shortcut-key.mdx
+++ b/en/guides/workflow/shortcut-key.mdx
@@ -3,7 +3,7 @@ title: Shortcut Key
---
-The Chatflow / Workflow application orchestration page supports the following shortcut keys to help you improve the efficiency of orchestration nodes.
+The Chatflow / Workflow application orchestration page supports the following shortcut keys to help you improve the efficiency of orchestration blocks.
@@ -17,22 +17,22 @@ The Chatflow / Workflow application orchestration page supports the following sh
Ctrl + C
Command + C
-
Copy nodes
+
Copy blocks
Ctrl + V
Command + V
-
Paste nodes
+
Paste blocks
Ctrl + D
Command + D
-
Duplicate nodes
+
Duplicate blocks
Ctrl + O
Command + O
-
Organize nodes
+
Organize blocks
Ctrl + Z
@@ -87,7 +87,7 @@ The Chatflow / Workflow application orchestration page supports the following sh
Delete/Backspace
Delete/Backspace
-
Delete selected nodes
+
Delete selected blocks
Alt + R
diff --git a/en/learn-more/use-cases/build-an-notion-ai-assistant copy.mdx b/en/learn-more/use-cases/build-an-notion-ai-assistant copy.mdx
new file mode 100644
index 00000000..ab52ba17
--- /dev/null
+++ b/en/learn-more/use-cases/build-an-notion-ai-assistant copy.mdx
@@ -0,0 +1,167 @@
+---
+title: Build a Notion AI Assistant
+---
+
+
+### Intro
+
+Notion is a powerful tool for managing knowledge. Its flexibility and extensibility make it an excellent personal knowledge library and shared workspace. Many people use it to store their knowledge and work in collaboration with others, facilitating the exchange of ideas and the creation of new knowledge.
+
+However, this knowledge remains static, as users must search for the information they need and read through it to find the answers they're seeking. This process is neither particularly efficient nor intelligent.
+
+Have you ever dreamed of having an AI assistant based on your Notion library? This assistant would not only assist you in reviewing your knowledge base, but also engage in communication like a seasoned butler, even answering other people's questions as if you were the master of your personal Notion library.
+
+### How to Make Your Notion AI Assistant Come True?
+
+Now, you can make this dream come true through [Dify](https://dify.ai/). Dify is an open-source LLMOps (Large Language Models Ops) platform.
+
+Large Language Models like ChatGPT and Claude have been using their impressive abilities to reshape the world. Their powerful learning aptitude is primarily attributable to robust training data. Luckily, they've evolved to be sufficiently intelligent to learn from the content you provide, thus making the process of ideating from your personal Notion library a reality.
+
+Without Dify, you might need to acquaint yourself with langchain, an abstraction that streamlines the process of assembling these pieces.
+
+### How to Use Dify to Build Your Personal Notion AI Assistant?
+
+The process to train a Notion AI assistant is relatively straightforward. Just follow these steps:
+
+1. Login to Dify.
+2. Create a new dataset.
+3. Connect Notion with your dataset.
+4. Start training.
+5. Create your own AI application.
+
+#### 1. Login to dify
+
+Click [here](https://dify.ai/) to login to Dify. You can conveniently log in using your GitHub or Google account.
+
+> If you are using a GitHub account to log in, how about giving this [project](https://github.com/langgenius/dify) a star? It really helps us a lot!
+
+#### 2. Create new knowledge base
+
+Click the `Knowledge` button on the top side bar, followed by the `Create Knowledge` button.
+
+
+
+#### 3. Connect with Notion and Your Knowledge[](https://wsyfin.com/notion-dify#3-connect-with-notion-and-datasets)
+
+Select "Sync from Notion" and then click the "Connect" button.
+
+
+
+Afterward, you'll be redirected to the Notion login page. Log in with your Notion account.
+
+
+
+Check the permissions needed by Dify, and then click the "Select pages" button.
+
+
+
+Select the pages you want to synchronize with Dify, and press the "Allow access" button.
+
+
+
+#### 4. Start training[](https://wsyfin.com/notion-dify#4-start-training)
+
+Specify the pages that the AI needs to study, enabling it to comprehend the content within this section of Notion. Then click the "Next" button.
+
+
+
+We suggest selecting the "Automatic" and "High Quality" options to train your AI assistant. Then click the "Save & Process" button.
+
+
+
+Enjoy your coffee while waiting for the training process to complete.
+
+
+
+#### 5. Create Your AI application[](https://wsyfin.com/notion-dify#5-create-your-ai-application)
+
+You must create an AI application and link it with the knowledge you've recently created.
+
+Return to the dashboard, and click the "Create new APP" button. It's recommended to use the Chat App directly.
+
+
+
+Select the "Prompt Eng." and link your notion datasets in the "context".
+
+
+
+I recommend adding a 'Pre Prompt' to your AI application. Just as spells are essential to Harry Potter, certain tools or features can greatly enhance the ability of an AI application.
+
+For example, if your Notion notes focus on problem-solving in software development, you could write in one of the prompts:
+
+_I want you to act as an IT Expert in my Notion workspace, using your knowledge of computer science, network infrastructure, Notion notes, and IT security to solve the problems_.
+
+
+
+It's recommended to initially enable the AI to actively furnish the users with a starter sentence, providing a clue as to what they can ask. Furthermore, activating the 'Speech to Text' feature can allow users to interact with your AI assistant using their voice.
+
+
+
+Finally, Click the "Publish" button on the top right of the page. Now you can click the public URL in the "Monitoring" section to converse with your personalized AI assistant!
+
+
+
+### Utilizing API to Integrate With Your Project
+
+Each AI application built with Dify can be accessed via its API. This method allows developers to tap directly into the robust characteristics of large language models (LLMs) within frontend applications, delivering a true "Backend-as-a-Service" (BaaS) experience.
+
+With effortless API integration, you can conveniently invoke your Notion AI application without the need for intricate configurations.
+
+Click the "API Reference" button on the page of Overview page. You can refer to it as your App's API document.
+
+
+
+#### 1. Generate API Secret Key[](https://wsyfin.com/notion-dify#1-generate-api-secret-key)
+
+For security reasons, it's recommended to create a new API secret key to access your AI application.
+
+
+
+#### 2. Retrieve Conversation ID[](https://wsyfin.com/notion-dify#2-retrieve-conversation-id)
+
+After chatting with your AI application, you can retrieve the session ID from the "Logs & Ann." pages.
+
+
+
+#### 3. Invoke API[](https://wsyfin.com/notion-dify#3-invoke-api)
+
+You can run the example request code on the API document to invoke your AI application in terminal.
+
+Remember to replace `YOUR SECRET KEY` and `conversation_id` on your code.
+
+> You can input an empty `conversation_id` the first time, and replace it after you receive a response containing a `conversation_id`.
+
+```
+curl --location --request POST 'https://api.dify.ai/v1/chat-messages' \
+--header 'Authorization: Bearer ENTER-YOUR-SECRET-KEY' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+ "inputs": {},
+ "query": "eh",
+ "response_mode": "streaming",
+ "conversation_id": "",
+ "user": "abc-123"
+}'
+```
+
+Send the request in the terminal and you will get a successful response.
+
+
+
+If you want to continue this chat, replace the `conversation_id` in the request code with the `conversation_id` you get from the response.
+
+And you can check all the conversation history on the "Logs & Ann." page.
+
+
+
+### Sync with notion periodically[](https://wsyfin.com/notion-dify#sync-with-notion-periodically)
+
+If your Notion pages have been updated, you can sync with Dify periodically to keep your AI assistant up-to-date. Your AI assistant will learn from the new content.
+
+
+
+### Summary
+
+In this tutorial, we have learned not only how to import your Notion data into Dify, but also how to use the API to integrate it with your project.
+
+[Dify](https://dify.ai/) is a user-friendly LLMOps platform targeted to empower more individuals to create sustainable, AI-native applications. With visual orchestration designed for various application types, Dify offers ready-to-use applications that can assist you in utilizing data to craft your distinctive AI assistant. Do not hesitate to contact us if you have any inquiries.
diff --git a/en/learn-more/use-cases/building-an-ai-thesis-slack-bot.mdx b/en/learn-more/use-cases/building-an-ai-thesis-slack-bot.mdx
index a048db86..1d28a4eb 100644
--- a/en/learn-more/use-cases/building-an-ai-thesis-slack-bot.mdx
+++ b/en/learn-more/use-cases/building-an-ai-thesis-slack-bot.mdx
@@ -2,7 +2,6 @@
title: Building an AI Thesis Slack Bot on Dify Cloud
---
-
> Author:Alec Lee. 2025/03/11
## 1. Overview
diff --git a/en/learn-more/use-cases/create-an-ai-chatbot-with-business-data-in-minutes.mdx b/en/learn-more/use-cases/create-an-ai-chatbot-with-business-data-in-minutes.mdx
index 4fafe15d..ddaaa4d2 100644
--- a/en/learn-more/use-cases/create-an-ai-chatbot-with-business-data-in-minutes.mdx
+++ b/en/learn-more/use-cases/create-an-ai-chatbot-with-business-data-in-minutes.mdx
@@ -9,7 +9,7 @@ AI-powered customer service may be a standard feature for every business website
**Register or Deploy Dify.AI**
-Dify is an open source product which you can find on[ GitHub](https://github.com/langgenius/dify) and deploy it to your local or company intranet. Meanwhile, it provides a cloud SaaS version, access [Didy.AI ](https://dify.ai/)to register and use it.
+Dify is an open source product which you can find on[ GitHub](https://github.com/langgenius/dify) and deploy it to your local or company intranet. Meanwhile, it provides a cloud SaaS version, access [Dify.AI ](https://dify.ai/)to register and use it.
**Apply for API key from OpenAI and other model providers.**
diff --git a/en/learn-more/use-cases/private-ai-ollama-deepseek-dify.mdx b/en/learn-more/use-cases/private-ai-ollama-deepseek-dify.mdx
index 4599ca0d..874e0e2e 100644
--- a/en/learn-more/use-cases/private-ai-ollama-deepseek-dify.mdx
+++ b/en/learn-more/use-cases/private-ai-ollama-deepseek-dify.mdx
@@ -79,9 +79,9 @@ Go to **Profile → Settings → Model Providers** in the Dify platform. Select
> Note: The “DeepSeek” option in Model Providers refers to the online API service, whereas the Ollama option is used for a locally deployed DeepSeek model.
-Configure the Model:
+Configure the Model:
-• **Model Name**: Enter the deployed model name, e.g., `deepseek-r1:7b`.
+• **Model Name**: Enter the deployed model name, e.g., `deepseek-r1:7b`.
• **Base URL**: Set the Ollama client’s local service URL, typically `http://your_server_ip:11434`. If you encounter connection issues, please refer to the [FAQ](https://docs.dify.ai/learn-more/use-cases/private-ai-ollama-deepseek-dify#id-1.-connection-errors-when-using-docker);
diff --git a/en/plugins/best-practice/how-to-use-mcp-zapier.mdx b/en/plugins/best-practice/how-to-use-mcp-zapier.mdx
new file mode 100644
index 00000000..376e8251
--- /dev/null
+++ b/en/plugins/best-practice/how-to-use-mcp-zapier.mdx
@@ -0,0 +1,129 @@
+---
+title: Dify MCP Plugin Guide | One-Click Connection to Zapier and Automated Email Sending
+---
+
+**This article will help you:**
+
+This guide walks you through installing the MCP SSE plugin in your Dify workspace and configuring it to connect with Zapier’s MCP service for seamless automated email workflows.
+
+## Background
+
+Anthropic introduced the Model Context Protocol (MCP) in late 2024. As an emerging open protocol, MCP builds a bidirectional communication channel between LLMs and external applications, functioning like a "USB-C" interface for AI, helping models discover, understand, and safely call various external tools or APIs.
+
+In other words, developers no longer need to build complex custom integrations for each external service. Users can experience AI's ability to easily call a vast array of third-party applications, whether processing daily office tasks, analyzing data, or executing marketing automation. AI is moving from "intelligent conversation" to "efficient action."
+
+In the Dify community, MCP has garnered significant attention. Community developers have contributed multiple MCP plugins to the plugin marketplace, allowing you to easily connect external MCP services to Dify Agent applications or workflows.
+
+This article will use the MCP SSE plugin as an example to detail how to use MCP plugins within Dify to connect to Zapier and complete an automated email sending process.
+
+## Prerequisites
+
+- Dify Cloud version / Dify Community version ≥ v1.0.0
+- Zapier account
+
+## Initializing Zapier MCP Service in Dify
+
+Zapier's MCP Server packages its existing 7000+ applications and 30,000+ actions into a single MCP Server URL. You can select and configure the tools and actions you need in the Zapier backend, such as sending emails, creating records in CRM, sending notifications in Slack, etc. Simply enter the MCP Server URL into the Dify MCP plugin configuration, and the LLM can automatically call these tools to complete various tasks during conversations or processes.
+
+### Step 1: Request a Zapier MCP Server URL
+
+1. Visit the [Zapier MCP Settings page](https://actions.zapier.com/settings/mcp/).
+2. Obtain the MCP Server URL, which will be entered into the Dify plugin configuration later.
+
+
+
+3. Click "Edit MCP Actions" below the URL to enter the tools and actions addition page.
+
+4. Click **Add a new action**, then search for Gmail: Send Email.
+
+
+
+5. Taking "Send Email" as an example:
+
+Click "Connect" under the Gmail account, then log in and authorize your Gmail inbox.
+
+For fields like recipient, subject, and body, you can choose “Have AI guess a value for this field”. The Agent will then generate content dynamically based on the conversation context.
+
+
+
+6. After completing the setup, you can continue to add different Actions to enrich the Agent's available toolkit.
+
+### Step 2: Install the MCP SSE Plugin
+
+1. Go to the Dify plugin marketplace, search for the MCP SSE plugin and click install.
+
+
+
+> For optimal stability, we recommend using plugin version **v0.0.4**. You can change the version in the plugin’s detail settings.
+> 
+
+
+2. Click the To Authorize button on the plugin page, and paste the Zapier MCP Server URL obtained earlier into the plugin. Reference address format:
+
+```json
+{
+ "server_name": {
+ "url": "https://actions.zapier.com/mcp/*******/sse",
+ "headers": {},
+ "timeout": 5,
+ "sse_read_timeout": 300
+ }
+}
+```
+
+
+
+### Step 3: Create a Dify Agent Application and Enable MCP SSE Service
+
+1. Create an Agent-type application
+
+From the navigation, select "Studio", choose "Create from Blank" in the application list and select the Agent application type, then complete the creation by entering an application name.
+
+
+
+2. Add MCP tools
+
+Add both `Fetch MCP Tools` and `Call MCP Tool` in the application toolbar.
+
+
+
+3. Configure LLM
+
+Using MCP may consume a large number of tokens, so it is recommended to use a more cost-effective LLM. This article will use the `deepseek-chat` model as an example. You can apply for an API Key from [DeepSeek Platform](https://platform.deepseek.com/usage) and enter it in "Settings" → "Model Providers" → "DeepSeek".
+
+> If you don't find the DeepSeek model in your model providers, please go to the Dify plugin marketplace to install the DeepSeek plugin.
+
+## Use Case 1: Using MCP Service to Automatically Send a Single Email
+
+After configuration, you can automatically generate email drafts and send them to specified recipients simply by chatting with the Agent.
+
+
+
+Simply instruct the Agent via chat to send an email, and it will handle the task using the configured MCP service. After the MCP finishes running, the email will be automatically sent to the recipient.
+
+
+
+## Use Case 2: Configuring MCP Agent Strategy in Workflow
+
+In addition to adding the MCP SSE plugin as a tool to the Agent, you can also use the MCP Agent Strategy plugin in Workflow. After installation, configure it in the corresponding Agent node as follows:
+
+
+
+Use the following JSON structure as a template, replacing the `url` value with the MCP Server address, then copy and paste the complete modified JSON into the `MCP SERVER URL` configuration box:
+
+```json
+{
+ "server_name": {
+ "url": "https://actions.zapier.com/mcp/*******/sse",
+ "headers": {},
+ "timeout": 5,
+ "sse_read_timeout": 300
+ }
+}
+```
+
+
+
+Once configured, when the Workflow runs to that Agent node, it can execute tasks using the configured Zapier MCP Server according to the Prompt instructions. As shown below, calling Gmail to send an email:
+
+
diff --git a/en/plugins/introduction.mdx b/en/plugins/introduction.mdx
index 8e985ba1..9e26ed1a 100644
--- a/en/plugins/introduction.mdx
+++ b/en/plugins/introduction.mdx
@@ -15,7 +15,7 @@ To enable more agile development, we have opened up the ecosystem and provided a
The new plugin system goes beyond the limitations of the previous framework, offering richer and more powerful extension capabilities. It contains five distinct plugin types, each designed to solve well-defined scenarios, giving developers limitless freedom to customize and enhance Dify applications.
-Additionally, the plugin system is designed to be easily shared. You can distribute your plugins via the [Dify Marketplace](https://marketplace.dify.ai/), [GitHub](/en/plugins/publish-plugins/publish-plugin-on-personal-github-repo), or as a [Local file package](/en/plugins/publish-plugins/package-plugin-file-and-publish). Other developers can quickly install these plugins and benefit from them.
+Additionally, the plugin system is designed to be easily shared. You can distribute your plugins via the [Dify Marketplace](https://marketplace.dify.ai/), [GitHub](/en/plugins/publish-plugins/publish-plugin-on-personal-github-repo), or as a [Local file package](/en/plugins/publish-plugins/package-plugin-file-and-publish.mdx). Other developers can quickly install these plugins and benefit from them.
> Dify Marketplace is an open ecosystem designed for developers, offering a broad range of resources—models, tools, AI Agents, Extensions, and plugin bundles. You can seamlessly integrate third-party services into your existing Dify applications through the Marketplace, enhancing their capabilities and advancing the overall Dify community.
@@ -58,7 +58,7 @@ Whether you're looking to integrate a new model or add a specialized tool to exp
* **Built-In Data Management**: Plugins can reliably store and manage data, making it easier to implement complex business logic.
* **Convenient Reverse Invocation**
- Plugins can now interact bidirectionally with Dify's core functions, including:
+ Plugins can now interact bidirectionally with Dify's core functions, including:
* AI model invokes
* Tool usage
diff --git a/en/plugins/publish-plugins/README.mdx b/en/plugins/publish-plugins/README.mdx
index 6d9b3e1f..468d258e 100644
--- a/en/plugins/publish-plugins/README.mdx
+++ b/en/plugins/publish-plugins/README.mdx
@@ -2,7 +2,6 @@
title: Publish Plugins
---
-
### Publish Methods
To accommodate the various publishing needs of developers, Dify provides three plugin publish methods:
@@ -70,4 +69,6 @@ For detailed instructions, please refer to:
* **Looking to promote a plugin** → **Recommended to use the Marketplace**, ensuring plugin quality through official review and increasing exposure.
* **Open-source sharing project** → **Recommended to use GitHub**, convenient for version management and community collaboration.
-* **Quick distribution or internal testing** → **Recommended to use plugin file**, allowing for straightforward and efficient installation and sharing.
\ No newline at end of file
+* **Quick distribution or internal testing** → **Recommended to use plugin file**, allowing for straightforward and efficient installation and sharing.
+
+> When installing plugins that are not from the Dify Marketplace, you may encounter third-party signature verification issues. For solutions, please refer to [Signing Plugins for Third-Party Signature Verification](./signing-plugins-for-third-party-signature-verification.mdx).
diff --git a/en/plugins/publish-plugins/package-plugin-file-and-publish.mdx b/en/plugins/publish-plugins/package-plugin-file-and-publish.mdx
index d30f1fb1..1ba6e80a 100644
--- a/en/plugins/publish-plugins/package-plugin-file-and-publish.mdx
+++ b/en/plugins/publish-plugins/package-plugin-file-and-publish.mdx
@@ -2,7 +2,6 @@
title: Package the Plugin File and Publish it
---
-
After completing plugin development, you can package your plugin project as a local file and share it with others. Once the plugin file is obtained, it can be installed into a Dify Workspace. This guide will show you how to package a plugin project as a local file and how to install plugins using local files.
### **Prerequisites**
diff --git a/en/plugins/publish-plugins/plugin-auto-publish-pr.mdx b/en/plugins/publish-plugins/plugin-auto-publish-pr.mdx
new file mode 100644
index 00000000..ebf66f74
--- /dev/null
+++ b/en/plugins/publish-plugins/plugin-auto-publish-pr.mdx
@@ -0,0 +1,298 @@
+---
+title: "Automatically Publish Plugins via PR"
+---
+
+### Background
+
+Updating plugins that others are actively using can be tedious. Traditionally, you would need to modify code, bump versions, push changes, create branches, package files, and submit PRs manually - a repetitive process that slows down development.
+
+Thus, we have created **Plugin Auto-PR**, a GitHub Actions workflow that automates the entire process. Now you can package, push, and create PRs with a single action, focusing on building great plugins.
+
+### Concepts
+
+#### GitHub Actions
+
+GitHub Actions automates your development tasks in GitHub.
+
+**How it works**: When triggered (e.g., by a code push), it runs your workflow in a cloud-based virtual machine, handling everything from build to deployment automatically.
+
+
+
+**Limits**:
+
+* Public repositories: Unlimited
+* Private repositories: 2000 minutes per month
+
+#### Plugin Auto-PR
+
+**How it works**:
+
+1. Workflow triggers when you push code to the main branch of your plugin source repository
+2. Workflow reads plugin information from the `manifest.yaml` file
+3. Automatically packages the plugin as a `.difypkg` file
+4. Pushes the packaged file to your forked `dify-plugins` repository
+5. Creates a new branch and commits changes
+6. Automatically creates a PR to merge into the upstream repository
+
+### Prerequisites
+
+#### Repository
+
+* You already have your own plugin source code repository (e.g., `your-name/plugin-source`)
+* You already have your own forked plugin repository (e.g., `your-name/dify-plugins`)
+* Your forked repository already has the plugin directory structure:
+
+```
+dify-plugins/
+└── your-author-name
+ └── plugin-name
+```
+
+#### Permission
+
+This workflow requires appropriate permissions to function:
+
+* You need to create a GitHub Personal Access Token (PAT) with sufficient permissions
+* The PAT must have permission to push code to your forked repository
+* The PAT must have permission to create PRs to the upstream repository
+
+### Parameters and Configuration
+
+#### Setup Requirements
+
+To get started with auto-publishing, you will need two key components:
+
+**manifest.yaml file**: This file drives the automation process:
+
+* `name`: Your plugin’s name (affects package and branch names)
+* `version`: Semantic version number (increment with each release)
+* `author`: Your GitHub username (determines repository paths)
+
+**PLUGIN\_ACTION Secret**: You need to add this secret to your plugin source repository:
+
+* Value: Must be a Personal Access Token (PAT) with sufficient permissions
+* Permission: Ability to push branches to your forked repository and create PRs to the upstream repository
+
+#### Automatically-Generated Parameters
+
+Once set up, the workflow automatically handles these parameters:
+
+* GitHub username: Read from the `author` field in `manifest.yaml`
+* Author folder name: Consistent with the `author` field
+* Plugin name: Read from the `name` field in `manifest.yaml`
+* Branch name: `bump-{plugin-name}-plugin-{version}`
+* Package filename: `{plugin-name}-{version}.difypkg`
+* PR title and content: Automatically generated based on plugin name and version
+
+### Step-by-Step Guide
+
+
+
+ Ensure you have forked the official `dify-plugins` repository and have your own plugin source repository.
+
+
+ Navigate to your plugin source repository, click **Settings > Secrets and variables > Actions > New repository secret**, and create a GitHub Secret:
+
+ * Name: `PLUGIN_ACTION`
+ * Value: GitHub Personal Access Token (PAT) with write permissions to the target repository (`your-name/dify-plugins`)
+
+
+
+
+ Create a `.github/workflows/` directory in your repository, create a file named `plugin-publish.yml` in this directory, and copy the following content into the file:
+
+ ```yaml
+ # .github/workflows/plugin-publish.yml
+ name: Auto Create PR on Main Push
+
+ on:
+ push:
+ branches: [ main ] # Trigger on push to main
+
+ jobs:
+ create_pr: # Renamed job for clarity
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v3
+
+ - name: Print working directory # Kept for debugging
+ run: |
+ pwd
+ ls -la
+
+ - name: Download CLI tool
+ run: |
+ # Create bin directory in runner temp
+ mkdir -p $RUNNER_TEMP/bin
+ cd $RUNNER_TEMP/bin
+
+ # Download CLI tool
+ wget https://github.com/langgenius/dify-plugin-daemon/releases/download/0.0.6/dify-plugin-linux-amd64
+ chmod +x dify-plugin-linux-amd64
+
+ # Show download location and file
+ echo "CLI tool location:"
+ pwd
+ ls -la dify-plugin-linux-amd64
+
+ - name: Get basic info from manifest # Changed step name and content
+ id: get_basic_info
+ run: |
+ PLUGIN_NAME=$(grep "^name:" manifest.yaml | cut -d' ' -f2)
+ echo "Plugin name: $PLUGIN_NAME"
+ echo "plugin_name=$PLUGIN_NAME" >> $GITHUB_OUTPUT
+
+ VERSION=$(grep "^version:" manifest.yaml | cut -d' ' -f2)
+ echo "Plugin version: $VERSION"
+ echo "version=$VERSION" >> $GITHUB_OUTPUT
+
+ # If the author's name is not your github username, you can change the author here
+ AUTHOR=$(grep "^author:" manifest.yaml | cut -d' ' -f2)
+ echo "Plugin author: $AUTHOR"
+ echo "author=$AUTHOR" >> $GITHUB_OUTPUT
+
+ - name: Package Plugin
+ id: package
+ run: |
+ # Use the downloaded CLI tool to package
+ cd $GITHUB_WORKSPACE
+ # Use variables for package name
+ PACKAGE_NAME="${{ steps.get_basic_info.outputs.plugin_name }}-${{ steps.get_basic_info.outputs.version }}.difypkg"
+ # Use CLI from runner temp
+ $RUNNER_TEMP/bin/dify-plugin-linux-amd64 plugin package . -o "$PACKAGE_NAME"
+
+ # Show packaging result
+ echo "Package result:"
+ ls -la "$PACKAGE_NAME"
+ echo "package_name=$PACKAGE_NAME" >> $GITHUB_OUTPUT
+
+ # Show full file path and directory structure (kept for debugging)
+ echo "\\nFull file path:"
+ pwd
+ echo "\\nDirectory structure:"
+ tree || ls -R
+
+ - name: Checkout target repo
+ uses: actions/checkout@v3
+ with:
+ # Use author variable for repository
+ repository: ${{steps.get_basic_info.outputs.author}}/dify-plugins
+ path: dify-plugins
+ token: ${{ secrets.PLUGIN_ACTION }}
+ fetch-depth: 1 # Fetch only the last commit to speed up checkout
+ persist-credentials: true # Persist credentials for subsequent git operations
+
+ - name: Prepare and create PR
+ run: |
+ # Debug info (kept)
+ echo "Debug: Current directory $(pwd)"
+ # Use variable for package name
+ PACKAGE_NAME="${{ steps.get_basic_info.outputs.plugin_name }}-${{ steps.get_basic_info.outputs.version }}.difypkg"
+ echo "Debug: Package name: $PACKAGE_NAME"
+ ls -la
+
+ # Move the packaged file to the target directory using variables
+ mkdir -p dify-plugins/${{ steps.get_basic_info.outputs.author }}/${{ steps.get_basic_info.outputs.plugin_name }}
+ mv "$PACKAGE_NAME" dify-plugins/${{ steps.get_basic_info.outputs.author }}/${{ steps.get_basic_info.outputs.plugin_name }}/
+
+ # Enter the target repository directory
+ cd dify-plugins
+
+ # Configure git
+ git config user.name "GitHub Actions"
+ git config user.email "actions@github.com"
+
+ # Ensure we are on the latest main branch
+ git fetch origin main
+ git checkout main
+ git pull origin main
+
+ # Create and switch to a new branch using variables and new naming convention
+ BRANCH_NAME="bump-${{ steps.get_basic_info.outputs.plugin_name }}-plugin-${{ steps.get_basic_info.outputs.version }}"
+ git checkout -b "$BRANCH_NAME"
+
+ # Add and commit changes (using git add .)
+ git add .
+ git status # for debugging
+ # Use variables in commit message
+ git commit -m "bump ${{ steps.get_basic_info.outputs.plugin_name }} plugin to version ${{ steps.get_basic_info.outputs.version }}"
+
+ # Push to remote (use force just in case the branch existed before from a failed run)
+ git push -u origin "$BRANCH_NAME" --force
+
+ # Confirm branch has been pushed and wait for sync (GitHub API might need a moment)
+ git branch -a
+ echo "Waiting for branch to sync..."
+ sleep 10 # Wait 10 seconds for branch sync
+
+ - name: Create PR via GitHub API
+ env:
+ GH_TOKEN: ${{ secrets.PLUGIN_ACTION }} # Use the provided token for authentication
+ run: |
+ gh pr create \
+ --repo langgenius/dify-plugins \
+ --head "${{ steps.get_basic_info.outputs.author }}:bump-${{ steps.get_basic_info.outputs.plugin_name }}-plugin-${{ steps.get_basic_info.outputs.version }}" \
+ --base main \
+ --title "bump ${{ steps.get_basic_info.outputs.plugin_name }} plugin to version ${{ steps.get_basic_info.outputs.version }}" \
+ --body "bump ${{ steps.get_basic_info.outputs.plugin_name }} plugin package to version ${{ steps.get_basic_info.outputs.version }}
+
+ Changes:
+ - Updated plugin package file" || echo "PR already exists or creation skipped." # Handle cases where PR already exists
+
+ - name: Print environment info # Kept for debugging
+ run: |
+ echo "GITHUB_WORKSPACE: $GITHUB_WORKSPACE"
+ echo "Current directory contents:"
+ ls -R
+ ```
+
+
+ Ensure the following fields are correctly set:
+
+ ```yaml
+ version: 0.0.x # Version number
+ author: your-github-username # GitHub username/Author name
+ name: your-plugin-name # Plugin name
+ ```
+
+
+
+### Usage Guide
+
+#### First-time Setup
+
+When setting up the auto-publish workflow for the first time, complete these steps:
+
+1. Ensure you have forked the official `dify-plugins` repository
+2. Ensure your plugin source repository structure is correct
+3. Set up the `PLUGIN_ACTION Secret` in your plugin source repository
+4. Create the workflow file `.github/workflows/plugin-publish.yml`
+5. Ensure the `name` and `author` fields in the `manifest.yaml` file are correctly configured
+
+#### Subsequent Update
+
+To publish new versions after setup:
+
+1. Modify the code
+2. Update the `version` field in `manifest.yaml`
+
+
+
+3. Push all changes to the main branch
+4. Wait for GitHub Actions to complete packaging, branch creation, and PR submission
+
+### Outcome
+
+When you push code to the main branch of your plugin source repository, GitHub Actions will automatically execute the publishing process:
+
+* Package the plugin in `{plugin-name}-{version}.difypkg` format
+* Push the packaged file to the target repository
+* Create a PR to merge into the upstream repository
+
+
+
+### Example Repository
+
+See [example repository](https://github.com/Yevanchen/exa-in-dify) to understand configuration and best practices.
+
diff --git a/en/plugins/publish-plugins/signing-plugins-for-third-party-signature-verification.mdx b/en/plugins/publish-plugins/signing-plugins-for-third-party-signature-verification.mdx
new file mode 100644
index 00000000..104a185b
--- /dev/null
+++ b/en/plugins/publish-plugins/signing-plugins-for-third-party-signature-verification.mdx
@@ -0,0 +1,96 @@
+---
+title: Signing Plugins for Third-Party Signature Verification
+---
+
+This feature is available only in the Dify Community Edition. Third-party signature verification is not currently supported on Dify Cloud Edition.
+
+Third-party signature verification allows Dify administrators to safely approve the installation of plugins not listed on the Dify Marketplace without completely disabling signature verification. This supports the following scenarios for example:
+
+* Dify administrators can add a signature to a plugin sent by the developer once it has been approved.
+* Plugin developers can add a signature to their plugin and publish it along with the public key for Dify administrators who cannot disable signature verification.
+
+Both Dify administrators and plugin developers can add a signature to a plugin using a pre-generated key pair. Additionally, administrators can configure Dify to enforce signature verification using specific public keys during plugin installation.
+
+## Generating a Key Pair for Signing and Verification
+
+Generate a new key pair for adding and verifying the plugin's signature with the following command:
+
+```bash
+dify signature generate -f your_key_pair
+```
+
+After running this command, two files will be generated in the current directory:
+
+* **Private Key**: `your_key_pair.private.pem`
+* **Public Key**: `your_key_pair.public.pem`
+
+The private key is used to sign the plugin, and the public key is used to verify the plugin's signature.
+
+Keep the private key secure. If it is compromised, an attacker could add a valid signature to any plugin, which would compromise Dify's security.
+
+## Adding a Signature to the Plugin and Verifying It
+
+Add a signature to your plugin by running the following command. Note that you must specify the **plugin file to sign** and the **private key**:
+
+```bash
+dify signature sign your_plugin_project.difypkg -p your_key_pair.private.pem
+```
+
+After executing the command, a new plugin file will be generated in the same directory with `signed` added to its original filename: `your_plugin_project.signed.difypkg`
+
+You can verify that the plugin has been correctly signed using this command. Here, you need to specify the **signed plugin file** and the **public key**:
+
+```bash
+dify signature verify your_plugin_project.signed.difypkg -p your_key_pair.public.pem
+```
+
+If you omit the public key argument, verification will use the Dify Marketplace public key. In that case, signature verification will fail for any plugin file not downloaded from the Dify Marketplace.
+
+## Enabling Third-Party Signature Verification
+
+Dify administrators can enforce signature verification using pre-approved public keys before installing a plugin.
+
+### Placing the Public Key
+
+Place the **public key** corresponding to the private key used for signing in a location that the plugin daemon can access.
+
+For example, create a `public_keys` directory under `docker/volumes/plugin_daemon` and copy the public key file there:
+
+```bash
+mkdir docker/volumes/plugin_daemon/public_keys
+cp your_key_pair.public.pem docker/volumes/plugin_daemon/public_keys
+```
+
+### Environment Variable Configuration
+
+In the `plugin_daemon` container, configure the following environment variables:
+
+* `THIRD_PARTY_SIGNATURE_VERIFICATION_ENABLED`
+ * Enables third-party signature verification.
+ * Set this to `true` to enable the feature.
+* `THIRD_PARTY_SIGNATURE_VERIFICATION_PUBLIC_KEYS`
+ * Specifies the path(s) to the public key file(s) used for signature verification.
+ * You can list multiple public key files separated by commas.
+
+Below is an example of a Docker Compose override file (`docker-compose.override.yaml`) configuring these variables:
+
+```yaml
+services:
+ plugin_daemon:
+ environment:
+ FORCE_VERIFYING_SIGNATURE: true
+ THIRD_PARTY_SIGNATURE_VERIFICATION_ENABLED: true
+ THIRD_PARTY_SIGNATURE_VERIFICATION_PUBLIC_KEYS: /app/storage/public_keys/your_key_pair.public.pem
+```
+
+Note that `docker/volumes/plugin_daemon` is mounted to `/app/storage` in the `plugin_daemon` container. Ensure that the path specified in `THIRD_PARTY_SIGNATURE_VERIFICATION_PUBLIC_KEYS` corresponds to the path inside the container.
+
+To apply these changes, restart the Dify service:
+
+```bash
+cd docker
+docker compose down
+docker compose up -d
+```
+
+After restarting the service, the third-party signature verification feature will be enabled in the current Community Edition environment.
diff --git a/en/plugins/quick-start/debug-plugin.mdx b/en/plugins/quick-start/debug-plugin.mdx
index 572fbe1b..bb5e6e85 100644
--- a/en/plugins/quick-start/debug-plugin.mdx
+++ b/en/plugins/quick-start/debug-plugin.mdx
@@ -9,7 +9,7 @@ Once plugin development is complete, the next step is to test whether the plugin
Go to ["Plugin"](https://cloud.dify.ai/plugins) page to get the debugging key and remote URL.
-
+
Go back to the plugin project, copy the `.env.example` file and rename it to `.env`. Fill it with the remote server address and debug key.
diff --git a/en/plugins/quick-start/develop-plugins/README.mdx b/en/plugins/quick-start/develop-plugins/README.mdx
index f72f8238..0d53983b 100644
--- a/en/plugins/quick-start/develop-plugins/README.mdx
+++ b/en/plugins/quick-start/develop-plugins/README.mdx
@@ -49,7 +49,7 @@ If you want to read detailed interface documentation for plugin projects, you ca
### **Contribution Guidelines**
-Want to contribute code and features to Dify Marketplace?
+Want to contribute code and features to Dify Marketplace?
We provide detailed development guidelines and contribution guidelines to help you understand our architecture design and contribution process:
diff --git a/en/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx b/en/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx
index 6a445c61..252214fe 100644
--- a/en/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx
+++ b/en/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx
@@ -2,21 +2,18 @@
title: Agent Strategy Plugin
---
-
An **Agent Strategy Plugin** helps an LLM carry out tasks like reasoning or decision-making, including choosing and calling tools, as well as handling results. This allows the system to address problems more autonomously.
-Below, you’ll see how to develop a plugin that supports **Function Calling** to automatically fetch the current time.
+Below, you'll see how to develop a plugin that supports **Function Calling** to automatically fetch the current time.
### Prerequisites
- Dify plugin scaffolding tool
- Python environment (version ≥ 3.12)
-For details on preparing the plugin development tool, see [Initializing the Development Tool](/en/plugins/quick-start/develop-plugins/initialize-development-tools).
+For details on preparing the plugin development tool, see [Initializing the Development Tool](initialize-development-tools.mdx).
-
-**Tip**: Run `dify version` in your terminal to confirm that the scaffolding tool is installed.
-
+**Tip**: Run `dify version` in your terminal to confirm that the scaffolding tool is installed.
---
@@ -24,50 +21,60 @@ For details on preparing the plugin development tool, see [Initializing the Deve
Run the following command to create a development template for your Agent plugin:
-```
+```bash
dify plugin init
```
Follow the on-screen prompts and refer to the sample comments for guidance.
```bash
-➜ Dify Plugins Developing dify plugin init
+➜ ./dify-plugin-darwin-arm64 plugin init ─╯
Edit profile of the plugin
-Plugin name (press Enter to next step): # 填写插件的名称
-Author (press Enter to next step): Author name # 填写插件作者
-Description (press Enter to next step): Description # 填写插件的描述
+Plugin name (press Enter to next step): # Enter the plugin name
+Author (press Enter to next step): # Enter the plugin author
+Description (press Enter to next step): # Enter the plugin description
---
-Select the language you want to use for plugin development, and press Enter to con
+Select the language you want to use for plugin development, and press Enter to continue,
BTW, you need Python 3.12+ to develop the Plugin if you choose Python.
--> python # 选择 Python 环境
+-> python # Select Python environment
go (not supported yet)
---
-Based on the ability you want to extend, we have divided the Plugin into four type
+Based on the ability you want to extend, we have divided the Plugin into four types: Tool, Model, Extension, and Agent Strategy.
-- Tool: It's a tool provider, but not only limited to tools, you can implement an
+- Tool: It's a tool provider, but not only limited to tools, you can implement an endpoint there, for example, you need both Sending Message and Receiving Message if you are
- Model: Just a model provider, extending others is not allowed.
-- Extension: Other times, you may only need a simple http service to extend the fu
+- Extension: Other times, you may only need a simple http service to extend the functionalities, Extension is the right choice for you.
- Agent Strategy: Implement your own logics here, just by focusing on Agent itself
-What's more, we have provided the template for you, you can choose one of them b
+What's more, we have provided the template for you, you can choose one of them below:
tool
--> agent-strategy # 选择 Agent 策略模板
+-> agent-strategy # Select Agent strategy template
llm
text-embedding
---
-Configure the permissions of the plugin, use up and down to navigate, tab to sel
+Configure the permissions of the plugin, use up and down to navigate, tab to select, after selection, press enter to finish
Backwards Invocation:
Tools:
- Enabled: [✔] You can invoke tools inside Dify if it's enabled # 默认开启
+ Enabled: [✔] You can invoke tools inside Dify if it's enabled # Enabled by default
Models:
- Enabled: [✔] You can invoke models inside Dify if it's enabled # 默认开启
- LLM: [✔] You can invoke LLM models inside Dify if it's enabled # 默认开启
- Text Embedding: [✘] You can invoke text embedding models inside Dify if it'
+ Enabled: [✔] You can invoke models inside Dify if it's enabled # Enabled by default
+ LLM: [✔] You can invoke LLM models inside Dify if it's enabled # Enabled by default
+ → Text Embedding: [✘] You can invoke text embedding models inside Dify if it's enabled
Rerank: [✘] You can invoke rerank models inside Dify if it's enabled
-...
+ TTS: [✘] You can invoke TTS models inside Dify if it's enabled
+ Speech2Text: [✘] You can invoke speech2text models inside Dify if it's enabled
+ Moderation: [✘] You can invoke moderation models inside Dify if it's enabled
+Apps:
+ Enabled: [✘] Ability to invoke apps like BasicChat/ChatFlow/Agent/Workflow etc.
+Resources:
+Storage:
+ Enabled: [✘] Persistence storage for the plugin
+ Size: N/A The maximum size of the storage
+Endpoints:
+ Enabled: [✘] Ability to register endpoints
```
-After initialization, you’ll get a folder containing all the resources needed for plugin development. Familiarizing yourself with the overall structure of an Agent Strategy Plugin will streamline the development process:
+After initialization, you'll get a folder containing all the resources needed for plugin development. Familiarizing yourself with the overall structure of an Agent Strategy Plugin will streamline the development process:
```text
├── GUIDE.md # User guide and documentation
@@ -98,12 +105,12 @@ Agent Strategy Plugin development revolves around two files:
#### 2.1 Defining Parameters
-To build an Agent plugin, start by specifying the necessary parameters in `strategies/basic_agent.yaml`. These parameters define the plugin’s core features, such as calling an LLM or using tools.
+To build an Agent plugin, start by specifying the necessary parameters in `strategies/basic_agent.yaml`. These parameters define the plugin's core features, such as calling an LLM or using tools.
We recommend including the following four parameters first:
1. **model**: The large language model to call (e.g., GPT-4, GPT-4o-mini).
-2. **tools**: A list of tools that enhance your plugin’s functionality.
+2. **tools**: A list of tools that enhance your plugin's functionality.
3. **query**: The user input or prompt content sent to the model.
4. **maximum_iterations**: The maximum iteration count to prevent excessive computation.
@@ -155,7 +162,7 @@ extra:
source: strategies/basic_agent.py
```
-Once you’ve configured these parameters, the plugin will automatically generate a user-friendly interface so you can easily manage them:
+Once you've configured these parameters, the plugin will automatically generate a user-friendly interface so you can easily manage them:

@@ -185,16 +192,16 @@ class BasicAgentAgentStrategy(AgentStrategy):
params = BasicParams(**parameters)
```
-### 3. Invoking the Model
+#### 2.3 Invoking the Model
In an Agent Strategy Plugin, **invoking the model** is central to the workflow. You can invoke an LLM efficiently using `session.model.llm.invoke()` from the SDK, handling text generation, dialogue, and so forth.
-If you want the LLM **handle tools**, ensure it outputs structured parameters to match a tool’s interface. In other words, the LLM must produce input arguments that the tool can accept based on the user’s instructions.
+If you want the LLM **handle tools**, ensure it outputs structured parameters to match a tool's interface. In other words, the LLM must produce input arguments that the tool can accept based on the user's instructions.
Construct the following parameters:
* model
-* prompt\_messages
+* prompt_messages
* tools
* stop
* stream
@@ -218,7 +225,95 @@ This code achieves the following functionality: after a user inputs a command, t

-### 4. Handle a Tool
+#### 2.4 Adding Memory to the Model
+
+Adding **Memory** to your Agent plugin allows the model to remember previous conversations, making interactions more natural and effective. With memory enabled, the model can maintain context and provide more relevant responses.
+
+Steps:
+
+1. Configure Memory Functionality
+
+Add the `history-messages` feature to the Agent plugin's YAML configuration file `strategies/basic_agent.yaml`:
+
+```yaml
+identity:
+ name: basic_agent # Agent strategy name
+ author: novice # Author
+ label:
+ en_US: BasicAgent # English label
+description:
+ en_US: BasicAgent # English description
+features:
+ - history-messages # Enable history messages feature
+...
+```
+
+2. Enable Memory Settings
+
+After modifying the plugin configuration and restarting, you will see the **Memory** toggle. Click the toggle button on the right to enable memory.
+
+
+
+
+
+Once enabled, you can adjust the memory window size using the slider, which determines how many previous conversation turns the model can “remember”.
+
+3. Debug History Messages
+
+Add the following code to check the history messages:
+
+```python
+class BasicAgentAgentStrategy(AgentStrategy):
+ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
+ params = BasicParams(**parameters)
+ print(f"history_messages: {params.model.history_prompt_messages}")
+ ...
+```
+
+
+
+The console will display an output similar to:
+
+```
+history_messages: []
+history_messages: [UserPromptMessage(role=, content='hello, my name is novice', name=None), AssistantPromptMessage(role=, content='Hello, Novice! How can I assist you today?', name=None, tool_calls=[])]
+```
+
+4. Integrate History Messages into Model Calls
+
+Update the model call to incorporate conversation history with the current query:
+
+```python
+class BasicAgentAgentStrategy(AgentStrategy):
+ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
+ params = BasicParams(**parameters)
+
+ chunks: Generator[LLMResultChunk, None, None] | LLMResult = (
+ self.session.model.llm.invoke(
+ model_config=LLMModelConfig(**params.model.model_dump(mode="json")),
+ # Add history messages
+ prompt_messages=params.model.history_prompt_messages
+ + [UserPromptMessage(content=params.query)],
+ tools=[
+ self._convert_tool_to_prompt_message_tool(tool)
+ for tool in params.tools
+ ],
+ stop=params.model.completion_params.get("stop", [])
+ if params.model.completion_params
+ else [],
+ stream=True,
+ )
+ )
+ ...
+```
+
+5. Check the Outcome
+
+After implementing **Memory**, the model can respond based on conversation history. In the example below, the model successfully remembers the user’s name mentioned in previous conversation.
+
+
+
+#### 2.5 Handling a Tool
After specifying the tool parameters, the Agent Strategy Plugin must actually call these tools. Use `session.tool.invoke()` to make those requests.
@@ -240,7 +335,7 @@ Example code for method definition:
) -> Generator[ToolInvokeMessage, None, None]:...
```
-If you’d like the LLM itself to generate the parameters needed for tool calls, you can do so by combining the model’s output with your tool-calling code.
+If you'd like the LLM itself to generate the parameters needed for tool calls, you can do so by combining the model's output with your tool-calling code.
```python
tool_instances = (
@@ -260,13 +355,13 @@ With this in place, your Agent Strategy Plugin can automatically perform **Funct

-### 5. Creating Logs
+#### 2.6 Creating Logs
-Often, multiple steps are necessary to complete a complex task in an **Agent Strategy Plugin**. It’s crucial for developers to track each step’s results, analyze the decision process, and optimize strategy. Using `create_log_message` and `finish_log_message` from the SDK, you can log real-time states before and after calls, aiding in quick problem diagnosis.
+Often, multiple steps are necessary to complete a complex task in an **Agent Strategy Plugin**. It's crucial for developers to track each step's results, analyze the decision process, and optimize strategy. Using `create_log_message` and `finish_log_message` from the SDK, you can log real-time states before and after calls, aiding in quick problem diagnosis.
For example:
-- Log a “starting model call” message before calling the model, clarifying the task’s execution progress.
-- Log a “call succeeded” message once the model responds, ensuring the model’s output can be traced end to end.
+- Log a "starting model call" message before calling the model, clarifying the task's execution progress.
+- Log a "call succeeded" message once the model responds, ensuring the model's output can be traced end to end.
```python
model_log = self.create_log_message(
@@ -321,11 +416,11 @@ model_log = self.create_log_message(
yield model_log
```
-#### Sample code for agent-plugin functions
+### Sample code for agent-plugin functions
-
- #### Invoke Model
+
+#### Invoke Model
The following code demonstrates how to give the Agent strategy plugin the ability to invoke the model:
@@ -549,9 +644,10 @@ class BasicAgentAgentStrategy(AgentStrategy):
return tool_calls
```
-
-
- #### Handle Tools
+
+
+
+#### Handle Tools
The following code shows how to implement model calls for the Agent strategy plugin and send canonicalized requests to the tool.
@@ -775,9 +871,10 @@ class BasicAgentAgentStrategy(AgentStrategy):
return tool_calls
```
-
-
- #### Example of a complete function code
+
+
+
+#### Example of a complete function code
A complete sample plugin code that includes a **invoking model, handling tool** and a **function to output multiple rounds of logs**:
@@ -1032,14 +1129,14 @@ class BasicAgentAgentStrategy(AgentStrategy):
return tool_calls
```
-
+
### 3. Debugging the Plugin
-After finalizing the plugin’s declaration file and implementation code, run `python -m main` in the plugin directory to restart it. Next, confirm the plugin runs correctly. Dify offers remote debugging—go to [“Plugin Management”](https://console-plugin.dify.dev/plugins) to obtain your debug key and remote server address.
+After finalizing the plugin's declaration file and implementation code, run `python -m main` in the plugin directory to restart it. Next, confirm the plugin runs correctly. Dify offers remote debugging—go to ["Plugin Management"](https://console-plugin.dify.dev/plugins) to obtain your debug key and remote server address.
-
+
Back in your plugin project, copy `.env.example` to `.env` and insert the relevant remote server and debug key info.
@@ -1057,9 +1154,9 @@ Then run:
python -m main
```
-You’ll see the plugin installed in your Workspace, and team members can also access it.
+You'll see the plugin installed in your Workspace, and team members can also access it.
-
+
Browser Plugins
### Packaging the Plugin (Optional)
@@ -1073,7 +1170,7 @@ dify plugin package ./basic_agent/
A file named `google.difypkg` (for example) appears in your current folder—this is your final plugin package.
-**Congratulations!** You’ve fully developed, tested, and packaged your Agent Strategy Plugin.
+**Congratulations!** You've fully developed, tested, and packaged your Agent Strategy Plugin.
### Publishing the Plugin (Optional)
diff --git a/en/plugins/quick-start/develop-plugins/bundle.mdx b/en/plugins/quick-start/develop-plugins/bundle.mdx
index 56b34a36..637a5f0f 100644
--- a/en/plugins/quick-start/develop-plugins/bundle.mdx
+++ b/en/plugins/quick-start/develop-plugins/bundle.mdx
@@ -14,7 +14,7 @@ You can package multiple plugins into a Bundle using the Dify CLI tool. Bundle p
### **Prerequisites**
* Dify plugin scaffolding tool
-* Python environment, version ≥ 3.10;
+* Python environment, version ≥ 3.11;
For detailed instructions on preparing the plugin development scaffolding tool, please refer to [Initializing Development Tools](initialize-development-tools.md).
diff --git a/en/plugins/quick-start/develop-plugins/model-plugin/customizable-model.mdx b/en/plugins/quick-start/develop-plugins/model-plugin/customizable-model.mdx
index e5e56d83..ec747796 100644
--- a/en/plugins/quick-start/develop-plugins/model-plugin/customizable-model.mdx
+++ b/en/plugins/quick-start/develop-plugins/model-plugin/customizable-model.mdx
@@ -322,7 +322,9 @@ After finishing development, test the plugin to ensure it runs correctly. For mo
If you’d like to list this plugin on the Dify Marketplace, see:
-Publish to Dify Marketplace
+
+ publish-to-dify-marketplace
+
## Explore More
diff --git a/en/plugins/quick-start/develop-plugins/tool-plugin.mdx b/en/plugins/quick-start/develop-plugins/tool-plugin.mdx
index eee4462c..c9f7c5d7 100644
--- a/en/plugins/quick-start/develop-plugins/tool-plugin.mdx
+++ b/en/plugins/quick-start/develop-plugins/tool-plugin.mdx
@@ -211,7 +211,7 @@ extra:
* `identity` contains the tool's basic information, including name, author, labels, description, etc.
* `parameters` parameter list
* `name` (required) parameter name, must be unique, cannot duplicate other parameter names
- * `type` (required) parameter type, currently supports five types: `string`, `number`, `boolean`, `select`, `secret-input`, corresponding to string, number, boolean, dropdown menu, and encrypted input field. For sensitive information, please use `secret-input` type
+ * `type` (required) parameter type, currently supports nine types: `string`, `number`, `boolean`, `select`, `secret-input`, `file`, `files`, `model-selector`, `app-selector`, corresponding to string, number, boolean, dropdown menu, encrypted input field, file, file set, model selection, and application selection. For sensitive information, please use `secret-input` type
* `label` (required) parameter label, used for frontend display
* `form` (required) form type, currently supports two types: `llm` and `form`
* In Agent applications, `llm` means the parameter is inferred by LLM, `form` means parameters that can be preset to use the tool
diff --git a/en/plugins/quick-start/install-plugins.mdx b/en/plugins/quick-start/install-plugins.mdx
index 0fa81a2a..a64235d2 100644
--- a/en/plugins/quick-start/install-plugins.mdx
+++ b/en/plugins/quick-start/install-plugins.mdx
@@ -9,29 +9,25 @@ To install plugins, click **"Plugins"** in the top-right corner of the Dify plat

-#### Marketplace
+### Marketplace
Browse and select a plugin from the Marketplace. Click **"Install"** to add it to your current workspace effortlessly.

-#### GitHub
+### GitHub
Install plugins directly using GitHub repository links. This method requires plugins to meet code standards and include a `.difypkg` file attached to a Release. For more details, see [Publishing Plugins on GitHub](/en/plugins/publish-plugins/publish-plugin-on-personal-github-repo).

-#### Local Upload
+### Local File Upload
-After [packaging your plugin](/en/plugins/publish-plugins/package-plugin-file-and-publish), upload the resulting `.difypkg` file. This option is ideal for offline or test environments and allows organizations to maintain internal plugins without exposing sensitive information.
+Local files refer to file packages with the `.difypkg` extension, commonly used in offline environments or testing environments. Developers can use this method to install plugin files outside the official marketplace. For organizations, this allows developing and maintaining internal plugins and installing them through local upload to avoid exposing sensitive information.
#### Authorizing Plugins
-Some plugins require API Keys or other authorization to function properly. After installation, enter the necessary credentials to enable the plugin.
-
-> API Keys are sensitive information and are only valid for the current user. Other team members must manually input their credentials to use the plugin.
-
-
+For detailed instructions on packaging plugins and obtaining .difypkg files, please refer to: [Package Plugins](../publish-plugins/package-plugin-file-and-publish.mdx)
***
diff --git a/en/plugins/schema-definition/manifest.mdx b/en/plugins/schema-definition/manifest.mdx
index afcaa7c3..ebe05440 100644
--- a/en/plugins/schema-definition/manifest.mdx
+++ b/en/plugins/schema-definition/manifest.mdx
@@ -45,11 +45,15 @@ meta:
- "arm64"
runner:
language: "python"
- version: "3.10"
+ version: "3.11"
entrypoint: "main"
privacy: "./privacy.md"
```
+### **Version Management**
+
+The version of the plugin is managed by the `version` field in the manifest file. The version number must be in the format of `major.minor.patch`; otherwise, auto-update may not work as expected.
+
### **Structure**
* `version` (version, required): Plugin version
diff --git a/ja-jp/development/models-integration/gpustack.mdx b/ja-jp/development/models-integration/gpustack.mdx
index 9872439e..8d60b114 100644
--- a/ja-jp/development/models-integration/gpustack.mdx
+++ b/ja-jp/development/models-integration/gpustack.mdx
@@ -1,9 +1,8 @@
---
-title: GPUStackとの統合によるローカルモデルのデプロイ
+title: GPUStackによってデプロイされたローカルモデルの統合
---
-
-[GPUStack](https://github.com/gpustack/gpustack)は、大規模言語モデル(LLM)を実行するために設計されたオープンソースのGPUクラスターマネージャーです。
+[GPUStack](https://github.com/gpustack/gpustack)は、AIモデルを実行するために設計されたオープンソースのGPUクラスターマネージャーです。
Difyは、大規模言語モデルの推論、埋め込み、再順位付け機能をローカル環境で展開するために、GPUStackとの統合を実現しています。
@@ -39,7 +38,7 @@ GPUStackにホストされたLLMを使用する方法の例です:
3. モデルを展開するために「Save」をクリックします。
-
+
## APIキーの作成方法
@@ -63,6 +62,6 @@ GPUStackにホストされたLLMを使用する方法の例です:
モデルをアプリケーションで使用するために、「Save」をクリックしてください。
-
+
GPUStackに関する詳細情報は、[Github Repo](https://github.com/gpustack/gpustack)を参照してください。
\ No newline at end of file
diff --git a/ja-jp/getting-started/cloud.mdx b/ja-jp/getting-started/cloud.mdx
index e27bdeb8..7c458c63 100644
--- a/ja-jp/getting-started/cloud.mdx
+++ b/ja-jp/getting-started/cloud.mdx
@@ -2,24 +2,32 @@
title: クラウドサービス
---
-
-**ヒント:** Difyは現在ベータテストフェーズにあります。ドキュメントと製品に不一致がある場合は、製品の実際の体験を優先してください。
+注意:Difyは現在ベータテスト段階にあります。ドキュメントと製品に不一致がある場合は、実際の製品体験をご参照ください。
-Difyはすべてのユーザーに[クラウドサービス](http://cloud.dify.ai)を提供しており、自分でデプロイすることなくDifyの完全な機能を利用できます。Difyのクラウドサービスを利用するには、GitHubまたはGoogleアカウントが必要です。
+Difyは誰でも[手軽にご利用いただける](https://cloud.dify.ai/apps)クラウドサービスです。[プランと価格](https://dify.ai/pricing)をご確認の上、お客様の要求に最適なプランをお選びください。
-1. [Difyクラウドサービス](https://cloud.dify.ai)にログインし、新しいワークスペースを作成するか、既存のワークスペースに参加します。
-2. モデルプロバイダーを設定するか、提供されているホスト型モデルプロバイダーを使用します。
-3. [アプリケーションを作成](/ja-jp/guides/application-orchestrate/creating-an-application)しましょう!
+まずは[Sandboxプラン](http://cloud.dify.ai/)からお試しいただけます。クレジットカード不要で200回のOpenAI APIコールを無料で試すことができます。クラウド版のSandboxプランのご利用には、GitHubまたはGoogleアカウント、および有効なOpenAI APIキーが必要です。使用手順は以下の通りです:
-### サブスクリプションプラン
+1. [Difyクラウド](https://cloud.dify.ai)にサインアップし、新しいワークスペースを作成するか、既存のワークスペースに参加します。
+2. モデルプロバイダーを設定するか、Dify提供のモデルプロバイダーを使えます。
+3. 以上で、[アプリケーションを作成](../guides/application-orchestrate/creating-an-application.md)できます!
-クラウドサービスには複数のサブスクリプションプランが用意されており、チームの状況に応じて選択できます。
+### よくある質問
-* サンドボックス(無料版)
-* プロフェッショナル版
-* チーム版
-* エンタープライズ版
+**Q: Difyクラウドを使用する際、私のデータはどのように処理・保存されますか?**
-各バージョンの価格設定については、[ここ](https://dify.ai/pricing)をご参照ください。
\ No newline at end of file
+A: Difyクラウドを使用すると、ユーザーデータは米国東部地域にあるAWSサーバーに安全に保存されます。これには、積極的に入力したデータとアプリから生成されたデータの両方が含まれます。私たちはデータのセキュリティと完全性を最優先事項としており、クラウドストレージソリューションにおける最高水準の基準に基づき管理されることを確保しています。
+
+**Q: APIキーや他の機密情報を保護するためにどのような対策がありますか?**
+
+A: Difyでは、APIキーや他の機密情報を保護することの重要性を深く理解しています。これらの情報は保存時に暗号化され、Dify側から閲覧することはできません。アクセスは、正当な所有者であるユーザーご本人のみに限定されます。
+
+**Q: Difyクラウドでのアプリの匿名化について説明していただけますか?**
+
+A: Difyクラウドでは、プライバシーを確保し、暗号化と復号化のオーバーヘッドを減らすためにアプリのデータを匿名化しています。これは、アプリで使用されるデータが特定可能なユーザーアカウントと直接関連付けられないことを意味します。データを匿名化することで、クラウドサービスのパフォーマンスを維持しながらプライバシーを強化しています。
+
+**Q: Difyクラウドから自分のアカウントと関連するすべてのデータを削除するプロセスはどのようになっていますか?**
+
+A: アカウントおよび関連データの削除をご希望の場合、[アカウント設定](https://cloud.dify.ai/account)ページにアクセスするか、画面右上のアバターをクリックして「アカウント」を選択してください。ページ内に「アカウントを削除」ボタンがありますので、クリックして画面の指示に従ってください。当社はお客様のプライバシーとデータに関する権利を尊重しており、ご依頼に基づき、データ保護規制に従って、システムからお客様に関連するすべてのデータを消去いたします。
diff --git a/ja-jp/getting-started/dify-for-education.mdx b/ja-jp/getting-started/dify-for-education.mdx
new file mode 100644
index 00000000..cc5bc970
--- /dev/null
+++ b/ja-jp/getting-started/dify-for-education.mdx
@@ -0,0 +1,171 @@
+---
+title: Dify 教育版
+---
+
+## はじめに
+
+Dify教育版は、学生、教師、教育機関に手軽で手頃なAI学習・開発ツールを提供し、AI教育をより身近で普及させることを目指しています。
+
+## メリット
+
+- **学生の方へ**:Dify SaaSサブスクリプションサービスを割安な価格で利用でき、より多くの計算リソースを獲得できます。授業の課題、研究プロジェクト、AIアプリケーション開発に役立ちます。
+
+- **教師の方へ**:Difyを教育現場でAI教材として活用し、学生たちがAIアプリケーション開発について深く学べるようサポートします。
+
+- **教育機関・学校の方へ**:チームでAIリソースを簡単に管理でき、教育効率を高め、教育分野におけるAIの可能性をさらに探求できます。
+
+## 特典
+
+現在のDify教育版特典は、**Difyプロフェッショナルプランのサブスクリプションが50%割引**になるクーポンです。
+
+
+今後も学習や授業をサポートするための教育版専用の特典を継続的に提供していく予定です。ご期待ください。
+
+
+## 申請方法
+
+### 申請資格
+
+Dify教育版認証を申請するには、以下の条件を**すべて**満たす必要があります:
+
+1. 18歳以上であること
+2. 現在、学生、教師、または教育機関の職員であること
+3. 教育用メールアドレスでDifyアカウントを登録していること
+
+### 申請手順
+
+1. **Difyアカウントの作成**
+
+教育版認証を申請する前に、教育用メールアドレスで[Difyアカウントを登録](https://cloud.dify.ai/signin)してください。
+
+2. **Dify教育版認証の申請**
+
+ 1. 教育用メールアドレスで登録したDifyアカウントにログインし、右上のユーザーアイコンをクリックして、ドロップダウンメニューから**設定**を選択します。
+
+ 2. 設定ページで、**請求 > 教育版認証を申請**をクリックします。
+
+ 
+
+ 3. 学校の正式名称を入力します *(略称は使用しないでください)*。
+
+ 4. 申請人の身分を選択します。
+
+ 5. **利用規約に同意する**にチェックを入れ、**送信**をクリックします。
+
+ 
+
+ 6. 認証が成功すると、Dify教育版専用の割引クーポンが付与されます。
+
+## Dify教育認証状況の確認方法
+
+教育認証の申請が成功すると、右上のユーザーアイコンをクリックして、ドロップダウンメニューからサブスクリプション状況を確認できます。サブスクリプション状況が`Edu`と表示されていれば、教育認証が成功しています。
+
+## クーポンの使用方法
+
+### 概要
+
+Dify教育認証が完了すると、教育クーポンが自動的にあなたのDifyアカウントに付与されます。Difyプロフェッショナルプランを購読する際にこのクーポンを使用できます。
+
+
+クーポン割引:**50%**
+
+クーポン有効期間:**12ヶ月**
+
+クーポン使用条件:
+- 有効期限内にクーポンを使用する必要があります。**有効期限が過ぎると使用できなくなります**。
+- 教育版認証は**年に一度**のみ申請可能で、一度に**1枚のクーポン**しか取得できません。
+- クーポンはワークスペースの所有者本人のみ利用可能です。
+
+
+### 使い方
+
+1. サブスクリプションページで「Dify プロフェッショナルプラン」を選択:
+- 月額払いの場合:今すぐ始めるボタンを直接クリックし、決済ページへ進みます。
+- 年額払いの場合:ページ右上の年額請求スイッチをオンにした後、今すぐ始めるボタンをクリックし決済ページへ進みます。
+
+2. 支払いページに進むと、クーポンコードが自動的に適用されます。このページでクーポンが有効になっていることを確認し、割引後の金額を確認できます。
+
+
+
+3. 注文内容に問題がなければ、お支払い方法を選択してください。
+
+4. お支払いが完了すると、サブスクリプションの設定が完了となります。
+
+5. サブスクリプション後、あなたのステータスは`Pro(Edu)`に更新されます。
+
+## 教育情報の更新方法
+
+### メールアドレスの更新
+
+メールアドレスを変更する必要がある場合は、現在のところ手動対応となっております。`support@dify.ai`までご連絡ください。
+
+### 学校情報の更新
+
+- **認証完了後に学校情報を変更したい場合**:教育認証が完了した後に学校情報を変更する必要がある場合は、`support@dify.ai`までご連絡ください。
+
+- **認証期限切れ後の学校情報変更**:教育認証の期限が切れた後は、再認証の際に新しい学校情報を入力することができます。
+
+## Dify教育認証の更新方法
+
+教育認証の期限が切れた場合は、前述の**申請方法**セクションを参照して、再度申請を行ってください。
+
+## よくある質問
+
+- **Difyを学習する方法は?**
+
+1. 公式ドキュメントを参照する:[Dify公式ドキュメント](https://docs.dify.ai/ja-jp)で、詳細な使用ガイドをご覧いただけます。
+
+2. コミュニティに参加する:[Dify Discordコミュニティ](https://discord.com/invite/FngNHpbcY7)に参加して、開発者と交流し、使用体験を共有しましょう。
+
+- **Difyプランの料金はいくらですか?**
+
+Difyの[料金ページ](https://dify.ai/pricing)で詳細をご確認いただけます。
+
+- **教育認証を申請できないのは誰ですか?**
+
+1. 18歳未満のユーザー。
+
+2. 個人用メールアドレス(Gmailなど)でアカウント登録したユーザー。
+
+- **教育認証が拒否/取り消された理由と対処法は?**
+
+以下の状況では、教育認証の申請が拒否または取り消される可能性があります:
+
+1. 教育認証申請時に**利用規約に同意する**にチェックを入れなかった
+2. 教育機関のメールアドレスを使用していない
+3. 偽の学校や企業のメールアドレスを使用している
+4. 虚偽の個人情報や学校情報を提供している
+5. 教育特典の不正使用
+
+申請が拒否されたり認証が取り消された場合は、`support@dify.ai`までご連絡ください。
+
+- **卒業または学校を離れた後も教育認証は有効ですか?**
+
+教育認証は教育機関のメールアドレスの状態に直接関連しています。教育メールアドレスが正常に使用できる限り、認証は有効です。
+
+教育メールアドレスが使用できなくなった場合は、`support@dify.ai`までご連絡ください。
+
+- **教育機関のメールアドレスを持っていない場合、教育認証を申請するにはどうすればよいですか?**
+
+現在、Dify教育認証は主に教育機関のメールアドレスによる身分確認を通じて行われています。教育機関のメールアドレスをお持ちでない場合は、誠に申し訳ありませんが、教育認証を申請することはできません。
+
+- **以前にサブスクリプションを利用していましたが、現在は期限切れです。教育優待クーポンはまだ使用可能ですか?**
+
+購読が期限切れの場合でも、アカウントにStripeの支払い情報が登録されている場合、次回の再購読時に教育優待クーポンが自動適用されます。
+
+- **現在Difyプランを購読中です。教育認証を申請した後、サブスクリプション内容はどうなりますか?**
+
+プロフェッショナルプランを購読中の場合、教育優待クーポンは次回の課金周期から自動適用されます。
+
+プロフェッショナルプラン以外を購読中の場合、教育優待クーポンはアカウントに保存され、将来プロフェッショナルプランを選択した際に自動使用されます。
+
+- **教育認証済みでDifyプロフェッショナルプランを購読中ですが、次回課金開始前に解約した場合はどうなりますか?**
+
+現在の課金周期終了時にサブスクリプションが解除され、同時に教育優待クーポンも失効し、再度有効化することはできません。
+
+- **解約後に教育認証を有効化した場合、割引は適用されますか?**
+
+教育優待は保持されますが、以下の点にご注意ください:
+
+1. 現在の購読期間終了後の再購読時にはクーポンが自動適用されます。
+2. 購読期間終了前の更新/延長時には適用されず、割引を利用するには現行プラン終了後の再購入が必要です。
diff --git a/ja-jp/getting-started/dify-premium.mdx b/ja-jp/getting-started/dify-premium.mdx
index 4c6f112d..903201ff 100644
--- a/ja-jp/getting-started/dify-premium.mdx
+++ b/ja-jp/getting-started/dify-premium.mdx
@@ -110,3 +110,7 @@ poetry run flask install-plugins --workers=2
docker-compose down
ocker-compose -f docker-compose.yaml -f docker-compose.override.yaml up -d
```
+
+## カスタマイズ Webアプリのロゴやブランド
+
+この機能は設定の**カスタマイズ**で有効にすることができます。**Powered by Difyを削除**を有効にして、独自のロゴをアップロードしてください。
diff --git a/ja-jp/getting-started/install-self-hosted/environments.mdx b/ja-jp/getting-started/install-self-hosted/environments.mdx
index 1e706d80..f54bdf9d 100644
--- a/ja-jp/getting-started/install-self-hosted/environments.mdx
+++ b/ja-jp/getting-started/install-self-hosted/environments.mdx
@@ -1,7 +1,14 @@
---
-title: 環境変数の説明
+title: 環境変数の説明
---
+
+このドキュメントは最新でない可能性があります。最新の構成ファイルをご参照ください:
+
+- [docker-compose.yaml](https://github.com/langgenius/dify/blob/5f8d20b5b2bb51f19547467167b18d9c0f6ffbb8/docker/docker-compose.yaml)
+
+- [.env.example](https://github.com/langgenius/dify/blob/5f8d20b5b2bb51f19547467167b18d9c0f6ffbb8/docker/.env.example)
+
### 公共変数
diff --git a/ja-jp/getting-started/install-self-hosted/faq.mdx b/ja-jp/getting-started/install-self-hosted/faq.mdx
index e6a99a8f..4723409d 100644
--- a/ja-jp/getting-started/install-self-hosted/faq.mdx
+++ b/ja-jp/getting-started/install-self-hosted/faq.mdx
@@ -67,5 +67,25 @@ EXPOSE_NGINX_PORT=80
EXPOSE_NGINX_SSL_PORT=443
```
+他のデプロイに関する質問は[こちら](/ja-jp/learn-more/faq/install-faq)をご覧ください。
-他のデプロイに関する質問は[ここに](/ja-jp/learn-more/faq/install-faq)います。
\ No newline at end of file
+### 6. docker-api-1 でのデータベース接続エラーの解決方法とは?
+
+**問題の詳細**:`http://localhost`にアクセスする際に`Internal Server Error`が表示され、`docker-api-1`のログに以下のようなエラーが記録される場合:
+
+```bash
+FATAL: no pg_hba.conf entry for host "172.19.0.7", user "postgres", database "dify", no encryption
+```
+
+**解決策**:dbコンテナ内の`/var/lib/postgresql/pgdata/pg_hba.conf`を変更し、エラーメッセージに記載されているネットワークセグメントを認証リストに追加します。例:
+
+```bash
+docker exec -it docker-db-1 sh -c "echo 'host all all 172.19.0.0/16 trust' >> /var/lib/postgresql/data/pgdata/pg_hba.conf"
+docker-compose restart
+```
+
+他のデプロイに関する質問は[ローカルデプロイに関するよくある質問](../../learn-more/faq/install-faq.md)をご確認ください。
+
+### 7. ナレッジベースのファイルアップロードサイズ制限を変更するには?
+
+`.env`ファイル内の`UPLOAD_FILE_SIZE_LIMIT`パラメータを変更して、デフォルトの制限を調整できます。同時に、潜在的な問題を避けるために`NGINX_CLIENT_MAX_BODY_SIZE`パラメータの値も更新する必要があります。
diff --git a/ja-jp/getting-started/install-self-hosted/local-source-code.mdx b/ja-jp/getting-started/install-self-hosted/local-source-code.mdx
index bc9920de..483aeb2c 100644
--- a/ja-jp/getting-started/install-self-hosted/local-source-code.mdx
+++ b/ja-jp/getting-started/install-self-hosted/local-source-code.mdx
@@ -211,8 +211,10 @@ https://nodejs.org/en/download から対応するOSのv18.x以上のインスト
2. 依存関係をインストール
```
- npm install
+ npm i -g pnpm
+ pnpm install
```
+
3. 環境変数を構成。現在のディレクトリに `.env.local` ファイルを作成し、`.env.example` の内容をコピーします。必要に応じてこれらの環境変数の値を変更します。
```
diff --git a/ja-jp/getting-started/readme/model-providers.mdx b/ja-jp/getting-started/readme/model-providers.mdx
index c1fa9194..5bd88a26 100644
--- a/ja-jp/getting-started/readme/model-providers.mdx
+++ b/ja-jp/getting-started/readme/model-providers.mdx
@@ -375,7 +375,15 @@ Difyは以下のモデルプロバイダーをサポートしています: