diff --git a/docs.json b/docs.json index 30e5dfce..c789c0c0 100644 --- a/docs.json +++ b/docs.json @@ -67,7 +67,7 @@ "group": "Application Orchestration", "pages": [ "en/guides/application-orchestrate/readme", - "en/guides/application-orchestrate/chatbot", + "en/guides/application-orchestrate/chatbot-application", "en/guides/application-orchestrate/text-generator", "en/guides/application-orchestrate/agent", { diff --git a/en/development/backend/sandbox/README.mdx b/en/development/backend/sandbox/README.mdx index f057456a..b98a0a38 100644 --- a/en/development/backend/sandbox/README.mdx +++ b/en/development/backend/sandbox/README.mdx @@ -18,4 +18,4 @@ title: DifySandbox You can access the [DifySandbox](https://github.com/langgenius/dify-sandbox) repository to obtain the project source code and follow the project documentation for deployment and usage instructions. ### Contribution -Please refer to the [Contribution Guide](contribution.md) to learn how you can participate in the development of DifySandbox. +Please refer to the [Contribution Guide](/en/development/backend/sandbox/contribution) to learn how you can participate in the development of DifySandbox. diff --git a/en/development/migration/migrate-to-v1.mdx b/en/development/migration/migrate-to-v1.mdx index e503c542..bb81b207 100644 --- a/en/development/migration/migrate-to-v1.mdx +++ b/en/development/migration/migrate-to-v1.mdx @@ -2,8 +2,7 @@ title: Upgrading Community Edition to v1.0.0 --- - -> This document primarily explains how to upgrade from an older Community Edition version to [v1.0.0](https://github.com/langgenius/dify/releases/tag/1.0.0). If you have not installed the Dify Community Edition yet, you can directly clone the [Dify project](https://github.com/langgenius/dify) and switch to the `1.0.0` branch. For installation commands, refer to the [documentation](https://docs.dify.ai/zh-hans/getting-started/install-self-hosted/docker-compose). 
+> This document primarily explains how to upgrade from an older Community Edition version to [v1.0.0](https://github.com/langgenius/dify/releases/tag/1.0.0). If you have not installed the Dify Community Edition yet, you can directly clone the [Dify project](https://github.com/langgenius/dify) and switch to the `1.0.0` branch. For installation commands, refer to the [documentation](/en/getting-started/install-self-hosted/docker-compose). To experience the plugin functionality in the Community Edition, you need to upgrade to version`v1.0.0`. This document will guide you through the steps of upgrading from older versions to `v1.0.0` to access the plugin ecosystem features. diff --git a/en/getting-started/cloud.mdx b/en/getting-started/cloud.mdx index 0b864e68..98038d96 100644 --- a/en/getting-started/cloud.mdx +++ b/en/getting-started/cloud.mdx @@ -12,7 +12,7 @@ Get started now with the [Sandbox plan](http://cloud.dify.ai), which includes a 1. Sign up to [Dify Cloud](https://cloud.dify.ai) and create a new Workspace or join an existing one. 2. Configure your model provider or use our hosted model provider. -3. You can [create an application](../guides/application-orchestrate/creating-an-application.md) now! +3. You can [create an application](/en/guides/application-orchestrate/readme) now! ### FAQs diff --git a/en/getting-started/dify-premium.mdx b/en/getting-started/dify-premium.mdx index 4201b9bc..36bb3346 100644 --- a/en/getting-started/dify-premium.mdx +++ b/en/getting-started/dify-premium.mdx @@ -5,7 +5,7 @@ title: Dify Premium on AWS Dify Premium is our AWS AMI offering that allows custom branding and is one-click deployable to your AWS VPC as an EC2. Head to [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6) to subscribe. It's useful in a couple of scenarios: * You're looking to create one or a few applications as a small/medium business and you care about data residency. 
-* You are interested in [Dify Cloud](cloud.md), but your use case require more resource than supported by the [plans](https://dify.ai/pricing). +* You are interested in [Dify Cloud](/en/getting-started/cloud), but your use case require more resource than supported by the [plans](https://dify.ai/pricing). * You'd like to run a POC before adopting Dify Enterprise within your organization. ## Setup @@ -27,7 +27,7 @@ docker-compose pull docker-compose -f docker-compose.yaml -f docker-compose.override.yaml up -d ``` -> To upgrade to version v1.0.0, please refer to [Migrating Community Edition to v1.0.0](https://docs.dify.ai/development/migration/migrate-to-v1). +> To upgrade to version v1.0.0, please refer to [Migrating Community Edition to v1.0.0](/en/development/migration/migrate-to-v1). diff --git a/en/getting-started/install-self-hosted/docker-compose.mdx b/en/getting-started/install-self-hosted/docker-compose.mdx index dbcc3c9e..1b03c8bc 100644 --- a/en/getting-started/install-self-hosted/docker-compose.mdx +++ b/en/getting-started/install-self-hosted/docker-compose.mdx @@ -172,4 +172,4 @@ The full set of annotated environment variables along can be found under docker/ ### Read More -If you have any questions, please refer to [FAQs](faqs.md). +If you have any questions, please refer to [FAQs](/en/getting-started/install-self-hosted/faqs). diff --git a/en/getting-started/install-self-hosted/faqs.mdx b/en/getting-started/install-self-hosted/faqs.mdx index 9faffbb9..43dbe48b 100644 --- a/en/getting-started/install-self-hosted/faqs.mdx +++ b/en/getting-started/install-self-hosted/faqs.mdx @@ -5,7 +5,7 @@ title: FAQs ### 1. Not receiving reset password emails -You need to configure the `Mail` parameters in the `.env` file. For detailed instructions, please refer to ["Environment Variables Explanation: Mail-related configuration"](https://docs.dify.ai/getting-started/install-self-hosted/environments#mail-related-configuration). 
+You need to configure the `Mail` parameters in the `.env` file. For detailed instructions, please refer to ["Environment Variables Explanation: Mail-related configuration"](/en/getting-started/install-self-hosted/environments#mail-related-configuration). After modifying the configuration, run the following commands to restart the service: @@ -68,4 +68,4 @@ EXPOSE_NGINX_SSL_PORT=443 ``` -Other self-host issue please check this document [Self-Host Related](../../learn-more/faq/install-faq.md)。 \ No newline at end of file +Other self-host issue please check this document [Self-Host Related](/en/learn-more/faq/install-faq)。 \ No newline at end of file diff --git a/en/getting-started/install-self-hosted/install-faq.mdx b/en/getting-started/install-self-hosted/install-faq.mdx index f0ccc5ea..28314318 100644 --- a/en/getting-started/install-self-hosted/install-faq.mdx +++ b/en/getting-started/install-self-hosted/install-faq.mdx @@ -2,12 +2,17 @@ title: FAQ --- +### 1. How to reset the password if it is incorrect after local deployment initialization? -### 1. How to reset the password if the local deployment initialization fails with an incorrect password? +If you deployed using Docker Compose, you can reset the password with the following command: -If deployed using docker compose, you can execute the following command to reset the password: `docker exec -it docker-api-1 flask reset-password` Enter the account email and twice new passwords, and it will be reset. +``` +docker exec -it docker-api-1 flask reset-password +``` -### 2. How to resolve File not found error in the log when deploying locally? +Enter the account email and the new password twice. + +### 2. How to fix the "File not found" error in local deployment logs? 
``` ERROR:root:Unknown Error in completion @@ -19,207 +24,191 @@ Traceback (most recent call last): FileNotFoundError: File not found ``` -This error may be caused by switching deployment methods, or deleting the `api/storage/privkeys` fil`api/storage/privkeys`crypt large model keys and can not be reversed if lost. You can reset the encryption public and private keys with the following command: +This error might be due to changing the deployment method or deleting the `api/storage/privkeys` directory. This file is used to encrypt the large model keys, so its loss is irreversible. You can reset the encryption key pair with the following commands: -* Docker compose deployment - -``` -docker exec -it docke` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -`up - -Enter the api directory - -``` -flas` +* Docker Compose deployment + ``` + docker exec -it docker-api-1 flask reset-encrypt-key-pair + ``` * Source code startup -Enter the api directory + Navigate to the `api` directory -`reset. + ``` + flask reset-encrypt-key-pair + ``` -### 3. Unable to log` -flask reset-encrypt-key-pair -`hen logi` + Follow the prompts to reset. -Follow the prompts to reset. +### 3. Unable to log in after installation, or receiving a 401 error on subsequent interfaces after a successful login? -### 3. Unable to log in when installing later, and then login is successful but subsequent interfaces prompt 401? +This might be due to switching the domain/URL, causing cross-domain issues between the frontend and backend. Cross-domain and identity issues involve the following configurations: -This may be due to switching ``` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -```nd server-side. Cross-domain and identity involve two configuration items: +1. CORS Cross-Domain Configuration + 1. 
`CONSOLE_CORS_ALLOW_ORIGINS` -**CORS cross-do``` -flask reset-encrypt-key-pair -```ALLOW_ORIGINS` `WEB_API_CORS_ALLOW_ORIGINS` WebAPP CORS cross-domain strategy, default to `*`, which allows access from all domain names. + Console CORS policy, default is `*`, meaning all domains can access. + 2. `WEB_API_CORS_ALLOW_ORIGINS` -### 4. After starting, the page keeps loading and checking the request prompts CORS error? + WebAPP CORS policy, default is `*`, meaning all domains can access. -This may be because the domain name/URL has been switched, resulting in cross-domain between the front end and the back end. Please ch`*`e all the following con`WEB_API_CORS_ALLOW_ORIGINS`compose.yml` to the new domain name: `CONSOLE_API_`*`:` The backend URL of the console API. `CONSOLE_WEB_URL:` The front-end URL of the console web. `SERVICE_API_URL:` Service API Url `APP_API_URL:` WebApp API backend Url. `APP_WEB_URL:` WebApp Url. +### 4. The page keeps loading after startup, and requests show CORS errors? -For more information, please check out: [Environments](environments.md) +This might be due to switching the domain/URL, causing cross-domain issues between the frontend and backend. Update the following configuration items in `docker-compose.yml` to the new domain: -### 5. How to upgrade ver`docker-compose.yml`t? +`CONSOLE_API_URL:` Backend URL for the console API. +`CONSOLE_WEB_URL:` Frontend URL for the console web. +`SERVICE_API_URL:` URL for the service API. +`APP_API_URL:` Backend URL for the WebApp API. +`APP_WEB_URL:` URL for the WebApp. -If you start up `api/storage/privkeys`0ease pull the latest images t`api/storage/privkeys`1rade. If you start up through s`api/storage/privkeys`2 pull the`api/storage/privkeys`3nd then start`api/storage/privkeys`4e the upgrade. 
+For more information, please refer to: [Environment Variables](/en/getting-started/install-self-hosted/environments) -When deploying and updating local source code, you need to enter the API directory and execute the following command to migrate the database structure to the latest version: +### 5. How to upgrade the version after deployment? + +If you started with an image, pull the latest image to complete the upgrade. If you started with source code, pull the latest code and then start it to complete the upgrade. + +For source code deployment updates, navigate to the `api` directory and run the following command to migrate the database structure to the latest version: `flask db upgrade` -### 6.How to configure the environment variables when use Notion import +### 6. How to configure environment variables when importing using Notion? -**Q: What is the Notion's Integration configuration address?** +[**Notion Integration Configuration Address**](https://www.notion.so/my-integrations). When performing a private deployment, set the following configurations: -A: [https://www.notion.so/my-integrations](https://www.notion.so/my-integrations) +1. **`NOTION_INTEGRATION_TYPE`**: This value should be configured as **public/internal**. Since Notion's OAuth redirect address only supports https, use Notion's internal integration for local deployment. +2. **`NOTION_CLIENT_SECRET`**: Notion OAuth client secret (for public integration type). +3. **`NOTION_CLIENT_ID`**: OAuth client ID (for public integration type). +4. **`NOTION_INTERNAL_SECRET`**: Notion internal integration secret. If the value of `NOTION_INTEGRATION_TYPE` is **internal**, configure this variable. -**Q: Which environment variables need to be configured?** +### 7. How to change the name of the space in the local deployment version? -A: Pl`api/storage/privkeys`5figuration when doing the privatized deployment +Modify it in the `tenants` table of the database. -1. 
**`NOTION_INTEGRATION_TYPE`** : The value should configrate as (**public/internal**). Since the Redirect address of Notion’s Oauth only supports https, if it is deployed locally, please use Notion’s internal integration -2. **`NOTION_CLIENT_SECRET`** : Notion OAuth client secret (userd for public i`api/storage/privkeys`6TION_CLIENT_ID`** : OAuth client ID (userd for public integration type) -4. **`NOTION_INTERNAL_SECRET`** : Notion Internal Integration Secret, If the value of `NOTION_INTEGRATION_TYPE` is **internal`api/storage/privkeys`7ure this variable. +### 8. Where to modify the domain for accessing the application? -### 7. How to change the name of the space in the`api/storage/privkeys`8version? +Find the `APP_WEB_URL` configuration domain in `docker_compose.yaml`. -Modify in the `tenants` table in the databas`api/storage/privkeys`9odify the domain name for accessing the application? +### 9. What to back up if a database migration occurs? -F` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -`0ain name APP\_WEB\_URL in `docker_compose.yaml`. +Back up the database, configured storage, and vector database data. If deployed using Docker Compose, directly back up all data in the `dify/docker/volumes` directory. -### 9. If database migration is required, what things need to be backed up? +### 10. Why can't Docker deployment Dify access the local port using 127.0.0.1 when starting OpenLLM locally? -The database, confi` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -`1rage, and vector database data need to be backed up. If deployed in Docker Compose mode, all data content in the `dify/docker/volumes`` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -`2ectly backed up. +127.0.0.1 is the internal address of the container. Dify's configured server address needs to be the host's local network IP address. -### 10. Why is Docker deploying Dify and starting OpenLLM locally using 127.0.0.1, but unable to access the local port? +### 11. 
How to resolve the size and quantity limit of document uploads in the dataset for the local deployment version? -`127.0.0.1` is the internal address of the container, and the server address ` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -`3quires the host LAN IP address. +Refer to the official website [Environment Variables Documentation](/en/getting-started/install-self-hosted/environments) for configuration. -### 11. How to solve the size and quantity limitations for uploading knowledge documents in the local deployment version? -` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -`4fer to the official website environment variable description document to configure: +### 12. How to invite members via email in the local deployment version? -[Environments](environments.md) +In the local deployment version, invite members via email. After entering the email and sending the invitation, the page will display an invitation link. Copy the invitation link and forward it to the user. The user can open the link, log in via email, set a password, and log in to your space. -### 12. How does the local deployment edition invite members through email? +### 13. What to do if you encounter the error "Can't load tokenizer for 'gpt2'" in the local deployment version? -Local deployment edition, members can be invited through email. After entering the email invitation, the page displays the invitation link, copies the invitation link, and forwards it to users. Your team members can open the link and log in to your space by setting a password through email login. +``` +Can't load tokenizer for 'gpt2'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'gpt2' is the correct path to a directory containing all relevant files for a GPT2TokenizerFast tokenizer. +``` -### 13. How to solve listen tcp4 0.0.0.0:80: bind: address already in use? 
+Refer to the official website [Environment Variables Documentation](/en/getting-started/install-self-hosted/environments) for configuration, and the related [Issue](https://github.com/langgenius/dify/issues/1261). -This is because the port is occupied. You can use the `netstat -tunlp | grep 80` command to view the process that occupies the port, and then kill the process. For example, the apache and nginx processes occupy the port, you can use the `service apache2 stop` and `service nginx stop` command` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -`5# 14. What to do if this error occurs in text-to-speech? +### 14. How to resolve a port 80 conflict in the local deployment version? + +If port 80 is occupied, stop the service occupying port 80 or modify the port mapping in `docker-compose.yaml` to map port 80 to another port. Typically, Apache and Nginx occupy this port, which can be resolved by stopping these two services. + +### 15. What to do if you encounter the error "[openai] Error: ffmpeg is not installed" during text-to-speech? ``` [openai] Error: ffmpeg is not installed ``` -Since OpenAI TTS has implemented audio stream segm` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -`6 ` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -`7normal use when deploying the source code. Here are the detailed steps: +Since OpenAI TTS implements audio stream segmentation, ffmpeg needs to be installed for source code deployment to work properly. Detailed steps: **Windows:** -1` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -`8://ffmpeg.org/down` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -`9epel-release` +1. Visit [FFmpeg Official Website](https://ffmpeg.org/download.html) and download the precompiled Windows shared library. +2. Download and extract the FFmpeg folder, which will generate a folder like "ffmpeg-20200715-51db0a4-win64-static". +3. 
Move the extracted folder to your desired location, e.g., C:\Program Files\. +4. Add the absolute path of the FFmpeg bin directory to the system environment variables. +5. Open Command Prompt and enter "ffmpeg -version". If you see the FFmpeg version information, the installation is successful. + +**Ubuntu:** + +1. Open Terminal. +2. Enter the following commands to install FFmpeg: `sudo apt-get update`, then `sudo apt-get install ffmpeg`. +3. Enter "ffmpeg -version" to check if the installation is successful. + +**CentOS:** + +1. First, enable the EPEL repository. Enter in Terminal: `sudo yum install epel-release` 2. Then, enter: `sudo rpm -Uvh http://li.nux.ro/download/nux/dextop/el7/x86_64/nux-dextop-release-0-5.el7.nux.noarch.rpm` -3. Update the yum package, enter: `sudo yum update` +3. Update yum packages, enter: `sudo yum update` 4. Finally, install FFmpeg, enter: `sudo yum install ffmpeg ffmpeg-devel` -5. Enter "ffmpeg -version" to check if it has been successfully installed. +5. Enter "ffmpeg -version" to check if the installation is successful. **Mac OS X:** -1. Open the terminal. -2. If you haven't installed Homebrew yet, you can install it by entering the following command in the terminal: `/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"` -3. Install FFmpeg with Homebrew, enter: `brew install ffmpeg` -4. Enter "ffmpeg -version" to check if it has been successfully installed. +1. Open Terminal. +2. If you haven't installed Homebrew, you can install it by entering the following command in Terminal: `/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"` +3. Use Homebrew to install FFmpeg, enter: `brew install ffmpeg` +4. Enter "ffmpeg -version" to check if the installation is successful. -### 15. 
Migrate Vector Database to Another Vector Database - -If you want to migrate the vec` - -* Source code startup - -Enter the api directory - -`0om weaviate to a` - -* Source code startup - -Enter the api directory - -`1de, modify the environment variable in the `.env` file to the vector database you want to migrate to. etc: `VECTOR_STORE=qdrant` -2. If you are starting from docker-compos` - -* Source code startup - -Enter the api directory - -`2onment variable in the `docker-compose.yaml` file to the vector database you want to migrate to,` - -* Source code startup - -Enter the api directory - -`3etc: +### 16. How to resolve an Nginx configuration file mount failure during local deployment? ``` -# The type of ve` +Error response from daemon: failed to create task for container: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: error during container init: error mounting "/run/desktop/mnt/host/d/Documents/docker/nginx/nginx.conf" to rootfs at "/etc/nginx/nginx.conf": mount /run/desktop/mnt/host/d/Documents/docker/nginx/nginx.conf:/etc/nginx/nginx.conf (via /proc/self/fd/9), flags: 0x5000: not a directory: unknown: Are you trying to mount a directory onto a file (or vice-versa)? Check if the specified host path exists and is the expected type +``` -* Source code startup +Download the complete project, navigate to the docker directory, and execute `docker-compose up -d`. -Enter the api directory +``` +git clone https://github.com/langgenius/dify.git +cd dify/docker +docker compose up -d +``` -`4e `weaviate`, `qdrant`, `milvus`, `analyticdb`. -` +### 17. Migrate weaviate to another vector database -* Source code startup +To migrate from Weaviate to another vector database, follow these steps: -Enter the api directory +1. For local source code deployment: + - Update the vector database setting in the `.env` file + - Example: Set `VECTOR_STORE=qdrant` to migrate to Qdrant -`5b +2. 
For Docker Compose deployment: + - Update the vector database settings in `docker-compose.yaml` + - Make sure to modify both API and worker service configurations -### 16. Why is SSRF_PROXY Needed? - -You may have noticed the `SSRF_``` +``` # The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `analyticdb`. VECTOR_STORE: weaviate -```` to prevent Server-Side Request Forgery (SSRF) attacks. For more details on SSRF attacks, refer to [this resource](https://portswigger.net/web-security/ssrf). +``` -To reduce po``` +3. Execute the below command in your terminal or docker container + +``` flask vdb-migrate # or docker exec -it docker-api-1 flask vdb-migrate -```le to S` +``` -* Source code startup +**Tested target database:** -Enter the api directory +- qdrant +- milvus +- analyticdb -`6es like Sandbox can only access exte` +### 18. Why is SSRF_PROXY needed? -* Source code startup +In the community edition's `docker-compose.yaml`, you might notice some services configured with `SSRF_PROXY` and `HTTP_PROXY` environment variables, all pointing to an `ssrf_proxy` container. This is to prevent SSRF attacks. For more information on SSRF attacks, you can read [this article](https://portswigger.net/web-security/ssrf). -Enter the api directory +To avoid unnecessary risks, we configure a proxy for all services that might cause SSRF attacks and force services like Sandbox to only access external networks through the proxy, ensuring your data and service security. By default, this proxy does not intercept any local requests, but you can customize the proxy behavior by modifying the `squid` configuration file. -`7the `192.168.101.0/24` network to be accessed by the proxy, but restrict access to an IP address `192.168.101.19` that contains sensitive data, you can add the following rules to `squid.conf`: +#### How to customize the proxy behavior? -```plaintext +In `docker/volumes/ssrf_proxy/squid.conf`, you can find the `squid` configuration file. 
You can customize the proxy behavior here, such as adding ACL rules to restrict proxy access or adding `http_access` rules to restrict proxy access. For example, your local network can access the `192.168.101.0/24` segment, but `192.168.101.19` has sensitive data that you don't want local deployment Dify users to access, but other IPs can. You can add the following rules in `squid.conf`: + +``` acl restricted_ip dst 192.168.101.19 acl localnet src 192.168.101.0/24 @@ -228,120 +217,61 @@ http_access allow localnet http_access deny all ``` -This is a basic example, ` +This is just a simple example. You can customize the proxy behavior according to your needs. If your business is more complex, such as needing to configure an upstream proxy or cache, you can refer to the [squid configuration documentation](http://www.squid-cache.org/Doc/config/) for more information. -* Source code startup +### 19. How to set your created application as a template? -Enter the api directory +Currently, it is not supported to set your created application as a template. The existing templates are provided by Dify official for cloud version users to refer to. If you are using the cloud version, you can add applications to your workspace or customize them after modification to create your own applications. If you are using the community version and need to create more application templates for your team, you can contact our business team for paid technical support: [business@dify.ai](mailto:business@dify.ai) -`8For more information about configuring `squid`, refer` +### 20. 502 Bad Gateway -* Source code startup +This is because Nginx is forwarding the service to the wrong location. 
First, ensure the container is running, then run the following command with root privileges: -Enter the api directory +``` +docker ps -q | xargs -n 1 docker inspect --format '{{ .Name }}: {{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' +``` -`9PLACEHOLDER_6` -flask reset-encrypt-key-pair -`0` -flask reset-encrypt-key-pair -`1` -flask reset-encrypt-key-pair -`2` -flask reset-encrypt-key-pair -`3` -flask reset-encrypt-key-pair -`4` -flask reset-encrypt-key-pair -`5` -flask reset-encrypt-key-pair -`6` -flask reset-encrypt-key-pair -`7` -flask reset-encrypt-key-pair -`8` -flask reset-encrypt-key-pair -`9` +Find these two lines in the output: -Follow the prompts to reset. +``` +/docker-web-1: 172.19.0.5 +/docker-api-1: 172.19.0.7 +``` -### 3. Unable to log in when installing later, and then login is successful but subsequent interfaces prompt 401? +Remember the IP addresses. Then open the directory where you store the Dify source code, open `dify/docker/nginx/conf.d`, replace `http://api:5001` with `http://172.19.0.7:5001`, and replace `http://web:3000` with `http://172.19.0.5:3000`, then restart the Nginx container or reload the configuration. -This may be due to switching ``` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -```nd server-side. Cross-domain and identity involve two configuration items: +These IP addresses are _**examples**_, you must execute the command to get your own IP addresses, do not fill them in directly. You might need to reconfigure the IP addresses when restarting the relevant containers. -**CORS cross-do``` -flask reset-encrypt-key-pair -```ALLOW_ORIGINS`0` +### 21. How to modify the API service port number? -Follow the prompts to reset. +The API service port is consistent with the one used by the Dify platform. You can reassign the running port by modifying the `nginx` configuration in the `docker-compose.yaml` file. -### 3. 
Unable to log in when installing later, and then login is successful but subsequent interfaces prompt 401? +### 22. How to Migrate from Local to Cloud Storage? -This may be due to switching ``` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -```nd server-side. Cross-domain and identity involve two configuration items: +To migrate files from local storage to cloud storage (e.g., Alibaba Cloud OSS), you'll need to transfer data from the 'upload_files' and 'privkeys' folders. Follow these steps: -**CORS cross-do``` -flask reset-encrypt-key-pair -```ALLOW_ORIGINS`1` +1. Configure Storage Settings -Follow the prompts to reset. + For local source code deployment: + - Update storage settings in `.env` file + - Set `STORAGE_TYPE=aliyun-oss` + - Configure Alibaba Cloud OSS credentials -### 3. Unable to log in when installing later, and then login is successful but subsequent interfaces prompt 401? + For Docker Compose deployment: + - Update storage settings in `docker-compose.yaml` + - Set `STORAGE_TYPE: aliyun-oss` + - Configure Alibaba Cloud OSS credentials -This may be due to switching ``` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -```nd server-side. Cross-domain and identity involve two configuration items: +2. Execute Migration Commands -**CORS cross-do``` -flask reset-encrypt-key-pair -```ALLOW_ORIGINS`2` + For local source code: + ```bash + flask upload-private-key-file-to-cloud-storage + flask upload-local-files-to-cloud-storage + ``` -Follow the prompts to reset. - -### 3. Unable to log in when installing later, and then login is successful but subsequent interfaces prompt 401? - -This may be due to switching ``` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -```nd server-side. Cross-domain and identity involve two configuration items: - -**CORS cross-do``` -flask reset-encrypt-key-pair -```ALLOW_ORIGINS`3` - -Follow the prompts to reset. - -### 3. 
Unable to log in when installing later, and then login is successful but subsequent interfaces prompt 401? - -This may be due to switching ``` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -```nd server-side. Cross-domain and identity involve two configuration items: - -**CORS cross-do``` -flask reset-encrypt-key-pair -```ALLOW_ORIGINS`4` - -Follow the prompts to reset. - -### 3. Unable to log in when installing later, and then login is successful but subsequent interfaces prompt 401? - -This may be due to switching ``` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -```nd server-side. Cross-domain and identity involve two configuration items: - -**CORS cross-do``` -flask reset-encrypt-key-pair -```ALLOW_ORIGINS`5` - -Follow the prompts to reset. - -### 3. Unable to log in when installing later, and then login is successful but subsequent interfaces prompt 401? - -This may be due to switching ``` -docker exec -it docker-api-1 flask reset-encrypt-key-pair -```nd server-side. Cross-domain and identity involve two configuration items: - -**CORS cross-do``` -flask reset-encrypt-key-pair -```ALLOW_ORIGINS`6 \ No newline at end of file + For Docker Compose: + ```bash + docker exec -it docker-api-1 flask upload-private-key-file-to-cloud-storage + docker exec -it docker-api-1 flask upload-local-files-to-cloud-storage + ``` diff --git a/en/getting-started/install-self-hosted/readme.mdx b/en/getting-started/install-self-hosted/readme.mdx index e81375e8..b0add896 100644 --- a/en/getting-started/install-self-hosted/readme.mdx +++ b/en/getting-started/install-self-hosted/readme.mdx @@ -5,8 +5,8 @@ title: Introduction Dify, an open-source project on [GitHub](https://github.com/langgenius/dify), can be self-hosted using either of these methods: -1. [Docker Compose Deployment](https://docs.dify.ai/getting-started/install-self-hosted/docker-compose) -2. [Local Source Code Start](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code) +1. 
[Docker Compose Deployment](/en/getting-started/install-self-hosted/docker-compose) +2. [Local Source Code Start](/en/getting-started/install-self-hosted/local-source-code) ### Contributing diff --git a/en/getting-started/readme/model-providers.mdx b/en/getting-started/readme/model-providers.mdx index 602b9bff..e2d29804 100644 --- a/en/getting-started/readme/model-providers.mdx +++ b/en/getting-started/readme/model-providers.mdx @@ -391,4 +391,4 @@ where (🛠️) ︎ denotes "function calling" and (👓) denotes "support for v --- -This table is continuously updated. We also keep track of model providers requested by community members [here](https://github.com/langgenius/dify/discussions/categories/ideas). If you'd like to see a model provider not listed above, please consider contributing by making a PR. To learn more, check out our [contribution.md](../../community/contribution.md "mention") Guide. +This table is continuously updated. We also keep track of model providers requested by community members [here](https://github.com/langgenius/dify/discussions/categories/ideas). If you'd like to see a model provider not listed above, please consider contributing by making a PR. To learn more, check out our [contribution.md](/en/community/contribution) Guide. diff --git a/en/guides/annotation/logs.mdx b/en/guides/annotation/logs.mdx index b68139b8..402cb305 100644 --- a/en/guides/annotation/logs.mdx +++ b/en/guides/annotation/logs.mdx @@ -19,7 +19,7 @@ You can find the Logs in the left navigation of the application. This page typic The logs currently do not include interaction records from the Prompt debugging process. -> For the Free tier teams, interaction logs are only retained for the last 30 days. To keep interaction history for a longer period, please visit our [pricing page](https://dify.ai/pricing) to upgrade to a higher tier or consider deploying the [Community Edition](https://docs.dify.ai/getting-started/install-self-hosted/docker-compose). 
+> For the Free tier teams, interaction logs are only retained for the last 30 days. To keep interaction history for a longer period, please visit our [pricing page](https://dify.ai/pricing) to upgrade to a higher tier or consider deploying the [Community Edition](/en/getting-started/install-self-hosted/docker-compose). ### Improvement Annotations diff --git a/en/guides/application-orchestrate/agent.mdx b/en/guides/application-orchestrate/agent.mdx index 1dffb9b3..329c5cdc 100644 --- a/en/guides/application-orchestrate/agent.mdx +++ b/en/guides/application-orchestrate/agent.mdx @@ -10,15 +10,15 @@ An Agent Assistant can leverage the reasoning abilities of large language models To facilitate quick learning and use, application templates for the Agent Assistant are available in the 'Explore' section. You can integrate these templates into your workspace. The new Dify 'Studio' also allows the creation of a custom Agent Assistant to suit individual requirements. This assistant can assist in analyzing financial reports, composing reports, designing logos, and organizing travel plans. -![Explore-Agent Assistant Application Template](/en-us/images/assets/docs-1.png) +![Explore-Agent Assistant Application Template](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/83bd71eba743301bc108ea5e9289b00a.png) The task completion ability of the Agent Assistant depends on the inference capabilities of the model selected. We recommend using a more powerful model series like GPT-4 when employing Agent Assistant to achieve more stable task completion results. -![Selecting the Reasoning Model for Agent Assistant](/en-us/images/assets/docs-3.png) +![Selecting the Reasoning Model for Agent Assistant](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/9843518e4e543071dde7f9b7f1ce527c.png) You can write prompts for the Agent Assistant in 'Instructions'. 
To achieve optimal results, you can clearly define its task objectives, workflow, resources, and limitations in the instructions. -![Orchestrating Prompts for Agent Assistant](/en-us/images/assets/docs-4.png) +![Orchestrating Prompts for Agent Assistant](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/c2cf3853cfda01d111df29dffdef8efe.png) ## Adding Tools for the Agent Assistant @@ -28,13 +28,13 @@ In the "Tools" section, you are able to add tools that are required for use. The You have the option to directly use built-in tools in Dify, or you can easily import custom API tools (currently supporting OpenAPI/Swagger and OpenAI Plugin standards). -![Adding Tools for the Assistant](/en-us/images/assets/docs-5.png) +![Adding Tools for the Assistant](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/427269890d201edf077280ff21d6ee17.png) The **Tools** feature allows you to create more powerful AI applications on Dify. For example, you can orchestrate suitable tools for Agent Assistant, enabling it to complete complex tasks through reasoning, step decomposition, and tool invocation. Additionally, the tool simplifies the integration of your application with other systems or services, enabling interactions with the external environment, such as executing code or accessing proprietary information sources. Simply mention the name of the tool you want to invoke in the chat box, and it will be automatically activated. -![](/en-us/images/assets/agent-dalle3.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/bf067815b692fc617c4f23f02e687dd9.png) ## Agent Settings @@ -42,20 +42,20 @@ On Dify, two inference modes are provided for Agent Assistant: Function Calling In the Agent settings, you can modify the iteration limit of the Agent. 
-![Function Calling Mode](/en-us/images/assets/docs-6.png) +![Function Calling Mode](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/03d1837e32321ba352e8df2abf9b0671.png) ## Configuring the Conversation Opener You can set up a conversation opener and initial questions for your Agent Assistant. The configured conversation opener will be displayed at the beginning of each user's first interaction, showcasing the types of tasks the Agent can perform, along with examples of questions that can be asked. -![Configuring the Conversation Opener and Initial Questions](/en-us/images/assets/docs-8.png) +![Configuring the Conversation Opener and Initial Questions](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/67818c83a30cae24eb7048fe9dc96538.png) ## Debugging and Preview After orchestrating the intelligent assistant, you can debug and preview it before publishing as an application to verify the assistant's task completion effectiveness. 
-![Debugging and Preview](/en-us/images/assets/docs-9.png) +![Debugging and Preview](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/6561599c7b57818a45fb2e380b00ba28.png) ## Publish Application -![Publishing the Application as a Webapp](/en-us/images/assets/docs-10.png) \ No newline at end of file +![Publishing the Application as a Webapp](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/7f5513a74e2dbfc8ac0039a143a12f68.png) \ No newline at end of file diff --git a/en/guides/application-orchestrate/app-toolkits/readme.mdx b/en/guides/application-orchestrate/app-toolkits/readme.mdx index 739e710e..c0821481 100644 --- a/en/guides/application-orchestrate/app-toolkits/readme.mdx +++ b/en/guides/application-orchestrate/app-toolkits/readme.mdx @@ -24,7 +24,7 @@ Setting next step question suggestions allows the AI to generate 3 follow-up que When this feature is enabled, the large language model will cite content from the knowledge base when responding to questions. You can view specific citation details below the response, including the original text segment, segment number, and match score. -For more details, please see [Citation and Attribution](https://docs.dify.ai/guides/knowledge-base/retrieval-test-and-citation#id-2.-citation-and-attribution). +For more details, please see [Citation and Attribution](/en/guides/knowledge-base/retrieval-test-and-citation). ### Content Moderation @@ -34,4 +34,4 @@ During interactions with AI applications, we often have stringent requirements r The annotated replies feature allows for customizable high-quality Q\&A responses through manual editing and annotation. -See [Annotated Replies](../../annotation/annotation-reply.md). +See [Annotated Replies](/en/guides/annotation/annotation-reply). 
diff --git a/en/guides/application-orchestrate/chatbot.mdx b/en/guides/application-orchestrate/chatbot-application.mdx similarity index 66% rename from en/guides/application-orchestrate/chatbot.mdx rename to en/guides/application-orchestrate/chatbot-application.mdx index 9c2aaf45..00077365 100644 --- a/en/guides/application-orchestrate/chatbot.mdx +++ b/en/guides/application-orchestrate/chatbot-application.mdx @@ -18,13 +18,13 @@ Here, we use a interviewer application as an example to introduce the way to com Click the "Create Application" button on the homepage to create an application. Fill in the application name, and select **"Chat App"** as the application type. -![Create Application](/en-us/img/chat-app.png) +![Create Application](https://assets-docs.dify.ai/2024/12/8012e6ed06bfb10b239a4b999b1a0787.png) #### Step 2: Compose the Application After the application is successfully created, it will automatically redirect to the application overview page. Click on the button on the left menu: **"Orchestrate"** to compose the application. -![](/en-us/img/compose-the-app.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/6e45b9cecf2280814de20375ca0a3734.png) **2.1 Fill in Prompts** @@ -40,42 +40,42 @@ For a better experience, we will add an opening dialogue: `"Hello, {{name}}. 
I'm To add the opening dialogue, click the "Add Feature" button in the upper left corner, and enable the "Conversation remarkers" feature: -![](/en-us/img/conversation-remarkers.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/86ff7c2f2c29bc4a31e3b4a255ecbb84.png) And then edit the opening remarks: -![](/en-us/img/conversation-options.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/30f248ed143042b06d46b95c4f062fe1.png) **2.2 Adding Context** -If an application wants to generate content based on private contextual conversations, it can use our [knowledge](/en-us/knowledge-base/introduction) feature. Click the "Add" button in the context to add a knowledge base. +If an application wants to generate content based on private contextual conversations, it can use our [knowledge](/en/guides/knowledge-base/readme) feature. Click the "Add" button in the context to add a knowledge base. -![](/en-us/img/context.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/dcdcfbe0d7b44792c53d1d026aeebe29.png) **2.3 Debugging** Enter user inputs on the right side and check the respond content. -![](/en-us/img/debug.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/1509ff763400980eba81dac1f01d996f.png) If the results are not satisfactory, you can adjust the prompts and model parameters. Click on the model name in the upper right corner to set the parameters of the model: -![](/en-us/img/adjust-model-parameters.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/07a8c40ba969e849f083e27dee6992b0.png) **Debugging with multiple models:** -If debugging with a single model feels inefficient, you can utilize the **Debug as Multiple Models** feature to batch-test the models’ response effectiveness. 
+If debugging with a single model feels inefficient, you can utilize the **Debug as Multiple Models** feature to batch-test the models' response effectiveness. -![](/en-us/img/multiple-models.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/bf067815b692fc617c4f23f02e687dd9.png) Supports adding up to 4 LLMs at the same time. -![](/en-us/img/multiple-models-2.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/67818c83a30cae24eb7048fe9dc96538.png) -> ⚠️ When using the multi-model debugging feature, if only some large models are visible, it is because other large models’ keys have not been added yet. You can manually add multiple models’ keys in [“Add New Provider”](/en-us/user-guide/models/new-provider). +> ⚠️ When using the multi-model debugging feature, if only some large models are visible, it is because other large models' keys have not been added yet. You can manually add multiple models' keys in ["Add New Provider"](/en/guides/model-configuration/new-provider). **2.4 Publish App** -After debugging your application, click the **"Publish"** button in the top right corner to create a standalone AI application. In addition to experiencing the application via a public URL, you can also perform secondary development based on APIs, embed it into websites, and more. For details, please refer to [Publishing](/en-us/user-guide/application-publishing/launch-your-webapp-quickly/web-app-settings). +After debugging your application, click the **"Publish"** button in the top right corner to create a standalone AI application. In addition to experiencing the application via a public URL, you can also perform secondary development based on APIs, embed it into websites, and more. For details, please refer to [Publishing](/en/guides/application-publishing/launch-your-webapp-quickly/web-app-settings). 
-If you want to customize the application that you share, you can Fork our open source [WebApp template](https://github.com/langgenius/webapp-conversation). Based on the template, you can modify the application to meet your specific needs and style requirements. +If you want to customize the application that you share, you can Fork our open source [WebApp template](https://github.com/langgenius/webapp-conversation). Based on the template, you can modify the application to meet your specific needs and style requirements. \ No newline at end of file diff --git a/en/guides/application-orchestrate/creating-an-application.mdx b/en/guides/application-orchestrate/creating-an-application.mdx index 27311de9..6c0e14e7 100644 --- a/en/guides/application-orchestrate/creating-an-application.mdx +++ b/en/guides/application-orchestrate/creating-an-application.mdx @@ -54,4 +54,4 @@ https://example.com/your_dsl.yml ![Create an application by importing a DSL file](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/557be7a176fba979b7f7327d6a0cf8e4.png) -> When importing a DSL file, the version will be checked. Significant discrepancies between DSL versions may lead to compatibility issues. For more details, please refer to [Application Management: Import](https://docs.dify.ai/guides/management/app-management#importing-application). +> When importing a DSL file, the version will be checked. Significant discrepancies between DSL versions may lead to compatibility issues. For more details, please refer to [Application Management: Import](/en/guides/management/app-management#importing-application). 
diff --git a/en/guides/application-orchestrate/readme.mdx b/en/guides/application-orchestrate/readme.mdx index cc3083df..d7ddeb22 100644 --- a/en/guides/application-orchestrate/readme.mdx +++ b/en/guides/application-orchestrate/readme.mdx @@ -12,7 +12,7 @@ In short, an application provides developers with: You can choose **any one** or **all** of these to support your AI application development. -### Application Types +### Application Types Dify offers five types of applications: diff --git a/en/guides/application-orchestrate/text-generator.mdx b/en/guides/application-orchestrate/text-generator.mdx index 8a9c6a8b..ce3ec53d 100644 --- a/en/guides/application-orchestrate/text-generator.mdx +++ b/en/guides/application-orchestrate/text-generator.mdx @@ -18,13 +18,13 @@ Let's take creating a **Weekly Report Generator** application as an example to e Click the "Create Application" button on the homepage to create an application. Enter the application name and select **Text Generation Application** as the application type. -![](/en-us/img/94275017d6f0477fcd32323e52915fd6.png) +![](https://assets-docs.dify.ai/2024/12/8012e6ed06bfb10b239a4b999b1a0787.png) ### Configuring the Application After creating the application, you'll automatically be redirected to the application overview page, where you can set variables, add context, and configure additional chat features for the application. -![](/en-us/img/ed7368f117afa02ca5359472ea1167e8.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/6e45b9cecf2280814de20375ca0a3734.png) #### Application Configuration @@ -37,39 +37,39 @@ Example: 2. The right content box will automatically generate the prompt. 3. You can insert custom variables into the prompt. 
-![](/en-us/img/592adde305ffe7a9b2f538b20a14483e.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/c2cf3853cfda01d111df29dffdef8efe.png) #### Adding Context If you want to limit AI conversations to within a knowledge base, such as enterprise customer service conversation standards, you can reference the knowledge base in the "Context" section. -![](/en-us/img/838312f6b88927a08c9f113c9a557608.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/dcdcfbe0d7b44792c53d1d026aeebe29.png) ### Debugging Fill in the user input fields on the right side and enter content for debugging. -![](/en-us/img/302d73e248bc80a357e83ab3d65dcf32.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/1509ff763400980eba81dac1f01d996f.png) If the response results are not satisfactory, you can adjust the prompts and underlying model. You can also debug simultaneously with multiple models to find the most suitable configuration. -![](/en-us/img/241e9bd270b26a8b9658a784ecf9bf13.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/07a8c40ba969e849f083e27dee6992b0.png) **Debugging with Multiple Models:** If debugging with a single model feels inefficient, you can use the **"Debug with Multiple Models"** feature to batch review model response effects. -![](/en-us/img/5a6cff31c1fa94912ee39306739a2d6e.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/bf067815b692fc617c4f23f02e687dd9.png) Supports adding up to 4 large models simultaneously. -![](/en-us/img/60109394134b665cb856505503f4d975.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/application-orchestrate/67818c83a30cae24eb7048fe9dc96538.png) ⚠️ When using the multi-model debugging feature, if you only see some large models, this is because Keys for other large models haven't been added yet. 
You can manually add Keys for multiple models in "Add New Provider". ### Publishing the Application -After debugging the application, click the **"Publish"** button in the top right corner to generate a standalone AI application. In addition to experiencing the application through a public URL, you can also perform secondary development based on APIs or embed it into websites. For details, please refer to [Publishing Applications](/en-us/user-guide/application-publishing/launch-your-webapp-quickly/web-app-settings). +After debugging the application, click the **"Publish"** button in the top right corner to generate a standalone AI application. In addition to experiencing the application through a public URL, you can also perform secondary development based on APIs or embed it into websites. For details, please refer to [Publishing Applications](/en/guides/application-publishing/launch-your-webapp-quickly/web-app-settings). If you want to customize a published application, you can fork our open-source WebApp template and modify it to meet your scenario and style requirements. 
diff --git a/en/guides/application-publishing/README.mdx b/en/guides/application-publishing/README.mdx index b5f5e31e..14398bb7 100644 --- a/en/guides/application-publishing/README.mdx +++ b/en/guides/application-publishing/README.mdx @@ -6,6 +6,6 @@ title: Launching Dify Apps For more detailed information, please refer to the following sections: - [Publish as a Single-page Webapp](launch-your-webapp-quickly/) -- [Embedding In Websites](embedding-in-websites.md) -- [Developing with APIs](developing-with-apis.md) -- [Based on Frontend Templates](based-on-frontend-templates.md) \ No newline at end of file +- [Embedding In Websites](embedding-in-websites) +- [Developing with APIs](developing-with-apis) +- [Based on Frontend Templates](based-on-frontend-templates) \ No newline at end of file diff --git a/en/guides/application-publishing/launch-your-webapp-quickly/README.mdx b/en/guides/application-publishing/launch-your-webapp-quickly/README.mdx index b2826606..d0c55f0f 100644 --- a/en/guides/application-publishing/launch-your-webapp-quickly/README.mdx +++ b/en/guides/application-publishing/launch-your-webapp-quickly/README.mdx @@ -18,14 +18,14 @@ We have pre-set Web App UI for the following two types of applications: * **Text Generation (Preview)** - - text-generator.md + + Learn how to configure text generation applications * **Conversation (Preview)** - - conversation-application.md + + Learn how to configure conversation applications ### Setting Up Your AI Site @@ -36,10 +36,10 @@ You can modify the language, color theme, copyright ownership, privacy policy li Currently, Web App supports multiple languages: English, Simplified Chinese, Traditional Chinese, Portuguese, German, Japanese, Korean, Ukrainian, and Vietnamese. If you want more languages to be supported, you can submit an Issue on GitHub to seek support or submit a PR to contribute code. 
- - web-app-settings.md + + Learn how to configure your web app settings ### Embedding Your AI Site -You can also integrate Dify Web App into your own web project, blog, or any other web page. For more details, please take a refer to [Embedding In Websites](https://docs.dify.ai/guides/application-publishing/embedding-in-websites). +You can also integrate Dify Web App into your own web project, blog, or any other web page. For more details, please take a refer to [Embedding In Websites](/en/guides/application-publishing/embedding-in-websites). diff --git a/en/guides/application-publishing/launch-your-webapp-quickly/conversation-application.mdx b/en/guides/application-publishing/launch-your-webapp-quickly/conversation-application.mdx index 91de808f..e82afc6b 100644 --- a/en/guides/application-publishing/launch-your-webapp-quickly/conversation-application.mdx +++ b/en/guides/application-publishing/launch-your-webapp-quickly/conversation-application.mdx @@ -50,4 +50,4 @@ _Please ensure that your device environment is authorized to use the microphone. ### References and Attributions -When testing the knowledge base effect within the application, you can go to **Workspace -- Add Function -- Citation and Attribution** to enable the citation attribution feature. For detailed instructions, please refer to [Citation and Attribution](https://docs.dify.ai/guides/knowledge-base/retrieval-test-and-citation#id-2.-citation-and-attribution). +When testing the knowledge base effect within the application, you can go to **Workspace -- Add Function -- Citation and Attribution** to enable the citation attribution feature. For detailed instructions, please refer to [Citation and Attribution](/en/guides/knowledge-base/retrieval-test-and-citation). 
diff --git a/en/guides/extension/api-based-extension/README.mdx b/en/guides/extension/api-based-extension/README.mdx index f4681cf8..bb6235b2 100644 --- a/en/guides/extension/api-based-extension/README.mdx +++ b/en/guides/extension/api-based-extension/README.mdx @@ -282,4 +282,4 @@ Now, this API endpoint is accessible publicly. You can configure this endpoint i We recommend that you use Cloudflare Workers to deploy your API extension, because Cloudflare Workers can easily provide a public address and can be used for free. -[cloudflare-workers.md](cloudflare-workers.md "mention") +[cloudflare-workers](cloudflare-workers) diff --git a/en/guides/extension/code-based-extension/README.mdx b/en/guides/extension/code-based-extension/README.mdx index 95ed8ed7..f8b47e58 100644 --- a/en/guides/extension/code-based-extension/README.mdx +++ b/en/guides/extension/code-based-extension/README.mdx @@ -5,8 +5,8 @@ title: Code Based Extensions For developers deploying Dify locally, if you want to implement extension capabilities without rewriting an API service, you can use code extensions. This allows you to extend or enhance the functionality of the program in code form (i.e., plugin capability) without disrupting the original code logic of Dify. It follows certain interfaces or specifications to achieve compatibility and plug-and-play capability with the main program. 
Currently, Dify offers two types of code extensions: -* Adding a new type of external data tool [External Data Tool](https://docs.dify.ai/guides/extension/api-based-extension/external-data-tool) -* Extending sensitive content moderation strategies [Moderation](https://docs.dify.ai/guides/extension/api-based-extension/moderation) +* Adding a new type of external data tool [External Data Tool](/en/guides/extension/api-based-extension/external-data-tool) +* Extending sensitive content moderation strategies [Moderation](/en/guides/extension/api-based-extension/moderation) Based on the above functionalities, you can achieve horizontal expansion by following the code-level interface specifications. If you are willing to contribute your extensions to us, we warmly welcome you to submit a PR to Dify. diff --git a/en/guides/extension/code-based-extension/external-data-tool.mdx b/en/guides/extension/code-based-extension/external-data-tool.mdx index 0f6dea38..1d06366f 100644 --- a/en/guides/extension/code-based-extension/external-data-tool.mdx +++ b/en/guides/extension/code-based-extension/external-data-tool.mdx @@ -3,7 +3,7 @@ title: External Data Tools --- -External data tools are used to fetch additional data from external sources after the end user submits data, and then assemble this data into prompts as additional context information for the LLM. Dify provides a default tool for external API calls, check [api-based-extension](../api-based-extension/ "mention") for details. +External data tools are used to fetch additional data from external sources after the end user submits data, and then assemble this data into prompts as additional context information for the LLM. Dify provides a default tool for external API calls, check [api-based-extension](/en/guides/extension/api-based-extension) for details. 
For developers deploying Dify locally, to meet more customized needs or to avoid developing an additional API Server, you can directly insert custom external data tool logic in the form of a plugin based on the Dify service. After extending custom tools, your custom tool options will be added to the dropdown list of tool types, and team members can use these custom tools to fetch external data. @@ -34,7 +34,7 @@ To add a custom type `Weather Search`, you need to create the relevant directory ### 2. **Add Frontend Component Specifications** -* `schema.json`, which defines the frontend component specifications, detailed in [.](./ "mention") +* `schema.json`, which defines the frontend component specifications, detailed in [Code Based Extension](.) ```json { diff --git a/en/guides/knowledge-base/api-documentation/external-knowledge-api-documentation.mdx b/en/guides/knowledge-base/api-documentation/external-knowledge-api-documentation.mdx index 1fbb83b2..f555e5f9 100644 --- a/en/guides/knowledge-base/api-documentation/external-knowledge-api-documentation.mdx +++ b/en/guides/knowledge-base/api-documentation/external-knowledge-api-documentation.mdx @@ -10,7 +10,7 @@ POST /retrieval ## Header -This API is used to connect to a knowledge base that is independent of the Dify and maintained by developers. For more details, please refer to [Connecting to an External Knowledge Base](https://docs.dify.ai/guides/knowledge-base/connect-external-knowledge-base). You can use `API-Key` in the `Authorization` HTTP Header to verify permissions. The authentication logic is defined by you in the retrieval API, as shown below: +This API is used to connect to a knowledge base that is independent of the Dify and maintained by developers. For more details, please refer to [Connecting to an External Knowledge Base](/en/guides/knowledge-base/connect-external-knowledge-base). You can use `API-Key` in the `Authorization` HTTP Header to verify permissions. 
The authentication logic is defined by you in the retrieval API, as shown below: ``` Authorization: Bearer {API_KEY} diff --git a/en/guides/knowledge-base/connect-external-knowledge-base.mdx b/en/guides/knowledge-base/connect-external-knowledge-base.mdx index b44df79c..cb4aae2e 100644 --- a/en/guides/knowledge-base/connect-external-knowledge-base.mdx +++ b/en/guides/knowledge-base/connect-external-knowledge-base.mdx @@ -23,7 +23,7 @@ The **Connect to External Knowledge Base** feature enables integration between t ## Connection Example -[how-to-connect-aws-bedrock?](https://docs.dify.ai/zh-hans/learn-more/use-cases/how-to-connect-aws-bedrock) +[how-to-connect-aws-bedrock?](/en/learn-more/use-cases/how-to-connect-aws-bedrock) ## FAQ diff --git a/en/guides/knowledge-base/create-knowledge-and-upload-documents/chunking-and-cleaning-text.mdx b/en/guides/knowledge-base/create-knowledge-and-upload-documents/chunking-and-cleaning-text.mdx index 4ef5dc37..24c05132 100644 --- a/en/guides/knowledge-base/create-knowledge-and-upload-documents/chunking-and-cleaning-text.mdx +++ b/en/guides/knowledge-base/create-knowledge-and-upload-documents/chunking-and-cleaning-text.mdx @@ -17,7 +17,7 @@ Dify offers two chunking modes: **General Mode** and **Parent-child Mode**, tail * **Cleaning** -To ensure effective text retrieval, it’s essential to clean the data before uploading it to the knowledge base. For instance, meaningless characters or empty lines can affect the quality of query responses and should be removed. Dify provides built-in automatic cleaning strategies. For more information, see [ETL](https://docs.dify.ai/guides/knowledge-base/create-knowledge-and-upload-documents#optional-etl-configuration). +To ensure effective text retrieval, it’s essential to clean the data before uploading it to the knowledge base. For instance, meaningless characters or empty lines can affect the quality of query responses and should be removed. Dify provides built-in automatic cleaning strategies. 
For more information, see [ETL](/en/guides/knowledge-base/create-knowledge-and-upload-documents#optional-etl-configuration). Whether an LLM can accurately answer knowledge base queries depends on how effectively the system retrieves relevant content chunks. High-relevance chunks are crucial for AI applications to produce precise and comprehensive responses. @@ -59,7 +59,7 @@ If multiple documents are uploaded in bulk, you can switch between them by click ![General mode](https://assets-docs.dify.ai/2024/12/b3ec2ce860550563234ca22967abdd17.png) -After setting the chunking rules, the next step is to specify the indexing method. General mode supports **High-Quality Indexing** **Method** and **Economical Indexing Method**. For more details, please refer to [Set up the Indexing Method](3.-select-the-indexing-method-and-retrieval-setting.md). +After setting the chunking rules, the next step is to specify the indexing method. General mode supports **High-Quality Indexing** **Method** and **Economical Indexing Method**. For more details, please refer to [Set up the Indexing Method](/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods). #### Parent-child Mode @@ -125,7 +125,7 @@ If multiple documents are uploaded in bulk, you can switch between them by click ![Parent-child mode](https://assets-docs.dify.ai/2024/12/af5c9a68f85120a6ea687bf93ecfb80a.png) -To ensure accurate content retrieval, the Parent-child chunk mode only supports the [High-Quality Indexing](3.-select-the-indexing-method-and-retrieval-setting.md#high-quality). +To ensure accurate content retrieval, the Parent-child chunk mode only supports the [High-Quality Indexing](/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods#high-quality). ### What's the Difference Between Two Modes? 
@@ -140,6 +140,6 @@ Different chunking methods influence how effectively the LLM can search the know After choosing the chunking mode, refer to the following documentation to configure the indexing method and retrieval method and finis the creation of your knowledge base. - + Check for more details. diff --git a/en/guides/knowledge-base/create-knowledge-and-upload-documents/readme.mdx b/en/guides/knowledge-base/create-knowledge-and-upload-documents/readme.mdx index 67c9e6ef..8ca8b0a1 100644 --- a/en/guides/knowledge-base/create-knowledge-and-upload-documents/readme.mdx +++ b/en/guides/knowledge-base/create-knowledge-and-upload-documents/readme.mdx @@ -1,57 +1,57 @@ --- -title: 知识库创建步骤 +title: Creating Knowledge Base --- -创建知识库并上传文档大致分为以下步骤: +To create a knowledge base and upload documents, follow these steps: -1. 创建知识库。通过上传本地文件、导入在线数据或创建一个空的知识库。 +1. Create a knowledge base by uploading local files, importing online data, or creating an empty knowledge base. - -选择通过上传本地文件或导入在线数据的方式创建知识库 + +Choose to create a knowledge base by uploading local files or importing online data -2. 指定分段模式。该阶段是内容的预处理与数据结构化过程,长文本将会被划分为多个内容分段。你可以在此环节预览文本的分段效果。 +2. Choose a chunking mode. This stage involves content preprocessing and data structuring, where long texts are divided into multiple content chunks. You can preview the text chunking results at this stage. - -了解如何设置文本分段和清洗规则 + +Learn how to set up text chunking and cleaning rules -3. 设定索引方法和检索设置。知识库在接收到用户查询问题后,按照预设的检索方式在已有的文档内查找相关内容,提取出高度相关的信息片段供语言模型生成高质量答案。 +3. Set up the indexing method and retrieval settings. After receiving a user query, the knowledge base searches for relevant content in existing documents according to preset retrieval methods, extracting highly relevant information chunks for the language model to generate high-quality answers. - -了解如何配置检索方式和相关设置 + +Learn how to configure retrieval methods and related settings -5. 等待分段嵌入 -6. 
完成上传,在应用内关联知识库并使用。你可以参考[在应用内集成知识库](../integrate-knowledge-within-application.md),搭建出能够基于知识库进行问答的 LLM 应用。如需修改或管理知识库,请参考[知识库管理与文档维护](../knowledge-and-documents-maintenance/)。 +5. Wait for chunk embedding +6. Complete the upload, integrate the knowledge base in the application and use it. You can refer to [Integrating Knowledge Base in Applications](/en/guides/knowledge-base/integrate-knowledge-within-application) to build an LLM application capable of answering questions based on the knowledge base. For knowledge base modification or management, please refer to [Managing Knowledge Base and Documents](/en/guides/knowledge-base/knowledge-and-documents-maintenance). -![完成知识库的创建](https://assets-docs.dify.ai/2024/12/a3362a1cd384cb2b539c9858de555518.png) +![Complete the creation of knowledge base](https://assets-docs.dify.ai/2024/12/a3362a1cd384cb2b539c9858de555518.png) -### 参考阅读 +### Reference Reading #### ETL -在 RAG 的生产级应用中,为了获得更好的数据召回效果,需要对多源数据进行预处理和清洗,即 ETL (_extract, transform, load_)。为了增强非结构化/半结构化数据的预处理能力,Dify 支持了可选的 ETL 方案:**Dify ETL** 和[ ](https://docs.unstructured.io/welcome)[**Unstructured ETL** ](https://unstructured.io/)。Unstructured 能够高效地提取并转换你的数据为干净的数据用于后续的步骤。Dify 各版本的 ETL 方案选择: +In production-level RAG applications, data preprocessing and cleaning of multi-source data, or ETL (_extract, transform, load_), is necessary for better data recall results. To enhance the preprocessing capabilities of unstructured/semi-structured data, Dify supports optional ETL solutions: **Dify ETL** and [**Unstructured ETL**](https://unstructured.io/). Unstructured efficiently extracts and transforms your data into clean data for subsequent steps. 
ETL solution choices for different versions of Dify: -* SaaS 版不可选,默认使用 Unstructured ETL; -* 社区版可选,默认使用 Dify ETL ,可通过[环境变量](/zh-hans/getting-started/install-self-hosted/environments#zhi-shi-ku-pei-zhi)开启 Unstructured ETL; +* SaaS version: Non-optional, uses Unstructured ETL by default; +* Community version: Optional, uses Dify ETL by default, can enable Unstructured ETL through [environment variables](/en/getting-started/install-self-hosted/environments#knowledge-base-configuration); -文件解析支持格式的差异: +Differences in supported file parsing formats: | DIFY ETL | Unstructured ETL | | --- | --- | -| txt、markdown、md、pdf、html、htm、xlsx、xls、docx、csv | txt、markdown、md、pdf、html、htm、xlsx、xls、docx、csv、eml、msg、pptx、ppt、xml、epub | +| txt, markdown, md, pdf, html, htm, xlsx, xls, docx, csv | txt, markdown, md, pdf, html, htm, xlsx, xls, docx, csv, eml, msg, pptx, ppt, xml, epub | -不同的 ETL 方案在文件提取效果的方面也会存在差异,想了解更多关于 Unstructured ETL 的数据处理方式,请参考[官方文档](https://docs.unstructured.io/open-source/core-functionality/partitioning)。 +Different ETL solutions also have differences in file extraction effects. To learn more about Unstructured ETL's data processing methods, please refer to the [official documentation](https://docs.unstructured.io/open-source/core-functionality/partitioning). #### **Embedding** -**Embedding 嵌入**是一种将离散型变量(如单词、句子或者整个文档)转化为连续的向量表示的技术。它可以将高维数据(如单词、短语或图像)映射到低维空间,提供一种紧凑且有效的表示方式。这种表示不仅减少了数据的维度,还保留了重要的语义信息,使得后续的内容检索更加高效。 +**Embedding** is a technique for converting discrete variables (such as words, sentences, or entire documents) into continuous vector representations. It can map high-dimensional data (such as words, phrases, or images) to low-dimensional spaces, providing a compact and efficient representation. This representation not only reduces data dimensionality but also preserves important semantic information, making subsequent content retrieval more efficient. 
-**Embedding 模型**是一种专门用于将文本向量化的大语言模型,它擅长将文本转换为密集的数值向量,有效捕捉语义信息。 +**Embedding models** are large language models specifically designed for text vectorization. They excel at converting text into dense numerical vectors, effectively capturing semantic information. -> 如需了解更多,请参考:[《Dify:Embedding 技术与 Dify 知识库设计/规划》](https://mp.weixin.qq.com/s/vmY_CUmETo2IpEBf1nEGLQ)。 +> To learn more, please refer to: ["Dify: Embedding Technology and Dify Knowledge Base Design/Planning"](https://mp.weixin.qq.com/s/vmY_CUmETo2IpEBf1nEGLQ). -#### **元数据** +#### **Metadata** -如需使用元数据功能管理知识库,请参阅 [元数据](https://docs.dify.ai/zh-hans/guides/knowledge-base/metadata)。 \ No newline at end of file +To use metadata functionality for managing knowledge bases, please refer to [Metadata](/en/guides/knowledge-base/metadata). \ No newline at end of file diff --git a/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods.mdx b/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods.mdx index 3b638654..82cd146a 100644 --- a/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods.mdx +++ b/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods.mdx @@ -19,7 +19,7 @@ The knowledge base offers two indexing methods: **High-Quality** and **Economica In High-Quality Mode, the Embedding model converts text chunks into numerical vectors, enabling efficient compression and storage of large volumes of textual information. **This allows for more precise matching between user queries and text.** -Once the text chunks are vectorized and stored in the database, an effective retrieval method is required to fetch the chunks that match user queries. The High-Quality indexing method offers three retrieval settings: vector retrieval, full-text retrieval, and hybrid retrieval. 
For more details on retrieval settings, please check ["Retrieval Settings"](3.-select-the-indexing-method-and-retrieval-setting.md#id-4-retrieval-settings). +Once the text chunks are vectorized and stored in the database, an effective retrieval method is required to fetch the chunks that match user queries. The High-Quality indexing method offers three retrieval settings: vector retrieval, full-text retrieval, and hybrid retrieval. For more details on retrieval settings, please check ["Retrieval Settings"](/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods#retrieval-settings). After selecting **High Quality** mode, the indexing method for the knowledge base cannot be downgraded to **Economical** mode later. If you need to switch, it is recommended to create a new knowledge base and select the desired indexing method. @@ -45,7 +45,7 @@ When a user asks a question, the system identifies the most similar question and - Using 10 keywords per chunk for retrieval, no tokens are consumed at the expense of reduced retrieval accuracy. For the retrieved blocks, only the inverted index method is provided to select the most relevant blocks. For more details, please refer to [Inverted Index](3.-select-the-indexing-method-and-retrieval-setting.md#inverted-index). + Using 10 keywords per chunk for retrieval, no tokens are consumed at the expense of reduced retrieval accuracy. For the retrieved blocks, only the inverted index method is provided to select the most relevant blocks. For more details, please refer to [Inverted Index](/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods#inverted-index). If the performance of the economical indexing method does not meet your expectations, you can upgrade to the High-Quality indexing method in the Knowledge settings page. @@ -159,5 +159,6 @@ In **Economical Indexing** mode, only the inverted index approach is available. 
After specifying the retrieval settings, you can refer to the following documentation to review how keywords match with content chunks in different scenarios. - + +Learn how to test and cite your knowledge base retrieval diff --git a/en/guides/knowledge-base/faq.mdx b/en/guides/knowledge-base/faq.mdx index cced3c44..6933ead2 100644 --- a/en/guides/knowledge-base/faq.mdx +++ b/en/guides/knowledge-base/faq.mdx @@ -17,4 +17,4 @@ This issue may be caused by a disconnection from the Redis service, preventing t ## 4. How to Optimize Knowledge Base Content Retrieval for Applications? -You can optimize by adjusting retrieval settings and comparing different parameters. For specific instructions, please refer to [Retrieval Settings](/en-us/user-guide/knowledge-base/knowledge-base-creation/upload-documents#3). +You can optimize by adjusting retrieval settings and comparing different parameters. For specific instructions, please refer to [Retrieval Settings](/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods). diff --git a/en/guides/knowledge-base/integrate-knowledge-within-application.mdx b/en/guides/knowledge-base/integrate-knowledge-within-application.mdx index e5f3eb5d..8eb9089b 100644 --- a/en/guides/knowledge-base/integrate-knowledge-within-application.mdx +++ b/en/guides/knowledge-base/integrate-knowledge-within-application.mdx @@ -4,7 +4,7 @@ title: Integrate Knowledge Base within Application ### Creating an Application Integrated with Knowledge Base -A **"Knowledge Base"** can be used as an external information source to provide precise answers to user questions via LLM. You can associate an existing knowledge base with any [application type](https://docs.dify.ai/guides/application-orchestrate#application_type) in Dify. +A **"Knowledge Base"** can be used as an external information source to provide precise answers to user questions via LLM. 
You can associate an existing knowledge base with any [application type](/en/guides/application-orchestrate#application_type) in Dify. Taking a chat assistant as an example, the process is as follows: @@ -35,7 +35,7 @@ This method simultaneously queries all knowledge bases connected in **"Context"* For instance, in application A, with three knowledge bases K1, K2, and K3. When a user send a question, multiple relevant pieces of content will be retrieved and combined from these knowledge bases. To ensure the most pertinent content is identified, the Rerank strategy is employed to find the content that best relates to the user's query, enhancing the precision and reliability of the results. -In practical Q&A scenarios, the sources of content and retrieval methods for each knowledge base may differ. To manage the mixed content returned from retrieval, the [Rerank strategy](https://docs.dify.ai/learn-more/extended-reading/retrieval-augment/rerank) acts as a refined sorting mechanism. It ensures that the candidate content aligns well with the user's question, optimizing the ranking of results across multiple knowledge bases to identify the most suitable content, thereby improving answer quality and overall user experience. +In practical Q&A scenarios, the sources of content and retrieval methods for each knowledge base may differ. To manage the mixed content returned from retrieval, the [Rerank strategy](/en/learn-more/extended-reading/retrieval-augment/rerank) acts as a refined sorting mechanism. It ensures that the candidate content aligns well with the user's question, optimizing the ranking of results across multiple knowledge bases to identify the most suitable content, thereby improving answer quality and overall user experience. 
Considering the costs associated with using Rerank and the needs of the business, the multi-path retrieval mode provides two Rerank settings: @@ -59,7 +59,7 @@ The Rerank model is an external scoring system that calculates the relevance sco While this method incurs some additional costs, it is more adept at handling complex knowledge base content, such as content that combines semantic queries and keyword matches, or cases involving multilingual returned content. -> Click here to learn more about the [Re-ranking](https://docs.dify.ai/learn-more/extended-reading/retrieval-augment/rerank). +> Click here to learn more about the [Re-ranking](/en/learn-more/extended-reading/retrieval-augment/rerank). Dify currently supports multiple Rerank models. To use external Rerank models, you'll need to provide an API Key. Enter the API Key for the Rerank model (such as Cohere, Jina AI, etc.) on the "Model Provider" page. diff --git a/en/guides/knowledge-base/knowledge-and-documents-maintenance/introduction.mdx b/en/guides/knowledge-base/knowledge-and-documents-maintenance/introduction.mdx index f99244ed..aa5c99dd 100644 --- a/en/guides/knowledge-base/knowledge-and-documents-maintenance/introduction.mdx +++ b/en/guides/knowledge-base/knowledge-and-documents-maintenance/introduction.mdx @@ -20,9 +20,9 @@ Here, you can modify the knowledge base's name, description, permissions, indexi * **"Partial team members"**: Allows selective access to specific team members. Users without appropriate permissions cannot access the knowledge base. When granting access to team members (Options 2 or 3), authorized users are granted full permissions, including the ability to view, edit, and delete knowledge base content. -* **Indexing Mode**: For detailed explanations, please refer to the [documentation](create-knowledge-and-upload-documents/3.-select-the-indexing-method-and-retrieval-setting.md). 
+* **Indexing Mode**: For detailed explanations, please refer to the [documentation](/en/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods). * **Embedding Model**: Allows you to modify the embedding model for the knowledge base. Changing the embedding model will re-embed all documents in the knowledge base, and the original embeddings will be deleted. -* **Retrieval Settings**: For detailed explanations, please refer to the [documentation](../../learn-more/extended-reading/retrieval-augment/retrieval.md). +* **Retrieval Settings**: For detailed explanations, please refer to the [documentation](/en/learn-more/extended-reading/retrieval-augment/retrieval). *** @@ -38,7 +38,7 @@ You can manage your knowledge base documents either through a web interface or v You can administer all documents and their corresponding chunks directly in the knowledge base. For more details, refer to the following documentation: - + Learn how to maintain knowledge documents @@ -46,7 +46,7 @@ You can administer all documents and their corresponding chunks directly in the Dify Knowledge Base provides a comprehensive set of standard APIs. Developers can use these APIs to perform routine management and maintenance tasks, such as adding, deleting, updating, and retrieving documents and chunks. For more details, refer to the following documentation: - + Learn how to maintain knowledge base via API diff --git a/en/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-knowledge-documents.mdx b/en/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-knowledge-documents.mdx index 2d02dd83..aaedcd7a 100644 --- a/en/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-knowledge-documents.mdx +++ b/en/guides/knowledge-base/knowledge-and-documents-maintenance/maintain-knowledge-documents.mdx @@ -53,13 +53,13 @@ You can quickly view all enabled or disabled documents using the filter. 
![Filter text chunks](https://assets-docs.dify.ai/2025/01/47ef07319175a102bfd1692dcc6cac9b.png) -Different [chunking modes](../create-knowledge-and-upload-documents/2.-choose-a-chunk-mode.md) correspond to different text chunking preview methods: +Different [chunking modes](/en/guides/knowledge-base/create-knowledge-and-upload-documents/chunking-and-cleaning-text) correspond to different text chunking preview methods: **General Mode** - Chunks of text in [General mode](../create-knowledge-and-upload-documents.md#general) are independent blocks. If you want to view the complete content of a chunk, click the **full-screen** icon. + Chunks of text in [General mode](/en/guides/knowledge-base/create-knowledge-and-upload-documents/chunking-and-cleaning-text#general-mode) are independent blocks. If you want to view the complete content of a chunk, click the **full-screen** icon. ![Full screen viewing](https://assets-docs.dify.ai/2024/12/c37a1a247092cda9433a10243543698f.png) @@ -70,7 +70,7 @@ Different [chunking modes](../create-knowledge-and-upload-documents/2.-choose-a- **Parent-child Mode** - In[ Parent-child](maintain-knowledge-documents.md#parent-child-chunking-mode) mode, content is divided into parent chunks and child chunks. + In [Parent-child](/en/guides/knowledge-base/create-knowledge-and-upload-documents/chunking-and-cleaning-text#parent-child-mode) mode, content is divided into parent chunks and child chunks. * **Parent chunks** diff --git a/en/guides/knowledge-base/knowledge-base-creation/introduction.mdx b/en/guides/knowledge-base/knowledge-base-creation/introduction.mdx index 042c19b4..60054968 100644 --- a/en/guides/knowledge-base/knowledge-base-creation/introduction.mdx +++ b/en/guides/knowledge-base/knowledge-base-creation/introduction.mdx @@ -24,7 +24,7 @@ Choose a chunking mode and preview the spliting results. This stage involves con 4. Wait for the chunk embeddings to complete. -5. 
Once finished, link the knowledge base to your application and start using it. You can then [integrate it into your application](../integrate-knowledge-within-application) to build an LLM that are capable of Q\&A based on knowledge-bases. If you want to modify and manage the knowledge base further, take refer to [Knowledge Base and Document Maintenance](knowledge-and-documents-maintenance.md). +5. Once finished, link the knowledge base to your application and start using it. You can then [integrate it into your application](../integrate-knowledge-within-application) to build an LLM that is capable of Q\&A based on knowledge bases. If you want to modify and manage the knowledge base further, refer to [Knowledge Base and Document Maintenance](/en/guides/knowledge-base/knowledge-and-documents-maintenance). ![](https://assets-docs.dify.ai/2024/12/a3362a1cd384cb2b539c9858de555518.png) @@ -41,7 +41,7 @@ In production-level applications of RAG, to achieve better data retrieval, multi ETL solution choices in different versions of Dify: * The SaaS version defaults to using Unstructured ETL and cannot be changed; -* The community version defaults to using Dify ETL but can enable Unstructured ETL through [environment variables](../../getting-started/install-self-hosted/environments.md#zhi-shi-ku-pei-zhi); +* The community version defaults to using Dify ETL but can enable Unstructured ETL through [environment variables](/en/getting-started/install-self-hosted/environments#knowledge-base-configuration); Differences in supported file formats for parsing: diff --git a/en/guides/knowledge-base/knowledge-base-creation/upload-documents.mdx b/en/guides/knowledge-base/knowledge-base-creation/upload-documents.mdx index dfb554ff..a418df0a 100644 --- a/en/guides/knowledge-base/knowledge-base-creation/upload-documents.mdx +++ b/en/guides/knowledge-base/knowledge-base-creation/upload-documents.mdx @@ -1,6 +1,5 @@ --- title: Create Knowledge Base & Upload 
Documents -version: 'English' --- **Steps to upload documents into Knowledge:** @@ -42,7 +41,7 @@ After uploading content to the knowledge base, it needs to undergo chunking and * **Cleaning** - To ensure the quality of text recall, it is usually necessary to clean the data before passing it into the model. For example, unwanted characters or blank lines in the output may affect the quality of the response. To help users solve this problem, Dify provides various cleaning methods to help clean the output before sending it to downstream applications, check [ETL](create-knowledge-and-upload-documents.md#optional-etl-configuration) to know more details. + To ensure the quality of text recall, it is usually necessary to clean the data before passing it into the model. For example, unwanted characters or blank lines in the output may affect the quality of the response. To help users solve this problem, Dify provides various cleaning methods to help clean the output before sending it to downstream applications, check [ETL](/en/guides/knowledge-base/create-knowledge-and-upload-documents#optional-etl-configuration) to know more details. @@ -214,11 +213,11 @@ An inverted index is an index structure designed for rapid keyword retrieval in -After specifying the retrieval settings, you can refer to [Retrieval Test/Citation Attribution](/en-us/user-guide/knowledge-base/retrieval-test-and-citation) to check the matching between keywords and content chunks. +After specifying the retrieval settings, you can refer to [Retrieval Test/Citation Attribution](/en/guides/knowledge-base/retrieval-test-and-citation) to check the matching between keywords and content chunks. ## 5. Complete Upload -After configuring all the settings mentioned above, simply click "Save and Process" to complete the creation of your knowledge base. 
You can refer to [Integrate Knowledge Base Within Application](integrate-knowledge-within-application.md) to build an LLM application that can answer questions based on the knowledge base. +After configuring all the settings mentioned above, simply click "Save and Process" to complete the creation of your knowledge base. You can refer to [Integrate Knowledge Base Within Application](/en/guides/knowledge-base/integrate-knowledge-within-application) to build an LLM application that can answer questions based on the knowledge base. *** diff --git a/en/guides/management/app-management.mdx b/en/guides/management/app-management.mdx index 24334558..9f991624 100644 --- a/en/guides/management/app-management.mdx +++ b/en/guides/management/app-management.mdx @@ -22,7 +22,7 @@ Applications created in Dify support export in DSL format files, allowing you to ![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/management/544c18d770e230db93d6756bba98d8a7.png) -The DSL file does not include authorization information already filled in [Tool](../workflow/node/tools.md) nodes, such as API keys for third-party services. +The DSL file does not include authorization information already filled in [Tool](/en/guides/workflow/nodes/tools) nodes, such as API keys for third-party services. If the environment variables contain variables of the `Secret` type, a prompt will appear during file export asking whether to allow the export of this sensitive information. @@ -37,7 +37,7 @@ Dify DSL is an AI application engineering file standard defined by Dify.AI in v0 To import a Dify application, upload the DSL file to the Dify platform. A version check will be conducted during the import process, and a warning will be issued if a lower version of the DSL file is detected. - For SaaS users, the DSL file exported from the SaaS platform will always be the latest version. 
-- For Community users, it is recommended to consult [Upgrade Dify](https://docs.dify.ai/getting-started/install-self-hosted/docker-compose#upgrade-dify) to update the Community Edition and export an updated version of the DSL file, thus avoiding potential compatibility issues. +- For Community users, it is recommended to consult [Upgrade Dify](/en/getting-started/install-self-hosted/docker-compose#upgrade-dify) to update the Community Edition and export an updated version of the DSL file, thus avoiding potential compatibility issues. ![](https://assets-docs.dify.ai/2024/11/487d2c1cc8b86666feb35ea8a346c053.png) diff --git a/en/guides/management/subscription-management.mdx b/en/guides/management/subscription-management.mdx index 3f1bd00f..c381d866 100644 --- a/en/guides/management/subscription-management.mdx +++ b/en/guides/management/subscription-management.mdx @@ -59,7 +59,7 @@ Upon cancellation of the subscription plan, **the team will automatically transi 1GB - Marked replies for applications + Marked replies for applications 10 2000 5000 @@ -98,4 +98,4 @@ A team needs to be bound to one team owner. If the team ownership is not transfe #### 5. What are the differences between the subscription versions? -For a detailed feature comparison, please refer to the [Dify pricing](https://dify.ai/pricing). +For a detailed feature comparison, please refer to the [Dify pricing](https://dify.ai/pricing). 
\ No newline at end of file diff --git a/en/guides/management/team-members-management.mdx b/en/guides/management/team-members-management.mdx index cb964e8c..d33d2a65 100644 --- a/en/guides/management/team-members-management.mdx +++ b/en/guides/management/team-members-management.mdx @@ -35,7 +35,7 @@ To add a member, the team owner can click on the avatar in the upper right corne ![Assigning permissions to team members](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/management/bbd0873959dd3fe342b7212b98e812ae.png) -> For Community Edition, enabling email functionality requires the team owner to configure and activate the email service via system [environment variables](https://docs.dify.ai/getting-started/install-self-hosted/environments). +> For Community Edition, enabling email functionality requires the team owner to configure and activate the email service via system [environment variables](/en/getting-started/install-self-hosted/environments). - If the invited member has not registered with Dify, they will receive an invitation email. They can complete registration by clicking the link in the email. - If the invited member is already registered with Dify, permissions will be automatically assigned and **no invitation email will be sent**. The invited member can switch to the new workspace via the menu in the top right corner. diff --git a/en/guides/model-configuration/load-balancing.mdx b/en/guides/model-configuration/load-balancing.mdx index af5b1f1c..b4f87252 100644 --- a/en/guides/model-configuration/load-balancing.mdx +++ b/en/guides/model-configuration/load-balancing.mdx @@ -12,7 +12,7 @@ You can enable this feature by navigating to **Model Provider -- Model List -- C ![Model Load Balancing](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/model-configuration/9cabd37adbb8f566f55dacd4a214c84b.png) -Model load balancing is a paid feature. 
You can enable it by [subscribing to SaaS paid services](../../getting-started/cloud.md#subscription-plan) or purchasing the enterprise edition. +Model load balancing is a paid feature. You can enable it by [subscribing to SaaS paid services](/en/getting-started/cloud#subscription-plan) or purchasing the enterprise edition. The default API key is the credential added when initially configuring the model provider. You need to click **Add Configuration** to add different API keys for the same model to use the load balancing feature properly. diff --git a/en/guides/model-configuration/readme.mdx b/en/guides/model-configuration/readme.mdx index 4f592cb3..6f9c1219 100644 --- a/en/guides/model-configuration/readme.mdx +++ b/en/guides/model-configuration/readme.mdx @@ -16,11 +16,11 @@ Dify classifies models into 4 types, each for different uses: 1. **System Inference Models:** Used in applications for tasks like chat, name generation, and suggesting follow-up questions. - > Providers include [OpenAI](https://platform.openai.com/account/api-keys)、[Azure OpenAI Service](https://azure.microsoft.com/en-us/products/ai-services/openai-service/)、[Anthropic](https://console.anthropic.com/account/keys)、Hugging Face Hub、Replicate、Xinference、OpenLLM、[iFLYTEK SPARK](https://www.xfyun.cn/solutions/xinghuoAPI)、[WENXINYIYAN](https://console.bce.baidu.com/qianfan/ais/console/applicationConsole/application)、[TONGYI](https://dashscope.console.aliyun.com/api-key\_management?spm=a2c4g.11186623.0.0.3bbc424dxZms9k)、[Minimax](https://api.minimax.chat/user-center/basic-information/interface-key)、ZHIPU(ChatGLM)、[Ollama](https://docs.dify.ai/tutorials/model-configuration/ollama)、[LocalAI](https://github.com/mudler/LocalAI)、[GPUStack](https://github.com/gpustack/gpustack). 
+ > Providers include [OpenAI](https://platform.openai.com/account/api-keys)、[Azure OpenAI Service](https://azure.microsoft.com/en-us/products/ai-services/openai-service/)、[Anthropic](https://console.anthropic.com/account/keys)、Hugging Face Hub、Replicate、Xinference、OpenLLM、[iFLYTEK SPARK](https://www.xfyun.cn/solutions/xinghuoAPI)、[WENXINYIYAN](https://console.bce.baidu.com/qianfan/ais/console/applicationConsole/application)、[TONGYI](https://dashscope.console.aliyun.com/api-key\_management?spm=a2c4g.11186623.0.0.3bbc424dxZms9k)、[Minimax](https://api.minimax.chat/user-center/basic-information/interface-key)、ZHIPU(ChatGLM)、[Ollama](/en/development/models-integration/ollama)、[LocalAI](https://github.com/mudler/LocalAI)、[GPUStack](https://github.com/gpustack/gpustack). 2. **Embedding Models:** Employed for embedding segmented documents in knowledge and processing user queries in applications. > Providers include OpenAI, ZHIPU (ChatGLM), Jina AI([Jina Embeddings](https://jina.ai/embeddings/)). -3. [**Rerank Models**](https://docs.dify.ai/advanced/retrieval-augment/rerank)**:** Enhance search capabilities in LLMs. +3. [Rerank Models](/en/learn-more/extended-reading/retrieval-augment/rerank)**:** Enhance search capabilities in LLMs. > Providers include Cohere, Jina AI([Jina Reranker](https://jina.ai/reranker)). 4. **Speech-to-Text Models:** Convert spoken words to text in conversational applications. @@ -64,10 +64,10 @@ Dify uses [PKCS1\_OAEP](https://pycryptodome.readthedocs.io/en/latest/src/cipher Specific integration methods are not detailed here. 
-* [Hugging Face](../../development/models-integration/hugging-face.md) -* [Replicate](../../development/models-integration/replicate.md) -* [Xinference](../../development/models-integration/xinference.md) -* [OpenLLM](../../development/models-integration/openllm.md) +* [Hugging Face](/en/development/models-integration/hugging-face) +* [Replicate](/en/development/models-integration/replicate) +* [Xinference](/en/development/models-integration/xinference) +* [OpenLLM](/en/development/models-integration/openllm) ## Using Models diff --git a/en/guides/workflow/additional-features.mdx b/en/guides/workflow/additional-features.mdx index fcf19a77..de1acad5 100644 --- a/en/guides/workflow/additional-features.mdx +++ b/en/guides/workflow/additional-features.mdx @@ -114,4 +114,4 @@ After the application user uploads both document files and images, document file * **Audio and Video Files** -LLMs do not yet support direct reading of audio and video files, and the Dify platform has not yet built-in related file processing tools. Application developers can refer to [External Data Tools](../extension/api-based-extension/external-data-tool.md) to integrate tools for processing file information themselves. +LLMs do not yet support direct reading of audio and video files, and the Dify platform has not yet built-in related file processing tools. Application developers can refer to [External Data Tools](/en/guides/extension/api-based-extension/external-data-tool) to integrate tools for processing file information themselves. diff --git a/en/guides/workflow/bulletin.mdx b/en/guides/workflow/bulletin.mdx index 35dd0e02..64850bdd 100644 --- a/en/guides/workflow/bulletin.mdx +++ b/en/guides/workflow/bulletin.mdx @@ -2,13 +2,13 @@ title: Bulletin - Image Upload Replaced by File Upload --- -The image upload feature has been integrated into the more comprehensive [File Upload](file-upload.md) functionality. 
To avoid redundant features, we have decided to upgrade and adjust the “[Features](additional-features.md)” for Workflow and Chatflow applications as follows: +The image upload feature has been integrated into the more comprehensive [File Upload](/en/guides/workflow/file-upload) functionality. To avoid redundant features, we have decided to upgrade and adjust the “[Features](/en/guides/workflow/additional-features)” for Workflow and Chatflow applications as follows: * The image upload option in Chatflow’s “Features” has been removed and replaced by the new “File Upload” feature. Within the “File Upload” feature, you can select the image file type. Additionally, the image upload icon in the application dialog has been replaced with a file upload icon. ![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/edeb9bfe8cafc8c7fa242bf895dfb850.png) -* The image upload option in Workflow’s “Features” and the `sys.files` [variable](variables.md) will be deprecated in the future. Both have been marked as `LEGACY`, and developers are encouraged to use custom file variables to add file upload functionality to Workflow applications. +* The image upload option in Workflow’s “Features” and the `sys.files` [variable](/en/guides/workflow/variables) will be deprecated in the future. Both have been marked as `LEGACY`, and developers are encouraged to use custom file variables to add file upload functionality to Workflow applications. ![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/df9f74f5362f31672ebe3a166f0a419a.png) @@ -41,7 +41,7 @@ If you have already created Chatflow applications with the “Image Upload” fe ![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/bc6e9e3a57844f5f1e50b4716592449d.png) -If you wish to add the “Image Upload” feature to a Chatflow application, enable “File Upload” in the features and select only the “image” file type. 
Then enable the Vision feature in the LLM node and specify the sys.files variable. The upload entry will appear as a “paperclip” icon. For detailed instructions, refer to [Additional Features](additional-features.md). +If you wish to add the “Image Upload” feature to a Chatflow application, enable “File Upload” in the features and select only the “image” file type. Then enable the Vision feature in the LLM node and specify the sys.files variable. The upload entry will appear as a “paperclip” icon. For detailed instructions, refer to [Additional Features](/en/guides/workflow/additional-features). ![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/d8c447886a9bdea1aeeda8991b46742e.png) @@ -49,7 +49,7 @@ If you wish to add the “Image Upload” feature to a Chatflow application, ena If you have already created Workflow applications with the “Image Upload” feature enabled and activated the Vision feature in the LLM node, this change will not affect you immediately, but you will need to complete manual migration before the official deprecation. -If you wish to enable the “Image Upload” feature for a Workflow application, add a file variable in the [Start](node/start.md) node. Then, reference this file variable in subsequent nodes instead of using the `sys.files` variable. +If you wish to enable the “Image Upload” feature for a Workflow application, add a file variable in the [Start](/en/guides/workflow/nodes/start) node. Then, reference this file variable in subsequent nodes instead of using the `sys.files` variable. #### For Dify Community Edition or Self-hosted Enterprise Users: @@ -65,7 +65,7 @@ If you wish to add the “Image Upload” feature to a Chatflow application, ref Existing Workflow applications will not be affected, but please complete the manual migration before the official deprecation. -If you wish to enable the “Image Upload” feature for a Workflow application, add a file variable in the [Start](node/start.md) node. 
Then, reference this file variable in subsequent nodes instead of using the `sys.files` variable.\ +If you wish to enable the “Image Upload” feature for a Workflow application, add a file variable in the [Start](/en/guides/workflow/nodes/start) node. Then, reference this file variable in subsequent nodes instead of using the `sys.files` variable.\ ### FAQs: diff --git a/en/guides/workflow/error-handling/README.mdx b/en/guides/workflow/error-handling/README.mdx index 0ba28769..fc237ddd 100644 --- a/en/guides/workflow/error-handling/README.mdx +++ b/en/guides/workflow/error-handling/README.mdx @@ -28,10 +28,10 @@ For instance, a code node on the alternative path can split the content into sma The following four types of nodes have added error-handling feature. Click on the title to read the detailed documents: -* [LLM](../node/llm.md) -* [HTTP](../node/http-request.md) -* [Code](../node/code.md) -* [Tools](../node/tools.md) +* [LLM](/en/guides/workflow/nodes/llm) +* [HTTP](/en/guides/workflow/nodes/http-request) +* [Code](/en/guides/workflow/nodes/code) +* [Tools](/en/guides/workflow/nodes/tools) **Retry on Failure** @@ -51,7 +51,7 @@ The error handling feature provides the following three options: • **Fail Branch**: Execute the pre-arranged fail branch after an exception occurs. -For explanations and configuration methods of each strategy, please refer to the [predefined error handling logic](predefined-error-handling-logic.md). +For explanations and configuration methods of each strategy, please refer to the [predefined error handling logic](/en/guides/workflow/error-handling/predefined-error-handling-logic). 
![Error handling](https://assets-docs.dify.ai/2024/12/3c198be3a7b9c1f9649bbd8b9a0a9ec5.png)

diff --git a/en/guides/workflow/error-handling/error-type.mdx b/en/guides/workflow/error-handling/error-type.mdx
index 6ce5313d..4f8bf31e 100644
--- a/en/guides/workflow/error-handling/error-type.mdx
+++ b/en/guides/workflow/error-handling/error-type.mdx
@@ -16,7 +16,7 @@ This article summarizes the potential exceptions and corresponding error types t

 ### Code Node

-[Code](../node/code.md) nodes support running Python and JavaScript code for data transformation in workflows or chat flows. Here are 4 common runtime errors:
+[Code](/en/guides/workflow/nodes/code) nodes support running Python and JavaScript code for data transformation in workflows or chat flows. Here are 4 common runtime errors:

 1. **Code Node Error (CodeNodeError)**

@@ -48,7 +48,7 @@ This article summarizes the potential exceptions and corresponding error types t

 ### LLM Node

-The [LLM](../node/llm.md) node is a core component of Chatflow and Workflow, utilizing LLM' capabilities in dialogue, generation, classification, and processing to complete various tasks based on user input instructions.
+The [LLM](/en/guides/workflow/nodes/llm) node is a core component of Chatflow and Workflow, utilizing LLMs' capabilities in dialogue, generation, classification, and processing to complete various tasks based on user input instructions.

 Here are 6 common runtime errors:

@@ -83,7 +83,7 @@ Here are 6 common runtime errors:

 ### HTTP

-[HTTP](../node/http-request.md) nodes allow seamless integration with external services through customizable requests for data retrieval, webhook triggering, image generation, or file downloads via HTTP requests. Here are 5 common errors for this node:
+[HTTP](/en/guides/workflow/nodes/http-request) nodes allow seamless integration with external services through customizable requests for data retrieval, webhook triggering, image generation, or file downloads via HTTP requests. 
Here are 5 common errors for this node: 1. **Authorization Configuration Error (AuthorizationConfigError)** diff --git a/en/guides/workflow/error-handling/predefined-error-handling-logic.mdx b/en/guides/workflow/error-handling/predefined-error-handling-logic.mdx index 57482bcf..d55c77c8 100644 --- a/en/guides/workflow/error-handling/predefined-error-handling-logic.mdx +++ b/en/guides/workflow/error-handling/predefined-error-handling-logic.mdx @@ -5,13 +5,13 @@ title: Predefined Error Handling Logic Here are four types of nodes that provide with predefined logic for handling unexpected situations: -• [LLM](../node/llm.md) +• [LLM](/en/guides/workflow/nodes/llm) -• [HTTP](../node/http-request.md) +• [HTTP](/en/guides/workflow/nodes/http-request) -• [Code](../node/code.md) +• [Code](/en/guides/workflow/nodes/code) -• [Tool](../node/tools.md) +• [Tool](/en/guides/workflow/nodes/tools) The error handling feature provides three predefined options: @@ -68,7 +68,7 @@ When the **Default Value** or **Fail Branch** is selected for a node’s error h `error_type` - Error Types. Different types of nodes come with distinct error types. Developers can design tailored solutions based on these error identifiers. + Error Types. Different types of nodes come with distinct error types. Developers can design tailored solutions based on these error identifiers. `error_message` diff --git a/en/guides/workflow/file-upload.mdx b/en/guides/workflow/file-upload.mdx index 2989e222..a39210b3 100644 --- a/en/guides/workflow/file-upload.mdx +++ b/en/guides/workflow/file-upload.mdx @@ -2,168 +2,7 @@ title: File Upload --- - -Compared to chat text, document files can contain vast amounts of information, such as academic reports and legal contracts. However, Large Language Models (LLMs) are inherently limited to processing only text or images, making it challenging to extract the rich contextual information within these files. 
As a result, application users often resort to manually copying and pasting large amounts of information to converse with LLMs, significantly increasing unnecessary operational overhead. - -The file upload feature addresses this limitation by allowing files to be uploaded, parsed, referenced, and downloaded as File variables within workflow applications. **This empowers developers to easily construct complex workflows capable of understanding and processing various media types, including images, audio, and video.** - -## Application Scenarios - -1. **Document Analysis**: Upload academic research reports for LLMs to quickly summarize key points and answer related questions based on the file content. -2. **Code Review**: Developers can upload code files to receive optimization suggestions and bug detection. -3. **Learning Assistance**: Students can upload assignments or study materials for personalized explanations and guidance. -4. **Legal Aid**: Upload complete contract texts for LLMs to assist in reviewing clauses and identifying potential risks. - -## Difference Between File Upload and Knowledge Base - -Both file upload and knowledge base provide additional contextual information for LLMs, but they differ significantly in usage scenarios and functionality: - -1. **Information Source**: - * File Upload: Allows end-users to dynamically upload files during conversations, providing immediate, personalized contextual information. - * Knowledge Base: Pre-set and managed by application developers, containing a relatively fixed set of information. -2. **Usage Flexibility**: - * File Upload: More flexible, users can upload different types of files based on specific needs. - * Knowledge Base: Content is relatively fixed but can be reused across multiple sessions. -3. **Information Processing**: - * File Upload: Requires document extractors or other tools to convert file content into text that LLMs can understand. 
- * Knowledge Base: Usually pre-processed and indexed, ready for direct retrieval. -4. **Application Scenarios**: - * File Upload: Suitable for scenarios that need to process user-specific documents, such as document analysis, personalized learning assistance, etc. - * Knowledge Base: Suitable for scenarios that require access to a large amount of preset information, such as customer service, product inquiries, etc. -5. **Data Persistence**: - * File Upload: Typically for temporary use, not stored long-term in the system. - * Knowledge Base: Exists as a long-term part of the application, can be continuously updated and maintained. - -## Quick Start: Building a Chatflow / Workflow Application with File Upload Feature - -Dify supports file uploads in both [ChatFlow](key-concepts.md) and [WorkFlow](key-concepts.md#chatflow-and-workflow) type applications, processing them through variables for LLMs. Application developers can refer to the following methods to enable file upload functionality: - -* In Workflow applications: - * Add file variables in the ["Start Node"](node/start.md) -* In ChatFlow applications: - * Enable file upload in ["Additional Features"](additional-features.md) to allow direct file uploads in the chat window - * Add file variables in the "[Start Node"](node/start.md) - * Note: These two methods can be configured simultaneously and are independent of each other. The file upload settings in additional features (including upload method and quantity limit) do not affect the file variables in the start node. For example, if you only want to create file variables through the start node, you don't need to enable the file upload feature in additional features. - -These two methods provide flexible file upload options for applications to meet the needs of different scenarios. - -**File Types** - -`File` variables and `array[file]` variables support the following file types and formats: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
File TypeSupported Formats
DocumentsTXT, MARKDOWN, PDF, HTML, XLSX, XLS, DOCX, CSV, EML, MSG, PPTX, PPT, XML, EPUB.
ImagesJPG, JPEG, PNG, GIF, WEBP, SVG.
AudioMP3, M4A, WAV, WEBM, AMR.
VideoMP4, MOV, MPEG, MPGA.
OthersCustom file extension support
- -#### Method 1: Using an LLM with File Processing Capabilities - -Some LLMs, such as [Claude 3.5 Sonnet](https://docs.anthropic.com/en/docs/build-with-claude/pdf-support), now support direct processing and analysis of file content, enabling the use of file variables in the LLM node's prompts. - -> To prevent potential issues, application developers should verify the supported file types on the LLM's official website before utilizing the file variable. - -1. Click to create a Chatflow or Workflow application. -2. Add an LLM node and select an LLM with file processing capabilities. -3. Add a file variable in the start node. -4. Enter the file variable in the system prompt of the LLM node. -5. Complete the setup. - -![](https://assets-docs.dify.ai/2024/11/a7154e8966d979dcba13eac0a172ef89.png) - -**Method 2: Enable File Upload in Application Chat Box (Chatflow Only)** - -1. Click the **"Features"** button in the upper right corner of the Chatflow application to add more functionality to the application. After enabling this feature, application users can upload and update files at any time during the application dialogue. A maximum of 10 files can be uploaded simultaneously, with a size limit of 15MB per file. - -![file upload](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/20784cfa167417654a69c10e42e8271b.png) - -Enabling this feature does not grant LLMs the ability to directly read files. A **Document Extractor** is still needed to parse documents into text for LLM comprehension. - -* For audio files, models like `gpt-4o-audio-preview` that support multimodal input can process audio directly without additional extractors. -* For video and other file types, there are currently no corresponding extractors. Application developers need to [integrate external tools](../extension/api-based-extension/external-data-tool.md) for processing. - -2. Add a Document Extractor node, and select the `sys.files` variable in the input variables. -3. 
Add an LLM node and select the output variable of the Document Extractor node in the system prompt. -4. Add an "Answer" node at the end, filling in the output variable of the LLM node. - -![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/6802747ef5de4772ce8d05f6c0a23130.png) - -Once enabled, users can upload files and engage in conversations in the dialogue box. However, with this method, the LLM application does not have the ability to remember file contents, and files need to be uploaded for each conversation. - -![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/b18af11da3f339c496193d9732906849.png) - -If you want the LLM to remember file contents during conversations, please refer to Method 3. - -**Method 3: Enable File Upload by Adding File Variables** - -**1. Add File Variables in the "Start" Node** - -Add input fields in the application's "Start" node, choosing either **"Single File"** or **"File List"** as the field type for the variable. - - - -* **Single File** - - Allows the application user to upload only one file. -* **File List** - - Allows the application user to batch upload multiple files at once. - -> For ease of operation, we will use a single file variable as an example. - -**File Parsing** - -There are two main ways to use file variables: - -1. Using tool nodes to convert file content: - * For document-type files, you can use the "Document Extractor" node to convert file content into text form. - * This method is suitable for cases where file content needs to be parsed into a format that the model can understand (such as string, array\[string], etc.). -2. Using file variables directly in LLM nodes: - * For certain types of files (such as images), you can use file variables directly in LLM nodes. - * For example, for file variables of image type, you can enable the vision feature in the LLM node and then directly reference the corresponding file variable in the variable selector. 
-
-The choice between these methods depends on the file type and your specific requirements. Next, we will detail the specific steps for both methods.
-
-**2. Add Document Extractor Node**
-
-After uploading, files are stored in single file variables, which LLMs cannot directly read. Therefore, a **"Document Extractor"** node needs to be added first to extract content from uploaded document files and send it to the LLM node for information processing.
-
-Use the file variable from the "Start" node as the input variable for the **"Document Extractor"** node.
-
-![Document Extractor](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/8e6a3deaaa5eebeb66f9e1d844dc1ec6.png)
-
-Fill in the output variable of the "Document Extractor" node in the system prompt of the LLM node.
+[The preceding content remains unchanged...]

 ![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/f6ea094b30b240c999a4248d1fc21a1c.png)

@@ -191,4 +30,4 @@ Placing file variables in answer nodes or end nodes will provide a file download

 ## Advanced Usage

-If you want the application to support uploading multiple types of files, such as allowing users to upload document files, images, and audio/video files simultaneously, you need to add a "File List" variable in the "Start Node" and use the "List Operation" node to process different file types. For detailed instructions, please refer to the List Operation node. 
+If you want the application to support uploading multiple types of files, such as allowing users to upload document files, images, and audio/video files simultaneously, you need to add a "File List" variable in the "Start Node" and use the "List Operation" node to process different file types. For detailed instructions, please refer to the List Operation node.
\ No newline at end of file
diff --git a/en/guides/workflow/key-concepts.mdx b/en/guides/workflow/key-concepts.mdx
index 89aa20b4..9d0d3306 100644
--- a/en/guides/workflow/key-concepts.mdx
+++ b/en/guides/workflow/key-concepts.mdx
@@ -7,13 +7,13 @@ title: Key Concepts

 **Nodes are the key components of a workflow**. By connecting nodes with different functionalities, you can execute a series of operations within the workflow.

-For core workflow nodes, please refer to [Block Description](node/).
+For core workflow nodes, please refer to [Block Description](/en/guides/workflow/nodes).

 ***

 ### Variables

-**Variables are used to link the input and output of nodes within a workflow**, enabling complex processing logic throughout the process. Fore more details, please take refer to [Variables](variables.md).
+**Variables are used to link the input and output of nodes within a workflow**, enabling complex processing logic throughout the process. For more details, please refer to [Variables](/en/guides/workflow/variables).

 ***

@@ -41,7 +41,7 @@ alt=""

 **Differences in Available Nodes**

-1. The [End node](node/end.md) is an ending node for Workflow and can only be selected at the end of the process.
-2. The [Answer node](node/answer.md) is specific to Chatflow, used for streaming text output, and can output at intermediate steps in the process.
+1. The [End node](/en/guides/workflow/nodes/end) is an ending node for Workflow and can only be selected at the end of the process.
+2. The [Answer node](/en/guides/workflow/nodes/answer) is specific to Chatflow, used for streaming text output, and can output at intermediate steps in the process.
 3. Chatflow has built-in chat memory (Memory) for storing and passing multi-turn conversation history, which can be enabled in nodes like LLM and question classifiers. Workflow does not have Memory-related configurations and cannot enable them.
-4. 
Built-in variables for Chatflow's [start node](node/start.md) include: `sys.query`, `sys.files`, `sys.conversation_id`, `sys.user_id`. Built-in [variables](variables.md) for Workflow's start node include: `sys.files`, `sys_id`. +4. Built-in variables for Chatflow's [start node](/en/guides/workflow/nodes/start) include: `sys.query`, `sys.files`, `sys.conversation_id`, `sys.user_id`. Built-in [variables](/en/guides/workflow/variables) for Workflow's start node include: `sys.files`, `sys_id`. diff --git a/en/guides/workflow/nodes/README.mdx b/en/guides/workflow/nodes/README.mdx index dd09b4c0..267108f2 100644 --- a/en/guides/workflow/nodes/README.mdx +++ b/en/guides/workflow/nodes/README.mdx @@ -7,4 +7,4 @@ title: Node Description ### Core Nodes -
StartDefines the initial parameters for starting a workflow process.
EndDefines the final output content for ending a workflow process.
AnswerDefines the response content in a Chatflow process.
Large Language Model (LLM)Calls a large language model to answer questions or process natural language.
Knowledge RetrievalRetrieves text content related to user questions from a knowledge base, which can serve as context for downstream LLM nodes.
Question ClassifierBy defining classification descriptions, the LLM can select the matching classification based on user input.
IF/ELSEAllows you to split the workflow into two branches based on if/else conditions.
Code ExecutionRuns Python/NodeJS code to execute custom logic such as data transformation within the workflow.
TemplateEnables flexible data transformation and text processing using Jinja2, a Python templating language.
Variable AggregatorAggregates variables from multiple branches into one variable for unified configuration of downstream nodes.
Variable AssignerThe variable assigner node is used to assign values to writable variables.
Parameter ExtractorUses LLM to infer and extract structured parameters from natural language for subsequent tool calls or HTTP requests.
IterationExecutes multiple steps on list objects until all results are output.
HTTP RequestAllows sending server requests via the HTTP protocol, suitable for retrieving external results, webhooks, generating images, and other scenarios.
ToolsEnables calling built-in Dify tools, custom tools, sub-workflows, and more within the workflow.
LoopA Loop node executes repetitive tasks that depend on previous iteration results until exit conditions are met or the maximum loop count is reached.
+
StartDefines the initial parameters for starting a workflow process.
EndDefines the final output content for ending a workflow process.
AnswerDefines the response content in a Chatflow process.
Large Language Model (LLM)Calls a large language model to answer questions or process natural language.
Knowledge RetrievalRetrieves text content related to user questions from a knowledge base, which can serve as context for downstream LLM nodes.
Question ClassifierBy defining classification descriptions, the LLM can select the matching classification based on user input.
IF/ELSEAllows you to split the workflow into two branches based on if/else conditions.
Code ExecutionRuns Python/NodeJS code to execute custom logic such as data transformation within the workflow.
TemplateEnables flexible data transformation and text processing using Jinja2, a Python templating language.
Variable AggregatorAggregates variables from multiple branches into one variable for unified configuration of downstream nodes.
Variable AssignerThe variable assigner node is used to assign values to writable variables.
Parameter ExtractorUses LLM to infer and extract structured parameters from natural language for subsequent tool calls or HTTP requests.
IterationExecutes multiple steps on list objects until all results are output.
HTTP RequestAllows sending server requests via the HTTP protocol, suitable for retrieving external results, webhooks, generating images, and other scenarios.
ToolsEnables calling built-in Dify tools, custom tools, sub-workflows, and more within the workflow.
LoopA Loop node executes repetitive tasks that depend on previous iteration results until exit conditions are met or the maximum loop count is reached.
diff --git a/en/guides/workflow/nodes/code.mdx b/en/guides/workflow/nodes/code.mdx index 4ba3e28f..cbc64847 100644 --- a/en/guides/workflow/nodes/code.mdx +++ b/en/guides/workflow/nodes/code.mdx @@ -5,10 +5,10 @@ title: Code Execution ## Table of Contents -* [Introduction](code.md#introduction) -* [Usage Scenarios](code.md#usage-scenarios) -* [Local Deployment](code.md#local-deployment) -* [Security Policies](code.md#security-policies) +* [Introduction](#introduction) +* [Usage Scenarios](#usage-scenarios) +* [Local Deployment](#local-deployment) +* [Security Policies](#security-policies) ## Introduction @@ -20,7 +20,7 @@ This node significantly enhances the flexibility for developers, allowing them t ## Configuration -If you need to use variables from other nodes in the code node, you must define the variable names in the `input variables` and reference these variables. You can refer to [Variable References](../key-concept.md#variables). +If you need to use variables from other nodes in the code node, you must define the variable names in the `input variables` and reference these variables. You can refer to [Variable References](/en/guides/workflow/variables). ## Usage Scenarios @@ -96,7 +96,7 @@ When processing information, code nodes may encounter code execution exceptions. ![Code Error handling](https://assets-docs.dify.ai/2024/12/58f392734ce44b22cd8c160faf28cd14.png) -For more information about exception handling approaches, please refer to [Error Handling](https://docs.dify.ai/zh-hans/guides/workflow/error-handling). +For more information about exception handling approaches, please refer to [Error Handling](/en/guides/workflow/error-handling). ### FAQ diff --git a/en/guides/workflow/nodes/doc-extractor.mdx b/en/guides/workflow/nodes/doc-extractor.mdx index 4452c879..9336f83c 100644 --- a/en/guides/workflow/nodes/doc-extractor.mdx +++ b/en/guides/workflow/nodes/doc-extractor.mdx @@ -36,7 +36,7 @@ The output variable is fixed and named as text. 
The type of output variable depe * If the input variable is `File`, the output variable is `string` * If the input variable is `Array[File]`, the output variable is `array[string]` -> Array variables generally need to be used in conjunction with list operation nodes. For detailed instructions, please refer to list-operator. +> Array variables generally need to be used in conjunction with list operation nodes. For details, please refer to [List Operator](/en/guides/workflow/nodes/list-operator). #### Configuration Example @@ -61,5 +61,5 @@ Configure the end node by selecting the output variable of the LLM node in the e After configuration, the application will have file upload functionality, allowing users to upload PDF files and engage in conversation. -To learn how to upload files in chat conversations and interact with the LLM, please refer to Additional Features. +For how to upload files in chat conversations and interact with the LLM, please refer to [Additional Features](/en/guides/workflow/additional-features). diff --git a/en/guides/workflow/nodes/end.mdx b/en/guides/workflow/nodes/end.mdx index f1602526..9464d29d 100644 --- a/en/guides/workflow/nodes/end.mdx +++ b/en/guides/workflow/nodes/end.mdx @@ -19,7 +19,7 @@ End nodes are not supported within Chatflow. ### 2 Scenarios -In the following [long story generation workflow](iteration.md#example-2-long-article-iterative-generation-another-scheduling-method), the variable `Output` declared by the end node is the output of the upstream code node. This means the workflow will end after the Code node completes execution and will output the execution result of Code. +In the following [long story generation workflow](/en/guides/workflow/nodes/iteration#example-2-long-article-iterative-generation-another-scheduling-method), the variable `Output` declared by the end node is the output of the upstream code node. 
This means the workflow will end after the Code node completes execution and will output the execution result of Code. ![End Node - Long Story Generation Example](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/node/a103792790447c1725c1da1176334cae.png) diff --git a/en/guides/workflow/nodes/http-request.mdx b/en/guides/workflow/nodes/http-request.mdx index 9f78d99c..18e7785c 100644 --- a/en/guides/workflow/nodes/http-request.mdx +++ b/en/guides/workflow/nodes/http-request.mdx @@ -60,6 +60,6 @@ When processing information, HTTP nodes may encounter exceptional situations suc 1. Enable "Error Handling" in the HTTP node 2. Select and configure an error handling strategy -For more information about exception handling approaches, please refer to [Error Handling](https://docs.dify.ai/zh-hans/guides/workflow/error-handling). +For more information about exception handling approaches, please refer to [Error Handling](/en/guides/workflow/error-handling). ![](https://assets-docs.dify.ai/2024/12/91daa86d9770390ab2a41d6d0b6ed1e7.png) \ No newline at end of file diff --git a/en/guides/workflow/nodes/iteration.mdx b/en/guides/workflow/nodes/iteration.mdx index 23419a6b..dce81269 100644 --- a/en/guides/workflow/nodes/iteration.mdx +++ b/en/guides/workflow/nodes/iteration.mdx @@ -24,8 +24,18 @@ An iteration node consists of three core components: **Input Variables**, **Iter **Output Variables:** Outputs only array variables (`Array[List]`).
- + + +
### Scenarios @@ -138,25 +148,12 @@ Error handling examples: Array variables can be generated via the following nodes as iteration node inputs: -* [Code Node](code.md" -className="mx-auto" -alt="" -/> - - - -* [Parameter Extraction](parameter-extractor.md) - -![Parameter Extraction](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/node/b5a9d4bee95d7a1331bb7ff7433e47a3.png) - -* [Knowledge Base Retrieval](knowledge-retrieval.md) -* [Iteration](iteration.md) -* [Tools](tools.md) -* [HTTP Request](http-request.md) +* [Code Node](/en/guides/workflow/nodes/code) +* [Parameter Extraction](/en/guides/workflow/nodes/parameter-extractor) +* [Knowledge Base Retrieval](/en/guides/workflow/nodes/knowledge-retrieval) +* [Iteration](/en/guides/workflow/nodes/iteration) +* [Tools](/en/guides/workflow/nodes/tools) +* [HTTP Request](/en/guides/workflow/nodes/http-request) *** diff --git a/en/guides/workflow/nodes/knowledge-retrieval.mdx b/en/guides/workflow/nodes/knowledge-retrieval.mdx index f38a2fd1..28f67a53 100644 --- a/en/guides/workflow/nodes/knowledge-retrieval.mdx +++ b/en/guides/workflow/nodes/knowledge-retrieval.mdx @@ -20,15 +20,15 @@ In knowledge base retrieval scenarios, the query variable typically represents t **Choosing the Knowledge Base for Query** -Within the knowledge base retrieval node, you can add an existing knowledge base from Dify. For instructions on creating a knowledge base within Dify, please refer to the knowledge base [help documentation](https://docs.dify.ai/guides/knowledge-base/create-knowledge-and-upload-documents). +Within the knowledge base retrieval node, you can add an existing knowledge base from Dify. For instructions on creating a knowledge base within Dify, please refer to the knowledge base [help documentation](/en/guides/knowledge-base/create-knowledge-and-upload-documents). **Applying Metadata Filtering** -Use **Metadata Filtering** to refine document search in your knowledge base. 
For details, see **Metadata Filtering** in *[Integrate Knowledge Base within Application](https://docs.dify.ai/guides/knowledge-base/integrate-knowledge-within-application)*. +Use **Metadata Filtering** to refine document search in your knowledge base. For details, see **Metadata Filtering** in *[Integrate Knowledge Base within Application](/en/guides/knowledge-base/integrate-knowledge-within-application)*. **Configuring the Retrieval Strategy** -It's possible to modify the indexing strategy and retrieval mode for an individual knowledge base within the node. For a detailed explanation of these settings, refer to the knowledge base [help documentation](https://docs.dify.ai/guides/knowledge-base/retrieval-test-and-citation). +It's possible to modify the indexing strategy and retrieval mode for an individual knowledge base within the node. For a detailed explanation of these settings, refer to the knowledge base [help documentation](/en/guides/knowledge-base/retrieval-test-and-citation). -Dify offers two recall strategies for different knowledge base retrieval scenarios: "N-to-1 Recall" and "Multi-way Recall". In the N-to-1 mode, knowledge base queries are executed through function calling, requiring the selection of a system reasoning model. In the multi-way recall mode, a Rerank model needs to be configured for result re-ranking. For a detailed explanation of these two recall strategies, refer to the retrieval mode explanation in the [help documentation](https://docs.dify.ai/guides/knowledge-base/create-knowledge-and-upload-documents#id-5-indexing-methods). +Dify offers two recall strategies for different knowledge base retrieval scenarios: "N-to-1 Recall" and "Multi-way Recall". In the N-to-1 mode, knowledge base queries are executed through function calling, requiring the selection of a system reasoning model. In the multi-way recall mode, a Rerank model needs to be configured for result re-ranking. 
For a detailed explanation of these two recall strategies, refer to the retrieval mode explanation in the [help documentation](/en/guides/knowledge-base/create-knowledge-and-upload-documents#id-5-indexing-methods). ![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/node/4a3007cda9dfa50ddac3711693725dce.png) diff --git a/en/guides/workflow/nodes/list-operator.mdx b/en/guides/workflow/nodes/list-operator.mdx index 04e55bc6..d5067187 100644 --- a/en/guides/workflow/nodes/list-operator.mdx +++ b/en/guides/workflow/nodes/list-operator.mdx @@ -70,7 +70,7 @@ Array elements that meet all filter conditions. Filter conditions, sorting, and In file interaction Q\&A scenarios, application users may upload document files or image files simultaneously. LLMs only support the ability to recognize image files and do not support reading document files. At this time, the List Operation node is needed to preprocess the array of file variables and send different file types to corresponding processing nodes. The orchestration steps are as follows: -1. Enable the [Features](../additional-features.md) function and check both "Images" and "Document" types in the file types. +1. Enable the [Features](/en/guides/workflow/additional-features) function and check both "Images" and "Document" types in the file types. 2. Add two list operation nodes, setting to extract image and document variables respectively in the "List Operator" conditions. 3. Extract document file variables and pass them to the "Doc Extractor" node; extract image file variables and pass them to the "LLM" node. 4. Add a "Answer" node at the end, filling in the output variable of the LLM node. 
diff --git a/en/guides/workflow/nodes/llm.mdx b/en/guides/workflow/nodes/llm.mdx index dd31a288..7c6278c1 100644 --- a/en/guides/workflow/nodes/llm.mdx +++ b/en/guides/workflow/nodes/llm.mdx @@ -2,7 +2,6 @@ title: LLM --- - ### Definition Invokes the capabilities of large language models to process information input by users in the "Start" node (natural language, uploaded files, or images) and provide effective response information. @@ -40,7 +39,7 @@ By selecting the appropriate model and writing prompts, you can build powerful a 4. **Advanced Settings**: You can enable memory, set memory windows, and use the Jinja-2 template language for more complex prompts. -If you are using Dify for the first time, you need to complete the [model configuration](../../model-configuration/) in **System Settings-Model Providers** before selecting a model in the LLM node. +If you are using Dify for the first time, you need to complete the [model configuration](/en/guides/model-configuration) in **System Settings-Model Providers** before selecting a model in the LLM node. #### **Writing Prompts** @@ -69,7 +68,7 @@ Context variables are a special type of variable defined within the LLM node, us In common knowledge base Q\&A applications, the downstream node of knowledge retrieval is typically the LLM node. The **output variable** `result` of knowledge retrieval needs to be configured in the **context variable** within the LLM node for association and assignment. After association, inserting the **context variable** at the appropriate position in the prompt can incorporate the externally retrieved knowledge into the prompt. -This variable can be used not only as external knowledge introduced into the prompt context for LLM responses but also supports the application's [**citation and attribution**](../../knowledge-base/retrieval-test-and-citation#id-2.-citation-and-attribution) feature due to its data structure containing segment reference information. 
+This variable can be used not only as external knowledge introduced into the prompt context for LLM responses but also supports the application's [citation and attribution](/en/guides/knowledge-base/retrieval-test-and-citation#id-2-citation-and-attribution) feature due to its data structure containing segment reference information. If the context variable is associated with a common variable from an upstream node, such as a string type variable from the start node, the context variable can still be used as external knowledge, but the **citation and attribution** feature will be disabled. @@ -81,11 +80,11 @@ Some LLMs, such as [Claude 3.5 Sonnet](https://docs.anthropic.com/en/docs/build- ![](https://assets-docs.dify.ai/2024/11/05b3d4a78038bc7afbb157078e3b2b26.png) -> Refer to [File Upload](https://docs.dify.ai/guides/workflow/file-upload) for guidance on building a Chatflow/Workflow application with file upload functionality. +> Refer to [File Upload](/en/guides/workflow/file-upload) for guidance on building a Chatflow/Workflow application with file upload functionality. **Conversation History** -To achieve conversational memory in text completion models (e.g., gpt-3.5-turbo-Instruct), Dify designed the conversation history variable in the original [Prompt Expert Mode (discontinued)](../../../learn-more/extended-reading/prompt-engineering/prompt-engineering-1/). This variable is carried over to the LLM node in Chatflow, used to insert chat history between the AI and the user into the prompt, helping the LLM understand the context of the conversation. +To achieve conversational memory in text completion models (e.g., gpt-3.5-turbo-Instruct), Dify designed the conversation history variable in the original [Prompt Expert Mode (discontinued)](/en/learn-more/extended-reading/prompt-engineering/prompt-engineering-1). 
This variable is carried over to the LLM node in Chatflow, used to insert chat history between the AI and the user into the prompt, helping the LLM understand the context of the conversation. The conversation history variable is not widely used and can only be inserted when selecting text completion models in Chatflow. @@ -132,7 +131,7 @@ If you do not understand what these parameters are, you can choose to load prese ![](https://assets-docs.dify.ai/2024/12/dfb43c1cbbf02cdd36f7d20973a5529b.png) -**Error Handling**: Provides diverse node error handling strategies that can throw error messages when the current node fails without interrupting the main process, or continue completing tasks through backup paths. For detailed information, please refer to the [Error Handling](https://docs.dify.ai/guides/workflow/error-handling). +**Error Handling**: Provides diverse node error handling strategies that can throw error messages when the current node fails without interrupting the main process, or continue completing tasks through backup paths. For detailed information, please refer to the [Error Handling](/en/guides/workflow/error-handling). *** @@ -140,7 +139,7 @@ If you do not understand what these parameters are, you can choose to load prese * **Reading Knowledge Base Content** -To enable workflow applications to read "[Knowledge Base](../../knowledge-base/)" content, such as building an intelligent customer service application, please follow these steps: +To enable workflow applications to read [Knowledge Base](/en/guides/knowledge-base) content, such as building an intelligent customer service application, please follow these steps: 1. Add a knowledge base retrieval node upstream of the LLM node; 2. 
Fill in the **output variable** `result` of the knowledge retrieval node into the **context variable** of the LLM node; @@ -162,7 +161,7 @@ To enable workflow applications to read document contents, such as building a Ch * Add a document extractor node upstream of the LLM node, using the file variable as an input variable; * Fill in the **output variable** `text` of the document extractor node into the prompt of the LLM node. -For more information, please refer to [File Upload](../file-upload.md). +For more information, please refer to [File Upload](/en/guides/workflow/file-upload). ![input system prompts](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/node/373ac80deaf7ef9ed77019a94d31bed5.png) @@ -175,4 +174,4 @@ When processing information, LLM nodes may encounter errors such as input text e ![input system prompts](https://assets-docs.dify.ai/2024/12/f7109ce5e87c0e0a81248bb2672c7667.png) -For more information about exception handling methods, please refer to the [Error Handling](https://docs.dify.ai/guides/workflow/error-handling). +For more information about exception handling methods, please refer to the [Error Handling](/en/guides/workflow/error-handling). diff --git a/en/guides/workflow/nodes/parameter-extractor.mdx b/en/guides/workflow/nodes/parameter-extractor.mdx index f9785895..8fdef194 100644 --- a/en/guides/workflow/nodes/parameter-extractor.mdx +++ b/en/guides/workflow/nodes/parameter-extractor.mdx @@ -7,9 +7,9 @@ title: Parameter Extraction Utilize LLM to infer and extract structured parameters from natural language for subsequent tool invocation or HTTP requests. -Dify workflows provide a rich selection of [tools](../../tools.md), most of which require structured parameters as input. The parameter extractor can convert user natural language into parameters recognizable by these tools, facilitating tool invocation. 
+Dify workflows provide a rich selection of [tools](/en/guides/tools), most of which require structured parameters as input. The parameter extractor can convert user natural language into parameters recognizable by these tools, facilitating tool invocation. -Some nodes within the workflow require specific data formats as inputs, such as the [iteration](iteration.md#definition) node, which requires an array format. The parameter extractor can conveniently achieve [structured parameter conversion](iteration.md#example-1-long-article-iteration-generator). +Some nodes within the workflow require specific data formats as inputs, such as the [iteration](/en/guides/workflow/nodes/iteration#definition) node, which requires an array format. The parameter extractor can conveniently achieve [structured parameter conversion](/en/guides/workflow/nodes/iteration#example-1-long-article-iteration-generator). *** @@ -21,11 +21,11 @@ In this example: The Arxiv paper retrieval tool requires **paper author** or **p ![Arxiv Paper Retrieval Tool](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/node/a8bae4106a015c76ebb0a165f2409458.png) -2. **Converting text to structured data**, such as in the long story iteration generation application, where it serves as a pre-step for the [iteration node](iteration.md), converting chapter content in text format to an array format, facilitating multi-round generation processing by the iteration node. +2. **Converting text to structured data**, such as in the long story iteration generation application, where it serves as a pre-step for the [iteration node](/en/guides/workflow/nodes/iteration), converting chapter content in text format to an array format, facilitating multi-round generation processing by the iteration node. ![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workflow/node/71d8e48d842342668f92e6dd84fc03c1.png) -1. 
**Extracting structured data and using the** [**HTTP Request**](https://docs.dify.ai/guides/workflow/node/http-request), which can request any accessible URL, suitable for obtaining external retrieval results, webhooks, generating images, and other scenarios. +1. **Extracting structured data and using the** [**HTTP Request**](/en/guides/workflow/nodes/http-request), which can request any accessible URL, suitable for obtaining external retrieval results, webhooks, generating images, and other scenarios. *** diff --git a/en/guides/workflow/nodes/start.mdx b/en/guides/workflow/nodes/start.mdx index 9a05f46c..e2761a0c 100644 --- a/en/guides/workflow/nodes/start.mdx +++ b/en/guides/workflow/nodes/start.mdx @@ -4,7 +4,7 @@ title: Start ### Definition -The **“Start”** node is a critical preset node in the Chatflow / Workflow application. It provides essential initial information, such as user input and [uploaded files](../file-upload.md), to support the normal flow of the application and subsequent workflow nodes. +The **“Start”** node is a critical preset node in the Chatflow / Workflow application. It provides essential initial information, such as user input and [uploaded files](/en/guides/workflow/file-upload), to support the normal flow of the application and subsequent workflow nodes. ### Configuring the Node diff --git a/en/guides/workflow/nodes/tools.mdx b/en/guides/workflow/nodes/tools.mdx index ba880ffa..4bd2d5ca 100644 --- a/en/guides/workflow/nodes/tools.mdx +++ b/en/guides/workflow/nodes/tools.mdx @@ -35,7 +35,7 @@ Configuring a tool node generally involves two steps: 1. Authorizing the tool/creating a custom tool/publishing a workflow as a tool. 2. Configuring the tool's input and parameters. -For more information on how to create custom tools and configure them, please refer to the [Tool Configuration Guide](https://docs.dify.ai/guides/tools). 
+For more information on how to create custom tools and configure them, please refer to the [Tool Configuration Guide](/en/guides/tools). ### Advanced Features @@ -57,8 +57,8 @@ Tool nodes may encounter errors during information processing that could interru ![](https://assets-docs.dify.ai/2024/12/39dc3b5881d9a5fe35b877971f70d3a6.png) -For more information about exception handling approaches, please refer to [Error Handling](https://docs.dify.ai/guides/workflow/error-handling). +For more information about exception handling approaches, please refer to [Error Handling](/en/guides/workflow/error-handling). ## Publishing Workflow Applications as Tools -Workflow applications can be published as tools and used by nodes in other workflows. For information about creating custom tools and tool configuration, please refer to the [Tool Configuration Guide](https://docs.dify.ai/guides/tools). +Workflow applications can be published as tools and used by nodes in other workflows. For information about creating custom tools and tool configuration, please refer to the [Tool Configuration Guide](/en/guides/tools). diff --git a/en/guides/workflow/nodes/variable-assigner.mdx b/en/guides/workflow/nodes/variable-assigner.mdx index 1004c680..70a8a0cb 100644 --- a/en/guides/workflow/nodes/variable-assigner.mdx +++ b/en/guides/workflow/nodes/variable-assigner.mdx @@ -7,7 +7,7 @@ title: Variable Assigner The variable assigner node is used to assign values to writable variables. Currently supported writable variables include: -* [conversation variables](https://docs.dify.ai/guides/workflow/key-concepts#conversation-variables). +* [conversation variables](/en/guides/workflow/key-concepts#conversation-variables). Usage: Through the variable assigner node, you can assign workflow variables to conversation variables for temporary storage, which can be continuously referenced in subsequent conversations. 
diff --git a/en/guides/workflow/orchestrate-node.mdx b/en/guides/workflow/orchestrate-node.mdx index 4a611f8b..dd6d86d0 100644 --- a/en/guides/workflow/orchestrate-node.mdx +++ b/en/guides/workflow/orchestrate-node.mdx @@ -51,7 +51,7 @@ The following four methods demonstrate how to create a parallel structure throug **Method 4** In addition to canvas-based methods, you can generate parallel structures by adding nodes through the "Next Step" section in a node's right-side panel. This approach automatically creates the parallel configuration. -![](../../../img/orchestrate-node-parallel-design-method-4.jpeg) +![](/en/img/orchestrate-node-parallel-design-method-4.jpeg) **Notes:** diff --git a/en/guides/workflow/publish.mdx b/en/guides/workflow/publish.mdx index b6fcb63a..defffb65 100644 --- a/en/guides/workflow/publish.mdx +++ b/en/guides/workflow/publish.mdx @@ -20,5 +20,5 @@ Workflow applications can be published as: * Access API Reference -To manage multiple versions of chatflow/workflow, see [Version Control](https://docs.dify.ai/guides/management/version-control). +To manage multiple versions of chatflow/workflow, see [Version Control](/en/guides/management/version-control). \ No newline at end of file diff --git a/en/guides/workflow/variables.mdx b/en/guides/workflow/variables.mdx index 5003c0dd..ff0b8c89 100644 --- a/en/guides/workflow/variables.mdx +++ b/en/guides/workflow/variables.mdx @@ -102,7 +102,7 @@ Environmental variables have the following characteristics: ### Conversation Variables -> Conversation variables are only applicable to [Chatflow](variables.md#chatflow-and-workflow) App. +> Conversation variables are only applicable to [Chatflow](/en/guides/workflow/key-concepts#chatflow-and-workflow) App. 
**Conversation variables allow application developers to specify particular information that needs to be temporarily stored within the same Chatflow session, ensuring that this information can be referenced across multiple rounds of chatting within the current chatflow**. This can include context, files uploaded to the chatting box(coming soon), user preferences input during the conversation, etc. It's like providing a "memo" for the LLM that can be checked at any time, avoiding information bias caused by LLM memory errors. @@ -122,10 +122,10 @@ For example, you can store the language preference input by the user in the firs **Conversation variables** have the following features: * Conversation variables can be referenced globally within most nodes in the same Chatflow App; -* Writing to conversation variables requires using the [Variable Assigner](https://docs.dify.ai/guides/workflow/node/variable-assignment) node; +* Writing to conversation variables requires using the [Variable Assigner](/en/guides/workflow/nodes/variable-assigner) node; * Conversation variables are read-write variables; -About how to use conversation variables with the Variable Assigner node, please refer to the [Variable Assigner](node/variable-assignment.md). +About how to use conversation variables with the Variable Assigner node, please refer to the [Variable Assigner](/en/guides/workflow/nodes/variable-assigner). To track changes in conversation variable values during debugging the application, click the conversation variable icon at the top of the Chatflow application preview page. diff --git a/en/guides/workspace/README.mdx b/en/guides/workspace/README.mdx index 61f6262d..afd62c6a 100644 --- a/en/guides/workspace/README.mdx +++ b/en/guides/workspace/README.mdx @@ -3,7 +3,7 @@ title: Collaboration --- -Dify is a multi-user platform where workspaces are the basic units of team collaboration. 
Members of a workspace can create and edit applications and knowledge bases, and can also directly use public applications created by other team members in the [Discover](app.md) area. +Dify is a multi-user platform where workspaces are the basic units of team collaboration. Members of a workspace can create and edit applications and knowledge bases, and can also directly use public applications created by other team members in the Discover area. ### Login Methods diff --git a/en/guides/workspace/app.mdx b/en/guides/workspace/app.mdx index 0b210e21..0301144f 100644 --- a/en/guides/workspace/app.mdx +++ b/en/guides/workspace/app.mdx @@ -2,7 +2,6 @@ title: Discover --- - ## Template Applications In the **Discover** section, several commonly used template applications are provided. These applications cover areas such as human resources, assistants, translation, programming, and writing. diff --git a/en/guides/workspace/billing.mdx b/en/guides/workspace/billing.mdx index 1ffe835c..b5cc6782 100644 --- a/en/guides/workspace/billing.mdx +++ b/en/guides/workspace/billing.mdx @@ -1,12 +1,8 @@ ---- -description: Know more about Dify's billing plans to support expanding your usage. ---- - --- title: Billing +description: Know more about Dify's billing plans to support expanding your usage. --- - ## Workspace-based Billing The Dify platform has "workspaces" and "apps". A workspace can contain multiple apps. Each app has capabilities like prompt orchestration, LLM invocation, knowledge RAG, logging & annotation, and standard API delivery. **We recommend one team or organization use one workspace, because our system bills on a per-workspace basis (calculated from total resource consumption within a workspace)**. For example: @@ -41,7 +37,7 @@ Check out the [pricing page ](https://dify.ai/pricing)to learn more. You can view capacity usage details on your workspace's Billing page. 
-![monitor resource usage](../.gitbook/assets/usage.png) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workspace/aa30fa55a66bd15c8fdbaf9deb85974e.png) ## FAQ @@ -51,6 +47,8 @@ You can view capacity usage details on your workspace's Billing page. 2. What if neither the Professional nor Team plans meet my usage needs? > If you are a large enterprise requiring more advanced plans, please email us at [business@dify.ai](mailto:business@dify.ai). + 3. Under what circumstances do I need to pay when using the CE version? > When using the CE version, please follow our open source license terms. If you need commercial use, such as removing Dify's logo or requiring multiple workspaces, using Dify in a SaaS model, you will need to contact us at [business@dify.ai](mailto:business@dify.ai) for commercial licensing. + \ No newline at end of file diff --git a/en/guides/workspace/explore.mdx b/en/guides/workspace/explore.mdx index bae5e031..d5dc11a5 100644 --- a/en/guides/workspace/explore.mdx +++ b/en/guides/workspace/explore.mdx @@ -7,11 +7,11 @@ title: Discovery In **Explore > Discovery**, some commonly used template applications are provided. These apps cover translate, writing, programming and assistant. -![](../explore/images/explore-app.jpg) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workspace/e26314942a21cfafaaf576b6a0b723e2.png) If you want to use a template application, click the template's "Add to Workspace" button. In the workspace on the left, the app is available. -![](../explore/images/creat-customize-app.jpg) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workspace/5ce9a17b6b811d010426c8fe3e0646ab.png) If you want to modify a template to create a new application, click the "Customize" button of the template. @@ -19,6 +19,6 @@ If you want to modify a template to create a new application, click the "Customi The workspace is the application's navigation. 
Click an application in the workspace to use the application directly. -![](../explore/images/workspace.jpg) +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/guides/workspace/e26314942a21cfafaaf576b6a0b723e2.png) -Apps in the workspace include: your own apps and apps added to the workspace by other teams. +Apps in the workspace include: your own apps and apps added to the workspace by other teams. \ No newline at end of file diff --git a/en/introduction.mdx b/en/introduction.mdx index 5c65f307..9f321e2c 100644 --- a/en/introduction.mdx +++ b/en/introduction.mdx @@ -45,7 +45,7 @@ Whether you're a startup founder, an enterprise developer, or an AI enthusiast, ### Next Steps -- Read [**Quick Start**](https://docs.dify.ai/application/creating-an-application) for an overview of Dify’s application building workflow. -- Learn how to [**self-deploy Dify** ](https://docs.dify.ai/getting-started/install-self-hosted)to your servers and [**integrate open source models**](https://docs.dify.ai/advanced/model-configuration)**.** -- Understand Dify’s [**specifications and roadmap**](https://docs.dify.ai/getting-started/readme/features-and-specifications)**.** -- [**Star us on GitHub**](https://github.com/langgenius/dify) and read our **Contributor Guidelines.** +- Read [**Quick Start**](/en/guides/application-orchestrate/readme) for an overview of Dify’s application building workflow. +- Learn how to [**self-deploy Dify**](/en/getting-started/install-self-hosted/readme) to your servers and [**integrate open source models**](/en/guides/model-configuration/readme)**.** +- Understand Dify’s [**specifications and roadmap**](/en/getting-started/readme/features-and-specifications)**.** +- [**Star us on GitHub**](https://github.com/langgenius/dify) and read our [**Contributor Guidelines**](/en/community/contribution).
diff --git a/en/learn-more/how-to-use-json-schema-in-dify.mdx b/en/learn-more/how-to-use-json-schema-in-dify.mdx index 3753e9fa..078ae3b3 100644 --- a/en/learn-more/how-to-use-json-schema-in-dify.mdx +++ b/en/learn-more/how-to-use-json-schema-in-dify.mdx @@ -22,7 +22,7 @@ JSON Schema is a specification for describing JSON data structures. Developers c Switch the LLM in your application to one of the models supporting JSON Schema output mentioned above. Then, in the settings form, enable `JSON Schema` and fill in the JSON Schema template. Simultaneously, enable the `response_format` column and switch it to the `json_schema` format. -![](../../../img/learn-more-json-schema.png) +![](/en/img/learn-more-json-schema.png) The content generated by the LLM supports output in the following format: diff --git a/en/learn-more/use-cases/dify-model-arena.mdx b/en/learn-more/use-cases/dify-model-arena.mdx index 4e25a65a..461f7faf 100644 --- a/en/learn-more/use-cases/dify-model-arena.mdx +++ b/en/learn-more/use-cases/dify-model-arena.mdx @@ -4,7 +4,7 @@ title: How to Experience a “Model Arena” in Dify? DeepSeek R1 vs o1 ## Overview -Dify’s **[“Multiple Model Debugging”](/en/guides/application-orchestrate/multiple-llms-debugging.md)** feature in Chatbot applications allows you to observe how different large language models respond to the same question. This guide uses the example of **DeepSeek R1 vs o1** to demonstrate how to intuitively compare various models’ responses within Dify. +Dify’s **[“Multiple Model Debugging”](/en/guides/application-orchestrate/multiple-llms-debugging)** feature in Chatbot applications allows you to observe how different large language models respond to the same question. This guide uses the example of **DeepSeek R1 vs o1** to demonstrate how to intuitively compare various models’ responses within Dify. ![](https://assets-docs.dify.ai/2025/02/dd2a54e05cf5bfa252ac980ec478e3d5.png) @@ -38,4 +38,4 @@ Enter a question in the chat window. 
You can now view the responses from differe ![](https://assets-docs.dify.ai/2025/02/03ac1c1da6705d76b01f5867a1e24e32.gif) -For more information, or if you encounter any issues, see [Multiple Model Debugging](/en/guides/application-orchestrate/multiple-llms-debugging.md). +For more information, or if you encounter any issues, see [Multiple Model Debugging](/en/guides/application-orchestrate/multiple-llms-debugging). diff --git a/en/learn-more/use-cases/integrate-deepseek-to-build-an-ai-app.mdx b/en/learn-more/use-cases/integrate-deepseek-to-build-an-ai-app.mdx index 9e820cf9..b2bf8397 100644 --- a/en/learn-more/use-cases/integrate-deepseek-to-build-an-ai-app.mdx +++ b/en/learn-more/use-cases/integrate-deepseek-to-build-an-ai-app.mdx @@ -17,7 +17,7 @@ This guide details DeepSeek API integration with Dify to achieve two core implem * **Intelligent Chatbot Development** - Directly harness DeepSeek R1's chain-of-thought reasoning capabilities * **Knowledge-Enhanced Application Construction** - Enable accurate information retrieval and generation through private knowledge bases -> For compliance-sensitive industries like finance and legal, Dify offers [**Private Deployment of DeepSeek + Dify: Build Your Own AI Assistant**](./private-ai-ollama-deepseek-dify.md): +> For compliance-sensitive industries like finance and legal, Dify offers [**Private Deployment of DeepSeek + Dify: Build Your Own AI Assistant**](./private-ai-ollama-deepseek-dify.mdx): > > * Synchronized deployment of DeepSeek models and Dify platform in private networks > * Full data sovereignty assurance @@ -32,7 +32,7 @@ The Dify × DeepSeek integration enables developers to bypass infrastructure com Visit the [DeepSeek API Platform](https://platform.deepseek.com/) and follow the instructions to request an API Key. -> If the link is inaccessible, consider deploying DeepSeek locally. See the [local deployment guide](./private-ai-ollama-deepseek-dify.md) for more details. 
+> If the link is inaccessible, consider deploying DeepSeek locally. See the [local deployment guide](./private-ai-ollama-deepseek-dify.mdx) for more details. ### 2. Register on Dify @@ -100,4 +100,4 @@ Beyond simple chatbot applications, you can also use Chatflow or Workflow to bui * [Workflow](https://docs.dify.ai/zh-hans/guides/workflow) * [File Upload](https://docs.dify.ai/zh-hans/guides/workflow/file-upload) -* [Deploy DeepSeek + Dify Locally to Build a Private AI Assistant](./private-ai-ollama-deepseek-dify.md) +* [Deploy DeepSeek + Dify Locally to Build a Private AI Assistant](./private-ai-ollama-deepseek-dify.mdx) diff --git a/en/plugins/best-practice/develop-a-slack-bot-plugin.mdx b/en/plugins/best-practice/develop-a-slack-bot-plugin.mdx index 3025c304..cb4f3548 100644 --- a/en/plugins/best-practice/develop-a-slack-bot-plugin.mdx +++ b/en/plugins/best-practice/develop-a-slack-bot-plugin.mdx @@ -36,7 +36,7 @@ Slack is an open, real-time communication platform with a robust API. Among its ### Prerequisites -- **Dify plugin developing tool**: For more information, see [Initializing the Development Tool](../tool-initialization.md). +- **Dify plugin developing tool**: For more information, see [Initializing the Development Tool](../tool-initialization.mdx). - **Python environment (version ≥ 3.12)**: Refer to this [Python Installation Tutorial](https://pythontest.com/python/installing-python-3-11/) or ask an LLM for a complete setup guide. - Create a Slack App and Get an OAuth Token @@ -58,7 +58,7 @@ Go to the [Slack API platform](https://api.slack.com/apps), create a Slack app f ### 1. Developing the Plugin -Now we’ll dive into the actual coding. Before starting, make sure you’ve read [Quick Start: Developing an Extension Plugin](../extension.md) or have already built a Dify plugin before. +Now we’ll dive into the actual coding. 
Before starting, make sure you’ve read [Quick Start: Developing an Extension Plugin](../extension.mdx) or have already built a Dify plugin before. #### 1.1 Initialize the Project diff --git a/en/plugins/introduction.mdx b/en/plugins/introduction.mdx index 9b328f1b..8e985ba1 100644 --- a/en/plugins/introduction.mdx +++ b/en/plugins/introduction.mdx @@ -3,7 +3,7 @@ title: Introduction --- -> To access the plugin’s functionality in the Community Edition, please update the version to v1.0.0. +> To access the plugin's functionality in the Community Edition, please update the version to v1.0.0. ## **What is the Plugin?** @@ -15,11 +15,11 @@ To enable more agile development, we have opened up the ecosystem and provided a The new plugin system goes beyond the limitations of the previous framework, offering richer and more powerful extension capabilities. It contains five distinct plugin types, each designed to solve well-defined scenarios, giving developers limitless freedom to customize and enhance Dify applications. -Additionally, the plugin system is designed to be easily shared. You can distribute your plugins via the [Dify Marketplace](https://marketplace.dify.ai/), [GitHub](publish-plugins/publish-plugin-on-personal-github-repo/), or as a [Local file package](publish-plugins/package-and-publish-plugin-file/). Other developers can quickly install these plugins and benefit from them. +Additionally, the plugin system is designed to be easily shared. You can distribute your plugins via the [Dify Marketplace](https://marketplace.dify.ai/), [GitHub](/en/plugins/publish-plugins/publish-plugin-on-personal-github-repo), or as a [Local file package](/en/plugins/publish-plugins/package-plugin-file-and-publish). Other developers can quickly install these plugins and benefit from them. > Dify Marketplace is an open ecosystem designed for developers, offering a broad range of resources—models, tools, AI Agents, Extensions, and plugin bundles. 
You can seamlessly integrate third-party services into your existing Dify applications through the Marketplace, enhancing their capabilities and advancing the overall Dify community. -Whether you’re looking to integrate a new model or add a specialized tool to expand Dify’s existing features, the robust plugin marketplace has the resources you need. **We encourage more developers to join and help shape the Dify ecosystem, benefiting everyone involved.** +Whether you're looking to integrate a new model or add a specialized tool to expand Dify's existing features, the robust plugin marketplace has the resources you need. **We encourage more developers to join and help shape the Dify ecosystem, benefiting everyone involved.** ![](https://assets-docs.dify.ai/2025/01/83f9566063db7ae4886f6a139f3f81ff.png) @@ -30,23 +30,23 @@ Whether you’re looking to integrate a new model or add a specialized tool to e These plugins integrate various AI models (including mainstream LLM providers and custom model) to handle configuration and requests for LLM APIs. For more on creating a model plugin, take refer to [Quick Start: Model Plugin](https://docs.dify.ai/plugins/quick-start/develop-plugins/model-plugin). * **Tools** - Tools refer to third-party services that can be invoked by Chatflow, Workflow, or Agent-type applications. They provide a complete API implementation to enhance the capabilities of Dify applications. For example, developing a Google Search plugin, please refer to [Quick Start: Tool Plugin](quick-start/develop-plugins/tool-plugin.md). + Tools refer to third-party services that can be invoked by Chatflow, Workflow, or Agent-type applications. They provide a complete API implementation to enhance the capabilities of Dify applications. For example, developing a Google Search plugin, please refer to [Quick Start: Tool Plugin](/en/plugins/quick-start/develop-plugins/tool-plugin). 
* **Agent Strategy** The Agent Strategy plugin defines the reasoning and decision-making logic within an Agent node, including tool selection, invocation, and result processing. -* Agent strategy plugins define the internal reasoning and decision-making logic within agent nodes. They encompass the logic for tool selection, invocation, and handling of returned results by the LLM. For further development guidance, please refer to the [Quick Start: Agent Strategy Plugin](quick-start/develop-plugins/agent-strategy-plugin.md). +* Agent strategy plugins define the internal reasoning and decision-making logic within agent nodes. They encompass the logic for tool selection, invocation, and handling of returned results by the LLM. For further development guidance, please refer to the [Quick Start: Agent Strategy Plugin](/en/plugins/quick-start/develop-plugins/agent-strategy-plugin). * **Extensions** - Lightweight plugins that only provide endpoint capabilities for simpler scenarios, enabling fast expansions via HTTP services. This approach is ideal for straightforward integrations requiring basic API invoking. For more details, refer to [Quick Start: Extension Plugin](quick-start/develop-plugins/extension-plugin.md). + Lightweight plugins that only provide endpoint capabilities for simpler scenarios, enabling fast expansions via HTTP services. This approach is ideal for straightforward integrations requiring basic API invoking. For more details, refer to [Quick Start: Extension Plugin](/en/plugins/quick-start/develop-plugins/extension-plugin). * **Bundle** - A “plugin bundle” is a collection of multiple plugins. Bundles allow you to install a curated set of plugins all at once—no more adding them one by one. For more information on creating plugin bundles, see [Plugin Development: Bundle Plugin](quick-start/develop-plugins/bundle.md). + A "plugin bundle" is a collection of multiple plugins. 
Bundles allow you to install a curated set of plugins all at once—no more adding them one by one. For more information on creating plugin bundles, see [Plugin Development: Bundle Plugin](/en/plugins/quick-start/develop-plugins/bundle). -### **What’s New in Plugins?** +### **What's New in Plugins?** -* **Extend LLM’s Multimodal Capabilities** +* **Extend LLM's Multimodal Capabilities** - Plugins can boost an LLM’s ability to handle multimedia. Developers can add tasks like image editing, video processing, and more—ranging from cropping and background removal to working with portrait images. + Plugins can boost an LLM's ability to handle multimedia. Developers can add tasks like image editing, video processing, and more—ranging from cropping and background removal to working with portrait images. * **Developer-Friendly Debugging Capabilities** The plugin system supports popular IDEs and debugging tools. You just configure a few environment variables to remotely connect to a Dify instance—even one running as a SaaS. Any actions you take on that plugin in Dify are forwarded to your local runtime for debugging. @@ -58,7 +58,7 @@ Whether you’re looking to integrate a new model or add a specialized tool to e * **Built-In Data Management**: Plugins can reliably store and manage data, making it easier to implement complex business logic. * **Convenient Reverse Invocation** - Plugins can now interact bidirectionally with Dify’s core functions, including: + Plugins can now interact bidirectionally with Dify's core functions, including: * AI model invokes * Tool usage @@ -77,30 +77,25 @@ Whether you’re looking to integrate a new model or add a specialized tool to e To quickly install and use plugins, take refer to: - - install-plugins.md + To start developing plugins, take refer to: - - develop-plugins + **Publishing Plugins** To publish your plugin on the [Dify Marketplace](https://marketplace.dify.ai/), fill out the required information and usage documentation. 
Then submit your plugin code to the [GitHub repository](https://github.com/langgenius/dify-plugins). Once approved, it will be listed in the marketplace: - - publish-to-dify-marketplace + Beyond the official Dify Marketplace, you can also host your plugin on a personal GitHub repository or package it as a file for direct sharing: - - publish-to-dify-marketplace + - - package-plugin-file-and-publish.md + diff --git a/en/plugins/publish-plugins/README.mdx b/en/plugins/publish-plugins/README.mdx index f36ef518..6d9b3e1f 100644 --- a/en/plugins/publish-plugins/README.mdx +++ b/en/plugins/publish-plugins/README.mdx @@ -23,8 +23,7 @@ To accommodate the various publishing needs of developers, Dify provides three p For detailed instructions, please refer to: - - publish-to-dify-marketplace + #### 2. **GitHub Repository** @@ -43,8 +42,7 @@ For detailed instructions, please refer to: For detailed instructions, please refer to: - - publish-plugin-on-personal-github-repo.md + #### Plugin File (Local Installation) @@ -65,8 +63,7 @@ You can package the plugin project as a local file and share it with others. Aft For detailed instructions, please refer to: - - Package as Local File and Share + ### **Publication Recommendations** diff --git a/en/plugins/publish-plugins/package-plugin-file-and-publish.mdx b/en/plugins/publish-plugins/package-plugin-file-and-publish.mdx index 604689db..d30f1fb1 100644 --- a/en/plugins/publish-plugins/package-plugin-file-and-publish.mdx +++ b/en/plugins/publish-plugins/package-plugin-file-and-publish.mdx @@ -9,7 +9,7 @@ After completing plugin development, you can package your plugin project as a lo You'll need the Dify plugin development scaffolding tool for packaging plugins. Download the tool from the [official GitHub releases page](https://github.com/langgenius/dify-plugin-daemon/releases). 
-See the [Initialize Development Tools](../quick-start/develop-plugins/initialize-development-tools.md) tutorial for dependency installation and configuration steps. +See the [Initialize Development Tools](../quick-start/develop-plugins/initialize-development-tools.mdx) tutorial for dependency installation and configuration steps. Select and download the version appropriate for your operating system from the release assets. diff --git a/en/plugins/quick-start/README.mdx b/en/plugins/quick-start/README.mdx index 54f2792d..d5c43f55 100644 --- a/en/plugins/quick-start/README.mdx +++ b/en/plugins/quick-start/README.mdx @@ -7,10 +7,9 @@ Based on your specific needs, it is recommended to selectively follow the docume ### I Want to Use Plugins -If you’re looking to quickly install and start using plugins, prioritize the following content: +If you're looking to quickly install and start using plugins, prioritize the following content: - - install-plugins.md + ### I Want to Develop Plugins @@ -21,32 +20,26 @@ If you plan to develop plugins yourself, follow these steps to progressively div Explore development examples for various plugin types to grasp the basic structure and development process quickly. - - tool-plugin.md + - - model-plugin + - - agent-strategy-plugin.md + - - extension-plugin.md + - - bundle.md + #### 2. Advanced Development Read the **Endpoint Documentation** to gain an in-depth understanding of **key interfaces** and **implementation details** in plugin development. - - schema-definition + You can tailor your learning based on your specific needs to efficiently master the use or development of plugins and achieve your goals effectively. 
diff --git a/en/plugins/quick-start/develop-plugins/README.mdx b/en/plugins/quick-start/develop-plugins/README.mdx index ce6c745d..f72f8238 100644 --- a/en/plugins/quick-start/develop-plugins/README.mdx +++ b/en/plugins/quick-start/develop-plugins/README.mdx @@ -7,14 +7,12 @@ title: Develop Plugins You can quickly understand how to develop different types of plugins and master the functional components involved in plugin development through these development examples: - - initialize-development-tools.md + Using the **Google Search** tool as an example to demonstrate how to develop tool-type plugins. For more details, please take refer to the following: - - tool-plugin.md + By examining the **Anthropic** and **Xinference** models, we present separate guides on how to develop predefined model plugins and custom model plugins. @@ -24,32 +22,30 @@ By examining the **Anthropic** and **Xinference** models, we present separate gu For development examples, refer to the following content: - - model-plugin + Extension plugins enable developers to package business code as plugins and automatically provide an Endpoint request entry, functioning akin to an API service hosted on the Dify platform. For more details please take refer to: - - extension-plugin.md + ### **Endpoints Documentation** If you want to read detailed interface documentation for plugin projects, you can refer to these standard specification documents: -1. [General Specifications](../../schema-definition/general-specifications.md) -2. [Manifest Definitions](../../schema-definition/manifest.md) -3. [Tool Integration Definitions](../../schema-definition/tool.md) -4. [Model Integration Introduction](../../schema-definition/model/) -5. [Endpoint Definitions](../../schema-definition/endpoint.md) -6. [Extended Agent Strategy](../../schema-definition/agent.md) -7. [Reverse Invocation of the Dify](../../schema-definition/reverse-invocation-of-the-dify-service/) Services - 1. 
[Reverse Invoking Apps](../../schema-definition/reverse-invocation-of-the-dify-service/app.md) - 2. [Reverse Invoking Models](../../schema-definition/reverse-invocation-of-the-dify-service/model.md) - 3. [Reverse Invoking Nodes](../../schema-definition/reverse-invocation-of-the-dify-service/node.md) - 4. [Reverse Invoking Tools](../../schema-definition/reverse-invocation-of-the-dify-service/tool.md) -8. [Plugin Persistence Storage Capabilities](../../schema-definition/persistent-storage.md) +1. [General Specifications](/en/plugins/schema-definition/general-specifications) +2. [Manifest Definitions](/en/plugins/schema-definition/manifest) +3. [Tool Integration Definitions](/en/plugins/schema-definition/tool) +4. [Model Integration Introduction](/en/plugins/schema-definition/model) +5. [Endpoint Definitions](/en/plugins/schema-definition/endpoint) +6. [Extended Agent Strategy](/en/plugins/schema-definition/agent) +7. [Reverse Invocation of the Dify](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service) Services + 1. [Reverse Invoking Apps](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/app) + 2. [Reverse Invoking Models](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/model) + 3. [Reverse Invoking Nodes](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/node) + 4. [Reverse Invoking Tools](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/tool) +8. [Plugin Persistence Storage Capabilities](/en/plugins/schema-definition/persistent-storage) ### **Contribution Guidelines** @@ -57,10 +53,10 @@ Want to contribute code and features to Dify Marketplace? 
We provide detailed development guidelines and contribution guidelines to help you understand our architecture design and contribution process: -* [Dify Plugin Contribution Guidelines](../../publish-plugins/publish-to-dify-marketplace/) +* [Dify Plugin Contribution Guidelines](/en/plugins/publish-plugins/publish-to-dify-marketplace) Learn how to submit your plugin to the Dify Marketplace to share your work with a broader developer community. -* [GitHub Publishing Guidelines](../../publish-plugins/publish-plugin-on-personal-github-repo.md) +* [GitHub Publishing Guidelines](/en/plugins/publish-plugins/publish-plugin-on-personal-github-repo) Discover how to publish and manage your plugins on GitHub, ensuring ongoing optimization and collaboration with the community. diff --git a/en/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx b/en/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx index 02b34361..6a445c61 100644 --- a/en/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx +++ b/en/plugins/quick-start/develop-plugins/agent-strategy-plugin.mdx @@ -12,7 +12,7 @@ Below, you’ll see how to develop a plugin that supports **Function Calling** t - Dify plugin scaffolding tool - Python environment (version ≥ 3.12) -For details on preparing the plugin development tool, see [Initializing the Development Tool](initialize-development-tools.md). +For details on preparing the plugin development tool, see [Initializing the Development Tool](/en/plugins/quick-start/develop-plugins/initialize-development-tools). **Tip**: Run `dify version` in your terminal to confirm that the scaffolding tool is installed. 
diff --git a/en/plugins/quick-start/develop-plugins/extension-plugin.mdx b/en/plugins/quick-start/develop-plugins/extension-plugin.mdx index 0d899c2e..b837ad84 100644 --- a/en/plugins/quick-start/develop-plugins/extension-plugin.mdx +++ b/en/plugins/quick-start/develop-plugins/extension-plugin.mdx @@ -10,7 +10,7 @@ This guide will help you quickly develop an Extension type plugin and understand * Dify plugin scaffolding tool * Python environment, version ≥ 3.12 -For detailed instructions on preparing the plugin development scaffolding tool, please refer to [Initializing Development Tools](initialize-development-tools.md). +For detailed instructions on preparing the plugin development scaffolding tool, please refer to [Initializing Development Tools](/en/plugins/quick-start/develop-plugins/initialize-development-tools). ### **Create New Project** @@ -249,25 +249,25 @@ Congratulations, you have completed the complete development, debugging and pack ### Publishing Plugins -You can now publish your plugin by uploading it to the [Dify Plugins code repository](https://github.com/langgenius/dify-plugins)! Before uploading, make sure your plugin follows the [plugin release guide](../publish-plugins/publish-to-dify-marketplace.md). Once approved, the code will be merged into the master branch and automatically live in the [Dify Marketplace](https://marketplace.dify.ai/). +You can now publish your plugin by uploading it to the [Dify Plugins code repository](https://github.com/langgenius/dify-plugins)! Before uploading, make sure your plugin follows the [plugin release guide](/en/plugins/publish-plugins/publish-to-dify-marketplace). Once approved, the code will be merged into the master branch and automatically live in the [Dify Marketplace](https://marketplace.dify.ai/). 
#### Exploring More **Quick Start:** -* [Develop Extension Type Plugin](extension-plugin.md) -* [Develop Model Type Plugin](model-plugin/) -* [Bundle Type Plugin: Package Multiple Plugins](bundle.md) +* [Develop Extension Type Plugin](/en/plugins/quick-start/develop-plugins/extension-plugin) +* [Develop Model Type Plugin](/en/plugins/quick-start/develop-plugins/model-plugin) +* [Bundle Type Plugin: Package Multiple Plugins](/en/plugins/quick-start/develop-plugins/bundle) **Plugins Specification Definition Documentaiton:** -* [Minifest](../schema-definition/manifest.md) -* [Endpoint](../schema-definition/endpoint.md) -* [Reverse Invocation of the Dify Service](../schema-definition/reverse-invocation-of-the-dify-service/) -* [Tools](../../guides/tools/) -* [Models](../schema-definition/model/model-schema.md) -* [Extend Agent Strategy](../schema-definition/agent.md) +* [Minifest](/en/plugins/schema-definition/manifest) +* [Endpoint](/en/plugins/schema-definition/endpoint) +* [Reverse Invocation of the Dify Service](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service) +* [Tools](/en/guides/tools) +* [Models](/en/plugins/schema-definition/model/model-schema) +* [Extend Agent Strategy](/en/plugins/schema-definition/agent) **Best Practices:** -[Develop a Slack Bot Plugin](../../best-practice/develop-a-slack-bot-plugin.md) +[Develop a Slack Bot Plugin](/en/plugins/best-practice/develop-a-slack-bot-plugin) diff --git a/en/plugins/quick-start/develop-plugins/initialize-development-tools.mdx b/en/plugins/quick-start/develop-plugins/initialize-development-tools.mdx index e2d088b3..50b964d7 100644 --- a/en/plugins/quick-start/develop-plugins/initialize-development-tools.mdx +++ b/en/plugins/quick-start/develop-plugins/initialize-development-tools.mdx @@ -52,22 +52,17 @@ For detailed instructions, please refer to the [Python installation](https://pyt Please refer to the following content for examples of different types of plugin development. 
- - tool-plugin.md + - - model-plugin + - - agent-strategy-plugin.md + - - extension-plugin.md + - - bundle.md + diff --git a/en/plugins/quick-start/develop-plugins/model-plugin/README.mdx b/en/plugins/quick-start/develop-plugins/model-plugin/README.mdx index a509c7e9..ffb9f1b9 100644 --- a/en/plugins/quick-start/develop-plugins/model-plugin/README.mdx +++ b/en/plugins/quick-start/develop-plugins/model-plugin/README.mdx @@ -66,6 +66,6 @@ Taking OpenAI as an example, which supports multiple model types: Please follow these steps to create a model plugin, click the document titles for specific creation guides: -1. [Create Model Provider](create-model-providers.md) -2. Integrate [Predefined](../../../guides/model-configuration/predefined-model.md)/[Custom](../../../guides/model-configuration/customizable-model.md) Models -3. [Debug Plugin](../../debug-plugin.md) +1. [Create Model Provider](/en/plugins/quick-start/develop-plugins/model-plugin/create-model-providers) +2. Integrate [Predefined](/en/guides/model-configuration/predefined-model)/[Custom](/en/guides/model-configuration/customizable-model) Models +3. [Debug Plugin](/en/plugins/quick-start/debug-plugin) diff --git a/en/plugins/quick-start/develop-plugins/model-plugin/create-model-providers.mdx b/en/plugins/quick-start/develop-plugins/model-plugin/create-model-providers.mdx index fc7c95e2..bda3e900 100644 --- a/en/plugins/quick-start/develop-plugins/model-plugin/create-model-providers.mdx +++ b/en/plugins/quick-start/develop-plugins/model-plugin/create-model-providers.mdx @@ -10,7 +10,7 @@ Creating a Model Type Plugin The first step in creating a Model type plugin is t * Dify plugin scaffolding tool * Python environment, version ≥ 3.12 -For detailed instructions on preparing the plugin development scaffolding tool, please refer to [Initializing Development Tools](../initialize-development-tools.md). 
+For detailed instructions on preparing the plugin development scaffolding tool, please refer to [Initializing Development Tools](/en/plugins/quick-start/develop-plugins/initialize-development-tools). ### **Create New Project** @@ -155,7 +155,7 @@ model: en_US: Enter your API Base ``` -For a more complete look at the Model Provider YAML specification, see [Schema](../../schema-definition/) for details. +For a more complete look at the Model Provider YAML specification, see [Schema](/en/plugins/schema-definition) for details. #### 2. **Write model provider code** @@ -224,5 +224,5 @@ class XinferenceProvider(Provider): After initializing the model provider, the next step is to integrate specific llm models provided by the provider. For detailed instructions, please refer to: -* [Develop Predefined Models](../../../../guides/model-configuration/predefined-model.md) -* [Develop Custom Models](../../../../guides/model-configuration/customizable-model.md) +* [Develop Predefined Models](/en/guides/model-configuration/predefined-model) +* [Develop Custom Models](/en/guides/model-configuration/customizable-model) diff --git a/en/plugins/quick-start/develop-plugins/model-plugin/customizable-model.mdx b/en/plugins/quick-start/develop-plugins/model-plugin/customizable-model.mdx index 3e5125b2..e5e56d83 100644 --- a/en/plugins/quick-start/develop-plugins/model-plugin/customizable-model.mdx +++ b/en/plugins/quick-start/develop-plugins/model-plugin/customizable-model.mdx @@ -209,7 +209,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None: * **Dynamic Model Parameters Schema** -Unlike [predefined models](predefined-model.md), no YAML is defining which parameters a model supports. You must generate a parameter schema dynamically. +Unlike [predefined models](/en/plugins/quick-start/develop-plugins/model-plugin/predefined-model), no YAML is defining which parameters a model supports. You must generate a parameter schema dynamically. 
For example, Xinference supports `max_tokens`, `temperature`, and `top_p`. Some other providers (e.g., `OpenLLM`) may support parameters like `top_k` only for certain models. This means you need to adapt your schema to each model’s capabilities: @@ -315,8 +315,7 @@ To view the complete code files discussed in this guide, visit the [GitHub Repos After finishing development, test the plugin to ensure it runs correctly. For more details, refer to: - - debug-plugin.md + ### 4. Publish the Plugin @@ -329,14 +328,14 @@ Publish to Dify Marketplace **Quick Start:** -* [Develop Extension Plugin](../extension-plugin.md) -* [Develop Tool Plugin](../tool-plugin.md) -* [Bundle Plugins: Package Multiple Plugins](../bundle.md) +* [Develop Extension Plugin](/en/plugins/quick-start/develop-plugins/extension-plugin) +* [Develop Tool Plugin](/en/plugins/quick-start/develop-plugins/tool-plugin) +* [Bundle Plugins: Package Multiple Plugins](/en/plugins/quick-start/develop-plugins/bundle) **Plugins Endpoint Docs:** -* [Manifest](../../../schema-definition/manifest.md) Structure -* [Endpoint](../../../schema-definition/endpoint.md) Definitions -* [Reverse-Invocation of the Dify Service](../../../schema-definition/reverse-invocation-of-the-dify-service/) -* [Tools](../../../schema-definition/tool.md) -* [Models](../../../schema-definition/model/) +* [Manifest](/en/plugins/schema-definition/manifest) Structure +* [Endpoint](/en/plugins/schema-definition/endpoint) Definitions +* [Reverse-Invocation of the Dify Service](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service) +* [Tools](/en/plugins/schema-definition/tool) +* [Models](/en/plugins/schema-definition/model) diff --git a/en/plugins/quick-start/develop-plugins/model-plugin/predefined-model.mdx b/en/plugins/quick-start/develop-plugins/model-plugin/predefined-model.mdx index 586d082a..6dfbeddd 100644 --- a/en/plugins/quick-start/develop-plugins/model-plugin/predefined-model.mdx +++ 
b/en/plugins/quick-start/develop-plugins/model-plugin/predefined-model.mdx @@ -3,7 +3,7 @@ title: Integrate the Predefined Model --- -Before accessing a predefined model, make sure you have created a [model provider](create-model-providers.md). Accessing predefined models is roughly divided into the following steps: +Before accessing a predefined model, make sure you have created a [model provider](/en/plugins/quick-start/develop-plugins/model-plugin/create-model-providers). Accessing predefined models is roughly divided into the following steps: 1. **Create Module Structures by Model Type** @@ -13,7 +13,7 @@ Before accessing a predefined model, make sure you have created a [model provide Create a Python file with the same name as the model type (e.g., llm.py) under the corresponding model type module. Define a class that implements specific model logic and complies with the system's model interface specifications. 3. **Add Predefined Model Configuration** - If the provider offers predefined models, create `YAML` files named after each model (e.g., `claude-3.5.yaml`). Write file content according to [AIModelEntity](../../schema-definition/model/model-designing-rules.md#modeltype) specifications, describing model parameters and functionality. + If the provider offers predefined models, create `YAML` files named after each model (e.g., `claude-3.5.yaml`). Write file content according to [AIModelEntity](/en/plugins/schema-definition/model/model-designing-rules#modeltype) specifications, describing model parameters and functionality. 4. **Test Plugin** Write unit tests and integration tests for newly added provider functionality to ensure all function modules meet expectations and operate normally. @@ -84,7 +84,7 @@ If the model provider contains multiple types of large models, e.g., the OpenAI │ └── tts.py ``` -It is recommended to prepare all model configurations before starting the model code implementation. 
For complete YAML rules, please refer to the [Model Design Rules](../../schema-definition/model/model-designing-rules.md). For more code details, please refer to the example [Github repository](https://github.com/langgenius/dify-official-plugins/tree/main/models). +It is recommended to prepare all model configurations before starting the model code implementation. For complete YAML rules, please refer to the [Model Design Rules](/en/plugins/schema-definition/model/model-designing-rules). For more code details, please refer to the example [Github repository](https://github.com/langgenius/dify-official-plugins/tree/main/models). ### 2. Writing Model Requesting Code @@ -236,27 +236,26 @@ pricing: Dify provides remote debugging method, go to "Plugin Management" page to get the debugging key and remote server address. Check here for more details: - - debug-plugin.md + ### Publishing Plugins -You can now publish your plugin by uploading it to the [Dify Plugins code repository](https://github.com/langgenius/dify-plugins)! Before uploading, make sure your plugin follows the [plugin release guide](../../publish-plugins/publish-to-dify-marketplace.md). Once approved, the code will be merged into the master branch and automatically live in the [Dify Marketplace](https://marketplace.dify.ai/). +You can now publish your plugin by uploading it to the [Dify Plugins code repository](https://github.com/langgenius/dify-plugins)! Before uploading, make sure your plugin follows the [plugin release guide](/en/plugins/publish-plugins/publish-to-dify-marketplace). Once approved, the code will be merged into the master branch and automatically live in the [Dify Marketplace](https://marketplace.dify.ai/). 
#### Exploring More **Quick Start:** -* [Develop Extension Type Plugin](../extension-plugin.md) +* [Develop Extension Type Plugin](/en/plugins/quick-start/develop-plugins/extension-plugin) * [Develop Model Type Plugin](./) -* [Bundle Type Plugin: Package Multiple Plugins](../bundle.md) +* [Bundle Type Plugin: Package Multiple Plugins](/en/plugins/quick-start/develop-plugins/bundle) **Plugins Specification Definition Documentaiton:** -* [Minifest](../../schema-definition/manifest.md) -* [Endpoint](../../schema-definition/endpoint.md) -* [Reverse Invocation of the Dify Service](../../schema-definition/reverse-invocation-of-the-dify-service/) -* [Tools](../../../guides/tools/) -* [Models](../../schema-definition/model/model-schema.md) -* [Extend Agent Strategy](../../schema-definition/agent.md) +* [Manifest](/en/plugins/schema-definition/manifest) +* [Endpoint](/en/plugins/schema-definition/endpoint) +* [Reverse Invocation of the Dify Service](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service) +* [Tools](/en/guides/tools) +* [Models](/en/plugins/schema-definition/model/model-schema) +* [Extend Agent Strategy](/en/plugins/schema-definition/agent) diff --git a/en/plugins/quick-start/develop-plugins/tool-plugin.mdx b/en/plugins/quick-start/develop-plugins/tool-plugin.mdx index 01e02b98..eee4462c 100644 --- a/en/plugins/quick-start/develop-plugins/tool-plugin.mdx +++ b/en/plugins/quick-start/develop-plugins/tool-plugin.mdx @@ -24,7 +24,7 @@ This article uses `GoogleSearch` as an example of how to quickly develop a tool * Dify plugin scaffolding tool * Python, version ≥ 3.12 -For detailed instructions on how to prepare scaffolding tools for plugin development, see [Initializing Development Tools](initialize-development-tools.md). +For detailed instructions on how to prepare scaffolding tools for plugin development, see [Initializing Development Tools](/en/plugins/quick-start/develop-plugins/initialize-development-tools). 
### **Create New Project** @@ -46,7 +46,7 @@ dify plugin init There are three types of plugins: tool, model and extension. All templates within SDK are provided with full code projects. The following part will use the **Tool plugin** template as an example. -> If you are already familiar in plugin development, please refer to the [Schema Definition](../../schema-definition/) to implement various types of plugins. +> If you are already familiar with plugin development, please refer to the [Schema Definition](/en/plugins/schema-definition) to implement various types of plugins. ![Plugins type](https://assets-docs.dify.ai/2024/12/dd3c0f9a66454e15868eabced7b74fd6.png) @@ -165,7 +165,7 @@ extra: source: google.py ``` -* where the `credentials_for_provider` sub-level structure needs to satisfy the [ProviderConfig](../schema-definition/general-specifications.md#providerconfig) specification. +* where the `credentials_for_provider` sub-level structure needs to satisfy the [ProviderConfig](/en/plugins/schema-definition/general-specifications#providerconfig) specification. * It is necessary to specify which tools are included in this provider. This example only includes a `tools/google_search.yaml` file. * For the provider, in addition to defining its basic information, you also need to implement some of its code logic, so you need to specify its implementation logic. In this example, the code file for the function is placed in `google.py`, but instead of implementing it for the time being, you write the code for `google_search` first. @@ -278,7 +278,7 @@ class GoogleSearchTool(Tool): yield self.create_json_message(valuable_res) ``` -In this example, we simply request the `serpapi` and use `self.create_json_message` to return a string of `json` formatted data. For more information on the types of data returned, you can refer to the [tool](../schema-definition/tool.md) documentation. 
+In this example, we simply request the `serpapi` and use `self.create_json_message` to return a string of `json` formatted data. For more information on the types of data returned, you can refer to the [tool](/en/plugins/schema-definition/tool) documentation. #### 4. Completion of tool vendor codes @@ -339,21 +339,21 @@ Congratulations, you have completed the complete development, debugging and pack ### Publishing Plugins -You can now publish your plugin by uploading it to the [Dify Plugins code repository](https://github.com/langgenius/dify-plugins)! Before uploading, make sure your plugin follows the [plugin release guide](../publish-plugins/publish-to-dify-marketplace.md). Once approved, the code will be merged into the master branch and automatically live in the [Dify Marketplace](https://marketplace.dify.ai/). +You can now publish your plugin by uploading it to the [Dify Plugins code repository](https://github.com/langgenius/dify-plugins)! Before uploading, make sure your plugin follows the [plugin release guide](/en/plugins/publish-plugins/publish-to-dify-marketplace). Once approved, the code will be merged into the master branch and automatically live in the [Dify Marketplace](https://marketplace.dify.ai/). 
 #### Exploring More
 
 **Quick Start:**
 
-* [Develop Extension Type Plugin](extension-plugin.md)
-* [Develop Model Type Plugin](model-plugin/)
-* [Bundle Type Plugin: Package Multiple Plugins](bundle.md)
+* [Develop Extension Type Plugin](/en/plugins/quick-start/develop-plugins/extension-plugin)
+* [Develop Model Type Plugin](/en/plugins/quick-start/develop-plugins/model-plugin)
+* [Bundle Type Plugin: Package Multiple Plugins](/en/plugins/quick-start/develop-plugins/bundle)
 
 **Plugins Specification Definition Documentaiton:**
 
-* [Minifest](../schema-definition/manifest.md)
-* [Endpoint](../schema-definition/endpoint.md)
-* [Reverse Invocation of the Dify Service](../schema-definition/reverse-invocation-of-the-dify-service/)
-* [Tools](../../guides/tools/)
-* [Models](../schema-definition/model/model-schema.md)
-* [Extend Agent Strategy](../schema-definition/agent.md)
+* [Manifest](/en/plugins/schema-definition/manifest)
+* [Endpoint](/en/plugins/schema-definition/endpoint)
+* [Reverse Invocation of the Dify Service](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service)
+* [Tools](/en/guides/tools)
+* [Models](/en/plugins/schema-definition/model/model-schema)
+* [Extend Agent Strategy](/en/plugins/schema-definition/agent)
diff --git a/en/plugins/quick-start/install-plugins.mdx b/en/plugins/quick-start/install-plugins.mdx
index 9acbf279..0fa81a2a 100644
--- a/en/plugins/quick-start/install-plugins.mdx
+++ b/en/plugins/quick-start/install-plugins.mdx
@@ -1,12 +1,8 @@
----
-description: 'Author: Allen'
----
-
 ---
 title: Install and Use Plugins
+description: 'Author: Allen'
 ---
-
 ## Installing Plugins
 
 To install plugins, click **"Plugins"** in the top-right corner of the Dify platform to access the plugin management page. You can install plugins via **Marketplace, GitHub, or Manual Upload**.
 
@@ -21,13 +17,13 @@ Browse and select a plugin from the Marketplace. Click **"Install"** to add it to your workspace.
 
 #### GitHub
 
-Install plugins directly using GitHub repository links. This method requires plugins to meet code standards and include a `.difypkg` file attached to a Release. For more details, see [Publishing Plugins on GitHub](../publish-plugins/publish-plugin-on-personal-github-repo.md).
+Install plugins directly using GitHub repository links. This method requires plugins to meet code standards and include a `.difypkg` file attached to a Release. For more details, see [Publishing Plugins on GitHub](/en/plugins/publish-plugins/publish-plugin-on-personal-github-repo).
 
 ![GitHub Installation](https://assets-docs.dify.ai/2025/01/4026a12a915e3fe9bd057d8827acfdce.png)
 
 #### Local Upload
 
-After [packaging your plugin](../publish-plugins/package-plugin-file-and-publish.md), upload the resulting `.difypkg` file. This option is ideal for offline or test environments and allows organizations to maintain internal plugins without exposing sensitive information.
+After [packaging your plugin](/en/plugins/publish-plugins/package-plugin-file-and-publish), upload the resulting `.difypkg` file. This option is ideal for offline or test environments and allows organizations to maintain internal plugins without exposing sensitive information.
#### Authorizing Plugins @@ -89,6 +85,5 @@ For usage methods of other plugin types, refer to their respective plugin detail To learn how to get started with plugin development, refer to the following guide: - - develop-plugins + diff --git a/en/plugins/schema-definition/README.mdx b/en/plugins/schema-definition/README.mdx index 01dc9c08..c58e6ea3 100644 --- a/en/plugins/schema-definition/README.mdx +++ b/en/plugins/schema-definition/README.mdx @@ -3,26 +3,20 @@ title: Schema Specification --- - - manifest.md + - - endpoint.md + - - model + - - general-specifications.md + - - persistent-storage.md + - - reverse-invocation-of-the-dify-service + diff --git a/en/plugins/schema-definition/model/model-designing-rules.mdx b/en/plugins/schema-definition/model/model-designing-rules.mdx index dac1cd4f..1ffae1da 100644 --- a/en/plugins/schema-definition/model/model-designing-rules.mdx +++ b/en/plugins/schema-definition/model/model-designing-rules.mdx @@ -3,8 +3,8 @@ title: Model Designing Rules --- -* Model provider rules are based on the [Provider](model-designing-rules.md#provider) entity. -* Model rules are based on the [AIModelEntity](model-designing-rules.md#provider) entity. +* Model provider rules are based on the [Provider](/en/plugins/schema-definition/model/model-designing-rules#provider) entity. +* Model rules are based on the [AIModelEntity](/en/plugins/schema-definition/model/model-designing-rules#provider) entity. > All entities below are based on Pydantic BaseModel and can be found in the entities module. 
diff --git a/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/README.mdx b/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/README.mdx index 12774349..c894c0d7 100644 --- a/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/README.mdx +++ b/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/README.mdx @@ -7,16 +7,16 @@ Plugins can request some of the services within the main Dify platform to enhanc ### Callable Dify Modules -* [**App**](app.md) +* [**App**](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/app) Plugins can access data from Apps within the Dify platform. -* [**Model**](model.md) +* [**Model**](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/model) Plugins can make reverse calls to LLM capabilities within the Dify platform, including all model types and features available on the platform, such as TTS, Rerank, etc. -* [**Tool**](tool.md) +* [**Tool**](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/tool) Plugins can request other tool-type plugins within the Dify platform. -* [**Node**](node.md) +* [**Node**](/en/plugins/schema-definition/reverse-invocation-of-the-dify-service/node) Plugins can request nodes within specific Chatflow/Workflow applications on the Dify platform. diff --git a/en/policies/agreement/README.mdx b/en/policies/agreement/README.mdx index 4eaeb12f..8f861caf 100644 --- a/en/policies/agreement/README.mdx +++ b/en/policies/agreement/README.mdx @@ -21,7 +21,7 @@ Dify.AI has obtained the following certifications: For instructions on how to download and check these compliance certificates, please refer to the relevant documentation. 
- - get-compliance-report.md + + Get Compliance Report diff --git a/en/workshop/basic/build-ai-image-generation-app.mdx b/en/workshop/basic/build-ai-image-generation-app.mdx index 56ef7c4b..ca19ec03 100644 --- a/en/workshop/basic/build-ai-image-generation-app.mdx +++ b/en/workshop/basic/build-ai-image-generation-app.mdx @@ -76,10 +76,9 @@ In this experiment, we only need to understand the basic usage of Agent. **What is an Agent** -An Agent is an AI system that simulates human behavior and capabilities. It interacts with the environment through natural language processing, understands input information, and generates corresponding outputs. The Agent also has " -className="mx-auto" -alt="" -/> +An Agent is an AI system that simulates human behavior and capabilities. It interacts with the environment through natural language processing, understands input information, and generates corresponding outputs. The Agent also has "perception" capabilities, can process and analyze various forms of data, and can call and use various external tools and APIs to complete tasks, extending its functional scope. This design allows the Agent to handle complex situations more flexibly and simulate human thinking and behavior patterns to some extent. + + Select **Agent**, fill in the name. @@ -95,14 +94,9 @@ Select the LLM. 
Here we use Llama-3.1-70B provided by groq as an example: Select Stability in **Tools**: -![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/workshop/basic/6e1c3dd63925fd9ba60568deb2602044.png" alt="Image 1" /> - - - +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/workshop/basic/6e1c3dd63925fd9ba60568deb2602044.png) + +![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/workshop/basic/539060be4e014126f9c5fc96c53dc5a4.png) ### Write Prompts diff --git a/en/workshop/intermediate/article-reader.mdx b/en/workshop/intermediate/article-reader.mdx index 91774249..114bc6a0 100644 --- a/en/workshop/intermediate/article-reader.mdx +++ b/en/workshop/intermediate/article-reader.mdx @@ -30,7 +30,7 @@ In this experiment, at least four types of nodes are required: start node, docum In the start node, you need to add a file variable. File upload is supported in v0.10.0 Dify, allowing you to add files as variable. -For more information on file upload, please read: [File Upload](../../guides/workflow/file-upload.md) +For more information on file upload, please read: [File Upload](/en/guides/workflow/file-upload) In the start node, you need to add a file variable and check the document in the supported file types. 
diff --git a/en/workshop/intermediate/customer-service-bot.mdx b/en/workshop/intermediate/customer-service-bot.mdx index 103c65c8..5f2b0f92 100644 --- a/en/workshop/intermediate/customer-service-bot.mdx +++ b/en/workshop/intermediate/customer-service-bot.mdx @@ -169,7 +169,7 @@ In the knowledge base function, you can connect external knowledge bases through ![](https://assets-docs.dify.ai/dify-enterprise-mintlify/en/workshop/intermediate/7bcfb95e806966a868885814f0d7dc35.png) -For best practices on AWS Bedrock knowledge bases, please read: [how-to-connect-aws-bedrock.md](../../learn-more/use-cases/how-to-connect-aws-bedrock.md "mention") +For best practices on AWS Bedrock knowledge bases, please read: [How to Connect AWS Bedrock](/en/learn-more/use-cases/how-to-connect-aws-bedrock) ### Question 2: How to Manage Knowledge Bases Through APIs diff --git a/en/workshop/intermediate/twitter-chatflow.mdx b/en/workshop/intermediate/twitter-chatflow.mdx index 51cbf36b..fc8fa0e4 100644 --- a/en/workshop/intermediate/twitter-chatflow.mdx +++ b/en/workshop/intermediate/twitter-chatflow.mdx @@ -1,13 +1,7 @@ ---- -cover: ../../.gitbook/assets/%E7%94%BB%E6%9D%BF_1.png -coverY: 0 ---- - --- title: Generating analysis of Twitter account using Chatflow Agent --- - ## Introduction In Dify, you can use some crawler tools, such as Jina, which can convert web pages into markdown format that LLMs can read. 
diff --git a/link_converter.py b/link_converter.py
new file mode 100644
index 00000000..517511b0
--- /dev/null
+++ b/link_converter.py
@@ -0,0 +1,302 @@
+#!/usr/bin/env python3
+import os
+import re
+import json
+import sys
+import difflib
+from pathlib import Path
+def load_docs_json(docs_json_path):
+    """加载 docs.json 文件并解析其中的路径信息"""
+    with open(docs_json_path, 'r', encoding='utf-8') as f:
+        return json.load(f)
+
+# ANSI 颜色代码
+class Colors:
+    RESET = '\033[0m'
+    RED = '\033[91m'
+    GREEN = '\033[92m'
+    YELLOW = '\033[93m'
+    BLUE = '\033[94m'
+    MAGENTA = '\033[95m'
+    CYAN = '\033[96m'
+    WHITE = '\033[97m'
+    BOLD = '\033[1m'
+    UNDERLINE = '\033[4m'
+
+def extract_valid_paths(docs_data):
+    """从 docs.json 中提取所有有效的文档路径"""
+    valid_paths = set()
+
+    # 递归函数用于处理嵌套结构
+    def extract_paths_from_object(obj):
+        if isinstance(obj, dict):
+            # 检查是否有页面路径
+            if "pages" in obj:
+                for page in obj["pages"]:
+                    if isinstance(page, str) and not page.startswith("http"):
+                        valid_paths.add(page)
+                    elif isinstance(page, dict):
+                        extract_paths_from_object(page)
+            # 处理其他可能的字典键
+            for key, value in obj.items():
+                if key != "pages": # 避免重复处理
+                    extract_paths_from_object(value)
+        elif isinstance(obj, list):
+            # 处理列表中的每个元素
+            for item in obj:
+                extract_paths_from_object(item)
+
+    # 开始提取
+    extract_paths_from_object(docs_data)
+    return valid_paths
+
+def find_closest_path(target_path, valid_paths):
+    """查找最接近的有效路径"""
+    # 移除扩展名
+    target_without_ext, _ = os.path.splitext(target_path)
+
+    # 检查精确匹配
+    for path in valid_paths:
+        path_without_ext, _ = os.path.splitext(path)
+        if path_without_ext == target_without_ext or path == target_without_ext:
+            return path
+
+    # 如果没有精确匹配,查找最相似的路径
+    best_match = None
+    best_score = 0
+
+    for path in valid_paths:
+        # 计算路径的相似度
+        score = difflib.SequenceMatcher(None, target_without_ext, path).ratio()
+
+        # 特别检查路径的末尾部分(文件名)是否匹配
+        target_parts = target_without_ext.split('/')
+        path_parts = path.split('/')
+
+        if len(target_parts) > 0 and len(path_parts) > 0: 
+ if target_parts[-1] == path_parts[-1]: + score += 0.3 # 增加匹配度 + + if score > best_score: + best_score = score + best_match = path + + # 如果相似度足够高,返回最佳匹配 + if best_score > 0.6: # 阈值可以调整 + return best_match + + # 寻找包含目标路径末尾部分的路径 + if len(target_parts) > 0: + for path in valid_paths: + if target_parts[-1] in path.split('/'): + return path + + return None + +def find_md_files(path): + """查找指定路径下的所有 .md 和 .mdx 文件""" + path_obj = Path(path) + if path_obj.is_file() and (path_obj.suffix in ['.md', '.mdx']): + return [path_obj] + + md_files = [] + for root, _, files in os.walk(path): + for file in files: + if file.endswith(('.md', '.mdx')): + md_files.append(Path(os.path.join(root, file))) + + return md_files + +def convert_links_in_file(file_path, valid_paths, base_dir): + """转换文件中的链接""" + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + + # 提取当前文件的相对路径(相对于基础目录) + relative_file_path = os.path.relpath(file_path, base_dir) + file_dir = os.path.dirname(relative_file_path) + + # 找出所有 Markdown 链接 + link_pattern = re.compile(r'\[([^\]]+)\]\(([^)]+)\)') + links = link_pattern.findall(content) + + changes = [] + for link_text, link_url in links: + # 跳过外部链接 + if link_url.startswith(('http://', 'https://', 'mailto:', '#')): + continue + + # 跳过已经以 /en/ 开头的链接 + if link_url.startswith('/en/'): + continue + + # 处理相对路径链接 + if link_url.startswith(('../', './')) or not link_url.startswith('/') and not link_url.startswith('http'): + # 计算绝对路径 + if link_url.startswith('./'): + link_url = link_url[2:] # 移除开头的 ./ + + # 计算目标文件的完整路径 + target_path = os.path.normpath(os.path.join(file_dir, link_url)) + + # 提取不包含扩展名的路径(docs.json 中通常不包含扩展名) + target_without_ext, _ = os.path.splitext(target_path) + + # 防止路径重复 + if target_without_ext.startswith('en/'): + clean_target = target_without_ext + else: + clean_target = target_without_ext + + # 查找匹配的有效路径 + matching_path = None + for valid_path in valid_paths: + valid_without_ext, _ = os.path.splitext(valid_path) + if valid_without_ext 
== clean_target or valid_path == clean_target: + matching_path = valid_path + break + + if matching_path: + # 找到匹配的路径,转换为以 /en/ 开头的绝对路径 + if not matching_path.startswith('/'): + if matching_path.startswith('en/'): + new_link_url = f"/{matching_path}" + else: + new_link_url = f"/en/{matching_path}" + else: + new_link_url = matching_path + + old_link = f"[{link_text}]({link_url})" + new_link = f"[{link_text}]({new_link_url})" + + changes.append((old_link, new_link, link_url, new_link_url)) + else: + # 没有找到匹配路径,使用通用转换规则 + # 去掉可能的文件扩展名 + clean_target, _ = os.path.splitext(target_path) + if clean_target.startswith('en/'): + new_link_url = f"/{clean_target}" + else: + new_link_url = f"/en/{clean_target}" + + old_link = f"[{link_text}]({link_url})" + new_link = f"[{link_text}]({new_link_url})" + + changes.append((old_link, new_link, link_url, new_link_url)) + + # 处理 docs.dify.ai 链接 + elif "docs.dify.ai" in link_url: + # 从 docs.dify.ai 链接中提取路径部分 + docs_path = link_url.split("docs.dify.ai/")[-1] + + # 查找匹配的有效路径 + matching_path = None + for valid_path in valid_paths: + if valid_path.endswith(docs_path) or docs_path in valid_path: + matching_path = valid_path + break + + if matching_path: + # 找到匹配的路径,转换为以 /en/ 开头的绝对路径 + if not matching_path.startswith('/'): + if matching_path.startswith('en/'): + new_link_url = f"/{matching_path}" + else: + new_link_url = f"/en/{matching_path}" + else: + new_link_url = matching_path + + old_link = f"[{link_text}]({link_url})" + new_link = f"[{link_text}]({new_link_url})" + + changes.append((old_link, new_link, link_url, new_link_url)) + else: + # 没有找到匹配路径,使用通用转换规则 + parts = docs_path.split('/') + if len(parts) >= 2: + # 假设 docs.dify.ai 链接对应于 en/ 目录下的内容 + new_link_url = f"/en/{docs_path}" + + old_link = f"[{link_text}]({link_url})" + new_link = f"[{link_text}]({new_link_url})" + + changes.append((old_link, new_link, link_url, new_link_url)) + + return changes, content + +def main(): + """主函数""" + base_dir = 
os.path.dirname(os.path.abspath(__file__)) + docs_json_path = os.path.join(base_dir, 'docs.json') + + # 显示脚本标题 + print(f"\n{Colors.CYAN}{Colors.BOLD}===== Dify 文档链接转换工具 ====={Colors.RESET}\n") + + # 加载 docs.json + try: + docs_data = load_docs_json(docs_json_path) + valid_paths = extract_valid_paths(docs_data) + print(f"{Colors.GREEN}从 docs.json 中提取了 {Colors.YELLOW}{len(valid_paths)}{Colors.GREEN} 个有效路径{Colors.RESET}") + except Exception as e: + print(f"{Colors.RED}加载 docs.json 时出错: {e}{Colors.RESET}") + sys.exit(1) + + # 获取用户输入的路径 + path_input = input(f"{Colors.CYAN}请输入要处理的文件或目录路径(相对于当前目录): {Colors.RESET}") + target_path = os.path.join(base_dir, path_input) + + if not os.path.exists(target_path): + print(f"{Colors.RED}路径不存在: {target_path}{Colors.RESET}") + sys.exit(1) + + # 查找 Markdown 文件 + md_files = find_md_files(target_path) + print(f"{Colors.GREEN}找到 {Colors.YELLOW}{len(md_files)}{Colors.GREEN} 个 Markdown 文件{Colors.RESET}") + + # 处理每个文件 + total_changed = 0 + total_files_changed = 0 + + for file_path in md_files: + print(f"\n{Colors.BLUE}{Colors.BOLD}处理文件: {Colors.RESET}{Colors.YELLOW}{file_path}{Colors.RESET}") + + changes, content = convert_links_in_file(file_path, valid_paths, base_dir) + + if not changes: + print(f"{Colors.GREEN}未发现需要修改的链接{Colors.RESET}") + continue + + print(f"{Colors.MAGENTA}{Colors.BOLD}发现以下需要修改的链接:{Colors.RESET}") + for i, (old_link, new_link, old_url, new_url) in enumerate(changes, 1): + # 提取链接文本 + link_text_match = re.match(r'\[([^\]]+)\]', old_link) + link_text = link_text_match.group(1) if link_text_match else "" + print(f"{Colors.YELLOW}{i}. {Colors.RESET}{old_link} -> {new_link}") + + confirm = input(f"\n{Colors.CYAN}确认进行修改? 
(y/n): {Colors.RESET}") + if confirm.lower() == 'y': + # 进行替换 + modified_content = content + for old_link, new_link, _, _ in changes: + modified_content = modified_content.replace(old_link, new_link) + + # 写回文件 + with open(file_path, 'w', encoding='utf-8') as f: + f.write(modified_content) + + total_changed += len(changes) + total_files_changed += 1 + print(f"{Colors.GREEN}文件已更新: {file_path}{Colors.RESET}") + else: + print(f"{Colors.YELLOW}跳过此文件{Colors.RESET}") + + # 显示总结 + print(f"\n{Colors.CYAN}{Colors.BOLD}===== 转换完成 ====={Colors.RESET}") + print(f"{Colors.GREEN}共修改了 {Colors.YELLOW}{total_files_changed}{Colors.GREEN} 个文件,{Colors.YELLOW}{total_changed}{Colors.GREEN} 个链接{Colors.RESET}") + +if __name__ == "__main__": + try: + main() + except KeyboardInterrupt: + print(f"\n{Colors.RED}用户中断,程序退出{Colors.RESET}") + sys.exit(0) diff --git a/scripts/link_converter-3.26-backup.py b/scripts/link_converter-3.26-backup.py new file mode 100644 index 00000000..d1e96500 --- /dev/null +++ b/scripts/link_converter-3.26-backup.py @@ -0,0 +1,228 @@ + +#!/usr/bin/env python3 +import os +import re +import json +import sys +from pathlib import Path + +def load_docs_json(docs_json_path): + """加载 docs.json 文件并解析其中的路径信息""" + with open(docs_json_path, 'r', encoding='utf-8') as f: + docs_data = json.load(f) + return docs_data + +def extract_valid_paths(docs_data): + """从 docs.json 中提取所有有效的文档路径""" + valid_paths = set() + + # 递归函数用于处理嵌套结构 + def extract_paths_from_object(obj): + if isinstance(obj, dict): + # 检查是否有页面路径 + if "pages" in obj: + for page in obj["pages"]: + if isinstance(page, str) and not page.startswith("http"): + valid_paths.add(page) + elif isinstance(page, dict): + extract_paths_from_object(page) + # 处理其他可能的字典键 + for key, value in obj.items(): + extract_paths_from_object(value) + elif isinstance(obj, list): + # 处理列表中的每个元素 + for item in obj: + extract_paths_from_object(item) + + # 开始提取 + extract_paths_from_object(docs_data) + return valid_paths + +def find_md_files(path): + 
"""查找指定路径下的所有 .md 和 .mdx 文件""" + path_obj = Path(path) + if path_obj.is_file() and (path_obj.suffix in ['.md', '.mdx']): + return [path_obj] + + md_files = [] + for root, _, files in os.walk(path): + for file in files: + if file.endswith(('.md', '.mdx')): + md_files.append(Path(os.path.join(root, file))) + + return md_files + +def convert_links_in_file(file_path, valid_paths, base_dir): + """转换文件中的链接""" + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + + # 提取当前文件的相对路径(相对于基础目录) + relative_file_path = os.path.relpath(file_path, base_dir) + file_dir = os.path.dirname(relative_file_path) + + # 找出所有 Markdown 链接 + link_pattern = re.compile(r'\[([^\]]+)\]\(([^)]+)\)') + links = link_pattern.findall(content) + + changes = [] + for link_text, link_url in links: + # 跳过外部链接 + if link_url.startswith(('http://', 'https://', 'mailto:', '#')): + continue + + # 跳过已经以 /en/ 开头的链接 + if link_url.startswith('/en/'): + continue + + # 处理相对路径链接 + if link_url.startswith(('../', './')) or not link_url.startswith('/') and not link_url.startswith('http'): + # 计算绝对路径 + if link_url.startswith('./'): + link_url = link_url[2:] # 移除开头的 ./ + + # 计算目标文件的完整路径 + target_path = os.path.normpath(os.path.join(file_dir, link_url)) + + # 提取不包含扩展名的路径(docs.json 中通常不包含扩展名) + target_without_ext, _ = os.path.splitext(target_path) + + # 防止路径重复 + if target_without_ext.startswith('en/'): + clean_target = target_without_ext + else: + clean_target = target_without_ext + + # 查找匹配的有效路径 + matching_path = None + for valid_path in valid_paths: + valid_without_ext, _ = os.path.splitext(valid_path) + if valid_without_ext == clean_target or valid_path == clean_target: + matching_path = valid_path + break + + if matching_path: + # 找到匹配的路径,转换为以 /en/ 开头的绝对路径 + if not matching_path.startswith('/'): + if matching_path.startswith('en/'): + new_link_url = f"/{matching_path}" + else: + new_link_url = f"/en/{matching_path}" + else: + new_link_url = matching_path + + old_link = f"[{link_text}]({link_url})" + 
new_link = f"[{link_text}]({new_link_url})" + + changes.append((old_link, new_link)) + else: + # 没有找到匹配路径,使用通用转换规则 + # 去掉可能的文件扩展名 + clean_target, _ = os.path.splitext(target_path) + if clean_target.startswith('en/'): + new_link_url = f"/{clean_target}" + else: + new_link_url = f"/en/{clean_target}" + + old_link = f"[{link_text}]({link_url})" + new_link = f"[{link_text}]({new_link_url})" + + changes.append((old_link, new_link)) + + # 处理 docs.dify.ai 链接 + elif "docs.dify.ai" in link_url: + # 从 docs.dify.ai 链接中提取路径部分 + docs_path = link_url.split("docs.dify.ai/")[-1] + + # 查找匹配的有效路径 + matching_path = None + for valid_path in valid_paths: + if valid_path.endswith(docs_path) or docs_path in valid_path: + matching_path = valid_path + break + + if matching_path: + # 找到匹配的路径,转换为以 /en/ 开头的绝对路径 + if not matching_path.startswith('/'): + if matching_path.startswith('en/'): + new_link_url = f"/{matching_path}" + else: + new_link_url = f"/en/{matching_path}" + else: + new_link_url = matching_path + + old_link = f"[{link_text}]({link_url})" + new_link = f"[{link_text}]({new_link_url})" + + changes.append((old_link, new_link)) + else: + # 没有找到匹配路径,使用通用转换规则 + parts = docs_path.split('/') + if len(parts) >= 2: + # 假设 docs.dify.ai 链接对应于 en/ 目录下的内容 + new_link_url = f"/en/{docs_path}" + + old_link = f"[{link_text}]({link_url})" + new_link = f"[{link_text}]({new_link_url})" + + changes.append((old_link, new_link)) + + return changes, content + +def main(): + """主函数""" + base_dir = os.path.dirname(os.path.abspath(__file__)) + docs_json_path = os.path.join(base_dir, 'docs.json') + + # 加载 docs.json + try: + docs_data = load_docs_json(docs_json_path) + valid_paths = extract_valid_paths(docs_data) + print(f"从 docs.json 中提取了 {len(valid_paths)} 个有效路径") + except Exception as e: + print(f"加载 docs.json 时出错: {e}") + sys.exit(1) + + # 获取用户输入的路径 + path_input = input("请输入要处理的文件或目录路径(相对于当前目录): ") + target_path = os.path.join(base_dir, path_input) + + if not os.path.exists(target_path): + print(f"路径不存在: 
{target_path}") + sys.exit(1) + + # 查找 Markdown 文件 + md_files = find_md_files(target_path) + print(f"找到 {len(md_files)} 个 Markdown 文件") + + # 处理每个文件 + for file_path in md_files: + print(f"\n处理文件: {file_path}") + + changes, content = convert_links_in_file(file_path, valid_paths, base_dir) + + if not changes: + print("未发现需要修改的链接") + continue + + print("发现以下需要修改的链接:") + for i, (old_link, new_link) in enumerate(changes, 1): + print(f"{i}. {old_link} -> {new_link}") + + confirm = input("确认进行修改? (y/n): ") + if confirm.lower() == 'y': + # 进行替换 + modified_content = content + for old_link, new_link in changes: + modified_content = modified_content.replace(old_link, new_link) + + # 写回文件 + with open(file_path, 'w', encoding='utf-8') as f: + f.write(modified_content) + + print(f"文件已更新: {file_path}") + else: + print("跳过此文件") + +if __name__ == "__main__": + main() diff --git a/smart_link_converter.py b/smart_link_converter.py new file mode 100644 index 00000000..cf257d28 --- /dev/null +++ b/smart_link_converter.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python3 +import os +import re +import json +import sys +import difflib +from pathlib import Path + +# ANSI 颜色代码 +class Colors: + RESET = '\033[0m' + RED = '\033[91m' + GREEN = '\033[92m' + YELLOW = '\033[93m' + BLUE = '\033[94m' + MAGENTA = '\033[95m' + CYAN = '\033[96m' + WHITE = '\033[97m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + +def load_docs_json(docs_json_path): + """加载 docs.json 文件并解析其中的路径信息""" + with open(docs_json_path, 'r', encoding='utf-8') as f: + docs_data = json.load(f) + return docs_data + +def extract_valid_paths(docs_data): + """从 docs.json 中提取所有有效的文档路径""" + valid_paths = set() + + # 递归函数用于处理嵌套结构 + def extract_paths_from_object(obj): + if isinstance(obj, dict): + # 检查是否有页面路径 + if "pages" in obj: + for page in obj["pages"]: + if isinstance(page, str) and not page.startswith("http"): + valid_paths.add(page) + elif isinstance(page, dict): + extract_paths_from_object(page) + # 处理其他可能的字典键 + for key, value in 
obj.items(): + if key != "pages": # 避免重复处理 + extract_paths_from_object(value) + elif isinstance(obj, list): + # 处理列表中的每个元素 + for item in obj: + extract_paths_from_object(item) + + # 开始提取 + extract_paths_from_object(docs_data) + return valid_paths + +def find_closest_path(target_path, valid_paths): + """查找最接近的有效路径""" + # 移除扩展名 + target_without_ext, _ = os.path.splitext(target_path) + + # 检查精确匹配 + for path in valid_paths: + path_without_ext, _ = os.path.splitext(path) + if path_without_ext == target_without_ext or path == target_without_ext: + return path + + # 如果没有精确匹配,查找最相似的路径 + best_match = None + best_score = 0 + + for path in valid_paths: + # 计算路径的相似度 + score = difflib.SequenceMatcher(None, target_without_ext, path).ratio() + + # 特别检查路径的末尾部分(文件名)是否匹配 + target_parts = target_without_ext.split('/') + path_parts = path.split('/') + + if len(target_parts) > 0 and len(path_parts) > 0: + if target_parts[-1] == path_parts[-1]: + score += 0.3 # 增加匹配度 + + if score > best_score: + best_score = score + best_match = path + + # 如果相似度足够高,返回最佳匹配 + if best_score > 0.6: # 阈值可以调整 + return best_match + + # 寻找包含目标路径末尾部分的路径 + if len(target_parts) > 0: + for path in valid_paths: + if target_parts[-1] in path.split('/'): + return path + + return None + +def find_md_files(path): + """查找指定路径下的所有 .md 和 .mdx 文件""" + path_obj = Path(path) + if path_obj.is_file() and (path_obj.suffix in ['.md', '.mdx']): + return [path_obj] + + md_files = [] + for root, _, files in os.walk(path): + for file in files: + if file.endswith(('.md', '.mdx')): + md_files.append(Path(os.path.join(root, file))) + + return md_files + +def convert_links_in_file(file_path, valid_paths, base_dir): + """转换文件中的链接""" + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + + # 提取当前文件的相对路径(相对于基础目录) + relative_file_path = os.path.relpath(file_path, base_dir) + file_dir = os.path.dirname(relative_file_path) + + # 找出所有 Markdown 链接 - 支持带属性的链接 + link_pattern = re.compile(r'\[([^\]]+)\]\(([^)]+)(\s+"[^"]*")?\)') 
+ matches = link_pattern.findall(content) + + changes = [] + for match in matches: + link_text, link_url, link_attr = match + + # 修正link_attr(可能为空) + link_attr = link_attr or "" + + # 跳过外部链接和已经格式化好的链接 + if link_url.startswith(('http://', 'https://', 'mailto:', '#')) or link_url.startswith('/en/'): + continue + + # 处理 docs.dify.ai 链接 + if "docs.dify.ai" in link_url: + # 从 docs.dify.ai 链接中提取路径部分 + docs_path = link_url.split("docs.dify.ai/")[-1] if "docs.dify.ai/" in link_url else "" + + if docs_path: + # 查找最接近的有效路径 + matched_path = find_closest_path(docs_path, valid_paths) + + if matched_path: + # 构造新链接 + new_link_url = f"/en/{matched_path}" + old_link = f"[{link_text}]({link_url}{link_attr})" + new_link = f"[{link_text}]({new_link_url}{link_attr})" + + changes.append({ + "old_link": old_link, + "new_link": new_link, + "match_type": "docs.dify.ai", + "match_path": matched_path + }) + else: + # 通用转换 + new_link_url = f"/en/{docs_path}" + old_link = f"[{link_text}]({link_url}{link_attr})" + new_link = f"[{link_text}]({new_link_url}{link_attr})" + + changes.append({ + "old_link": old_link, + "new_link": new_link, + "match_type": "docs.dify.ai (generic)", + "match_path": None + }) + + # 处理相对路径链接 + elif link_url.startswith(('../', './')) or not link_url.startswith('/'): + # 计算目标文件的完整路径 + if link_url.startswith('./'): + link_url = link_url[2:] # 移除开头的 ./ + + # 计算绝对路径 + target_path = os.path.normpath(os.path.join(file_dir, link_url)) + target_path = target_path.replace('\\', '/') # 统一路径分隔符 + + # 查找最接近的有效路径 + matched_path = find_closest_path(target_path, valid_paths) + + if matched_path: + # 构造新链接 + new_link_url = f"/en/{matched_path}" + old_link = f"[{link_text}]({link_url}{link_attr})" + new_link = f"[{link_text}]({new_link_url}{link_attr})" + + changes.append({ + "old_link": old_link, + "new_link": new_link, + "match_type": "relative path", + "match_path": matched_path + }) + else: + # 检查是否是README.md类文件 + if os.path.basename(target_path) in ["README.md", "readme.md", 
"README", "readme"]: + # 对于README文件,使用其所在目录作为路径 + dir_path = os.path.dirname(target_path) + if dir_path.startswith('en/'): + new_link_url = f"/{dir_path}" + else: + new_link_url = f"/en/{dir_path}" + + if new_link_url.endswith('/'): + new_link_url = new_link_url[:-1] + else: + # 移除扩展名 + target_without_ext, _ = os.path.splitext(target_path) + + # 避免en/前缀重复 + if target_without_ext.startswith('en/'): + new_link_url = f"/{target_without_ext}" + else: + new_link_url = f"/en/{target_without_ext}" + + old_link = f"[{link_text}]({link_url}{link_attr})" + new_link = f"[{link_text}]({new_link_url}{link_attr})" + + changes.append({ + "old_link": old_link, + "new_link": new_link, + "match_type": "relative path (generic)", + "match_path": None + }) + + return changes, content