diff --git a/docs/faq.mdx b/docs/faq.mdx index 8fe27262..e36132ba 100644 --- a/docs/faq.mdx +++ b/docs/faq.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 1200 -title: "πŸ“‹ FAQ" +title: "❓ FAQ" --- import { TopBanners } from "@site/src/components/TopBanners"; diff --git a/docs/features/auth/_category_.json b/docs/features/auth/_category_.json index 274c99a4..077f4d31 100644 --- a/docs/features/auth/_category_.json +++ b/docs/features/auth/_category_.json @@ -1,4 +1,4 @@ { - "label": "πŸ” Federated Authentication", + "label": "🀝 Federated Authentication", "position": 0 } diff --git a/docs/features/auth/ldap.mdx b/docs/features/auth/ldap.mdx index 3beaf799..a94b1173 100644 --- a/docs/features/auth/ldap.mdx +++ b/docs/features/auth/ldap.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 2 -title: "πŸ–₯️ LDAP Authentication" +title: "πŸ“‡ LDAP Authentication" --- # OpenLDAP Integration diff --git a/docs/features/auth/scim.mdx b/docs/features/auth/scim.mdx index c20276ae..44b6a6fc 100644 --- a/docs/features/auth/scim.mdx +++ b/docs/features/auth/scim.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 3 -title: "πŸš€ SCIM 2.0" +title: "πŸ‘€β†”οΈπŸ†” SCIM 2.0" --- # SCIM 2.0 Support diff --git a/docs/features/chat-features/chatshare.md b/docs/features/chat-features/chatshare.md index a99d7756..63c0e564 100644 --- a/docs/features/chat-features/chatshare.md +++ b/docs/features/chat-features/chatshare.md @@ -1,6 +1,6 @@ --- sidebar_position: 4 -title: "πŸ—¨οΈ Chat Sharing" +title: "πŸ“€ Chat Sharing" --- ### Enabling Community Sharing diff --git a/docs/features/chat-features/code-execution/index.md b/docs/features/chat-features/code-execution/index.md index 2bff856f..ee156063 100644 --- a/docs/features/chat-features/code-execution/index.md +++ b/docs/features/chat-features/code-execution/index.md @@ -1,6 +1,6 @@ --- sidebar_position: 5 -title: "🐍 Code Execution" +title: "▢️ Code Execution" --- Open WebUI offers powerful code execution capabilities directly within your chat interface, enabling 
you to transform ideas into actionable results without leaving the platform. diff --git a/docs/features/interface/_category_.json b/docs/features/interface/_category_.json index 1e34d5f7..2077d97c 100644 --- a/docs/features/interface/_category_.json +++ b/docs/features/interface/_category_.json @@ -1,4 +1,4 @@ { - "label": "πŸ”Œ Interface", + "label": "🎨 Interface", "position": 4 } diff --git a/docs/features/pipelines/_category_.json b/docs/features/pipelines/_category_.json index 512ed53b..da6634ff 100644 --- a/docs/features/pipelines/_category_.json +++ b/docs/features/pipelines/_category_.json @@ -1,4 +1,4 @@ { - "label": "⚑ Pipelines", + "label": "πŸ›€οΈ Pipelines", "position": 900 } diff --git a/docs/features/pipelines/pipes.md b/docs/features/pipelines/pipes.md index 3790980c..9969ed05 100644 --- a/docs/features/pipelines/pipes.md +++ b/docs/features/pipelines/pipes.md @@ -1,6 +1,6 @@ --- sidebar_position: 2 -title: "πŸ”§ Pipes" +title: "πŸ”— Pipes" --- ## Pipes diff --git a/docs/features/pipelines/valves.md b/docs/features/pipelines/valves.md index 2d5e78ab..0271571f 100644 --- a/docs/features/pipelines/valves.md +++ b/docs/features/pipelines/valves.md @@ -1,6 +1,6 @@ --- sidebar_position: 3 -title: "βš™οΈ Valves" +title: "🚦 Valves" --- ## Valves diff --git a/docs/features/plugin/events/index.mdx b/docs/features/plugin/events/index.mdx index d0e7bc8a..79f56de1 100644 --- a/docs/features/plugin/events/index.mdx +++ b/docs/features/plugin/events/index.mdx @@ -1,9 +1,9 @@ --- sidebar_position: 3 -title: "⛑️ Events" +title: "πŸ”” Events" --- -# ⛑️ Events: Using `__event_emitter__` and `__event_call__` in Open WebUI +# πŸ”” Events: Using `__event_emitter__` and `__event_call__` in Open WebUI Open WebUI's plugin architecture is not just about processing input and producing outputβ€”**it's about real-time, interactive communication with the UI and users**. 
To make your Tools, Functions, and Pipes more dynamic, Open WebUI provides a built-in event system via the `__event_emitter__` and `__event_call__` helpers. diff --git a/docs/features/plugin/valves/index.mdx b/docs/features/plugin/valves/index.mdx index 22326c0e..208dd48d 100644 --- a/docs/features/plugin/valves/index.mdx +++ b/docs/features/plugin/valves/index.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 3 -title: "πŸ”„ Valves" +title: "🚦 Valves" --- ## Valves diff --git a/docs/features/rbac/groups.md b/docs/features/rbac/groups.md index 111b14a6..c4f9fc44 100644 --- a/docs/features/rbac/groups.md +++ b/docs/features/rbac/groups.md @@ -1,6 +1,6 @@ --- sidebar_position: 3 -title: "πŸ” Groups" +title: "πŸ‘₯ Groups" --- Groups allow administrators to diff --git a/docs/features/workspace/prompts.md b/docs/features/workspace/prompts.md index ffdd2cd2..e7c8fc30 100644 --- a/docs/features/workspace/prompts.md +++ b/docs/features/workspace/prompts.md @@ -1,6 +1,6 @@ --- sidebar_position: 2 -title: "πŸ“š Prompts" +title: "πŸ“œ Prompts" --- The `Prompts` section of the `Workspace` within Open WebUI enables users to create, manage, and share custom prompts. This feature streamlines interactions with AI models by allowing users to save frequently used prompts and easily access them through slash commands. 
diff --git a/docs/getting-started/quick-start/starting-with-ollama.mdx b/docs/getting-started/quick-start/starting-with-ollama.mdx index fa2a29ec..b6040c8d 100644 --- a/docs/getting-started/quick-start/starting-with-ollama.mdx +++ b/docs/getting-started/quick-start/starting-with-ollama.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 1 -title: "πŸ‘‰ Starting With Ollama" +title: "πŸ¦™ Starting With Ollama" --- ## Overview diff --git a/docs/security.mdx b/docs/security.mdx index e7b09fd0..bb444d59 100644 --- a/docs/security.mdx +++ b/docs/security.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 1500 -title: "πŸ”’ Security Policy" +title: "πŸ›‘οΈ Security Policy" --- import { TopBanners } from "@site/src/components/TopBanners"; diff --git a/docs/sponsorships.mdx b/docs/sponsorships.mdx index 1ed453e2..ebd832f4 100644 --- a/docs/sponsorships.mdx +++ b/docs/sponsorships.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 1800 -title: "🌐 Sponsorships" +title: "πŸ’– Sponsorships" --- import { TopBanners } from "@site/src/components/TopBanners"; diff --git a/docs/tutorials/_category_.json b/docs/tutorials/_category_.json index c881f1d4..c0b3c0e9 100644 --- a/docs/tutorials/_category_.json +++ b/docs/tutorials/_category_.json @@ -1,5 +1,5 @@ { - "label": "πŸ“ Tutorials", + "label": "πŸŽ“ Tutorials", "position": 800, "link": { "type": "generated-index" diff --git a/docs/tutorials/https/caddy.md b/docs/tutorials/https/caddy.md index 80044bed..e9f70869 100644 --- a/docs/tutorials/https/caddy.md +++ b/docs/tutorials/https/caddy.md @@ -1,6 +1,6 @@ --- sidebar_position: 202 -title: "πŸ”’ HTTPS using Caddy" +title: "πŸͺ„ HTTPS using Caddy" --- :::warning diff --git a/docs/tutorials/https/haproxy.md b/docs/tutorials/https/haproxy.md index c18a5465..5e57b1d4 100644 --- a/docs/tutorials/https/haproxy.md +++ b/docs/tutorials/https/haproxy.md @@ -1,6 +1,6 @@ --- sidebar_position: 201 -title: "πŸ”’ HTTPS using HAProxy" +title: "βš–οΈ HTTPS using HAProxy" --- :::warning diff --git 
a/docs/tutorials/https/nginx.md b/docs/tutorials/https/nginx.md index ab493631..e82aab37 100644 --- a/docs/tutorials/https/nginx.md +++ b/docs/tutorials/https/nginx.md @@ -1,6 +1,6 @@ --- sidebar_position: 200 -title: "πŸ”’ HTTPS using Nginx" +title: "πŸš€ HTTPS using Nginx" --- :::warning diff --git a/docs/tutorials/image-generation-and-editing/_category_.json b/docs/tutorials/image-generation-and-editing/_category_.json new file mode 100644 index 00000000..ce51b315 --- /dev/null +++ b/docs/tutorials/image-generation-and-editing/_category_.json @@ -0,0 +1,7 @@ +{ + "label": "🎨 Image Generation and Editing", + "position": 300, + "link": { + "type": "generated-index" + } +} diff --git a/docs/tutorials/image-generation-and-editing/automatic1111.md b/docs/tutorials/image-generation-and-editing/automatic1111.md new file mode 100644 index 00000000..3a3bb248 --- /dev/null +++ b/docs/tutorials/image-generation-and-editing/automatic1111.md @@ -0,0 +1,39 @@ +--- +sidebar_position: 1 +title: "πŸŽ›οΈ AUTOMATIC1111" +--- + +:::warning +This tutorial is a community contribution and is not supported by the Open WebUI team. It serves only as a demonstration on how to customize Open WebUI for your specific use case. Want to contribute? Check out the contributing tutorial. +::: + +Open WebUI supports image generation through the **AUTOMATIC1111** [API](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API). Here are the steps to get started: + +### Initial Setup + +1. Ensure that you have [AUTOMATIC1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) installed. +2. Launch AUTOMATIC1111 with additional flags to enable API access: + +```bash +./webui.sh --api --listen +``` + +3. 
For Docker installation of WebUI with the environment variables preset, use the following command: + +```docker +docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway -e AUTOMATIC1111_BASE_URL=http://host.docker.internal:7860/ -e ENABLE_IMAGE_GENERATION=True -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main +``` + +### Setting Up Open WebUI with AUTOMATIC1111 + +1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu. +2. Set the `Image Generation Engine` field to `Default (Automatic1111)`. +3. In the API URL field, enter the address where AUTOMATIC1111's API is accessible: + +![Screenshot of the Open WebUI Images settings page with Default (Automatic1111) selected and the API URL field highlighted.](/images/image-generation-and-editing/automatic1111-settings.png) + +```txt +http://<your-automatic1111-address>:7860/ +``` + +If you're running a Docker installation of Open WebUI and AUTOMATIC1111 on the same host, use `http://host.docker.internal:7860/` as your address. diff --git a/docs/tutorials/image-generation-and-editing/comfyui.md b/docs/tutorials/image-generation-and-editing/comfyui.md new file mode 100644 index 00000000..28a9f16e --- /dev/null +++ b/docs/tutorials/image-generation-and-editing/comfyui.md @@ -0,0 +1,428 @@ +--- +sidebar_position: 2 +title: "πŸ”€ ComfyUI" +--- + +:::warning +This tutorial is a community contribution and is not supported by the Open WebUI team. It serves only as a demonstration on how to customize Open WebUI for your specific use case. Want to contribute? Check out the contributing tutorial. +::: + +ComfyUI is a powerful and modular node-based GUI for Stable Diffusion. It gives users a high degree of control over the image generation process. Learn more or download it from its [GitHub page](https://github.com/comfyanonymous/ComfyUI). 
+ +### System Requirements + +Before installing ComfyUI, ensure your system meets the following requirements: + +- **Operating System:** Windows, Linux, or macOS (including Apple Silicon M-series). +- **Python:** Python 3.12 is recommended. Python 3.13 is supported, but some custom nodes may have compatibility issues. **(Note: Python 3.11+ is generally required for modern PyTorch setups.)** +- **GPU:** + - NVIDIA (recommended) + - AMD (Requires ROCm Toolkit on Linux) + - Intel (including Arc series) + - Apple Silicon (M1/M2) +- **CPU:** (can use the `--cpu` parameter, but is slower) +- **Git:** You will need [Git](https://git-scm.com/downloads) to clone the repository. + +### Manual Installation + +A manual installation gives you the most control and ensures you are running the latest version of ComfyUI. + +#### 1. Create a Virtual Environment (Recommended) + +To avoid conflicts with other Python projects, it's best to install ComfyUI in a dedicated virtual environment. + +:::tip +**Choose Your Environment Manager** +We recommend **Miniconda** for simplicity across all operating systems. If you are on Linux and prefer a lighter tool, **pyenv** is an excellent alternative. +::: + +**Option A: Using Miniconda (Cross-Platform)** + +- **Install Miniconda:** Download and install it from the [official website](https://docs.anaconda.com/free/miniconda/index.html#latest-miniconda-installer-links). +- **Create and activate the environment:** + +```bash +conda create -n comfyenv python=3.13 +conda activate comfyenv +``` + +**Option B: Using pyenv (Linux/macOS)** + +- **Install Python (if needed):** Ensure Python 3.12 or 3.13 is installed via pyenv (e.g., `pyenv install 3.13.0`). +- **Create and activate the environment:** + +```bash +pyenv virtualenv 3.13.0 comfyenv +pyenv activate comfyenv +``` + +#### 2. 
Clone the ComfyUI Repository + +Use Git to clone the official ComfyUI repository: + +```bash +git clone https://github.com/comfyanonymous/ComfyUI.git +cd ComfyUI +``` + +#### 3. Install Dependencies + +Install the required Python packages, including PyTorch for your specific GPU. **Ensure your environment (`(comfyenv)` or `(conda env)`) is active.** + +- **For NVIDIA GPUs (Recommended):** + +```bash +pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu130 +pip install -r requirements.txt +``` + +- **For AMD GPUs (Linux/ROCm):** + +This installation is required for GPU acceleration via the ROCm toolkit. + +```bash +pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.4 +pip install -r requirements.txt +``` + +- **For other platforms or CPU-only:** Refer to the [official PyTorch installation instructions](https://pytorch.org/get-started/locally/). + +#### 4. Download Models + +You need to place your Stable Diffusion models (checkpoints, VAEs, LoRAs, etc.) in the `ComfyUI/models/` subdirectories. For example, place checkpoint models (`.safetensors` or `.ckpt`) in `ComfyUI/models/checkpoints/`. + +### Post-Installation Setup (Essential for Extensions) + +Before running ComfyUI for the first time, it is highly recommended to install the [ComfyUI-Manager](https://github.com/Comfy-Org/ComfyUI-Manager) extension. **ComfyUI-Manager** is an extension designed to enhance the usability of ComfyUI. It offers management functions to **install, remove, disable, and enable** various custom nodes of ComfyUI. Furthermore, this extension provides a hub feature and convenience functions to access a wide range of information within ComfyUI. + +1. **Create Custom Node Directory:** + +```bash +cd ComfyUI +mkdir -p custom_nodes +``` + +2. **Install ComfyUI Manager:** This provides a graphical interface to install all other extensions and manage models easily. 
+ +```bash +cd custom_nodes +git clone https://github.com/ltdrdata/ComfyUI-Manager.git +cd .. +``` + +3. **Install Initial Custom Nodes:** After running ComfyUI for the first time, use the Manager to install necessary nodes like **ComfyUI-Custom-Scripts** and **ComfyUI-Impact-Pack**. + +#### 5. Start ComfyUI + +To run ComfyUI and make it accessible to Open WebUI, you must start it with the `--listen` flag to bind to `0.0.0.0`. This allows it to accept connections from other computers on your network. + +**Standard Start (Sufficient VRAM):** + +```bash +python main.py --listen 0.0.0.0 +``` + +**Low VRAM Start (Recommended for 16GB VRAM or less, especially AMD/Multimodal Models):** +The `--lowvram` flag aggressively moves models to system RAM when idle. + +```bash +python main.py --listen 0.0.0.0 --lowvram +``` + +Once running, the ComfyUI interface will be available at `http://<your-comfyui-host>:8188`. + +### Sharing Models with `extra_model_paths.yaml` + +If you already have a collection of Stable Diffusion models from another UI (like AUTOMATIC1111) or want to keep your models in a central location, you can use the `extra_model_paths.yaml` file to tell ComfyUI where to find them. + +1. **Locate the Example File:** In the root of your `ComfyUI` directory, you will find a file named `extra_model_paths.yaml.example`. +2. **Rename the File:** Rename or copy this file to `extra_model_paths.yaml`. +3. **Edit the Configuration:** Open `extra_model_paths.yaml` in a text editor. The file contains commented-out examples. To point to an existing AUTOMATIC1111 installation, you can uncomment the `a111:` section and set the `base_path` to your `stable-diffusion-webui` directory. 
+ +**Example for sharing with AUTOMATIC1111:** + +```yaml + a111: + base_path: D:\stable-diffusion-webui\ # Use the correct path for your system + + checkpoints: models/Stable-diffusion + vae: models/VAE + loras: | + models/Lora + models/LyCORIS + upscale_models: | + models/ESRGAN + models/RealESRGAN + embeddings: embeddings + controlnet: models/ControlNet +``` + +You can also define custom paths for your models and even for custom nodes. After saving the file, you must **restart ComfyUI** for the changes to take effect. + +### Connecting ComfyUI to Open WebUI + +Since Open WebUI typically runs inside Docker, you must ensure the container can reach the host-based ComfyUI application via `host.docker.internal`. + +1. **Host Binding Check:** Ensure ComfyUI is running with the `--listen 0.0.0.0` flag (Step 5). +2. **Firewall Check:** If the host firewall (UFW) is active, ensure port 8188 is allowed (`sudo ufw allow 8188/tcp`). + +3. **Docker Run Command (Linux Native Docker):** +For Linux users not running Docker Desktop, you must explicitly map the host gateway when running the Open WebUI container. + +```docker +docker run -d -p 3000:8080 \ + --add-host=host.docker.internal:host-gateway \ + -e COMFYUI_BASE_URL=http://host.docker.internal:8188/ \ + -e ENABLE_IMAGE_GENERATION=True \ + -v open-webui:/app/backend/data \ + --name open-webui \ + --restart always \ + ghcr.io/open-webui/open-webui:main +``` + +:::warning +**Debugging Network Stalls** +If Open WebUI stalls when connecting, the issue is usually missing the `--add-host=host.docker.internal:host-gateway` flag in your Docker run command. +::: + +Once you have ComfyUI installed and running, you can connect it to Open WebUI from the admin settings. + +### ComfyUI Image Generation + +1. **Navigate to Image Settings:** In Open WebUI, go to the **Admin Panel** > **Settings** > **Images**. + +2. **Enable and Configure ComfyUI:** + +- Ensure the **Image Generation** toggle at the top of the page is enabled. 
+- Under the **Create Image** section, set the **Image Generation Engine** to `ComfyUI`. +- **Model**: Select the base model to be used for generating the image. +- **Image Size**: Defines the resolution of the generated image (e.g., 512x512, 1024x1024). +- **Steps**: The number of sampling steps; higher values can improve image quality but take longer to process. +- **Image Prompt Generation**: When enabled, this feature uses a language model to automatically generate a more detailed and creative prompt based on your initial input, which can lead to better image results. +- In the **ComfyUI Base URL** field, enter the address of your running ComfyUI instance (e.g., `http://host.docker.internal:8188/`). +- Click the **refresh icon** (πŸ”„) next to the URL field to verify the connection. A success message should appear. +- If your ComfyUI instance requires an API key, enter it in the **ComfyUI API Key** field. + +![Screenshot of the Open WebUI Images settings page with ComfyUI selected for image generation.](/images/image-generation-and-editing/comfyui-generation-settings.png) + +1. **Upload Your ComfyUI Workflow:** + +- First, you need to export a workflow from ComfyUI in the correct format. In the ComfyUI interface, click the ComfyUI logo at the top left and click **Settings**. Then toggle **"Dev Mode"**, which has a description that states "Enable dev mode options (API save, etc.)". +- While still in ComfyUI, load the **image generation** workflow you want to use, and then click the **"Save (API Format)"** button. This will prompt you to give a name to the file. Name it something memorable and download the file. +- Back in Open WebUI, under the **ComfyUI Workflow** section, click **Upload**. Select the JSON workflow file you just downloaded. + +![Screenshot of the ComfyUI Workflow section in Open WebUI, showing the upload button.](/images/image-generation-and-editing/comfyui-workflow-upload.png) + +1. 
**Map Workflow Nodes:** + +- After the workflow is imported, you must map the node IDs from your workflow to the corresponding fields in Open WebUI (e.g., `Prompt`, `Model`, `Seed`). This tells Open WebUI which inputs in your ComfyUI workflow to control. +- You can find the node ID by clicking on a node in ComfyUI and viewing its details. + +![Screenshot of the ComfyUI Workflow Nodes section in Open WebUI, showing the mapping fields.](/images/image-generation-and-editing/comfyui-node-mapping.png) + +:::info +You may need to adjust an `Input Key` within Open WebUI's `ComfyUI Workflow Nodes` section to match a node in your workflow. For example, the default `seed` key might need to be changed to `noise_seed` depending on your workflow's structure. +::: + +:::tip +Some workflows, such as ones that use any of the Flux models, may utilize multiple node IDs that are necessary to fill in for their node entry fields within Open WebUI. If a node entry field requires multiple IDs, the node IDs should be comma separated (e.g., `1` or `1, 2`). +::: + +1. **Save Configuration:** + +- Click the **Save** button at the bottom of the page to finalize the configuration. You can now use ComfyUI for image generation in Open WebUI. + +### ComfyUI Image Editing + +Open WebUI also supports image editing through ComfyUI, allowing you to modify existing images. + +1. **Navigate to Image Settings:** In Open WebUI, go to the **Admin Panel** > **Settings** > **Images**. + +2. **Configure Image Editing:** + +- Under the **Edit Image** section, set the **Image Edit Engine** to `ComfyUI`. +- **Model**: Select the model to be used for the editing task. +- **Image Size**: Specify the desired resolution for the output image. +- **ComfyUI Base URL** and **API Key**: These fields are shared with the image generation settings. +- **ComfyUI Workflow**: Upload a separate workflow file specifically designed for image editing tasks. The process is the same as for image generation. 
+- **Map Workflow Nodes**: Just as with image generation, you must map the node IDs from your editing workflow to the corresponding fields in Open WebUI. Common fields for editing workflows include `Image`, `Prompt`, and `Model`. + +![Screenshot of the Open WebUI Images settings page with ComfyUI selected for image editing.](/images/image-generation-and-editing/comfyui-editing-settings.png) + +### Deeper Dive: Mapping ComfyUI Nodes to Open WebUI + +Understanding the node ID mapping is often the biggest hurdle in integrating ComfyUI with an external service like Open WebUI. Integrating ComfyUI via API requires mapping Open WebUI's generic controls (e.g., "Model," "Width," "Prompt") to specific node inputs within your static ComfyUI workflow JSON. + +#### 1. Identifying Node IDs and Input Keys in ComfyUI + +Before configuring Open WebUI, you must examine your exported workflow JSON files directly in a text editor. The Node ID is the unique number ComfyUI uses to identify the node in the JSON structure. The top-level keys in the JSON object are the node IDs. + +**Identify the Input Key (The Parameter Name)** + +The Input Key is the exact parameter name within that node's JSON structure that you need to change (e.g., `seed`, `width`, `unet_name`). + +1. **Examine the JSON**: Look at your API workflow JSON (`workflow_api.json`). +2. **Find the Node ID**: Locate the section corresponding to the node's ID (e.g., `"37"`). +3. **Identify the Key**: Within the `"inputs"` block, find the variable you want to control. 
+ +**Example: unet_name Node (ID 37)** + +```json +"37": { + "inputs": { + "unet_name": "qwen_image_fp8_e4m3fn.safetensors", + "weight_dtype": "default" + }, + "class_type": "UNETLoader", + "_meta": { + "title": "Load Diffusion Model" + } +}, +``` + +![Screenshot of the ComfyUI interface with the UNETLoader node selected, highlighting the node ID and the 'seed' and 'steps' input fields.](/images/image-generation-and-editing/comfyui-unet-name-node.png) + +In this example, the Input Keys are `seed` and `steps`. + +#### 2. Mapping in Open WebUI + +In the Open WebUI settings under **ComfyUI Workflow Nodes**, you will see a list of hard-coded parameters (e.g., `Prompt`, `Model`, `Seed`). For each parameter, you must provide two pieces of information from your workflow: + +- **Input Key (Left Field)**: The specific parameter name from the node's `inputs` block in your workflow JSON (e.g., `text`, `unet_name`, `seed`). +- **Node ID (Right Field)**: The corresponding ID of the node you want to control (e.g., `6`, `39`, `3`). + +This tells Open WebUI to find the node with the given ID and modify the value of the specified input key. + +**Example: Mapping KSampler Seed** + +Let's say you want to control the `seed` in your KSampler node, which has an ID of `3`. In the `Seed` section of the Open WebUI settings: + +| Open WebUI Parameter | Input Key (Left Field) | Node ID (Right Field) | +|----------------------|------------------------|-----------------------| +| `Seed` | `seed` | `3` | + +#### 3. Handling Complex/Multimodal Nodes (Qwen Example) + +For specialized nodes, the Input Key may not be a simple text. + +| Parameter | Input Key (Left Field) | Node ID (Right Field) | Note | +|-------------|------------------------|-----------------------|--------------------------------------------------------------------------------------------------| +| **Prompt** | `prompt` | `76` | The key is still `prompt`, but it targets the specialized TextEncodeQwenImageEdit node (76). 
| +| **Model** | `unet_name` | `37` | You must use the exact input key `unet_name` to control the model file name in the UNETLoader. | +| **Image Input** | `image` | `78` | The key is `image`. This passes the filename of the source image to the LoadImage node. | + +#### 4. Troubleshooting Mismatch Errors + +If ComfyUI stalls or gives a validation error, consult the log and the JSON structure: + +| Error Type | Cause & Debugging | Solution | +|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Value not in list: unet_name: 'xyz.safetensors'` | You mapped the correct node ID (e.g., 37), but the value being passed (e.g., `xyz.safetensors`) is not a valid model name for that node type (e.g., accidentally sending a VAE model to a UNET loader). | Correct the model name set in Open WebUI for either image generation or editing, ensuring both model names matche the type of model the ComfyUI node is expecting. | +| `Missing input ` | Your workflow requires an input (e.g., `cfg` or `sampler_name`), but Open WebUI did not send a value because the field was not mapped. | Either hardcode the value in the workflow JSON, or map the required input key to the correct node ID. | + +By meticulously matching the Node ID and the specific Input Key, you ensure Open WebUI correctly overwrites the default values in your workflow JSON before submitting the prompt to ComfyUI. + +## Example Setup: Qwen Image Generation and Editing + +This section provides a supplementary guide on setting up the Qwen models for both image generation and editing. 
+ +### Qwen Image Generation + +#### Model Download + +- **Diffusion Model**: [qwen_image_fp8_e4m3fn.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/diffusion_models/qwen_image_fp8_e4m3fn.safetensors) +- **Text Encoder**: [qwen_2.5_vl_7b_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/text_encoders/qwen_2.5_vl_7b_fp8_scaled.safetensors) +- **VAE**: [qwen_image_vae.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/vae/qwen_image_vae.safetensors) + +#### Model Storage Location + +``` +πŸ“‚ ComfyUI/ +β”œβ”€β”€ πŸ“‚ models/ +β”‚ β”œβ”€β”€ πŸ“‚ diffusion_models/ +β”‚ β”‚ └── qwen_image_fp8_e4m3fn.safetensors +β”‚ β”œβ”€β”€ πŸ“‚ vae/ +β”‚ β”‚ └── qwen_image_vae.safetensors +β”‚ └── πŸ“‚ text_encoders/ +β”‚ └── qwen_2.5_vl_7b_fp8_scaled.safetensors +``` + +### Qwen Image Editing + +#### Model Download + +- **Diffusion Model**: [qwen_image_edit_fp8_e4m3fn.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI/resolve/main/split_files/diffusion_models/qwen_image_edit_fp8_e4m3fn.safetensors) +- **Text Encoder**: [qwen_2.5_vl_7b_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/text_encoders/qwen_2.5_vl_7b_fp8_scaled.safetensors) +- **VAE**: [qwen_image_vae.safetensors](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/vae/qwen_image_vae.safetensors) + +#### Model Storage Location + +``` +πŸ“‚ ComfyUI/ +β”œβ”€β”€ πŸ“‚ models/ +β”‚ β”œβ”€β”€ πŸ“‚ diffusion_models/ +β”‚ β”‚ └── qwen_image_edit_fp8_e4m3fn.safetensors +β”‚ β”œβ”€β”€ πŸ“‚ vae/ +β”‚ β”‚ └── qwen_image_vae.safetensors +β”‚ └── πŸ“‚ text_encoders/ +β”‚ └── qwen_2.5_vl_7b_fp8_scaled.safetensors +``` + +## Example Setup: FLUX.1 Image Generation + +This section provides a supplementary guide on setting up the FLUX.1 models for image generation. 
+ +### FLUX.1 Dev + +#### Model Download + +- **Diffusion Model**: [flux1-dev.safetensors](https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/flux1-dev.safetensors) +- **Text Encoder 1**: [clip_l.safetensors](https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/clip_l.safetensors?download=true) +- **Text Encoder 2**: [t5xxl_fp16.safetensors](https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp16.safetensors?download=true) (Recommended when your VRAM is greater than 32GB) +- **VAE**: [ae.safetensors](https://huggingface.co/black-forest-labs/FLUX.1-schnell/resolve/main/ae.safetensors?download=true) + +#### Model Storage Location + +``` +πŸ“‚ ComfyUI/ +β”œβ”€β”€ πŸ“‚ models/ +β”‚ β”œβ”€β”€ πŸ“‚ diffusion_models/ +β”‚ β”‚ └── flux1-dev.safetensors +β”‚ β”œβ”€β”€ πŸ“‚ vae/ +β”‚ β”‚ └── ae.safetensors +β”‚ └── πŸ“‚ text_encoders/ +β”‚ β”œβ”€β”€ clip_l.safetensors +β”‚ └── t5xxl_fp16.safetensors +``` + +### FLUX.1 Schnell + +#### Model Download + +- **Diffusion Model**: [flux1-schnell.safetensors](https://huggingface.co/black-forest-labs/FLUX.1-schnell/resolve/main/flux1-schnell.safetensors) +- **Text Encoder 1**: [clip_l.safetensors](https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/clip_l.safetensors?download=true) +- **Text Encoder 2**: [t5xxl_fp8_e4m3fn.safetensors](https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp8_e4m3fn.safetensors?download=true) (Recommended when your VRAM is less than 32GB) +- **VAE**: [ae.safetensors](https://huggingface.co/black-forest-labs/FLUX.1-schnell/resolve/main/ae.safetensors?download=true) + +#### Model Storage Location + +``` +πŸ“‚ ComfyUI/ +β”œβ”€β”€ πŸ“‚ models/ +β”‚ β”œβ”€β”€ πŸ“‚ diffusion_models/ +β”‚ β”‚ └── flux1-schnell.safetensors +β”‚ β”œβ”€β”€ πŸ“‚ vae/ +β”‚ β”‚ └── ae.safetensors +β”‚ └── πŸ“‚ text_encoders/ +β”‚ β”œβ”€β”€ clip_l.safetensors +β”‚ └── t5xxl_fp8_e4m3fn.safetensors +``` + +## Configuring with SwarmUI + 
+SwarmUI utilizes ComfyUI as its backend. In order to get Open WebUI to work with SwarmUI you will have to append `ComfyBackendDirect` to the `ComfyUI Base URL`. Additionally, you will want to setup SwarmUI with LAN access. After aforementioned adjustments, setting up SwarmUI to work with Open WebUI will be the same as the steps for [ComfyUI Image Generation](#comfyui-image-generation) outlined above. +![Install SwarmUI with LAN Access](https://github.com/user-attachments/assets/a6567e13-1ced-4743-8d8e-be526207f9f6) + +### SwarmUI API URL + +The address you will input as the ComfyUI Base URL will look like: `http://:7801/ComfyBackendDirect` diff --git a/docs/tutorials/image-generation-and-editing/gemini.md b/docs/tutorials/image-generation-and-editing/gemini.md new file mode 100644 index 00000000..5d7c43b1 --- /dev/null +++ b/docs/tutorials/image-generation-and-editing/gemini.md @@ -0,0 +1,68 @@ +--- +sidebar_position: 5 +title: "🎨 Gemini" +--- + +:::warning +This tutorial is a community contribution and is not supported by the Open WebUI team. It serves only as a demonstration on how to customize Open WebUI for your specific use case. Want to contribute? Check out the contributing tutorial. +::: + +Open WebUI also supports image generation through the **Google AI Studio API** also known as the **Gemini API**. + +### Initial Setup + +1. Obtain an [API key](https://aistudio.google.com/api-keys) from Google AI Studio. +2. You may need to create a project and enable the `Generative Language API` in addition to adding billing information. + +### Configuring Open WebUI + +1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu. +2. Set the `Image Generation Engine` field to `Gemini`. +3. Set the `API Base URL` to `https://generativelanguage.googleapis.com/v1beta`. +4. Enter your Google AI Studio [API key](https://aistudio.google.com/api-keys). +5. 
Enter the model you wish to use from these [available models](https://ai.google.dev/gemini-api/docs/imagen#model-versions). +6. Set the image size to one of the available [image sizes](https://ai.google.dev/gemini-api/docs/image-generation#aspect_ratios). + +![Screenshot of the Open WebUI Images settings page with Gemini selected and the API key, model, and image size fields highlighted.](/images/image-generation-and-editing/gemini-settings.png) + +:::info + +This feature appears to only work for models supported with this endpoint: `https://generativelanguage.googleapis.com/v1beta/models/:predict`. +This is the OpenAI **BETA** endpoint, which Google provides for experimental OpenAI compatibility. + +Google Imagen models use this endpoint while Gemini models use a different endpoint ending with `:generateContent` + +Imagen model endpoint example: + +- `https://generativelanguage.googleapis.com/v1beta/models/imagen-4.0-generate-001:predict`. +- [Documentation for Imagen models](https://ai.google.dev/gemini-api/docs/imagen) + +Gemini model endpoint example: + +- `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-image:generateContent`. +- [Documentation for Gemini models](https://ai.google.dev/gemini-api/docs/image-generation) + +Trying to call a Gemini model, such as gemini-2.5-flash-image aka *Nano Banana* would result in an error due to the difference in supported endpoints for Image Generation. + +`400: [ERROR: models/gemini-2.5-flash-image is not found for API version v1beta, or is not supported for predict. Call ListModels to see the list of available models and their supported methods.]` + +::: + +### LiteLLM Proxy with Gemini Endpoints + +Image generation with a LiteLLM proxy using Gemini or Imagen endpoints is supported with Open WebUI. Configure the Image Generation as follows: + +1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu. +2. Set the `Image Generation Engine` field to `Open AI`. +3. 
Change the API endpoint URL to `https://:/v1`. +4. Enter your LiteLLM API key. +5. The API version can be left blank. +6. Enter the image model name as it appears in your LiteLLM configuration. +7. Set the image size to one of the available sizes for the selected model. + +:::tip + +To find your LiteLLM connection information, navigate to the **Admin Panel** > **Settings** > **Connections** menu. +Your connection information will be listed under the Gemini API connection. + +::: \ No newline at end of file diff --git a/docs/tutorials/image-generation-and-editing/image-router.md b/docs/tutorials/image-generation-and-editing/image-router.md new file mode 100644 index 00000000..2aeeaf8b --- /dev/null +++ b/docs/tutorials/image-generation-and-editing/image-router.md @@ -0,0 +1,24 @@ +--- +sidebar_position: 4 +title: "🧭 Image Router" +--- + +:::warning +This tutorial is a community contribution and is not supported by the Open WebUI team. It serves only as a demonstration on how to customize Open WebUI for your specific use case. Want to contribute? Check out the contributing tutorial. +::: + +Open WebUI also supports image generation through the **Image Router APIs**. Image Router is an [open source](https://github.com/DaWe35/image-router) image generation proxy that unifies most popular models into a single API. + +### Initial Setup + +1. Obtain an [API key](https://imagerouter.io/api-keys) from Image Router. + +### Configuring Open WebUI + +1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu. +2. Set the `Image Generation Engine` field to `Open AI` (Image Router uses the same syntax as OpenAI). +3. Change the API endpoint URL to `https://api.imagerouter.io/v1/openai` +4. Enter your Image Router API key. +5. Enter the model you wish to use. Do not use the dropdown to select models, enter the model name instead. For more information, [see all models](https://imagerouter.io/models). 
+ +![Screenshot of the Open WebUI Images settings page with Open AI selected and the API endpoint URL, API key, and model fields highlighted for Image Router configuration.](/images/image-generation-and-editing/image-router-settings.png) diff --git a/docs/tutorials/image-generation-and-editing/openai.md b/docs/tutorials/image-generation-and-editing/openai.md new file mode 100644 index 00000000..e29f9f3c --- /dev/null +++ b/docs/tutorials/image-generation-and-editing/openai.md @@ -0,0 +1,65 @@ +--- +sidebar_position: 3 +title: "🧠 OpenAI" +--- + +:::warning +This tutorial is a community contribution and is not supported by the Open WebUI team. It serves only as a demonstration on how to customize Open WebUI for your specific use case. Want to contribute? Check out the contributing tutorial. +::: + +Open WebUI also supports image generation through the **OpenAI APIs**. This option includes a selector for choosing between DALLΒ·E 2, DALLΒ·E 3, and GPT-Image-1 each supporting different image sizes. + +### Initial Setup + +1. Obtain an [API key](https://platform.openai.com/api-keys) from OpenAI. + +### Configuring Open WebUI + +1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu. +2. Set the `Image Generation Engine` field to `Open AI`. +3. Enter your OpenAI API key. +4. Choose the model you wish to use. Note that image size options will depend on the selected model: + - **DALLΒ·E 2**: Supports `256x256`, `512x512`, or `1024x1024` images. + - **DALLΒ·E 3**: Supports `1024x1024`, `1792x1024`, or `1024x1792` images. + - **GPT-Image-1**: Supports `auto`, `1024x1024`, `1536x1024`, or `1024x1536` images. + +![Screenshot of the Open WebUI Images settings page with Open AI selected and the API key, model, and image size fields highlighted.](/images/image-generation-and-editing/openai-settings.png) + +### Azure OpenAI + +Image generation with Azure OpenAI Dall-E or GPT-Image is supported with Open WebUI. 
Configure the Image Generation as follows:
+
+1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu.
+2. Set the `Image Generation Engine` field to `Open AI` (Azure OpenAI uses the same syntax as OpenAI).
+3. Change the API endpoint URL to `https://.cognitiveservices.azure.com/openai/deployments//`. Set the instance and model id as you find it in the settings of the Azure AI Foundry.
+4. Configure the API version to the value you find in the settings of the Azure AI Foundry.
+5. Enter your Azure OpenAI API key.
+
+![Screenshot of the Open WebUI Images settings page with Open AI selected and the API endpoint URL, API version, and API key fields highlighted for Azure OpenAI configuration.](/images/image-generation-and-editing/azure-openai-settings.png)
+
+:::tip
+
+Alternative API endpoint URL tutorial: `https://.openai.azure.com/openai/deployments//` - you can find your endpoint name on https://ai.azure.com/resource/overview, and model name on https://ai.azure.com/resource/deployments.
+You can also copy Target URI from your deployment details page, but remember to delete strings after model name.
+For example, if your Target URI is `https://test.openai.azure.com/openai/deployments/gpt-image-1/images/generations?api-version=2025-04-01-preview`, the API endpoint URL in Open WebUI should be `https://test.openai.azure.com/openai/deployments/gpt-image-1/`.
+
+:::
+
+### LiteLLM Proxy with OpenAI Endpoints
+
+Image generation with a LiteLLM proxy using OpenAI endpoints is supported with Open WebUI. Configure the Image Generation as follows:
+
+1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu.
+2. Set the `Image Generation Engine` field to `Open AI`.
+3. Change the API endpoint URL to `https://:/v1`.
+4. Enter your LiteLLM API key.
+5. The API version can be left blank.
+6. Enter the image model name as it appears in your LiteLLM configuration.
+7. 
Set the image size to one of the available sizes for the selected model. + +:::tip + +To find your LiteLLM connection information, navigate to the **Admin Panel** > **Settings** > **Connections** menu. +Your connection information will be listed under the OpenAI API connection. + +::: diff --git a/docs/tutorials/image-generation-and-editing/usage.md b/docs/tutorials/image-generation-and-editing/usage.md new file mode 100644 index 00000000..590c99b0 --- /dev/null +++ b/docs/tutorials/image-generation-and-editing/usage.md @@ -0,0 +1,30 @@ +--- +sidebar_position: 6 +title: "πŸ“Š Usage" +--- + +Before you can use image generation, you must ensure that the **Image Generation** toggle is enabled in the **Admin Panel** > **Settings** > **Images** menu. + +## Using Image Generation + +### Method 1 + +1. Toggle the `Image Generation` switch to on. +2. Enter your image generation prompt. +3. Click `Send`. + +![Image Generation Tutorial](/images/tutorial_image_generation_2.png) + +### Method 2 + +![Image Generation Tutorial](/images/tutorial_image_generation.png) + +1. First, use a text generation model to write a prompt for image generation. +2. After the response has finished, you can click the Picture icon to generate an image. +3. After the image has finished generating, it will be returned automatically in chat. + +:::tip + +You can also edit the LLM's response and enter your image generation prompt as the message to send off for image generation instead of using the actual response provided by the LLM. + +::: diff --git a/docs/tutorials/images.md b/docs/tutorials/images.md deleted file mode 100644 index c366b7e5..00000000 --- a/docs/tutorials/images.md +++ /dev/null @@ -1,280 +0,0 @@ ---- -sidebar_position: 6 -title: "🎨 Image Generation" ---- - -:::warning - -This tutorial is a community contribution and is not supported by the Open WebUI team. It serves only as a demonstration on how to customize Open WebUI for your specific use case. Want to contribute? 
Check out the contributing tutorial. - -::: - -# 🎨 Image Generation - -Open WebUI supports image generation through three backends: **AUTOMATIC1111**, **ComfyUI**, and **OpenAI DALLΒ·E**. This guide will help you set up and use either of these options. - -## AUTOMATIC1111 - -Open WebUI supports image generation through the **AUTOMATIC1111** [API](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API). Here are the steps to get started: - -### Initial Setup - -1. Ensure that you have [AUTOMATIC1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) installed. -2. Launch AUTOMATIC1111 with additional flags to enable API access: - - ```python - ./webui.sh --api --listen - ``` - -3. For Docker installation of WebUI with the environment variables preset, use the following command: - - ```docker - docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway -e AUTOMATIC1111_BASE_URL=http://host.docker.internal:7860/ -e ENABLE_IMAGE_GENERATION=True -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main - ``` - -### Setting Up Open WebUI with AUTOMATIC1111 - -1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu. -2. Set the `Image Generation Engine` field to `Default (Automatic1111)`. -3. In the API URL field, enter the address where AUTOMATIC1111's API is accessible: - - ```txt - http://:7860/ - ``` - - If you're running a Docker installation of Open WebUI and AUTOMATIC1111 on the same host, use `http://host.docker.internal:7860/` as your address. - -## ComfyUI - -ComfyUI provides an alternative interface for managing and interacting with image generation models. Learn more or download it from its [GitHub page](https://github.com/comfyanonymous/ComfyUI). Below are the setup instructions to get ComfyUI running alongside your other tools. - -### Initial Setup - -1. 
Download and extract the ComfyUI software package from [GitHub](https://github.com/comfyanonymous/ComfyUI) to your desired directory. -2. To start ComfyUI, run the following command: - - ```python - python main.py - ``` - - For systems with low VRAM, launch ComfyUI with additional flags to reduce memory usage: - - ```python - python main.py --lowvram - ``` - -3. For Docker installation of WebUI with the environment variables preset, use the following command: - - ```docker - docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway -e COMFYUI_BASE_URL=http://host.docker.internal:7860/ -e ENABLE_IMAGE_GENERATION=True -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main - ``` - -### Setting Up Open WebUI with ComfyUI - -#### Setting Up FLUX.1 Models - -1. **Model Checkpoints**: - -- Download either the `FLUX.1-schnell` or `FLUX.1-dev` model from the [black-forest-labs HuggingFace page](https://huggingface.co/black-forest-labs). -- Place the model checkpoint(s) in both the `models/checkpoints` and `models/unet` directories of ComfyUI. Alternatively, you can create a symbolic link between `models/checkpoints` and `models/unet` to ensure both directories contain the same model checkpoints. - -2. **VAE Model**: - -- Download `ae.safetensors` VAE from [here](https://huggingface.co/black-forest-labs/FLUX.1-schnell/blob/main/ae.safetensors). -- Place it in the `models/vae` ComfyUI directory. - -3. **CLIP Model**: - -- Download `clip_l.safetensors` from [here](https://huggingface.co/comfyanonymous/flux_text_encoders/tree/main). -- Place it in the `models/clip` ComfyUI directory. - -4. **T5XXL Model**: - -- Download either the `t5xxl_fp16.safetensors` or `t5xxl_fp8_e4m3fn.safetensors` model from [here](https://huggingface.co/comfyanonymous/flux_text_encoders/tree/main). -- Place it in the `models/clip` ComfyUI directory. 
- -To integrate ComfyUI into Open WebUI, follow these steps: - -#### Step 1: Configure Open WebUI Settings - -1. Navigate to the **Admin Panel** in Open WebUI. -2. Click on **Settings** and then select the **Images** tab. -3. In the `Image Generation Engine` field, choose `ComfyUI`. -4. In the **API URL** field, enter the address where ComfyUI's API is accessible, following this format: `http://:8188/`. - - Set the environment variable `COMFYUI_BASE_URL` to this address to ensure it persists within the WebUI. - -#### Step 2: Verify the Connection and Enable Image Generation - -1. Ensure ComfyUI is running and that you've successfully verified the connection to Open WebUI. You won't be able to proceed without a successful connection. -2. Once the connection is verified, toggle on **Image Generation (Experimental)**. More options will be presented to you. -3. Continue to step 3 for the final configuration steps. - -#### Step 3: Configure ComfyUI Settings and Import Workflow - -1. Enable developer mode within ComfyUI. To do this, look for the gear icon above the **Queue Prompt** button within ComfyUI and enable the `Dev Mode` toggle. -2. Export the desired workflow from ComfyUI in `API format` using the `Save (API Format)` button. The file will be downloaded as `workflow_api.json` if done correctly. -3. Return to Open WebUI and click the **Click here to upload a workflow.json file** button. -4. Select the `workflow_api.json` file to import the exported workflow from ComfyUI into Open WebUI. -5. After importing the workflow, you must map the `ComfyUI Workflow Nodes` according to the imported workflow node IDs. -6. Set `Set Default Model` to the name of the model file being used, such as `flux1-dev.safetensors` - -:::info - -You may need to adjust an `Input Key` or two within Open WebUI's `ComfyUI Workflow Nodes` section to match a node within your workflow. -For example, `seed` may need to be renamed to `noise_seed` to match a node ID within your imported workflow. 
- -::: - -:::tip - -Some workflows, such as ones that use any of the Flux models, may utilize multiple nodes IDs that is necessary to fill in for their node entry fields within Open WebUI. If a node entry field requires multiple IDs, the node IDs should be comma separated (e.g., `1` or `1, 2`). - -::: - -6. Click `Save` to apply the settings and enjoy image generation with ComfyUI integrated into Open WebUI! - -After completing these steps, your ComfyUI setup should be integrated with Open WebUI, and you can use the Flux.1 models for image generation. - -### Configuring with SwarmUI - -SwarmUI utilizes ComfyUI as its backend. In order to get Open WebUI to work with SwarmUI you will have to append `ComfyBackendDirect` to the `ComfyUI Base URL`. Additionally, you will want to setup SwarmUI with LAN access. After aforementioned adjustments, setting up SwarmUI to work with Open WebUI will be the same as [Step one: Configure Open WebUI Settings](https://github.com/open-webui/docs/edit/main/docs/features/images.md#step-1-configure-open-webui-settings) as outlined above. -![Install SwarmUI with LAN Access](https://github.com/user-attachments/assets/a6567e13-1ced-4743-8d8e-be526207f9f6) - -#### SwarmUI API URL - -The address you will input as the ComfyUI Base URL will look like: `http://:7801/ComfyBackendDirect` - -## OpenAI - -Open WebUI also supports image generation through the **OpenAI APIs**. This option includes a selector for choosing between DALLΒ·E 2, DALLΒ·E 3, and GPT-Image-1 each supporting different image sizes. - -### Initial Setup - -1. Obtain an [API key](https://platform.openai.com/api-keys) from OpenAI. - -### Configuring Open WebUI - -1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu. -2. Set the `Image Generation Engine` field to `Open AI`. -3. Enter your OpenAI API key. -4. Choose the model you wish to use. 
Note that image size options will depend on the selected model: - - **DALLΒ·E 2**: Supports `256x256`, `512x512`, or `1024x1024` images. - - **DALLΒ·E 3**: Supports `1024x1024`, `1792x1024`, or `1024x1792` images. - - **GPT-Image-1**: Supports `auto`, `1024x1024`, `1536x1024`, or `1024x1536` images. - -### Azure OpenAI - -Image generation with Azure OpenAI Dall-E or GPT-Image is supported with Open WebUI. Configure the Image Generation as follows: - -1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu. -2. Set the `Image Generation Engine` field to `Open AI` (Azure OpenAI uses the same syntax as OpenAI). -3. Change the API endpoint URL to `https://.cognitiveservices.azure.com/openai/deployments//`. Set the instance and model id as you find it in the settings of the Azure AI Foundry. -4. Configure the API version to the value you find in the settings of the Azure AI Fountry. -5. Enter your Azure OpenAI API key. - -:::tip - -Alternative API endpoint URL tutorial: `https://.openai.azure.com/openai/deployments//` - you can find your endpoint name on https://ai.azure.com/resource/overview, and model name on https://ai.azure.com/resource/deployments. -You can also copy Target URI from your deployment detailed page, but remember to delete strings after model name. -For example, if your Target URI is `https://test.openai.azure.com/openai/deployments/gpt-image-1/images/generations?api-version=2025-04-01-preview`, the API endpoint URL in Open WebUI should be `https://test.openai.azure.com/openai/deployments/gpt-image-1/`. - -::: - -### LiteLLM Proxy with OpenAI Endpoints - -Image generation with a LiteLLM proxy using OpenAI endpoints is supported with Open WebUI. Configure the Image Generation as follows: - -1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu. -2. Set the `Image Generation Engine` field to `Open AI`. -3. Change the API endpoint URL to `https://:/v1`. -4. Enter your LiteLLM API key. -5. 
The API version can be left blank. -6. Enter the image model name as it appears in your LiteLLM configuration. -7. Set the image size to one of the available sizes for the selected model. - -:::tip - -To find your LiteLLM connection information, navigate to the **Admin Panel** > **Settings** > **Connections** menu. -Your connection information will be listed under the OpenAI API connection. - -::: - -## Image Router - -Open WebUI also supports image generation through the **Image Router APIs**. Image Router is an [open source](https://github.com/DaWe35/image-router) image generation proxy that unifies most popular models into a single API. - -### Initial Setup - -1. Obtain an [API key](https://imagerouter.io/api-keys) from Image Router. - -### Configuring Open WebUI - -1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu. -2. Set the `Image Generation Engine` field to `Open AI` (Image Router uses the same syntax as OpenAI). -3. Change the API endpoint URL to `https://api.imagerouter.io/v1/openai` -4. Enter your Image Router API key. -5. Enter the model you wish to use. Do not use the dropdown to select models, enter the model name instead. For more information, [see all models](https://imagerouter.io/models). - -## Gemini - -Open WebUI also supports image generation through the **Google Studio API**. - -### Initial Setup - -1. Obtain an [API key](https://aistudio.google.com/api-keys) from Google AI Studio. -2. You may need to create a project and enable the `Generative Language API` in addition to adding billing information. - -### Configuring Open WebUI - -1. In Open WebUI, navigate to the **Admin Panel** > **Settings** > **Images** menu. -2. Set the `Image Generation Engine` field to `Gemini`. -3. Set the `API Base URL` to `https://generativelanguage.googleapis.com/v1beta`. -4. Enter your Google AI Studio [API key](https://aistudio.google.com/api-keys). -5. 
Enter the model you wish to use from these [available models](https://ai.google.dev/gemini-api/docs/imagen#model-versions). -6. Set the image size to one of the available [image sizes](https://ai.google.dev/gemini-api/docs/image-generation#aspect_ratios). - -:::info - -This feature appears to only work for models supported with this endpoint: `https://generativelanguage.googleapis.com/v1beta/models/:predict`. - -Google Imagen models use this endpoint while Gemini models use a different endpoint ending with `:generateContent` - -Imagen model endpoint example: - - `https://generativelanguage.googleapis.com/v1beta/models/imagen-4.0-generate-001:predict`. - - [Documentation for Imagen models](https://ai.google.dev/gemini-api/docs/imagen) - -Gemini model endpoint example: - - `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-image:generateContent`. - - [Documentation for Gemini models](https://ai.google.dev/gemini-api/docs/image-generation) - -Trying to call a Gemini model, such as gemini-2.5-flash-image aka *Nano Banana* will result in an error due to the difference in supported endpoints. - -`400: [ERROR: models/gemini-2.5-flash-image is not found for API version v1beta, or is not supported for predict. Call ListModels to see the list of available models and their supported methods.]` - -::: - -## Using Image Generation - -### Method 1 - -1. Toggle the `Image Generation` switch to on. -2. Enter your image generation prompt. -3. Click `Send`. - -![Image Generation Tutorial](/images/tutorial_image_generation_2.png) - -### Method 2 - -![Image Generation Tutorial](/images/tutorial_image_generation.png) - -1. First, use a text generation model to write a prompt for image generation. -2. After the response has finished, you can click the Picture icon to generate an image. -3. After the image has finished generating, it will be returned automatically in chat. 
- -:::tip - -You can also edit the LLM's response and enter your image generation prompt as the message - to send off for image generation instead of using the actual response provided by the - LLM. - -::: diff --git a/docs/tutorials/integrations/amazon-bedrock.md b/docs/tutorials/integrations/amazon-bedrock.md index d0040b01..7a94f6e8 100644 --- a/docs/tutorials/integrations/amazon-bedrock.md +++ b/docs/tutorials/integrations/amazon-bedrock.md @@ -1,6 +1,6 @@ --- sidebar_position: 31 -title: "πŸ›Œ Integrate with Amazon Bedrock" +title: "πŸͺ¨ Integrate with Amazon Bedrock" --- :::warning diff --git a/docs/tutorials/integrations/azure-ad-ds-ldap.mdx b/docs/tutorials/integrations/azure-ad-ds-ldap.mdx index 44218712..55b6f728 100644 --- a/docs/tutorials/integrations/azure-ad-ds-ldap.mdx +++ b/docs/tutorials/integrations/azure-ad-ds-ldap.mdx @@ -1,5 +1,5 @@ --- -title: "Azure AD Domain Services (LDAPS) Integration" +title: "πŸ“‡ Azure AD Domain Services (LDAPS) Integration" --- :::warning diff --git a/docs/tutorials/integrations/azure-openai/azure-cli-auth.mdx b/docs/tutorials/integrations/azure-openai/azure-cli-auth.mdx index ea38cfc5..91dba420 100644 --- a/docs/tutorials/integrations/azure-openai/azure-cli-auth.mdx +++ b/docs/tutorials/integrations/azure-openai/azure-cli-auth.mdx @@ -1,5 +1,5 @@ --- -title: "Azure CLI Authentication" +title: "πŸ“Ÿ Azure CLI Authentication" --- :::warning diff --git a/docs/tutorials/integrations/azure-openai/workload-identity-auth.mdx b/docs/tutorials/integrations/azure-openai/workload-identity-auth.mdx index fc838405..53c2fb68 100644 --- a/docs/tutorials/integrations/azure-openai/workload-identity-auth.mdx +++ b/docs/tutorials/integrations/azure-openai/workload-identity-auth.mdx @@ -1,5 +1,5 @@ --- -title: "Workload Identity Authentication" +title: "πŸ€– Workload Identity Authentication" --- :::warning diff --git a/docs/tutorials/integrations/iterm2.md b/docs/tutorials/integrations/iterm2.md index efde46d8..b3076270 100644 --- 
a/docs/tutorials/integrations/iterm2.md +++ b/docs/tutorials/integrations/iterm2.md @@ -1,5 +1,5 @@ --- -title: "πŸ’» Iterm2 AI Integration" +title: "⌨️ Iterm2 AI Integration" --- :::warning diff --git a/docs/tutorials/offline-mode.md b/docs/tutorials/offline-mode.md index 5505bac1..8d3008cb 100644 --- a/docs/tutorials/offline-mode.md +++ b/docs/tutorials/offline-mode.md @@ -1,6 +1,6 @@ --- sidebar_position: 24 -title: "πŸ”Œ Offline Mode" +title: "✈️ Offline Mode" --- :::warning diff --git a/docs/tutorials/speech-to-text/_category_.json b/docs/tutorials/speech-to-text/_category_.json index 38926e75..cae7d080 100644 --- a/docs/tutorials/speech-to-text/_category_.json +++ b/docs/tutorials/speech-to-text/_category_.json @@ -1,5 +1,5 @@ { - "label": "🎀 Speech To Text", + "label": "πŸŽ™οΈ Speech To Text", "position": 5, "link": { "type": "generated-index" diff --git a/docs/tutorials/speech-to-text/env-variables.md b/docs/tutorials/speech-to-text/env-variables.md index 1d4fd926..b25978cb 100644 --- a/docs/tutorials/speech-to-text/env-variables.md +++ b/docs/tutorials/speech-to-text/env-variables.md @@ -1,6 +1,6 @@ --- sidebar_position: 2 -title: "Environment Variables" +title: "🌍 Environment Variables" --- ## Environment Variables List diff --git a/docs/tutorials/speech-to-text/stt-config.md b/docs/tutorials/speech-to-text/stt-config.md index fd4f9159..d4be3ebf 100644 --- a/docs/tutorials/speech-to-text/stt-config.md +++ b/docs/tutorials/speech-to-text/stt-config.md @@ -1,6 +1,6 @@ --- sidebar_position: 1 -title: "πŸ—¨οΈ Configuration" +title: "βš™οΈ Configuration" --- Open Web UI supports both local, browser, and remote speech to text. 
diff --git a/docs/tutorials/text-to-speech/Kokoro-FastAPI-integration.md b/docs/tutorials/text-to-speech/Kokoro-FastAPI-integration.md index 09279f8d..5ff65df2 100644 --- a/docs/tutorials/text-to-speech/Kokoro-FastAPI-integration.md +++ b/docs/tutorials/text-to-speech/Kokoro-FastAPI-integration.md @@ -1,6 +1,6 @@ --- sidebar_position: 2 -title: "πŸ—¨οΈ Kokoro-FastAPI Using Docker" +title: "πŸ’– Kokoro-FastAPI Using Docker" --- :::warning diff --git a/docs/tutorials/text-to-speech/_category_.json b/docs/tutorials/text-to-speech/_category_.json index 0b92cd96..791822a7 100644 --- a/docs/tutorials/text-to-speech/_category_.json +++ b/docs/tutorials/text-to-speech/_category_.json @@ -1,5 +1,5 @@ { - "label": "πŸ—¨οΈ Text-to-Speech", + "label": "πŸ“£ Text-to-Speech", "position": 5, "link": { "type": "generated-index" diff --git a/docs/tutorials/text-to-speech/chatterbox-tts-api-integration.md b/docs/tutorials/text-to-speech/chatterbox-tts-api-integration.md index 5b0ebb06..df374b66 100644 --- a/docs/tutorials/text-to-speech/chatterbox-tts-api-integration.md +++ b/docs/tutorials/text-to-speech/chatterbox-tts-api-integration.md @@ -1,6 +1,6 @@ --- sidebar_position: 3 -title: "πŸ—¨οΈ Chatterbox TTS β€” Voice Cloning" +title: "🧬 Chatterbox TTS β€” Voice Cloning" --- # Chatterbox TTS β€” Voice Cloning diff --git a/docs/tutorials/text-to-speech/kokoro-web-integration.md b/docs/tutorials/text-to-speech/kokoro-web-integration.md index e2d6d6cf..41363c71 100644 --- a/docs/tutorials/text-to-speech/kokoro-web-integration.md +++ b/docs/tutorials/text-to-speech/kokoro-web-integration.md @@ -1,6 +1,6 @@ --- sidebar_position: 2 -title: "πŸ—¨οΈ Kokoro Web - Effortless TTS for Open WebUI" +title: "🌐 Kokoro Web - Effortless TTS for Open WebUI" --- :::warning diff --git a/docs/tutorials/text-to-speech/openai-edge-tts-integration.md b/docs/tutorials/text-to-speech/openai-edge-tts-integration.md index 0fdf4395..7640da66 100644 --- 
a/docs/tutorials/text-to-speech/openai-edge-tts-integration.md +++ b/docs/tutorials/text-to-speech/openai-edge-tts-integration.md @@ -1,6 +1,6 @@ --- sidebar_position: 1 -title: "πŸ—¨οΈ Edge TTS Using Docker" +title: "🌊 Edge TTS Using Docker" --- :::warning diff --git a/docs/tutorials/text-to-speech/openedai-speech-integration.md b/docs/tutorials/text-to-speech/openedai-speech-integration.md index b0942bb7..5424766a 100644 --- a/docs/tutorials/text-to-speech/openedai-speech-integration.md +++ b/docs/tutorials/text-to-speech/openedai-speech-integration.md @@ -1,6 +1,6 @@ --- sidebar_position: 2 -title: "πŸ—¨οΈ Openedai-speech Using Docker" +title: "πŸ‘ Openedai-speech Using Docker" --- :::warning diff --git a/docs/tutorials/tips/special_arguments.mdx b/docs/tutorials/tips/special_arguments.mdx index 4508b1b9..d26bffe9 100644 --- a/docs/tutorials/tips/special_arguments.mdx +++ b/docs/tutorials/tips/special_arguments.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 20 -title: "πŸ’‘ Special Arguments" +title: "πŸͺ„ Special Arguments" --- :::warning @@ -9,7 +9,7 @@ This tutorial is a community contribution and is not supported by the Open WebUI ::: -# πŸ’‘ Special Arguments +# πŸͺ„ Special Arguments When developping your own `Tools`, `Functions` (`Filters`, `Pipes` or `Actions`), `Pipelines` etc, you can use special arguments explore the full spectrum of what Open-WebUI has to offer. 
diff --git a/docs/tutorials/web-search/_category_.json b/docs/tutorials/web-search/_category_.json index e76e19c5..d8f52da7 100644 --- a/docs/tutorials/web-search/_category_.json +++ b/docs/tutorials/web-search/_category_.json @@ -1,5 +1,5 @@ { - "label": "🌐 Web Search", + "label": "πŸ” Web Search", "position": 6, "link": { "type": "generated-index" diff --git a/docs/tutorials/web-search/bing.md b/docs/tutorials/web-search/bing.md index db7559f6..ef16605c 100644 --- a/docs/tutorials/web-search/bing.md +++ b/docs/tutorials/web-search/bing.md @@ -1,6 +1,6 @@ --- sidebar_position: 1 -title: "Bing" +title: "πŸ—ΊοΈ Bing" --- :::warning diff --git a/docs/tutorials/web-search/brave.md b/docs/tutorials/web-search/brave.md index 567ae6dd..54d14eab 100644 --- a/docs/tutorials/web-search/brave.md +++ b/docs/tutorials/web-search/brave.md @@ -1,6 +1,6 @@ --- sidebar_position: 2 -title: "Brave" +title: "🦁 Brave" --- :::warning diff --git a/docs/tutorials/web-search/ddgs.mdx b/docs/tutorials/web-search/ddgs.mdx index 49968c8c..f3d97019 100644 --- a/docs/tutorials/web-search/ddgs.mdx +++ b/docs/tutorials/web-search/ddgs.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 3 -title: "DDGS" +title: "πŸ¦† DDGS" --- :::warning diff --git a/docs/tutorials/web-search/exa.md b/docs/tutorials/web-search/exa.md index 1ea44bb1..8475c0bf 100644 --- a/docs/tutorials/web-search/exa.md +++ b/docs/tutorials/web-search/exa.md @@ -1,6 +1,6 @@ --- sidebar_position: 4 -title: "Exa AI" +title: "✨ Exa AI" --- :::warning diff --git a/docs/tutorials/web-search/external.md b/docs/tutorials/web-search/external.md index fb045352..c653f685 100644 --- a/docs/tutorials/web-search/external.md +++ b/docs/tutorials/web-search/external.md @@ -1,6 +1,6 @@ --- sidebar_position: 17 -title: "External" +title: "↗️ External" --- :::warning diff --git a/docs/tutorials/web-search/google-pse.md b/docs/tutorials/web-search/google-pse.md index 9a019f5c..61136521 100644 --- a/docs/tutorials/web-search/google-pse.md +++ 
b/docs/tutorials/web-search/google-pse.md @@ -1,6 +1,6 @@ --- sidebar_position: 5 -title: "Google PSE" +title: "🎨 Google PSE" --- :::warning diff --git a/docs/tutorials/web-search/jina.md b/docs/tutorials/web-search/jina.md index 461f5142..7a65471e 100644 --- a/docs/tutorials/web-search/jina.md +++ b/docs/tutorials/web-search/jina.md @@ -1,6 +1,6 @@ --- sidebar_position: 6 -title: "Jina" +title: "🧠 Jina" --- :::warning diff --git a/docs/tutorials/web-search/kagi.md b/docs/tutorials/web-search/kagi.md index 15a93208..deb01d0d 100644 --- a/docs/tutorials/web-search/kagi.md +++ b/docs/tutorials/web-search/kagi.md @@ -1,6 +1,6 @@ --- sidebar_position: 7 -title: "Kagi" +title: "πŸ’° Kagi" --- :::warning diff --git a/docs/tutorials/web-search/mojeek.md b/docs/tutorials/web-search/mojeek.md index 1acd40ff..3c794456 100644 --- a/docs/tutorials/web-search/mojeek.md +++ b/docs/tutorials/web-search/mojeek.md @@ -1,6 +1,6 @@ --- sidebar_position: 8 -title: "Mojeek" +title: "πŸ•΅οΈ Mojeek" --- :::warning diff --git a/docs/tutorials/web-search/ollama-cloud.mdx b/docs/tutorials/web-search/ollama-cloud.mdx index 31ad052c..e59e71ef 100644 --- a/docs/tutorials/web-search/ollama-cloud.mdx +++ b/docs/tutorials/web-search/ollama-cloud.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 4 -title: "Ollama Cloud Web Search" +title: "πŸ¦™ Ollama Cloud Web Search" --- :::warning diff --git a/docs/tutorials/web-search/perplexity.mdx b/docs/tutorials/web-search/perplexity.mdx index 34ade502..7ab2e6bd 100644 --- a/docs/tutorials/web-search/perplexity.mdx +++ b/docs/tutorials/web-search/perplexity.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 30 -title: "Perplexity" +title: "❓ Perplexity" --- :::warning diff --git a/docs/tutorials/web-search/perplexity_search.mdx b/docs/tutorials/web-search/perplexity_search.mdx index f38fdba9..22cec410 100644 --- a/docs/tutorials/web-search/perplexity_search.mdx +++ b/docs/tutorials/web-search/perplexity_search.mdx @@ -1,6 +1,6 @@ --- sidebar_position: 31 -title: 
"Perplexity Search" +title: "❔ Perplexity Search" --- :::warning diff --git a/docs/tutorials/web-search/searchapi.md b/docs/tutorials/web-search/searchapi.md index f70c47f3..aca818b8 100644 --- a/docs/tutorials/web-search/searchapi.md +++ b/docs/tutorials/web-search/searchapi.md @@ -1,6 +1,6 @@ --- sidebar_position: 9 -title: "SearchApi" +title: "πŸ”Œ SearchApi" --- :::warning diff --git a/docs/tutorials/web-search/searxng.md b/docs/tutorials/web-search/searxng.md index fcbc79c2..b072652b 100644 --- a/docs/tutorials/web-search/searxng.md +++ b/docs/tutorials/web-search/searxng.md @@ -1,6 +1,6 @@ --- sidebar_position: 10 -title: "SearXNG" +title: "🦊 SearXNG" --- :::warning diff --git a/docs/tutorials/web-search/serpapi.md b/docs/tutorials/web-search/serpapi.md index 85ffb677..0f6fc52c 100644 --- a/docs/tutorials/web-search/serpapi.md +++ b/docs/tutorials/web-search/serpapi.md @@ -1,6 +1,6 @@ --- sidebar_position: 15 -title: "SerpApi" +title: "🐍 SerpApi" --- :::warning diff --git a/docs/tutorials/web-search/serper.md b/docs/tutorials/web-search/serper.md index f316f444..2e802289 100644 --- a/docs/tutorials/web-search/serper.md +++ b/docs/tutorials/web-search/serper.md @@ -1,6 +1,6 @@ --- sidebar_position: 11 -title: "Serper" +title: "⚑ Serper" --- :::warning diff --git a/docs/tutorials/web-search/serply.md b/docs/tutorials/web-search/serply.md index 47d9e652..b5671044 100644 --- a/docs/tutorials/web-search/serply.md +++ b/docs/tutorials/web-search/serply.md @@ -1,6 +1,6 @@ --- sidebar_position: 12 -title: "Serply" +title: "πŸ“„ Serply" --- :::warning diff --git a/docs/tutorials/web-search/serpstack.md b/docs/tutorials/web-search/serpstack.md index 5f4d861b..845c94b9 100644 --- a/docs/tutorials/web-search/serpstack.md +++ b/docs/tutorials/web-search/serpstack.md @@ -1,6 +1,6 @@ --- sidebar_position: 13 -title: "Serpstack" +title: "πŸ“š Serpstack" --- :::warning diff --git a/docs/tutorials/web-search/tavily.md b/docs/tutorials/web-search/tavily.md index 
1589628d..56059a54 100644 --- a/docs/tutorials/web-search/tavily.md +++ b/docs/tutorials/web-search/tavily.md @@ -1,6 +1,6 @@ --- sidebar_position: 14 -title: "Tavily" +title: "🎯 Tavily" --- :::warning diff --git a/docs/tutorials/web-search/yacy.md b/docs/tutorials/web-search/yacy.md index c55ddf6b..1e2285df 100644 --- a/docs/tutorials/web-search/yacy.md +++ b/docs/tutorials/web-search/yacy.md @@ -1,6 +1,6 @@ --- sidebar_position: 16 -title: "Yacy" +title: "πŸ§‘β€πŸ€β€πŸ§‘ Yacy" --- :::warning diff --git a/static/images/image-generation-and-editing/automatic1111-settings.png b/static/images/image-generation-and-editing/automatic1111-settings.png new file mode 100644 index 00000000..2b1a1bb3 Binary files /dev/null and b/static/images/image-generation-and-editing/automatic1111-settings.png differ diff --git a/static/images/image-generation-and-editing/azure-openai-settings.png b/static/images/image-generation-and-editing/azure-openai-settings.png new file mode 100644 index 00000000..9f5f7a20 Binary files /dev/null and b/static/images/image-generation-and-editing/azure-openai-settings.png differ diff --git a/static/images/image-generation-and-editing/comfyui-editing-settings.png b/static/images/image-generation-and-editing/comfyui-editing-settings.png new file mode 100644 index 00000000..e6a08155 Binary files /dev/null and b/static/images/image-generation-and-editing/comfyui-editing-settings.png differ diff --git a/static/images/image-generation-and-editing/comfyui-generation-settings.png b/static/images/image-generation-and-editing/comfyui-generation-settings.png new file mode 100644 index 00000000..e6a08155 Binary files /dev/null and b/static/images/image-generation-and-editing/comfyui-generation-settings.png differ diff --git a/static/images/image-generation-and-editing/comfyui-node-mapping.png b/static/images/image-generation-and-editing/comfyui-node-mapping.png new file mode 100644 index 00000000..8f9987f8 Binary files /dev/null and 
b/static/images/image-generation-and-editing/comfyui-node-mapping.png differ diff --git a/static/images/image-generation-and-editing/comfyui-unet-name-node.png b/static/images/image-generation-and-editing/comfyui-unet-name-node.png new file mode 100644 index 00000000..e6a08155 Binary files /dev/null and b/static/images/image-generation-and-editing/comfyui-unet-name-node.png differ diff --git a/static/images/image-generation-and-editing/comfyui-workflow-upload.png b/static/images/image-generation-and-editing/comfyui-workflow-upload.png new file mode 100644 index 00000000..e6a08155 Binary files /dev/null and b/static/images/image-generation-and-editing/comfyui-workflow-upload.png differ diff --git a/static/images/image-generation-and-editing/gemini-settings.png b/static/images/image-generation-and-editing/gemini-settings.png new file mode 100644 index 00000000..f1e48215 Binary files /dev/null and b/static/images/image-generation-and-editing/gemini-settings.png differ diff --git a/static/images/image-generation-and-editing/image-router-settings.png b/static/images/image-generation-and-editing/image-router-settings.png new file mode 100644 index 00000000..9f5f7a20 Binary files /dev/null and b/static/images/image-generation-and-editing/image-router-settings.png differ diff --git a/static/images/image-generation-and-editing/openai-settings.png b/static/images/image-generation-and-editing/openai-settings.png new file mode 100644 index 00000000..9f5f7a20 Binary files /dev/null and b/static/images/image-generation-and-editing/openai-settings.png differ