From 7e83524a09933ebb6d294922bb6be86d56b4bd6f Mon Sep 17 00:00:00 2001 From: DrMelone <27028174+Classic298@users.noreply.github.com> Date: Tue, 24 Feb 2026 18:16:57 +0100 Subject: [PATCH] anthropic and filters --- .../extensibility/plugin/functions/filter.mdx | 71 +++++++++++++++++++ docs/reference/api-endpoints.md | 32 ++++++++- 2 files changed, 102 insertions(+), 1 deletion(-) diff --git a/docs/features/extensibility/plugin/functions/filter.mdx b/docs/features/extensibility/plugin/functions/filter.mdx index c1815e88..b762bb08 100644 --- a/docs/features/extensibility/plugin/functions/filter.mdx +++ b/docs/features/extensibility/plugin/functions/filter.mdx @@ -560,6 +560,77 @@ async def inlet(self, body: dict, __event_emitter__) -> dict: --- +### 🔌 Injecting Extra API Body Parameters + +Inlet filters can inject **extra fields into the request body** that get forwarded to the external LLM API. This is useful for API-specific parameters that Open WebUI doesn't expose in the UI. + +The request body flows from your inlet filter to the LLM API without stripping unknown fields — only internal keys like `metadata`, `features`, `tool_ids`, `files`, and `skill_ids` are removed. Any other field you add will be serialized to JSON and sent to the API provider. + +#### Example: OpenAI Safety Identifier + +OpenAI recommends sending a `safety_identifier` with each request for abuse detection. You can inject this automatically via a filter: + +```python +import hashlib + +class Filter: + def inlet(self, body: dict, __user__: dict = None) -> dict: + if __user__ and __user__.get("id"): + body["safety_identifier"] = hashlib.sha256( + __user__["id"].encode() + ).hexdigest() + return body +``` + +The hashed user UUID is added as a top-level body parameter and forwarded directly to OpenAI's API — no PII is sent, just an opaque hash. + +:::warning Cannot Inject HTTP Headers +Filters can only modify the **request body** (`form_data`).
Outbound HTTP headers are constructed separately and cannot be influenced from a filter. To add custom headers to API requests, use the **Admin Panel → Settings → Connections → OpenAI API** headers configuration. +::: + +--- + +### 🔍 Resolving the Base Model (`__model__`) + +When a user selects a workspace or custom model, `body["model"]` contains the custom model ID (e.g. `"my-custom-gpt5"`), not the underlying base model. To discover the actual base model, use the `__model__` dunder parameter: + +```python +class Filter: + def inlet(self, body: dict, __model__: dict = None) -> dict: + custom_model_id = body["model"] # e.g. "my-custom-gpt5" + + base_model_id = None + if __model__ and "info" in __model__: + base_model_id = __model__["info"].get("base_model_id") + # e.g. "gpt-5.2" + + if base_model_id: + print(f"Workspace model '{custom_model_id}' → base model '{base_model_id}'") + else: + print(f"Direct base model: '{custom_model_id}'") + + return body +``` + +If no `base_model_id` is present, the user selected a base model directly (no workspace wrapper). + +#### Available Dunder Parameters + +Filters can declare any of these parameters in their function signature to receive them automatically: + +| Parameter | What it provides | +|-----------|-----------------| +| `__model__` | Full model dict (including `info.base_model_id` for workspace models) | +| `__user__` | User data (`id`, `email`, `name`, `role`) | +| `__metadata__` | Request metadata (`chat_id`, `session_id`, `interface`, etc.) | +| `__event_emitter__` | Function to send status updates, embeds, etc. to the client | +| `__chat_id__` | Chat session ID | +| `__request__` | The raw FastAPI `Request` object | + +Only parameters you declare in your function signature are injected — Open WebUI inspects the signature at runtime to determine what to pass.
+ +--- + ### 🎨 UI Indicators & Visual Feedback #### In the Admin Functions Panel diff --git a/docs/reference/api-endpoints.md b/docs/reference/api-endpoints.md index 0b33fd61..35144fb1 100644 --- a/docs/reference/api-endpoints.md +++ b/docs/reference/api-endpoints.md @@ -134,7 +134,7 @@ Internally, the endpoint converts the Anthropic request format to OpenAI Chat Co client = Anthropic( api_key="YOUR_OPEN_WEBUI_API_KEY", - base_url="http://localhost:3000/api/v1", + base_url="http://localhost:3000/api", ) message = client.messages.create( @@ -147,6 +147,36 @@ Internally, the endpoint converts the Anthropic request format to OpenAI Chat Co print(message.content[0].text) ``` + :::warning + The `base_url` must be `http://localhost:3000/api` (not `/api/v1`). The Anthropic SDK automatically appends `/v1/messages` to the base URL. + ::: + +- **Claude Code Configuration**: + + To use [Claude Code](https://docs.anthropic.com/en/docs/claude-code) with Open WebUI as a proxy, configure it to point at your Open WebUI instance: + + ```bash + # Set environment variables for Claude Code + export ANTHROPIC_BASE_URL="http://localhost:3000/api" + export ANTHROPIC_API_KEY="YOUR_OPEN_WEBUI_API_KEY" + + # Then run Claude Code as normal + claude + ``` + + Alternatively, create or edit `~/.claude/settings.json`: + + ```json + { + "env": { + "ANTHROPIC_BASE_URL": "http://localhost:3000/api", + "ANTHROPIC_AUTH_TOKEN": "YOUR_OPEN_WEBUI_API_KEY" + } + } + ``` + + This routes all Claude Code requests through Open WebUI's authentication and access control layer, letting you use any configured model (including local models via Ollama or vLLM) with Claude Code's interface. + :::info All models configured in Open WebUI are accessible through this endpoint — including Ollama models, OpenAI models, and any custom function models. The `model` field should use the model ID as it appears in Open WebUI. Filters (inlet/stream) apply to these requests just as they do for the OpenAI-compatible endpoint.