---
title: "Example"
icon: FileCode
---

## Clean Example

This example config includes all documented endpoints (except Azure, LiteLLM, MLX, and Ollama, which all require additional configurations)

```yaml filename="librechat.yaml"
version: 1.3.4

cache: true

interface:
  # MCP Servers UI configuration
  mcpServers:
    placeholder: 'MCP Servers'
  # Privacy policy settings
  privacyPolicy:
    externalUrl: 'https://librechat.ai/privacy-policy'
    openNewTab: true
  # Terms of service
  termsOfService:
    externalUrl: 'https://librechat.ai/tos'
    openNewTab: true

registration:
  socialLogins: ["discord", "facebook", "github", "google", "openid"]

endpoints:
  custom:
    # Anyscale
    - name: "Anyscale"
      apiKey: "${ANYSCALE_API_KEY}"
      baseURL: "https://api.endpoints.anyscale.com/v1"
      models:
        default: [
          "meta-llama/Llama-2-7b-chat-hf",
        ]
        fetch: true
      titleConvo: true
      titleModel: "meta-llama/Llama-2-7b-chat-hf"
      summarize: false
      summaryModel: "meta-llama/Llama-2-7b-chat-hf"
      modelDisplayLabel: "Anyscale"

    # APIpie
    - name: "APIpie"
      apiKey: "${APIPIE_API_KEY}"
      baseURL: "https://apipie.ai/v1/"
      models:
        default: [
          "gpt-4",
          "gpt-4-turbo",
          "gpt-3.5-turbo",
          "claude-3-opus",
          "claude-3-sonnet",
          "claude-3-haiku",
          "llama-3-70b-instruct",
          "llama-3-8b-instruct",
          "gemini-pro-1.5",
          "gemini-pro",
          "mistral-large",
          "mistral-medium",
          "mistral-small",
          "mistral-tiny",
          "mixtral-8x22b",
        ]
        fetch: false
      titleConvo: true
      titleModel: "gpt-3.5-turbo"
      dropParams: ["stream"]

    # cohere
    - name: "cohere"
      apiKey: "${COHERE_API_KEY}"
      baseURL: "https://api.cohere.ai/v1"
      models:
        default: ["command-r", "command-r-plus", "command-light", "command-light-nightly", "command", "command-nightly"]
        fetch: false
      modelDisplayLabel: "cohere"
      titleModel: "command"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty", "temperature", "top_p"]

    # Fireworks
    - name: "Fireworks"
      apiKey: "${FIREWORKS_API_KEY}"
      baseURL: "https://api.fireworks.ai/inference/v1"
      models:
        default: [
          "accounts/fireworks/models/mixtral-8x7b-instruct",
        ]
        fetch: true
      titleConvo: true
      titleModel: "accounts/fireworks/models/llama-v2-7b-chat"
      summarize: false
      summaryModel: "accounts/fireworks/models/llama-v2-7b-chat"
      modelDisplayLabel: "Fireworks"
      dropParams: ["user"]

    # groq
    - name: "groq"
      apiKey: "${GROQ_API_KEY}"
      baseURL: "https://api.groq.com/openai/v1/"
      models:
        default: [
          "llama2-70b-4096",
          "llama3-70b-8192",
          "llama3-8b-8192",
          "mixtral-8x7b-32768",
          "gemma-7b-it",
        ]
        fetch: false
      titleConvo: true
      titleModel: "mixtral-8x7b-32768"
      modelDisplayLabel: "groq"

    # Mistral AI API
    - name: "Mistral"
      apiKey: "${MISTRAL_API_KEY}"
      baseURL: "https://api.mistral.ai/v1"
      models:
        default: [
          "mistral-tiny",
          "mistral-small",
          "mistral-medium",
          "mistral-large-latest"
        ]
        fetch: true
      titleConvo: true
      titleModel: "mistral-tiny"
      modelDisplayLabel: "Mistral"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # OpenRouter.ai
    - name: "OpenRouter"
      apiKey: "${OPENROUTER_KEY}"
      baseURL: "https://openrouter.ai/api/v1"
      models:
        default: ["openai/gpt-3.5-turbo"]
        fetch: true
      titleConvo: true
      titleModel: "gpt-3.5-turbo"
      summarize: false
      summaryModel: "gpt-3.5-turbo"
      modelDisplayLabel: "OpenRouter"

    # Perplexity
    - name: "Perplexity"
      apiKey: "${PERPLEXITY_API_KEY}"
      baseURL: "https://api.perplexity.ai/"
      models:
        default: [
          "mistral-7b-instruct",
          "sonar-small-chat",
          "sonar-small-online",
          "sonar-medium-chat",
          "sonar-medium-online"
        ]
        fetch: false  # fetching list of models is not supported
      titleConvo: true
      titleModel: "sonar-medium-chat"
      summarize: false
      summaryModel: "sonar-medium-chat"
      dropParams: ["stop", "frequency_penalty"]
      modelDisplayLabel: "Perplexity"

    # ShuttleAI API
    - name: "ShuttleAI"
      apiKey: "${SHUTTLEAI_API_KEY}"
      baseURL: "https://api.shuttleai.app/v1"
      models:
        default: [
          "shuttle-1",
          "shuttle-turbo"
        ]
        fetch: true
      titleConvo: true
      titleModel: "gemini-pro"
      summarize: false
      summaryModel: "llama-summarize"
      modelDisplayLabel: "ShuttleAI"
      dropParams: ["user"]

    # together.ai
    - name: "together.ai"
      apiKey: "${TOGETHERAI_API_KEY}"
      baseURL: "https://api.together.xyz"
      models:
        default: [
          "zero-one-ai/Yi-34B-Chat",
          "Austism/chronos-hermes-13b",
          "DiscoResearch/DiscoLM-mixtral-8x7b-v2",
          "Gryphe/MythoMax-L2-13b",
          "lmsys/vicuna-13b-v1.5",
          "lmsys/vicuna-7b-v1.5",
          "lmsys/vicuna-13b-v1.5-16k",
          "codellama/CodeLlama-13b-Instruct-hf",
          "codellama/CodeLlama-34b-Instruct-hf",
          "codellama/CodeLlama-70b-Instruct-hf",
          "codellama/CodeLlama-7b-Instruct-hf",
          "togethercomputer/llama-2-13b-chat",
          "togethercomputer/llama-2-70b-chat",
          "togethercomputer/llama-2-7b-chat",
          "NousResearch/Nous-Capybara-7B-V1p9",
          "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
          "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT",
          "NousResearch/Nous-Hermes-Llama2-70b",
          "NousResearch/Nous-Hermes-llama-2-7b",
          "NousResearch/Nous-Hermes-Llama2-13b",
          "NousResearch/Nous-Hermes-2-Yi-34B",
          "openchat/openchat-3.5-1210",
          "Open-Orca/Mistral-7B-OpenOrca",
          "togethercomputer/Qwen-7B-Chat",
          "snorkelai/Snorkel-Mistral-PairRM-DPO",
          "togethercomputer/alpaca-7b",
          "togethercomputer/falcon-40b-instruct",
          "togethercomputer/falcon-7b-instruct",
          "togethercomputer/GPT-NeoXT-Chat-Base-20B",
          "togethercomputer/Llama-2-7B-32K-Instruct",
          "togethercomputer/Pythia-Chat-Base-7B-v0.16",
          "togethercomputer/RedPajama-INCITE-Chat-3B-v1",
          "togethercomputer/RedPajama-INCITE-7B-Chat",
          "togethercomputer/StripedHyena-Nous-7B",
          "Undi95/ReMM-SLERP-L2-13B",
          "Undi95/Toppy-M-7B",
          "WizardLM/WizardLM-13B-V1.2",
          "garage-bAInd/Platypus2-70B-instruct",
          "mistralai/Mistral-7B-Instruct-v0.1",
          "mistralai/Mistral-7B-Instruct-v0.2",
          "mistralai/Mixtral-8x7B-Instruct-v0.1",
          "teknium/OpenHermes-2-Mistral-7B",
          "teknium/OpenHermes-2p5-Mistral-7B",
          "upstage/SOLAR-10.7B-Instruct-v1.0"
        ]
        fetch: false  # fetching list of models is not supported
      titleConvo: true
      titleModel: "togethercomputer/llama-2-7b-chat"
      summarize: false
      summaryModel: "togethercomputer/llama-2-7b-chat"
      modelDisplayLabel: "together.ai"
```

## Example with Comments

This example configuration file sets up LibreChat with detailed options across several key areas:

- **Caching**: Enabled to
improve performance.
- **File Handling**:
  - **File Strategy**: Commented out but hints at possible integration with Firebase for file storage.
  - **File Configurations**: Customizes file upload limits and allowed MIME types for different endpoints, including a global server file size limit and a specific limit for user avatar images.
- **Rate Limiting**: Defines thresholds for the maximum number of file uploads allowed per IP and user within a specified time window, aiming to prevent abuse.
- **Registration**:
  - Allows registration from specified social login providers and email domains, enhancing security and user management.
- **Endpoints**:
  - **Assistants**: Configures the assistants' endpoint with a polling interval and a timeout for operations, and provides an option to disable the builder interface.
  - **Custom Endpoints**:
    - Configures three external AI service endpoints — Groq, Mistral, and OpenRouter — including API keys, base URLs, model handling, and specific feature toggles like conversation titles, summarization, and parameter adjustments.
    - For Mistral, it enables dynamic model fetching, applies additional parameters for safe prompts, and explicitly drops unsupported parameters.
    - For OpenRouter, it sets up a basic configuration without dynamic model fetching and specifies a model for conversation titles.
```yaml filename="librechat.yaml"
# For more information, see the Configuration Guide:
# https://www.librechat.ai/docs/configuration/librechat_yaml

# Configuration version (required)
version: 1.3.4

# Cache settings: Set to true to enable caching
cache: true

# Custom interface configuration
interface:
  # MCP Servers UI configuration
  mcpServers:
    placeholder: 'MCP Servers'
  # Privacy policy settings
  privacyPolicy:
    externalUrl: 'https://librechat.ai/privacy-policy'
    openNewTab: true
  # Terms of service
  termsOfService:
    externalUrl: 'https://librechat.ai/tos'
    openNewTab: true

# Example Registration Object Structure (optional)
registration:
  socialLogins: ['github', 'google', 'discord', 'openid', 'facebook']
  # allowedDomains:
  # - "gmail.com"

# rateLimits:
#   fileUploads:
#     ipMax: 100
#     ipWindowInMinutes: 60  # Rate limit window for file uploads per IP
#     userMax: 50
#     userWindowInMinutes: 60  # Rate limit window for file uploads per user
#   conversationsImport:
#     ipMax: 100
#     ipWindowInMinutes: 60  # Rate limit window for conversation imports per IP
#     userMax: 50
#     userWindowInMinutes: 60  # Rate limit window for conversation imports per user

# Definition of custom endpoints
endpoints:
  # assistants:
  #   disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
  #   pollIntervalMs: 750  # Polling interval for checking assistant updates
  #   timeoutMs: 180000  # Timeout for assistant operations
  #   # Should only be one or the other, either `supportedIds` or `excludedIds`
  #   supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
  #   # excludedIds: ["asst_excludedAssistantId"]
  #   # Only show assistants that the user created or that were created externally (e.g. in Assistants playground).
  #   # privateAssistants: false # Does not work with `supportedIds` or `excludedIds`
  #   # (optional) Models that support retrieval, will default to latest known OpenAI models that support the feature
  #   retrievalModels: ["gpt-4-turbo-preview"]
  #   # (optional) Assistant Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
  #   capabilities: ["code_interpreter", "retrieval", "actions", "tools", "image_vision"]
  custom:
    # Groq Example
    - name: 'groq'
      apiKey: '${GROQ_API_KEY}'
      baseURL: 'https://api.groq.com/openai/v1/'
      models:
        default: [
          "llama3-70b-8192",
          "llama3-8b-8192",
          "llama2-70b-4096",
          "mixtral-8x7b-32768",
          "gemma-7b-it",
        ]
        fetch: false
      titleConvo: true
      titleModel: 'mixtral-8x7b-32768'
      modelDisplayLabel: 'groq'

    # Mistral AI Example
    - name: 'Mistral' # Unique name for the endpoint
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # recommended environment variables:
      apiKey: '${MISTRAL_API_KEY}'
      baseURL: 'https://api.mistral.ai/v1'

      # Models configuration
      models:
        # List of default models to use. At least one value is required.
        default: ['mistral-tiny', 'mistral-small', 'mistral-medium']
        # Fetch option: Set to true to fetch models from API.
        fetch: true # Defaults to false.

      # Optional configurations

      # Title Conversation setting
      titleConvo: true # Set to true to enable title conversation

      # Title Method: Choose between "completion" or "functions".
      # titleMethod: "completion" # Defaults to "completion" if omitted.

      # Title Model: Specify the model to use for titles.
      titleModel: 'mistral-tiny' # Defaults to "gpt-3.5-turbo" if omitted.

      # Summarize setting: Set to true to enable summarization.
      # summarize: false

      # Summary Model: Specify the model to use if summarization is enabled.
      # summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.

      # The label displayed for the AI model in messages.
      modelDisplayLabel: 'Mistral' # Default is "AI" when not set.

      # Add additional parameters to the request. Default params will be overwritten.
      # addParams:
      #   safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/

      # Drop Default params parameters from the request. See default params in guide linked below.
      # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
      dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty']

    # OpenRouter Example
    - name: 'OpenRouter'
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # recommended environment variables:
      # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
      apiKey: '${OPENROUTER_KEY}'
      baseURL: 'https://openrouter.ai/api/v1'
      models:
        default: ['meta-llama/llama-3-70b-instruct']
        fetch: true
      titleConvo: true
      titleModel: 'meta-llama/llama-3-70b-instruct'
      # Recommended: Drop the stop parameter from the request as Openrouter models use a variety of stop tokens.
      dropParams: ['stop']
      modelDisplayLabel: 'OpenRouter'

# fileConfig:
#   endpoints:
#     assistants:
#       fileLimit: 5
#       fileSizeLimit: 10  # Maximum size for an individual file in MB
#       totalSizeLimit: 50  # Maximum total size for all files in a single request in MB
#       supportedMimeTypes:
#         - "image/.*"
#         - "application/pdf"
#     openAI:
#       disabled: true  # Disables file uploading to the OpenAI endpoint
#     default:
#       totalSizeLimit: 20
#     YourCustomEndpointName:
#       fileLimit: 2
#       fileSizeLimit: 5
#   serverFileSizeLimit: 100  # Global server file size limit in MB
#   avatarSizeLimit: 2  # Limit for user avatar image size in MB

# See the Custom Configuration Guide for more information:
# https://www.librechat.ai/docs/configuration/librechat_yaml
```