# For more information, see the Configuration Guide:
# https://www.librechat.ai/docs/configuration/librechat_yaml

# Configuration version (required)
version: 1.2.1

# Cache settings: Set to true to enable caching
cache: true

# File strategy: s3 / firebase
# fileStrategy: "s3"
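# A minimal sketch of the alternative file strategy mentioned above, assuming
# Firebase credentials are supplied separately via environment variables:
# fileStrategy: "firebase"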
# Custom interface configuration
interface:
  # Privacy policy settings
  privacyPolicy:
    externalUrl: 'https://librechat.ai/privacy-policy'
    openNewTab: true
  # Terms of service
  termsOfService:
    externalUrl: 'https://librechat.ai/tos'
    openNewTab: true
    modalAcceptance: true
    modalTitle: "Capabilities and Terms of Service for LibreChat"
    modalContent: |
      ## Overview and Key Features

      This platform is designed to let users explore the Model Context Protocol (MCP) and the capabilities offered by LibreChat. It includes several powerful, built-in features that work without any user-provided API keys or credentials.

      ### Core Capabilities:

      * **Integrated Agents:**
        * **Smarty:** An intelligent search companion optimized for web searches, providing performance comparable to specialized search AI tools.
        * **CodeSandbox:** A free, powerful alternative to proprietary code interpreter APIs. This local Python MCP server enables code execution, package installation, and multi-language support within a secure sandbox environment.
      * **Broad LLM Access:** Complimentary access (no API keys required) to a selection of state-of-the-art Large Language Models, including:
        * Qwen3
        * Gemini 2.5 Pro
        * GPT-4o
        * GPT-4.1
        * DeepSeek R1
        * *(Note: availability may be subject to change.)*
      * **Document Processing:**
        * **RAG (Retrieval-Augmented Generation):** Enabled for enhanced information retrieval from uploaded documents.
        * **OCR (Optical Character Recognition):** Extracts text from uploaded image-based documents.
        * Supports uploading and processing of large files.
      * **Simplified Access:** All features are pre-configured, so users never need to manage or provide API keys or credentials.

      **Note:** By using the Website, you acknowledge that you have read these Terms of Service and agree to be bound by them.
  endpointsMenu: true
  modelSelect: true
  parameters: true
  sidePanel: true
  presets: true
  prompts: true
  bookmarks: true
  multiConvo: true
  agents: true

# Example Registration Object Structure (optional)
registration:
  socialLogins: ['github', 'google', 'discord', 'openid', 'facebook', 'apple']
  # allowedDomains:
  #   - "gmail.com"
# Example Balance settings
# balance:
#   enabled: true
#   startBalance: 1000
#   autoRefillEnabled: true
#   refillIntervalValue: 1
#   refillIntervalUnit: 'hours'
#   refillAmount: 5000

# speech:
#   tts:
#     openai:
#       url: ''
#       apiKey: '${TTS_API_KEY}'
#       model: ''
#       voices: ['']
#
#   stt:
#     openai:
#       url: ''
#       apiKey: '${STT_API_KEY}'
#       model: ''

# rateLimits:
#   fileUploads:
#     ipMax: 100
#     ipWindowInMinutes: 60  # Rate limit window for file uploads per IP
#     userMax: 50
#     userWindowInMinutes: 60  # Rate limit window for file uploads per user
#   conversationsImport:
#     ipMax: 100
#     ipWindowInMinutes: 60  # Rate limit window for conversation imports per IP
#     userMax: 50
#     userWindowInMinutes: 60  # Rate limit window for conversation imports per user

# Example Actions Object Structure
actions:
  allowedDomains:
    - "swapi.dev"
    - "librechat.ai"
    - "google.com"
# MCP servers launched by LibreChat over stdio
mcpServers:
  # Exa search MCP server (web search, research paper search, company research, etc.)
  exa:
    type: stdio
    command: npx
    args:
      - -y
      - "/app/exa-mcp-server/build/index.js"
      - --tools=web_search_exa,research_paper_search,twitter_search,company_research,crawling,competitor_finder
    timeout: 60000000
    env:
      EXA_API_KEY: "e4399980-1016-44ab-8789-1ef7f967a281"
      PATH: "/usr/local/bin:/usr/bin:/bin"
      NODE_PATH: "/usr/local/lib/node_modules"
  # Twilio alpha MCP server
  twilio:
    type: stdio
    command: npx
    args:
      - -y
      - '@twilio-alpha/mcp'
      - ACeea63a423d0c2d38fe0e21689f8dbcad/SK6cf2cb7b3f8b8a439c7f610712af3c11:QdhPW4CCq6GnORENPX8uE6t8t20UjNGy
    timeout: 60000000
    env:
      PATH: "/usr/local/bin:/usr/bin:/bin"
      NODE_PATH: "/usr/local/lib/node_modules"
  # Knowledge-graph memory server from the MCP reference servers
  memory:
    type: stdio
    command: npx
    args:
      - -y
      - "@modelcontextprotocol/server-memory"
    timeout: 60000000
  # CodeSandbox: local Python MCP server for sandboxed code execution (see the Terms modal above)
  codesandbox:
    type: stdio
    command: python
    args:
      - tests.py
    timeout: 60000000
    env:
      JAVA_LIB_PATH: "/usr/lib/jvm/java-21-openjdk/lib"
      JAVA_SERVER_LIB_PATH: "/usr/lib/jvm/java-21-openjdk/lib/server"
      LD_LIBRARY_PATH: "/usr/lib/jvm/java-21-openjdk/lib/server:/usr/lib/jvm/java-21-openjdk/lib:"
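  # A minimal sketch (not part of this deployment): an MCP server can also be
  # reached over HTTP (Server-Sent Events) instead of stdio, assuming a server
  # is actually running at the URL below. The name and URL are placeholders.
  # example-remote:
  #   type: sse
  #   url: "http://localhost:3001/sse"
  #   timeout: 60000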
# Definition of custom endpoints
endpoints:
  # assistants:
  #   disableBuilder: false  # Disable Assistants Builder Interface by setting to `true`
  #   pollIntervalMs: 3000  # Polling interval for checking assistant updates
  #   timeoutMs: 180000  # Timeout for assistant operations
  #   # Should only be one or the other, either `supportedIds` or `excludedIds`
  #   supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
  #   # excludedIds: ["asst_excludedAssistantId"]
  #   # Only show assistants that the user created or that were created externally (e.g. in Assistants playground).
  #   # privateAssistants: false  # Does not work with `supportedIds` or `excludedIds`
  #   # (optional) Models that support retrieval, will default to latest known OpenAI models that support the feature
  #   retrievalModels: ["gpt-4-turbo-preview"]
  #   # (optional) Assistant Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
  #   capabilities: ["code_interpreter", "retrieval", "actions", "tools", "image_vision"]
  agents:
    # (optional) Default recursion depth for agents, defaults to 25
    recursionLimit: 50
    # (optional) Max recursion depth for agents, defaults to 25
    maxRecursionLimit: 100
    # (optional) Disable the builder interface for agents
    disableBuilder: false
    # (optional) Agent Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
    capabilities: ["file_search", "tools", "ocr", "chain"]
  custom:
    - name: 'Tiny-DEV'  # Unique name for the endpoint
      # For `apiKey` and `baseURL`, you can use environment variables that you define,
      # as shown in the commented example below.
      apiKey: '77ea72d89b98d279c1848389cd027a51c408'
      baseURL: '${TINY_API_BASE}'
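      # A minimal sketch of the environment-variable form, assuming a TINY_API_KEY
      # variable is defined in the .env file (the variable name is hypothetical):
      # apiKey: '${TINY_API_KEY}'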
      # Models configuration
      models:
        # List of default models to use. At least one value is required.
        default:
          - 'Qwen3-235B'
          - 'Qwen3-235B-A22B'
          - 'Qwen3-30B-A3B'
          - 'Qwen3-32B'
          - 'gemini-2.5-pro-exp-03-25'
          - 'gemini-2.5-flash-preview-04-17'
          - 'gemini-2.0-flash-thinking-exp-01-21'
          - 'deepseek.r1'
          - 'deepseek-reasoner'
          - 'MAI-DS-R1-FP8'
          - 'DeepSeek-R1-Zero'
          - 'DeepSeek-R1T-Chimera'
          - 'QwQ-32B-ArliAI-RpR-v1'
          - 'DeepSeek-V3-0324'
          - 'deepseek-chat'
          - 'gpt4.1'
          - 'gpt-4o'
          - 'o4-mini'
          - 'gemini-2.0-pro-exp-02-05'
          - 'GLM-4-32B-0414'
          - 'GLM-Z1-32B-0414'
          - 'deepseek-r1-distill-llama-70b'
          - 'qwq-32b'
          - 'llama-3.3-70b-versatile'
          - 'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8'
          - 'microsoft/Phi-4-multimodal-instruct'
          - 'google/gemma-3-27b-it'
          - 'meta-llama/Llama-4-Scout-17B-16E-Instruct'
        # Fetch option: Set to true to fetch models from API.
        # fetch: false  # Defaults to false.
      # Optional configurations
      # Title Conversation setting
      titleConvo: true  # Set to true to enable title conversation
      modelDisplayLabel: 'Tiny'  # Default is "AI" when not set.
ocr:
  mistralModel: "mistral-ocr-latest"  # Optional: Specify Mistral model, defaults to "mistral-ocr-latest"
  apiKey: '${OCR_API_KEY}'  # Optional: Defaults to OCR_API_KEY env variable
  baseURL: "https://api.mistral.ai/v1"  # Optional: Defaults to OCR_BASEURL env variable, or Mistral's API if no variable set
  strategy: "mistral_ocr"
fileConfig:
  endpoints:
    agents:
      fileLimit: 5
      fileSizeLimit: 100  # Maximum size for an individual file in MB
      totalSizeLimit: 500  # Maximum total size for all files in a single request in MB
      supportedMimeTypes:
        - "image/.*"
        - "application/pdf"
        - "video/.*"
        - "application/vnd.ms-excel"
        - "audio/mp3"
        - "audio/mpeg"
        - "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        - "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
        - "application/msword"
# fileConfig:
#   endpoints:
#     assistants:
#       fileLimit: 5
#       fileSizeLimit: 10  # Maximum size for an individual file in MB
#       totalSizeLimit: 50  # Maximum total size for all files in a single request in MB
#       supportedMimeTypes:
#         - "image/.*"
#         - "application/pdf"
#     openAI:
#       disabled: true  # Disables file uploading to the OpenAI endpoint
#     default:
#       totalSizeLimit: 20
#     YourCustomEndpointName:
#       fileLimit: 2
#       fileSizeLimit: 5
#   serverFileSizeLimit: 100  # Global server file size limit in MB
#   avatarSizeLimit: 2  # Limit for user avatar image size in MB
# See the Custom Configuration Guide for more information on Assistants Config:
# https://www.librechat.ai/docs/configuration/librechat_yaml/object_structure/assistants_endpoint