# For more information, see the Configuration Guide:
# https://www.librechat.ai/docs/configuration/librechat_yaml

# Configuration version (required)
version: 1.2.1

# Cache settings: Set to true to enable caching
cache: true

# File storage strategy: "s3" or "firebase" (defaults to local storage when not set)
# fileStrategy: "s3"

# Custom interface configuration
interface:
  # Privacy policy settings
  privacyPolicy:
    externalUrl: 'https://librechat.ai/privacy-policy'
    openNewTab: true

  # Terms of service
  termsOfService:
    externalUrl: 'https://librechat.ai/tos'
    openNewTab: true
    modalAcceptance: true
    modalTitle: "Capabilities and Terms of Service for LibreChat"
    modalContent: |    
     ## Overview and Key Features

     This platform is designed to facilitate user exploration of the Model Context Protocol (MCP) and the capabilities offered by LibreChat. It includes several powerful, built-in features accessible without requiring user-provided API keys or credentials.

     ### Core Capabilities:

     *   **Integrated Agents:**
          *   **Smarty:** An intelligent search companion optimized for web searches, providing robust performance comparable to specialized search AI tools.
          *   **CodeSandbox:** A free, powerful alternative to proprietary code interpreter APIs. This local Python MCP server enables code execution, package installation, and multi-language support within a secure sandbox environment.

     *   **Broad LLM Access:** Provides complimentary access (no API keys required) to a selection of state-of-the-art Large Language Models, including:
          *   Qwen3
          *   Gemini 2.5 Pro
          *   GPT-4o
          *   GPT-4.1
          *   DeepSeek R1
          *   *(Note: Availability may be subject to change)*

     *   **Document Processing:**
          *   **RAG (Retrieval-Augmented Generation):** Enabled for enhanced information retrieval from provided documents.
          *   **OCR (Optical Character Recognition):** Allows processing of text within uploaded image-based documents.
          *   Supports uploading and processing of large files.

     *   **Simplified Access:** All features are pre-configured, eliminating the need for users to manage or provide API keys or credentials.

     Note: By using the Website, you acknowledge that you have read these Terms of Service and agree to be bound by them.

  endpointsMenu: true
  modelSelect: true
  parameters: true
  sidePanel: true
  presets: true
  prompts: true
  bookmarks: true
  multiConvo: true
  agents: true

# Example Registration Object Structure (optional)   
registration:
  socialLogins: ['github', 'google', 'discord', 'openid', 'facebook', 'apple']
  # allowedDomains:
  # - "gmail.com"


# Example Balance settings
#balance:
#  enabled: true
#  startBalance: 1000
#  autoRefillEnabled: true
#  refillIntervalValue: 1
#  refillIntervalUnit: 'hours'
#  refillAmount: 5000

# speech:
#   tts:
#     openai:
#       url: ''
#       apiKey: '${TTS_API_KEY}'
#       model: ''
#       voices: ['']

#
#   stt:
#     openai:
#       url: ''
#       apiKey: '${STT_API_KEY}'
#       model: ''

# rateLimits:
#   fileUploads:
#     ipMax: 100
#     ipWindowInMinutes: 60  # Rate limit window for file uploads per IP
#     userMax: 50
#     userWindowInMinutes: 60  # Rate limit window for file uploads per user
#   conversationsImport:
#     ipMax: 100
#     ipWindowInMinutes: 60  # Rate limit window for conversation imports per IP
#     userMax: 50
#     userWindowInMinutes: 60  # Rate limit window for conversation imports per user

# Example Actions Object Structure
actions:
  allowedDomains:
    - "swapi.dev"
    - "librechat.ai"
    - "google.com"

# MCP (Model Context Protocol) server definitions
mcpServers:
  exa:
    type: stdio
    command: npx
    args:
      - -y
      - "/app/exa-mcp-server/build/index.js"
      - --tools=web_search_exa,research_paper_search,twitter_search,company_research,crawling,competitor_finder
    timeout: 60000000
    env:
     EXA_API_KEY: "${EXA_API_KEY}"  # set EXA_API_KEY in your .env; avoid committing raw keys
     PATH: "/usr/local/bin:/usr/bin:/bin"
     NODE_PATH: "/usr/local/lib/node_modules" 
  twilio:
    type: stdio
    command: npx
    args:
      - -y
      - '@twilio-alpha/mcp' 
      # accountSid/apiKeySid:apiKeySecret credential string; placeholder shown below, supply your own and keep real secrets out of version control
      - 'ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/SKxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:your_twilio_api_key_secret'
    timeout: 60000000
    env:
     PATH: "/usr/local/bin:/usr/bin:/bin"
     NODE_PATH: "/usr/local/lib/node_modules" 
  memory:
    type: stdio
    command: npx
    args:
      - -y
      - "@modelcontextprotocol/server-memory"
    timeout: 60000000
  codesandbox:
    type: stdio
    command: python
    args:
      - tests.py
    timeout: 60000000
    env:
     JAVA_LIB_PATH: "/usr/lib/jvm/java-21-openjdk/lib"
     JAVA_SERVER_LIB_PATH: "/usr/lib/jvm/java-21-openjdk/lib/server"
     LD_LIBRARY_PATH: "/usr/lib/jvm/java-21-openjdk/lib/server:/usr/lib/jvm/java-21-openjdk/lib:"
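  # Remote MCP servers can also be declared without a local command. Below is a minimal
  # commented sketch, assuming LibreChat's SSE transport; the server name and URL are
  # hypothetical placeholders (adjust or remove as needed):
  # remote-example:
  #   type: sse
  #   url: "http://localhost:3001/sse"
  #   timeout: 60000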
# Definition of custom endpoints
endpoints:
  # assistants:
  #   disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
  #   pollIntervalMs: 3000  # Polling interval for checking assistant updates
  #   timeoutMs: 180000  # Timeout for assistant operations
  #   # Should only be one or the other, either `supportedIds` or `excludedIds`
  #   supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
  #   # excludedIds: ["asst_excludedAssistantId"]
  #   # Only show assistants that the user created or that were created externally (e.g. in Assistants playground).
  #   # privateAssistants: false # Does not work with `supportedIds` or `excludedIds`
  #   # (optional) Models that support retrieval, will default to latest known OpenAI models that support the feature
  #   retrievalModels: ["gpt-4-turbo-preview"]
  #   # (optional) Assistant Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
  #   capabilities: ["code_interpreter", "retrieval", "actions", "tools", "image_vision"]
  agents:
    # (optional) Default recursion depth for agents, defaults to 25
    recursionLimit: 50
    # (optional) Max recursion depth for agents, defaults to 25
    maxRecursionLimit: 100
    # (optional) Disable the builder interface for agents
    disableBuilder: false
    # (optional) Agent Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
    capabilities: ["file_search", "tools", "ocr", "chain"]
  custom:
    - name: 'Tiny-DEV' # Unique name for the endpoint
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # recommended environment variables:
      apiKey: '${TINY_API_KEY}'  # assumed env variable name; set TINY_API_KEY in your .env
      baseURL: '${TINY_API_BASE}'


      # Models configuration
      models:
        # List of default models to use. At least one value is required.
        default: [
            'Qwen3-235B', 'Qwen3-235B-A22B', 'Qwen3-30B-A3B', 'Qwen3-32B',
            'gemini-2.5-pro-exp-03-25', 'gemini-2.5-flash-preview-04-17', 'gemini-2.0-flash-thinking-exp-01-21',
            'deepseek.r1', 'deepseek-reasoner', 'MAI-DS-R1-FP8', 'DeepSeek-R1-Zero', 'DeepSeek-R1T-Chimera',
            'QwQ-32B-ArliAI-RpR-v1', 'DeepSeek-V3-0324', 'deepseek-chat',
            'gpt4.1', 'gpt-4o', 'o4-mini',
            'gemini-2.0-pro-exp-02-05', 'GLM-4-32B-0414', 'GLM-Z1-32B-0414',
            'deepseek-r1-distill-llama-70b', 'qwq-32b', 'llama-3.3-70b-versatile',
            'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8', 'microsoft/Phi-4-multimodal-instruct',
            'google/gemma-3-27b-it', 'meta-llama/Llama-4-Scout-17B-16E-Instruct'
          ]
        # Fetch option: set to true to fetch the model list from the API
        # (a commented example appears after this endpoint).
        # fetch: false # Defaults to false.

      # Optional configurations

      # Title Conversation setting
      titleConvo: true # Set to true to enable title conversation

      modelDisplayLabel: 'Tiny' # Default is "AI" when not set.
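
    # A second custom endpoint could enable the fetch option referenced above to pull
    # the model list from the provider's OpenAI-compatible models route. This is a
    # commented sketch; the endpoint name and environment variables are assumptions:
    # - name: 'Another-Provider'
    #   apiKey: '${ANOTHER_API_KEY}'
    #   baseURL: '${ANOTHER_API_BASE}'
    #   models:
    #     default: ['some-model']  # at least one entry is still required
    #     fetch: true              # fetch available models from the API
    #   titleConvo: true
    #   modelDisplayLabel: 'Another Provider'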

ocr:
  mistralModel: "mistral-ocr-latest"  # Optional: Specify Mistral model, defaults to "mistral-ocr-latest"
  apiKey:  '${OCR_API_KEY}'      # Optional: Defaults to OCR_API_KEY env variable
  baseURL: "https://api.mistral.ai/v1"  # Optional: Defaults to OCR_BASEURL env variable, or Mistral's API if no variable set
  strategy: "mistral_ocr"
  
fileConfig:
  endpoints:
    agents:
      fileLimit: 5
      fileSizeLimit: 100  # Maximum size for an individual file in MB
      totalSizeLimit: 500  # Maximum total size for all files in a single request in MB
      supportedMimeTypes:
        - "image/.*"
        - "application/pdf"
        - "video/.*"
        - "application/vnd.ms-excel"
        - "audio/mp3"
        - "audio/mpeg"
        - "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        - "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
        - "application/msword"
# fileConfig:
#   endpoints:
#     assistants:
#       fileLimit: 5
#       fileSizeLimit: 10  # Maximum size for an individual file in MB
#       totalSizeLimit: 50  # Maximum total size for all files in a single request in MB
#       supportedMimeTypes:
#         - "image/.*"
#         - "application/pdf"
#     openAI:
#       disabled: true  # Disables file uploading to the OpenAI endpoint
#     default:
#       totalSizeLimit: 20
#     YourCustomEndpointName:
#       fileLimit: 2
#       fileSizeLimit: 5
#   serverFileSizeLimit: 100  # Global server file size limit in MB
#   avatarSizeLimit: 2  # Limit for user avatar image size in MB
# # See the Custom Configuration Guide for more information on Assistants Config:
# # https://www.librechat.ai/docs/configuration/librechat_yaml/object_structure/assistants_endpoint