Getting it all running
librechat/librechat.yaml (new file, 205 lines added)
@@ -0,0 +1,205 @@
# For more information, see the Configuration Guide:
# https://www.librechat.ai/docs/configuration/librechat_yaml

# Configuration version (required)
version: 1.2.1

# Cache settings: Set to true to enable caching
cache: true

# File strategy s3/firebase
# fileStrategy: "s3"

# Custom interface configuration
interface:
  customWelcome: "Welcome to DroidChat! How may I be of assistance?"
  # MCP Servers UI configuration
  mcpServers:
    placeholder: 'MCP Servers'
  # Privacy policy settings
  privacyPolicy:
    externalUrl: 'https://librechat.ai/privacy-policy'
    openNewTab: true

  # Terms of service
  termsOfService:
    externalUrl: 'https://librechat.ai/tos'
    openNewTab: true
    modalAcceptance: true
    modalTitle: "Terms of Service for LibreChat"
    modalContent: |
      # Terms and Conditions for LibreChat

      Welcome to LibreChat!

  endpointsMenu: true
  modelSelect: true
  parameters: true
  sidePanel: true
  presets: true
  prompts: true
  bookmarks: true
  multiConvo: true
  agents: true
  # Temporary chat retention period in hours (default: 720, min: 1, max: 8760)
  # temporaryChatRetention: 1

speech:
  tts:
    openai:
      url: 'https://aihubmix.com/v1'
      apiKey: '${AIHUBMIX_KEY}'
      model: 'gpt-4o-mini-tts'
      voices: [
        'alloy',
        'ash',
        'ballad',
        'coral',
        'echo',
        'fable',
        'nova',
        'onyx',
        'sage',
        'shimmer',
      ]

  stt:
    openai:
      url: 'https://aihubmix.com/v1'
      apiKey: '${AIHUBMIX_KEY}'
      model: 'distil-whisper-large-v3-en'

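# Note: values written as '${AIHUBMIX_KEY}' (and '${OPENROUTER_KEY}' below) are environment
# variable references; LibreChat resolves them from the server environment, typically the .env
# file, when it loads this config, so the raw API keys never need to appear in librechat.yaml.
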
# Example MCP Servers Object Structure
mcpServers:
  # everything:
  #   # type: sse # type can optionally be omitted
  #   url: http://localhost:3001/sse
  #   timeout: 60000  # 1 minute timeout for this server, this is the default timeout for MCP servers.
  puppeteer:
    type: stdio
    command: npx
    args:
      - -y
      - "@modelcontextprotocol/server-puppeteer"
    timeout: 300000  # 5 minutes timeout for this server
  filesystem:
    # type: stdio
    command: npx
    args:
      - -y
      - "@modelcontextprotocol/server-filesystem"
      - /files/Library
      - /files/RPG/Resources
  mcp-obsidian:
    command: npx
    args:
      - -y
      - "mcp-obsidian"
      - /files/Notes

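# Note on the filesystem-backed servers above: in a Docker-based install these stdio servers are
# spawned inside the LibreChat api container, so /files/Library, /files/RPG/Resources, and
# /files/Notes refer to paths inside that container and must be mounted there first. A minimal
# sketch (assuming the host folders live under ~/librechat-files; adjust to the real locations)
# for docker-compose.override.yml:
#
#   services:
#     api:
#       volumes:
#         - ~/librechat-files/Library:/files/Library
#         - ~/librechat-files/RPG/Resources:/files/RPG/Resources
#         - ~/librechat-files/Notes:/files/Notes
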
# Definition of custom endpoints
endpoints:
  # assistants:
  #   disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
  #   pollIntervalMs: 3000  # Polling interval for checking assistant updates
  #   timeoutMs: 180000  # Timeout for assistant operations
  #   # Should only be one or the other, either `supportedIds` or `excludedIds`
  #   supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
  #   # excludedIds: ["asst_excludedAssistantId"]
  #   # Only show assistants that the user created or that were created externally (e.g. in Assistants playground).
  #   # privateAssistants: false # Does not work with `supportedIds` or `excludedIds`
  #   # (optional) Models that support retrieval, will default to latest known OpenAI models that support the feature
  #   retrievalModels: ["gpt-4-turbo-preview"]
  #   # (optional) Assistant Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
  #   capabilities: ["code_interpreter", "retrieval", "actions", "tools", "image_vision"]
  # agents:
  #   # (optional) Default recursion depth for agents, defaults to 25
  #   recursionLimit: 50
  #   # (optional) Max recursion depth for agents, defaults to 25
  #   maxRecursionLimit: 100
  #   # (optional) Disable the builder interface for agents
  #   disableBuilder: false
  #   # (optional) Agent Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
  #   capabilities: ["execute_code", "file_search", "actions", "tools"]
  custom:
    - name: 'OpenRouter'
      apiKey: '${OPENROUTER_KEY}'
      baseURL: 'https://openrouter.ai/api/v1'
      models:
        default:
          - 'switchpoint/router'
          - 'moonshotai/kimi-k2:free'
          - 'deepseek/deepseek-chat-v3-0324:free'
          - 'deepseek/deepseek-r1-0528:free'
          - 'openai/gpt-4.1'
          - 'openai/o3'
        fetch: true
      titleConvo: true
      titleModel: 'meta-llama/llama-3-70b-instruct'
      dropParams: ['stop']
      modelDisplayLabel: 'OpenRouter'

    - name: 'AiHubMix'
      apiKey: '${AIHUBMIX_KEY}'
      baseURL: 'https://aihubmix.com/v1'
      models:
        default:
          - 'moonshotai/kimi-k2:free'
          - 'deepseek/deepseek-chat-v3-0324:free'
          - 'deepseek/deepseek-r1-0528:free'
        fetch: true
      titleConvo: true
      titleModel: 'meta-llama/llama-3-70b-instruct'
      dropParams: ['stop']
      modelDisplayLabel: 'AiHubMix'

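# Note on the custom endpoints above: `fetch: true` asks LibreChat to pull the model list from
# each provider's /models endpoint, with the `default:` list serving as the starting/fallback
# set, and `dropParams: ['stop']` removes the stop parameter from requests, which OpenRouter
# recommends because its models use a variety of stop tokens.
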
fileConfig:
  # endpoints:
  #   assistants:
  #     fileLimit: 5
  #     fileSizeLimit: 10  # Maximum size for an individual file in MB
  #     totalSizeLimit: 50  # Maximum total size for all files in a single request in MB
  #     supportedMimeTypes:
  #       - "image/.*"
  #       - "application/pdf"
  #   openAI:
  #     disabled: true  # Disables file uploading to the OpenAI endpoint
  #   default:
  #     totalSizeLimit: 20
  #   YourCustomEndpointName:
  #     fileLimit: 2
  #     fileSizeLimit: 5
  # serverFileSizeLimit: 100  # Global server file size limit in MB
  # avatarSizeLimit: 2  # Limit for user avatar image size in MB
  # imageGeneration: # Image Gen settings, either percentage or px
  #   percentage: 100
  #   px: 1024
  # Client-side image resizing to prevent upload errors
  clientImageResize:
    enabled: true
    maxWidth: 1900
    maxHeight: 1900
    quality: 0.92

# Memory configuration for user memories
memory:
  # (optional) Disable memory functionality
  disabled: false
  # (optional) Restrict memory keys to specific values to limit memory storage and improve consistency
  validKeys: ["preferences", "work_info", "personal_info", "skills", "interests", "context"]
  # (optional) Maximum token limit for memory storage (not yet implemented for token counting)
  tokenLimit: 10000
  # (optional) Enable personalization features (defaults to true if memory is configured)
  # When false, users will not see the Personalization tab in settings
  personalize: true
  # Memory agent configuration - either use an existing agent by ID or define inline
  agent:
    # Option 1: Use existing agent by ID
    # id: "your-memory-agent-id"
    # Option 2: Define agent inline
    provider: "openrouter"
    model: "openai/gpt-4o-mini"
    instructions: "You are a memory management assistant. Store and manage user information accurately."
    model_parameters:
      temperature: 0.1
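
# Note on the inline memory agent: `provider` is expected to name a configured endpoint; for a
# custom endpoint that is the `name` defined under endpoints.custom ('OpenRouter' above). Whether
# the lowercase "openrouter" resolves to that endpoint is an assumption worth verifying if memory
# requests fail.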