This commit is contained in:
parent
baa96da7c3
commit
38d19036bd
329
.env
329
.env
@ -1 +1,328 @@
|
||||
# Template
|
||||
SERVER_PORT=3001
|
||||
STORAGE_DIR="/app/server/storage"
|
||||
UID='1000'
|
||||
GID='1000'
|
||||
# SIG_KEY='passphrase' # Please generate random string at least 32 chars long.
|
||||
# SIG_SALT='salt' # Please generate random string at least 32 chars long.
|
||||
# JWT_SECRET="my-random-string-for-seeding" # Only needed if AUTH_TOKEN is set. Please generate random string at least 12 chars long.
|
||||
|
||||
###########################################
|
||||
######## LLM API SELECTION ################
|
||||
###########################################
|
||||
LLM_PROVIDER='openai'
|
||||
OPEN_AI_KEY=sk-xxxxxxxx # SECURITY: a real API key was committed here — revoke it immediately and inject the key at deploy time instead of committing it.
|
||||
OPEN_MODEL_PREF='gpt-4o-mini'
|
||||
|
||||
# LLM_PROVIDER='gemini'
|
||||
# GEMINI_API_KEY=
|
||||
# GEMINI_LLM_MODEL_PREF='gemini-pro'
|
||||
|
||||
# LLM_PROVIDER='azure'
|
||||
# AZURE_OPENAI_ENDPOINT=
|
||||
# AZURE_OPENAI_KEY=
|
||||
# OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
|
||||
# EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
|
||||
|
||||
# LLM_PROVIDER='anthropic'
|
||||
# ANTHROPIC_API_KEY=sk-ant-xxxx
|
||||
# ANTHROPIC_MODEL_PREF='claude-2'
|
||||
|
||||
# LLM_PROVIDER='lmstudio'
|
||||
# LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
|
||||
# LMSTUDIO_MODEL_PREF='Loaded from Chat UI' # this is a bug in LMStudio 0.2.17
|
||||
# LMSTUDIO_MODEL_TOKEN_LIMIT=4096
|
||||
|
||||
# LLM_PROVIDER='localai'
|
||||
# LOCAL_AI_BASE_PATH='http://host.docker.internal:8080/v1'
|
||||
# LOCAL_AI_MODEL_PREF='luna-ai-llama2'
|
||||
# LOCAL_AI_MODEL_TOKEN_LIMIT=4096
|
||||
# LOCAL_AI_API_KEY="sk-123abc"
|
||||
|
||||
# LLM_PROVIDER='ollama'
|
||||
# OLLAMA_BASE_PATH='http://host.docker.internal:11434'
|
||||
# OLLAMA_MODEL_PREF='llama2'
|
||||
# OLLAMA_MODEL_TOKEN_LIMIT=4096
|
||||
# OLLAMA_AUTH_TOKEN='your-ollama-auth-token-here (optional, only for ollama running behind auth - Bearer token)'
|
||||
|
||||
# LLM_PROVIDER='togetherai'
|
||||
# TOGETHER_AI_API_KEY='my-together-ai-key'
|
||||
# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
|
||||
|
||||
# LLM_PROVIDER='mistral'
|
||||
# MISTRAL_API_KEY='example-mistral-ai-api-key'
|
||||
# MISTRAL_MODEL_PREF='mistral-tiny'
|
||||
|
||||
# LLM_PROVIDER='perplexity'
|
||||
# PERPLEXITY_API_KEY='my-perplexity-key'
|
||||
# PERPLEXITY_MODEL_PREF='codellama-34b-instruct'
|
||||
|
||||
# LLM_PROVIDER='openrouter'
|
||||
# OPENROUTER_API_KEY='my-openrouter-key'
|
||||
# OPENROUTER_MODEL_PREF='openrouter/auto'
|
||||
|
||||
# LLM_PROVIDER='huggingface'
|
||||
# HUGGING_FACE_LLM_ENDPOINT=https://uuid-here.us-east-1.aws.endpoints.huggingface.cloud
|
||||
# HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
|
||||
# HUGGING_FACE_LLM_TOKEN_LIMIT=8000
|
||||
|
||||
# LLM_PROVIDER='groq'
|
||||
# GROQ_API_KEY=gsk_abcxyz
|
||||
# GROQ_MODEL_PREF=llama3-8b-8192
|
||||
|
||||
# LLM_PROVIDER='koboldcpp'
|
||||
# KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
|
||||
# KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
|
||||
# KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096
|
||||
|
||||
# LLM_PROVIDER='textgenwebui'
|
||||
# TEXT_GEN_WEB_UI_BASE_PATH='http://127.0.0.1:5000/v1'
|
||||
# TEXT_GEN_WEB_UI_TOKEN_LIMIT=4096
|
||||
# TEXT_GEN_WEB_UI_API_KEY='sk-123abc'
|
||||
|
||||
# LLM_PROVIDER='generic-openai'
|
||||
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
|
||||
# GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
|
||||
# GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
|
||||
# GENERIC_OPEN_AI_API_KEY=sk-123abc
|
||||
|
||||
# LLM_PROVIDER='litellm'
|
||||
# LITE_LLM_MODEL_PREF='gpt-3.5-turbo'
|
||||
# LITE_LLM_MODEL_TOKEN_LIMIT=4096
|
||||
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
|
||||
# LITE_LLM_API_KEY='sk-123abc'
|
||||
|
||||
# LLM_PROVIDER='novita'
|
||||
# NOVITA_LLM_API_KEY='your-novita-api-key-here' check on https://novita.ai/settings/key-management
|
||||
# NOVITA_LLM_MODEL_PREF='deepseek/deepseek-r1'
|
||||
|
||||
# LLM_PROVIDER='cohere'
|
||||
# COHERE_API_KEY=
|
||||
# COHERE_MODEL_PREF='command-r'
|
||||
|
||||
# LLM_PROVIDER='bedrock'
|
||||
# AWS_BEDROCK_LLM_ACCESS_KEY_ID=
|
||||
# AWS_BEDROCK_LLM_ACCESS_KEY=
|
||||
# AWS_BEDROCK_LLM_REGION=us-west-2
|
||||
# AWS_BEDROCK_LLM_MODEL_PREFERENCE=meta.llama3-1-8b-instruct-v1:0
|
||||
# AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT=8191
|
||||
|
||||
# LLM_PROVIDER='fireworksai'
|
||||
# FIREWORKS_AI_LLM_API_KEY='my-fireworks-ai-key'
|
||||
# FIREWORKS_AI_LLM_MODEL_PREF='accounts/fireworks/models/llama-v3p1-8b-instruct'
|
||||
|
||||
# LLM_PROVIDER='apipie'
|
||||
# APIPIE_LLM_API_KEY='sk-123abc'
|
||||
# APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
|
||||
|
||||
# LLM_PROVIDER='xai'
|
||||
# XAI_LLM_API_KEY='xai-your-api-key-here'
|
||||
# XAI_LLM_MODEL_PREF='grok-beta'
|
||||
|
||||
# LLM_PROVIDER='nvidia-nim'
|
||||
# NVIDIA_NIM_LLM_BASE_PATH='http://127.0.0.1:8000'
|
||||
# NVIDIA_NIM_LLM_MODEL_PREF='meta/llama-3.2-3b-instruct'
|
||||
|
||||
# LLM_PROVIDER='deepseek'
|
||||
# DEEPSEEK_API_KEY='your-deepseek-api-key-here'
|
||||
# DEEPSEEK_MODEL_PREF='deepseek-chat'
|
||||
|
||||
# LLM_PROVIDER='ppio'
|
||||
# PPIO_API_KEY='your-ppio-api-key-here'
|
||||
# PPIO_MODEL_PREF=deepseek/deepseek-v3/community
|
||||
|
||||
###########################################
|
||||
######## Embedding API SELECTION ##########
|
||||
###########################################
|
||||
# Only used if you are using an LLM that does not natively support embedding (openai or Azure)
|
||||
# EMBEDDING_ENGINE='openai'
|
||||
# OPEN_AI_KEY=sk-xxxx
|
||||
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
|
||||
|
||||
# EMBEDDING_ENGINE='azure'
|
||||
# AZURE_OPENAI_ENDPOINT=
|
||||
# AZURE_OPENAI_KEY=
|
||||
# EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
|
||||
|
||||
# EMBEDDING_ENGINE='localai'
|
||||
# EMBEDDING_BASE_PATH='http://localhost:8080/v1'
|
||||
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
|
||||
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be
|
||||
|
||||
# EMBEDDING_ENGINE='ollama'
|
||||
# EMBEDDING_BASE_PATH='http://host.docker.internal:11434'
|
||||
# EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
|
||||
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
|
||||
|
||||
# EMBEDDING_ENGINE='lmstudio'
|
||||
# EMBEDDING_BASE_PATH='https://host.docker.internal:1234/v1'
|
||||
# EMBEDDING_MODEL_PREF='nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q4_0.gguf'
|
||||
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
|
||||
|
||||
# EMBEDDING_ENGINE='cohere'
|
||||
# COHERE_API_KEY=
|
||||
# EMBEDDING_MODEL_PREF='embed-english-v3.0'
|
||||
|
||||
# EMBEDDING_ENGINE='voyageai'
|
||||
# VOYAGEAI_API_KEY=
|
||||
# EMBEDDING_MODEL_PREF='voyage-large-2-instruct'
|
||||
|
||||
# EMBEDDING_ENGINE='litellm'
|
||||
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
|
||||
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
|
||||
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
|
||||
# LITE_LLM_API_KEY='sk-123abc'
|
||||
|
||||
# EMBEDDING_ENGINE='generic-openai'
|
||||
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
|
||||
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
|
||||
# EMBEDDING_BASE_PATH='http://127.0.0.1:4000'
|
||||
# GENERIC_OPEN_AI_EMBEDDING_API_KEY='sk-123abc'
|
||||
# GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS=500
|
||||
|
||||
# EMBEDDING_ENGINE='gemini'
|
||||
# GEMINI_EMBEDDING_API_KEY=
|
||||
# EMBEDDING_MODEL_PREF='text-embedding-004'
|
||||
|
||||
###########################################
|
||||
######## Vector Database Selection ########
|
||||
###########################################
|
||||
# Enable all below if you are using vector database: Chroma.
|
||||
# VECTOR_DB="chroma"
|
||||
# CHROMA_ENDPOINT='http://host.docker.internal:8000'
|
||||
# CHROMA_API_HEADER="X-Api-Key"
|
||||
# CHROMA_API_KEY="sk-123abc"
|
||||
|
||||
# Enable all below if you are using vector database: Pinecone.
|
||||
# VECTOR_DB="pinecone"
|
||||
# PINECONE_API_KEY=
|
||||
# PINECONE_INDEX=
|
||||
|
||||
# Enable all below if you are using vector database: LanceDB.
|
||||
# VECTOR_DB="lancedb"
|
||||
|
||||
# Enable all below if you are using vector database: Weaviate.
|
||||
# VECTOR_DB="weaviate"
|
||||
# WEAVIATE_ENDPOINT="http://localhost:8080"
|
||||
# WEAVIATE_API_KEY=
|
||||
|
||||
# Enable all below if you are using vector database: Qdrant.
|
||||
# VECTOR_DB="qdrant"
|
||||
# QDRANT_ENDPOINT="http://localhost:6333"
|
||||
# QDRANT_API_KEY=
|
||||
|
||||
# Enable all below if you are using vector database: Milvus.
|
||||
# VECTOR_DB="milvus"
|
||||
# MILVUS_ADDRESS="http://localhost:19530"
|
||||
# MILVUS_USERNAME=
|
||||
# MILVUS_PASSWORD=
|
||||
|
||||
# Enable all below if you are using vector database: Zilliz Cloud.
|
||||
# VECTOR_DB="zilliz"
|
||||
# ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com"
|
||||
# ZILLIZ_API_TOKEN=api-token-here
|
||||
|
||||
# Enable all below if you are using vector database: Astra DB.
|
||||
# VECTOR_DB="astra"
|
||||
# ASTRA_DB_APPLICATION_TOKEN=
|
||||
# ASTRA_DB_ENDPOINT=
|
||||
|
||||
###########################################
|
||||
######## Audio Model Selection ############
|
||||
###########################################
|
||||
# (default) use built-in whisper-small model.
|
||||
# WHISPER_PROVIDER="local"
|
||||
|
||||
# use openai hosted whisper model.
|
||||
# WHISPER_PROVIDER="openai"
|
||||
# OPEN_AI_KEY=sk-xxxxxxxx
|
||||
|
||||
###########################################
|
||||
######## TTS/STT Model Selection ##########
|
||||
###########################################
|
||||
# TTS_PROVIDER="native"
|
||||
|
||||
# TTS_PROVIDER="openai"
|
||||
# TTS_OPEN_AI_KEY=sk-example
|
||||
# TTS_OPEN_AI_VOICE_MODEL=nova
|
||||
|
||||
# TTS_PROVIDER="generic-openai"
|
||||
# TTS_OPEN_AI_COMPATIBLE_KEY=sk-example
|
||||
# TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL=nova
|
||||
# TTS_OPEN_AI_COMPATIBLE_ENDPOINT="https://api.openai.com/v1"
|
||||
|
||||
# TTS_PROVIDER="elevenlabs"
|
||||
# TTS_ELEVEN_LABS_KEY=
|
||||
# TTS_ELEVEN_LABS_VOICE_MODEL=21m00Tcm4TlvDq8ikWAM # Rachel
|
||||
|
||||
# CLOUD DEPLOYMENT VARIABLES ONLY
|
||||
# AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.
|
||||
# DISABLE_TELEMETRY="false"
|
||||
|
||||
###########################################
|
||||
######## PASSWORD COMPLEXITY ##############
|
||||
###########################################
|
||||
# Enforce a password schema for your organization users.
|
||||
# Documentation on how to use https://github.com/kamronbatman/joi-password-complexity
|
||||
# Default is only 8 char minimum
|
||||
# PASSWORDMINCHAR=8
|
||||
# PASSWORDMAXCHAR=250
|
||||
# PASSWORDLOWERCASE=1
|
||||
# PASSWORDUPPERCASE=1
|
||||
# PASSWORDNUMERIC=1
|
||||
# PASSWORDSYMBOL=1
|
||||
# PASSWORDREQUIREMENTS=4
|
||||
|
||||
###########################################
|
||||
######## ENABLE HTTPS SERVER ##############
|
||||
###########################################
|
||||
# By enabling this and providing the path/filename for the key and cert,
|
||||
# the server will use HTTPS instead of HTTP.
|
||||
#ENABLE_HTTPS="true"
|
||||
#HTTPS_CERT_PATH="sslcert/cert.pem"
|
||||
#HTTPS_KEY_PATH="sslcert/key.pem"
|
||||
|
||||
###########################################
|
||||
######## AGENT SERVICE KEYS ###############
|
||||
###########################################
|
||||
|
||||
#------ SEARCH ENGINES -------
|
||||
#=============================
|
||||
#------ Google Search -------- https://programmablesearchengine.google.com/controlpanel/create
|
||||
# AGENT_GSE_KEY=
|
||||
# AGENT_GSE_CTX=
|
||||
|
||||
#------ SearchApi.io ----------- https://www.searchapi.io/
|
||||
# AGENT_SEARCHAPI_API_KEY=
|
||||
# AGENT_SEARCHAPI_ENGINE=google
|
||||
|
||||
#------ Serper.dev ----------- https://serper.dev/
|
||||
# AGENT_SERPER_DEV_KEY=
|
||||
|
||||
#------ Bing Search ----------- https://portal.azure.com/
|
||||
# AGENT_BING_SEARCH_API_KEY=
|
||||
|
||||
#------ Serply.io ----------- https://serply.io/
|
||||
# AGENT_SERPLY_API_KEY=
|
||||
|
||||
#------ SearXNG ----------- https://github.com/searxng/searxng
|
||||
# AGENT_SEARXNG_API_URL=
|
||||
|
||||
#------ Tavily ----------- https://www.tavily.com/
|
||||
# AGENT_TAVILY_API_KEY=
|
||||
|
||||
###########################################
|
||||
######## Other Configurations ############
|
||||
###########################################
|
||||
|
||||
# Disable viewing chat history from the UI and frontend APIs.
|
||||
# See https://docs.anythingllm.com/configuration#disable-view-chat-history for more information.
|
||||
# DISABLE_VIEW_CHAT_HISTORY=1
|
||||
|
||||
# Enable simple SSO passthrough to pre-authenticate users from a third party service.
|
||||
# See https://docs.anythingllm.com/configuration#simple-sso-passthrough for more information.
|
||||
# SIMPLE_SSO_ENABLED=1
|
||||
|
||||
# Specify the target languages for when using OCR to parse images and PDFs.
|
||||
# This is a comma separated list of language codes as a string. Unsupported languages will be ignored.
|
||||
# Default is English. See https://tesseract-ocr.github.io/tessdoc/Data-Files-in-different-versions.html for a list of valid language codes.
|
||||
# TARGET_OCR_LANG=eng,deu,ita,spa,fra,por,rus,nld,tur,hun,pol
|
12
README.md
12
README.md
@ -27,6 +27,18 @@ Modification des labels pour traefik
|
||||
nano docker-compose.yml
|
||||
~~~
|
||||
|
||||
~~~bash
|
||||
export STORAGE_LOCATION=$HOME/anythingllm && \
|
||||
mkdir -p $STORAGE_LOCATION && \
|
||||
touch "$STORAGE_LOCATION/.env" && \
|
||||
docker run -d -p 3001:3001 \
|
||||
--cap-add SYS_ADMIN \
|
||||
-v ${STORAGE_LOCATION}:/app/server/storage \
|
||||
-v ${STORAGE_LOCATION}/.env:/app/server/.env \
|
||||
-e STORAGE_DIR="/app/server/storage" \
|
||||
mintplexlabs/anythingllm
|
||||
~~~
|
||||
|
||||
## Lancement
|
||||
|
||||
~~~bash
|
||||
|
@ -2,41 +2,65 @@
|
||||
networks:
|
||||
traefik_front_network:
|
||||
external: true
|
||||
back_network_:
|
||||
back_network_anythingllm:
|
||||
driver: bridge
|
||||
attachable: true
|
||||
|
||||
#### SERVICES
|
||||
services:
|
||||
### hello_world
|
||||
hello_world:
|
||||
container_name: gitea-app
|
||||
hostname: gitea-app
|
||||
image: hello-world
|
||||
environment:
|
||||
restart: always
|
||||
anythingllm:
|
||||
image: mintplexlabs/anythingllm
|
||||
container_name: anythingllm
|
||||
ports:
|
||||
- "3001:3001"
|
||||
networks:
|
||||
# - back_network_gitea
|
||||
- traefik_front_network
|
||||
cap_add:
|
||||
- SYS_ADMIN
|
||||
environment:
|
||||
# Adjust for your environment
|
||||
- STORAGE_DIR=/app/server/storage
|
||||
- JWT_SECRET="make this a large list of random numbers and letters 20+"
|
||||
- LLM_PROVIDER=openai
|
||||
# - LLM_PROVIDER=ollama
|
||||
# - OLLAMA_BASE_PATH=http://127.0.0.1:11434
|
||||
# - OLLAMA_MODEL_PREF=llama2
|
||||
# - OLLAMA_MODEL_TOKEN_LIMIT=4096
|
||||
- OPEN_AI_KEY=sk-xxxxxxxx # SECURITY: a real API key was committed here — revoke it and supply it via an env_file or secrets mechanism, not the compose file.
|
||||
- OPEN_MODEL_PREF=gpt-4o-mini
|
||||
- EMBEDDING_ENGINE=ollama
|
||||
- EMBEDDING_BASE_PATH=http://127.0.0.1:11434
|
||||
- EMBEDDING_MODEL_PREF=nomic-embed-text:latest
|
||||
- EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
|
||||
- VECTOR_DB=lancedb
|
||||
- WHISPER_PROVIDER=local
|
||||
- TTS_PROVIDER=native
|
||||
- PASSWORDMINCHAR=8
|
||||
# Add any other keys here for services or settings
|
||||
# you can find in the docker/.env.example file
|
||||
volumes:
|
||||
- anythingllm_storage:/app/server/storage
|
||||
restart: always
|
||||
labels:
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- "traefik.enable=true"
|
||||
- "traefik.docker.network=traefik_front_network"
|
||||
# HTTP
|
||||
- "traefik.http.routers.hello-world-http.rule=Host(`hello-world.tips-of-mine.com`)"
|
||||
- "traefik.http.routers.hello-world-http.entrypoints=http"
|
||||
- "traefik.http.routers.hello-world-http.priority=49"
|
||||
# HTTPS
|
||||
- "traefik.http.routers.hello-world-https.rule=Host(`hello-world.tips-of-mine.com`)"
|
||||
- "traefik.http.routers.hello-world-https.entrypoints=https"
|
||||
- "traefik.http.routers.hello-world-https.tls=true"
|
||||
- "traefik.http.routers.hello-world-https.priority=50"
|
||||
- "traefik.http.routers.gitea.service=gitea-https-service"
|
||||
# Middleware
|
||||
# Service
|
||||
# - "traefik.http.services.gitea-https-service.loadbalancer.server.port=3000"
|
||||
# - "traefik.http.services.gitea-https-service.loadbalancer.server.scheme=https"
|
||||
# - "traefik.http.services.gitea-https-service.loadbalancer.healthcheck.hostname=gitea.traefik.me"
|
||||
# - "traefik.http.services.gitea-https-service.loadbalancer.healthcheck.method=foobar"
|
||||
# - "traefik.http.services.gitea-https-service.loadbalancer.healthcheck.timeout=10"
|
||||
# - "traefik.http.services.gitea-https-service.loadbalancer.healthcheck.interval=30"
|
||||
## HTTP
|
||||
- "traefik.http.routers.anythingllm-http.rule=Host(`anythingllm.tips-of-mine.com`)"
|
||||
- "traefik.http.routers.anythingllm-http.entrypoints=http"
|
||||
## HTTPS
|
||||
- "traefik.http.routers.anythingllm-https.rule=Host(`anythingllm.tips-of-mine.com`)"
|
||||
- "traefik.http.routers.anythingllm-https.entrypoints=https"
|
||||
- "traefik.http.routers.anythingllm-https.tls=true"
|
||||
- "traefik.http.routers.anythingllm.service=anythingllm-service"
|
||||
## Middleware
|
||||
## Service
|
||||
- "traefik.http.services.anythingllm-service.loadbalancer.server.port=3001"
|
||||
|
||||
volumes:
|
||||
anythingllm_storage:
|
||||
driver: local
|
||||
driver_opts:
|
||||
type: none
|
||||
o: bind
|
||||
device: ./storage
|
Loading…
x
Reference in New Issue
Block a user