Pas exploitable
Some checks failed
Deployment Verification / deploy-and-test (push) Has been cancelled

This commit is contained in:
Hubert Cornet 2025-02-11 20:12:42 +01:00
parent 19951fc751
commit 85ea74df72
5 changed files with 297 additions and 3 deletions

10
.env
View File

@ -1 +1,9 @@
# Template # Postgre Variables
POSTGRES_IMAGE_TAG=postgres:15.6-alpine
POSTGRE_DB_NAME=n8n_db
POSTGRE_DB_USER=n8n_user
POSTGRE_DB_PASSWORD=P@ssword!Here!123456
POSTGRE_DB_DATA=/var/lib/postgresql/data/n8n
N8N_ENCRYPTION_KEY=super-secret-key
N8N_USER_MANAGEMENT_JWT_SECRET=even-more-secret

View File

@ -0,0 +1,14 @@
{
"createdAt": "2024-02-23T16:27:55.919Z",
"updatedAt": "2024-02-23T16:27:55.918Z",
"id": "sFfERYppMeBnFNeA",
"name": "Local QdrantApi database",
"data": "U2FsdGVkX18bm81Pk18TjmfyKEIbzd91Dt1O8pUPgTxVGk5v1mXp7MlE/3Fl+NHGTMBqa3u7RBS36wTQ74rijQ==",
"type": "qdrantApi",
"nodesAccess": [
{
"nodeType": "@n8n/n8n-nodes-langchain.vectorStoreQdrant",
"date": "2024-02-23T16:27:55.918Z"
}
]
}

View File

@ -0,0 +1,18 @@
{
"createdAt": "2024-02-23T16:26:54.475Z",
"updatedAt": "2024-02-23T16:26:58.928Z",
"id": "xHuYe0MDGOs9IpBW",
"name": "Local Ollama service",
"data": "U2FsdGVkX18BVmjQBCdNKSrjr0GhmcTwMgG/rSWhncWtqOLPT62WnCIktky8RgM1PhH7vMkMc5EuUFIQA/eEZA==",
"type": "ollamaApi",
"nodesAccess": [
{
"nodeType": "@n8n/n8n-nodes-langchain.lmChatOllama",
"date": "2024-02-23T16:26:58.927Z"
},
{
"nodeType": "@n8n/n8n-nodes-langchain.lmOllama",
"date": "2024-02-23T16:26:58.927Z"
}
]
}

View File

@ -0,0 +1,87 @@
{
"createdAt": "2024-02-23T16:58:31.616Z",
"updatedAt": "2024-02-23T16:58:31.616Z",
"id": "srOnR8PAY3u4RSwb",
"name": "Demo workflow",
"active": false,
"nodes": [
{
"parameters": {},
"id": "74003dcd-2ac7-4caa-a1cd-adecc5143c07",
"name": "Chat Trigger",
"type": "@n8n/n8n-nodes-langchain.chatTrigger",
"typeVersion": 1,
"position": [
660,
340
],
"webhookId": "cdb5c076-d458-4b9d-8398-f43bd25059b1"
},
{
"parameters": {},
"id": "ce8c3da4-899c-4cc4-af73-8096c64eec64",
"name": "Basic LLM Chain",
"type": "@n8n/n8n-nodes-langchain.chainLlm",
"typeVersion": 1.3,
"position": [
880,
340
]
},
{
"parameters": {
"model": "llama3.2:latest",
"options": {}
},
"id": "3dee878b-d748-4829-ac0a-cfd6705d31e5",
"name": "Ollama Chat Model",
"type": "@n8n/n8n-nodes-langchain.lmChatOllama",
"typeVersion": 1,
"position": [
900,
560
],
"credentials": {
"ollamaApi": {
"id": "xHuYe0MDGOs9IpBW",
"name": "Local Ollama service"
}
}
}
],
"connections": {
"Chat Trigger": {
"main": [
[
{
"node": "Basic LLM Chain",
"type": "main",
"index": 0
}
]
]
},
"Ollama Chat Model": {
"ai_languageModel": [
[
{
"node": "Basic LLM Chain",
"type": "ai_languageModel",
"index": 0
}
]
]
}
},
"settings": {
"executionOrder": "v1"
},
"staticData": null,
"meta": {
"templateCredsSetupCompleted": true
},
"pinData": {},
"versionId": "4e2affe6-bb1c-4ddc-92f9-dde0b7656796",
"triggerCount": 0,
"tags": []
}

View File

@ -2,12 +2,179 @@
networks: networks:
traefik_front_network: traefik_front_network:
external: true external: true
back_network_: back_network_n8n:
driver: bridge driver: bridge
attachable: true attachable: true
#### SERVICES ###
x-n8n: &service-n8n
image: n8nio/n8n:latest
networks:
- back_network_n8n
- traefik_front_network
environment:
- DB_TYPE=postgresdb
- DB_POSTGRESDB_HOST=n8n-postgres
- DB_POSTGRESDB_USER=${POSTGRE_DB_USER}
- DB_POSTGRESDB_PASSWORD=${POSTGRE_DB_PASSWORD}
- N8N_DIAGNOSTICS_ENABLED=false
- N8N_PERSONALIZATION_ENABLED=false
- N8N_ENCRYPTION_KEY
- N8N_USER_MANAGEMENT_JWT_SECRET
- OLLAMA_HOST=ollama:11434
x-ollama: &service-ollama
image: ollama/ollama:latest
container_name: ollama
networks:
- back_network_n8n
- traefik_front_network
restart: unless-stopped
ports:
- 11434:11434
volumes:
- ollama_storage:/root/.ollama
x-init-ollama: &init-ollama
image: ollama/ollama:latest
networks:
- back_network_n8n
- traefik_front_network
container_name: ollama-pull-llama
volumes:
- ollama_storage:/root/.ollama
entrypoint: /bin/sh
environment:
- OLLAMA_HOST=ollama:11434
command:
- "-c"
# - "sleep 3; ollama pull llama3.2"
- "sleep 3; ollama pull llama3.2; ollama pull nomic-embed-text; ollama pull deepseek-r1:1.5b; ollama pull mistral:7b"
#### SERVICE
services: services:
postgres:
container_name: n8n-postgres
hostname: n8n-postgres
image: ${POSTGRES_IMAGE_TAG}
networks:
- back_network_n8n
restart: unless-stopped
environment:
PGDATA: ${POSTGRE_DB_DATA}
POSTGRES_DB: ${POSTGRE_DB_NAME}
POSTGRES_PASSWORD: ${POSTGRE_DB_PASSWORD}
POSTGRES_USER: ${POSTGRE_DB_USER}
TZ: Europe/Paris
volumes:
- ./data:/var/lib/postgresql/data:rw
healthcheck:
test: [ "CMD", "pg_isready", "-q", "-d", "${POSTGRE_DB_NAME}", "-U", "${POSTGRE_DB_USER}" ]
interval: 10s
timeout: 5s
retries: 3
start_period: 60s
labels:
- "com.centurylinklabs.watchtower.enable=true"
###
n8n-import:
<<: *service-n8n
hostname: n8n-import
container_name: n8n-import
entrypoint: /bin/sh
command:
- "-c"
- "n8n import:credentials --separate --input=/backup/credentials && n8n import:workflow --separate --input=/backup/workflows"
volumes:
- ./backup:/backup
depends_on:
postgres:
condition: service_healthy
###
n8n:
<<: *service-n8n
hostname: n8n
container_name: n8n
restart: unless-stopped
ports:
- 5678:5678
volumes:
- n8n_storage:/home/node/.n8n
- ./backup:/backup
- ./shared:/data/shared
depends_on:
postgres:
condition: service_healthy
n8n-import:
condition: service_completed_successfully
###
qdrant:
image: qdrant/qdrant
hostname: qdrant
container_name: qdrant
networks:
- back_network_n8n
- traefik_front_network
restart: unless-stopped
ports:
- 6333:6333
volumes:
- qdrant_storage:/qdrant/storage
###
ollama-cpu:
profiles: ["cpu"]
<<: *service-ollama
###
ollama-gpu:
profiles: ["gpu-nvidia"]
<<: *service-ollama
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
###
ollama-gpu-amd:
profiles: ["gpu-amd"]
<<: *service-ollama
image: ollama/ollama:rocm
devices:
- "/dev/kfd"
- "/dev/dri"
###
ollama-pull-llama-cpu:
profiles: ["cpu"]
<<: *init-ollama
depends_on:
- ollama-cpu
###
ollama-pull-llama-gpu:
profiles: ["gpu-nvidia"]
<<: *init-ollama
depends_on:
- ollama-gpu
###
ollama-pull-llama-gpu-amd:
profiles: [gpu-amd]
<<: *init-ollama
image: ollama/ollama:rocm
depends_on:
- ollama-gpu-amd
### hello_world ### hello_world
hello_world: hello_world:
container_name: gitea-app container_name: gitea-app