mauroammh
a month ago
Hey guys, is anyone using n8n with Railway running into the following error?
Problem running workflow: Unrecognized node type: n8n-nodes-mcp.mcpClientTool
The community node is installed in n8n.
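One way to confirm the package actually made it into the running instance: n8n keeps UI-installed community packages under its data directory, so you can list them from inside the container. A rough check, assuming a container named n8n and the default data path (on Railway you would need a shell into the service instead):

```sh
# List installed community packages in the n8n data directory
# (~/.n8n/nodes is where n8n stores UI-installed community packages;
# the container name "n8n" is an assumption — adjust to your setup)
docker exec n8n ls /home/node/.n8n/nodes/node_modules
```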
a month ago
Please share your project ID and the full build and deploy logs.
halobartku
a month ago
Same problem here. It seems you need to add N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true, but even after adding it nothing changed.
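For what it's worth, n8n only reads this variable when the process starts, so it has to be in the container's environment at boot, and the service needs a restart/redeploy after you add it. A minimal sketch with plain Docker (Railway service variables land in the environment the same way; the image, port, and volume path below are n8n defaults):

```sh
# Start n8n with community tool usage enabled.
# Image, port 5678, and the /home/node/.n8n volume are n8n defaults;
# the container and volume names here are just examples.
docker run -d --name n8n \
  -p 5678:5678 \
  -e N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true \
  -v n8n_data:/home/node/.n8n \
  n8nio/n8n:latest
```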
knambutt-paloit
a month ago
You need to update `docker-compose.yml` and add this line to the n8n environment:
- N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true

Full `docker-compose.yml`:
```yaml
volumes:
  n8n_storage:
  postgres_storage:
  ollama_storage:
  qdrant_storage:

networks:
  demo:

x-n8n: &service-n8n
  image: n8nio/n8n:latest
  networks: ["demo"]
  environment:
    - DB_TYPE=postgresdb
    - DB_POSTGRESDB_HOST=postgres
    - DB_POSTGRESDB_USER=${POSTGRES_USER}
    - DB_POSTGRESDB_PASSWORD=${POSTGRES_PASSWORD}
    - N8N_DIAGNOSTICS_ENABLED=false
    - N8N_PERSONALIZATION_ENABLED=false
    - N8N_ENCRYPTION_KEY
    - N8N_USER_MANAGEMENT_JWT_SECRET
    - OLLAMA_HOST=ollama:11434
    - N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true

x-ollama: &service-ollama
  image: ollama/ollama:latest
  container_name: ollama
  networks: ["demo"]
  restart: unless-stopped
  ports:
    - 11434:11434
  volumes:
    - ollama_storage:/root/.ollama

x-init-ollama: &init-ollama
  image: ollama/ollama:latest
  networks: ["demo"]
  container_name: ollama-pull-llama
  volumes:
    - ollama_storage:/root/.ollama
  entrypoint: /bin/sh
  environment:
    - OLLAMA_HOST=ollama:11434
  command:
    - "-c"
    - "sleep 3; ollama pull llama3.2"

services:
  postgres:
    image: postgres:16-alpine
    hostname: postgres
    networks: ["demo"]
    restart: unless-stopped
    environment:
      - POSTGRES_USER
      - POSTGRES_PASSWORD
      - POSTGRES_DB
    volumes:
      - postgres_storage:/var/lib/postgresql/data
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "pg_isready -h localhost -U ${POSTGRES_USER} -d ${POSTGRES_DB}",
        ]
      interval: 5s
      timeout: 5s
      retries: 10

  n8n-import:
    <<: *service-n8n
    hostname: n8n-import
    container_name: n8n-import
    entrypoint: /bin/sh
    command:
      - "-c"
      - "n8n import:credentials --separate --input=/backup/credentials && n8n import:workflow --separate --input=/backup/workflows"
    volumes:
      - ./n8n/backup:/backup
    depends_on:
      postgres:
        condition: service_healthy

  n8n:
    <<: *service-n8n
    hostname: n8n
    container_name: n8n
    restart: unless-stopped
    ports:
      - 5678:5678
    volumes:
      - n8n_storage:/home/node/.n8n
      - ./n8n/backup:/backup
      - ./shared:/data/shared
    depends_on:
      postgres:
        condition: service_healthy
      n8n-import:
        condition: service_completed_successfully

  qdrant:
    image: qdrant/qdrant
    hostname: qdrant
    container_name: qdrant
    networks: ["demo"]
    restart: unless-stopped
    ports:
      - 6333:6333
    volumes:
      - qdrant_storage:/qdrant/storage

  ollama-cpu:
    profiles: ["cpu"]
    <<: *service-ollama

  ollama-gpu:
    profiles: ["gpu-nvidia"]
    <<: *service-ollama
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]

  ollama-gpu-amd:
    profiles: ["gpu-amd"]
    <<: *service-ollama
    image: ollama/ollama:rocm
    devices:
      - "/dev/kfd"
      - "/dev/dri"

  ollama-pull-llama-cpu:
    profiles: ["cpu"]
    <<: *init-ollama
    depends_on:
      - ollama-cpu

  ollama-pull-llama-gpu:
    profiles: ["gpu-nvidia"]
    <<: *init-ollama
    depends_on:
      - ollama-gpu

  ollama-pull-llama-gpu-amd:
    profiles: ["gpu-amd"]
    <<: *init-ollama
    image: ollama/ollama:rocm
    depends_on:
      - ollama-gpu-amd
```
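After editing the file, recreate the service so the new variable is picked up, then confirm it is visible inside the container (a quick check, assuming the n8n service name from the compose file above):

```sh
# Recreate the n8n service with the updated environment
docker compose up -d n8n

# Confirm the variable reached the running container
docker compose exec n8n env | grep N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE
```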