Add full Docker setup so the app runs with a single 'docker compose up':
- Dockerfile: multi-stage build (node:22-alpine) for the SvelteKit app
- docker-compose.yml: three services:
1. ollama: runs Ollama server with persistent volume for models
2. model-init: one-shot container that pulls the configured model
after Ollama is healthy, then exits
3. app: the SvelteKit app, starts only after model-init succeeds
- .env.docker: set OLLAMA_MODEL to control which model is pulled
- .dockerignore: keeps image lean
- Switched adapter-auto to adapter-node (required for Docker/Node hosting)
- Updated README with Docker and local dev instructions
Usage:
docker compose up # default: llama3
OLLAMA_MODEL=gemma2 docker compose up # any Ollama model
48 lines
1.1 KiB
YAML
# docker-compose.yml — full stack in one command: `docker compose up`.
# Services:
#   ollama     — Ollama server with a persistent volume for models.
#   model-init — one-shot job that pulls ${OLLAMA_MODEL:-llama3} once
#                ollama reports healthy, then exits.
#   app        — the SvelteKit app; starts only after model-init succeeds.
services:
  ollama:
    image: ollama/ollama:latest
    container_name: english-styler-ollama
    ports:
      - "11434:11434"
    volumes:
      # Persist downloaded models across container restarts.
      - ollama-data:/root/.ollama
    healthcheck:
      # `ollama list` only succeeds once the API server accepts requests.
      test: ["CMD", "ollama", "list"]
      interval: 5s
      timeout: 3s
      retries: 30
      start_period: 5s
    restart: unless-stopped

  model-init:
    image: ollama/ollama:latest
    container_name: english-styler-model-init
    depends_on:
      ollama:
        condition: service_healthy
    environment:
      # Point the `ollama` CLI at the server container, not a local daemon.
      OLLAMA_HOST: http://ollama:11434
    # NOTE(review): ${OLLAMA_MODEL:-llama3} is interpolated by Compose from
    # the host environment or a `.env` file; a `.env.docker` file is NOT read
    # automatically — confirm an `env_file:` / `--env-file` hookup if that
    # file is meant to drive the model choice.
    entrypoint: >
      sh -c "
      echo 'Pulling Ollama model: ${OLLAMA_MODEL:-llama3}' &&
      ollama pull ${OLLAMA_MODEL:-llama3} &&
      echo 'Model ready ✅'
      "
    restart: "no"  # quoted: bare `no` is parsed as boolean false in YAML 1.1

  app:
    build: .
    container_name: english-styler-app
    ports:
      - "3000:3000"
    depends_on:
      model-init:
        condition: service_completed_successfully
    environment:
      # The app talks to Ollama through its OpenAI-compatible endpoint.
      OPENAI_BASE_URL: http://ollama:11434/v1
      OPENAI_API_KEY: ollama  # any non-empty value; Ollama ignores it
      OPENAI_MODEL: ${OLLAMA_MODEL:-llama3}
    restart: unless-stopped

volumes:
  # Named volume holding pulled Ollama models.
  ollama-data: