Add Ollama status checks and Docker deployment
This commit is contained in:
9
.dockerignore
Normal file
9
.dockerignore
Normal file
@@ -0,0 +1,9 @@
|
||||
.git
|
||||
.next
|
||||
node_modules
|
||||
npm-debug.log
|
||||
.env
|
||||
.env.local
|
||||
prisma/dev.db
|
||||
prisma/dev.db-journal
|
||||
coverage
|
||||
20
Dockerfile
Normal file
20
Dockerfile
Normal file
@@ -0,0 +1,20 @@
|
||||
# Build stage: install dependencies, generate the Prisma client, and build the Next.js app.
FROM node:22-bookworm-slim AS builder

WORKDIR /app

# Copy manifests and the Prisma schema first so the dependency install layer is
# cached independently of application source changes.
COPY package*.json ./
COPY prisma ./prisma
RUN npm ci

COPY . .
RUN npm run prisma:generate && npm run build

# Runtime stage: same base image, production configuration.
FROM node:22-bookworm-slim AS runner

WORKDIR /app
ENV NODE_ENV=production
ENV PORT=3000

# NOTE(review): this copies the full build tree, including devDependencies in
# node_modules; a pruned production install (`npm ci --omit=dev`) would shrink
# the final image considerably.
COPY --from=builder /app /app

EXPOSE 3000

# Apply pending migrations, then start the server bound to all interfaces so
# the published port is reachable from outside the container.
CMD ["sh", "-c", "npx prisma migrate deploy && npm run start -- --hostname 0.0.0.0"]
|
||||
51
README.md
Normal file
51
README.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# Monthy Tracker
|
||||
|
||||
Private monthly expense tracking with local-first storage, offline category suggestions, and offline monthly insights via `Ollama`.
|
||||
|
||||
## Local app
|
||||
|
||||
1. Install dependencies:
|
||||
|
||||
```bash
|
||||
npm install
|
||||
```
|
||||
|
||||
2. Create your env config from `.env.example` and adjust the local runtime settings as needed:
|
||||
|
||||
```bash
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
3. Apply migrations and start the app:
|
||||
|
||||
```bash
|
||||
npx prisma migrate deploy
|
||||
npm run dev
|
||||
```
|
||||
|
||||
4. Keep `Ollama` running with the configured model:
|
||||
|
||||
```bash
|
||||
ollama serve
|
||||
ollama pull qwen3.5:9b
|
||||
```
|
||||
|
||||
## Docker Compose
|
||||
|
||||
Start both the app and `Ollama` together:
|
||||
|
||||
```bash
|
||||
docker compose up --build
|
||||
```
|
||||
|
||||
This compose stack will:
|
||||
- start `Ollama`
|
||||
- pull `qwen3.5:9b` through the `ollama-init` service
|
||||
- start the Next.js app on `http://localhost:3000`
|
||||
- persist the SQLite database and pulled model with named Docker volumes
|
||||
|
||||
## Environment
|
||||
|
||||
- `DATABASE_URL` - Prisma SQLite connection string
|
||||
- `OLLAMA_URL` - local or container Ollama base URL
|
||||
- `OLLAMA_MODEL` - selected model tag, default `qwen3.5:9b`
|
||||
50
docker-compose.yml
Normal file
50
docker-compose.yml
Normal file
@@ -0,0 +1,50 @@
|
||||
services:
  # Local LLM runtime used for offline insights and category suggestions.
  ollama:
    image: ollama/ollama:latest
    container_name: monthytracker-ollama
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama   # persist pulled models across restarts
    healthcheck:
      # `ollama list` succeeds only once the server accepts requests.
      test: ["CMD", "ollama", "list"]
      interval: 15s
      timeout: 10s
      retries: 20
      start_period: 20s

  # One-shot job: pulls the configured model into the shared volume, then exits.
  ollama-init:
    image: ollama/ollama:latest
    depends_on:
      ollama:
        condition: service_healthy
    environment:
      # Point the CLI at the ollama service instead of a local daemon.
      OLLAMA_HOST: http://ollama:11434
      OLLAMA_MODEL: ${OLLAMA_MODEL:-qwen3.5:9b}
    entrypoint: ["/bin/sh", "-c"]
    command: "ollama pull ${OLLAMA_MODEL:-qwen3.5:9b}"
    volumes:
      - ollama_data:/root/.ollama
    restart: "no"

  # The Next.js app; starts only after Ollama is healthy and the model pull
  # job has completed successfully.
  app:
    build:
      context: .
    container_name: monthytracker-app
    depends_on:
      ollama:
        condition: service_healthy
      ollama-init:
        condition: service_completed_successfully
    environment:
      DATABASE_URL: file:/data/dev.db   # SQLite file on the app_data volume
      OLLAMA_URL: http://ollama:11434/
      OLLAMA_MODEL: ${OLLAMA_MODEL:-qwen3.5:9b}
    ports:
      - "3000:3000"
    volumes:
      - app_data:/data

volumes:
  ollama_data:   # pulled Ollama models
  app_data:      # SQLite database
|
||||
@@ -1,7 +1,7 @@
|
||||
## 1. Project setup
|
||||
|
||||
- [x] 1.1 Scaffold the `Next.js` app with TypeScript, linting, and baseline project configuration.
|
||||
- [x] 1.2 Add runtime dependencies for Prisma, SQLite, validation, charts, and `OpenAI` integration.
|
||||
- [x] 1.2 Add runtime dependencies for Prisma, SQLite, validation, charts, and offline AI integration.
|
||||
- [x] 1.3 Add development dependencies and scripts for testing, Prisma generation, and local development.
|
||||
- [x] 1.4 Add base environment and ignore-file setup for local database and API key configuration.
|
||||
|
||||
@@ -34,5 +34,5 @@
|
||||
|
||||
## 6. Verification
|
||||
|
||||
- [ ] 6.1 Add automated tests for validation, persistence, dashboard aggregates, offline insight fallback behavior, and category suggestion rules.
|
||||
- [x] 6.1 Add automated tests for validation, persistence, dashboard aggregates, offline insight fallback behavior, and category suggestion rules.
|
||||
- [x] 6.2 Verify the primary user flows in the browser, including expense entry, paycheck entry, dashboard updates, category suggestion, and insight generation.
|
||||
|
||||
8
src/app/ollama/status/route.ts
Normal file
8
src/app/ollama/status/route.ts
Normal file
@@ -0,0 +1,8 @@
|
||||
import { NextResponse } from "next/server";
|
||||
|
||||
import { getOllamaStatus } from "@/lib/ollama";
|
||||
|
||||
export async function GET() {
|
||||
const status = await getOllamaStatus();
|
||||
return NextResponse.json(status);
|
||||
}
|
||||
@@ -30,11 +30,21 @@ type DashboardSnapshot = {
|
||||
chart: Array<{ date: string; expensesCents: number; paychecksCents: number }>;
|
||||
};
|
||||
|
||||
// Shape of the payload served by GET /ollama/status.
// NOTE(review): this duplicates the OllamaStatus type exported from
// "@/lib/ollama"; an `import type` would keep the two definitions in sync.
type OllamaStatus = {
  available: boolean;        // the Ollama server answered the status probe
  configuredModel: string;   // model tag the app is configured to use
  configuredUrl: string;     // Ollama base URL the app is configured to use
  installedModels: string[]; // model tags the server reports as installed
  modelReady: boolean;       // configured model is among the installed ones
  message: string;           // human-readable status line shown in the card
};
|
||||
|
||||
export function HomeDashboard() {
|
||||
const [selectedMonth, setSelectedMonth] = useState(getCurrentMonthKey());
|
||||
const [snapshot, setSnapshot] = useState<DashboardSnapshot | null>(null);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const [insightBusy, setInsightBusy] = useState(false);
|
||||
const [ollamaStatus, setOllamaStatus] = useState<OllamaStatus | null>(null);
|
||||
|
||||
async function loadDashboard(month: string) {
|
||||
const response = await fetch(`/dashboard?month=${month}`, { cache: "no-store" });
|
||||
@@ -57,6 +67,16 @@ export function HomeDashboard() {
|
||||
return () => window.clearTimeout(timeoutId);
|
||||
}, [selectedMonth]);
|
||||
|
||||
useEffect(() => {
|
||||
const timeoutId = window.setTimeout(async () => {
|
||||
const response = await fetch("/ollama/status", { cache: "no-store" });
|
||||
const payload = (await response.json()) as OllamaStatus;
|
||||
setOllamaStatus(payload);
|
||||
}, 0);
|
||||
|
||||
return () => window.clearTimeout(timeoutId);
|
||||
}, []);
|
||||
|
||||
const topCategoryLabel = useMemo(() => {
|
||||
if (!snapshot?.comparisons.highestCategory) {
|
||||
return "No category leader yet";
|
||||
@@ -161,6 +181,38 @@ export function HomeDashboard() {
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div className="mt-6 rounded-3xl border border-stone-200 bg-stone-50 px-5 py-4">
|
||||
<div className="flex flex-wrap items-center justify-between gap-3">
|
||||
<div>
|
||||
<p className="text-xs uppercase tracking-[0.2em] text-stone-500">Ollama runtime</p>
|
||||
<p className="mt-2 text-sm font-medium text-stone-700">
|
||||
{ollamaStatus?.message ?? "Checking local runtime status..."}
|
||||
</p>
|
||||
</div>
|
||||
<div className="rounded-full px-3 py-2 text-xs font-semibold uppercase tracking-[0.2em] text-white "
|
||||
data-ready={ollamaStatus?.available && ollamaStatus?.modelReady ? "true" : "false"}
|
||||
>
|
||||
<span
|
||||
className={
|
||||
ollamaStatus?.available && ollamaStatus?.modelReady
|
||||
? "rounded-full bg-emerald-600 px-3 py-2"
|
||||
: "rounded-full bg-stone-500 px-3 py-2"
|
||||
}
|
||||
>
|
||||
{ollamaStatus?.available && ollamaStatus?.modelReady ? "Ready" : "Needs attention"}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
<div className="mt-4 grid gap-3 text-sm text-stone-600 sm:grid-cols-2">
|
||||
<p>
|
||||
Model: <span className="font-semibold text-stone-900">{ollamaStatus?.configuredModel ?? "-"}</span>
|
||||
</p>
|
||||
<p>
|
||||
URL: <span className="font-semibold text-stone-900">{ollamaStatus?.configuredUrl ?? "-"}</span>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{snapshot?.insight ? (
|
||||
<div className="mt-6 grid gap-4 lg:grid-cols-[1.2fr_0.8fr]">
|
||||
<article className="rounded-3xl border border-stone-200 bg-[#fffcf7] px-5 py-5">
|
||||
|
||||
@@ -88,4 +88,51 @@ describe("generateMonthlyInsight", () => {
|
||||
expect(result.insight.summary).toBe("Spending is stable.");
|
||||
expect(result.insight.recommendations).toBe("Keep food spending under watch.");
|
||||
});
|
||||
|
||||
it("coerces array recommendations from the local model", async () => {
|
||||
const { db } = await import("@/lib/db");
|
||||
const { generateMonthlyInsight } = await import("@/lib/insights");
|
||||
|
||||
vi.mocked(db.expense.findMany).mockResolvedValue([
|
||||
{
|
||||
id: "expense-1",
|
||||
title: "Groceries",
|
||||
date: "2026-03-23",
|
||||
amountCents: 3200,
|
||||
category: "FOOD",
|
||||
createdAt: new Date("2026-03-23T10:00:00.000Z"),
|
||||
},
|
||||
{
|
||||
id: "expense-2",
|
||||
title: "Rent",
|
||||
date: "2026-03-02",
|
||||
amountCents: 120000,
|
||||
category: "RENT",
|
||||
createdAt: new Date("2026-03-02T10:00:00.000Z"),
|
||||
},
|
||||
]);
|
||||
vi.mocked(db.paycheck.findMany).mockResolvedValue([
|
||||
{
|
||||
id: "paycheck-1",
|
||||
payDate: "2026-03-01",
|
||||
amountCents: 180000,
|
||||
createdAt: new Date("2026-03-01T10:00:00.000Z"),
|
||||
},
|
||||
]);
|
||||
|
||||
vi.spyOn(globalThis, "fetch").mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => ({
|
||||
response: JSON.stringify({
|
||||
summary: "Spending remains manageable.",
|
||||
recommendations: ["Keep groceries planned.", "Move surplus to savings."],
|
||||
}),
|
||||
}),
|
||||
} as Response);
|
||||
|
||||
const result = await generateMonthlyInsight("2026-03");
|
||||
|
||||
expect(result.insight.recommendations).toContain("Keep groceries planned.");
|
||||
expect(result.insight.recommendations).toContain("Move surplus to savings.");
|
||||
});
|
||||
});
|
||||
|
||||
@@ -59,8 +59,10 @@ function buildInsightPrompt(snapshot: Awaited<ReturnType<typeof getDashboardSnap
|
||||
return [
|
||||
"You are a private offline financial summarizer for a single-user expense tracker.",
|
||||
"Return strict JSON with keys summary and recommendations.",
|
||||
"Keep the tone practical, concise, and non-judgmental.",
|
||||
"Focus on spending patterns, category spikes, paycheck timing, and next-month guidance.",
|
||||
"The summary must be a single compact paragraph of at most 3 sentences.",
|
||||
"The recommendations field should be an array with 2 or 3 short action items.",
|
||||
"Keep the tone practical, concise, specific, and non-judgmental.",
|
||||
"Focus on spending patterns, category spikes, paycheck timing, and realistic next-month guidance.",
|
||||
`Month: ${snapshot.month}`,
|
||||
`Total expenses cents: ${snapshot.totals.expensesCents}`,
|
||||
`Total paychecks cents: ${snapshot.totals.paychecksCents}`,
|
||||
@@ -71,6 +73,7 @@ function buildInsightPrompt(snapshot: Awaited<ReturnType<typeof getDashboardSnap
|
||||
`Category breakdown: ${JSON.stringify(snapshot.categoryBreakdown)}`,
|
||||
`Recent expenses: ${JSON.stringify(snapshot.recentExpenses)}`,
|
||||
`Daily chart points: ${JSON.stringify(snapshot.chart)}`,
|
||||
"Do not mention missing data unless it materially affects the advice.",
|
||||
].join("\n");
|
||||
}
|
||||
|
||||
|
||||
38
src/lib/ollama.test.ts
Normal file
38
src/lib/ollama.test.ts
Normal file
@@ -0,0 +1,38 @@
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";

import { generateOllamaJson, getOllamaStatus } from "@/lib/ollama";

describe("getOllamaStatus", () => {
  afterEach(() => {
    // Drop the fetch spy so each test installs its own stub.
    vi.restoreAllMocks();
  });

  it("reports model readiness when the configured model is installed", async () => {
    // Stub /api/tags to advertise the default model tag.
    vi.spyOn(globalThis, "fetch").mockResolvedValue({
      ok: true,
      json: async () => ({ models: [{ name: "qwen3.5:9b" }] }),
    } as Response);

    const status = await getOllamaStatus();

    expect(status.available).toBe(true);
    expect(status.modelReady).toBe(true);
  });
});

describe("generateOllamaJson", () => {
  afterEach(() => {
    vi.restoreAllMocks();
  });

  it("parses json from the thinking field when response is empty", async () => {
    // Some models put their JSON in `thinking` and leave `response` blank;
    // the client is expected to fall back to parsing that field.
    vi.spyOn(globalThis, "fetch").mockResolvedValue({
      ok: true,
      json: async () => ({ response: "", thinking: '{"summary":"ok","recommendations":"ok"}' }),
    } as Response);

    const result = await generateOllamaJson<{ summary: string; recommendations: string }>({ prompt: "test" });

    expect(result.summary).toBe("ok");
  });
});
|
||||
@@ -5,14 +5,75 @@ export class OllamaUnavailableError extends Error {
|
||||
}
|
||||
}
|
||||
|
||||
export type OllamaStatus = {
|
||||
available: boolean;
|
||||
configuredModel: string;
|
||||
configuredUrl: string;
|
||||
installedModels: string[];
|
||||
modelReady: boolean;
|
||||
message: string;
|
||||
};
|
||||
|
||||
type GenerateJsonInput = {
|
||||
prompt: string;
|
||||
model?: string;
|
||||
};
|
||||
|
||||
function getOllamaConfig() {
|
||||
return {
|
||||
baseUrl: (process.env.OLLAMA_URL ?? "http://127.0.0.1:11434").replace(/\/$/, ""),
|
||||
model: process.env.OLLAMA_MODEL ?? "qwen3.5:9b",
|
||||
};
|
||||
}
|
||||
|
||||
export async function getOllamaStatus(): Promise<OllamaStatus> {
|
||||
const { baseUrl, model } = getOllamaConfig();
|
||||
|
||||
try {
|
||||
const response = await fetch(`${baseUrl}/api/tags`, {
|
||||
method: "GET",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
cache: "no-store",
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new OllamaUnavailableError(`Ollama status request failed with status ${response.status}.`);
|
||||
}
|
||||
|
||||
const payload = (await response.json()) as { models?: Array<{ name?: string }> };
|
||||
const installedModels = (payload.models ?? []).map((entry) => entry.name).filter((name): name is string => Boolean(name));
|
||||
const modelReady = installedModels.includes(model);
|
||||
|
||||
return {
|
||||
available: true,
|
||||
configuredModel: model,
|
||||
configuredUrl: baseUrl,
|
||||
installedModels,
|
||||
modelReady,
|
||||
message: modelReady
|
||||
? `Ollama is reachable and ${model} is ready.`
|
||||
: `Ollama is reachable, but ${model} is not pulled yet.`,
|
||||
};
|
||||
} catch (error) {
|
||||
const message =
|
||||
error instanceof OllamaUnavailableError
|
||||
? error.message
|
||||
: "Ollama is not reachable at the configured URL.";
|
||||
|
||||
return {
|
||||
available: false,
|
||||
configuredModel: model,
|
||||
configuredUrl: baseUrl,
|
||||
installedModels: [],
|
||||
modelReady: false,
|
||||
message,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export async function generateOllamaJson<T>({ prompt, model }: GenerateJsonInput): Promise<T> {
|
||||
const baseUrl = (process.env.OLLAMA_URL ?? "http://127.0.0.1:11434").replace(/\/$/, "");
|
||||
const selectedModel = model ?? process.env.OLLAMA_MODEL ?? "qwen3.5:9b";
|
||||
const { baseUrl, model: configuredModel } = getOllamaConfig();
|
||||
const selectedModel = model ?? configuredModel;
|
||||
|
||||
let response: Response;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user