// File: monthlytracker/src/lib/ollama.ts (133 lines, 3.6 KiB, TypeScript)
/**
 * Thrown when the local Ollama runtime cannot be reached or rejects a
 * request. Lets callers distinguish "local AI is offline" from other bugs.
 */
export class OllamaUnavailableError extends Error {
  constructor(message: string = "Local AI runtime is unavailable.") {
    super(message);
    // Set an explicit name so logs/instanceof-free checks read correctly.
    this.name = "OllamaUnavailableError";
  }
}
/**
 * Snapshot of the local Ollama daemon's state as seen by this app,
 * suitable for driving a status UI.
 */
export type OllamaStatus = {
available: boolean; // daemon answered the /api/tags probe
configuredModel: string; // model tag from OLLAMA_MODEL (or the built-in default)
configuredUrl: string; // base URL from OLLAMA_URL (or the built-in default)
installedModels: string[]; // model names reported by the daemon; empty when unreachable
modelReady: boolean; // whether configuredModel is among installedModels
message: string; // human-readable summary of the above
};
// Input for generateOllamaJson; `model` overrides the env-configured default.
type GenerateJsonInput = {
prompt: string;
model?: string;
};
/**
 * Reads the Ollama connection settings from the environment, falling back
 * to the conventional local defaults.
 *
 * Fixes over the previous version: strips ALL trailing slashes (not just
 * one) so `${baseUrl}/api/...` never doubles up, and treats an empty or
 * whitespace-only env var the same as an unset one.
 *
 * @returns `baseUrl` (no trailing slash) and the configured `model` tag.
 */
function getOllamaConfig() {
  const rawUrl = process.env.OLLAMA_URL?.trim();
  const rawModel = process.env.OLLAMA_MODEL?.trim();
  return {
    // `||` (not `??`) so an empty string also falls back to the default.
    baseUrl: (rawUrl || "http://127.0.0.1:11434").replace(/\/+$/, ""),
    model: rawModel || "qwen3.5:9b",
  };
}
/**
 * Probes the local Ollama daemon via /api/tags and reports whether the
 * configured model is installed.
 *
 * Never throws: connectivity problems are folded into
 * `{ available: false }` so callers can render a status UI directly.
 *
 * @returns a full {@link OllamaStatus} snapshot.
 */
export async function getOllamaStatus(): Promise<OllamaStatus> {
  const { baseUrl, model } = getOllamaConfig();
  try {
    const response = await fetch(`${baseUrl}/api/tags`, {
      method: "GET",
      headers: { "Content-Type": "application/json" },
      cache: "no-store", // always re-probe; status must not be cached
    });
    if (!response.ok) {
      throw new OllamaUnavailableError(`Ollama status request failed with status ${response.status}.`);
    }
    const payload = (await response.json()) as { models?: Array<{ name?: string }> };
    const installedModels = (payload.models ?? [])
      .map((entry) => entry.name)
      .filter((name): name is string => Boolean(name));
    // Ollama reports models with an explicit tag (e.g. "llama3:latest"),
    // and an untagged model name implies ":latest" — so a configured
    // "llama3" must match an installed "llama3:latest" as well.
    const modelReady = installedModels.some(
      (name) => name === model || (!model.includes(":") && name === `${model}:latest`),
    );
    return {
      available: true,
      configuredModel: model,
      configuredUrl: baseUrl,
      installedModels,
      modelReady,
      message: modelReady
        ? `Ollama is reachable and ${model} is ready.`
        : `Ollama is reachable, but ${model} is not pulled yet.`,
    };
  } catch (error) {
    // Keep the specific HTTP-failure detail; any other error (network,
    // bad JSON) is reported generically as "not reachable".
    const message =
      error instanceof OllamaUnavailableError
        ? error.message
        : "Ollama is not reachable at the configured URL.";
    return {
      available: false,
      configuredModel: model,
      configuredUrl: baseUrl,
      installedModels: [],
      modelReady: false,
      message,
    };
  }
}
/**
 * Downloads the configured model into the local Ollama runtime via a
 * non-streaming /api/pull request so it can be used offline.
 *
 * @returns the model tag and a human-readable confirmation message.
 * @throws OllamaUnavailableError when the daemon is unreachable or the
 *   pull request is rejected with a non-2xx status.
 */
export async function pullConfiguredOllamaModel() {
  const { baseUrl, model } = getOllamaConfig();

  // Network-level failure means the daemon itself is down; map it to the
  // project's dedicated error type.
  const requestPull = async (): Promise<Response> => {
    try {
      return await fetch(`${baseUrl}/api/pull`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ model, stream: false }),
      });
    } catch {
      throw new OllamaUnavailableError("Ollama is not reachable at the configured URL.");
    }
  };

  const response = await requestPull();
  if (!response.ok) {
    throw new OllamaUnavailableError(`Ollama pull failed with status ${response.status}.`);
  }

  return {
    model,
    message: `${model} is available for offline use.`,
  };
}
/**
 * Sends a prompt to Ollama's /api/generate endpoint in JSON mode and
 * parses the model's reply as `T`.
 *
 * NOTE(review): `T` is not validated at runtime — the parsed JSON is
 * trusted to match the caller's type.
 *
 * @param prompt - the prompt text sent to the model.
 * @param model - optional override for the env-configured model tag.
 * @throws OllamaUnavailableError when the daemon is unreachable, replies
 *   with a non-2xx status, or returns an empty body.
 */
export async function generateOllamaJson<T>({ prompt, model }: GenerateJsonInput): Promise<T> {
  const { baseUrl, model: configuredModel } = getOllamaConfig();
  const requestBody = {
    model: model ?? configuredModel,
    format: "json", // ask Ollama to constrain output to valid JSON
    stream: false,
    prompt,
  };

  let response: Response;
  try {
    response = await fetch(`${baseUrl}/api/generate`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(requestBody),
    });
  } catch {
    throw new OllamaUnavailableError("Ollama is not reachable at the configured URL.");
  }

  if (!response.ok) {
    throw new OllamaUnavailableError(`Ollama request failed with status ${response.status}.`);
  }

  const payload = (await response.json()) as { response?: string; thinking?: string };
  // Fall back to the `thinking` field when `response` is blank/whitespace.
  const jsonText = payload.response?.trim() ? payload.response : payload.thinking;
  if (!jsonText) {
    throw new OllamaUnavailableError("Ollama returned an empty response.");
  }
  return JSON.parse(jsonText) as T;
}