5.7 KiB
Configuration Reference
Complete reference for config.json configuration options.
Overview
The configuration file uses JSON format with support for:
- Path expansion (`~` expands to home directory)
- Type validation via Pydantic models
- Environment-specific overrides
Schema Validation
Validate your config against the schema:
python -c "from companion.config import load_config; load_config('config.json')"
Or use the JSON Schema directly: config-schema.json
Configuration Sections
companion
Core companion personality and behavior settings.
{
"companion": {
"name": "SAN",
"persona": {
"role": "companion",
"tone": "reflective",
"style": "questioning",
"boundaries": [
"does_not_impersonate_user",
"no_future_predictions",
"no_medical_or_legal_advice"
]
},
"memory": {
"session_turns": 20,
"persistent_store": "~/.companion/memory.db",
"summarize_after": 10
},
"chat": {
"streaming": true,
"max_response_tokens": 2048,
"default_temperature": 0.7,
"allow_temperature_override": true
}
}
}
Fields
| Field | Type | Default | Description |
|---|---|---|---|
| `name` | string | `"SAN"` | Display name for the companion |
| `persona.role` | string | `"companion"` | Role description (companion/advisor/reflector) |
| `persona.tone` | string | `"reflective"` | Communication tone (reflective/supportive/analytical) |
| `persona.style` | string | `"questioning"` | Interaction style (questioning/supportive/direct) |
| `persona.boundaries` | string[] | `[...]` | Behavioral guardrails |
| `memory.session_turns` | int | `20` | Messages to keep in context |
| `memory.persistent_store` | string | `"~/.companion/memory.db"` | SQLite database path |
| `memory.summarize_after` | int | `10` | Summarize history after N turns |
| `chat.streaming` | bool | `true` | Stream responses in real-time |
| `chat.max_response_tokens` | int | `2048` | Max tokens per response |
| `chat.default_temperature` | float | `0.7` | Creativity (0.0=deterministic, 2.0=creative) |
| `chat.allow_temperature_override` | bool | `true` | Let users adjust temperature |
vault
Obsidian vault indexing configuration.
{
"vault": {
"path": "~/KnowledgeVault/Default",
"indexing": {
"auto_sync": true,
"auto_sync_interval_minutes": 1440,
"watch_fs_events": true,
"file_patterns": ["*.md"],
"deny_dirs": [".obsidian", ".trash", "zzz-Archive", ".git"],
"deny_patterns": ["*.tmp", "*.bak", "*conflict*"]
},
"chunking_rules": {
"default": {
"strategy": "sliding_window",
"chunk_size": 500,
"chunk_overlap": 100
},
"Journal/**": {
"strategy": "section",
"section_tags": ["#DayInShort", "#mentalhealth", "#work"],
"chunk_size": 300,
"chunk_overlap": 50
}
}
}
}
rag
RAG (Retrieval-Augmented Generation) engine configuration.
{
"rag": {
"embedding": {
"provider": "ollama",
"model": "mxbai-embed-large",
"base_url": "http://localhost:11434",
"dimensions": 1024,
"batch_size": 32
},
"vector_store": {
"type": "lancedb",
"path": "~/.companion/vectors.lance"
},
"search": {
"default_top_k": 8,
"max_top_k": 20,
"similarity_threshold": 0.75,
"hybrid_search": {
"enabled": true,
"keyword_weight": 0.3,
"semantic_weight": 0.7
},
"filters": {
"date_range_enabled": true,
"tag_filter_enabled": true,
"directory_filter_enabled": true
}
}
}
}
model
LLM configuration for inference and fine-tuning.
{
"model": {
"inference": {
"backend": "llama.cpp",
"model_path": "~/.companion/models/companion-7b-q4.gguf",
"context_length": 8192,
"gpu_layers": 35,
"batch_size": 512,
"threads": 8
},
"fine_tuning": {
"base_model": "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit",
"output_dir": "~/.companion/training",
"lora_rank": 16,
"lora_alpha": 32,
"learning_rate": 0.0002,
"batch_size": 4,
"gradient_accumulation_steps": 4,
"num_epochs": 3,
"warmup_steps": 100,
"save_steps": 500,
"eval_steps": 250,
"training_data_path": "~/.companion/training_data/",
"validation_split": 0.1
},
"retrain_schedule": {
"auto_reminder": true,
"default_interval_days": 90,
"reminder_channels": ["chat_stream", "log"]
}
}
}
api
FastAPI backend configuration.
{
"api": {
"host": "127.0.0.1",
"port": 7373,
"cors_origins": ["http://localhost:5173"],
"auth": {
"enabled": false
}
}
}
ui
Web UI configuration.
{
"ui": {
"web": {
"enabled": true,
"theme": "obsidian",
"features": {
"streaming": true,
"citations": true,
"source_preview": true
}
},
"cli": {
"enabled": true,
"rich_output": true
}
}
}
logging
Logging configuration.
{
"logging": {
"level": "INFO",
"file": "~/.companion/logs/companion.log",
"max_size_mb": 100,
"backup_count": 5
}
}
security
Security and privacy settings.
{
"security": {
"local_only": true,
"vault_path_traversal_check": true,
"sensitive_content_detection": true,
"sensitive_patterns": [
"#mentalhealth",
"#physicalhealth",
"#finance",
"#Relations"
],
"require_confirmation_for_external_apis": true
}
}
Full Example
See config.json for a complete working configuration.