AI Analysis: make Ollama model configurable and increase request timeout

This commit is contained in:
jeremy bayse
2026-03-22 23:19:07 +01:00
parent de0392bbe7
commit e6df75c1ff

View File

@@ -92,11 +92,12 @@ class AIAnalysisService
// For now, use a mocked response, or a generic endpoint if one is configured.
// Check whether an Ollama endpoint has been configured.
$ollamaUrl = config('services.ollama.url', 'http://localhost:11434/api/generate');
$ollamaUrl = env('OLLAMA_URL', 'http://localhost:11434/api/generate');
$ollamaModel = env('OLLAMA_MODEL', 'mistral');
try {
$response = Http::timeout(120)->post($ollamaUrl, [
'model' => 'mistral',
'model' => $ollamaModel,
'prompt' => $prompt,
'stream' => false,
'format' => 'json'