From e6df75c1ff26379ffb0e491179bf7e75bed460b8 Mon Sep 17 00:00:00 2001
From: jeremy bayse
Date: Sun, 22 Mar 2026 23:19:07 +0100
Subject: [PATCH] AI Analysis: configurable model and increased timeout

---
 app/Services/AIAnalysisService.php | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/app/Services/AIAnalysisService.php b/app/Services/AIAnalysisService.php
index 9035f47..012a66d 100644
--- a/app/Services/AIAnalysisService.php
+++ b/app/Services/AIAnalysisService.php
@@ -92,11 +92,12 @@ class AIAnalysisService
         // For now, I'll use a mocked response or try to use a generic endpoint if configured.
         // I'll check if the user has an Ollama endpoint.
 
-        $ollamaUrl = config('services.ollama.url', 'http://localhost:11434/api/generate');
+        $ollamaUrl = env('OLLAMA_URL', 'http://localhost:11434/api/generate');
+        $ollamaModel = env('OLLAMA_MODEL', 'mistral');
 
         try {
             $response = Http::timeout(120)->post($ollamaUrl, [
-                'model' => 'mistral',
+                'model' => $ollamaModel,
                 'prompt' => $prompt,
                 'stream' => false,
                 'format' => 'json'