AI Analysis: increase timeout to 120s and add logging

This commit is contained in:
jeremy bayse
2026-03-22 23:18:32 +01:00
parent 4459cbde69
commit de0392bbe7

View File

@@ -95,8 +95,8 @@ class AIAnalysisService
$ollamaUrl = config('services.ollama.url', 'http://localhost:11434/api/generate');
try {
-            $response = Http::timeout(60)->post($ollamaUrl, [
-                'model' => 'mistral', // or llama3
+            $response = Http::timeout(120)->post($ollamaUrl, [
+                'model' => 'mistral',
'prompt' => $prompt,
'stream' => false,
'format' => 'json'
@@ -104,9 +104,11 @@ class AIAnalysisService
if ($response->successful()) {
return json_decode($response->json('response'), true);
} else {
Log::warning("AI Provider Error: HTTP " . $response->status() . " - " . $response->body());
}
} catch (\Exception $e) {
-            Log::error("AI Analysis Call Failed: " . $e->getMessage());
+            Log::error("AI Connection Failed (Ollama): " . $e->getMessage());
}
// Fallback for demo if Ollama is not running