AI Analysis: configurable model and increased timeout
This commit is contained in:
@@ -92,11 +92,12 @@ class AIAnalysisService
         // For now, I'll use a mocked response or try to use a generic endpoint if configured.
         // I'll check if the user has an Ollama endpoint.

-        $ollamaUrl = config('services.ollama.url', 'http://localhost:11434/api/generate');
+        $ollamaUrl = env('OLLAMA_URL', 'http://localhost:11434/api/generate');
+        $ollamaModel = env('OLLAMA_MODEL', 'mistral');

         try {
             $response = Http::timeout(120)->post($ollamaUrl, [
-                'model' => 'mistral',
+                'model' => $ollamaModel,
                 'prompt' => $prompt,
                 'stream' => false,
                 'format' => 'json'
Reference in New Issue
Block a user