Add complete Laravel LLM Gateway implementation

Core Features:
- Multi-provider support (OpenAI, Anthropic, DeepSeek, Gemini, Mistral)
- Provider service architecture with abstract base class (sketched after this list)
- Dynamic model discovery from provider APIs
- Encrypted per-user provider credentials storage
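
The provider layer centers on a shared abstract base class that each concrete provider (OpenAI, Anthropic, DeepSeek, Gemini, Mistral) extends. A minimal sketch of that contract, with the class name and method signatures inferred from the unit tests rather than copied from this commit:

<?php

namespace App\Services\LLM\Providers;

// Sketch only: the actual base class in this commit may differ in name and detail.
abstract class BaseProvider
{
    public function __construct(protected string $apiKey)
    {
    }

    // Translate the unified message format into the provider-specific payload.
    abstract protected function buildRequest(array $messages, array $options = []): array;

    // Send a chat completion request and return the normalized result.
    abstract public function chatCompletion(array $messages, array $options = []): array;

    // Map a provider response onto a common shape:
    // id, model, content, role, finish_reason, usage.
    abstract public function normalizeResponse(array $rawResponse): array;

    // List the model identifiers this provider accepts.
    abstract public function getSupportedModels(): array;

    // Cost in USD for the given token counts and model, priced per million tokens.
    abstract public function calculateCost(int $inputTokens, int $outputTokens, string $model): float;
}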

Admin Interface:
- Complete admin panel with Livewire components
- User management with CRUD operations
- API key management with testing capabilities (see the component sketch after this list)
- Budget system with limits and reset schedules
- Usage logs with filtering and CSV export
- Model pricing management with cost calculator
- Dashboard with Chart.js visualizations
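
As an illustration of the Livewire side, an API key manager component might look roughly like the following; the namespace, class name, view path, and model used here are assumptions for this sketch, not the exact ones added in this commit:

<?php

namespace App\Livewire\Admin;

use App\Models\UserProviderCredential;
use Livewire\Component;

class ApiKeyManager extends Component
{
    public string $provider = 'openai';
    public string $apiKey = '';

    public function save(): void
    {
        // Encryption of the stored key is assumed to happen via an
        // encrypted cast/mutator on the model.
        UserProviderCredential::updateOrCreate(
            ['user_id' => auth()->id(), 'provider' => $this->provider],
            ['api_key' => $this->apiKey]
        );

        $this->reset('apiKey');
    }

    public function render()
    {
        return view('livewire.admin.api-key-manager');
    }
}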

Database Schema:
- MariaDB migrations for all tables
- User provider credentials (encrypted; see the migration sketch after this list)
- LLM request logging
- Budget tracking and rate limiting
- Model pricing configuration
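
A sketch of what the encrypted credentials migration could look like; the table and column names are assumptions based on the feature list, and encryption is assumed to be handled by an encrypted cast on the Eloquent model rather than in the schema itself:

<?php

use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Support\Facades\Schema;

return new class extends Migration
{
    public function up(): void
    {
        Schema::create('user_provider_credentials', function (Blueprint $table) {
            $table->id();
            $table->foreignId('user_id')->constrained()->cascadeOnDelete();
            $table->string('provider');   // openai, anthropic, deepseek, gemini, mistral
            $table->text('api_key');      // stored encrypted
            $table->timestamps();

            $table->unique(['user_id', 'provider']);
        });
    }

    public function down(): void
    {
        Schema::dropIfExists('user_provider_credentials');
    }
};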

API Implementation:
- OpenAI-compatible endpoints
- Budget checking middleware (sketched after this list)
- Rate limit enforcement
- Request logging jobs
- Cost calculation service
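
The budget check sits in the middleware pipeline ahead of the provider call, so over-budget users are rejected before any tokens are spent. A rough sketch, with the middleware name and the budget fields assumed for illustration:

<?php

namespace App\Http\Middleware;

use Closure;
use Illuminate\Http\Request;

class CheckBudget
{
    public function handle(Request $request, Closure $next)
    {
        // 'budget', 'current_usage' and 'limit' are placeholder names here.
        $budget = $request->user()?->budget;

        if ($budget && $budget->current_usage >= $budget->limit) {
            return response()->json([
                'error' => [
                    'message' => 'Budget limit exceeded',
                    'type'    => 'insufficient_quota',
                ],
            ], 429);
        }

        return $next($request);
    }
}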

Testing:
- Unit tests for all provider services
- Provider factory tests (see the factory sketch after this list)
- Cost calculator tests
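
For context, the provider factory those tests cover presumably maps a provider name onto its concrete service class, roughly like this (the class name and signature are assumptions):

<?php

namespace App\Services\LLM;

use App\Services\LLM\Providers\AnthropicProvider;
use App\Services\LLM\Providers\OpenAIProvider;
use InvalidArgumentException;

class ProviderFactory
{
    public static function make(string $provider, string $apiKey): object
    {
        return match ($provider) {
            'openai'    => new OpenAIProvider($apiKey),
            'anthropic' => new AnthropicProvider($apiKey),
            // ... deepseek, gemini, mistral
            default     => throw new InvalidArgumentException("Unsupported provider: {$provider}"),
        };
    }
}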

Seeders & Configuration:
- Admin user seeder
- Model pricing seeder (see the seeder sketch after this list)
- Configuration files
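
A sketch of the model pricing seeder, using the columns visible in the test file below (prices are per million tokens); the seeded values are examples only:

<?php

namespace Database\Seeders;

use App\Models\ModelPricing;
use Illuminate\Database\Seeder;

class ModelPricingSeeder extends Seeder
{
    public function run(): void
    {
        ModelPricing::updateOrCreate(
            ['provider' => 'openai', 'model' => 'gpt-4o-mini'],
            [
                'input_price_per_million'  => 0.15,
                'output_price_per_million' => 0.60,
                'is_active'                => true,
                'effective_from'           => now(),
            ]
        );
    }
}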

Author: wtrinkl
Date:   2025-11-18 22:18:36 +01:00
Parent: bef36c7ca2
Commit: 6573e15ba4

60 changed files with 5991 additions and 0 deletions

tests/Unit/Services/OpenAIProviderTest.php
@@ -0,0 +1,150 @@
<?php

namespace Tests\Unit\Services;

use Tests\TestCase;
use App\Services\LLM\Providers\OpenAIProvider;
use App\Models\ModelPricing;
use Illuminate\Support\Facades\Http;
use Illuminate\Support\Facades\Cache;
use Illuminate\Foundation\Testing\RefreshDatabase;

class OpenAIProviderTest extends TestCase
{
    use RefreshDatabase;

    private OpenAIProvider $provider;

    protected function setUp(): void
    {
        parent::setUp();
        $this->provider = new OpenAIProvider('test-api-key');
    }

    public function test_builds_request_correctly(): void
    {
        $messages = [
            ['role' => 'user', 'content' => 'Hello']
        ];
        $options = [
            'model' => 'gpt-4o-mini',
            'temperature' => 0.8,
            'max_tokens' => 1000
        ];

        // buildRequest() is protected, so invoke it via reflection
        $reflection = new \ReflectionClass($this->provider);
        $method = $reflection->getMethod('buildRequest');
        $method->setAccessible(true);
        $result = $method->invoke($this->provider, $messages, $options);

        $this->assertEquals('gpt-4o-mini', $result['model']);
        $this->assertEquals(0.8, $result['temperature']);
        $this->assertEquals(1000, $result['max_tokens']);
        $this->assertEquals($messages, $result['messages']);
        $this->assertFalse($result['stream']);
    }

    public function test_normalizes_response_correctly(): void
    {
        $rawResponse = [
            'id' => 'chatcmpl-123',
            'model' => 'gpt-4o-mini',
            'choices' => [
                [
                    'message' => [
                        'role' => 'assistant',
                        'content' => 'Hello! How can I help you?'
                    ],
                    'finish_reason' => 'stop'
                ]
            ],
            'usage' => [
                'prompt_tokens' => 10,
                'completion_tokens' => 20,
                'total_tokens' => 30
            ]
        ];

        $normalized = $this->provider->normalizeResponse($rawResponse);

        $this->assertEquals('chatcmpl-123', $normalized['id']);
        $this->assertEquals('gpt-4o-mini', $normalized['model']);
        $this->assertEquals('Hello! How can I help you?', $normalized['content']);
        $this->assertEquals('assistant', $normalized['role']);
        $this->assertEquals('stop', $normalized['finish_reason']);
        $this->assertEquals(10, $normalized['usage']['prompt_tokens']);
        $this->assertEquals(20, $normalized['usage']['completion_tokens']);
        $this->assertEquals(30, $normalized['usage']['total_tokens']);
    }

    public function test_calculates_cost_correctly(): void
    {
        // Create pricing in database
        ModelPricing::create([
            'provider' => 'openai',
            'model' => 'gpt-4o-mini',
            'input_price_per_million' => 0.15,
            'output_price_per_million' => 0.60,
            'is_active' => true,
            'effective_from' => now()
        ]);
        Cache::flush();

        $cost = $this->provider->calculateCost(1000, 500, 'gpt-4o-mini');

        // Expected: (1000/1M * 0.15) + (500/1M * 0.60) = 0.00015 + 0.0003 = 0.00045
        $this->assertEquals(0.00045, $cost);
    }

    public function test_handles_api_errors(): void
    {
        Http::fake([
            'https://api.openai.com/*' => Http::response(['error' => 'Invalid API key'], 401)
        ]);

        $this->expectException(\App\Exceptions\ProviderException::class);
        $this->expectExceptionMessage('Invalid API key');

        $this->provider->chatCompletion([
            ['role' => 'user', 'content' => 'test']
        ]);
    }

    public function test_retries_on_server_error(): void
    {
        // Two 500 responses followed by a success; the provider is expected to retry.
        Http::fake([
            'https://api.openai.com/*' => Http::sequence()
                ->push(['error' => 'Server error'], 500)
                ->push(['error' => 'Server error'], 500)
                ->push([
                    'id' => 'test-123',
                    'model' => 'gpt-4o-mini',
                    'choices' => [[
                        'message' => ['content' => 'Success', 'role' => 'assistant'],
                        'finish_reason' => 'stop'
                    ]],
                    'usage' => ['prompt_tokens' => 10, 'completion_tokens' => 5, 'total_tokens' => 15]
                ], 200)
        ]);

        $result = $this->provider->chatCompletion([
            ['role' => 'user', 'content' => 'test']
        ]);

        $this->assertArrayHasKey('id', $result);
        $this->assertEquals('test-123', $result['id']);
    }

    public function test_get_supported_models(): void
    {
        $models = $this->provider->getSupportedModels();

        $this->assertIsArray($models);
        $this->assertContains('gpt-4o', $models);
        $this->assertContains('gpt-4o-mini', $models);
        $this->assertContains('gpt-3.5-turbo', $models);
    }
}