Add complete Laravel LLM Gateway implementation
Core Features:
- Multi-provider support (OpenAI, Anthropic, DeepSeek, Gemini, Mistral)
- Provider service architecture with abstract base class
- Dynamic model discovery from provider APIs
- Encrypted per-user provider credentials storage

Admin Interface:
- Complete admin panel with Livewire components
- User management with CRUD operations
- API key management with testing capabilities
- Budget system with limits and reset schedules
- Usage logs with filtering and CSV export
- Model pricing management with cost calculator
- Dashboard with Chart.js visualizations

Database Schema:
- MariaDB migrations for all tables
- User provider credentials (encrypted)
- LLM request logging
- Budget tracking and rate limiting
- Model pricing configuration

API Implementation:
- OpenAI-compatible endpoints
- Budget checking middleware
- Rate limit enforcement
- Request logging jobs
- Cost calculation service

Testing:
- Unit tests for all provider services
- Provider factory tests
- Cost calculator tests

Documentation:
- Admin user seeder
- Model pricing seeder
- Configuration files
This commit is contained in:
132
laravel-app/app/Services/LLM/Providers/GeminiProvider.php
Normal file
132
laravel-app/app/Services/LLM/Providers/GeminiProvider.php
Normal file
@@ -0,0 +1,132 @@
|
||||
<?php
|
||||
|
||||
namespace App\Services\LLM\Providers;
|
||||
|
||||
use App\Models\ModelPricing;
|
||||
use Illuminate\Support\Facades\Cache;
|
||||
|
||||
class GeminiProvider extends AbstractProvider
{
    /** Base URL for Google's Gemini REST API (v1beta). */
    protected string $baseUrl = 'https://generativelanguage.googleapis.com/v1beta';

    /**
     * Translate OpenAI-style chat messages into Gemini's request payload.
     *
     * Gemini differences handled here:
     *  - role 'assistant' becomes 'model';
     *  - there is no 'system' role in `contents`, so system prompts are folded
     *    into user turns;
     *  - consecutive same-role turns (which the remapping above can create)
     *    are merged into a single content entry with multiple parts, because
     *    Gemini's multi-turn API expects alternating user/model roles;
     *  - OpenAI's `stop` may be a string or array; Gemini's `stopSequences`
     *    must be an array of strings.
     *
     * @param array $messages OpenAI-format messages: [['role' => ..., 'content' => ...], ...]
     * @param array $options  Optional keys: temperature, max_tokens, top_p, stop
     * @return array Gemini-format request body
     */
    protected function buildRequest(array $messages, array $options): array
    {
        $contents = [];

        foreach ($messages as $message) {
            $role = $message['role'];

            // Gemini uses 'model' instead of 'assistant' and doesn't support 'system'.
            if ($role === 'assistant') {
                $role = 'model';
            } elseif ($role === 'system') {
                // Convert system messages to user messages with context.
                $role = 'user';
            }

            // Merge into the previous turn when the role repeats; otherwise
            // start a new content entry. This keeps user/model alternation
            // valid even after system→user remapping.
            $lastIndex = count($contents) - 1;
            if ($lastIndex >= 0 && $contents[$lastIndex]['role'] === $role) {
                $contents[$lastIndex]['parts'][] = ['text' => $message['content']];
            } else {
                $contents[] = [
                    'role' => $role,
                    'parts' => [
                        ['text' => $message['content']],
                    ],
                ];
            }
        }

        $request = [
            'contents' => $contents,
        ];

        // Gemini requires stopSequences to be an array; OpenAI callers may
        // pass a bare string, so wrap it.
        $stop = $options['stop'] ?? null;
        if (is_string($stop)) {
            $stop = [$stop];
        }

        // Only include generation settings the caller actually provided.
        $generationConfig = array_filter([
            'temperature' => $options['temperature'] ?? null,
            'maxOutputTokens' => $options['max_tokens'] ?? null,
            'topP' => $options['top_p'] ?? null,
            'stopSequences' => $stop,
        ], fn ($value) => $value !== null);

        if (!empty($generationConfig)) {
            $request['generationConfig'] = $generationConfig;
        }

        return $request;
    }

    /**
     * HTTP headers for Gemini requests.
     *
     * Gemini authenticates via the API key in the URL (see chatCompletion),
     * so no Authorization header is emitted here — only the content type.
     */
    protected function getAuthHeaders(): array
    {
        return [
            'Content-Type' => 'application/json',
        ];
    }

    /**
     * Execute a chat completion against Gemini's generateContent endpoint.
     *
     * @param array $messages OpenAI-format messages
     * @param array $options  Optional keys: model (default 'gemini-pro'),
     *                        temperature, max_tokens, top_p, stop
     * @return array Raw Gemini response (pass to normalizeResponse())
     */
    public function chatCompletion(array $messages, array $options = []): array
    {
        $model = $options['model'] ?? 'gemini-pro';
        $data = $this->buildRequest($messages, $options);

        // Gemini passes the API key as a query parameter; rawurlencode guards
        // against reserved characters in the key corrupting the URL.
        // NOTE(review): query-string keys can leak into access logs — prefer
        // the x-goog-api-key header if AbstractProvider::makeRequest supports
        // per-request headers; confirm before switching.
        $endpoint = "/models/{$model}:generateContent?key=" . rawurlencode($this->apiKey);

        return $this->makeRequest($endpoint, $data);
    }

    /**
     * Normalize a raw Gemini response into the gateway's common shape.
     *
     * Concatenates all text parts of the first candidate and maps Gemini's
     * usageMetadata token counts onto OpenAI-style usage fields. Missing
     * fields degrade to null / 0 rather than throwing.
     *
     * @param array $response Raw Gemini generateContent response
     * @return array Normalized: id, model, content, role, finish_reason, usage, raw_response
     */
    public function normalizeResponse(array $response): array
    {
        $candidate = $response['candidates'][0] ?? [];
        $content = $candidate['content'] ?? [];
        $parts = $content['parts'] ?? [];

        $textContent = '';
        foreach ($parts as $part) {
            $textContent .= $part['text'] ?? '';
        }

        $usageMetadata = $response['usageMetadata'] ?? [];

        return [
            'id' => null, // Gemini doesn't provide a response ID
            'model' => $response['modelVersion'] ?? null,
            'content' => $textContent,
            'role' => 'assistant',
            'finish_reason' => $candidate['finishReason'] ?? null,
            'usage' => [
                'prompt_tokens' => $usageMetadata['promptTokenCount'] ?? 0,
                'completion_tokens' => $usageMetadata['candidatesTokenCount'] ?? 0,
                'total_tokens' => $usageMetadata['totalTokenCount'] ?? 0,
            ],
            'raw_response' => $response,
        ];
    }

    /**
     * Compute request cost in currency units from token counts.
     *
     * Pricing rows (per-million-token input/output rates) are looked up from
     * ModelPricing and cached for an hour. Unknown or inactive models cost 0.0.
     * NOTE(review): Cache::remember does not persist a null result, so a
     * missing pricing row is re-queried on every call — acceptable, but worth
     * knowing.
     *
     * @param int    $promptTokens     Tokens in the prompt
     * @param int    $completionTokens Tokens in the completion
     * @param string $model            Gemini model identifier
     * @return float Cost rounded to 6 decimal places
     */
    public function calculateCost(int $promptTokens, int $completionTokens, string $model): float
    {
        $cacheKey = "pricing:gemini:{$model}";

        $pricing = Cache::remember($cacheKey, 3600, function () use ($model) {
            return ModelPricing::where('provider', 'gemini')
                ->where('model', $model)
                ->where('is_active', true)
                ->first();
        });

        if (!$pricing) {
            return 0.0;
        }

        $promptCost = ($promptTokens / 1_000_000) * $pricing->input_price_per_million;
        $completionCost = ($completionTokens / 1_000_000) * $pricing->output_price_per_million;

        return round($promptCost + $completionCost, 6);
    }

    /**
     * Static fallback list of supported Gemini model identifiers.
     */
    public function getSupportedModels(): array
    {
        return [
            'gemini-pro',
            'gemini-pro-vision',
            'gemini-1.5-pro',
            'gemini-1.5-flash',
            'gemini-ultra',
        ];
    }
}
|
||||
Reference in New Issue
Block a user