Add complete Laravel LLM Gateway implementation
Core Features: - Multi-provider support (OpenAI, Anthropic, DeepSeek, Gemini, Mistral) - Provider service architecture with abstract base class - Dynamic model discovery from provider APIs - Encrypted per-user provider credentials storage Admin Interface: - Complete admin panel with Livewire components - User management with CRUD operations - API key management with testing capabilities - Budget system with limits and reset schedules - Usage logs with filtering and CSV export - Model pricing management with cost calculator - Dashboard with Chart.js visualizations Database Schema: - MariaDB migrations for all tables - User provider credentials (encrypted) - LLM request logging - Budget tracking and rate limiting - Model pricing configuration API Implementation: - OpenAI-compatible endpoints - Budget checking middleware - Rate limit enforcement - Request logging jobs - Cost calculation service Testing: - Unit tests for all provider services - Provider factory tests - Cost calculator tests Documentation: - Admin user seeder - Model pricing seeder - Configuration files
This commit is contained in:
103
laravel-app/app/Services/LLM/Providers/AbstractProvider.php
Normal file
103
laravel-app/app/Services/LLM/Providers/AbstractProvider.php
Normal file
@@ -0,0 +1,103 @@
|
||||
<?php
|
||||
|
||||
namespace App\Services\LLM\Providers;
|
||||
|
||||
use App\Services\LLM\Contracts\ProviderInterface;
|
||||
use App\Exceptions\ProviderException;
|
||||
use Illuminate\Support\Facades\Http;
|
||||
use Illuminate\Support\Facades\Log;
|
||||
|
||||
abstract class AbstractProvider implements ProviderInterface
{
    /** Per-user API key for the upstream provider. */
    protected string $apiKey;

    /** Base URL of the provider's REST API (set by each concrete subclass). */
    protected string $baseUrl;

    /** HTTP request timeout in seconds. */
    protected int $timeout = 60;

    /** Maximum number of attempts for retryable failures. */
    protected int $retryAttempts = 3;

    /** Delay between retries, in milliseconds. */
    protected int $retryDelay = 1000;

    public function __construct(string $apiKey)
    {
        $this->apiKey = $apiKey;
    }

    /**
     * Build the provider-specific request payload.
     */
    abstract protected function buildRequest(array $messages, array $options): array;

    /**
     * Get authorization headers for the provider.
     */
    abstract protected function getAuthHeaders(): array;

    /**
     * Make an HTTP POST request with retry logic.
     *
     * Only transient failures are retried: HTTP 429, 5xx, and transport-level
     * errors (timeouts, connection resets). Client errors such as 401 or other
     * 4xx responses are thrown immediately — retrying an invalid API key or a
     * malformed request can never succeed, and the original retry-everything
     * behavior tripled the latency of every credential failure.
     *
     * @param  string $endpoint path appended to the provider base URL
     * @param  array  $data     JSON request body
     * @return array  decoded JSON response
     * @throws ProviderException on non-retryable errors or after all retries fail
     */
    protected function makeRequest(string $endpoint, array $data): array
    {
        $lastException = null;

        for ($attempt = 1; $attempt <= $this->retryAttempts; $attempt++) {
            try {
                $response = Http::withHeaders($this->getAuthHeaders())
                    ->timeout($this->timeout)
                    ->post($this->baseUrl . $endpoint, $data);

                if ($response->successful()) {
                    return $response->json();
                }

                $status = $response->status();

                if ($status === 429) {
                    // Rate limit: transient, eligible for retry.
                    $lastException = new ProviderException('Rate limit exceeded', 429);
                } elseif ($status >= 500) {
                    // Provider-side failure: transient, eligible for retry.
                    $lastException = new ProviderException('Provider server error', $status);
                } elseif ($status === 401) {
                    // Invalid credentials can never succeed on retry.
                    throw new ProviderException('Invalid API key', 401);
                } else {
                    // Any other 4xx is a caller error — not retryable.
                    throw new ProviderException(
                        'Request failed: ' . $response->body(),
                        $status
                    );
                }
            } catch (ProviderException $e) {
                // Non-retryable error raised above: surface it immediately.
                throw $e;
            } catch (\Exception $e) {
                // Transport-level failure (DNS, timeout, reset): retryable.
                $lastException = $e;
            }

            if ($attempt < $this->retryAttempts) {
                Log::warning("Provider request failed, retrying ({$attempt}/{$this->retryAttempts})", [
                    'provider' => static::class,
                    'error' => $lastException->getMessage()
                ]);
                usleep($this->retryDelay * 1000);
            }
        }

        throw new ProviderException(
            'All retry attempts failed: ' . ($lastException ? $lastException->getMessage() : 'Unknown error'),
            $lastException ? $lastException->getCode() : 500
        );
    }

    /**
     * Validate the API key by making a minimal test completion request.
     *
     * Returns true when the provider accepts the request; any failure
     * (bad key, network error, provider outage) yields false.
     */
    public function validateApiKey(): bool
    {
        try {
            $this->chatCompletion([
                ['role' => 'user', 'content' => 'test']
            ], ['max_tokens' => 5]);
            return true;
        } catch (\Exception $e) {
            return false;
        }
    }
}
|
||||
113
laravel-app/app/Services/LLM/Providers/AnthropicProvider.php
Normal file
113
laravel-app/app/Services/LLM/Providers/AnthropicProvider.php
Normal file
@@ -0,0 +1,113 @@
|
||||
<?php
|
||||
|
||||
namespace App\Services\LLM\Providers;
|
||||
|
||||
use App\Models\ModelPricing;
|
||||
use Illuminate\Support\Facades\Cache;
|
||||
|
||||
class AnthropicProvider extends AbstractProvider
{
    protected string $baseUrl = 'https://api.anthropic.com/v1';

    /** Value for the required `anthropic-version` header. */
    private string $apiVersion = '2023-06-01';

    /**
     * Build the Anthropic Messages API payload.
     *
     * Anthropic takes the system prompt as a separate top-level `system`
     * field rather than as a message. All system messages are collected and
     * concatenated — the original implementation silently kept only the last
     * one. OpenAI-style `stop` may be a plain string, but Anthropic's
     * `stop_sequences` requires an array, so a string is wrapped.
     */
    protected function buildRequest(array $messages, array $options): array
    {
        $systemParts = [];
        $formattedMessages = [];

        foreach ($messages as $message) {
            if ($message['role'] === 'system') {
                $systemParts[] = $message['content'];
            } else {
                $formattedMessages[] = $message;
            }
        }

        $stop = $options['stop'] ?? null;
        if (is_string($stop)) {
            // Anthropic rejects a bare string here; it must be a list.
            $stop = [$stop];
        }

        return array_filter([
            'model' => $options['model'] ?? 'claude-sonnet-4',
            'max_tokens' => $options['max_tokens'] ?? 4096,
            'messages' => $formattedMessages,
            'system' => $systemParts === [] ? null : implode("\n\n", $systemParts),
            'temperature' => $options['temperature'] ?? null,
            'top_p' => $options['top_p'] ?? null,
            'stop_sequences' => $stop,
        ], fn($value) => $value !== null);
    }

    /**
     * Anthropic authenticates with `x-api-key` plus a version header.
     */
    protected function getAuthHeaders(): array
    {
        return [
            'x-api-key' => $this->apiKey,
            'anthropic-version' => $this->apiVersion,
            'Content-Type' => 'application/json',
        ];
    }

    /**
     * Send a chat completion request to the Messages endpoint.
     */
    public function chatCompletion(array $messages, array $options = []): array
    {
        $data = $this->buildRequest($messages, $options);
        return $this->makeRequest('/messages', $data);
    }

    /**
     * Map Anthropic's response shape onto the gateway's OpenAI-like format.
     * Text is assembled by concatenating all `text` content blocks.
     */
    public function normalizeResponse(array $response): array
    {
        $content = '';
        if (isset($response['content']) && is_array($response['content'])) {
            foreach ($response['content'] as $block) {
                if ($block['type'] === 'text') {
                    $content .= $block['text'];
                }
            }
        }

        return [
            'id' => $response['id'] ?? null,
            'model' => $response['model'] ?? null,
            'content' => $content,
            'role' => $response['role'] ?? 'assistant',
            'finish_reason' => $response['stop_reason'] ?? null,
            'usage' => [
                'prompt_tokens' => $response['usage']['input_tokens'] ?? 0,
                'completion_tokens' => $response['usage']['output_tokens'] ?? 0,
                // Anthropic reports input/output separately; total is derived.
                'total_tokens' => ($response['usage']['input_tokens'] ?? 0) + ($response['usage']['output_tokens'] ?? 0),
            ],
            'raw_response' => $response,
        ];
    }

    /**
     * Compute request cost in dollars from cached per-million-token pricing.
     * Returns 0.0 when no active pricing row exists for the model.
     */
    public function calculateCost(int $promptTokens, int $completionTokens, string $model): float
    {
        $cacheKey = "pricing:anthropic:{$model}";

        // Cache the pricing lookup for an hour to avoid a query per request.
        $pricing = Cache::remember($cacheKey, 3600, function () use ($model) {
            return ModelPricing::where('provider', 'anthropic')
                ->where('model', $model)
                ->where('is_active', true)
                ->first();
        });

        if (!$pricing) {
            return 0.0;
        }

        $promptCost = ($promptTokens / 1_000_000) * $pricing->input_price_per_million;
        $completionCost = ($completionTokens / 1_000_000) * $pricing->output_price_per_million;

        return round($promptCost + $completionCost, 6);
    }

    /**
     * Static list of model identifiers this provider accepts.
     */
    public function getSupportedModels(): array
    {
        return [
            'claude-opus-4',
            'claude-sonnet-4',
            'claude-haiku-4',
            'claude-3-opus',
            'claude-3-sonnet',
            'claude-3-haiku',
        ];
    }
}
|
||||
87
laravel-app/app/Services/LLM/Providers/DeepSeekProvider.php
Normal file
87
laravel-app/app/Services/LLM/Providers/DeepSeekProvider.php
Normal file
@@ -0,0 +1,87 @@
|
||||
<?php
|
||||
|
||||
namespace App\Services\LLM\Providers;
|
||||
|
||||
use App\Models\ModelPricing;
|
||||
use Illuminate\Support\Facades\Cache;
|
||||
|
||||
class DeepSeekProvider extends AbstractProvider
{
    protected string $baseUrl = 'https://api.deepseek.com/v1';

    /**
     * Build the DeepSeek chat-completions payload.
     *
     * DeepSeek's API is OpenAI-compatible, so messages pass through
     * unchanged; optional tuning parameters are included only when set.
     */
    protected function buildRequest(array $messages, array $options): array
    {
        $payload = [
            'model' => $options['model'] ?? 'deepseek-chat',
            'messages' => $messages,
            'temperature' => $options['temperature'] ?? 0.7,
        ];

        // Optional knobs: forwarded only when the caller supplied a value.
        foreach (['max_tokens', 'top_p', 'frequency_penalty', 'presence_penalty', 'stop'] as $key) {
            if (($options[$key] ?? null) !== null) {
                $payload[$key] = $options[$key];
            }
        }

        // Streaming is not supported by the gateway; always disable it.
        $payload['stream'] = false;

        return $payload;
    }

    /**
     * DeepSeek uses standard Bearer-token authentication.
     */
    protected function getAuthHeaders(): array
    {
        return [
            'Authorization' => 'Bearer ' . $this->apiKey,
            'Content-Type' => 'application/json',
        ];
    }

    /**
     * Send a chat completion request to the OpenAI-compatible endpoint.
     */
    public function chatCompletion(array $messages, array $options = []): array
    {
        return $this->makeRequest('/chat/completions', $this->buildRequest($messages, $options));
    }

    /**
     * Map the provider response onto the gateway's normalized shape.
     */
    public function normalizeResponse(array $response): array
    {
        $choice = $response['choices'][0] ?? [];
        $message = $choice['message'] ?? [];
        $usage = $response['usage'] ?? [];

        return [
            'id' => $response['id'] ?? null,
            'model' => $response['model'] ?? null,
            'content' => $message['content'] ?? '',
            'role' => $message['role'] ?? 'assistant',
            'finish_reason' => $choice['finish_reason'] ?? null,
            'usage' => [
                'prompt_tokens' => $usage['prompt_tokens'] ?? 0,
                'completion_tokens' => $usage['completion_tokens'] ?? 0,
                'total_tokens' => $usage['total_tokens'] ?? 0,
            ],
            'raw_response' => $response,
        ];
    }

    /**
     * Compute request cost in dollars from cached per-million-token pricing.
     * Returns 0.0 when no active pricing row exists for the model.
     */
    public function calculateCost(int $promptTokens, int $completionTokens, string $model): float
    {
        // Pricing rows change rarely; cache the lookup for an hour.
        $pricing = Cache::remember(
            "pricing:deepseek:{$model}",
            3600,
            fn () => ModelPricing::where('provider', 'deepseek')
                ->where('model', $model)
                ->where('is_active', true)
                ->first()
        );

        if (!$pricing) {
            return 0.0;
        }

        return round(
            ($promptTokens / 1_000_000) * $pricing->input_price_per_million
                + ($completionTokens / 1_000_000) * $pricing->output_price_per_million,
            6
        );
    }

    /**
     * Static list of model identifiers this provider accepts.
     */
    public function getSupportedModels(): array
    {
        return [
            'deepseek-chat',
            'deepseek-coder',
            'deepseek-reasoner',
        ];
    }
}
|
||||
132
laravel-app/app/Services/LLM/Providers/GeminiProvider.php
Normal file
132
laravel-app/app/Services/LLM/Providers/GeminiProvider.php
Normal file
@@ -0,0 +1,132 @@
|
||||
<?php
|
||||
|
||||
namespace App\Services\LLM\Providers;
|
||||
|
||||
use App\Models\ModelPricing;
|
||||
use Illuminate\Support\Facades\Cache;
|
||||
|
||||
class GeminiProvider extends AbstractProvider
{
    protected string $baseUrl = 'https://generativelanguage.googleapis.com/v1beta';

    /**
     * Build the Gemini generateContent payload.
     *
     * Gemini's message format differs from OpenAI's: the assistant role is
     * named 'model', there is no 'system' role (system messages are folded
     * into user turns), and multiturn requests must strictly alternate
     * between 'user' and 'model'. Consecutive messages that map to the same
     * role are therefore merged into a single turn with multiple text parts —
     * without the merge, a system+user pair produced two adjacent user turns,
     * which the API rejects.
     */
    protected function buildRequest(array $messages, array $options): array
    {
        $contents = [];

        foreach ($messages as $message) {
            $role = $message['role'];

            if ($role === 'assistant') {
                $role = 'model';
            } elseif ($role === 'system') {
                // No system role in Gemini; present it as user context.
                $role = 'user';
            }

            $lastIndex = count($contents) - 1;

            if ($lastIndex >= 0 && $contents[$lastIndex]['role'] === $role) {
                // Merge into the previous turn to keep roles alternating.
                $contents[$lastIndex]['parts'][] = ['text' => $message['content']];
            } else {
                $contents[] = [
                    'role' => $role,
                    'parts' => [
                        ['text' => $message['content']]
                    ]
                ];
            }
        }

        $request = [
            'contents' => $contents,
        ];

        // Generation config is only attached when at least one knob is set.
        $generationConfig = array_filter([
            'temperature' => $options['temperature'] ?? null,
            'maxOutputTokens' => $options['max_tokens'] ?? null,
            'topP' => $options['top_p'] ?? null,
            'stopSequences' => $options['stop'] ?? null,
        ], fn($value) => $value !== null);

        if (!empty($generationConfig)) {
            $request['generationConfig'] = $generationConfig;
        }

        return $request;
    }

    /**
     * Gemini authenticates via the `key` query parameter (see chatCompletion),
     * so no Authorization header is needed.
     */
    protected function getAuthHeaders(): array
    {
        return [
            'Content-Type' => 'application/json',
        ];
    }

    /**
     * Send a generateContent request for the chosen model.
     */
    public function chatCompletion(array $messages, array $options = []): array
    {
        $model = $options['model'] ?? 'gemini-pro';
        $data = $this->buildRequest($messages, $options);

        // Gemini uses API key as query parameter
        $endpoint = "/models/{$model}:generateContent?key={$this->apiKey}";

        return $this->makeRequest($endpoint, $data);
    }

    /**
     * Map Gemini's candidate/parts response shape onto the gateway's
     * normalized format, concatenating all text parts of the first candidate.
     */
    public function normalizeResponse(array $response): array
    {
        $candidate = $response['candidates'][0] ?? [];
        $content = $candidate['content'] ?? [];
        $parts = $content['parts'] ?? [];

        $textContent = '';
        foreach ($parts as $part) {
            $textContent .= $part['text'] ?? '';
        }

        $usageMetadata = $response['usageMetadata'] ?? [];

        return [
            'id' => null, // Gemini doesn't provide an ID
            'model' => $response['modelVersion'] ?? null,
            'content' => $textContent,
            'role' => 'assistant',
            'finish_reason' => $candidate['finishReason'] ?? null,
            'usage' => [
                'prompt_tokens' => $usageMetadata['promptTokenCount'] ?? 0,
                'completion_tokens' => $usageMetadata['candidatesTokenCount'] ?? 0,
                'total_tokens' => $usageMetadata['totalTokenCount'] ?? 0,
            ],
            'raw_response' => $response,
        ];
    }

    /**
     * Compute request cost in dollars from cached per-million-token pricing.
     * Returns 0.0 when no active pricing row exists for the model.
     */
    public function calculateCost(int $promptTokens, int $completionTokens, string $model): float
    {
        $cacheKey = "pricing:gemini:{$model}";

        // Cache the pricing lookup for an hour to avoid a query per request.
        $pricing = Cache::remember($cacheKey, 3600, function () use ($model) {
            return ModelPricing::where('provider', 'gemini')
                ->where('model', $model)
                ->where('is_active', true)
                ->first();
        });

        if (!$pricing) {
            return 0.0;
        }

        $promptCost = ($promptTokens / 1_000_000) * $pricing->input_price_per_million;
        $completionCost = ($completionTokens / 1_000_000) * $pricing->output_price_per_million;

        return round($promptCost + $completionCost, 6);
    }

    /**
     * Static list of model identifiers this provider accepts.
     */
    public function getSupportedModels(): array
    {
        return [
            'gemini-pro',
            'gemini-pro-vision',
            'gemini-1.5-pro',
            'gemini-1.5-flash',
            'gemini-ultra',
        ];
    }
}
|
||||
90
laravel-app/app/Services/LLM/Providers/MistralProvider.php
Normal file
90
laravel-app/app/Services/LLM/Providers/MistralProvider.php
Normal file
@@ -0,0 +1,90 @@
|
||||
<?php
|
||||
|
||||
namespace App\Services\LLM\Providers;
|
||||
|
||||
use App\Models\ModelPricing;
|
||||
use Illuminate\Support\Facades\Cache;
|
||||
|
||||
class MistralProvider extends AbstractProvider
{
    protected string $baseUrl = 'https://api.mistral.ai/v1';

    /**
     * Build the Mistral chat-completions payload.
     *
     * Only null values are filtered out — the original filter also stripped
     * `false`, which removed the explicitly set 'stream' => false (and any
     * caller-supplied false option such as safe_prompt). Every other
     * provider in this gateway filters nulls only; this now matches them.
     */
    protected function buildRequest(array $messages, array $options): array
    {
        return array_filter([
            'model' => $options['model'] ?? 'mistral-small-latest',
            'messages' => $messages,
            'temperature' => $options['temperature'] ?? 0.7,
            'max_tokens' => $options['max_tokens'] ?? null,
            'top_p' => $options['top_p'] ?? null,
            // Streaming is not supported by the gateway; always disable it.
            'stream' => false,
            'safe_prompt' => $options['safe_prompt'] ?? false,
            'random_seed' => $options['random_seed'] ?? null,
        ], fn($value) => $value !== null);
    }

    /**
     * Mistral uses standard Bearer-token authentication.
     */
    protected function getAuthHeaders(): array
    {
        return [
            'Authorization' => 'Bearer ' . $this->apiKey,
            'Content-Type' => 'application/json',
        ];
    }

    /**
     * Send a chat completion request to the OpenAI-compatible endpoint.
     */
    public function chatCompletion(array $messages, array $options = []): array
    {
        $data = $this->buildRequest($messages, $options);
        return $this->makeRequest('/chat/completions', $data);
    }

    /**
     * Map the provider response onto the gateway's normalized shape.
     */
    public function normalizeResponse(array $response): array
    {
        return [
            'id' => $response['id'] ?? null,
            'model' => $response['model'] ?? null,
            'content' => $response['choices'][0]['message']['content'] ?? '',
            'role' => $response['choices'][0]['message']['role'] ?? 'assistant',
            'finish_reason' => $response['choices'][0]['finish_reason'] ?? null,
            'usage' => [
                'prompt_tokens' => $response['usage']['prompt_tokens'] ?? 0,
                'completion_tokens' => $response['usage']['completion_tokens'] ?? 0,
                'total_tokens' => $response['usage']['total_tokens'] ?? 0,
            ],
            'raw_response' => $response,
        ];
    }

    /**
     * Compute request cost in dollars from cached per-million-token pricing.
     * Returns 0.0 when no active pricing row exists for the model.
     */
    public function calculateCost(int $promptTokens, int $completionTokens, string $model): float
    {
        $cacheKey = "pricing:mistral:{$model}";

        // Cache the pricing lookup for an hour to avoid a query per request.
        $pricing = Cache::remember($cacheKey, 3600, function () use ($model) {
            return ModelPricing::where('provider', 'mistral')
                ->where('model', $model)
                ->where('is_active', true)
                ->first();
        });

        if (!$pricing) {
            return 0.0;
        }

        $promptCost = ($promptTokens / 1_000_000) * $pricing->input_price_per_million;
        $completionCost = ($completionTokens / 1_000_000) * $pricing->output_price_per_million;

        return round($promptCost + $completionCost, 6);
    }

    /**
     * Static list of model identifiers this provider accepts.
     */
    public function getSupportedModels(): array
    {
        return [
            'mistral-large-latest',
            'mistral-medium-latest',
            'mistral-small-latest',
            'mistral-tiny',
            'open-mistral-7b',
            'open-mixtral-8x7b',
            'open-mixtral-8x22b',
        ];
    }
}
|
||||
89
laravel-app/app/Services/LLM/Providers/OpenAIProvider.php
Normal file
89
laravel-app/app/Services/LLM/Providers/OpenAIProvider.php
Normal file
@@ -0,0 +1,89 @@
|
||||
<?php
|
||||
|
||||
namespace App\Services\LLM\Providers;
|
||||
|
||||
use App\Models\ModelPricing;
|
||||
use Illuminate\Support\Facades\Cache;
|
||||
|
||||
class OpenAIProvider extends AbstractProvider
{
    protected string $baseUrl = 'https://api.openai.com/v1';

    /**
     * Build the OpenAI chat-completions payload.
     *
     * Messages pass through unchanged; optional tuning parameters are
     * included only when the caller supplied a value.
     */
    protected function buildRequest(array $messages, array $options): array
    {
        $body = [
            'model' => $options['model'] ?? 'gpt-4o-mini',
            'messages' => $messages,
            'temperature' => $options['temperature'] ?? 0.7,
        ];

        // Optional knobs: forwarded only when set by the caller.
        foreach (['max_tokens', 'top_p', 'frequency_penalty', 'presence_penalty', 'stop'] as $option) {
            if (($options[$option] ?? null) !== null) {
                $body[$option] = $options[$option];
            }
        }

        // Streaming is not supported by the gateway; always disable it.
        $body['stream'] = false;

        return $body;
    }

    /**
     * OpenAI uses standard Bearer-token authentication.
     */
    protected function getAuthHeaders(): array
    {
        return [
            'Authorization' => 'Bearer ' . $this->apiKey,
            'Content-Type' => 'application/json',
        ];
    }

    /**
     * Send a chat completion request to the chat/completions endpoint.
     */
    public function chatCompletion(array $messages, array $options = []): array
    {
        return $this->makeRequest('/chat/completions', $this->buildRequest($messages, $options));
    }

    /**
     * Map the provider response onto the gateway's normalized shape.
     */
    public function normalizeResponse(array $response): array
    {
        $firstChoice = $response['choices'][0] ?? [];
        $assistantMessage = $firstChoice['message'] ?? [];
        $usage = $response['usage'] ?? [];

        return [
            'id' => $response['id'] ?? null,
            'model' => $response['model'] ?? null,
            'content' => $assistantMessage['content'] ?? '',
            'role' => $assistantMessage['role'] ?? 'assistant',
            'finish_reason' => $firstChoice['finish_reason'] ?? null,
            'usage' => [
                'prompt_tokens' => $usage['prompt_tokens'] ?? 0,
                'completion_tokens' => $usage['completion_tokens'] ?? 0,
                'total_tokens' => $usage['total_tokens'] ?? 0,
            ],
            'raw_response' => $response,
        ];
    }

    /**
     * Compute request cost in dollars from cached per-million-token pricing.
     * Returns 0.0 when no active pricing row exists for the model.
     */
    public function calculateCost(int $promptTokens, int $completionTokens, string $model): float
    {
        // Pricing rows change rarely; cache the lookup for an hour.
        $pricing = Cache::remember(
            "pricing:openai:{$model}",
            3600,
            fn () => ModelPricing::where('provider', 'openai')
                ->where('model', $model)
                ->where('is_active', true)
                ->first()
        );

        if (!$pricing) {
            return 0.0;
        }

        return round(
            ($promptTokens / 1_000_000) * $pricing->input_price_per_million
                + ($completionTokens / 1_000_000) * $pricing->output_price_per_million,
            6
        );
    }

    /**
     * Static list of model identifiers this provider accepts.
     */
    public function getSupportedModels(): array
    {
        return [
            'gpt-4o',
            'gpt-4o-mini',
            'gpt-4-turbo',
            'gpt-4',
            'gpt-3.5-turbo',
        ];
    }
}
|
||||
Reference in New Issue
Block a user