laravel-llm-gateway/laravel-app/app/Http/Controllers/Api/ModelController.php
wtrinkl cb495e18e3 Fix API controllers to use correct database column names
- Fix model_pricing table references (model_id -> model, display_name -> model)
- Fix price columns (output_price_per_1k -> output_price_per_million)
- Add price conversion (per_million / 1000 = per_1k) in all API responses (worked example below)
- Add whereNotNull('model') filters to exclude invalid entries
- Add getModelDisplayName() helper method to all controllers
- Fix AccountController to use gateway_users budget fields directly
- Remove Budget model dependencies from AccountController
- Add custom Scramble server URL configuration for API docs
- Create ScrambleServiceProvider to set correct /api prefix
- Add migration to rename user_id to gateway_user_id in llm_requests
- Add custom ApiGuard for gateway_users authentication
- Update all API controllers: AccountController, ModelController, PricingController, ProviderController
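Worked price-conversion example (illustrative numbers): a model stored at 30.00 USD per million output tokens is returned as 30.00 / 1000 = 0.03 USD per 1k tokens.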

All API endpoints now working correctly:
- GET /api/account
- GET /api/models
- GET /api/pricing
- GET /api/providers/{provider}
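Example request (host and token are illustrative; assumes bearer-token auth via the custom ApiGuard):

```
GET /api/models?provider=openai&sort=price
Authorization: Bearer <gateway-api-key>
```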
2025-11-19 19:36:58 +01:00


<?php
namespace App\Http\Controllers\Api;
use App\Http\Controllers\Controller;
use App\Models\{ModelPricing, LlmRequest};
use Illuminate\Http\JsonResponse;
use Illuminate\Http\Request;
use Illuminate\Support\Carbon;
use Illuminate\Support\Facades\Validator;
class ModelController extends Controller
{
/**
* Get list of all available models
*
* Returns a list of all LLM models available across all providers, with optional
* filtering and sorting capabilities.
*
* ## Query Parameters
*
* - `provider` (optional) - Filter by provider (openai, anthropic, gemini, deepseek, mistral)
* - `supports_streaming` (optional) - Filter to streaming-capable models (true/false); currently all active models stream, so this parameter is validated but has no filtering effect
* - `max_price` (optional) - Maximum price per 1k tokens (filters by output price)
* - `min_context` (optional) - Minimum context window size
* - `sort` (optional) - Sort by: price, context, popularity (default: name)
*
* ## Example Request
*
* ```
* GET /api/models?provider=openai&max_price=0.05&sort=price
* ```
*
* ## Example Response
*
* ```json
* {
*   "data": [
*     {
*       "id": "gpt-4-turbo",
*       "provider": "openai",
*       "provider_name": "OpenAI",
*       "name": "GPT-4 Turbo",
*       "description": "Most capable GPT-4 model",
*       "context_window": 128000,
*       "max_output_tokens": 4096,
*       "supports_streaming": true,
*       "supports_function_calling": true,
*       "pricing": {
*         "input_per_1k_tokens": 0.01,
*         "output_per_1k_tokens": 0.03,
*         "currency": "USD"
*       },
*       "availability": "available"
*     }
*   ],
*   "meta": {
*     "total": 42,
*     "filtered": 12,
*     "providers_count": 5
*   }
* }
* ```
*
* @tags Models
*
* @param Request $request
* @return JsonResponse
*/
public function index(Request $request): JsonResponse
{
$validator = Validator::make($request->all(), [
'provider' => 'sometimes|string|in:openai,anthropic,gemini,deepseek,mistral',
'supports_streaming' => 'sometimes|boolean',
'max_price' => 'sometimes|numeric|min:0',
'min_context' => 'sometimes|integer|min:0',
'sort' => 'sometimes|string|in:price,context,popularity,name',
]);
if ($validator->fails()) {
return response()->json([
'error' => [
'code' => 'validation_error',
'message' => 'Invalid query parameters',
'status' => 422,
'details' => $validator->errors(),
],
], 422);
}
$query = ModelPricing::where('is_active', true)
->whereNotNull('model');
// Apply filters
if ($request->has('provider')) {
$query->where('provider', $request->input('provider'));
}
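// Note: supports_streaming is validated above but not applied as a filter;
// all active models currently support streaming (hardcoded in the response below).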
if ($request->has('max_price')) {
// Convert per-1k price to per-million for comparison
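// e.g. max_price=0.05 (per 1k tokens) becomes 50.0 (per million)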
$maxPricePerMillion = $request->input('max_price') * 1000;
$query->where('output_price_per_million', '<=', $maxPricePerMillion);
}
if ($request->has('min_context')) {
$query->where('context_window', '>=', $request->input('min_context'));
}
// Apply sorting
$sort = $request->input('sort', 'name');
switch ($sort) {
case 'price':
$query->orderBy('output_price_per_million');
break;
case 'context':
$query->orderByDesc('context_window');
break;
case 'popularity':
// Sort by usage count (join with llm_requests)
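// The left join keeps models with zero recorded usage (count = 0);
// grouping by the primary key allows selecting model_pricing.* with the aggregate.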
$query->leftJoin('llm_requests', function ($join) {
$join->on('model_pricing.model', '=', 'llm_requests.model')
->where('llm_requests.status', '=', 'success');
})
->selectRaw('model_pricing.*, COUNT(llm_requests.id) as usage_count')
->groupBy('model_pricing.id')
->orderByDesc('usage_count');
break;
default:
$query->orderBy('model');
}
$totalCount = ModelPricing::where('is_active', true)->whereNotNull('model')->count();
$models = $query->get();
$data = $models->map(function ($model) {
return [
'id' => $model->model,
'provider' => $model->provider,
'provider_name' => $this->getProviderName($model->provider),
'name' => $this->getModelDisplayName($model->model),
'description' => $this->getModelDescription($model),
'context_window' => $model->context_window,
'max_output_tokens' => $model->max_output_tokens,
'supports_streaming' => true,
'supports_function_calling' => in_array($model->provider, ['openai', 'anthropic']),
'supports_vision' => $this->supportsVision($model->model),
'pricing' => [
'input_per_1k_tokens' => round($model->input_price_per_million / 1000, 6),
'output_per_1k_tokens' => round($model->output_price_per_million / 1000, 6),
'currency' => 'USD',
],
'availability' => 'available',
];
});
$providersCount = $models->pluck('provider')->unique()->count();
return response()->json([
'data' => $data,
'meta' => [
'total' => $totalCount,
'filtered' => $data->count(),
'providers_count' => $providersCount,
],
]);
}
/**
* Get detailed information about a specific model
*
* Returns comprehensive information about a specific LLM model, including
* capabilities, pricing, performance metrics, and user's usage statistics.
*
* ## Path Parameters
*
* - `provider` - Provider ID (openai, anthropic, gemini, deepseek, mistral)
* - `model` - Model ID (e.g., gpt-4-turbo, claude-3-5-sonnet-20241022)
*
* ## Example Response
*
* ```json
* {
*   "data": {
*     "id": "gpt-4-turbo",
*     "provider": "openai",
*     "provider_name": "OpenAI",
*     "name": "GPT-4 Turbo",
*     "full_name": "OpenAI GPT-4 Turbo",
*     "description": "Most capable GPT-4 model",
*     "status": "active",
*     "capabilities": {
*       "context_window": 128000,
*       "max_output_tokens": 4096,
*       "supports_streaming": true,
*       "supports_function_calling": true,
*       "supports_vision": true,
*       "supports_json_mode": true
*     },
*     "pricing": {
*       "input_per_1k_tokens": 0.01,
*       "output_per_1k_tokens": 0.03,
*       "currency": "USD",
*       "last_updated": "2024-11-01T00:00:00Z"
*     },
*     "performance": {
*       "avg_response_time_ms": 1250,
*       "p95_response_time_ms": 2800,
*       "success_rate": 99.8
*     },
*     "your_usage": {
*       "total_requests": 145,
*       "total_tokens": 250000,
*       "total_cost": 3.75,
*       "avg_tokens_per_request": 1724,
*       "last_used": "2025-11-19T11:30:00Z"
*     }
*   }
* }
* ```
*
* @tags Models
*
* @param Request $request
* @param string $provider
* @param string $model
* @return JsonResponse
*/
public function show(Request $request, string $provider, string $model): JsonResponse
{
// Find the model
$modelData = ModelPricing::where('provider', $provider)
->where('model', $model)
->where('is_active', true)
->first();
if (!$modelData) {
return response()->json([
'error' => [
'code' => 'not_found',
'message' => "Model '{$model}' not found for provider '{$provider}'",
'status' => 404,
],
], 404);
}
$user = $request->user();
// Get user's usage statistics for this model
$userUsage = LlmRequest::where('gateway_user_id', $user->user_id)
->where('provider', $provider)
->where('model', $model)
->where('status', 'success')
->selectRaw('
COUNT(*) as total_requests,
SUM(total_tokens) as total_tokens,
SUM(total_cost) as total_cost,
AVG(total_tokens) as avg_tokens_per_request,
MAX(created_at) as last_used
')
->first();
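// Note: columns selected via selectRaw hydrate as raw values,
// so last_used is a plain string here (parsed with Carbon below).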
// Get global performance statistics
$performance = LlmRequest::where('provider', $provider)
->where('model', $model)
->where('status', 'success')
->selectRaw('
AVG(response_time_ms) as avg_response_time_ms,
MAX(response_time_ms) as max_response_time_ms
')
->first();
// Calculate success rate
$totalRequests = LlmRequest::where('provider', $provider)
->where('model', $model)
->count();
$successfulRequests = LlmRequest::where('provider', $provider)
->where('model', $model)
->where('status', 'success')
->count();
$successRate = $totalRequests > 0 ? ($successfulRequests / $totalRequests) * 100 : 0;
$response = [
'data' => [
'id' => $modelData->model,
'provider' => $modelData->provider,
'provider_name' => $this->getProviderName($modelData->provider),
'name' => $this->getModelDisplayName($modelData->model),
'full_name' => $this->getProviderName($modelData->provider) . ' ' . $this->getModelDisplayName($modelData->model),
'description' => $this->getModelDescription($modelData),
'status' => 'active',
'capabilities' => [
'context_window' => $modelData->context_window,
'max_output_tokens' => $modelData->max_output_tokens,
'supports_streaming' => true,
'supports_function_calling' => in_array($modelData->provider, ['openai', 'anthropic']),
'supports_vision' => $this->supportsVision($modelData->model),
'supports_json_mode' => in_array($modelData->provider, ['openai', 'anthropic']),
],
'pricing' => [
'input_per_1k_tokens' => round($modelData->input_price_per_million / 1000, 6),
'output_per_1k_tokens' => round($modelData->output_price_per_million / 1000, 6),
'currency' => 'USD',
'last_updated' => $modelData->updated_at->toIso8601String(),
],
'performance' => [
'avg_response_time_ms' => round($performance->avg_response_time_ms ?? 0),
// NOTE: max response time is used here as a rough stand-in for p95.
'p95_response_time_ms' => round($performance->max_response_time_ms ?? 0),
'success_rate' => round($successRate, 1),
],
'your_usage' => [
'total_requests' => $userUsage->total_requests ?? 0,
'total_tokens' => $userUsage->total_tokens ?? 0,
'total_cost' => round($userUsage->total_cost ?? 0, 4),
'avg_tokens_per_request' => round($userUsage->avg_tokens_per_request ?? 0),
'last_used' => $userUsage->last_used ? Carbon::parse($userUsage->last_used)->toIso8601String() : null,
],
'documentation' => $this->getModelDocUrl($provider, $model),
'best_for' => $this->getModelUseCases($model),
],
];
return response()->json($response);
}
/**
* Get human-readable provider name
*/
private function getProviderName(string $provider): string
{
return match ($provider) {
'openai' => 'OpenAI',
'anthropic' => 'Anthropic',
'gemini' => 'Google Gemini',
'deepseek' => 'DeepSeek',
'mistral' => 'Mistral AI',
default => ucfirst($provider),
};
}
/**
* Get model display name from model ID
*/
private function getModelDisplayName(string $modelId): string
{
// Convert a model ID to a readable display name,
// e.g. "gpt-4-turbo" -> "GPT 4 Turbo"
$name = ucwords(str_replace(['-', '_'], ' ', $modelId));
// ucwords() renders the acronym as "Gpt"; restore its casing
return preg_replace('/\bGpt\b/', 'GPT', $name) ?? $name;
}
/**
* Get model description
*/
private function getModelDescription(ModelPricing $model): string
{
// Extract description from model name or provide generic one
$modelId = strtolower($model->model);
if (str_contains($modelId, 'gpt-4')) {
return 'Most capable GPT-4 model with improved instruction following';
} elseif (str_contains($modelId, 'gpt-3.5')) {
return 'Fast and efficient model for simpler tasks';
} elseif (str_contains($modelId, 'claude-3-5')) {
return 'Most intelligent Claude model';
} elseif (str_contains($modelId, 'claude-3-opus')) {
return 'Powerful model for complex tasks';
} elseif (str_contains($modelId, 'claude-3-sonnet')) {
return 'Balanced model for most use cases';
} elseif (str_contains($modelId, 'claude-3-haiku')) {
return 'Fast and cost-effective model';
} elseif (str_contains($modelId, 'gemini')) {
return 'Google\'s multimodal AI model';
} elseif (str_contains($modelId, 'deepseek')) {
return 'Efficient model for coding and reasoning';
} elseif (str_contains($modelId, 'mistral')) {
return 'Open-source model with strong performance';
}
return $this->getModelDisplayName($model->model);
}
/**
* Check if model supports vision
*/
private function supportsVision(?string $modelId): bool
{
if ($modelId === null) {
return false;
}
$visionModels = [
'gpt-4-vision-preview',
'gpt-4-turbo',
'gpt-4o',
'gpt-4o-mini',
'claude-3-opus',
'claude-3-5-sonnet',
'claude-3-sonnet',
'claude-3-haiku',
'gemini-pro-vision',
'gemini-1.5-pro',
'gemini-1.5-flash',
];
foreach ($visionModels as $visionModel) {
if (str_contains(strtolower($modelId), strtolower($visionModel))) {
return true;
}
}
return false;
}
/**
* Get model documentation URL
*/
private function getModelDocUrl(string $provider, string $model): string
{
return match ($provider) {
'openai' => "https://platform.openai.com/docs/models/{$model}",
'anthropic' => 'https://docs.anthropic.com/claude/docs/models-overview',
'gemini' => 'https://ai.google.dev/models',
'deepseek' => 'https://platform.deepseek.com/api-docs',
'mistral' => 'https://docs.mistral.ai/getting-started/models',
default => '#',
};
}
/**
* Get suggested use cases for model
*/
private function getModelUseCases(string $modelId): array
{
$modelLower = strtolower($modelId);
if (str_contains($modelLower, 'gpt-4-turbo') || str_contains($modelLower, 'gpt-4o')) {
return ['complex reasoning', 'code generation', 'analysis', 'vision tasks'];
} elseif (str_contains($modelLower, 'gpt-3.5-turbo')) {
return ['chat', 'simple tasks', 'quick responses'];
} elseif (str_contains($modelLower, 'claude-3-opus')) {
return ['complex tasks', 'research', 'analysis', 'creative writing'];
} elseif (str_contains($modelLower, 'claude-3-5-sonnet') || str_contains($modelLower, 'claude-3-sonnet')) {
return ['general purpose', 'balanced performance', 'coding'];
} elseif (str_contains($modelLower, 'claude-3-haiku')) {
return ['fast responses', 'simple tasks', 'cost-effective'];
} elseif (str_contains($modelLower, 'gemini')) {
return ['multimodal tasks', 'vision', 'general purpose'];
} elseif (str_contains($modelLower, 'deepseek')) {
return ['coding', 'reasoning', 'technical tasks'];
} elseif (str_contains($modelLower, 'mistral')) {
return ['general purpose', 'multilingual', 'efficient'];
}
return ['general purpose'];
}
}