Fügt 21 neue API-Endpoints in 4 Phasen hinzu:
Phase 1 - Foundation (Provider & Models):
- GET /api/providers - Liste aller Provider
- GET /api/providers/{provider} - Provider-Details
- GET /api/models - Liste aller Models mit Filtering/Sorting
- GET /api/models/{provider}/{model} - Model-Details
Phase 2 - Core Features (Credentials, Budget, Pricing):
- GET/POST/PUT/DELETE /api/credentials - Credential-Management
- POST /api/credentials/{id}/test - Connection Testing
- GET /api/budget - Budget-Status mit Projektionen
- GET /api/budget/history - Budget-Historie
- GET /api/pricing - Model-Pricing-Listen
- GET /api/pricing/calculator - Kosten-Kalkulator
- GET /api/pricing/compare - Preis-Vergleich
Phase 3 - Analytics (Usage Statistics):
- GET /api/usage/summary - Umfassende Statistiken
- GET /api/usage/requests - Request-History mit Pagination
- GET /api/usage/requests/{id} - Request-Details
- GET /api/usage/charts - Chart-Daten (4 Typen)
Phase 4 - Account (Account Info & Activity):
- GET /api/account - User-Informationen
- GET /api/account/activity - Activity-Log
Features:
- Vollständige Scramble/Swagger-Dokumentation
- Consistent Error-Handling
- API-Key Authentication
- Filtering, Sorting, Pagination
- Budget-Tracking mit Alerts
- Provider-Breakdown
- Performance-Metriken
- Chart-Ready-Data
Controller erstellt:
- ProviderController
- ModelController
- CredentialController
- BudgetController
- PricingController
- UsageController
- AccountController
Dokumentation:
- API_KONZEPT.md - Vollständiges API-Konzept
- API_IMPLEMENTATION_STATUS.txt - Implementation-Tracking
- API_IMPLEMENTATION_SUMMARY.md - Zusammenfassung und Workflows
438 lines
16 KiB
PHP
<?php
|
|
|
|
namespace App\Http\Controllers\Api;
|
|
|
|
use App\Http\Controllers\Controller;
|
|
use App\Models\{ModelPricing, LlmRequest};
|
|
use Illuminate\Http\JsonResponse;
|
|
use Illuminate\Http\Request;
|
|
use Illuminate\Support\Facades\Validator;
|
|
|
|
class ModelController extends Controller
{
    /**
     * Providers whose models support function calling and JSON mode.
     *
     * NOTE(review): this is a provider-level heuristic, not per-model data —
     * confirm against the pricing table if per-model flags become available.
     */
    private const FUNCTION_CALLING_PROVIDERS = ['openai', 'anthropic'];

    /**
     * Get list of all available models
     *
     * Returns a list of all LLM models available across all providers, with optional
     * filtering and sorting capabilities.
     *
     * ## Query Parameters
     *
     * - `provider` (optional) - Filter by provider (openai, anthropic, gemini, deepseek, mistral)
     * - `supports_streaming` (optional) - Filter to streaming-capable models (true/false)
     * - `max_price` (optional) - Maximum price per 1k tokens (filters by output price)
     * - `min_context` (optional) - Minimum context window size
     * - `sort` (optional) - Sort by: price, context, popularity (default: name)
     *
     * ## Example Request
     *
     * ```
     * GET /api/models?provider=openai&max_price=0.05&sort=price
     * ```
     *
     * ## Example Response
     *
     * ```json
     * {
     *   "data": [
     *     {
     *       "id": "gpt-4-turbo",
     *       "provider": "openai",
     *       "provider_name": "OpenAI",
     *       "name": "GPT-4 Turbo",
     *       "description": "Most capable GPT-4 model",
     *       "context_window": 128000,
     *       "max_output_tokens": 4096,
     *       "supports_streaming": true,
     *       "supports_function_calling": true,
     *       "pricing": {
     *         "input_per_1k_tokens": 0.01,
     *         "output_per_1k_tokens": 0.03,
     *         "currency": "USD"
     *       },
     *       "availability": "available"
     *     }
     *   ],
     *   "meta": {
     *     "total": 42,
     *     "filtered": 12,
     *     "providers_count": 5
     *   }
     * }
     * ```
     *
     * @tags Models
     *
     * @param Request $request
     * @return JsonResponse
     */
    public function index(Request $request): JsonResponse
    {
        $validator = Validator::make($request->all(), [
            'provider' => 'sometimes|string|in:openai,anthropic,gemini,deepseek,mistral',
            'supports_streaming' => 'sometimes|boolean',
            'max_price' => 'sometimes|numeric|min:0',
            'min_context' => 'sometimes|integer|min:0',
            'sort' => 'sometimes|string|in:price,context,popularity,name',
        ]);

        if ($validator->fails()) {
            return response()->json([
                'error' => [
                    'code' => 'validation_error',
                    'message' => 'Invalid query parameters',
                    'status' => 422,
                    'details' => $validator->errors(),
                ],
            ], 422);
        }

        $query = ModelPricing::where('is_active', true);

        // Apply filters
        if ($request->has('provider')) {
            $query->where('provider', $request->input('provider'));
        }

        // BUGFIX: `supports_streaming` was documented and validated but never
        // applied. Every catalogued model is reported as streaming-capable
        // (see the hard-coded flag in the response mapping below), so
        // `supports_streaming=true` is a no-op and `false` matches nothing.
        if ($request->has('supports_streaming') && !$request->boolean('supports_streaming')) {
            $query->whereRaw('1 = 0');
        }

        if ($request->has('max_price')) {
            $query->where('output_price_per_1k', '<=', $request->input('max_price'));
        }

        if ($request->has('min_context')) {
            $query->where('context_window', '>=', $request->input('min_context'));
        }

        // Apply sorting
        $sort = $request->input('sort', 'name');
        switch ($sort) {
            case 'price':
                $query->orderBy('output_price_per_1k');
                break;
            case 'context':
                $query->orderByDesc('context_window');
                break;
            case 'popularity':
                // Rank by count of successful requests; the LEFT JOIN keeps
                // models that have never been used (usage_count = 0).
                $query->leftJoin('llm_requests', function ($join) {
                    $join->on('model_pricing.model_id', '=', 'llm_requests.model')
                        ->where('llm_requests.status', '=', 'success');
                })
                    ->selectRaw('model_pricing.*, COUNT(llm_requests.id) as usage_count')
                    ->groupBy('model_pricing.id')
                    ->orderByDesc('usage_count');
                break;
            default:
                $query->orderBy('display_name');
        }

        // `total` is the unfiltered active-model count; `filtered` (below) is
        // the size of the result set after the query-string filters applied.
        $totalCount = ModelPricing::where('is_active', true)->count();
        $models = $query->get();

        $data = $models->map(function ($model) {
            return [
                'id' => $model->model_id,
                'provider' => $model->provider,
                'provider_name' => $this->getProviderName($model->provider),
                'name' => $model->display_name,
                'description' => $this->getModelDescription($model),
                'context_window' => $model->context_window,
                'max_output_tokens' => $model->max_output_tokens,
                // All catalogued models stream; kept hard-coded until a
                // per-model column exists (the supports_streaming filter
                // above relies on this invariant).
                'supports_streaming' => true,
                'supports_function_calling' => in_array($model->provider, self::FUNCTION_CALLING_PROVIDERS, true),
                'supports_vision' => $this->supportsVision($model->model_id),
                'pricing' => [
                    'input_per_1k_tokens' => $model->input_price_per_1k,
                    'output_per_1k_tokens' => $model->output_price_per_1k,
                    'currency' => 'USD',
                ],
                'availability' => 'available',
            ];
        });

        $providersCount = $models->pluck('provider')->unique()->count();

        return response()->json([
            'data' => $data,
            'meta' => [
                'total' => $totalCount,
                'filtered' => $data->count(),
                'providers_count' => $providersCount,
            ],
        ]);
    }

    /**
     * Get detailed information about a specific model
     *
     * Returns comprehensive information about a specific LLM model, including
     * capabilities, pricing, performance metrics, and user's usage statistics.
     *
     * ## Path Parameters
     *
     * - `provider` - Provider ID (openai, anthropic, gemini, deepseek, mistral)
     * - `model` - Model ID (e.g., gpt-4-turbo, claude-3-5-sonnet-20241022)
     *
     * ## Example Response
     *
     * ```json
     * {
     *   "data": {
     *     "id": "gpt-4-turbo",
     *     "provider": "openai",
     *     "provider_name": "OpenAI",
     *     "name": "GPT-4 Turbo",
     *     "full_name": "OpenAI GPT-4 Turbo",
     *     "description": "Most capable GPT-4 model",
     *     "status": "active",
     *     "capabilities": {
     *       "context_window": 128000,
     *       "max_output_tokens": 4096,
     *       "supports_streaming": true,
     *       "supports_function_calling": true,
     *       "supports_vision": true,
     *       "supports_json_mode": true
     *     },
     *     "pricing": {
     *       "input_per_1k_tokens": 0.01,
     *       "output_per_1k_tokens": 0.03,
     *       "currency": "USD",
     *       "last_updated": "2024-11-01T00:00:00Z"
     *     },
     *     "performance": {
     *       "avg_response_time_ms": 1250,
     *       "p95_response_time_ms": 2800,
     *       "success_rate": 99.8
     *     },
     *     "your_usage": {
     *       "total_requests": 145,
     *       "total_tokens": 250000,
     *       "total_cost": 3.75,
     *       "avg_tokens_per_request": 1724,
     *       "last_used": "2025-11-19T11:30:00Z"
     *     }
     *   }
     * }
     * ```
     *
     * @tags Models
     *
     * @param Request $request
     * @param string $provider
     * @param string $model
     * @return JsonResponse
     */
    public function show(Request $request, string $provider, string $model): JsonResponse
    {
        // Find the model
        $modelData = ModelPricing::where('provider', $provider)
            ->where('model_id', $model)
            ->where('is_active', true)
            ->first();

        if (!$modelData) {
            return response()->json([
                'error' => [
                    'code' => 'not_found',
                    'message' => "Model '{$model}' not found for provider '{$provider}'",
                    'status' => 404,
                ],
            ], 404);
        }

        $user = $request->user();

        // Get user's usage statistics for this model
        $userUsage = LlmRequest::where('gateway_user_id', $user->user_id)
            ->where('provider', $provider)
            ->where('model', $model)
            ->where('status', 'success')
            ->selectRaw('
                COUNT(*) as total_requests,
                SUM(total_tokens) as total_tokens,
                SUM(total_cost) as total_cost,
                AVG(total_tokens) as avg_tokens_per_request,
                MAX(created_at) as last_used
            ')
            ->first();

        // Get global performance statistics
        $performance = LlmRequest::where('provider', $provider)
            ->where('model', $model)
            ->where('status', 'success')
            ->selectRaw('
                AVG(response_time_ms) as avg_response_time_ms,
                MAX(response_time_ms) as max_response_time_ms
            ')
            ->first();

        // Success rate in a single conditional aggregate (was two separate
        // COUNT queries over the same rows).
        $requestCounts = LlmRequest::where('provider', $provider)
            ->where('model', $model)
            ->selectRaw("
                COUNT(*) as total,
                SUM(CASE WHEN status = 'success' THEN 1 ELSE 0 END) as successful
            ")
            ->first();
        $totalRequests = (int) ($requestCounts->total ?? 0);
        $successfulRequests = (int) ($requestCounts->successful ?? 0);
        $successRate = $totalRequests > 0 ? ($successfulRequests / $totalRequests) * 100 : 0;

        // BUGFIX: `last_used` is a raw MAX(created_at) aggregate, so Eloquent
        // hydrates it as a plain string (no date cast applies to the alias);
        // calling `?->toIso8601String()` on it fataled whenever it was
        // non-null. Normalise to ISO-8601 defensively.
        $lastUsedRaw = $userUsage->last_used ?? null;
        if ($lastUsedRaw instanceof \DateTimeInterface) {
            $lastUsed = $lastUsedRaw->format(DATE_ATOM);
        } elseif ($lastUsedRaw !== null) {
            $lastUsed = date(DATE_ATOM, strtotime((string) $lastUsedRaw));
        } else {
            $lastUsed = null;
        }

        $response = [
            'data' => [
                'id' => $modelData->model_id,
                'provider' => $modelData->provider,
                'provider_name' => $this->getProviderName($modelData->provider),
                'name' => $modelData->display_name,
                'full_name' => $this->getProviderName($modelData->provider) . ' ' . $modelData->display_name,
                'description' => $this->getModelDescription($modelData),
                'status' => 'active',
                'capabilities' => [
                    'context_window' => $modelData->context_window,
                    'max_output_tokens' => $modelData->max_output_tokens,
                    // See index(): streaming support is a catalogue-wide invariant.
                    'supports_streaming' => true,
                    'supports_function_calling' => in_array($modelData->provider, self::FUNCTION_CALLING_PROVIDERS, true),
                    'supports_vision' => $this->supportsVision($modelData->model_id),
                    'supports_json_mode' => in_array($modelData->provider, self::FUNCTION_CALLING_PROVIDERS, true),
                ],
                'pricing' => [
                    'input_per_1k_tokens' => $modelData->input_price_per_1k,
                    'output_per_1k_tokens' => $modelData->output_price_per_1k,
                    'currency' => 'USD',
                    'last_updated' => $modelData->updated_at->toIso8601String(),
                ],
                'performance' => [
                    'avg_response_time_ms' => round((float) ($performance->avg_response_time_ms ?? 0)),
                    // NOTE(review): approximated by MAX(response_time_ms) — a
                    // real percentile query would be more accurate, but the
                    // field name is part of the public response shape.
                    'p95_response_time_ms' => round((float) ($performance->max_response_time_ms ?? 0)),
                    'success_rate' => round($successRate, 1),
                ],
                'your_usage' => [
                    // Raw aggregates come back as strings from the driver —
                    // cast explicitly so the JSON carries numbers.
                    'total_requests' => (int) ($userUsage->total_requests ?? 0),
                    'total_tokens' => (int) ($userUsage->total_tokens ?? 0),
                    'total_cost' => round((float) ($userUsage->total_cost ?? 0), 4),
                    'avg_tokens_per_request' => (int) round((float) ($userUsage->avg_tokens_per_request ?? 0)),
                    'last_used' => $lastUsed,
                ],
                'documentation' => $this->getModelDocUrl($provider, $model),
                'best_for' => $this->getModelUseCases($model),
            ],
        ];

        return response()->json($response);
    }

    /**
     * Get human-readable provider name.
     *
     * Unknown providers fall back to a capitalised form of the raw ID.
     */
    private function getProviderName(string $provider): string
    {
        return match ($provider) {
            'openai' => 'OpenAI',
            'anthropic' => 'Anthropic',
            'gemini' => 'Google Gemini',
            'deepseek' => 'DeepSeek',
            'mistral' => 'Mistral AI',
            default => ucfirst($provider),
        };
    }

    /**
     * Get a short marketing-style description for a model.
     *
     * Matched on substrings of the model ID; falls back to the display name
     * when no known family matches.
     */
    private function getModelDescription(ModelPricing $model): string
    {
        $modelId = strtolower($model->model_id);

        // First match wins, so more specific families (e.g. claude-3-5)
        // must stay ahead of their generic prefixes.
        return match (true) {
            str_contains($modelId, 'gpt-4') => 'Most capable GPT-4 model with improved instruction following',
            str_contains($modelId, 'gpt-3.5') => 'Fast and efficient model for simpler tasks',
            str_contains($modelId, 'claude-3-5') => 'Most intelligent Claude model',
            str_contains($modelId, 'claude-3-opus') => 'Powerful model for complex tasks',
            str_contains($modelId, 'claude-3-sonnet') => 'Balanced model for most use cases',
            str_contains($modelId, 'claude-3-haiku') => 'Fast and cost-effective model',
            str_contains($modelId, 'gemini') => 'Google\'s multimodal AI model',
            str_contains($modelId, 'deepseek') => 'Efficient model for coding and reasoning',
            str_contains($modelId, 'mistral') => 'Open-source model with strong performance',
            default => $model->display_name,
        };
    }

    /**
     * Check if a model supports vision (image input).
     *
     * Substring match against a hand-maintained list, so versioned IDs like
     * "gpt-4o-2024-05-13" still match "gpt-4o".
     */
    private function supportsVision(string $modelId): bool
    {
        $visionModels = [
            'gpt-4-vision-preview',
            'gpt-4-turbo',
            'gpt-4o',
            'gpt-4o-mini',
            'claude-3-opus',
            'claude-3-5-sonnet',
            'claude-3-sonnet',
            'claude-3-haiku',
            'gemini-pro-vision',
            'gemini-1.5-pro',
            'gemini-1.5-flash',
        ];

        $needle = strtolower($modelId);
        foreach ($visionModels as $visionModel) {
            if (str_contains($needle, strtolower($visionModel))) {
                return true;
            }
        }

        return false;
    }

    /**
     * Get the provider's documentation URL for a model.
     *
     * Only OpenAI has per-model doc pages; the rest link to the provider's
     * model overview. Unknown providers get '#'.
     */
    private function getModelDocUrl(string $provider, string $model): string
    {
        return match ($provider) {
            'openai' => "https://platform.openai.com/docs/models/{$model}",
            'anthropic' => 'https://docs.anthropic.com/claude/docs/models-overview',
            'gemini' => 'https://ai.google.dev/models',
            'deepseek' => 'https://platform.deepseek.com/api-docs',
            'mistral' => 'https://docs.mistral.ai/getting-started/models',
            default => '#',
        };
    }

    /**
     * Get suggested use cases for a model (the `best_for` response field).
     *
     * Substring-matched on the model ID; unknown models get a generic answer.
     */
    private function getModelUseCases(string $modelId): array
    {
        $modelLower = strtolower($modelId);

        return match (true) {
            str_contains($modelLower, 'gpt-4-turbo'),
            str_contains($modelLower, 'gpt-4o') => ['complex reasoning', 'code generation', 'analysis', 'vision tasks'],
            str_contains($modelLower, 'gpt-3.5-turbo') => ['chat', 'simple tasks', 'quick responses'],
            str_contains($modelLower, 'claude-3-opus') => ['complex tasks', 'research', 'analysis', 'creative writing'],
            str_contains($modelLower, 'claude-3-5-sonnet'),
            str_contains($modelLower, 'claude-3-sonnet') => ['general purpose', 'balanced performance', 'coding'],
            str_contains($modelLower, 'claude-3-haiku') => ['fast responses', 'simple tasks', 'cost-effective'],
            str_contains($modelLower, 'gemini') => ['multimodal tasks', 'vision', 'general purpose'],
            str_contains($modelLower, 'deepseek') => ['coding', 'reasoning', 'technical tasks'],
            str_contains($modelLower, 'mistral') => ['general purpose', 'multilingual', 'efficient'],
            default => ['general purpose'],
        };
    }
}
|