at41rv commited on
Commit
43d02e6
·
verified ·
1 Parent(s): c1c8f4b

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +981 -0
app.py ADDED
@@ -0,0 +1,981 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException, Request, UploadFile, File, Depends, Header
2
+ from fastapi.responses import StreamingResponse, HTMLResponse, Response
3
+ from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
4
+ from pydantic import BaseModel, Field
5
+ from typing import Optional, List, Dict, Any
6
+ import httpx
7
+ import os
8
+ import json
9
+ import logging
10
+ from datetime import datetime
11
+
12
# Setup logging: configure the root logger once at import time and create a
# module-level logger for this proxy service.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
15
+
16
# FastAPI application instance. The `servers` entry pins the public base URL
# used in the served OpenAPI document; `defaultModelsExpandDepth: -1` hides
# the schemas section in Swagger UI.
app = FastAPI(
    title="OMNIAPI Services",
    description="Professional AI Services - Chat Completions, Image Generation, Web Search, Speech-To-Text, and Text-to-Speech",
    version="1.0.0",
    servers=[
        {
            "url": "https://at41rv-a77.hf.space",
            "description": "Production server"
        }
    ],
    swagger_ui_parameters={"defaultModelsExpandDepth": -1}
)
28
+
29
+ # Get HF token from environment variable
30
# Token used to authenticate this proxy against the backend Space.
HF_TOKEN = os.getenv("HF_TOKEN")
# Base URL of the backend service every endpoint below proxies to.
BACKEND_URL = "https://at41rv-a77backend.hf.space"
ACCESS_TOKEN = os.getenv("ACCESS_TOKEN", "")  # Set this in your HF Space secrets

if not HF_TOKEN:
    # Backend calls will be sent with "Bearer None" if this is missing.
    logger.warning("HF_TOKEN not found in environment variables")

# Bearer-token extractor used by verify_token via Depends().
security = HTTPBearer()

# In-memory storage for generated API keys with metadata.
# NOTE(review): neither dict appears to be read or written anywhere in this
# file — key state seems to live in the backend; confirm before removing.
generated_api_keys = {}  # Changed from set to dict
user_api_keys = {}  # Track API keys per user (by IP or session)
42
+
43
async def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)) -> str:
    """Verify the bearer token from the Authorization header.

    Accepts the statically configured ACCESS_TOKEN directly; any other token
    is forwarded to the backend's /validate-token endpoint. If that endpoint
    does not exist (404), validation is skipped and the token is accepted
    as-is.

    Returns:
        The validated token string.

    Raises:
        HTTPException: 401 when the token is rejected or validation fails.
    """
    token = credentials.credentials

    # Fast path: the original, statically configured access token.
    if ACCESS_TOKEN and token == ACCESS_TOKEN:
        return token

    # Attempt to forward the token to backend for validation.
    try:
        response = await make_backend_request("/validate-token", "POST", {"token": token})
        if response.status_code == 200:
            data = response.json()
            if data.get("valid", False):
                return token
            logger.error(f"Token validation failed: Backend response indicates token is invalid. Response: {data}")
            raise HTTPException(status_code=401, detail="Invalid access token. Backend validation failed.")
        elif response.status_code == 404:
            logger.warning(f"Token validation endpoint not found (404). Skipping validation and using token directly. Response: {response.text}")
            return token  # Skip validation if endpoint is not found, allow token to be used directly
        else:
            logger.error(f"Token validation failed: Backend returned status code {response.status_code}. Response: {response.text}")
            raise HTTPException(status_code=401, detail=f"Invalid access token. Backend returned status {response.status_code}.")
    except HTTPException:
        # BUGFIX: previously the generic handler below caught our own 401s and
        # re-raised them wrapped as "Validation error: 401: ...". Propagate
        # HTTPExceptions untouched.
        raise
    except Exception as e:
        logger.error(f"Token validation error: {str(e)}")
        raise HTTPException(status_code=401, detail=f"Invalid access token. Validation error: {str(e)}")
73
+
74
# Request timeout configuration (seconds) — default ceiling for proxied
# backend calls; transcription overrides this with a longer value.
REQUEST_TIMEOUT = 60.0
76
+
77
+ # Pydantic models for requests (OpenAI Compatible)
78
class Message(BaseModel):
    """A single chat message in OpenAI's role/content format."""
    role: str = Field(..., description="Role of the message sender", example="user")
    content: str = Field(..., description="Content of the message", example="Hello, how are you?")
81
+
82
class SimpleChatRequest(BaseModel):
    """Request body for /chat/completions.

    Accepts either an OpenAI-style `messages` list or a legacy single
    `prompt`; the endpoint rejects requests providing neither.
    """
    model: str = Field(..., description="AI model to use for completion", example="gpt-4o")
    # Annotations corrected to Optional[...] — both fields default to None.
    prompt: Optional[str] = Field(None, nullable=True, description="Text prompt for the AI to respond to (legacy)", example="Explain quantum computing in simple terms")
    messages: Optional[List[Message]] = Field(None, nullable=True, description="List of messages for OpenAI-compatible chat")
    system_prompt: str = Field(
        default="You are a helpful AI assistant.",
        nullable=True,
        description="System prompt to set AI behavior and personality",
        example="You are a helpful physics teacher who explains complex topics simply."
    )
    max_tokens: int = Field(default=2048, nullable=True, description="Maximum tokens to generate", example=2048)
    temperature: float = Field(default=0.7, nullable=True, description="Temperature for response randomness", example=0.7)
    stream: bool = Field(default=False, nullable=True, description="Whether to stream the response", example=False)
95
+
96
class GenerationRequest(BaseModel):
    """Request body for /image/generate."""
    prompt: str = Field(..., description="Text description of the image to generate", example="A beautiful sunset over mountains")
    model: str = Field(..., description="Image generation model to use", example="flux.1-dev")
    size: str = Field(default="1024x1024", nullable=True, description="Image size", example="1024x1024")
100
+
101
class SearchRequest(BaseModel):
    """Shared request body for /web/search, /image/search and /videos/search."""
    query: str = Field(..., description="Search query", example="artificial intelligence")
    max_results: int = Field(default=10, nullable=True, description="Maximum number of results", example=10)
    region: str = Field(default="us", nullable=True, description="Search region", example="us")
    safesearch: str = Field(default="moderate", nullable=True, description="Safe search level", example="moderate")
    max_chars: int = Field(default=2000, nullable=True, description="Maximum characters to scrape from URL", example=2000)
107
+
108
class TTSRequest(BaseModel):
    """Request body for /tts/generate."""
    text: str = Field(..., description="Text to convert to speech", example="Hello, this is a test message")
    provider: str = Field(..., description="TTS provider to use", example="GesseritTTS")
    voice: str = Field(..., description="Voice to use for TTS", example="Emma")
112
+
113
class APIKeyCreateRequest(BaseModel):
    """Request body for /generate-api-key."""
    name: str = Field(..., min_length=1, max_length=50, description="Name for the API key", example="My API Key")
115
+
116
class APIKeyResponse(BaseModel):
    """Response model for /generate-api-key (shape produced by the backend)."""
    success: bool = Field(..., description="Whether the operation was successful", example=True)
    api_key: str = Field(..., description="Generated API key", example="oa1234567890abcdef...")
    name: str = Field(default="", nullable=True, description="Name of the API key", example="My API Key")
    message: str = Field(..., description="Response message", example="API key generated successfully")
    created_at: str = Field(..., description="Creation timestamp", example="2024-01-20 10:30:00 UTC")
    expires: str = Field(default="Never", description="Expiration date", example="Never")
    usage_note: str = Field(..., description="Usage instructions", example="Include this API key in the Authorization header")
124
+
125
class APIKeyInfo(BaseModel):
    """Metadata for one API key as returned by the backend's list endpoint."""
    key: str = Field(..., description="API key (full or masked)", example="oa1234567890abcdef...")
    name: str = Field(..., description="Name of the API key", example="My API Key")
    created_at: str = Field(..., description="Creation timestamp", example="2024-01-20 10:30:00 UTC")
    last_used: Optional[str] = Field(None, description="Last used timestamp", example="2024-01-20 15:45:00 UTC")
    usage_count: int = Field(..., description="Number of times used", example=42)
    key_preview: str = Field(..., description="Masked preview of the key", example="oa1234...********")
132
+
133
class UserAPIKeysResponse(BaseModel):
    """Response model for /api-keys/list."""
    success: bool = Field(..., description="Whether the operation was successful", example=True)
    keys: List[APIKeyInfo] = Field(..., description="List of user's API keys")
    total_keys: int = Field(..., description="Total number of keys", example=2)
    max_keys: int = Field(default=3, description="Maximum allowed keys", example=3)
    remaining_slots: int = Field(..., description="Remaining key slots", example=1)
139
+
140
class APIKeyDeleteResponse(BaseModel):
    """Response model for DELETE /api-keys/{api_key}."""
    success: bool = Field(..., description="Whether the operation was successful", example=True)
    message: str = Field(..., description="Response message", example="API key deleted successfully")
143
+
144
async def make_backend_request(
    endpoint: str,
    method: str = "GET",
    data: dict = None,
    params: dict = None,
    files: dict = None,
    timeout: float = REQUEST_TIMEOUT
) -> httpx.Response:
    """Make an authenticated request to the backend API.

    Args:
        endpoint: Backend path, e.g. "/models" (joined onto BACKEND_URL).
        method: "GET", "POST" or "DELETE" (case-insensitive).
        data: JSON body for POST requests without files.
        params: Query-string parameters.
        files: Multipart files mapping for POST uploads.
        timeout: Per-request timeout in seconds.

    Returns:
        The raw httpx.Response (status handling is left to the caller).

    Raises:
        HTTPException: 405 on unsupported method, 504 on timeout,
            502 on connection errors, 500 on anything unexpected.
    """
    method = method.upper()
    # BUGFIX: validate the method *before* the try block. Previously the 405
    # HTTPException was raised inside it and swallowed by `except Exception`,
    # surfacing as a misleading 500 "Internal server error".
    if method not in ("GET", "POST", "DELETE"):
        raise HTTPException(status_code=405, detail="Method not allowed")

    headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "User-Agent": "OMNIAPI-Proxy/1.0"
    }

    # Only add Content-Type for JSON requests; multipart requests let httpx
    # set its own boundary-bearing Content-Type.
    if not files:
        headers["Content-Type"] = "application/json"

    url = f"{BACKEND_URL}{endpoint}"
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            if method == "GET":
                response = await client.get(url, headers=headers, params=params)
            elif method == "POST":
                if files:
                    response = await client.post(url, headers=headers, files=files, params=params)
                else:
                    response = await client.post(url, headers=headers, json=data, params=params)
            else:  # DELETE (only remaining option after the guard above)
                response = await client.delete(url, headers=headers, params=params)

        return response

    except httpx.TimeoutException:
        logger.error(f"Request timeout to {endpoint}")
        raise HTTPException(status_code=504, detail="Backend request timeout")
    except httpx.RequestError as e:
        logger.error(f"Request error to {endpoint}: {str(e)}")
        raise HTTPException(status_code=502, detail="Backend connection error")
    except Exception as e:
        logger.error(f"Unexpected error: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
207
+
208
@app.get("/")
async def root():
    """Root endpoint returning OpenAPI specification.

    NOTE(review): this hand-maintained document duplicates FastAPI's
    auto-generated /openapi.json and can drift from the real routes; it is
    kept because external consumers may depend on this exact document being
    served at "/". The repeated operation specs are now built by small local
    helpers instead of being copy-pasted.
    """
    bearer = [{"bearerAuth": []}]

    def json_body(properties):
        # Required application/json request body with an object schema.
        return {
            "required": True,
            "content": {
                "application/json": {
                    "schema": {"type": "object", "properties": properties}
                }
            }
        }

    def search_post(summary, result_description):
        # POST operation shared by the three DuckDuckGo-style search endpoints.
        return {
            "post": {
                "summary": summary,
                "security": bearer,
                "requestBody": json_body({
                    "query": {"type": "string"},
                    "max_results": {"type": "integer"},
                    "region": {"type": "string"},
                    "safesearch": {"type": "string"}
                }),
                "responses": {"200": {"description": result_description}}
            }
        }

    def file_get(summary, param_name, media_type, description):
        # GET operation serving a binary file addressed by a path parameter.
        return {
            "get": {
                "summary": summary,
                "security": bearer,
                "parameters": [
                    {
                        "name": param_name,
                        "in": "path",
                        "required": True,
                        "schema": {"type": "string"}
                    }
                ],
                "responses": {
                    "200": {
                        "description": description,
                        "content": {
                            media_type: {
                                "schema": {"type": "string", "format": "binary"}
                            }
                        }
                    }
                }
            }
        }

    return {
        "openapi": "3.0.0",
        "info": {
            "title": "OMNIAPI Services",
            "description": "Professional AI Services - Chat Completions, Image Generation, Web Search, Speech-To-Text, and Text-to-Speech",
            "version": "1.0.0"
        },
        "servers": [
            {
                "url": "https://at41rv-a77.hf.space",
                "description": "Production server"
            }
        ],
        "paths": {
            "/": {
                "get": {
                    "summary": "Root endpoint returning OpenAPI specification",
                    "responses": {
                        "200": {
                            "description": "OpenAPI specification",
                            "content": {
                                "application/json": {
                                    "schema": {"type": "object"}
                                }
                            }
                        }
                    }
                }
            },
            "/health": {
                "get": {
                    "summary": "Health check endpoint",
                    "responses": {"200": {"description": "Service health status"}}
                }
            },
            "/models": {
                "get": {
                    "summary": "Get available chat models",
                    "security": bearer,
                    "responses": {"200": {"description": "List of available models"}}
                }
            },
            "/chat/completions": {
                "post": {
                    "summary": "Chat completions (OpenAI Compatible)",
                    "security": bearer,
                    "requestBody": json_body({
                        "model": {"type": "string"},
                        "messages": {
                            "type": "array",
                            "items": {
                                "type": "object",
                                "properties": {
                                    "role": {"type": "string"},
                                    "content": {"type": "string"}
                                }
                            }
                        },
                        "max_tokens": {"type": "integer"},
                        "temperature": {"type": "number"},
                        "stream": {"type": "boolean"}
                    }),
                    "responses": {"200": {"description": "Chat completion response"}}
                }
            },
            "/image/generate": {
                "post": {
                    "summary": "Generate images",
                    "security": bearer,
                    "requestBody": json_body({
                        "prompt": {"type": "string"},
                        "model": {"type": "string"},
                        "size": {"type": "string"}
                    }),
                    "responses": {"200": {"description": "Generated image response"}}
                }
            },
            "/web/search": search_post("Web search", "Search results"),
            "/image/search": search_post("Image search", "Image search results"),
            "/videos/search": search_post("Video search", "Video search results"),
            "/tts/{provider}/voices": {
                "get": {
                    "summary": "Get TTS voices for provider",
                    "security": bearer,
                    "parameters": [
                        {
                            "name": "provider",
                            "in": "path",
                            "required": True,
                            "schema": {"type": "string"}
                        }
                    ],
                    "responses": {"200": {"description": "Available voices"}}
                }
            },
            "/tts/generate": {
                "post": {
                    "summary": "Generate TTS audio",
                    "security": bearer,
                    "requestBody": json_body({
                        "text": {"type": "string"},
                        "provider": {"type": "string"},
                        "voice": {"type": "string"}
                    }),
                    "responses": {"200": {"description": "Generated audio response"}}
                }
            },
            "/transcribe": {
                "post": {
                    "summary": "Audio transcription",
                    "security": bearer,
                    "requestBody": {
                        "required": True,
                        "content": {
                            "multipart/form-data": {
                                "schema": {
                                    "type": "object",
                                    "properties": {
                                        "audio_file": {
                                            "type": "string",
                                            "format": "binary"
                                        }
                                    }
                                }
                            }
                        }
                    },
                    "responses": {"200": {"description": "Transcription result"}}
                }
            },
            "/audio/{audio_id}": file_get("Serve audio file", "audio_id", "audio/mpeg", "Audio file"),
            "/image/{image_id}": file_get("Serve image file", "image_id", "image/jpeg", "Image file"),
            "/generate-api-key": {
                "post": {
                    "summary": "Generate a new API key",
                    "requestBody": json_body({"name": {"type": "string"}}),
                    "responses": {"200": {"description": "API key generated successfully"}}
                }
            },
            "/api-keys/list": {
                "get": {
                    "summary": "List user API keys",
                    "responses": {"200": {"description": "List of user API keys"}}
                }
            },
            "/api-keys/{api_key}": {
                "delete": {
                    "summary": "Delete an API key",
                    "parameters": [
                        {
                            "name": "api_key",
                            "in": "path",
                            "required": True,
                            "schema": {"type": "string"}
                        }
                    ],
                    "responses": {"200": {"description": "API key deleted successfully"}}
                }
            }
        },
        "components": {
            "securitySchemes": {
                "bearerAuth": {
                    "type": "http",
                    "scheme": "bearer",
                    "bearerFormat": "JWT"
                }
            }
        }
    }
595
+
596
@app.get("/health")
async def health_check():
    """Report proxy health plus backend reachability.

    Always returns 200; failure to reach the backend is reported in the
    payload rather than as an HTTP error.
    """
    try:
        # Probe the backend's own /health endpoint.
        backend_resp = await make_backend_request("/health")
        backend_status = "healthy" if backend_resp.status_code == 200 else "unhealthy"
        return {
            "status": "healthy",
            "backend_status": backend_status,
            "backend_url": BACKEND_URL,
            "hf_token_configured": bool(HF_TOKEN),
            "timestamp": datetime.now().isoformat()
        }
    except Exception as exc:
        # Backend unreachable or probe raised — report degraded state.
        return {
            "status": "unhealthy",
            "error": str(exc),
            "backend_url": BACKEND_URL,
            "hf_token_configured": bool(HF_TOKEN),
            "timestamp": datetime.now().isoformat()
        }
619
+
620
+
621
+ # ==================== CHAT COMPLETIONS ====================
622
+
623
@app.get("/models")
async def get_chat_models(token: str = Depends(verify_token)):
    """Return the list of chat models exposed by the backend."""
    backend_resp = await make_backend_request("/models")
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail="Failed to fetch models")
    return backend_resp.json()
631
+
632
@app.post("/chat/completions")
async def chat_completions(request: SimpleChatRequest, token: str = Depends(verify_token)):
    """OpenAI-compatible chat completions proxy.

    Accepts either a `messages` list (OpenAI style) or a legacy `prompt`;
    rejects requests that provide neither.
    """
    if not (request.messages or request.prompt):
        raise HTTPException(status_code=400, detail="Either 'messages' or 'prompt' must be provided")

    # Drop None fields so the backend sees only what the caller supplied.
    payload = request.dict(exclude_none=True)

    backend_resp = await make_backend_request("/chat/completions", "POST", payload)
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
647
+
648
+ # ==================== IMAGE GENERATION ====================
649
+
650
@app.post("/image/generate")
async def generate_image(request: GenerationRequest, token: str = Depends(verify_token)):
    """Proxy image generation to the backend."""
    backend_resp = await make_backend_request("/image/generate", "POST", request.dict())
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
658
+
659
+ # ==================== WEB SEARCH ====================
660
+
661
@app.post("/web/search")
async def web_search(request: SearchRequest, token: str = Depends(verify_token)):
    """Proxy web search to the backend."""
    backend_resp = await make_backend_request("/web/search", "POST", request.dict())
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
669
+
670
@app.post("/image/search")
async def image_search(request: SearchRequest, token: str = Depends(verify_token)):
    """Proxy image search to the backend."""
    backend_resp = await make_backend_request("/image/search", "POST", request.dict())
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
678
+
679
@app.post("/videos/search")
async def video_search(request: SearchRequest, token: str = Depends(verify_token)):
    """Proxy video search to the backend."""
    backend_resp = await make_backend_request("/videos/search", "POST", request.dict())
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
687
+
688
+ # ==================== TEXT-TO-SPEECH ====================
689
+
690
@app.get("/tts/{provider}/voices")
async def get_tts_voices(provider: str, token: str = Depends(verify_token)):
    """Return the voices the backend offers for the given TTS provider."""
    backend_resp = await make_backend_request(f"/tts/{provider}/voices")
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
698
+
699
@app.post("/tts/generate")
async def generate_tts(request: TTSRequest, token: str = Depends(verify_token)):
    """Proxy text-to-speech generation to the backend."""
    backend_resp = await make_backend_request("/tts/generate", "POST", request.dict())
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
707
+
708
@app.get("/audio/{audio_id}")
async def serve_audio(audio_id: str, token: str = Depends(verify_token)):
    """
    Proxy audio file serving

    To access audio files, use: https://at41rv-a77.hf.space/v1/audio/{audio_id}
    Example: https://at41rv-a77.hf.space/v1/audio/GesseritTTS_Emma_1750428123_abc12345
    """
    try:
        response = await make_backend_request(f"/audio/{audio_id}")
        if response.status_code == 200:
            # The whole file is already buffered; wrap it so the client can
            # stream it as audio/mpeg.
            return StreamingResponse(
                iter([response.content]),
                media_type="audio/mpeg",
                headers={"Content-Disposition": f"inline; filename={audio_id}.mp3"}
            )
        raise HTTPException(status_code=response.status_code, detail="Audio file not found")
    except HTTPException:
        # BUGFIX: propagate the backend's real status code; previously the
        # generic handler below caught this and converted every failure
        # (including 5xx) into a 404.
        raise
    except Exception as e:
        logger.error(f"Audio serving error: {str(e)}")
        raise HTTPException(status_code=404, detail="Audio file not found")
729
+
730
@app.get("/image/{image_id}")
async def serve_image(image_id: str, token: str = Depends(verify_token)):
    """
    Proxy image file serving

    To access image files, use: https://at41rv-a77.hf.space/v1/image/{image_id}
    Example: https://at41rv-a77.hf.space/v1/image/img_1750428123_abc12345
    """
    try:
        response = await make_backend_request(f"/image/{image_id}")
        if response.status_code == 200:
            # The whole file is already buffered; wrap it so the client can
            # stream it as image/jpeg.
            return StreamingResponse(
                iter([response.content]),
                media_type="image/jpeg",
                headers={"Content-Disposition": f"inline; filename={image_id}.jpg"}
            )
        raise HTTPException(status_code=response.status_code, detail="Image file not found")
    except HTTPException:
        # BUGFIX: propagate the backend's real status code; previously the
        # generic handler below caught this and converted every failure
        # (including 5xx) into a 404.
        raise
    except Exception as e:
        logger.error(f"Image serving error: {str(e)}")
        raise HTTPException(status_code=404, detail="Image file not found")
751
+
752
@app.post("/transcribe")
async def transcribe_audio(audio_file: UploadFile = File(...), token: str = Depends(verify_token)):
    """Proxy audio transcription to the backend.

    Validates the upload (non-empty, at most 50MB) before forwarding.
    Error conditions are reported as 200-status dicts with success=False,
    matching the backend's payload shape.
    """
    try:
        logger.info(f"Proxy: Received transcription request for file: {audio_file.filename}")
        logger.info(f"Proxy: Content type: {audio_file.content_type}")

        # Read file content once; reused below for validation and forwarding.
        file_content = await audio_file.read()
        logger.info(f"Proxy: File size: {len(file_content)} bytes")

        if len(file_content) == 0:
            return {
                "success": False,
                "message": "Empty file uploaded",
                "transcription": None,
                "filename": audio_file.filename,
                "file_size": 0
            }

        # Check file size (limit to 50MB)
        max_size = 50 * 1024 * 1024  # 50MB
        if len(file_content) > max_size:
            return {
                "success": False,
                "message": "File too large. Maximum size is 50MB",
                "transcription": None,
                "filename": audio_file.filename,
                "file_size": len(file_content)
            }

        # BUGFIX: reuse the bytes already read instead of seeking and reading
        # the upload a second time (doubled the work for large files).
        files = {
            "audio_file": (audio_file.filename or "audio.mp3", file_content, audio_file.content_type or "audio/mpeg")
        }

        logger.info("Proxy: Forwarding request to backend...")

        # Transcription is slow; allow a longer timeout than the default.
        response = await make_backend_request("/transcribe", "POST", files=files, timeout=120.0)

        logger.info(f"Proxy: Backend responded with status: {response.status_code}")

        if response.status_code == 200:
            result = response.json()
            logger.info("Proxy: Successfully received transcription from backend")
            return result

        logger.error(f"Proxy: Backend error: {response.text}")
        return {
            "success": False,
            "message": f"Backend error: {response.status_code}",
            "transcription": None,
            "filename": audio_file.filename,
            "file_size": len(file_content)
        }

    except Exception as e:
        logger.error(f"Proxy: Transcription error: {str(e)}")
        return {
            "success": False,
            "message": f"Proxy error: {str(e)}",
            "transcription": None,
            "filename": audio_file.filename if audio_file else None,
            "file_size": None
        }
821
+
822
+ # ==================== GENERIC PROXY ====================
823
+
824
+ # Generic proxy endpoints removed for better security and control
825
+
826
+ # ==================== API KEY MANAGEMENT ====================
827
+
828
def get_user_id_from_request(request) -> str:
    """Derive a simple per-user identifier from the request's client IP.

    NOTE(review): IP-based identity is weak (NAT/proxies share addresses);
    session tokens or real user authentication would be more robust.
    Falls back to "unknown" when no client address is available.
    """
    host = getattr(request.client, "host", "unknown")
    return "user_" + host
834
+
835
@app.post("/generate-api-key", response_model=APIKeyResponse)
async def create_api_key(request: APIKeyCreateRequest, http_request: Request):
    """
    Generate a new named API key for accessing the OMNIAPI services

    Creates a permanent API key, starting with 'oa', that authenticates with
    all API endpoints. The caller's IP-derived user id is forwarded so the
    backend can enforce its per-user key limit.

    - **name**: Custom name for your API key (1-50 characters)

    Returns:
    - **api_key**: Your new API key (starts with 'oa')
    - **name**: The name you assigned to this key
    - **usage_note**: Instructions on how to use the API key

    Usage:
    Include the API key in the Authorization header as: `Bearer your_api_key_here`
    """
    try:
        payload = {
            "name": request.name,
            "user_id": get_user_id_from_request(http_request),
        }
        backend_resp = await make_backend_request("/generate-api-key", "POST", payload)

        if backend_resp.status_code != 200:
            raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
        return backend_resp.json()

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Failed to generate API key: {str(e)}"
        )
873
+
874
@app.get("/api-keys/list", response_model=UserAPIKeysResponse)
async def list_user_api_keys(http_request: Request, token: str = Depends(verify_token)):
    """
    List all API keys for the current authenticated user

    Forwards the caller's own bearer token to the backend, which returns key
    names, masked previews, creation/last-used timestamps and usage counts.
    Requires authentication - users can only view their own keys.
    """
    try:
        logger.info("Proxy: Authenticated API key list request")

        # Forward the caller's token (not HF_TOKEN) so the backend can scope
        # the listing to this user.
        headers = {
            "Authorization": f"Bearer {token}",
            "User-Agent": "OMNIAPI-Proxy/1.0",
            "Content-Type": "application/json"
        }

        async with httpx.AsyncClient(timeout=REQUEST_TIMEOUT) as client:
            response = await client.get(
                f"{BACKEND_URL}/api-keys/list",
                headers=headers
            )

        logger.info(f"Proxy: Backend responded with status {response.status_code}")

        if response.status_code == 200:
            result = response.json()
            logger.info(f"Proxy: Found {result.get('total_keys', 0)} API keys for authenticated user")
            return result

        logger.error(f"Proxy: Backend error: {response.text}")
        # BUGFIX: was a bare `except:` (also caught KeyboardInterrupt etc.);
        # catch only the expected decode/shape failures of the error body.
        try:
            detail = response.json().get("detail", response.text)
        except (ValueError, AttributeError):
            detail = response.text
        raise HTTPException(status_code=response.status_code, detail=detail)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Proxy: Failed to list API keys: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to list API keys: {str(e)}"
        )
926
+
927
@app.delete("/api-keys/{api_key}", response_model=APIKeyDeleteResponse)
async def delete_user_api_key(api_key: str, http_request: Request, token: str = Depends(verify_token)):
    """
    Delete a specific API key

    Only the user who created the key can delete it; the caller's own bearer
    token is forwarded so the backend can enforce ownership.

    - **api_key**: The full API key to delete
    """
    try:
        user_id = get_user_id_from_request(http_request)
        # Log only a prefix of the key to avoid leaking the full secret.
        logger.info(f"Proxy: Delete request for API key {api_key[:10]}... from user {user_id}")

        headers = {
            "Authorization": f"Bearer {token}",
            "User-Agent": "OMNIAPI-Proxy/1.0",
            "Content-Type": "application/json"
        }

        async with httpx.AsyncClient(timeout=REQUEST_TIMEOUT) as client:
            response = await client.delete(
                f"{BACKEND_URL}/api-keys/{api_key}",
                headers=headers
            )

        logger.info(f"Proxy: Backend responded with status {response.status_code}")

        if response.status_code == 200:
            result = response.json()
            logger.info("Proxy: API key deleted successfully")
            return result

        logger.error(f"Proxy: Backend error: {response.text}")
        # BUGFIX: was a bare `except:` (also caught KeyboardInterrupt etc.);
        # catch only the expected decode/shape failures of the error body.
        try:
            detail = response.json().get("detail", response.text)
        except (ValueError, AttributeError):
            detail = response.text
        raise HTTPException(status_code=response.status_code, detail=detail)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Proxy: Failed to delete API key: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to delete API key: {str(e)}"
        )
978
+
979
if __name__ == "__main__":
    # Run a development server when executed directly; port 7862 is hard-coded
    # here (presumably to match the Space's configured port — confirm).
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7862)