Commit: 988f362
Parent(s): 015cfbb
feat: provide model name through env vars
Files changed:
- .env.example +3 -0
- README.md +2 -2
- backend/ai_service.py +19 -12
- backend/config.py +1 -0
.env.example
CHANGED
@@ -3,6 +3,9 @@
 # OpenAI API Configuration (for AI narrator Desland)
 USE_OPENAI=true
 OPENAI_API_KEY=your_openai_api_key_here
+# OpenAI model to use (default: gpt-5-nano)
+# Options: gpt-5-nano, gpt-5-mini, gpt-4o-mini, gpt-4o, etc.
+OPENAI_MODEL=gpt-5-nano
 
 # Application Settings
 APP_NAME=Cluedo Custom
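For a quick local check that the new variable is picked up, a minimal sketch (assuming python-dotenv is available; the snippet itself is illustrative and not part of the commit):

# Hypothetical check: load .env and read the new setting.
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory
print(os.getenv("OPENAI_MODEL", "gpt-5-nano"))  # falls back to the documented default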
README.md
CHANGED
@@ -170,7 +170,7 @@ chat du voisin."
 
 ### Configuration IA
 
-- Modèle: gpt-5-
+- Modèle: gpt-5-mini
 - Température: 0.9 (créativité élevée)
 - Timeout: 3 secondes max
 - Fallback gracieux si indisponible
@@ -227,7 +227,7 @@ custom-cluedo/
 
 - **Backend** : FastAPI, Python 3.11, Pydantic
 - **Frontend** : React 18, Vite, TailwindCSS
-- **IA** : OpenAI gpt-5-
+- **IA** : OpenAI gpt-5-mini (optionnel)
 - **Stockage** : JSON (games.json)
 - **Déploiement** : Docker, Hugging Face Spaces
 
backend/ai_service.py
CHANGED
@@ -27,6 +27,7 @@ class AIService:
     Attributes:
         enabled (bool): Whether the AI service is active and ready to use
         client (OpenAI): OpenAI API client instance
+        model (str): OpenAI model to use for text generation
     """
 
     def __init__(self):
@@ -40,9 +41,13 @@ class AIService:
         - Extended timeout: 30 seconds total (connect: 5s, read: 25s)
         - Automatic retries: 3 attempts with exponential backoff
         - This handles network instability and API rate limits gracefully
+
+        The model is configurable via OPENAI_MODEL environment variable
+        (default: gpt-5-nano)
         """
         self.enabled = settings.USE_OPENAI and bool(settings.OPENAI_API_KEY)
         self.client = None
+        self.model = settings.OPENAI_MODEL
 
         if self.enabled:
             try:
@@ -52,16 +57,18 @@
                     30.0,  # Total timeout
                     connect=5.0,  # Connection timeout
                     read=25.0,  # Read timeout (API processing time)
-                    write=5.0  # Write timeout
+                    write=5.0,  # Write timeout
                 )
 
                 # Initialize client with timeout and retry strategy
                 self.client = OpenAI(
                     api_key=settings.OPENAI_API_KEY,
                     timeout=timeout,
-                    max_retries=3  # Retry up to 3 times on network errors
-                )
-                logger.info("OpenAI client initialized successfully (timeout=30s, retries=3)")
+                    max_retries=3,  # Retry up to 3 times on network errors
+                )
+                logger.info(
+                    f"OpenAI client initialized successfully (model={self.model}, timeout=30s, retries=3)"
+                )
             except Exception as e:
                 logger.error(f"Failed to initialize OpenAI client: {e}", exc_info=True)
                 self.enabled = False
@@ -173,11 +180,10 @@ VOCABULAIRE À UTILISER (subtilement):
 - "poupouille/péchailloux/tchoupinoux" = petit coquin
 - "chnawax masqué" = vilain coquinou
 - "armankaboul" = bordel !
-- "All RS5, erreur réseau" = il y a erreur
 - "Une poupée en pénitence calisse de sibouere" = quelque chose de bizarre
 
 Ton narratif: {narrative_tone}
-Sois sarcastique, condescendant et incisif. Moque la logique (ou l'absence de logique) de la suggestion. Utilise subtilement
+Sois sarcastique, condescendant et incisif. Moque la logique (ou l'absence de logique) de la suggestion. Utilise subtilement une expression si approprié."""
 
         logger.info(f"Generating suggestion comment for {player_name}")
         response = await asyncio.wait_for(
@@ -243,9 +249,8 @@ VOCABULAIRE À UTILISER (subtilement):
 - "chnawax masqué" = vilain coquinou
 - "armankaboul/Fourlestourtes" = bordel !
 - "Koikoubaiseyyyyy" = surprise !
-- "All RS5, erreur réseau" = il y a erreur
 
-Rends-le incisif et mémorable. Utilise subtilement
+Rends-le incisif et mémorable. Utilise subtilement une expression si approprié."""
 
         logger.info(
             f"Generating accusation comment for {player_name} (correct={was_correct})"
@@ -348,14 +353,15 @@ Sois sarcastique, minimise la victoire, suggère que c'était de la chance."""
 
         try:
             import time
+
             start_time = time.time()
-            logger.debug("Calling OpenAI API with chat completion (model:
+            logger.debug(f"Calling OpenAI API with chat completion (model: {self.model})")
 
             # Call OpenAI API without max_tokens or temperature parameters
             # The API will use default values which are appropriate for most use cases
             # The client has built-in retry logic (3 attempts) and 30s timeout
             response = self.client.chat.completions.create(
-                model=
+                model=self.model,
                 messages=[
                     {
                         "role": "system",
@@ -395,7 +401,9 @@ Garde tes réponses brèves (1 phrase pour les commentaires, 2-3 pour les scéna
             if response.choices and len(response.choices) > 0:
                 content = response.choices[0].message.content
                 if content:
-                    logger.debug(
+                    logger.debug(
+                        f"Generated content ({len(content)} chars): {content[:100]}..."
+                    )
                     return content.strip()
                 else:
                     logger.warning("Response content is None or empty")
@@ -407,8 +415,7 @@ Garde tes réponses brèves (1 phrase pour les commentaires, 2-3 pour les scéna
         except Exception as e:
             elapsed_time = time.time() - start_time
             logger.error(
-                f"Error in _generate_text after {elapsed_time:.2f}s: {e}",
-                exc_info=True
+                f"Error in _generate_text after {elapsed_time:.2f}s: {e}", exc_info=True
             )
             return ""
 
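Taken together, the hunks above amount to the client setup below. This is a simplified sketch, not the full class: the real __init__ also guards on settings.USE_OPENAI and disables the service on failure, and the api_key/model literals here stand in for settings.OPENAI_API_KEY and settings.OPENAI_MODEL.

# Sketch of the timeout/retry/model pattern introduced by this commit (illustrative only).
import httpx
from openai import OpenAI

# Extended timeout budget: 30s total, split across connect/read/write.
timeout = httpx.Timeout(30.0, connect=5.0, read=25.0, write=5.0)

client = OpenAI(
    api_key="sk-placeholder",  # in the app this comes from settings.OPENAI_API_KEY
    timeout=timeout,
    max_retries=3,             # retry up to 3 times on network errors
)

model = "gpt-5-nano"           # in the app this comes from settings.OPENAI_MODEL
response = client.chat.completions.create(
    model=model,
    messages=[{"role": "user", "content": "ping"}],
)
print(response.choices[0].message.content)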
backend/config.py
CHANGED
@@ -20,6 +20,7 @@ class Settings:
     # AI settings
     USE_OPENAI: bool = os.getenv("USE_OPENAI", "false").lower() == "true"
     OPENAI_API_KEY: str = os.getenv("OPENAI_API_KEY", "")
+    OPENAI_MODEL: str = os.getenv("OPENAI_MODEL", "gpt-5-nano")
 
     # Game settings
     MIN_PLAYERS: int = 3