# ELISA — Zephyr-7B chatbot Space (Gradio app for Hugging Face Spaces)
import os

import gradio as gr
from huggingface_hub import InferenceClient, login

# Initial configuration: read the token from the environment.  The literal
# "HF_TOKEN" fallback is a placeholder, not a valid token — set the real
# token via the HF_TOKEN environment variable (Spaces secret).
HF_TOKEN = os.getenv("HF_TOKEN", "HF_TOKEN")
MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"

# Authenticate against the Hugging Face Hub and build the inference client.
# On failure we log and leave `client` as None so later code fails with a
# clear AttributeError (caught inside respond()) instead of a NameError —
# the original except block left `client` undefined.
client = None
try:
    login(token=HF_TOKEN)
    client = InferenceClient(model=MODEL_NAME, token=HF_TOKEN, timeout=60)
except Exception as auth_error:
    print(f"Error de autenticación: {auth_error}")
def format_prompt(message, history, system_message):
    """Assemble a Zephyr-style chat prompt.

    Emits the `<|system|>` / `<|user|>` / `<|assistant|>` turn markers with
    `</s>` terminators, replaying `history` (a list of (user, assistant)
    pairs) and ending with an open assistant turn for the model to complete.
    """
    parts = [f"<|system|>\n{system_message}</s>\n"]
    for user_turn, assistant_turn in history:
        parts.append(f"<|user|>\n{user_turn}</s>\n")
        parts.append(f"<|assistant|>\n{assistant_turn}</s>\n")
    parts.append(f"<|user|>\n{message}</s>\n<|assistant|>\n")
    return "".join(parts)
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Stream a chat reply from the Zephyr model.

    Yields progressively longer partial responses so the Gradio UI can render
    the answer token-by-token.  Any failure (including an unconfigured
    `client`) is logged and converted into a friendly error message instead
    of raising, since this is the top-level UI callback boundary.
    """
    try:
        # Reject empty / whitespace-only input early.
        if not message or not message.strip():
            yield "Por favor, ingresa un mensaje válido."
            return
        prompt = format_prompt(message, history, system_message)
        # Clamp user-supplied knobs to safe ranges before calling the API.
        generation_params = {
            "max_new_tokens": min(max_tokens, 1024),  # hard cap at 1024 tokens
            "temperature": max(0.1, min(temperature, 1.0)),
            "top_p": max(0.1, min(top_p, 1.0)),
            "do_sample": True,
            "truncate": 2048,  # trim the prompt to the model's context budget
        }
        # Stream tokens from the inference endpoint.
        stream = client.text_generation(
            prompt,
            stream=True,
            **generation_params
        )
        # Accumulate tokens and re-yield the growing answer for streaming UIs.
        response = ""
        for token in stream:
            response += token
            yield response
    except Exception as e:
        # Boundary handler: log the real error, show a generic message to the user.
        error_msg = f"Error en la generación: {str(e)}"
        print(error_msg)
        yield "🔴 Lo siento, tuve un problema al procesar tu mensaje. Inténtalo de nuevo más tarde."
# Custom CSS theme for the Gradio UI: purple gradient branding, rounded
# cards, and a dark-mode variant (`.dark` overrides at the bottom).
custom_css = """
:root {
    --primary: #6e48aa;
    --secondary: #9d50bb;
    --accent: #4776E6;
}
.gradio-container {
    max-width: 900px;
    margin: 20px auto;
    border-radius: 12px;
    box-shadow: 0 6px 18px rgba(0,0,0,0.1);
    background: white;
    padding: 25px;
}
.gradio-header {
    text-align: center;
    margin-bottom: 25px;
}
h1 {
    background: linear-gradient(45deg, var(--primary), var(--secondary));
    -webkit-background-clip: text;
    background-clip: text;
    color: transparent;
    font-size: 2.2rem;
    margin-bottom: 10px;
}
.gradio-description {
    color: #555;
    font-size: 1rem;
}
.gradio-chatbot {
    min-height: 450px;
    border: 1px solid #e0e0e0;
    border-radius: 10px;
    padding: 15px;
    background: #fafafa;
    margin-bottom: 20px;
}
.gradio-textbox textarea {
    border-radius: 8px !important;
    border: 1px solid #ddd !important;
    padding: 12px 15px !important;
    font-size: 15px !important;
    min-height: 100px !important;
}
.gradio-button {
    background: linear-gradient(45deg, var(--primary), var(--secondary)) !important;
    color: white !important;
    border: none !important;
    border-radius: 8px !important;
    padding: 12px 28px !important;
    font-weight: 500 !important;
    transition: all 0.3s !important;
}
.gradio-button:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 4px 12px rgba(110, 72, 170, 0.3) !important;
}
.gradio-slider .wrap {
    margin: 15px 0 !important;
}
.dark .gradio-container {
    background: #1a1a1a;
}
.dark .gradio-chatbot {
    background: #252525;
    border-color: #444;
}
"""
# Interface configuration.  The original file's Spanish strings were
# mojibake'd by extraction (UTF-8 read as GBK); they are restored here.
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Textbox(
            value="Eres ELISA, un asistente de IA útil, preciso y amable. Desarrollado por Gerardo.",
            label="Configuración del Sistema",
            lines=3,
            max_lines=6
        ),
        gr.Slider(
            minimum=64,
            maximum=1024,
            value=256,
            step=32,
            label="Longitud de Respuesta (tokens)",
            info="Controla cuán extensa será la respuesta"
        ),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.7,
            step=0.05,
            label="Creatividad (Temperatura)",
            info="Valores más altos = respuestas más creativas"
        ),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.9,
            step=0.05,
            label="Enfoque (Top-p)",
            info="Controla la diversidad de palabras"
        ),
    ],
    css=custom_css,
    title="🤖 ELISA - Asistente de IA",
    description="Chatbot avanzado desarrollado por Gerardo usando Hugging Face",
    examples=[
        ["Hola, ¿qué puedes hacer?"],
        ["Explícame el machine learning en términos simples"],
        ["Recomiéndame libros sobre IA"]
    ],
    # NOTE(review): submit/retry/undo/clear button kwargs were removed from
    # ChatInterface in Gradio 5 — confirm the pinned gradio version supports them.
    submit_btn="Enviar",
    retry_btn="Reintentar",
    undo_btn="Deshacer",
    clear_btn="Limpiar",
    theme="soft"
)
# Launch configuration: only start the server when run as a script.
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",  # bind all interfaces (required inside Spaces/containers)
        server_port=7860,  # standard Hugging Face Spaces port
        share=False,  # no public gradio.live tunnel
        debug=True,  # surface tracebacks in the console
        favicon_path=None,
        auth=None  # no login gate
    )