# app.py
import gradio as gr
import os
import tempfile
import textwrap
from datetime import datetime
from typing import List, Dict, Any, Optional
from reportlab.lib.pagesizes import A4
from reportlab.pdfgen import canvas
from src.model_loader import load_local_model
from src.conversation import ConversationMemory
from src.chatbot import LocalChatbot
# -----------------------
# Initialize
# -----------------------
llm = load_local_model()
memory = ConversationMemory()
bot = LocalChatbot(llm, memory)
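# System-prompt presets, selected when a message starts with one of these keywords.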
INTENT_TEMPLATES = {
"math": "You are a math solver. Solve step-by-step only.",
"code": "You are a coding expert. Provide clean, working code.",
"civics": "Explain clearly like a Class 10 SST teacher.",
"exam": "Prepare concise exam-focused notes and important questions."
}
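# Timestamp helper for the UI display.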
def now_ts():
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# ----------------------
# Export TXT / PDF
# ----------------------
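# Write the conversation to temporary TXT and PDF files and return their paths.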
def export_chat_files(history: List[Dict[str, Any]]) -> Dict[str, Optional[str]]:
    tmpdir = tempfile.gettempdir()
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    txt_path = os.path.join(tmpdir, f"chat_history_{timestamp}.txt")
    pdf_path = os.path.join(tmpdir, f"chat_history_{timestamp}.pdf")
    def clean_content(content):
        """Extract plain text from the Gradio message format and strip the trailing timestamp line."""
        text = ""
        if isinstance(content, list):
            for item in content:
                if isinstance(item, dict) and "text" in item:
                    text += item["text"] + "\n"
                elif isinstance(item, str):
                    # Plain string parts were previously skipped; keep them too.
                    text += item + "\n"
        elif isinstance(content, dict) and "text" in content:
            text = content["text"]
        else:
            text = str(content)
        # Remove the timestamp line starting with ■ or 🕒
        lines = [l for l in text.splitlines() if not l.strip().startswith(("■", "🕒"))]
        return "\n".join(lines).strip()
    # ---------------- TXT FILE ----------------
    with open(txt_path, "w", encoding="utf-8") as f:
        for msg in history:
            role = msg.get("role", "user").capitalize()
            content = clean_content(msg.get("content", ""))
            f.write(f"{role}:\n{content}\n\n")
    # ---------------- PDF FILE ----------------
    try:
        c = canvas.Canvas(pdf_path, pagesize=A4)
        page_width, page_height = A4
        margin = 40
        y = page_height - margin
        line_height = 16
        font_size = 11
        c.setFont("Helvetica", font_size)
        for msg in history:
            role = msg.get("role", "user").capitalize()
            content = clean_content(msg.get("content", ""))
            lines = content.splitlines()
            for line in lines:
                # Wrap long lines and start a new page when the cursor reaches the bottom margin.
                wrapped = textwrap.wrap(line, width=90)
                for wline in wrapped:
                    if y < margin + line_height:
                        c.showPage()
                        c.setFont("Helvetica", font_size)
                        y = page_height - margin
                    c.drawString(margin, y, f"{role}: {wline}" if role == "User" else wline)
                    y -= line_height
            # Small gap between messages.
            y -= line_height // 2
        c.showPage()
        c.save()
    except Exception as e:
        print("PDF export failed:", e)
        pdf_path = None
    return {"txt": txt_path, "pdf": pdf_path}
# ----------------------
# Core chat function
# ----------------------
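# Chat handler: strip an optional intent keyword, query the model, and append the
# exchange (with a display timestamp) to the Gradio history.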
def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
    if history is None:
        history = []
    if not user_msg.strip():
        return history
    # Detect intent prefix from templates
    intent = None
    low = user_msg.lower()
    for key in INTENT_TEMPLATES:
        if low.startswith(key):
            intent = key
            user_msg = user_msg[len(key):].strip()
            break
    # Ask chatbot (pass intent)
    bot_reply = bot.ask(user_msg, intent=intent)
    ts = now_ts()
    bot_reply_ts = f"{bot_reply}\n\n🕒 {ts}"
    history.append({"role": "user", "content": str(user_msg)})
    history.append({"role": "assistant", "content": str(bot_reply_ts)})
    try:
        memory.add(user_msg, bot_reply)
    except Exception:
        # Memory persistence is best-effort; a failure should not break the chat.
        pass
    return history
# ----------------------
# UI / Gradio
# ----------------------
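# Custom styling; passed to gr.Blocks via the css parameter below.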
CUSTOM_CSS = """
/* GLOBAL */
.gradio-container {
background: linear-gradient(135deg, #f6f7f9 0%, #e9ecf1 100%);
font-family: Inter, system-ui;
}
#main_card h3 {
font-size: 28px !important;
font-weight: 700 !important;
background: linear-gradient(90deg, #0ea5e9, #06b6d4);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
text-align: center;
border-bottom: 2px solid rgba(0,0,0,0.15);
}
/* MAIN CARD */
#main_card {
background: #ffffff;
border: 1px solid #e3e8ef;
border-radius: 16px;
padding: 16px;
box-shadow: 0 4px 16px rgba(0,0,0,0.05);
}
/* Chat UI */
.gradio-chatbot .assistant {
background: #4f46e5 !important;
color: white !important;
border-radius: 14px;
padding: 12px;
}
.gradio-chatbot .user {
background: #f1f5f9 !important;
border-radius: 14px;
padding: 12px;
}
/* Input box */
#message-box textarea {
background: #e0e7ff !important;
border-radius: 12px !important;
font-size: 24px !important;
}
/* Send button */
.send-btn {
background: #4f46e5 !important;
color: white !important;
transition: background 0.2s ease, transform 0.2s ease;
}
.send-btn:hover {
background: #4338ca !important;
transform: scale(1.05);
}
/* Mic / Voice input button */
.icon-btn {
background: #f1f5f9 !important;
transition: background 0.2s ease, transform 0.2s ease;
}
.icon-btn:hover {
background: #e2e8f0 !important;
transform: scale(1.05);
}
"""
PAGE_JS = """
<script>
(function(){
window.startVoiceRecognition = function(elem_id){
const wrapper = document.getElementById(elem_id);
if(!wrapper) return;
const textarea = wrapper.querySelector('textarea');
if(!textarea) return;
const SR = window.SpeechRecognition || window.webkitSpeechRecognition;
if(!SR) return alert('Speech recognition not supported');
const recog = new SR();
recog.lang = 'en-US';
recog.interimResults = false;
recog.onresult = function(e){
textarea.value = e.results[0][0].transcript;
// Fire an input event so Gradio's frontend registers the programmatic change.
textarea.dispatchEvent(new Event('input', { bubbles: true }));
textarea.style.background = "#e7f5ff";
setTimeout(() => { textarea.style.background = ""; }, 400);
};
recog.start();
};
})();
</script>
"""
# CSS and JS are passed to Blocks directly; <script> tags inside gr.HTML are not executed.
with gr.Blocks(title="Tayyab - Chatbot", css=CUSTOM_CSS, head=PAGE_JS) as demo:
    with gr.Row():
        with gr.Column(scale=1, min_width=220):
            gr.Markdown("### ⚡ Tools & Export")
            new_chat_btn = gr.Button("➕ New Chat")
            export_btn = gr.Button("📥 Export TXT/PDF")
        with gr.Column(scale=3, elem_id="main_card"):
            gr.Markdown("<h3>Smart Learning Assistant - Tayyab</h3>")
            # History entries are role/content dicts, so use the messages format.
            chatbot = gr.Chatbot(height=480, elem_id="chatbot_box", type="messages")
            with gr.Row():
                msg = gr.Textbox(placeholder="Type a message or use the mic", elem_id="message-box", show_label=False, lines=3)
                send_btn = gr.Button("Send", elem_classes="send-btn")
                mic_btn = gr.Button("🎤 Voice input", elem_classes="icon-btn")
            mic_btn.click(None, None, None, js='() => startVoiceRecognition("message-box")')
            file_txt = gr.File(visible=False)
            file_pdf = gr.File(visible=False)
            send_btn.click(generate_reply, inputs=[msg, chatbot], outputs=[chatbot])
            msg.submit(generate_reply, inputs=[msg, chatbot], outputs=[chatbot])
    def new_chat():
        memory.clear()
        return []
    new_chat_btn.click(new_chat, outputs=[chatbot])
    def export_handler(history):
        files = export_chat_files(history or [])
        return (
            gr.update(value=files.get("txt"), visible=True),
            gr.update(value=files.get("pdf"), visible=bool(files.get("pdf")))
        )
    export_btn.click(export_handler, inputs=[chatbot], outputs=[file_txt, file_pdf])
if __name__ == "__main__":
    demo.launch()