import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the Arabic sentiment model
model_name = "CAMeL-Lab/bert-base-arabic-camelbert-da-sentiment"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()

# Take the label names from the model config so they match the classifier head's
# output order instead of relying on a hard-coded list
labels = model.config.id2label


# Sentiment analysis function
def analyze_arabic_sentiment(text):
    if not text.strip():
        return "الرجاء إدخال نص"  # "Please enter some text"
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=1)
    label_id = torch.argmax(probs).item()
    confidence = float(probs[0][label_id])
    # Returns e.g. "Classification: ... / Confidence score: ..." in Arabic
    return f"التصنيف: {labels[label_id]}\nدرجة الثقة: {round(confidence, 3)}"


# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## 🔍 Arabic Sentiment Analyzer")
    gr.Markdown("اكتب نصًا عربيًا وسيتم تحديد نوع المشاعر.")  # "Write an Arabic text and its sentiment will be detected."
    text_input = gr.Textbox(label="النص")       # "Text"
    text_output = gr.Textbox(label="النتيجة")   # "Result"
    analyze_btn = gr.Button("تحليل")            # "Analyze"
    analyze_btn.click(fn=analyze_arabic_sentiment, inputs=text_input, outputs=text_output)

demo.launch()
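
# Minimal usage sketch (not part of the original app): save the script as, e.g.,
# app.py and run `python app.py`; Gradio then serves the UI locally (by default
# at http://127.0.0.1:7860). For a quick check without the UI, the function can
# also be called directly; the sample sentence below is illustrative only:
#
#     print(analyze_arabic_sentiment("الخدمة ممتازة وسريعة"))  # "The service is excellent and fast"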