#!/usr/bin/env python3
"""MCP Sentiment Analysis Tool using Hugging Face Inference API.

This application provides both a Gradio UI and an MCP server endpoint
for sentiment analysis functionality.
"""

import logging
import os

import gradio as gr
import requests
import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse

# Load environment variables
load_dotenv()

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Configuration
HF_API_TOKEN = os.getenv("HF_TOKEN")
SENTIMENT_API_URL = "https://api-inference.huggingface.co/models/cardiffnlp/twitter-roberta-base-sentiment"

# UI Configuration
TITLE = "🎭 MCP Sentiment Analysis Tool"
DESCRIPTION = """
Enter text to analyze its sentiment using AI. This tool provides sentiment analysis
with confidence scores and can be integrated with AI assistants via MCP.

**Supported Sentiments:**
- 😊 **Positive** - Happy, excited, satisfied content
- 😐 **Neutral** - Factual, balanced, or mixed sentiment
- 😞 **Negative** - Sad, angry, disappointed content

**MCP Usage:**
- Endpoint: `POST /gradio_api/mcp/sse`
- Payload: `{"data": [""]}`
- Response: `{"data": [{"label": "POSITIVE/NEGATIVE/NEUTRAL", "score": 0.95, "all_scores": {...}}]}`

Example: Analyze customer feedback, social media posts, reviews, or any text content.
"""

EXAMPLES = [
    ["I absolutely love this new product! It exceeded all my expectations and works perfectly."],
    ["The weather today is partly cloudy with temperatures around 72 degrees."],
    ["I'm really frustrated with the poor customer service and long wait times."],
    ["This movie was okay, not great but not terrible either."],
    ["Congratulations on your amazing achievement! You deserve all the success."]
]


def analyze_sentiment_core(text: str) -> dict:
    """
    Core sentiment analysis function that returns structured data.

    Args:
        text: The text to analyze for sentiment

    Returns:
        dict: Sentiment analysis result with label, score, and all_scores,
              or a dict with a single "error" key describing the failure.
    """
    # Input validation: empty, whitespace-only, or trivially short text
    if not text or not text.strip():
        return {"error": "Please provide text to analyze."}

    if len(text.strip()) < 3:
        return {"error": "Text too short for meaningful sentiment analysis."}

    if not HF_API_TOKEN:
        return {"error": "Hugging Face API Token not found. Please set HF_TOKEN environment variable."}

    # Prepare the API request
    headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
    payload = {"inputs": text.strip()}

    try:
        # Lazy %-formatting so the slice only happens when INFO is enabled
        logger.info("Analyzing sentiment for text: %s...", text[:50])

        # Make API request with timeout
        response = requests.post(
            SENTIMENT_API_URL,
            headers=headers,
            json=payload,
            timeout=30
        )
        response.raise_for_status()

        result = response.json()

        # Handle different response formats: a list of score dicts on
        # success, or a dict with an "error" key from the HF API
        if isinstance(result, list) and len(result) > 0:
            scores = result[0]
        elif isinstance(result, dict) and "error" in result:
            if "currently loading" in result["error"].lower():
                return {"error": "Model is currently loading. Please try again in a few moments."}
            return {"error": f"API Error: {result['error']}"}
        else:
            return {"error": "Unexpected response format from API."}

        # Process sentiment scores
        if not scores:
            return {"error": "No sentiment scores returned."}

        # Find the highest confidence sentiment
        top_sentiment = max(scores, key=lambda x: x["score"])
        label = top_sentiment["label"].upper()
        confidence = top_sentiment["score"]

        # Create all_scores dictionary keyed by upper-cased label
        all_scores = {sentiment["label"].upper(): sentiment["score"] for sentiment in scores}

        return {
            "label": label,
            "score": confidence,
            "all_scores": all_scores
        }

    except requests.exceptions.HTTPError as e:
        # e.response can be None when the error was raised without a
        # server response attached — guard before reading status_code
        status = e.response.status_code if e.response is not None else None
        if status == 503:
            return {"error": "Service temporarily unavailable. The sentiment analysis model may be loading. Please try again in a moment."}
        if status == 429:
            return {"error": "Rate limit exceeded. Please wait a moment before trying again."}
        return {"error": f"HTTP Error {status}: {e!s}"}
    except requests.exceptions.Timeout:
        return {"error": "Request timed out. Please try again with shorter text."}
    except requests.exceptions.RequestException as e:
        return {"error": f"Request failed: {e!s}"}
    except Exception as e:
        logger.error("Unexpected error in sentiment analysis: %s", e)
        return {"error": f"Unexpected error: {e!s}"}


def analyze_sentiment(text: str) -> str:
    """
    Analyze sentiment of the provided text for Gradio UI.

    Args:
        text: The text to analyze for sentiment

    Returns:
        str: Formatted sentiment analysis result or error message
    """
    result = analyze_sentiment_core(text)

    if "error" in result:
        return f"❌ Error: {result['error']}"

    label = result["label"]
    confidence = result["score"]
    all_scores = result["all_scores"]

    # Format emoji and description
    emoji_map = {
        "POSITIVE": "😊",
        "NEGATIVE": "😞",
        "NEUTRAL": "😐"
    }
    emoji = emoji_map.get(label, "🤔")

    # Create detailed result
    result_text = f"## {emoji} Sentiment Analysis Result\n\n"
    result_text += f"**Primary Sentiment:** {label} ({confidence:.1%} confidence)\n\n"

    # Add all scores, highest confidence first
    result_text += "**Detailed Scores:**\n"
    for score_label, score_value in sorted(all_scores.items(), key=lambda x: x[1], reverse=True):
        score_emoji = emoji_map.get(score_label, "🤔")
        result_text += f"- {score_emoji} {score_label}: {score_value:.1%}\n"

    # Add interpretation based on confidence thresholds
    result_text += "\n**Interpretation:**\n"
    if confidence >= 0.8:
        result_text += f"High confidence {label.lower()} sentiment detected."
    elif confidence >= 0.6:
        result_text += f"Moderate confidence {label.lower()} sentiment detected."
    else:
        result_text += "Low confidence - sentiment may be mixed or ambiguous."

    return result_text


# FastAPI app for MCP endpoint
app = FastAPI()


@app.post("/gradio_api/mcp/sse")
async def mcp_sentiment_endpoint(request: dict):
    """MCP endpoint for sentiment analysis.

    Expects a JSON body of the form {"data": ["text_to_analyze"]} and
    returns {"data": [result_dict]} on success.
    """
    try:
        # Extract data from request
        if "data" not in request or not isinstance(request["data"], list):
            raise HTTPException(status_code=400, detail="Invalid request format. Expected: {'data': ['text_to_analyze']}")

        if len(request["data"]) < 1:
            raise HTTPException(status_code=400, detail="Missing text data. Expected: {'data': ['text_to_analyze']}")

        text = request["data"][0]

        # Perform sentiment analysis
        result = analyze_sentiment_core(text)

        if "error" in result:
            return JSONResponse(content={"data": [f"❌ Error: {result['error']}"]})

        # Return MCP-compatible response
        return JSONResponse(content={"data": [result]})

    except HTTPException:
        # Re-raise so FastAPI returns the intended 400 — the broad handler
        # below would otherwise convert validation errors into 500s
        raise
    except Exception as e:
        logger.error("MCP endpoint error: %s", e)
        return JSONResponse(
            status_code=500,
            content={"data": [f"❌ Server error: {e!s}"]}
        )


def create_gradio_interface() -> gr.Interface:
    """Create and configure the Gradio interface."""
    interface = gr.Interface(
        fn=analyze_sentiment,
        inputs=[
            gr.Textbox(
                lines=4,
                max_lines=10,
                placeholder="Enter text to analyze sentiment (e.g., 'I love this product!', 'This is terrible service', 'The weather is nice today')...",
                label="Text to Analyze",
                info="Enter any text content - reviews, social media posts, feedback, etc."
            )
        ],
        outputs=[
            gr.Markdown(label="Sentiment Analysis Result")
        ],
        title=TITLE,
        description=DESCRIPTION,
        examples=EXAMPLES,
        theme=gr.themes.Soft(),
        allow_flagging="never",
        analytics_enabled=False,
        css="""
        .gradio-container {
            max-width: 800px !important;
            margin: auto !important;
        }
        .example-inputs {
            display: flex;
            flex-wrap: wrap;
            gap: 10px;
        }
        """
    )
    return interface


def main():
    """Main application entry point."""
    logger.info("Starting MCP Sentiment Analysis Tool...")

    # Create the Gradio interface
    interface = create_gradio_interface()

    # gr.Interface has no `.app` attribute before launch; use Gradio's
    # documented FastAPI integration helper to mount the UI at "/"
    gr.mount_gradio_app(app, interface, path="/")

    # Launch the combined FastAPI + Gradio app
    logger.info("Launching FastAPI + Gradio interface...")
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=7860,
        log_level="info"
    )


if __name__ == "__main__":
    main()