|
|
""" |
|
|
Main entrypoint for Research Agent |
|
|
Exposes HTTP API server for Blaxel deployment with agentic capabilities |
|
|
""" |
|
|
|
|
|
import os |
|
|
import logging |
|
|
from typing import Dict, Any |
|
|
from fastapi import FastAPI, HTTPException |
|
|
from fastapi.responses import JSONResponse, StreamingResponse |
|
|
from pydantic import BaseModel |
|
|
import blaxel.core |
|
|
|
|
|
from agent import ResearchAgent |
|
|
from models import RiskData, BuildingType |
|
|
|
|
|
|
|
|
# Configure root logging once at import time so request/agent activity is
# visible in deployment logs; INFO keeps traces without debug noise.
logging.basicConfig(level=logging.INFO)

# Module-level logger named after this module, per logging convention.
logger = logging.getLogger(__name__)

# FastAPI application instance; served by uvicorn via the __main__ guard below.
app = FastAPI(
    title="Research Agent",
    description="Agentic construction research using DuckDuckGo and Fetch MCPs with LLM analysis"
)
|
|
|
|
|
|
|
|
class ResearchRequest(BaseModel):
    """Request payload shared by the /, /research, and /chat endpoints."""
    # Loosely-typed risk data; validated downstream via RiskData(**risks).
    risks: Dict[str, Any]
    # Building category label; forwarded verbatim to the research agent.
    building_type: str
|
|
|
|
|
|
|
|
class ResearchResponse(BaseModel):
    """Response envelope: either recommendations (success) or an error message."""
    # True when the agent produced recommendations without raising.
    success: bool
    # model_dump() of the agent's recommendations; None on failure.
    recommendations: Dict[str, Any] | None = None
    # Human-readable error description; None on success.
    error: str | None = None
|
|
|
|
|
|
|
|
@app.get("/health") |
|
|
async def health_check(): |
|
|
"""Health check endpoint""" |
|
|
return {"status": "healthy", "agent": "research-agent", "agentic": True} |
|
|
|
|
|
|
|
|
@app.post("/", response_model=ResearchResponse) |
|
|
@app.post("/research", response_model=ResearchResponse) |
|
|
async def research_construction(request: ResearchRequest): |
|
|
""" |
|
|
Research construction recommendations with agentic LLM analysis |
|
|
|
|
|
Args: |
|
|
request: Research request with risk data and building type |
|
|
|
|
|
Returns: |
|
|
Construction recommendations with LLM-enhanced analysis or error response |
|
|
""" |
|
|
try: |
|
|
logger.info(f"Researching construction recommendations for {request.building_type}") |
|
|
|
|
|
|
|
|
agent = ResearchAgent() |
|
|
|
|
|
|
|
|
risks = RiskData(**request.risks) |
|
|
|
|
|
|
|
|
recommendations = await agent.get_agentic_recommendations( |
|
|
risks=risks, |
|
|
building_type=request.building_type |
|
|
) |
|
|
|
|
|
|
|
|
return ResearchResponse( |
|
|
success=True, |
|
|
recommendations=recommendations.model_dump() |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Research error: {str(e)}") |
|
|
raise HTTPException(status_code=500, detail={ |
|
|
'success': False, |
|
|
'error': str(e) |
|
|
}) |
|
|
|
|
|
|
|
|
@app.post("/chat") |
|
|
async def chat_research(request: ResearchRequest): |
|
|
""" |
|
|
Streaming agentic research with LLM analysis |
|
|
|
|
|
Args: |
|
|
request: Research request with risk data and building type |
|
|
|
|
|
Returns: |
|
|
Streaming text response with recommendations |
|
|
""" |
|
|
try: |
|
|
logger.info(f"Starting streaming research for {request.building_type}") |
|
|
|
|
|
|
|
|
agent = ResearchAgent() |
|
|
|
|
|
|
|
|
risks = RiskData(**request.risks) |
|
|
|
|
|
|
|
|
async def generate(): |
|
|
try: |
|
|
async for chunk in agent.get_streaming_recommendations( |
|
|
risks=risks, |
|
|
building_type=request.building_type |
|
|
): |
|
|
yield chunk |
|
|
except Exception as e: |
|
|
logger.error(f"Streaming error: {str(e)}") |
|
|
yield f"\n\nError: {str(e)}\n" |
|
|
|
|
|
return StreamingResponse( |
|
|
generate(), |
|
|
media_type="text/plain" |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Chat research error: {str(e)}") |
|
|
raise HTTPException(status_code=500, detail={ |
|
|
'success': False, |
|
|
'error': str(e) |
|
|
}) |
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
import uvicorn |
|
|
|
|
|
|
|
|
host = os.getenv("BL_SERVER_HOST", "0.0.0.0") |
|
|
port = int(os.getenv("BL_SERVER_PORT", "8000")) |
|
|
|
|
|
logger.info(f"Starting Research Agent on {host}:{port}") |
|
|
|
|
|
uvicorn.run(app, host=host, port=port) |
|
|
|