# Commit 30ea2d5 — Dexter Edep: "Adjust research agent"
"""
Main entrypoint for Research Agent
Exposes HTTP API server for Blaxel deployment with agentic capabilities
"""
import os
import logging
from typing import Dict, Any
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
import blaxel.core # Enable instrumentation
from agent import ResearchAgent
from models import RiskData, BuildingType
# Configure logging
logging.basicConfig(level=logging.INFO)
# Module-level logger named after this module, per stdlib convention
logger = logging.getLogger(__name__)
# Create FastAPI app
# NOTE: the ASGI server (uvicorn, see __main__ below) serves this `app` object.
app = FastAPI(
    title="Research Agent",
    description="Agentic construction research using DuckDuckGo and Fetch MCPs with LLM analysis"
)
class ResearchRequest(BaseModel):
    """Request model for research"""
    # Raw risk payload; parsed into a RiskData model inside the handlers,
    # so schema errors surface there rather than at request validation.
    risks: Dict[str, Any]
    # Building category the recommendations should target
    building_type: str
class ResearchResponse(BaseModel):
    """Response model for research"""
    # True on success; error responses are delivered via HTTPException detail
    success: bool
    # model_dump() of the agent's recommendations; None is the field default
    recommendations: Dict[str, Any] | None = None
    # Populated only when success is False
    error: str | None = None
@app.get("/health")
async def health_check():
    """Liveness probe: report that the research agent service is up."""
    payload = {
        "status": "healthy",
        "agent": "research-agent",
        "agentic": True,
    }
    return payload
@app.post("/", response_model=ResearchResponse)
@app.post("/research", response_model=ResearchResponse)
async def research_construction(request: ResearchRequest):
    """
    Research construction recommendations with agentic LLM analysis.

    Args:
        request: Research request with risk data and building type.

    Returns:
        ResearchResponse with LLM-enhanced recommendations.

    Raises:
        HTTPException: 500 with a {'success', 'error'} detail payload when
            risk parsing or the research itself fails.
    """
    try:
        # Lazy %-style args avoid formatting when INFO is disabled
        logger.info("Researching construction recommendations for %s", request.building_type)
        # Create research agent
        agent = ResearchAgent()
        # Validate/parse the raw risk payload into the typed model
        risks = RiskData(**request.risks)
        # Get agentic recommendations (with LLM if available)
        recommendations = await agent.get_agentic_recommendations(
            risks=risks,
            building_type=request.building_type
        )
        # model_dump() converts the pydantic result for JSON serialization
        return ResearchResponse(
            success=True,
            recommendations=recommendations.model_dump()
        )
    except Exception as e:
        # logger.exception records the full traceback, not just the message
        logger.exception("Research error: %s", e)
        # Chain the original exception (`from e`) for accurate debugging context
        raise HTTPException(status_code=500, detail={
            'success': False,
            'error': str(e)
        }) from e
@app.post("/chat")
async def chat_research(request: ResearchRequest):
    """
    Streaming agentic research with LLM analysis.

    Args:
        request: Research request with risk data and building type.

    Returns:
        StreamingResponse yielding plain-text recommendation chunks.

    Raises:
        HTTPException: 500 when setup (agent creation / risk parsing) fails
            before streaming begins.
    """
    try:
        # Lazy %-style args avoid formatting when INFO is disabled
        logger.info("Starting streaming research for %s", request.building_type)
        # Create research agent
        agent = ResearchAgent()
        # Validate/parse the raw risk payload into the typed model
        risks = RiskData(**request.risks)

        async def generate():
            # Errors raised mid-stream cannot become an HTTP error response
            # (headers are already sent), so they are reported in-band as text.
            try:
                async for chunk in agent.get_streaming_recommendations(
                    risks=risks,
                    building_type=request.building_type
                ):
                    yield chunk
            except Exception as e:
                # logger.exception records the full traceback for debugging
                logger.exception("Streaming error: %s", e)
                yield f"\n\nError: {str(e)}\n"

        return StreamingResponse(
            generate(),
            media_type="text/plain"
        )
    except Exception as e:
        # logger.exception records the full traceback, not just the message
        logger.exception("Chat research error: %s", e)
        # Chain the original exception (`from e`) for accurate debugging context
        raise HTTPException(status_code=500, detail={
            'success': False,
            'error': str(e)
        }) from e
if __name__ == "__main__":
    # uvicorn is only needed when running this module directly as a script
    import uvicorn

    # Blaxel injects these environment variables; the defaults allow local runs.
    bind_host = os.getenv("BL_SERVER_HOST", "0.0.0.0")
    bind_port = int(os.getenv("BL_SERVER_PORT", "8000"))
    logger.info(f"Starting Research Agent on {bind_host}:{bind_port}")
    uvicorn.run(app, host=bind_host, port=bind_port)