File size: 4,084 Bytes
30ea2d5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
"""
Main entrypoint for Research Agent
Exposes HTTP API server for Blaxel deployment with agentic capabilities
"""

import os
import logging
from typing import Dict, Any
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
import blaxel.core  # Enable instrumentation

from agent import ResearchAgent
from models import RiskData, BuildingType

# Configure logging once at import time; INFO keeps per-request messages visible.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Create FastAPI app; title/description surface in the generated OpenAPI docs.
app = FastAPI(
    title="Research Agent",
    description="Agentic construction research using DuckDuckGo and Fetch MCPs with LLM analysis"
)


class ResearchRequest(BaseModel):
    """Request body for the research endpoints (`/`, `/research`, `/chat`).

    Carries the raw risk assessment and the target building category.
    """
    # Raw risk payload; handlers validate it by constructing RiskData(**risks).
    risks: Dict[str, Any]
    # Building category string, passed through to the agent unchanged.
    # NOTE(review): a BuildingType model is imported but not enforced here —
    # presumably any string is accepted; confirm against callers.
    building_type: str


class ResearchResponse(BaseModel):
    """Response body for the synchronous research endpoints."""
    # True when recommendations were produced; errors are reported via
    # HTTPException(500) rather than success=False in practice (see handlers).
    success: bool
    # Serialized recommendations (model_dump() of the agent result); None on failure.
    recommendations: Dict[str, Any] | None = None
    # Human-readable error message; None on success.
    error: str | None = None


@app.get("/health")
async def health_check():
    """Liveness probe.

    Returns a static payload so orchestrators can confirm the service is
    up and that it identifies as the agentic research agent.
    """
    payload = {
        "status": "healthy",
        "agent": "research-agent",
        "agentic": True,
    }
    return payload


@app.post("/", response_model=ResearchResponse)
@app.post("/research", response_model=ResearchResponse)
async def research_construction(request: ResearchRequest):
    """
    Research construction recommendations with agentic LLM analysis.

    Args:
        request: Research request with risk data and building type.

    Returns:
        ResearchResponse with LLM-enhanced recommendations.

    Raises:
        HTTPException: 500 with a structured detail payload on any failure
            (invalid risk data, agent errors, etc.).
    """
    try:
        # Lazy %-style args: the message is only formatted if INFO is enabled.
        logger.info(
            "Researching construction recommendations for %s",
            request.building_type,
        )

        # A fresh agent per request keeps requests isolated from one another.
        agent = ResearchAgent()

        # Validate/parse the raw dict into the typed risk model; a schema
        # mismatch raises here and is reported as a 500 below.
        risks = RiskData(**request.risks)

        # Get agentic recommendations (with LLM if available).
        recommendations = await agent.get_agentic_recommendations(
            risks=risks,
            building_type=request.building_type,
        )

        # model_dump() yields a plain dict for JSON serialization.
        return ResearchResponse(
            success=True,
            recommendations=recommendations.model_dump(),
        )

    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        logger.exception("Research error: %s", e)
        # Chain the cause so the original error survives in debug output.
        raise HTTPException(status_code=500, detail={
            'success': False,
            'error': str(e),
        }) from e


@app.post("/chat")
async def chat_research(request: ResearchRequest):
    """
    Streaming agentic research with LLM analysis.

    Args:
        request: Research request with risk data and building type.

    Returns:
        StreamingResponse yielding plain-text recommendation chunks.

    Raises:
        HTTPException: 500 if setup fails before streaming begins
            (invalid risk data, agent construction errors).
    """
    try:
        # Lazy %-style args: formatted only when INFO logging is enabled.
        logger.info("Starting streaming research for %s", request.building_type)

        # A fresh agent per request keeps requests isolated from one another.
        agent = ResearchAgent()

        # Validate/parse the raw dict into the typed risk model.
        risks = RiskData(**request.risks)

        async def generate():
            # Errors raised mid-stream cannot change the HTTP status (headers
            # are already sent), so surface them in the response body instead.
            try:
                async for chunk in agent.get_streaming_recommendations(
                    risks=risks,
                    building_type=request.building_type,
                ):
                    yield chunk
            except Exception as e:
                # Full traceback in the log; short message to the client.
                logger.exception("Streaming error: %s", e)
                yield f"\n\nError: {str(e)}\n"

        return StreamingResponse(
            generate(),
            media_type="text/plain",
        )

    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        logger.exception("Chat research error: %s", e)
        # Chain the cause so the original error survives in debug output.
        raise HTTPException(status_code=500, detail={
            'success': False,
            'error': str(e),
        }) from e


if __name__ == "__main__":
    import uvicorn

    # Blaxel injects the bind address through these environment variables;
    # fall back to all-interfaces:8000 for local runs.
    bind_host = os.getenv("BL_SERVER_HOST", "0.0.0.0")
    bind_port = int(os.getenv("BL_SERVER_PORT", "8000"))

    logger.info(f"Starting Research Agent on {bind_host}:{bind_port}")

    uvicorn.run(app, host=bind_host, port=bind_port)