```python
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

from scripts.main import stream_chat_response

app = FastAPI(title="Cortex AI API", description="Backend API for Cortex AI chat model")

# Allow all origins so a browser frontend hosted elsewhere can call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class ChatRequest(BaseModel):
    message: str
    thread_id: str


@app.get("/")  # health-check route; decorator restored from context
async def root():
    return {"message": "Cortex AI API is running"}


@app.post("/chat/stream")  # decorator restored; the exact path is assumed
async def chat_stream(request: ChatRequest):
    # Synchronous generator that yields tokens as the model produces them.
    def event_generator():
        try:
            for chunk in stream_chat_response(
                user_message=request.message,
                thread_id=request.thread_id,
            ):
                # chunk structure depends on model/provider
                if hasattr(chunk[0], "content"):
                    token = chunk[0].content
                    if token:
                        yield token
        except Exception as e:
            # Surface errors to the client inside the stream itself.
            yield f"\n[ERROR]: {str(e)}"

    return StreamingResponse(
        event_generator(),
        media_type="text/plain",
    )
```
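Once the file is saved (the name `app.py` is an assumption here), the server can be started with `uvicorn app:app --reload`. Below is a minimal client sketch for consuming the stream; the host, port, and `/chat/stream` path match the assumptions made in the restored decorator above.

```python
# Minimal streaming-client sketch; assumes the server is reachable at
# http://localhost:8000 and exposes POST /chat/stream as shown above.
import requests

resp = requests.post(
    "http://localhost:8000/chat/stream",
    json={"message": "Hello, Cortex!", "thread_id": "demo-thread-1"},
    stream=True,  # keep the connection open and read tokens as they arrive
)
resp.raise_for_status()

# chunk_size=None yields data as the server flushes it;
# decode_unicode=True converts the raw bytes to str.
for token in resp.iter_content(chunk_size=None, decode_unicode=True):
    print(token, end="", flush=True)
```

Because `event_generator` is a plain synchronous generator, Starlette iterates it in a threadpool behind the scenes; an `async` generator would work equally well with `StreamingResponse`.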