File size: 1,295 Bytes
58ddd29
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from scripts.main import stream_chat_response

# Application instance; title/description surface in the generated OpenAPI docs.
app = FastAPI(title="Cortex AI API", description="Backend API for Cortex AI chat model")

# Allow browser clients from any origin to call the API.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# overly permissive for production — consider pinning the frontend's origin.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class ChatRequest(BaseModel):
    """Request body for the ``/chat/stream`` endpoint."""

    # Free-form user message forwarded to the chat model.
    message: str
    # Conversation identifier used to continue an existing chat thread.
    thread_id: str


@app.get("/")
async def root():
    """Health-check endpoint confirming the API is up."""
    status_message = "Cortex AI API is running"
    return {"message": status_message}


@app.post("/chat/stream")
async def chat_stream(request: ChatRequest):
    """Stream a chat completion back to the client as plain text.

    Forwards the user's message to ``stream_chat_response`` and relays each
    generated token incrementally.

    Args:
        request: Chat payload — the message text and the conversation
            thread id used to continue an existing thread.

    Returns:
        StreamingResponse: An incremental ``text/plain`` body of tokens.
        On failure the stream ends with an in-band ``[ERROR]: ...`` marker
        instead of raising, since the HTTP status line is already sent.
    """

    def event_generator():
        try:
            for chunk in stream_chat_response(
                user_message=request.message,
                thread_id=request.thread_id
            ):
                # chunk structure depends on model/provider; skip empty
                # chunks instead of letting chunk[0] raise IndexError and
                # abort the whole stream with a spurious [ERROR] token.
                if not chunk:
                    continue
                first = chunk[0]
                if hasattr(first, "content"):
                    token = first.content
                    if token:
                        yield token

        except Exception as e:
            # Headers are already flushed; surface the failure in-band.
            yield f"\n[ERROR]: {str(e)}"

    return StreamingResponse(
        event_generator(),
        # Explicit charset so clients decode multi-byte tokens correctly.
        media_type="text/plain; charset=utf-8",
    )