from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
import requests
import json
from typing import Iterator

app = FastAPI()
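

# Request schema for POST /chat. The defaults below are illustrative and can
# be overridden per request; "messages" uses the OpenAI chat format, i.e. a
# list of {"role": ..., "content": ...} dicts.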
class ChatRequest(BaseModel):
    messages: list[dict] = [{"role": "user", "content": "Lol full form"}]
    model: str = "gemini-1.5-pro-latest"
    temperature: float = 1.0
    top_p: float = 0.8
    max_tokens: int = 4000
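

# Upstream endpoint: chat.typegpt.net exposes an OpenAI-compatible
# chat-completions API. The browser-style Accept and User-Agent headers
# appear intended to mimic the site's own web client.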
url = "https://chat.typegpt.net/api/openai/v1/chat/completions"
headers = {
    "Accept": "application/json, text/event-stream",
    "Content-Type": "application/json",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0",
}


@app.post("/chat")
def chat(request: ChatRequest):
    # A plain (non-async) endpoint: FastAPI runs sync endpoints in a
    # threadpool, so the blocking `requests` calls below don't stall the
    # event loop.
    payload = {
        "messages": request.messages,
        "stream": True,
        "model": request.model,
        "temperature": request.temperature,
        "top_p": request.top_p,
        "max_tokens": request.max_tokens,
    }

    try:
        response = requests.post(url, headers=headers, json=payload, stream=True)

        if response.status_code == 200:
            def event_stream() -> Iterator[str]:
                # Relay the upstream SSE stream, forwarding only the delta text.
                for line in response.iter_lines():
                    if not line:
                        continue
                    decoded_line = line.decode("utf-8")
                    if not decoded_line.startswith("data: "):
                        continue
                    data_str = decoded_line[len("data: "):]
                    if data_str.strip() == "[DONE]":  # end-of-stream sentinel
                        break
                    try:
                        data = json.loads(data_str)
                        content = data.get("choices", [{}])[0].get("delta", {}).get("content", "")
                        if content:
                            # Emit a well-formed SSE event per token.
                            yield f"data: {json.dumps({'response': content})}\n\n"
                    except json.JSONDecodeError:
                        continue

            return StreamingResponse(event_stream(), media_type="text/event-stream")
        else:
            raise HTTPException(status_code=response.status_code, detail=response.text)
    except requests.RequestException as e:
        # Catch only network-level errors here, so the HTTPException raised
        # above propagates with its original status code instead of being
        # rewrapped as a 500.
        raise HTTPException(status_code=500, detail=str(e))
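

# Example invocation (a sketch, assuming the server runs locally on the
# port configured below):
#
#   curl -N -X POST http://localhost:8083/chat \
#        -H "Content-Type: application/json" \
#        -d '{"messages": [{"role": "user", "content": "Lol full form"}]}'
#
# Each token then arrives as an SSE event: data: {"response": "<token>"}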
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8083)