from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import StreamingResponse
import requests

app = FastAPI()

# Ollama internal URL
OLLAMA_BASE_URL = "http://localhost:11434"

# Generic GET proxy for other Ollama API endpoints (e.g. /api/tags)
@app.get("/api/{path:path}")
async def ollama_proxy_get(path: str, request: Request):
    url = f"{OLLAMA_BASE_URL}/api/{path}"
    try:
        # Forward the original query string as-is instead of a single
        # hard-coded "query" parameter, so each endpoint keeps the
        # parameters it actually expects.
        response = requests.get(url, params=dict(request.query_params))
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=500, detail=str(e))
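
# Quick smoke test for the generic proxy (hypothetical Space hostname below):
# /api/tags is Ollama's endpoint for listing the models it has pulled.
#
#   curl https://<your-space>.hf.space/api/tags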

# Handle /api/chat specifically so streamed responses pass through
@app.post("/api/chat")
async def ollama_chat(body: dict):
    url = f"{OLLAMA_BASE_URL}/api/chat"
    try:
        # Forward the request to Ollama with streaming enabled
        response = requests.post(url, json=body, stream=True)
        response.raise_for_status()

        # Relay each line of Ollama's newline-delimited JSON stream
        def generate():
            try:
                for chunk in response.iter_lines():
                    if chunk:
                        yield chunk + b"\n"
            finally:
                response.close()

        # Ollama streams NDJSON rather than server-sent events
        return StreamingResponse(generate(), media_type="application/x-ndjson")
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=500, detail=f"Ollama error: {str(e)}")
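
# Example request against the chat endpoint (the model name is a placeholder;
# use one the Ollama instance has actually pulled). -N disables curl's output
# buffering so the stream prints as it arrives.
#
#   curl -N https://<your-space>.hf.space/api/chat \
#     -H "Content-Type: application/json" \
#     -d '{"model": "llama3", "messages": [{"role": "user", "content": "Hello"}]}'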

@app.get("/")
async def root():
    return {"message": "Ollama API proxy running on Hugging Face Spaces!"}