Spaces: Build error

Update main.py

The previous main.py trailed off after get_llm() into blank lines and stray double-quote characters, which is not valid Python and is the likely cause of the Space's build error. This commit rewrites the file in full, keeping the existing setup and adding a health-check route plus an OpenAI-compatible /v1/chat/completions endpoint.

main.py (CHANGED)
@@ -1,58 +1,60 @@
-from fastapi import FastAPI, HTTPException
-from pydantic import BaseModel
-from langchain_community.llms import Ollama  # Correct Import
-import logging
-import time  # Import time module
-
-# Configure logging
-logging.basicConfig(level=logging.INFO)
-
-app = FastAPI()
-
-# OpenAI-compatible request format
-class OpenAIRequest(BaseModel):
-    model: str
-    messages: list
-    stream: bool = False  # Default to non-streaming
-
-# Initialize LangChain LLM with Ollama
-def get_llm(model_name: str):
-    return Ollama(model=model_name)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-"
-"
-"
-
-
-
-"
-
-
-
-
-
-"
-
-
-
-
-
-
-
-
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+from langchain_community.llms import Ollama  # Correct Import
+import logging
+import time  # Import time module
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+
+app = FastAPI()
+
+# OpenAI-compatible request format
+class OpenAIRequest(BaseModel):
+    model: str
+    messages: list
+    stream: bool = False  # Default to non-streaming
+
+# Initialize LangChain LLM with Ollama
+def get_llm(model_name: str):
+    return Ollama(model=model_name)
+@app.get("/")
+def home():
+    return {"message": "OpenAI-compatible LangChain + Ollama API is running"}
+@app.post("/v1/chat/completions")
+def generate_text(request: OpenAIRequest):
+    try:
+        llm = get_llm(request.model)
+
+        # Extract last user message from messages
+        user_message = next((msg["content"] for msg in reversed(request.messages) if msg["role"] == "user"), None)
+        if not user_message:
+            raise HTTPException(status_code=400, detail="User message is required")
+
+        response_text = llm.invoke(user_message)
+
+        # OpenAI-like response format
+        response = {
+            "id": "chatcmpl-123",
+            "object": "chat.completion",
+            "created": int(time.time()),
+            "model": request.model,
+            "choices": [
+                {
+                    "index": 0,
+                    "message": {"role": "assistant", "content": response_text},
+                    "finish_reason": "stop",
+                }
+            ],
+            "usage": {
+                "prompt_tokens": len(user_message.split()),
+                "completion_tokens": len(response_text.split()),
+                "total_tokens": len(user_message.split()) + len(response_text.split()),
+            }
+        }
+
+        return response
+
+    except Exception as e:
+        logging.error(f"Error generating response: {e}")
+        raise HTTPException(status_code=500, detail="Internal server error")
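
For a quick sanity check of the new endpoint, a minimal client sketch follows. It assumes the app is served locally (for example, uvicorn main:app --port 8000) and that the Ollama instance behind it has already pulled the named model; the host, port, and model name here are illustrative assumptions, not part of the commit.

import requests  # smoke test for the OpenAI-style endpoint above

payload = {
    "model": "llama2",  # assumption: any model your Ollama server has pulled
    "messages": [
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "Say hello in one sentence."},
    ],
}

resp = requests.post("http://localhost:8000/v1/chat/completions", json=payload, timeout=120)
resp.raise_for_status()
data = resp.json()
print(data["choices"][0]["message"]["content"])
print(data["usage"])  # whitespace word counts, not real tokenizer counts

Because the handler forwards only the most recent user message to the model, the system message above is accepted in the payload but never reaches Ollama.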
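
One behavioral quirk worth flagging in the new handler: the raise HTTPException(status_code=400, ...) sits inside the try block, so the blanket except Exception catches it and re-raises it as a 500. A minimal sketch of one way to let the client error through, offered as a possible follow-up rather than as what the commit does:

from fastapi import FastAPI, HTTPException
import logging

app = FastAPI()

@app.post("/v1/chat/completions")
def generate_text(request: dict):  # request body simplified for illustration
    try:
        messages = request.get("messages") or []
        user_message = next((m["content"] for m in reversed(messages) if m.get("role") == "user"), None)
        if not user_message:
            raise HTTPException(status_code=400, detail="User message is required")
        return {"echo": user_message}  # model call elided in this sketch
    except HTTPException:
        raise  # re-raise unchanged so a 400 stays a 400
    except Exception as e:
        logging.error(f"Error generating response: {e}")
        raise HTTPException(status_code=500, detail="Internal server error")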
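
Finally, OpenAIRequest declares stream: bool = False but the endpoint never reads it, so stream=true requests silently receive a normal, non-streamed response. If streaming were wanted later, a rough sketch using FastAPI's StreamingResponse and the .stream() iterator that LangChain LLM wrappers expose might look like the following; the SSE framing is deliberately simplified and does not reproduce the full OpenAI chunk schema.

from fastapi.responses import StreamingResponse

def stream_completion(llm, prompt: str) -> StreamingResponse:
    # llm is assumed to be the Ollama wrapper from get_llm(); its .stream()
    # yields text chunks as the model generates them.
    def event_stream():
        for chunk in llm.stream(prompt):
            yield f"data: {chunk}\n\n"  # simplified SSE framing
        yield "data: [DONE]\n\n"
    return StreamingResponse(event_stream(), media_type="text/event-stream")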