Update app.py
app.py
CHANGED
@@ -4,6 +4,7 @@ from fastapi import FastAPI, Request
 import subprocess
 from huggingface_hub import InferenceClient, login #, configure_http_backend, get_session
 import langid
+from litellm import completion
 # import requests
 
 
@@ -78,15 +79,25 @@ def generate_response(text):
 
     messages = [{"role": "user", "content": content}]
 
+    # try:
+    #     completion = client.chat.completions.create(
+    #         model=model,
+    #         messages=messages,
+    #         max_tokens=2048,
+    #         temperature=0.5,
+    #         top_p=0.7
+    #     )
+    #     return completion.choices[0].message.content
     try:
-        completion = client.chat.completions.create(
+        completion_res = completion(
             model=model,
             messages=messages,
-            max_tokens=2048,
+            max_tokens=200,
             temperature=0.5,
             top_p=0.7
         )
-        return completion.choices[0].message.content
+        print(f"completion_res: {completion_res}")
+        return completion_res
     except Exception as e:
         logger.error(f"Error generating response: {e}")
         return "Error: Could not generate response."
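The commit swaps huggingface_hub's InferenceClient for litellm's completion(). Note that completion() returns an OpenAI-style ModelResponse object rather than a plain string, so generate_response() as committed hands its caller the whole response object. A minimal sketch of the same call with the text extracted, assuming a Hugging Face-routed model id (the id below is a hypothetical placeholder, not from the commit):

from litellm import completion

messages = [{"role": "user", "content": "Hello, how are you?"}]

# Same sampling parameters as the committed call; only the model id is invented.
response = completion(
    model="huggingface/mistralai/Mistral-7B-Instruct-v0.2",  # hypothetical model id
    messages=messages,
    max_tokens=200,
    temperature=0.5,
    top_p=0.7,
)

# completion() returns an OpenAI-format response; the generated text lives at
# choices[0].message.content, just as it did for the commented-out
# client.chat.completions.create() call.
print(response.choices[0].message.content)

Returning choices[0].message.content instead of the raw object would keep generate_response() returning a string on both the success path and the error path, matching the "Error: Could not generate response." fallback.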