Update app.py

app.py CHANGED
@@ -86,22 +86,6 @@ class ChatRequest(BaseModel):
     top_p: float = 0.95
     temperature: float = 0.7
 
-@spaces.GPU(duration=0)
-def generate_chat_response(request, model_data):
-    try:
-        user_input = normalize_input(request.message)
-        llm = model_data['model']
-        response = llm.create_chat_completion(
-            messages=[{"role": "user", "content": user_input}],
-            top_k=request.top_k,
-            top_p=request.top_p,
-            temperature=request.temperature
-        )
-        reply = response['choices'][0]['message']['content']
-        return {"response": reply, "literal": user_input, "model_name": model_data['name']}
-    except Exception:
-        pass
-
 def normalize_input(input_text):
     return input_text.strip()
 
@@ -128,6 +112,16 @@ def remove_repetitive_responses(responses):
         unique_responses.append(response)
     return unique_responses
 
+@spaces.GPU(duration=0)
+def generate_chat_response(request, model_data):
+    try:
+        user_input = normalize_input(request.message)
+        llm = model_data['model']
+        response = llm(user_input, top_k=request.top_k, top_p=request.top_p, temperature=request.temperature)
+        return {"model": model_data['name'], "response": response}
+    except Exception:
+        pass
+
 @spaces.GPU(duration=0)
 async def generate(request: ChatRequest):
     try:
@@ -146,7 +140,7 @@ async def generate(request: ChatRequest):
             raise HTTPException(status_code=500, detail="Error: No responses generated.")
 
         responses = remove_repetitive_responses(responses)
-        best_response = select_best_response(responses)
+        best_response = responses[0] if responses else {}
        return {
             "best_response": best_response,
             "all_responses": responses
@@ -154,8 +148,5 @@ async def generate(request: ChatRequest):
     except Exception:
         pass
 
-def select_best_response(responses):
-    return responses[0] if responses else {}
-
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=7860)
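A note for readers tracing the change: generate_chat_response() was not only moved below remove_repetitive_responses(); its body also switched from create_chat_completion() to calling the model object directly. The sketch below contrasts the two calling styles. It assumes model_data['model'] is a llama-cpp-python Llama instance, which the removed code suggests (the create_chat_completion() call and the response['choices'][0]['message']['content'] indexing match that API) but the diff does not confirm; the model path and prompt are placeholders.

# Sketch only: assumes llama-cpp-python; the model path is hypothetical.
from llama_cpp import Llama

llm = Llama(model_path="models/example.gguf")

# Old style (removed in this commit): chat completion applies the model's
# chat template and returns the reply under choices[0]['message']['content'].
chat = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Hello"}],
    top_k=40, top_p=0.95, temperature=0.7,
)
reply = chat["choices"][0]["message"]["content"]

# New style (added in this commit): Llama.__call__ runs a plain text
# completion on the raw prompt and returns the whole completion dict;
# the generated text sits under choices[0]['text'] instead of a chat message.
completion = llm("Hello", top_k=40, top_p=0.95, temperature=0.7)
text = completion["choices"][0]["text"]

If that assumption holds, the rewritten helper now returns the raw completion dict under "response" rather than an extracted reply string, so any caller expecting the old shape would need adjusting; note also that both versions swallow errors with a bare except and fall through to an implicit return None.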