Update app.py
app.py CHANGED

@@ -136,10 +136,9 @@ def filter_by_similarity(responses):
             break
     return best_response
 
-def worker_function(llm, request, progress_bar):
+def worker_function(llm, request):
     print(f"Generating response with model {llm}...")
     response = generate_chat_response(request, llm)
-    progress_bar.update(1)
     return response
 
 @app.post("/generate_chat")
@@ -152,15 +151,14 @@ async def generate_chat(request: ChatRequest):
     responses = []
     num_models = len(global_data['models'])
 
-    with …
-    …
-        print(f"Error generating response: {exc}")
+    with ThreadPoolExecutor(max_workers=num_models) as executor:
+        futures = [executor.submit(worker_function, llm, request) for llm in global_data['models']]
+        for future in tqdm(as_completed(futures), total=num_models, desc="Generating responses", unit="model"):
+            try:
+                response = future.result()
+                responses.append(response['response'])
+            except Exception as exc:
+                print(f"Error generating response: {exc}")
 
     best_response = select_best_response(responses)
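The shape of the change: the old worker evidently updated a shared progress bar by hand (the progress_bar.update(1) call is what the commit deletes), while the new code keeps the workers oblivious to progress and instead wraps as_completed() in tqdm, so the bar ticks once per finished future. Below is a minimal, self-contained sketch of that pattern, assuming stand-ins for things this diff only references: MODELS replaces global_data['models'] and fake_generate() replaces generate_chat_response(); neither name is from the app.

import random
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

from tqdm import tqdm

# Stand-in for global_data['models'] (hypothetical model names).
MODELS = ["model-a", "model-b", "model-c"]


def fake_generate(llm, prompt):
    """Stand-in for generate_chat_response(): sleep a bit, then return a
    dict shaped like the app's responses (a dict with a 'response' key)."""
    time.sleep(random.uniform(0.1, 0.4))
    if random.random() < 0.1:
        # Occasional failure, to exercise the except branch below.
        raise RuntimeError(f"{llm} timed out")
    return {"response": f"{llm}: answer to {prompt!r}"}


def worker_function(llm, prompt):
    # No progress argument: the worker only generates and returns.
    print(f"Generating response with model {llm}...")
    return fake_generate(llm, prompt)


def generate_all(prompt):
    responses = []
    with ThreadPoolExecutor(max_workers=len(MODELS)) as executor:
        futures = [executor.submit(worker_function, llm, prompt) for llm in MODELS]
        # as_completed() yields each future as it finishes; tqdm ticks once
        # per yielded future, replacing the hand-managed progress_bar.update(1).
        for future in tqdm(as_completed(futures), total=len(MODELS),
                           desc="Generating responses", unit="model"):
            try:
                responses.append(future.result()["response"])
            except Exception as exc:
                # One failed model is logged and skipped, not fatal to the batch.
                print(f"Error generating response: {exc}")
    return responses


if __name__ == "__main__":
    for r in generate_all("hello"):
        print(r)

One consequence of this design worth noting: iterating as_completed() means responses accumulate in completion order, not submission order, so anything order-sensitive has to happen after collection; here that is fine, since the app feeds the whole list to select_best_response().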