Commit 0c78e15 (parent: 0be597b)
We implement streaming of the response.
app.py CHANGED
@@ -36,7 +36,7 @@ def initLLM():
     """
     llm = ChatGroq(
         model="mixtral-8x7b-32768",
-        temperature=0,
+        temperature=0.7,
         max_tokens=None,
         timeout=None,
         max_retries=2,
@@ -51,12 +51,14 @@ def respond(message, history):
     response = ""

     try:
-        response = llm_chain.
+        response = llm_chain.stream({"input": message})
     except:
         raise gradio.Error("Se ha producido un error al interactuar con el modelo LLM.", duratio=5)

-
-
+    partial_message = ""
+    for chunk in response:
+        partial_message = partial_message + chunk.dict()['content']
+        yield partial_message



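The commit swaps a blocking chain call for LangChain's `.stream()` and turns `respond()` into a generator so Gradio can render the answer incrementally (it also raises `temperature` from 0 to 0.7). Below is a minimal, self-contained sketch of the resulting flow. Only `respond()` and the `ChatGroq` arguments come from the diff; the prompt template, the `llm_chain = prompt | llm` wiring, and the `gr.ChatInterface` registration are assumptions about the rest of app.py.

```python
import gradio
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq

llm = ChatGroq(
    model="mixtral-8x7b-32768",
    temperature=0.7,  # raised from 0 in this commit
    max_tokens=None,
    timeout=None,
    max_retries=2,
)

# Hypothetical prompt; the real template lives elsewhere in app.py.
prompt = ChatPromptTemplate.from_messages([("human", "{input}")])
llm_chain = prompt | llm

def respond(message, history):
    try:
        # .stream() returns an iterator of AIMessageChunk objects instead of
        # blocking until the whole completion is ready.
        response = llm_chain.stream({"input": message})
    except Exception:
        raise gradio.Error("Se ha producido un error al interactuar con el modelo LLM.", duration=5)

    # A generator callback makes gr.ChatInterface stream: each yielded string
    # replaces the bot message, so yielding ever-longer prefixes renders the
    # reply progressively.
    partial_message = ""
    for chunk in response:
        partial_message += chunk.content  # same field the commit reads via chunk.dict()['content']
        yield partial_message

demo = gradio.ChatInterface(respond)

if __name__ == "__main__":
    demo.launch()
```

A few caveats in the committed version: `duratio=5` is presumably a typo for `duration=5`, the keyword recent Gradio releases accept on `gradio.Error`; the bare `except:` also catches `KeyboardInterrupt`, so `except Exception:` is the safer idiom; and because `.stream()` is lazy, the request is only sent once the `for` loop starts consuming chunks, meaning model errors can surface outside the `try` block. Finally, `chunk.content` reads the same field as `chunk.dict()['content']` without building an intermediate dict.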