NCTCMumbai committed
Commit 1a6e703 · verified · 1 Parent(s): 222ff4e

Update backend/query_llm.py

Files changed (1)
  1. backend/query_llm.py +3 -6
backend/query_llm.py CHANGED
@@ -106,17 +106,14 @@ def generate_hf(prompt: str, history: str, temperature: float = 0.5, max_new_tok
     return "I do not know what happened, but I couldn't understand you."
 
 def generate_qwen(formatted_prompt: str, history: str):
-    stream = client.predict(
+    response = client.predict(
         query=formatted_prompt,
         history=[],
         system="You are a helpful assistant.",
         api_name="/model_chat"
     )
-    print('Response:',stream)
-    output = ""
-    for response in stream:
-        output += response.token.text
-        yield output
+    print('Response:',response)
+
     return output
     #return response[1][0][1]
 
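The patch swaps the old token-streaming loop for a single blocking client.predict call, but the function still ends with return output, and output is no longer defined once the loop is gone, so calling it as committed raises a NameError. Below is a minimal sketch of what the change presumably intends, assuming client is a gradio_client.Client and that the /model_chat endpoint returns a (query, history, system)-style tuple whose reply text sits at response[1][0][1], as the commented-out line in the source hints. The Space id is hypothetical; the Space this backend actually targets is not shown in the diff.

from gradio_client import Client

# Hypothetical Space id -- the real target is not visible in this commit.
client = Client("Qwen/Qwen1.5-110B-Chat-demo")

def generate_qwen(formatted_prompt: str, history: str):
    # One blocking call replaces the old token-streaming loop.
    response = client.predict(
        query=formatted_prompt,
        history=[],
        system="You are a helpful assistant.",
        api_name="/model_chat",
    )
    print('Response:', response)
    # Assumption: the assistant reply lives in the returned history at
    # [1][0][1], per the commented-out hint in the source.
    return response[1][0][1]

Note that callers written against the previous generator version (which yielded partial outputs) would also need updating, since the function now returns a single string instead of streaming.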