Updated
app.py
CHANGED
@@ -166,16 +166,11 @@ chain = (
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 def predict(message, history, min_hist_memo = 3):
     # streamer = chain(message)
     streamer = chain.invoke({"name":message, "format_instructions":format_instructions})
-    # partial_message = ""
-    # for new_token in streamer:
-    #     if new_token != '<':
-    #         partial_message += new_token
-    #         yield partial_message
     yield str(streamer)
 
 gr.ChatInterface(predict, title="Mistral 7B").queue().launch(debug=True)
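The removed block shows that predict() once streamed tokens one at a time; after this commit it yields the chain's full output in a single chunk, so the Gradio UI renders the whole reply at once. If token-level streaming were wanted back, LangChain runnables expose a .stream() method that yields output chunks as they are produced. A minimal sketch, assuming `chain` and `format_instructions` are defined earlier in app.py (as the hunk header's `chain = (` context suggests):

# Hypothetical sketch, not part of this commit: streaming via LangChain's
# Runnable.stream(), which yields output chunks as the model produces them.
def predict(message, history, min_hist_memo=3):
    partial_message = ""
    for chunk in chain.stream({"name": message, "format_instructions": format_instructions}):
        partial_message += str(chunk)
        yield partial_message  # each yield replaces the message shown so far

Yielding the accumulated string rather than each individual chunk matches how gr.ChatInterface displays generator output: every yield re-renders the current reply in place.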