Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-
+import gradio as gr
 from huggingface_hub import InferenceClient
 
 
@@ -55,72 +55,5 @@ demo = gr.ChatInterface(
 )
 
 
-if __name__ == "__main__":
-    demo.launch()
-"""
-
-import os
-import gradio as gr
-from huggingface_hub import InferenceClient
-import json
-
-# Retrieve the API token from the environment variable
-API_TOKEN = os.getenv("HF_READ_TOKEN")
-
-# Initialize the Hugging Face Inference Client
-client = InferenceClient(
-    "mistralai/Mistral-Nemo-Instruct-2407",
-    token=API_TOKEN
-)
-
-# System prompt to define model behavior
-system_prompt = "You are a helpful assistant that provides concise and accurate answers."
-
-# Function to handle the chat completion
-def hf_chat(user_input):
-    messages = [
-        {"role": "system", "content": system_prompt},
-        {"role": "user", "content": user_input}
-    ]
-    response = ""
-
-    try:
-        # Stream the response
-        for message in client.chat_completion(
-            messages=messages,
-            max_tokens=500,
-            stream=True,
-        ):
-            try:
-                # Parse each part of the response carefully
-                content = message.choices[0].delta.content
-                response += content
-            except (KeyError, json.JSONDecodeError) as e:
-                # Print error details for debugging
-                print(f"Error while parsing response: {e}")
-                continue  # Continue receiving the stream
-
-    except Exception as e:
-        # Catch and print any unexpected errors during the stream
-        return f"Error occurred: {str(e)}"
-
-    return response
-
-# Gradio interface
-with gr.Blocks() as demo:
-    gr.Markdown("# mistral nemo prompt enhancer")
-    with gr.Row():
-        with gr.Column():
-            user_input = gr.Textbox(
-                label="Enter your message",
-                placeholder="Ask me anything..."
-            )
-            submit_btn = gr.Button("Submit")
-        with gr.Column():
-            output = gr.Textbox(label="Response")
-
-    submit_btn.click(fn=hf_chat, inputs=user_input, outputs=output)
-
-# Launch Gradio app
 if __name__ == "__main__":
     demo.launch(show_api=True, share=False)
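The hunks above show only the head and tail of app.py; the gr.ChatInterface definition itself (lines 5-54) is unchanged and therefore not displayed. For context, here is a minimal sketch of what the file plausibly looks like after this commit, assuming the stock Gradio ChatInterface template suggested by the visible context lines (import gradio as gr, from huggingface_hub import InferenceClient, demo = gr.ChatInterface(, demo.launch(show_api=True, share=False)). The model name, the respond() callback, and the additional inputs below are assumptions, not part of the diff.

# Sketch of app.py after this commit. Only the imports, the closing ")" of
# gr.ChatInterface(...), and the launch() call are confirmed by the diff; the
# model name, respond() callback, and slider defaults are assumptions.
import gradio as gr
from huggingface_hub import InferenceClient

# Assumed model: the removed code block used "mistralai/Mistral-Nemo-Instruct-2407".
client = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")


def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Rebuild an OpenAI-style message list from the chat history
    # (pair-style history: a list of [user, assistant] turns).
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    # Stream partial completions back to the UI as they arrive.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += chunk.choices[0].delta.content or ""
        yield response


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
    ],
)


if __name__ == "__main__":
    demo.launch(show_api=True, share=False)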