Update app.py
app.py CHANGED
@@ -42,7 +42,7 @@ async def respond(
         ):
             token = message.choices[0].text
             response += token
-
+        return response
     except APIError as e:
         error_details = e.body
         error_type = error_details.get("type")
@@ -56,33 +56,36 @@ async def respond(
             error_str = "An error occurred during streaming"
 
             print(f"Error: {error_str}")
-
+            return error_str
     except Exception as e:
         print(f"Error: {e}")
-
+        return "Error occurred. Please try again."
 
 def launch_app():
     try:
-        demo = gr.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        demo = gr.Blocks()
+        with demo:
+            gr.Markdown("# Chatbot")
+            message = gr.Textbox(label="Message")
+            history = gr.State([["", ""]])
+            system_message = gr.Textbox(label="System message")
+            max_tokens = gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Max new tokens")
+            temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
+            top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
+            response = gr.Text(label="Response")
+
+            def generate_response(message, history, system_message, max_tokens, temperature, top_p):
+                new_history = history + [[message, ""]]
+                response = asyncio.run(respond(message, history, system_message, max_tokens, temperature, top_p))
+                new_history[-1][1] = response
+                return response, new_history
+
+            gr.Button("Generate Response").click(
+                generate_response,
+                inputs=[message, history, system_message, max_tokens, temperature, top_p],
+                outputs=[response, history],
+                show_progress=False,
+            )
         demo.launch(show_error=True)
     except KeyError as e:
        print(f"Error: {e}")
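For readers skimming the diff: the click handler added in launch_app() is a plain synchronous function that drives the async respond() coroutine with asyncio.run(), then writes the result into both the response textbox and the history state. Below is a minimal, self-contained sketch of that pattern under stated assumptions; respond_stub is a hypothetical stand-in for the app's real streaming respond(), which after this commit returns either the accumulated response or an error string.

import asyncio

import gradio as gr


async def respond_stub(message: str) -> str:
    # Stand-in for the app's async respond(); the real function streams
    # tokens from an API client and returns the accumulated text.
    return f"echo: {message}"


def generate(message, history):
    # Synchronous Gradio callback: run the coroutine to completion on a
    # fresh event loop, then update the chat history alongside the output.
    new_history = history + [[message, ""]]
    result = asyncio.run(respond_stub(message))
    new_history[-1][1] = result
    return result, new_history


with gr.Blocks() as demo:
    message = gr.Textbox(label="Message")
    history = gr.State([["", ""]])
    response = gr.Text(label="Response")
    gr.Button("Generate Response").click(
        generate,
        inputs=[message, history],
        outputs=[response, history],
    )

if __name__ == "__main__":
    demo.launch(show_error=True)

Gradio event listeners can also be async functions themselves, so passing an async handler straight to .click() would be an alternative to the asyncio.run() bridge used in the diff.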