Update app.py
app.py
CHANGED
@@ -11,25 +11,19 @@ client = OpenAI(
     api_key=ACCESS_TOKEN,
 )
 
-
+# Retry logic with tenacity for handling API rate limits
+@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10), stop=tenacity.stop_after_attempt(5))
 async def respond(
     message,
-    history,
     system_message,
     max_tokens,
     temperature,
     top_p,
 ):
     try:
-        messages = [{"role": "system", "content": system_message}]
-
-        for val in history:
-            if val[0]:
-                messages.append({"role": "user", "content": val[0]})
-            if val[1]:
-                messages.append({"role": "assistant", "content": val[1]})
-
-        messages.append({"role": "user", "content": message})
+        # Only use the system message and the current message for the response
+        messages = [{"role": "system", "content": system_message},
+                    {"role": "user", "content": message}]
 
         response = ""
         # Properly stream chat completions using dot notation
@@ -41,11 +35,15 @@ async def respond(
             top_p=top_p,
             messages=messages,
         )
-
+
+        # Stream response and concatenate tokens
+        for chunk in stream:
             if hasattr(chunk.choices[0].delta, 'content'):
                 token = chunk.choices[0].delta.content
                 response += token
+
         return response
+
     except APIError as e:
         error_details = e.body
         error_type = error_details.get("type")
@@ -57,20 +55,18 @@ async def respond(
             error_str = f"{error_type}: {error_message} (code: {error_code}, param: {error_param})"
         else:
             error_str = "An error occurred during streaming"
-
         print(f"Error: {error_str}")
         return error_str
+
     except Exception as e:
         print(f"Error: {e}")
         return "Error occurred. Please try again."
 
 
-#
-async def generate_response(message,
-
-    response
-    new_history[-1][1] = response
-    return response, new_history
+# Async Gradio function to handle user input and response generation without history
+async def generate_response(message, system_message, max_tokens, temperature, top_p):
+    response = await respond(message, system_message, max_tokens, temperature, top_p)
+    return response
 
 
 def launch_app():
@@ -79,18 +75,17 @@ def launch_app():
     with demo:
         gr.Markdown("# Chatbot")
         message = gr.Textbox(label="Message")
-        history = gr.State([["", ""]])
         system_message = gr.Textbox(label="System message")
         max_tokens = gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Max new tokens")
         temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
         top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
         response = gr.Text(label="Response")
 
-        # Use the async version of generate_response
+        # Use the async version of generate_response without history
        gr.Button("Generate Response").click(
             generate_response,
-            inputs=[message,
-            outputs=[response
+            inputs=[message, system_message, max_tokens, temperature, top_p],
+            outputs=[response],
             show_progress=False,
         )
     demo.launch(show_error=True)
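The hunks above never show the top of app.py, so here is a minimal sketch of the module head this change assumes. Only api_key=ACCESS_TOKEN is confirmed by the diff; the imports and the token source are assumptions inferred from the names the hunks use (tenacity, gr, OpenAI, APIError).

# Hypothetical head of app.py -- everything except api_key=ACCESS_TOKEN is assumed,
# inferred from identifiers referenced in the diff.
import os

import gradio as gr
import tenacity
from openai import OpenAI, APIError

ACCESS_TOKEN = os.environ["ACCESS_TOKEN"]  # assumed: token read from the environment

client = OpenAI(
    api_key=ACCESS_TOKEN,
)

On the new decorator itself: tenacity supports decorating coroutine functions, so applying @tenacity.retry to async def respond is valid. With wait_exponential(multiplier=1, min=4, max=10) the wait doubles on each attempt but is clamped to the 4-10 second range, and stop_after_attempt(5) raises a tenacity.RetryError after the fifth failure. Note that because respond catches APIError and Exception internally and returns an error string instead of raising, the retry path will rarely actually trigger in this version.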