Eric Michael Martinez committed on
Commit
7b70181
·
1 Parent(s): 6bc77f3

update async

Browse files
Files changed (1) hide show
  1. app/app.py +120 -73
app/app.py CHANGED
@@ -1,4 +1,4 @@
1
- import httpx
2
  import os
3
  import requests
4
  import gradio as gr
@@ -23,52 +23,50 @@ elif current_environment == "prod":
23
  load_dotenv(".env.prod")
24
  else:
25
  raise ValueError("Invalid environment specified")
26
-
27
-
28
  def api_login(email, password):
29
  port = os.getenv("APP_PORT")
30
  scheme = os.getenv("APP_SCHEME")
31
  host = os.getenv("APP_HOST")
32
 
33
  url = f"{scheme}://{host}:{port}/auth/jwt/login"
34
- payload = {
35
- 'username': email,
36
- 'password': password
37
- }
38
- headers = {
39
- 'Content-Type': 'application/x-www-form-urlencoded'
40
- }
41
 
42
- response = requests.post(
43
- url,
44
- data=payload,
45
- headers=headers
46
- )
47
-
48
- if(response.status_code==200):
49
  response_json = response.json()
50
- api_key = response_json['access_token']
51
  return True, api_key
52
  else:
53
  response_json = response.json()
54
- detail = response_json['detail']
55
  return False, detail
56
-
57
 
58
  def get_api_key(email, password):
59
  successful, message = api_login(email, password)
60
-
61
- if(successful):
62
  return os.getenv("APP_API_BASE"), message
63
  else:
64
  raise gr.Error(message)
65
  return "", ""
66
-
 
67
  # Define a function to get the AI's reply using the OpenAI API
68
- def get_ai_reply(message, model="gpt-3.5-turbo", system_message=None, temperature=0, message_history=[]):
 
 
 
 
 
 
69
  # Initialize the messages list
70
  messages = []
71
-
72
  # Add the system message to the messages list
73
  if system_message is not None:
74
  messages += [{"role": "system", "content": system_message}]
@@ -76,34 +74,31 @@ def get_ai_reply(message, model="gpt-3.5-turbo", system_message=None, temperatur
76
  # Add the message history to the messages list
77
  if message_history is not None:
78
  messages += message_history
79
-
80
  # Add the user's message to the messages list
81
  messages += [{"role": "user", "content": message}]
82
-
83
  # Make an API call to the OpenAI ChatCompletion endpoint with the model and messages
84
  completion = openai.ChatCompletion.create(
85
- model=model,
86
- messages=messages,
87
- temperature=temperature
88
  )
89
-
90
  # Extract and return the AI's response from the API response
91
  return completion.choices[0].message.content.strip()
92
 
 
93
  def get_ai_image(prompt, size="512x512"):
94
- response = openai.Image.create(
95
- prompt=prompt,
96
- n=1,
97
- size=size
98
- )
99
- image_1_url = response.data[0]['url']
100
  return image_1_url
101
 
 
102
  def get_ai_transcript(path_to_audio, language=None):
103
- audio_file= open(path_to_audio, "rb")
104
  transcript = openai.Audio.transcribe("whisper-1", audio_file, language=language)
105
  return transcript.text
106
 
 
107
  def generate_transcription(path_to_audio_file):
108
  try:
109
  transcript = get_ai_transcript(path_to_audio_file)
@@ -111,7 +106,8 @@ def generate_transcription(path_to_audio_file):
111
  except Exception as e:
112
  raise gr.Error(e)
113
  return ""
114
-
 
115
  def generate_image(prompt):
116
  try:
117
  image_url = get_ai_image(prompt)
@@ -119,42 +115,49 @@ def generate_image(prompt):
119
  except Exception as e:
120
  raise gr.Error(e)
121
  return None
122
-
 
123
  # Define a function to handle the chat interaction with the AI model
124
  def chat(model, system_message, message, chatbot_messages, history_state):
125
  # Initialize chatbot_messages and history_state if they are not provided
126
  chatbot_messages = chatbot_messages or []
127
  history_state = history_state or []
128
-
129
  # Try to get the AI's reply using the get_ai_reply function
130
  try:
131
- ai_reply = get_ai_reply(message, model=model, system_message=system_message, message_history=history_state)
 
 
 
 
 
132
  except Exception as e:
133
  # If an error occurs, raise a Gradio error
134
  raise gr.Error(e)
135
-
136
  # Append the user's message and the AI's reply to the chatbot_messages list
137
  chatbot_messages.append((message, ai_reply))
138
-
139
  # Append the user's message and the AI's reply to the history_state list
140
  history_state.append({"role": "user", "content": message})
141
  history_state.append({"role": "assistant", "content": ai_reply})
142
-
143
  # Return None (empty out the user's message textbox), the updated chatbot_messages, and the updated history_state
144
  return None, chatbot_messages, history_state
145
 
 
146
  # Define a function to launch the chatbot interface using Gradio
147
  def get_chatbot_app(additional_examples=[]):
148
  # Load chatbot examples and merge with any additional examples provided
149
  examples = chatbot_examples.load_examples(additional=additional_examples)
150
-
151
  # Define a function to get the names of the examples
152
  def get_examples():
153
  return [example["name"] for example in examples]
154
 
155
  # Define a function to choose an example based on the index
156
  def choose_example(index):
157
- if(index!=None):
158
  system_message = examples[index]["system_message"].strip()
159
  user_message = examples[index]["message"].strip()
160
  return system_message, user_message, [], []
@@ -167,17 +170,22 @@ def get_chatbot_app(additional_examples=[]):
167
  with gr.Row():
168
  with gr.Column():
169
  # Create a dropdown to select examples
170
- example_dropdown = gr.Dropdown(get_examples(), label="Examples", type="index")
 
 
171
  # Create a button to load the selected example
172
  example_load_btn = gr.Button(value="Load")
173
  # Create a textbox for the system message (prompt)
174
- system_message = gr.TextArea(label="System Message (Prompt)", value="You are a helpful assistant.", lines=20, max_lines=400)
 
 
 
 
 
175
  with gr.Column():
176
  # Create a dropdown to select the AI model
177
  model_selector = gr.Dropdown(
178
- ["gpt-3.5-turbo"],
179
- label="Model",
180
- value="gpt-3.5-turbo"
181
  )
182
  # Create a chatbot interface for the conversation
183
  chatbot = gr.Chatbot(label="Conversation")
@@ -189,11 +197,27 @@ def get_chatbot_app(additional_examples=[]):
189
  btn = gr.Button(value="Send")
190
 
191
  # Connect the example load button to the choose_example function
192
- example_load_btn.click(choose_example, inputs=[example_dropdown], outputs=[system_message, message, chatbot, history_state])
 
 
 
 
193
  # Connect the send button to the chat function
194
- btn.click(chat, inputs=[model_selector, system_message, message, chatbot, history_state], outputs=[message, chatbot, history_state])
 
 
 
 
 
 
 
 
 
 
195
  with gr.Tab("Image Generation"):
196
- image_prompt = gr.Textbox(label="Prompt", placeholder="A cute puppy wearing sunglasses.")
 
 
197
  image_btn = gr.Button(value="Generate")
198
  image = gr.Image(label="Result", interactive=False, type="filepath")
199
  image_btn.click(generate_image, inputs=[image_prompt], outputs=[image])
@@ -201,17 +225,26 @@ def get_chatbot_app(additional_examples=[]):
201
  audio_file = gr.Audio(label="Audio", source="microphone", type="filepath")
202
  transcribe = gr.Button(value="Transcribe")
203
  audio_transcript = gr.Textbox(label="Transcription", interactive=False)
204
- transcribe.click(generate_transcription, inputs=[audio_file], outputs=[audio_transcript])
 
 
205
  with gr.Tab("Get API Key"):
206
  email_box = gr.Textbox(label="Email Address", placeholder="Student Email")
207
- password_box = gr.Textbox(label="Password", type="password", placeholder="Student ID")
208
- btn = gr.Button(value ="Generate")
 
 
209
  api_host_box = gr.Textbox(label="OpenAI API Base", interactive=False)
210
  api_key_box = gr.Textbox(label="OpenAI API Key", interactive=False)
211
- btn.click(get_api_key, inputs = [email_box, password_box], outputs = [api_host_box, api_key_box])
 
 
 
 
212
  # Return the app
213
  return app
214
 
 
215
  app = FastAPI()
216
 
217
  app.include_router(
@@ -228,10 +261,12 @@ app.include_router(
228
  tags=["users"],
229
  )
230
 
 
231
  @app.get("/authenticated-route")
232
  async def authenticated_route(user: User = Depends(current_active_user)):
233
  return {"message": f"Hello {user.email}!"}
234
 
 
235
  @app.post("/v1/completions")
236
  async def openai_api_completions_passthrough(
237
  request: Request,
@@ -254,7 +289,17 @@ async def openai_api_completions_passthrough(
254
  "Authorization": f"Bearer {openai_api_key}",
255
  },
256
  )
257
- print(response)
 
 
 
 
 
 
 
 
 
 
258
 
259
  # Return the OpenAI API response
260
  return response.json()
@@ -272,30 +317,32 @@ async def openai_api_chat_completions_passthrough(
272
  request_data = await request.json()
273
  request_headers = request.headers
274
  openai_api_key = os.getenv("OPENAI_API_KEY")
275
-
276
- if(request_data['model']=='gpt-4' or request_data['model'] == 'gpt-4-32k'):
277
  print("User requested gpt-4, falling back to gpt-3.5-turbo")
278
- request_data['model'] = 'gpt-3.5-turbo'
279
 
280
  # Forward the request to the OpenAI API
281
- response = requests.post(
282
- "https://api.openai.com/v1/chat/completions",
283
- json=request_data,
284
- headers={
285
- "Content-Type": request_headers.get("Content-Type"),
286
- "Authorization": f"Bearer {openai_api_key}",
287
- },
288
- )
289
- print(response)
290
 
291
  # Return the OpenAI API response
292
  return response.json()
293
 
 
294
  @app.on_event("startup")
295
  async def on_startup():
296
  # Not needed if you setup a migration system like Alembic
297
  await create_db_and_tables()
298
-
 
299
  gradio_gui = get_chatbot_app()
300
  gradio_gui.auth = api_login
301
  gradio_gui.auth_message = "Welcome, to the 4341 OpenAI Service"
 
1
+ from httpx import AsyncClient
2
  import os
3
  import requests
4
  import gradio as gr
 
23
  load_dotenv(".env.prod")
24
  else:
25
  raise ValueError("Invalid environment specified")
26
+
27
+
28
def api_login(email, password):
    """Authenticate against the app's JWT login endpoint.

    Args:
        email: Account email, sent as the form ``username`` field.
        password: Account password.

    Returns:
        ``(True, access_token)`` when the server answers HTTP 200,
        otherwise ``(False, detail)`` where ``detail`` is the error
        message taken from the response body.
    """
    port = os.getenv("APP_PORT")
    scheme = os.getenv("APP_SCHEME")
    host = os.getenv("APP_HOST")

    url = f"{scheme}://{host}:{port}/auth/jwt/login"
    payload = {"username": email, "password": password}
    headers = {"Content-Type": "application/x-www-form-urlencoded"}

    # requests has no default timeout; without one a hung auth server
    # would block this (synchronous) login callback forever.
    response = requests.post(url, data=payload, headers=headers, timeout=30)

    if response.status_code == 200:
        return True, response.json()["access_token"]
    else:
        return False, response.json()["detail"]
47
+
48
 
49
def get_api_key(email, password):
    """Resolve the OpenAI-compatible API base and key for an account.

    Args:
        email: Student email address.
        password: Student password (student ID).

    Returns:
        ``(APP_API_BASE, api_key)`` when the login succeeds.

    Raises:
        gr.Error: With the server's failure detail when the login fails.
    """
    successful, message = api_login(email, password)

    if successful:
        # On success `message` holds the access token from api_login.
        return os.getenv("APP_API_BASE"), message
    # Surface the failure in the Gradio UI.  (An unreachable
    # `return "", ""` after this raise was removed as dead code.)
    raise gr.Error(message)
57
+
58
+
59
  # Define a function to get the AI's reply using the OpenAI API
60
def get_ai_reply(
    message,
    model="gpt-3.5-turbo",
    system_message=None,
    temperature=0,
    message_history=None,
):
    """Return the assistant's reply for *message* via the OpenAI chat API.

    Args:
        message: The user's new message text.
        model: Chat model name passed to the API.
        system_message: Optional system prompt prepended to the conversation.
        temperature: Sampling temperature forwarded to the API.
        message_history: Optional list of prior ``{"role", "content"}``
            dicts.  Default changed from a shared mutable ``[]`` to ``None``
            (same behavior — the body already guards on ``is not None`` —
            but avoids the mutable-default-argument pitfall).

    Returns:
        The stripped text content of the model's first completion choice.
    """
    # Build the message list in the order the API expects:
    # system prompt, prior history, then the new user message.
    messages = []
    if system_message is not None:
        messages.append({"role": "system", "content": system_message})
    if message_history is not None:
        messages += message_history
    messages.append({"role": "user", "content": message})

    # Make the API call with the assembled conversation.
    completion = openai.ChatCompletion.create(
        model=model, messages=messages, temperature=temperature
    )

    # Extract and return the AI's response text.
    return completion.choices[0].message.content.strip()
88
 
89
+
90
def get_ai_image(prompt, size="512x512"):
    """Generate a single image for *prompt* and return its URL.

    Args:
        prompt: Text description of the desired image.
        size: Image dimensions string, e.g. ``"512x512"``.

    Returns:
        URL string of the generated image.
    """
    result = openai.Image.create(prompt=prompt, n=1, size=size)
    return result.data[0]["url"]
94
 
95
+
96
def get_ai_transcript(path_to_audio, language=None):
    """Transcribe an audio file with OpenAI Whisper.

    Args:
        path_to_audio: Path to the audio file to transcribe.
        language: Optional language hint forwarded to the API.

    Returns:
        The transcript text.
    """
    # Context manager ensures the file handle is closed even if the
    # API call raises (the original left the handle open).
    with open(path_to_audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe(
            "whisper-1", audio_file, language=language
        )
    return transcript.text
100
 
101
+
102
  def generate_transcription(path_to_audio_file):
103
  try:
104
  transcript = get_ai_transcript(path_to_audio_file)
 
106
  except Exception as e:
107
  raise gr.Error(e)
108
  return ""
109
+
110
+
111
  def generate_image(prompt):
112
  try:
113
  image_url = get_ai_image(prompt)
 
115
  except Exception as e:
116
  raise gr.Error(e)
117
  return None
118
+
119
+
120
  # Define a function to handle the chat interaction with the AI model
121
def chat(model, system_message, message, chatbot_messages, history_state):
    """Handle one chat turn: query the model and update the UI/state lists.

    Args:
        model: Model name selected in the UI dropdown.
        system_message: System prompt text.
        message: The user's new message.
        chatbot_messages: List of ``(user, assistant)`` tuples shown in the UI.
        history_state: List of ``{"role", "content"}`` dicts sent to the API.

    Returns:
        ``(None, chatbot_messages, history_state)`` — the leading ``None``
        clears the user's message textbox.
    """
    # Gradio passes None on the first turn; fall back to fresh lists.
    chatbot_messages = chatbot_messages or []
    history_state = history_state or []

    # Ask the model for a reply; surface any failure in the Gradio UI.
    try:
        reply = get_ai_reply(
            message,
            model=model,
            system_message=system_message,
            message_history=history_state,
        )
    except Exception as e:
        raise gr.Error(e)

    # Record the exchange in both the visible chat log and the API history.
    chatbot_messages.append((message, reply))
    history_state.append({"role": "user", "content": message})
    history_state.append({"role": "assistant", "content": reply})

    return None, chatbot_messages, history_state
147
 
148
+
149
  # Define a function to launch the chatbot interface using Gradio
150
  def get_chatbot_app(additional_examples=[]):
151
  # Load chatbot examples and merge with any additional examples provided
152
  examples = chatbot_examples.load_examples(additional=additional_examples)
153
+
154
  # Define a function to get the names of the examples
155
  def get_examples():
156
  return [example["name"] for example in examples]
157
 
158
  # Define a function to choose an example based on the index
159
  def choose_example(index):
160
+ if index != None:
161
  system_message = examples[index]["system_message"].strip()
162
  user_message = examples[index]["message"].strip()
163
  return system_message, user_message, [], []
 
170
  with gr.Row():
171
  with gr.Column():
172
  # Create a dropdown to select examples
173
+ example_dropdown = gr.Dropdown(
174
+ get_examples(), label="Examples", type="index"
175
+ )
176
  # Create a button to load the selected example
177
  example_load_btn = gr.Button(value="Load")
178
  # Create a textbox for the system message (prompt)
179
+ system_message = gr.TextArea(
180
+ label="System Message (Prompt)",
181
+ value="You are a helpful assistant.",
182
+ lines=20,
183
+ max_lines=400,
184
+ )
185
  with gr.Column():
186
  # Create a dropdown to select the AI model
187
  model_selector = gr.Dropdown(
188
+ ["gpt-3.5-turbo"], label="Model", value="gpt-3.5-turbo"
 
 
189
  )
190
  # Create a chatbot interface for the conversation
191
  chatbot = gr.Chatbot(label="Conversation")
 
197
  btn = gr.Button(value="Send")
198
 
199
  # Connect the example load button to the choose_example function
200
+ example_load_btn.click(
201
+ choose_example,
202
+ inputs=[example_dropdown],
203
+ outputs=[system_message, message, chatbot, history_state],
204
+ )
205
  # Connect the send button to the chat function
206
+ btn.click(
207
+ chat,
208
+ inputs=[
209
+ model_selector,
210
+ system_message,
211
+ message,
212
+ chatbot,
213
+ history_state,
214
+ ],
215
+ outputs=[message, chatbot, history_state],
216
+ )
217
  with gr.Tab("Image Generation"):
218
+ image_prompt = gr.Textbox(
219
+ label="Prompt", placeholder="A cute puppy wearing sunglasses."
220
+ )
221
  image_btn = gr.Button(value="Generate")
222
  image = gr.Image(label="Result", interactive=False, type="filepath")
223
  image_btn.click(generate_image, inputs=[image_prompt], outputs=[image])
 
225
  audio_file = gr.Audio(label="Audio", source="microphone", type="filepath")
226
  transcribe = gr.Button(value="Transcribe")
227
  audio_transcript = gr.Textbox(label="Transcription", interactive=False)
228
+ transcribe.click(
229
+ generate_transcription, inputs=[audio_file], outputs=[audio_transcript]
230
+ )
231
  with gr.Tab("Get API Key"):
232
  email_box = gr.Textbox(label="Email Address", placeholder="Student Email")
233
+ password_box = gr.Textbox(
234
+ label="Password", type="password", placeholder="Student ID"
235
+ )
236
+ btn = gr.Button(value="Generate")
237
  api_host_box = gr.Textbox(label="OpenAI API Base", interactive=False)
238
  api_key_box = gr.Textbox(label="OpenAI API Key", interactive=False)
239
+ btn.click(
240
+ get_api_key,
241
+ inputs=[email_box, password_box],
242
+ outputs=[api_host_box, api_key_box],
243
+ )
244
  # Return the app
245
  return app
246
 
247
+
248
  app = FastAPI()
249
 
250
  app.include_router(
 
261
  tags=["users"],
262
  )
263
 
264
+
265
  @app.get("/authenticated-route")
266
  async def authenticated_route(user: User = Depends(current_active_user)):
267
  return {"message": f"Hello {user.email}!"}
268
 
269
+
270
  @app.post("/v1/completions")
271
  async def openai_api_completions_passthrough(
272
  request: Request,
 
289
  "Authorization": f"Bearer {openai_api_key}",
290
  },
291
  )
292
+
293
+ # Forward the request to the OpenAI API
294
+ async with AsyncClient() as client:
295
+ response = await client.post(
296
+ "https://api.openai.com/v1/completions",
297
+ json=request_data,
298
+ headers={
299
+ "Content-Type": request_headers.get("Content-Type"),
300
+ "Authorization": f"Bearer {openai_api_key}",
301
+ },
302
+ )
303
 
304
  # Return the OpenAI API response
305
  return response.json()
 
317
  request_data = await request.json()
318
  request_headers = request.headers
319
  openai_api_key = os.getenv("OPENAI_API_KEY")
320
+
321
+ if "gpt-4" in request_data["model"]:
322
  print("User requested gpt-4, falling back to gpt-3.5-turbo")
323
+ request_data["model"] = "gpt-3.5-turbo"
324
 
325
  # Forward the request to the OpenAI API
326
+ async with AsyncClient() as client:
327
+ response = await client.post(
328
+ "https://api.openai.com/v1/chat/completions",
329
+ json=request_data,
330
+ headers={
331
+ "Content-Type": request_headers.get("Content-Type"),
332
+ "Authorization": f"Bearer {openai_api_key}",
333
+ },
334
+ )
335
 
336
  # Return the OpenAI API response
337
  return response.json()
338
 
339
+
340
  @app.on_event("startup")
341
  async def on_startup():
342
  # Not needed if you setup a migration system like Alembic
343
  await create_db_and_tables()
344
+
345
+
346
  gradio_gui = get_chatbot_app()
347
  gradio_gui.auth = api_login
348
  gradio_gui.auth_message = "Welcome, to the 4341 OpenAI Service"