Jyothikamalesh committed on
Commit
e630485
·
verified ·
1 Parent(s): 93e340c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -24
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import gradio as gr
2
  import os
3
- import aiohttp
4
  import tenacity
5
  import nest_asyncio
6
  import asyncio
@@ -8,6 +8,7 @@ import asyncio
8
  nest_asyncio.apply()
9
 
10
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
 
11
 
12
  # Retry logic with tenacity for handling API rate limits
13
  @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10), stop=tenacity.stop_after_attempt(5))
@@ -19,36 +20,36 @@ async def respond(
19
  top_p,
20
  ):
21
  try:
 
22
  # Only use the system message and the current message for the response
23
  messages = [{"role": "system", "content": system_message},
24
  {"role": "user", "content": message}]
25
 
26
- async with aiohttp.ClientSession() as session:
27
- async with session.post(
28
- "https://api.openai.com/v1/chat/completions",
29
- headers={
30
- "Authorization": f"Bearer {ACCESS_TOKEN}",
31
- "Content-Type": "application/json",
32
- },
33
- json={
34
- "model": "NousResearch/Hermes-3-Llama-3.1-8B",
35
- "max_tokens": max_tokens,
36
- "temperature": temperature,
37
- "top_p": top_p,
38
- "messages": messages,
39
- "stream": True,
40
- },
41
- ) as response:
42
- response.raise_for_status()
43
- response_text = await response.text()
44
- return response_text
45
 
46
- except aiohttp.ClientError as e:
47
- print(f"ClientError: {e}")
 
 
 
 
 
 
 
 
 
 
48
  return "Error occurred. Please try again."
49
 
50
  except Exception as e:
51
- print(f"Exception: {e}")
52
  return "Error occurred. Please try again."
53
 
54
 
@@ -81,7 +82,7 @@ def launch_app():
81
  )
82
  demo.launch(show_error=True)
83
  except KeyError as e:
84
- print(f"Error: {e}")
85
  print("Please try again.")
86
 
87
  if __name__ == "__main__":
 
1
  import gradio as gr
2
  import os
3
+ import openai
4
  import tenacity
5
  import nest_asyncio
6
  import asyncio
 
8
  nest_asyncio.apply()
9
 
10
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
11
+ openai.api_key = ACCESS_TOKEN
12
 
13
  # Retry logic with tenacity for handling API rate limits
14
  @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10), stop=tenacity.stop_after_attempt(5))
 
20
  top_p,
21
  ):
22
  try:
23
+ print("Making request to OpenAI API...")
24
  # Only use the system message and the current message for the response
25
  messages = [{"role": "system", "content": system_message},
26
  {"role": "user", "content": message}]
27
 
28
+ response = openai.ChatCompletion.create(
29
+ model="NousResearch/Hermes-3-Llama-3.1-8B",
30
+ max_tokens=max_tokens,
31
+ temperature=temperature,
32
+ top_p=top_p,
33
+ messages=messages,
34
+ stream=True,
35
+ )
 
 
 
 
 
 
 
 
 
 
 
36
 
37
+ print("Received response from OpenAI API...")
38
+ response_text = ""
39
+ for chunk in response:
40
+ if 'choices' in chunk and 'delta' in chunk['choices'][0] and 'content' in chunk['choices'][0]['delta']:
41
+ token = chunk['choices'][0]['delta']['content']
42
+ response_text += token
43
+
44
+ print("Response text:", response_text)
45
+ return response_text
46
+
47
+ except openai.error.APIError as e:
48
+ print("APIError:", e)
49
  return "Error occurred. Please try again."
50
 
51
  except Exception as e:
52
+ print("Exception:", e)
53
  return "Error occurred. Please try again."
54
 
55
 
 
82
  )
83
  demo.launch(show_error=True)
84
  except KeyError as e:
85
+ print("Error:", e)
86
  print("Please try again.")
87
 
88
  if __name__ == "__main__":