abdullahzunorain committed
Commit f3c2ab5 · verified · 1 Parent(s): c9d57cd

Update app.py

Files changed (1):
  1. app.py +39 -192

app.py CHANGED
@@ -1,14 +1,18 @@
+
+
+
+
+
 import whisper
 import os
 from gtts import gTTS
 import gradio as gr
 from groq import Groq
-from datetime import datetime  # Import datetime to handle timestamps
-from IPython.display import display, HTML
+from datetime import datetime
 
 # Load a smaller Whisper model for faster processing
 try:
-    model = whisper.load_model("tiny", weights_only=True)  # Set weights_only=True to address the warning
+    model = whisper.load_model("tiny")
 except Exception as e:
     print(f"Error loading Whisper model: {e}")
     model = None
@@ -89,43 +93,6 @@ def chatbot(audio):
         print(f"Error in chatbot function: {e}")
         return "Sorry, there was an error processing your request.", None, chat_history
 
-# Inject custom CSS for background and styling
-def set_background():
-    display(HTML(f'''
-    <style>
-    body {{
-        background-image: url("https://github.com/abdullahzunorain/voice-to-voice-Chatbot/blob/main/image_1");
-        background-size: cover;
-        background-position: center;
-        background-repeat: no-repeat;
-        color: white;
-        font-family: Arial, sans-serif;
-    }}
-    .gradio-container {{
-        background-color: rgba(0, 0, 0, 0.6);
-        padding: 20px;
-        border-radius: 8px;
-        box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
-    }}
-    h1, h2, p, .gradio-label {{
-        color: #FFD700; /* Gold color for labels and titles */
-    }}
-    .gradio-button {{
-        background-color: #FFD700;
-        color: black;
-        border-radius: 4px;
-        font-weight: bold;
-    }}
-    .gradio-input {{
-        background-color: rgba(255, 255, 255, 0.9);
-        border-radius: 4px;
-    }}
-    </style>
-    '''))
-
-# Display custom background styling
-set_background()
-
 # Gradio interface for real-time interaction with chat history display
 iface = gr.Interface(
     fn=chatbot,
@@ -137,161 +104,41 @@ iface = gr.Interface(
     live=True,
     title="Stylish Audio Chatbot with Groq API",
     description="Upload your audio, and the chatbot will transcribe and respond to it with a synthesized response.",
-    theme="default"
+    theme="default",
+    css='''
+    body {
+        background-image: url("https://raw.githubusercontent.com/username/repository/main/path/to/your-image.png");
+        background-size: cover;
+        background-position: center;
+        background-repeat: no-repeat;
+        color: white;
+        font-family: Arial, sans-serif;
+    }
+    .gradio-container {
+        background-color: rgba(0, 0, 0, 0.6);
+        padding: 20px;
+        border-radius: 8px;
+        box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
+    }
+    h1, h2, p, .gradio-label {
+        color: #FFD700; /* Gold color for labels and titles */
+    }
+    .gradio-button {
+        background-color: #FFD700;
+        color: black;
+        border-radius: 4px;
+        font-weight: bold;
+    }
+    .gradio-input {
+        background-color: rgba(255, 255, 255, 0.9);
+        border-radius: 4px;
+    }
+    '''
 )
 
-# Launch the Gradio app with public sharing enabled
+# Launch the Gradio app
 if __name__ == "__main__":
-    iface.launch(share=True)
+    iface.launch()
-
-
-
-
-
-#----------------------------------------------------------------------------------------------------------------------------------------------------------
-
-
-
-
-
-
-
-# import whisper
-# import os
-# from gtts import gTTS
-# import gradio as gr
-# from groq import Groq
-# from datetime import datetime
-
-# # Load a smaller Whisper model for faster processing
-# try:
-#     model = whisper.load_model("tiny")
-# except Exception as e:
-#     print(f"Error loading Whisper model: {e}")
-#     model = None
-
-# # Set up Groq API client using environment variable
-# GROQ_API_TOKEN = os.getenv("GROQ_API")
-# if not GROQ_API_TOKEN:
-#     raise ValueError("Groq API token is missing. Set 'GROQ_API' in your environment variables.")
-# client = Groq(api_key=GROQ_API_TOKEN)
-
-# # Initialize the chat history
-# chat_history = []
-
-# # Function to get the LLM response from Groq with timeout handling
-# def get_llm_response(user_input, role="detailed responder"):
-#     prompt = f"As an expert, provide a detailed and knowledgeable response: {user_input}" if role == "expert" else \
-#              f"As a good assistant, provide a clear, concise, and helpful response: {user_input}" if role == "good assistant" else \
-#              f"Provide a thorough and detailed response: {user_input}"
-
-#     try:
-#         chat_completion = client.chat.completions.create(
-#             messages=[{"role": "user", "content": user_input}],
-#             model="llama3-8b-8192",  # Replace with your desired model
-#             timeout=20  # Increased timeout to 20 seconds
-#         )
-#         return chat_completion.choices[0].message.content
-#     except Exception as e:
-#         print(f"Error during LLM response retrieval: {e}")
-#         return "Sorry, there was an error retrieving the response. Please try again."
-
-# # Function to convert text to speech using gTTS
-# def text_to_speech(text):
-#     try:
-#         tts = gTTS(text)
-#         output_audio = "output_audio.mp3"
-#         tts.save(output_audio)
-#         return output_audio
-#     except Exception as e:
-#         print(f"Error generating TTS: {e}")
-#         return None
-
-# # Main chatbot function to handle audio input and output with chat history
-# def chatbot(audio):
-#     if not model:
-#         return "Error: Whisper model is not available.", None, chat_history
-
-#     if not audio:
-#         return "No audio provided. Please upload a valid audio file.", None, chat_history
-
-#     try:
-#         # Step 1: Transcribe the audio using Whisper
-#         result = model.transcribe(audio)
-#         user_text = result.get("text", "")
-#         if not user_text.strip():
-#             return "Could not understand the audio. Please try speaking more clearly.", None, chat_history
-
-#         # Get current timestamp
-#         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-#         # Display transcription in chat history
-#         chat_history.append((timestamp, "User", user_text))
-
-#         # Step 2: Get LLM response from Groq
-#         response_text = get_llm_response(user_text)
-
-#         # Step 3: Convert the response text to speech
-#         output_audio = text_to_speech(response_text)
-
-#         # Append the latest interaction to the chat history
-#         chat_history.append((timestamp, "Chatbot", response_text))
-
-#         # Format the chat history for display with timestamps and clear labels
-#         formatted_history = "\n".join([f"[{time}] {speaker}: {text}" for time, speaker, text in chat_history])
-
-#         return formatted_history, output_audio, chat_history
-
-#     except Exception as e:
-#         print(f"Error in chatbot function: {e}")
-#         return "Sorry, there was an error processing your request.", None, chat_history
-
-# # Gradio interface for real-time interaction with chat history display
-# iface = gr.Interface(
-#     fn=chatbot,
-#     inputs=gr.Audio(type="filepath"),
-#     outputs=[
-#         gr.Textbox(label="Chat History"),  # Display chat history
-#         gr.Audio(type="filepath", label="Response Audio"),
-#     ],
-#     live=True,
-#     title="Stylish Audio Chatbot with Groq API",
-#     description="Upload your audio, and the chatbot will transcribe and respond to it with a synthesized response.",
-#     theme="default",
-#     css='''
-#     body {
-#         background-image: url("https://raw.githubusercontent.com/username/repository/main/path/to/your-image.png");
-#         background-size: cover;
-#         background-position: center;
-#         background-repeat: no-repeat;
-#         color: white;
-#         font-family: Arial, sans-serif;
-#     }
-#     .gradio-container {
-#         background-color: rgba(0, 0, 0, 0.6);
-#         padding: 20px;
-#         border-radius: 8px;
-#         box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
-#     }
-#     h1, h2, p, .gradio-label {
-#         color: #FFD700; /* Gold color for labels and titles */
-#     }
-#     .gradio-button {
-#         background-color: #FFD700;
-#         color: black;
-#         border-radius: 4px;
-#         font-weight: bold;
-#     }
-#     .gradio-input {
-#         background-color: rgba(255, 255, 255, 0.9);
-#         border-radius: 4px;
-#     }
-#     '''
-# )
-
-# # Launch the Gradio app
-# if __name__ == "__main__":
-#     iface.launch()
 