Pijush2023 committed (verified)
Commit 7ce54b3 · Parent(s): b4b2728

Update app.py

Files changed (1): app.py (+2, -116)
app.py CHANGED
@@ -257,53 +257,7 @@ chain_neo4j = (
     | StrOutputParser()
 )
 
-# Define a function to select between Pinecone and Neo4j
-# def generate_answer(message, choice, retrieval_mode):
-#     logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
-
-#     prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
-
-#     if retrieval_mode == "VDB":
-#         qa_chain = RetrievalQA.from_chain_type(
-#             llm=chat_model,
-#             chain_type="stuff",
-#             retriever=retriever,
-#             chain_type_kwargs={"prompt": prompt_template}
-#         )
-#         response = qa_chain({"query": message})
-#         logging.debug(f"Vector response: {response}")
-#         return response['result'], extract_addresses(response['result'])
-#     elif retrieval_mode == "KGF":
-#         response = chain_neo4j.invoke({"question": message})
-#         logging.debug(f"Knowledge-Graph response: {response}")
-#         return response, extract_addresses(response)
-#     else:
-#         return "Invalid retrieval mode selected.", []
-
-# def bot(history, choice, tts_choice, retrieval_mode):
-#     if not history:
-#         return history
-
-#     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode)
-#     history[-1][1] = ""
-
-#     with concurrent.futures.ThreadPoolExecutor() as executor:
-#         if tts_choice == "Alpha":
-#             audio_future = executor.submit(generate_audio_elevenlabs, response)
-#         elif tts_choice == "Beta":
-#             audio_future = executor.submit(generate_audio_parler_tts, response)
-#         elif tts_choice == "Gamma":
-#             audio_future = executor.submit(generate_audio_mars5, response)
-
-#     for character in response:
-#         history[-1][1] += character
-#         time.sleep(0.05)
-#         yield history, None
-
-#     audio_path = audio_future.result()
-#     yield history, audio_path
 
-#     history.append([response, None])  # Ensure the response is added in the correct format
 
 
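Note: the block removed above was a commented-out draft of the retrieval-mode dispatcher; a live generate_answer with the same signature remains in app.py (it appears as context in the next hunk). For reference, a minimal sketch of the dispatch pattern it implemented, assuming chat_model, retriever, chain_neo4j, the two QA prompts, and extract_addresses are defined earlier in app.py as they are in this repo:

from langchain.chains import RetrievalQA

def generate_answer(message, choice, retrieval_mode):
    # Pick the prompt by answer style ("Details" vs. the conversational default).
    prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2

    if retrieval_mode == "VDB":
        # "stuff" packs all retrieved documents into a single prompt.
        qa_chain = RetrievalQA.from_chain_type(
            llm=chat_model,
            chain_type="stuff",
            retriever=retriever,
            chain_type_kwargs={"prompt": prompt_template},
        )
        response = qa_chain({"query": message})
        return response["result"], extract_addresses(response["result"])
    if retrieval_mode == "KGF":
        # The Neo4j chain returns a plain string rather than a dict.
        response = chain_neo4j.invoke({"question": message})
        return response, extract_addresses(response)
    return "Invalid retrieval mode selected.", []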
@@ -354,38 +308,7 @@ def generate_answer(message, choice, retrieval_mode):
 
 
 
-# def bot(history, choice, tts_choice, retrieval_mode):
-#     if not history:
-#         return history
-
-#     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode)
-#     history[-1][1] = ""
-
-#     # Detect if the response is from Yelp (i.e., HTML formatted response)
-#     if "<table>" in response:
-#         for chunk in response.splitlines():
-#             history[-1][1] += chunk + "\n"
-#             time.sleep(0.1)  # Adjust the delay as needed
-#             yield history, None
-#         return
-
-#     with concurrent.futures.ThreadPoolExecutor() as executor:
-#         if tts_choice == "Alpha":
-#             audio_future = executor.submit(generate_audio_elevenlabs, response)
-#         elif tts_choice == "Beta":
-#             audio_future = executor.submit(generate_audio_parler_tts, response)
-#         elif tts_choice == "Gamma":
-#             audio_future = executor.submit(generate_audio_mars5, response)
-
-#     for character in response:
-#         history[-1][1] += character
-#         time.sleep(0.05)
-#         yield history, None
-
-#     audio_path = audio_future.result()
-#     yield history, audio_path
-
-#     history.append([response, None])  # Ensure the response is added in the correct format
+
 
 def bot(history, choice, tts_choice, retrieval_mode):
     if not history:
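Note: both removed bot drafts use the same streaming pattern: submit text-to-speech to a worker thread, stream the reply into the Gradio history character by character, then yield the finished audio path. One subtlety: in the drafts the character loop sits after the with block, and exiting a ThreadPoolExecutor context calls shutdown(wait=True), so text streaming would only begin once audio synthesis had finished. A sketch that keeps the loop inside the context so the two actually overlap (the TTS helpers are the ones defined in app.py):

import concurrent.futures
import time

def bot(history, choice, tts_choice, retrieval_mode):
    if not history:
        return

    response, addresses = generate_answer(history[-1][0], choice, retrieval_mode)
    history[-1][1] = ""

    # HTML (e.g. a Yelp results table) streams line by line so tags stay intact.
    if "<table>" in response:
        for line in response.splitlines():
            history[-1][1] += line + "\n"
            time.sleep(0.1)
            yield history, None
        return

    tts_funcs = {
        "Alpha": generate_audio_elevenlabs,
        "Beta": generate_audio_parler_tts,
        "Gamma": generate_audio_mars5,
    }
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Audio synthesis runs in the background while the text streams.
        audio_future = executor.submit(tts_funcs[tts_choice], response)

        for character in response:
            history[-1][1] += character
            time.sleep(0.05)  # typing effect
            yield history, None

        # Emit the audio path alongside the completed text.
        yield history, audio_future.result()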
@@ -603,44 +526,7 @@ def show_map_if_details(history, choice):
     else:
         return gr.update(visible(False), "")
 
-# def generate_audio_elevenlabs(text):
-#     XI_API_KEY = os.environ['ELEVENLABS_API']
-#     VOICE_ID = 'd9MIrwLnvDeH7aZb61E9'
-#     tts_url = f"https://api.elevenlabs.io/v1/text-to-speech/{VOICE_ID}/stream"
-#     headers = {
-#         "Accept": "application/json",
-#         "xi-api-key": XI_API_KEY
-#     }
-#     data = {
-#         "text": str(text),
-#         "model_id": "eleven_multilingual_v2",
-#         "voice_settings": {
-#             "stability": 1.0,
-#             "similarity_boost": 0.0,
-#             "style": 0.60,
-#             "use_speaker_boost": False
-#         }
-#     }
-#     response = requests.post(tts_url, headers=headers, json=data, stream=True)
-#     if response.ok:
-#         audio_segments = []
-#         with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as f:
-#             for chunk in response.iter_content(chunk_size=1024):
-#                 if chunk:
-#                     f.write(chunk)
-#                     audio_segments.append(chunk)
-#             temp_audio_path = f.name
-
-#         # Combine all audio chunks into a single file
-#         combined_audio = AudioSegment.from_file(temp_audio_path, format="mp3")
-#         combined_audio_path = os.path.join(tempfile.gettempdir(), "elevenlabs_combined_audio.mp3")
-#         combined_audio.export(combined_audio_path, format="mp3")
-
-#         logging.debug(f"Audio saved to {combined_audio_path}")
-#         return combined_audio_path
-#     else:
-#         logging.error(f"Error generating audio: {response.text}")
-#         return None
+
 
 
 def preprocess_for_tts(text):
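Note: the removed ElevenLabs helper posted to the streaming text-to-speech endpoint, spooled the MP3 chunks into a temp file, and re-exported it through pydub. A standalone version of the same call, dropping the unused audio_segments list (voice ID, model, and voice settings copied from the removed code):

import logging
import os
import tempfile

import requests
from pydub import AudioSegment

def generate_audio_elevenlabs(text):
    xi_api_key = os.environ["ELEVENLABS_API"]
    voice_id = "d9MIrwLnvDeH7aZb61E9"
    tts_url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}/stream"
    headers = {"Accept": "application/json", "xi-api-key": xi_api_key}
    data = {
        "text": str(text),
        "model_id": "eleven_multilingual_v2",
        "voice_settings": {
            "stability": 1.0,
            "similarity_boost": 0.0,
            "style": 0.60,
            "use_speaker_boost": False,
        },
    }

    response = requests.post(tts_url, headers=headers, json=data, stream=True)
    if not response.ok:
        logging.error(f"Error generating audio: {response.text}")
        return None

    # Spool the streamed MP3 chunks straight to a temp file.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as f:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
        temp_audio_path = f.name

    # Re-export through pydub so downstream players get one clean MP3.
    combined_audio = AudioSegment.from_file(temp_audio_path, format="mp3")
    combined_audio_path = os.path.join(tempfile.gettempdir(), "elevenlabs_combined_audio.mp3")
    combined_audio.export(combined_audio_path, format="mp3")
    logging.debug(f"Audio saved to {combined_audio_path}")
    return combined_audio_path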