Pijush2023 commited on
Commit
148de2d
·
verified ·
1 Parent(s): a36352a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -3
app.py CHANGED
@@ -612,7 +612,6 @@ def install_parler_tts():
612
  # Call the function to install parler-tts
613
  install_parler_tts()
614
 
615
-
616
  import gradio as gr
617
  import requests
618
  import os
@@ -943,7 +942,7 @@ def generate_map(location_names):
943
  if geocode_result:
944
  location = geocode_result[0]['geometry']['location']
945
  folium.Marker(
946
- [location['lat'], location['lng']],
947
  tooltip=f"{geocode_result[0]['formatted_address']}"
948
  ).add_to(m)
949
 
@@ -1154,7 +1153,14 @@ def process_chunk(chunk, model, tokenizer, device):
1154
  def generate_audio_parler_tts(text):
1155
  model_id = 'parler-tts/parler_tts_mini_v0.1'
1156
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
1157
- model = ParlerTTSForConditionalGeneration.from_pretrained(model_id).to(device)
 
 
 
 
 
 
 
1158
  tokenizer = AutoTokenizer.from_pretrained(model_id)
1159
 
1160
  text_chunks = chunk_text(text)
@@ -1259,3 +1265,4 @@ demo.launch(share=True)
1259
 
1260
 
1261
 
 
 
612
  # Call the function to install parler-tts
613
  install_parler_tts()
614
 
 
615
  import gradio as gr
616
  import requests
617
  import os
 
942
  if geocode_result:
943
  location = geocode_result[0]['geometry']['location']
944
  folium.Marker(
945
+ [location['lat'], location['lng']],
946
  tooltip=f"{geocode_result[0]['formatted_address']}"
947
  ).add_to(m)
948
 
 
1153
  def generate_audio_parler_tts(text):
1154
  model_id = 'parler-tts/parler_tts_mini_v0.1'
1155
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
1156
+
1157
+ try:
1158
+ model = ParlerTTSForConditionalGeneration.from_pretrained(model_id).to(device)
1159
+ except torch.cuda.OutOfMemoryError:
1160
+ print("CUDA out of memory. Switching to CPU.")
1161
+ device = "cpu"
1162
+ model = ParlerTTSForConditionalGeneration.from_pretrained(model_id).to(device)
1163
+
1164
  tokenizer = AutoTokenizer.from_pretrained(model_id)
1165
 
1166
  text_chunks = chunk_text(text)
 
1265
 
1266
 
1267
 
1268
+