Spaces:
Runtime error
Runtime error
Update LLMwithvoice.py
Browse files- LLMwithvoice.py +14 -20
LLMwithvoice.py
CHANGED
@@ -19,7 +19,13 @@ torch_dtype = torch.float16 if device != "cpu" else torch.float32
|
|
19 |
model = ParlerTTSForConditionalGeneration.from_pretrained("parler-tts/parler_tts_mini_v0.1").to(device, dtype=torch_dtype)
|
20 |
tokenizer = AutoTokenizer.from_pretrained("parler-tts/parler_tts_mini_v0.1")
|
21 |
|
22 |
-
def query_roberta(api_token,
|
|
|
|
|
|
|
|
|
|
|
|
|
23 |
headers = {"Authorization": f"Bearer {api_token}"}
|
24 |
response = requests.post(API_URL_ROBERTA, headers=headers, json=payload)
|
25 |
try:
|
@@ -27,11 +33,10 @@ def query_roberta(api_token, payload):
|
|
27 |
except ValueError:
|
28 |
return {"error": "Invalid JSON response"}
|
29 |
|
30 |
-
def generate_speech(
|
31 |
-
input_ids = tokenizer(
|
32 |
-
prompt_input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
|
33 |
|
34 |
-
generation = model.generate(input_ids=input_ids
|
35 |
audio_arr = generation.cpu().numpy().squeeze()
|
36 |
|
37 |
# Construct the audio path dynamically
|
@@ -46,18 +51,7 @@ def generate_speech(prompt, description):
|
|
46 |
|
47 |
return audio_path
|
48 |
|
49 |
-
def
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
"context": context
|
54 |
-
}
|
55 |
-
}
|
56 |
-
response = query_roberta(api_token, payload)
|
57 |
-
if "error" in response:
|
58 |
-
return f"Error: {response['error']}"
|
59 |
-
else:
|
60 |
-
try:
|
61 |
-
return response['answer']
|
62 |
-
except (IndexError, KeyError):
|
63 |
-
return f"Unexpected response structure: {response}"
|
|
|
19 |
model = ParlerTTSForConditionalGeneration.from_pretrained("parler-tts/parler_tts_mini_v0.1").to(device, dtype=torch_dtype)
|
20 |
tokenizer = AutoTokenizer.from_pretrained("parler-tts/parler_tts_mini_v0.1")
|
21 |
|
22 |
+
def query_roberta(api_token, prompt, context):
|
23 |
+
payload = {
|
24 |
+
"inputs": {
|
25 |
+
"question": prompt,
|
26 |
+
"context": context
|
27 |
+
}
|
28 |
+
}
|
29 |
headers = {"Authorization": f"Bearer {api_token}"}
|
30 |
response = requests.post(API_URL_ROBERTA, headers=headers, json=payload)
|
31 |
try:
|
|
|
33 |
except ValueError:
|
34 |
return {"error": "Invalid JSON response"}
|
35 |
|
36 |
+
def generate_speech(answer):
|
37 |
+
input_ids = tokenizer(answer, return_tensors="pt").input_ids.to(device)
|
|
|
38 |
|
39 |
+
generation = model.generate(input_ids=input_ids).to(torch.float32)
|
40 |
audio_arr = generation.cpu().numpy().squeeze()
|
41 |
|
42 |
# Construct the audio path dynamically
|
|
|
51 |
|
52 |
return audio_path
|
53 |
|
54 |
+
def gradio_interface(api_token, prompt, context):
    """Drive the full question-answering-to-speech pipeline for the Gradio UI.

    Sends *prompt* and *context* to the RoBERTa QA endpoint (authenticated
    with *api_token*), synthesizes the returned answer to speech, and hands
    back both pieces for the interface's two output components.

    Parameters
    ----------
    api_token : str
        Hugging Face API token used as the Bearer credential.
    prompt : str
        The question to ask.
    context : str
        The passage the answer should be extracted from.

    Returns
    -------
    tuple
        ``(answer, audio_path)`` — the text answer and the path of the
        generated audio file.

    NOTE(review): if `query_roberta` returns its error dict (non-str), it is
    passed to `generate_speech` unchanged — confirm upstream handling.
    """
    qa_answer = query_roberta(api_token, prompt, context)
    speech_path = generate_speech(qa_answer)
    return qa_answer, speech_path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|