Update app.py
app.py CHANGED
@@ -168,16 +168,25 @@ def generate_tts():
         model = tts_models[language]
         inputs = processor(text_input, return_tensors="pt")
 
+        # Generate speech - using model(**inputs) instead of model.generate()
         with torch.no_grad():
-            output = model
-
-
-
-
-
-
+            output = model(**inputs).waveform
+        waveform = output.squeeze().cpu().numpy()
+
+        # Save to file
+        output_filename = os.path.join(OUTPUT_DIR, f"{language}_output.wav")
+        # Use the model's sampling rate
+        sampling_rate = model.config.sampling_rate
+        sf.write(output_filename, waveform, sampling_rate)
+        print(f"✅ Speech generated! File saved: {output_filename}")
+
+        return jsonify({
+            "message": "TTS audio generated",
+            "file_url": f"/download/{language}_output.wav"
+        })
     except Exception as e:
-
+        print(f"❌ Error generating TTS: {e}")
+        return jsonify({"error": f"Internal server error: {str(e)}"}), 500
 
 
 @app.route("/download/<filename>", methods=["GET"])
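The change replaces model.generate() with a direct forward pass, which is how VITS-style TTS checkpoints in Hugging Face transformers (for example the MMS-TTS family) synthesize audio: the forward call returns a .waveform tensor, and model.config.sampling_rate gives the rate to write the WAV with. A minimal standalone sketch of that flow, assuming the facebook/mms-tts-eng checkpoint (the checkpoints actually loaded into tts_models are not visible in this diff):

    # Minimal sketch of the forward-pass TTS flow used above.
    # Assumes the MMS-TTS English checkpoint; the real app may load different models.
    import torch
    import soundfile as sf
    from transformers import VitsModel, AutoTokenizer

    model = VitsModel.from_pretrained("facebook/mms-tts-eng")
    processor = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")

    inputs = processor("Hello from the TTS endpoint", return_tensors="pt")
    with torch.no_grad():
        # VITS models synthesize in a single forward pass; there is no .generate()
        output = model(**inputs).waveform

    waveform = output.squeeze().cpu().numpy()
    sf.write("eng_output.wav", waveform, model.config.sampling_rate)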