Update app.py
app.py
CHANGED
@@ -6,36 +6,39 @@ from flask import Flask, request, jsonify, send_file
 from flask_cors import CORS
 from transformers import VitsModel, AutoTokenizer
 
-
-
-os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
+
+# Set ALL cache directories to /tmp (writable in Hugging Face Spaces)
 os.environ["HF_HOME"] = "/tmp/hf_home"
+os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
+os.environ["HUGGINGFACE_HUB_CACHE"] = "/tmp/huggingface_cache"
 os.environ["TORCH_HOME"] = "/tmp/torch_home"
 
+
 app = Flask(__name__)
 CORS(app)  # Allow external requests
 
-#
+# Model paths for different languages (Hugging Face Hub)
 MODELS = {
     "kapampangan": "facebook/mms-tts-pam",
     "tagalog": "facebook/mms-tts-tgl",
     "english": "facebook/mms-tts-eng"
 }
 
-# Model storage
 loaded_models = {}
 loaded_processors = {}
 
-
-for lang, model_path in MODELS.items():
+for lang, path in MODELS.items():
     try:
-        print(f"Loading {lang} model: {
-
-
+        print(f"Loading {lang} model: {path}...")
+
+        # Force models to save in /tmp
+        loaded_models[lang] = VitsModel.from_pretrained(path, cache_dir="/tmp/huggingface_cache")
+        loaded_processors[lang] = AutoTokenizer.from_pretrained(path, cache_dir="/tmp/huggingface_cache")
+
         print(f"{lang.capitalize()} model loaded successfully!")
     except Exception as e:
-        print(f"Error loading {lang} model: {e}")
-        loaded_models[lang] = None
+        print(f"Error loading {lang} model: {str(e)}")
+        loaded_models[lang] = None  # Mark as unavailable
         loaded_processors[lang] = None
 
 # Constants
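The hunk above only covers cache setup and startup model loading; the synthesis endpoint itself is not part of this commit. As a rough sketch of how the loaded_models / loaded_processors pairs and the already-imported request, jsonify, and send_file are typically wired together, the code below is an assumption-laden illustration: the route name /tts, the JSON keys "language" and "text", and the use of scipy.io.wavfile are hypothetical, not taken from app.py.

import io

import torch
from scipy.io import wavfile  # assumed dependency, not shown in this diff

@app.route("/tts", methods=["POST"])  # hypothetical route name
def tts():
    data = request.get_json(force=True)
    lang = data.get("language", "english")  # request shape is an assumption
    text = data.get("text", "")

    model = loaded_models.get(lang)
    tokenizer = loaded_processors.get(lang)
    if model is None or tokenizer is None:
        return jsonify({"error": f"Model for '{lang}' is unavailable"}), 503

    # Standard MMS-TTS inference: tokenize, run the VITS model, take the waveform.
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        waveform = model(**inputs).waveform[0].cpu().numpy()

    # Write a WAV into memory at the model's sampling rate and stream it back.
    buf = io.BytesIO()
    wavfile.write(buf, model.config.sampling_rate, waveform)
    buf.seek(0)
    return send_file(buf, mimetype="audio/wav")

The WAV header takes its rate from model.config.sampling_rate rather than a hard-coded value, so the same handler works for each MMS checkpoint loaded in the startup loop.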