Back to using GPU...
Files changed:
- app.py +2 -2
- model_translation.py +1 -2
app.py CHANGED

@@ -116,7 +116,7 @@ def detect_language(text):
     lang = langdetect.detect(text)
     return lang
 
-
+@spaces.GPU
 def translate_with_multilingual_model(
     text: str,
     tgt_lang: str,
@@ -194,7 +194,7 @@ def translate_text(
 with gr.Blocks() as demo:
 
     gr.Markdown("""
-## Text translation v0.0.2 (
+## Text translation v0.0.2 (small paragraph, multilingual)
 """)
     input_text = gr.Textbox(
         lines=15,
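Note on the app.py change: `@spaces.GPU` comes from Hugging Face's `spaces` package, used on ZeroGPU Spaces to attach a GPU only while the decorated function is running. A minimal sketch of the pattern, assuming the Space runs on ZeroGPU hardware and that `import spaces` already exists elsewhere in app.py (the import is not shown in this diff); the function body is elided here:

import spaces   # Hugging Face ZeroGPU helper package

@spaces.GPU     # a GPU is allocated for the duration of each call to this function
def translate_with_multilingual_model(text: str, tgt_lang: str) -> str:
    ...         # body elided; the real implementation lives in app.py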
model_translation.py CHANGED

@@ -66,7 +66,6 @@ model_MADLAD_name = "google/madlad400-3b-mt"
 tokenizer_multilingual = AutoTokenizer.from_pretrained(model_MADLAD_name, use_fast=True)
 model_multilingual = AutoModelForSeq2SeqLM.from_pretrained(
     model_MADLAD_name,
-
+    device_map="auto",
     torch_dtype=torch.float16,
     low_cpu_mem_usage=True)
-
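Note on the model_translation.py change: `device_map="auto"` tells Transformers (via the `accelerate` library, which must be installed for this option) to place the MADLAD-400 weights on the available GPU automatically instead of leaving the model on CPU. For reference, the resulting loading code with the options annotated; this mirrors the file's own lines rather than adding anything new:

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_MADLAD_name = "google/madlad400-3b-mt"

tokenizer_multilingual = AutoTokenizer.from_pretrained(model_MADLAD_name, use_fast=True)
model_multilingual = AutoModelForSeq2SeqLM.from_pretrained(
    model_MADLAD_name,
    device_map="auto",          # let accelerate place the weights on the available GPU(s)
    torch_dtype=torch.float16,  # half precision keeps the ~3B-parameter model smaller in memory
    low_cpu_mem_usage=True)     # avoid materializing a full extra copy on CPU while loading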