sachin
committed on
Commit
·
57245bd
1
Parent(s):
0fb44d7
test
Browse files — Dockerfile: +2 −21
Dockerfile
CHANGED
@@ -33,27 +33,8 @@ RUN pip install --no-cache-dir -r requirements.txt
|
|
33 |
# Create a directory for pre-downloaded models
|
34 |
RUN mkdir -p /app/models
|
35 |
|
36 |
-
# Pre-download models using a Python script
|
37 |
-
RUN python -c "
|
38 |
-
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, AutoProcessor, AutoModel; \
|
39 |
-
from transformers import Gemma3ForConditionalGeneration; \
|
40 |
-
import os; \
|
41 |
-
models = { \
|
42 |
-
'llm_model': ('google/gemma-3-4b-it', Gemma3ForConditionalGeneration, AutoProcessor), \
|
43 |
-
'tts_model': ('ai4bharat/IndicF5', AutoModel, None), \
|
44 |
-
'asr_model': ('ai4bharat/indic-conformer-600m-multilingual', AutoModel, None), \
|
45 |
-
'trans_en_indic': ('ai4bharat/indictrans2-en-indic-dist-200M', AutoModelForSeq2SeqLM, AutoTokenizer), \
|
46 |
-
'trans_indic_en': ('ai4bharat/indictrans2-indic-en-dist-200M', AutoModelForSeq2SeqLM, AutoTokenizer), \
|
47 |
-
'trans_indic_indic': ('ai4bharat/indictrans2-indic-indic-dist-320M', AutoModelForSeq2SeqLM, AutoTokenizer), \
|
48 |
-
}; \
|
49 |
-
for name, (model_name, model_class, processor_class) in models.items(): \
|
50 |
-
print(f'Downloading {model_name}...'); \
|
51 |
-
model = model_class.from_pretrained(model_name, trust_remote_code=True); \
|
52 |
-
model.save_pretrained(f'/app/models/{name}'); \
|
53 |
-
if processor_class: \
|
54 |
-
processor = processor_class.from_pretrained(model_name, trust_remote_code=True); \
|
55 |
-
processor.save_pretrained(f'/app/models/{name}'); \
|
56 |
-
"
|
57 |
|
58 |
# Copy application code
|
59 |
COPY . .
|
|
|
33 |
# Create a directory for pre-downloaded models
|
34 |
RUN mkdir -p /app/models
|
35 |
|
36 |
+
# Pre-download models using a single-line Python script
|
37 |
+
# Pre-download every model at image-build time so the container can start
# without hitting the Hugging Face hub at runtime.
#
# NOTE(review): the previous `python -c` one-liner was a Python SyntaxError —
# compound statements (`for`, `if`) cannot follow `;` on a single logical
# line — so this layer could never build. Rewritten as a BuildKit heredoc
# (requires the BuildKit builder / `# syntax=docker/dockerfile:1`), which
# allows real multi-line Python. The unused `import os` was dropped.
# trust_remote_code=True executes repo-supplied code — acceptable here only
# because the model ids are pinned to known publishers.
RUN python <<'PYEOF'
from transformers import (
    AutoModel,
    AutoModelForSeq2SeqLM,
    AutoProcessor,
    AutoTokenizer,
    Gemma3ForConditionalGeneration,
)

# local directory name -> (hub model id, model class, optional processor/tokenizer class)
MODELS = {
    'llm_model': ('google/gemma-3-4b-it', Gemma3ForConditionalGeneration, AutoProcessor),
    'tts_model': ('ai4bharat/IndicF5', AutoModel, None),
    'asr_model': ('ai4bharat/indic-conformer-600m-multilingual', AutoModel, None),
    'trans_en_indic': ('ai4bharat/indictrans2-en-indic-dist-200M', AutoModelForSeq2SeqLM, AutoTokenizer),
    'trans_indic_en': ('ai4bharat/indictrans2-indic-en-dist-200M', AutoModelForSeq2SeqLM, AutoTokenizer),
    'trans_indic_indic': ('ai4bharat/indictrans2-indic-indic-dist-320M', AutoModelForSeq2SeqLM, AutoTokenizer),
}

for name, (model_id, model_cls, processor_cls) in MODELS.items():
    print(f'Downloading {model_id}...')
    model = model_cls.from_pretrained(model_id, trust_remote_code=True)
    model.save_pretrained(f'/app/models/{name}')
    # Some checkpoints (TTS/ASR AutoModel entries) ship no separate
    # processor/tokenizer; skip those.
    if processor_cls is not None:
        processor = processor_cls.from_pretrained(model_id, trust_remote_code=True)
        processor.save_pretrained(f'/app/models/{name}')
PYEOF
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
38 |
|
39 |
# Copy application code
|
40 |
COPY . .
|