sachin committed on
Commit · 678506e · 1 Parent(s): 0699851
tet
- Dockerfile +6 -0
- download_models.py +7 -2
Dockerfile
CHANGED
@@ -33,6 +33,12 @@ RUN pip install --no-cache-dir -r requirements.txt
 # Create a directory for pre-downloaded models
 RUN mkdir -p /app/models
 
+# Define build argument for HF_TOKEN
+ARG HF_TOKEN
+
+# Set environment variable for the build process
+ENV HF_TOKEN=$HF_TOKEN
+
 # Copy and run the model download script
 COPY download_models.py .
 RUN python download_models.py
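With this change the token can be supplied once at build time, for example via Docker's standard build-argument mechanism (docker build --build-arg HF_TOKEN=<your_token> .), so that download_models.py can authenticate to gated repositories such as google/gemma-3-4b-it while the image is built; the build command itself is not part of this commit. Note that ENV HF_TOKEN=$HF_TOKEN also persists the token in the final image's metadata, so a BuildKit secret or multi-stage build is the usual way to keep it out of the published image, but that is outside the scope of this diff.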
download_models.py
CHANGED
@@ -3,6 +3,11 @@ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, AutoProcessor, Au
 from transformers import Gemma3ForConditionalGeneration
 import os
 
+# Get the Hugging Face token from environment variable
+hf_token = os.getenv("HF_TOKEN")
+if not hf_token:
+    print("Warning: HF_TOKEN not set. Some models may require authentication.")
+
 # Define the models to download
 models = {
     'llm_model': ('google/gemma-3-4b-it', Gemma3ForConditionalGeneration, AutoProcessor),
@@ -22,9 +27,9 @@ os.makedirs(save_dir, exist_ok=True)
 # Download and save each model
 for name, (model_name, model_class, processor_class) in models.items():
     print(f'Downloading {model_name}...')
-    model = model_class.from_pretrained(model_name, trust_remote_code=True)
+    model = model_class.from_pretrained(model_name, trust_remote_code=True, token=hf_token)
     model.save_pretrained(f'{save_dir}/{name}')
     if processor_class:
-        processor = processor_class.from_pretrained(model_name, trust_remote_code=True)
+        processor = processor_class.from_pretrained(model_name, trust_remote_code=True, token=hf_token)
         processor.save_pretrained(f'{save_dir}/{name}')
     print(f'Saved {model_name} to {save_dir}/{name}')
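For context, serving code inside the container could then load the baked-in weights straight from disk without contacting the Hub at runtime. The snippet below is only a minimal sketch of that pattern and is not part of this commit; it assumes save_dir in download_models.py points at the /app/models directory created in the Dockerfile, so the Gemma model ends up under /app/models/llm_model.

# Minimal sketch (not part of this commit): load the pre-downloaded Gemma model
# from the directory that download_models.py saved it to during the image build.
from transformers import AutoProcessor, Gemma3ForConditionalGeneration

MODEL_DIR = "/app/models/llm_model"  # assumed save location from the build step

# local_files_only=True makes transformers read from disk only, so no network
# access or HF_TOKEN is needed once the image has been built.
model = Gemma3ForConditionalGeneration.from_pretrained(MODEL_DIR, local_files_only=True)
processor = AutoProcessor.from_pretrained(MODEL_DIR, local_files_only=True)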