import os
import gradio as gr
from transformers import AutoModel, AutoTokenizer
def process_models(model_name, save_dir, additional_models):
    log_lines = []

    # Process the primary model.
    log_lines.append(f"🔄 Loading model: **{model_name}**")
    try:
        model = AutoModel.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model_save_path = os.path.join(save_dir, model_name.replace("/", "_"))
        os.makedirs(model_save_path, exist_ok=True)
        model.save_pretrained(model_save_path)
        tokenizer.save_pretrained(model_save_path)  # keep the tokenizer alongside its model
        log_lines.append(f"✅ Saved **{model_name}** to `{model_save_path}`")
    except Exception as e:
        log_lines.append(f"❌ Error with **{model_name}**: {e}")

    # Process any additional models the same way.
    if additional_models:
        for m in additional_models:
            log_lines.append(f"🔄 Loading model: **{m}**")
            try:
                model = AutoModel.from_pretrained(m)
                tokenizer = AutoTokenizer.from_pretrained(m)
                model_save_path = os.path.join(save_dir, m.replace("/", "_"))
                os.makedirs(model_save_path, exist_ok=True)
                model.save_pretrained(model_save_path)
                tokenizer.save_pretrained(model_save_path)
                log_lines.append(f"✅ Saved **{m}** to `{model_save_path}`")
            except Exception as e:
                log_lines.append(f"❌ Error with **{m}**: {e}")

    return "\n".join(log_lines)
# Mermaid glossary: a short flow summary of the app's UI actions.
mermaid_glossary = """graph LR
A[📥 Model Input] --> B[Load Model]
B --> C[💾 Save Model]
D[🧩 Additional Models] --> B
"""