import gradio as gr
import torch
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer, DataCollatorForLanguageModeling
import pandas as pd
from huggingface_hub import login
def train_model(file, hf_token):
    try:
        # Log in to Hugging Face
        if not hf_token:
            return "Please provide a Hugging Face token"
        login(hf_token)

        # Load and prepare data
        df = pd.read_csv(file.name)
        dataset = Dataset.from_pandas(df)

        # Model setup
        model_name = "facebook/opt-125m"
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)

        # Tokenize the data so the Trainer receives input_ids instead of raw CSV rows.
        # Assumes the CSV has a "text" column; adjust the column name to your schema.
        def tokenize(batch):
            return tokenizer(batch["text"], truncation=True, max_length=512)

        dataset = dataset.map(tokenize, batched=True, remove_columns=dataset.column_names)

        # Collator pads each batch and derives causal-LM labels from the inputs
        data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)

        # Training configuration
        training_args = TrainingArguments(
            output_dir="./results",
            num_train_epochs=3,
            per_device_train_batch_size=2,
            learning_rate=3e-5,
            save_strategy="epoch",
            push_to_hub=True,
            hub_token=hf_token
        )

        # Initialize trainer
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=dataset,
            data_collator=data_collator,
            tokenizer=tokenizer
        )

        # Run training
        trainer.train()
        return "Training completed successfully!"
    except Exception as e:
        return f"Error occurred: {str(e)}"

# Create Gradio interface
demo = gr.Interface(
    fn=train_model,
    inputs=[
        gr.File(label="Upload your CSV file"),
        gr.Textbox(label="Hugging Face Token", type="password")
    ],
    outputs="text",
    title="Product Classifier Training",
    description="Upload your CSV data to train a product classifier model."
)
if __name__ == "__main__":
    demo.launch()