import gradio as gr
import spaces
import torch
from datasets import load_dataset
from transformers import (
    AutoConfig,
    AutoTokenizer,
    AutoModelForCausalLM,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
    pipeline
)

#############################################################
# ZeroGPU REQUIREMENT:
#   - No CUDA references at global scope.
#   - All GPU usage within @spaces.GPU(...) functions.
#############################################################

# We use a small subset of WikiText-2 for demonstration; finetuning on the
# full dataset would likely exceed the typical ZeroGPU time budget.
NUM_EXAMPLES = 1000  # lower this to keep the demo even quicker

# We'll store the "inference pipeline" after training
TEXT_PIPELINE = None
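# NOTE: ZeroGPU may run each @spaces.GPU call in a forked worker process,
# so a global set inside one call is not guaranteed to persist into the
# next; ensure_pipeline() below handles the case where it is still None.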

@spaces.GPU(duration=300)  # up to 5 minutes for a mini-finetraining
def finetune_small_subset():
    """
    1) Loads the model & tokenizer from 'wuhp/myr1'.
    2) Loads a small subset of WikiText-2 for language modeling.
    3) Runs a quick 1-epoch finetune.
    4) Saves model + tokenizer to 'finetuned_myr1'.
    5) Loads the newly trained model back into a text-generation pipeline.
    Returns a success message.
    """

    # -------------------------------
    # A) Load a small dataset
    # -------------------------------
    ds = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
    # WikiText-2 raw contains many empty lines; drop them so the collator
    # never sees an all-padding example (whose labels would be entirely -100).
    ds = ds.filter(lambda ex: len(ex["text"].strip()) > 0)
    # Keep only a small subset so we stay inside the GPU time budget.
    ds = ds.select(range(min(NUM_EXAMPLES, len(ds))))

    def format_and_tokenize(ex):
        # Standard causal LM: treat each line as raw text.
        # `tokenizer` is a free variable resolved at call time, so it is fine
        # that it is only assigned in step B) -- we don't call this until C).
        return tokenizer(ex["text"], truncation=True, max_length=512)
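
    # Optional alternative (not used here): the canonical LM preprocessing
    # concatenates all tokenized text and splits it into fixed-size blocks,
    # avoiding short examples entirely. A minimal sketch, for reference:
    def group_texts(examples, block_size=512):
        # Flatten each field (input_ids, attention_mask, ...) across examples.
        concatenated = {k: sum(examples[k], []) for k in examples.keys()}
        total_len = (len(concatenated["input_ids"]) // block_size) * block_size
        # Split into block_size chunks, dropping the ragged tail.
        return {
            k: [t[i:i + block_size] for i in range(0, total_len, block_size)]
            for k, t in concatenated.items()
        }
    # (Would be applied via: ds = ds.map(group_texts, batched=True))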

    # -------------------------------
    # B) Load config, tokenizer, model from HF
    #    (trust_remote_code = True for custom modeling_deepseek)
    # -------------------------------
    config = AutoConfig.from_pretrained(
        "wuhp/myr1", 
        subfolder="myr1",
        trust_remote_code=True
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "wuhp/myr1", 
        subfolder="myr1",
        trust_remote_code=True
    )
    model = AutoModelForCausalLM.from_pretrained(
        "wuhp/myr1",
        subfolder="myr1",
        config=config,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto", 
        trust_remote_code=True
    )

    # -------------------------------
    # C) Process dataset
    # -------------------------------
    ds = ds.map(format_and_tokenize, batched=True, remove_columns=["text"])
    ds.set_format("torch")
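    # No padding was applied above: the data collator below pads each batch
    # dynamically to the longest sequence in that batch.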

    # -------------------------------
    # D) Data Collator
    # -------------------------------
    collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer,
        mlm=False
    )
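    # With mlm=False the collator builds causal-LM labels: a copy of
    # input_ids with padding positions set to -100 (ignored by the loss).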

    # -------------------------------
    # E) Training Arguments + Trainer
    # -------------------------------
    training_args = TrainingArguments(
        output_dir="finetuned_myr1",
        num_train_epochs=1,  # 1 epoch for demonstration
        per_device_train_batch_size=1,
        gradient_accumulation_steps=2,  # effective batch size of 2
        logging_steps=10,
        save_steps=999999,        # effectively "don't save mid-training"
        save_total_limit=1,
        fp16=torch.cuda.is_available(),
        report_to="none",  # don't require a wandb/tensorboard setup
        # ZeroGPU storage is ephemeral => no real advantage to push_to_hub.
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds,
        data_collator=collator,
    )

    # -------------------------------
    # F) Train
    # -------------------------------
    trainer.train()

    # -------------------------------
    # G) Save local checkpoint
    # -------------------------------
    trainer.save_model("finetuned_myr1")
    tokenizer.save_pretrained("finetuned_myr1")
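    # 'finetuned_myr1' now contains the config, weights, and tokenizer files,
    # so it can be reloaded below with plain from_pretrained() (no subfolder).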

    # -------------------------------
    # H) Reload the newly finetuned model as a pipeline
    # -------------------------------
    # We reload here so inference can run inside this same GPU session.
    # If the pipeline is used *after* this function returns, it may need to
    # be re-loaded in a separate call (see ensure_pipeline below).
    finetuned_model = AutoModelForCausalLM.from_pretrained(
        "finetuned_myr1",
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto",
        trust_remote_code=True
    )
    global TEXT_PIPELINE
    TEXT_PIPELINE = pipeline(
        "text-generation",
        model=finetuned_model,
        tokenizer=tokenizer
    )
    return "Finetuning complete. Model reloaded for inference!"

def ensure_pipeline():
    """
    If TEXT_PIPELINE is None (e.g., we didn't finetune yet),
    let's just load the *original* model from wuhp/myr1
    so that 'predict' can still run.
    """
    global TEXT_PIPELINE
    if TEXT_PIPELINE is None:
        # Load the original model for inference. pipeline() does not accept
        # a subfolder embedded in the repo path (e.g. "wuhp/myr1/myr1"), so
        # load the tokenizer and model explicitly with subfolder="myr1".
        tok = AutoTokenizer.from_pretrained(
            "wuhp/myr1", subfolder="myr1", trust_remote_code=True
        )
        mdl = AutoModelForCausalLM.from_pretrained(
            "wuhp/myr1",
            subfolder="myr1",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            device_map="auto",
            trust_remote_code=True
        )
        TEXT_PIPELINE = pipeline("text-generation", model=mdl, tokenizer=tok)
    return TEXT_PIPELINE

@spaces.GPU(duration=120)  # up to 2 minutes to generate text
def predict(prompt, min_new_tokens=260, max_new_tokens=2600):
    """
    Generate text from the (possibly finetuned) model. 
    We default max_new_tokens to 2,600, but allow up to 5,000 in the UI slider.
    We'll also ensure a minimum of 260 tokens.
    """
    pipe = ensure_pipeline()  # load the model if we haven't finetuned yet
    # The UI sliders allow min > max; clamp so generate() gets a valid pair.
    max_new_tokens = int(max_new_tokens)
    min_new_tokens = min(int(min_new_tokens), max_new_tokens)
    # temperature/top_p only take effect when sampling, so enable do_sample
    # explicitly. Large max_new_tokens risks hitting the 2-minute GPU limit.
    outputs = pipe(
        prompt,
        do_sample=True,
        min_new_tokens=min_new_tokens,
        max_new_tokens=max_new_tokens,
        temperature=0.7,
        top_p=0.9
    )
    return outputs[0]["generated_text"]

#############################################################
# Build a Gradio UI
#############################################################
with gr.Blocks() as demo:
    gr.Markdown("## ZeroGPU Finetuning & Long-Text Generation Demo")

    finetune_btn = gr.Button("Finetune on a small WikiText-2 subset (5 min limit)")
    finetune_status = gr.Textbox(label="Status")
    # When user clicks, we run 'finetune_small_subset'
    finetune_btn.click(fn=finetune_small_subset, outputs=finetune_status)

    gr.Markdown(
        "Once finetuning completes, or if you skip it, you can still do inference "
        "with either the new or original model."
    )

    prompt_in = gr.Textbox(label="Prompt", lines=3)
    min_tok_slider = gr.Slider(
        minimum=260, maximum=5000, value=260, step=10,
        label="Minimum New Tokens"
    )
    max_tok_slider = gr.Slider(
        minimum=260, maximum=5000, value=2600, step=50,
        label="Maximum New Tokens"
    )
    gen_btn = gr.Button("Generate")
    output_box = gr.Textbox(label="Generated Text", lines=12)

    gen_btn.click(
        fn=predict,
        inputs=[prompt_in, min_tok_slider, max_tok_slider],
        outputs=output_box
    )

demo.launch()