import uuid
import gradio as gr
from pathlib import Path
from huggingface_hub import snapshot_download
from your_existing_training_file import create_dataset, start_training  # <-- update this import as needed
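
# Assumed interface for the imported helpers (adjust if your training file
# differs; this mirrors the flux LoRA trainer Gradio demos this script appears
# to be adapted from):
#   create_dataset(images, *captions) -> str   # returns the dataset folder path
#   start_training(**kwargs)          -> str   # returns a status/log message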

# Constants
REPO_ID = "rahul7star/ohamlab"
FOLDER_IN_REPO = "filter-demo/upload_20250708_041329_9c5c81"
CONCEPT_SENTENCE = "ohamlab style"
LORA_NAME = "ohami_filter_autorun"

def auto_run_lora_from_repo():
    """Download the image folder from the HF dataset repo, caption every
    image, and kick off LoRA training with a fixed configuration."""
    # Fresh, uniquely named working directory so repeated runs never collide
    local_dir = Path(f"/tmp/{LORA_NAME}-{uuid.uuid4()}")
    local_dir.mkdir(parents=True, exist_ok=True)

    # Pull just the image files for the target folder. hf_hub_download only
    # fetches a single named file, so snapshot_download with allow_patterns is
    # the call that actually mirrors a subfolder. (local_dir_use_symlinks is
    # deprecated; recent huggingface_hub copies real files into local_dir.)
    snapshot_download(
        repo_id=REPO_ID,
        repo_type="dataset",
        local_dir=local_dir,
        etag_timeout=10,
        allow_patterns=[
            f"{FOLDER_IN_REPO}/*.jpg",
            f"{FOLDER_IN_REPO}/*.jpeg",
            f"{FOLDER_IN_REPO}/*.png",
        ],
    )

    image_dir = local_dir / FOLDER_IN_REPO
    # Sort for a deterministic image/caption pairing across runs, and match
    # extensions case-insensitively so .JPG files are not silently skipped.
    image_paths = sorted(
        p for p in image_dir.rglob("*")
        if p.suffix.lower() in {".jpg", ".jpeg", ".png"}
    )

    if not image_paths:
        raise gr.Error(f"No images found under {FOLDER_IN_REPO} in {REPO_ID}.")

    # One caption per image; "[trigger]" is assumed to be the placeholder the
    # trainer replaces with the actual trigger word for the concept.
    captions = [
        f"Generated image caption for {img.stem} in the {CONCEPT_SENTENCE} [trigger]"
        for img in image_paths
    ]

    # Build the training dataset folder (see the assumed signature note by the
    # imports: the image list first, then one caption argument per image)
    dataset_path = create_dataset(image_paths, *captions)

    # Sample prompts (presumably used by the trainer for preview generations)
    sample_1 = f"A stylized portrait using {CONCEPT_SENTENCE}"
    sample_2 = f"A cat in the {CONCEPT_SENTENCE}"
    sample_3 = f"A selfie processed in {CONCEPT_SENTENCE}"

    # Training config
    steps = 1000
    lr = 4e-4
    rank = 16
    model_to_train = "dev"  # assumed to select FLUX.1-dev (vs. "schnell")
    low_vram = True
    use_more_advanced_options = True
    more_advanced_options = """\
training:
  seed: 42
  precision: bf16
  batch_size: 2
augmentation:
  flip: true
  color_jitter: true
"""

    # Train
    return start_training(
        lora_name=LORA_NAME,
        concept_sentence=CONCEPT_SENTENCE,
        steps=steps,
        lr=lr,
        rank=rank,
        model_to_train=model_to_train,
        low_vram=low_vram,
        dataset_folder=dataset_path,
        sample_1=sample_1,
        sample_2=sample_2,
        sample_3=sample_3,
        use_more_advanced_options=use_more_advanced_options,
        more_advanced_options=more_advanced_options
    )

# Gradio UI
with gr.Blocks(title="LoRA Autorun from HF Repo") as demo:
    gr.Markdown("# 🚀 Auto Run LoRA from Hugging Face Repo")
    output = gr.Textbox(label="Training Status", lines=3)
    run_button = gr.Button("Run Training from HF Repo")
    run_button.click(fn=auto_run_lora_from_repo, outputs=output)

if __name__ == "__main__":
    demo.launch(share=True)
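
# Usage sketch (assumes this file is saved as app.py next to your training
# module):
#   pip install gradio huggingface_hub
#   python app.py
# share=True additionally prints a temporary public *.gradio.live URL; drop it
# if local access is enough.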