Spaces: Running on Zero

Update app.py

app.py CHANGED
```diff
@@ -14,74 +14,9 @@ with open('loras.json', 'r') as f:
 # Initialize the base model
 base_model = "black-forest-labs/FLUX.1-dev"
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
-original_load_lora = copy.deepcopy(pipe.load_lora_into_transformer)
 pipe.to("cuda")
 
-
-    from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict
-
-    keys = list(state_dict.keys())
-
-    transformer_keys = [k for k in keys if k.startswith(cls.transformer_name)]
-    state_dict = {
-        k.replace(f"{cls.transformer_name}.", ""): v for k, v in state_dict.items() if k in transformer_keys
-    }
-
-    if len(state_dict.keys()) > 0:
-        # check with first key if is not in peft format
-        first_key = next(iter(state_dict.keys()))
-        if "lora_A" not in first_key:
-            state_dict = convert_unet_state_dict_to_peft(state_dict)
-
-        if adapter_name in getattr(transformer, "peft_config", {}):
-            raise ValueError(
-                f"Adapter name {adapter_name} already in use in the transformer - please select a new adapter name."
-            )
-
-        rank = {}
-        for key, val in state_dict.items():
-            if "lora_B" in key:
-                rank[key] = val.shape[1]
-
-        lora_config_kwargs = get_peft_kwargs(rank, network_alpha_dict=None, peft_state_dict=state_dict)
-        if "use_dora" in lora_config_kwargs:
-            if lora_config_kwargs["use_dora"] and is_peft_version("<", "0.9.0"):
-                raise ValueError(
-                    "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`."
-                )
-            else:
-                lora_config_kwargs.pop("use_dora")
-
-
-        lora_config_kwargs["lora_alpha"] = 42
-        lora_config = LoraConfig(**lora_config_kwargs)
-
-        # adapter_name
-        if adapter_name is None:
-            adapter_name = get_adapter_name(transformer)
-
-        # In case the pipeline has been already offloaded to CPU - temporarily remove the hooks
-        # otherwise loading LoRA weights will lead to an error
-        is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline)
-
-        inject_adapter_in_model(lora_config, transformer, adapter_name=adapter_name)
-        incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name)
-
-        if incompatible_keys is not None:
-            # check only for unexpected keys
-            unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
-            if unexpected_keys:
-                logger.warning(
-                    f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
-                    f" {unexpected_keys}. "
-                )
-
-        # Offload back.
-        if is_model_cpu_offload:
-            _pipeline.enable_model_cpu_offload()
-        elif is_sequential_cpu_offload:
-            _pipeline.enable_sequential_cpu_offload()
-        # Unsafe code />
+MAX_SEED = 2**32-1
 
 def update_selection(evt: gr.SelectData):
     selected_lora = loras[evt.index]
```
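The block removed above was a copied-in override of diffusers' `load_lora_into_transformer` whose one functional edit was hard-coding `lora_config_kwargs["lora_alpha"] = 42` before building the `LoraConfig`; deleting it, together with the `copy.deepcopy` backup of the original method, returns the Space to the stock loading path. A minimal sketch of that stock path, assuming a diffusers version with the PEFT backend (the LoRA repo id is illustrative):

```python
import torch
from diffusers import DiffusionPipeline

base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
pipe.to("cuda")

# load_lora_weights drives the same PEFT machinery the deleted block copied
# (inject_adapter_in_model + set_peft_model_state_dict), but keeps the alpha
# stored in the checkpoint instead of forcing lora_alpha=42.
pipe.load_lora_weights("some-user/some-flux-lora")  # illustrative repo id
```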
```diff
@@ -95,7 +30,7 @@ def update_selection(evt: gr.SelectData):
     )
 
 @spaces.GPU(duration=90)
-def run_lora(prompt, cfg_scale, steps, selected_index, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
     if selected_index is None:
         raise gr.Error("You must select a LoRA before proceeding.")
 
```
```diff
@@ -115,18 +50,19 @@ def run_lora(prompt, cfg_scale, steps, selected_index, seed, width, height, lora
         pipe.load_lora_into_transformer = original_load_lora
 
     # Set random seed for reproducibility
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device="cuda").manual_seed(seed)
 
     # Generate image
     image = pipe(
         prompt=f"{prompt} {trigger_word}",
-        #negative_prompt=negative_prompt,
         num_inference_steps=steps,
         guidance_scale=cfg_scale,
         width=width,
         height=height,
         generator=generator,
-
+        joint_attention_kwargs={"scale": lora_scale},
     ).images[0]
 
     # Unload LoRA weights
```
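This hunk makes two behavioral changes: when `randomize_seed` is checked, the seed is redrawn per run from `[0, MAX_SEED]` before the CUDA generator is built, and the LoRA strength is now applied at inference time through `joint_attention_kwargs={"scale": lora_scale}`, whose `scale` entry the FLUX pipeline reads to scale LoRA layers. One caveat visible in the diff: the surviving context line still assigns `original_load_lora`, whose definition the first hunk deletes, so that branch will raise a NameError unless the name is defined somewhere outside these hunks. A minimal sketch of the seed handling (assumes `random` is imported at the top of app.py; `pick_seed` is a hypothetical helper, the commit inlines the logic):

```python
import random
import torch

MAX_SEED = 2**32 - 1  # keep seeds in the 32-bit range the UI slider exposes

def pick_seed(randomize_seed: bool, seed: int) -> int:
    # Redraw only when the user asked for a random seed; otherwise keep the
    # slider value so runs stay reproducible.
    return random.randint(0, MAX_SEED) if randomize_seed else seed

generator = torch.Generator(device="cuda").manual_seed(pick_seed(True, 0))
```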
```diff
@@ -159,10 +95,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as app:
             result = gr.Image(label="Generated Image")
 
     with gr.Row():
-
-        #prompt_title = gr.Markdown("### Click on a LoRA in the gallery to select it")
-        #negative_prompt = gr.Textbox(label="Negative Prompt", lines=2, value="low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry")
-
+        with gr.Accordion("Advanced Settings", open=False):
         with gr.Column():
             with gr.Row():
                 cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
```
```diff
@@ -173,7 +106,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as app:
                 height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
 
             with gr.Row():
-
+                randomize_seed = gr.Checkbox(True, label="Randomize seed")
+                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
                 lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=0.85)
 
     gallery.select(update_selection, outputs=[prompt, selected_info, selected_index])
```
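These two UI hunks introduce an "Advanced Settings" accordion plus the seed controls. As the hunks stand, nothing is indented under the accordion, so the rows below would need one extra level of nesting for it to actually collapse them. A sketch of the presumable intent (Gradio 4.x style; components created inside the `with gr.Accordion` block stay hidden until expanded):

```python
import gradio as gr

MAX_SEED = 2**32 - 1

with gr.Blocks(theme=gr.themes.Soft()) as app:
    with gr.Row():
        # open=False keeps the section collapsed on page load.
        with gr.Accordion("Advanced Settings", open=False):
            with gr.Column():
                with gr.Row():
                    cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
                with gr.Row():
                    randomize_seed = gr.Checkbox(True, label="Randomize seed")
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
                    lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=0.85)
```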
```diff
@@ -181,7 +115,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as app:
     gr.on(
         triggers=[generate_button.click, prompt.submit],
         fn=run_lora,
-        inputs=[prompt, cfg_scale, steps, selected_index, seed, width, height, lora_scale],
+        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
         outputs=[result]
     )
 
```
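The last hunk keeps the event wiring in sync with the new handler signature: `gr.on` passes `inputs` to the callback positionally, so `randomize_seed` must occupy the same slot here as in `run_lora`'s parameter list (between `selected_index` and `seed`, mirroring the second hunk). A self-contained sketch of the pattern with placeholder components:

```python
import gradio as gr

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Result")
    generate_button = gr.Button("Generate")

    def run(prompt_text):
        return prompt_text.upper()

    # gr.on binds one callback to several triggers; the components in
    # `inputs` are handed to the callback in list order.
    gr.on(
        triggers=[generate_button.click, prompt.submit],
        fn=run,
        inputs=[prompt],
        outputs=[result],
    )

demo.launch()
```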
