Spaces: Running on Zero
LPX55 committed on

Commit 85f921d · 1 Parent(s): 30f6115
♻️ refactor(app): improve pipeline loading mechanism
- move pipeline loading into a separate function `load_pipeline` for lazy loading
- use a global `pipe` variable and update it dynamically based on the selected model
- remove redundant pipeline loading code and use the `load_pipeline` function instead
- improve code organization and readability by separating concerns into different functions
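In isolation, the lazy-loading flow described in these bullets looks roughly like the sketch below. This is a minimal, self-contained illustration only: DummyPipeline and the MODELS entries are placeholders rather than the Space's actual StableDiffusionXLFillPipeline and model registry, and the real inpaint() compares pipe.config.model_name instead of a repo id.

# Sketch of the lazy-loading pattern (placeholder names, not app.py code)
MODELS = {
    "Model A": "org/model-a",  # hypothetical repo ids
    "Model B": "org/model-b",
}

class DummyPipeline:
    def __init__(self, repo_id):
        # Stand-in for an expensive from_pretrained() call
        self.repo_id = repo_id

def load_pipeline(model_name):
    # Nothing is loaded at import time; the load happens on demand
    return DummyPipeline(MODELS[model_name])

pipe = None  # global pipeline, populated lazily

def inpaint(model_name):
    global pipe
    # Reload only when no pipeline is cached or a different model is requested
    if pipe is None or pipe.repo_id != MODELS[model_name]:
        pipe = load_pipeline(model_name)
    return pipe

Calling inpaint("Model B") after inpaint("Model A") triggers exactly one reload; repeated calls with the same model reuse the cached global pipeline, which is the behavior the refactor is after. The actual change is in the diff below.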
    	
app.py CHANGED
@@ -39,25 +39,17 @@ vae = AutoencoderKL.from_pretrained(
     "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
 ).to("cuda")
 
-
-
-
-
-
-
-
-
-pipe
-
-
-    vae=vae,
-    controlnet=model,
-)
-
-pipe.to("cuda")
-
-pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
-
+# Move pipeline loading into a function to enable lazy loading
+def load_pipeline(model_name):
+    pipe = StableDiffusionXLFillPipeline.from_pretrained(
+        MODELS[model_name],
+        torch_dtype=torch.float16,
+        vae=vae,
+        controlnet=model,
+    )
+    pipe.to("cuda")
+    pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
+    return pipe
 
 def can_expand(source_width, source_height, target_width, target_height, alignment):
     """Checks if the image can be expanded based on the alignment."""
@@ -187,12 +179,8 @@ def preview_image_and_mask(image, width, height, overlap_percentage, resize_opti
 def inpaint(prompt, image, model_name, paste_back):
     global pipe
     if pipe.config.model_name != MODELS[model_name]:
-
-
-            torch_dtype=torch.float16,
-            vae=vae,
-            controlnet=model,
-        ).to("cuda")
+        # Lazily load the pipeline for the selected model
+        pipe = load_pipeline(model_name)
 
     mask = Image.fromarray(image["mask"]).convert("L")
     image = Image.fromarray(image["image"])
@@ -206,6 +194,7 @@ def inpaint(prompt, image, model_name, paste_back):
 
 @spaces.GPU(duration=24)
 def outpaint(image, width, height, overlap_percentage, num_inference_steps, resize_option, custom_resize_percentage, prompt_input, alignment, overlap_left, overlap_right, overlap_top, overlap_bottom):
+    # Use the currently loaded pipeline
     background, mask = prepare_image_and_mask(image, width, height, overlap_percentage, resize_option, custom_resize_percentage, alignment, overlap_left, overlap_right, overlap_top, overlap_bottom)
 
     if not can_expand(background.width, background.height, width, height, alignment):
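One note on the "Running on Zero" badge at the top of the page: the @spaces.GPU(duration=24) decorator kept on outpaint() is the ZeroGPU hook that allocates a GPU per call. A rough usage sketch follows; the function body is illustrative only, and the duration value is simply the one visible in the diff.

import spaces  # Hugging Face `spaces` package used by ZeroGPU Spaces

@spaces.GPU(duration=24)  # hint that each call needs roughly 24 s of GPU time
def generate(prompt):
    # Illustrative body; the real outpaint() runs the diffusion pipeline here
    return f"ran on GPU for prompt: {prompt}"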