Commit 059f3f4 · Parent(s): 422eed1
Update app.py
app.py CHANGED
@@ -52,6 +52,7 @@ sdxl_loras_raw_new = [item for item in sdxl_loras_raw if item.get("new") == True
 
 sdxl_loras_raw = [item for item in sdxl_loras_raw if item.get("new") != True]
 
+lcm_lora_id = "lcm-sd/lcm-sdxl-base-1.0-lora"
 
 vae = AutoencoderKL.from_pretrained(
     "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
@@ -184,39 +185,24 @@ def run_lora(prompt, negative, lora_scale, selected_state, sdxl_loras, sdxl_lora
         loaded_state_dict = copy.deepcopy(state_dicts[repo_name]["state_dict"])
         cross_attention_kwargs = None
         if last_lora != repo_name:
-            if 
-                del pipe
-                gc.collect()
-                pipe = copy.deepcopy(original_pipe)
-                pipe.to(device)
-            elif(last_fused):
+            if(last_fused):
                 pipe.unfuse_lora()
                 pipe.unload_lora_weights()
-            is_compatible = sdxl_loras[selected_state.index]["is_compatible"]
-            [14 removed lines not legible in the page capture]
-                    text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
-                    tokenizers = [pipe.tokenizer, pipe.tokenizer_2]
-                    embedding_path = hf_hub_download(repo_id=repo_name, filename=text_embedding_name, repo_type="model")
-                    embhandler = TokenEmbeddingsHandler(text_encoders, tokenizers)
-                    embhandler.load_embeddings(embedding_path)
-
-                else:
-                    merge_incompatible_lora(full_path_lora, lora_scale)
-                    last_fused=False
-                last_merged = True
+            #is_compatible = sdxl_loras[selected_state.index]["is_compatible"]
+            pipe.load_lora_weights(loaded_state_dict)#, adapter_name="loaded_lora")
+            #pipe.load_lora_weights(lcm_lora_id, adapter_name="lcm_lora")
+            #pipe.set_adapters(["loaded_lora", "lcm_lora"], adapter_weights=[0.8, 1.0])
+            pipe.fuse_lora()
+            last_fused = True
+            is_pivotal = sdxl_loras[selected_state.index]["is_pivotal"]
+            if(is_pivotal):
+                #Add the textual inversion embeddings from pivotal tuning models
+                text_embedding_name = sdxl_loras[selected_state.index]["text_embedding_weights"]
+                text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
+                tokenizers = [pipe.tokenizer, pipe.tokenizer_2]
+                embedding_path = hf_hub_download(repo_id=repo_name, filename=text_embedding_name, repo_type="model")
+                embhandler = TokenEmbeddingsHandler(text_encoders, tokenizers)
+                embhandler.load_embeddings(embedding_path)
 
         image = pipe(
             prompt=prompt,
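The commit drops the old delete-and-deep-copy pipeline reset in run_lora and leans on diffusers' built-in LoRA handling instead: load_lora_weights then fuse_lora, with unfuse_lora/unload_lora_weights before the next LoRA is selected. A minimal sketch of that flow, assuming an SDXL base checkpoint on a CUDA device (the base model id, the prompt and the lora.safetensors path are placeholders, not part of the commit):

import torch
from diffusers import AutoencoderKL, StableDiffusionXLPipeline

# Assumed setup; app.py builds its pipeline elsewhere with the same fp16-fix VAE.
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
)
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # assumed base model, not named in the diff
    vae=vae,
    torch_dtype=torch.float16,
).to("cuda")

# Fuse the selected LoRA into the base weights instead of deep-copying the pipeline.
pipe.load_lora_weights("lora.safetensors")  # placeholder path; run_lora passes a state dict
pipe.fuse_lora()
image = pipe(prompt="a test prompt", num_inference_steps=30).images[0]

# Before switching to another LoRA, mirror the `if(last_fused)` branch:
# undo the fusion and drop the LoRA weights.
pipe.unfuse_lora()
pipe.unload_lora_weights()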
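The commented-out lines in the new code point at where the change is headed: stacking the selected LoRA with the lcm_lora_id adapter added at line 55. A sketch of how that would look with diffusers' PEFT-backed adapter API, assuming the pipe and loaded_state_dict from run_lora (the adapter names and the 0.8/1.0 weights come straight from the commented code):

# Assumes `pipe` and `loaded_state_dict` as in run_lora above.
lcm_lora_id = "lcm-sd/lcm-sdxl-base-1.0-lora"

# Register both LoRAs under named adapters rather than fusing only one.
pipe.load_lora_weights(loaded_state_dict, adapter_name="loaded_lora")
pipe.load_lora_weights(lcm_lora_id, adapter_name="lcm_lora")

# Blend them: the selected style LoRA at 0.8, the LCM LoRA at full strength,
# then fuse the combination for faster sampling.
pipe.set_adapters(["loaded_lora", "lcm_lora"], adapter_weights=[0.8, 1.0])
pipe.fuse_lora()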
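For pivotal-tuned LoRAs the new code also loads the trained token embeddings into both SDXL text encoders. A sketch of that step in isolation; TokenEmbeddingsHandler is the helper app.py already imports (not a diffusers API), and the repo id and filename below are placeholders for the fields run_lora reads from sdxl_loras[selected_state.index]:

from huggingface_hub import hf_hub_download

repo_name = "some-user/pivotal-tuned-lora"       # placeholder
text_embedding_name = "embeddings.safetensors"   # placeholder

# Both SDXL text encoders and tokenizers receive the new tokens.
text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
tokenizers = [pipe.tokenizer, pipe.tokenizer_2]

embedding_path = hf_hub_download(
    repo_id=repo_name, filename=text_embedding_name, repo_type="model"
)
embhandler = TokenEmbeddingsHandler(text_encoders, tokenizers)
embhandler.load_embeddings(embedding_path)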