John6666 committed on
Commit 85454c3 · verified · 1 Parent(s): 18f4eda

Upload 2 files

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -131,7 +131,7 @@ def load_pipeline(repo_id: str, cn_on: bool, model_type: str, task: str, dtype_s
         pipe_i2i = pipeline_i2i.from_pipe(pipe, transformer=transformer, text_encoder_2=text_encoder_2, torch_dtype=dtype)
     elif ".safetensors" in repo_id or ".gguf" in repo_id: # from single file
         file_url = repo_id.replace("/resolve/main/", "/blob/main/").replace("?download=true", "")
-        if ".gguf" in file_url: transformer_model.from_single_file(file_url, subfolder="transformer",
+        if ".gguf" in file_url: transformer = transformer_model.from_single_file(file_url, subfolder="transformer",
             quantization_config=GGUFQuantizationConfig(compute_dtype=dtype), torch_dtype=dtype, config=single_file_base_model)
         else: transformer = transformer_model.from_single_file(file_url, subfolder="transformer", torch_dtype=dtype, config=single_file_base_model)
         pipe = pipeline.from_pretrained(single_file_base_model, transformer=transformer, torch_dtype=dtype, token=hf_token, **kwargs)
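The first hunk fixes a dropped assignment: the GGUF branch called from_single_file but never bound its result, so the quantized transformer was discarded and the pipeline was built without it. A minimal sketch of the corrected pattern against the public diffusers GGUF API (FluxTransformer2DModel stands in for the app's transformer_model binding, and the checkpoint URL is illustrative):

```python
import torch
from diffusers import FluxPipeline, FluxTransformer2DModel, GGUFQuantizationConfig

# Illustrative GGUF single-file checkpoint; any Flux-architecture .gguf works.
ckpt_url = "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q4_K_S.gguf"

# The fix: bind the loaded model to a name so it can be passed to the pipeline.
transformer = FluxTransformer2DModel.from_single_file(
    ckpt_url,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)

# Assemble the full pipeline around the quantized transformer.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    transformer=transformer,
    torch_dtype=torch.bfloat16,
)
```

Without the assignment, the call still downloads and builds the quantized weights, but the object is thrown away immediately and from_pretrained receives whatever transformer was previously in scope.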
@@ -160,7 +160,7 @@ def change_base_model(repo_id: str, cn_on: bool, disable_model_cache: bool, mode
     global pipe, pipe_i2i, taef1, good_vae, controlnet_union, controlnet, last_model, last_cn_on, last_task, last_dtype_str, dtype
     try:
         if not disable_model_cache and (repo_id == last_model and cn_on is last_cn_on and task == last_task and dtype_str == last_dtype_str)\
-        or ((not is_repo_name(repo_id) or not is_repo_exists(repo_id)) and not ".safetensors" in repo_id): return gr.update()
+        or ((not is_repo_name(repo_id) or not is_repo_exists(repo_id)) and not ".safetensors" in repo_id and not ".gguf" in repo_id): return gr.update()
         unload_lora()
         pipe.to("cpu")
         pipe_i2i.to("cpu")
 
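The second hunk widens the early-return guard to match: a direct .gguf URL is not a valid repo name, so before this fix it tripped the validity check and the function returned without loading, since only .safetensors paths were exempted. A small sketch of the corrected validity clause as a standalone predicate (the cache-hit clause is omitted, and is_repo_name/is_repo_exists are hypothetical stubs standing in for the app's helpers):

```python
def is_repo_name(repo_id: str) -> bool:
    # Hypothetical stub for the app's helper: "user/model", not a URL.
    return repo_id.count("/") == 1 and not repo_id.startswith("http")

def is_repo_exists(repo_id: str) -> bool:
    # Hypothetical stub: assume any well-formed repo name exists.
    return is_repo_name(repo_id)

def is_invalid_source(repo_id: str) -> bool:
    # Fixed guard: single-file URLs (.safetensors or .gguf) bypass the
    # repo-name checks instead of being rejected outright.
    single_file = ".safetensors" in repo_id or ".gguf" in repo_id
    return (not is_repo_name(repo_id) or not is_repo_exists(repo_id)) and not single_file

# A .gguf URL is no longer treated as an invalid source...
assert not is_invalid_source("https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q4_K_S.gguf")
# ...while a malformed plain repo id still short-circuits the reload.
assert is_invalid_source("not-a-repo")
```

Together the two hunks make GGUF single-file sources behave like .safetensors ones end to end: the guard lets the URL through, and the load branch actually keeps the transformer it builds.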