RageshAntony committed on
Commit
a134882
·
verified ·
1 Parent(s): cac78c6

pre load pipeline pipe_class

Browse files
Files changed (1) hide show
  1. check_app.py +19 -17
check_app.py CHANGED
@@ -22,7 +22,7 @@ from datetime import datetime
22
  MAX_SEED = np.iinfo(np.int32).max
23
  MAX_IMAGE_SIZE = 1024
24
 
25
-
26
 
27
  class ProgressPipeline(DiffusionPipeline):
28
  def __init__(self, original_pipeline):
@@ -180,24 +180,26 @@ def create_pipeline_logic(prompt_text, model_name, negative_prompt="", seed=42,
180
  progress = gr.Progress(track_tqdm=False)
181
  config = MODEL_CONFIGS[model_name]
182
  pipe_class = config["pipeline_class"]
183
- pipe = None
184
- b_pipe = AutoPipelineForText2Image.from_pretrained(
185
- config["repo_id"],
186
- #variant="fp16",
187
- #cache_dir=config["cache_dir"],
188
- torch_dtype=torch.bfloat16
189
- ).to("cuda")
190
- pipe_signature = signature(b_pipe)
191
- # Check for the presence of "callback_on_step_end" in the signature
192
- has_callback_on_step_end = "callback_on_step_end" in pipe_signature.parameters
193
- if not has_callback_on_step_end:
194
- pipe = ProgressPipeline(b_pipe)
195
- print("ProgressPipeline specal")
196
- else:
197
- pipe = b_pipe
 
 
198
 
199
  gen_seed,image, images = generate_image_with_progress(
200
- model_name,pipe, prompt_text, num_steps=num_inference_steps, guidance_scale=guidance_scale, seed=seed,negative_prompt = negative_prompt, randomize_seed = randomize_seed, width = width, height = height, progress=progress
201
  )
202
  return f"Seed: {gen_seed}", image, images
203
  def main():
 
22
  MAX_SEED = np.iinfo(np.int32).max
23
  MAX_IMAGE_SIZE = 1024
24
 
25
+ pipe = {}
26
 
27
  class ProgressPipeline(DiffusionPipeline):
28
  def __init__(self, original_pipeline):
 
180
  progress = gr.Progress(track_tqdm=False)
181
  config = MODEL_CONFIGS[model_name]
182
  pipe_class = config["pipeline_class"]
183
+ global pipe
184
+ if pipe.get("pipeline_class") != pipe_class:
185
+ pipe["pipeline_class"] = pipe_class
186
+ b_pipe = AutoPipelineForText2Image.from_pretrained(
187
+ config["repo_id"],
188
+ #variant="fp16",
189
+ #cache_dir=config["cache_dir"],
190
+ torch_dtype=torch.bfloat16
191
+ ).to("cuda")
192
+ pipe_signature = signature(b_pipe)
193
+ # Check for the presence of "callback_on_step_end" in the signature
194
+ has_callback_on_step_end = "callback_on_step_end" in pipe_signature.parameters
195
+ if not has_callback_on_step_end:
196
+ pipe["pipeline"] = ProgressPipeline(b_pipe)
197
+ print("ProgressPipeline special")
198
+ else:
199
+ pipe["pipeline"] = b_pipe
200
 
201
  gen_seed,image, images = generate_image_with_progress(
202
+ model_name,pipe["pipeline"], prompt_text, num_steps=num_inference_steps, guidance_scale=guidance_scale, seed=seed,negative_prompt = negative_prompt, randomize_seed = randomize_seed, width = width, height = height, progress=progress
203
  )
204
  return f"Seed: {gen_seed}", image, images
205
  def main():