multimodalart HF staff committed on
Commit
3a8556a
·
verified ·
1 Parent(s): bf3af40

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -1,5 +1,5 @@
1
  from hidiffusion import apply_hidiffusion, remove_hidiffusion
2
- from diffusers import DiffusionPipeline, DDIMScheduler, AutoencoderKL
3
  from transformers import CLIPFeatureExtractor
4
  from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
5
 
@@ -11,7 +11,7 @@ safety_checker=StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diff
11
  feature_extractor=CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
12
  pretrain_model = "SG161222/Realistic_Vision_V5.1_noVAE"
13
  scheduler = DDIMScheduler.from_pretrained(pretrain_model, subfolder="scheduler")
14
- pipe = DiffusionPipeline.from_pretrained(pretrain_model, scheduler = scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, vae=vae, torch_dtype=torch.float16).to("cuda")
15
 
16
  # # Optional. enable_xformers_memory_efficient_attention can save memory usage and increase inference speed. enable_model_cpu_offload and enable_vae_tiling can save memory usage.
17
  #pipe.enable_model_cpu_offload()
 
1
  from hidiffusion import apply_hidiffusion, remove_hidiffusion
2
+ from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL
3
  from transformers import CLIPFeatureExtractor
4
  from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
5
 
 
11
  feature_extractor=CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
12
  pretrain_model = "SG161222/Realistic_Vision_V5.1_noVAE"
13
  scheduler = DDIMScheduler.from_pretrained(pretrain_model, subfolder="scheduler")
14
+ pipe = StableDiffusionPipeline.from_pretrained(pretrain_model, scheduler = scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, vae=vae, torch_dtype=torch.float16).to("cuda")
15
 
16
  # # Optional. enable_xformers_memory_efficient_attention can save memory usage and increase inference speed. enable_model_cpu_offload and enable_vae_tiling can save memory usage.
17
  #pipe.enable_model_cpu_offload()