ford442 committed (verified)
Commit 15cf306 · Parent(s): 6731d9e

Update app.py

Files changed (1): app.py (+3 -2)
app.py CHANGED
@@ -62,7 +62,7 @@ checkpoint = "microsoft/Phi-3.5-mini-instruct"
 #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
 vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #, device_map='cpu') #.to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
 
-pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16") #.to(torch.bfloat16)
+pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(device=device, dtype=torch.bfloat16)
 #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(torch.device("cuda:0"))
 #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/RealVis_Medium_1.0b_bf16", torch_dtype=torch.bfloat16)
 #pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", token=hftoken, torch_dtype=torch.float32, device_map='balanced')
@@ -81,6 +81,8 @@ refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("ford442/stable-diffu
 refiner.scheduler=EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.config)
 #refiner.enable_model_cpu_offload()
 
+#pipe.to(device=device, dtype=torch.bfloat16)
+
 #refiner.scheduler.config.requires_aesthetics_score=False
 #refiner.to(device)
 #refiner = torch.compile(refiner)
@@ -133,7 +135,6 @@ def infer(
     progress=gr.Progress(track_tqdm=True),
 ):
     upscaler_2.to(torch.device('cpu'))
-    pipe.to(device=device, dtype=torch.bfloat16)
     torch.set_float32_matmul_precision("highest")
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
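Net effect of the three hunks: the Stable Diffusion 3.5 pipeline is moved to the device in bfloat16 once at load time, and the per-request pipe.to(...) inside infer() is dropped. Below is a minimal sketch of that pattern, not the actual app.py: it assumes CUDA is available and the checkpoint is accessible, keeps only the model ID from this diff, and omits the Gradio UI, VAE, refiner, and upscaler code; MAX_SEED is a stand-in for whatever app.py defines.

import random
import torch
from diffusers import StableDiffusion3Pipeline

MAX_SEED = 2**31 - 1  # assumption; app.py defines its own MAX_SEED elsewhere
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load once at startup and place the weights on the device in bf16 a single time,
# instead of re-moving/re-casting the pipeline on every call to infer().
pipe = StableDiffusion3Pipeline.from_pretrained(
    "ford442/stable-diffusion-3.5-medium-bf16"
).to(device=device, dtype=torch.bfloat16)

def infer(prompt, num_inference_steps=28):
    # No pipe.to(...) here any more; only cheap per-call state is set up.
    torch.set_float32_matmul_precision("highest")
    seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)
    result = pipe(prompt, num_inference_steps=num_inference_steps, generator=generator)
    return result.images[0]

The trade-off is a slower cold start in exchange for avoiding a multi-gigabyte weight transfer and dtype cast on every request.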