ford442 committed on
Commit
096a9e5
·
verified ·
1 Parent(s): 7cd2e7c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -10
app.py CHANGED
@@ -53,16 +53,15 @@ def upload_to_ftp(filename):
53
  except Exception as e:
54
  print(f"FTP upload error: {e}")
55
 
56
- device = torch.device("cuda:0")
57
  torch_dtype = torch.bfloat16
58
 
59
  checkpoint = "microsoft/Phi-3.5-mini-instruct"
60
  #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
61
  #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
62
- #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
63
- vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",safety_checker=None)
64
 
65
- pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(device=torch.device("cuda:0")).to(torch.bfloat16)
66
  #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(torch.device("cuda:0"))
67
  #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/RealVis_Medium_1.0b_bf16", torch_dtype=torch.bfloat16)
68
  #pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", token=hftoken, torch_dtype=torch.float32, device_map='balanced')
@@ -76,12 +75,9 @@ pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-me
76
  #pipe = torch.compile(pipe)
77
  # pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear")
78
 
79
- refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("ford442/stable-diffusion-xl-refiner-1.0-bf16", requires_aesthetics_score=True)
80
- refiner.vae=vae
81
- refiner.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", algorithm_type="sde-dpmsolver++")
82
- refiner.to(device=torch.device("cuda:0"))
83
- refiner.to(torch.bfloat16)
84
  #refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float32, requires_aesthetics_score=True, device_map='balanced')
 
85
  #refiner.enable_model_cpu_offload()
86
 
87
  #refiner.scheduler.config.requires_aesthetics_score=False
@@ -121,7 +117,7 @@ def filter_text(text,phraseC):
121
  MAX_SEED = np.iinfo(np.int32).max
122
  MAX_IMAGE_SIZE = 4096
123
 
124
- @spaces.GPU(duration=90)
125
  def infer(
126
  prompt,
127
  negative_prompt,
 
53
  except Exception as e:
54
  print(f"FTP upload error: {e}")
55
 
56
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
57
  torch_dtype = torch.bfloat16
58
 
59
  checkpoint = "microsoft/Phi-3.5-mini-instruct"
60
  #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
61
+ vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
62
  #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
 
 
63
 
64
+ pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(device=torch.device("cuda:0"), dtype=torch.bfloat16)
65
  #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(torch.device("cuda:0"))
66
  #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/RealVis_Medium_1.0b_bf16", torch_dtype=torch.bfloat16)
67
  #pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", token=hftoken, torch_dtype=torch.float32, device_map='balanced')
 
75
  #pipe = torch.compile(pipe)
76
  # pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear")
77
 
78
+ refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("ford442/stable-diffusion-xl-refiner-1.0-bf16", vae=AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16"), use_safetensors=True, requires_aesthetics_score=True).to(device=torch.device("cuda:0"), dtype=torch.bfloat16)
 
 
 
 
79
  #refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float32, requires_aesthetics_score=True, device_map='balanced')
80
+ refiner.scheduler=EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.config, beta_schedule="scaled_linear")
81
  #refiner.enable_model_cpu_offload()
82
 
83
  #refiner.scheduler.config.requires_aesthetics_score=False
 
117
  MAX_SEED = np.iinfo(np.int32).max
118
  MAX_IMAGE_SIZE = 4096
119
 
120
+ @spaces.GPU(duration=80)
121
  def infer(
122
  prompt,
123
  negative_prompt,