ford442 committed on
Commit b5fa3d2 · verified · 1 Parent(s): 012840c

Update app.py

Files changed (1)
  1. app.py +10 -3
app.py CHANGED
@@ -4,7 +4,7 @@ import numpy as np
  #import tensorrt as trt
  import random
  import torch
- from diffusers import StableDiffusion3Pipeline, AutoencoderKL, StableDiffusionXLImg2ImgPipeline, EulerAncestralDiscreteScheduler
+ from diffusers import StableDiffusion3Pipeline, AutoencoderKL
  from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
  #from threading import Thread
  #from transformers import pipeline
@@ -60,9 +60,16 @@ checkpoint = "microsoft/Phi-3.5-mini-instruct"
  #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
  #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
  #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
- vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #, device_map='cpu') #.to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
-
- pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-large-bf16").to(device=device, dtype=torch.bfloat16)
+ #vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #, device_map='cpu') #.to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
+
+ pipe = StableDiffusion3Pipeline.from_pretrained(
+     "ford442/stable-diffusion-3.5-large-bf16",
+     token=True,
+     use_safetensors=True
+ )
+
+ pipe.to(device=device, dtype=torch.bfloat16)
+
  #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(torch.device("cuda:0"))
  #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/RealVis_Medium_1.0b_bf16", torch_dtype=torch.bfloat16)
  #pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", token=hftoken, torch_dtype=torch.float32, device_map='balanced')