Update app.py
app.py CHANGED

@@ -61,11 +61,12 @@ def upload_to_ftp(filename):
 pyx = cyper.inline(code, fast_indexing=True, directives=dict(boundscheck=False, wraparound=False, language_level=3))

 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
+#vae=AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", use_safetensors=True, subfolder='vae',token=True)
+vaeX=AutoencoderKL.from_pretrained("ford442/stable-diffusion-3.5-large-fp32", use_safetensors=True, subfolder='vae',token=True)
 pipe = StableDiffusion3Pipeline.from_pretrained(
     #"stabilityai # stable-diffusion-3.5-large",
     "ford442/stable-diffusion-3.5-large-bf16",
-    #
+    #vae=AutoencoderKL.from_pretrained("ford442/stable-diffusion-3.5-large-fp32", use_safetensors=True, subfolder='vae',token=True),
     #scheduler = FlowMatchHeunDiscreteScheduler.from_pretrained('ford442/stable-diffusion-3.5-large-bf16', subfolder='scheduler',token=True),
     # text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True),
     # text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True),
@@ -76,10 +77,12 @@ pipe = StableDiffusion3Pipeline.from_pretrained(
     #torch_dtype=torch.bfloat16,
     #use_safetensors=False,
 )
+
 pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/UltraReal.safetensors")
+
 pipe.to(device=device, dtype=torch.bfloat16)
 #pipe.to(device)
-
+pipe.vae=vaeX
 upscaler_2 = UpscaleWithModel.from_pretrained("Kim2091/ClearRealityV1").to(torch.device('cpu'))

 MAX_SEED = np.iinfo(np.int32).max
@@ -102,6 +105,7 @@ def infer_30(
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     print('-- generating image --')
+
     sd_image = pipe(
         prompt=prompt,
         prompt_2=prompt,
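The substantive change in this commit is the VAE override: the pipeline weights are cast to bf16, while the VAE is loaded separately from the fp32 checkpoint and attached afterwards, presumably so decoding runs at full precision. A minimal sketch of that pattern, assuming diffusers' StableDiffusion3Pipeline and AutoencoderKL APIs and reusing the repo IDs from the diff; the explicit vae.to(device) call and the torch_dtype argument are illustrative additions, not part of the commit:

import torch
from diffusers import AutoencoderKL, StableDiffusion3Pipeline

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Keep the VAE out of the bf16 cast by loading it from the fp32 repo
# (repo IDs taken from the diff above).
vae = AutoencoderKL.from_pretrained(
    "ford442/stable-diffusion-3.5-large-fp32",
    subfolder="vae",
    use_safetensors=True,
)

pipe = StableDiffusion3Pipeline.from_pretrained(
    "ford442/stable-diffusion-3.5-large-bf16",
    torch_dtype=torch.bfloat16,  # illustrative; the commit leaves this commented out
)
pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/UltraReal.safetensors")

# Cast/move the pipeline first, then swap in the fp32 VAE so it is not downcast.
pipe.to(device=device, dtype=torch.bfloat16)
pipe.vae = vae
pipe.vae.to(device)  # assumption: the replacement VAE still has to be moved to the GPU

The ordering is what matters here: assigning pipe.vae after pipe.to(..., dtype=torch.bfloat16) keeps the VAE in float32 while the transformer and text encoders run in bfloat16.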
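The third hunk only adds a blank line, but the surrounding infer_30 code shows the seeded-generation pattern this Space relies on. A sketch of how those pieces fit together at call time, continuing from the pipe set up above; the prompt, step count, and guidance scale are hypothetical since they do not appear in the diffed lines:

import random
import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max

# Pick a random seed, bind it to a CUDA generator, and pass the generator
# to the pipeline call so the run is reproducible from the seed alone.
seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device="cuda").manual_seed(seed)

prompt = "a photo of a lighthouse at dusk"  # hypothetical prompt
sd_image = pipe(
    prompt=prompt,
    prompt_2=prompt,            # SD3 also accepts prompt_2/prompt_3 for the other text encoders
    generator=generator,        # ties the output to the chosen seed
    num_inference_steps=30,     # assumption: not visible in the diffed lines
    guidance_scale=4.5,         # assumption: not visible in the diffed lines
).images[0]
sd_image.save(f"sd35_{seed}.png")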