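"""Shared configuration and model/pipeline setup for the demo Space."""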
import torch
import secrets
from gradio.networking import setup_tunnel
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    UNet2DConditionModel,
    LCMScheduler,
    EulerDiscreteScheduler,
    StableDiffusionPipeline,
)

# Run on the GPU when one is available.
torch_device = "cuda" if torch.cuda.is_available() else "cpu"

# Use the LCM scheduler instead of the Euler discrete scheduler when True.
isLCM = False

# Access token and model repos used by the demo.
HF_ACCESS_TOKEN = ""
model_path = "segmind/small-sd"
inpaint_model_path = "Lykon/dreamshaper-8-inpainting"

# Default prompts and generation settings for the demo.
prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"
promptA = "Self-portrait oil painting, a beautiful man with golden hair, 8k"
promptB = "Self-portrait oil painting, a beautiful woman with golden hair, 8k"
negative_prompt = "a photo frame"
num_images = 5
degree = 360
perturbation_size = 0.1
num_inference_steps = 8
seed = 69420
guidance_scale = 8
guidance_values = "1, 8, 20"
intermediate = True

# Poke region coordinates/size and output image dimensions, in pixels.
pokeX, pokeY = 256, 256
pokeHeight, pokeWidth = 128, 128
imageHeight, imageWidth = 512, 512

# Load the Stable Diffusion components individually from the base model repo.
tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(model_path, subfolder="text_encoder").to(
    torch_device
)
if isLCM:
    scheduler = LCMScheduler.from_pretrained(model_path, subfolder="scheduler")
else:
    scheduler = EulerDiscreteScheduler.from_pretrained(model_path, subfolder="scheduler")
unet = UNet2DConditionModel.from_pretrained(model_path, subfolder="unet").to(
    torch_device
)
vae = AutoencoderKL.from_pretrained(model_path, subfolder="vae").to(torch_device)

# Assemble a full pipeline from the components above (safety checker disabled).
pipe = StableDiffusionPipeline(
    tokenizer=tokenizer,
    text_encoder=text_encoder,
    unet=unet,
    scheduler=scheduler,
    vae=vae,
    safety_checker=None,
    feature_extractor=None,
    requires_safety_checker=False,
).to(torch_device)

# Public share tunnel (via Gradio) for the Dash app served on port 8000.
dash_tunnel = setup_tunnel("0.0.0.0", 8000, secrets.token_urlsafe(32), None)

__all__ = [
    "prompt",
    "negative_prompt",
    "num_images",
    "degree",
    "perturbation_size",
    "num_inference_steps",
    "seed",
    "intermediate",
    "pokeX",
    "pokeY",
    "pokeHeight",
    "pokeWidth",
    "promptA",
    "promptB",
    "tokenizer",
    "text_encoder",
    "scheduler",
    "unet",
    "vae",
    "torch_device",
    "imageHeight",
    "imageWidth",
    "guidance_scale",
    "guidance_values",
    "HF_ACCESS_TOKEN",
    "model_path",
    "inpaint_model_path",
    "dash_tunnel",
    "pipe",
]
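
# A minimal smoke-test sketch (not part of the app): `pipe` is a standard
# diffusers StableDiffusionPipeline, so the settings above can be passed to it
# directly when this module is run as a script. The output filename is
# illustrative only.
if __name__ == "__main__":
    generator = torch.Generator(device=torch_device).manual_seed(seed)
    image = pipe(
        prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        height=imageHeight,
        width=imageWidth,
        generator=generator,
    ).images[0]
    image.save("sample.png")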