Update app.py
app.py CHANGED
@@ -66,12 +66,10 @@ pipe_edit = StableDiffusionXLInstructPix2PixPipeline.from_single_file(
     edit_file, num_in_channels=8, is_cosxl_edit=True, vae=vae, torch_dtype=torch.float16,
 )
 pipe_edit.scheduler = EDMEulerScheduler(sigma_min=0.002, sigma_max=120.0, sigma_data=1.0, prediction_type="v_prediction")
-pipe_edit.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
-pipe_edit.set_adapters(["lora"])
 pipe_edit.to("cuda")
 
 # Generator
-@spaces.GPU(duration=
+@spaces.GPU(duration=45, queue=False)
 def king(type ,
          input_image ,
          instruction: str ,
@@ -106,12 +104,12 @@ def king(type ,
     generator = torch.Generator().manual_seed(seed)
     if style=="3D":
         instruction = f"3DRenderAF, 3D Render, {instruction}"
-        image = pipe_3D( prompt = instruction, guidance_scale =
+        image = pipe_3D( prompt = instruction, guidance_scale = 4, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
     elif style=="Logo":
         instruction = f"LogoRedAF, {instruction}"
-        image = pipe_logo( prompt = instruction, guidance_scale =
+        image = pipe_logo( prompt = instruction, guidance_scale = 4, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
     else:
-        image = pipe_best( prompt = instruction, guidance_scale =
+        image = pipe_best( prompt = instruction, guidance_scale = 4, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
     return seed, image
 
 client = InferenceClient()
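For context, the three new branch lines differ only in which pipeline they call, so below is a minimal sketch of how the updated decorator and generation path fit together after this change. It assumes pipe_3D, pipe_logo, and pipe_best are SDXL text-to-image pipelines built earlier in app.py, and the parameters of king beyond the three arguments visible in the hunks (steps, seed, width, height, style) are hypothetical stand-ins for the real signature; this is a compacted sketch, not the file's actual code.

import torch
import spaces

# Sketch of the generation path after this change. pipe_3D, pipe_logo and
# pipe_best are assumed to be SDXL pipelines created earlier in app.py; the
# parameters after `instruction` are guesses at the real signature.
@spaces.GPU(duration=45, queue=False)  # decorator as added in this diff
def king(type, input_image, instruction: str, steps, seed, width, height, style):
    generator = torch.Generator().manual_seed(seed)
    if style == "3D":
        instruction = f"3DRenderAF, 3D Render, {instruction}"
        pipe = pipe_3D
    elif style == "Logo":
        instruction = f"LogoRedAF, {instruction}"
        pipe = pipe_logo
    else:
        pipe = pipe_best
    # Every branch now passes the full set of sampling arguments explicitly.
    image = pipe(prompt=instruction, guidance_scale=4,
                 num_inference_steps=steps, width=width, height=height,
                 generator=generator).images[0]
    return seed, image

Factoring the branches down to a single call makes it easier to see that only the pipeline object and the prompt prefix change per style; the sampling arguments added in this diff are identical across the three new lines.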