Maria committed on
Commit 3c3014b · 1 parent: b3c0aba

Add application file

Files changed (1)
  app.py +185 -0
app.py ADDED
@@ -0,0 +1,185 @@
import gradio as gr
import numpy as np
import random
import os

# import spaces #[uncomment to use ZeroGPU]
from diffusers import DiffusionPipeline
from peft import PeftModel, LoraConfig
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

if torch.cuda.is_available():
    torch_dtype = torch.float16
else:
    torch_dtype = torch.float32

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

LoRA_path = 'new_model'  # local folder holding the PEFT adapters for the custom LoRA


# @spaces.GPU #[uncomment to use ZeroGPU]
def infer(
    model_id,
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator().manual_seed(seed)

    if model_id == 'Maria_Lashina_LoRA':
        # Attach the locally stored LoRA adapters to the pipeline's UNet and text encoder.
        # Note: from_pretrained(model_id) below assumes 'Maria_Lashina_LoRA' resolves to a
        # pipeline directory in this Space (or a Hub repo); otherwise one of the base
        # checkpoints from MODEL_LIST should be loaded here instead.
        adapter_name = 'a cartoonish mouse'
        unet_sub_dir = os.path.join(LoRA_path, "unet")
        text_encoder_sub_dir = os.path.join(LoRA_path, "text_encoder")

        pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype).to(device)
        pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
        pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name)

        if torch_dtype == torch.float16:
            pipe.unet.half()
            pipe.text_encoder.half()

        pipe.to(device)
    else:
        pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype).to(device)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]

    return image, seed
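
# Note on the LoRA branch above: PeftModel.from_pretrained() expects each of
# new_model/unet and new_model/text_encoder to be a PEFT adapter folder, i.e.
# adapter_config.json plus the adapter weight file, as written by
# PeftModel.save_pretrained(). That export layout is an assumption about how
# the adapter was produced; it is not recorded in this commit.
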
examples = [
    "The image of a cartoonish mouse eating from a red bowl of yellow triangle chips, her cheeks are full. The mouse is gray with big pink ears, small white eyes and a black pointed nose. It has a simple design, the background color is white. The style of the image is reminiscent of a sticker or a digital illustration.",
    "The image of a cartoonish mouse with red hearts instead of eyes meaning that the mouse is in love with something. The mouse is gray with big pink ears and a black pointed nose. It has a simple design, the background color is white. The style of the image is reminiscent of a sticker or a digital illustration.",
    "The image of a cartoonish mouse with sunglasses and smiling. The mouse is gray with big pink ears and a black pointed nose. It has a simple design, the background color is white. The style of the image is reminiscent of a sticker or a digital illustration.",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(" # Text-to-Image Gradio Template")

        MODEL_LIST = [
            "CompVis/stable-diffusion-v1-4",
            "stable-diffusion-v1-5/stable-diffusion-v1-5",
            "Maria_Lashina_LoRA",
        ]
        with gr.Row():
            model_id = gr.Dropdown(
                label="Model",
                choices=MODEL_LIST,
            )

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )

            run_button = gr.Button("Run", scale=0, variant="primary")

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=False,
            )

            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=42,
            )

            randomize_seed = gr.Checkbox(label="Randomize seed", value=False)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,  # Replace with defaults that work for your model
                )

                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,  # Replace with defaults that work for your model
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=7.0,  # Replace with defaults that work for your model
                )

                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=20,  # Replace with defaults that work for your model
                )

        gr.Examples(examples=examples, inputs=[prompt])
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            model_id,
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )

if __name__ == "__main__":
    demo.launch()
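
For a quick check outside the Gradio UI, `infer` can be called directly from a separate script. The snippet below is a minimal sketch and not part of the commit: the model id, prompt, sizes, and output filename are illustrative, and it assumes `app.py` is importable from the working directory and that the chosen checkpoint is downloadable or already cached.

# smoke_test.py -- hypothetical helper, not part of this commit
from app import infer  # importing app builds the Blocks UI but does not launch it

image, used_seed = infer(
    model_id="CompVis/stable-diffusion-v1-4",  # example base checkpoint from MODEL_LIST
    prompt="a cartoonish mouse with big pink ears on a white background",
    negative_prompt="",
    seed=42,
    randomize_seed=False,
    width=512,   # smaller than the UI default to keep the test light
    height=512,
    guidance_scale=7.0,
    num_inference_steps=20,
)
image.save("smoke_test.png")
print("saved image generated with seed", used_seed)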