onnew committed on
Commit
59c4bf6
·
verified ·
1 Parent(s): f6e429d

Delete app (1).py

Browse files
Files changed (1) hide show
  1. app (1).py +0 -338
app (1).py DELETED
@@ -1,338 +0,0 @@
1
- import gradio as gr
2
- import requests
3
- import time
4
- import json
5
- import base64
6
- import os
7
- from io import BytesIO
8
- import html
9
- import re
10
-
11
-
12
-
13
class Prodia:
    """Thin client for the Prodia v1 Stable Diffusion REST API.

    Every public method returns the decoded JSON body of the response.
    Any non-200 status raises a plain ``Exception`` (callers in this app
    rely on that type), with the status code in the message.
    """

    # Fix: the original calls had no timeout, so a stalled API connection
    # would hang the UI worker forever.
    REQUEST_TIMEOUT = 60  # seconds

    def __init__(self, api_key, base=None):
        """Store the auth header; *base* overrides the default endpoint."""
        self.base = base or "https://api.prodia.com/v1"
        self.headers = {
            "X-Prodia-Key": api_key
        }

    def generate(self, params):
        """Start a txt2img job; returns the job descriptor dict."""
        response = self._post(f"{self.base}/sd/generate", params)
        return response.json()

    def transform(self, params):
        """Start an img2img job; returns the job descriptor dict."""
        response = self._post(f"{self.base}/sd/transform", params)
        return response.json()

    def controlnet(self, params):
        """Start a controlnet job; returns the job descriptor dict."""
        response = self._post(f"{self.base}/sd/controlnet", params)
        return response.json()

    def get_job(self, job_id):
        """Fetch the current state of the job with id *job_id*."""
        response = self._get(f"{self.base}/job/{job_id}")
        return response.json()

    def wait(self, job):
        """Poll *job* until it reaches a terminal state and return it.

        NOTE(review): loops forever if the API never reports
        'succeeded'/'failed'; a polling deadline may be desirable.
        """
        job_result = job

        while job_result['status'] not in ['succeeded', 'failed']:
            time.sleep(0.25)  # gentle poll interval
            job_result = self.get_job(job['job'])

        return job_result

    def list_models(self):
        """Return the list of available model identifier strings."""
        response = self._get(f"{self.base}/sd/models")
        return response.json()

    def list_samplers(self):
        """Return the list of available sampler names."""
        response = self._get(f"{self.base}/sd/samplers")
        return response.json()

    def _post(self, url, params):
        # json= lets requests serialize the body and set the
        # application/json Content-Type itself (previously done by hand
        # with data=json.dumps(...) and an explicit header).
        response = requests.post(url, headers=self.headers, json=params,
                                 timeout=self.REQUEST_TIMEOUT)

        if response.status_code != 200:
            print(params)  # debug aid: show the payload that was rejected
            raise Exception(f"Bad Prodia Response: {response.status_code}")

        return response

    def _get(self, url):
        response = requests.get(url, headers=self.headers,
                                timeout=self.REQUEST_TIMEOUT)

        if response.status_code != 200:
            raise Exception(f"Bad Prodia Response: {response.status_code}")

        return response
73
-
74
-
75
def image_to_base64(image):
    """Serialize *image* (PIL-style, must support .save) to a base64 PNG string."""
    buffer = BytesIO()
    image.save(buffer, format="PNG")
    # b64encode yields bytes; decode to a plain str for the JSON payload.
    return base64.b64encode(buffer.getvalue()).decode('utf-8')
84
-
85
-
86
def remove_id_and_ext(text):
    """Reduce a Prodia model identifier to its human-readable name.

    Strips a trailing "[id]" tag and a ".safetensors"/".ckpt" extension:
    "absolutereality_v181.safetensors [3d9d4d2b]" -> "absolutereality_v181".
    Anything else is returned unchanged (minus the id tag).
    """
    # Drop the trailing "[...]" id tag, then any surrounding whitespace.
    text = re.sub(r'\[.*\]$', '', text).strip()
    # Fix: the old fixed-width slice check (text[-12:].strip()) compared the
    # last 12 characters of the *name*, so ".ckpt" models were never
    # detected; compare the actual suffix instead.
    for ext in (".safetensors", ".ckpt"):
        if text.endswith(ext):
            return text[:-len(ext)]
    return text
94
-
95
-
96
def get_data(text):
    """Parse an AUTOMATIC1111-style "parameters" metadata string.

    Returns a dict with keys prompt, negative_prompt, steps, seed, sampler,
    model, cfg_scale, size, w and h; any field that cannot be found in
    *text* is set to None. All values are returned as strings.
    """
    patterns = {
        'prompt': r'(.*)',
        'negative_prompt': r'Negative prompt: (.*)',
        'steps': r'Steps: (\d+),',
        'seed': r'Seed: (\d+),',
        'sampler': r'Sampler:\s*([^\s,]+(?:\s+[^\s,]+)*)',
        'model': r'Model:\s*([^\s,]+)',
        'cfg_scale': r'CFG scale:\s*([\d\.]+)',
        'size': r'Size:\s*([0-9]+x[0-9]+)'
    }

    results = {}
    for key, pattern in patterns.items():
        match = re.search(pattern, text)
        results[key] = match.group(1) if match else None

    # Split "WxH" into separate width/height strings for the UI sliders.
    size = results['size']
    if size is None:
        results['w'] = None
        results['h'] = None
    else:
        results['w'], results['h'] = size.split("x")
    return results
122
-
123
-
124
def send_to_txt2img(image):
    """Populate the txt2img tab from a PNG's embedded "parameters" metadata.

    Returns a dict of gradio component updates. At minimum the UI is
    switched to the txt2img tab; every generation setting that can be
    parsed out of the image metadata is filled in as well. Parsing is
    best-effort: on any failure the partial result is returned.
    """
    result = {tabs: gr.update(selected="t2i")}

    try:
        text = image.info['parameters']
        data = get_data(text)
        result[prompt] = gr.update(value=data['prompt'])
        result[negative_prompt] = gr.update(value=data['negative_prompt']) if data['negative_prompt'] is not None else gr.update()
        result[steps] = gr.update(value=int(data['steps'])) if data['steps'] is not None else gr.update()
        result[seed] = gr.update(value=int(data['seed'])) if data['seed'] is not None else gr.update()
        result[cfg_scale] = gr.update(value=float(data['cfg_scale'])) if data['cfg_scale'] is not None else gr.update()
        result[width] = gr.update(value=int(data['w'])) if data['w'] is not None else gr.update()
        result[height] = gr.update(value=int(data['h'])) if data['h'] is not None else gr.update()
        result[sampler] = gr.update(value=data['sampler']) if data['sampler'] is not None else gr.update()
        # Fix: the original tested `model in model_names`, i.e. whether the
        # gradio Dropdown *component* was a dict key — never true, so the
        # model was never restored. Look up the parsed model name instead.
        if data['model'] in model_names:
            result[model] = gr.update(value=model_names[data['model']])
        else:
            result[model] = gr.update()
        return result

    except Exception as e:
        # Image without (or with malformed) metadata: log and fall through
        # so the tab switch still happens.
        print(e)

    return result
149
-
150
-
151
# Shared API client for the whole app, plus a lookup table that maps the
# human-readable model name (id/extension stripped) back to the full
# Prodia model identifier.
prodia_client = Prodia(api_key=os.getenv("PRODIA_API_KEY"))
model_list = prodia_client.list_models()
model_names = {remove_id_and_ext(full_name): full_name for full_name in model_list}
158
-
159
-
160
def txt2img(prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed):
    """Run a Prodia txt2img job and return the generated image URL.

    Blocks until the job reaches a terminal state; raises gr.Error if the
    job does not succeed.
    """
    payload = {
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "model": model,
        "steps": steps,
        "sampler": sampler,
        "cfg_scale": cfg_scale,
        "width": width,
        "height": height,
        "seed": seed
    }

    job = prodia_client.wait(prodia_client.generate(payload))

    if job['status'] != "succeeded":
        raise gr.Error("job failed")

    return job["imageUrl"]
180
-
181
-
182
def img2img(input_image, denoising, prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed):
    """Run a Prodia img2img job on *input_image*; return the result URL.

    Blocks until the job reaches a terminal state; raises gr.Error when no
    image was provided or the job does not succeed.
    """
    if input_image is None:
        raise gr.Error("Please add an image to run img2img")

    payload = {
        "imageData": image_to_base64(input_image),  # API takes base64 PNG
        "denoising_strength": denoising,
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "model": model,
        "steps": steps,
        "sampler": sampler,
        "cfg_scale": cfg_scale,
        "width": width,
        "height": height,
        "seed": seed
    }

    job = prodia_client.wait(prodia_client.transform(payload))

    if job['status'] != "succeeded":
        raise gr.Error("job failed")

    return job["imageUrl"]
206
-
207
-
208
# Minimal CSS: make the Generate button stretch to fill its column.
css = """
#generate {
    height: 100%;
}
"""

# --- Gradio UI -------------------------------------------------------------
# Layout mirrors the AUTOMATIC1111 web UI: a shared checkpoint dropdown on
# top, then txt2img / img2img / PNG Info tabs.
with gr.Blocks(css=css) as demo:
    with gr.Row():
        with gr.Column(scale=6):
            # Checkpoint selector shared by both generation tabs.
            model = gr.Dropdown(interactive=True,value="absolutereality_v181.safetensors [3d9d4d2b]", show_label=True, label="Stable Diffusion Checkpoint", choices=prodia_client.list_models())

        with gr.Column(scale=1):
            gr.Markdown(elem_id="powered-by-prodia", value="AUTOMATIC1111 Stable Diffusion Web UI.<br>Powered by [Prodia](https://prodia.com).<br>For more features and faster generation times check out our [API Docs](https://docs.prodia.com/reference/getting-started-guide).")

    with gr.Tabs() as tabs:
        # ------------------------------ txt2img ------------------------------
        with gr.Tab("txt2img", id='t2i'):
            with gr.Row():
                with gr.Column(scale=6, min_width=600):
                    prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k", placeholder="Prompt", show_label=False, lines=3)
                    negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3, value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
                with gr.Column():
                    text_button = gr.Button("Generate", variant='primary', elem_id="generate")

            with gr.Row():
                with gr.Column(scale=3):
                    with gr.Tab("Generation"):
                        with gr.Row():
                            with gr.Column(scale=1):
                                sampler = gr.Dropdown(value="DPM++ 2M Karras", show_label=True, label="Sampling Method", choices=prodia_client.list_samplers())

                            with gr.Column(scale=1):
                                steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1)

                        with gr.Row():
                            with gr.Column(scale=1):
                                width = gr.Slider(label="Width", maximum=1024, minimum=128, value=512, step=8)
                                height = gr.Slider(label="Height", maximum=1024, minimum=128, value=512, step=8)

                            with gr.Column(scale=1):
                                # Batch controls are pinned to 1 in this demo.
                                batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
                                batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)

                        cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
                        seed = gr.Number(label="Seed", value=-1)  # -1: let the API pick a random seed

                with gr.Column(scale=2):
                    # Placeholder image until the first generation completes.
                    image_output = gr.Image(value="https://images.prodia.xyz/8ede1a7c-c0ee-4ded-987d-6ffed35fc477.png")

            text_button.click(txt2img, inputs=[prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height,
                                               seed], outputs=image_output, concurrency_limit=64)

        # ------------------------------ img2img ------------------------------
        with gr.Tab("img2img", id='i2i'):
            with gr.Row():
                with gr.Column(scale=6, min_width=600):
                    i2i_prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k", placeholder="Prompt", show_label=False, lines=3)
                    i2i_negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3, value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
                with gr.Column():
                    i2i_text_button = gr.Button("Generate", variant='primary', elem_id="generate")

            with gr.Row():
                with gr.Column(scale=3):
                    with gr.Tab("Generation"):
                        i2i_image_input = gr.Image(type="pil")  # source image for img2img

                        with gr.Row():
                            with gr.Column(scale=1):
                                i2i_sampler = gr.Dropdown(value="Euler a", show_label=True, label="Sampling Method", choices=prodia_client.list_samplers())

                            with gr.Column(scale=1):
                                i2i_steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1)

                        with gr.Row():
                            with gr.Column(scale=1):
                                # NOTE(review): unlike the txt2img sliders these
                                # omit minimum=128 — confirm whether intentional.
                                i2i_width = gr.Slider(label="Width", maximum=1024, value=512, step=8)
                                i2i_height = gr.Slider(label="Height", maximum=1024, value=512, step=8)

                            with gr.Column(scale=1):
                                i2i_batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
                                i2i_batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)

                        i2i_cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
                        i2i_denoising = gr.Slider(label="Denoising Strength", minimum=0, maximum=1, value=0.7, step=0.1)
                        i2i_seed = gr.Number(label="Seed", value=-1)  # -1: random seed

                with gr.Column(scale=2):
                    i2i_image_output = gr.Image(value="https://images.prodia.xyz/8ede1a7c-c0ee-4ded-987d-6ffed35fc477.png")

            # Note: the shared `model` dropdown (not an i2i-specific one) feeds img2img.
            i2i_text_button.click(img2img, inputs=[i2i_image_input, i2i_denoising, i2i_prompt, i2i_negative_prompt,
                                                   model, i2i_steps, i2i_sampler, i2i_cfg_scale, i2i_width, i2i_height,
                                                   i2i_seed], outputs=i2i_image_output, concurrency_limit=64)

        # ------------------------------ PNG Info ------------------------------
        with gr.Tab("PNG Info"):
            def plaintext_to_html(text, classname=None):
                """HTML-escape *text* and wrap it in a <p>, one <br> per line."""
                content = "<br>\n".join(html.escape(x) for x in text.split('\n'))

                return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>"


            def get_exif_data(image):
                """Render every key/value of the image's metadata dict as HTML."""
                items = image.info

                info = ''
                for key, text in items.items():
                    info += f"""
                    <div>
                    <p><b>{plaintext_to_html(str(key))}</b></p>
                    <p>{plaintext_to_html(str(text))}</p>
                    </div>
                    """.strip()+"\n"

                if len(info) == 0:
                    message = "Nothing found in the image."
                    # NOTE(review): the second "<p>" below looks like it should
                    # be "</p>" — the paragraph tag is never closed.
                    info = f"<div><p>{message}<p></div>"

                return info

            with gr.Row():
                with gr.Column():
                    image_input = gr.Image(type="pil")

                with gr.Column():
                    exif_output = gr.HTML(label="EXIF Data")
                    send_to_txt2img_btn = gr.Button("Send to txt2img")

            # Show metadata on upload; the button copies parsed settings into txt2img.
            image_input.upload(get_exif_data, inputs=[image_input], outputs=exif_output)
            send_to_txt2img_btn.click(send_to_txt2img, inputs=[image_input], outputs=[tabs, prompt, negative_prompt,
                                                                                      steps, seed, model, sampler,
                                                                                      width, height, cfg_scale],
                                      concurrency_limit=32)

# Queue limits concurrent jobs; API endpoints are disabled for this Space.
demo.queue(max_size=20, api_open=False).launch(max_threads=40, show_api=False)