Davit12 committed
Commit 57a213e · verified · 1 Parent(s): 6071dfc

Upload 5 files

Files changed (5)
  1. .gitattributes +35 -35
  2. README.md +7 -7
  3. all_models.py +7 -14
  4. app.py +126 -158
  5. externalmod.py +1 -1
.gitattributes CHANGED
@@ -1,35 +1,35 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,13 +1,13 @@
  ---
- title: Sexy Reality
- emoji:
- colorFrom: indigo
- colorTo: gray
+ title: Compare-6
+ emoji: ✨6️⃣✨
+ colorFrom: green
+ colorTo: blue
  sdk: gradio
- sdk_version: 5.10.0
+ sdk_version: 4.42.0
  app_file: app.py
- pinned: false
- short_description: Photorealistic Pron
+ pinned: true
+ short_description: Compare 6 image gen models at a time, feat. FLUX & SD 3.5
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
all_models.py CHANGED
@@ -2,8 +2,8 @@ models = [
      "black-forest-labs/FLUX.1-dev",
      "black-forest-labs/FLUX.1-schnell",
      "XLabs-AI/flux-RealismLora",
-     "prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA",
-     "xey/sldr_flux_nsfw_v2-studio",
+     "enhanceaiteam/Flux-uncensored",
+     "enhanceaiteam/Flux-Uncensored-V2",
      "aleksa-codes/flux-ghibsky-illustration",
      "dataautogpt3/FLUX-SyntheticAnime",
      "Shakker-Labs/FLUX.1-dev-LoRA-add-details",
@@ -12,17 +12,10 @@ models = [
      "stabilityai/stable-diffusion-3.5-large",
      "stabilityai/stable-diffusion-3.5-large-turbo",
      "stabilityai/stable-diffusion-xl-base-1.0",
+     "SG161222/RealVisXL_V4.0",
      "Yntec/epiCPhotoGasm",
-     "Jovie/Midjourney",
+     "dataautogpt3/ProteusV0.2",
      "dataautogpt3/OpenDalleV1.1",
-     "falanaja/Amateur-Photography",
-     "lexa862/NSFWmodel",
-     "Keltezaa/ShowerGirls",
-     "pimpilikipilapi1/fltax",
-     "Jonny001/Alita-v1",
-     "Jonny001/EXD-v1",
-     "Jonny001/NSFW_master",
-     "aifeifei798/sldr_flux_nsfw_v2-studio",
-     "getad72493/innipssy",
-     "strangerzonehf/Flux-Super-Realism-LoRA",
-     ]
+     "prompthero/openjourney",
+     "nerijs/pixel-art-xl",
+     ]
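Note: all_models.py only defines the models list; nothing in it loads a model. For orientation, here is a hedged, standalone sketch of how one entry from that list can be turned into a demo with stock gradio's gr.load. The Space itself goes through its own gr_Interface_load wrapper from externalmod.py (see app.py below); this sketch only illustrates the "models/<repo_id>" convention that wrapper builds on.

# Hedged sketch, not the Space's actual loader: wrap one repo from all_models.py
# using stock gradio. HF_TOKEN is only needed for gated or private repositories.
import os
import gradio as gr
from all_models import models

HF_TOKEN = os.environ.get("HF_TOKEN")

# "models/<repo_id>" asks Gradio to expose the Hugging Face inference endpoint
# for that repository as a ready-made interface.
demo = gr.load(f"models/{models[0]}", hf_token=HF_TOKEN)

if __name__ == "__main__":
    demo.launch()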
app.py CHANGED
@@ -1,204 +1,172 @@
  import gradio as gr
  from all_models import models
- from _prompt import thePrompt, howManyModelsToUse
- from externalmod import gr_Interface_load, save_image, randomize_seed
  import asyncio
  import os
  from threading import RLock
- from datetime import datetime
-
- preSetPrompt = thePrompt
- negPreSetPrompt = "[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness, asian, african, collage, composite, combined image"

  lock = RLock()
-
  HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.

- def get_current_time():
-     now = datetime.now()
-     current_time = now.strftime("%y-%m-%d %H:%M:%S")
-     return current_time
-
  def load_fn(models):
      global models_load
      models_load = {}
      for model in models:
          if model not in models_load.keys():
              try:
                  m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
              except Exception as error:
-                 print(error)
                  m = gr.Interface(lambda: None, ['text'], ['image'])
              models_load.update({model: m})

-
  load_fn(models)

- num_models = howManyModelsToUse
- max_images = howManyModelsToUse
- inference_timeout = 400
- default_models = models[:num_models]
- MAX_SEED = 2**32-1


  def extend_choices(choices):
-     return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']
-

  def update_imgbox(choices):
      choices_plus = extend_choices(choices[:num_models])
-     return [gr.Image(None, label=m, visible=(m!='NA')) for m in choices_plus]
-

- def random_choices():
-     import random
-     random.seed()
-     return random.choices(models, k=num_models)
-
-
- async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
      kwargs = {}
-     if height > 0: kwargs["height"] = height
-     if width > 0: kwargs["width"] = width
-     if steps > 0: kwargs["num_inference_steps"] = steps
-     if cfg > 0: cfg = kwargs["guidance_scale"] = cfg
-
-     if seed == -1:
-         theSeed = randomize_seed()
-     else:
-         theSeed = seed
-     kwargs["seed"] = theSeed
-
-     task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
-     await asyncio.sleep(0)
      try:
          result = await asyncio.wait_for(task, timeout=timeout)
-     except asyncio.TimeoutError as e:
-         print(e)
-         print(f"infer: Task timed out: {model_str}")
-         if not task.done(): task.cancel()
          result = None
-         raise Exception(f"Task timed out: {model_str}") from e
-     except Exception as e:
-         print(e)
-         print(f"infer: exception: {model_str}")
-         if not task.done(): task.cancel()
-         result = None
-         raise Exception() from e
-     if task.done() and result is not None and not isinstance(result, tuple):
          with lock:
-             png_path = model_str.replace("/", "_") + " - " + get_current_time() + "_" + str(theSeed) + ".png"
-             image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, theSeed)
              return image
      return None

- def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
      try:
          loop = asyncio.new_event_loop()
-         result = loop.run_until_complete(infer(model_str, prompt, nprompt,
-                                                height, width, steps, cfg, seed, inference_timeout))
      except (Exception, asyncio.CancelledError) as e:
-         print(e)
-         print(f"gen_fn: Task aborted: {model_str}")
          result = None
-         raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
      finally:
          loop.close()
      return result

-
- def add_gallery(image, model_str, gallery):
-     if gallery is None: gallery = []
-     with lock:
-         if image is not None: gallery.insert(0, (image, model_str))
-     return gallery
-
- with gr.Blocks(fill_width=True) as demo:
-     with gr.Tab(str(num_models) + ' Models'):
-         with gr.Column(scale=2):
-             with gr.Group():
-                 txt_input = gr.Textbox(label='Your prompt:', value=preSetPrompt, lines=3, autofocus=1)
-                 neg_input = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
-                 with gr.Accordion("Advanced", open=False, visible=True):
-                     with gr.Row():
-                         width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
-                         height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
-                     with gr.Row():
-                         steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
-                         cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
-                     seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
-                     seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
-                     seed_rand.click(randomize_seed, None, [seed], queue=False)
-             with gr.Row():
-                 gen_button = gr.Button(f'Generate up to {int(num_models)} images', variant='primary', scale=3, elem_classes=["butt"])
-                 random_button = gr.Button(f'Randomize Models', variant='secondary', scale=1)
-
-         with gr.Column(scale=1):
-             with gr.Group():
-                 with gr.Row():
-                     output = [gr.Image(label=m, show_download_button=True, elem_classes=["image-monitor"],
-                                        interactive=False, width=112, height=112, show_share_button=False, format="png",
-                                        visible=True) for m in default_models]
-                     current_models = [gr.Textbox(m, visible=False) for m in default_models]
-
-         with gr.Column(scale=2):
-             gallery = gr.Gallery(label="Output", show_download_button=True,
-                                  interactive=False, show_share_button=False, container=True, format="png",
-                                  preview=True, object_fit="cover", columns=2, rows=2)
-
-         for m, o in zip(current_models, output):
-             gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
-                               inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
-                               concurrency_limit=None, queue=False)
-             o.change(add_gallery, [o, m, gallery], [gallery])
-
-         with gr.Column(scale=4):
-             with gr.Accordion('Model selection'):
-                 model_choice = gr.CheckboxGroup(models, label = f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
-                 model_choice.change(update_imgbox, model_choice, output)
-                 model_choice.change(extend_choices, model_choice, current_models)
-                 random_button.click(random_choices, None, model_choice)
-
-     with gr.Tab('Single model'):
-         with gr.Column(scale=2):
-             model_choice2 = gr.Dropdown(models, label='Choose model', value=models[0])
-             with gr.Group():
-                 txt_input2 = gr.Textbox(label='Your prompt:', value = preSetPrompt, lines=3, autofocus=1)
-                 neg_input2 = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
-                 with gr.Accordion("Advanced", open=False, visible=True):
-                     with gr.Row():
-                         width2 = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
-                         height2 = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
-                     with gr.Row():
-                         steps2 = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
-                         cfg2 = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
-                     seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
-                     seed_rand2 = gr.Button("Randomize Seed", size="sm", variant="secondary")
-                     seed_rand2.click(randomize_seed, None, [seed2], queue=False)
-                 num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
-             with gr.Row():
-                 gen_button2 = gr.Button('Let the machine halucinate', variant='primary', scale=2, elem_classes=["butt"])
-
-         with gr.Column(scale=1):
-             with gr.Group():
-                 with gr.Row():
-                     output2 = [gr.Image(label='', show_download_button=True,
-                                         interactive=False, width=112, height=112, visible=True, format="png",
-                                         show_share_button=False, show_label=False) for _ in range(max_images)]
-
-         with gr.Column(scale=2):
-             gallery2 = gr.Gallery(label="Output", show_download_button=True,
-                                   interactive=False, show_share_button=True, container=True, format="png",
-                                   preview=True, object_fit="cover", columns=2, rows=2)
-
-         for i, o in enumerate(output2):
-             img_i = gr.Number(i, visible=False)
-             num_images.change(lambda i, n: gr.update(visible = (i < n)), [img_i, num_images], o, queue=False)
-             gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
-                                fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
-                                inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
-                                        height2, width2, steps2, cfg2, seed2], outputs=[o],
-                                concurrency_limit=None, queue=False)
-             o.change(add_gallery, [o, model_choice2, gallery2], [gallery2])
-
  demo.launch(show_api=False, max_threads=400)

  import gradio as gr
+ from random import randint
  from all_models import models
+
+ from externalmod import gr_Interface_load, randomize_seed
+
  import asyncio
  import os
  from threading import RLock

+ # Create a lock to ensure thread safety when accessing shared resources
  lock = RLock()
+ # Load Hugging Face token from environment variable, if available
  HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.

+ # Function to load all models specified in the 'models' list
  def load_fn(models):
      global models_load
      models_load = {}
+
+     # Iterate through all models to load them
      for model in models:
          if model not in models_load.keys():
              try:
+                 # Log model loading attempt
+                 print(f"Attempting to load model: {model}")
+                 # Load model interface using externalmod function
                  m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
+                 print(f"Successfully loaded model: {model}")
              except Exception as error:
+                 # In case of an error, print it and create a placeholder interface
+                 print(f"Error loading model {model}: {error}")
                  m = gr.Interface(lambda: None, ['text'], ['image'])
+             # Update the models_load dictionary with the loaded model
              models_load.update({model: m})

+ # Load all models defined in the 'models' list
+ print("Loading models...")
  load_fn(models)
+ print("Models loaded successfully.")

+ num_models = 6

+ # Set the default models to use for inference
+ default_models = models[:num_models]
+ inference_timeout = 600
+ MAX_SEED = 3999999999
+ # Generate a starting seed randomly between 1941 and 2024
+ starting_seed = randint(1941, 2024)
+ print(f"Starting seed: {starting_seed}")

+ # Extend the choices list to ensure it contains 'num_models' elements
  def extend_choices(choices):
+     print(f"Extending choices: {choices}")
+     extended = choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']
+     print(f"Extended choices: {extended}")
+     return extended

+ # Update the image boxes based on selected models
  def update_imgbox(choices):
+     print(f"Updating image boxes with choices: {choices}")
      choices_plus = extend_choices(choices[:num_models])
+     imgboxes = [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]
+     print(f"Updated image boxes: {imgboxes}")
+     return imgboxes

+ # Asynchronous function to perform inference on a given model
+ async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
+     from pathlib import Path
      kwargs = {}
+     noise = ""
+     kwargs["seed"] = seed
+     # Create an asynchronous task to run the model inference
+     print(f"Starting inference for model: {model_str} with prompt: '{prompt}' and seed: {seed}")
+     task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
+                                                  prompt=f'{prompt} {noise}', **kwargs, token=HF_TOKEN))
+     await asyncio.sleep(0) # Allow other tasks to run
      try:
+         # Wait for the task to complete within the specified timeout
          result = await asyncio.wait_for(task, timeout=timeout)
+         print(f"Inference completed for model: {model_str}")
+     except (Exception, asyncio.TimeoutError) as e:
+         # Handle any exceptions or timeout errors
+         print(f"Error during inference for model {model_str}: {e}")
+         if not task.done():
+             task.cancel()
+             print(f"Task cancelled for model: {model_str}")
          result = None
+     # If the task completed successfully, save the result as an image
+     if task.done() and result is not None:
          with lock:
+             png_path = "image.png"
+             result.save(png_path)
+             image = str(Path(png_path).resolve())
+             print(f"Result saved as image: {image}")
              return image
+     print(f"No result for model: {model_str}")
      return None

+ # Function to generate an image based on the given model, prompt, and seed
+ def gen_fnseed(model_str, prompt, seed=1):
+     if model_str == 'NA':
+         print(f"Model is 'NA', skipping generation.")
+         return None
      try:
+         # Create a new event loop to run the asynchronous inference function
+         print(f"Generating image for model: {model_str} with prompt: '{prompt}' and seed: {seed}")
          loop = asyncio.new_event_loop()
+         result = loop.run_until_complete(infer(model_str, prompt, seed, inference_timeout))
      except (Exception, asyncio.CancelledError) as e:
+         # Handle any exceptions or cancelled tasks
+         print(f"Error during generation for model {model_str}: {e}")
          result = None
      finally:
+         # Close the event loop
          loop.close()
+         print(f"Event loop closed for model: {model_str}")
      return result

+ # Create the Gradio Blocks interface with a custom theme
+ print("Creating Gradio interface...")
+ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
+     gr.HTML("<center><h1>Compare-6</h1></center>")
+     with gr.Tab('Compare-6'):
+         # Text input for user prompt
+         txt_input = gr.Textbox(label='Your prompt:', lines=4)
+         # Button to generate images
+         gen_button = gr.Button('Generate up to 6 images in up to 3 minutes total')
+         with gr.Row():
+             # Slider to select a seed for reproducibility
+             seed = gr.Slider(label="Use a seed to replicate the same image later (maximum 3999999999)", minimum=0, maximum=MAX_SEED, step=1, value=starting_seed, scale=3)
+             # Button to randomize the seed
+             seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary", scale=1)
+             # Set up click event to randomize the seed
+             seed_rand.click(randomize_seed, None, [seed], queue=False)
+             print("Seed randomization button set up.")
+         # Button click to start generation
+         gen_button.click(lambda s: gr.update(interactive=True), None)
+         print("Generation button set up.")
+
+         with gr.Row():
+             # Create image output components for each model
+             output = [gr.Image(label=m, min_width=480) for m in default_models]
+             # Create hidden textboxes to store the current models
+             current_models = [gr.Textbox(m, visible=False) for m in default_models]
+
+         # Set up generation events for each model and output image
+         for m, o in zip(current_models, output):
+             print(f"Setting up generation event for model: {m.value}")
+             gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fnseed,
+                               inputs=[m, txt_input, seed], outputs=[o], concurrency_limit=None, queue=False)
+             # The commented stop button could be used to cancel the generation event
+             #stop_button.click(lambda s: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
+         # Accordion to allow model selection
+         with gr.Accordion('Model selection'):
+             # Checkbox group to select up to 'num_models' different models
+             model_choice = gr.CheckboxGroup(models, label=f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
+             # Update image boxes and current models based on model selection
+             model_choice.change(update_imgbox, model_choice, output)
+             model_choice.change(extend_choices, model_choice, current_models)
+             print("Model selection setup complete.")
+         with gr.Row():
+             # Placeholder HTML to add additional UI elements if needed
+             gr.HTML(
+             )
+
+     # Queue settings for handling multiple concurrent requests
+     print("Setting up queue...")
+     demo.queue(default_concurrency_limit=200, max_size=200)
+     print("Launching Gradio interface...")
  demo.launch(show_api=False, max_threads=400)
+ print("Gradio interface launched successfully.")
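The core of the rewritten app.py is the infer()/gen_fnseed() pair: the blocking model call runs in a worker thread, is awaited with a deadline, and each Gradio callback drives it from a fresh event loop. Below is a minimal, Gradio-free sketch of that pattern; slow_model() is a hypothetical stand-in for models_load[model].fn, and the names are illustrative only.

# Hedged sketch of the timeout pattern used above, decoupled from Gradio.
import asyncio
import time


def slow_model(prompt: str, seed: int) -> str:
    """Hypothetical stand-in for a blocking inference call."""
    time.sleep(2)  # pretend this is remote inference work
    return f"image for {prompt!r} (seed={seed})"


async def infer_with_timeout(prompt: str, seed: int, timeout: float = 5.0):
    # Run the blocking call in a worker thread so the event loop stays responsive.
    task = asyncio.create_task(asyncio.to_thread(slow_model, prompt, seed))
    try:
        return await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError:
        # Mirror app.py's behavior: cancel the task and fall back to None.
        if not task.done():
            task.cancel()
        return None


def gen(prompt: str, seed: int):
    # app.py creates a fresh event loop per Gradio callback; same idea here.
    loop = asyncio.new_event_loop()
    try:
        return loop.run_until_complete(infer_with_timeout(prompt, seed))
    finally:
        loop.close()


if __name__ == "__main__":
    print(gen("a lighthouse at dusk", seed=1999))

Note that cancelling the asyncio task does not interrupt a thread that is already running; like the app, the sketch simply stops waiting for it once the deadline passes.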
externalmod.py CHANGED
@@ -606,7 +606,7 @@ def save_image(image, savefile, modelname, prompt, nprompt, height=0, width=0, s

  def randomize_seed():
      from random import seed, randint
-     MAX_SEED = 2**32-1
+     MAX_SEED = 3999999999
      seed()
      rseed = randint(0, MAX_SEED)
      return rseed
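The only change here is the seed cap: randomize_seed() now draws from [0, 3999999999], which keeps it in sync with the MAX_SEED constant and the seed slider maximum in the new app.py. Restated as a standalone snippet for clarity:

# The patched helper, restated standalone. 3999999999 matches MAX_SEED in app.py,
# so a randomized seed always fits within the Gradio slider's range.
from random import seed, randint

MAX_SEED = 3999999999


def randomize_seed() -> int:
    seed()  # reseed the PRNG from system entropy / current time
    return randint(0, MAX_SEED)


if __name__ == "__main__":
    print(randomize_seed())  # prints a random seed in [0, MAX_SEED]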