# (extraction artifact removed: file-size line, git-blame hash gutter, and line-number gutter)
import gradio as gr
from random import randint
from all_models import models
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
import requests
# Placeholder later shadowed by a local of the same name in get_current_time();
# kept for backward compatibility with any external reader of this module.
now2 = 0

# Default prompt pre-filled into the prompt textbox.
kii = " mohawk femboy racecar driver "
def get_current_time():
    """Return the default prompt `kii` suffixed with the current local time.

    Format: "<kii> YYYY-MM-DD HH:MM:SS".
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return f'{kii} {timestamp}'
def load_fn(models):
    """Populate the global ``models_load`` dict with one Gradio interface per model.

    Models that fail to load are replaced by a dummy text-to-image Interface
    so the UI layout stays intact.
    """
    global models_load
    models_load = {}
    for model in models:
        # Dict was just emptied, so this is always true; kept for parity
        # with the original guard against duplicate entries.
        if model in models_load:
            continue
        try:
            iface = gr.load(f'models/{model}')
        except Exception as error:
            print(f"Error loading model {model}: {error}")
            iface = gr.Interface(lambda _: None, inputs=gr.Textbox(), outputs=gr.Image(), enable_queue=False)
        models_load[model] = iface
# Import-time side effect: loads (or stubs) every model into models_load.
load_fn(models)
# Number of image slots in the UI — one per model.
num_models = len(models)
# models[:len(models)] is the full list; all models are selected by default.
default_models = models[:num_models]
def extend_choices(choices, total=None):
    """Pad *choices* with 'NA' placeholders up to *total* entries.

    *total* defaults to the module-level ``num_models`` so every image slot
    in the UI receives a value; passing it explicitly makes the function
    reusable (and testable) without the global.
    """
    if total is None:
        total = num_models
    return choices + (total - len(choices)) * ['NA']
def update_imgbox(choices):
    """Rebuild the image components: visible for selected models, hidden for 'NA' fillers."""
    padded = extend_choices(choices)
    images = []
    for name in padded:
        images.append(gr.Image(None, label=name, visible=(name != 'NA')))
    return images
executor = ThreadPoolExecutor(max_workers=num_models)
def gen_fn(model_str, prompt, negative_prompt):
    """Generate one image with the model named *model_str*.

    Returns a gr.Image (or None for the 'NA' placeholder slot, on error,
    or when the model returns an unexpected type). The prompt sent to the
    model is '<prompt> <negative_prompt> <noise>'; the random noise suffix
    defeats response caching.
    """
    # 'NA' marks an unselected image slot — nothing to do.
    if model_str == 'NA':
        return None
    noise = str(randint(0, 9999))
    # Debug line only; note it includes model_str, unlike the prompt
    # actually sent to the model below.
    combined_prompt = f'{prompt} {model_str} {negative_prompt} {noise}'
    print(f"Generating with prompt: {combined_prompt}")
    try:
        image_response = models_load[model_str](f'{prompt} {negative_prompt} {noise}')
        # Some models return a tuple; assume the first item is the image.
        if isinstance(image_response, tuple):
            image_response = image_response[0]
        if isinstance(image_response, gr.Image):
            return image_response
        # A string is treated as a path/URL and wrapped in a gr.Image.
        if isinstance(image_response, str):
            return gr.Image(image_response)
        print(f"Unexpected response type: {type(image_response)}")
        return None
    except Exception as e:
        # Best-effort: log and leave the slot empty rather than crash the UI.
        print(f"Error occurred: {e}")
        return None
def make_me():
    """Build the UI: prompt inputs, generate/stop buttons, one image slot per
    model, and a checkbox group that shows/hides slots as models are toggled."""
    with gr.Row():
        txt_input = gr.Textbox(lines=2, value=kii)
        negative_prompt_input = gr.Textbox(lines=2, value="", label="Negative Prompt")
    gen_button = gr.Button('Generate images')
    stop_button = gr.Button('Stop', variant='secondary', interactive=False)
    # Enable the stop button when a run starts.
    # NOTE(review): nothing ever re-disables it or cancels the click events
    # (the cancel wiring is gone), so Stop is currently cosmetic.
    gen_button.click(lambda _: gr.update(interactive=True), None, stop_button)
    gr.HTML("""
<div style="text-align: center; max-width: 100%; margin: 0 auto;">
<body>
</body>
</div>
""")
    with gr.Row():
        output = [gr.Image(label=m) for m in default_models]
        # Hidden textboxes carry each slot's model name into gen_fn.
        current_models = [gr.Textbox(m, visible=False) for m in default_models]
        # One independent click handler per slot; queue=False runs them eagerly.
        for m, o in zip(current_models, output):
            gen_event = gen_button.click(gen_fn, [m, txt_input, negative_prompt_input], o, queue=False)
    with gr.Accordion('Model selection'):
        model_choice = gr.CheckboxGroup(models, label=f' {num_models} different models selected', value=default_models, interactive=True)
        # Toggling models updates image visibility and the hidden name boxes.
        model_choice.change(update_imgbox, model_choice, output)
        model_choice.change(extend_choices, model_choice, current_models)
# JS snippet: no-ops window.scrollTo whenever a toast is shown, then restores
# the original after 300000 ms (5 minutes) — prevents toast-triggered scroll jumps.
# NOTE(review): relies on a page-global `gradio.Toast` object — confirm this
# API exists in the deployed Gradio version, otherwise the script throws.
js_code = """
<script>
const originalScroll = window.scrollTo;
const originalShowToast = gradio.Toast.show;
gradio.Toast.show = function() {
originalShowToast.apply(this, arguments);
window.scrollTo = function() {};};
setTimeout(() => {
window.scrollTo = originalScroll;
}, 300000); // Restore the original scroll function after 300000 ms (5 minutes)
</script>
"""
# Build and launch the app with custom CSS for labels and the two buttons.
with gr.Blocks(css="""
label.float.svelte-i3tvor { top:auto!important; bottom: 0; position: absolute; background: rgba(0,0,0,0.0); left: var(--block-label-margin); color: rgba(200,200,200,.7);}
.genbut { max-width: 50px; max-height: 30px; width:150px; height:30px}
.stopbut { max-width: 50px; max-height: 30px; width:150px; height:30px}
.float.svelte-1mwvhlq { position: absolute; top: var(--block-label-margin); left: var(--block-label-margin); background: none; border: none;}
""") as demo:
    # NOTE(review): js_code already contains its own <script> wrapper, so this
    # double-wraps it — and gr.Markdown does not execute script tags anyway.
    # The injected JS almost certainly never runs; verify in the browser.
    gr.Markdown("<script>" + js_code + "</script>")
    make_me()
demo.queue()
# NOTE(review): this assignment clobbers the bound queue() method with a bool —
# presumably a hack to force queueing off after the call above; confirm it has
# any effect in the Gradio version in use before keeping it.
demo.queue = False
demo.config["queue"] = False
demo.launch(max_threads=200)