import gradio as gr
from random import randint
from all_models import models
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
import requests
now2 = 0
kii = " mohawk femboy racecar driver "

def get_current_time():
    now = datetime.now()
    current_time = now.strftime("%Y-%m-%d %H:%M:%S")
    ki = f'{kii} {current_time}'
    return ki

def load_fn(models):
    global models_load
    models_load = {}
    for model in models:
        if model not in models_load.keys():
            try:
                m = gr.load(f'models/{model}')
            except Exception as error:
                print(f"Error loading model {model}: {error}")
                m = gr.Interface(lambda _: None, inputs=gr.Textbox(), outputs=gr.Image(), enable_queue=False)
            models_load.update({model: m})
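
# Load every model once at import time; models that fail to load fall back to the placeholder Interface above.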
load_fn(models)
num_models = len(models)
default_models = models[:num_models]

def extend_choices(choices):
    return choices + (num_models - len(choices)) * ['NA']

def update_imgbox(choices):
    choices_plus = extend_choices(choices)
    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]
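
# Thread pool sized to the number of models; the click handlers below call gen_fn directly and do not submit work to it.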
executor = ThreadPoolExecutor(max_workers=num_models)

def gen_fn(model_str, prompt, negative_prompt):
    if model_str == 'NA':
        return None
    noise = str(randint(0, 9999))
    combined_prompt = f'{prompt} {model_str} {negative_prompt} {noise}'
    print(f"Generating with prompt: {combined_prompt}")
    try:
        # Attempt to generate the image
        image_response = models_load[model_str](f'{prompt} {negative_prompt} {noise}')
        # Check if the image_response is a tuple, handle accordingly
        if isinstance(image_response, tuple):
            # If the response is a tuple, assume the first item is the image
            image_response = image_response[0]
        # Ensure the response is an image or image-like object
        if isinstance(image_response, gr.Image):
            return image_response
        elif isinstance(image_response, str):  # If the response is a path or URL, pass it along as a string
            return gr.Image(image_response)  # Handle it based on the model's return type
        else:
            print(f"Unexpected response type: {type(image_response)}")
            return None
    except Exception as e:
        print(f"Error occurred: {e}")
        return None

def make_me():
    with gr.Row():
        txt_input = gr.Textbox(lines=2, value=kii)
        negative_prompt_input = gr.Textbox(lines=2, value="", label="Negative Prompt")
        gen_button = gr.Button('Generate images')
        stop_button = gr.Button('Stop', variant='secondary', interactive=False)
        # Enable the stop button once a generation run has been started.
        gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
    gr.HTML("""
        <div style="text-align: center; max-width: 100%; margin: 0 auto;">
        </div>
    """)
    with gr.Row():
        output = [gr.Image(label=m) for m in default_models]
        current_models = [gr.Textbox(m, visible=False) for m in default_models]
        for m, o in zip(current_models, output):
            gen_event = gen_button.click(gen_fn, [m, txt_input, negative_prompt_input], o, queue=False)
    with gr.Accordion('Model selection'):
        model_choice = gr.CheckboxGroup(models, label=f'{num_models} different models selected', value=default_models, interactive=True)
        model_choice.change(update_imgbox, model_choice, output)
        model_choice.change(extend_choices, model_choice, current_models)
js_code = """
<script>
const originalScroll = window.scrollTo;
const originalShowToast = gradio.Toast.show;
gradio.Toast.show = function() {
originalShowToast.apply(this, arguments);
window.scrollTo = function() {};};
setTimeout(() => {
window.scrollTo = originalScroll;
}, 300000); // Restore scroll function after 3 seconds
</script>
"""

with gr.Blocks(css="""
    label.float.svelte-i3tvor { top: auto !important; bottom: 0; position: absolute; background: rgba(0,0,0,0.0); left: var(--block-label-margin); color: rgba(200,200,200,.7); }
    .genbut { max-width: 50px; max-height: 30px; width: 150px; height: 30px }
    .stopbut { max-width: 50px; max-height: 30px; width: 150px; height: 30px }
    .float.svelte-1mwvhlq { position: absolute; top: var(--block-label-margin); left: var(--block-label-margin); background: none; border: none; }
""") as demo:
    # js_code already contains its own <script> tags, so inject it as raw HTML rather than wrapping it again in Markdown.
    gr.HTML(js_code)
    make_me()

# The click events are registered with queue=False, so launch without enabling the request queue.
demo.launch(max_threads=200)