import gradio as gr
from random import randint
from datetime import datetime

# all_models is expected to provide `models`: a list of Hugging Face model ids.
from all_models import models


def get_current_time():
    """Return the current timestamp as a formatted string."""
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def load_fn(models):
    """Load each model endpoint; fall back to a no-op interface on failure."""
    global models_load
    models_load = {}
    for model in models:
        if model not in models_load:
            try:
                m = gr.load(f'models/{model}')
            except Exception as error:
                print(f"Failed to load {model}: {error}")
                m = gr.Interface(lambda txt: None, ['text'], ['image'])
            models_load[model] = m


load_fn(models)

num_models = len(models)
default_models = models[:num_models]


def extend_choices(choices):
    """Pad the selected model list with 'NA' entries up to num_models."""
    return choices + (num_models - len(choices)) * ['NA']


def update_imgbox(choices):
    """Show an image box for each selected model and hide the 'NA' slots."""
    choices_plus = extend_choices(choices)
    return [gr.Image(None, label=m, visible=(m != 'NA'), elem_id="custom_image")
            for m in choices_plus]


def gen_fn(model_str, prompt, negative_prompt, max_retries=10):
    """Text-to-image generation with retries on transient errors."""
    if model_str == 'NA':
        return None
    retries = 0
    while retries < max_retries:
        try:
            # Append a random token so repeated prompts are not served from cache.
            noise = str(randint(0, 9999999))
            # Only pass the negative prompt when the loaded interface exposes it.
            if hasattr(models_load[model_str], 'negative_prompt'):
                return models_load[model_str](f'{prompt} {noise}', negative_prompt=negative_prompt)
            return models_load[model_str](f'{prompt} {noise}')
        except Exception as e:
            # Distinguish out-of-memory / server errors from other failures.
            if "CUDA out of memory" in str(e) or "500" in str(e):
                print(f"CUDA out of memory or server error: {e}")
            else:
                print(f"Error generating image: {e}")
            retries += 1
            if retries >= max_retries:
                raise Exception(f"Failed to generate image after {max_retries} retries.")
    return None


def img_to_img_fn(model_str, image, prompt, negative_prompt, max_retries=10):
    """Image-to-image generation with retries on transient errors."""
    if model_str == 'NA' or image is None:
        return None
    retries = 0
    while retries < max_retries:
        try:
            noise = str(randint(0, 9999999))
            if hasattr(models_load[model_str], 'negative_prompt'):
                return models_load[model_str](image, prompt=f'{prompt} {noise}', negative_prompt=negative_prompt)
            return models_load[model_str](image, prompt=f'{prompt} {noise}')
        except Exception as e:
            if "CUDA out of memory" in str(e) or "500" in str(e):
                print(f"CUDA out of memory or server error: {e}")
            else:
                print(f"Error generating image: {e}")
            retries += 1
            if retries >= max_retries:
                raise Exception(f"Failed to generate image after {max_retries} retries.")
    return None


def make_text_to_image():
    with gr.Row():
        with gr.Column(scale=1):
            txt_input = gr.Textbox(label='Your prompt:', lines=3, container=False,
                                   elem_id="custom_textbox", placeholder="Prompt")
            negative_txt_input = gr.Textbox(label='Negative prompt:', lines=3, container=False,
                                            elem_id="custom_negative_textbox", placeholder="Negative Prompt")
            with gr.Row():
                gen_button = gr.Button('Generate images', elem_id="custom_gen_button")
                stop_button = gr.Button('Stop', variant='secondary', interactive=False,
                                        elem_id="custom_stop_button")

    def on_generate_click():
        return (gr.Button('Generate images', elem_id="custom_gen_button"),
                gr.Button('Stop', variant='secondary', interactive=True, elem_id="custom_stop_button"))

    def on_stop_click():
        return (gr.Button('Generate images', elem_id="custom_gen_button"),
                gr.Button('Stop', variant='secondary', interactive=False, elem_id="custom_stop_button"))

    gen_button.click(on_generate_click, inputs=None, outputs=[gen_button, stop_button])
    stop_button.click(on_stop_click, inputs=None, outputs=[gen_button, stop_button])

    with gr.Row():
        output = [gr.Image(label=m, min_width=250, height=250, elem_id="custom_image")
                  for m in default_models]
        current_models = [gr.Textbox(m, visible=False) for m in default_models]
        for m, o in zip(current_models, output):
            gen_event = gen_button.click(gen_fn, [m, txt_input, negative_txt_input], o)
            stop_button.click(on_stop_click, inputs=None, outputs=[gen_button, stop_button],
                              cancels=[gen_event])

    with gr.Accordion('Model selection', elem_id="custom_accordion"):
        model_choice = gr.CheckboxGroup(models, label=f'{num_models} different models selected',
                                        value=default_models, interactive=True,
                                        elem_id="custom_checkbox_group")
        model_choice.change(update_imgbox, model_choice, output)
        model_choice.change(extend_choices, model_choice, current_models)


def make_image_to_image():
    with gr.Row():
        with gr.Column(scale=1):
            img_input = gr.Image(label='Input Image', type='pil')
            txt_input = gr.Textbox(label='Your prompt:', lines=3, container=False,
                                   elem_id="custom_textbox", placeholder="Prompt")
            negative_txt_input = gr.Textbox(label='Negative prompt:', lines=3, container=False,
                                            elem_id="custom_negative_textbox", placeholder="Negative Prompt")
            with gr.Row():
                gen_button = gr.Button('Generate images', elem_id="custom_gen_button")
                stop_button = gr.Button('Stop', variant='secondary', interactive=False,
                                        elem_id="custom_stop_button")

    def on_generate_click():
        return (gr.Button('Generate images', elem_id="custom_gen_button"),
                gr.Button('Stop', variant='secondary', interactive=True, elem_id="custom_stop_button"))

    def on_stop_click():
        return (gr.Button('Generate images', elem_id="custom_gen_button"),
                gr.Button('Stop', variant='secondary', interactive=False, elem_id="custom_stop_button"))

    gen_button.click(on_generate_click, inputs=None, outputs=[gen_button, stop_button])
    stop_button.click(on_stop_click, inputs=None, outputs=[gen_button, stop_button])

    with gr.Row():
        output = [gr.Image(label=m, min_width=250, height=250, elem_id="custom_image")
                  for m in default_models]
        current_models = [gr.Textbox(m, visible=False) for m in default_models]
        for m, o in zip(current_models, output):
            gen_event = gen_button.click(img_to_img_fn, [m, img_input, txt_input, negative_txt_input], o)
            stop_button.click(on_stop_click, inputs=None, outputs=[gen_button, stop_button],
                              cancels=[gen_event])

    with gr.Accordion('Model selection', elem_id="custom_accordion"):
        model_choice = gr.CheckboxGroup(models, label=f'{num_models} different models selected',
                                        value=default_models, interactive=True,
                                        elem_id="custom_checkbox_group")
        model_choice.change(update_imgbox, model_choice, output)
        model_choice.change(extend_choices, model_choice, current_models)


custom_css = """
/* Your existing CSS styles here */
"""

with gr.Blocks(css=custom_css) as demo:
    with gr.Tabs():
        with gr.TabItem("Text-to-Image"):
            make_text_to_image()
        with gr.TabItem("Image-to-Image"):
            make_image_to_image()

# Note: concurrency_count is the Gradio 3.x queue parameter (Gradio 4+ uses default_concurrency_limit).
demo.queue(concurrency_count=500)
demo.launch()