File size: 5,311 Bytes
6fef025
89c5f18
f5b8400
53b0019
e1f04d1
7954c2d
9534b33
53b0019
0318f31
f1c6c08
2974b84
15a21d3
ad36d43
 
984eb50
 
 
ad36d43
 
 
 
 
 
 
 
53b0019
 
0318f31
c469318
0318f31
b21c027
 
f1c6c08
f5b8400
 
 
 
 
 
e5cd0b9
004534f
e5cd0b9
 
 
 
2eb1abd
6fef025
81987e1
6fef025
19aac56
f5b8400
6fef025
9534b33
f5b8400
 
6fef025
f1c6c08
f5b8400
 
0318f31
6fef025
1fb3ca0
e1f04d1
 
f1c6c08
e1f04d1
 
 
 
 
e2dcb90
e1f04d1
1fb3ca0
6e00654
9765ec1
 
 
e5cd0b9
6e00654
 
2e9080b
 
 
 
 
2c694c9
984eb50
2e9080b
9765ec1
9534b33
a2c2448
bdcf524
f1c6c08
 
 
 
0636b63
 
 
 
9765ec1
a2c2448
 
 
f1c6c08
 
 
 
 
 
 
 
 
 
 
 
0318f31
f1c6c08
 
 
 
2944f54
 
 
1fb3ca0
 
 
 
 
 
 
 
1a0c870
1fb3ca0
2944f54
 
b3e0eba
 
 
ca4c542
 
 
 
 
5fa6414
59ef579
821e5e9
ca4c542
 
 
26b4194
f97541a
5fd8ae0
5929363
37eec56
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
import gradio as gr
from random import randint
from all_models import models
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
import time
import requests

# Scratch global referenced by get_current_time(); initialised to a dummy int.
now2 = 0
# Default prompt text pre-filled in the UI prompt textbox.
kii=" mohawk femboy racecar driver "
# Module-level default; gen_fn() creates a local of the same name that shadows this.
combined_prompt = ""



        


def log_message(count=5, delay=1.0):
    """Simulate a slow logging task for the UI "Logs" textbox.

    Emits one numbered line per iteration, sleeping ``delay`` seconds
    between lines to mimic real work.

    Args:
        count: Number of log lines to produce (default 5, as before).
        delay: Seconds to sleep after each line (default 1.0, as before).

    Returns:
        All log lines joined into a single newline-terminated string.
    """
    parts = []
    for i in range(count):
        parts.append(f"Log message {i+1}\n")
        time.sleep(delay)
    # join instead of repeated += (avoids quadratic string building)
    return "".join(parts)


def get_current_time():
    """Return the default prompt (``kii``) suffixed with the current local time.

    Bug fixed: the original did ``now2 = now`` without a ``global`` statement,
    which only created a local variable shadowing the module-level ``now2``
    (the global was never updated) — the pointless alias is removed.

    Returns:
        str: f"{kii} YYYY-MM-DD HH:MM:SS" using the local clock.
    """
    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return f'{kii} {current_time}'


def load_fn(models):
    """Populate the global ``models_load`` dict with one Gradio handle per model.

    Maps model name -> interface returned by ``gr.load``; on load failure a
    dummy text->image Interface is stored instead so the UI still gets a slot.

    Args:
        models: Iterable of model names (e.g. Hugging Face repo ids).
    """
    global models_load
    models_load = {}
    for model in models:
        if model in models_load:
            # duplicate name in the input list — keep the first handle
            continue
        try:
            m = gr.load(f'models/{model}')
            print(m)
        except Exception as error:
            print(f"Error loading model {model}: {error}")
            # NOTE(review): ``enable_queue`` was removed from gr.Interface in
            # newer Gradio releases — confirm against the pinned version.
            m = gr.Interface(lambda _: None, inputs=gr.Textbox(), outputs=gr.Image(), enable_queue=False)
        models_load[model] = m


# Eagerly load every model handle at import time (fills ``models_load``).
load_fn(models)

num_models = len(models)
# Full-list copy used as the initial value for the model checkbox group.
default_models = models[:num_models]


def extend_choices(choices, total=None):
    """Pad ``choices`` with 'NA' placeholders up to ``total`` entries.

    Args:
        choices: List of currently selected model names.
        total: Target length; defaults to the module-level ``num_models``
            (``None`` default keeps the original call signature working).

    Returns:
        A new list of length ``total``: ``choices`` followed by 'NA' padding.
        If ``choices`` is already ``total`` long, an equal copy is returned.
    """
    if total is None:
        total = num_models
    return choices + (total - len(choices)) * ['NA']


def update_imgbox(choices):
    """Return one gr.Image per model slot; slots padded with 'NA' are hidden."""
    padded = extend_choices(choices)
    boxes = []
    for name in padded:
        visible = name != 'NA'
        boxes.append(gr.Image(None, label=name, visible=visible))
    return boxes


# Thread pool sized to the model count. NOTE(review): never referenced anywhere
# below — gen_fn is invoked directly by Gradio click events; confirm before
# removing, in case code outside this view uses it.
executor = ThreadPoolExecutor(max_workers=num_models)


def gen_fn(model_str, prompt, negative_prompt):
    """Generate one image from the model registered under ``model_str``.

    Args:
        model_str: Key into ``models_load``, or 'NA' for a hidden/empty slot.
        prompt: Positive prompt text.
        negative_prompt: Appended verbatim after the prompt — the gr.load
            interface takes a single text input, so there is no separate
            negative-prompt channel.

    Returns:
        The model's image response, a ``gr.Image`` wrapping a returned
        path/URL string, or ``None`` for 'NA' slots, unexpected response
        types, and errors.
    """
    if model_str == 'NA':
        return None

    # Random suffix makes otherwise-identical requests distinct, so repeated
    # generations are not served from a cached result.
    noise = str(randint(0, 9999))
    combined_prompt = f'{prompt} {negative_prompt}'
    print(f"Generating with prompt: {combined_prompt}")

    try:
        image_response = models_load[model_str](f'{prompt} {negative_prompt} {noise}')
        # Pass through image objects; wrap path/URL strings for display.
        if isinstance(image_response, gr.Image):
            return image_response
        if isinstance(image_response, str):
            return gr.Image(image_response)
        print(f"Unexpected response type: {type(image_response)}")
        return None
    except Exception as e:
        # Best-effort: log and hide the slot rather than crash the whole grid.
        print(f"Error occurred: {e}")
        return None
      


def make_me():
    """Build the UI: prompt inputs, generate/log controls, one image slot per
    model, and a model-selection accordion.

    Must be called inside a ``gr.Blocks`` context (see module bottom).
    """
    with gr.Row():
        txt_input = gr.Textbox(lines=2, value=kii)
        negative_prompt_input = gr.Textbox(lines=2, value="", label="Negative Prompt")
        gen_button = gr.Button('Generate images')

        # Bug fixed: with inputs=None Gradio calls the function with ZERO
        # arguments, so the original one-arg ``lambda _: ...`` would raise
        # TypeError on every click. Use a zero-arg callback.
        gen_button.click(lambda: gr.update(interactive=True), None)
        log_output = gr.Textbox(label="Logs", lines=2, interactive=False)
        log_button = gr.Button("Start Logging")
        log_button.click(log_message, outputs=log_output)

        gr.HTML("""
        <div style="text-align: center; max-width: 100%; margin: 0 auto;">
            <body>
            </body>
        </div>
        """)

    with gr.Row():
        output = [gr.Image(label=m) for m in default_models]
        current_models = [gr.Textbox(m, visible=False) for m in default_models]
        # One click handler per (hidden model-name textbox, image slot) pair.
        for m, o in zip(current_models, output):
            gen_button.click(gen_fn, [m, txt_input, negative_prompt_input], o, queue=False)

    with gr.Accordion('Model selection'):
        model_choice = gr.CheckboxGroup(models, label=f' {num_models} different models selected', value=default_models, interactive=True)
        # Re-show/hide image slots and refresh the hidden name textboxes
        # whenever the selection changes.
        model_choice.change(update_imgbox, model_choice, output)
        model_choice.change(extend_choices, model_choice, current_models)


# Raw <script> markup injected into the page (see the gr.Markdown call below):
# temporarily disables window.scrollTo while a Gradio toast is shown, then
# restores it. NOTE(review): the JS comment says "3 seconds" but the timeout
# is 1000 ms (1 second) — string kept byte-identical here; confirm which
# duration is intended before editing the literal.
js_code = """
<script>
    const originalScroll = window.scrollTo;
    const originalShowToast = gradio.Toast.show;
    gradio.Toast.show = function() {
        originalShowToast.apply(this, arguments);
        window.scrollTo = function() {};};
    setTimeout(() => {
        window.scrollTo = originalScroll;
    }, 1000); // Restore scroll function after 3 seconds
</script>
"""


    
    
# Assemble and launch the app. NOTE(review): ``js_code`` already contains its
# own <script> tags, so wrapping it in another "<script>...</script>" pair
# below produces nested script tags; also recent Gradio sanitizes scripts in
# Markdown — confirm this JS actually executes in the deployed version.
with gr.Blocks(css="""
     label.float.svelte-i3tvor {  top:auto!important;  bottom: 0;    position: absolute;    background: rgba(0,0,0,0.0);    left: var(--block-label-margin);    color: rgba(200,200,200,.7);}
    .genbut { max-width: 50px; max-height: 30px; width:150px; height:30px}
    .stopbut { max-width: 50px; max-height: 30px; width:150px; height:30px}
    .float.svelte-1mwvhlq {    position: absolute;    top: var(--block-label-margin);    left: var(--block-label-margin);    background: none;    border: none;}
    textarea {    overflow-y: scroll;    height: 212px;    resize: both;    width: 179px;    min-height: 50px;    max-height: 230px;}
    .svelte-5y6bt2 {max-height:161px;min-height:160px;}
    .hide-container {    max-height: 2px;    position: fixed;    min-height: 1px;}
""") as demo:
    gr.Markdown("<script>" + js_code + "</script>")
    make_me()

# NOTE(review): ``demo.queue = False`` overwrites the queue() *method* with a
# boolean, and mutating ``demo.config`` is not public Gradio API — these look
# like hacks to force-disable queueing right after demo.queue() enabled it;
# confirm intent against the pinned Gradio version before cleaning up.
demo.queue()
demo.queue = False
demo.config["queue"] = False
demo.launch(max_threads=200)