File size: 4,505 Bytes
6fef025
89c5f18
f5b8400
53b0019
 
0318f31
f29f8ef
15a21d3
 
53b0019
 
0318f31
c469318
0318f31
b21c027
 
f5b8400
 
 
 
 
 
 
 
d6c665c
f5b8400
6fef025
81987e1
6fef025
19aac56
f5b8400
6fef025
f5b8400
 
6fef025
f5b8400
 
0318f31
6fef025
1fb3ca0
f5b8400
 
1fb3ca0
 
 
 
 
25a1729
1fb3ca0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6fef025
bdcf524
132d5bb
 
a655122
4424a6b
1fb3ca0
730e2da
 
132d5bb
781fec0
 
 
 
 
 
8f8f343
2944f54
0b760cf
8f8f343
1fb3ca0
0318f31
3a0cfaa
 
 
 
 
 
 
 
 
 
0318f31
2944f54
 
 
1fb3ca0
 
 
 
 
 
 
 
 
 
2944f54
 
 
6eab85d
1fb3ca0
 
 
26b4194
 
1fb3ca0
0318f31
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
import gradio as gr
from random import randint
from all_models import models
from datetime import datetime

# Placeholder timestamp holder; get_current_time() shadows this with a local.
now2 = 0
# Default prompt text pre-filled in the UI textbox.
kii = " mohawk femboy racecar driver "


def get_current_time():
    """Return the default prompt ``kii`` suffixed with the current timestamp.

    Returns:
        str: ``"<kii> YYYY-MM-DD HH:MM:SS"`` using the local clock.

    Note: the original assigned the module-level ``now2`` through a *local*
    name, which had no effect outside this function; that dead local has
    been removed.
    """
    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return f'{kii} {current_time}'

def load_fn(models):
    """Populate the global ``models_load`` dict: model id -> loaded interface.

    Each model is loaded via ``gr.load('models/<id>')``. If loading raises,
    a no-op placeholder interface with the same shape (text in, image out,
    returning None) is stored instead so the rest of the UI still builds.

    Args:
        models: iterable of model id strings (duplicates are skipped).
    """
    global models_load
    models_load = {}
    for model in models:
        if model in models_load:  # skip duplicate ids in the input list
            continue
        try:
            m = gr.load(f'models/{model}')
        except Exception:
            # Fallback stub keeps the slot functional when the hub load fails.
            m = gr.Interface(lambda txt: None, ['text'], ['image'])
        models_load[model] = m

# Eagerly load every interface so the UI below can reference them directly.
load_fn(models)

num_models = len(models)
# All models are selected by default; the slice keeps future truncation easy.
default_models = models[:num_models]

def extend_choices(choices):
    """Pad *choices* with 'NA' sentinels up to ``num_models`` entries."""
    padding = ['NA'] * (num_models - len(choices))
    return choices + padding

def update_imgbox(choices):
    """Return one gr.Image per model slot; padded 'NA' slots are hidden."""
    padded = extend_choices(choices)
    return [
        gr.Image(None, label=name, visible=name != 'NA')
        for name in padded
    ]

def gen_fn(model_str, prompt, negative_prompt):
    """Run one model and return its generated image.

    Args:
        model_str: model id key into ``models_load``, or 'NA' for an
            empty/hidden slot.
        prompt: positive prompt text.
        negative_prompt: negative prompt text (appended verbatim).

    Returns:
        The model interface's output image, or None for 'NA' slots.

    Note: a large block of commented-out timing/queue experiments was
    removed; behavior is unchanged.
    """
    if model_str == 'NA':
        return None

    # Random suffix defeats any prompt-level caching upstream.
    noise = str(randint(0, 9999))
    combined_prompt = f'{prompt} {model_str} {negative_prompt} {noise}'
    print(f"Generating with prompt: {combined_prompt}")  # Debug line

    return models_load[model_str](f'{prompt} {negative_prompt} {noise}')



def make_me():
   # with gr.Tab('The Dream'): 
        with gr.Row():
            txt_input = gr.Textbox(lines=2, value=kii, width=300, max_height=100)
            #txt_input = gr.Textbox(label='Your prompt:', lines=2, value=kii)
            negative_prompt_input = gr.Textbox(lines=2, value="", label="Negative Prompt", width=300, max_height=100)
            gen_button = gr.Button('Generate images', width=150, height=30)
            stop_button = gr.Button('Stop', variant='secondary', interactive=False, width=150, height=30)
            gen_button.click(lambda s: gr.update(interactive=True), None, stop_button)
            gr.HTML("""
            <div style="text-align: center; max-width: 100%; margin: 0 auto;">
                <body>
                </body>
            </div>
            """)
        with gr.Row():
            output = [gr.Image(label=m, min_width=170, height=170) for m in default_models]
            current_models = [gr.Textbox(m, visible=False) for m in default_models]
            for m, o in zip(current_models, output):
                gen_event = gen_button.click(gen_fn, [m, txt_input, negative_prompt_input], o)
                stop_button.click(lambda s: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
     #   with gr.Accordion('Model selection'):
     #       model_choice = gr.CheckboxGroup(models, label=f' {num_models} different models selected', value=default_models, multiselect=True, max_choices=num_models, interactive=True, filterable=False)
     #       model_choice.change(update_imgbox, model_choice, output)
     #       model_choice.change(extend_choices, model_choice, current_models)
     #   with gr.Row():
     #       gr.HTML("""
     #           <div class="footer">
     #           <p> Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77 and Omnibus's Maximum Multiplier!
     #           </p>
     #       """)



# Page script: whenever a gradio toast is shown, window.scrollTo is disabled
# to stop the page from jumping; the original scroll function is restored by
# the setTimeout below.
# NOTE(review): the timeout is 300000 ms (5 minutes), but the inline JS
# comment claims "3 seconds" — confirm which value was intended.
# NOTE(review): `gradio.Toast` is assumed to be a page-global exposed by the
# Gradio frontend — verify against the deployed Gradio version.
js_code = """
<script>
    const originalScroll = window.scrollTo;
    const originalShowToast = gradio.Toast.show;
    gradio.Toast.show = function() {
        originalShowToast.apply(this, arguments);
        window.scrollTo = function() {};};
    setTimeout(() => {
        window.scrollTo = originalScroll;
    }, 300000); // Restore scroll function after 3 seconds
</script>
"""


# Root layout. The inline CSS hides the floating block-label chrome
# (`div.float.svelte-1mwvhlq`) rendered by this Gradio build's components.
# NOTE(review): the svelte class hash is version-specific — it will silently
# stop matching if the Gradio version changes.
with gr.Blocks(css="div.float.svelte-1mwvhlq {    position: absolute;    top: var(--block-label-margin);    left: var(--block-label-margin);    background: none;    border: none;}") as demo: 
      gr.Markdown("<div></div>")
      make_me()
      # NOTE(review): js_code contains a <script> tag, but gr.Markdown
      # sanitizes HTML by default, so this script likely never executes —
      # confirm, or switch to the Blocks `js=`/`head=` mechanism.
      gr.Markdown(js_code)


# Enable request queueing so many simultaneous generations can be in flight.
# NOTE(review): `concurrency_count` exists in Gradio 3.x but was removed in
# Gradio 4.x (replaced by `default_concurrency_limit`) — confirm the pinned
# Gradio version accepts this keyword.
demo.queue(concurrency_count=50)
demo.launch()