import gradio as gr

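# Hugging Face model repos selectable from the UI (display name -> repo id).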
models = [
    {"name": "Claudfuen 1", "url": "sd-concepts-library/ahx-model-6"},
    {"name": "Deliberate", "url": "Masagin/Deliberate"},
    {"name": "Seek Art Mega", "url": "coreco/seek.art_MEGA"},
    {"name": "Realistic Vision 1.4", "url": "SG161222/Realistic_Vision_V1.4"},
    {"name": "Dreamshaper", "url": "Lykon/DreamShaper"},
]

current_model = models[0]

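# Prompt generator: load the MagicPrompt Space as a callable interface.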
text_gen = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")

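# Wrap each model repo as a callable Gradio interface; list order matches the dropdown choices.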
models2 = []
for model in models:
    model_url = f"models/{model['url']}"
    loaded_model = gr.Interface.load(model_url, live=True, preprocess=True)
    models2.append(loaded_model)


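# Expand a short prompt idea into a full image prompt using the loaded prompt generator.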
def text_it(inputs, text_gen=text_gen):
    return text_gen(inputs)


def set_model(current_model_index):
    global current_model
    current_model = models[current_model_index]
    # The change handler below targets all eight output images,
    # so return one label update per output.
    return [gr.update(label=current_model["name"]) for _ in range(8)]


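# Generate one image: call the interface for the selected model (dropdown index) with the prompt.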
def send_it(inputs, model_choice):
    proc = models2[model_choice]
    return proc(inputs)


css = """"""

with gr.Blocks(css=css) as myface:
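    # Twitter card / OpenGraph metadata so the Space renders a link preview when shared.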
    gr.HTML(
        """<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <meta name="twitter:card" content="player"/>
    <meta name="twitter:site" content=""/>
    <meta name="twitter:player" content="https://omnibus-maximum-multiplier-places.hf.space"/>
    <meta name="twitter:player:stream" content="https://omnibus-maximum-multiplier-places.hf.space"/>
    <meta name="twitter:player:width" content="100%"/>
    <meta name="twitter:player:height" content="600"/>    
    <meta property="og:title" content="Embedded Live Viewer"/>
    <meta property="og:description" content="Tweet Genie - A Huggingface Space"/>
    <meta property="og:image" content="https://cdn.glitch.global/80dbe92e-ce75-44af-84d5-74a2e21e9e55/omnicard.png?v=1676772531627"/>
    <!--<meta http-equiv="refresh" content="0; url=https://huggingface.co/spaces/corbt/tweet-genie">-->
  </head>
</html>
"""
    )

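    # Prompt input, model selector, and the two action buttons.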
    with gr.Row():
        with gr.Row():
            input_text = gr.Textbox(label="Prompt idea", lines=1)
            # Model selection dropdown
            model_name1 = gr.Dropdown(
                label="Choose Model",
                choices=[m["name"] for m in models],
                type="index",
                value=current_model["name"],
                interactive=True,
            )
        with gr.Row():
            see_prompts = gr.Button("Generate Prompts")
            run = gr.Button("Generate Images", variant="primary")
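    # Main tab: two rows of four image outputs, each followed by a row of prompt textboxes.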
    with gr.Tab("Main"):
        with gr.Row():
            output1 = gr.Image(label=f"{current_model['name']}")
            output2 = gr.Image(label=f"{current_model['name']}")
            output3 = gr.Image(label=f"{current_model['name']}")
            output4 = gr.Image(label=f"{current_model['name']}")
        with gr.Row():
            magic1 = gr.Textbox(lines=4)
            magic2 = gr.Textbox(lines=4)
            magic3 = gr.Textbox(lines=4)
            magic4 = gr.Textbox(lines=4)

        with gr.Row():
            output5 = gr.Image(label=f"{current_model['name']}")
            output6 = gr.Image(label=f"{current_model['name']}")
            output7 = gr.Image(label=f"{current_model['name']}")
            output8 = gr.Image(label=f"{current_model['name']}")
        with gr.Row():
            magic5 = gr.Textbox(lines=4)
            magic6 = gr.Textbox(lines=4)
            magic7 = gr.Textbox(lines=4)
            magic8 = gr.Textbox(lines=4)

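    # Relabel every output image when a different model is selected.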
    model_name1.change(
        set_model,
        inputs=model_name1,
        outputs=[output1, output2, output3, output4, output5, output6, output7, output8],
    )

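    # Generate Images: each prompt textbox drives the selected model and fills its matching output.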
    run.click(send_it, inputs=[magic1, model_name1], outputs=[output1])
    run.click(send_it, inputs=[magic2, model_name1], outputs=[output2])
    run.click(send_it, inputs=[magic3, model_name1], outputs=[output3])
    run.click(send_it, inputs=[magic4, model_name1], outputs=[output4])
    run.click(send_it, inputs=[magic5, model_name1], outputs=[output5])
    run.click(send_it, inputs=[magic6, model_name1], outputs=[output6])
    run.click(send_it, inputs=[magic7, model_name1], outputs=[output7])
    run.click(send_it, inputs=[magic8, model_name1], outputs=[output8])

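    # Generate Prompts: fill all eight textboxes with prompts expanded from the prompt idea.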
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic1])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic2])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic3])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic4])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic5])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic6])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic7])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic8])

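# Queue incoming requests and launch the app (concurrency_count and max_threads are Gradio 3.x options).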
myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)