import gradio as gr
import numpy as np
import random
import torch
from diffusers import DiffusionPipeline
from tags import participant_tags, tribe_tags, skin_tone_tags, body_type_tags, tattoo_tags, piercing_tags, expression_tags, eye_tags, hair_style_tags, position_tags, fetish_tags, location_tags, camera_tags, atmosphere_tags
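# tags.py is expected to expose plain dictionaries that map a human-readable
# label (shown as a checkbox in the UI) to the prompt fragment it expands to.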

device = "cuda" if torch.cuda.is_available() else "cpu"

# Default model version
model_repo_id = "John6666/wai-ani-nsfw-ponyxl-v8-sdxl"  # Default model V8

if torch.cuda.is_available():
    torch_dtype = torch.float16
else:
    torch_dtype = torch.float32

pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
pipe = pipe.to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

def update_model_version(version):
    """Reload the pipeline for the selected model version and return that version string."""
    global model_repo_id, pipe
    model_repo_id = f"John6666/wai-ani-nsfw-ponyxl-{version}-sdxl"
    pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
    pipe = pipe.to(device)
    print(f"Model switched to {model_repo_id}")
    return version  # the returned value becomes the new gr.State value

# @spaces.GPU  # [uncomment to use ZeroGPU; also add `import spaces` at the top]
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
          selected_participant_tags, selected_tribe_tags, selected_skin_tone_tags, selected_body_type_tags,
          selected_tattoo_tags, selected_piercing_tags, selected_expression_tags, selected_eye_tags,
          selected_hair_style_tags, selected_position_tags, selected_fetish_tags, selected_location_tags,
          selected_camera_tags, selected_atmosphere_tags, active_tab, progress=gr.Progress(track_tqdm=True)):
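    # Pony-derived SDXL checkpoints are typically conditioned on "score_*" quality
    # tags, so they are prepended to whichever prompt source is active below.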
    if active_tab == "Prompt Input":
        final_prompt = f'score_9, score_8_up, score_7_up, source_anime, {prompt}'
    else:
        selected_tags = (
            [participant_tags[tag] for tag in selected_participant_tags] +
            [tribe_tags[tag] for tag in selected_tribe_tags] +
            [skin_tone_tags[tag] for tag in selected_skin_tone_tags] +
            [body_type_tags[tag] for tag in selected_body_type_tags] +
            [tattoo_tags[tag] for tag in selected_tattoo_tags] +
            [piercing_tags[tag] for tag in selected_piercing_tags] +
            [expression_tags[tag] for tag in selected_expression_tags] +
            [eye_tags[tag] for tag in selected_eye_tags] +
            [hair_style_tags[tag] for tag in selected_hair_style_tags] +
            [position_tags[tag] for tag in selected_position_tags] +
            [fetish_tags[tag] for tag in selected_fetish_tags] +
            [location_tags[tag] for tag in selected_location_tags] +
            [camera_tags[tag] for tag in selected_camera_tags] +
            [atmosphere_tags[tag] for tag in selected_atmosphere_tags]
        )
        tags_text = ', '.join(selected_tags)
        final_prompt = f'score_9, score_8_up, score_7_up, source_anime, {tags_text}'

    additional_negatives = "worst quality, bad quality, jpeg artifacts, source_cartoon, 3d, (censor), monochrome, blurry, lowres, watermark"
    full_negative_prompt = f"{additional_negatives}, {negative_prompt}"

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator().manual_seed(seed)

    # Generate the image with the final prompts
    image = pipe(
        prompt=final_prompt,
        negative_prompt=full_negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator
    ).images[0]

    return image, seed, f"Prompt used: {final_prompt}\nNegative prompt used: {full_negative_prompt}"
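# A minimal smoke-test sketch (assumption: run outside the UI purely to exercise the
# pipeline; the Gradio app below supplies the same arguments from its components):
#
#   image, used_seed, info = infer(
#       "1girl, solo", "", 0, True, 1024, 1024, 7.0, 35,
#       [], [], [], [], [], [], [], [], [], [], [], [], [], [],
#       "Prompt Input",
#   )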

# CSS for button styling and horizontal layout
css = """
#col-container {
    margin: 0 auto;
    max-width: 1280px;
}

#left-column {
    width: 50%;
    display: inline-block;
    padding-right: 20px;
    padding-left: 20px;
    vertical-align: top;
}

#right-column {
    width: 50%;
    display: inline-block;
    vertical-align: top;
    padding-left: 20px;
    margin-top: 53px;
}

#left-column > * {
    margin-bottom: 20px;
}

#run-button {
    width: 100%;
    margin-top: 10px;
    display: block;
}

#prompt-info {
    margin-bottom: 20px;
}

#result {
    margin-bottom: 20px;
}

.gradio-tabs > .tab-item {
    margin-bottom: 20px;
}

#prompt {
    margin-bottom: 20px;
}

#button-group {
    display: flex;
    justify-content: space-between;
}

#button-group .gradio-button {
    flex: 1;
    margin: 0 10px;
    text-align: center;
}
"""

# Gradio interface setup
with gr.Blocks(css=css) as demo:

    with gr.Row():
        with gr.Column(elem_id="left-column"):
            gr.Markdown("""# Rainbow Media X""")
            result = gr.Image(label="Result", show_label=False, elem_id="result")
            prompt_info = gr.Textbox(label="Prompts Used", lines=3, interactive=False, elem_id="prompt-info")

            # Advanced Settings and Run Button
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt = gr.Textbox(
                    label="Negative prompt",
                    max_lines=1,
                    placeholder="Enter a negative prompt",
                    visible=True,
                )

                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=0,
                )

                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

                with gr.Row():
                    width = gr.Slider(
                        label="Width",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=1024,
                    )

                    height = gr.Slider(
                        label="Height",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=1024,
                    )

                with gr.Row():
                    guidance_scale = gr.Slider(
                        label="Guidance scale",
                        minimum=0.0,
                        maximum=10.0,
                        step=0.1,
                        value=7,
                    )

                    num_inference_steps = gr.Slider(
                        label="Number of inference steps",
                        minimum=1,
                        maximum=50,
                        step=1,
                        value=35,
                    )

            run_button = gr.Button("Run", elem_id="run-button")

        with gr.Column(elem_id="right-column"):
            active_tab = gr.State("Prompt Input")
            model_version_state = gr.State("v8")  # Default model version is v8

            with gr.Tabs() as tabs:
                with gr.TabItem("Prompt Input") as prompt_tab:
                    prompt = gr.Textbox(
                        label="Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt",
                        container=False,
                        elem_id="prompt"
                    )
                    prompt_tab.select(lambda: "Prompt Input", inputs=None, outputs=active_tab)

                with gr.TabItem("Tag Selection") as tag_tab:
                    selected_participant_tags = gr.CheckboxGroup(choices=list(participant_tags.keys()), label="Participant Tags")
                    selected_tribe_tags = gr.CheckboxGroup(choices=list(tribe_tags.keys()), label="Tribe Tags")
                    selected_skin_tone_tags = gr.CheckboxGroup(choices=list(skin_tone_tags.keys()), label="Skin Tone Tags")
                    selected_body_type_tags = gr.CheckboxGroup(choices=list(body_type_tags.keys()), label="Body Type Tags")
                    selected_tattoo_tags = gr.CheckboxGroup(choices=list(tattoo_tags.keys()), label="Tattoo Tags")
                    selected_piercing_tags = gr.CheckboxGroup(choices=list(piercing_tags.keys()), label="Piercing Tags")
                    selected_expression_tags = gr.CheckboxGroup(choices=list(expression_tags.keys()), label="Expression Tags")
                    selected_eye_tags = gr.CheckboxGroup(choices=list(eye_tags.keys()), label="Eye Tags")
                    selected_hair_style_tags = gr.CheckboxGroup(choices=list(hair_style_tags.keys()), label="Hair Style Tags")
                    selected_position_tags = gr.CheckboxGroup(choices=list(position_tags.keys()), label="Position Tags")
                    selected_fetish_tags = gr.CheckboxGroup(choices=list(fetish_tags.keys()), label="Fetish Tags")
                    selected_location_tags = gr.CheckboxGroup(choices=list(location_tags.keys()), label="Location Tags")
                    selected_camera_tags = gr.CheckboxGroup(choices=list(camera_tags.keys()), label="Camera Tags")
                    selected_atmosphere_tags = gr.CheckboxGroup(choices=list(atmosphere_tags.keys()), label="Atmosphere Tags")
                    tag_tab.select(lambda: "Tag Selection", inputs=None, outputs=active_tab)

            # Horizontal buttons to switch models
            with gr.Row(elem_id="button-group"):
                link_button_v7 = gr.Button("V7 Model", variant="primary")
                link_button_v8 = gr.Button("V8 Model", variant="primary")
                link_button_v11 = gr.Button("V11 Model", variant="primary")

            # Set the model version based on the button clicked
            link_button_v7.click(lambda: update_model_version("v7"), inputs=None, outputs=[model_version_state])
            link_button_v8.click(lambda: update_model_version("v8"), inputs=None, outputs=[model_version_state])
            link_button_v11.click(lambda: update_model_version("v11"), inputs=None, outputs=[model_version_state])

        run_button.click(
            infer,
            inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
                    selected_participant_tags, selected_tribe_tags, selected_skin_tone_tags, selected_body_type_tags,
                    selected_tattoo_tags, selected_piercing_tags, selected_expression_tags, selected_eye_tags,
                    selected_hair_style_tags, selected_position_tags, selected_fetish_tags, selected_location_tags,
                    selected_camera_tags, selected_atmosphere_tags, active_tab],
            outputs=[result, seed, prompt_info]
        )

demo.queue().launch()
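
# Local-run sketch (assumptions: this file is saved as app.py and tags.py sits
# next to it; package names are the usual ones for this stack):
#
#   pip install gradio torch diffusers transformers accelerate
#   python app.py
#
# demo.queue() queues concurrent generation requests so the single pipeline
# instance is not driven by many requests at once; launch() then serves the UI
# on http://127.0.0.1:7860 by default.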