# Imports
import gradio as gr
import random
import spaces
import torch
import uuid
import json
import os
import numpy as np

from huggingface_hub import hf_hub_download
from diffusers import DiffusionPipeline
from transformers import pipeline
from PIL import Image

# Pre-Initialize
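# Resolve the compute device once at startup; "auto" selects CUDA when available and falls back to CPU.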
DEVICE = "auto"
if DEVICE == "auto":
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
print(f"[SYSTEM] | Using {DEVICE} type compute device.")

# Variables
HF_TOKEN = os.environ.get("HF_TOKEN")

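# 2**53 - 1, the largest JavaScript-safe integer; presumably chosen so seeds survive the JSON round-trip to the browser.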
MAX_SEED = 9007199254740991
DEFAULT_INPUT = ""
DEFAULT_NEGATIVE_INPUT = "(bad, ugly, amputation, abstract, blur, deformed, distorted, disfigured, disconnected, mutation, mutated, low quality, lowres), unfinished, text, signature, watermark, (limbs, legs, feet, arms, hands), (porn, nude, naked, nsfw)"
DEFAULT_MODEL = "Default"
DEFAULT_HEIGHT = 1024
DEFAULT_WIDTH = 1024

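# Auth headers prepared for Hugging Face API requests; not referenced elsewhere in this file.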
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {HF_TOKEN}" }

css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
    visibility: hidden
}
'''

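# Image classifier used to score generated images for NSFW content.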
repo_nsfw_classifier = pipeline("image-classification", model="Falconsai/nsfw_image_detection")

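# Base SDXL pipeline (serves both the "Default" and "Pixel" styles); LoRA adapters are loaded once and switched per request.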
repo_default = DiffusionPipeline.from_pretrained("fluently/Fluently-XL-Final", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
repo_default.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="default_base")
repo_default.load_lora_weights("artificialguybr/PixelArtRedmond", adapter_name="pixel_base")
repo_default.load_lora_weights("nerijs/pixel-art-xl", adapter_name="pixel_base_2")

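# FLUX.1-dev with the Turbo-Alpha LoRA for low-step sampling (the "Pro" style runs at 8 steps below).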
repo_pro = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, use_safetensors=True)
repo_pro.load_lora_weights(hf_hub_download("alimama-creative/FLUX.1-Turbo-Alpha", "diffusion_pytorch_model.safetensors"))

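# Map user-facing style names to pipelines; "Pixel" reuses the default pipeline with the pixel-art adapters enabled.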
repo_customs = {
    "Default": repo_default,
    "Realistic": DiffusionPipeline.from_pretrained("ehristoforu/Visionix-alpha", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
    "Anime": DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.1", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
    "Pixel": repo_default,
    "Pro": repo_pro,
}

# Functions
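# Save a generated image under a unique, seed-tagged filename and return its path.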
def save_image(img, seed):
    name = f"{seed}-{uuid.uuid4()}.png"
    img.save(name)
    return name
    
def get_seed(seed):
    # Treat a blank or missing seed as a request for a random one.
    seed = (seed or "").strip()
    if seed.isdigit():
        return int(seed)
    return random.randint(0, MAX_SEED)

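# Request a ZeroGPU worker for up to 30 seconds per call.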
@spaces.GPU(duration=30)
def generate(input=DEFAULT_INPUT, filter_input="", negative_input=DEFAULT_NEGATIVE_INPUT, model=DEFAULT_MODEL, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH, steps=1, guidance=0, number=1, seed=None, height_buffer=DEFAULT_HEIGHT, width_buffer=DEFAULT_WIDTH):

    repo = repo_customs[model or "Default"]
    filter_input = filter_input or ""
    negative_input = negative_input or DEFAULT_NEGATIVE_INPUT
    seed = get_seed(seed)

    print(input, filter_input, negative_input, model, height, width, steps, guidance, number, seed)
    
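    # Per-style sampling presets; the "Pixel" and default branches also activate their LoRA adapters.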
    if model == "Realistic":   
        steps_set = 25
        guidance_set = 7
    elif model == "Anime":   
        steps_set = 25
        guidance_set = 7
    elif model == "Pixel":   
        steps_set = 10
        guidance_set = 1.5
        repo.set_adapters(["pixel_base", "pixel_base_2"], adapter_weights=[1, 1])
    elif model == "Pro":   
        steps_set = 8
        guidance_set = 3.5
    else:
        steps_set = 25
        guidance_set = 7
        repo.set_adapters(["default_base"], adapter_weights=[0.7])

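    # Fall back to the preset when the slider value is falsy (0); note the Steps slider's minimum of 1 means only guidance can hit this path from the UI.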
    if not steps:
        steps = steps_set
    if not guidance:
        guidance = guidance_set
    
    print(steps, guidance)
    
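    # Move the selected pipeline onto the compute device for this request.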
    repo.to(DEVICE)
    
    parameters = {
        "prompt": input,
        "height": height,
        "width": width,
        "num_inference_steps": steps,
        "guidance_scale": guidance,
        "num_images_per_prompt": number,
        "generator": torch.Generator().manual_seed(seed),
        "output_type":"pil",
    }

    if model != "Pro":
        parameters["negative_prompt"] = filter_input + negative_input

    images = repo(**parameters).images
    image_paths = [save_image(img, seed) for img in images]

    print(image_paths)
    
    nsfw_prediction = repo_nsfw_classifier(image_paths[0])

    print(nsfw_prediction)

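    # Flatten the first image into a JSON array of RGBA pixel values so the client can read raw pixels back from the Buffer textbox.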
    buffer_image = images[0].convert("RGBA").resize((width_buffer, height_buffer))
    
    image_array = np.array(buffer_image)
    pixel_data = image_array.flatten().tolist()
    
    buffer_json = json.dumps(pixel_data)

    return image_paths, {item['label']: round(item['score'], 3) for item in nsfw_prediction}, buffer_json

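# No-op endpoint; judging by the log message, an external pinger can hit this to keep the Space awake.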
def cloud():
    print("[CLOUD] | Space maintained.")

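# Minimal GPU task; judging by the log message, it is used to fetch or refresh a ZeroGPU token.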
@spaces.GPU(duration=0.1)
def gpu():
    print("[GPU] | Fetched GPU token.")
    
# Initialize
with gr.Blocks(css=css) as main:
    with gr.Column():
        gr.Markdown("🪄 Generate high-quality images in all styles.")
        
    with gr.Column():
        input = gr.Textbox(lines=1, value=DEFAULT_INPUT, label="Input")
        filter_input = gr.Textbox(lines=1, value="", label="Input Filter")
        negative_input = gr.Textbox(lines=1, value=DEFAULT_NEGATIVE_INPUT, label="Input Negative")
        model = gr.Dropdown(choices=list(repo_customs.keys()), value="Default", label="Model")
        height = gr.Slider(minimum=8, maximum=2160, step=1, value=DEFAULT_HEIGHT, label="Height")
        width = gr.Slider(minimum=8, maximum=2160, step=1, value=DEFAULT_WIDTH, label="Width")
        steps = gr.Slider(minimum=1, maximum=100, step=1, value=25, label="Steps")
        guidance = gr.Slider(minimum=0, maximum=100, step=0.1, value=5, label="Guidance")
        number = gr.Slider(minimum=1, maximum=4, step=1, value=1, label="Number")
        seed = gr.Textbox(lines=1, value="", label="Seed (Blank for random)")
        height_buffer = gr.Slider(minimum=1, maximum=2160, step=1, value=DEFAULT_HEIGHT, label="Buffer Height")
        width_buffer = gr.Slider(minimum=1, maximum=2160, step=1, value=DEFAULT_WIDTH, label="Buffer Width")
        submit = gr.Button("▶")
        maintain = gr.Button("☁️")
        get_gpu = gr.Button("💻")

    with gr.Column():
        output = gr.Gallery(columns=1, label="Image")
        output_2 = gr.Label()
        output_3 = gr.Textbox(lines=1, value="", label="Buffer")
            
    submit.click(generate, inputs=[input, filter_input, negative_input, model, height, width, steps, guidance, number, seed, height_buffer, width_buffer], outputs=[output, output_2, output_3], queue=False)
    maintain.click(cloud, inputs=[], outputs=[], queue=False)
    get_gpu.click(gpu, inputs=[], outputs=[], queue=False)

main.launch(show_api=True)