Spaces: Running on Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,9 +1,9 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import numpy as np
|
| 3 |
import random
|
| 4 |
-
import spaces # Uncomment if you're using ZeroGPU
|
| 5 |
-
from diffusers import DiffusionPipeline
|
| 6 |
import torch
|
|
|
|
|
|
|
| 7 |
from tags import participant_tags, tribe_tags, role_tags, skin_tone_tags, body_type_tags, tattoo_tags, piercing_tags, expression_tags, eye_tags, hair_style_tags, position_tags, fetish_tags, location_tags, camera_tags, atmosphere_tags
|
| 8 |
|
| 9 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
@@ -21,7 +21,21 @@ MAX_SEED = np.iinfo(np.int32).max
|
|
| 21 |
MAX_IMAGE_SIZE = 1024
|
| 22 |
|
| 23 |
|
| 24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
def infer(
|
| 26 |
prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
|
| 27 |
selected_participant_tags, selected_tribe_tags, selected_role_tags, selected_skin_tone_tags, selected_body_type_tags,
|
|
@@ -29,28 +43,46 @@ def infer(
|
|
| 29 |
selected_position_tags, selected_fetish_tags, selected_location_tags, selected_camera_tags, selected_atmosphere_tags,
|
| 30 |
active_tab, progress=gr.Progress(track_tqdm=True)
|
| 31 |
):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
# Handle the active tab and generate the prompt accordingly
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
)
|
| 53 |
-
final_prompt = f"score_9, score_8_up, score_7_up, source_anime, {', '.join(tag_list)}"
|
| 54 |
|
| 55 |
# Concatenate additional negative prompts
|
| 56 |
additional_negatives = "worst quality, bad quality, jpeg artifacts, source_cartoon, 3d, (censor), monochrome, blurry, lowres, watermark"
|
|
@@ -72,7 +104,7 @@ def infer(
|
|
| 72 |
|
| 73 |
return image, seed, f"Prompt: {final_prompt}\nNegative Prompt: {full_negative_prompt}"
|
| 74 |
|
| 75 |
-
|
| 76 |
css = """
|
| 77 |
#col-container {
|
| 78 |
margin: 0 auto;
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import numpy as np
|
| 3 |
import random
|
|
|
|
|
|
|
| 4 |
import torch
|
| 5 |
+
from diffusers import DiffusionPipeline
|
| 6 |
+
import importlib # to import tag modules dynamically
|
| 7 |
from tags import participant_tags, tribe_tags, role_tags, skin_tone_tags, body_type_tags, tattoo_tags, piercing_tags, expression_tags, eye_tags, hair_style_tags, position_tags, fetish_tags, location_tags, camera_tags, atmosphere_tags
|
| 8 |
|
| 9 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
|
| 21 |
MAX_IMAGE_SIZE = 1024
|
| 22 |
|
| 23 |
|
| 24 |
+
# Map each UI tab name to the tag module that backs it.  Adding a new tab
# only requires a new entry here rather than another elif branch.
_TAG_MODULES = {
    "Gay": "tags_gay",
    "Straight": "tags_straight",
    "Lesbian": "tags_lesbian",
}


def load_tags(active_tab):
    """Dynamically import and return the tag module for the selected tab.

    Parameters
    ----------
    active_tab : str
        The currently active UI tab; one of ``"Gay"``, ``"Straight"`` or
        ``"Lesbian"``.

    Returns
    -------
    module
        The imported ``tags_*`` module whose attributes (``participant_tags``,
        ``tribe_tags``, ...) are read by the caller.

    Raises
    ------
    ValueError
        If *active_tab* is not one of the recognised tab names.
    """
    try:
        module_name = _TAG_MODULES[active_tab]
    except KeyError:
        # Suppress the KeyError context: callers only care about the tab name.
        raise ValueError(f"Unknown tab: {active_tab}") from None
    return importlib.import_module(module_name)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@gradio.Interface
|
| 39 |
def infer(
|
| 40 |
prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
|
| 41 |
selected_participant_tags, selected_tribe_tags, selected_role_tags, selected_skin_tone_tags, selected_body_type_tags,
|
|
|
|
| 43 |
selected_position_tags, selected_fetish_tags, selected_location_tags, selected_camera_tags, selected_atmosphere_tags,
|
| 44 |
active_tab, progress=gr.Progress(track_tqdm=True)
|
| 45 |
):
|
| 46 |
+
# Dynamically load the correct tags module based on active tab
|
| 47 |
+
tags_module = load_tags(active_tab)
|
| 48 |
+
|
| 49 |
+
# Now use the tags from the loaded module
|
| 50 |
+
participant_tags = tags_module.participant_tags
|
| 51 |
+
tribe_tags = tags_module.tribe_tags
|
| 52 |
+
role_tags = tags_module.role_tags
|
| 53 |
+
skin_tone_tags = tags_module.skin_tone_tags
|
| 54 |
+
body_type_tags = tags_module.body_type_tags
|
| 55 |
+
tattoo_tags = tags_module.tattoo_tags
|
| 56 |
+
piercing_tags = tags_module.piercing_tags
|
| 57 |
+
expression_tags = tags_module.expression_tags
|
| 58 |
+
eye_tags = tags_module.eye_tags
|
| 59 |
+
hair_style_tags = tags_module.hair_style_tags
|
| 60 |
+
position_tags = tags_module.position_tags
|
| 61 |
+
fetish_tags = tags_module.fetish_tags
|
| 62 |
+
location_tags = tags_module.location_tags
|
| 63 |
+
camera_tags = tags_module.camera_tags
|
| 64 |
+
atmosphere_tags = tags_module.atmosphere_tags
|
| 65 |
+
|
| 66 |
# Handle the active tab and generate the prompt accordingly
|
| 67 |
+
tag_list = (
|
| 68 |
+
[participant_tags[tag] for tag in selected_participant_tags] +
|
| 69 |
+
[tribe_tags[tag] for tag in selected_tribe_tags] +
|
| 70 |
+
[role_tags[tag] for tag in selected_role_tags] +
|
| 71 |
+
[skin_tone_tags[tag] for tag in selected_skin_tone_tags] +
|
| 72 |
+
[body_type_tags[tag] for tag in selected_body_type_tags] +
|
| 73 |
+
[tattoo_tags[tag] for tag in selected_tattoo_tags] +
|
| 74 |
+
[piercing_tags[tag] for tag in selected_piercing_tags] +
|
| 75 |
+
[expression_tags[tag] for tag in selected_expression_tags] +
|
| 76 |
+
[eye_tags[tag] for tag in selected_eye_tags] +
|
| 77 |
+
[hair_style_tags[tag] for tag in selected_hair_style_tags] +
|
| 78 |
+
[position_tags[tag] for tag in selected_position_tags] +
|
| 79 |
+
[fetish_tags[tag] for tag in selected_fetish_tags] +
|
| 80 |
+
[location_tags[tag] for tag in selected_location_tags] +
|
| 81 |
+
[camera_tags[tag] for tag in selected_camera_tags] +
|
| 82 |
+
[atmosphere_tags[tag] for tag in selected_atmosphere_tags]
|
| 83 |
+
)
|
| 84 |
+
|
| 85 |
+
final_prompt = f"score_9, score_8_up, score_7_up, source_anime, {', '.join(tag_list)}"
|
|
|
|
|
|
|
| 86 |
|
| 87 |
# Concatenate additional negative prompts
|
| 88 |
additional_negatives = "worst quality, bad quality, jpeg artifacts, source_cartoon, 3d, (censor), monochrome, blurry, lowres, watermark"
|
|
|
|
| 104 |
|
| 105 |
return image, seed, f"Prompt: {final_prompt}\nNegative Prompt: {full_negative_prompt}"
|
| 106 |
|
| 107 |
+
# CSS for the layout
|
| 108 |
css = """
|
| 109 |
#col-container {
|
| 110 |
margin: 0 auto;
|