Upload 6 files

- app.py +20 -1
- ctag.py +95 -1
- data/noob_e621.json +2 -2
app.py
CHANGED
@@ -1,5 +1,6 @@
 import gradio as gr
-from ctag import MODELS, DEFAULT_DF, search_char_dict, on_select_df
+from ctag import (MODELS, DEFAULT_DF, DEFAULT_SERIES_DF, search_char_dict, on_select_df,
+                  search_series_dict, on_select_series_df, update_series_chars, apply_series, on_select_series_gallery)
 from hft2i import (gen_image, save_gallery, get_models, get_def_model, change_model, warm_model, get_model_info_md, get_recom_prompt_mode, update_prompt)
 
 MAX_IMAGES = 6
@@ -23,6 +24,19 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=CSS) as app:
     search_input = gr.Textbox(label="Search for characters or series:", placeholder="sousou no frieren")
     search_detail = gr.Checkbox(label="Show character detail", value=True)
     search_model = gr.CheckboxGroup(label="Models", choices=MODELS, value=MODELS, scale=1)
+    with gr.Accordion("Search from series", open=False):
+        with gr.Row(equal_height=True):
+            search_series_input = gr.Textbox(label="Search for series:", placeholder="sousou no frieren")
+            search_series_detail = gr.Checkbox(label="Show series detail", value=True)
+        with gr.Row(equal_height=True):
+            search_series = gr.Textbox(label="Selected series", value="", interactive=False)
+            search_chars = gr.Dropdown(label="Characters", choices=[""], value="", allow_custom_value=True)
+            search_series_button = gr.Button("Add to search")
+        with gr.Row(equal_height=True):
+            search_series_output = gr.Dataframe(label="Select series", value=DEFAULT_SERIES_DF, type="pandas", wrap=True, interactive=False)
+            with gr.Column():
+                search_series_md = gr.Markdown("<br><br><br>", elem_classes="info")
+                search_series_gallery = gr.Gallery(label="Characters", allow_preview=False, value=[], columns=4, rows=4, elem_id="gallery", show_share_button=False, interactive=False)
     with gr.Group():
         with gr.Row(equal_height=True):
             search_tag = gr.Textbox(label="Output tag", value="", show_copy_button=True, interactive=False)
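The new `search_chars` dropdown starts with a single empty choice and `allow_custom_value=True` because its options are swapped in later, per selected series, through `gr.update`. A minimal sketch of that dynamic-dropdown pattern, separate from the Space's code (the `SERIES` data and component names here are illustrative):

```python
import gradio as gr

SERIES = {"yuru yuri": ["oomuro sakurako", "funami yui"]}  # illustrative stand-in data

def fill_chars(series: str):
    # Swap the dropdown's choices for the characters of the selected series.
    chars = SERIES.get(series, [])
    return gr.update(choices=chars, value=chars[0] if chars else "")

with gr.Blocks() as demo:
    series_box = gr.Textbox(label="Selected series")
    chars_dd = gr.Dropdown(label="Characters", choices=[""], value="", allow_custom_value=True)
    series_box.change(fill_chars, [series_box], [chars_dd], queue=False)

if __name__ == "__main__":
    demo.launch()
```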
@@ -62,6 +76,11 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=CSS) as app:
         inputs=[search_input, search_model], outputs=[search_output], trigger_mode="always_last")
     search_output.select(on_select_df, [search_output, search_detail], [search_tag, search_tag_model, search_md], queue=False, show_api=False)
     search_tag.change(update_prompt, [search_tag, search_tag_model], [prompt, model_name[0], recom_prompt_mode], queue=False, show_api=False)
+    search_series_input.change(search_series_dict, [search_series_input], [search_series_output])
+    search_series_output.select(on_select_series_df, [search_series_output, search_series_detail], [search_series, search_series_md, search_series_gallery], queue=False, show_api=False)
+    search_series.change(update_series_chars, [search_series], [search_chars], queue=False, show_api=False)
+    search_series_gallery.select(on_select_series_gallery, [search_series_gallery], [search_input], queue=False, show_api=False)
+    search_series_button.click(apply_series, [search_series, search_chars], [search_input], queue=False, show_api=False)
     for i, o in enumerate(output_images):
         img_i = gr.Number(i, visible=False)
         model_name[i].change(change_model, [model_name[i]], [model_info[i]], queue=False, show_api=False)\
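The last two hooks close the loop: clicking a character thumbnail pushes a query back into `search_input`, which the existing `search_input.change(...)` wiring then resolves against the character dictionary. A minimal, self-contained sketch of just that gallery-to-textbox hand-off (the handler mirrors `on_select_series_gallery` from ctag.py; the sample gallery entry is made up):

```python
import gradio as gr

def on_pick(gallery: list, evt: gr.SelectData):
    # Gallery items arrive as (image, caption) pairs; reverse a "Given Family" caption
    # into the "family given" order the character search expects.
    caption = gallery[evt.index][1]
    return " ".join(list(reversed(caption.lower().split(" "))))

with gr.Blocks() as demo:
    gallery = gr.Gallery(value=[("https://example.com/frieren.png", "Frieren")], columns=4)
    query = gr.Textbox(label="Search for characters or series:")
    gallery.select(on_pick, [gallery], [query], queue=False, show_api=False)

if __name__ == "__main__":
    demo.launch()
```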
ctag.py
CHANGED
@@ -9,6 +9,7 @@ from pykakasi import kakasi
 
 MODELS = ["Animagine 3.1", "NoobAI XL", "Illustrious"]
 DEFAULT_DF = pd.DataFrame({"Character": ["oomuro sakurako (yuru yuri) (NoobAI XL)", "kafuu chino (gochuumon wa usagi desu ka?) (Animagine 3.1)"]})
+DEFAULT_SERIES_DF = pd.DataFrame({"Series": ["yuru yuri", "gochuumon wa usagi desu ka?"]})
 
 
 kks = kakasi()
@@ -71,6 +72,7 @@ def find_char_info(query: str):
         d["gender"] = i["gender"]
         d["image"] = i["character_image"]
         d["desc"] = i["desc"]
+        d["wiki"] = ""
         aid = str(i["anime_id"])
         if aid in series_wiki_dict.keys(): d["wiki"] = series_wiki_dict[aid]
         return d
@@ -79,6 +81,41 @@ def find_char_info(query: str):
         return None
 
 
+def find_series_info(query: str):
+    user_agent = get_user_agent()
+    headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
+    base_url = 'https://www.animecharactersdatabase.com/api_series_characters.php'
+    params = {"anime_q": query}
+    session = requests.Session()
+    retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
+    session.mount("https://", HTTPAdapter(max_retries=retries))
+    try:
+        r = session.get(base_url, params=params, headers=headers, stream=True, timeout=(5.0, 15))
+        if not r.ok or r.json() == -1: return None
+        j = dict(r.json())
+        if "search_results" not in j or len(j["search_results"]) == 0: return None
+        d = {}
+        i = j["search_results"][0]
+        d["name"] = i["anime_name"]
+        d["id"] = i["anime_id"]
+        d["image"] = i["anime_image"]
+        d["wiki"] = ""
+        char_url = i["characters_url"]
+        aid = str(i["anime_id"])
+        if aid in series_wiki_dict.keys(): d["wiki"] = series_wiki_dict[aid]
+        d["chars"] = {}
+        r = session.get(char_url, headers=headers, stream=True, timeout=(5.0, 15))
+        if not r.ok or r.json() == -1: return d
+        j = dict(r.json())
+        if "characters" not in j: return d
+        for c in j["characters"]:
+            d["chars"][c["name"]] = c["character_image"]
+        return d
+    except Exception as e:
+        print(e)
+        return None
+
+
 def load_json(filename: str):
     try:
         with open(filename, encoding="utf-8") as f:
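`find_series_info` makes two ACDB requests: one to `api_series_characters.php` with `anime_q` to resolve the series, then one to the returned `characters_url` to collect character thumbnails. A hypothetical call showing the dict shape it fills in (actual values depend on the live animecharactersdatabase.com API):

```python
info = find_series_info("sousou no frieren")
if info is not None:
    print(info["name"], info["id"])     # series title and ACDB anime id
    print(info["image"], info["wiki"])  # cover image URL, wiki URL ("" if none cached)
    for char_name, image_url in info["chars"].items():
        print(f"  {char_name}: {image_url}")
else:
    print("no match or API unavailable")
```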
@@ -97,6 +134,17 @@ def load_char_dict():
     return d
 
 
+def create_series_dict(char_dict: dict):
+    d = {}
+    for model, value in char_dict.items():
+        for name, v in value.items():
+            if not v["series"]: continue
+            d[v["series"]] = d[v["series"]] + [v["name"]] if v["series"] in d.keys() and isinstance(d[v["series"]], list) else [v["name"]]
+    for k in sorted(d.keys()):
+        d[k] = sorted(list(set(d[k])))
+    return d
+
+
 def create_char_df(char_dict: dict):
     d = {}
     m = {}
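`create_series_dict` inverts the per-model character dictionary into a mapping from series to a sorted, de-duplicated list of character names. A toy call, assuming entries shaped like the loader's output (real entries carry more keys than shown here):

```python
char_dict = {
    "NoobAI XL": {
        "oomuro sakurako": {"name": "oomuro sakurako", "series": "yuru yuri"},
        "kafuu chino": {"name": "kafuu chino", "series": "gochuumon wa usagi desu ka?"},
    },
    "Animagine 3.1": {
        "oomuro sakurako": {"name": "oomuro sakurako", "series": "yuru yuri"},
    },
}
print(create_series_dict(char_dict))
# {'yuru yuri': ['oomuro sakurako'], 'gochuumon wa usagi desu ka?': ['kafuu chino']}
```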
@@ -112,9 +160,55 @@ def create_char_df(char_dict: dict):
 
 
 char_dict = load_char_dict()
+series_dict = create_series_dict(char_dict)
+series_list = list(sorted(series_dict.keys()))
 char_df, tag_dict, model_dict = create_char_df(char_dict)
 
 
+def search_series_dict(q: str, progress=gr.Progress(track_tqdm=True)):
+    try:
+        MAX_COLS = 20
+        if q.strip():
+            rq = to_roman(q).lower()
+            search_results = pd.DataFrame({"Series": [s for s in series_list if rq in s]})
+        else:
+            return DEFAULT_SERIES_DF
+        if len(search_results.columns) > MAX_COLS:
+            search_results = search_results.iloc[:, :MAX_COLS]
+        return search_results
+    except Exception as e:
+        print(e)
+
+
+def on_select_series_df(df: pd.DataFrame, is_detail: bool, evt: gr.SelectData):
+    gallery = []
+    if is_detail: info = find_series_info(evt.value.split("(")[0].strip())
+    else: info = None
+    if info is not None:
+        md = f'## [{info["name"]}]({info["wiki"]})\n![{info["name"]}]({info["image"]}#center)'
+        for cap, image in info["chars"].items():
+            gallery.append((image, cap))
+    else: md = f'## {evt.value}'
+    return evt.value, md, gallery
+
+
+def on_select_series_gallery(gallery: list, evt: gr.SelectData):
+    return " ".join(list(reversed(gallery[evt.index][1].lower().split(" "))))
+
+
+def update_series_chars(series: str):
+    choices = list(series_dict.get(series, []))
+    return gr.update(choices=choices, value=choices[0] if len(choices) > 0 else "")
+
+
+def apply_series(series: str, char: str):
+    q = char if char else ""
+    q = q + " (" + series + ")" if series else q
+    q = q.strip()
+    if not q: return gr.update()
+    else: return q
+
+
 def search_char_dict(q: str, models: list[str], progress=gr.Progress(track_tqdm=True)):
     try:
         MAX_COLS = 50
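`update_series_chars` and `apply_series` are the glue behind the "Add to search" button: the dropdown is refilled per selected series, and the button composes a `character (series)` query for the main search box. `apply_series` is a pure string helper, so its behavior is easy to pin down:

```python
print(apply_series("yuru yuri", "oomuro sakurako"))  # -> "oomuro sakurako (yuru yuri)"
print(apply_series("yuru yuri", ""))                 # -> "(yuru yuri)"  (series only)
apply_series("", "")  # -> gr.update(), i.e. leave the search box unchanged
```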
@@ -135,7 +229,7 @@ def search_char_dict(q: str, models: list[str], progress=gr.Progress(track_tqdm=True)):
 
 def on_select_df(df: pd.DataFrame, is_detail: bool, evt: gr.SelectData):
     d = tag_dict.get(evt.value, None)
-    if d is None: return ""
+    if d is None: return "", "", "<br><br><br>"
     #print(d) #
     if is_detail: info = find_char_info(d["name"])
     else: info = None
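The early-return fix lines up `on_select_df` with its three outputs in app.py (`[search_tag, search_tag_model, search_md]`): a bare `return ""` cannot populate all three when a clicked cell has no `tag_dict` entry, so the handler now clears the tag, the model hint, and the detail panel together:

```python
# value returned when the clicked cell has no tag_dict entry:
tag, tag_model, md = "", "", "<br><br><br>"  # -> search_tag, search_tag_model, search_md
```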
data/noob_e621.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:932d512d646a1df0a54f588e6dd1153be3f1176b76736e4611c9a240ad3987e7
+size 38623283