# NOTE(review): the following header is Hugging Face file-viewer chrome that was
# captured with the download, kept here as a comment so the file stays runnable:
# uploaded by John6666 ("Upload 6 files"), commit ebbb11c (verified), 9.06 kB.
import gradio as gr
import pandas as pd
import json
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
from pykakasi import kakasi
# Model names recognized as tag suffixes, e.g. "name (series) (model)".
MODELS = ["Animagine 3.1", "NoobAI XL", "Illustrious"]
# Example rows shown in the result tables before the user enters a query.
DEFAULT_DF = pd.DataFrame({"Character": ["oomuro sakurako (yuru yuri) (NoobAI XL)", "kafuu chino (gochuumon wa usagi desu ka?) (Animagine 3.1)"]})
DEFAULT_SERIES_DF = pd.DataFrame({"Series": ["yuru yuri", "gochuumon wa usagi desu ka?"]})
# Shared pykakasi transliterator (Japanese -> romaji) used by to_roman().
kks = kakasi()
def to_roman(s: str):
    """Transliterate Japanese text to Hepburn romaji via pykakasi.

    Best-effort: if conversion fails for any reason the error is printed
    and the input string is returned unchanged.
    """
    try:
        pieces = [chunk.get("hepburn", "") for chunk in kks.convert(s)]
        return "".join(pieces)
    except Exception as e:
        print(e)
        return s
def get_user_agent():
    """Return the fixed desktop-Firefox User-Agent string used for ACDB requests."""
    return 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
def get_series_wiki_dict():
    """Fetch the ACDB anime_id -> Wikipedia-URL map.

    Queries the AnimeCharactersDatabase "title_wiki_links" endpoint and
    returns {str(anime_id): wikipedia_url}. Best-effort: returns {} on
    any HTTP or parsing failure (errors are printed, not raised).
    """
    headers = {'User-Agent': get_user_agent(), 'content-type': 'application/json'}
    # The endpoint selector lives in the query string; no extra params needed.
    base_url = 'https://www.animecharactersdatabase.com/api_series_characters.php?title_wiki_links'
    retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
    try:
        # FIX: close the session deterministically (the original leaked it).
        with requests.Session() as session:
            session.mount("https://", HTTPAdapter(max_retries=retries))
            r = session.get(base_url, params={}, headers=headers, stream=True, timeout=(5.0, 15))
            if not r.ok: return {}
            # dict(...) raises (caught below) if the API returned -1 / non-mapping.
            j = dict(r.json())
            if "titles" not in j: return {}
            return {str(i["anime_id"]): i["wikipedia_url"] for i in j["titles"]}
    except Exception as e:
        print(e)
        return {}
# Fetched once at import time; maps str(anime_id) -> Wikipedia URL ({} on failure).
series_wiki_dict = get_series_wiki_dict()
def find_char_info(query: str):
    """Look up a single character on AnimeCharactersDatabase.

    Tries `query` as-is, then with its words reversed (name-order
    fallback). Returns a dict with keys name/series/gender/image/desc/wiki
    for the first search hit, or None on any failure (errors printed).
    """
    headers = {'User-Agent': get_user_agent(), 'content-type': 'application/json'}
    base_url = 'https://www.animecharactersdatabase.com/api_series_characters.php'
    retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
    try:
        # FIX: close the session deterministically (the original leaked it)
        # and parse each response body once instead of calling r.json() twice.
        with requests.Session() as session:
            session.mount("https://", HTTPAdapter(max_retries=retries))
            payload = None
            # Try the query as given, then word-reversed as a fallback.
            for q in (query, " ".join(reversed(query.split(" ")))):
                r = session.get(base_url, params={"character_q": q}, headers=headers,
                                stream=True, timeout=(5.0, 15))
                if r.ok:
                    body = r.json()
                    if body != -1:  # API signals "no match" with -1
                        payload = body
                        break
            if payload is None: return None
            j = dict(payload)
            results = j.get("search_results", [])
            if not results: return None
            hit = results[0]
            return {
                "name": hit["name"],
                "series": hit["anime_name"],
                "gender": hit["gender"],
                "image": hit["character_image"],
                "desc": hit["desc"],
                # Wikipedia link comes from the module-level id->URL map.
                "wiki": series_wiki_dict.get(str(hit["anime_id"]), ""),
            }
    except Exception as e:
        print(e)
        return None
def find_series_info(query: str):
    """Look up a series on AnimeCharactersDatabase by name.

    Returns a dict with keys name/id/image/wiki plus "chars"
    ({character name: image URL}), or None when the lookup fails.
    The character roster is best-effort: if the follow-up request fails,
    the dict is still returned with an empty "chars" mapping.
    """
    headers = {'User-Agent': get_user_agent(), 'content-type': 'application/json'}
    base_url = 'https://www.animecharactersdatabase.com/api_series_characters.php'
    retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
    try:
        # FIX: close the session deterministically (the original leaked it).
        with requests.Session() as session:
            session.mount("https://", HTTPAdapter(max_retries=retries))
            r = session.get(base_url, params={"anime_q": query}, headers=headers,
                            stream=True, timeout=(5.0, 15))
            if not r.ok or r.json() == -1: return None  # -1 means "no match"
            j = dict(r.json())
            results = j.get("search_results", [])
            if not results: return None
            hit = results[0]
            d = {
                "name": hit["anime_name"],
                "id": hit["anime_id"],
                "image": hit["anime_image"],
                # Wikipedia link comes from the module-level id->URL map.
                "wiki": series_wiki_dict.get(str(hit["anime_id"]), ""),
                "chars": {},
            }
            # Second request: fetch the series' character roster (best-effort).
            r = session.get(hit["characters_url"], headers=headers, stream=True, timeout=(5.0, 15))
            if not r.ok or r.json() == -1: return d
            j = dict(r.json())
            for c in j.get("characters", []):
                d["chars"][c["name"]] = c["character_image"]
            return d
    except Exception as e:
        print(e)
        return None
def load_json(filename: str):
    """Read a UTF-8 JSON file and return its contents as a dict.

    Any failure (missing file, invalid JSON, non-mapping root) is printed
    and swallowed; {} is returned so callers can merge results safely.
    """
    try:
        with open(filename, encoding="utf-8") as fp:
            return dict(json.load(fp))
    except Exception as e:
        print(e)
        return {}
def load_char_dict():
    """Assemble the per-model character dictionaries from the bundled data/ files.

    NoobAI XL merges its Danbooru and e621 files (later keys win).
    """
    noob = load_json("data/noob_danbooru.json") | load_json("data/noob_e621.json")
    return {
        "Animagine 3.1": load_json("data/animagine.json"),
        "Illustrious": load_json("data/illustrious.json"),
        "NoobAI XL": noob,
    }
def create_series_dict(char_dict: dict):
    """Group character names by series across every model.

    Returns {series: sorted list of unique character names}; entries
    whose "series" field is empty are skipped.
    """
    grouped = {}
    for entries in char_dict.values():
        for info in entries.values():
            series = info["series"]
            if not series:
                continue
            grouped.setdefault(series, []).append(info["name"])
    # De-duplicate and sort each series' character list in place.
    for series in sorted(grouped):
        grouped[series] = sorted(set(grouped[series]))
    return grouped
def create_char_df(char_dict: dict):
    """Flatten char_dict into searchable display tags.

    Returns (DataFrame with a "Character" column, tag -> info-dict copy,
    tag -> model name). Each tag is "name (series) (model)", or
    "name (model)" when the series is empty.
    """
    tag_to_info = {}
    tag_to_model = {}
    all_tags = []
    for model, entries in char_dict.items():
        for info in entries.values():
            if info["series"]:
                tag = f'{info["name"]} ({info["series"]}) ({model})'
            else:
                tag = f'{info["name"]} ({model})'
            all_tags.append(tag)
            tag_to_info[tag] = info.copy()
            tag_to_model[tag] = model
    return pd.DataFrame({"Character": all_tags}), tag_to_info, tag_to_model
# Module-level data built once at import time from the bundled data/ files.
char_dict = load_char_dict()  # model name -> {key: character info dict}
series_dict = create_series_dict(char_dict)  # series -> sorted character names
series_list = list(sorted(series_dict.keys()))
char_df, tag_dict, model_dict = create_char_df(char_dict)  # search table + tag lookups
def search_series_dict(q: str, progress=gr.Progress(track_tqdm=True)):
    """Substring-search the series list for the Gradio series table.

    The query is romanized (to_roman) and lower-cased before matching.
    An empty/blank query yields the default example frame. Results are
    capped at MAX_RESULTS rows. Returns None (implicitly) on error,
    matching the original best-effort behavior.
    """
    try:
        MAX_RESULTS = 20
        if not q.strip():
            return DEFAULT_SERIES_DF
        rq = to_roman(q).lower()
        matches = [s for s in series_list if rq in s]
        # BUG FIX: the original truncated by column count
        # (len(df.columns) > 20 — a no-op on a one-column frame);
        # the intent is to cap the number of result rows.
        return pd.DataFrame({"Series": matches[:MAX_RESULTS]})
    except Exception as e:
        print(e)
def on_select_series_df(df: pd.DataFrame, is_detail: bool, evt: gr.SelectData):
    """Build the markdown panel and character gallery for the selected series row."""
    # Only hit the remote API when detail view is requested.
    info = find_series_info(evt.value.split("(")[0].strip()) if is_detail else None
    if info is None:
        md = f'## {evt.value}'
        gallery = []
    else:
        md = f'## [{info["name"]}]({info["wiki"]})\n![{info["name"]}]({info["image"]}#center)'
        gallery = [(image, cap) for cap, image in info["chars"].items()]
    return evt.value, md, gallery
def on_select_series_gallery(gallery: list, evt: gr.SelectData):
    """Return the selected gallery caption lower-cased with its words reversed."""
    caption = gallery[evt.index][1]
    words = caption.lower().split(" ")
    words.reverse()
    return " ".join(words)
def update_series_chars(series: str):
    """Refresh the character dropdown with the selected series' characters."""
    chars = list(series_dict.get(series, []))
    first = chars[0] if chars else ""
    return gr.update(choices=chars, value=first)
def apply_series(series: str, char: str):
    """Compose a search query "char (series)" from the two dropdown values.

    Returns gr.update() (leave the textbox unchanged) when the result
    would be empty.
    """
    query = char if char else ""
    if series:
        query = query + " (" + series + ")"
    query = query.strip()
    return query if query else gr.update()
def search_char_dict(q: str, models: list[str], progress=gr.Progress(track_tqdm=True)):
    """Substring-search the character tags, filtered by model suffix.

    The query is romanized (to_roman) and lower-cased; a tag matches when
    it contains the query and ends with " (<model>)" for one of the
    selected models. An empty/blank query yields the default example
    frame. Results are capped at MAX_RESULTS rows. Returns None
    (implicitly) on error, matching the original best-effort behavior.
    """
    try:
        MAX_RESULTS = 50
        if not q.strip():
            return DEFAULT_DF
        tags = char_df["Character"].tolist()
        rq = to_roman(q).lower()
        # endswith accepts a tuple: one " (model)" suffix per selected model.
        suffixes = tuple(f" ({s})" for s in models)
        matches = [s for s in tags if rq in s and s.endswith(suffixes)]
        # BUG FIX: the original truncated by column count
        # (len(df.columns) > 50 — a no-op on a one-column frame);
        # the intent is to cap the number of result rows.
        return pd.DataFrame({"Character": matches[:MAX_RESULTS]})
    except Exception as e:
        print(e)
def on_select_df(df: pd.DataFrame, is_detail: bool, evt: gr.SelectData):
    """Build (tag, model, markdown) for the character row picked in the table."""
    entry = tag_dict.get(evt.value, None)
    if entry is None:
        return "", "", "<br><br><br>"
    # Only hit the remote API when detail view is requested.
    info = find_char_info(entry["name"]) if is_detail else None
    if info is None:
        md = f'## {entry["name"]} / {entry["series"]}' if entry["series"] else f'## {entry["name"]}'
    else:
        md = f'## [{info["name"]}]({info["wiki"]}) / [{info["series"]}]({info["wiki"]}) / {info["gender"]}\n![{info["name"]}]({info["image"]}#center)\n[{info["desc"]}]({info["wiki"]})'
    md += f'\n<br>Tag is for {model_dict[evt.value]}.'
    return entry["tag"], model_dict[evt.value], md