import gradio as gr
import pandas as pd
import json
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
from pykakasi import kakasi
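
# Supported base models and the default table shown before any search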
MODELS = ["Animagine 3.1", "NoobAI XL", "Illustrious"]
DEFAULT_DF = pd.DataFrame({"Character": ["oomuro sakurako (yuru yuri) (NoobAI XL)", "kafuu chino (gochuumon wa usagi desu ka?) (Animagine 3.1)"]})
kks = kakasi()
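
# Romanize Japanese input with pykakasi (Hepburn) so queries can match the ASCII tag list;
# fall back to the raw string on error.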
def to_roman(s: str):
    try:
        return "".join([i.get("hepburn", "") for i in kks.convert(s)])
    except Exception as e:
        print(e)
        return s
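
# Browser-like User-Agent string used for the AnimeCharactersDatabase API requests.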
def get_user_agent():
    return 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
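
# Fetch a map of anime_id -> Wikipedia URL from the AnimeCharactersDatabase API;
# returns an empty dict on any request failure.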
def get_series_wiki_dict():
    user_agent = get_user_agent()
    headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
    base_url = 'https://www.animecharactersdatabase.com/api_series_characters.php?title_wiki_links'
    params = {}
    session = requests.Session()
    retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
    session.mount("https://", HTTPAdapter(max_retries=retries))
    try:
        r = session.get(base_url, params=params, headers=headers, stream=True, timeout=(5.0, 15))
        if not r.ok: return {}
        j = dict(r.json())
        if "titles" not in j: return {}
        return {str(i["anime_id"]): i["wikipedia_url"] for i in j["titles"]}
    except Exception as e:
        print(e)
        return {}
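
# Fetch the series -> Wikipedia URL map once at import time.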
series_wiki_dict = get_series_wiki_dict()
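
# Look up a character on AnimeCharactersDatabase, retrying with the name order reversed
# if the first query fails; returns a dict with name/series/gender/image/desc
# (plus "wiki" when the series has a known Wikipedia URL) or None.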
def find_char_info(query: str):
    user_agent = get_user_agent()
    headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
    base_url = 'https://www.animecharactersdatabase.com/api_series_characters.php'
    params = {"character_q": query}
    params2 = {"character_q": " ".join(list(reversed(query.split(" "))))}
    session = requests.Session()
    retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
    session.mount("https://", HTTPAdapter(max_retries=retries))
    try:
        r = session.get(base_url, params=params, headers=headers, stream=True, timeout=(5.0, 15))
        if not r.ok or r.json() == -1:
            r = session.get(base_url, params=params2, headers=headers, stream=True, timeout=(5.0, 15))
            if not r.ok or r.json() == -1: return None
        j = dict(r.json())
        if "search_results" not in j or len(j["search_results"]) == 0: return None
        d = {}
        i = j["search_results"][0]
        d["name"] = i["name"]
        d["series"] = i["anime_name"]
        d["gender"] = i["gender"]
        d["image"] = i["character_image"]
        d["desc"] = i["desc"]
        aid = str(i["anime_id"])
        if aid in series_wiki_dict: d["wiki"] = series_wiki_dict[aid]
        return d
    except Exception as e:
        print(e)
        return None
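
# Read a JSON file into a dict, returning an empty dict if the file is missing or malformed.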
def load_json(filename: str):
    try:
        with open(filename, encoding="utf-8") as f:
            d = json.load(f)
        return dict(d)
    except Exception as e:
        print(e)
        return {}
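
# Load the per-model character tag dictionaries shipped in data/.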
def load_char_dict():
    d = {}
    d["Animagine 3.1"] = load_json("data/animagine.json")
    d["Illustrious"] = load_json("data/illustrious.json")
    d["NoobAI XL"] = load_json("data/noob_danbooru.json") | load_json("data/noob_e621.json")
    return d
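
# Flatten the per-model dictionaries into a single "Character" DataFrame plus
# lookup tables from display tag to character record and to model name.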
def create_char_df(char_dict: dict):
    d = {}
    m = {}
    tags = []
    for model, value in char_dict.items():
        for name, v in value.items():
            tag = f'{v["name"]} ({v["series"]}) ({model})' if v["series"] else f'{v["name"]} ({model})'
            tags.append(tag)
            d[tag] = v.copy()
            m[tag] = model
    df = pd.DataFrame({"Character": tags})
    return df, d, m
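
# Build the character data and search index once at import time.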
char_dict = load_char_dict()
char_df, tag_dict, model_dict = create_char_df(char_dict)
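
# Gradio handler: filter the character list by a romanized substring match
# restricted to the selected models.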
def search_char_dict(q: str, models: list[str], progress=gr.Progress(track_tqdm=True)):
    try:
        MAX_RESULTS = 50  # cap the number of rows returned to keep the table manageable
        if q.strip():
            #search_results = df[df["Character"].str.contains(to_roman(q).lower(), regex=False, na=False) & df["Character"].str.contains("(?: \\(" + "\\))|(?: \\(".join(models) + "\\))", regex=True, na=False)]
            l = char_df["Character"].tolist()
            rq = to_roman(q).lower()  # lower-case both sides so queries like "Sakurako" still match the lower-case tag list
            mt = tuple([f" ({s})" for s in models])
            search_results = pd.DataFrame({"Character": [s for s in l if rq in s.lower() and s.endswith(mt)]})
        else:
            return DEFAULT_DF
        if len(search_results) > MAX_RESULTS:
            search_results = search_results.iloc[:MAX_RESULTS]
        return search_results
    except Exception as e:
        print(e)
        return DEFAULT_DF  # fall back to the default table instead of returning None
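
# Gradio handler: when a row is selected, return the prompt tag plus a Markdown
# info card, optionally enriched with AnimeCharactersDatabase details.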
def on_select_df(df: pd.DataFrame, is_detail: bool, evt: gr.SelectData):
    d = tag_dict.get(evt.value, None)
    if d is None: return "", ""  # this handler feeds two outputs (tag, markdown)
    print(d)
    if is_detail: info = find_char_info(d["name"])
    else: info = None
    print(info)
    if info is not None:
        wiki = info.get("wiki", "")  # "wiki" is only set when the series has a known Wikipedia URL
        md = f'## [{info["name"]}]({wiki}) / [{info["series"]}]({wiki}) / {info["gender"]}\n![{info["name"]}]({info["image"]}#center)\n[{info["desc"]}]({wiki})'
    else: md = f'## {d["name"]} / {d["series"]}' if d["series"] else f'## {d["name"]}'
    md += f'\n<br>Tag is for {model_dict[evt.value]}.'
    return d["tag"], md