# babelmachine/interfaces/illframes.py
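"""Gradio interface for the poltextlab ILLFRAMES frame classifiers.

Loads an XLM-RoBERTa-large model per domain (Covid or Migration) and
returns the top label probabilities for an input text.
"""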
import gradio as gr
import os
import torch
import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from huggingface_hub import HfApi

from label_dicts import ILLFRAMES_MIGRATION_LABEL_NAMES, ILLFRAMES_COVID_LABEL_NAMES

# Read-only Hugging Face token, expected in the Space's environment.
HF_TOKEN = os.environ["hf_read"]
languages = [
    "English"
]

# Maps the dropdown label shown in the UI to the model-name suffix.
domains = {
    "Covid": "covid",
    "Migration": "migration"
}
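# Note: `language` is accepted by predict_illframes for UI symmetry but is
# not used for routing; only English-language models are exposed here.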
# --- DEBUG ---
import shutil

def convert_size(size):
    """Render a byte count as a human-readable string."""
    for unit in ['B', 'KB', 'MB', 'GB', 'TB', 'PB']:
        if size < 1024:
            return f"{size:.2f} {unit}"
        size /= 1024

def get_disk_space(path="/"):
    total, used, free = shutil.disk_usage(path)
    return {
        "Total": convert_size(total),
        "Used": convert_size(used),
        "Free": convert_size(free)
    }
# ---
def check_huggingface_path(checkpoint_path: str):
    """Return True if the checkpoint exists on the Hub and is readable."""
    try:
        hf_api = HfApi(token=HF_TOKEN)
        hf_api.model_info(checkpoint_path, token=HF_TOKEN)
        return True
    except Exception:
        return False
def build_huggingface_path(domain: str):
    return f"poltextlab/xlm-roberta-large-english-ILLFRAMES-{domain}"
def predict(text, model_id, tokenizer_id, label_names):
    # Inference runs on CPU in this Space.
    device = torch.device("cpu")

    # --- DEBUG ---
    disk_space = get_disk_space('/data/')
    print("Disk Space Info:")
    for key, value in disk_space.items():
        print(f"{key}: {value}")
    # ---

    try:
        model = AutoModelForSequenceClassification.from_pretrained(
            model_id, low_cpu_mem_usage=True, offload_folder="offload",
            device_map="auto", token=HF_TOKEN)
    except Exception:
        # A failed or partial download can leave a corrupt cache; log the
        # remaining disk space and retry with a forced re-download.
        disk_space = get_disk_space('/data/')
        print("Disk Space Error:")
        for key, value in disk_space.items():
            print(f"{key}: {value}")
        model = AutoModelForSequenceClassification.from_pretrained(
            model_id, low_cpu_mem_usage=True, device_map="auto",
            token=HF_TOKEN, force_download=True)

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)

    # Single-text input, so no padding is needed.
    inputs = tokenizer(text,
                       max_length=256,
                       truncation=True,
                       padding="do_not_pad",
                       return_tensors="pt").to(device)

    model.eval()
    with torch.no_grad():
        logits = model(**inputs).logits

    probs = torch.nn.functional.softmax(logits, dim=1).cpu().numpy().flatten()

    # Label keys are sorted so that logit index i maps to the i-th key.
    NUMS_DICT = {i: key for i, key in enumerate(sorted(label_names.keys()))}
    output_pred = {f"[{NUMS_DICT[i]}] {label_names[NUMS_DICT[i]]}": probs[i] for i in np.argsort(probs)[::-1]}
    output_info = f'<p style="text-align: center; display: block">Prediction was made using the <a href="https://huggingface.co/{model_id}">{model_id}</a> model.</p>'
    return output_pred, output_info
def predict_illframes(text, language, domain):
    domain = domains[domain]
    model_id = build_huggingface_path(domain)
    tokenizer_id = "xlm-roberta-large"

    if domain == "migration":
        label_names = ILLFRAMES_MIGRATION_LABEL_NAMES
    else:
        label_names = ILLFRAMES_COVID_LABEL_NAMES

    return predict(text, model_id, tokenizer_id, label_names)
demo = gr.Interface(
    fn=predict_illframes,
    inputs=[gr.Textbox(lines=6, label="Input"),
            gr.Dropdown(languages, label="Language"),
            gr.Dropdown(list(domains.keys()), label="Domain")],
    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])
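
# Assumption (not in the original file): in the babelmachine Space this module
# is likely imported and composed elsewhere, so the launch is guarded to keep
# importing it side-effect free; a minimal sketch for running it standalone.
if __name__ == "__main__":
    demo.launch()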