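"""Build bbox-to-pseudo-HTML training annotations from web-page screenshots and DOM dumps.

For each screenshot/DOM pair, valid DOM subtrees are rendered as indented pseudo-HTML
(with and without normalized bounding boxes), filtered by token length and bbox overlap,
and written as JSON-Lines conversation records that reference the screenshot image.
"""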
import os
import os.path as osp
import json
import random
from functools import partial
from multiprocessing import Pool

import transformers
from tqdm import tqdm

from data_class import domNode, nodeBbox


# Token-length window for accepting a rendered DOM subtree as one sample, and the
# maximum IoU allowed between a new sample's bbox and any already selected bbox
# (0 means selected regions must not overlap at all).
MAX_TOKEN_LEN = 800
MIN_TOKEN_LEN = 200
MAX_IoU = 0

# Prompt templates for the html -> bbox and bbox -> html tasks.
HTML2BBOX_TEMPLATE = [
    "Where is the given dom tree?",
]

BBOX2HTML_TEMPLATE = [
    "Can you give me a description of the region <objs> in image?",
]


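# domNode subclass that additionally renders its subtree as pseudo-HTML, both with and
# without bounding-box annotations.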
class NodewithHtml(domNode):
    def __init__(self, id, info=None, children=None, father=None, tokenizer=None, img_w=None, img_h=None, task=""):
        super().__init__(id, info, children, father, tokenizer, img_w=img_w, img_h=img_h, task="")
        # Pre-render both pseudo-HTML views of this subtree.
        self.dom_xml = self.get_dom_xml(mode="dom_xml")
        self.dom_xml_with_bbox = self.get_dom_xml(mode="dom_xml_with_bbox")

    def bbox2str(self) -> str:
        # Normalize pixel coordinates to integers in [0, 999] (thousandths of the image size).
        x1_rel = round(max(0, (self.info.bbox.x1 / IMG_W)) * 1000)
        y1_rel = round(max(0, (self.info.bbox.y1 / IMG_H)) * 1000)
        x2_rel = round(min(0.999, (self.info.bbox.x2 / IMG_W)) * 1000)
        y2_rel = round(min(0.999, (self.info.bbox.y2 / IMG_H)) * 1000)

        coords = [x1_rel, y1_rel, x2_rel, y2_rel]
        bbox_str = ["{:03}".format(coord) for coord in coords]
        bbox_str = "[" + ", ".join(bbox_str) + "]"

        return bbox_str

    def center2str(self) -> str:
        # Same normalization as bbox2str, but for the box center point.
        cx = round(max(0, min(0.999, self.info.bbox.cx / IMG_W)) * 1000)
        cy = round(max(0, min(0.999, self.info.bbox.cy / IMG_H)) * 1000)

        point = [cx, cy]
        point_str = ["{:03}".format(p) for p in point]
        point_str = "[" + ", ".join(point_str) + "]"

        return point_str

    def node2xml_withbbox(self) -> str:
        # Render this node as a single pseudo-HTML tag carrying its normalized bbox.
        text = self.info.text
        bbox_str = self.bbox2str()

        if self.is_text() or self.has_single_text_child():
            pseudo_html_line = f"<text box={bbox_str} content=\"{text}\">"
        elif self.is_img():
            if self.class_for_caption == 'img':
                pseudo_html_line = f"<img box={bbox_str} alt={text}>"
            elif self.class_for_caption == 'svg':
                pseudo_html_line = f"<svg box={bbox_str} alt={text}>"
            else:
                return ""
        elif self.info.func == 'type':
            pseudo_html_line = f"<input box={bbox_str}, content=\"{text}\">"
        else:
            # Non-leaf containers become a plain wrapper tag; any other leaf is dropped.
            if self.is_leaf():
                return ""
            else:
                pseudo_html_line = f"<plain box={bbox_str}>"

        return pseudo_html_line

    def node2xml(self) -> str:
        # Same as node2xml_withbbox, but only <input> nodes keep a (center-point) location.
        text = self.info.text
        bbox_str = self.bbox2str()
        center_str = self.center2str()

        if self.is_text() or self.has_single_text_child():
            pseudo_html_line = f"<text content=\"{text}\">"
        elif self.is_img():
            if self.class_for_caption == 'img':
                pseudo_html_line = f"<img alt={text}>"
            elif self.class_for_caption == 'svg':
                pseudo_html_line = f"<svg alt={text}>"
            else:
                return ""
        elif self.info.func == 'type':
            pseudo_html_line = f"<input center={center_str}, content=\"{text}\">"
        else:
            if self.is_leaf():
                return ""
            else:
                pseudo_html_line = "<plain>"

        return pseudo_html_line

    def get_dom_xml(self, mode: str):
        # Recursively render this subtree; an empty string anywhere invalidates the whole subtree.
        xml_func = self.node2xml if mode == 'dom_xml' else self.node2xml_withbbox

        self_dom_xml = xml_func()
        if not self_dom_xml:
            return ""

        dom_xml_list = [self_dom_xml]

        if not self.is_func_leaf():
            for child in self.children:
                child_xml = getattr(child, mode)
                if not child_xml:
                    return ""
                # Indent the child's already-rendered block one level under this node.
                child_xml_list = child_xml.strip().split('\n')
                child_xml_list = ['\t' + line for line in child_xml_list]
                child_xml_fmt = '\n'.join(child_xml_list)
                dom_xml_list.append(child_xml_fmt)

        dom_xml = '\n'.join(dom_xml_list)
        return dom_xml


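# Tokenizer used only to measure the token length of rendered pseudo-HTML snippets
# (the path below is a placeholder; point it at your own tokenizer).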
tokenizer = transformers.AutoTokenizer.from_pretrained(
    "path/to/your/tokenizer",
    trust_remote_code=True,
)


def get_str_token_len(str_: str) -> int:
    return len(tokenizer.encode(str_))


def collect_nodes(dom: NodewithHtml, node_list: list[NodewithHtml]) -> list:
    # Depth-first traversal that appends every valid node to node_list.
    if dom.is_valid:
        node_list.append(dom)

    for child in dom.children:
        collect_nodes(child, node_list)

    return node_list


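# A node is kept as a sample only if its pseudo-HTML falls within the token-length window
# and its bbox does not overlap (IoU above MAX_IoU) any previously selected bbox.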
def select_node(xml: str, bbox: nodeBbox, selected_bbox_list: list[nodeBbox]) -> bool:
    if not xml:
        return False

    try:
        token_len = get_str_token_len(xml)
    except Exception:
        return False
    if token_len > MAX_TOKEN_LEN or token_len < MIN_TOKEN_LEN:
        return False

    if selected_bbox_list:
        max_IoU = bbox.get_max_IoU(selected_bbox_list)
        if max_IoU > MAX_IoU:
            return False

    return True


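# Build one conversation per selected node: the human turn asks about the node's bbox
# region (substituted for <objs> in the template) and the gpt turn answers with the
# node's bbox-annotated pseudo-HTML subtree.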
def make_qa(node_list: list[NodewithHtml], img_path: str):
    random.shuffle(node_list)

    selected_bbox_list = []

    ann_list = []
    for node in node_list:
        if select_node(node.dom_xml_with_bbox, node.info.bbox, selected_bbox_list):
            bbox2html_prompt = random.choice(BBOX2HTML_TEMPLATE)
            prompt = bbox2html_prompt.replace("<objs>", node.bbox2str())
            caption = node.dom_xml_with_bbox

            ann = {
                "image": img_path,
                "conversations": [
                    {
                        "from": "human",
                        "value": f"<image>\n{prompt}"
                    },
                    {
                        "from": "gpt",
                        "value": caption
                    }
                ]
            }
            ann_list.append(ann)
            selected_bbox_list.append(node.info.bbox)

    return ann_list


def write_ann(ann_list, ann_path):
    # Append annotations as JSON Lines; ids restart from 0 for each batch, so they are
    # only unique within one call.
    ann_dir = osp.dirname(ann_path)
    if ann_dir:
        os.makedirs(ann_dir, exist_ok=True)

    with open(ann_path, 'a', encoding='utf-8') as f:
        for i in range(len(ann_list)):
            ann_list[i]['id'] = i

            ann_str = json.dumps(ann_list[i], ensure_ascii=False)
            f.write(ann_str + '\n')


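# Process one data sub-directory. It is expected to contain a 'screenshot' folder,
# DOM json dumps under 'dom_svg' (zh) or 'dom_svg_en' (en), and an 'html' folder whose
# html_0.html first line is used to skip a couple of sites.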
def single_proc(data_dir, language, sub_dir):
    sub_path = osp.join(data_dir, sub_dir)
    screen_shot_dir = osp.join(sub_path, 'screenshot')
    dom_sub_dir = 'dom_svg' if language == 'zh' else 'dom_svg_en'
    dom_dir = osp.join(sub_path, dom_sub_dir)
    html_dir = osp.join(sub_path, 'html')

    html_path = osp.join(html_dir, 'html_0.html')
    if not osp.exists(html_path):
        return []
    with open(html_path) as f:
        html_content = f.readline().strip()
    if html_content in ['https://www.qq.com/', 'https://music.163.com/']:
        return []

    all_annotations = []
    for img_file in os.listdir(screen_shot_dir):
        if 'tmp' in img_file:
            continue
        # Screenshots are expected to be named '<prefix>_<id>.<ext>', matching 'dom_<id>.json'.
        file_ids = img_file.split('.')[0].split('_')[1]
        json_path = osp.join(dom_dir, f'dom_{file_ids}.json')
        img_path = osp.join(screen_shot_dir, img_file)
        if not osp.exists(json_path):
            continue

        with open(json_path) as f:
            dom_data = json.load(f)
        try:
            dom_node = NodewithHtml(tokenizer=tokenizer, img_w=IMG_W, img_h=IMG_H, task='dom', **dom_data)
        except ValueError as e:
            print(f"Json data error: {e}\n{json_path}")
            continue

        node_list = collect_nodes(dom_node, [])
        annotations = make_qa(node_list, img_path)
        all_annotations.extend(annotations)

    return all_annotations


def main_multi(data_dir, dst_path, language):
    sub_dir_list = os.listdir(data_dir)
    single_proc_partial = partial(single_proc, data_dir, language)

    num = 0
    with Pool(processes=16) as pool:
        for res in tqdm(pool.imap_unordered(single_proc_partial, sub_dir_list), total=len(sub_dir_list)):
            write_ann(res, dst_path)
            num += len(res)

    print(f'Wrote {num} annotations to {dst_path}')


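# Screenshot resolution assumed when normalizing bbox coordinates in NodewithHtml.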
IMG_W = 1920
IMG_H = 1080

data_dir = 'data_20240624'
dst_path = 'xxx.jsonl'


if __name__ == '__main__':
    main_multi(data_dir, dst_path, 'en')