Questions about OCR details and missing items in this dataset

#1
by Qingyun - opened

Hi, thanks for sharing this dataset.

While checking the dataset, I found some missing items (a quick spot-check sketch follows the list):
'B001GNBU18', '823924335', '395291305', '1565842715', '399173013', '471927074', '1259589730', '143120581', '3882265868', '980011116', '1455773506', '471470538', '812969472', '73398144', '1492616737', '679754814', '007149572X', '089886772X', '1609785908', '1412716071', '865424306', '435302485', '1597752134'

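For example, this is how I verified one of the IDs above; it is only a sketch and assumes the same datasets/OCR-VQA/dataset.json path and HF repo id used in the full script further down.

import json
import datasets

missing_id = "B001GNBU18"

# the ID is present in the original annotations...
with open("datasets/OCR-VQA/dataset.json", "r") as f:
    src_dataset = json.load(f)
print(missing_id in src_dataset)  # expected: True

# ...but absent from every split of the HF dataset
for split in ("train", "validation", "test"):
    ds = datasets.load_dataset("howard-hou/OCR-VQA", split=split)
    print(split, missing_id in set(ds["image_id"]))  # expected: False
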
I also found TextVQA/Cap-style OCR tokens in this dataset, which are useful but not provided in the original OCR-VQA metadata. Could you share more details about the OCR process?

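For reference, these are the fields I am referring to (a minimal sketch; I am assuming 'ocr_info' carries per-token metadata such as bounding boxes, TextVQA-style):

import datasets

hf_trainset = datasets.load_dataset("howard-hou/OCR-VQA", split="train")
sample = hf_trainset[0]
print(sample["ocr_tokens"])  # recognized words for the cover image
print(sample["ocr_info"])    # per-token metadata (bounding boxes?), not in the original OCR-VQA annotations
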
The items were checked with this script:

import json
import datasets
from collections import defaultdict
from tqdm import tqdm


def compare_qa_list(q_list_1, a_list_1, q_list_2, a_list_2):
    if not len(q_list_1) == len(q_list_2) == len(a_list_1) == len(a_list_2):
        return False
    if set(q_list_1) != set(q_list_2):
        return False
    if set(a_list_1) != set(a_list_2):
        return False
    qa_1 = {q:a for q, a in zip(q_list_1, a_list_1)}
    qa_2 = {q:a for q, a in zip(q_list_2, a_list_2)}
    for q in qa_1.keys():
        if not qa_1[q] == qa_2[q]:
            return False
    return True


def main_check_hf_datasets():

    hf_trainset = datasets.load_dataset("howard-hou/OCR-VQA", split="train")
    hf_valset = datasets.load_dataset("howard-hou/OCR-VQA", split="validation")
    hf_testset = datasets.load_dataset("howard-hou/OCR-VQA", split="test")
    
    with open("datasets/OCR-VQA/dataset.json", "r") as f:
        src_dataset = json.load(f)
    
    unexpected_items, error_items = {}, defaultdict(list)
    # check
    hf_all_image_id = []
    for split, dataset in [("train", hf_trainset), ("val", hf_valset), ("test", hf_testset)]:
        for item_id, item in enumerate(tqdm(dataset)):
            image_id = item["image_id"]  # 'image', 'image_id', 'questions', 'answers', 'ocr_tokens', 'ocr_info', 'title', 'authorName', 'genre', 'image_width', 'image_height', 'image_url', 'set_name'
            hf_all_image_id.append(image_id)
            if image_id in src_dataset:
                src_item = src_dataset[image_id]  # 'imageURL', 'questions', 'answers', 'title', 'authorName', 'genre', 'split'
                
                # check
                if not item["image_url"] == src_item["imageURL"]:
                    error_items["image_url_mismatch"].append((split, item_id))
                if not compare_qa_list(item["questions"], item["answers"], src_item["questions"], src_item["answers"]):
                    error_items["qa_mismatch"].append((split, item_id))
                if not item["title"] == src_item["title"]:
                    error_items["title_mismatch"].append((split, item_id))
            else:
                unexpected_items[image_id] = item
    missing_items = {img_id: src_dataset[img_id] for img_id in set(src_dataset.keys()) - set(hf_all_image_id)}

    # report the findings instead of discarding them
    print("missing items:", sorted(missing_items.keys()))
    print("unexpected items:", sorted(unexpected_items.keys()))
    print("error counts:", {k: len(v) for k, v in error_items.items()})


if __name__ == "__main__":
    main_check_hf_datasets()