"""CC6204-Hackaton-Cub-Dataset: Multimodal"""
import os
import re
import datasets

import pandas as pd

from requests import get

logger = datasets.logging.get_logger(__name__)
datasets.logging.set_verbosity_info()


_DESCRIPTION = (
   "Bird images from the CUB (Caltech-UCSD Birds) dataset paired with "
   "fine-grained species labels, packaged for the CC6204 Deep Learning "
   "hackathon."
)
_CITATION = "XYZ"
_HOMEPAGE = "https://github.com/ivansipiran/CC6204-Deep-Learning/blob/main/Hackaton/hackaton.md"

_REPO = "https://huggingface.co/datasets/alkzar90/CC6204-Hackaton-Cub-Dataset/resolve/main/data"

_URLS = {
   "train_test_split": f"{_REPO}/train_test_split.txt",
   "classes": f"{_REPO}/classes.txt",
   "image_class_labels": f"{_REPO}/image_class_labels.txt",
   "images": f"{_REPO}/images.txt",
   "image_urls": f"{_REPO}/images.zip",
   "text_urls": f"{_REPO}/text.zip",
}
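
# Expected plain-text formats of the metadata files (an assumption based on
# the standard CUB-200-2011 release; example values are illustrative):
#   classes.txt:            "<class_id> <prefix>.<Class_Name>", e.g. "1 001.Black_footed_Albatross"
#   images.txt:             "<image_id> <relative/image/path.jpg>"
#   train_test_split.txt:   "<image_id> <is_train>", where 1 = train, 0 = test
#   image_class_labels.txt: "<image_id> <class_id>"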

# Create id-to-label dictionary using the classes file
classes = get(_URLS["classes"]).iter_lines()
logger.info(f"classes: {classes}")
_ID2LABEL = {}
for row in classes:
   row = row.decode("UTF8")
   if row != "":
      idx, label = row.split(" ")
      # Strip the numeric prefix and replace underscores with spaces,
      # e.g. "001.Black_footed_Albatross" -> "Black footed Albatross"
      _ID2LABEL[int(idx)] = re.search(r"[^\d._+].+", label).group(0).replace("_", " ")

logger.info(f"_ID2LABEL: {_ID2LABEL}")

_NAMES = list(_ID2LABEL.values())
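
# Illustrative sanity checks (assuming the standard CUB-200-2011 classes;
# values not verified against the hosted files):
#   _ID2LABEL[1] -> "Black footed Albatross"
#   len(_NAMES)  -> 200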


# Build a mapping from image file name -> image id using images.txt
imgpath_to_ids = get(_URLS["images"]).iter_lines()
_IMGNAME2ID = {}
for row in imgpath_to_ids:
   row = row.decode("UTF8")
   if row != "":
      idx, img_name = row.split(" ")
      _IMGNAME2ID[os.path.basename(img_name)] = int(idx)
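# Keying by basename assumes file names are unique across class folders
# (they are in the standard CUB naming scheme, which embeds the species
# name in the file name), e.g.
#   _IMGNAME2ID["Black_Footed_Albatross_0046_18.jpg"] -> 1  (illustrative)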
  
    
# Create TRAIN_IDX_SET
train_test_split = get(_URLS["train_test_split"]).iter_lines()
_TRAIN_IDX_SET = set()
for row in train_test_split:
   row = row.decode("UTF8")
   if row != "":
      idx, train_bool = row.split(" ")
      # 1: train, 0: test
      if train_bool == "1":
         _TRAIN_IDX_SET.add(int(idx))


class CubDataset(datasets.GeneratorBasedBuilder):
   """Cub Dataset"""
   
   def _info(self):
      features = datasets.Features({
         "image": datasets.Image(),
         "labels": datasets.features.ClassLabel(names=_NAMES),
      })
      keys = ("image", "labels")
      
      return datasets.DatasetInfo(
         description=_DESCRIPTION,
         features=features,
         supervised_keys=keys,
         homepage=_HOMEPAGE,
         citation=_CITATION,
      )
      
      
   def _split_generators(self, dl_manager):
      train_files = []
      test_files = []

      # Download and extract the images archive; a single URL yields a
      # single extracted directory path
      data_dir = dl_manager.download_and_extract(_URLS["image_urls"])

      # Assign every extracted image to train or test according to the
      # official split in train_test_split.txt
      for img in dl_manager.iter_files(data_dir):
         img_id = _IMGNAME2ID.get(os.path.basename(img))
         if img_id is None:
            continue  # skip non-image files bundled in the archive
         if img_id in _TRAIN_IDX_SET:
            train_files.append(img)
         else:
            test_files.append(img)
               
      return [
         datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"files": train_files},
         ),
         datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={"files": test_files},
         ),
      ]
      
      
   def _generate_examples(self, files):
      for i, path in enumerate(files):
         file_name = os.path.basename(path)
         if file_name.endswith(".jpg"):
            # Derive the label from the parent folder name with the same
            # transformation used to build _ID2LABEL, so the string matches
            # the ClassLabel names exactly,
            # e.g. "001.Black_footed_Albatross" -> "Black footed Albatross"
            folder = os.path.basename(os.path.dirname(path))
            label = re.search(r"[^\d._+].+", folder).group(0).replace("_", " ")
            yield i, {
               "image": path,
               "labels": label,
            }
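
# Usage sketch (assumes this script is the loading script of the Hub repo
# "alkzar90/CC6204-Hackaton-Cub-Dataset"; illustrative, not part of the
# builder itself):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("alkzar90/CC6204-Hackaton-Cub-Dataset")
#   example = ds["train"][0]
#   example["image"]   # PIL image, decoded lazily by datasets.Image()
#   example["labels"]  # integer class id; recover the name with
#                      # ds["train"].features["labels"].int2str(example["labels"])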