cv_backbones / vi_backbones.py
admin
try fix torchvision not installed
a27297b
raw
history blame
5.81 kB
import os
import re
import hashlib
import subprocess

# BUG FIX: the original installed torchvision *after* importing it, so a
# missing torchvision crashed the script before pip ever ran. Guard the
# import instead: install on demand, then retry.
try:
    import torchvision.models as models
except ImportError:
    subprocess.call(['pip', 'install', 'torchvision'])
    import torchvision.models as models

import requests
import datasets
from bs4 import BeautifulSoup

# Dataset name derived from this file's own name (extension stripped).
_DBNAME = os.path.basename(__file__).split('.')[0]
_HOMEPAGE = "https://huggingface.co/datasets/george-chou/" + _DBNAME
# Root of torchvision's rendered "_modules" source pages to scrape.
_URL = 'https://pytorch.org/vision/main/_modules/'
class vi_backbones(datasets.GeneratorBasedBuilder):
    """Dataset builder that scrapes torchvision's model source pages and
    emits one record per ImageNet-pretrained backbone checkpoint:
    version, model family, input size, placeholder output size, and the
    checkpoint download URL.
    """

    def _info(self):
        """Declare the dataset schema and metadata."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "ver": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "input_size": datasets.Value("int16"),
                    "output_size": datasets.Value("int64"),
                    "url": datasets.Value("string"),
                    # "md5": datasets.Value("string"),
                }
            ),
            supervised_keys=("ver", "type"),
            homepage=_HOMEPAGE,
            license="mit"
        )

    def _get_file_md5(self, url):
        """
        Calculate the MD5 hash value of a file using its URL
        :param url: the URL address of the file
        :return: the MD5 hash value in string format
        :raises ValueError: on non-200 status or any download error
        """
        try:
            # Stream so the whole file is never held in memory; timeout so
            # a dead host cannot hang the build forever.
            response = requests.get(url, stream=True, timeout=60)
            if response.status_code == 200:
                md5obj = hashlib.md5()
                for chunk in response.iter_content(chunk_size=1024 * 1024):
                    md5obj.update(chunk)
                return md5obj.hexdigest()
            # NOTE: this ValueError is itself caught below and re-wrapped,
            # matching the original control flow.
            raise ValueError(
                f"Error downloading file from {url}. Status code: {response.status_code}")
        except Exception as e:
            raise ValueError(
                f"Error calculating MD5 of file at {url}: {str(e)}")

    def _parse_url(self, url):
        """Fetch *url* and return its HTML as a BeautifulSoup tree."""
        response = requests.get(url, timeout=60)
        return BeautifulSoup(response.text, 'html.parser')

    def _special_type(self, m_type):
        """Normalize a weight-enum prefix to the torchvision module name
        that actually hosts the model (e.g. 'wide'/'resnext' -> 'resnet').
        """
        if m_type in ('wide', 'resnext'):
            return 'resnet'
        if m_type == 'swin':
            return 'swin_transformer'
        if m_type == 'inception':
            return 'googlenet'
        return m_type

    def _info_on_dataset(self, m_ver, m_type, in1k_span):
        """Extract the checkpoint URL and input size that follow an
        IMAGENET1K_* marker span in the highlighted source HTML.

        :param m_ver: lowercase model version, e.g. 'resnet50'
        :param m_type: raw model-family prefix (normalized here)
        :param in1k_span: the <span class="n"> holding the weight tag
        :return: (record dict, the size span — callers resume scanning
                  from it to find a following IMAGENET1K_V2 entry)
        """
        # In Pygments-highlighted source, the string literal (class 's2')
        # after the tag is the URL, and the next integer (class 'mi') is
        # the expected input size.
        url_span = in1k_span.find_next_sibling('span', {'class': 's2'})
        size_span = url_span.find_next_sibling('span', {'class': 'mi'})
        m_type = self._special_type(m_type)
        # [1:-1] strips the literal's surrounding quote characters.
        m_url = str(url_span.text[1:-1])
        input_size = int(size_span.text)
        m_dict = {
            'ver': m_ver,
            'type': m_type,
            'input_size': input_size,
            'url': m_url
        }
        return m_dict, size_span

    def _generate_dataset(self, url):
        """Crawl the torchvision _modules index and collect records for
        every model exposing IMAGENET1K_V1 (and, when present, V2) weights.

        :return: (list of V1 records, list of V2 records)
        """
        torch_page = self._parse_url(url)
        article = torch_page.find('article', {'id': 'pytorch-article'})
        ul = article.find('ul').find('ul')
        in1k_v1, in1k_v2 = [], []
        for li in ul.find_all('li'):
            name = str(li.text)
            # Keep only leaf model modules like 'torchvision.models.resnet'.
            if 'torchvision.models.' not in name or len(name.split('.')) != 3:
                continue
            if '_api' in name or 'feature_extraction' in name:
                continue
            href = li.find('a').get('href')
            model_page = self._parse_url(url + href)
            for div in model_page.select('div.viewcode-block'):
                div_id = str(div['id'])
                if '_Weights' not in div_id:
                    continue
                # 'ResNet50_Weights' -> 'resnet50'
                m_ver = div_id.split('_Weight')[0].lower()
                # Skip weight enums with no matching models.<name> factory.
                if not hasattr(models, m_ver):
                    continue
                # Leading alphabetic run is the model family, e.g. 'resnet'.
                m_type = re.search('[a-zA-Z]+', m_ver).group(0)
                in1k_v1_span = div.find(
                    name='span',
                    attrs={'class': 'n'},
                    string='IMAGENET1K_V1'
                )
                if in1k_v1_span is None:
                    continue
                m_dict, size_span = self._info_on_dataset(
                    m_ver,
                    m_type,
                    in1k_v1_span
                )
                in1k_v1.append(m_dict)
                # V2 weights, when present, appear after the V1 size span.
                in1k_v2_span = size_span.find_next_sibling(
                    name='span',
                    attrs={'class': 'n'},
                    string='IMAGENET1K_V2'
                )
                if in1k_v2_span is not None:
                    m_dict, _ = self._info_on_dataset(
                        m_ver,
                        m_type,
                        in1k_v2_span
                    )
                    in1k_v2.append(m_dict)
        return in1k_v1, in1k_v2

    def _split_generators(self, _):
        """Produce one split per weight generation (V1 / V2).

        The dl_manager argument is unused: data comes from live scraping.
        """
        in1k_v1, in1k_v2 = self._generate_dataset(_URL)
        return [
            datasets.SplitGenerator(
                name="IMAGENET1K_V1",
                gen_kwargs={
                    "subset": in1k_v1,
                },
            ),
            datasets.SplitGenerator(
                name="IMAGENET1K_V2",
                gen_kwargs={
                    "subset": in1k_v2,
                },
            ),
        ]

    def _generate_examples(self, subset):
        """Yield (index, record) pairs for one split."""
        for i, model in enumerate(subset):
            yield i, {
                "ver": model['ver'],
                "type": model['type'],
                "input_size": model['input_size'],
                # Placeholder value — the real classifier output size is
                # not scraped; TODO confirm intended semantics.
                "output_size": 1234,
                "url": model['url'],
                # "md5": self._get_file_md5(model['url']),
            }