Code: string, 103 to 85.9k characters
Summary: list of strings, 0 to 94 entries
Please provide a description of the function:def download_kaggle_data(self, competition_name): with self._downloader.tqdm(): kaggle_downloader = self._downloader.kaggle_downloader(competition_name) urls = kaggle_downloader.competition_urls files = kaggle_downloader.competition_files return _map_promise(self._download, dict((f, u) for (f, u) in zip(files, urls)))
[ "Download data for a given Kaggle competition." ]
Please provide a description of the function:def download(self, url_or_urls): # Add progress bar to follow the download state with self._downloader.tqdm(): return _map_promise(self._download, url_or_urls)
[ "Download given url(s).\n\n Args:\n url_or_urls: url or `list`/`dict` of urls to download and extract. Each\n url can be a `str` or `tfds.download.Resource`.\n\n Returns:\n downloaded_path(s): `str`, The downloaded paths matching the given input\n url_or_urls.\n " ]
Please provide a description of the function:def iter_archive(self, resource): if isinstance(resource, six.string_types): resource = resource_lib.Resource(path=resource) return extractor.iter_archive(resource.path, resource.extract_method)
[ "Returns iterator over files within archive.\n\n **Important Note**: caller should read files as they are yielded.\n Reading out of order is slow.\n\n Args:\n resource: path to archive or `tfds.download.Resource`.\n\n Returns:\n Generator yielding tuple (path_within_archive, file_obj).\n " ]
Please provide a description of the function:def extract(self, path_or_paths): # Add progress bar to follow the download state with self._extractor.tqdm(): return _map_promise(self._extract, path_or_paths)
[ "Extract given path(s).\n\n Args:\n path_or_paths: path or `list`/`dict` of path of file to extract. Each\n path can be a `str` or `tfds.download.Resource`.\n\n If not explicitly specified in `Resource`, the extraction method is deduced\n from downloaded file name.\n\n Returns:\n extracted_path(s): `str`, The extracted paths matching the given input\n path_or_paths.\n " ]
Please provide a description of the function:def download_and_extract(self, url_or_urls): # Add progress bar to follow the download state with self._downloader.tqdm(): with self._extractor.tqdm(): return _map_promise(self._download_extract, url_or_urls)
[ "Download and extract given url_or_urls.\n\n Is roughly equivalent to:\n\n ```\n extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))\n ```\n\n Args:\n url_or_urls: url or `list`/`dict` of urls to download and extract. Each\n url can be a `str` or `tfds.download.Resource`.\n\n If not explicitly specified in `Resource`, the extraction method will\n automatically be deduced from downloaded file name.\n\n Returns:\n extracted_path(s): `str`, extracted paths of given URL(s).\n " ]
Please provide a description of the function:def manual_dir(self): if not tf.io.gfile.exists(self._manual_dir): raise AssertionError( 'Manual directory {} does not exist. Create it and download/extract ' 'dataset artifacts in there.'.format(self._manual_dir)) return self._manual_dir
[ "Returns the directory containing the manually extracted data." ]
Please provide a description of the function:def _make_builder_configs(): config_list = [] for corruption in _CORRUPTIONS: for severity in range(1, 6): config_list.append( Cifar10CorruptedConfig( name=corruption + '_' + str(severity), version='0.0.1', description='Corruption method: ' + corruption + ', severity level: ' + str(severity), corruption_type=corruption, severity=severity, )) return config_list
[ "Construct a list of BuilderConfigs.\n\n Construct a list of 75 Cifar10CorruptedConfig objects, corresponding to\n the 15 corruption types and 5 severities.\n\n Returns:\n A list of 75 Cifar10CorruptedConfig objects.\n " ]
Please provide a description of the function:def _split_generators(self, dl_manager): path = dl_manager.download_and_extract(_DOWNLOAD_URL) return [ tfds.core.SplitGenerator( name=tfds.Split.TEST, num_shards=1, gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)}) ]
[ "Return the test split of Cifar10.\n\n Args:\n dl_manager: download manager object.\n\n Returns:\n test split.\n " ]
Please provide a description of the function:def _generate_examples(self, data_dir): corruption = self.builder_config.corruption severity = self.builder_config.severity images_file = os.path.join(data_dir, _CORRUPTIONS_TO_FILENAMES[corruption]) labels_file = os.path.join(data_dir, _LABELS_FILENAME) with tf.io.gfile.GFile(labels_file, mode='rb') as f: labels = np.load(f) num_images = labels.shape[0] // 5 # Labels are stacked 5 times so we can just read the first iteration labels = labels[:num_images] with tf.io.gfile.GFile(images_file, mode='rb') as f: images = np.load(f) # Slice images corresponding to correct severity level images = images[(severity - 1) * num_images:severity * num_images] for image, label in zip(images, labels): yield { 'image': image, 'label': label, }
[ "Generate corrupted Cifar10 test data.\n\n Apply corruptions to the raw images according to self.corruption_type.\n\n Args:\n data_dir: root directory of downloaded dataset\n\n Yields:\n dictionary with image file and label.\n " ]
Please provide a description of the function:def document_single_builder(builder): mod_name = builder.__class__.__module__ cls_name = builder.__class__.__name__ mod_file = sys.modules[mod_name].__file__ if mod_file.endswith("pyc"): mod_file = mod_file[:-1] description_prefix = "" if builder.builder_configs: # Dataset with configs; document each one config_docs = [] for config in builder.BUILDER_CONFIGS: builder = tfds.builder(builder.name, config=config) info = builder.info # TODO(rsepassi): document the actual config object config_doc = SINGLE_CONFIG_ENTRY.format( builder_name=builder.name, config_name=config.name, description=config.description, version=config.version, feature_information=make_feature_information(info), size=tfds.units.size_str(info.size_in_bytes), ) config_docs.append(config_doc) out_str = DATASET_WITH_CONFIGS_ENTRY.format( snakecase_name=builder.name, module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name), cls_url=cls_url(mod_name), config_names="\n".join([ CONFIG_BULLET.format(name=config.name, description=config.description, version=config.version, size=tfds.units.size_str(tfds.builder( builder.name, config=config) .info.size_in_bytes)) for config in builder.BUILDER_CONFIGS]), config_cls="%s.%s" % (tfds_mod_name(mod_name), type(builder.builder_config).__name__), configs="\n".join(config_docs), urls=format_urls(info.urls), url=url_from_info(info), supervised_keys=str(info.supervised_keys), citation=make_citation(info.citation), statistics_information=make_statistics_information(info), description=builder.info.description, description_prefix=description_prefix, ) else: info = builder.info out_str = DATASET_ENTRY.format( snakecase_name=builder.name, module_and_class="%s.%s" % (tfds_mod_name(mod_name), cls_name), cls_url=cls_url(mod_name), description=info.description, description_prefix=description_prefix, version=info.version, feature_information=make_feature_information(info), statistics_information=make_statistics_information(info), urls=format_urls(info.urls), url=url_from_info(info), supervised_keys=str(info.supervised_keys), citation=make_citation(info.citation), size=tfds.units.size_str(info.size_in_bytes), ) out_str = schema_org(builder) + "\n" + out_str return out_str
[ "Doc string for a single builder, with or without configs." ]
Please provide a description of the function:def make_module_to_builder_dict(datasets=None): # pylint: disable=g-long-lambda # dict to hold tfds->image->mnist->[builders] module_to_builder = collections.defaultdict( lambda: collections.defaultdict( lambda: collections.defaultdict(list))) # pylint: enable=g-long-lambda if datasets: builders = [tfds.builder(name) for name in datasets] else: builders = [ tfds.builder(name) for name in tfds.list_builders() if name not in BUILDER_BLACKLIST ] + [tfds.builder("image_label_folder", dataset_name="image_label_folder")] for builder in builders: mod_name = builder.__class__.__module__ modules = mod_name.split(".") if "testing" in modules: continue current_mod_ctr = module_to_builder for mod in modules: current_mod_ctr = current_mod_ctr[mod] current_mod_ctr.append(builder) module_to_builder = module_to_builder["tensorflow_datasets"] return module_to_builder
[ "Get all builders organized by module in nested dicts." ]
Please provide a description of the function:def _pprint_features_dict(features_dict, indent=0, add_prefix=True): first_last_indent_str = " " * indent indent_str = " " * (indent + 4) first_line = "%s%s({" % ( first_last_indent_str if add_prefix else "", type(features_dict).__name__, ) lines = [first_line] for k in sorted(list(features_dict.keys())): v = features_dict[k] if isinstance(v, tfds.features.FeaturesDict): v_str = _pprint_features_dict(v, indent + 4, False) else: v_str = str(v) lines.append("%s'%s': %s," % (indent_str, k, v_str)) lines.append("%s})" % first_last_indent_str) return "\n".join(lines)
[ "Pretty-print tfds.features.FeaturesDict." ]
Please provide a description of the function:def make_statistics_information(info): if not info.splits.total_num_examples: # That means that we have yet to calculate the statistics for this. return "None computed" stats = [(info.splits.total_num_examples, "ALL")] for split_name, split_info in info.splits.items(): stats.append((split_info.num_examples, split_name.upper())) # Sort reverse on number of examples. stats.sort(reverse=True) stats = "\n".join([ "{0:10} | {1:>10,}".format(name, num_exs) for (num_exs, name) in stats ]) return STATISTICS_TABLE.format(split_statistics=stats)
[ "Make statistics information table." ]
Please provide a description of the function:def dataset_docs_str(datasets=None): module_to_builder = make_module_to_builder_dict(datasets) sections = sorted(list(module_to_builder.keys())) section_tocs = [] section_docs = [] for section in sections: builders = tf.nest.flatten(module_to_builder[section]) builders = sorted(builders, key=lambda b: b.name) builder_docs = [document_single_builder(builder) for builder in builders] section_doc = SECTION_DATASETS.format( section_name=section, datasets="\n".join(builder_docs)) section_toc = create_section_toc(section, builders) section_docs.append(section_doc) section_tocs.append(section_toc) full_doc = DOC.format(toc="\n".join(section_tocs), datasets="\n".join(section_docs)) return full_doc
[ "Create dataset documentation string for given datasets.\n\n Args:\n datasets: list of datasets for which to create documentation.\n If None, then all available datasets will be used.\n\n Returns:\n string describing the datasets (in the MarkDown format).\n " ]
Please provide a description of the function:def schema_org(builder): # pylint: disable=line-too-long # pylint: enable=line-too-long properties = [ (lambda x: x.name, SCHEMA_ORG_NAME), (lambda x: x.description, SCHEMA_ORG_DESC), (lambda x: x.name, SCHEMA_ORG_URL), (lambda x: (x.urls and x.urls[0]) or "", SCHEMA_ORG_SAMEAS) ] info = builder.info out_str = SCHEMA_ORG_PRE for extractor, template in properties: val = extractor(info) if val: # We are using cgi module instead of html due to Python 2 compatibility out_str += template.format(val=cgi.escape(val, quote=True).strip()) out_str += SCHEMA_ORG_POST return out_str
[ "Builds schema.org microdata for DatasetSearch from DatasetBuilder.\n\n Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset\n Testing tool: https://search.google.com/structured-data/testing-tool\n For Google Dataset Search: https://toolbox.google.com/datasetsearch\n\n Microdata format was chosen over JSON-LD due to the fact that Markdown\n rendering engines remove all <script> tags.\n\n Args:\n builder: `tfds.core.DatasetBuilder`\n\n Returns:\n HTML string with microdata\n " ]
Please provide a description of the function:def disk(radius, alias_blur=0.1, dtype=np.float32): if radius <= 8: length = np.arange(-8, 8 + 1) ksize = (3, 3) else: length = np.arange(-radius, radius + 1) ksize = (5, 5) x_axis, y_axis = np.meshgrid(length, length) aliased_disk = np.array((x_axis**2 + y_axis**2) <= radius**2, dtype=dtype) aliased_disk /= np.sum(aliased_disk) # supersample disk to antialias return tfds.core.lazy_imports.cv2.GaussianBlur( aliased_disk, ksize=ksize, sigmaX=alias_blur)
[ "Generating a Gaussian blurring kernel with disk shape.\n\n Generating a Gaussian blurring kernel with disk shape using cv2 API.\n\n Args:\n radius: integer, radius of blurring kernel.\n alias_blur: float, standard deviation of Gaussian blurring.\n dtype: data type of kernel\n\n Returns:\n cv2 object of the Gaussian blurring kernel.\n " ]
Please provide a description of the function:def clipped_zoom(img, zoom_factor): h = img.shape[0] ch = int(np.ceil(h / float(zoom_factor))) top_h = (h - ch) // 2 w = img.shape[1] cw = int(np.ceil(w / float(zoom_factor))) top_w = (w - cw) // 2 img = tfds.core.lazy_imports.scipy.ndimage.zoom( img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1), order=1) # trim off any extra pixels trim_top_h = (img.shape[0] - h) // 2 trim_top_w = (img.shape[1] - w) // 2 return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w]
[ "Zoom image with clipping.\n\n Zoom the central part of the image and clip extra pixels.\n\n Args:\n img: numpy array, uncorrupted image.\n zoom_factor: numpy array, a sequence of float numbers for zoom factor.\n\n Returns:\n numpy array, zoomed image after clipping.\n " ]
Please provide a description of the function:def plasma_fractal(mapsize=512, wibbledecay=3): if mapsize & (mapsize - 1) != 0: raise ValueError('mapsize must be a power of two.') maparray = np.empty((mapsize, mapsize), dtype=np.float_) maparray[0, 0] = 0 stepsize = mapsize wibble = 100 def wibbledmean(array): return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape) def fillsquares(): cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize] squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0) squareaccum += np.roll(squareaccum, shift=-1, axis=1) maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum) def filldiamonds(): mapsize = maparray.shape[0] drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize] ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize] ldrsum = drgrid + np.roll(drgrid, 1, axis=0) lulsum = ulgrid + np.roll(ulgrid, -1, axis=1) ltsum = ldrsum + lulsum maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum) tdrsum = drgrid + np.roll(drgrid, 1, axis=1) tulsum = ulgrid + np.roll(ulgrid, -1, axis=0) ttsum = tdrsum + tulsum maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum) while stepsize >= 2: fillsquares() filldiamonds() stepsize //= 2 wibble /= wibbledecay maparray -= maparray.min() return maparray / maparray.max()
[ "Generate a heightmap using diamond-square algorithm.\n\n Modification of the algorithm in\n https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py\n\n Args:\n mapsize: side length of the heightmap, must be a power of two.\n wibbledecay: integer, decay factor.\n\n Returns:\n numpy 2d array, side length 'mapsize', of floats in [0,255].\n ", "For each square, calculate middle value as mean of points + wibble.", "For each diamond, calculate middle value as meanof points + wibble." ]
Please provide a description of the function:def gaussian_noise(x, severity=1): c = [.08, .12, 0.18, 0.26, 0.38][severity - 1] x = np.array(x) / 255. x_clip = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255 return around_and_astype(x_clip)
[ "Gaussian noise corruption to images.\n\n Args:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\n Returns:\n numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.\n " ]
Please provide a description of the function:def shot_noise(x, severity=1): c = [60, 25, 12, 5, 3][severity - 1] x = np.array(x) / 255. x_clip = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255 return around_and_astype(x_clip)
[ "Shot noise corruption to images.\n\n Args:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\n Returns:\n numpy array, image with uint8 pixels in [0,255]. Added shot noise.\n " ]
Please provide a description of the function:def impulse_noise(x, severity=1): c = [.03, .06, .09, 0.17, 0.27][severity - 1] x = tfds.core.lazy_imports.skimage.util.random_noise( np.array(x) / 255., mode='s&p', amount=c) x_clip = np.clip(x, 0, 1) * 255 return around_and_astype(x_clip)
[ "Impulse noise corruption to images.\n\n Args:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\n Returns:\n numpy array, image with uint8 pixels in [0,255]. Added impulse noise.\n " ]
Please provide a description of the function:def defocus_blur(x, severity=1): c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1] x = np.array(x) / 255. kernel = disk(radius=c[0], alias_blur=c[1]) channels = [] for d in range(3): channels.append(tfds.core.lazy_imports.cv2.filter2D(x[:, :, d], -1, kernel)) channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3 x_clip = np.clip(channels, 0, 1) * 255 return around_and_astype(x_clip)
[ "Defocus blurring to images.\n\n Apply defocus blurring to images using Gaussian kernel.\n\n Args:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\n Returns:\n numpy array, image with uint8 pixels in [0,255]. Applied defocus blur.\n " ]
Please provide a description of the function:def frosted_glass_blur(x, severity=1): # sigma, max_delta, iterations c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][severity - 1] x = np.uint8( tfds.core.lazy_imports.skimage.filters.gaussian( np.array(x) / 255., sigma=c[0], multichannel=True) * 255) # locally shuffle pixels for _ in range(c[2]): for h in range(x.shape[0] - c[1], c[1], -1): for w in range(x.shape[1] - c[1], c[1], -1): dx, dy = np.random.randint(-c[1], c[1], size=(2,)) h_prime, w_prime = h + dy, w + dx # swap x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w] x_clip = np.clip( tfds.core.lazy_imports.skimage.filters.gaussian( x / 255., sigma=c[0], multichannel=True), 0, 1) x_clip *= 255 return around_and_astype(x_clip)
[ "Frosted glass blurring to images.\n\n Apply frosted glass blurring to images by shuffling pixels locally.\n\n Args:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\n Returns:\n numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.\n " ]
Please provide a description of the function:def zoom_blur(x, severity=1): c = [ np.arange(1, 1.11, 0.01), np.arange(1, 1.16, 0.01), np.arange(1, 1.21, 0.02), np.arange(1, 1.26, 0.02), np.arange(1, 1.31, 0.03) ][severity - 1] x = (np.array(x) / 255.).astype(np.float32) out = np.zeros_like(x) for zoom_factor in c: out += clipped_zoom(x, zoom_factor) x = (x + out) / (len(c) + 1) x_clip = np.clip(x, 0, 1) * 255 return around_and_astype(x_clip)
[ "Zoom blurring to images.\n\n Applying zoom blurring to images by zooming the central part of the images.\n\n Args:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\n Returns:\n numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.\n " ]
Please provide a description of the function:def fog(x, severity=1): c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1] x = np.array(x) / 255. max_val = x.max() mapsize = 512 shape = x.shape max_length = max(shape[0], shape[1]) if max_length > mapsize: mapsize = 2**int(np.ceil(np.log2(float(max_length)))) tmp = plasma_fractal(mapsize=mapsize, wibbledecay=c[1]) tmp = tmp[:x.shape[0], :x.shape[1]] tmp = tmp[..., np.newaxis] x += c[0] * tmp x_clip = np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255 return around_and_astype(x_clip)
[ "Fog corruption to images.\n\n Adding fog to images. Fog is generated by diamond-square algorithm.\n\n Args:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\n Returns:\n numpy array, image with uint8 pixels in [0,255]. Added fog.\n " ]
Please provide a description of the function:def brightness(x, severity=1): c = [.1, .2, .3, .4, .5][severity - 1] x = np.array(x) / 255. x = tfds.core.lazy_imports.skimage.color.rgb2hsv(x) x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1) x = tfds.core.lazy_imports.skimage.color.hsv2rgb(x) x_clip = np.clip(x, 0, 1) * 255 return around_and_astype(x_clip)
[ "Change brightness of images.\n\n Args:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\n Returns:\n numpy array, image with uint8 pixels in [0,255]. Changed brightness.\n " ]
Please provide a description of the function:def contrast(x, severity=1): c = [0.4, .3, .2, .1, .05][severity - 1] x = np.array(x) / 255. means = np.mean(x, axis=(0, 1), keepdims=True) x_clip = np.clip((x - means) * c + means, 0, 1) * 255 return around_and_astype(x_clip)
[ "Change contrast of images.\n\n Args:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\n Returns:\n numpy array, image with uint8 pixels in [0,255]. Changed contrast.\n " ]
Please provide a description of the function:def elastic(x, severity=1): c = [(244 * 2, 244 * 0.7, 244 * 0.1), (244 * 2, 244 * 0.08, 244 * 0.2), (244 * 0.05, 244 * 0.01, 244 * 0.02), (244 * 0.07, 244 * 0.01, 244 * 0.02), (244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1] image = np.array(x, dtype=np.float32) / 255. shape = image.shape shape_size = shape[:2] # random affine center_square = np.float32(shape_size) // 2 square_size = min(shape_size) // 3 pts1 = np.float32([ center_square + square_size, [center_square[0] + square_size, center_square[1] - square_size], center_square - square_size ]) pts2 = pts1 + np.random.uniform( -c[2], c[2], size=pts1.shape).astype(np.float32) affine_trans = tfds.core.lazy_imports.cv2.getAffineTransform(pts1, pts2) image = tfds.core.lazy_imports.cv2.warpAffine( image, affine_trans, shape_size[::-1], borderMode=tfds.core.lazy_imports.cv2.BORDER_REFLECT_101) dx = (tfds.core.lazy_imports.skimage.filters.gaussian( np.random.uniform(-1, 1, size=shape[:2]), c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32) dy = (tfds.core.lazy_imports.skimage.filters.gaussian( np.random.uniform(-1, 1, size=shape[:2]), c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32) dx, dy = dx[..., np.newaxis], dy[..., np.newaxis] x, y, z = np.meshgrid( np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2])) indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape( z, (-1, 1)) x_clip = np.clip( tfds.core.lazy_imports.scipy.ndimage.interpolation.map_coordinates( image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255 return around_and_astype(x_clip)
[ "Conduct elastic transform to images.\n\n Elastic transform is performed on small patches of the images.\n\n Args:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\n Returns:\n numpy array, image with uint8 pixels in [0,255]. Applied elastic transform.\n " ]
Please provide a description of the function:def pixelate(x, severity=1): c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1] shape = x.shape x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8)) x = x.resize((int(shape[1] * c), int(shape[0] * c))) x = x.resize((shape[1], shape[0])) return np.asarray(x)
[ "Pixelate images.\n\n Conduct pixelating corruptions to images by first shrinking the images and\n then resizing to original size.\n\n Args:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\n Returns:\n numpy array, image with uint8 pixels in [0,255]. Applied pixelating\n corruption.\n " ]
Please provide a description of the function:def jpeg_compression(x, severity=1): c = [25, 18, 15, 10, 7][severity - 1] x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8)) output = io.BytesIO() x.save(output, 'JPEG', quality=c) output.seek(0) x = tfds.core.lazy_imports.PIL_Image.open(output) return np.asarray(x)
[ "Conduct jpeg compression to images.\n\n Args:\n x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\n severity: integer, severity of corruption.\n\n Returns:\n numpy array, image with uint8 pixels in [0,255]. Applied jpeg compression.\n " ]
Please provide a description of the function:def temporary_assignment(obj, attr, value): original = getattr(obj, attr, None) setattr(obj, attr, value) yield setattr(obj, attr, original)
[ "Temporarily assign obj.attr to value." ]
Please provide a description of the function:def zip_dict(*dicts): for key in set(itertools.chain(*dicts)): # set merge all keys # Will raise KeyError if the dict don't have the same keys yield key, tuple(d[key] for d in dicts)
[ "Iterate over items of dictionaries grouped by their keys." ]
Please provide a description of the function:def map_nested(function, data_struct, dict_only=False, map_tuple=False): # Could add support for more exotic data_struct, like OrderedDict if isinstance(data_struct, dict): return { k: map_nested(function, v, dict_only, map_tuple) for k, v in data_struct.items() } elif not dict_only: types = [list] if map_tuple: types.append(tuple) if isinstance(data_struct, tuple(types)): mapped = [map_nested(function, v, dict_only, map_tuple) for v in data_struct] if isinstance(data_struct, list): return mapped else: return tuple(mapped) # Singleton return function(data_struct)
[ "Apply a function recursively to each element of a nested data struct." ]
Please provide a description of the function:def zip_nested(arg0, *args, **kwargs): # Python 2 do not support kwargs only arguments dict_only = kwargs.pop("dict_only", False) assert not kwargs # Could add support for more exotic data_struct, like OrderedDict if isinstance(arg0, dict): return { k: zip_nested(*a, dict_only=dict_only) for k, a in zip_dict(arg0, *args) } elif not dict_only: if isinstance(arg0, list): return [zip_nested(*a, dict_only=dict_only) for a in zip(arg0, *args)] # Singleton return (arg0,) + args
[ "Zip data struct together and return a data struct with the same shape." ]
Please provide a description of the function:def as_proto_cls(proto_cls): def decorator(cls): class ProtoCls(object): def __init__(self, *args, **kwargs): super(ProtoCls, self).__setattr__( "_ProtoCls__proto", proto_cls(*args, **kwargs), ) def __getattr__(self, attr_name): return getattr(self.__proto, attr_name) def __setattr__(self, attr_name, new_value): try: return setattr(self.__proto, attr_name, new_value) except AttributeError: return super(ProtoCls, self).__setattr__(attr_name, new_value) def __eq__(self, other): return self.__proto == other.get_proto() def get_proto(self): return self.__proto def __repr__(self): return "<{cls_name}\n{proto_repr}\n>".format( cls_name=cls.__name__, proto_repr=repr(self.__proto)) decorator_cls = type(cls.__name__, (cls, ProtoCls), { "__doc__": cls.__doc__, }) return decorator_cls return decorator
[ "Simulate proto inheritance.\n\n By default, protobuf do not support direct inheritance, so this decorator\n simulates inheritance to the class to which it is applied.\n\n Example:\n\n ```\n @as_proto_class(proto.MyProto)\n class A(object):\n def custom_method(self):\n return self.proto_field * 10\n\n p = proto.MyProto(proto_field=123)\n\n a = A()\n a.CopyFrom(p) # a is like a proto object\n assert a.proto_field == 123\n a.custom_method() # But has additional methods\n\n ```\n\n Args:\n proto_cls: The protobuf class to inherit from\n\n Returns:\n decorated_cls: The decorated class\n ", "Decorator applied to the class.", "Base class simulating the protobuf." ]
Please provide a description of the function:def tfds_dir(): return os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
[ "Path to tensorflow_datasets directory." ]
Please provide a description of the function:def atomic_write(path, mode): tmp_path = "%s%s_%s" % (path, constants.INCOMPLETE_SUFFIX, uuid.uuid4().hex) with tf.io.gfile.GFile(tmp_path, mode) as file_: yield file_ tf.io.gfile.rename(tmp_path, path, overwrite=True)
[ "Writes to path atomically, by writing to temp file and renaming it." ]
Please provide a description of the function:def read_checksum_digest(path, checksum_cls=hashlib.sha256): checksum = checksum_cls() size = 0 with tf.io.gfile.GFile(path, "rb") as f: while True: block = f.read(io.DEFAULT_BUFFER_SIZE) size += len(block) if not block: break checksum.update(block) return checksum.hexdigest(), size
[ "Given a hash constructor, returns checksum digest and size of file." ]
Please provide a description of the function:def reraise(additional_msg): exc_type, exc_value, exc_traceback = sys.exc_info() msg = str(exc_value) + "\n" + additional_msg six.reraise(exc_type, exc_type(msg), exc_traceback)
[ "Reraise an exception with an additional message." ]
Please provide a description of the function:def rgetattr(obj, attr, *args): def _getattr(obj, attr): return getattr(obj, attr, *args) return functools.reduce(_getattr, [obj] + attr.split("."))
[ "Get attr that handles dots in attr name." ]
Please provide a description of the function:def _split_generators(self, dl_manager): image_tar_file = os.path.join(dl_manager.manual_dir, self.builder_config.file_name) if not tf.io.gfile.exists(image_tar_file): # The current celebahq generation code depends on a concrete version of # pillow library and cannot be easily ported into tfds. msg = "You must download the dataset files manually and place them in: " msg += dl_manager.manual_dir msg += " as .tar files. See testing/test_data/fake_examples/celeb_a_hq " raise AssertionError(msg) return [ tfds.core.SplitGenerator( name=tfds.Split.TRAIN, num_shards=50, gen_kwargs={"archive": dl_manager.iter_archive(image_tar_file)}, ) ]
[ "Returns SplitGenerators." ]
Please provide a description of the function:def _generate_examples(self, source_file, target_file): with tf.io.gfile.GFile(source_file) as f: source_sentences = f.read().split("\n") with tf.io.gfile.GFile(target_file) as f: target_sentences = f.read().split("\n") assert len(target_sentences) == len( source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (len( source_sentences), len(target_sentences), source_file, target_file) source, target = self.builder_config.language_pair for l1, l2 in zip(source_sentences, target_sentences): result = {source: l1, target: l2} # Make sure that both translations are non-empty. if all(result.values()): yield result
[ "This function returns the examples in the raw (text) form." ]
Please provide a description of the function:def _generate_examples(self, filepath): rows_per_pair_id = collections.defaultdict(list) with tf.io.gfile.GFile(filepath) as f: reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE) for row in reader: rows_per_pair_id[row['pairID']].append(row) for rows in six.itervalues(rows_per_pair_id): premise = {row['language']: row['sentence1'] for row in rows} hypothesis = {row['language']: row['sentence2'] for row in rows} yield { 'premise': premise, 'hypothesis': hypothesis, 'label': rows[0]['gold_label'], }
[ "This function returns the examples in the raw (text) form." ]
Please provide a description of the function:def _generate_example(self, data_path, image_id): image_filepath = os.path.join( data_path, "VOCdevkit/VOC2007/JPEGImages", "{}.jpg".format(image_id)) annon_filepath = os.path.join( data_path, "VOCdevkit/VOC2007/Annotations", "{}.xml".format(image_id)) def _get_example_objects(): with tf.io.gfile.GFile(annon_filepath, "r") as f: root = xml.etree.ElementTree.parse(f).getroot() size = root.find("size") width = float(size.find("width").text) height = float(size.find("height").text) for obj in root.findall("object"): # Get object's label name. label = obj.find("name").text.lower() # Get objects' pose name. pose = obj.find("pose").text.lower() is_truncated = (obj.find("truncated").text == "1") is_difficult = (obj.find("difficult").text == "1") bndbox = obj.find("bndbox") xmax = float(bndbox.find("xmax").text) xmin = float(bndbox.find("xmin").text) ymax = float(bndbox.find("ymax").text) ymin = float(bndbox.find("ymin").text) yield { "label": label, "pose": pose, "bbox": tfds.features.BBox( ymin / height, xmin / width, ymax / height, xmax / width), "is_truncated": is_truncated, "is_difficult": is_difficult, } objects = list(_get_example_objects()) # Use set() to remove duplicates labels = sorted(set(obj["label"] for obj in objects)) labels_no_difficult = sorted(set( obj["label"] for obj in objects if obj["is_difficult"] == 0 )) return { "image": image_filepath, "image/filename": image_id + ".jpg", "objects": objects, "labels": labels, "labels_no_difficult": labels_no_difficult, }
[ "Yields examples.", "Function to get all the objects from the annotation XML file." ]
Please provide a description of the function:def set_encoding_format(self, encoding_format): supported = ENCODE_FN.keys() if encoding_format not in supported: raise ValueError('`encoding_format` must be one of %s.' % supported) self._encoding_format = encoding_format
[ "Update the encoding format." ]
Please provide a description of the function:def set_shape(self, shape): channels = shape[-1] acceptable_channels = ACCEPTABLE_CHANNELS[self._encoding_format] if channels not in acceptable_channels: raise ValueError('Acceptable `channels` for %s: %s (was %s)' % ( self._encoding_format, acceptable_channels, channels)) self._shape = tuple(shape)
[ "Update the shape." ]
Please provide a description of the function:def _encode_image(self, np_image): if np_image.dtype != np.uint8: raise ValueError('Image should be uint8. Detected: %s.' % np_image.dtype) utils.assert_shape_match(np_image.shape, self._shape) return self._runner.run(ENCODE_FN[self._encoding_format], np_image)
[ "Returns np_image encoded as jpeg or png." ]
Please provide a description of the function:def encode_example(self, image_or_path_or_fobj): if isinstance(image_or_path_or_fobj, np.ndarray): encoded_image = self._encode_image(image_or_path_or_fobj) elif isinstance(image_or_path_or_fobj, six.string_types): with tf.io.gfile.GFile(image_or_path_or_fobj, 'rb') as image_f: encoded_image = image_f.read() else: encoded_image = image_or_path_or_fobj.read() return encoded_image
[ "Convert the given image into a dict convertible to tf example." ]
Please provide a description of the function:def decode_example(self, example): img = tf.image.decode_image( example, channels=self._shape[-1], dtype=tf.uint8) img.set_shape(self._shape) return img
[ "Reconstruct the image from the tf example." ]
Please provide a description of the function:def save_metadata(self, data_dir, feature_name=None): filepath = _get_metadata_filepath(data_dir, feature_name) with tf.io.gfile.GFile(filepath, 'w') as f: json.dump({ 'shape': [-1 if d is None else d for d in self._shape], 'encoding_format': self._encoding_format, }, f, sort_keys=True)
[ "See base class for details." ]
Please provide a description of the function:def load_metadata(self, data_dir, feature_name=None): # Restore names if defined filepath = _get_metadata_filepath(data_dir, feature_name) if tf.io.gfile.exists(filepath): with tf.io.gfile.GFile(filepath, 'r') as f: info_data = json.load(f) self.set_encoding_format(info_data['encoding_format']) self.set_shape([None if d == -1 else d for d in info_data['shape']])
[ "See base class for details." ]
Please provide a description of the function:def _create_moving_sequence(image, pad_lefts, total_padding): with tf.name_scope("moving_sequence"): def get_padded_image(args): pad_left, = args pad_right = total_padding - pad_left padding = tf.stack([pad_left, pad_right], axis=-1) z = tf.zeros((1, 2), dtype=pad_left.dtype) padding = tf.concat([padding, z], axis=0) return tf.pad(image, padding) padded_images = tf.map_fn( get_padded_image, [pad_lefts], dtype=tf.uint8, infer_shape=False, back_prop=False) return padded_images
[ "Create a moving image sequence from the given image a left padding values.\n\n Args:\n image: [in_h, in_w, n_channels] uint8 array\n pad_lefts: [sequence_length, 2] int32 array of left padding values\n total_padding: tensor of padding values, (pad_h, pad_w)\n\n Returns:\n [sequence_length, out_h, out_w, n_channels] uint8 image sequence, where\n out_h = in_h + pad_h, out_w = in_w + out_w\n " ]
Please provide a description of the function:def _get_linear_trajectory(x0, velocity, t): x0 = tf.convert_to_tensor(x0) velocity = tf.convert_to_tensor(velocity) t = tf.convert_to_tensor(t) if x0.shape.ndims != 1: raise ValueError("x0 must be a rank 1 tensor") if velocity.shape.ndims != 1: raise ValueError("velocity must be a rank 1 tensor") if t.shape.ndims != 1: raise ValueError("t must be a rank 1 tensor") x0 = tf.expand_dims(x0, axis=0) velocity = tf.expand_dims(velocity, axis=0) dx = velocity * tf.expand_dims(t, axis=-1) linear_trajectories = x0 + dx assert linear_trajectories.shape.ndims == 2, \ "linear_trajectories should be a rank 2 tensor" return linear_trajectories
[ "Construct a linear trajectory from x0.\n\n Args:\n x0: N-D float tensor.\n velocity: N-D float tensor\n t: [sequence_length]-length float tensor\n\n Returns:\n x: [sequence_length, ndims] float tensor.\n " ]
Please provide a description of the function:def image_as_moving_sequence( image, sequence_length=20, output_size=(64, 64), velocity=0.1, start_position=None): ndims = 2 image = tf.convert_to_tensor(image) if image.shape.ndims != 3: raise ValueError("image must be rank 3, got %s" % str(image)) output_size = tf.TensorShape(output_size) if len(output_size) != ndims: raise ValueError("output_size must have exactly %d elements, got %s" % (ndims, output_size)) image_shape = tf.shape(image) if start_position is None: start_position = tf.random.uniform((ndims,), dtype=tf.float32) elif start_position.shape != (ndims,): raise ValueError("start_positions must (%d,)" % ndims) velocity = tf.convert_to_tensor(velocity, dtype=tf.float32) if velocity.shape.ndims == 0: velocity = _get_random_unit_vector(ndims, tf.float32) * velocity elif velocity.shape.ndims != 1: raise ValueError("velocity must be rank 0 or rank 1, got %s" % velocity) t = tf.range(sequence_length, dtype=tf.float32) trajectory = _get_linear_trajectory(start_position, velocity, t) trajectory = _bounce_to_bbox(trajectory) total_padding = output_size - image_shape[:2] if not tf.executing_eagerly(): cond = tf.compat.v1.assert_greater(total_padding, -1) with tf.control_dependencies([cond]): total_padding = tf.identity(total_padding) sequence_pad_lefts = tf.cast( tf.math.round(trajectory * tf.cast(total_padding, tf.float32)), tf.int32) sequence = _create_moving_sequence(image, sequence_pad_lefts, total_padding) sequence.set_shape( [sequence_length] + output_size.as_list() + [image.shape[-1]]) return MovingSequence( image_sequence=sequence, trajectory=trajectory, start_position=start_position, velocity=velocity)
[ "Turn simple static images into sequences of the originals bouncing around.\n\n Adapted from Srivastava et al.\n http://www.cs.toronto.edu/~nitish/unsupervised_video/\n\n Example usage:\n ```python\n import tensorflow as tf\n import tensorflow_datasets as tfds\n from tensorflow_datasets.video import moving_sequence\n tf.compat.v1.enable_eager_execution()\n\n def animate(sequence):\n import numpy as np\n import matplotlib.pyplot as plt\n import matplotlib.animation as animation\n sequence = np.squeeze(sequence, axis=-1)\n\n fig = plt.figure()\n plt.axis(\"off\")\n ims = [[plt.imshow(im, cmap=\"gray\", animated=True)] for im in sequence]\n # don't remove `anim =` as linter may suggets\n # weird behaviour, plot will freeze on last frame\n anim = animation.ArtistAnimation(\n fig, ims, interval=50, blit=True, repeat_delay=100)\n\n plt.show()\n plt.close()\n\n\n tf.enable_eager_execution()\n mnist_ds = tfds.load(\"mnist\", split=tfds.Split.TRAIN, as_supervised=True)\n mnist_ds = mnist_ds.repeat().shuffle(1024)\n\n def map_fn(image, label):\n sequence = moving_sequence.image_as_moving_sequence(\n image, sequence_length=20)\n return sequence.image_sequence\n\n moving_mnist_ds = mnist_ds.map(map_fn).batch(2).map(\n lambda x: dict(image_sequence=tf.reduce_max(x, axis=0)))\n\n # # for comparison with test data provided by original authors\n # moving_mnist_ds = tfds.load(\"moving_mnist\", split=tfds.Split.TEST)\n\n for seq in moving_mnist_ds:\n animate(seq[\"image_sequence\"].numpy())\n ```\n\n Args:\n image: [in_h, in_w, n_channels] tensor defining the sub-image to be bouncing\n around.\n sequence_length: int, length of sequence.\n output_size: (out_h, out_w) size returned images.\n velocity: scalar speed or 2D velocity of image. If scalar, the 2D\n velocity is randomly generated with this magnitude. This is the\n normalized distance moved each time step by the sub-image, where\n normalization occurs over the feasible distance the sub-image can move\n e.g if the input image is [10 x 10] and the output image is [60 x 60],\n a speed of 0.1 means the sub-image moves (60 - 10) * 0.1 = 5 pixels per\n time step.\n start_position: 2D float32 normalized initial position of each\n image in [0, 1]. Randomized uniformly if not given.\n\n Returns:\n `MovingSequence` namedtuple containing:\n `image_sequence`:\n [sequence_length, out_h, out_w, n_channels] image at each time step.\n padded values are all zero. Same dtype as input image.\n `trajectory`: [sequence_length, 2] float32 in [0, 1]\n 2D normalized coordinates of the image at every time step.\n `start_position`: 2D float32 initial position in [0, 1].\n 2D normalized initial position of image. Same as input if provided,\n otherwise the randomly value generated.\n `velocity`: 2D float32 normalized velocity. Same as input velocity\n if provided as a 2D tensor, otherwise the random velocity generated.\n " ]
Please provide a description of the function:def _split_generators(self, dl_manager): dl_urls = { split: _BASE_DOWNLOAD_PATH + "%s.tfrecord" % split for split in _SPLITS } dl_urls["instrument_labels"] = (_BASE_DOWNLOAD_PATH + "instrument_labels.txt") dl_paths = dl_manager.download_and_extract(dl_urls) instrument_labels = tf.io.gfile.GFile(dl_paths["instrument_labels"], "r").read().strip().split("\n") self.info.features["instrument"]["label"].names = instrument_labels return [ tfds.core.SplitGenerator( # pylint: disable=g-complex-comprehension name=split, num_shards=_SPLIT_SHARDS[split], gen_kwargs={"path": dl_paths[split]}) for split in _SPLITS ]
[ "Returns splits." ]
Please provide a description of the function:def _str_to_version(version_str, allow_wildcard=False): reg = _VERSION_WILDCARD_REG if allow_wildcard else _VERSION_RESOLVED_REG res = reg.match(version_str) if not res: msg = "Invalid version '{}'. Format should be x.y.z".format(version_str) if allow_wildcard: msg += " with {x,y,z} being digits or wildcard." else: msg += " with {x,y,z} being digits." raise ValueError(msg) return tuple( v if v == "*" else int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
[ "Return the tuple (major, minor, patch) version extracted from the str." ]
Please provide a description of the function:def match(self, other_version): major, minor, patch = _str_to_version(other_version, allow_wildcard=True) return (major in [self.major, "*"] and minor in [self.minor, "*"] and patch in [self.patch, "*"])
[ "Returns True if other_version matches.\n\n Args:\n other_version: string, of the form \"x[.y[.x]]\" where {x,y,z} can be a\n number or a wildcard.\n " ]
Please provide a description of the function:def _get_validation_labels(val_path): labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME) with tf.io.gfile.GFile(labels_path) as labels_f: labels = labels_f.read().strip().split('\n') with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj: tar = tarfile.open(mode='r:', fileobj=tar_f_obj) images = sorted(tar.getnames()) return dict(zip(images, labels))
[ "Returns labels for validation.\n\n Args:\n val_path: path to TAR file containing validation images. It is used to\n retrieve the name of pictures and associate them to labels.\n\n Returns:\n dict, mapping from image name (str) to label (str).\n " ]
Please provide a description of the function:def _generate_examples(self, archive, validation_labels=None): if validation_labels: # Validation split for example in self._generate_examples_validation(archive, validation_labels): yield example # Training split. Main archive contains archives names after a synset noun. # Each sub-archive contains pictures associated to that synset. for fname, fobj in archive: label = fname[:-4] # fname is something like 'n01632458.tar' # TODO(b/117643231): in py3, the following lines trigger tarfile module # to call `fobj.seekable()`, which Gfile doesn't have. We should find an # alternative, as this loads ~150MB in RAM. fobj_mem = io.BytesIO(fobj.read()) for image_fname, image_fobj in tfds.download.iter_archive( fobj_mem, tfds.download.ExtractMethod.TAR): yield { 'file_name': image_fname, 'image': image_fobj, 'label': label, }
[ "Yields examples." ]
Please provide a description of the function:def do_files_exist(filenames): preexisting = [tf.io.gfile.exists(f) for f in filenames] return any(preexisting)
[ "Whether any of the filenames exist." ]
Please provide a description of the function:def get_incomplete_path(filename): random_suffix = "".join( random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) return filename + ".incomplete" + random_suffix
[ "Returns a temporary filename based on filename." ]
Please provide a description of the function:def _incomplete_files(filenames): tmp_files = [get_incomplete_path(f) for f in filenames] try: yield tmp_files for tmp, output in zip(tmp_files, filenames): tf.io.gfile.rename(tmp, output) finally: for tmp in tmp_files: if tf.io.gfile.exists(tmp): tf.io.gfile.remove(tmp)
[ "Create temporary files for filenames and rename on exit." ]
Please provide a description of the function:def incomplete_dir(dirname): tmp_dir = get_incomplete_path(dirname) tf.io.gfile.makedirs(tmp_dir) try: yield tmp_dir tf.io.gfile.rename(tmp_dir, dirname) finally: if tf.io.gfile.exists(tmp_dir): tf.io.gfile.rmtree(tmp_dir)
[ "Create temporary dir for dirname and rename on exit." ]
Please provide a description of the function:def _shuffle_tfrecord(path, random_gen): # Read all records record_iter = tf.compat.v1.io.tf_record_iterator(path) all_records = [ r for r in utils.tqdm( record_iter, desc="Reading...", unit=" examples", leave=False) ] # Shuffling in memory random_gen.shuffle(all_records) # Write all record back with tf.io.TFRecordWriter(path) as writer: for record in utils.tqdm( all_records, desc="Writing...", unit=" examples", leave=False): writer.write(record)
[ "Shuffle a single record file in memory." ]
Please provide a description of the function:def _write_tfrecords_from_generator(generator, output_files, shuffle=True): if do_files_exist(output_files): raise ValueError( "Pre-processed files already exists: {}.".format(output_files)) with _incomplete_files(output_files) as tmp_files: # Write all shards writers = [tf.io.TFRecordWriter(fname) for fname in tmp_files] with _close_on_exit(writers) as writers: logging.info("Writing TFRecords") _round_robin_write(writers, generator) # Shuffle each shard if shuffle: # WARNING: Using np instead of Python random because Python random # produce different values between Python 2 and 3 and between # architectures random_gen = np.random.RandomState(42) for path in utils.tqdm( tmp_files, desc="Shuffling...", unit=" shard", leave=False): _shuffle_tfrecord(path, random_gen=random_gen)
[ "Writes generated str records to output_files in round-robin order." ]
Please provide a description of the function:def _round_robin_write(writers, generator): for i, example in enumerate(utils.tqdm( generator, unit=" examples", leave=False)): writers[i % len(writers)].write(example)
[ "Write records from generator round-robin across writers." ]
Please provide a description of the function:def _item_to_tf_feature(item, key_name): v = item if isinstance(v, (list, tuple)) and not v: raise ValueError( "Feature {} received an empty list value, so is unable to infer the " "feature type to record. To support empty value, the corresponding " "FeatureConnector should return a numpy array with the correct dtype " "instead of a Python list.".format(key_name) ) # Handle strings/bytes first if isinstance(v, (six.binary_type, six.string_types)): v = [tf.compat.as_bytes(v)] return tf.train.Feature(bytes_list=tf.train.BytesList(value=v)) elif (isinstance(v, (tuple, list)) and all(isinstance(x, (six.binary_type, six.string_types)) for x in v)): v = [tf.compat.as_bytes(x) for x in v] return tf.train.Feature(bytes_list=tf.train.BytesList(value=v)) elif (isinstance(v, np.ndarray) and (v.dtype.kind in ("U", "S") or v.dtype == object)): # binary or unicode v = [tf.compat.as_bytes(x) for x in v.flatten()] return tf.train.Feature(bytes_list=tf.train.BytesList(value=v)) # Use NumPy for numeric types v = np.array(v).flatten() # Convert v into a 1-d array if np.issubdtype(v.dtype, np.integer): return tf.train.Feature(int64_list=tf.train.Int64List(value=v)) elif np.issubdtype(v.dtype, np.floating): return tf.train.Feature(float_list=tf.train.FloatList(value=v)) else: raise ValueError( "Value received: {}.\n" "tf.train.Feature does not support type {} for feature key {}. " "This may indicate that one of the FeatureConnectors received an " "unsupported value as input.".format(repr(v), repr(type(v)), key_name) )
[ "Single item to a tf.train.Feature." ]
Please provide a description of the function:def _dict_to_tf_features(example_dict): features = {k: _item_to_tf_feature(v, k) for k, v in six.iteritems(example_dict)} return tf.train.Features(feature=features)
[ "Builds tf.train.Features from (string -> int/float/str list) dictionary." ]
Please provide a description of the function:def _async_tqdm(*args, **kwargs): with tqdm_lib.tqdm(*args, **kwargs) as pbar: pbar = _TqdmPbarAsync(pbar) yield pbar pbar.clear() # pop pbar from the active list of pbar print()
[ "Wrapper around Tqdm which can be updated in threads.\n\n Usage:\n\n ```\n with utils.async_tqdm(...) as pbar:\n # pbar can then be modified inside a thread\n # pbar.update_total(3)\n # pbar.update()\n ```\n\n Args:\n *args: args of tqdm\n **kwargs: kwargs of tqdm\n\n Yields:\n pbar: Async pbar which can be shared between threads.\n " ]
Please provide a description of the function:def update_total(self, n=1): with self._lock: self._pbar.total += n self.refresh()
[ "Increment total pbar value." ]
Please provide a description of the function:def update(self, n=1): with self._lock: self._pbar.update(n) self.refresh()
[ "Increment current value." ]
Please provide a description of the function:def _build_pcollection(self, pipeline, folder, split): beam = tfds.core.lazy_imports.apache_beam split_type = self.builder_config.split_type filename = os.path.join(folder, "{}.tar.gz".format(split_type)) def _extract_data(inputs): filename, split = inputs with tf.io.gfile.GFile(filename, "rb") as f: with tarfile.open(fileobj=f, mode="r") as tar: for tarinfo in tar: split_name = tarinfo.name.split("_") if len(split_name) > 2 and split_name[2] == split: buf = six.BytesIO() shutil.copyfileobj(tar.extractfile(tarinfo), buf) yield [tarinfo.name, buf.getvalue()] def _process_example(inputs): filename, data_string = inputs buf = six.BytesIO(data_string) buf.seek(0) data = np.load(buf) # Extract the images and convert to uint8. The reshape is required, see # https://github.com/deepmind/abstract-reasoning-matrices. all_images = np.uint8(data["image"].reshape(16, 160, 160, 1)) return { "relation_structure_encoded": data["relation_structure_encoded"], "target": data["target"], "meta_target": data["meta_target"], "context": all_images[:8], "answers": all_images[8:], "filename": filename, } # Beam might fuse together the _extract_data and _process_example which # defeats the purpose of parallel processing. As a result, we reshard by # doing a GroupByKey on random keys, and then flattening again. def _add_random_keys(inputs): key = str(random.randrange(10**10)) return key, inputs def _remove_keys(inputs): _, rows = inputs for row in rows: yield row return (pipeline | beam.Create([(filename, split)]) | beam.FlatMap(_extract_data) | beam.Map(_add_random_keys) | beam.GroupByKey() | beam.FlatMap(_remove_keys) | beam.Map(_process_example))
[ "Generate examples as dicts.", "Extracts files from the tar archives." ]
Please provide a description of the function:def _copy(src_file, dest_path): tf.io.gfile.makedirs(os.path.dirname(dest_path)) with tf.io.gfile.GFile(dest_path, 'wb') as dest_file: while True: data = src_file.read(io.DEFAULT_BUFFER_SIZE) if not data: break dest_file.write(data)
[ "Copy data read from src file obj to new file in dest_path." ]
Please provide a description of the function:def iter_tar(arch_f, gz=False, stream=False): read_type = 'r' + ('|' if stream else ':') if gz: read_type += 'gz' with _open_or_pass(arch_f) as fobj: tar = tarfile.open(mode=read_type, fileobj=fobj) for member in tar: extract_file = tar.extractfile(member) if extract_file: # File with data (not directory): path = _normpath(member.path) if not path: continue yield [path, extract_file]
[ "Iter over tar archive, yielding (path, object-like) tuples.\n\n Args:\n arch_f: File object of the archive to iterate.\n gz: If True, open a gzip'ed archive.\n stream: If True, open the archive in stream mode which allows for faster\n processing and less temporary disk consumption, but random access to the\n file is not allowed.\n\n Yields:\n (filepath, extracted_fobj) for each file in the archive.\n " ]
Please provide a description of the function:def tqdm(self): with utils.async_tqdm( total=0, desc='Extraction completed...', unit=' file') as pbar_path: self._pbar_path = pbar_path yield
[ "Add a progression bar for the current extraction." ]
Please provide a description of the function:def extract(self, path, extract_method, to_path): self._pbar_path.update_total(1) if extract_method not in _EXTRACT_METHODS: raise ValueError('Unknown extraction method "%s".' % extract_method) future = self._executor.submit(self._sync_extract, path, extract_method, to_path) return promise.Promise.resolve(future)
[ "Returns `promise.Promise` => to_path." ]
Please provide a description of the function:def _sync_extract(self, from_path, method, to_path): to_path_tmp = '%s%s_%s' % (to_path, constants.INCOMPLETE_SUFFIX, uuid.uuid4().hex) try: for path, handle in iter_archive(from_path, method): _copy(handle, path and os.path.join(to_path_tmp, path) or to_path_tmp) except BaseException as err: msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err) raise ExtractError(msg) # `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty # directories, so delete destination first, if it already exists. if tf.io.gfile.exists(to_path): tf.io.gfile.rmtree(to_path) tf.io.gfile.rename(to_path_tmp, to_path) self._pbar_path.update(1) return to_path
[ "Returns `to_path` once resource has been extracted there." ]
Please provide a description of the function:def to_serialized_field(tensor_info): # Select the type dtype = tensor_info.dtype # TODO(b/119937875): TF Examples proto only support int64, float32 and string # This create limitation like float64 downsampled to float32, bool converted # to int64 which is space ineficient, no support for complexes or quantized if tensor_info.dtype.is_integer or tensor_info.dtype.is_bool: dtype = tf.int64 elif tensor_info.dtype.is_floating: dtype = tf.float32 # It seems quite space inefficient to convert bool to int64 # We may want to add support for complex, quantize dtype in the future # TFRecord only support 3 types if dtype not in (tf.int64, tf.float32, tf.string): raise NotImplementedError( 'Serialization not implemented for {}'.format(dtype)) # Select the feature proto type in function of the unknown shape if (tensor_info.shape is not None and # Shape is a sequence (None, ...) tensor_info.shape.count(None) == 1 and tensor_info.shape[0] is None): return tf.io.FixedLenSequenceFeature( shape=tensor_info.shape[1:], dtype=dtype, allow_missing=True, ) # At least one dimension is undefined elif tensor_info.shape is None or None in tensor_info.shape: return tf.io.VarLenFeature(dtype=dtype) else: return tf.io.FixedLenFeature( shape=tensor_info.shape, dtype=dtype, )
[ "Convert a `TensorInfo` object into a feature proto object." ]
Please provide a description of the function:
def to_feature(value):
  if isinstance(value, FeatureConnector):
    return value
  elif utils.is_dtype(value):  # tf.int32, tf.string,...
    return Tensor(shape=(), dtype=tf.as_dtype(value))
  elif isinstance(value, dict):
    return FeaturesDict(value)
  else:
    raise ValueError('Feature not supported: {}'.format(value))
[ "Convert the given value to Feature if necessary." ]
Please provide a description of the function:
def decode_single_feature_from_dict(
    feature_k,
    feature,
    tfexample_dict):
  # Singleton case
  if not feature.serialized_keys:
    data_to_decode = tfexample_dict[feature_k]
  # Feature contains sub features
  else:
    # Extract the sub-features from the global feature dict
    data_to_decode = {
        k: tfexample_dict[posixpath.join(feature_k, k)]
        for k in feature.serialized_keys
    }
  return feature.decode_example(data_to_decode)
[ "Decode the given feature from the tfexample_dict.\n\n Args:\n feature_k (str): Feature key in the tfexample_dict\n feature (FeatureConnector): Connector object to use to decode the field\n tfexample_dict (dict): Dict containing the data to decode.\n\n Returns:\n decoded_feature: The output of the feature.decode_example\n " ]
Please provide a description of the function:
def _assert_keys_match(keys1, keys2):
  if set(keys1) != set(keys2):
    raise ValueError('{} {}'.format(list(keys1), list(keys2)))
[ "Ensure the two list of keys matches." ]
Please provide a description of the function:
def get_tensor_info(self):
  return {
      feature_key: feature.get_tensor_info()
      for feature_key, feature in self._feature_dict.items()
  }
[ "See base class for details." ]
Please provide a description of the function:
def get_serialized_info(self):
  # Flatten tf-example features dict
  # Use NonMutableDict to ensure there is no collision between features keys
  features_dict = utils.NonMutableDict()
  for feature_key, feature in self._feature_dict.items():
    serialized_info = feature.get_serialized_info()

    # Features can be either containers (dict of other features) or plain
    # features (ex: single tensor). Plain features have a None
    # feature.features_keys
    if not feature.serialized_keys:
      features_dict[feature_key] = serialized_info
    else:
      # Sanity check which should always be True, as feature.serialized_keys
      # is computed using feature.get_serialized_info()
      _assert_keys_match(serialized_info.keys(), feature.serialized_keys)
      features_dict.update({
          posixpath.join(feature_key, k): v
          for k, v in serialized_info.items()
      })

  return features_dict
[ "See base class for details." ]
Please provide a description of the function:
def encode_example(self, example_dict):
  # Flatten dict matching the tf-example features
  # Use NonMutableDict to ensure there is no collision between features keys
  tfexample_dict = utils.NonMutableDict()

  # Iterate over example fields
  for feature_key, (feature, example_value) in utils.zip_dict(
      self._feature_dict, example_dict):
    # Encode the field with the associated encoder
    encoded_feature = feature.encode_example(example_value)

    # Singleton case
    if not feature.serialized_keys:
      tfexample_dict[feature_key] = encoded_feature
    # Feature contains sub features
    else:
      _assert_keys_match(encoded_feature.keys(), feature.serialized_keys)
      tfexample_dict.update({
          posixpath.join(feature_key, k): encoded_feature[k]
          for k in feature.serialized_keys
      })

  return tfexample_dict
[ "See base class for details." ]
Please provide a description of the function:
def decode_example(self, tfexample_dict):
  tensor_dict = {}
  # Iterate over the Tensor dict keys
  for feature_key, feature in six.iteritems(self._feature_dict):
    decoded_feature = decode_single_feature_from_dict(
        feature_k=feature_key,
        feature=feature,
        tfexample_dict=tfexample_dict,
    )
    tensor_dict[feature_key] = decoded_feature
  return tensor_dict
[ "See base class for details." ]
Please provide a description of the function:
def save_metadata(self, data_dir, feature_name=None):
  # Recursively save all child features
  for feature_key, feature in six.iteritems(self._feature_dict):
    if feature_name:
      feature_key = '-'.join((feature_name, feature_key))
    feature.save_metadata(data_dir, feature_name=feature_key)
[ "See base class for details." ]
Please provide a description of the function:
def encode_example(self, example_data):
  np_dtype = np.dtype(self._dtype.as_numpy_dtype)
  # Convert to numpy if possible
  if not isinstance(example_data, np.ndarray):
    example_data = np.array(example_data, dtype=np_dtype)
  # Ensure the shape and dtype match
  if example_data.dtype != np_dtype:
    raise ValueError('Dtype {} do not match {}'.format(
        example_data.dtype, np_dtype))
  utils.assert_shape_match(example_data.shape, self._shape)
  # For booleans, convert to integer (tf.train.Example does not support bool)
  if example_data.dtype == np.bool_:
    example_data = example_data.astype(int)
  return example_data
[ "See base class for details." ]
Please provide a description of the function:
def decode_example(self, tfexample_data):
  # TODO(epot): Support dynamic shape
  if self.shape.count(None) < 2:
    # Restore the shape if possible. TF Example flattened it.
    shape = [-1 if i is None else i for i in self.shape]
    tfexample_data = tf.reshape(tfexample_data, shape)
  if tfexample_data.dtype != self.dtype:
    tfexample_data = tf.dtypes.cast(tfexample_data, self.dtype)
  return tfexample_data
[ "See base class for details." ]
Please provide a description of the function:
def _process_celeba_config_file(self, file_path):
  with tf.io.gfile.GFile(file_path) as f:
    data_raw = f.read()
  lines = data_raw.split("\n")

  keys = lines[1].strip().split()
  values = {}
  # Go over each line (skip the last one, as it is empty).
  for line in lines[2:-1]:
    row_values = line.strip().split()
    # Each row starts with the 'file_name' and then space-separated values.
    values[row_values[0]] = [int(v) for v in row_values[1:]]
  return keys, values
[ "Unpack the celeba config file.\n\n The file starts with the number of lines, and a header.\n Afterwards, there is a configuration for each file: one per line.\n\n Args:\n file_path: Path to the file with the configuration.\n\n Returns:\n keys: names of the attributes\n values: map from the file name to the list of attribute values for\n this file.\n " ]
Please provide a description of the function:
def _generate_examples(self, file_id, extracted_dirs):
  filedir = os.path.join(extracted_dirs["img_align_celeba"],
                         "img_align_celeba")
  img_list_path = extracted_dirs["list_eval_partition"]
  landmarks_path = extracted_dirs["landmarks_celeba"]
  attr_path = extracted_dirs["list_attr_celeba"]

  with tf.io.gfile.GFile(img_list_path) as f:
    files = [
        line.split()[0]
        for line in f.readlines()
        if int(line.split()[1]) == file_id
    ]

  attributes = self._process_celeba_config_file(attr_path)
  landmarks = self._process_celeba_config_file(landmarks_path)

  for file_name in sorted(files):
    path = os.path.join(filedir, file_name)

    yield {
        "image": path,
        "landmarks": {
            k: v for k, v in zip(landmarks[0], landmarks[1][file_name])
        },
        "attributes": {
            # Attribute values are either 1 or -1, so convert them to bool.
            k: v > 0 for k, v in zip(attributes[0], attributes[1][file_name])
        },
    }
[ "Yields examples." ]
Please provide a description of the function:
def _generate_examples(self, file_paths):
  for label, path in sorted(file_paths.items(), key=lambda x: x[0]):
    with tf.io.gfile.GFile(path, "rb") as f:
      class_images = np.load(f)
      for np_image in class_images:
        yield {
            "image": np_image.reshape(_QUICKDRAW_IMAGE_SHAPE),
            "label": label,
        }
[ "Generate QuickDraw bitmap examples.\n\n Given a list of file paths with data for each class label, generate examples\n in a random order.\n\n Args:\n file_paths: (dict of {str: str}) the paths to files containing the data,\n indexed by label.\n\n Yields:\n The QuickDraw examples, as defined in the dataset info features.\n " ]
Please provide a description of the function:
def ensure_tf_install():  # pylint: disable=g-statement-before-imports
  try:
    import tensorflow as tf
  except ImportError:
    # Print more informative error message, then reraise.
    print("\n\nFailed to import TensorFlow. Please note that TensorFlow is not "
          "installed by default when you install TensorFlow Datasets. This is "
          "so that users can decide whether to install the GPU-enabled "
          "TensorFlow package. To use TensorFlow Datasets, please install the "
          "most recent version of TensorFlow, by following instructions at "
          "https://tensorflow.org/install.\n\n")
    raise

  tf_version = distutils.version.LooseVersion(tf.__version__)
  v_1_12 = distutils.version.LooseVersion("1.12.0")
  if tf_version < v_1_12:
    raise ImportError(
        "This version of TensorFlow Datasets requires TensorFlow "
        "version >= {required}; Detected an installation of version "
        "{present}. Please upgrade TensorFlow to proceed.".format(
            required="1.12.0",
            present=tf.__version__))
  _patch_tf(tf)
[ "Attempt to import tensorflow, and ensure its version is sufficient.\n\n Raises:\n ImportError: if either tensorflow is not importable or its version is\n inadequate.\n " ]
Please provide a description of the function:
def _patch_tf(tf):
  global TF_PATCH
  if TF_PATCH:
    return

  v_1_12 = distutils.version.LooseVersion("1.12.0")
  v_1_13 = distutils.version.LooseVersion("1.13.0")
  v_2 = distutils.version.LooseVersion("2.0.0")
  tf_version = distutils.version.LooseVersion(tf.__version__)
  if v_1_12 <= tf_version < v_1_13:
    # TODO(b/123930850): remove when 1.13 is stable.
    TF_PATCH = "tf1_12"
    _patch_for_tf1_12(tf)
  elif v_1_13 <= tf_version < v_2:
    TF_PATCH = "tf1_13"
    _patch_for_tf1_13(tf)
  else:
    TF_PATCH = "tf2"
    _patch_for_tf2(tf)
[ "Patch TF to maintain compatibility across versions." ]
Please provide a description of the function:
def _patch_for_tf1_12(tf):
  tf.io.gfile = tf.gfile
  tf.io.gfile.copy = tf.gfile.Copy
  tf.io.gfile.exists = tf.gfile.Exists
  tf.io.gfile.glob = tf.gfile.Glob
  tf.io.gfile.isdir = tf.gfile.IsDirectory
  tf.io.gfile.listdir = tf.gfile.ListDirectory
  tf.io.gfile.makedirs = tf.gfile.MakeDirs
  tf.io.gfile.mkdir = tf.gfile.MkDir
  tf.io.gfile.remove = tf.gfile.Remove
  tf.io.gfile.rename = tf.gfile.Rename
  tf.io.gfile.rmtree = tf.gfile.DeleteRecursively
  tf.io.gfile.stat = tf.gfile.Stat
  tf.io.gfile.walk = tf.gfile.Walk
  tf.io.gfile.GFile = tf.gfile.GFile
  tf.data.experimental = tf.contrib.data
  tf.compat.v1 = types.ModuleType("tf.compat.v1")
  tf.compat.v1.assert_greater = tf.assert_greater
  tf.compat.v1.placeholder = tf.placeholder
  tf.compat.v1.ConfigProto = tf.ConfigProto
  tf.compat.v1.Session = tf.Session
  tf.compat.v1.enable_eager_execution = tf.enable_eager_execution
  tf.compat.v1.io = tf.io
  tf.compat.v1.data = tf.data
  tf.compat.v1.data.Dataset = tf.data.Dataset
  tf.compat.v1.data.make_one_shot_iterator = (
      lambda ds: ds.make_one_shot_iterator())
  tf.compat.v1.train = tf.train
  tf.compat.v1.global_variables_initializer = tf.global_variables_initializer
  tf.compat.v1.test = tf.test
  tf.compat.v1.test.get_temp_dir = tf.test.get_temp_dir
  tf.nest = tf.contrib.framework.nest
[ "Monkey patch tf 1.12 so tfds can use it." ]
Please provide a description of the function:
def _patch_for_tf1_13(tf):
  if not hasattr(tf.io.gfile, "GFile"):
    tf.io.gfile.GFile = tf.gfile.GFile
  if not hasattr(tf, "nest"):
    tf.nest = tf.contrib.framework.nest
  if not hasattr(tf.compat, "v2"):
    tf.compat.v2 = types.ModuleType("tf.compat.v2")
    tf.compat.v2.data = types.ModuleType("tf.compat.v2.data")
    from tensorflow.python.data.ops import dataset_ops
    tf.compat.v2.data.Dataset = dataset_ops.DatasetV2
  if not hasattr(tf.compat.v2.data.Dataset, "output_shapes"):
    from tensorflow.python.data.ops import dataset_ops
    if hasattr(dataset_ops, "get_legacy_output_shapes"):
      tf.compat.v2.data.Dataset.output_shapes = property(
          dataset_ops.get_legacy_output_shapes)
      tf.compat.v2.data.Dataset.output_types = property(
          dataset_ops.get_legacy_output_types)
[ "Monkey patch tf 1.13 so tfds can use it." ]
Please provide a description of the function:
def is_dataset(ds):
  import tensorflow as tf
  from tensorflow_datasets.core.utils import py_utils
  dataset_types = [tf.data.Dataset]
  v1_ds = py_utils.rgetattr(tf, "compat.v1.data.Dataset", None)
  v2_ds = py_utils.rgetattr(tf, "compat.v2.data.Dataset", None)
  if v1_ds is not None:
    dataset_types.append(v1_ds)
  if v2_ds is not None:
    dataset_types.append(v2_ds)
  return isinstance(ds, tuple(dataset_types))
[ "Whether ds is a Dataset. Compatible across TF versions." ]
Please provide a description of the function:
def _generate_examples(self, data_file):
  with tf.io.gfile.GFile(data_file) as f:
    reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
    for row in reader:
      # Everything in the row except for 'talk_name' will be a translation.
      # Missing/incomplete translations will contain the string "__NULL__" or
      # "_ _ NULL _ _".
      yield {
          'translations': {
              lang: text
              for lang, text in six.iteritems(row)
              if lang != 'talk_name' and _is_translation_complete(text)
          },
          'talk_name': row['talk_name']
      }
[ "This function returns the examples in the raw (text) form." ]
Please provide a description of the function:
def _generate_examples(self, filepath):
  for idx, line in enumerate(tf.io.gfile.GFile(filepath, "rb")):
    if idx == 0:
      continue  # skip header
    line = tf.compat.as_text(line.strip())
    split_line = line.split("\t")
    # Examples not marked with a three out of five consensus are marked with
    # "-" and should not be used in standard evaluations.
    if split_line[0] == "-":
      continue
    # Works for both splits even though dev has some extra human labels.
    yield {
        "premise": split_line[5],
        "hypothesis": split_line[6],
        "label": split_line[0]
    }
[ "Generate mnli examples.\n\n Args:\n filepath: a string\n\n Yields:\n dictionaries containing \"premise\", \"hypothesis\" and \"label\" strings\n " ]
Please provide a description of the function:
def _split_generators(self, dl_manager):
  # At data creation time, parse the folder to deduce the number of splits,
  # labels, image size, etc.

  # The splits correspond to the high level folders
  split_names = list_folders(dl_manager.manual_dir)

  # Extract all label names and associated images
  split_label_images = {}  # dict[split_name][label_name] = list(img_paths)
  for split_name in split_names:
    split_dir = os.path.join(dl_manager.manual_dir, split_name)
    split_label_images[split_name] = {
        label_name: list_imgs(os.path.join(split_dir, label_name))
        for label_name in list_folders(split_dir)
    }

  # Merge all label names from all splits to get the final list of labels
  # Sorted list for determinism
  labels = [split.keys() for split in split_label_images.values()]
  labels = list(sorted(set(itertools.chain(*labels))))

  # Could improve the automated encoding format detection
  # Extract the list of all image paths
  image_paths = [
      image_paths
      for label_images in split_label_images.values()
      for image_paths in label_images.values()
  ]
  if any(f.lower().endswith(".png") for f in itertools.chain(*image_paths)):
    encoding_format = "png"
  else:
    encoding_format = "jpeg"

  # Update the info.features. This info will be automatically restored when
  # the dataset is re-created
  self.info.features["image"].set_encoding_format(encoding_format)
  self.info.features["label"].names = labels

  def num_examples(label_images):
    return sum(len(imgs) for imgs in label_images.values())

  # Define the splits
  return [
      tfds.core.SplitGenerator(
          name=split_name,
          # The number of shards is a dynamic function of the total
          # number of images (between 0-10)
          num_shards=min(10, max(num_examples(label_images) // 1000, 1)),
          gen_kwargs=dict(label_images=label_images,),
      ) for split_name, label_images in split_label_images.items()
  ]
[ "Returns SplitGenerators from the folder names." ]
Please provide a description of the function:
def _generate_examples(self, label_images):
  for label, image_paths in label_images.items():
    for image_path in image_paths:
      yield {
          "image": image_path,
          "label": label,
      }
[ "Generate example for each image in the dict." ]