body (stringlengths 26-98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (stringlengths 1-16.8k) | path (stringlengths 5-230) | name (stringlengths 1-96) | repository_name (stringlengths 7-89) | lang (stringclasses 1) | body_without_docstring (stringlengths 20-98.2k) |
---|---|---|---|---|---|---|---|
def get_standard_ids_by_id(self, _id):
'Get chebi_id, pubmed_id, and kegg_id from\n database specific id.\n \n Args:\n _id (:obj:`str`): Database specific ID.\n\n Return:\n (:obj:`dict`): Dictionary containing the information.\n '
if (self.collection_str == 'ecmdb'):
db_id = 'm2m_id'
else:
db_id = 'ymdb_id'
query = {db_id: _id}
doc = self.collection.find_one(filter=query)
if (doc is None):
return {}
else:
return doc | 4,700,694,575,084,467,000 | Get chebi_id, pubmed_id, and kegg_id from
database specific id.
Args:
_id (:obj:`str`): Database specific ID.
Return:
(:obj:`dict`): Dictionary containing the information. | datanator_query_python/query/query_xmdb.py | get_standard_ids_by_id | KarrLab/datanator_query_python | python | def get_standard_ids_by_id(self, _id):
'Get chebi_id, pubmed_id, and kegg_id from\n database specific id.\n \n Args:\n _id (:obj:`str`): Database specific ID.\n\n Return:\n (:obj:`dict`): Dictionary containing the information.\n '
if (self.collection_str == 'ecmdb'):
db_id = 'm2m_id'
else:
db_id = 'ymdb_id'
query = {db_id: _id}
doc = self.collection.find_one(filter=query)
if (doc is None):
return {}
else:
return doc |
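The lookup above reduces to a single `find_one` against a MongoDB collection keyed by `m2m_id` (when the collection is `'ecmdb'`) or `ymdb_id` otherwise. A minimal sketch of the equivalent raw query is shown below; the connection string, database/collection names, and the ID value are illustrative assumptions, not taken from datanator_query_python.

```python
# Sketch only: connection string, database/collection names, and the ID value
# are hypothetical; only the query shape mirrors get_standard_ids_by_id.
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
collection = client["datanator"]["ecmdb"]

# Equivalent of get_standard_ids_by_id("M2MDB000001") when collection_str == 'ecmdb':
doc = collection.find_one(filter={"m2m_id": "M2MDB000001"}) or {}
print(doc.get("chebi_id"), doc.get("pubmed_id"), doc.get("kegg_id"))
```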
@configurable
def __init__(self, is_train: bool, *, augmentations: List[Union[(T.Augmentation, T.Transform)]], image_format: str, use_instance_mask: bool=False, use_keypoint: bool=False, instance_mask_format: str='polygon', keypoint_hflip_indices: Optional[np.ndarray]=None, precomputed_proposal_topk: Optional[int]=None, recompute_boxes: bool=False):
'\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it\'s used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n use_instance_mask: whether to process instance segmentation annotations, if available\n use_keypoint: whether to process keypoint annotations if available\n instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation\n masks into this format.\n keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`\n precomputed_proposal_topk: if given, will load pre-computed\n proposals from dataset_dict and keep the top k proposals for each image.\n recompute_boxes: whether to overwrite bounding box annotations\n by computing tight bounding boxes from instance mask annotations.\n '
if recompute_boxes:
assert use_instance_mask, 'recompute_boxes requires instance masks'
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.use_instance_mask = use_instance_mask
self.instance_mask_format = instance_mask_format
self.use_keypoint = use_keypoint
self.keypoint_hflip_indices = keypoint_hflip_indices
self.proposal_topk = precomputed_proposal_topk
self.recompute_boxes = recompute_boxes
logger = logging.getLogger(__name__)
mode = ('training' if is_train else 'inference')
logger.info(f'[DatasetMapper] Augmentations used in {mode}: {augmentations}') | -208,818,319,591,433,820 | NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
use_instance_mask: whether to process instance segmentation annotations, if available
use_keypoint: whether to process keypoint annotations if available
instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
masks into this format.
keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
precomputed_proposal_topk: if given, will load pre-computed
proposals from dataset_dict and keep the top k proposals for each image.
recompute_boxes: whether to overwrite bounding box annotations
by computing tight bounding boxes from instance mask annotations. | detectron2/data/dataset_mapper.py | __init__ | Jerrypiglet/detectron2 | python | @configurable
def __init__(self, is_train: bool, *, augmentations: List[Union[(T.Augmentation, T.Transform)]], image_format: str, use_instance_mask: bool=False, use_keypoint: bool=False, instance_mask_format: str='polygon', keypoint_hflip_indices: Optional[np.ndarray]=None, precomputed_proposal_topk: Optional[int]=None, recompute_boxes: bool=False):
'\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it\'s used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n use_instance_mask: whether to process instance segmentation annotations, if available\n use_keypoint: whether to process keypoint annotations if available\n instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation\n masks into this format.\n keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`\n precomputed_proposal_topk: if given, will load pre-computed\n proposals from dataset_dict and keep the top k proposals for each image.\n recompute_boxes: whether to overwrite bounding box annotations\n by computing tight bounding boxes from instance mask annotations.\n '
if recompute_boxes:
assert use_instance_mask, 'recompute_boxes requires instance masks'
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.use_instance_mask = use_instance_mask
self.instance_mask_format = instance_mask_format
self.use_keypoint = use_keypoint
self.keypoint_hflip_indices = keypoint_hflip_indices
self.proposal_topk = precomputed_proposal_topk
self.recompute_boxes = recompute_boxes
logger = logging.getLogger(__name__)
mode = ('training' if is_train else 'inference')
logger.info(f'[DatasetMapper] Augmentations used in {mode}: {augmentations}') |
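Because of the `@configurable` decorator, this mapper can also be built directly with explicit keyword arguments. A minimal construction sketch follows; the particular augmentations and image format are assumptions chosen for illustration, not settings prescribed by the repository.

```python
# Illustrative construction; augmentation choices are assumed, not prescribed.
from detectron2.data import transforms as T
from detectron2.data.dataset_mapper import DatasetMapper

mapper = DatasetMapper(
    is_train=True,
    augmentations=[
        T.ResizeShortestEdge(short_edge_length=(640, 800), max_size=1333, sample_style="choice"),
        T.RandomFlip(),
    ],
    image_format="BGR",
    use_instance_mask=True,
    instance_mask_format="polygon",
)
# mapper(dataset_dict) then yields the model-ready dict produced by __call__ below.
```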
def __call__(self, dataset_dict):
'\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n '
dataset_dict = copy.deepcopy(dataset_dict)
image = utils.read_image(dataset_dict['file_name'], format=self.image_format)
utils.check_image_size(dataset_dict, image)
if ('sem_seg_file_name' in dataset_dict):
sem_seg_gt = utils.read_image(dataset_dict.pop('sem_seg_file_name'), 'L').squeeze(2)
else:
sem_seg_gt = None
aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
transforms = self.augmentations(aug_input)
(image, sem_seg_gt) = (aug_input.image, aug_input.sem_seg)
image_shape = image.shape[:2]
dataset_dict['image'] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if (sem_seg_gt is not None):
dataset_dict['sem_seg'] = torch.as_tensor(sem_seg_gt.astype('long'))
if (self.proposal_topk is not None):
utils.transform_proposals(dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk)
if (not self.is_train):
dataset_dict.pop('sem_seg_file_name', None)
return dataset_dict
if ('annotations' in dataset_dict):
for anno in dataset_dict['annotations']:
if (not self.use_instance_mask):
anno.pop('segmentation', None)
if (not self.use_keypoint):
anno.pop('keypoints', None)
annos = [utils.transform_instance_annotations(obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices) for obj in dataset_dict.pop('annotations') if (obj.get('iscrowd', 0) == 0)]
instances = utils.annotations_to_instances(annos, image_shape, mask_format=self.instance_mask_format)
if self.recompute_boxes:
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
dataset_dict['instances'] = utils.filter_empty_instances(instances)
return dataset_dict | 689,083,837,638,239,400 | Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept | detectron2/data/dataset_mapper.py | __call__ | Jerrypiglet/detectron2 | python | def __call__(self, dataset_dict):
'\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n '
dataset_dict = copy.deepcopy(dataset_dict)
image = utils.read_image(dataset_dict['file_name'], format=self.image_format)
utils.check_image_size(dataset_dict, image)
if ('sem_seg_file_name' in dataset_dict):
sem_seg_gt = utils.read_image(dataset_dict.pop('sem_seg_file_name'), 'L').squeeze(2)
else:
sem_seg_gt = None
aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
transforms = self.augmentations(aug_input)
(image, sem_seg_gt) = (aug_input.image, aug_input.sem_seg)
image_shape = image.shape[:2]
dataset_dict['image'] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if (sem_seg_gt is not None):
dataset_dict['sem_seg'] = torch.as_tensor(sem_seg_gt.astype('long'))
if (self.proposal_topk is not None):
utils.transform_proposals(dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk)
if (not self.is_train):
dataset_dict.pop('sem_seg_file_name', None)
return dataset_dict
if ('annotations' in dataset_dict):
for anno in dataset_dict['annotations']:
if (not self.use_instance_mask):
anno.pop('segmentation', None)
if (not self.use_keypoint):
anno.pop('keypoints', None)
annos = [utils.transform_instance_annotations(obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices) for obj in dataset_dict.pop('annotations') if (obj.get('iscrowd', 0) == 0)]
instances = utils.annotations_to_instances(annos, image_shape, mask_format=self.instance_mask_format)
if self.recompute_boxes:
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
dataset_dict['instances'] = utils.filter_empty_instances(instances)
return dataset_dict |
@contextlib.contextmanager
def _open_archive(archive, directory):
'Manages a directory in which an existing SDK is laid out.'
if directory:
(yield directory)
elif archive:
temp_dir = tempfile.mkdtemp(prefix='fuchsia-merger')
with tarfile.open(archive) as archive_file:
archive_file.extractall(temp_dir)
try:
(yield temp_dir)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
else:
raise Exception('Error: archive or directory must be set') | 3,065,005,589,519,523,300 | Manages a directory in which an existing SDK is laid out. | scripts/sdk/merger/merge.py | _open_archive | allansrc/fuchsia | python | @contextlib.contextmanager
def _open_archive(archive, directory):
if directory:
(yield directory)
elif archive:
temp_dir = tempfile.mkdtemp(prefix='fuchsia-merger')
with tarfile.open(archive) as archive_file:
archive_file.extractall(temp_dir)
try:
(yield temp_dir)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
else:
raise Exception('Error: archive or directory must be set') |
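Within `merge.py`, a caller would enter this context manager with exactly one of the two arguments set, mirroring the branches above. A usage sketch, with a hypothetical tarball path:

```python
# Usage sketch inside scripts/sdk/merger/merge.py; the tarball path is hypothetical.
with _open_archive(archive='fuchsia_sdk.tar.gz', directory=None) as sdk_dir:
    # sdk_dir is either the given directory or a temporary extraction of the
    # tarball, cleaned up automatically when the block exits.
    manifest = _get_manifest(sdk_dir)  # defined later in this file
    print(manifest['id'])
```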
@contextlib.contextmanager
def _open_output(archive, directory):
'Manages the output of this script.'
if directory:
shutil.rmtree(directory, ignore_errors=True)
(yield directory)
elif archive:
temp_dir = tempfile.mkdtemp(prefix='fuchsia-merger')
try:
(yield temp_dir)
with tarfile.open(archive, 'w:gz') as archive_file:
archive_file.add(temp_dir, arcname='')
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
else:
raise Exception('Error: archive or directory must be set') | 138,120,787,399,427,680 | Manages the output of this script. | scripts/sdk/merger/merge.py | _open_output | allansrc/fuchsia | python | @contextlib.contextmanager
def _open_output(archive, directory):
if directory:
shutil.rmtree(directory, ignore_errors=True)
(yield directory)
elif archive:
temp_dir = tempfile.mkdtemp(prefix='fuchsia-merger')
try:
(yield temp_dir)
with tarfile.open(archive, 'w:gz') as archive_file:
archive_file.add(temp_dir, arcname='')
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
else:
raise Exception('Error: archive or directory must be set') |
def _get_manifest(sdk_dir):
'Returns the set of elements in the given SDK.'
with open(os.path.join(sdk_dir, 'meta', 'manifest.json'), 'r') as manifest:
return json.load(manifest) | -6,938,343,977,296,843,000 | Returns the set of elements in the given SDK. | scripts/sdk/merger/merge.py | _get_manifest | allansrc/fuchsia | python | def _get_manifest(sdk_dir):
with open(os.path.join(sdk_dir, 'meta', 'manifest.json'), 'r') as manifest:
return json.load(manifest) |
def _get_meta(element, sdk_dir):
"Returns the contents of the given element's manifest in a given SDK."
with open(os.path.join(sdk_dir, element), 'r') as meta:
return json.load(meta) | -4,476,401,921,661,051,400 | Returns the contents of the given element's manifest in a given SDK. | scripts/sdk/merger/merge.py | _get_meta | allansrc/fuchsia | python | def _get_meta(element, sdk_dir):
with open(os.path.join(sdk_dir, element), 'r') as meta:
return json.load(meta) |
def _get_type(element):
'Returns the SDK element type.'
if ('schema_id' in element):
return element['data']['type']
return element['type'] | 55,824,938,872,102,580 | Returns the SDK element type. | scripts/sdk/merger/merge.py | _get_type | allansrc/fuchsia | python | def _get_type(element):
if ('schema_id' in element):
return element['data']['type']
return element['type'] |
def _get_files(element_meta):
'Extracts the files associated with the given element.\n Returns a 2-tuple containing:\n - the set of arch-independent files;\n - the sets of arch-dependent files, indexed by architecture.\n '
type = _get_type(element_meta)
common_files = set()
arch_files = {}
if (type == 'cc_prebuilt_library'):
common_files.update(element_meta['headers'])
for (arch, binaries) in element_meta['binaries'].items():
contents = set()
contents.add(binaries['link'])
if ('dist' in binaries):
contents.add(binaries['dist'])
if ('debug' in binaries):
contents.add(binaries['debug'])
arch_files[arch] = contents
elif (type == 'cc_source_library'):
common_files.update(element_meta['headers'])
common_files.update(element_meta['sources'])
elif (type == 'dart_library'):
common_files.update(element_meta['sources'])
elif (type == 'fidl_library'):
common_files.update(element_meta['sources'])
elif (type in ['host_tool', 'companion_host_tool']):
if ('files' in element_meta):
common_files.update(element_meta['files'])
if ('target_files' in element_meta):
arch_files.update(element_meta['target_files'])
elif (type == 'loadable_module'):
common_files.update(element_meta['resources'])
arch_files.update(element_meta['binaries'])
elif (type == 'sysroot'):
for (arch, version) in element_meta['versions'].items():
contents = set()
contents.update(version['headers'])
contents.update(version['link_libs'])
contents.update(version['dist_libs'])
contents.update(version['debug_libs'])
arch_files[arch] = contents
elif (type == 'documentation'):
common_files.update(element_meta['docs'])
elif (type in ('config', 'license', 'component_manifest')):
common_files.update(element_meta['data'])
elif (type in 'version_history'):
pass
elif (type == 'bind_library'):
common_files.update(element_meta['sources'])
else:
raise Exception(('Unknown element type: ' + type))
return (common_files, arch_files) | 7,918,897,328,551,751,000 | Extracts the files associated with the given element.
Returns a 2-tuple containing:
- the set of arch-independent files;
- the sets of arch-dependent files, indexed by architecture. | scripts/sdk/merger/merge.py | _get_files | allansrc/fuchsia | python | def _get_files(element_meta):
'Extracts the files associated with the given element.\n Returns a 2-tuple containing:\n - the set of arch-independent files;\n - the sets of arch-dependent files, indexed by architecture.\n '
type = _get_type(element_meta)
common_files = set()
arch_files = {}
if (type == 'cc_prebuilt_library'):
common_files.update(element_meta['headers'])
for (arch, binaries) in element_meta['binaries'].items():
contents = set()
contents.add(binaries['link'])
if ('dist' in binaries):
contents.add(binaries['dist'])
if ('debug' in binaries):
contents.add(binaries['debug'])
arch_files[arch] = contents
elif (type == 'cc_source_library'):
common_files.update(element_meta['headers'])
common_files.update(element_meta['sources'])
elif (type == 'dart_library'):
common_files.update(element_meta['sources'])
elif (type == 'fidl_library'):
common_files.update(element_meta['sources'])
elif (type in ['host_tool', 'companion_host_tool']):
if ('files' in element_meta):
common_files.update(element_meta['files'])
if ('target_files' in element_meta):
arch_files.update(element_meta['target_files'])
elif (type == 'loadable_module'):
common_files.update(element_meta['resources'])
arch_files.update(element_meta['binaries'])
elif (type == 'sysroot'):
for (arch, version) in element_meta['versions'].items():
contents = set()
contents.update(version['headers'])
contents.update(version['link_libs'])
contents.update(version['dist_libs'])
contents.update(version['debug_libs'])
arch_files[arch] = contents
elif (type == 'documentation'):
common_files.update(element_meta['docs'])
elif (type in ('config', 'license', 'component_manifest')):
common_files.update(element_meta['data'])
elif (type in 'version_history'):
pass
elif (type == 'bind_library'):
common_files.update(element_meta['sources'])
else:
raise Exception(('Unknown element type: ' + type))
return (common_files, arch_files) |
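For concreteness, here is a small worked example of the `'cc_prebuilt_library'` branch. The file paths are hypothetical; only the key names come from the element metadata handled above.

```python
# Hypothetical element metadata illustrating the cc_prebuilt_library branch.
meta = {
    'type': 'cc_prebuilt_library',
    'headers': ['pkg/foo/include/foo.h'],
    'binaries': {
        'arm64': {'link': 'arch/arm64/lib/libfoo.so', 'debug': '.build-id/ab/cdef.debug'},
        'x64': {'link': 'arch/x64/lib/libfoo.so'},
    },
}
common_files, arch_files = _get_files(meta)
# common_files -> {'pkg/foo/include/foo.h'}
# arch_files   -> {'arm64': {'arch/arm64/lib/libfoo.so', '.build-id/ab/cdef.debug'},
#                  'x64': {'arch/x64/lib/libfoo.so'}}
```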
def _ensure_directory(path):
'Ensures that the directory hierarchy of the given path exists.'
target_dir = os.path.dirname(path)
try:
os.makedirs(target_dir)
except OSError as exception:
if ((exception.errno == errno.EEXIST) and os.path.isdir(target_dir)):
pass
else:
raise | -2,222,556,865,119,095,600 | Ensures that the directory hierarchy of the given path exists. | scripts/sdk/merger/merge.py | _ensure_directory | allansrc/fuchsia | python | def _ensure_directory(path):
target_dir = os.path.dirname(path)
try:
os.makedirs(target_dir)
except OSError as exception:
if ((exception.errno == errno.EEXIST) and os.path.isdir(target_dir)):
pass
else:
raise |
def _copy_file(file, source_dir, dest_dir):
'Copies a file to a given path, taking care of creating directories if\n needed.\n '
source = os.path.join(source_dir, file)
destination = os.path.join(dest_dir, file)
_ensure_directory(destination)
shutil.copy2(source, destination) | -1,162,556,876,817,004,300 | Copies a file to a given path, taking care of creating directories if
needed. | scripts/sdk/merger/merge.py | _copy_file | allansrc/fuchsia | python | def _copy_file(file, source_dir, dest_dir):
'Copies a file to a given path, taking care of creating directories if\n needed.\n '
source = os.path.join(source_dir, file)
destination = os.path.join(dest_dir, file)
_ensure_directory(destination)
shutil.copy2(source, destination) |
def _copy_files(files, source_dir, dest_dir):
'Copies a set of files to a given directory.'
for file in files:
_copy_file(file, source_dir, dest_dir) | 679,370,016,772,844,700 | Copies a set of files to a given directory. | scripts/sdk/merger/merge.py | _copy_files | allansrc/fuchsia | python | def _copy_files(files, source_dir, dest_dir):
for file in files:
_copy_file(file, source_dir, dest_dir) |
def _copy_identical_files(set_one, source_dir_one, set_two, source_dir_two, dest_dir):
'Verifies that two sets of files are absolutely identical and then copies\n them to the output directory.\n '
if (set_one != set_two):
return False
_copy_files(set_one, source_dir_one, dest_dir)
return True | -6,738,155,008,769,796,000 | Verifies that two sets of files are absolutely identical and then copies
them to the output directory. | scripts/sdk/merger/merge.py | _copy_identical_files | allansrc/fuchsia | python | def _copy_identical_files(set_one, source_dir_one, set_two, source_dir_two, dest_dir):
'Verifies that two sets of files are absolutely identical and then copies\n them to the output directory.\n '
if (set_one != set_two):
return False
_copy_files(set_one, source_dir_one, dest_dir)
return True |
def _copy_element(element, source_dir, dest_dir):
'Copy an entire SDK element to a given directory.'
meta = _get_meta(element, source_dir)
(common_files, arch_files) = _get_files(meta)
files = common_files
for more_files in arch_files.values():
files.update(more_files)
_copy_files(files, source_dir, dest_dir)
_copy_file(element, source_dir, dest_dir) | 8,808,345,156,117,892,000 | Copy an entire SDK element to a given directory. | scripts/sdk/merger/merge.py | _copy_element | allansrc/fuchsia | python | def _copy_element(element, source_dir, dest_dir):
meta = _get_meta(element, source_dir)
(common_files, arch_files) = _get_files(meta)
files = common_files
for more_files in arch_files.values():
files.update(more_files)
_copy_files(files, source_dir, dest_dir)
_copy_file(element, source_dir, dest_dir) |
def _write_meta(element, source_dir_one, source_dir_two, dest_dir):
'Writes a meta file for the given element, resulting from the merge of the\n meta files for that element in the two given SDK directories.\n '
meta_one = _get_meta(element, source_dir_one)
meta_two = _get_meta(element, source_dir_two)
type = _get_type(meta_one)
meta = {}
if (type in ('cc_prebuilt_library', 'loadable_module')):
meta = meta_one
meta['binaries'].update(meta_two['binaries'])
elif (type == 'sysroot'):
meta = meta_one
meta['versions'].update(meta_two['versions'])
elif (type in ['host_tool', 'companion_host_tool']):
meta = meta_one
if (not ('target_files' in meta)):
meta['target_files'] = {}
if ('target_files' in meta_two):
meta['target_files'].update(meta_two['target_files'])
elif (type in ('cc_source_library', 'dart_library', 'fidl_library', 'documentation', 'device_profile', 'config', 'license', 'component_manifest', 'bind_library', 'version_history')):
meta = meta_one
else:
raise Exception(('Unknown element type: ' + type))
meta_path = os.path.join(dest_dir, element)
_ensure_directory(meta_path)
with open(meta_path, 'w') as meta_file:
json.dump(meta, meta_file, indent=2, sort_keys=True, separators=(',', ': '))
return True | 4,619,572,242,825,215,000 | Writes a meta file for the given element, resulting from the merge of the
meta files for that element in the two given SDK directories. | scripts/sdk/merger/merge.py | _write_meta | allansrc/fuchsia | python | def _write_meta(element, source_dir_one, source_dir_two, dest_dir):
'Writes a meta file for the given element, resulting from the merge of the\n meta files for that element in the two given SDK directories.\n '
meta_one = _get_meta(element, source_dir_one)
meta_two = _get_meta(element, source_dir_two)
type = _get_type(meta_one)
meta = {}
if (type in ('cc_prebuilt_library', 'loadable_module')):
meta = meta_one
meta['binaries'].update(meta_two['binaries'])
elif (type == 'sysroot'):
meta = meta_one
meta['versions'].update(meta_two['versions'])
elif (type in ['host_tool', 'companion_host_tool']):
meta = meta_one
if (not ('target_files' in meta)):
meta['target_files'] = {}
if ('target_files' in meta_two):
meta['target_files'].update(meta_two['target_files'])
elif (type in ('cc_source_library', 'dart_library', 'fidl_library', 'documentation', 'device_profile', 'config', 'license', 'component_manifest', 'bind_library', 'version_history')):
meta = meta_one
else:
raise Exception(('Unknown element type: ' + type))
meta_path = os.path.join(dest_dir, element)
_ensure_directory(meta_path)
with open(meta_path, 'w') as meta_file:
json.dump(meta, meta_file, indent=2, sort_keys=True, separators=(',', ': '))
return True |
def _has_host_content(parts):
'Returns true if the given list of SDK parts contains an element with\n content built for a host.\n '
return ('host_tool' in [part.type for part in parts]) | 8,791,001,200,716,074,000 | Returns true if the given list of SDK parts contains an element with
content built for a host. | scripts/sdk/merger/merge.py | _has_host_content | allansrc/fuchsia | python | def _has_host_content(parts):
'Returns true if the given list of SDK parts contains an element with\n content built for a host.\n '
return ('host_tool' in [part.type for part in parts]) |
def _write_manifest(source_dir_one, source_dir_two, dest_dir):
'Writes a manifest file resulting from the merge of the manifest files for\n the two given SDK directories.\n '
manifest_one = _get_manifest(source_dir_one)
manifest_two = _get_manifest(source_dir_two)
parts_one = set([Part(p) for p in manifest_one['parts']])
parts_two = set([Part(p) for p in manifest_two['parts']])
manifest = {'arch': {}}
if (manifest_one['schema_version'] != manifest_two['schema_version']):
print('Error: mismatching schema version')
return False
manifest['schema_version'] = manifest_one['schema_version']
host_archs = set()
if _has_host_content(parts_one):
host_archs.add(manifest_one['arch']['host'])
if _has_host_content(parts_two):
host_archs.add(manifest_two['arch']['host'])
if (not host_archs):
host_archs.add(manifest_one['arch']['host'])
if (len(host_archs) != 1):
print(('Error: mismatching host architecture: %s' % ', '.join(host_archs)))
return False
manifest['arch']['host'] = list(host_archs)[0]
if (manifest_one['id'] != manifest_two['id']):
print('Error: mismatching id')
return False
manifest['id'] = manifest_one['id']
if (manifest_one['root'] != manifest_two['root']):
print('Error: mismatching root')
return False
manifest['root'] = manifest_one['root']
manifest['arch']['target'] = sorted((set(manifest_one['arch']['target']) | set(manifest_two['arch']['target'])))
manifest['parts'] = [vars(p) for p in sorted((parts_one | parts_two))]
manifest_path = os.path.join(dest_dir, 'meta', 'manifest.json')
_ensure_directory(manifest_path)
with open(manifest_path, 'w') as manifest_file:
json.dump(manifest, manifest_file, indent=2, sort_keys=True, separators=(',', ': '))
return True | 6,378,681,545,367,784,000 | Writes a manifest file resulting from the merge of the manifest files for
the two given SDK directories. | scripts/sdk/merger/merge.py | _write_manifest | allansrc/fuchsia | python | def _write_manifest(source_dir_one, source_dir_two, dest_dir):
'Writes a manifest file resulting from the merge of the manifest files for\n the two given SDK directories.\n '
manifest_one = _get_manifest(source_dir_one)
manifest_two = _get_manifest(source_dir_two)
parts_one = set([Part(p) for p in manifest_one['parts']])
parts_two = set([Part(p) for p in manifest_two['parts']])
manifest = {'arch': {}}
if (manifest_one['schema_version'] != manifest_two['schema_version']):
print('Error: mismatching schema version')
return False
manifest['schema_version'] = manifest_one['schema_version']
host_archs = set()
if _has_host_content(parts_one):
host_archs.add(manifest_one['arch']['host'])
if _has_host_content(parts_two):
host_archs.add(manifest_two['arch']['host'])
if (not host_archs):
host_archs.add(manifest_one['arch']['host'])
if (len(host_archs) != 1):
print(('Error: mismatching host architecture: %s' % ', '.join(host_archs)))
return False
manifest['arch']['host'] = list(host_archs)[0]
if (manifest_one['id'] != manifest_two['id']):
print('Error: mismatching id')
return False
manifest['id'] = manifest_one['id']
if (manifest_one['root'] != manifest_two['root']):
print('Error: mismatching root')
return False
manifest['root'] = manifest_one['root']
manifest['arch']['target'] = sorted((set(manifest_one['arch']['target']) | set(manifest_two['arch']['target'])))
manifest['parts'] = [vars(p) for p in sorted((parts_one | parts_two))]
manifest_path = os.path.join(dest_dir, 'meta', 'manifest.json')
_ensure_directory(manifest_path)
with open(manifest_path, 'w') as manifest_file:
json.dump(manifest, manifest_file, indent=2, sort_keys=True, separators=(',', ': '))
return True |
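The merged `meta/manifest.json` therefore takes roughly the following shape. Every value below is a placeholder assumption; only the key names are fixed by the code above.

```python
# Placeholder values; keys mirror what _write_manifest emits.
merged_manifest = {
    'arch': {'host': 'x64-linux', 'target': ['arm64', 'x64']},
    'id': '0.20200101.2.1',
    'root': '..',
    'schema_version': '1',
    'parts': [],  # vars() of each merged Part (Part is defined elsewhere in merge.py)
}
```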
def testDAGCollectionAllOf(self):
'Test DAGCollectionAllOf'
pass | 2,568,053,484,597,592,000 | Test DAGCollectionAllOf | airflow_client/test/test_dag_collection_all_of.py | testDAGCollectionAllOf | sptsakcg/airflow-client-python | python | def testDAGCollectionAllOf(self):
pass |
@pytest.mark.usefixtures('init_blockchain')
def test_check_script(rpconn, piece_hashes, spool_regtest, transactions):
'\n Test :staticmethod:`check_script`.\n\n Args;\n alice (str): bitcoin address of alice, the sender\n bob (str): bitcoin address of bob, the receiver\n rpconn (AuthServiceProxy): JSON-RPC connection\n (:class:`AuthServiceProxy` instance) to bitcoin regtest\n transactions (Transactions): :class:`Transactions` instance to\n communicate to the bitcoin regtest node\n\n '
from spool import Spool
from spool.spoolex import BlockchainSpider
sender_password = uuid1().hex.encode('utf-8')
sender_wallet = BIP32Node.from_master_secret(sender_password, netcode='XTN')
sender_address = sender_wallet.bitcoin_address()
rpconn.importaddress(sender_address)
rpconn.sendtoaddress(sender_address, (Spool.FEE / 100000000))
rpconn.sendtoaddress(sender_address, (Spool.TOKEN / 100000000))
rpconn.sendtoaddress(sender_address, (Spool.TOKEN / 100000000))
rpconn.sendtoaddress(sender_address, (Spool.TOKEN / 100000000))
rpconn.generate(1)
receiver_address = rpconn.getnewaddress()
txid = spool_regtest.transfer(('', sender_address), receiver_address, piece_hashes, sender_password, 5, min_confirmations=1)
verb = BlockchainSpider.check_script(transactions.get(txid)['vouts'])
assert (verb == b'ASCRIBESPOOL01TRANSFER5') | 7,597,036,693,908,579,000 | Test :staticmethod:`check_script`.
Args;
alice (str): bitcoin address of alice, the sender
bob (str): bitcoin address of bob, the receiver
rpconn (AuthServiceProxy): JSON-RPC connection
(:class:`AuthServiceProxy` instance) to bitcoin regtest
transactions (Transactions): :class:`Transactions` instance to
communicate to the bitcoin regtest node | tests/test_spoolex.py | test_check_script | ascribe/pyspool | python | @pytest.mark.usefixtures('init_blockchain')
def test_check_script(rpconn, piece_hashes, spool_regtest, transactions):
'\n Test :staticmethod:`check_script`.\n\n Args;\n alice (str): bitcoin address of alice, the sender\n bob (str): bitcoin address of bob, the receiver\n rpconn (AuthServiceProxy): JSON-RPC connection\n (:class:`AuthServiceProxy` instance) to bitcoin regtest\n transactions (Transactions): :class:`Transactions` instance to\n communicate to the bitcoin regtest node\n\n '
from spool import Spool
from spool.spoolex import BlockchainSpider
sender_password = uuid1().hex.encode('utf-8')
sender_wallet = BIP32Node.from_master_secret(sender_password, netcode='XTN')
sender_address = sender_wallet.bitcoin_address()
rpconn.importaddress(sender_address)
rpconn.sendtoaddress(sender_address, (Spool.FEE / 100000000))
rpconn.sendtoaddress(sender_address, (Spool.TOKEN / 100000000))
rpconn.sendtoaddress(sender_address, (Spool.TOKEN / 100000000))
rpconn.sendtoaddress(sender_address, (Spool.TOKEN / 100000000))
rpconn.generate(1)
receiver_address = rpconn.getnewaddress()
txid = spool_regtest.transfer(('', sender_address), receiver_address, piece_hashes, sender_password, 5, min_confirmations=1)
verb = BlockchainSpider.check_script(transactions.get(txid)['vouts'])
assert (verb == b'ASCRIBESPOOL01TRANSFER5') |
@pytest.mark.usefixtures('init_blockchain')
def test_check_script_with_invalid_tx(eve, wendy, rpconn, transactions):
'\n An invalid transaction in this context is one that does not contain a\n ``vout`` for which the ``hex`` is a valid ``Spool`` verb.\n\n Args;\n eve (str): bitcoin address of eve, the sender\n wendy (str): bitcoin address of wendy, the receiver\n rpconn (AuthServiceProxy): JSON-RPC connection\n (:class:`AuthServiceProxy` instance) a local bitcoin regtest\n transactions (Transactions): :class:`Transactions` instance to\n communicate to the bitcoin regtest node\n\n '
from spool.spoolex import BlockchainSpider
rpconn.sendtoaddress(eve, 2)
rpconn.generate(1)
txid = rpconn.sendfrom('eve', wendy, 1)
decoded_raw_transfer_tx = transactions.get(txid)
with pytest.raises(Exception) as exc:
BlockchainSpider.check_script(decoded_raw_transfer_tx['vouts'])
assert (exc.value.args[0] == 'Invalid ascribe transaction') | -3,958,756,659,847,866,000 | An invalid transaction in this context is one that does not contain a
``vout`` for which the ``hex`` is a valid ``Spool`` verb.
Args;
eve (str): bitcoin address of eve, the sender
wendy (str): bitcoin address of wendy, the receiver
rpconn (AuthServiceProxy): JSON-RPC connection
(:class:`AuthServiceProxy` instance) a local bitcoin regtest
transactions (Transactions): :class:`Transactions` instance to
communicate to the bitcoin regtest node | tests/test_spoolex.py | test_check_script_with_invalid_tx | ascribe/pyspool | python | @pytest.mark.usefixtures('init_blockchain')
def test_check_script_with_invalid_tx(eve, wendy, rpconn, transactions):
'\n An invalid transaction in this context is one that does not contain a\n ``vout`` for which the ``hex`` is a valid ``Spool`` verb.\n\n Args;\n eve (str): bitcoin address of eve, the sender\n wendy (str): bitcoin address of wendy, the receiver\n rpconn (AuthServiceProxy): JSON-RPC connection\n (:class:`AuthServiceProxy` instance) a local bitcoin regtest\n transactions (Transactions): :class:`Transactions` instance to\n communicate to the bitcoin regtest node\n\n '
from spool.spoolex import BlockchainSpider
rpconn.sendtoaddress(eve, 2)
rpconn.generate(1)
txid = rpconn.sendfrom('eve', wendy, 1)
decoded_raw_transfer_tx = transactions.get(txid)
with pytest.raises(Exception) as exc:
BlockchainSpider.check_script(decoded_raw_transfer_tx['vouts'])
assert (exc.value.args[0] == 'Invalid ascribe transaction') |
@pytest.mark.usefixtures('init_blockchain')
def test_get_addresses_with_invalid_tx(eve, wendy, rpconn, transactions):
'\n An invalid transaction in this context is one that has inputs from\n different addresses.\n\n Args;\n eve (str): bitcoin address of eve, the sender\n wendy (str): bitcoin address of wendy, the receiver\n rpconn (AuthServiceProxy): JSON-RPC connection\n (:class:`AuthServiceProxy` instance) a local bitcoin regtest\n transactions (Transactions): :class:`Transactions` instance to\n communicate to the bitcoin regtest node\n\n '
from spool.spoolex import BlockchainSpider, InvalidTransactionError
rpconn.sendtoaddress(eve, 1)
rpconn.sendtoaddress(eve, 1)
rpconn.generate(1)
txid = rpconn.sendfrom('eve', wendy, 2)
decoded_raw_transfer_tx = transactions.get(txid)
with pytest.raises(InvalidTransactionError) as exc:
BlockchainSpider._get_addresses(decoded_raw_transfer_tx)
assert isinstance(exc.value, InvalidTransactionError) | -5,346,686,385,827,037,000 | An invalid transaction in this context is one that has inputs from
different addresses.
Args;
eve (str): bitcoin address of eve, the sender
wendy (str): bitcoin address of wendy, the receiver
rpconn (AuthServiceProxy): JSON-RPC connection
(:class:`AuthServiceProxy` instance) a local bitcoin regtest
transactions (Transactions): :class:`Transactions` instance to
communicate to the bitcoin regtest node | tests/test_spoolex.py | test_get_addresses_with_invalid_tx | ascribe/pyspool | python | @pytest.mark.usefixtures('init_blockchain')
def test_get_addresses_with_invalid_tx(eve, wendy, rpconn, transactions):
'\n An invalid transaction in this context is one that has inputs from\n different addresses.\n\n Args;\n eve (str): bitcoin address of eve, the sender\n wendy (str): bitcoin address of wendy, the receiver\n rpconn (AuthServiceProxy): JSON-RPC connection\n (:class:`AuthServiceProxy` instance) a local bitcoin regtest\n transactions (Transactions): :class:`Transactions` instance to\n communicate to the bitcoin regtest node\n\n '
from spool.spoolex import BlockchainSpider, InvalidTransactionError
rpconn.sendtoaddress(eve, 1)
rpconn.sendtoaddress(eve, 1)
rpconn.generate(1)
txid = rpconn.sendfrom('eve', wendy, 2)
decoded_raw_transfer_tx = transactions.get(txid)
with pytest.raises(InvalidTransactionError) as exc:
BlockchainSpider._get_addresses(decoded_raw_transfer_tx)
assert isinstance(exc.value, InvalidTransactionError) |
@contextlib.contextmanager
def syslog(ctx, config):
'\n start syslog / stop syslog on exit.\n '
if (ctx.archive is None):
(yield)
return
log.info('Starting syslog monitoring...')
archive_dir = misc.get_archive_dir(ctx)
log_dir = '{adir}/syslog'.format(adir=archive_dir)
run.wait(ctx.cluster.run(args=['mkdir', '-p', '-m0755', '--', log_dir], wait=False))
CONF = '/etc/rsyslog.d/80-cephtest.conf'
kern_log = '{log_dir}/kern.log'.format(log_dir=log_dir)
misc_log = '{log_dir}/misc.log'.format(log_dir=log_dir)
conf_lines = ['kern.* -{kern_log};RSYSLOG_FileFormat'.format(kern_log=kern_log), '*.*;kern.none -{misc_log};RSYSLOG_FileFormat'.format(misc_log=misc_log)]
conf_fp = StringIO('\n'.join(conf_lines))
try:
for rem in ctx.cluster.remotes.iterkeys():
log_context = 'system_u:object_r:var_log_t:s0'
for log_path in (kern_log, misc_log):
rem.run(args=('touch %s' % log_path))
rem.chcon(log_path, log_context)
misc.sudo_write_file(remote=rem, path=CONF, data=conf_fp)
conf_fp.seek(0)
run.wait(ctx.cluster.run(args=['sudo', 'service', 'rsyslog', 'restart'], wait=False))
(yield)
finally:
log.info('Shutting down syslog monitoring...')
run.wait(ctx.cluster.run(args=['sudo', 'rm', '-f', '--', CONF, run.Raw('&&'), 'sudo', 'service', 'rsyslog', 'restart'], wait=False))
log.info('Checking logs for errors...')
for rem in ctx.cluster.remotes.iterkeys():
log.debug('Checking %s', rem.name)
r = rem.run(args=['egrep', '--binary-files=text', '\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b', run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)), run.Raw('|'), 'grep', '-v', 'task .* blocked for more than .* seconds', run.Raw('|'), 'grep', '-v', 'lockdep is turned off', run.Raw('|'), 'grep', '-v', 'trying to register non-static key', run.Raw('|'), 'grep', '-v', 'DEBUG: fsize', run.Raw('|'), 'grep', '-v', 'CRON', run.Raw('|'), 'grep', '-v', 'BUG: bad unlock balance detected', run.Raw('|'), 'grep', '-v', 'inconsistent lock state', run.Raw('|'), 'grep', '-v', '*** DEADLOCK ***', run.Raw('|'), 'grep', '-v', 'INFO: possible irq lock inversion dependency detected', run.Raw('|'), 'grep', '-v', 'INFO: NMI handler (perf_event_nmi_handler) took too long to run', run.Raw('|'), 'grep', '-v', 'INFO: recovery required on readonly', run.Raw('|'), 'grep', '-v', 'ceph-create-keys: INFO', run.Raw('|'), 'egrep', '-v', '\\bsalt-master\\b|\\bsalt-minion\\b|\\bsalt-api\\b', run.Raw('|'), 'head', '-n', '1'], stdout=StringIO())
stdout = r.stdout.getvalue()
if (stdout != ''):
log.error('Error in syslog on %s: %s', rem.name, stdout)
set_status(ctx.summary, 'fail')
if ('failure_reason' not in ctx.summary):
ctx.summary['failure_reason'] = "'{error}' in syslog".format(error=stdout)
log.info('Compressing syslogs...')
run.wait(ctx.cluster.run(args=['find', '{adir}/syslog'.format(adir=archive_dir), '-name', '*.log', '-print0', run.Raw('|'), 'sudo', 'xargs', '-0', '--no-run-if-empty', '--', 'gzip', '--'], wait=False)) | -8,120,165,371,422,887,000 | start syslog / stop syslog on exit. | teuthology/task/internal/syslog.py | syslog | dzedro/teuthology | python | @contextlib.contextmanager
def syslog(ctx, config):
'\n \n '
if (ctx.archive is None):
(yield)
return
log.info('Starting syslog monitoring...')
archive_dir = misc.get_archive_dir(ctx)
log_dir = '{adir}/syslog'.format(adir=archive_dir)
run.wait(ctx.cluster.run(args=['mkdir', '-p', '-m0755', '--', log_dir], wait=False))
CONF = '/etc/rsyslog.d/80-cephtest.conf'
kern_log = '{log_dir}/kern.log'.format(log_dir=log_dir)
misc_log = '{log_dir}/misc.log'.format(log_dir=log_dir)
conf_lines = ['kern.* -{kern_log};RSYSLOG_FileFormat'.format(kern_log=kern_log), '*.*;kern.none -{misc_log};RSYSLOG_FileFormat'.format(misc_log=misc_log)]
conf_fp = StringIO('\n'.join(conf_lines))
try:
for rem in ctx.cluster.remotes.iterkeys():
log_context = 'system_u:object_r:var_log_t:s0'
for log_path in (kern_log, misc_log):
rem.run(args=('touch %s' % log_path))
rem.chcon(log_path, log_context)
misc.sudo_write_file(remote=rem, path=CONF, data=conf_fp)
conf_fp.seek(0)
run.wait(ctx.cluster.run(args=['sudo', 'service', 'rsyslog', 'restart'], wait=False))
(yield)
finally:
log.info('Shutting down syslog monitoring...')
run.wait(ctx.cluster.run(args=['sudo', 'rm', '-f', '--', CONF, run.Raw('&&'), 'sudo', 'service', 'rsyslog', 'restart'], wait=False))
log.info('Checking logs for errors...')
for rem in ctx.cluster.remotes.iterkeys():
log.debug('Checking %s', rem.name)
r = rem.run(args=['egrep', '--binary-files=text', '\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b', run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)), run.Raw('|'), 'grep', '-v', 'task .* blocked for more than .* seconds', run.Raw('|'), 'grep', '-v', 'lockdep is turned off', run.Raw('|'), 'grep', '-v', 'trying to register non-static key', run.Raw('|'), 'grep', '-v', 'DEBUG: fsize', run.Raw('|'), 'grep', '-v', 'CRON', run.Raw('|'), 'grep', '-v', 'BUG: bad unlock balance detected', run.Raw('|'), 'grep', '-v', 'inconsistent lock state', run.Raw('|'), 'grep', '-v', '*** DEADLOCK ***', run.Raw('|'), 'grep', '-v', 'INFO: possible irq lock inversion dependency detected', run.Raw('|'), 'grep', '-v', 'INFO: NMI handler (perf_event_nmi_handler) took too long to run', run.Raw('|'), 'grep', '-v', 'INFO: recovery required on readonly', run.Raw('|'), 'grep', '-v', 'ceph-create-keys: INFO', run.Raw('|'), 'egrep', '-v', '\\bsalt-master\\b|\\bsalt-minion\\b|\\bsalt-api\\b', run.Raw('|'), 'head', '-n', '1'], stdout=StringIO())
stdout = r.stdout.getvalue()
if (stdout != ''):
log.error('Error in syslog on %s: %s', rem.name, stdout)
set_status(ctx.summary, 'fail')
if ('failure_reason' not in ctx.summary):
ctx.summary['failure_reason'] = "'{error}' in syslog".format(error=stdout)
log.info('Compressing syslogs...')
run.wait(ctx.cluster.run(args=['find', '{adir}/syslog'.format(adir=archive_dir), '-name', '*.log', '-print0', run.Raw('|'), 'sudo', 'xargs', '-0', '--no-run-if-empty', '--', 'gzip', '--'], wait=False)) |
def define_graph(inputs, labels, is_training, batch_size, replicas_to_aggregate):
'\n Define graph for synchronized training.\n '
model = Cifar10Model(resnet_size=20, data_format='channels_last', resnet_version=2, dtype=tf.float32)
logits = model(inputs, is_training)
loss = softmax_cross_entropy_with_logits_v2_l2_regularized(logits=logits, labels=labels, l2=0.0002, loss_filter_fn=(lambda name: ('batch_normalization' not in name)))
metrics = [TopKAccuracy(logits, labels, topk=1), TopKAccuracy(logits, labels, topk=5)]
global_step = tf.train.get_or_create_global_step()
lr_scheduler = manual_stepping(global_step=global_step, boundaries=[(32000 // replicas_to_aggregate), (48000 // replicas_to_aggregate)], rates=[0.1, 0.01, 0.001], warmup=False)
optimizer_ = tf.train.MomentumOptimizer(learning_rate=lr_scheduler, momentum=0.9, use_nesterov=True)
optimizer = tf.train.SyncReplicasOptimizer(optimizer_, replicas_to_aggregate=replicas_to_aggregate, total_num_replicas=replicas_to_aggregate)
hooks = [optimizer.make_session_run_hook((rank == 0), num_tokens=0)]
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
grads_and_vars = list(optimizer.compute_gradients(loss, tf.trainable_variables()))
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
return (train_op, loss, metrics, hooks) | -1,640,052,700,166,835,500 | Define graph for synchronized training. | tensorflow/imagerecognition/openmpi-cifar10-resnet20-all-reduce/main.py | define_graph | mlbench/mlbench-benchmarks | python | def define_graph(inputs, labels, is_training, batch_size, replicas_to_aggregate):
'\n \n '
model = Cifar10Model(resnet_size=20, data_format='channels_last', resnet_version=2, dtype=tf.float32)
logits = model(inputs, is_training)
loss = softmax_cross_entropy_with_logits_v2_l2_regularized(logits=logits, labels=labels, l2=0.0002, loss_filter_fn=(lambda name: ('batch_normalization' not in name)))
metrics = [TopKAccuracy(logits, labels, topk=1), TopKAccuracy(logits, labels, topk=5)]
global_step = tf.train.get_or_create_global_step()
lr_scheduler = manual_stepping(global_step=global_step, boundaries=[(32000 // replicas_to_aggregate), (48000 // replicas_to_aggregate)], rates=[0.1, 0.01, 0.001], warmup=False)
optimizer_ = tf.train.MomentumOptimizer(learning_rate=lr_scheduler, momentum=0.9, use_nesterov=True)
optimizer = tf.train.SyncReplicasOptimizer(optimizer_, replicas_to_aggregate=replicas_to_aggregate, total_num_replicas=replicas_to_aggregate)
hooks = [optimizer.make_session_run_hook((rank == 0), num_tokens=0)]
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
grads_and_vars = list(optimizer.compute_gradients(loss, tf.trainable_variables()))
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
return (train_op, loss, metrics, hooks) |
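A hedged sketch of how the returned graph pieces might be driven in TF1 follows; the input tensors, worker count, session target, and `rank` wiring are assumptions, since they come from elsewhere in the benchmark script.

```python
# Sketch only: inputs, labels, is_training, num_workers, session_target and rank
# are assumed to be provided by the surrounding benchmark code.
import tensorflow as tf

train_op, loss, metrics, hooks = define_graph(
    inputs, labels, is_training, batch_size=128, replicas_to_aggregate=num_workers)

with tf.train.MonitoredTrainingSession(
        master=session_target, is_chief=(rank == 0), hooks=hooks) as sess:
    while not sess.should_stop():
        _, train_loss = sess.run([train_op, loss])
```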
def shift_decoder(decoder, shift_constant):
' Shifts the indices of a decoder by a constant.\n\n Args:\n decoder (iterable): list of BinaryPolynomial; the decoder\n shift_constant (int): the qubit index that corresponds to the offset.\n\n Returns (list): list of BinaryPolynomial shifted decoder\n '
decode_shifted = []
if (not isinstance(shift_constant, (numpy.int64, numpy.int32, int))):
raise TypeError('the shift to the decoder must be integer. got {}of type {}'.format(shift_constant, type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted | 1,393,594,874,258,132,700 | Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder | src/openfermion/ops/_binary_code.py | shift_decoder | 0tt3r/OpenFermion | python | def shift_decoder(decoder, shift_constant):
' Shifts the indices of a decoder by a constant.\n\n Args:\n decoder (iterable): list of BinaryPolynomial; the decoder\n shift_constant (int): the qubit index that corresponds to the offset.\n\n Returns (list): list of BinaryPolynomial shifted decoder\n '
decode_shifted = []
if (not isinstance(shift_constant, (numpy.int64, numpy.int32, int))):
raise TypeError('the shift to the decoder must be integer. got {}of type {}'.format(shift_constant, type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted |
def double_decoding(decoder_1, decoder_2):
' Concatenates two decodings\n\n Args:\n decoder_1 (iterable): list of BinaryPolynomial\n decoding of the outer code layer\n decoder_2 (iterable): list of BinaryPolynomial\n decoding of the inner code layer\n\n Returns (list): list of BinaryPolynomial the decoding defined by\n w -> decoder_1( decoder_2(w) )\n '
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = (tmp_term + tmp_sum)
doubled_decoder += [tmp_sum]
return doubled_decoder | -7,527,210,314,016,534,000 | Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) ) | src/openfermion/ops/_binary_code.py | double_decoding | 0tt3r/OpenFermion | python | def double_decoding(decoder_1, decoder_2):
' Concatenates two decodings\n\n Args:\n decoder_1 (iterable): list of BinaryPolynomial\n decoding of the outer code layer\n decoder_2 (iterable): list of BinaryPolynomial\n decoding of the inner code layer\n\n Returns (list): list of BinaryPolynomial the decoding defined by\n w -> decoder_1( decoder_2(w) )\n '
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = (tmp_term + tmp_sum)
doubled_decoder += [tmp_sum]
return doubled_decoder |
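A toy sketch of both helpers is given below; the `BinaryPolynomial` string syntax (`'w0'`, `'+'`, `'1'`) is assumed from openfermion's conventions, and the decoders themselves are made up for illustration.

```python
# Toy decoders; not taken from any real fermion-to-qubit code.
from openfermion.ops import BinaryPolynomial

decoder = [BinaryPolynomial('w0'),
           BinaryPolynomial('w1'),
           BinaryPolynomial('w0 + w1 + 1')]

shifted = shift_decoder(decoder, 2)         # same polynomials, now on qubits w2, w3
inner = [BinaryPolynomial('w0'), BinaryPolynomial('w1 + 1')]
combined = double_decoding(decoder, inner)  # evaluates decoder(inner(w))
```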
def __init__(self, encoding, decoding):
' Initialization of a binary code.\n\n Args:\n encoding (np.ndarray or list): nested lists or binary 2D-array\n decoding (array or list): list of BinaryPolynomial (list or str).\n\n Raises:\n TypeError: non-list, array like encoding or decoding, unsuitable\n BinaryPolynomial generators,\n BinaryCodeError: in case of decoder/encoder size mismatch or\n decoder size, qubits indexed mismatch\n '
if (not isinstance(encoding, (numpy.ndarray, list))):
raise TypeError('encoding must be a list or array.')
if (not isinstance(decoding, (numpy.ndarray, list))):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
(self.n_qubits, self.n_modes) = numpy.shape(encoding)
if (self.n_modes != len(decoding)):
raise BinaryCodeError('size mismatch, decoder and encoder should have the same first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int, numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = (decoder_qubits | set(symbolic_binary.enumerate_qubits()))
else:
raise TypeError('decoder component provided is not a suitable for BinaryPolynomial', symbolic_binary)
if (len(decoder_qubits) != self.n_qubits):
raise BinaryCodeError('decoder and encoder provided has different number of qubits')
if ((max(decoder_qubits) + 1) > self.n_qubits):
raise BinaryCodeError('decoder is not indexing some qubits. Qubitsindexed are: {}'.format(decoder_qubits)) | -5,366,171,525,332,897,000 | Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch | src/openfermion/ops/_binary_code.py | __init__ | 0tt3r/OpenFermion | python | def __init__(self, encoding, decoding):
' Initialization of a binary code.\n\n Args:\n encoding (np.ndarray or list): nested lists or binary 2D-array\n decoding (array or list): list of BinaryPolynomial (list or str).\n\n Raises:\n TypeError: non-list, array like encoding or decoding, unsuitable\n BinaryPolynomial generators,\n BinaryCodeError: in case of decoder/encoder size mismatch or\n decoder size, qubits indexed mismatch\n '
if (not isinstance(encoding, (numpy.ndarray, list))):
raise TypeError('encoding must be a list or array.')
if (not isinstance(decoding, (numpy.ndarray, list))):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
(self.n_qubits, self.n_modes) = numpy.shape(encoding)
if (self.n_modes != len(decoding)):
raise BinaryCodeError('size mismatch, decoder and encoder should have the same first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int, numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = (decoder_qubits | set(symbolic_binary.enumerate_qubits()))
else:
raise TypeError('decoder component provided is not a suitable for BinaryPolynomial', symbolic_binary)
if (len(decoder_qubits) != self.n_qubits):
raise BinaryCodeError('decoder and encoder provided has different number of qubits')
if ((max(decoder_qubits) + 1) > self.n_qubits):
raise BinaryCodeError('decoder is not indexing some qubits. Qubitsindexed are: {}'.format(decoder_qubits)) |
def __iadd__(self, appendix):
' In-place appending a binary code with +=.\n\n Args:\n appendix (BinaryCode): The code to append to the present one.\n\n Returns (BinaryCode): A global binary code with size\n (n_modes1 + n_modes2), (n_qubits1,n_qubits2)\n\n Raises:\n TypeError: Appendix must be a BinaryCode.\n '
if (not isinstance(appendix, BinaryCode)):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder, shift_decoder(appendix.decoder, self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None], [None, appendix.encoder]])
(self.n_qubits, self.n_modes) = numpy.shape(self.encoder)
return self | -2,979,734,494,670,934,000 | In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode. | src/openfermion/ops/_binary_code.py | __iadd__ | 0tt3r/OpenFermion | python | def __iadd__(self, appendix):
' In-place appending a binary code with +=.\n\n Args:\n appendix (BinaryCode): The code to append to the present one.\n\n Returns (BinaryCode): A global binary code with size\n (n_modes1 + n_modes2), (n_qubits1,n_qubits2)\n\n Raises:\n TypeError: Appendix must be a BinaryCode.\n '
if (not isinstance(appendix, BinaryCode)):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder, shift_decoder(appendix.decoder, self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None], [None, appendix.encoder]])
(self.n_qubits, self.n_modes) = numpy.shape(self.encoder)
return self |
def __add__(self, appendix):
'Appends two binary codes via addition +.\n\n Args:\n appendix (BinaryCode): The code to append to the present one.\n\n Returns (BinaryCode): global binary code\n '
twin = copy.deepcopy(self)
twin += appendix
return twin | 6,067,573,203,762,698,000 | Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code | src/openfermion/ops/_binary_code.py | __add__ | 0tt3r/OpenFermion | python | def __add__(self, appendix):
'Appends two binary codes via addition +.\n\n Args:\n appendix (BinaryCode): The code to append to the present one.\n\n Returns (BinaryCode): global binary code\n '
twin = copy.deepcopy(self)
twin += appendix
return twin |
def __imul__(self, factor):
'In-place code concatenation or appendage via *= .\n Multiplication with integer will yield appendage, otherwise\n concatenation.\n\n Args:\n factor (int or BinaryCode): the BinaryCode to concatenate. In case\n of int, it will append the code to itself factor times.\n\n Returns (BinaryCode): segmented or concatenated code\n\n Raises:\n TypeError: factor must be an integer or a BinaryCode\n BinaryCodeError: size mismatch between self and factor\n ValueError: in case of an integer factor that is < 1\n '
if (not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int))):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if (self.n_qubits != factor.n_modes):
raise BinaryCodeError('size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
(self.n_qubits, self.n_modes) = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if (factor < 1):
raise ValueError('integer factor has to be positive, non-zero ')
self.encoder = scipy.sparse.kron(scipy.sparse.identity(factor, format='csc', dtype=int), self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder, shift_decoder(tmp_decoder, (index * self.n_qubits)))
self.n_qubits *= factor
self.n_modes *= factor
return self | 8,357,015,318,212,129,000 | In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1 | src/openfermion/ops/_binary_code.py | __imul__ | 0tt3r/OpenFermion | python | def __imul__(self, factor):
'In-place code concatenation or appendage via *= .\n Multiplication with integer will yield appendage, otherwise\n concatenation.\n\n Args:\n factor (int or BinaryCode): the BinaryCode to concatenate. In case\n of int, it will append the code to itself factor times.\n\n Returns (BinaryCode): segmented or concatenated code\n\n Raises:\n TypeError: factor must be an integer or a BinaryCode\n BinaryCodeError: size mismatch between self and factor\n ValueError: in case of an integer factor that is < 1\n '
if (not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int))):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if (self.n_qubits != factor.n_modes):
raise BinaryCodeError('size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
(self.n_qubits, self.n_modes) = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if (factor < 1):
raise ValueError('integer factor has to be positive, non-zero ')
self.encoder = scipy.sparse.kron(scipy.sparse.identity(factor, format='csc', dtype=int), self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder, shift_decoder(tmp_decoder, (index * self.n_qubits)))
self.n_qubits *= factor
self.n_modes *= factor
return self |
def __mul__(self, factor):
' Concatenation of two codes or appendage the same code factor times\n in case of integer factor.\n\n Args:\n factor (int or BinaryCode): the BinaryCode to concatenate. In case\n of int, it will append the code to itself factor times.\n\n Returns (BinaryCode): segmented or concatenated code\n '
twin = copy.deepcopy(self)
twin *= factor
return twin | 384,758,140,904,999,740 | Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code | src/openfermion/ops/_binary_code.py | __mul__ | 0tt3r/OpenFermion | python | def __mul__(self, factor):
' Concatenation of two codes or appendage the same code factor times\n in case of integer factor.\n\n Args:\n factor (int or BinaryCode): the BinaryCode to concatenate. In case\n of int, it will append the code to itself factor times.\n\n Returns (BinaryCode): segmented or concatenated code\n '
twin = copy.deepcopy(self)
twin *= factor
return twin |
def __rmul__(self, factor):
' Appending the same code factor times.\n\n Args:\n factor (int): integer defining number of appendages.\n\n Returns (BinaryCode): Segmented code.\n\n Raises:\n TypeError: factor must be an integer\n '
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return (self * factor)
else:
raise TypeError('the left multiplier must be an integer to aBinaryCode. Was given {} of type {}'.format(factor, type(factor))) | 8,989,757,218,044,066,000 | Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer | src/openfermion/ops/_binary_code.py | __rmul__ | 0tt3r/OpenFermion | python | def __rmul__(self, factor):
' Appending the same code factor times.\n\n Args:\n factor (int): integer defining number of appendages.\n\n Returns (BinaryCode): Segmented code.\n\n Raises:\n TypeError: factor must be an integer\n '
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return (self * factor)
else:
raise TypeError('the left multiplier must be an integer to aBinaryCode. Was given {} of type {}'.format(factor, type(factor))) |
def __str__(self):
' Return an easy-to-read string representation.'
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += (term.__str__() + ',')
dec_str = dec_str[:(- 1)]
string_return.append((dec_str + ']'))
return str(string_return) | -746,197,833,112,043,300 | Return an easy-to-read string representation. | src/openfermion/ops/_binary_code.py | __str__ | 0tt3r/OpenFermion | python | def __str__(self):
' '
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += (term.__str__() + ',')
dec_str = dec_str[:(- 1)]
string_return.append((dec_str + ']'))
return str(string_return) |
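A minimal usage sketch of the BinaryCode operator API documented in the records above. Only the +, * and integer-* semantics come from those docstrings; the constructor arguments below are an assumption based on the class's encoder/decoder attributes, so treat the concrete values as illustrative.

from openfermion.ops import BinaryCode

# hypothetical 2-qubit / 3-mode code: the third occupation is decoded as the
# parity of the first two (encoder is n_qubits x n_modes, decoder has n_modes entries)
code = BinaryCode([[1, 0, 0], [0, 1, 0]], ['w0', 'w1', 'w0 + w1'])

appended = code + code   # appendage: two disjoint blocks -> 4 qubits, 6 modes
repeated = 3 * code      # integer factor: appends the code to itself 3 times
print(appended.n_qubits, appended.n_modes)   # expected: 4 6
print(repeated.n_qubits, repeated.n_modes)   # expected: 6 9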
@app.get('/recommendation')
async def recommend(request):
'\n Gets recommendations for user\n Expects args in query string form -> user=x&count=n\n Returns json object {posts, unvoted, user, meta}\n '
args = request.raw_args
recommender = Recommender(app.predictor, request['accessor'], read_config())
posts = (await recommender.recommend_for(args['user'], int(args.get('count', 10))))
return json(posts) | 3,964,531,211,473,154,000 | Gets recommendations for user
Expects args in query string form -> user=x&count=n
Returns json object {posts, unvoted, user, meta} | kiwi-content/kiwi/app.py | recommend | bubblegumsoldier/kiwi | python | @app.get('/recommendation')
async def recommend(request):
'\n Gets recommendations for user\n Expects args in query string form -> user=x&count=n\n Returns json object {posts, unvoted, user, meta}\n '
args = request.raw_args
recommender = Recommender(app.predictor, request['accessor'], read_config())
posts = (await recommender.recommend_for(args['user'], int(args.get('count', 10))))
return json(posts) |
@app.post('/feedback')
async def feedback(request: Request):
'Stores the feedback for a recommended post. Will return an information object on success and an empty object on failure.\n Think about returning 409-Conflict on failure instead, because the empty object can cause an issue in engine service.'
vote = request.json['vote']
config = read_config()
recommender = Recommender(app.predictor, request['accessor'], config)
try:
vote_result = (await recommender.store_feedback(create_vote(vote, config['positive_cutoff'])))
return json(vote_result)
except KeyError:
abort(400, 'Unknown user') | -7,407,837,287,500,175,000 | Stores the feedback for a recommended post. Will return an information object on success and an empty object on failure.
Think about returning 409-Conflict on failure instead, because the empty object can cause an issue in engine service. | kiwi-content/kiwi/app.py | feedback | bubblegumsoldier/kiwi | python | @app.post('/feedback')
async def feedback(request: Request):
'Stores the feedback for a recommended post. Will return an information object on success and an empty object on failure.\n Think about returning 409-Conflict on failure instead, because the empty object can cause an issue in engine service.'
vote = request.json['vote']
config = read_config()
recommender = Recommender(app.predictor, request['accessor'], config)
try:
vote_result = (await recommender.store_feedback(create_vote(vote, config['positive_cutoff'])))
return json(vote_result)
except KeyError:
abort(400, 'Unknown user') |
@app.post('/content')
async def content(request: Request):
'\n Inserts posts into the database. The request needs the format\n { "posts": [{"id": string, "tags": string}]}.\n Returns the amount of inserted items and 200-OK.\n '
filtered_posts = [(post['id'], post['tags']) for post in request.json['posts']]
inserted = (await request['accessor'].add_content(filtered_posts))
if (inserted > 0):
ensure_future(retrain(app, app.loop))
return json({'inserted_count': inserted}) | 7,956,493,187,145,312,000 | Inserts posts into the database. The request needs the format
{ "posts": [{"id": string, "tags": string}]}.
Returns the amount of inserted items and 200-OK. | kiwi-content/kiwi/app.py | content | bubblegumsoldier/kiwi | python | @app.post('/content')
async def content(request: Request):
'\n Inserts posts into the database. The request needs the format\n { "posts": [{"id": string, "tags": string}]}.\n Returns the amount of inserted items and 200-OK.\n '
filtered_posts = [(post['id'], post['tags']) for post in request.json['posts']]
inserted = (await request['accessor'].add_content(filtered_posts))
if (inserted > 0):
ensure_future(retrain(app, app.loop))
return json({'inserted_count': inserted}) |
@app.get('/activation')
async def activation(request: Request):
'\n Returns the activation value for the given set of heuristics\n '
heuristics = request.json['heuristics']
try:
utv = (await app.predictor.get_user_taste_vector(heuristics['user']))
except KeyError:
utv = None
ac = ActivationCalculator(heuristics, request['accessor'])
a = (await ac.get_activation(utv))
return json({'activation': a, 'received_heuristics': heuristics}) | 2,118,346,643,659,316,500 | Returns the activation value for the given set of heuristics | kiwi-content/kiwi/app.py | activation | bubblegumsoldier/kiwi | python | @app.get('/activation')
async def activation(request: Request):
'\n \n '
heuristics = request.json['heuristics']
try:
utv = (await app.predictor.get_user_taste_vector(heuristics['user']))
except KeyError:
utv = None
ac = ActivationCalculator(heuristics, request['accessor'])
a = (await ac.get_activation(utv))
return json({'activation': a, 'received_heuristics': heuristics}) |
def allocate_buffers(engine):
"Allocates host and device buffer for TRT engine inference.\n\n This function is similair to the one in ../../common.py, but\n converts network outputs (which are np.float32) appropriately\n before writing them to Python buffer. This is needed, since\n TensorRT plugins doesn't support output type description, and\n in our particular case, we use NMS plugin as network output.\n\n Args:\n engine (trt.ICudaEngine): TensorRT engine\n\n Returns:\n inputs [HostDeviceMem]: engine input memory\n outputs [HostDeviceMem]: engine output memory\n bindings [int]: buffer to device bindings\n stream (cuda.Stream): cuda stream for engine inference synchronization\n "
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
binding_to_type = {'Input': np.float32, 'NMS': np.float32, 'NMS_1': np.int32}
for binding in engine:
size = (trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size)
dtype = binding_to_type[str(binding)]
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
bindings.append(int(device_mem))
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return (inputs, outputs, bindings, stream) | 4,037,928,288,186,843,600 | Allocates host and device buffer for TRT engine inference.
This function is similar to the one in ../../common.py, but
converts network outputs (which are np.float32) appropriately
before writing them to Python buffer. This is needed, since
TensorRT plugins don't support output type description, and
in our particular case, we use NMS plugin as network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization | samples/python/uff_ssd/utils/engine.py | allocate_buffers | GreyZzzzzzXh/TensorRT | python | def allocate_buffers(engine):
"Allocates host and device buffer for TRT engine inference.\n\n This function is similair to the one in ../../common.py, but\n converts network outputs (which are np.float32) appropriately\n before writing them to Python buffer. This is needed, since\n TensorRT plugins doesn't support output type description, and\n in our particular case, we use NMS plugin as network output.\n\n Args:\n engine (trt.ICudaEngine): TensorRT engine\n\n Returns:\n inputs [HostDeviceMem]: engine input memory\n outputs [HostDeviceMem]: engine output memory\n bindings [int]: buffer to device bindings\n stream (cuda.Stream): cuda stream for engine inference synchronization\n "
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
binding_to_type = {'Input': np.float32, 'NMS': np.float32, 'NMS_1': np.int32}
for binding in engine:
size = (trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size)
dtype = binding_to_type[str(binding)]
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
bindings.append(int(device_mem))
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return (inputs, outputs, bindings, stream) |
def rainbow_to_vector(r, timeformat='h'):
" Convert Rainbow object to np.arrays\n Parameters\n ----------\n r : Rainbow object\n chromatic Rainbow object to convert into array format\n timeformat : str\n (optional, default='hours')\n The time format to use (seconds, minutes, hours, days etc.)\n Returns\n ----------\n rflux : np.array\n flux (MJy/sr) [n_wavelengths x n_integrations]\n rfluxe : np.array\n flux error (MJy/sr) [n_wavelengths x n_integrations]\n rtime : np.array\n time (BJD_TDB, houra) [n_integrations]\n rwavel : np.array\n wavelength (microns) [n_wavelengths]\n "
secondformat = ['second', 'seconds', 'sec', 's']
minuteformat = ['minute', 'minutes', 'min', 'm']
hourformat = ['hour', 'hours', 'h']
dayformat = ['day', 'days', 'd']
yearformat = ['year', 'years', 'y']
rflux = r.fluxlike['flux']
rfluxe = r.fluxlike['uncertainty']
rtime = r.timelike['time']
rwavel = r.wavelike['wavelength']
if (timeformat in secondformat):
rtime = (rtime * 3600)
elif (timeformat in minuteformat):
rtime = (rtime * 60)
elif (timeformat in hourformat):
pass
elif (timeformat in dayformat):
rtime = (rtime / 24.0)
elif (timeformat in yearformat):
rtime = (rtime / (24 * 365.0))
else:
warnings.warn('Unrecognised Time Format!')
return
return (rflux, rfluxe, rtime, rwavel) | 979,080,137,809,821,700 | Convert Rainbow object to np.arrays
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into array format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
rflux : np.array
flux (MJy/sr) [n_wavelengths x n_integrations]
rfluxe : np.array
flux error (MJy/sr) [n_wavelengths x n_integrations]
rtime : np.array
time (BJD_TDB, hours) [n_integrations]
rwavel : np.array
wavelength (microns) [n_wavelengths] | src/utils.py | rainbow_to_vector | catrionamurray/chromatic_fitting | python | def rainbow_to_vector(r, timeformat='h'):
" Convert Rainbow object to np.arrays\n Parameters\n ----------\n r : Rainbow object\n chromatic Rainbow object to convert into array format\n timeformat : str\n (optional, default='hours')\n The time format to use (seconds, minutes, hours, days etc.)\n Returns\n ----------\n rflux : np.array\n flux (MJy/sr) [n_wavelengths x n_integrations]\n rfluxe : np.array\n flux error (MJy/sr) [n_wavelengths x n_integrations]\n rtime : np.array\n time (BJD_TDB, houra) [n_integrations]\n rwavel : np.array\n wavelength (microns) [n_wavelengths]\n "
secondformat = ['second', 'seconds', 'sec', 's']
minuteformat = ['minute', 'minutes', 'min', 'm']
hourformat = ['hour', 'hours', 'h']
dayformat = ['day', 'days', 'd']
yearformat = ['year', 'years', 'y']
rflux = r.fluxlike['flux']
rfluxe = r.fluxlike['uncertainty']
rtime = r.timelike['time']
rwavel = r.wavelike['wavelength']
if (timeformat in secondformat):
rtime = (rtime * 3600)
elif (timeformat in minuteformat):
rtime = (rtime * 60)
elif (timeformat in hourformat):
pass
elif (timeformat in dayformat):
rtime = (rtime / 24.0)
elif (timeformat in yearformat):
rtime = (rtime / (24 * 365.0))
else:
warnings.warn('Unrecognised Time Format!')
return
return (rflux, rfluxe, rtime, rwavel) |
def rainbow_to_df(r, timeformat='h'):
" Convert Rainbow object to pandas dataframe\n Parameters\n ----------\n r : Rainbow object\n chromatic Rainbow object to convert into pandas df format\n timeformat : str\n (optional, default='hours')\n The time format to use (seconds, minutes, hours, days etc.)\n Returns\n ----------\n pd.DataFrame\n "
(rflux, rfluxe, rtime, rwavel) = rainbow_to_vector(r, timeformat)
(x, y) = np.meshgrid(rtime.to_value(), rwavel.to_value())
rainbow_dict = {f'Time ({timeformat})': x.ravel(), 'Wavelength (microns)': y.ravel(), 'Flux': rflux.ravel(), 'Flux Error': rfluxe.ravel()}
df = pd.DataFrame(rainbow_dict)
return df | 43,283,962,878,375,304 | Convert Rainbow object to pandas dataframe
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into pandas df format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
pd.DataFrame | src/utils.py | rainbow_to_df | catrionamurray/chromatic_fitting | python | def rainbow_to_df(r, timeformat='h'):
" Convert Rainbow object to pandas dataframe\n Parameters\n ----------\n r : Rainbow object\n chromatic Rainbow object to convert into pandas df format\n timeformat : str\n (optional, default='hours')\n The time format to use (seconds, minutes, hours, days etc.)\n Returns\n ----------\n pd.DataFrame\n "
(rflux, rfluxe, rtime, rwavel) = rainbow_to_vector(r, timeformat)
(x, y) = np.meshgrid(rtime.to_value(), rwavel.to_value())
rainbow_dict = {f'Time ({timeformat})': x.ravel(), 'Wavelength (microns)': y.ravel(), 'Flux': rflux.ravel(), 'Flux Error': rfluxe.ravel()}
df = pd.DataFrame(rainbow_dict)
return df |
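The meshgrid/ravel flattening that rainbow_to_df performs can be illustrated without a chromatic Rainbow object; the small arrays below are stand-ins for the Rainbow flux, time and wavelength fields, so the numbers are placeholders rather than real data.

import numpy as np
import pandas as pd

rtime = np.array([0.0, 1.0, 2.0])       # hours, n_integrations = 3
rwavel = np.array([0.5, 1.5])           # microns, n_wavelengths = 2
rflux = np.ones((2, 3))                 # [n_wavelengths x n_integrations]
rfluxe = 0.01 * np.ones((2, 3))

x, y = np.meshgrid(rtime, rwavel)       # both shaped (2, 3)
df = pd.DataFrame({'Time (h)': x.ravel(),
                   'Wavelength (microns)': y.ravel(),
                   'Flux': rflux.ravel(),
                   'Flux Error': rfluxe.ravel()})
print(df.shape)                         # (6, 4): one row per (wavelength, time) pair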
def do_import(self, timestamp):
'Call one key import RPC.'
if (self.call == Call.single):
if (self.data == Data.address):
response = self.try_rpc(self.node.importaddress, self.address['address'], self.label, (self.rescan == Rescan.yes))
elif (self.data == Data.pub):
response = self.try_rpc(self.node.importpubkey, self.address['pubkey'], self.label, (self.rescan == Rescan.yes))
elif (self.data == Data.priv):
response = self.try_rpc(self.node.importprivkey, self.key, self.label, (self.rescan == Rescan.yes))
assert_equal(response, None)
elif (self.call == Call.multi):
response = self.node.importmulti([{'scriptPubKey': {'address': self.address['address']}, 'timestamp': ((timestamp + TIMESTAMP_WINDOW) + (1 if (self.rescan == Rescan.late_timestamp) else 0)), 'pubkeys': ([self.address['pubkey']] if (self.data == Data.pub) else []), 'keys': ([self.key] if (self.data == Data.priv) else []), 'label': self.label, 'watchonly': (self.data != Data.priv)}], {'rescan': (self.rescan in (Rescan.yes, Rescan.late_timestamp))})
assert_equal(response, [{'success': True}]) | -4,665,100,780,963,157,000 | Call one key import RPC. | test/functional/import-rescan.py | do_import | BlueScionic/vivarium | python | def do_import(self, timestamp):
if (self.call == Call.single):
if (self.data == Data.address):
response = self.try_rpc(self.node.importaddress, self.address['address'], self.label, (self.rescan == Rescan.yes))
elif (self.data == Data.pub):
response = self.try_rpc(self.node.importpubkey, self.address['pubkey'], self.label, (self.rescan == Rescan.yes))
elif (self.data == Data.priv):
response = self.try_rpc(self.node.importprivkey, self.key, self.label, (self.rescan == Rescan.yes))
assert_equal(response, None)
elif (self.call == Call.multi):
response = self.node.importmulti([{'scriptPubKey': {'address': self.address['address']}, 'timestamp': ((timestamp + TIMESTAMP_WINDOW) + (1 if (self.rescan == Rescan.late_timestamp) else 0)), 'pubkeys': ([self.address['pubkey']] if (self.data == Data.pub) else []), 'keys': ([self.key] if (self.data == Data.priv) else []), 'label': self.label, 'watchonly': (self.data != Data.priv)}], {'rescan': (self.rescan in (Rescan.yes, Rescan.late_timestamp))})
assert_equal(response, [{'success': True}]) |
def check(self, txid=None, amount=None, confirmations=None):
'Verify that getbalance/listtransactions return expected values.'
balance = self.node.getbalance(self.label, 0, False, True)
assert_equal(balance, self.expected_balance)
txs = self.node.listtransactions(self.label, 10000, 0, True)
assert_equal(len(txs), self.expected_txs)
if (txid is not None):
(tx,) = [tx for tx in txs if (tx['txid'] == txid)]
assert_equal(tx['account'], self.label)
assert_equal(tx['address'], self.address['address'])
assert_equal(tx['amount'], amount)
assert_equal(tx['category'], 'receive')
assert_equal(tx['label'], self.label)
assert_equal(tx['txid'], txid)
assert_equal(tx['confirmations'], confirmations)
assert_equal(('trusted' not in tx), True)
if (self.data != Data.priv):
assert_equal(tx['involvesWatchonly'], True)
else:
assert_equal(('involvesWatchonly' not in tx), True) | 226,965,051,230,975,360 | Verify that getbalance/listtransactions return expected values. | test/functional/import-rescan.py | check | BlueScionic/vivarium | python | def check(self, txid=None, amount=None, confirmations=None):
balance = self.node.getbalance(self.label, 0, False, True)
assert_equal(balance, self.expected_balance)
txs = self.node.listtransactions(self.label, 10000, 0, True)
assert_equal(len(txs), self.expected_txs)
if (txid is not None):
(tx,) = [tx for tx in txs if (tx['txid'] == txid)]
assert_equal(tx['account'], self.label)
assert_equal(tx['address'], self.address['address'])
assert_equal(tx['amount'], amount)
assert_equal(tx['category'], 'receive')
assert_equal(tx['label'], self.label)
assert_equal(tx['txid'], txid)
assert_equal(tx['confirmations'], confirmations)
assert_equal(('trusted' not in tx), True)
if (self.data != Data.priv):
assert_equal(tx['involvesWatchonly'], True)
else:
assert_equal(('involvesWatchonly' not in tx), True) |
def filter_func(data, search_notes):
' Return all objects'
search_notes.append(('error', 'Unable to parse search expression', 0, len(query_string)))
return False | 8,760,193,685,689,438,000 | Return all objects | sampledb/frontend/objects.py | filter_func | sciapp/sampledb | python | def filter_func(data, search_notes):
' '
search_notes.append(('error', 'Unable to parse search expression', 0, len(query_string)))
return False |
def elapsed(t0=0.0):
'get elapsed time from the given time\n\n Returns:\n now: the absolute time now\n dt_str: elapsed time in string\n '
now = time()
dt = (now - t0)
dt_sec = Decimal(str(dt)).quantize(Decimal('.0001'), rounding=ROUND_DOWN)
if (dt_sec <= 1):
dt_str = (str(dt_sec) + ' second')
else:
dt_str = (str(dt_sec) + ' seconds')
return (now, dt_str) | 3,239,857,340,972,211,000 | get elapsed time from the given time
Returns:
now: the absolute time now
dt_str: elapsed time in string | andes/utils/time.py | elapsed | mhdella/andes | python | def elapsed(t0=0.0):
'get elapsed time from the given time\n\n Returns:\n now: the absolute time now\n dt_str: elapsed time in string\n '
now = time()
dt = (now - t0)
dt_sec = Decimal(str(dt)).quantize(Decimal('.0001'), rounding=ROUND_DOWN)
if (dt_sec <= 1):
dt_str = (str(dt_sec) + ' second')
else:
dt_str = (str(dt_sec) + ' seconds')
return (now, dt_str) |
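A short timing sketch using elapsed() as defined in the record above. The import path is inferred from the record's andes/utils/time.py path and is an assumption; everything else follows the documented return values.

from time import sleep
from andes.utils.time import elapsed  # assumed import location

t0, _ = elapsed()          # first call: remember the current absolute time
sleep(0.25)                # stand-in for real work
_, dt_str = elapsed(t0)    # second call: e.g. '0.2501 seconds'
print(dt_str)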
def __str__(self):
'\n String for representing the Model object (in Admin site etc.)\n '
return self.name | 6,625,425,250,428,873,000 | String for representing the Model object (in Admin site etc.) | src/locallibrary/catalog/models.py | __str__ | zhekazuev/mozilla-django-learning | python | def __str__(self):
'\n \n '
return self.name |
def __str__(self):
'\n String for representing the Model object (in Admin site etc.)\n '
return self.name | 6,625,425,250,428,873,000 | String for representing the Model object (in Admin site etc.) | src/locallibrary/catalog/models.py | __str__ | zhekazuev/mozilla-django-learning | python | def __str__(self):
'\n \n '
return self.name |
def __str__(self):
'\n String for representing the Model object.\n '
return self.title | 8,601,006,417,814,906,000 | String for representing the Model object. | src/locallibrary/catalog/models.py | __str__ | zhekazuev/mozilla-django-learning | python | def __str__(self):
'\n \n '
return self.title |
def get_absolute_url(self):
'\n Returns the url to access a particular book instance.\n '
return reverse('book-detail', args=[str(self.id)]) | -9,027,267,842,509,070,000 | Returns the url to access a particular book instance. | src/locallibrary/catalog/models.py | get_absolute_url | zhekazuev/mozilla-django-learning | python | def get_absolute_url(self):
'\n \n '
return reverse('book-detail', args=[str(self.id)]) |
def __str__(self):
'\n String for representing the Model object\n '
return '{0} ({1})'.format(self.id, self.book.title) | -7,487,876,419,173,492,000 | String for representing the Model object | src/locallibrary/catalog/models.py | __str__ | zhekazuev/mozilla-django-learning | python | def __str__(self):
'\n \n '
return '{0} ({1})'.format(self.id, self.book.title) |
def get_absolute_url(self):
'\n Returns the url to access a particular author instance.\n '
return reverse('author-detail', args=[str(self.id)]) | -894,346,297,409,379,600 | Returns the url to access a particular author instance. | src/locallibrary/catalog/models.py | get_absolute_url | zhekazuev/mozilla-django-learning | python | def get_absolute_url(self):
'\n \n '
return reverse('author-detail', args=[str(self.id)]) |
def __str__(self):
'\n String for representing the Model object.\n '
return '{0} ({1})'.format(self.last_name, self.first_name) | -6,520,315,775,681,146,000 | String for representing the Model object. | src/locallibrary/catalog/models.py | __str__ | zhekazuev/mozilla-django-learning | python | def __str__(self):
'\n \n '
return '{0} ({1})'.format(self.last_name, self.first_name) |
def main(args):
'Main function to parse in Nuclei Dataset from Kaggle and store as HDF5\n\n Parameters\n ----------\n args: ArgumentParser()\n input_dir: str\n directory of the Nuclei data\n output_dir: str\n path to the HDF5 output directory\n '
hdf5_fn = h5py.File(os.path.join(args.output_dir, 'data_360.hdf5'), 'a')
data_dirs = glob(os.path.join(args.input_dir, '*/'))
with tqdm.tqdm(total=len(data_dirs), unit='folder') as progress_bar:
for path in data_dirs:
data_name = path.split('/')[(- 2)]
(x, y, masks) = parse_data(path)
if (x is None):
progress_bar.update(1)
continue
y = np.expand_dims(y, axis=0)
data = np.vstack((x, y, masks))
hdf5_fn.create_dataset(str(data_name), data=data, dtype=np.float, chunks=True)
progress_bar.update(1)
hdf5_fn.close() | 6,529,766,840,498,225,000 | Main function to parse in Nuclei Dataset from Kaggle and store as HDF5
Parameters
----------
args: ArgumentParser()
input_dir: str
directory of the Nuclei data
output_dir: str
path to the HDF5 output directory | process_data/nuclei_create_hdf5.py | main | marshuang80/CellSegmentation | python | def main(args):
'Main function to parse in Nuclei Dataset from Kaggle and store as HDF5\n\n Parameters\n ----------\n args: ArgumentParser()\n input_dir: str\n directory of the Nuclei data\n output_dir: str\n path to the HDF5 output directory\n '
hdf5_fn = h5py.File(os.path.join(args.output_dir, 'data_360.hdf5'), 'a')
data_dirs = glob(os.path.join(args.input_dir, '*/'))
with tqdm.tqdm(total=len(data_dirs), unit='folder') as progress_bar:
for path in data_dirs:
data_name = path.split('/')[(- 2)]
(x, y, masks) = parse_data(path)
if (x is None):
progress_bar.update(1)
continue
y = np.expand_dims(y, axis=0)
data = np.vstack((x, y, masks))
hdf5_fn.create_dataset(str(data_name), data=data, dtype=np.float, chunks=True)
progress_bar.update(1)
hdf5_fn.close() |
def train_step(self, *inputs, **kwargs):
'train_step() API for module wrapped by DistributedDataParallel.\n\n This method is basically the same as\n ``DistributedDataParallel.forward()``, while replacing\n ``self.module.forward()`` with ``self.module.train_step()``.\n It is compatible with PyTorch 1.1 - 1.5.\n '
if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.7')) and self.reducer._rebuild_buckets()):
print_log('Reducer buckets have been rebuilt in this iteration.', logger='mmcv')
if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.11.0'))):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
elif (getattr(self, 'require_forward_param_sync', False) and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
(inputs, kwargs) = self.scatter(inputs, kwargs, self.device_ids)
if (len(self.device_ids) == 1):
output = self.module.train_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.train_step(*inputs, **kwargs)
if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.11.0'))):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled() and getattr(self, 'require_backward_grad_sync', False) and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
elif (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) > digit_version('1.2'))):
self.require_forward_param_sync = False
return output | -3,869,767,233,722,932,700 | train_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.train_step()``.
It is compatible with PyTorch 1.1 - 1.5. | mmcv/parallel/distributed.py | train_step | BIGWangYuDong/mmcv | python | def train_step(self, *inputs, **kwargs):
'train_step() API for module wrapped by DistributedDataParallel.\n\n This method is basically the same as\n ``DistributedDataParallel.forward()``, while replacing\n ``self.module.forward()`` with ``self.module.train_step()``.\n It is compatible with PyTorch 1.1 - 1.5.\n '
if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.7')) and self.reducer._rebuild_buckets()):
print_log('Reducer buckets have been rebuilt in this iteration.', logger='mmcv')
if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.11.0'))):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
elif (getattr(self, 'require_forward_param_sync', False) and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
(inputs, kwargs) = self.scatter(inputs, kwargs, self.device_ids)
if (len(self.device_ids) == 1):
output = self.module.train_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.train_step(*inputs, **kwargs)
if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.11.0'))):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled() and getattr(self, 'require_backward_grad_sync', False) and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
elif (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) > digit_version('1.2'))):
self.require_forward_param_sync = False
return output |
def val_step(self, *inputs, **kwargs):
'val_step() API for module wrapped by DistributedDataParallel.\n\n This method is basically the same as\n ``DistributedDataParallel.forward()``, while replacing\n ``self.module.forward()`` with ``self.module.val_step()``.\n It is compatible with PyTorch 1.1 - 1.5.\n '
if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.7')) and self.reducer._rebuild_buckets()):
print_log('Reducer buckets have been rebuilt in this iteration.', logger='mmcv')
if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.11.0'))):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
elif (getattr(self, 'require_forward_param_sync', False) and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
(inputs, kwargs) = self.scatter(inputs, kwargs, self.device_ids)
if (len(self.device_ids) == 1):
output = self.module.val_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.val_step(*inputs, **kwargs)
if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.11.0'))):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled() and getattr(self, 'require_backward_grad_sync', False) and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
elif (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) > digit_version('1.2'))):
self.require_forward_param_sync = False
return output | 5,817,637,801,054,475,000 | val_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.val_step()``.
It is compatible with PyTorch 1.1 - 1.5. | mmcv/parallel/distributed.py | val_step | BIGWangYuDong/mmcv | python | def val_step(self, *inputs, **kwargs):
'val_step() API for module wrapped by DistributedDataParallel.\n\n This method is basically the same as\n ``DistributedDataParallel.forward()``, while replacing\n ``self.module.forward()`` with ``self.module.val_step()``.\n It is compatible with PyTorch 1.1 - 1.5.\n '
if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.7')) and self.reducer._rebuild_buckets()):
print_log('Reducer buckets have been rebuilt in this iteration.', logger='mmcv')
if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.11.0'))):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
elif (getattr(self, 'require_forward_param_sync', False) and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
(inputs, kwargs) = self.scatter(inputs, kwargs, self.device_ids)
if (len(self.device_ids) == 1):
output = self.module.val_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.val_step(*inputs, **kwargs)
if (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) >= digit_version('1.11.0'))):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled() and getattr(self, 'require_backward_grad_sync', False) and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
elif (('parrots' not in TORCH_VERSION) and (digit_version(TORCH_VERSION) > digit_version('1.2'))):
self.require_forward_param_sync = False
return output |
def del_none(d):
'\n Delete dict keys with None values, and empty lists, recursively.\n '
for (key, value) in d.items():
if ((value is None) or (isinstance(value, list) and (len(value) == 0))):
del d[key]
elif isinstance(value, dict):
del_none(value)
return d | 8,142,591,104,627,484,000 | Delete dict keys with None values, and empty lists, recursively. | models.py | del_none | cwilso/chromium-dashboard | python | def del_none(d):
'\n \n '
for (key, value) in d.items():
if ((value is None) or (isinstance(value, list) and (len(value) == 0))):
del d[key]
elif isinstance(value, dict):
del_none(value)
return d |
def list_to_chunks(l, n):
'Yield successive n-sized chunk lists from l.'
for i in xrange(0, len(l), n):
(yield l[i:(i + n)]) | -1,047,640,047,794,921,100 | Yield successive n-sized chunk lists from l. | models.py | list_to_chunks | cwilso/chromium-dashboard | python | def list_to_chunks(l, n):
for i in xrange(0, len(l), n):
(yield l[i:(i + n)]) |
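What the two helpers above do on small inputs, assuming they are in scope (both are module-level functions in this models.py). The file targets Python 2 (xrange, items() returning a list), which is why del_none can delete keys while iterating without raising; the sample dict is purely illustrative.

feature = {'name': 'CSS feature', 'spec': None, 'comments': [],
           'meta': {'owner': 'a@example.com', 'bug': None}}
print(del_none(feature))
# -> {'name': 'CSS feature', 'meta': {'owner': 'a@example.com'}} (key order may vary)

print(list(list_to_chunks(range(7), 3)))
# -> [[0, 1, 2], [3, 4, 5], [6]]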
@classmethod
def fetch_all_components(self, update_cache=False):
'Returns the list of blink components from live endpoint if unavailable in the cache.'
key = ('%s|blinkcomponents' % settings.MEMCACHE_KEY_PREFIX)
components = memcache.get(key)
if ((components is None) or update_cache):
components = []
result = urlfetch.fetch(self.COMPONENTS_ENDPOINT, deadline=60)
if (result.status_code == 200):
components = sorted(json.loads(result.content))
memcache.set(key, components)
else:
logging.error(('Fetching blink components returned: %s' % result.status_code))
return components | 2,615,132,007,290,353,000 | Returns the list of blink components from live endpoint if unavailable in the cache. | models.py | fetch_all_components | cwilso/chromium-dashboard | python | @classmethod
def fetch_all_components(self, update_cache=False):
key = ('%s|blinkcomponents' % settings.MEMCACHE_KEY_PREFIX)
components = memcache.get(key)
if ((components is None) or update_cache):
components = []
result = urlfetch.fetch(self.COMPONENTS_ENDPOINT, deadline=60)
if (result.status_code == 200):
components = sorted(json.loads(result.content))
memcache.set(key, components)
else:
logging.error(('Fetching blink components returned: %s' % result.status_code))
return components |
@classmethod
def fetch_wf_content_for_components(self, update_cache=False):
'Returns the /web content that uses each blink component.'
key = ('%s|wfcomponents' % settings.MEMCACHE_KEY_PREFIX)
components = memcache.get(key)
if ((components is None) or update_cache):
components = {}
result = urlfetch.fetch(self.WF_CONTENT_ENDPOINT, deadline=60)
if (result.status_code == 200):
components = json.loads(result.content)
memcache.set(key, components)
else:
logging.error(('Fetching /web blink components content returned: %s' % result.status_code))
return components | 3,755,372,784,358,231,600 | Returns the /web content that uses each blink component. | models.py | fetch_wf_content_for_components | cwilso/chromium-dashboard | python | @classmethod
def fetch_wf_content_for_components(self, update_cache=False):
key = ('%s|wfcomponents' % settings.MEMCACHE_KEY_PREFIX)
components = memcache.get(key)
if ((components is None) or update_cache):
components = {}
result = urlfetch.fetch(self.WF_CONTENT_ENDPOINT, deadline=60)
if (result.status_code == 200):
components = json.loads(result.content)
memcache.set(key, components)
else:
logging.error(('Fetching /web blink components content returned: %s' % result.status_code))
return components |
@classmethod
def update_db(self):
'Updates the db with new Blink components from the json endpoint'
self.fetch_wf_content_for_components(update_cache=True)
new_components = self.fetch_all_components(update_cache=True)
existing_comps = self.all().fetch(None)
for name in new_components:
if (not len([x.name for x in existing_comps if (x.name == name)])):
logging.info(('Adding new BlinkComponent: ' + name))
c = BlinkComponent(name=name)
c.put() | 1,520,643,486,992,808,700 | Updates the db with new Blink components from the json endpoint | models.py | update_db | cwilso/chromium-dashboard | python | @classmethod
def update_db(self):
self.fetch_wf_content_for_components(update_cache=True)
new_components = self.fetch_all_components(update_cache=True)
existing_comps = self.all().fetch(None)
for name in new_components:
if (not len([x.name for x in existing_comps if (x.name == name)])):
logging.info(('Adding new BlinkComponent: ' + name))
c = BlinkComponent(name=name)
c.put() |
@classmethod
def get_by_name(self, component_name):
'Fetch blink component with given name.'
q = self.all()
q.filter('name =', component_name)
component = q.fetch(1)
if (not component):
logging.error(('%s is an unknown BlinkComponent.' % component_name))
return None
return component[0] | 1,578,639,703,039,212,000 | Fetch blink component with given name. | models.py | get_by_name | cwilso/chromium-dashboard | python | @classmethod
def get_by_name(self, component_name):
q = self.all()
q.filter('name =', component_name)
component = q.fetch(1)
if (not component):
logging.error(('%s is an unknown BlinkComponent.' % component_name))
return None
return component[0] |
def __notify_feature_subscribers_of_changes(self, is_update):
'Asynchronously notifies subscribers of new features and property changes to features by\n posting to a task queue.'
changed_props = []
for (prop_name, prop) in self.properties().iteritems():
new_val = getattr(self, prop_name, None)
old_val = getattr(self, ('_old_' + prop_name), None)
if (new_val != old_val):
changed_props.append({'prop_name': prop_name, 'old_val': old_val, 'new_val': new_val})
payload = json.dumps({'changes': changed_props, 'is_update': is_update, 'feature': self.format_for_template(version=2)})
queue = taskqueue.Queue()
task = taskqueue.Task(method='POST', url='/tasks/email-subscribers', target='notifier', payload=payload)
queue.add(task)
queue = taskqueue.Queue()
task = taskqueue.Task(method='POST', url='/tasks/send_notifications', target='notifier', payload=payload)
queue.add(task) | 1,186,048,520,462,652,700 | Asynchronously notifies subscribers of new features and property changes to features by
posting to a task queue. | models.py | __notify_feature_subscribers_of_changes | cwilso/chromium-dashboard | python | def __notify_feature_subscribers_of_changes(self, is_update):
'Asynchronously notifies subscribers of new features and property changes to features by\n posting to a task queue.'
changed_props = []
for (prop_name, prop) in self.properties().iteritems():
new_val = getattr(self, prop_name, None)
old_val = getattr(self, ('_old_' + prop_name), None)
if (new_val != old_val):
changed_props.append({'prop_name': prop_name, 'old_val': old_val, 'new_val': new_val})
payload = json.dumps({'changes': changed_props, 'is_update': is_update, 'feature': self.format_for_template(version=2)})
queue = taskqueue.Queue()
task = taskqueue.Task(method='POST', url='/tasks/email-subscribers', target='notifier', payload=payload)
queue.add(task)
queue = taskqueue.Queue()
task = taskqueue.Task(method='POST', url='/tasks/send_notifications', target='notifier', payload=payload)
queue.add(task) |
def add_to_component_subscribers(self, component_name):
'Adds the user to the list of Blink component subscribers.'
c = BlinkComponent.get_by_name(component_name)
if c:
if (not len(list_with_component(self.blink_components, c))):
self.blink_components.append(c.key())
return self.put()
return None | 377,905,915,556,286,800 | Adds the user to the list of Blink component subscribers. | models.py | add_to_component_subscribers | cwilso/chromium-dashboard | python | def add_to_component_subscribers(self, component_name):
c = BlinkComponent.get_by_name(component_name)
if c:
if (not len(list_with_component(self.blink_components, c))):
self.blink_components.append(c.key())
return self.put()
return None |
def remove_from_component_subscribers(self, component_name, remove_as_owner=False):
'Removes the user from the list of Blink component subscribers or as the owner\n of the component.'
c = BlinkComponent.get_by_name(component_name)
if c:
if remove_as_owner:
self.primary_blink_components = list_without_component(self.primary_blink_components, c)
else:
self.blink_components = list_without_component(self.blink_components, c)
self.primary_blink_components = list_without_component(self.primary_blink_components, c)
return self.put()
return None | -6,766,741,114,973,741,000 | Removes the user from the list of Blink component subscribers or as the owner
of the component. | models.py | remove_from_component_subscribers | cwilso/chromium-dashboard | python | def remove_from_component_subscribers(self, component_name, remove_as_owner=False):
'Removes the user from the list of Blink component subscribers or as the owner\n of the component.'
c = BlinkComponent.get_by_name(component_name)
if c:
if remove_as_owner:
self.primary_blink_components = list_without_component(self.primary_blink_components, c)
else:
self.blink_components = list_without_component(self.blink_components, c)
self.primary_blink_components = list_without_component(self.primary_blink_components, c)
return self.put()
return None |
def add_as_component_owner(self, component_name):
'Adds the user as the Blink component owner.'
c = BlinkComponent.get_by_name(component_name)
if c:
self.add_to_component_subscribers(component_name)
if (not len(list_with_component(self.primary_blink_components, c))):
self.primary_blink_components.append(c.key())
return self.put()
return None | 8,702,347,263,936,550,000 | Adds the user as the Blink component owner. | models.py | add_as_component_owner | cwilso/chromium-dashboard | python | def add_as_component_owner(self, component_name):
c = BlinkComponent.get_by_name(component_name)
if c:
self.add_to_component_subscribers(component_name)
if (not len(list_with_component(self.primary_blink_components, c))):
self.primary_blink_components.append(c.key())
return self.put()
return None |
def get_current_time(format_str: str='%Y-%m-%d %H:%M:%S'):
'\n Get the current time, defaults to 2020-01-01 00:00:00 format\n :param format_str: format\n :return:\n '
return time.strftime(format_str, time.localtime()) | 2,675,374,514,212,199,400 | Get the current time, defaults to 2020-01-01 00:00:00 format
:param format_str: format
:return: | src/sogou_wechat/mongoDB.py | get_current_time | matiastang/selenium-learning | python | def get_current_time(format_str: str='%Y-%m-%d %H:%M:%S'):
'\n Get the current time, defaults to 2020-01-01 00:00:00 format\n :param format_str: format\n :return:\n '
return time.strftime(format_str, time.localtime()) |
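get_current_time() above is a thin wrapper around time.strftime, so its default and a custom format_str can be shown directly with the standard library; the example output values are illustrative only.

import time

print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))  # default format, e.g. '2020-01-01 00:00:00'
print(time.strftime('%Y%m%d', time.localtime()))             # custom format_str, e.g. '20200101'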
def __init__(self):
'Initialization\n Initialize mongo db\n '
mongo_uri = ('mongodb://%s:%s@%s:%s' % (MONGO_CONFIG['user'], MONGO_CONFIG['pwd'], MONGO_CONFIG['host'], MONGO_CONFIG['port']))
self.mongo = MongoClient(mongo_uri)
self.sogou_db = self.mongo['sogou_dev']
self.sogou_search_col = self.sogou_db['sogou_search_results'] | -2,319,926,698,126,357,000 | Initialization
Initialize mongo db | src/sogou_wechat/mongoDB.py | __init__ | matiastang/selenium-learning | python | def __init__(self):
'Initialization\n Initialize mongo db\n '
mongo_uri = ('mongodb://%s:%s@%s:%s' % (MONGO_CONFIG['user'], MONGO_CONFIG['pwd'], MONGO_CONFIG['host'], MONGO_CONFIG['port']))
self.mongo = MongoClient(mongo_uri)
self.sogou_db = self.mongo['sogou_dev']
self.sogou_search_col = self.sogou_db['sogou_search_results'] |
def update_sogou_login_cookie(self, username, cookie):
'\n Update the Sogou WeChat login cookie info\n :param username:\n :param cookie:\n :return:\n '
col = self.sogou_db['sogou_login_cookies']
ctime = get_current_time()
find_obj = {'nickname': username, 'is_valid': 1}
login_item = col.find_one(find_obj)
print(login_item)
if (not login_item):
cookie = ('DESC=0; %s' % cookie)
col.insert_one({'cookie': cookie, 'nickname': username, 'device': '0', 'state': 'normal', 'c_time': ctime, 'm_time': ctime, 'is_valid': 1, 'failures': 0})
return
cookie = ('DESC=%s; %s' % (login_item['device'], cookie))
col.update_one(find_obj, {'$set': {'state': 'normal', 'cookie': cookie, 'c_time': ctime, 'm_time': ctime, 'failures': 0}}) | 1,485,805,371,386,330,600 | 更新搜狗微信登录 cookie 信息
:param username:
:param cookie:
:return: | src/sogou_wechat/mongoDB.py | update_sogou_login_cookie | matiastang/selenium-learning | python | def update_sogou_login_cookie(self, username, cookie):
'\n 更新搜狗微信登录 cookie 信息\n :param username:\n :param cookie:\n :return:\n '
col = self.sogou_db['sogou_login_cookies']
ctime = get_current_time()
find_obj = {'nickname': username, 'is_valid': 1}
login_item = col.find_one(find_obj)
print(login_item)
if (not login_item):
cookie = ('DESC=0; %s' % cookie)
col.insert_one({'cookie': cookie, 'nickname': username, 'device': '0', 'state': 'normal', 'c_time': ctime, 'm_time': ctime, 'is_valid': 1, 'failures': 0})
return
cookie = ('DESC=%s; %s' % (login_item['device'], cookie))
col.update_one(find_obj, {'$set': {'state': 'normal', 'cookie': cookie, 'c_time': ctime, 'm_time': ctime, 'failures': 0}}) |
def insert_sogou_search_result(self, result):
'\n Save Sogou search info\n :param results: array of results\n '
ctime = get_current_time()
find_obj = {'id': result['id'], 'is_valid': 1}
search_item = self.sogou_search_col.find_one(find_obj)
print(search_item)
new_result = result
if (not search_item):
new_result['c_time'] = ctime
new_result['m_time'] = ctime
new_result['is_valid'] = 1
self.sogou_search_col.insert_one(new_result)
return
new_result['m_time'] = ctime
self.sogou_search_col.update_one(find_obj, {'$set': new_result}) | -2,791,544,919,105,490,000 | Save Sogou search info
:param results: array of results | src/sogou_wechat/mongoDB.py | insert_sogou_search_result | matiastang/selenium-learning | python | def insert_sogou_search_result(self, result):
'\n Save Sogou search info\n :param results: array of results\n '
ctime = get_current_time()
find_obj = {'id': result['id'], 'is_valid': 1}
search_item = self.sogou_search_col.find_one(find_obj)
print(search_item)
new_result = result
if (not search_item):
new_result['c_time'] = ctime
new_result['m_time'] = ctime
new_result['is_valid'] = 1
self.sogou_search_col.insert_one(new_result)
return
new_result['m_time'] = ctime
self.sogou_search_col.update_one(find_obj, {'$set': new_result}) |
@utils.classproperty
def resource_server(cls) -> Optional[str]:
'\n The resource_server name for the API and scopes associated with this client.\n\n This information is pulled from the ``scopes`` attribute of the client class.\n If the client does not have associated scopes, this value will be ``None``.\n '
if (cls.scopes is None):
return None
return cls.scopes.resource_server | -8,940,739,177,058,369,000 | The resource_server name for the API and scopes associated with this client.
This information is pulled from the ``scopes`` attribute of the client class.
If the client does not have associated scopes, this value will be ``None``. | src/globus_sdk/client.py | resource_server | rudyardrichter/globus-sdk-python | python | @utils.classproperty
def resource_server(cls) -> Optional[str]:
'\n The resource_server name for the API and scopes associated with this client.\n\n This information is pulled from the ``scopes`` attribute of the client class.\n If the client does not have associated scopes, this value will be ``None``.\n '
if (cls.scopes is None):
return None
return cls.scopes.resource_server |
def get(self, path: str, *, query_params: Optional[Dict[(str, Any)]]=None, headers: Optional[Dict[(str, str)]]=None) -> GlobusHTTPResponse:
'\n Make a GET request to the specified path.\n\n See :py:meth:`~.BaseClient.request` for details on the various parameters.\n\n :return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object\n '
log.debug(f'GET to {path} with query_params {query_params}')
return self.request('GET', path, query_params=query_params, headers=headers) | -8,680,274,190,658,656,000 | Make a GET request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object | src/globus_sdk/client.py | get | rudyardrichter/globus-sdk-python | python | def get(self, path: str, *, query_params: Optional[Dict[(str, Any)]]=None, headers: Optional[Dict[(str, str)]]=None) -> GlobusHTTPResponse:
'\n Make a GET request to the specified path.\n\n See :py:meth:`~.BaseClient.request` for details on the various parameters.\n\n :return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object\n '
log.debug(f'GET to {path} with query_params {query_params}')
return self.request('GET', path, query_params=query_params, headers=headers) |
def post(self, path: str, *, query_params: Optional[Dict[(str, Any)]]=None, data: Union[(None, Dict[(str, Any)], utils.PayloadWrapper)]=None, headers: Optional[Dict[(str, str)]]=None, encoding: Optional[str]=None) -> GlobusHTTPResponse:
'\n Make a POST request to the specified path.\n\n See :py:meth:`~.BaseClient.request` for details on the various parameters.\n\n :return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object\n '
log.debug(f'POST to {path} with query_params {query_params}')
return self.request('POST', path, query_params=query_params, data=data, headers=headers, encoding=encoding) | -6,405,945,534,864,613,000 | Make a POST request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object | src/globus_sdk/client.py | post | rudyardrichter/globus-sdk-python | python | def post(self, path: str, *, query_params: Optional[Dict[(str, Any)]]=None, data: Union[(None, Dict[(str, Any)], utils.PayloadWrapper)]=None, headers: Optional[Dict[(str, str)]]=None, encoding: Optional[str]=None) -> GlobusHTTPResponse:
'\n Make a POST request to the specified path.\n\n See :py:meth:`~.BaseClient.request` for details on the various parameters.\n\n :return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object\n '
log.debug(f'POST to {path} with query_params {query_params}')
return self.request('POST', path, query_params=query_params, data=data, headers=headers, encoding=encoding) |
def delete(self, path: str, *, query_params: Optional[Dict[(str, Any)]]=None, headers: Optional[Dict[(str, str)]]=None) -> GlobusHTTPResponse:
'\n Make a DELETE request to the specified path.\n\n See :py:meth:`~.BaseClient.request` for details on the various parameters.\n\n :return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object\n '
log.debug(f'DELETE to {path} with query_params {query_params}')
return self.request('DELETE', path, query_params=query_params, headers=headers) | 8,448,269,623,345,632,000 | Make a DELETE request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object | src/globus_sdk/client.py | delete | rudyardrichter/globus-sdk-python | python | def delete(self, path: str, *, query_params: Optional[Dict[(str, Any)]]=None, headers: Optional[Dict[(str, str)]]=None) -> GlobusHTTPResponse:
'\n Make a DELETE request to the specified path.\n\n See :py:meth:`~.BaseClient.request` for details on the various parameters.\n\n :return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object\n '
log.debug(f'DELETE to {path} with query_params {query_params}')
return self.request('DELETE', path, query_params=query_params, headers=headers) |
def put(self, path: str, *, query_params: Optional[Dict[(str, Any)]]=None, data: Union[(None, Dict[(str, Any)], utils.PayloadWrapper)]=None, headers: Optional[Dict[(str, str)]]=None, encoding: Optional[str]=None) -> GlobusHTTPResponse:
'\n Make a PUT request to the specified path.\n\n See :py:meth:`~.BaseClient.request` for details on the various parameters.\n\n :return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object\n '
log.debug(f'PUT to {path} with query_params {query_params}')
return self.request('PUT', path, query_params=query_params, data=data, headers=headers, encoding=encoding) | -4,209,089,649,074,972,000 | Make a PUT request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object | src/globus_sdk/client.py | put | rudyardrichter/globus-sdk-python | python | def put(self, path: str, *, query_params: Optional[Dict[(str, Any)]]=None, data: Union[(None, Dict[(str, Any)], utils.PayloadWrapper)]=None, headers: Optional[Dict[(str, str)]]=None, encoding: Optional[str]=None) -> GlobusHTTPResponse:
'\n Make a PUT request to the specified path.\n\n See :py:meth:`~.BaseClient.request` for details on the various parameters.\n\n :return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object\n '
log.debug(f'PUT to {path} with query_params {query_params}')
return self.request('PUT', path, query_params=query_params, data=data, headers=headers, encoding=encoding) |
def patch(self, path: str, *, query_params: Optional[Dict[(str, Any)]]=None, data: Union[(None, Dict[(str, Any)], utils.PayloadWrapper)]=None, headers: Optional[Dict[(str, str)]]=None, encoding: Optional[str]=None) -> GlobusHTTPResponse:
'\n Make a PATCH request to the specified path.\n\n See :py:meth:`~.BaseClient.request` for details on the various parameters.\n\n :return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object\n '
log.debug(f'PATCH to {path} with query_params {query_params}')
return self.request('PATCH', path, query_params=query_params, data=data, headers=headers, encoding=encoding) | -6,768,478,994,757,211,000 | Make a PATCH request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object | src/globus_sdk/client.py | patch | rudyardrichter/globus-sdk-python | python | def patch(self, path: str, *, query_params: Optional[Dict[(str, Any)]]=None, data: Union[(None, Dict[(str, Any)], utils.PayloadWrapper)]=None, headers: Optional[Dict[(str, str)]]=None, encoding: Optional[str]=None) -> GlobusHTTPResponse:
'\n Make a PATCH request to the specified path.\n\n See :py:meth:`~.BaseClient.request` for details on the various parameters.\n\n :return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object\n '
log.debug(f'PATCH to {path} with query_params {query_params}')
return self.request('PATCH', path, query_params=query_params, data=data, headers=headers, encoding=encoding) |
def request(self, method: str, path: str, *, query_params: Optional[Dict[(str, Any)]]=None, data: Union[(None, Dict[(str, Any)], utils.PayloadWrapper)]=None, headers: Optional[Dict[(str, str)]]=None, encoding: Optional[str]=None) -> GlobusHTTPResponse:
'\n Send an HTTP request\n\n :param method: HTTP request method, as an all caps string\n :type method: str\n :param path: Path for the request, with or without leading slash\n :type path: str\n :param query_params: Parameters to be encoded as a query string\n :type query_params: dict, optional\n :param headers: HTTP headers to add to the request\n :type headers: dict\n :param data: Data to send as the request body. May pass through encoding.\n :type data: dict or str\n :param encoding: A way to encode request data. "json", "form", and "text"\n are all valid values. Custom encodings can be used only if they are\n registered with the transport. By default, strings get "text" behavior and\n all other objects get "json".\n :type encoding: str\n\n :return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object\n '
rheaders = ({**headers} if headers else {})
if (path.startswith('https://') or path.startswith('http://')):
url = path
else:
url = utils.slash_join(self.base_url, urllib.parse.quote(path))
log.debug('request will hit URL: %s', url)
r = self.transport.request(method=method, url=url, data=(data.data if isinstance(data, utils.PayloadWrapper) else data), query_params=query_params, headers=rheaders, encoding=encoding, authorizer=self.authorizer)
log.debug('request made to URL: %s', r.url)
if (200 <= r.status_code < 400):
log.debug(f'request completed with response code: {r.status_code}')
return GlobusHTTPResponse(r, self)
log.debug(f'request completed with (error) response code: {r.status_code}')
raise self.error_class(r) | -8,669,220,260,102,745,000 | Send an HTTP request
:param method: HTTP request method, as an all caps string
:type method: str
:param path: Path for the request, with or without leading slash
:type path: str
:param query_params: Parameters to be encoded as a query string
:type query_params: dict, optional
:param headers: HTTP headers to add to the request
:type headers: dict
:param data: Data to send as the request body. May pass through encoding.
:type data: dict or str
:param encoding: A way to encode request data. "json", "form", and "text"
are all valid values. Custom encodings can be used only if they are
registered with the transport. By default, strings get "text" behavior and
all other objects get "json".
:type encoding: str
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object | src/globus_sdk/client.py | request | rudyardrichter/globus-sdk-python | python | def request(self, method: str, path: str, *, query_params: Optional[Dict[(str, Any)]]=None, data: Union[(None, Dict[(str, Any)], utils.PayloadWrapper)]=None, headers: Optional[Dict[(str, str)]]=None, encoding: Optional[str]=None) -> GlobusHTTPResponse:
'\n Send an HTTP request\n\n :param method: HTTP request method, as an all caps string\n :type method: str\n :param path: Path for the request, with or without leading slash\n :type path: str\n :param query_params: Parameters to be encoded as a query string\n :type query_params: dict, optional\n :param headers: HTTP headers to add to the request\n :type headers: dict\n :param data: Data to send as the request body. May pass through encoding.\n :type data: dict or str\n :param encoding: A way to encode request data. "json", "form", and "text"\n are all valid values. Custom encodings can be used only if they are\n registered with the transport. By default, strings get "text" behavior and\n all other objects get "json".\n :type encoding: str\n\n :return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object\n '
rheaders = ({**headers} if headers else {})
if (path.startswith('https://') or path.startswith('http://')):
url = path
else:
url = utils.slash_join(self.base_url, urllib.parse.quote(path))
log.debug('request will hit URL: %s', url)
r = self.transport.request(method=method, url=url, data=(data.data if isinstance(data, utils.PayloadWrapper) else data), query_params=query_params, headers=rheaders, encoding=encoding, authorizer=self.authorizer)
log.debug('request made to URL: %s', r.url)
if (200 <= r.status_code < 400):
log.debug(f'request completed with response code: {r.status_code}')
return GlobusHTTPResponse(r, self)
log.debug(f'request completed with (error) response code: {r.status_code}')
raise self.error_class(r) |
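The put, patch, and delete rows above all funnel into the generic request() shown last: the path is resolved against base_url (absolute URLs pass through, relative ones are percent-quoted and slash-joined), the request is sent, and the result is either wrapped as a GlobusHTTPResponse or raised as an error. A minimal standalone sketch of that dispatch and URL-resolution pattern follows; MiniClient, slash_join, and the dry-run string return value are illustrative assumptions, not part of globus-sdk.

import urllib.parse
from typing import Any, Dict, Optional


def slash_join(a: str, b: str) -> str:
    # Join base URL and path with exactly one slash between them.
    return a.rstrip('/') + '/' + b.lstrip('/')


class MiniClient:
    # Illustrative client: verb helpers delegate to a single request().
    def __init__(self, base_url: str) -> None:
        self.base_url = base_url

    def request(self, method: str, path: str, *,
                query_params: Optional[Dict[str, Any]] = None) -> str:
        # Absolute URLs pass through; relative paths are percent-quoted and
        # joined onto base_url, mirroring the logic in the request row above.
        if path.startswith('https://') or path.startswith('http://'):
            url = path
        else:
            url = slash_join(self.base_url, urllib.parse.quote(path))
        if query_params:
            url += '?' + urllib.parse.urlencode(query_params)
        # A real client would send the request here; returning the prepared
        # method/URL keeps the sketch runnable without a network connection.
        return f'{method} {url}'

    def put(self, path: str, **kwargs) -> str:
        return self.request('PUT', path, **kwargs)

    def delete(self, path: str, **kwargs) -> str:
        return self.request('DELETE', path, **kwargs)


client = MiniClient('https://transfer.api.example.org/v0.10')
print(client.put('endpoint/my endpoint', query_params={'fields': 'id'}))
# PUT https://transfer.api.example.org/v0.10/endpoint/my%20endpoint?fields=id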
@property
def name(self):
'Base name.'
return self._name | 6,807,634,801,631,736,000 | Base name. | robustnessgym/core/identifier.py | name | ANarayan/robustness-gym | python | @property
def name(self):
return self._name |
@property
def index(self):
'Index associated with the identifier.'
return self._index | 1,556,588,699,845,362,400 | Index associated with the identifier. | robustnessgym/core/identifier.py | index | ANarayan/robustness-gym | python | @property
def index(self):
return self._index |
@property
def parameters(self):
'Additional parameters contained in the identifier.'
return self._parameters | 7,311,550,149,156,835,000 | Additional parameters contained in the identifier. | robustnessgym/core/identifier.py | parameters | ANarayan/robustness-gym | python | @property
def parameters(self):
return self._parameters |
@classmethod
def range(cls, n: int, _name: str, **kwargs) -> List[Identifier]:
'Create a list of identifiers, with index varying from 1 to `n`.'
if (n > 1):
return [cls(_name=_name, _index=i, **kwargs) for i in range(1, (n + 1))]
return [cls(_name=_name, **kwargs)] | 319,522,355,280,191,170 | Create a list of identifiers, with index varying from 1 to `n`. | robustnessgym/core/identifier.py | range | ANarayan/robustness-gym | python | @classmethod
def range(cls, n: int, _name: str, **kwargs) -> List[Identifier]:
if (n > 1):
return [cls(_name=_name, _index=i, **kwargs) for i in range(1, (n + 1))]
return [cls(_name=_name, **kwargs)] |
def __call__(self, **kwargs):
'Call the identifier with additional parameters to return a new\n identifier.'
ident = Identifier.loads(self.dumps())
for (parameter, value) in kwargs.items():
ident.add_parameter(parameter, value)
return ident | -3,241,385,535,052,178,400 | Call the identifier with additional parameters to return a new
identifier. | robustnessgym/core/identifier.py | __call__ | ANarayan/robustness-gym | python | def __call__(self, **kwargs):
'Call the identifier with additional parameters to return a new\n identifier.'
ident = Identifier.loads(self.dumps())
for (parameter, value) in kwargs.items():
ident.add_parameter(parameter, value)
return ident |
def dumps(self):
'Dump the identifier to JSON.'
return json.dumps(self.__dict__) | 9,035,629,313,671,611,000 | Dump the identifier to JSON. | robustnessgym/core/identifier.py | dumps | ANarayan/robustness-gym | python | def dumps(self):
return json.dumps(self.__dict__) |
@staticmethod
def _parse_args(s: str):
'https://stackoverflow.com/questions/49723047/parsing-a-string-as-a-\n python-argument-list.'
args = 'f({})'.format(s)
tree = ast.parse(args)
funccall = tree.body[0].value
params = {}
for arg in funccall.keywords:
try:
params[arg.arg] = ast.literal_eval(arg.value)
except ValueError:
params[arg.arg] = arg.value.id
return params | -3,864,303,452,000,702,500 | https://stackoverflow.com/questions/49723047/parsing-a-string-as-a-
python-argument-list. | robustnessgym/core/identifier.py | _parse_args | ANarayan/robustness-gym | python | @staticmethod
def _parse_args(s: str):
'https://stackoverflow.com/questions/49723047/parsing-a-string-as-a-\n python-argument-list.'
args = 'f({})'.format(s)
tree = ast.parse(args)
funccall = tree.body[0].value
params = {}
for arg in funccall.keywords:
try:
params[arg.arg] = ast.literal_eval(arg.value)
except ValueError:
params[arg.arg] = arg.value.id
return params |
@classmethod
def parse(cls, s: str) -> Identifier:
'Parse in an identifier from string.'
if ('(' in s):
(name_index, params) = s.split('(')
params = params.split(')')[0]
else:
name_index = s
params = None
if ('-' in name_index):
(name, index) = (name_index.split('-')[:(- 1)], name_index.split('-')[(- 1)])
name = '-'.join(name)
if index.isnumeric():
index = int(index)
else:
name = '-'.join([name, index])
index = None
else:
name = name_index
index = None
if (params is not None):
params = cls._parse_args(params)
else:
params = {}
return cls(_name=name, _index=index, **params) | -3,336,107,861,688,347,000 | Parse in an identifier from string. | robustnessgym/core/identifier.py | parse | ANarayan/robustness-gym | python | @classmethod
def parse(cls, s: str) -> Identifier:
if ('(' in s):
(name_index, params) = s.split('(')
params = params.split(')')[0]
else:
name_index = s
params = None
if ('-' in name_index):
(name, index) = (name_index.split('-')[:(- 1)], name_index.split('-')[(- 1)])
name = '-'.join(name)
if index.isnumeric():
index = int(index)
else:
name = '-'.join([name, index])
index = None
else:
name = name_index
index = None
if (params is not None):
params = cls._parse_args(params)
else:
params = {}
return cls(_name=name, _index=index, **params) |
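The parse/_parse_args pair above recovers keyword parameters from a rendered identifier such as "name-1(a=1, b=x)" by wrapping the parameter text in a dummy call and walking the AST. A self-contained sketch of that trick; parse_kwargs is an assumed name and not part of robustness-gym.

import ast
from typing import Any, Dict


def parse_kwargs(s: str) -> Dict[str, Any]:
    # Wrap the raw "a=1, b=x" text in a dummy call so ast can parse it, then
    # read the keyword arguments off the Call node (same trick as _parse_args).
    call = ast.parse('f({})'.format(s)).body[0].value
    params: Dict[str, Any] = {}
    for kw in call.keywords:
        try:
            params[kw.arg] = ast.literal_eval(kw.value)   # literals: 1, 'x', [2, 3]
        except ValueError:
            params[kw.arg] = kw.value.id                  # bare names keep their identifier
    return params


print(parse_kwargs("num_samples=10, split='train', model=bert"))
# {'num_samples': 10, 'split': 'train', 'model': 'bert'}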
def without(self, *params) -> Identifier:
'Returns an identifier without `params`.'
return Identifier(self.name, self.index, **{k: v for (k, v) in self.parameters.items() if (k not in set(params))}) | -7,226,501,445,548,212,000 | Returns an identifier without `params`. | robustnessgym/core/identifier.py | without | ANarayan/robustness-gym | python | def without(self, *params) -> Identifier:
return Identifier(self.name, self.index, **{k: v for (k, v) in self.parameters.items() if (k not in set(params))}) |
@classmethod
def loads(cls, s: str):
'Load the identifier from JSON.'
identifier = Identifier(_name='')
identifier.__dict__ = json.loads(s)
return identifier | 1,539,369,053,633,276,000 | Load the identifier from JSON. | robustnessgym/core/identifier.py | loads | ANarayan/robustness-gym | python | @classmethod
def loads(cls, s: str):
identifier = Identifier(_name='')

identifier.__dict__ = json.loads(s)
return identifier |
def add_parameter(self, parameter: str, value: Any) -> None:
'Add a parameter to the identifier.'
if isinstance(value, Callable):
self.parameters[parameter] = '.'.join([str(value.__module__), str(value.__name__)])
else:
self.parameters[parameter] = value | 7,490,064,917,028,692,000 | Add a parameter to the identifier. | robustnessgym/core/identifier.py | add_parameter | ANarayan/robustness-gym | python | def add_parameter(self, parameter: str, value: Any) -> None:
if isinstance(value, Callable):
self.parameters[parameter] = '.'.join([str(value.__module__), str(value.__name__)])
else:
self.parameters[parameter] = value |
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
' Sinusoid position encoding table '
def cal_angle(position, hid_idx):
return (position / np.power(10000, ((2 * (hid_idx // 2)) / d_hid)))
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])
if (padding_idx is not None):
sinusoid_table[padding_idx] = 0.0
return torch.FloatTensor(sinusoid_table) | 6,906,142,374,917,182,000 | Sinusoid position encoding table | src/onqg/utils/sinusoid.py | get_sinusoid_encoding_table | MrSchnappi/RL-for-Question-Generation | python | def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
' '
def cal_angle(position, hid_idx):
return (position / np.power(10000, ((2 * (hid_idx // 2)) / d_hid)))
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])
if (padding_idx is not None):
sinusoid_table[padding_idx] = 0.0
return torch.FloatTensor(sinusoid_table) |
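The table above computes angle[pos, i] = pos / 10000 ** (2 * (i // 2) / d_hid) with Python loops, then writes sin into even columns and cos into odd columns. A vectorized NumPy equivalent is sketched below for checking a few values; sinusoid_table_np is an assumed helper name, not part of the repository above.

import numpy as np


def sinusoid_table_np(n_position: int, d_hid: int) -> np.ndarray:
    # angle[pos, i] = pos / 10000 ** (2 * (i // 2) / d_hid), with sin on even
    # columns and cos on odd columns, as in the loop-based version above.
    pos = np.arange(n_position)[:, None]          # shape (n_position, 1)
    i = np.arange(d_hid)[None, :]                 # shape (1, d_hid)
    angles = pos / np.power(10000, 2 * (i // 2) / d_hid)
    return np.where(i % 2 == 0, np.sin(angles), np.cos(angles))


table = sinusoid_table_np(4, 8)
assert np.isclose(table[0, 0], 0.0)           # sin(0) at position 0, even column
assert np.isclose(table[0, 1], 1.0)           # cos(0) at position 0, odd column
assert np.isclose(table[1, 0], np.sin(1.0))   # first even column at position 1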
def learning_rate_schedule(current_epoch, current_batch, steps_per_epoch, batch_size):
'Handles linear scaling rule, gradual warmup, and LR decay.\n\n Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the\n provided scaling factor.\n\n Args:\n current_epoch: integer, current epoch indexed from 0.\n current_batch: integer, current batch in the current epoch, indexed from 0.\n steps_per_epoch: integer, number of steps in an epoch.\n batch_size: integer, total batch sized.\n\n Returns:\n Adjusted learning rate.\n '
initial_lr = ((BASE_LEARNING_RATE * batch_size) / 256)
epoch = (current_epoch + (float(current_batch) / steps_per_epoch))
(warmup_lr_multiplier, warmup_end_epoch) = LR_SCHEDULE[0]
if (epoch < warmup_end_epoch):
return (((initial_lr * warmup_lr_multiplier) * epoch) / warmup_end_epoch)
for (mult, start_epoch) in LR_SCHEDULE:
if (epoch >= start_epoch):
learning_rate = (initial_lr * mult)
else:
break
return learning_rate | 4,805,554,109,677,772,000 | Handles linear scaling rule, gradual warmup, and LR decay.
Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the
provided scaling factor.
Args:
current_epoch: integer, current epoch indexed from 0.
current_batch: integer, current batch in the current epoch, indexed from 0.
steps_per_epoch: integer, number of steps in an epoch.
batch_size: integer, total batch sized.
Returns:
Adjusted learning rate. | official/vision/image_classification/common.py | learning_rate_schedule | Anku5hk/models | python | def learning_rate_schedule(current_epoch, current_batch, steps_per_epoch, batch_size):
'Handles linear scaling rule, gradual warmup, and LR decay.\n\n Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the\n provided scaling factor.\n\n Args:\n current_epoch: integer, current epoch indexed from 0.\n current_batch: integer, current batch in the current epoch, indexed from 0.\n steps_per_epoch: integer, number of steps in an epoch.\n batch_size: integer, total batch sized.\n\n Returns:\n Adjusted learning rate.\n '
initial_lr = ((BASE_LEARNING_RATE * batch_size) / 256)
epoch = (current_epoch + (float(current_batch) / steps_per_epoch))
(warmup_lr_multiplier, warmup_end_epoch) = LR_SCHEDULE[0]
if (epoch < warmup_end_epoch):
return (((initial_lr * warmup_lr_multiplier) * epoch) / warmup_end_epoch)
for (mult, start_epoch) in LR_SCHEDULE:
if (epoch >= start_epoch):
learning_rate = (initial_lr * mult)
else:
break
return learning_rate |
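learning_rate_schedule above applies the linear-scaling rule (BASE_LEARNING_RATE scaled by batch_size / 256), a linear warmup up to the first LR_SCHEDULE boundary, and stepwise decay afterwards. The sketch below re-traces that arithmetic with assumed constants: BASE_LEARNING_RATE and LR_SCHEDULE are defined elsewhere in common.py, and the values here are illustrative only.

# Assumed values for illustration only; the real BASE_LEARNING_RATE and
# LR_SCHEDULE constants are defined elsewhere in common.py.
BASE_LEARNING_RATE = 0.1
LR_SCHEDULE = [(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)]  # (multiplier, epoch boundary)


def lr_at(epoch: float, batch_size: int) -> float:
    initial_lr = BASE_LEARNING_RATE * batch_size / 256   # linear scaling rule
    warmup_mult, warmup_end = LR_SCHEDULE[0]
    if epoch < warmup_end:
        # Linear warmup from 0 towards initial_lr * warmup_mult.
        return initial_lr * warmup_mult * epoch / warmup_end
    lr = initial_lr
    for mult, start_epoch in LR_SCHEDULE:
        if epoch >= start_epoch:
            lr = initial_lr * mult
        else:
            break
    return lr


for epoch in (1, 10, 45, 90):
    print(epoch, round(lr_at(epoch, batch_size=1024), 4))
# 1 0.08, 10 0.4, 45 0.04, 90 0.0004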
def get_optimizer(learning_rate=0.1):
'Returns optimizer to use.'
return gradient_descent_v2.SGD(learning_rate=learning_rate, momentum=0.9) | 7,685,441,610,714,783,000 | Returns optimizer to use. | official/vision/image_classification/common.py | get_optimizer | Anku5hk/models | python | def get_optimizer(learning_rate=0.1):
return gradient_descent_v2.SGD(learning_rate=learning_rate, momentum=0.9) |
def get_callbacks(steps_per_epoch, learning_rate_schedule_fn=None, pruning_method=None, enable_checkpoint_and_export=False, model_dir=None):
'Returns common callbacks.'
time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
callbacks = [time_callback]
if ((not FLAGS.use_tensor_lr) and learning_rate_schedule_fn):
lr_callback = LearningRateBatchScheduler(learning_rate_schedule_fn, batch_size=FLAGS.batch_size, steps_per_epoch=steps_per_epoch)
callbacks.append(lr_callback)
if FLAGS.enable_tensorboard:
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir)
callbacks.append(tensorboard_callback)
if FLAGS.profile_steps:
profiler_callback = keras_utils.get_profiler_callback(FLAGS.model_dir, FLAGS.profile_steps, FLAGS.enable_tensorboard, steps_per_epoch)
callbacks.append(profiler_callback)
is_pruning_enabled = (pruning_method is not None)
if is_pruning_enabled:
callbacks.append(tfmot.sparsity.keras.UpdatePruningStep())
if (model_dir is not None):
callbacks.append(tfmot.sparsity.keras.PruningSummaries(log_dir=model_dir, profile_batch=0))
if enable_checkpoint_and_export:
if (model_dir is not None):
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
callbacks.append(tf.keras.callbacks.ModelCheckpoint(ckpt_full_path, save_weights_only=True))
return callbacks | 7,434,335,091,102,080,000 | Returns common callbacks. | official/vision/image_classification/common.py | get_callbacks | Anku5hk/models | python | def get_callbacks(steps_per_epoch, learning_rate_schedule_fn=None, pruning_method=None, enable_checkpoint_and_export=False, model_dir=None):
time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
callbacks = [time_callback]
if ((not FLAGS.use_tensor_lr) and learning_rate_schedule_fn):
lr_callback = LearningRateBatchScheduler(learning_rate_schedule_fn, batch_size=FLAGS.batch_size, steps_per_epoch=steps_per_epoch)
callbacks.append(lr_callback)
if FLAGS.enable_tensorboard:
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir)
callbacks.append(tensorboard_callback)
if FLAGS.profile_steps:
profiler_callback = keras_utils.get_profiler_callback(FLAGS.model_dir, FLAGS.profile_steps, FLAGS.enable_tensorboard, steps_per_epoch)
callbacks.append(profiler_callback)
is_pruning_enabled = (pruning_method is not None)
if is_pruning_enabled:
callbacks.append(tfmot.sparsity.keras.UpdatePruningStep())
if (model_dir is not None):
callbacks.append(tfmot.sparsity.keras.PruningSummaries(log_dir=model_dir, profile_batch=0))
if enable_checkpoint_and_export:
if (model_dir is not None):
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
callbacks.append(tf.keras.callbacks.ModelCheckpoint(ckpt_full_path, save_weights_only=True))
return callbacks |
def build_stats(history, eval_output, callbacks):
'Normalizes and returns dictionary of stats.\n\n Args:\n history: Results of the training step. Supports both categorical_accuracy\n and sparse_categorical_accuracy.\n eval_output: Output of the eval step. Assumes first value is eval_loss and\n second value is accuracy_top_1.\n callbacks: a list of callbacks which might include a time history callback\n used during keras.fit.\n\n Returns:\n Dictionary of normalized results.\n '
stats = {}
if eval_output:
stats['accuracy_top_1'] = eval_output[1].item()
stats['eval_loss'] = eval_output[0].item()
if (history and history.history):
train_hist = history.history
stats['loss'] = train_hist['loss'][(- 1)].item()
if ('categorical_accuracy' in train_hist):
stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][(- 1)].item()
elif ('sparse_categorical_accuracy' in train_hist):
stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][(- 1)].item()
if (not callbacks):
return stats
for callback in callbacks:
if isinstance(callback, keras_utils.TimeHistory):
timestamp_log = callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = callback.train_finish_time
if (len(timestamp_log) > 1):
stats['avg_exp_per_second'] = (((callback.batch_size * callback.log_steps) * (len(callback.timestamp_log) - 1)) / (timestamp_log[(- 1)].timestamp - timestamp_log[0].timestamp))
return stats | 1,328,152,771,311,647,200 | Normalizes and returns dictionary of stats.
Args:
history: Results of the training step. Supports both categorical_accuracy
and sparse_categorical_accuracy.
eval_output: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
callbacks: a list of callbacks which might include a time history callback
used during keras.fit.
Returns:
Dictionary of normalized results. | official/vision/image_classification/common.py | build_stats | Anku5hk/models | python | def build_stats(history, eval_output, callbacks):
'Normalizes and returns dictionary of stats.\n\n Args:\n history: Results of the training step. Supports both categorical_accuracy\n and sparse_categorical_accuracy.\n eval_output: Output of the eval step. Assumes first value is eval_loss and\n second value is accuracy_top_1.\n callbacks: a list of callbacks which might include a time history callback\n used during keras.fit.\n\n Returns:\n Dictionary of normalized results.\n '
stats = {}
if eval_output:
stats['accuracy_top_1'] = eval_output[1].item()
stats['eval_loss'] = eval_output[0].item()
if (history and history.history):
train_hist = history.history
stats['loss'] = train_hist['loss'][(- 1)].item()
if ('categorical_accuracy' in train_hist):
stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][(- 1)].item()
elif ('sparse_categorical_accuracy' in train_hist):
stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][(- 1)].item()
if (not callbacks):
return stats
for callback in callbacks:
if isinstance(callback, keras_utils.TimeHistory):
timestamp_log = callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = callback.train_finish_time
if (len(timestamp_log) > 1):
stats['avg_exp_per_second'] = (((callback.batch_size * callback.log_steps) * (len(callback.timestamp_log) - 1)) / (timestamp_log[(- 1)].timestamp - timestamp_log[0].timestamp))
return stats |
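The throughput figure that build_stats reports, avg_exp_per_second, is batch_size * log_steps * (number of logged timestamps - 1) divided by the elapsed time between the first and last timestamp. A worked example with made-up numbers:

# Worked example of the throughput formula above (all numbers are made up).
batch_size, log_steps = 256, 100
timestamps = [0.0, 5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0]  # 11 log entries

avg_exp_per_second = (batch_size * log_steps * (len(timestamps) - 1)
                      / (timestamps[-1] - timestamps[0]))
print(avg_exp_per_second)  # 5120.0 examples per second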
def define_keras_flags(dynamic_loss_scale=True, model=False, optimizer=False, pretrained_filepath=False):
'Define flags for Keras models.'
flags_core.define_base(clean=True, num_gpu=True, run_eagerly=True, train_epochs=True, epochs_between_evals=True, distribution_strategy=True)
flags_core.define_performance(num_parallel_calls=False, synthetic_data=True, dtype=True, all_reduce_alg=True, num_packs=True, tf_gpu_thread_mode=True, datasets_num_private_threads=True, dynamic_loss_scale=dynamic_loss_scale, loss_scale=True, fp16_implementation=True, tf_data_experimental_slack=True, enable_xla=True, force_v2_in_keras_compile=True, training_dataset_cache=True)
flags_core.define_image()
flags_core.define_benchmark()
flags_core.define_distribution()
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
flags.DEFINE_boolean(name='set_learning_phase_to_train', default=True, help='If skip eval, also set Keras learning phase to 1 (training).')
flags.DEFINE_boolean(name='explicit_gpu_placement', default=False, help='If not using distribution strategy, explicitly set device scope for the Keras training loop.')
flags.DEFINE_boolean(name='use_trivial_model', default=False, help='Whether to use a trivial Keras model.')
flags.DEFINE_boolean(name='report_accuracy_metrics', default=True, help='Report metrics during training and evaluation.')
flags.DEFINE_boolean(name='use_tensor_lr', default=False, help='Use learning rate tensor instead of a callback.')
flags.DEFINE_boolean(name='enable_tensorboard', default=False, help='Whether to enable Tensorboard callback.')
flags.DEFINE_integer(name='train_steps', default=None, help='The number of steps to run for training. If it is larger than # batches per epoch, then use # batches per epoch. This flag will be ignored if train_epochs is set to be larger than 1. ')
flags.DEFINE_string(name='profile_steps', default=None, help='Save profiling data to model dir at given range of global steps. The value must be a comma separated pair of positive integers, specifying the first and last step to profile. For example, "--profile_steps=2,4" triggers the profiler to process 3 steps, starting from the 2nd step. Note that profiler has a non-trivial performance overhead, and the output file can be gigantic if profiling many steps.')
flags.DEFINE_boolean(name='batchnorm_spatial_persistent', default=True, help='Enable the spatial persistent mode for CuDNN batch norm kernel.')
flags.DEFINE_boolean(name='enable_get_next_as_optional', default=False, help='Enable get_next_as_optional behavior in DistributedIterator.')
flags.DEFINE_boolean(name='enable_checkpoint_and_export', default=False, help='Whether to enable a checkpoint callback and export the savedmodel.')
flags.DEFINE_string(name='tpu', default='', help='TPU address to connect to.')
flags.DEFINE_integer(name='steps_per_loop', default=500, help='Number of steps per training loop. Only training step happens inside the loop. Callbacks will not be called inside. Will be capped at steps per epoch.')
flags.DEFINE_boolean(name='use_tf_while_loop', default=True, help='Whether to build a tf.while_loop inside the training loop on the host. Setting it to True is critical to have peak performance on TPU.')
flags.DEFINE_boolean(name='use_tf_keras_layers', default=False, help='Whether to use tf.keras.layers instead of tf.python.keras.layers. It only changes imagenet resnet model layers for now. This flag is a temporary flag during transition to tf.keras.layers. Do not use this flag for external usage. This will be removed shortly.')
if model:
flags.DEFINE_string('model', 'resnet50_v1.5', 'Name of model preset. (mobilenet, resnet50_v1.5)')
if optimizer:
flags.DEFINE_string('optimizer', 'resnet50_default', 'Name of optimizer preset. (mobilenet_default, resnet50_default)')
flags.DEFINE_float('initial_learning_rate_per_sample', 7e-05, 'Initial value of learning rate per sample for mobilenet_default.')
flags.DEFINE_float('lr_decay_factor', 0.94, 'Learning rate decay factor for mobilenet_default.')
flags.DEFINE_float('num_epochs_per_decay', 2.5, 'Number of epochs per decay for mobilenet_default.')
if pretrained_filepath:
flags.DEFINE_string('pretrained_filepath', '', 'Pretrained file path.') | 2,800,760,739,962,504,700 | Define flags for Keras models. | official/vision/image_classification/common.py | define_keras_flags | Anku5hk/models | python | def define_keras_flags(dynamic_loss_scale=True, model=False, optimizer=False, pretrained_filepath=False):
flags_core.define_base(clean=True, num_gpu=True, run_eagerly=True, train_epochs=True, epochs_between_evals=True, distribution_strategy=True)
flags_core.define_performance(num_parallel_calls=False, synthetic_data=True, dtype=True, all_reduce_alg=True, num_packs=True, tf_gpu_thread_mode=True, datasets_num_private_threads=True, dynamic_loss_scale=dynamic_loss_scale, loss_scale=True, fp16_implementation=True, tf_data_experimental_slack=True, enable_xla=True, force_v2_in_keras_compile=True, training_dataset_cache=True)
flags_core.define_image()
flags_core.define_benchmark()
flags_core.define_distribution()
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
flags.DEFINE_boolean(name='set_learning_phase_to_train', default=True, help='If skip eval, also set Keras learning phase to 1 (training).')
flags.DEFINE_boolean(name='explicit_gpu_placement', default=False, help='If not using distribution strategy, explicitly set device scope for the Keras training loop.')
flags.DEFINE_boolean(name='use_trivial_model', default=False, help='Whether to use a trivial Keras model.')
flags.DEFINE_boolean(name='report_accuracy_metrics', default=True, help='Report metrics during training and evaluation.')
flags.DEFINE_boolean(name='use_tensor_lr', default=False, help='Use learning rate tensor instead of a callback.')
flags.DEFINE_boolean(name='enable_tensorboard', default=False, help='Whether to enable Tensorboard callback.')
flags.DEFINE_integer(name='train_steps', default=None, help='The number of steps to run for training. If it is larger than # batches per epoch, then use # batches per epoch. This flag will be ignored if train_epochs is set to be larger than 1. ')
flags.DEFINE_string(name='profile_steps', default=None, help='Save profiling data to model dir at given range of global steps. The value must be a comma separated pair of positive integers, specifying the first and last step to profile. For example, "--profile_steps=2,4" triggers the profiler to process 3 steps, starting from the 2nd step. Note that profiler has a non-trivial performance overhead, and the output file can be gigantic if profiling many steps.')
flags.DEFINE_boolean(name='batchnorm_spatial_persistent', default=True, help='Enable the spatial persistent mode for CuDNN batch norm kernel.')
flags.DEFINE_boolean(name='enable_get_next_as_optional', default=False, help='Enable get_next_as_optional behavior in DistributedIterator.')
flags.DEFINE_boolean(name='enable_checkpoint_and_export', default=False, help='Whether to enable a checkpoint callback and export the savedmodel.')
flags.DEFINE_string(name='tpu', default='', help='TPU address to connect to.')
flags.DEFINE_integer(name='steps_per_loop', default=500, help='Number of steps per training loop. Only training step happens inside the loop. Callbacks will not be called inside. Will be capped at steps per epoch.')
flags.DEFINE_boolean(name='use_tf_while_loop', default=True, help='Whether to build a tf.while_loop inside the training loop on the host. Setting it to True is critical to have peak performance on TPU.')
flags.DEFINE_boolean(name='use_tf_keras_layers', default=False, help='Whether to use tf.keras.layers instead of tf.python.keras.layers. It only changes imagenet resnet model layers for now. This flag is a temporary flag during transition to tf.keras.layers. Do not use this flag for external usage. This will be removed shortly.')
if model:
flags.DEFINE_string('model', 'resnet50_v1.5', 'Name of model preset. (mobilenet, resnet50_v1.5)')
if optimizer:
flags.DEFINE_string('optimizer', 'resnet50_default', 'Name of optimizer preset. (mobilenet_default, resnet50_default)')
flags.DEFINE_float('initial_learning_rate_per_sample', 7e-05, 'Initial value of learning rate per sample for mobilenet_default.')
flags.DEFINE_float('lr_decay_factor', 0.94, 'Learning rate decay factor for mobilenet_default.')
flags.DEFINE_float('num_epochs_per_decay', 2.5, 'Number of epochs per decay for mobilenet_default.')
if pretrained_filepath:
flags.DEFINE_string('pretrained_filepath', '', 'Pretrained file path.')
def get_synth_data(height, width, num_channels, num_classes, dtype):
'Creates a set of synthetic random data.\n\n Args:\n height: Integer height that will be used to create a fake image tensor.\n width: Integer width that will be used to create a fake image tensor.\n num_channels: Integer depth that will be used to create a fake image tensor.\n num_classes: Number of classes that should be represented in the fake labels\n tensor\n dtype: Data type for features/images.\n\n Returns:\n A tuple of tensors representing the inputs and labels.\n\n '
inputs = tf.random.truncated_normal([height, width, num_channels], dtype=dtype, mean=127, stddev=60, name='synthetic_inputs')
labels = tf.random.uniform([1], minval=0, maxval=(num_classes - 1), dtype=tf.int32, name='synthetic_labels')
return (inputs, labels) | 4,141,754,423,883,289,000 | Creates a set of synthetic random data.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
Returns:
A tuple of tensors representing the inputs and labels. | official/vision/image_classification/common.py | get_synth_data | Anku5hk/models | python | def get_synth_data(height, width, num_channels, num_classes, dtype):
'Creates a set of synthetic random data.\n\n Args:\n height: Integer height that will be used to create a fake image tensor.\n width: Integer width that will be used to create a fake image tensor.\n num_channels: Integer depth that will be used to create a fake image tensor.\n num_classes: Number of classes that should be represented in the fake labels\n tensor\n dtype: Data type for features/images.\n\n Returns:\n A tuple of tensors representing the inputs and labels.\n\n '
inputs = tf.random.truncated_normal([height, width, num_channels], dtype=dtype, mean=127, stddev=60, name='synthetic_inputs')
labels = tf.random.uniform([1], minval=0, maxval=(num_classes - 1), dtype=tf.int32, name='synthetic_labels')
return (inputs, labels) |
def define_pruning_flags():
'Define flags for pruning methods.'
flags.DEFINE_string('pruning_method', None, 'Pruning method.None (no pruning) or polynomial_decay.')
flags.DEFINE_float('pruning_initial_sparsity', 0.0, 'Initial sparsity for pruning.')
flags.DEFINE_float('pruning_final_sparsity', 0.5, 'Final sparsity for pruning.')
flags.DEFINE_integer('pruning_begin_step', 0, 'Begin step for pruning.')
flags.DEFINE_integer('pruning_end_step', 100000, 'End step for pruning.')
flags.DEFINE_integer('pruning_frequency', 100, 'Frequency for pruning.') | -3,593,318,337,141,748,000 | Define flags for pruning methods. | official/vision/image_classification/common.py | define_pruning_flags | Anku5hk/models | python | def define_pruning_flags():
flags.DEFINE_string('pruning_method', None, 'Pruning method.None (no pruning) or polynomial_decay.')
flags.DEFINE_float('pruning_initial_sparsity', 0.0, 'Initial sparsity for pruning.')
flags.DEFINE_float('pruning_final_sparsity', 0.5, 'Final sparsity for pruning.')
flags.DEFINE_integer('pruning_begin_step', 0, 'Begin step for pruning.')
flags.DEFINE_integer('pruning_end_step', 100000, 'End step for pruning.')
flags.DEFINE_integer('pruning_frequency', 100, 'Frequency for pruning.') |
def get_synth_input_fn(height, width, num_channels, num_classes, dtype=tf.float32, drop_remainder=True):
'Returns an input function that returns a dataset with random data.\n\n This input_fn returns a data set that iterates over a set of random data and\n bypasses all preprocessing, e.g. jpeg decode and copy. The host to device\n copy is still included. This used to find the upper throughput bound when\n tuning the full input pipeline.\n\n Args:\n height: Integer height that will be used to create a fake image tensor.\n width: Integer width that will be used to create a fake image tensor.\n num_channels: Integer depth that will be used to create a fake image tensor.\n num_classes: Number of classes that should be represented in the fake labels\n tensor\n dtype: Data type for features/images.\n drop_remainder: A boolean indicates whether to drop the remainder of the\n batches. If True, the batch dimension will be static.\n\n Returns:\n An input_fn that can be used in place of a real one to return a dataset\n that can be used for iteration.\n '
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
'Returns dataset filled with random data.'
(inputs, labels) = get_synth_data(height=height, width=width, num_channels=num_channels, num_classes=num_classes, dtype=dtype)
labels = tf.cast(labels, dtype=tf.float32)
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
data = data.batch(batch_size, drop_remainder=drop_remainder)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
return input_fn | 8,003,751,533,849,318,000 | Returns an input function that returns a dataset with random data.
This input_fn returns a data set that iterates over a set of random data and
bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
copy is still included. This used to find the upper throughput bound when
tuning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
drop_remainder: A boolean indicates whether to drop the remainder of the
batches. If True, the batch dimension will be static.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration. | official/vision/image_classification/common.py | get_synth_input_fn | Anku5hk/models | python | def get_synth_input_fn(height, width, num_channels, num_classes, dtype=tf.float32, drop_remainder=True):
'Returns an input function that returns a dataset with random data.\n\n This input_fn returns a data set that iterates over a set of random data and\n bypasses all preprocessing, e.g. jpeg decode and copy. The host to device\n copy is still included. This used to find the upper throughput bound when\n tuning the full input pipeline.\n\n Args:\n height: Integer height that will be used to create a fake image tensor.\n width: Integer width that will be used to create a fake image tensor.\n num_channels: Integer depth that will be used to create a fake image tensor.\n num_classes: Number of classes that should be represented in the fake labels\n tensor\n dtype: Data type for features/images.\n drop_remainder: A boolean indicates whether to drop the remainder of the\n batches. If True, the batch dimension will be static.\n\n Returns:\n An input_fn that can be used in place of a real one to return a dataset\n that can be used for iteration.\n '
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
'Returns dataset filled with random data.'
(inputs, labels) = get_synth_data(height=height, width=width, num_channels=num_channels, num_classes=num_classes, dtype=dtype)
labels = tf.cast(labels, dtype=tf.float32)
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
data = data.batch(batch_size, drop_remainder=drop_remainder)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
return input_fn |
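A hedged usage sketch for the synthetic input function above: it assumes TensorFlow 2.x is installed and that get_synth_input_fn (as defined in the row above) is in scope; the shape sizes are arbitrary.

import tensorflow as tf  # assumes TensorFlow 2.x is installed

# get_synth_input_fn is the function defined in the row above.
input_fn = get_synth_input_fn(height=224, width=224, num_channels=3,
                              num_classes=1000, dtype=tf.float32)
dataset = input_fn(is_training=True, data_dir=None, batch_size=32)

for images, labels in dataset.take(1):
    print(images.shape, labels.shape)  # (32, 224, 224, 3) (32, 1)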
def set_cudnn_batchnorm_mode():
'Set CuDNN batchnorm mode for better performance.\n\n Note: Spatial Persistent mode may lead to accuracy losses for certain\n models.\n '
if FLAGS.batchnorm_spatial_persistent:
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
else:
os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None) | -4,307,923,414,367,476,000 | Set CuDNN batchnorm mode for better performance.
Note: Spatial Persistent mode may lead to accuracy losses for certain
models. | official/vision/image_classification/common.py | set_cudnn_batchnorm_mode | Anku5hk/models | python | def set_cudnn_batchnorm_mode():
'Set CuDNN batchnorm mode for better performance.\n\n Note: Spatial Persistent mode may lead to accuracy losses for certain\n models.\n '
if FLAGS.batchnorm_spatial_persistent:
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
else:
os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None) |
def on_batch_begin(self, batch, logs=None):
'Executes before step begins.'
lr = self.schedule(self.epochs, batch, self.steps_per_epoch, self.batch_size)
if (not isinstance(lr, (float, np.float32, np.float64))):
raise ValueError('The output of the "schedule" function should be float.')
if (lr != self.prev_lr):
self.model.optimizer.learning_rate = lr
self.prev_lr = lr
tf.compat.v1.logging.debug('Epoch %05d Batch %05d: LearningRateBatchScheduler change learning rate to %s.', self.epochs, batch, lr) | 7,123,145,724,043,767,000 | Executes before step begins. | official/vision/image_classification/common.py | on_batch_begin | Anku5hk/models | python | def on_batch_begin(self, batch, logs=None):
lr = self.schedule(self.epochs, batch, self.steps_per_epoch, self.batch_size)
if (not isinstance(lr, (float, np.float32, np.float64))):
raise ValueError('The output of the "schedule" function should be float.')
if (lr != self.prev_lr):
self.model.optimizer.learning_rate = lr
self.prev_lr = lr
tf.compat.v1.logging.debug('Epoch %05d Batch %05d: LearningRateBatchScheduler change learning rate to %s.', self.epochs, batch, lr) |
def _get_learning_rate(self, step):
'Compute learning rate at given step.'
with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup', [self.rescaled_lr, self.step_boundaries, self.lr_values, self.warmup_steps, self.compute_lr_on_cpu]):
def warmup_lr(step):
return (self.rescaled_lr * (tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32)))
def piecewise_lr(step):
return tf.compat.v1.train.piecewise_constant(step, self.step_boundaries, self.lr_values)
return tf.cond((step < self.warmup_steps), (lambda : warmup_lr(step)), (lambda : piecewise_lr(step))) | -9,149,486,914,277,548,000 | Compute learning rate at given step. | official/vision/image_classification/common.py | _get_learning_rate | Anku5hk/models | python | def _get_learning_rate(self, step):
with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup', [self.rescaled_lr, self.step_boundaries, self.lr_values, self.warmup_steps, self.compute_lr_on_cpu]):
def warmup_lr(step):
return (self.rescaled_lr * (tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32)))
def piecewise_lr(step):
return tf.compat.v1.train.piecewise_constant(step, self.step_boundaries, self.lr_values)
return tf.cond((step < self.warmup_steps), (lambda : warmup_lr(step)), (lambda : piecewise_lr(step))) |
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
'Returns dataset filled with random data.'
(inputs, labels) = get_synth_data(height=height, width=width, num_channels=num_channels, num_classes=num_classes, dtype=dtype)
labels = tf.cast(labels, dtype=tf.float32)
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
data = data.batch(batch_size, drop_remainder=drop_remainder)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data | 533,578,729,328,800,500 | Returns dataset filled with random data. | official/vision/image_classification/common.py | input_fn | Anku5hk/models | python | def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
(inputs, labels) = get_synth_data(height=height, width=width, num_channels=num_channels, num_classes=num_classes, dtype=dtype)
labels = tf.cast(labels, dtype=tf.float32)
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
data = data.batch(batch_size, drop_remainder=drop_remainder)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data |
def default_keygen(self, *args, **kwargs) -> Tuple[(Hashable, ...)]:
'Returns all params (args, kwargs, and missing default kwargs) for function as kwargs.'
return tuple(self.get_args_as_kwargs(*args, **kwargs).values()) | -423,157,303,730,307,500 | Returns all params (args, kwargs, and missing default kwargs) for function as kwargs. | atools/_memoize_decorator.py | default_keygen | cevans87/atools | python | def default_keygen(self, *args, **kwargs) -> Tuple[(Hashable, ...)]:
return tuple(self.get_args_as_kwargs(*args, **kwargs).values()) |
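default_keygen above reduces a call to the tuple of all parameter values (positional, keyword, and omitted defaults) in declaration order. The standard-library sketch below reproduces that idea with inspect.signature; args_as_kwargs, default_key, and fetch are assumed names for illustration, not atools' internal implementation.

import inspect
from typing import Any, Dict, Hashable, Tuple


def args_as_kwargs(fn, *args, **kwargs) -> Dict[str, Any]:
    # Bind the call to fn's signature and fill in any defaults the caller
    # omitted, so every parameter appears exactly once, in declaration order.
    bound = inspect.signature(fn).bind(*args, **kwargs)
    bound.apply_defaults()
    return dict(bound.arguments)


def default_key(fn, *args, **kwargs) -> Tuple[Hashable, ...]:
    # Values only, in parameter order (the same tuple shape as the keygen above).
    return tuple(args_as_kwargs(fn, *args, **kwargs).values())


def fetch(url: str, retries: int = 3, timeout: float = 1.5):
    ...


print(default_key(fetch, 'https://example.com', timeout=0.5))
# ('https://example.com', 3, 0.5)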
def __init__(self, keylist, header=None):
'\n Initializes the ConfigList object by transforming\n a list of keywords into a structured list including\n beams descriptions\n\n keylist: list\n List of configuration keys\n header: str\n the header string\n '
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q']
self.beams = {}
self._beams = []
self.header = header
self.gkeys = self._find_gkeys(keylist)
iindex = 0
while ((len(keylist) > 0) and (iindex < len(idents))):
try:
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[iindex]
except BeamNotFound:
pass
iindex += 1
if (len(keylist) > 0):
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key) | 6,519,035,064,426,874,000 | Initializes the ConfigList object by transforming
a list of keywords into a structured list including
beams descriptions
keylist: list
List of configuration keys
header: str
the header string | pyaxe/axesrc/configfile.py | __init__ | sosey/pyaxe | python | def __init__(self, keylist, header=None):
'\n Initializes the ConfigList object by transforming\n a list of keywords into a structured list including\n beams descriptions\n\n keylist: list\n List of configuration keys\n header: str\n the header string\n '
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q']
self.beams = {}
self._beams = []
self.header = header
self.gkeys = self._find_gkeys(keylist)
iindex = 0
while ((len(keylist) > 0) and (iindex < len(idents))):
try:
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[iindex]
except BeamNotFound:
pass
iindex += 1
if (len(keylist) > 0):
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key) |