body (stringlengths 26 to 98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (stringlengths 1 to 16.8k) | path (stringlengths 5 to 230) | name (stringlengths 1 to 96) | repository_name (stringlengths 7 to 89) | lang (stringclasses, 1 value) | body_without_docstring (stringlengths 20 to 98.2k)
---|---|---|---|---|---|---|---|
def send_job_data(self, current_job, data, poll_timeout=None):
'Send a Gearman JOB_DATA update for an inflight job'
current_handler = self._get_handler_for_job(current_job)
current_handler.send_job_data(current_job, data=data)
self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) | 500,194,096,815,165,760 | Send a Gearman JOB_DATA update for an inflight job | client/python3_gearman/worker.py | send_job_data | aixiwang/gearman_test | python | def send_job_data(self, current_job, data, poll_timeout=None):
current_handler = self._get_handler_for_job(current_job)
current_handler.send_job_data(current_job, data=data)
self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) |
def send_job_warning(self, current_job, data, poll_timeout=None):
'Send a Gearman JOB_WARNING update for an inflight job'
current_handler = self._get_handler_for_job(current_job)
current_handler.send_job_warning(current_job, data=data)
self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) | 1,950,741,434,473,703,000 | Send a Gearman JOB_WARNING update for an inflight job | client/python3_gearman/worker.py | send_job_warning | aixiwang/gearman_test | python | def send_job_warning(self, current_job, data, poll_timeout=None):
current_handler = self._get_handler_for_job(current_job)
current_handler.send_job_warning(current_job, data=data)
self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) |
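
The two records above stream JOB_DATA and JOB_WARNING updates for an in-flight job. A hypothetical worker-side sketch of how such helpers are exercised, assuming the classic `gearman` package API that this python3 port mirrors; the server address and task name are made up for illustration:

```python
import gearman

gm_worker = gearman.GearmanWorker(['localhost:4730'])    # assumed server address

def task_reverse(worker, job):
    worker.send_job_data(job, 'started')                 # partial JOB_DATA update
    if not job.data:
        worker.send_job_warning(job, 'empty payload')    # non-fatal JOB_WARNING
    return job.data[::-1]

gm_worker.register_task('reverse', task_reverse)
gm_worker.work()                                         # block and serve jobs
```
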
def create_job(self, command_handler, job_handle, task, unique, data):
'Create a new job using our self.job_class'
current_connection = self.handler_to_connection_map[command_handler]
return self.job_class(current_connection, job_handle, task, unique, data) | 4,949,514,335,659,896,000 | Create a new job using our self.job_class | client/python3_gearman/worker.py | create_job | aixiwang/gearman_test | python | def create_job(self, command_handler, job_handle, task, unique, data):
current_connection = self.handler_to_connection_map[command_handler]
return self.job_class(current_connection, job_handle, task, unique, data) |
def set_job_lock(self, command_handler, lock):
"Set a worker level job lock so we don't try to hold onto 2 jobs at\n anytime"
if (command_handler not in self.handler_to_connection_map):
return False
failed_lock = bool((lock and (self.command_handler_holding_job_lock is not None)))
failed_unlock = bool(((not lock) and (self.command_handler_holding_job_lock != command_handler)))
if (failed_lock or failed_unlock):
return False
if lock:
self.command_handler_holding_job_lock = command_handler
else:
self.command_handler_holding_job_lock = None
return True | -614,135,064,642,233,300 | Set a worker level job lock so we don't try to hold onto 2 jobs at
any time | client/python3_gearman/worker.py | set_job_lock | aixiwang/gearman_test | python | def set_job_lock(self, command_handler, lock):
"Set a worker level job lock so we don't try to hold onto 2 jobs at\n anytime"
if (command_handler not in self.handler_to_connection_map):
return False
failed_lock = bool((lock and (self.command_handler_holding_job_lock is not None)))
failed_unlock = bool(((not lock) and (self.command_handler_holding_job_lock != command_handler)))
if (failed_lock or failed_unlock):
return False
if lock:
self.command_handler_holding_job_lock = command_handler
else:
self.command_handler_holding_job_lock = None
return True |
def check_job_lock(self, command_handler):
'Check to see if we hold the job lock'
return bool((self.command_handler_holding_job_lock == command_handler)) | 5,963,652,033,655,536,000 | Check to see if we hold the job lock | client/python3_gearman/worker.py | check_job_lock | aixiwang/gearman_test | python | def check_job_lock(self, command_handler):
return bool((self.command_handler_holding_job_lock == command_handler)) |
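
set_job_lock and check_job_lock above enforce a simple invariant: at most one command handler may hold the worker-level job lock, and only the holder may release it. A standalone sketch of the same rules, independent of the gearman classes:

```python
class JobLockHolder:
    """Illustrative stand-in for the worker-level job lock, not the real worker class."""

    def __init__(self, known_handlers):
        self.known_handlers = set(known_handlers)
        self.holder = None

    def set_job_lock(self, handler, lock):
        if handler not in self.known_handlers:
            return False
        if lock and self.holder is not None:        # someone already holds the lock
            return False
        if not lock and self.holder != handler:     # only the holder may unlock
            return False
        self.holder = handler if lock else None
        return True

    def check_job_lock(self, handler):
        return self.holder == handler

locks = JobLockHolder(['conn-a', 'conn-b'])
assert locks.set_job_lock('conn-a', True)       # conn-a acquires the lock
assert not locks.set_job_lock('conn-b', True)   # conn-b is refused while it is held
assert locks.check_job_lock('conn-a')
assert locks.set_job_lock('conn-a', False)      # and only conn-a can release it
```
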
def get_bond_length_distribution_inner(input_fname, output_fname):
'Generate bond length distributions.\n\n Args:\n input_fname: An existing TFRecord file containing Conformer protos.\n output_fname: An output file that will be created that contains all bond\n length distributions - all bond types, all atom types. Requires\n post-processing to generate bond length distribution files.\n '
print(f'Reading from {input_fname} output to {output_fname}')
options = PipelineOptions(direct_num_workers=6, direct_running_mode='multi_processing')
with beam.Pipeline(options=options) as p:
protos = (((((p | beam.io.tfrecordio.ReadFromTFRecord(input_fname, coder=beam.coders.ProtoCoder(dataset_pb2.Conformer().__class__))) | beam.ParDo(bond_lengths.GetBondLengthDistribution())) | beam.CombinePerKey(sum)) | beam.ParDo(BondDistToString())) | beam.io.WriteToText(output_fname))
print(protos) | 1,118,417,521,258,524,800 | Generate bond length distributions.
Args:
input_fname: An existing TFRecord file containing Conformer protos.
output_fname: An output file that will be created that contains all bond
length distributions - all bond types, all atom types. Requires
post-processing to generate bond length distribution files. | smu/geometry/get_bond_length_distribution.py | get_bond_length_distribution_inner | 10088/google-research | python | def get_bond_length_distribution_inner(input_fname, output_fname):
'Generate bond length distributions.\n\n Args:\n input_fname: An existing TFRecord file containing Conformer protos.\n output_fname: An output file that will be created that contains all bond\n length distributions - all bond types, all atom types. Requires\n post-processing to generate bond length distribution files.\n '
print(f'Reading from {input_fname} output to {output_fname}')
options = PipelineOptions(direct_num_workers=6, direct_running_mode='multi_processing')
with beam.Pipeline(options=options) as p:
protos = (((((p | beam.io.tfrecordio.ReadFromTFRecord(input_fname, coder=beam.coders.ProtoCoder(dataset_pb2.Conformer().__class__))) | beam.ParDo(bond_lengths.GetBondLengthDistribution())) | beam.CombinePerKey(sum)) | beam.ParDo(BondDistToString())) | beam.io.WriteToText(output_fname))
print(protos) |
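
The record above builds a Beam pipeline: read Conformer protos, emit per-bond lengths, combine per key, format, write text. A minimal, self-contained sketch of the same pipeline shape run on in-memory pairs instead of TFRecords; the sample data and the output prefix are made up for illustration:

```python
import apache_beam as beam

with beam.Pipeline() as p:
    _ = (
        p
        | beam.Create([('C-C', 1.54), ('C-C', 1.53), ('C=O', 1.21)])   # (bond type, length)
        | beam.Map(lambda kv: ((kv[0], round(kv[1], 2)), 1))           # bucket by rounded length
        | beam.CombinePerKey(sum)                                      # count per (bond, bucket)
        | beam.Map(lambda kv: f'{kv[0][0]},{kv[0][1]},{kv[1]}')        # format as text
        | beam.io.WriteToText('/tmp/bond_length_counts')               # hypothetical output prefix
    )
```
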
def get_bond_length_distribution(unused_argv):
'Scan Conformer protos to extract bond length distributions.'
del unused_argv
get_bond_length_distribution_inner(FLAGS.input, FLAGS.output) | -6,997,662,189,550,810,000 | Scan Conformer protos to extract bond length distributions. | smu/geometry/get_bond_length_distribution.py | get_bond_length_distribution | 10088/google-research | python | def get_bond_length_distribution(unused_argv):
del unused_argv
get_bond_length_distribution_inner(FLAGS.input, FLAGS.output) |
@contextmanager
def mktemp(contents):
' Create a temporary file with the given contents, and yield its path '
(_, path) = tempfile.mkstemp()
fp = io.open(path, 'wt+', encoding='utf-8')
fp.write(contents)
fp.flush()
try:
(yield path)
finally:
fp.close()
os.unlink(path) | -1,387,237,215,766,695,400 | Create a temporary file with the given contents, and yield its path | tests/render-test.py | mktemp | arrikto/kolypto-j2cli | python | @contextmanager
def mktemp(contents):
' '
(_, path) = tempfile.mkstemp()
fp = io.open(path, 'wt+', encoding='utf-8')
fp.write(contents)
fp.flush()
try:
(yield path)
finally:
fp.close()
os.unlink(path) |
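
A short usage sketch for the mktemp contextmanager above (its definition is repeated so the snippet runs on its own): the temporary file exists inside the with-block and is unlinked on exit.

```python
import io
import os
import tempfile
from contextlib import contextmanager

@contextmanager
def mktemp(contents):
    (_, path) = tempfile.mkstemp()
    fp = io.open(path, 'wt+', encoding='utf-8')
    fp.write(contents)
    fp.flush()
    try:
        yield path
    finally:
        fp.close()
        os.unlink(path)

with mktemp(u'name=world\n') as path:
    with io.open(path, encoding='utf-8') as fh:
        print(fh.read())          # -> name=world
    print(os.path.exists(path))   # -> True, still present inside the block
print(os.path.exists(path))       # -> False, unlinked once the block exits
```
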
def _testme(self, argv, expected_output, stdin=None, env=None):
' Helper test shortcut '
with mock_environ((env or {})):
result = render_command(os.getcwd(), (env or {}), stdin, argv)
if isinstance(result, bytes):
result = result.decode('utf-8')
self.assertEqual(result, expected_output) | 1,795,941,189,374,585,600 | Helper test shortcut | tests/render-test.py | _testme | arrikto/kolypto-j2cli | python | def _testme(self, argv, expected_output, stdin=None, env=None):
' '
with mock_environ((env or {})):
result = render_command(os.getcwd(), (env or {}), stdin, argv)
if isinstance(result, bytes):
result = result.decode('utf-8')
self.assertEqual(result, expected_output) |
def test_undefined(self):
' Test --undefined '
self.assertRaises(UndefinedError, self._testme, ['resources/name.j2'], u'Hello !\n', env=dict())
self._testme(['--undefined', 'resources/name.j2'], u'Hello !\n', env=dict()) | -1,063,750,977,480,168,600 | Test --undefined | tests/render-test.py | test_undefined | arrikto/kolypto-j2cli | python | def test_undefined(self):
' '
self.assertRaises(UndefinedError, self._testme, ['resources/name.j2'], u'Hello !\n', env=dict())
self._testme(['--undefined', 'resources/name.j2'], u'Hello !\n', env=dict()) |
def test_jinja2_extensions(self):
' Test that an extension is enabled '
with mktemp('{% do [] %}') as template:
self._testme([template], '') | 2,100,193,027,615,986,400 | Test that an extension is enabled | tests/render-test.py | test_jinja2_extensions | arrikto/kolypto-j2cli | python | def test_jinja2_extensions(self):
' '
with mktemp('{% do [] %}') as template:
self._testme([template], '') |
def test_customize(self):
' Test --customize '
with mktemp('<% if 1 %>1<% else %>2<% endif %>') as template:
self._testme(['--customize=resources/customize.py', template], '1')
with mktemp('<< my_function("hey") >>') as template:
self._testme(['--customize=resources/customize.py', template], 'my function says "hey"')
with mktemp('<< ADD >>') as template:
self._testme(['--customize=resources/customize.py', template], '127')
with mktemp('<< ADD|parentheses >>') as template:
self._testme(['--customize=resources/customize.py', template], '(127)')
with mktemp('<% if ADD|int is custom_odd %>odd<% endif %>') as template:
self._testme(['--customize=resources/customize.py', template], 'odd')
del sys.modules['customize-module']
with mktemp('{% if 1 %}1{% endif %}') as template:
self._testme(['--customize=render-test.py', template], '1') | -1,975,867,084,036,834,000 | Test --customize | tests/render-test.py | test_customize | arrikto/kolypto-j2cli | python | def test_customize(self):
' '
with mktemp('<% if 1 %>1<% else %>2<% endif %>') as template:
self._testme(['--customize=resources/customize.py', template], '1')
with mktemp('<< my_function("hey") >>') as template:
self._testme(['--customize=resources/customize.py', template], 'my function says "hey"')
with mktemp('<< ADD >>') as template:
self._testme(['--customize=resources/customize.py', template], '127')
with mktemp('<< ADD|parentheses >>') as template:
self._testme(['--customize=resources/customize.py', template], '(127)')
with mktemp('<% if ADD|int is custom_odd %>odd<% endif %>') as template:
self._testme(['--customize=resources/customize.py', template], 'odd')
del sys.modules['customize-module']
with mktemp('{% if 1 %}1{% endif %}') as template:
self._testme(['--customize=render-test.py', template], '1') |
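
The test_customize record above expects custom delimiters (`<% %>` / `<< >>`), a `parentheses` filter, and a `custom_odd` test. The same behaviour can be reproduced with a plain jinja2 Environment, shown below; the wiring through j2cli's own customize hooks is not shown here, and the helper implementations are assumptions based only on what the test asserts.

```python
from jinja2 import Environment

env = Environment(
    block_start_string='<%', block_end_string='%>',
    variable_start_string='<<', variable_end_string='>>',
)
env.filters['parentheses'] = lambda value: '({})'.format(value)   # assumed filter body
env.tests['custom_odd'] = lambda n: bool(n % 2)                   # assumed test body

print(env.from_string('<< 127|parentheses >>').render())                     # (127)
print(env.from_string('<% if 127 is custom_odd %>odd<% endif %>').render())  # odd
```
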
def _load_coco_keypoint_annotation_kernel(self, img_id):
'load annotation from COCOAPI.\n\n Note:\n bbox:[x1, y1, w, h]\n Args:\n img_id: coco image id\n Returns:\n dict: db entry\n '
img_ann = self.coco.loadImgs(img_id)[0]
width = img_ann['width']
height = img_ann['height']
num_joints = self.ann_info['num_joints']
ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)
objs = self.coco.loadAnns(ann_ids)
valid_objs = []
for obj in objs:
if ('bbox' not in obj):
continue
(x, y, w, h) = obj['bbox']
x1 = max(0, x)
y1 = max(0, y)
x2 = min((width - 1), (x1 + max(0, (w - 1))))
y2 = min((height - 1), (y1 + max(0, (h - 1))))
if ((('area' not in obj) or (obj['area'] > 0)) and (x2 > x1) and (y2 > y1)):
obj['clean_bbox'] = [x1, y1, (x2 - x1), (y2 - y1)]
valid_objs.append(obj)
objs = valid_objs
rec = []
bbox_id = 0
for obj in objs:
if ('keypoints' not in obj):
continue
if (max(obj['keypoints']) == 0):
continue
joints_3d = np.zeros((num_joints, 3), dtype=np.float32)
joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)
keypoints = np.array(((((obj['keypoints'] + obj['foot_kpts']) + obj['face_kpts']) + obj['lefthand_kpts']) + obj['righthand_kpts'])).reshape((- 1), 3)
joints_3d[:, :2] = keypoints[:, :2]
joints_3d_visible[:, :2] = np.minimum(1, (keypoints[:, 2:3] > 0))
(center, scale) = self._xywh2cs(*obj['clean_bbox'][:4])
image_file = os.path.join(self.img_prefix, self.id2name[img_id])
rec.append({'image_file': image_file, 'center': center, 'scale': scale, 'rotation': 0, 'joints_3d': joints_3d, 'joints_3d_visible': joints_3d_visible, 'dataset': self.dataset_name, 'bbox_score': 1, 'bbox_id': bbox_id})
bbox_id = (bbox_id + 1)
return rec | -3,409,155,877,235,542,500 | load annotation from COCOAPI.
Note:
bbox:[x1, y1, w, h]
Args:
img_id: coco image id
Returns:
dict: db entry | mmpose/datasets/datasets/top_down/topdown_coco_wholebody_dataset.py | _load_coco_keypoint_annotation_kernel | 674106399/mmpose | python | def _load_coco_keypoint_annotation_kernel(self, img_id):
'load annotation from COCOAPI.\n\n Note:\n bbox:[x1, y1, w, h]\n Args:\n img_id: coco image id\n Returns:\n dict: db entry\n '
img_ann = self.coco.loadImgs(img_id)[0]
width = img_ann['width']
height = img_ann['height']
num_joints = self.ann_info['num_joints']
ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)
objs = self.coco.loadAnns(ann_ids)
valid_objs = []
for obj in objs:
if ('bbox' not in obj):
continue
(x, y, w, h) = obj['bbox']
x1 = max(0, x)
y1 = max(0, y)
x2 = min((width - 1), (x1 + max(0, (w - 1))))
y2 = min((height - 1), (y1 + max(0, (h - 1))))
if ((('area' not in obj) or (obj['area'] > 0)) and (x2 > x1) and (y2 > y1)):
obj['clean_bbox'] = [x1, y1, (x2 - x1), (y2 - y1)]
valid_objs.append(obj)
objs = valid_objs
rec = []
bbox_id = 0
for obj in objs:
if ('keypoints' not in obj):
continue
if (max(obj['keypoints']) == 0):
continue
joints_3d = np.zeros((num_joints, 3), dtype=np.float32)
joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)
keypoints = np.array(((((obj['keypoints'] + obj['foot_kpts']) + obj['face_kpts']) + obj['lefthand_kpts']) + obj['righthand_kpts'])).reshape((- 1), 3)
joints_3d[:, :2] = keypoints[:, :2]
joints_3d_visible[:, :2] = np.minimum(1, (keypoints[:, 2:3] > 0))
(center, scale) = self._xywh2cs(*obj['clean_bbox'][:4])
image_file = os.path.join(self.img_prefix, self.id2name[img_id])
rec.append({'image_file': image_file, 'center': center, 'scale': scale, 'rotation': 0, 'joints_3d': joints_3d, 'joints_3d_visible': joints_3d_visible, 'dataset': self.dataset_name, 'bbox_score': 1, 'bbox_id': bbox_id})
bbox_id = (bbox_id + 1)
return rec |
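
The annotation kernel above clips each COCO `[x, y, w, h]` box to the image and keeps only non-degenerate boxes. A minimal standalone sketch of that sanitation step, with no dependency on the COCO API:

```python
def clean_xywh(bbox, width, height):
    """Clip an [x, y, w, h] box to the image, return None if it degenerates."""
    x, y, w, h = bbox
    x1, y1 = max(0, x), max(0, y)
    x2 = min(width - 1, x1 + max(0, w - 1))
    y2 = min(height - 1, y1 + max(0, h - 1))
    if x2 > x1 and y2 > y1:
        return [x1, y1, x2 - x1, y2 - y1]
    return None

print(clean_xywh([-5, 10, 50, 700], width=640, height=480))   # [0, 10, 49, 469]
print(clean_xywh([700, 10, 50, 50], width=640, height=480))   # None (outside the image)
```
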
def _coco_keypoint_results_one_category_kernel(self, data_pack):
'Get coco keypoint results.'
cat_id = data_pack['cat_id']
keypoints = data_pack['keypoints']
cat_results = []
for img_kpts in keypoints:
if (len(img_kpts) == 0):
continue
_key_points = np.array([img_kpt['keypoints'] for img_kpt in img_kpts])
key_points = _key_points.reshape((- 1), (self.ann_info['num_joints'] * 3))
cuts = (np.cumsum([0, self.body_num, self.foot_num, self.face_num, self.left_hand_num, self.right_hand_num]) * 3)
result = [{'image_id': img_kpt['image_id'], 'category_id': cat_id, 'keypoints': key_point[cuts[0]:cuts[1]].tolist(), 'foot_kpts': key_point[cuts[1]:cuts[2]].tolist(), 'face_kpts': key_point[cuts[2]:cuts[3]].tolist(), 'lefthand_kpts': key_point[cuts[3]:cuts[4]].tolist(), 'righthand_kpts': key_point[cuts[4]:cuts[5]].tolist(), 'score': float(img_kpt['score']), 'center': img_kpt['center'].tolist(), 'scale': img_kpt['scale'].tolist()} for (img_kpt, key_point) in zip(img_kpts, key_points)]
cat_results.extend(result)
return cat_results | 911,176,376,589,387,800 | Get coco keypoint results. | mmpose/datasets/datasets/top_down/topdown_coco_wholebody_dataset.py | _coco_keypoint_results_one_category_kernel | 674106399/mmpose | python | def _coco_keypoint_results_one_category_kernel(self, data_pack):
cat_id = data_pack['cat_id']
keypoints = data_pack['keypoints']
cat_results = []
for img_kpts in keypoints:
if (len(img_kpts) == 0):
continue
_key_points = np.array([img_kpt['keypoints'] for img_kpt in img_kpts])
key_points = _key_points.reshape((- 1), (self.ann_info['num_joints'] * 3))
cuts = (np.cumsum([0, self.body_num, self.foot_num, self.face_num, self.left_hand_num, self.right_hand_num]) * 3)
result = [{'image_id': img_kpt['image_id'], 'category_id': cat_id, 'keypoints': key_point[cuts[0]:cuts[1]].tolist(), 'foot_kpts': key_point[cuts[1]:cuts[2]].tolist(), 'face_kpts': key_point[cuts[2]:cuts[3]].tolist(), 'lefthand_kpts': key_point[cuts[3]:cuts[4]].tolist(), 'righthand_kpts': key_point[cuts[4]:cuts[5]].tolist(), 'score': float(img_kpt['score']), 'center': img_kpt['center'].tolist(), 'scale': img_kpt['scale'].tolist()} for (img_kpt, key_point) in zip(img_kpts, key_points)]
cat_results.extend(result)
return cat_results |
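
The results kernel above slices one flat `(x, y, score)` vector into whole-body parts using cumulative offsets. A small sketch of that `np.cumsum` trick with the COCO-WholeBody part sizes (17 body, 6 foot, 68 face, 21 per hand); the flat vector here is a stand-in for a real prediction:

```python
import numpy as np

sizes = [17, 6, 68, 21, 21]
cuts = np.cumsum([0] + sizes) * 3          # each keypoint contributes x, y, score
flat = np.arange(cuts[-1], dtype=float)    # stand-in for a real prediction vector

parts = {
    name: flat[cuts[i]:cuts[i + 1]].tolist()
    for i, name in enumerate(['keypoints', 'foot_kpts', 'face_kpts',
                              'lefthand_kpts', 'righthand_kpts'])
}
print({k: len(v) for k, v in parts.items()})
# {'keypoints': 51, 'foot_kpts': 18, 'face_kpts': 204, 'lefthand_kpts': 63, 'righthand_kpts': 63}
```
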
def _do_python_keypoint_eval(self, res_file):
'Keypoint evaluation using COCOAPI.'
coco_det = self.coco.loadRes(res_file)
cuts = np.cumsum([0, self.body_num, self.foot_num, self.face_num, self.left_hand_num, self.right_hand_num])
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_body', self.sigmas[cuts[0]:cuts[1]], use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_foot', self.sigmas[cuts[1]:cuts[2]], use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_face', self.sigmas[cuts[2]:cuts[3]], use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_lefthand', self.sigmas[cuts[3]:cuts[4]], use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_righthand', self.sigmas[cuts[4]:cuts[5]], use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_wholebody', self.sigmas, use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
stats_names = ['AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']
info_str = list(zip(stats_names, coco_eval.stats))
return info_str | 147,649,272,100,721,250 | Keypoint evaluation using COCOAPI. | mmpose/datasets/datasets/top_down/topdown_coco_wholebody_dataset.py | _do_python_keypoint_eval | 674106399/mmpose | python | def _do_python_keypoint_eval(self, res_file):
coco_det = self.coco.loadRes(res_file)
cuts = np.cumsum([0, self.body_num, self.foot_num, self.face_num, self.left_hand_num, self.right_hand_num])
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_body', self.sigmas[cuts[0]:cuts[1]], use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_foot', self.sigmas[cuts[1]:cuts[2]], use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_face', self.sigmas[cuts[2]:cuts[3]], use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_lefthand', self.sigmas[cuts[3]:cuts[4]], use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_righthand', self.sigmas[cuts[4]:cuts[5]], use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_det, 'keypoints_wholebody', self.sigmas, use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
stats_names = ['AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']
info_str = list(zip(stats_names, coco_eval.stats))
return info_str |
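
The evaluation method above repeats the same five-line COCOeval dance six times. Purely for illustration, the same sequence folded into a loop; it passes the same arguments as the original, and assumes the xtcocotools COCOeval that mmpose uses is importable as shown:

```python
from xtcocotools.cocoeval import COCOeval   # assumed import path

def run_wholebody_eval(coco, coco_det, sigmas, cuts):
    parts = ['body', 'foot', 'face', 'lefthand', 'righthand']
    sigma_slices = [sigmas[cuts[i]:cuts[i + 1]] for i in range(len(parts))]
    for part, part_sigmas in zip(parts + ['wholebody'], sigma_slices + [sigmas]):
        coco_eval = COCOeval(coco, coco_det, 'keypoints_' + part,
                             part_sigmas, use_area=True)
        coco_eval.params.useSegm = None
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
    return coco_eval   # the final iteration is the wholebody evaluation
```
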
def tearDown(self):
'Reset all bridge blocks in between test method runs.'
for bridge in self.bridges:
bridge._blockedIn = {} | -8,374,115,032,258,045,000 | Reset all bridge blocks in between test method runs. | bridgedb/test/test_https_distributor.py | tearDown | isislovecruft/bridgedb | python | def tearDown(self):
for bridge in self.bridges:
bridge._blockedIn = {} |
def test_HTTPSDistributor_init_with_proxies(self):
'The HTTPSDistributor, when initialised with proxies, should add an\n extra hashring for proxy users.\n '
dist = distributor.HTTPSDistributor(3, self.key, ProxySet(['1.1.1.1', '2.2.2.2']))
self.assertIsNotNone(dist.proxies)
self.assertGreater(dist.proxySubring, 0)
self.assertEqual(dist.proxySubring, 4)
self.assertEqual(dist.totalSubrings, 4) | 3,944,217,055,337,707,000 | The HTTPSDistributor, when initialised with proxies, should add an
extra hashring for proxy users. | bridgedb/test/test_https_distributor.py | test_HTTPSDistributor_init_with_proxies | isislovecruft/bridgedb | python | def test_HTTPSDistributor_init_with_proxies(self):
'The HTTPSDistributor, when initialised with proxies, should add an\n extra hashring for proxy users.\n '
dist = distributor.HTTPSDistributor(3, self.key, ProxySet(['1.1.1.1', '2.2.2.2']))
self.assertIsNotNone(dist.proxies)
self.assertGreater(dist.proxySubring, 0)
self.assertEqual(dist.proxySubring, 4)
self.assertEqual(dist.totalSubrings, 4) |
def test_HTTPSDistributor_getSubnet_usingProxy(self):
'HTTPSDistributor.getSubnet(usingProxy=True) should return a proxy\n group number.\n '
clientRequest = self.randomClientRequest()
expectedGroup = ((int(ipaddr.IPAddress(clientRequest.client)) % 4) + 1)
subnet = distributor.HTTPSDistributor.getSubnet(clientRequest.client, usingProxy=True)
self.assertTrue(subnet.startswith('proxy-group-'))
self.assertEqual(int(subnet[(- 1)]), expectedGroup) | -178,428,863,725,358,530 | HTTPSDistributor.getSubnet(usingProxy=True) should return a proxy
group number. | bridgedb/test/test_https_distributor.py | test_HTTPSDistributor_getSubnet_usingProxy | isislovecruft/bridgedb | python | def test_HTTPSDistributor_getSubnet_usingProxy(self):
'HTTPSDistributor.getSubnet(usingProxy=True) should return a proxy\n group number.\n '
clientRequest = self.randomClientRequest()
expectedGroup = ((int(ipaddr.IPAddress(clientRequest.client)) % 4) + 1)
subnet = distributor.HTTPSDistributor.getSubnet(clientRequest.client, usingProxy=True)
self.assertTrue(subnet.startswith('proxy-group-'))
self.assertEqual(int(subnet[(- 1)]), expectedGroup) |
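
The test above checks that proxy clients are hashed into one of four "proxy-group-N" buckets by taking the integer form of the address modulo 4. A standalone sketch of that mapping; the stdlib ipaddress module stands in for the ipaddr package the tests import:

```python
import ipaddress

def proxy_group(client, groups=4):
    return 'proxy-group-%d' % (int(ipaddress.ip_address(client)) % groups + 1)

print(proxy_group('1.2.3.4'))    # -> proxy-group-1 (16909060 % 4 == 0)
print(proxy_group('2.2.2.2'))    # some group in 1..4, stable for a given address
```
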
def test_HTTPSDistributor_mapSubnetToSubring_usingProxy(self):
'HTTPSDistributor.mapSubnetToSubring() when the client was using a\n proxy should map the client to the proxy subhashring.\n '
dist = distributor.HTTPSDistributor(3, self.key, ProxySet(['1.1.1.1', '2.2.2.2']))
subnet = 'proxy-group-3'
subring = dist.mapSubnetToSubring(subnet, usingProxy=True)
self.assertEqual(subring, dist.proxySubring) | 1,929,354,051,178,406,100 | HTTPSDistributor.mapSubnetToSubring() when the client was using a
proxy should map the client to the proxy subhashring. | bridgedb/test/test_https_distributor.py | test_HTTPSDistributor_mapSubnetToSubring_usingProxy | isislovecruft/bridgedb | python | def test_HTTPSDistributor_mapSubnetToSubring_usingProxy(self):
'HTTPSDistributor.mapSubnetToSubring() when the client was using a\n proxy should map the client to the proxy subhashring.\n '
dist = distributor.HTTPSDistributor(3, self.key, ProxySet(['1.1.1.1', '2.2.2.2']))
subnet = 'proxy-group-3'
subring = dist.mapSubnetToSubring(subnet, usingProxy=True)
self.assertEqual(subring, dist.proxySubring) |
def test_HTTPSDistributor_mapSubnetToSubring_with_proxies(self):
"HTTPSDistributor.mapSubnetToSubring() when the client wasn't using\n a proxy, but the distributor does have some known proxies and a\n proxySubring, should not map the client to the proxy subhashring.\n "
dist = distributor.HTTPSDistributor(3, self.key, ProxySet(['1.1.1.1', '2.2.2.2']))
subnet = '15.1.0.0/16'
subring = dist.mapSubnetToSubring(subnet, usingProxy=False)
self.assertNotEqual(subring, dist.proxySubring) | 9,026,591,236,788,420,000 | HTTPSDistributor.mapSubnetToSubring() when the client wasn't using
a proxy, but the distributor does have some known proxies and a
proxySubring, should not map the client to the proxy subhashring. | bridgedb/test/test_https_distributor.py | test_HTTPSDistributor_mapSubnetToSubring_with_proxies | isislovecruft/bridgedb | python | def test_HTTPSDistributor_mapSubnetToSubring_with_proxies(self):
"HTTPSDistributor.mapSubnetToSubring() when the client wasn't using\n a proxy, but the distributor does have some known proxies and a\n proxySubring, should not map the client to the proxy subhashring.\n "
dist = distributor.HTTPSDistributor(3, self.key, ProxySet(['1.1.1.1', '2.2.2.2']))
subnet = '15.1.0.0/16'
subring = dist.mapSubnetToSubring(subnet, usingProxy=False)
self.assertNotEqual(subring, dist.proxySubring) |
def test_HTTPSDistributor_prepopulateRings_with_proxies(self):
'An HTTPSDistributor with proxies should prepopulate two extra\n subhashrings (one for each of HTTP-Proxy-IPv4 and HTTP-Proxy-IPv6).\n '
dist = distributor.HTTPSDistributor(3, self.key, ProxySet(['1.1.1.1', '2.2.2.2']))
[dist.insert(bridge) for bridge in self.bridges]
dist.prepopulateRings()
self.assertEqual(len(dist.hashring.filterRings), 8) | 2,776,454,485,375,904,300 | An HTTPSDistributor with proxies should prepopulate two extra
subhashrings (one for each of HTTP-Proxy-IPv4 and HTTP-Proxy-IPv6). | bridgedb/test/test_https_distributor.py | test_HTTPSDistributor_prepopulateRings_with_proxies | isislovecruft/bridgedb | python | def test_HTTPSDistributor_prepopulateRings_with_proxies(self):
'An HTTPSDistributor with proxies should prepopulate two extra\n subhashrings (one for each of HTTP-Proxy-IPv4 and HTTP-Proxy-IPv6).\n '
dist = distributor.HTTPSDistributor(3, self.key, ProxySet(['1.1.1.1', '2.2.2.2']))
[dist.insert(bridge) for bridge in self.bridges]
dist.prepopulateRings()
self.assertEqual(len(dist.hashring.filterRings), 8) |
def test_HTTPSDistributor_prepopulateRings_without_proxies(self):
'An HTTPSDistributor without proxies should prepopulate\n totalSubrings * 2 subrings.\n '
dist = distributor.HTTPSDistributor(3, self.key)
[dist.insert(bridge) for bridge in self.bridges]
dist.prepopulateRings()
self.assertEqual(len(dist.hashring.filterRings), 6)
ipv4subrings = []
ipv6subrings = []
for (subringName, (filters, subring)) in dist.hashring.filterRings.items():
if ('IPv4' in subringName):
ipv4subrings.append(subring)
if ('IPv6' in subringName):
ipv6subrings.append(subring)
self.assertEqual(len(ipv4subrings), len(ipv6subrings)) | -71,506,198,677,698,360 | An HTTPSDistributor without proxies should prepopulate
totalSubrings * 2 subrings. | bridgedb/test/test_https_distributor.py | test_HTTPSDistributor_prepopulateRings_without_proxies | isislovecruft/bridgedb | python | def test_HTTPSDistributor_prepopulateRings_without_proxies(self):
'An HTTPSDistributor without proxies should prepopulate\n totalSubrings * 2 subrings.\n '
dist = distributor.HTTPSDistributor(3, self.key)
[dist.insert(bridge) for bridge in self.bridges]
dist.prepopulateRings()
self.assertEqual(len(dist.hashring.filterRings), 6)
ipv4subrings = []
ipv6subrings = []
for (subringName, (filters, subring)) in dist.hashring.filterRings.items():
if ('IPv4' in subringName):
ipv4subrings.append(subring)
if ('IPv6' in subringName):
ipv6subrings.append(subring)
self.assertEqual(len(ipv4subrings), len(ipv6subrings)) |
def test_HTTPSDistributor_getBridges_with_proxy_and_nonproxy_users(self):
'An HTTPSDistributor should give separate bridges to proxy users.'
proxies = ProxySet(['.'.join(['1.1.1', str(x)]) for x in range(1, 256)])
dist = distributor.HTTPSDistributor(3, self.key, proxies)
[dist.insert(bridge) for bridge in self.bridges]
for _ in range(10):
bridgeRequest1 = self.randomClientRequest()
bridgeRequest1.client = '.'.join(['1.1.1', str(random.randrange(1, 255))])
bridgeRequest2 = self.randomClientRequest()
bridgeRequest2.client = '.'.join(['9.9.9', str(random.randrange(1, 255))])
n1 = dist.getBridges(bridgeRequest1, 1)
n2 = dist.getBridges(bridgeRequest2, 1)
self.assertGreater(len(n1), 0)
self.assertGreater(len(n2), 0)
for b in n1:
self.assertNotIn(b, n2)
for b in n2:
self.assertNotIn(b, n1) | 6,604,173,368,182,695,000 | An HTTPSDistributor should give separate bridges to proxy users. | bridgedb/test/test_https_distributor.py | test_HTTPSDistributor_getBridges_with_proxy_and_nonproxy_users | isislovecruft/bridgedb | python | def test_HTTPSDistributor_getBridges_with_proxy_and_nonproxy_users(self):
proxies = ProxySet(['.'.join(['1.1.1', str(x)]) for x in range(1, 256)])
dist = distributor.HTTPSDistributor(3, self.key, proxies)
[dist.insert(bridge) for bridge in self.bridges]
for _ in range(10):
bridgeRequest1 = self.randomClientRequest()
bridgeRequest1.client = '.'.join(['1.1.1', str(random.randrange(1, 255))])
bridgeRequest2 = self.randomClientRequest()
bridgeRequest2.client = '.'.join(['9.9.9', str(random.randrange(1, 255))])
n1 = dist.getBridges(bridgeRequest1, 1)
n2 = dist.getBridges(bridgeRequest2, 1)
self.assertGreater(len(n1), 0)
self.assertGreater(len(n2), 0)
for b in n1:
self.assertNotIn(b, n2)
for b in n2:
self.assertNotIn(b, n1) |
def test_HTTPSDistributor_getBridges_same_bridges_to_same_client(self):
'The same client asking for bridges from the HTTPSDistributor\n multiple times in a row should get the same bridges in response each\n time.\n '
dist = distributor.HTTPSDistributor(3, self.key)
[dist.insert(bridge) for bridge in self.bridges[:250]]
bridgeRequest = self.randomClientRequest()
responses = {}
for i in range(5):
responses[i] = dist.getBridges(bridgeRequest, 1)
for i in range(4):
self.assertItemsEqual(responses[i], responses[(i + 1)]) | 7,259,232,939,965,180,000 | The same client asking for bridges from the HTTPSDistributor
multiple times in a row should get the same bridges in response each
time. | bridgedb/test/test_https_distributor.py | test_HTTPSDistributor_getBridges_same_bridges_to_same_client | isislovecruft/bridgedb | python | def test_HTTPSDistributor_getBridges_same_bridges_to_same_client(self):
'The same client asking for bridges from the HTTPSDistributor\n multiple times in a row should get the same bridges in response each\n time.\n '
dist = distributor.HTTPSDistributor(3, self.key)
[dist.insert(bridge) for bridge in self.bridges[:250]]
bridgeRequest = self.randomClientRequest()
responses = {}
for i in range(5):
responses[i] = dist.getBridges(bridgeRequest, 1)
for i in range(4):
self.assertItemsEqual(responses[i], responses[(i + 1)]) |
def test_HTTPSDistributor_getBridges_ipv4_ipv6(self):
'Asking for bridge addresses which are simultaneously IPv4 and IPv6\n (in that order) should return IPv4 bridges.\n '
dist = distributor.HTTPSDistributor(1, self.key)
[dist.insert(bridge) for bridge in self.bridges[:250]]
bridgeRequest = self.randomClientRequest()
bridgeRequest.withIPv4()
bridgeRequest.filters.append(byIPv6)
bridgeRequest.generateFilters()
bridges = dist.getBridges(bridgeRequest, 1)
self.assertEqual(len(bridges), 3)
bridge = random.choice(bridges)
bridgeLine = bridge.getBridgeLine(bridgeRequest)
(addrport, fingerprint) = bridgeLine.split()
(address, port) = addrport.rsplit(':', 1)
address = address.strip('[]')
self.assertIsInstance(ipaddr.IPAddress(address), ipaddr.IPv4Address)
self.assertIsNotNone(byIPv4(random.choice(bridges))) | 6,034,760,044,908,151,000 | Asking for bridge addresses which are simultaneously IPv4 and IPv6
(in that order) should return IPv4 bridges. | bridgedb/test/test_https_distributor.py | test_HTTPSDistributor_getBridges_ipv4_ipv6 | isislovecruft/bridgedb | python | def test_HTTPSDistributor_getBridges_ipv4_ipv6(self):
'Asking for bridge addresses which are simultaneously IPv4 and IPv6\n (in that order) should return IPv4 bridges.\n '
dist = distributor.HTTPSDistributor(1, self.key)
[dist.insert(bridge) for bridge in self.bridges[:250]]
bridgeRequest = self.randomClientRequest()
bridgeRequest.withIPv4()
bridgeRequest.filters.append(byIPv6)
bridgeRequest.generateFilters()
bridges = dist.getBridges(bridgeRequest, 1)
self.assertEqual(len(bridges), 3)
bridge = random.choice(bridges)
bridgeLine = bridge.getBridgeLine(bridgeRequest)
(addrport, fingerprint) = bridgeLine.split()
(address, port) = addrport.rsplit(':', 1)
address = address.strip('[]')
self.assertIsInstance(ipaddr.IPAddress(address), ipaddr.IPv4Address)
self.assertIsNotNone(byIPv4(random.choice(bridges))) |
def test_HTTPSDistributor_getBridges_ipv6_ipv4(self):
'Asking for bridge addresses which are simultaneously IPv6 and IPv4\n (in that order) should return IPv6 bridges.\n '
dist = distributor.HTTPSDistributor(1, self.key)
[dist.insert(bridge) for bridge in self.bridges[:250]]
bridgeRequest = self.randomClientRequest()
bridgeRequest.withIPv6()
bridgeRequest.generateFilters()
bridgeRequest.filters.append(byIPv4)
bridges = dist.getBridges(bridgeRequest, 1)
self.assertEqual(len(bridges), 3)
bridge = random.choice(bridges)
bridgeLine = bridge.getBridgeLine(bridgeRequest)
(addrport, fingerprint) = bridgeLine.split()
(address, port) = addrport.rsplit(':', 1)
address = address.strip('[]')
self.assertIsInstance(ipaddr.IPAddress(address), ipaddr.IPv6Address)
self.assertIsNotNone(byIPv6(random.choice(bridges))) | 4,826,968,932,339,698,000 | Asking for bridge addresses which are simultaneously IPv6 and IPv4
(in that order) should return IPv6 bridges. | bridgedb/test/test_https_distributor.py | test_HTTPSDistributor_getBridges_ipv6_ipv4 | isislovecruft/bridgedb | python | def test_HTTPSDistributor_getBridges_ipv6_ipv4(self):
'Asking for bridge addresses which are simultaneously IPv6 and IPv4\n (in that order) should return IPv6 bridges.\n '
dist = distributor.HTTPSDistributor(1, self.key)
[dist.insert(bridge) for bridge in self.bridges[:250]]
bridgeRequest = self.randomClientRequest()
bridgeRequest.withIPv6()
bridgeRequest.generateFilters()
bridgeRequest.filters.append(byIPv4)
bridges = dist.getBridges(bridgeRequest, 1)
self.assertEqual(len(bridges), 3)
bridge = random.choice(bridges)
bridgeLine = bridge.getBridgeLine(bridgeRequest)
(addrport, fingerprint) = bridgeLine.split()
(address, port) = addrport.rsplit(':', 1)
address = address.strip('[]')
self.assertIsInstance(ipaddr.IPAddress(address), ipaddr.IPv6Address)
self.assertIsNotNone(byIPv6(random.choice(bridges))) |
def test_HTTPSDistributor_getBridges_ipv6(self):
'A request for IPv6 bridges should return IPv6 bridges.'
dist = distributor.HTTPSDistributor(3, self.key)
[dist.insert(bridge) for bridge in self.bridges[:250]]
for i in xrange(500):
bridgeRequest = self.randomClientRequest()
bridgeRequest.withIPv6()
bridgeRequest.generateFilters()
bridges = dist.getBridges(bridgeRequest, 'faketimestamp')
self.assertTrue((type(bridges) is list))
self.assertGreater(len(bridges), 0)
bridge = random.choice(bridges)
bridgeLine = bridge.getBridgeLine(bridgeRequest)
(addrport, fingerprint) = bridgeLine.split()
(address, port) = addrport.rsplit(':', 1)
address = address.strip('[]')
self.assertIsInstance(ipaddr.IPAddress(address), ipaddr.IPv6Address)
self.assertIsNotNone(byIPv6(random.choice(bridges))) | 2,056,632,888,544,047,600 | A request for IPv6 bridges should return IPv6 bridges. | bridgedb/test/test_https_distributor.py | test_HTTPSDistributor_getBridges_ipv6 | isislovecruft/bridgedb | python | def test_HTTPSDistributor_getBridges_ipv6(self):
dist = distributor.HTTPSDistributor(3, self.key)
[dist.insert(bridge) for bridge in self.bridges[:250]]
for i in xrange(500):
bridgeRequest = self.randomClientRequest()
bridgeRequest.withIPv6()
bridgeRequest.generateFilters()
bridges = dist.getBridges(bridgeRequest, 'faketimestamp')
self.assertTrue((type(bridges) is list))
self.assertGreater(len(bridges), 0)
bridge = random.choice(bridges)
bridgeLine = bridge.getBridgeLine(bridgeRequest)
(addrport, fingerprint) = bridgeLine.split()
(address, port) = addrport.rsplit(':', 1)
address = address.strip('[]')
self.assertIsInstance(ipaddr.IPAddress(address), ipaddr.IPv6Address)
self.assertIsNotNone(byIPv6(random.choice(bridges))) |
def test_HTTPSDistributor_getBridges_ipv4(self):
'A request for IPv4 bridges should return IPv4 bridges.'
dist = distributor.HTTPSDistributor(1, self.key)
[dist.insert(bridge) for bridge in self.bridges[:250]]
for i in xrange(500):
bridgeRequest = self.randomClientRequest()
bridgeRequest.generateFilters()
bridges = dist.getBridges(bridgeRequest, 'faketimestamp')
self.assertTrue((type(bridges) is list))
self.assertGreater(len(bridges), 0)
bridge = random.choice(bridges)
bridgeLine = bridge.getBridgeLine(bridgeRequest)
(addrport, fingerprint) = bridgeLine.split()
(address, port) = addrport.rsplit(':', 1)
self.assertIsInstance(ipaddr.IPAddress(address), ipaddr.IPv4Address)
self.assertIsNotNone(byIPv4(random.choice(bridges))) | 3,335,645,459,602,166,300 | A request for IPv4 bridges should return IPv4 bridges. | bridgedb/test/test_https_distributor.py | test_HTTPSDistributor_getBridges_ipv4 | isislovecruft/bridgedb | python | def test_HTTPSDistributor_getBridges_ipv4(self):
dist = distributor.HTTPSDistributor(1, self.key)
[dist.insert(bridge) for bridge in self.bridges[:250]]
for i in xrange(500):
bridgeRequest = self.randomClientRequest()
bridgeRequest.generateFilters()
bridges = dist.getBridges(bridgeRequest, 'faketimestamp')
self.assertTrue((type(bridges) is list))
self.assertGreater(len(bridges), 0)
bridge = random.choice(bridges)
bridgeLine = bridge.getBridgeLine(bridgeRequest)
(addrport, fingerprint) = bridgeLine.split()
(address, port) = addrport.rsplit(':', 1)
self.assertIsInstance(ipaddr.IPAddress(address), ipaddr.IPv4Address)
self.assertIsNotNone(byIPv4(random.choice(bridges))) |
def _stash_exp(self, *args, params: Optional[dict]=None, detach_rev: Optional[str]=None, baseline_rev: Optional[str]=None, branch: Optional[str]=None, name: Optional[str]=None, **kwargs):
"Stash changes from the workspace as an experiment.\n\n Args:\n params: Optional dictionary of parameter values to be used.\n Values take priority over any parameters specified in the\n user's workspace.\n baseline_rev: Optional baseline rev for this experiment, defaults\n to the current SCM rev.\n branch: Optional experiment branch name. If specified, the\n experiment will be added to `branch` instead of creating\n a new branch.\n name: Optional experiment name. If specified this will be used as\n the human-readable name in the experiment branch ref. Has no\n effect of branch is specified.\n "
with self.scm.stash_workspace(include_untracked=(detach_rev or branch)) as workspace:
if ((not (branch or detach_rev)) and workspace):
self.stash.apply(workspace)
self._prune_lockfiles()
if detach_rev:
head = detach_rev
elif branch:
head = branch
else:
head = None
with self.scm.detach_head(head) as rev:
if (baseline_rev is None):
baseline_rev = rev
if params:
self._update_params(params)
self._pack_args(*args, **kwargs)
msg = self._stash_msg(rev, baseline_rev=baseline_rev, branch=branch, name=name)
stash_rev = self.stash.push(message=msg)
logger.debug("Stashed experiment '%s' with baseline '%s' for future execution.", stash_rev[:7], baseline_rev[:7])
self.scm.reset(hard=True)
return stash_rev | 1,772,051,208,236,786,200 | Stash changes from the workspace as an experiment.
Args:
params: Optional dictionary of parameter values to be used.
Values take priority over any parameters specified in the
user's workspace.
baseline_rev: Optional baseline rev for this experiment, defaults
to the current SCM rev.
branch: Optional experiment branch name. If specified, the
experiment will be added to `branch` instead of creating
a new branch.
name: Optional experiment name. If specified this will be used as
the human-readable name in the experiment branch ref. Has no
effect if branch is specified. | dvc/repo/experiments/__init__.py | _stash_exp | esthergold/dvc | python | def _stash_exp(self, *args, params: Optional[dict]=None, detach_rev: Optional[str]=None, baseline_rev: Optional[str]=None, branch: Optional[str]=None, name: Optional[str]=None, **kwargs):
"Stash changes from the workspace as an experiment.\n\n Args:\n params: Optional dictionary of parameter values to be used.\n Values take priority over any parameters specified in the\n user's workspace.\n baseline_rev: Optional baseline rev for this experiment, defaults\n to the current SCM rev.\n branch: Optional experiment branch name. If specified, the\n experiment will be added to `branch` instead of creating\n a new branch.\n name: Optional experiment name. If specified this will be used as\n the human-readable name in the experiment branch ref. Has no\n effect if branch is specified.\n "
with self.scm.stash_workspace(include_untracked=(detach_rev or branch)) as workspace:
if ((not (branch or detach_rev)) and workspace):
self.stash.apply(workspace)
self._prune_lockfiles()
if detach_rev:
head = detach_rev
elif branch:
head = branch
else:
head = None
with self.scm.detach_head(head) as rev:
if (baseline_rev is None):
baseline_rev = rev
if params:
self._update_params(params)
self._pack_args(*args, **kwargs)
msg = self._stash_msg(rev, baseline_rev=baseline_rev, branch=branch, name=name)
stash_rev = self.stash.push(message=msg)
logger.debug("Stashed experiment '%s' with baseline '%s' for future execution.", stash_rev[:7], baseline_rev[:7])
self.scm.reset(hard=True)
return stash_rev |
def _update_params(self, params: dict):
'Update experiment params files with the specified values.'
from benedict import benedict
from dvc.utils.serialize import MODIFIERS
logger.debug("Using experiment params '%s'", params)
for params_fname in params:
path = (PathInfo(self.repo.root_dir) / params_fname)
suffix = path.suffix.lower()
modify_data = MODIFIERS[suffix]
with modify_data(path, tree=self.repo.tree) as data:
benedict(data).merge(params[params_fname], overwrite=True)
self.scm.add(list(params.keys())) | 6,649,994,754,235,740,000 | Update experiment params files with the specified values. | dvc/repo/experiments/__init__.py | _update_params | esthergold/dvc | python | def _update_params(self, params: dict):
from benedict import benedict
from dvc.utils.serialize import MODIFIERS
logger.debug("Using experiment params '%s'", params)
for params_fname in params:
path = (PathInfo(self.repo.root_dir) / params_fname)
suffix = path.suffix.lower()
modify_data = MODIFIERS[suffix]
with modify_data(path, tree=self.repo.tree) as data:
benedict(data).merge(params[params_fname], overwrite=True)
self.scm.add(list(params.keys())) |
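
_update_params above deep-merges user-supplied overrides into each params file through benedict. A minimal sketch of that merge step applied to an in-memory dict instead of a file opened through modify_data; `merge(..., overwrite=True)` is used exactly as in the method:

```python
from benedict import benedict

data = benedict({'train': {'lr': 0.001, 'epochs': 10}, 'seed': 42})
data.merge({'train': {'lr': 0.01}}, overwrite=True)   # override only train.lr

print(data['train.lr'], data['train.epochs'], data['seed'])   # 0.01 10 42
```
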
def reproduce_one(self, queue=False, **kwargs):
'Reproduce and checkout a single experiment.'
stash_rev = self.new(**kwargs)
if queue:
logger.info("Queued experiment '%s' for future execution.", stash_rev[:7])
return [stash_rev]
results = self.reproduce([stash_rev], keep_stash=False)
exp_rev = first(results)
if (exp_rev is not None):
self._log_reproduced(results)
return results | -7,046,387,588,810,680,000 | Reproduce and checkout a single experiment. | dvc/repo/experiments/__init__.py | reproduce_one | esthergold/dvc | python | def reproduce_one(self, queue=False, **kwargs):
stash_rev = self.new(**kwargs)
if queue:
logger.info("Queued experiment '%s' for future execution.", stash_rev[:7])
return [stash_rev]
results = self.reproduce([stash_rev], keep_stash=False)
exp_rev = first(results)
if (exp_rev is not None):
self._log_reproduced(results)
return results |
@scm_locked
def new(self, *args, checkpoint_resume: Optional[str]=None, **kwargs):
"Create a new experiment.\n\n Experiment will be reproduced and checked out into the user's\n workspace.\n "
if (checkpoint_resume is not None):
return self._resume_checkpoint(*args, checkpoint_resume=checkpoint_resume, **kwargs)
return self._stash_exp(*args, **kwargs) | 7,423,319,692,721,970,000 | Create a new experiment.
Experiment will be reproduced and checked out into the user's
workspace. | dvc/repo/experiments/__init__.py | new | esthergold/dvc | python | @scm_locked
def new(self, *args, checkpoint_resume: Optional[str]=None, **kwargs):
"Create a new experiment.\n\n Experiment will be reproduced and checked out into the user's\n workspace.\n "
if (checkpoint_resume is not None):
return self._resume_checkpoint(*args, checkpoint_resume=checkpoint_resume, **kwargs)
return self._stash_exp(*args, **kwargs) |
def _resume_checkpoint(self, *args, checkpoint_resume: Optional[str]=None, **kwargs):
"Resume an existing (checkpoint) experiment.\n\n Experiment will be reproduced and checked out into the user's\n workspace.\n "
assert checkpoint_resume
if (checkpoint_resume == self.LAST_CHECKPOINT):
resume_rev = self._get_last_checkpoint()
else:
resume_rev = self.scm.resolve_rev(checkpoint_resume)
allow_multiple = ('params' in kwargs)
branch = self.get_branch_by_rev(resume_rev, allow_multiple=allow_multiple)
if (not branch):
raise DvcException(f"Could not find checkpoint experiment '{checkpoint_resume}'")
baseline_rev = self._get_baseline(branch)
if kwargs.get('params', None):
logger.debug("Branching from checkpoint '%s' with modified params, baseline '%s'", checkpoint_resume, baseline_rev[:7])
detach_rev = resume_rev
branch = None
else:
logger.debug("Continuing from tip of checkpoint '%s'", checkpoint_resume)
detach_rev = None
return self._stash_exp(*args, detach_rev=detach_rev, baseline_rev=baseline_rev, branch=branch, **kwargs) | 8,775,670,305,476,068,000 | Resume an existing (checkpoint) experiment.
Experiment will be reproduced and checked out into the user's
workspace. | dvc/repo/experiments/__init__.py | _resume_checkpoint | esthergold/dvc | python | def _resume_checkpoint(self, *args, checkpoint_resume: Optional[str]=None, **kwargs):
"Resume an existing (checkpoint) experiment.\n\n Experiment will be reproduced and checked out into the user's\n workspace.\n "
assert checkpoint_resume
if (checkpoint_resume == self.LAST_CHECKPOINT):
resume_rev = self._get_last_checkpoint()
else:
resume_rev = self.scm.resolve_rev(checkpoint_resume)
allow_multiple = ('params' in kwargs)
branch = self.get_branch_by_rev(resume_rev, allow_multiple=allow_multiple)
if (not branch):
raise DvcException(f"Could not find checkpoint experiment '{checkpoint_resume}'")
baseline_rev = self._get_baseline(branch)
if kwargs.get('params', None):
logger.debug("Branching from checkpoint '%s' with modified params, baseline '%s'", checkpoint_resume, baseline_rev[:7])
detach_rev = resume_rev
branch = None
else:
logger.debug("Continuing from tip of checkpoint '%s'", checkpoint_resume)
detach_rev = None
return self._stash_exp(*args, detach_rev=detach_rev, baseline_rev=baseline_rev, branch=branch, **kwargs) |
@scm_locked
def reproduce(self, revs: Optional[Iterable]=None, keep_stash: Optional[bool]=True, **kwargs):
'Reproduce the specified experiments.\n\n Args:\n revs: If revs is not specified, all stashed experiments will be\n reproduced.\n keep_stash: If True, stashed experiments will be preserved if they\n fail to reproduce successfully.\n '
stash_revs = self.stash_revs
if (revs is None):
to_run = dict(stash_revs)
else:
to_run = {rev: (stash_revs[rev] if (rev in stash_revs) else self.StashEntry(None, rev, rev, None, None)) for rev in revs}
logger.debug("Reproducing experiment revs '%s'", ', '.join((rev[:7] for rev in to_run)))
executors = self._init_executors(to_run)
exec_results = self._reproduce(executors, **kwargs)
if keep_stash:
to_drop = sorted((stash_revs[rev][0] for rev in exec_results if (rev in stash_revs)), reverse=True)
else:
to_drop = sorted((stash_revs[rev][0] for rev in to_run if (rev in stash_revs)), reverse=True)
for index in to_drop:
self.stash.drop(index)
result = {}
for (_, exp_result) in exec_results.items():
result.update(exp_result)
return result | -8,775,852,405,187,722,000 | Reproduce the specified experiments.
Args:
revs: If revs is not specified, all stashed experiments will be
reproduced.
keep_stash: If True, stashed experiments will be preserved if they
fail to reproduce successfully. | dvc/repo/experiments/__init__.py | reproduce | esthergold/dvc | python | @scm_locked
def reproduce(self, revs: Optional[Iterable]=None, keep_stash: Optional[bool]=True, **kwargs):
'Reproduce the specified experiments.\n\n Args:\n revs: If revs is not specified, all stashed experiments will be\n reproduced.\n keep_stash: If True, stashed experiments will be preserved if they\n fail to reproduce successfully.\n '
stash_revs = self.stash_revs
if (revs is None):
to_run = dict(stash_revs)
else:
to_run = {rev: (stash_revs[rev] if (rev in stash_revs) else self.StashEntry(None, rev, rev, None, None)) for rev in revs}
logger.debug("Reproducing experiment revs '%s'", ', '.join((rev[:7] for rev in to_run)))
executors = self._init_executors(to_run)
exec_results = self._reproduce(executors, **kwargs)
if keep_stash:
to_drop = sorted((stash_revs[rev][0] for rev in exec_results if (rev in stash_revs)), reverse=True)
else:
to_drop = sorted((stash_revs[rev][0] for rev in to_run if (rev in stash_revs)), reverse=True)
for index in to_drop:
self.stash.drop(index)
result = {}
for (_, exp_result) in exec_results.items():
result.update(exp_result)
return result |
def _reproduce(self, executors: dict, jobs: Optional[int]=1) -> Mapping[(str, Mapping[(str, str)])]:
'Run dvc repro for the specified BaseExecutors in parallel.\n\n Returns dict containing successfully executed experiments.\n '
result = defaultdict(dict)
manager = Manager()
pid_q = manager.Queue()
with ProcessPoolExecutor(max_workers=jobs) as workers:
futures = {}
for (rev, executor) in executors.items():
future = workers.submit(executor.reproduce, executor.dvc_dir, pid_q, rev, name=executor.name)
futures[future] = (rev, executor)
try:
wait(futures)
except KeyboardInterrupt:
pids = {}
while (not pid_q.empty()):
(rev, pid) = pid_q.get()
pids[rev] = pid
for (future, (rev, _)) in futures.items():
if future.running():
os.kill(pids[rev], signal.SIGINT)
elif (not future.done()):
future.cancel()
for (future, (rev, executor)) in futures.items():
(rev, executor) = futures[future]
exc = future.exception()
try:
if (exc is None):
(exp_hash, force) = future.result()
result[rev].update(self._collect_executor(executor, exp_hash, force))
elif (not isinstance(exc, CheckpointKilledError)):
logger.exception("Failed to reproduce experiment '%s'", rev[:7], exc_info=exc)
except CancelledError:
logger.error("Cancelled before attempting to reproduce experiment '%s'", rev[:7])
finally:
executor.cleanup()
return result | 5,327,217,892,181,162,000 | Run dvc repro for the specified BaseExecutors in parallel.
Returns dict containing successfully executed experiments. | dvc/repo/experiments/__init__.py | _reproduce | esthergold/dvc | python | def _reproduce(self, executors: dict, jobs: Optional[int]=1) -> Mapping[(str, Mapping[(str, str)])]:
'Run dvc repro for the specified BaseExecutors in parallel.\n\n Returns dict containing successfully executed experiments.\n '
result = defaultdict(dict)
manager = Manager()
pid_q = manager.Queue()
with ProcessPoolExecutor(max_workers=jobs) as workers:
futures = {}
for (rev, executor) in executors.items():
future = workers.submit(executor.reproduce, executor.dvc_dir, pid_q, rev, name=executor.name)
futures[future] = (rev, executor)
try:
wait(futures)
except KeyboardInterrupt:
pids = {}
while (not pid_q.empty()):
(rev, pid) = pid_q.get()
pids[rev] = pid
for (future, (rev, _)) in futures.items():
if future.running():
os.kill(pids[rev], signal.SIGINT)
elif (not future.done()):
future.cancel()
for (future, (rev, executor)) in futures.items():
(rev, executor) = futures[future]
exc = future.exception()
try:
if (exc is None):
(exp_hash, force) = future.result()
result[rev].update(self._collect_executor(executor, exp_hash, force))
elif (not isinstance(exc, CheckpointKilledError)):
logger.exception("Failed to reproduce experiment '%s'", rev[:7], exc_info=exc)
except CancelledError:
logger.error("Cancelled before attempting to reproduce experiment '%s'", rev[:7])
finally:
executor.cleanup()
return result |
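
_reproduce above fans experiments out to a ProcessPoolExecutor, waits, then inspects each future for an exception before collecting results. A standalone sketch of that submit/wait/collect pattern with a trivial task in place of executor.reproduce; interrupt handling and the multiprocessing queue are omitted:

```python
from concurrent.futures import ProcessPoolExecutor, wait

def fake_repro(rev):
    # stand-in for BaseExecutor.reproduce
    return f'result-for-{rev}'

if __name__ == '__main__':
    revs = ['abc1234', 'def5678']
    results = {}
    with ProcessPoolExecutor(max_workers=2) as workers:
        futures = {workers.submit(fake_repro, rev): rev for rev in revs}
        wait(futures)
        for future, rev in futures.items():
            exc = future.exception()
            if exc is None:
                results[rev] = future.result()
            else:
                print(f'failed to reproduce {rev}: {exc}')
    print(results)
```
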
@scm_locked
def get_baseline(self, rev):
'Return the baseline rev for an experiment rev.'
return self._get_baseline(rev) | 7,143,182,011,199,181,000 | Return the baseline rev for an experiment rev. | dvc/repo/experiments/__init__.py | get_baseline | esthergold/dvc | python | @scm_locked
def get_baseline(self, rev):
return self._get_baseline(rev) |
def get_branch_by_rev(self, rev: str, allow_multiple: bool=False) -> str:
'Returns full refname for the experiment branch containing rev.'
ref_infos = list(exp_refs_by_rev(self.scm, rev))
if (not ref_infos):
return None
if ((len(ref_infos) > 1) and (not allow_multiple)):
raise MultipleBranchError(rev)
return str(ref_infos[0]) | 6,905,835,126,619,686,000 | Returns full refname for the experiment branch containing rev. | dvc/repo/experiments/__init__.py | get_branch_by_rev | esthergold/dvc | python | def get_branch_by_rev(self, rev: str, allow_multiple: bool=False) -> str:
ref_infos = list(exp_refs_by_rev(self.scm, rev))
if (not ref_infos):
return None
if ((len(ref_infos) > 1) and (not allow_multiple)):
raise MultipleBranchError(rev)
return str(ref_infos[0]) |
def get_exact_name(self, rev: str):
'Returns preferred name for the specified revision.\n\n Prefers tags, branches (heads), experiments in that order.\n '
exclude = f'{EXEC_NAMESPACE}/*'
ref = self.scm.describe(rev, base=EXPS_NAMESPACE, exclude=exclude)
if ref:
return ExpRefInfo.from_ref(ref).name
return None | -4,535,959,236,294,254,000 | Returns preferred name for the specified revision.
Prefers tags, branches (heads), experiments in that order. | dvc/repo/experiments/__init__.py | get_exact_name | esthergold/dvc | python | def get_exact_name(self, rev: str):
'Returns preferred name for the specified revision.\n\n Prefers tags, branches (heads), experiments in that order.\n '
exclude = f'{EXEC_NAMESPACE}/*'
ref = self.scm.describe(rev, base=EXPS_NAMESPACE, exclude=exclude)
if ref:
return ExpRefInfo.from_ref(ref).name
return None |
def create_model(self, model_input, vocab_size, l2_penalty=1e-08, **unused_params):
"Creates a CNN model.\n\n Args:\n model_input: 'batch' x 'num_features' matrix of input features.\n vocab_size: The number of classes in the dataset.\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n batch_size x num_classes.\n "
model_input = tf.reshape(model_input, [(- 1), 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [(- 1), ((16 * 16) * 3)])
output = slim.fully_connected(net, vocab_size, activation_fn=tf.nn.sigmoid, weights_regularizer=slim.l2_regularizer(l2_penalty))
return {'predictions': output} | -8,735,807,252,078,989,000 | Creates a CNN model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes. | video_level_models.py | create_model | abdoo8080/youtube-8m | python | def create_model(self, model_input, vocab_size, l2_penalty=1e-08, **unused_params):
"Creates a CNN model.\n\n Args:\n model_input: 'batch' x 'num_features' matrix of input features.\n vocab_size: The number of classes in the dataset.\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n batch_size x num_classes.\n "
model_input = tf.reshape(model_input, [(- 1), 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [(- 1), ((16 * 16) * 3)])
output = slim.fully_connected(net, vocab_size, activation_fn=tf.nn.sigmoid, weights_regularizer=slim.l2_regularizer(l2_penalty))
return {'predictions': output} |
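
The CNN record above reshapes the 1024-d video-level feature into a 32x32 "image", applies three small convolutions, max-pools to 16x16x3 (768 values), and ends with a sigmoid dense layer. A rough tf.keras re-sketch of the same shape arithmetic, not the original slim code; the 3862-class vocabulary size is an assumption used only to build the summary:

```python
import tensorflow as tf

def build_cnn(vocab_size, l2=1e-8):
    reg = tf.keras.regularizers.l2(l2)
    return tf.keras.Sequential([
        tf.keras.layers.Reshape((32, 32, 1), input_shape=(1024,)),
        tf.keras.layers.Conv2D(3, 3, padding='same', activation='relu', kernel_regularizer=reg),
        tf.keras.layers.Conv2D(3, 4, padding='same', activation='relu', kernel_regularizer=reg),
        tf.keras.layers.Conv2D(3, 5, padding='same', activation='relu', kernel_regularizer=reg),
        tf.keras.layers.MaxPool2D(2),
        tf.keras.layers.Flatten(),                     # 16 * 16 * 3 = 768
        tf.keras.layers.Dense(vocab_size, activation='sigmoid', kernel_regularizer=reg),
    ])

build_cnn(vocab_size=3862).summary()   # 3862 classes assumed for illustration
```
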
def create_model(self, model_input, vocab_size, l2_penalty=1e-08, **unused_params):
"Creates a ResNet model.\n\n Args:\n model_input: 'batch' x 'num_features' matrix of input features.\n vocab_size: The number of classes in the dataset.\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n batch_size x num_classes.\n "
model_input = tf.reshape(model_input, [(- 1), 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = tf.tile(model_input, [1, 1, 1, 3])
for i in range(0, 9):
temp = (net + shortcut)
net = slim.conv2d(temp, 3, [3, 3], scope=('conv%d_1' % (i + 1)), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [4, 4], scope=('conv%d_2' % (i + 1)), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [5, 5], scope=('conv%d_3' % (i + 1)), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = temp
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [(- 1), ((16 * 16) * 3)])
output = slim.fully_connected(net, vocab_size, activation_fn=tf.nn.sigmoid, weights_regularizer=slim.l2_regularizer(l2_penalty))
return {'predictions': output} | 6,562,285,822,841,482,000 | Creates a ResNet model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes. | video_level_models.py | create_model | abdoo8080/youtube-8m | python | def create_model(self, model_input, vocab_size, l2_penalty=1e-08, **unused_params):
"Creates a ResNet model.\n\n Args:\n model_input: 'batch' x 'num_features' matrix of input features.\n vocab_size: The number of classes in the dataset.\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n batch_size x num_classes.\n "
model_input = tf.reshape(model_input, [(- 1), 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = tf.tile(model_input, [1, 1, 1, 3])
for i in range(0, 9):
temp = (net + shortcut)
net = slim.conv2d(temp, 3, [3, 3], scope=('conv%d_1' % (i + 1)), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [4, 4], scope=('conv%d_2' % (i + 1)), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [5, 5], scope=('conv%d_3' % (i + 1)), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = temp
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [(- 1), ((16 * 16) * 3)])
output = slim.fully_connected(net, vocab_size, activation_fn=tf.nn.sigmoid, weights_regularizer=slim.l2_regularizer(l2_penalty))
return {'predictions': output} |
def create_model(self, model_input, vocab_size, l2_penalty=1e-08, **unused_params):
"Creates a logistic model.\n\n Args:\n model_input: 'batch' x 'num_features' matrix of input features.\n vocab_size: The number of classes in the dataset.\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n batch_size x num_classes.\n "
output = slim.fully_connected(model_input, vocab_size, activation_fn=tf.nn.sigmoid, weights_regularizer=slim.l2_regularizer(l2_penalty))
return {'predictions': output} | 6,376,754,502,288,288,000 | Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes. | video_level_models.py | create_model | abdoo8080/youtube-8m | python | def create_model(self, model_input, vocab_size, l2_penalty=1e-08, **unused_params):
"Creates a logistic model.\n\n Args:\n model_input: 'batch' x 'num_features' matrix of input features.\n vocab_size: The number of classes in the dataset.\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n batch_size x num_classes.\n "
output = slim.fully_connected(model_input, vocab_size, activation_fn=tf.nn.sigmoid, weights_regularizer=slim.l2_regularizer(l2_penalty))
return {'predictions': output} |
def create_model(self, model_input, vocab_size, num_mixtures=None, l2_penalty=1e-08, **unused_params):
"Creates a Mixture of (Logistic) Experts model.\n\n The model consists of a per-class softmax distribution over a\n configurable number of logistic classifiers. One of the classifiers in the\n mixture is not trained, and always predicts 0.\n\n Args:\n model_input: 'batch_size' x 'num_features' matrix of input features.\n vocab_size: The number of classes in the dataset.\n num_mixtures: The number of mixtures (excluding a dummy 'expert' that\n always predicts the non-existence of an entity).\n l2_penalty: How much to penalize the squared magnitudes of parameter\n values.\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n batch_size x num_classes.\n "
num_mixtures = (num_mixtures or FLAGS.moe_num_mixtures)
gate_activations = slim.fully_connected(model_input, (vocab_size * (num_mixtures + 1)), activation_fn=None, biases_initializer=None, weights_regularizer=slim.l2_regularizer(l2_penalty), scope='gates')
expert_activations = slim.fully_connected(model_input, (vocab_size * num_mixtures), activation_fn=None, weights_regularizer=slim.l2_regularizer(l2_penalty), scope='experts')
gating_distribution = tf.nn.softmax(tf.reshape(gate_activations, [(- 1), (num_mixtures + 1)]))
expert_distribution = tf.nn.sigmoid(tf.reshape(expert_activations, [(- 1), num_mixtures]))
final_probabilities_by_class_and_batch = tf.reduce_sum((gating_distribution[:, :num_mixtures] * expert_distribution), 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch, [(- 1), vocab_size])
return {'predictions': final_probabilities} | -6,447,030,492,059,986,000 | Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes. | video_level_models.py | create_model | abdoo8080/youtube-8m | python | def create_model(self, model_input, vocab_size, num_mixtures=None, l2_penalty=1e-08, **unused_params):
"Creates a Mixture of (Logistic) Experts model.\n\n The model consists of a per-class softmax distribution over a\n configurable number of logistic classifiers. One of the classifiers in the\n mixture is not trained, and always predicts 0.\n\n Args:\n model_input: 'batch_size' x 'num_features' matrix of input features.\n vocab_size: The number of classes in the dataset.\n num_mixtures: The number of mixtures (excluding a dummy 'expert' that\n always predicts the non-existence of an entity).\n l2_penalty: How much to penalize the squared magnitudes of parameter\n values.\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n batch_size x num_classes.\n "
num_mixtures = (num_mixtures or FLAGS.moe_num_mixtures)
gate_activations = slim.fully_connected(model_input, (vocab_size * (num_mixtures + 1)), activation_fn=None, biases_initializer=None, weights_regularizer=slim.l2_regularizer(l2_penalty), scope='gates')
expert_activations = slim.fully_connected(model_input, (vocab_size * num_mixtures), activation_fn=None, weights_regularizer=slim.l2_regularizer(l2_penalty), scope='experts')
gating_distribution = tf.nn.softmax(tf.reshape(gate_activations, [(- 1), (num_mixtures + 1)]))
expert_distribution = tf.nn.sigmoid(tf.reshape(expert_activations, [(- 1), num_mixtures]))
final_probabilities_by_class_and_batch = tf.reduce_sum((gating_distribution[:, :num_mixtures] * expert_distribution), 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch, [(- 1), vocab_size])
return {'predictions': final_probabilities} |
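A minimal NumPy sketch of the gating arithmetic in the mixture-of-experts record above: a per-class softmax gate over num_mixtures + 1 options, sigmoid experts, and the last (never-trained) gate column dropped. Shapes and weight names are assumptions for illustration; the record itself builds these with tf.slim layers.

import numpy as np

def moe_head_sketch(features, w_gate, w_expert, num_mixtures, vocab_size):
    # features: (batch, dim); w_gate: (dim, vocab_size * (num_mixtures + 1));
    # w_expert: (dim, vocab_size * num_mixtures). Weights are assumed to be given.
    gate_logits = (features @ w_gate).reshape(-1, num_mixtures + 1)
    gate_logits = gate_logits - gate_logits.max(axis=1, keepdims=True)   # stable softmax
    gate = np.exp(gate_logits)
    gate = gate / gate.sum(axis=1, keepdims=True)
    expert = 1.0 / (1.0 + np.exp(-(features @ w_expert).reshape(-1, num_mixtures)))
    # The extra gate column belongs to a dummy expert that always predicts 0,
    # so its contribution is simply omitted from the weighted sum.
    probs = (gate[:, :num_mixtures] * expert).sum(axis=1)
    return probs.reshape(-1, vocab_size)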
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
'\n Args:\n pred_bbox3d: (N, 7) float Tensor.\n gt_bbox3d: (N, 7) float Tensor.\n\n Returns:\n corner_loss: (N) float Tensor.\n '
assert (pred_bbox3d.shape[0] == gt_bbox3d.shape[0])
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
corner_dist = torch.min(torch.norm((pred_box_corners - gt_box_corners), dim=2), torch.norm((pred_box_corners - gt_box_corners_flip), dim=2))
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1) | 5,956,361,160,264,300,000 | Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor. | pcdet/utils/loss_utils.py | get_corner_loss_lidar | ocNflag/point2seq | python | def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
'\n Args:\n pred_bbox3d: (N, 7) float Tensor.\n gt_bbox3d: (N, 7) float Tensor.\n\n Returns:\n corner_loss: (N) float Tensor.\n '
assert (pred_bbox3d.shape[0] == gt_bbox3d.shape[0])
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
corner_dist = torch.min(torch.norm((pred_box_corners - gt_box_corners), dim=2), torch.norm((pred_box_corners - gt_box_corners_flip), dim=2))
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1) |
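A rough NumPy sketch of the flip-aware corner loss above. It uses a simplified bird's-eye-view corner helper in place of box_utils.boxes_to_corners_3d, which is not reproduced here; the (x, y, z, dx, dy, dz, yaw) box layout is inferred from the indexing in the record.

import numpy as np

def bev_corners(boxes):
    # Simplified stand-in: 4 BEV corners per (x, y, z, dx, dy, dz, yaw) box;
    # the real boxes_to_corners_3d returns all 8 corners in 3D.
    x, y, dx, dy, yaw = boxes[:, 0], boxes[:, 1], boxes[:, 3], boxes[:, 4], boxes[:, 6]
    template = np.array([[0.5, 0.5], [0.5, -0.5], [-0.5, -0.5], [-0.5, 0.5]])
    local = template[None, :, :] * np.stack([dx, dy], axis=1)[:, None, :]
    rot = np.stack([np.stack([np.cos(yaw), -np.sin(yaw)], axis=-1),
                    np.stack([np.sin(yaw), np.cos(yaw)], axis=-1)], axis=-2)   # (N, 2, 2)
    return np.einsum('nij,nkj->nki', rot, local) + np.stack([x, y], axis=-1)[:, None, :]

def corner_loss_sketch(pred_boxes, gt_boxes, beta=1.0):
    gt_flip = gt_boxes.copy()
    gt_flip[:, 6] += np.pi                                    # heading rotated by 180 degrees
    d = np.linalg.norm(bev_corners(pred_boxes) - bev_corners(gt_boxes), axis=2)
    d_flip = np.linalg.norm(bev_corners(pred_boxes) - bev_corners(gt_flip), axis=2)
    dist = np.minimum(d, d_flip)                              # forgive the heading ambiguity
    huber = np.where(dist < beta, 0.5 * dist ** 2 / beta, dist - 0.5 * beta)
    return huber.mean(axis=1)                                 # (N,) corner loss per box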
def __init__(self, gamma: float=2.0, alpha: float=0.25):
'\n Args:\n gamma: Weighting parameter to balance loss for hard and easy examples.\n alpha: Weighting parameter to balance loss for positive and negative examples.\n '
super().__init__()
self.alpha = alpha
self.gamma = gamma | 2,511,071,533,760,564,700 | Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples. | pcdet/utils/loss_utils.py | __init__ | ocNflag/point2seq | python | def __init__(self, gamma: float=2.0, alpha: float=0.25):
'\n Args:\n gamma: Weighting parameter to balance loss for hard and easy examples.\n alpha: Weighting parameter to balance loss for positive and negative examples.\n '
super().__init__()
self.alpha = alpha
self.gamma = gamma |
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
' PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:\n max(x, 0) - x * z + log(1 + exp(-abs(x))) in\n https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits\n\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n\n Returns:\n loss: (B, #anchors, #classes) float tensor.\n Sigmoid cross entropy loss without reduction\n '
loss = ((torch.clamp(input, min=0) - (input * target)) + torch.log1p(torch.exp((- torch.abs(input)))))
return loss | -8,905,405,198,205,577,000 | PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction | pcdet/utils/loss_utils.py | sigmoid_cross_entropy_with_logits | ocNflag/point2seq | python | @staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
' PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:\n max(x, 0) - x * z + log(1 + exp(-abs(x))) in\n https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits\n\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n\n Returns:\n loss: (B, #anchors, #classes) float tensor.\n Sigmoid cross entropy loss without reduction\n '
loss = ((torch.clamp(input, min=0) - (input * target)) + torch.log1p(torch.exp((- torch.abs(input)))))
return loss |
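The docstring above points at the numerically stable form max(x, 0) - x*z + log(1 + exp(-abs(x))). A quick NumPy check, on arbitrarily chosen values, that it agrees with the naive -[z*log(p) + (1-z)*log(1-p)] while staying finite for large logits:

import numpy as np

x = np.array([-50.0, -2.0, 0.0, 3.0, 40.0])    # logits
z = np.array([0.0, 1.0, 1.0, 0.0, 1.0])        # binary targets

stable = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))

# Naive binary cross entropy; clipped only so the comparison itself avoids log(0).
p = 1.0 / (1.0 + np.exp(-x))
naive = -(z * np.log(np.clip(p, 1e-12, 1.0)) + (1 - z) * np.log(np.clip(1 - p, 1e-12, 1.0)))

print(np.allclose(stable, naive, atol=1e-6))   # True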
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
'\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n weighted_loss: (B, #anchors, #classes) float tensor after weighting.\n '
pred_sigmoid = torch.sigmoid(input)
alpha_weight = ((target * self.alpha) + ((1 - target) * (1 - self.alpha)))
pt = ((target * (1.0 - pred_sigmoid)) + ((1.0 - target) * pred_sigmoid))
focal_weight = (alpha_weight * torch.pow(pt, self.gamma))
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = (focal_weight * bce_loss)
if ((weights.shape.__len__() == 2) or ((weights.shape.__len__() == 1) and (target.shape.__len__() == 2))):
weights = weights.unsqueeze((- 1))
assert (weights.shape.__len__() == loss.shape.__len__())
return (loss * weights) | -8,421,976,740,454,377,000 | Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting. | pcdet/utils/loss_utils.py | forward | ocNflag/point2seq | python | def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
'\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n weighted_loss: (B, #anchors, #classes) float tensor after weighting.\n '
pred_sigmoid = torch.sigmoid(input)
alpha_weight = ((target * self.alpha) + ((1 - target) * (1 - self.alpha)))
pt = ((target * (1.0 - pred_sigmoid)) + ((1.0 - target) * pred_sigmoid))
focal_weight = (alpha_weight * torch.pow(pt, self.gamma))
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = (focal_weight * bce_loss)
if ((weights.shape.__len__() == 2) or ((weights.shape.__len__() == 1) and (target.shape.__len__() == 2))):
weights = weights.unsqueeze((- 1))
assert (weights.shape.__len__() == loss.shape.__len__())
return (loss * weights) |
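A condensed sketch of the per-element weighting applied in the forward pass above: alpha balancing times a (1 - p_t)^gamma focusing factor on top of the stable binary cross entropy. The anchor-weight broadcasting and any reduction are left out.

import torch

def sigmoid_focal_elementwise(logits, targets, alpha=0.25, gamma=2.0):
    # targets are binary / one-hot and have the same shape as logits.
    p = torch.sigmoid(logits)
    bce = torch.clamp(logits, min=0) - logits * targets + torch.log1p(torch.exp(-logits.abs()))
    alpha_w = targets * alpha + (1 - targets) * (1 - alpha)     # class balance
    one_minus_pt = targets * (1 - p) + (1 - targets) * p        # small for easy examples
    return alpha_w * one_minus_pt.pow(gamma) * bce              # no reduction applied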
def __init__(self, beta: float=(1.0 / 9.0), code_weights: list=None):
'\n Args:\n beta: Scalar float.\n L1 to L2 change point.\n For beta values < 1e-5, L1 loss is computed.\n code_weights: (#codes) float list if not None.\n Code-wise weights.\n '
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if (code_weights is not None):
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda() | 6,300,698,351,211,915,000 | Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights. | pcdet/utils/loss_utils.py | __init__ | ocNflag/point2seq | python | def __init__(self, beta: float=(1.0 / 9.0), code_weights: list=None):
'\n Args:\n beta: Scalar float.\n L1 to L2 change point.\n For beta values < 1e-5, L1 loss is computed.\n code_weights: (#codes) float list if not None.\n Code-wise weights.\n '
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if (code_weights is not None):
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda() |
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor=None):
'\n Args:\n input: (B, #anchors, #codes) float tensor.\n Encoded predicted locations of objects.\n target: (B, #anchors, #codes) float tensor.\n Regression targets.\n weights: (B, #anchors) float tensor if not None.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted smooth l1 loss without reduction.\n '
target = torch.where(torch.isnan(target), input, target)
diff = (input - target)
if (self.code_weights is not None):
diff = (diff * self.code_weights.view(1, 1, (- 1)))
loss = self.smooth_l1_loss(diff, self.beta)
if (weights is not None):
assert ((weights.shape[0] == loss.shape[0]) and (weights.shape[1] == loss.shape[1]))
loss = (loss * weights.unsqueeze((- 1)))
return loss | -3,411,104,254,395,770,400 | Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction. | pcdet/utils/loss_utils.py | forward | ocNflag/point2seq | python | def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor=None):
'\n Args:\n input: (B, #anchors, #codes) float tensor.\n Encoded predicted locations of objects.\n target: (B, #anchors, #codes) float tensor.\n Regression targets.\n weights: (B, #anchors) float tensor if not None.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted smooth l1 loss without reduction.\n '
target = torch.where(torch.isnan(target), input, target)
diff = (input - target)
if (self.code_weights is not None):
diff = (diff * self.code_weights.view(1, 1, (- 1)))
loss = self.smooth_l1_loss(diff, self.beta)
if (weights is not None):
assert ((weights.shape[0] == loss.shape[0]) and (weights.shape[1] == loss.shape[1]))
loss = (loss * weights.unsqueeze((- 1)))
return loss |
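The smooth_l1_loss static helper called above is not part of this slice. A sketch consistent with the beta handling described in the __init__ record (quadratic below beta, linear above, plain L1 once beta drops under 1e-5) could look like this; the project's actual helper may differ in detail.

import torch

def smooth_l1(diff, beta=1.0 / 9.0):
    # diff is the already code-weighted elementwise difference tensor.
    if beta < 1e-5:                        # tiny beta degenerates to plain L1
        return diff.abs()
    n = diff.abs()
    return torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)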
def __init__(self, code_weights: list=None):
'\n Args:\n code_weights: (#codes) float list if not None.\n Code-wise weights.\n '
super(WeightedL1Loss, self).__init__()
if (code_weights is not None):
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda() | -778,590,620,506,814,300 | Args:
code_weights: (#codes) float list if not None.
Code-wise weights. | pcdet/utils/loss_utils.py | __init__ | ocNflag/point2seq | python | def __init__(self, code_weights: list=None):
'\n Args:\n code_weights: (#codes) float list if not None.\n Code-wise weights.\n '
super(WeightedL1Loss, self).__init__()
if (code_weights is not None):
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda() |
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor=None):
'\n Args:\n input: (B, #anchors, #codes) float tensor.\n Encoded predicted locations of objects.\n target: (B, #anchors, #codes) float tensor.\n Regression targets.\n weights: (B, #anchors) float tensor if not None.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted smooth l1 loss without reduction.\n '
target = torch.where(torch.isnan(target), input, target)
diff = (input - target)
if (self.code_weights is not None):
diff = (diff * self.code_weights.view(1, 1, (- 1)))
loss = torch.abs(diff)
if (weights is not None):
assert ((weights.shape[0] == loss.shape[0]) and (weights.shape[1] == loss.shape[1]))
loss = (loss * weights.unsqueeze((- 1)))
return loss | -169,834,570,559,773,700 | Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction. | pcdet/utils/loss_utils.py | forward | ocNflag/point2seq | python | def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor=None):
'\n Args:\n input: (B, #anchors, #codes) float tensor.\n Ecoded predicted locations of objects.\n target: (B, #anchors, #codes) float tensor.\n Regression targets.\n weights: (B, #anchors) float tensor if not None.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted smooth l1 loss without reduction.\n '
target = torch.where(torch.isnan(target), input, target)
diff = (input - target)
if (self.code_weights is not None):
diff = (diff * self.code_weights.view(1, 1, (- 1)))
loss = torch.abs(diff)
if (weights is not None):
assert ((weights.shape[0] == loss.shape[0]) and (weights.shape[1] == loss.shape[1]))
loss = (loss * weights.unsqueeze((- 1)))
return loss |
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
'\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class.\n target: (B, #anchors, #classes) float tensor.\n One-hot classification targets.\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted cross entropy loss without reduction\n '
input = input.permute(0, 2, 1)
target = target.argmax(dim=(- 1))
loss = (F.cross_entropy(input, target, reduction='none') * weights)
return loss | 5,017,373,398,995,985,000 | Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction | pcdet/utils/loss_utils.py | forward | ocNflag/point2seq | python | def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
'\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class.\n target: (B, #anchors, #classes) float tensor.\n One-hot classification targets.\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted cross entropy loss without reduction\n '
input = input.permute(0, 2, 1)
target = target.argmax(dim=(- 1))
loss = (F.cross_entropy(input, target, reduction='none') * weights)
return loss |
def _neg_loss(self, pred, gt):
' Modified focal loss. Exactly the same as CornerNet.\n Runs faster and costs a little bit more memory\n Arguments:\n pred (batch x c x h x w)\n gt_regr (batch x c x h x w)\n '
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow((1 - gt), 4)
loss = 0
pos_loss = ((torch.log(pred) * torch.pow((1 - pred), 2)) * pos_inds)
neg_loss = (((torch.log((1 - pred)) * torch.pow(pred, 2)) * neg_weights) * neg_inds)
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if (num_pos == 0):
loss = (loss - neg_loss)
else:
loss = (loss - ((pos_loss + neg_loss) / num_pos))
return loss | 389,137,059,020,501,800 | Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w) | pcdet/utils/loss_utils.py | _neg_loss | ocNflag/point2seq | python | def _neg_loss(self, pred, gt):
' Modified focal loss. Exactly the same as CornerNet.\n Runs faster and costs a little bit more memory\n Arguments:\n pred (batch x c x h x w)\n gt_regr (batch x c x h x w)\n '
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow((1 - gt), 4)
loss = 0
pos_loss = ((torch.log(pred) * torch.pow((1 - pred), 2)) * pos_inds)
neg_loss = (((torch.log((1 - pred)) * torch.pow(pred, 2)) * neg_weights) * neg_inds)
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if (num_pos == 0):
loss = (loss - neg_loss)
else:
loss = (loss - ((pos_loss + neg_loss) / num_pos))
return loss |
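A standalone NumPy sketch of the penalty-reduced heatmap focal loss above: exact positives (gt == 1) use (1 - p)^2 * log(p), and negatives near a Gaussian peak are softened by the (1 - gt)^4 factor. The clipping epsilon is an added safeguard, not in the original.

import numpy as np

def heatmap_focal(pred, gt, eps=1e-6):
    # pred, gt: same-shaped arrays; gt holds Gaussian-splatted targets in [0, 1].
    pred = np.clip(pred, eps, 1 - eps)
    pos = (gt == 1).astype(pred.dtype)
    neg = 1.0 - pos
    neg_weights = (1 - gt) ** 4                                # penalty reduction near centers
    pos_loss = (np.log(pred) * (1 - pred) ** 2 * pos).sum()
    neg_loss = (np.log(1 - pred) * pred ** 2 * neg_weights * neg).sum()
    num_pos = pos.sum()
    return -neg_loss if num_pos == 0 else -(pos_loss + neg_loss) / num_pos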
def _reg_loss(self, regr, gt_regr, mask):
' L1 regression loss\n Arguments:\n regr (batch x max_objects x dim)\n gt_regr (batch x max_objects x dim)\n mask (batch x max_objects)\n '
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = (regr * mask)
gt_regr = (gt_regr * mask)
loss = torch.abs((regr - gt_regr))
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = (loss / (num + 0.0001))
return loss | -2,449,150,389,028,161,000 | L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects) | pcdet/utils/loss_utils.py | _reg_loss | ocNflag/point2seq | python | def _reg_loss(self, regr, gt_regr, mask):
' L1 regression loss\n Arguments:\n regr (batch x max_objects x dim)\n gt_regr (batch x max_objects x dim)\n mask (batch x max_objects)\n '
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = (regr * mask)
gt_regr = (gt_regr * mask)
loss = torch.abs((regr - gt_regr))
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = (loss / (num + 0.0001))
return loss |
def __init__(self, gamma: float=2.0, alpha: float=0.25):
'\n Args:\n gamma: Weighting parameter to balance loss for hard and easy examples.\n alpha: Weighting parameter to balance loss for positive and negative examples.\n '
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma | -5,609,581,717,160,888,000 | Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples. | pcdet/utils/loss_utils.py | __init__ | ocNflag/point2seq | python | def __init__(self, gamma: float=2.0, alpha: float=0.25):
'\n Args:\n gamma: Weighting parameter to balance loss for hard and easy examples.\n alpha: Weighting parameter to balance loss for positive and negative examples.\n '
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma |
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
' PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:\n max(x, 0) - x * z + log(1 + exp(-abs(x))) in\n https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits\n\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n\n Returns:\n loss: (B, #anchors, #classes) float tensor.\n Sigmoid cross entropy loss without reduction\n '
loss = ((torch.clamp(input, min=0) - (input * target)) + torch.log1p(torch.exp((- torch.abs(input)))))
return loss | -8,905,405,198,205,577,000 | PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction | pcdet/utils/loss_utils.py | sigmoid_cross_entropy_with_logits | ocNflag/point2seq | python | @staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
' PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:\n max(x, 0) - x * z + log(1 + exp(-abs(x))) in\n https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits\n\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n\n Returns:\n loss: (B, #anchors, #classes) float tensor.\n Sigmoid cross entropy loss without reduction\n '
loss = ((torch.clamp(input, min=0) - (input * target)) + torch.log1p(torch.exp((- torch.abs(input)))))
return loss |
def forward(self, input: torch.Tensor, target: torch.Tensor):
'\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n weighted_loss: (B, #anchors, #classes) float tensor after weighting.\n '
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = ((target * self.alpha) + ((1 - target) * (1 - self.alpha)))
pt = ((target * (1.0 - pred_sigmoid)) + ((1.0 - target) * pred_sigmoid))
focal_weight = (alpha_weight * torch.pow(pt, self.gamma))
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = (focal_weight * bce_loss)
loss = (loss.sum() / num_pos)
return loss | -8,527,889,468,556,255,000 | Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting. | pcdet/utils/loss_utils.py | forward | ocNflag/point2seq | python | def forward(self, input: torch.Tensor, target: torch.Tensor):
'\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n weighted_loss: (B, #anchors, #classes) float tensor after weighting.\n '
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = ((target * self.alpha) + ((1 - target) * (1 - self.alpha)))
pt = ((target * (1.0 - pred_sigmoid)) + ((1.0 - target) * pred_sigmoid))
focal_weight = (alpha_weight * torch.pow(pt, self.gamma))
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = (focal_weight * bce_loss)
loss = (loss.sum() / num_pos)
return loss |
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
' L1 regression loss\n Arguments:\n regr (batch x max_objects x dim)\n gt_regr (batch x max_objects x dim)\n mask (batch x max_objects)\n '
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = (regr * mask)
gt_regr = (gt_regr * mask)
abs_diff = torch.abs((regr - gt_regr))
abs_diff_lt_1 = torch.le(abs_diff, (1 / (sigma ** 2))).type_as(abs_diff)
loss = (((abs_diff_lt_1 * 0.5) * torch.pow((abs_diff * sigma), 2)) + ((abs_diff - (0.5 / (sigma ** 2))) * (1.0 - abs_diff_lt_1)))
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = (loss / (num + 0.0001))
return loss | 7,120,285,669,924,297,000 | L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects) | pcdet/utils/loss_utils.py | _smooth_reg_loss | ocNflag/point2seq | python | def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
' L1 regression loss\n Arguments:\n regr (batch x max_objects x dim)\n gt_regr (batch x max_objects x dim)\n mask (batch x max_objects)\n '
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = (regr * mask)
gt_regr = (gt_regr * mask)
abs_diff = torch.abs((regr - gt_regr))
abs_diff_lt_1 = torch.le(abs_diff, (1 / (sigma ** 2))).type_as(abs_diff)
loss = (((abs_diff_lt_1 * 0.5) * torch.pow((abs_diff * sigma), 2)) + ((abs_diff - (0.5 / (sigma ** 2))) * (1.0 - abs_diff_lt_1)))
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = (loss / (num + 0.0001))
return loss |
def __init__(self, gamma: float=2.0, alpha: float=0.25, reduction='mean'):
'\n Args:\n gamma: Weighting parameter to balance loss for hard and easy examples.\n alpha: Weighting parameter to balance loss for positive and negative examples.\n '
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction | 8,031,476,695,436,176,000 | Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples. | pcdet/utils/loss_utils.py | __init__ | ocNflag/point2seq | python | def __init__(self, gamma: float=2.0, alpha: float=0.25, reduction='mean'):
'\n Args:\n gamma: Weighting parameter to balance loss for hard and easy examples.\n alpha: Weighting parameter to balance loss for positive and negative examples.\n '
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction |
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
' PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:\n max(x, 0) - x * z + log(1 + exp(-abs(x))) in\n https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits\n\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n\n Returns:\n loss: (B, #anchors, #classes) float tensor.\n Sigmoid cross entropy loss without reduction\n '
loss = ((torch.clamp(input, min=0) - (input * target)) + torch.log1p(torch.exp((- torch.abs(input)))))
return loss | -8,905,405,198,205,577,000 | PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction | pcdet/utils/loss_utils.py | sigmoid_cross_entropy_with_logits | ocNflag/point2seq | python | @staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
' PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:\n max(x, 0) - x * z + log(1 + exp(-abs(x))) in\n https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits\n\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n\n Returns:\n loss: (B, #anchors, #classes) float tensor.\n Sigmoid cross entropy loss without reduction\n '
loss = ((torch.clamp(input, min=0) - (input * target)) + torch.log1p(torch.exp((- torch.abs(input)))))
return loss |
def forward(self, input: torch.Tensor, target: torch.Tensor):
'\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n weighted_loss: (B, #anchors, #classes) float tensor after weighting.\n '
pred_sigmoid = torch.sigmoid(input)
alpha_weight = ((target * self.alpha) + ((1.0 - target) * (1.0 - self.alpha)))
pt = ((target * (1.0 - pred_sigmoid)) + ((1.0 - target) * pred_sigmoid))
focal_weight = (alpha_weight * torch.pow(pt, self.gamma))
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = (focal_weight * bce_loss)
if (self.reduction == 'sum'):
return loss.sum()
else:
return loss.mean() | 7,519,885,252,206,170,000 | Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting. | pcdet/utils/loss_utils.py | forward | ocNflag/point2seq | python | def forward(self, input: torch.Tensor, target: torch.Tensor):
'\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n weighted_loss: (B, #anchors, #classes) float tensor after weighting.\n '
pred_sigmoid = torch.sigmoid(input)
alpha_weight = ((target * self.alpha) + ((1.0 - target) * (1.0 - self.alpha)))
pt = ((target * (1.0 - pred_sigmoid)) + ((1.0 - target) * pred_sigmoid))
focal_weight = (alpha_weight * torch.pow(pt, self.gamma))
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = (focal_weight * bce_loss)
if (self.reduction == 'sum'):
return loss.sum()
else:
return loss.mean() |
def parse_model_config_params(model_params, num_settings, random_state):
'\n\n Args:\n model_params:\n num_settings:\n random_state:\n\n Returns:\n\n '
param_distributions = dict()
dist_types = dict(randint=randint, expon=expon, uniform=uniform)
for (param, param_value) in model_params.items():
if (param_value[0] in ['randint', 'expon', 'uniform']):
param_distributions[param] = dist_types[param_value[0]](*param_value[1:])
else:
param_distributions[param] = param_value
return sampler_generator(ParameterSampler(param_distributions, n_iter=num_settings, random_state=random_state)) | -215,715,886,021,214,560 | Args:
model_params:
num_settings:
random_state:
Returns: | scripts/search_ml_model_params.py | parse_model_config_params | NCAR/mlmicrophysics | python | def parse_model_config_params(model_params, num_settings, random_state):
'\n\n Args:\n model_params:\n num_settings:\n random_state:\n\n Returns:\n\n '
param_distributions = dict()
dist_types = dict(randint=randint, expon=expon, uniform=uniform)
for (param, param_value) in model_params.items():
if (param_value[0] in ['randint', 'expon', 'uniform']):
param_distributions[param] = dist_types[param_value[0]](*param_value[1:])
else:
param_distributions[param] = param_value
return sampler_generator(ParameterSampler(param_distributions, n_iter=num_settings, random_state=random_state)) |
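A small usage sketch of the ParameterSampler pattern above, with invented hyperparameter names (the real ones come from the project's YAML config). Distribution entries are built from scipy.stats, while plain lists are sampled uniformly.

from scipy.stats import randint, uniform
from sklearn.model_selection import ParameterSampler

param_distributions = {
    'n_estimators': randint(50, 500),      # e.g. a config entry ["randint", 50, 500]
    'max_features': uniform(0.1, 0.9),     # e.g. a config entry ["uniform", 0.1, 0.9]
    'criterion': ['mse', 'mae'],           # non-distribution entries pass through as lists
}
for setting in ParameterSampler(param_distributions, n_iter=3, random_state=0):
    print(setting)                         # one dict of sampled hyperparameters per draw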
def validate_model_configuration(classifier_model_name, classifier_model_config, regressor_model_name, regressor_model_config, config_index, train_scaled_input, train_labels, train_scaled_output, val_scaled_input, val_labels, val_scaled_output, classifier_metric_list, regressor_metric_list):
'\n Train a single machine learning model configuration to predict each microphysical tendency.\n\n Args:\n classifier_model_name:\n classifier_model_config:\n regressor_model_name:\n regressor_model_config:\n config_index:\n train_scaled_input:\n train_labels:\n train_scaled_output:\n val_scaled_input:\n val_labels:\n val_scaled_output:\n classifier_metric_list:\n regressor_metric_list:\n\n Returns:\n\n '
from mlmicrophysics.models import DenseNeuralNetwork, DenseGAN
import keras.backend as K
metrics = {'mse': mean_squared_error, 'mae': mean_absolute_error, 'r2': r2_score, 'hellinger': hellinger_distance, 'acc': accuracy_score, 'hss': heidke_skill_score, 'pss': peirce_skill_score}
sess = K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1))
K.set_session(sess)
with sess.as_default():
model_classes = {'RandomForestRegressor': RandomForestRegressor, 'RandomForestClassifier': RandomForestClassifier, 'DenseNeuralNetwork': DenseNeuralNetwork, 'DenseGAN': DenseGAN}
classifier_models = {}
regressor_models = {}
output_label_preds = pd.DataFrame(0, index=val_labels.index, columns=val_labels.columns, dtype=np.int32)
output_preds = pd.DataFrame(0, index=val_scaled_output.index, columns=val_scaled_output.columns, dtype=np.float32)
output_regressor_preds = pd.DataFrame(0, index=val_scaled_output.index, columns=val_scaled_output.columns, dtype=np.float32)
output_metric_columns = []
for output_col in train_scaled_output.columns:
for metric in classifier_metric_list:
output_metric_columns.append(((output_col + '_') + metric))
for metric in regressor_metric_list:
output_metric_columns.append(((output_col + '_') + metric))
unique_labels = np.unique(train_labels[output_col])
for unique_label in unique_labels:
for metric in regressor_metric_list:
output_metric_columns.append(f'{output_col}_{unique_label}_{metric}')
output_metrics = pd.Series(index=output_metric_columns, name=config_index, dtype=np.float32)
for output_col in train_scaled_output.columns:
print(output_col)
unique_labels = np.unique(train_labels[output_col])
if (unique_labels.size > 1):
if (classifier_model_name in ['DenseNeuralNetwork', 'DenseGAN']):
classifier_models[output_col] = model_classes[classifier_model_name](outputs=unique_labels.size, classifier=True, **classifier_model_config)
else:
classifier_models[output_col] = model_classes[classifier_model_name](**classifier_model_config)
classifier_models[output_col].fit(train_scaled_input, train_labels[output_col])
output_label_preds.loc[:, output_col] = classifier_models[output_col].predict(val_scaled_input)
for metric in classifier_metric_list:
output_metrics[((output_col + '_') + metric)] = metrics[metric](val_labels[output_col].values, output_label_preds[output_col].values)
else:
output_label_preds.loc[:, output_col] = unique_labels[0]
regressor_models[output_col] = {}
for label in unique_labels:
if (label != 0):
if (regressor_model_name in ['DenseNeuralNetwork', 'DenseGAN']):
regressor_models[output_col][label] = model_classes[regressor_model_name](classifier=False, **regressor_model_config)
else:
regressor_models[output_col][label] = model_classes[regressor_model_name](**regressor_model_config)
regressor_models[output_col][label].fit(train_scaled_input.loc[(train_labels[output_col] == label)], train_scaled_output.loc[((train_labels[output_col] == label), output_col)])
if (np.count_nonzero((output_label_preds[output_col] == label)) > 0):
output_preds.loc[((output_label_preds[output_col] == label), output_col)] = regressor_models[output_col][label].predict(val_scaled_input.loc[(output_label_preds[output_col] == label)])
output_regressor_preds.loc[((val_labels[output_col] == label), output_col)] = regressor_models[output_col][label].predict(val_scaled_input.loc[(val_labels[output_col] == label)])
for metric in regressor_metric_list:
output_metrics[f'{output_col}_{label}_{metric}'] = metrics[metric](val_scaled_output.loc[((val_labels[output_col] == label), output_col)].values, output_regressor_preds.loc[((val_labels[output_col] == label), output_col)].values)
for metric in regressor_metric_list:
output_metrics[((output_col + '_') + metric)] = metrics[metric](val_scaled_output[output_col].values, output_preds[output_col].values)
return output_metrics | -7,930,543,682,850,534,000 | Train a single machine learning model configuration to predict each microphysical tendency.
Args:
classifier_model_name:
classifier_model_config:
regressor_model_name:
regressor_model_config:
config_index:
train_scaled_input:
train_labels:
train_scaled_output:
val_scaled_input:
val_labels:
val_scaled_output:
classifier_metric_list:
regressor_metric_list:
Returns: | scripts/search_ml_model_params.py | validate_model_configuration | NCAR/mlmicrophysics | python | def validate_model_configuration(classifier_model_name, classifier_model_config, regressor_model_name, regressor_model_config, config_index, train_scaled_input, train_labels, train_scaled_output, val_scaled_input, val_labels, val_scaled_output, classifier_metric_list, regressor_metric_list):
'\n Train a single machine learning model configuration to predict each microphysical tendency.\n\n Args:\n classifier_model_name:\n classifier_model_config:\n regressor_model_name:\n regressor_model_config:\n config_index:\n train_scaled_input:\n train_labels:\n train_scaled_output:\n val_scaled_input:\n val_labels:\n val_scaled_output:\n classifier_metric_list:\n regressor_metric_list:\n\n Returns:\n\n '
from mlmicrophysics.models import DenseNeuralNetwork, DenseGAN
import keras.backend as K
metrics = {'mse': mean_squared_error, 'mae': mean_absolute_error, 'r2': r2_score, 'hellinger': hellinger_distance, 'acc': accuracy_score, 'hss': heidke_skill_score, 'pss': peirce_skill_score}
sess = K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1))
K.set_session(sess)
with sess.as_default():
model_classes = {'RandomForestRegressor': RandomForestRegressor, 'RandomForestClassifier': RandomForestClassifier, 'DenseNeuralNetwork': DenseNeuralNetwork, 'DenseGAN': DenseGAN}
classifier_models = {}
regressor_models = {}
output_label_preds = pd.DataFrame(0, index=val_labels.index, columns=val_labels.columns, dtype=np.int32)
output_preds = pd.DataFrame(0, index=val_scaled_output.index, columns=val_scaled_output.columns, dtype=np.float32)
output_regressor_preds = pd.DataFrame(0, index=val_scaled_output.index, columns=val_scaled_output.columns, dtype=np.float32)
output_metric_columns = []
for output_col in train_scaled_output.columns:
for metric in classifier_metric_list:
output_metric_columns.append(((output_col + '_') + metric))
for metric in regressor_metric_list:
output_metric_columns.append(((output_col + '_') + metric))
unique_labels = np.unique(train_labels[output_col])
for unique_label in unique_labels:
for metric in regressor_metric_list:
output_metric_columns.append(f'{output_col}_{unique_label}_{metric}')
output_metrics = pd.Series(index=output_metric_columns, name=config_index, dtype=np.float32)
for output_col in train_scaled_output.columns:
print(output_col)
unique_labels = np.unique(train_labels[output_col])
if (unique_labels.size > 1):
if (classifier_model_name in ['DenseNeuralNetwork', 'DenseGAN']):
classifier_models[output_col] = model_classes[classifier_model_name](outputs=unique_labels.size, classifier=True, **classifier_model_config)
else:
classifier_models[output_col] = model_classes[classifier_model_name](**classifier_model_config)
classifier_models[output_col].fit(train_scaled_input, train_labels[output_col])
output_label_preds.loc[:, output_col] = classifier_models[output_col].predict(val_scaled_input)
for metric in classifier_metric_list:
output_metrics[((output_col + '_') + metric)] = metrics[metric](val_labels[output_col].values, output_label_preds[output_col].values)
else:
output_label_preds.loc[:, output_col] = unique_labels[0]
regressor_models[output_col] = {}
for label in unique_labels:
if (label != 0):
if (regressor_model_name in ['DenseNeuralNetwork', 'DenseGAN']):
regressor_models[output_col][label] = model_classes[regressor_model_name](classifier=False, **regressor_model_config)
else:
regressor_models[output_col][label] = model_classes[regressor_model_name](**regressor_model_config)
regressor_models[output_col][label].fit(train_scaled_input.loc[(train_labels[output_col] == label)], train_scaled_output.loc[((train_labels[output_col] == label), output_col)])
if (np.count_nonzero((output_label_preds[output_col] == label)) > 0):
output_preds.loc[((output_label_preds[output_col] == label), output_col)] = regressor_models[output_col][label].predict(val_scaled_input.loc[(output_label_preds[output_col] == label)])
output_regressor_preds.loc[((val_labels[output_col] == label), output_col)] = regressor_models[output_col][label].predict(val_scaled_input.loc[(val_labels[output_col] == label)])
for metric in regressor_metric_list:
output_metrics[f'{output_col}_{label}_{metric}'] = metrics[metric](val_scaled_output.loc[((val_labels[output_col] == label), output_col)].values, output_regressor_preds.loc[((val_labels[output_col] == label), output_col)].values)
for metric in regressor_metric_list:
output_metrics[((output_col + '_') + metric)] = metrics[metric](val_scaled_output[output_col].values, output_preds[output_col].values)
return output_metrics |
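Stripped to its core, the routine above is a classify-then-regress scheme: a classifier predicts which tendency bin a sample falls in, a separate regressor is fit per nonzero bin, and predictions are stitched back together with bin 0 held at exactly zero. A compact scikit-learn sketch of that pattern, leaving out the scaling, the Keras model options and the metric bookkeeping.

import numpy as np
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor

def fit_classify_then_regress(X, bin_labels, y):
    clf = RandomForestClassifier(n_estimators=50).fit(X, bin_labels)
    regs = {b: RandomForestRegressor(n_estimators=50).fit(X[bin_labels == b], y[bin_labels == b])
            for b in np.unique(bin_labels) if b != 0}
    return clf, regs

def predict_classify_then_regress(clf, regs, X):
    predicted_bins = clf.predict(X)
    out = np.zeros(len(X))                 # samples classified into bin 0 keep a zero tendency
    for b, reg in regs.items():
        mask = predicted_bins == b
        if mask.any():
            out[mask] = reg.predict(X[mask])
    return out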
def test_integers(self):
'Adding an integer as a tag should raise a ValueError (#237).'
apple = self.food_model.objects.create(name='apple')
with self.assertRaisesRegexp(ValueError, "Cannot add 1 \\(<(type|class) 'int'>\\). Expected <class 'django.db.models.base.ModelBase'> or str."):
apple.tags.add(1) | 2,789,897,774,914,508,000 | Adding an integer as a tag should raise a ValueError (#237). | tests/tests.py | test_integers | Immensa/django-taggit | python | def test_integers(self):
apple = self.food_model.objects.create(name='apple')
with self.assertRaisesRegexp(ValueError, "Cannot add 1 \\(<(type|class) 'int'>\\). Expected <class 'django.db.models.base.ModelBase'> or str."):
apple.tags.add(1) |
def test_similarity_by_tag(self):
'Test that pears are more similar to apples than watermelons'
apple = self.food_model.objects.create(name='apple')
apple.tags.add('green', 'juicy', 'small', 'sour')
pear = self.food_model.objects.create(name='pear')
pear.tags.add('green', 'juicy', 'small', 'sweet')
watermelon = self.food_model.objects.create(name='watermelon')
watermelon.tags.add('green', 'juicy', 'large', 'sweet')
similar_objs = apple.tags.similar_objects()
self.assertEqual(similar_objs, [pear, watermelon])
self.assertEqual([obj.similar_tags for obj in similar_objs], [3, 2]) | 1,360,618,908,624,745,200 | Test that pears are more similar to apples than watermelons | tests/tests.py | test_similarity_by_tag | Immensa/django-taggit | python | def test_similarity_by_tag(self):
apple = self.food_model.objects.create(name='apple')
apple.tags.add('green', 'juicy', 'small', 'sour')
pear = self.food_model.objects.create(name='pear')
pear.tags.add('green', 'juicy', 'small', 'sweet')
watermelon = self.food_model.objects.create(name='watermelon')
watermelon.tags.add('green', 'juicy', 'large', 'sweet')
similar_objs = apple.tags.similar_objects()
self.assertEqual(similar_objs, [pear, watermelon])
self.assertEqual([obj.similar_tags for obj in similar_objs], [3, 2]) |
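The ordering asserted above (pear before watermelon, with similar_tags of 3 and 2) amounts to ranking by shared-tag count, as this tiny set-based illustration shows:

apple = {'green', 'juicy', 'small', 'sour'}
others = {
    'pear': {'green', 'juicy', 'small', 'sweet'},
    'watermelon': {'green', 'juicy', 'large', 'sweet'},
}
ranked = sorted(others, key=lambda name: len(apple & others[name]), reverse=True)
print(ranked)    # ['pear', 'watermelon'], with shared tag counts 3 and 2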
def test_with_simple_space_delimited_tags(self):
'\n Test with simple space-delimited tags.\n '
self.assertEqual(parse_tags('one'), ['one'])
self.assertEqual(parse_tags('one two'), ['one', 'two'])
self.assertEqual(parse_tags('one two three'), ['one', 'three', 'two'])
self.assertEqual(parse_tags('one one two two'), ['one', 'two']) | -1,707,843,335,423,203,600 | Test with simple space-delimited tags. | tests/tests.py | test_with_simple_space_delimited_tags | Immensa/django-taggit | python | def test_with_simple_space_delimited_tags(self):
'\n \n '
self.assertEqual(parse_tags('one'), ['one'])
self.assertEqual(parse_tags('one two'), ['one', 'two'])
self.assertEqual(parse_tags('one two three'), ['one', 'three', 'two'])
self.assertEqual(parse_tags('one one two two'), ['one', 'two']) |
def test_with_comma_delimited_multiple_words(self):
'\n Test with comma-delimited multiple words.\n An unquoted comma in the input will trigger this.\n '
self.assertEqual(parse_tags(',one'), ['one'])
self.assertEqual(parse_tags(',one two'), ['one two'])
self.assertEqual(parse_tags(',one two three'), ['one two three'])
self.assertEqual(parse_tags('a-one, a-two and a-three'), ['a-one', 'a-two and a-three']) | 2,158,762,115,256,022,300 | Test with comma-delimited multiple words.
An unquoted comma in the input will trigger this. | tests/tests.py | test_with_comma_delimited_multiple_words | Immensa/django-taggit | python | def test_with_comma_delimited_multiple_words(self):
'\n Test with comma-delimited multiple words.\n An unquoted comma in the input will trigger this.\n '
self.assertEqual(parse_tags(',one'), ['one'])
self.assertEqual(parse_tags(',one two'), ['one two'])
self.assertEqual(parse_tags(',one two three'), ['one two three'])
self.assertEqual(parse_tags('a-one, a-two and a-three'), ['a-one', 'a-two and a-three']) |
def test_with_double_quoted_multiple_words(self):
'\n Test with double-quoted multiple words.\n A completed quote will trigger this. Unclosed quotes are ignored.\n '
self.assertEqual(parse_tags('"one'), ['one'])
self.assertEqual(parse_tags('"one two'), ['one', 'two'])
self.assertEqual(parse_tags('"one two three'), ['one', 'three', 'two'])
self.assertEqual(parse_tags('"one two"'), ['one two'])
self.assertEqual(parse_tags('a-one "a-two and a-three"'), ['a-one', 'a-two and a-three']) | 5,358,388,618,481,210,000 | Test with double-quoted multiple words.
A completed quote will trigger this. Unclosed quotes are ignored. | tests/tests.py | test_with_double_quoted_multiple_words | Immensa/django-taggit | python | def test_with_double_quoted_multiple_words(self):
'\n Test with double-quoted multiple words.\n A completed quote will trigger this. Unclosed quotes are ignored.\n '
self.assertEqual(parse_tags('"one'), ['one'])
self.assertEqual(parse_tags('"one two'), ['one', 'two'])
self.assertEqual(parse_tags('"one two three'), ['one', 'three', 'two'])
self.assertEqual(parse_tags('"one two"'), ['one two'])
self.assertEqual(parse_tags('a-one "a-two and a-three"'), ['a-one', 'a-two and a-three']) |
def test_with_no_loose_commas(self):
'\n Test with no loose commas -- split on spaces.\n '
self.assertEqual(parse_tags('one two "thr,ee"'), ['one', 'thr,ee', 'two']) | 6,121,131,418,971,766,000 | Test with no loose commas -- split on spaces. | tests/tests.py | test_with_no_loose_commas | Immensa/django-taggit | python | def test_with_no_loose_commas(self):
'\n \n '
self.assertEqual(parse_tags('one two "thr,ee"'), ['one', 'thr,ee', 'two']) |
def test_with_loose_commas(self):
'\n Loose commas - split on commas\n '
self.assertEqual(parse_tags('"one", two three'), ['one', 'two three']) | 4,144,708,498,068,357,000 | Loose commas - split on commas | tests/tests.py | test_with_loose_commas | Immensa/django-taggit | python | def test_with_loose_commas(self):
'\n \n '
self.assertEqual(parse_tags('"one", two three'), ['one', 'two three']) |
def test_tags_with_double_quotes_can_contain_commas(self):
'\n Double quotes can contain commas\n '
self.assertEqual(parse_tags('a-one "a-two, and a-three"'), ['a-one', 'a-two, and a-three'])
self.assertEqual(parse_tags('"two", one, one, two, "one"'), ['one', 'two']) | 2,664,990,977,619,601,400 | Double quotes can contain commas | tests/tests.py | test_tags_with_double_quotes_can_contain_commas | Immensa/django-taggit | python | def test_tags_with_double_quotes_can_contain_commas(self):
'\n \n '
self.assertEqual(parse_tags('a-one "a-two, and a-three"'), ['a-one', 'a-two, and a-three'])
self.assertEqual(parse_tags('"two", one, one, two, "one"'), ['one', 'two']) |
def test_with_naughty_input(self):
'\n Test with naughty input.\n '
self.assertEqual(parse_tags(None), [])
self.assertEqual(parse_tags(''), [])
self.assertEqual(parse_tags('"'), [])
self.assertEqual(parse_tags('""'), [])
self.assertEqual(parse_tags(('"' * 7)), [])
self.assertEqual(parse_tags(',,,,,,'), [])
self.assertEqual(parse_tags('",",",",",",","'), [','])
self.assertEqual(parse_tags('a-one "a-two" and "a-three'), ['a-one', 'a-three', 'a-two', 'and']) | 6,806,080,594,845,519,000 | Test with naughty input. | tests/tests.py | test_with_naughty_input | Immensa/django-taggit | python | def test_with_naughty_input(self):
'\n \n '
self.assertEqual(parse_tags(None), [])
self.assertEqual(parse_tags(''), [])
self.assertEqual(parse_tags('"'), [])
self.assertEqual(parse_tags('""'), [])
self.assertEqual(parse_tags(('"' * 7)), [])
self.assertEqual(parse_tags(',,,,,,'), [])
self.assertEqual(parse_tags('",",",",",",","'), [','])
self.assertEqual(parse_tags('a-one "a-two" and "a-three'), ['a-one', 'a-three', 'a-two', 'and']) |
@pytest.fixture
def cache(request):
'\n Return a cache object that can persist state between testing sessions.\n\n cache.get(key, default)\n cache.set(key, value)\n\n Keys must be a ``/`` separated value, where the first part is usually the\n name of your plugin or application to avoid clashes with other cache users.\n\n Values can be any object handled by the json stdlib module.\n '
return request.config.cache | -824,272,688,077,182,200 | Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module. | src/_pytest/cacheprovider.py | cache | bigbZik/pytest | python | @pytest.fixture
def cache(request):
'\n Return a cache object that can persist state between testing sessions.\n\n cache.get(key, default)\n cache.set(key, value)\n\n Keys must be a ``/`` separated value, where the first part is usually the\n name of your plugin or application to avoid clashes with other cache users.\n\n Values can be any object handled by the json stdlib module.\n '
return request.config.cache |
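A minimal sketch of how a test might use this fixture, based only on the get/set contract described above; the key name and payload are placeholders.
def test_counts_previous_runs(cache):
    runs = cache.get("example/run_count", 0)   # 0 is the default returned on a cache miss
    cache.set("example/run_count", runs + 1)   # value must be JSON-serializable
    assert runs >= 0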
def makedir(self, name):
' return a directory path object with the given name. If the\n directory does not yet exist, it will be created. You can use it\n to manage files likes e. g. store/retrieve database\n dumps across test sessions.\n\n :param name: must be a string not containing a ``/`` separator.\n Make sure the name contains your plugin or application\n identifiers to prevent clashes with other cache users.\n '
name = Path(name)
if (len(name.parts) > 1):
raise ValueError('name is not allowed to contain path separators')
res = self._cachedir.joinpath('d', name)
res.mkdir(exist_ok=True, parents=True)
return py.path.local(res) | -4,155,896,975,680,624,600 | return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files likes e. g. store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users. | src/_pytest/cacheprovider.py | makedir | bigbZik/pytest | python | def makedir(self, name):
' return a directory path object with the given name. If the\n directory does not yet exist, it will be created. You can use it\n to manage files likes e. g. store/retrieve database\n dumps across test sessions.\n\n :param name: must be a string not containing a ``/`` separator.\n Make sure the name contains your plugin or application\n identifiers to prevent clashes with other cache users.\n '
name = Path(name)
if (len(name.parts) > 1):
raise ValueError('name is not allowed to contain path separators')
res = self._cachedir.joinpath('d', name)
res.mkdir(exist_ok=True, parents=True)
return py.path.local(res) |
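A short usage sketch for makedir, assuming the cache fixture from the entry above; the directory name is a placeholder chosen to include a plugin-like prefix, and the returned object is a py.path.local as the code shows.
def test_writes_a_dump(cache):
    dumps = cache.makedir("myplugin_dumps")          # no "/" allowed in the name
    dumps.join("state.txt").write("hello")           # py.path.local API
    assert dumps.join("state.txt").check(file=1)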
def get(self, key, default):
' return cached value for the given key. If no value\n was yet cached or the value cannot be read, the specified\n default is returned.\n\n :param key: must be a ``/`` separated value. Usually the first\n name is the name of your plugin or your application.\n :param default: must be provided in case of a cache-miss or\n invalid cache values.\n\n '
path = self._getvaluepath(key)
try:
with path.open('r') as f:
return json.load(f)
except (ValueError, IOError, OSError):
return default | 1,390,270,008,379,693,000 | return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values. | src/_pytest/cacheprovider.py | get | bigbZik/pytest | python | def get(self, key, default):
' return cached value for the given key. If no value\n was yet cached or the value cannot be read, the specified\n default is returned.\n\n :param key: must be a ``/`` separated value. Usually the first\n name is the name of your plugin or your application.\n :param default: must be provided in case of a cache-miss or\n invalid cache values.\n\n '
path = self._getvaluepath(key)
try:
with path.open('r') as f:
return json.load(f)
except (ValueError, IOError, OSError):
return default |
def set(self, key, value):
' save value for the given key.\n\n :param key: must be a ``/`` separated value. Usually the first\n name is the name of your plugin or your application.\n :param value: must be of any combination of basic\n python types, including nested types\n like e. g. lists of dictionaries.\n '
path = self._getvaluepath(key)
try:
path.parent.mkdir(exist_ok=True, parents=True)
except (IOError, OSError):
self.warn('could not create cache path {path}', path=path)
return
try:
f = path.open(('wb' if PY2 else 'w'))
except (IOError, OSError):
self.warn('cache could not write path {path}', path=path)
else:
with f:
json.dump(value, f, indent=2, sort_keys=True)
self._ensure_readme() | 1,058,034,138,620,027,000 | save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e. g. lists of dictionaries. | src/_pytest/cacheprovider.py | set | bigbZik/pytest | python | def set(self, key, value):
' save value for the given key.\n\n :param key: must be a ``/`` separated value. Usually the first\n name is the name of your plugin or your application.\n :param value: must be of any combination of basic\n python types, including nested types\n like e. g. lists of dictionaries.\n '
path = self._getvaluepath(key)
try:
path.parent.mkdir(exist_ok=True, parents=True)
except (IOError, OSError):
self.warn('could not create cache path {path}', path=path)
return
try:
f = path.open(('wb' if PY2 else 'w'))
except (IOError, OSError):
self.warn('cache could not write path {path}', path=path)
else:
with f:
json.dump(value, f, indent=2, sort_keys=True)
self._ensure_readme() |
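A get/set round trip matching the key and value rules documented in the two entries above (placeholder key and payload):
def test_roundtrip(cache):
    cache.set("example/answer", {"value": 42})           # "/"-separated key, JSON-friendly value
    assert cache.get("example/answer", None) == {"value": 42}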
def __init__(self, policyId=None, policyType=None):
'\n :param policyId: (Optional) 自动任务策略ID。\n :param policyType: (Optional) 自动任务策略类型,当前只支持 `AutoImage` 自动备份镜像。\n '
self.policyId = policyId
self.policyType = policyType | -6,499,586,989,692,238,000 | :param policyId: (Optional) 自动任务策略ID。
:param policyType: (Optional) 自动任务策略类型,当前只支持 `AutoImage` 自动备份镜像。 | jdcloud_sdk/services/vm/models/Policy.py | __init__ | jdcloud-api/jdcloud-sdk-python | python | def __init__(self, policyId=None, policyType=None):
'\n :param policyId: (Optional) 自动任务策略ID。\n :param policyType: (Optional) 自动任务策略类型,当前只支持 `AutoImage` 自动备份镜像。\n '
self.policyId = policyId
self.policyType = policyType |
def _compute_delta(log_moments, eps):
'Compute delta for given log_moments and eps.\n\n Args:\n log_moments: the log moments of privacy loss, in the form of pairs\n of (moment_order, log_moment)\n eps: the target epsilon.\n Returns:\n delta\n '
min_delta = 1.0
for (moment_order, log_moment) in log_moments:
if (moment_order == 0):
continue
if (math.isinf(log_moment) or math.isnan(log_moment)):
sys.stderr.write(('The %d-th order is inf or Nan\n' % moment_order))
continue
if (log_moment < (moment_order * eps)):
min_delta = min(min_delta, math.exp((log_moment - (moment_order * eps))))
return min_delta | 8,862,847,200,555,492,000 | Compute delta for given log_moments and eps.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
eps: the target epsilon.
Returns:
delta | CIFAR_tests/gaussian_moments.py | _compute_delta | DPBayes/ADADP | python | def _compute_delta(log_moments, eps):
'Compute delta for given log_moments and eps.\n\n Args:\n log_moments: the log moments of privacy loss, in the form of pairs\n of (moment_order, log_moment)\n eps: the target epsilon.\n Returns:\n delta\n '
min_delta = 1.0
for (moment_order, log_moment) in log_moments:
if (moment_order == 0):
continue
if (math.isinf(log_moment) or math.isnan(log_moment)):
sys.stderr.write(('The %d-th order is inf or Nan\n' % moment_order))
continue
if (log_moment < (moment_order * eps)):
min_delta = min(min_delta, math.exp((log_moment - (moment_order * eps))))
return min_delta |
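A hand-worked call against the helper above; the (moment_order, log_moment) pairs are invented numbers used only to show the expected input shape.
log_moments = [(1, 0.05), (2, 0.12), (4, 0.30)]
delta = _compute_delta(log_moments, eps=1.0)
# smallest exp(log_moment - order * eps) wins: here exp(0.30 - 4 * 1.0) ~= 0.025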
def _compute_eps(log_moments, delta):
'Compute epsilon for given log_moments and delta.\n\n Args:\n log_moments: the log moments of privacy loss, in the form of pairs\n of (moment_order, log_moment)\n delta: the target delta.\n Returns:\n epsilon\n '
min_eps = float('inf')
for (moment_order, log_moment) in log_moments:
if (moment_order == 0):
continue
if (math.isinf(log_moment) or math.isnan(log_moment)):
sys.stderr.write(('The %d-th order is inf or Nan\n' % moment_order))
continue
min_eps = min(min_eps, ((log_moment - math.log(delta)) / moment_order))
return min_eps | -6,687,883,152,029,463,000 | Compute epsilon for given log_moments and delta.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
delta: the target delta.
Returns:
epsilon | CIFAR_tests/gaussian_moments.py | _compute_eps | DPBayes/ADADP | python | def _compute_eps(log_moments, delta):
'Compute epsilon for given log_moments and delta.\n\n Args:\n log_moments: the log moments of privacy loss, in the form of pairs\n of (moment_order, log_moment)\n delta: the target delta.\n Returns:\n epsilon\n '
min_eps = float('inf')
for (moment_order, log_moment) in log_moments:
if (moment_order == 0):
continue
if (math.isinf(log_moment) or math.isnan(log_moment)):
sys.stderr.write(('The %d-th order is inf or Nan\n' % moment_order))
continue
min_eps = min(min_eps, ((log_moment - math.log(delta)) / moment_order))
return min_eps |
def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
'Compute the log moment of Gaussian mechanism for given parameters.\n\n Args:\n q: the sampling ratio.\n sigma: the noise sigma.\n steps: the number of steps.\n lmbd: the moment order.\n verify: if False, only compute the symbolic version. If True, computes\n both symbolic and numerical solutions and verifies the results match.\n verbose: if True, print out debug information.\n Returns:\n the log moment with type np.float64, could be np.inf.\n '
moment = compute_a(sigma, q, lmbd, verbose=verbose)
if verify:
mp.dps = 50
moment_a_mp = compute_a_mp(sigma, q, lmbd, verbose=verbose)
moment_b_mp = compute_b_mp(sigma, q, lmbd, verbose=verbose)
np.testing.assert_allclose(moment, moment_a_mp, rtol=1e-10)
if (not np.isinf(moment_a_mp)):
np.testing.assert_array_less(moment_b_mp, moment_a_mp)
if np.isinf(moment):
return np.inf
else:
return (np.log(moment) * steps) | -2,424,217,040,214,786,000 | Compute the log moment of Gaussian mechanism for given parameters.
Args:
q: the sampling ratio.
sigma: the noise sigma.
steps: the number of steps.
lmbd: the moment order.
verify: if False, only compute the symbolic version. If True, computes
both symbolic and numerical solutions and verifies the results match.
verbose: if True, print out debug information.
Returns:
the log moment with type np.float64, could be np.inf. | CIFAR_tests/gaussian_moments.py | compute_log_moment | DPBayes/ADADP | python | def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
'Compute the log moment of Gaussian mechanism for given parameters.\n\n Args:\n q: the sampling ratio.\n sigma: the noise sigma.\n steps: the number of steps.\n lmbd: the moment order.\n verify: if False, only compute the symbolic version. If True, computes\n both symbolic and numerical solutions and verifies the results match.\n verbose: if True, print out debug information.\n Returns:\n the log moment with type np.float64, could be np.inf.\n '
moment = compute_a(sigma, q, lmbd, verbose=verbose)
if verify:
mp.dps = 50
moment_a_mp = compute_a_mp(sigma, q, lmbd, verbose=verbose)
moment_b_mp = compute_b_mp(sigma, q, lmbd, verbose=verbose)
np.testing.assert_allclose(moment, moment_a_mp, rtol=1e-10)
if (not np.isinf(moment_a_mp)):
np.testing.assert_array_less(moment_b_mp, moment_a_mp)
if np.isinf(moment):
return np.inf
else:
return (np.log(moment) * steps) |
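A typical accountant-style loop over moment orders; q, sigma, steps and the order range are arbitrary placeholders, and the import assumes the gaussian_moments module from the path above is on sys.path.
from gaussian_moments import compute_log_moment            # assumes CIFAR_tests/ is importable
q, sigma, steps = 0.01, 4.0, 10000
log_moments = [(order, compute_log_moment(q, sigma, steps, order)) for order in range(1, 33)]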
def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
'Compute delta (or eps) for given eps (or delta) from log moments.\n\n Args:\n log_moments: array of (moment_order, log_moment) pairs.\n target_eps: if not None, the epsilon for which we would like to compute\n corresponding delta value.\n target_delta: if not None, the delta for which we would like to compute\n corresponding epsilon value. Exactly one of target_eps and target_delta\n is None.\n Returns:\n eps, delta pair\n '
assert ((target_eps is None) ^ (target_delta is None))
assert (not ((target_eps is None) and (target_delta is None)))
if (target_eps is not None):
return (target_eps, _compute_delta(log_moments, target_eps))
else:
return (_compute_eps(log_moments, target_delta), target_delta) | 7,915,246,685,305,713,000 | Compute delta (or eps) for given eps (or delta) from log moments.
Args:
log_moments: array of (moment_order, log_moment) pairs.
target_eps: if not None, the epsilon for which we would like to compute
corresponding delta value.
target_delta: if not None, the delta for which we would like to compute
corresponding epsilon value. Exactly one of target_eps and target_delta
is None.
Returns:
eps, delta pair | CIFAR_tests/gaussian_moments.py | get_privacy_spent | DPBayes/ADADP | python | def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
'Compute delta (or eps) for given eps (or delta) from log moments.\n\n Args:\n log_moments: array of (moment_order, log_moment) pairs.\n target_eps: if not None, the epsilon for which we would like to compute\n corresponding delta value.\n target_delta: if not None, the delta for which we would like to compute\n corresponding epsilon value. Exactly one of target_eps and target_delta\n is None.\n Returns:\n eps, delta pair\n '
assert ((target_eps is None) ^ (target_delta is None))
assert (not ((target_eps is None) and (target_delta is None)))
if (target_eps is not None):
return (target_eps, _compute_delta(log_moments, target_eps))
else:
return (_compute_eps(log_moments, target_delta), target_delta) |
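Tying the two functions together; exactly one of target_eps / target_delta may be given, as the assertions above require (all numbers are placeholders, import path as in the previous sketch).
from gaussian_moments import compute_log_moment, get_privacy_spent
log_moments = [(order, compute_log_moment(0.01, 4.0, 10000, order)) for order in range(1, 33)]
eps, delta = get_privacy_spent(log_moments, target_delta=1e-05)   # fix delta, report eps
# eps, delta = get_privacy_spent(log_moments, target_eps=2.0)     # or fix eps, report delta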
def create(self, validated_data):
'Create a new user with encrypted password and return it'
return get_user_model().objects.create_user(**validated_data) | -1,686,897,521,577,608,400 | Create a new user with encrypted password and return it | app/user/serializers.py | create | siddharthisaiah/recipe-app-api | python | def create(self, validated_data):
return get_user_model().objects.create_user(**validated_data) |
def update(self, instance, validated_data):
'Update a user, setting the password correctly and return it'
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user | -203,708,244,252,171,460 | Update a user, setting the password correctly and return it | app/user/serializers.py | update | siddharthisaiah/recipe-app-api | python | def update(self, instance, validated_data):
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user |
def validate(self, attrs):
'Validate and authenticate the user'
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(request=self.context.get('request'), username=email, password=password)
if (not user):
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs | 6,380,639,698,687,869,000 | Validate and authenticate the user | app/user/serializers.py | validate | siddharthisaiah/recipe-app-api | python | def validate(self, attrs):
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(request=self.context.get('request'), username=email, password=password)
if (not user):
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs |
def testProjectWorkflowStepDtoV2(self):
'Test ProjectWorkflowStepDtoV2'
pass | 5,664,496,987,346,935,000 | Test ProjectWorkflowStepDtoV2 | test/test_project_workflow_step_dto_v2.py | testProjectWorkflowStepDtoV2 | unofficial-memsource/memsource-cli | python | def testProjectWorkflowStepDtoV2(self):
pass |
def binarize(tensor: tf.Tensor, bitsize: Optional[int]=None) -> tf.Tensor:
'Extract bits of values in `tensor`, returning a `tf.Tensor` with same\n dtype.'
with tf.name_scope('binarize'):
bitsize = (bitsize or (tensor.dtype.size * 8))
bit_indices_shape = (([1] * len(tensor.shape)) + [bitsize])
bit_indices = tf.range(bitsize, dtype=tensor.dtype)
bit_indices = tf.reshape(bit_indices, bit_indices_shape)
val = tf.expand_dims(tensor, (- 1))
val = tf.bitwise.bitwise_and(tf.bitwise.right_shift(val, bit_indices), 1)
assert (val.dtype == tensor.dtype)
return val | 3,900,341,414,611,682,300 | Extract bits of values in `tensor`, returning a `tf.Tensor` with same
dtype. | tf_encrypted/tensor/shared.py | binarize | Arash-Afshar/tf-encrypted | python | def binarize(tensor: tf.Tensor, bitsize: Optional[int]=None) -> tf.Tensor:
'Extract bits of values in `tensor`, returning a `tf.Tensor` with same\n dtype.'
with tf.name_scope('binarize'):
bitsize = (bitsize or (tensor.dtype.size * 8))
bit_indices_shape = (([1] * len(tensor.shape)) + [bitsize])
bit_indices = tf.range(bitsize, dtype=tensor.dtype)
bit_indices = tf.reshape(bit_indices, bit_indices_shape)
val = tf.expand_dims(tensor, (- 1))
val = tf.bitwise.bitwise_and(tf.bitwise.right_shift(val, bit_indices), 1)
assert (val.dtype == tensor.dtype)
return val |
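A small value check for the helper above; the import path is inferred from the file path, and the stated values assume eager execution or a session run under TF 1.x.
import tensorflow as tf
from tf_encrypted.tensor.shared import binarize   # inferred from tf_encrypted/tensor/shared.py
x = tf.constant([5, 2], dtype=tf.int32)
b = binarize(x, bitsize=4)
# b has shape (2, 4) with least-significant bit first: 5 -> [1, 0, 1, 0], 2 -> [0, 1, 0, 0]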
def bits(tensor: tf.Tensor, bitsize: Optional[int]=None) -> list:
'Extract bits of values in `tensor`, returning a list of tensors.'
with tf.name_scope('bits'):
bitsize = (bitsize or (tensor.dtype.size * 8))
the_bits = [tf.bitwise.bitwise_and(tf.bitwise.right_shift(tensor, i), 1) for i in range(bitsize)]
return the_bits | 7,872,300,279,742,978,000 | Extract bits of values in `tensor`, returning a list of tensors. | tf_encrypted/tensor/shared.py | bits | Arash-Afshar/tf-encrypted | python | def bits(tensor: tf.Tensor, bitsize: Optional[int]=None) -> list:
with tf.name_scope('bits'):
bitsize = (bitsize or (tensor.dtype.size * 8))
the_bits = [tf.bitwise.bitwise_and(tf.bitwise.right_shift(tensor, i), 1) for i in range(bitsize)]
return the_bits |
def im2col(x: Union[(tf.Tensor, np.ndarray)], h_filter: int, w_filter: int, padding: str, stride: int) -> tf.Tensor:
'Generic implementation of im2col on tf.Tensors.'
with tf.name_scope('im2col'):
nhwc_tensor = tf.transpose(x, [0, 2, 3, 1])
channels = int(nhwc_tensor.shape[3])
patch_tensor = tf.extract_image_patches(nhwc_tensor, ksizes=[1, h_filter, w_filter, 1], strides=[1, stride, stride, 1], rates=[1, 1, 1, 1], padding=padding)
patch_tensor_nchw = tf.reshape(tf.transpose(patch_tensor, [3, 1, 2, 0]), (h_filter, w_filter, channels, (- 1)))
x_col_tensor = tf.reshape(tf.transpose(patch_tensor_nchw, [2, 0, 1, 3]), (((channels * h_filter) * w_filter), (- 1)))
return x_col_tensor | 426,579,149,317,277,900 | Generic implementation of im2col on tf.Tensors. | tf_encrypted/tensor/shared.py | im2col | Arash-Afshar/tf-encrypted | python | def im2col(x: Union[(tf.Tensor, np.ndarray)], h_filter: int, w_filter: int, padding: str, stride: int) -> tf.Tensor:
with tf.name_scope('im2col'):
nhwc_tensor = tf.transpose(x, [0, 2, 3, 1])
channels = int(nhwc_tensor.shape[3])
patch_tensor = tf.extract_image_patches(nhwc_tensor, ksizes=[1, h_filter, w_filter, 1], strides=[1, stride, stride, 1], rates=[1, 1, 1, 1], padding=padding)
patch_tensor_nchw = tf.reshape(tf.transpose(patch_tensor, [3, 1, 2, 0]), (h_filter, w_filter, channels, (- 1)))
x_col_tensor = tf.reshape(tf.transpose(patch_tensor_nchw, [2, 0, 1, 3]), (((channels * h_filter) * w_filter), (- 1)))
return x_col_tensor |
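A shape-only sketch of the patch extraction above on an NCHW input; sizes are arbitrary, and the function relies on the TF 1.x tf.extract_image_patches API.
import numpy as np
from tf_encrypted.tensor.shared import im2col      # inferred from tf_encrypted/tensor/shared.py
x = np.zeros((2, 3, 8, 8), dtype=np.float32)       # (batch, channels, height, width)
cols = im2col(x, h_filter=3, w_filter=3, padding='VALID', stride=1)
# VALID 3x3 patches on an 8x8 map give 6x6 positions, so cols has shape
# (channels * h_filter * w_filter, positions * batch) = (27, 72)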
def conv2d(x: AbstractTensor, y: AbstractTensor, stride, padding) -> AbstractTensor:
'Generic convolution implementation with im2col over AbstractTensors.'
with tf.name_scope('conv2d'):
(h_filter, w_filter, in_filters, out_filters) = map(int, y.shape)
(n_x, c_x, h_x, w_x) = map(int, x.shape)
if (c_x != in_filters):
out_filters = in_filters
if (padding == 'SAME'):
h_out = int(math.ceil((float(h_x) / float(stride))))
w_out = int(math.ceil((float(w_x) / float(stride))))
elif (padding == 'VALID'):
h_out = int(math.ceil((float(((h_x - h_filter) + 1)) / float(stride))))
w_out = int(math.ceil((float(((w_x - w_filter) + 1)) / float(stride))))
else:
raise ValueError("Don't know padding method '{}'".format(padding))
x_col = x.im2col(h_filter, w_filter, padding, stride)
w_col = y.transpose([3, 2, 0, 1]).reshape([int(out_filters), (- 1)])
out = w_col.matmul(x_col)
out = out.reshape([out_filters, h_out, w_out, n_x])
out = out.transpose([3, 0, 1, 2])
return out | -1,181,663,389,733,783,800 | Generic convolution implementation with im2col over AbstractTensors. | tf_encrypted/tensor/shared.py | conv2d | Arash-Afshar/tf-encrypted | python | def conv2d(x: AbstractTensor, y: AbstractTensor, stride, padding) -> AbstractTensor:
with tf.name_scope('conv2d'):
(h_filter, w_filter, in_filters, out_filters) = map(int, y.shape)
(n_x, c_x, h_x, w_x) = map(int, x.shape)
if (c_x != in_filters):
out_filters = in_filters
if (padding == 'SAME'):
h_out = int(math.ceil((float(h_x) / float(stride))))
w_out = int(math.ceil((float(w_x) / float(stride))))
elif (padding == 'VALID'):
h_out = int(math.ceil((float(((h_x - h_filter) + 1)) / float(stride))))
w_out = int(math.ceil((float(((w_x - w_filter) + 1)) / float(stride))))
else:
raise ValueError("Don't know padding method '{}'".format(padding))
x_col = x.im2col(h_filter, w_filter, padding, stride)
w_col = y.transpose([3, 2, 0, 1]).reshape([int(out_filters), (- 1)])
out = w_col.matmul(x_col)
out = out.reshape([out_filters, h_out, w_out, n_x])
out = out.transpose([3, 0, 1, 2])
return out |
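For intuition, a worked pass through the output-shape arithmetic above (numbers are arbitrary):
# x: (n_x, c_x, h_x, w_x) = (1, 3, 32, 32); y: (h_filter, w_filter, in, out) = (3, 3, 3, 16); stride = 1
# 'SAME' : h_out = ceil(32 / 1) = 32, w_out = 32  -> output shape (1, 16, 32, 32)
# 'VALID': h_out = ceil((32 - 3 + 1) / 1) = 30    -> output shape (1, 16, 30, 30)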
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(), fitargs=(), regresults=False):
"\n Returns the results for the lag length that maximizes the info criterion.\n\n Parameters\n ----------\n mod : Model class\n Model estimator class\n endog : array-like\n nobs array containing endogenous variable\n exog : array-like\n nobs by (startlag + maxlag) array containing lags and possibly other\n variables\n startlag : int\n The first zero-indexed column to hold a lag. See Notes.\n maxlag : int\n The highest lag order for lag length selection.\n method : {'aic', 'bic', 't-stat'}\n aic - Akaike Information Criterion\n bic - Bayes Information Criterion\n t-stat - Based on last lag\n modargs : tuple, optional\n args to pass to model. See notes.\n fitargs : tuple, optional\n args to pass to fit. See notes.\n regresults : bool, optional\n Flag indicating to return optional return results\n\n Returns\n -------\n icbest : float\n Best information criteria.\n bestlag : int\n The lag length that maximizes the information criterion.\n results : dict, optional\n Dictionary containing all estimation results\n\n Notes\n -----\n Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)\n where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are\n assumed to be in contiguous columns from low to high lag length with\n the highest lag in the last column.\n "
results = {}
method = method.lower()
for lag in range(startlag, ((startlag + maxlag) + 1)):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if (method == 'aic'):
(icbest, bestlag) = min(((v.aic, k) for (k, v) in iteritems(results)))
elif (method == 'bic'):
(icbest, bestlag) = min(((v.bic, k) for (k, v) in iteritems(results)))
elif (method == 't-stat'):
stop = 1.6448536269514722
for lag in range((startlag + maxlag), (startlag - 1), (- 1)):
icbest = np.abs(results[lag].tvalues[(- 1)])
if (np.abs(icbest) >= stop):
bestlag = lag
icbest = icbest
break
else:
raise (ValueError('Information Criterion %s not understood.') % method)
if (not regresults):
return (icbest, bestlag)
else:
return (icbest, bestlag, results) | -3,476,040,984,998,727,700 | Returns the results for the lag length that maximizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array-like
nobs array containing endogenous variable
exog : array-like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {'aic', 'bic', 't-stat'}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column. | statsmodels/tsa/stattools.py | _autolag | josef-pkt/statsmodels | python | def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(), fitargs=(), regresults=False):
"\n Returns the results for the lag length that maximizes the info criterion.\n\n Parameters\n ----------\n mod : Model class\n Model estimator class\n endog : array-like\n nobs array containing endogenous variable\n exog : array-like\n nobs by (startlag + maxlag) array containing lags and possibly other\n variables\n startlag : int\n The first zero-indexed column to hold a lag. See Notes.\n maxlag : int\n The highest lag order for lag length selection.\n method : {'aic', 'bic', 't-stat'}\n aic - Akaike Information Criterion\n bic - Bayes Information Criterion\n t-stat - Based on last lag\n modargs : tuple, optional\n args to pass to model. See notes.\n fitargs : tuple, optional\n args to pass to fit. See notes.\n regresults : bool, optional\n Flag indicating to return optional return results\n\n Returns\n -------\n icbest : float\n Best information criteria.\n bestlag : int\n The lag length that maximizes the information criterion.\n results : dict, optional\n Dictionary containing all estimation results\n\n Notes\n -----\n Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)\n where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are\n assumed to be in contiguous columns from low to high lag length with\n the highest lag in the last column.\n "
results = {}
method = method.lower()
for lag in range(startlag, ((startlag + maxlag) + 1)):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if (method == 'aic'):
(icbest, bestlag) = min(((v.aic, k) for (k, v) in iteritems(results)))
elif (method == 'bic'):
(icbest, bestlag) = min(((v.bic, k) for (k, v) in iteritems(results)))
elif (method == 't-stat'):
stop = 1.6448536269514722
for lag in range((startlag + maxlag), (startlag - 1), (- 1)):
icbest = np.abs(results[lag].tvalues[(- 1)])
if (np.abs(icbest) >= stop):
bestlag = lag
icbest = icbest
break
else:
raise (ValueError('Information Criterion %s not understood.') % method)
if (not regresults):
return (icbest, bestlag)
else:
return (icbest, bestlag, results) |
def adfuller(x, maxlag=None, regression='c', autolag='AIC', store=False, regresults=False):
'\n Augmented Dickey-Fuller unit root test\n\n The Augmented Dickey-Fuller test can be used to test for a unit root in a\n univariate process in the presence of serial correlation.\n\n Parameters\n ----------\n x : array_like, 1d\n data series\n maxlag : int\n Maximum lag which is included in test, default 12*(nobs/100)^{1/4}\n regression : {\'c\',\'ct\',\'ctt\',\'nc\'}\n Constant and trend order to include in regression\n\n * \'c\' : constant only (default)\n * \'ct\' : constant and trend\n * \'ctt\' : constant, and linear and quadratic trend\n * \'nc\' : no constant, no trend\n autolag : {\'AIC\', \'BIC\', \'t-stat\', None}\n * if None, then maxlag lags are used\n * if \'AIC\' (default) or \'BIC\', then the number of lags is chosen\n to minimize the corresponding information criterion\n * \'t-stat\' based choice of maxlag. Starts with maxlag and drops a\n lag until the t-statistic on the last lag length is significant\n using a 5%-sized test\n store : bool\n If True, then a result instance is returned additionally to\n the adf statistic. Default is False\n regresults : bool, optional\n If True, the full regression results are returned. Default is False\n\n Returns\n -------\n adf : float\n Test statistic\n pvalue : float\n MacKinnon\'s approximate p-value based on MacKinnon (1994, 2010)\n usedlag : int\n Number of lags used\n nobs : int\n Number of observations used for the ADF regression and calculation of\n the critical values\n critical values : dict\n Critical values for the test statistic at the 1 %, 5 %, and 10 %\n levels. Based on MacKinnon (2010)\n icbest : float\n The maximized information criterion if autolag is not None.\n resstore : ResultStore, optional\n A dummy class with results attached as attributes\n\n Notes\n -----\n The null hypothesis of the Augmented Dickey-Fuller is that there is a unit\n root, with the alternative that there is no unit root. If the pvalue is\n above a critical size, then we cannot reject that there is a unit root.\n\n The p-values are obtained through regression surface approximation from\n MacKinnon 1994, but using the updated 2010 tables. If the p-value is close\n to significant, then the critical values should be used to judge whether\n to reject the null.\n\n The autolag option and maxlag for it are described in Greene.\n\n Examples\n --------\n See example notebook\n\n References\n ----------\n .. [*] W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.\n\n .. [*] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.\n\n .. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for\n unit-root and cointegration tests. `Journal of Business and Economic\n Statistics` 12, 167-76.\n\n .. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen\'s\n University, Dept of Economics, Working Papers. Available at\n http://ideas.repec.org/p/qed/wpaper/1227.html\n '
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if ((regression is None) or isinstance(regression, (int, long))):
regression = trenddict[regression]
regression = regression.lower()
if (regression not in ['c', 'nc', 'ct', 'ctt']):
raise (ValueError('regression option %s not understood') % regression)
x = np.asarray(x)
nobs = x.shape[0]
if (maxlag is None):
maxlag = int(np.ceil((12.0 * np.power((nobs / 100.0), (1 / 4.0)))))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0]
xdall[:, 0] = x[((- nobs) - 1):(- 1)]
xdshort = xdiff[(- nobs):]
if store:
resstore = ResultsStore()
if autolag:
if (regression != 'nc'):
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = ((fullRHS.shape[1] - xdall.shape[1]) + 1)
if (not regresults):
(icbest, bestlag) = _autolag(OLS, xdshort, fullRHS, startlag, maxlag, autolag)
else:
(icbest, bestlag, alres) = _autolag(OLS, xdshort, fullRHS, startlag, maxlag, autolag, regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0]
xdall[:, 0] = x[((- nobs) - 1):(- 1)]
xdshort = xdiff[(- nobs):]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if (regression != 'nc'):
resols = OLS(xdshort, add_trend(xdall[:, :(usedlag + 1)], regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :(usedlag + 1)]).fit()
adfstat = resols.tvalues[0]
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {'1%': critvalues[0], '5%': critvalues[1], '10%': critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = 'The coefficient on the lagged level equals 1 - unit root'
resstore.HA = 'The coefficient on the lagged level < 1 - stationary'
resstore.icbest = icbest
resstore._str = 'Augmented Dickey-Fuller Test Results'
return (adfstat, pvalue, critvalues, resstore)
elif (not autolag):
return (adfstat, pvalue, usedlag, nobs, critvalues)
else:
return (adfstat, pvalue, usedlag, nobs, critvalues, icbest) | -4,962,697,273,032,497,000 | Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
store : bool
If True, then a result instance is returned additionally to
the adf statistic. Default is False
regresults : bool, optional
If True, the full regression results are returned. Default is False
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
usedlag : int
Number of lags used
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010)
icbest : float
The maximized information criterion if autolag is not None.
resstore : ResultStore, optional
A dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
to significant, then the critical values should be used to judge whether
to reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
See example notebook
References
----------
.. [*] W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [*] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
.. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html | statsmodels/tsa/stattools.py | adfuller | josef-pkt/statsmodels | python | def adfuller(x, maxlag=None, regression='c', autolag='AIC', store=False, regresults=False):
'\n Augmented Dickey-Fuller unit root test\n\n The Augmented Dickey-Fuller test can be used to test for a unit root in a\n univariate process in the presence of serial correlation.\n\n Parameters\n ----------\n x : array_like, 1d\n data series\n maxlag : int\n Maximum lag which is included in test, default 12*(nobs/100)^{1/4}\n regression : {\'c\',\'ct\',\'ctt\',\'nc\'}\n Constant and trend order to include in regression\n\n * \'c\' : constant only (default)\n * \'ct\' : constant and trend\n * \'ctt\' : constant, and linear and quadratic trend\n * \'nc\' : no constant, no trend\n autolag : {\'AIC\', \'BIC\', \'t-stat\', None}\n * if None, then maxlag lags are used\n * if \'AIC\' (default) or \'BIC\', then the number of lags is chosen\n to minimize the corresponding information criterion\n * \'t-stat\' based choice of maxlag. Starts with maxlag and drops a\n lag until the t-statistic on the last lag length is significant\n using a 5%-sized test\n store : bool\n If True, then a result instance is returned additionally to\n the adf statistic. Default is False\n regresults : bool, optional\n If True, the full regression results are returned. Default is False\n\n Returns\n -------\n adf : float\n Test statistic\n pvalue : float\n MacKinnon\'s approximate p-value based on MacKinnon (1994, 2010)\n usedlag : int\n Number of lags used\n nobs : int\n Number of observations used for the ADF regression and calculation of\n the critical values\n critical values : dict\n Critical values for the test statistic at the 1 %, 5 %, and 10 %\n levels. Based on MacKinnon (2010)\n icbest : float\n The maximized information criterion if autolag is not None.\n resstore : ResultStore, optional\n A dummy class with results attached as attributes\n\n Notes\n -----\n The null hypothesis of the Augmented Dickey-Fuller is that there is a unit\n root, with the alternative that there is no unit root. If the pvalue is\n above a critical size, then we cannot reject that there is a unit root.\n\n The p-values are obtained through regression surface approximation from\n MacKinnon 1994, but using the updated 2010 tables. If the p-value is close\n to significant, then the critical values should be used to judge whether\n to reject the null.\n\n The autolag option and maxlag for it are described in Greene.\n\n Examples\n --------\n See example notebook\n\n References\n ----------\n .. [*] W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.\n\n .. [*] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.\n\n .. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for\n unit-root and cointegration tests. `Journal of Business and Economic\n Statistics` 12, 167-76.\n\n .. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen\'s\n University, Dept of Economics, Working Papers. Available at\n http://ideas.repec.org/p/qed/wpaper/1227.html\n '
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if ((regression is None) or isinstance(regression, (int, long))):
regression = trenddict[regression]
regression = regression.lower()
if (regression not in ['c', 'nc', 'ct', 'ctt']):
raise (ValueError('regression option %s not understood') % regression)
x = np.asarray(x)
nobs = x.shape[0]
if (maxlag is None):
maxlag = int(np.ceil((12.0 * np.power((nobs / 100.0), (1 / 4.0)))))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0]
xdall[:, 0] = x[((- nobs) - 1):(- 1)]
xdshort = xdiff[(- nobs):]
if store:
resstore = ResultsStore()
if autolag:
if (regression != 'nc'):
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = ((fullRHS.shape[1] - xdall.shape[1]) + 1)
if (not regresults):
(icbest, bestlag) = _autolag(OLS, xdshort, fullRHS, startlag, maxlag, autolag)
else:
(icbest, bestlag, alres) = _autolag(OLS, xdshort, fullRHS, startlag, maxlag, autolag, regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0]
xdall[:, 0] = x[((- nobs) - 1):(- 1)]
xdshort = xdiff[(- nobs):]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if (regression != 'nc'):
resols = OLS(xdshort, add_trend(xdall[:, :(usedlag + 1)], regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :(usedlag + 1)]).fit()
adfstat = resols.tvalues[0]
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {'1%': critvalues[0], '5%': critvalues[1], '10%': critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = 'The coefficient on the lagged level equals 1 - unit root'
resstore.HA = 'The coefficient on the lagged level < 1 - stationary'
resstore.icbest = icbest
resstore._str = 'Augmented Dickey-Fuller Test Results'
return (adfstat, pvalue, critvalues, resstore)
elif (not autolag):
return (adfstat, pvalue, usedlag, nobs, critvalues)
else:
return (adfstat, pvalue, usedlag, nobs, critvalues, icbest) |
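A minimal call against the signature above; the data is a simulated random walk, so the unit-root null should typically not be rejected (seed and length are arbitrary).
import numpy as np
from statsmodels.tsa.stattools import adfuller
y = np.random.RandomState(12345).randn(250).cumsum()
adf_stat, pvalue, usedlag, nobs, crit, icbest = adfuller(y, regression='c', autolag='AIC')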
def acovf(x, unbiased=False, demean=True, fft=False, missing='none'):
"\n Autocovariance for 1D\n\n Parameters\n ----------\n x : array\n Time series data. Must be 1d.\n unbiased : bool\n If True, then denominators is n-k, otherwise n\n demean : bool\n If True, then subtract the mean x from each element of x\n fft : bool\n If True, use FFT convolution. This method should be preferred\n for long time series.\n missing : str\n A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs\n are to be treated.\n\n Returns\n -------\n acovf : array\n autocovariance function\n\n References\n -----------\n .. [*] Parzen, E., 1963. On spectral analysis with missing observations\n and amplitude modulation. Sankhya: The Indian Journal of\n Statistics, Series A, pp.383-392.\n "
x = np.squeeze(np.asarray(x))
if (x.ndim > 1):
raise ValueError(('x must be 1d. Got %d dims.' % x.ndim))
missing = missing.lower()
if (missing not in ['none', 'raise', 'conservative', 'drop']):
raise ValueError(('missing option %s not understood' % missing))
if (missing == 'none'):
deal_with_masked = False
else:
deal_with_masked = has_missing(x)
if deal_with_masked:
if (missing == 'raise'):
raise MissingDataError('NaNs were encountered in the data')
notmask_bool = (~ np.isnan(x))
if (missing == 'conservative'):
x[(~ notmask_bool)] = 0
else:
x = x[notmask_bool]
notmask_int = notmask_bool.astype(int)
if (demean and deal_with_masked):
xo = (x - (x.sum() / notmask_int.sum()))
if (missing == 'conservative'):
xo[(~ notmask_bool)] = 0
elif demean:
xo = (x - x.mean())
else:
xo = x
n = len(x)
if (unbiased and deal_with_masked and (missing == 'conservative')):
d = np.correlate(notmask_int, notmask_int, 'full')
elif unbiased:
xi = np.arange(1, (n + 1))
d = np.hstack((xi, xi[:(- 1)][::(- 1)]))
elif deal_with_masked:
d = (notmask_int.sum() * np.ones(((2 * n) - 1)))
else:
d = (n * np.ones(((2 * n) - 1)))
if fft:
nobs = len(xo)
n = _next_regular(((2 * nobs) + 1))
Frf = np.fft.fft(xo, n=n)
acov = (np.fft.ifft((Frf * np.conjugate(Frf)))[:nobs] / d[(nobs - 1):])
acov = acov.real
else:
acov = (np.correlate(xo, xo, 'full') / d)[(n - 1):]
if (deal_with_masked and (missing == 'conservative')):
x[(~ notmask_bool)] = np.nan
return acov | -5,403,537,379,756,743,000 | Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
unbiased : bool
If True, then denominators is n-k, otherwise n
demean : bool
If True, then subtract the mean x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acovf : array
autocovariance function
References
-----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392. | statsmodels/tsa/stattools.py | acovf | josef-pkt/statsmodels | python | def acovf(x, unbiased=False, demean=True, fft=False, missing='none'):
"\n Autocovariance for 1D\n\n Parameters\n ----------\n x : array\n Time series data. Must be 1d.\n unbiased : bool\n If True, then denominators is n-k, otherwise n\n demean : bool\n If True, then subtract the mean x from each element of x\n fft : bool\n If True, use FFT convolution. This method should be preferred\n for long time series.\n missing : str\n A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs\n are to be treated.\n\n Returns\n -------\n acovf : array\n autocovariance function\n\n References\n -----------\n .. [*] Parzen, E., 1963. On spectral analysis with missing observations\n and amplitude modulation. Sankhya: The Indian Journal of\n Statistics, Series A, pp.383-392.\n "
x = np.squeeze(np.asarray(x))
if (x.ndim > 1):
raise ValueError(('x must be 1d. Got %d dims.' % x.ndim))
missing = missing.lower()
if (missing not in ['none', 'raise', 'conservative', 'drop']):
raise ValueError(('missing option %s not understood' % missing))
if (missing == 'none'):
deal_with_masked = False
else:
deal_with_masked = has_missing(x)
if deal_with_masked:
if (missing == 'raise'):
raise MissingDataError('NaNs were encountered in the data')
notmask_bool = (~ np.isnan(x))
if (missing == 'conservative'):
x[(~ notmask_bool)] = 0
else:
x = x[notmask_bool]
notmask_int = notmask_bool.astype(int)
if (demean and deal_with_masked):
xo = (x - (x.sum() / notmask_int.sum()))
if (missing == 'conservative'):
xo[(~ notmask_bool)] = 0
elif demean:
xo = (x - x.mean())
else:
xo = x
n = len(x)
if (unbiased and deal_with_masked and (missing == 'conservative')):
d = np.correlate(notmask_int, notmask_int, 'full')
elif unbiased:
xi = np.arange(1, (n + 1))
d = np.hstack((xi, xi[:(- 1)][::(- 1)]))
elif deal_with_masked:
d = (notmask_int.sum() * np.ones(((2 * n) - 1)))
else:
d = (n * np.ones(((2 * n) - 1)))
if fft:
nobs = len(xo)
n = _next_regular(((2 * nobs) + 1))
Frf = np.fft.fft(xo, n=n)
acov = (np.fft.ifft((Frf * np.conjugate(Frf)))[:nobs] / d[(nobs - 1):])
acov = acov.real
else:
acov = (np.correlate(xo, xo, 'full') / d)[(n - 1):]
if (deal_with_masked and (missing == 'conservative')):
x[(~ notmask_bool)] = np.nan
return acov |
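A tiny numeric check of the default biased, demeaned estimate, using the keyword names shown above:
import numpy as np
from statsmodels.tsa.stattools import acovf
x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
gamma = acovf(x, unbiased=False, demean=True, fft=False)
# gamma[0] is the biased sample variance, ((x - x.mean()) ** 2).mean() == 2.0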
def q_stat(x, nobs, type='ljungbox'):
"\n Return's Ljung-Box Q Statistic\n\n x : array-like\n Array of autocorrelation coefficients. Can be obtained from acf.\n nobs : int\n Number of observations in the entire sample (ie., not just the length\n of the autocorrelation function results.\n\n Returns\n -------\n q-stat : array\n Ljung-Box Q-statistic for autocorrelation parameters\n p-value : array\n P-value of the Q statistic\n\n Notes\n ------\n Written to be used with acf.\n "
x = np.asarray(x)
if (type == 'ljungbox'):
ret = ((nobs * (nobs + 2)) * np.cumsum(((1.0 / (nobs - np.arange(1, (len(x) + 1)))) * (x ** 2))))
chi2 = stats.chi2.sf(ret, np.arange(1, (len(x) + 1)))
return (ret, chi2) | -7,626,575,831,499,459,000 | Return's Ljung-Box Q Statistic
x : array-like
Array of autocorrelation coefficients. Can be obtained from acf.
nobs : int
Number of observations in the entire sample (ie., not just the length
of the autocorrelation function results.
Returns
-------
q-stat : array
Ljung-Box Q-statistic for autocorrelation parameters
p-value : array
P-value of the Q statistic
Notes
------
Written to be used with acf. | statsmodels/tsa/stattools.py | q_stat | josef-pkt/statsmodels | python | def q_stat(x, nobs, type='ljungbox'):
"\n Return's Ljung-Box Q Statistic\n\n x : array-like\n Array of autocorrelation coefficients. Can be obtained from acf.\n nobs : int\n Number of observations in the entire sample (ie., not just the length\n of the autocorrelation function results.\n\n Returns\n -------\n q-stat : array\n Ljung-Box Q-statistic for autocorrelation parameters\n p-value : array\n P-value of the Q statistic\n\n Notes\n ------\n Written to be used with acf.\n "
x = np.asarray(x)
if (type == 'ljungbox'):
ret = ((nobs * (nobs + 2)) * np.cumsum(((1.0 / (nobs - np.arange(1, (len(x) + 1)))) * (x ** 2))))
chi2 = stats.chi2.sf(ret, np.arange(1, (len(x) + 1)))
return (ret, chi2) |
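As the notes say, q_stat is meant to be fed autocorrelations from acf with the lag-0 value dropped; the data below is arbitrary white noise.
import numpy as np
from statsmodels.tsa.stattools import acf, q_stat
y = np.random.RandomState(0).randn(300)
r = acf(y, nlags=10)
qstats, pvalues = q_stat(r[1:], nobs=len(y))   # drop the lag-0 autocorrelation of 1.0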
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None, missing='none'):
"\n Autocorrelation function for 1d arrays.\n\n Parameters\n ----------\n x : array\n Time series data\n unbiased : bool\n If True, then denominators for autocovariance are n-k, otherwise n\n nlags: int, optional\n Number of lags to return autocorrelation for.\n qstat : bool, optional\n If True, returns the Ljung-Box q statistic for each autocorrelation\n coefficient. See q_stat for more information.\n fft : bool, optional\n If True, computes the ACF via FFT.\n alpha : scalar, optional\n If a number is given, the confidence intervals for the given level are\n returned. For instance if alpha=.05, 95 % confidence intervals are\n returned where the standard deviation is computed according to\n Bartlett's formula.\n missing : str, optional\n A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs\n are to be treated.\n\n Returns\n -------\n acf : array\n autocorrelation function\n confint : array, optional\n Confidence intervals for the ACF. Returned if confint is not None.\n qstat : array, optional\n The Ljung-Box Q-Statistic. Returned if q_stat is True.\n pvalues : array, optional\n The p-values associated with the Q-statistics. Returned if q_stat is\n True.\n\n Notes\n -----\n The acf at lag 0 (ie., 1) is returned.\n\n This is based np.correlate which does full convolution. For very long time\n series it is recommended to use fft convolution instead.\n\n If unbiased is true, the denominator for the autocovariance is adjusted\n but the autocorrelation is not an unbiased estimtor.\n\n References\n ----------\n .. [*] Parzen, E., 1963. On spectral analysis with missing observations\n and amplitude modulation. Sankhya: The Indian Journal of\n Statistics, Series A, pp.383-392.\n\n "
nobs = len(x)
avf = acovf(x, unbiased=unbiased, demean=True, fft=fft, missing=missing)
acf = (avf[:(nlags + 1)] / avf[0])
if (not (qstat or alpha)):
return acf
if (alpha is not None):
varacf = (np.ones((nlags + 1)) / nobs)
varacf[0] = 0
varacf[1] = (1.0 / nobs)
varacf[2:] *= (1 + (2 * np.cumsum((acf[1:(- 1)] ** 2))))
interval = (stats.norm.ppf((1 - (alpha / 2.0))) * np.sqrt(varacf))
confint = np.array(lzip((acf - interval), (acf + interval)))
if (not qstat):
return (acf, confint)
if qstat:
(qstat, pvalue) = q_stat(acf[1:], nobs=nobs)
if (alpha is not None):
return (acf, confint, qstat, pvalue)
else:
return (acf, qstat, pvalue) | -6,031,796,841,099,306,000 | Autocorrelation function for 1d arrays.
Parameters
----------
x : array
Time series data
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
nlags: int, optional
Number of lags to return autocorrelation for.
qstat : bool, optional
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, optional
If True, computes the ACF via FFT.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett's formula.
missing : str, optional
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acf : array
autocorrelation function
confint : array, optional
Confidence intervals for the ACF. Returned if confint is not None.
qstat : array, optional
The Ljung-Box Q-Statistic. Returned if q_stat is True.
pvalues : array, optional
The p-values associated with the Q-statistics. Returned if q_stat is
True.
Notes
-----
The acf at lag 0 (ie., 1) is returned.
This is based np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
If unbiased is true, the denominator for the autocovariance is adjusted
but the autocorrelation is not an unbiased estimtor.
References
----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392. | statsmodels/tsa/stattools.py | acf | josef-pkt/statsmodels | python | def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None, missing='none'):
"\n Autocorrelation function for 1d arrays.\n\n Parameters\n ----------\n x : array\n Time series data\n unbiased : bool\n If True, then denominators for autocovariance are n-k, otherwise n\n nlags: int, optional\n Number of lags to return autocorrelation for.\n qstat : bool, optional\n If True, returns the Ljung-Box q statistic for each autocorrelation\n coefficient. See q_stat for more information.\n fft : bool, optional\n If True, computes the ACF via FFT.\n alpha : scalar, optional\n If a number is given, the confidence intervals for the given level are\n returned. For instance if alpha=.05, 95 % confidence intervals are\n returned where the standard deviation is computed according to\n Bartlett's formula.\n missing : str, optional\n A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs\n are to be treated.\n\n Returns\n -------\n acf : array\n autocorrelation function\n confint : array, optional\n Confidence intervals for the ACF. Returned if confint is not None.\n qstat : array, optional\n The Ljung-Box Q-Statistic. Returned if q_stat is True.\n pvalues : array, optional\n The p-values associated with the Q-statistics. Returned if q_stat is\n True.\n\n Notes\n -----\n The acf at lag 0 (ie., 1) is returned.\n\n This is based np.correlate which does full convolution. For very long time\n series it is recommended to use fft convolution instead.\n\n If unbiased is true, the denominator for the autocovariance is adjusted\n but the autocorrelation is not an unbiased estimtor.\n\n References\n ----------\n .. [*] Parzen, E., 1963. On spectral analysis with missing observations\n and amplitude modulation. Sankhya: The Indian Journal of\n Statistics, Series A, pp.383-392.\n\n "
nobs = len(x)
avf = acovf(x, unbiased=unbiased, demean=True, fft=fft, missing=missing)
acf = (avf[:(nlags + 1)] / avf[0])
if (not (qstat or alpha)):
return acf
if (alpha is not None):
varacf = (np.ones((nlags + 1)) / nobs)
varacf[0] = 0
varacf[1] = (1.0 / nobs)
varacf[2:] *= (1 + (2 * np.cumsum((acf[1:(- 1)] ** 2))))
interval = (stats.norm.ppf((1 - (alpha / 2.0))) * np.sqrt(varacf))
confint = np.array(lzip((acf - interval), (acf + interval)))
if (not qstat):
return (acf, confint)
if qstat:
(qstat, pvalue) = q_stat(acf[1:], nobs=nobs)
if (alpha is not None):
return (acf, confint, qstat, pvalue)
else:
return (acf, qstat, pvalue) |
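One call exercising the optional outputs above; with both qstat and alpha set, the return order follows the code: acf, confint, qstat, pvalues (white-noise input, arbitrary seed).
import numpy as np
from statsmodels.tsa.stattools import acf
y = np.random.RandomState(1).randn(500)
r, confint, qstats, pvalues = acf(y, nlags=20, qstat=True, alpha=0.05)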
def pacf_yw(x, nlags=40, method='unbiased'):
"Partial autocorrelation estimated with non-recursive yule_walker\n\n Parameters\n ----------\n x : 1d array\n observations of time series for which pacf is calculated\n nlags : int\n largest lag for which pacf is returned\n method : 'unbiased' (default) or 'mle'\n method for the autocovariance calculations in yule walker\n\n Returns\n -------\n pacf : 1d array\n partial autocorrelations, maxlag+1 elements\n\n Notes\n -----\n This solves yule_walker for each desired lag and contains\n currently duplicate calculations.\n "
pacf = [1.0]
for k in range(1, (nlags + 1)):
pacf.append(yule_walker(x, k, method=method)[0][(- 1)])
return np.array(pacf) | 7,176,742,949,149,395,000 | Partial autocorrelation estimated with non-recursive yule_walker
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'unbiased' (default) or 'mle'
method for the autocovariance calculations in yule walker
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves yule_walker for each desired lag and contains
currently duplicate calculations. | statsmodels/tsa/stattools.py | pacf_yw | josef-pkt/statsmodels | python | def pacf_yw(x, nlags=40, method='unbiased'):
"Partial autocorrelation estimated with non-recursive yule_walker\n\n Parameters\n ----------\n x : 1d array\n observations of time series for which pacf is calculated\n nlags : int\n largest lag for which pacf is returned\n method : 'unbiased' (default) or 'mle'\n method for the autocovariance calculations in yule walker\n\n Returns\n -------\n pacf : 1d array\n partial autocorrelations, maxlag+1 elements\n\n Notes\n -----\n This solves yule_walker for each desired lag and contains\n currently duplicate calculations.\n "
pacf = [1.0]
for k in range(1, (nlags + 1)):
pacf.append(yule_walker(x, k, method=method)[0][(- 1)])
return np.array(pacf) |
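A hedged sketch of `pacf_yw` as documented in the row above, on a simulated AR(2) series whose partial autocorrelations should cut off after lag 2. The import path and the simulated data are assumptions for illustration; the function and its `method` values are taken from the row.

```python
import numpy as np
from statsmodels.tsa.stattools import pacf_yw  # assumed import path for this statsmodels version

# Simulate a stationary AR(2) process: x[t] = 0.6 x[t-1] - 0.3 x[t-2] + e[t]
rng = np.random.RandomState(0)
n = 1000
e = rng.standard_normal(n)
x = np.zeros(n)
for t in range(2, n):
    x[t] = 0.6 * x[t - 1] - 0.3 * x[t - 2] + e[t]

p_unbiased = pacf_yw(x, nlags=10, method='unbiased')
p_mle = pacf_yw(x, nlags=10, method='mle')

print(p_unbiased[:4])                       # 1.0 at lag 0, sizeable lags 1-2, near zero after
print(np.max(np.abs(p_unbiased - p_mle)))   # the two denominators differ only slightly here
```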
def pacf_ols(x, nlags=40):
'Calculate partial autocorrelations\n\n Parameters\n ----------\n x : 1d array\n observations of time series for which pacf is calculated\n nlags : int\n Number of lags for which pacf is returned. Lag 0 is not returned.\n\n Returns\n -------\n pacf : 1d array\n partial autocorrelations, maxlag+1 elements\n\n Notes\n -----\n This solves a separate OLS estimation for each desired lag.\n '
(xlags, x0) = lagmat(x, nlags, original='sep')
xlags = add_constant(xlags)
pacf = [1.0]
for k in range(1, (nlags + 1)):
res = OLS(x0[k:], xlags[k:, :(k + 1)]).fit()
pacf.append(res.params[(- 1)])
return np.array(pacf) | 6,238,843,439,734,900,000 | Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
Number of lags for which pacf is returned. Lag 0 is included in the result.
Returns
-------
pacf : 1d array
partial autocorrelations, nlags+1 elements
Notes
-----
This solves a separate OLS estimation for each desired lag. | statsmodels/tsa/stattools.py | pacf_ols | josef-pkt/statsmodels | python | def pacf_ols(x, nlags=40):
'Calculate partial autocorrelations\n\n Parameters\n ----------\n x : 1d array\n observations of time series for which pacf is calculated\n nlags : int\n Number of lags for which pacf is returned. Lag 0 is not returned.\n\n Returns\n -------\n pacf : 1d array\n partial autocorrelations, maxlag+1 elements\n\n Notes\n -----\n This solves a separate OLS estimation for each desired lag.\n '
(xlags, x0) = lagmat(x, nlags, original='sep')
xlags = add_constant(xlags)
pacf = [1.0]
for k in range(1, (nlags + 1)):
res = OLS(x0[k:], xlags[k:, :(k + 1)]).fit()
pacf.append(res.params[(- 1)])
return np.array(pacf) |
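The OLS variant above runs one regression of the series on a constant and its own lags per desired lag; a small sketch under the same assumed import path. For an AR(1) series only the lag-1 coefficient should stand out.

```python
import numpy as np
from statsmodels.tsa.stattools import pacf_ols  # assumed import path, as above

# AR(1) with coefficient 0.5
rng = np.random.RandomState(1)
n = 500
e = rng.standard_normal(n)
x = np.zeros(n)
for t in range(1, n):
    x[t] = 0.5 * x[t - 1] + e[t]

p = pacf_ols(x, nlags=5)
print(p.shape)         # (6,): lag 0 through lag 5, with p[0] fixed at 1.0
print(np.round(p, 2))  # roughly [1.0, 0.5, ~0, ~0, ~0, ~0]
```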
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
"\n Partial autocorrelation estimated\n\n Parameters\n ----------\n x : 1d array\n observations of time series for which pacf is calculated\n nlags : int\n largest lag for which pacf is returned\n method : {'ywunbiased', 'ywmle', 'ols'}\n specifies which method for the calculations to use:\n\n - yw or ywunbiased : yule walker with bias correction in denominator\n for acovf. Default.\n - ywm or ywmle : yule walker without bias correction\n - ols - regression of time series on lags of it and on constant\n - ld or ldunbiased : Levinson-Durbin recursion with bias correction\n - ldb or ldbiased : Levinson-Durbin recursion without bias correction\n alpha : float, optional\n If a number is given, the confidence intervals for the given level are\n returned. For instance if alpha=.05, 95 % confidence intervals are\n returned where the standard deviation is computed according to\n 1/sqrt(len(x))\n\n Returns\n -------\n pacf : 1d array\n partial autocorrelations, nlags elements, including lag zero\n confint : array, optional\n Confidence intervals for the PACF. Returned if confint is not None.\n\n Notes\n -----\n This solves yule_walker equations or ols for each desired lag\n and contains currently duplicate calculations.\n "
if (method == 'ols'):
ret = pacf_ols(x, nlags=nlags)
elif (method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']):
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif (method in ['ywm', 'ywmle', 'yw_mle']):
ret = pacf_yw(x, nlags=nlags, method='mle')
elif (method in ['ld', 'ldu', 'ldunbiase', 'ld_unbiased']):
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
elif (method in ['ldb', 'ldbiased', 'ld_biased']):
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if (alpha is not None):
varacf = (1.0 / len(x))
interval = (stats.norm.ppf((1.0 - (alpha / 2.0))) * np.sqrt(varacf))
confint = np.array(lzip((ret - interval), (ret + interval)))
confint[0] = ret[0]
return (ret, confint)
else:
return ret | 2,415,925,801,062,447,000 | Partial autocorrelation estimated
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : {'ywunbiased', 'ywmle', 'ols'}
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf. Default.
- ywm or ywmle : yule walker without bias correction
- ols : regression of the time series on its own lags and a constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
Returns
-------
pacf : 1d array
partial autocorrelations, nlags+1 elements, including lag zero
confint : array, optional
Confidence intervals for the PACF. Returned if alpha is not None.
Notes
-----
This solves yule_walker equations or ols for each desired lag
and currently contains duplicate calculations. | statsmodels/tsa/stattools.py | pacf | josef-pkt/statsmodels | python | def pacf(x, nlags=40, method='ywunbiased', alpha=None):
"\n Partial autocorrelation estimated\n\n Parameters\n ----------\n x : 1d array\n observations of time series for which pacf is calculated\n nlags : int\n largest lag for which pacf is returned\n method : {'ywunbiased', 'ywmle', 'ols'}\n specifies which method for the calculations to use:\n\n - yw or ywunbiased : yule walker with bias correction in denominator\n for acovf. Default.\n - ywm or ywmle : yule walker without bias correction\n - ols - regression of time series on lags of it and on constant\n - ld or ldunbiased : Levinson-Durbin recursion with bias correction\n - ldb or ldbiased : Levinson-Durbin recursion without bias correction\n alpha : float, optional\n If a number is given, the confidence intervals for the given level are\n returned. For instance if alpha=.05, 95 % confidence intervals are\n returned where the standard deviation is computed according to\n 1/sqrt(len(x))\n\n Returns\n -------\n pacf : 1d array\n partial autocorrelations, nlags elements, including lag zero\n confint : array, optional\n Confidence intervals for the PACF. Returned if confint is not None.\n\n Notes\n -----\n This solves yule_walker equations or ols for each desired lag\n and contains currently duplicate calculations.\n "
if (method == 'ols'):
ret = pacf_ols(x, nlags=nlags)
elif (method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']):
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif (method in ['ywm', 'ywmle', 'yw_mle']):
ret = pacf_yw(x, nlags=nlags, method='mle')
elif (method in ['ld', 'ldu', 'ldunbiase', 'ld_unbiased']):
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
elif (method in ['ldb', 'ldbiased', 'ld_biased']):
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if (alpha is not None):
varacf = (1.0 / len(x))
interval = (stats.norm.ppf((1.0 - (alpha / 2.0))) * np.sqrt(varacf))
confint = np.array(lzip((ret - interval), (ret + interval)))
confint[0] = ret[0]
return (ret, confint)
else:
return ret |
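The wrapper above only dispatches on `method` and optionally attaches 1/sqrt(n) confidence bounds. A sketch under the same import assumption, comparing the default Yule-Walker estimate with the OLS one and showing the shape of the optional interval array.

```python
import numpy as np
from statsmodels.tsa.stattools import pacf  # assumed import path, as above

# Reuse an AR(1) series with coefficient 0.6
rng = np.random.RandomState(2)
n = 800
e = rng.standard_normal(n)
x = np.zeros(n)
for t in range(1, n):
    x[t] = 0.6 * x[t - 1] + e[t]

p_yw = pacf(x, nlags=15)                 # default method='ywunbiased' in this version
p_ols = pacf(x, nlags=15, method='ols')
print(np.round(p_yw[:3], 2), np.round(p_ols[:3], 2))  # both near [1.0, 0.6, 0.0]

# With alpha set, a second array of normal-approximation bounds comes back
p_ci, confint = pacf(x, nlags=15, alpha=0.05)
print(confint.shape)   # (16, 2); row 0 is collapsed to the lag-0 value of 1.0
```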
def ccovf(x, y, unbiased=True, demean=True):
' crosscovariance for 1D\n\n Parameters\n ----------\n x, y : arrays\n time series data\n unbiased : boolean\n if True, then denominators is n-k, otherwise n\n\n Returns\n -------\n ccovf : array\n autocovariance function\n\n Notes\n -----\n This uses np.correlate which does full convolution. For very long time\n series it is recommended to use fft convolution instead.\n '
n = len(x)
if demean:
xo = (x - x.mean())
yo = (y - y.mean())
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[(n - 1):] | 4,675,003,449,293,840,000 | crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
if True, then the denominator is n-k, otherwise n
Returns
-------
ccovf : array
cross-covariance function of x and y
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead. | statsmodels/tsa/stattools.py | ccovf | josef-pkt/statsmodels | python | def ccovf(x, y, unbiased=True, demean=True):
' crosscovariance for 1D\n\n Parameters\n ----------\n x, y : arrays\n time series data\n unbiased : boolean\n if True, then denominators is n-k, otherwise n\n\n Returns\n -------\n ccovf : array\n autocovariance function\n\n Notes\n -----\n This uses np.correlate which does full convolution. For very long time\n series it is recommended to use fft convolution instead.\n '
n = len(x)
if demean:
xo = (x - x.mean())
yo = (y - y.mean())
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[(n - 1):] |
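A sketch of `ccovf` on two series where `y` is, up to noise and wrap-around, the original shifted so that `y[t] ≈ x[t+3]`; given `np.correlate`'s orientation and the final slice in the code above, the cross-covariance is expected to peak at lag 3. Import path and data are assumptions.

```python
import numpy as np
from statsmodels.tsa.stattools import ccovf  # assumed import path

rng = np.random.RandomState(3)
n = 400
x = rng.standard_normal(n)
y = np.roll(x, -3) + 0.1 * rng.standard_normal(n)  # y[t] ~ x[t+3], apart from noise/wrap-around

cov_xy = ccovf(x, y, unbiased=True, demean=True)
print(cov_xy.shape)            # (400,): only lags 0..n-1 survive the final slice
print(int(np.argmax(cov_xy)))  # expected to be 3 for this construction
```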
def ccf(x, y, unbiased=True):
'cross-correlation function for 1d\n\n Parameters\n ----------\n x, y : arrays\n time series data\n unbiased : boolean\n if True, then denominators for autocovariance is n-k, otherwise n\n\n Returns\n -------\n ccf : array\n cross-correlation function of x and y\n\n Notes\n -----\n This is based np.correlate which does full convolution. For very long time\n series it is recommended to use fft convolution instead.\n\n If unbiased is true, the denominator for the autocovariance is adjusted\n but the autocorrelation is not an unbiased estimtor.\n\n '
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return (cvf / (np.std(x) * np.std(y))) | -2,296,032,143,774,643,000 | cross-correlation function for 1d
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
if True, then the denominator for the autocovariance is n-k, otherwise n
Returns
-------
ccf : array
cross-correlation function of x and y
Notes
-----
This is based on np.correlate, which does full convolution. For very long time
series it is recommended to use fft convolution instead.
If unbiased is true, the denominator for the autocovariance is adjusted
but the autocorrelation is not an unbiased estimator.
'cross-correlation function for 1d\n\n Parameters\n ----------\n x, y : arrays\n time series data\n unbiased : boolean\n if True, then denominators for autocovariance is n-k, otherwise n\n\n Returns\n -------\n ccf : array\n cross-correlation function of x and y\n\n Notes\n -----\n This is based np.correlate which does full convolution. For very long time\n series it is recommended to use fft convolution instead.\n\n If unbiased is true, the denominator for the autocovariance is adjusted\n but the autocorrelation is not an unbiased estimtor.\n\n '
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return (cvf / (np.std(x) * np.std(y))) |
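`ccf` is just `ccovf` rescaled by the two standard deviations, so the same lag convention applies; a minimal sketch under the same assumptions.

```python
import numpy as np
from statsmodels.tsa.stattools import ccf  # assumed import path

rng = np.random.RandomState(4)
n = 400
x = rng.standard_normal(n)
y = np.roll(x, -5) + 0.2 * rng.standard_normal(n)  # y[t] ~ x[t+5]

r_xy = ccf(x, y, unbiased=True)
print(round(float(r_xy[5]), 2))  # close to 1.0: strong cross-correlation at lag 5
print(round(float(r_xy[0]), 2))  # near 0.0: white noise is nearly uncorrelated at lag 0
```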
def periodogram(X):
'\n Returns the periodogram for the natural frequency of X\n\n Parameters\n ----------\n X : array-like\n Array for which the periodogram is desired.\n\n Returns\n -------\n pgram : array\n 1./len(X) * np.abs(np.fft.fft(X))**2\n\n\n References\n ----------\n Brockwell and Davis.\n '
X = np.asarray(X)
pergr = ((1.0 / len(X)) * (np.abs(np.fft.fft(X)) ** 2))
pergr[0] = 0.0
return pergr | 6,612,493,813,122,968,000 | Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis. | statsmodels/tsa/stattools.py | periodogram | josef-pkt/statsmodels | python | def periodogram(X):
'\n Returns the periodogram for the natural frequency of X\n\n Parameters\n ----------\n X : array-like\n Array for which the periodogram is desired.\n\n Returns\n -------\n pgram : array\n 1./len(X) * np.abs(np.fft.fft(X))**2\n\n\n References\n ----------\n Brockwell and Davis.\n '
X = np.asarray(X)
pergr = ((1.0 / len(X)) * (np.abs(np.fft.fft(X)) ** 2))
pergr[0] = 0.0
return pergr |
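A sketch of `periodogram` on a sinusoid buried in noise; the spectrum should spike at the sinusoid's discrete frequency (bin 8 of 256 here). The import path is an assumption; newer statsmodels releases may steer users toward scipy.signal.periodogram instead.

```python
import numpy as np
from statsmodels.tsa.stattools import periodogram  # assumed import path

n = 256
t = np.arange(n)
rng = np.random.RandomState(5)
# Eight full cycles over the sample, plus white noise
X = np.sin(2 * np.pi * 8 * t / n) + 0.5 * rng.standard_normal(n)

pgram = periodogram(X)
print(pgram.shape)                      # (256,); element 0 is zeroed out by the function
print(int(np.argmax(pgram[:n // 2])))   # expected to be 8, the cycle count of the sinusoid
```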
def levinson_durbin(s, nlags=10, isacov=False):
'Levinson-Durbin recursion for autoregressive processes\n\n Parameters\n ----------\n s : array_like\n If isacov is False, then this is the time series. If iasacov is true\n then this is interpreted as autocovariance starting with lag 0\n nlags : integer\n largest lag to include in recursion or order of the autoregressive\n process\n isacov : boolean\n flag to indicate whether the first argument, s, contains the\n autocovariances or the data series.\n\n Returns\n -------\n sigma_v : float\n estimate of the error variance ?\n arcoefs : ndarray\n estimate of the autoregressive coefficients\n pacf : ndarray\n partial autocorrelation function\n sigma : ndarray\n entire sigma array from intermediate result, last value is sigma_v\n phi : ndarray\n entire phi array from intermediate result, last column contains\n autoregressive coefficients for AR(nlags) with a leading 1\n\n Notes\n -----\n This function returns currently all results, but maybe we drop sigma and\n phi from the returns.\n\n If this function is called with the time series (isacov=False), then the\n sample autocovariance function is calculated with the default options\n (biased, no fft).\n '
s = np.asarray(s)
order = nlags
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:(order + 1)]
phi = np.zeros(((order + 1), (order + 1)), 'd')
sig = np.zeros((order + 1))
phi[(1, 1)] = (sxx_m[1] / sxx_m[0])
sig[1] = (sxx_m[0] - (phi[(1, 1)] * sxx_m[1]))
for k in range(2, (order + 1)):
phi[(k, k)] = ((sxx_m[k] - np.dot(phi[1:k, (k - 1)], sxx_m[1:k][::(- 1)])) / sig[(k - 1)])
for j in range(1, k):
phi[(j, k)] = (phi[(j, (k - 1))] - (phi[(k, k)] * phi[((k - j), (k - 1))]))
sig[k] = (sig[(k - 1)] * (1 - (phi[(k, k)] ** 2)))
sigma_v = sig[(- 1)]
arcoefs = phi[1:, (- 1)]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.0
return (sigma_v, arcoefs, pacf_, sig, phi) | -1,512,750,122,343,626,200 | Levinson-Durbin recursion for autoregressive processes
Parameters
----------
s : array_like
If isacov is False, then this is the time series. If isacov is True
then this is interpreted as autocovariance starting with lag 0
nlags : integer
largest lag to include in recursion or order of the autoregressive
process
isacov : boolean
flag to indicate whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
estimate of the error (innovation) variance of the fitted AR(nlags) process
arcoefs : ndarray
estimate of the autoregressive coefficients
pacf : ndarray
partial autocorrelation function
sigma : ndarray
entire sigma array from intermediate result, last value is sigma_v
phi : ndarray
entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags) with a leading 1
Notes
-----
This function currently returns all results, but sigma and phi may be
dropped from the returns in the future.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft). | statsmodels/tsa/stattools.py | levinson_durbin | josef-pkt/statsmodels | python | def levinson_durbin(s, nlags=10, isacov=False):
'Levinson-Durbin recursion for autoregressive processes\n\n Parameters\n ----------\n s : array_like\n If isacov is False, then this is the time series. If iasacov is true\n then this is interpreted as autocovariance starting with lag 0\n nlags : integer\n largest lag to include in recursion or order of the autoregressive\n process\n isacov : boolean\n flag to indicate whether the first argument, s, contains the\n autocovariances or the data series.\n\n Returns\n -------\n sigma_v : float\n estimate of the error variance ?\n arcoefs : ndarray\n estimate of the autoregressive coefficients\n pacf : ndarray\n partial autocorrelation function\n sigma : ndarray\n entire sigma array from intermediate result, last value is sigma_v\n phi : ndarray\n entire phi array from intermediate result, last column contains\n autoregressive coefficients for AR(nlags) with a leading 1\n\n Notes\n -----\n This function returns currently all results, but maybe we drop sigma and\n phi from the returns.\n\n If this function is called with the time series (isacov=False), then the\n sample autocovariance function is calculated with the default options\n (biased, no fft).\n '
s = np.asarray(s)
order = nlags
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:(order + 1)]
phi = np.zeros(((order + 1), (order + 1)), 'd')
sig = np.zeros((order + 1))
phi[(1, 1)] = (sxx_m[1] / sxx_m[0])
sig[1] = (sxx_m[0] - (phi[(1, 1)] * sxx_m[1]))
for k in range(2, (order + 1)):
phi[(k, k)] = ((sxx_m[k] - np.dot(phi[1:k, (k - 1)], sxx_m[1:k][::(- 1)])) / sig[(k - 1)])
for j in range(1, k):
phi[(j, k)] = (phi[(j, (k - 1))] - (phi[(k, k)] * phi[((k - j), (k - 1))]))
sig[k] = (sig[(k - 1)] * (1 - (phi[(k, k)] ** 2)))
sigma_v = sig[(- 1)]
arcoefs = phi[1:, (- 1)]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.0
return (sigma_v, arcoefs, pacf_, sig, phi) |
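To close the section, a sketch that recovers AR(2) coefficients with the recursion above; per the Returns block, `sigma_v` is the innovation-variance estimate and `arcoefs` the AR coefficients. Import path, seed, and coefficients are assumptions for illustration.

```python
import numpy as np
from statsmodels.tsa.stattools import levinson_durbin  # assumed import path

# Simulate an AR(2) process: x[t] = 0.5 x[t-1] - 0.25 x[t-2] + e[t], e ~ N(0, 1)
rng = np.random.RandomState(6)
n = 5000
e = rng.standard_normal(n)
x = np.zeros(n)
for t in range(2, n):
    x[t] = 0.5 * x[t - 1] - 0.25 * x[t - 2] + e[t]

sigma_v, arcoefs, pacf_, sigma, phi = levinson_durbin(x, nlags=2, isacov=False)
print(np.round(arcoefs, 2))      # expected near [0.5, -0.25]
print(round(float(sigma_v), 2))  # expected near 1.0, the innovation variance
print(phi.shape)                 # (3, 3): intermediate coefficient array for orders 0..2
```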