| body (string, 26-98.2k chars) | body_hash (int64) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (string, 1 class) | body_without_docstring (string, 20-98.2k chars) |
|---|---|---|---|---|---|---|---|
def stop(self):
'stop thread'
print('stop tcp_sync uhd message tcp thread')
self.q_quit.put('end') | -3,887,549,335,389,106,700 | stop thread | src/tcp_sync.py | stop | Opendigitalradio/ODR-StaticPrecorrection | python | def stop(self):
print('stop tcp_sync uhd message tcp thread')
self.q_quit.put('end') |
def stop(self):
'stop tcp thread'
self.tcpa.stop() | 3,942,472,123,918,109,000 | stop tcp thread | src/tcp_sync.py | stop | Opendigitalradio/ODR-StaticPrecorrection | python | def stop(self):
self.tcpa.stop() |
def get_msgs(self, num):
'get received messages as string of integer'
out = []
while (len(out) < num):
out.append(self.tcpa.queue.get())
return out | 7,122,424,559,509,667,000 | get received messages as string of integer | src/tcp_sync.py | get_msgs | Opendigitalradio/ODR-StaticPrecorrection | python | def get_msgs(self, num):
out = []
while (len(out) < num):
out.append(self.tcpa.queue.get())
return out |
def get_msgs_fft(self, num):
'\n get received messages as string of integer\n apply fftshift to message\n '
out = []
while (len(out) < num):
out.append(self.tcpa.queue.get())
return [np.fft.fftshift(np.array(o)) for o in out] | 3,706,248,970,104,361,500 | get received messages as string of integer
apply fftshift to message | src/tcp_sync.py | get_msgs_fft | Opendigitalradio/ODR-StaticPrecorrection | python | def get_msgs_fft(self, num):
'\n get received messages as string of integer\n apply fftshift to message\n '
out = []
while (len(out) < num):
out.append(self.tcpa.queue.get())
return [np.fft.fftshift(np.array(o)) for o in out] |
def get_res(self):
'get received messages as string of integer'
out = []
while (not self.tcpa.queue.empty()):
out.append(self.tcpa.queue.get())
return out | 8,593,142,558,867,377,000 | get received messages as string of integer | src/tcp_sync.py | get_res | Opendigitalradio/ODR-StaticPrecorrection | python | def get_res(self):
out = []
while (not self.tcpa.queue.empty()):
out.append(self.tcpa.queue.get())
return out |
def has_msg(self):
'Checks if one or more messages were received and empties the message queue'
return (self.get_res() != '') | -6,734,502,277,231,435,000 | Checks if one or more messages were received and empties the message queue | src/tcp_sync.py | has_msg | Opendigitalradio/ODR-StaticPrecorrection | python | def has_msg(self):
return (self.get_res() != '') |
def SparseSoftmaxCrossEntropyWithLogits(features, labels, is_grad=False, sens=1.0):
'sparse softmax cross entropy with logits'
if is_grad:
return loss_ad.sparse_softmax_cross_entropy_with_logits_ad(labels, features, reduction='mean', grad_scale=sens)
return loss.sparse_softmax_cross_entropy_with_logits(labels, features, reduction='mean') | 7,726,223,615,983,516,000 | sparse softmax cross entropy with logits | python/akg/ms/cce/sparse_softmax_cross_entropy_with_logits.py | SparseSoftmaxCrossEntropyWithLogits | Kiike5/akg | python | def SparseSoftmaxCrossEntropyWithLogits(features, labels, is_grad=False, sens=1.0):
if is_grad:
return loss_ad.sparse_softmax_cross_entropy_with_logits_ad(labels, features, reduction='mean', grad_scale=sens)
return loss.sparse_softmax_cross_entropy_with_logits(labels, features, reduction='mean') |
@requires_version('scipy', '0.14')
def test_savgol_filter():
'Test savgol filtering\n '
h_freq = 10.0
evoked = read_evokeds(fname, 0)
freqs = fftpack.fftfreq(len(evoked.times), (1.0 / evoked.info['sfreq']))
data = np.abs(fftpack.fft(evoked.data))
match_mask = np.logical_and((freqs >= 0), (freqs <= (h_freq / 2.0)))
mismatch_mask = np.logical_and((freqs >= (h_freq * 2)), (freqs < 50.0))
assert_raises(ValueError, evoked.savgol_filter, evoked.info['sfreq'])
evoked.savgol_filter(h_freq)
data_filt = np.abs(fftpack.fft(evoked.data))
assert_allclose(np.mean(data[:, match_mask], 0), np.mean(data_filt[:, match_mask], 0), rtol=0.0001, atol=0.01)
assert_true((np.mean(data[:, mismatch_mask]) > (np.mean(data_filt[:, mismatch_mask]) * 5))) | 1,003,566,939,427,207,700 | Test savgol filtering | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_savgol_filter | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | @requires_version('scipy', '0.14')
def test_savgol_filter():
'\n '
h_freq = 10.0
evoked = read_evokeds(fname, 0)
freqs = fftpack.fftfreq(len(evoked.times), (1.0 / evoked.info['sfreq']))
data = np.abs(fftpack.fft(evoked.data))
match_mask = np.logical_and((freqs >= 0), (freqs <= (h_freq / 2.0)))
mismatch_mask = np.logical_and((freqs >= (h_freq * 2)), (freqs < 50.0))
assert_raises(ValueError, evoked.savgol_filter, evoked.info['sfreq'])
evoked.savgol_filter(h_freq)
data_filt = np.abs(fftpack.fft(evoked.data))
assert_allclose(np.mean(data[:, match_mask], 0), np.mean(data_filt[:, match_mask], 0), rtol=0.0001, atol=0.01)
assert_true((np.mean(data[:, mismatch_mask]) > (np.mean(data_filt[:, mismatch_mask]) * 5))) |
def test_hash_evoked():
'Test evoked hashing\n '
ave = read_evokeds(fname, 0)
ave_2 = read_evokeds(fname, 0)
assert_equal(hash(ave), hash(ave_2))
assert_true((pickle.dumps(ave) == pickle.dumps(ave_2)))
ave_2.data[(0, 0)] -= 1
assert_not_equal(hash(ave), hash(ave_2)) | 4,966,346,371,525,945,000 | Test evoked hashing | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_hash_evoked | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | def test_hash_evoked():
'\n '
ave = read_evokeds(fname, 0)
ave_2 = read_evokeds(fname, 0)
assert_equal(hash(ave), hash(ave_2))
assert_true((pickle.dumps(ave) == pickle.dumps(ave_2)))
ave_2.data[(0, 0)] -= 1
assert_not_equal(hash(ave), hash(ave_2)) |
@slow_test
def test_io_evoked():
'Test IO for evoked data (fif + gz) with integer and str args\n '
tempdir = _TempDir()
ave = read_evokeds(fname, 0)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=0.001))
assert_array_almost_equal(ave.times, ave2.times)
assert_equal(ave.nave, ave2.nave)
assert_equal(ave._aspect_kind, ave2._aspect_kind)
assert_equal(ave.kind, ave2.kind)
assert_equal(ave.last, ave2.last)
assert_equal(ave.first, ave2.first)
assert_true(repr(ave))
ave2 = read_evokeds(fname_gz, 0)
assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-08))
condition = 'Left Auditory'
assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
assert_raises(ValueError, read_evokeds, fname, condition, kind='standard_error')
ave3 = read_evokeds(fname, condition)
assert_array_almost_equal(ave.data, ave3.data, 19)
types = ['Left Auditory', 'Right Auditory', 'Left visual', 'Right visual']
aves1 = read_evokeds(fname)
aves2 = read_evokeds(fname, [0, 1, 2, 3])
aves3 = read_evokeds(fname, types)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
for aves in [aves2, aves3, aves4]:
for [av1, av2] in zip(aves1, aves):
assert_array_almost_equal(av1.data, av2.data)
assert_array_almost_equal(av1.times, av2.times)
assert_equal(av1.nave, av2.nave)
assert_equal(av1.kind, av2.kind)
assert_equal(av1._aspect_kind, av2._aspect_kind)
assert_equal(av1.last, av2.last)
assert_equal(av1.first, av2.first)
assert_equal(av1.comment, av2.comment)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
fname2 = op.join(tempdir, 'test-bad-name.fif')
write_evokeds(fname2, ave)
read_evokeds(fname2)
assert_true((len(w) == 2)) | 6,766,221,181,861,603,000 | Test IO for evoked data (fif + gz) with integer and str args | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_io_evoked | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | @slow_test
def test_io_evoked():
'\n '
tempdir = _TempDir()
ave = read_evokeds(fname, 0)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=0.001))
assert_array_almost_equal(ave.times, ave2.times)
assert_equal(ave.nave, ave2.nave)
assert_equal(ave._aspect_kind, ave2._aspect_kind)
assert_equal(ave.kind, ave2.kind)
assert_equal(ave.last, ave2.last)
assert_equal(ave.first, ave2.first)
assert_true(repr(ave))
ave2 = read_evokeds(fname_gz, 0)
assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-08))
condition = 'Left Auditory'
assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
assert_raises(ValueError, read_evokeds, fname, condition, kind='standard_error')
ave3 = read_evokeds(fname, condition)
assert_array_almost_equal(ave.data, ave3.data, 19)
types = ['Left Auditory', 'Right Auditory', 'Left visual', 'Right visual']
aves1 = read_evokeds(fname)
aves2 = read_evokeds(fname, [0, 1, 2, 3])
aves3 = read_evokeds(fname, types)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
for aves in [aves2, aves3, aves4]:
for [av1, av2] in zip(aves1, aves):
assert_array_almost_equal(av1.data, av2.data)
assert_array_almost_equal(av1.times, av2.times)
assert_equal(av1.nave, av2.nave)
assert_equal(av1.kind, av2.kind)
assert_equal(av1._aspect_kind, av2._aspect_kind)
assert_equal(av1.last, av2.last)
assert_equal(av1.first, av2.first)
assert_equal(av1.comment, av2.comment)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
fname2 = op.join(tempdir, 'test-bad-name.fif')
write_evokeds(fname2, ave)
read_evokeds(fname2)
assert_true((len(w) == 2)) |
def test_shift_time_evoked():
' Test for shifting of time scale\n '
tempdir = _TempDir()
ave = read_evokeds(fname, 0)
ave.shift_time((- 0.1), relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_bshift.shift_time(0.2, relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)
ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_fshift.shift_time((- 0.1), relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)
ave_normal = read_evokeds(fname, 0)
ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
assert_true(np.allclose(ave_normal.data, ave_relative.data, atol=1e-16, rtol=0.001))
assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
assert_equal(ave_normal.last, ave_relative.last)
assert_equal(ave_normal.first, ave_relative.first)
ave = read_evokeds(fname, 0)
ave.shift_time((- 0.3), relative=False)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
assert_true(np.allclose(ave_normal.data, ave_absolute.data, atol=1e-16, rtol=0.001))
assert_equal(ave_absolute.first, int(((- 0.3) * ave.info['sfreq']))) | 8,973,585,115,289,390,000 | Test for shifting of time scale | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_shift_time_evoked | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | def test_shift_time_evoked():
' \n '
tempdir = _TempDir()
ave = read_evokeds(fname, 0)
ave.shift_time((- 0.1), relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_bshift.shift_time(0.2, relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)
ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_fshift.shift_time((- 0.1), relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)
ave_normal = read_evokeds(fname, 0)
ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
assert_true(np.allclose(ave_normal.data, ave_relative.data, atol=1e-16, rtol=0.001))
assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
assert_equal(ave_normal.last, ave_relative.last)
assert_equal(ave_normal.first, ave_relative.first)
ave = read_evokeds(fname, 0)
ave.shift_time((- 0.3), relative=False)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
assert_true(np.allclose(ave_normal.data, ave_absolute.data, atol=1e-16, rtol=0.001))
assert_equal(ave_absolute.first, int(((- 0.3) * ave.info['sfreq']))) |
def test_evoked_resample():
'Test for resampling of evoked data\n '
tempdir = _TempDir()
ave = read_evokeds(fname, 0)
sfreq_normal = ave.info['sfreq']
ave.resample((2 * sfreq_normal))
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_normal = read_evokeds(fname, 0)
ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_new.resample(sfreq_normal)
assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
assert_array_almost_equal(ave_normal.times, ave_new.times)
assert_equal(ave_normal.nave, ave_new.nave)
assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
assert_equal(ave_normal.kind, ave_new.kind)
assert_equal(ave_normal.last, ave_new.last)
assert_equal(ave_normal.first, ave_new.first)
assert_true((len(ave_up.times) == (2 * len(ave_normal.times))))
assert_true((ave_up.data.shape[1] == (2 * ave_normal.data.shape[1]))) | 341,372,984,005,174,200 | Test for resampling of evoked data | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_evoked_resample | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | def test_evoked_resample():
'\n '
tempdir = _TempDir()
ave = read_evokeds(fname, 0)
sfreq_normal = ave.info['sfreq']
ave.resample((2 * sfreq_normal))
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_normal = read_evokeds(fname, 0)
ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_new.resample(sfreq_normal)
assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
assert_array_almost_equal(ave_normal.times, ave_new.times)
assert_equal(ave_normal.nave, ave_new.nave)
assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
assert_equal(ave_normal.kind, ave_new.kind)
assert_equal(ave_normal.last, ave_new.last)
assert_equal(ave_normal.first, ave_new.first)
assert_true((len(ave_up.times) == (2 * len(ave_normal.times))))
assert_true((ave_up.data.shape[1] == (2 * ave_normal.data.shape[1]))) |
def test_evoked_detrend():
'Test for detrending evoked data\n '
ave = read_evokeds(fname, 0)
ave_normal = read_evokeds(fname, 0)
ave.detrend(0)
ave_normal.data -= np.mean(ave_normal.data, axis=1)[:, np.newaxis]
picks = pick_types(ave.info, meg=True, eeg=True, exclude='bads')
assert_true(np.allclose(ave.data[picks], ave_normal.data[picks], rtol=1e-08, atol=1e-16)) | -6,973,230,050,582,125,000 | Test for detrending evoked data | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_evoked_detrend | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | def test_evoked_detrend():
'\n '
ave = read_evokeds(fname, 0)
ave_normal = read_evokeds(fname, 0)
ave.detrend(0)
ave_normal.data -= np.mean(ave_normal.data, axis=1)[:, np.newaxis]
picks = pick_types(ave.info, meg=True, eeg=True, exclude='bads')
assert_true(np.allclose(ave.data[picks], ave_normal.data[picks], rtol=1e-08, atol=1e-16)) |
@requires_pandas
def test_to_data_frame():
'Test evoked Pandas exporter'
ave = read_evokeds(fname, 0)
assert_raises(ValueError, ave.to_data_frame, picks=np.arange(400))
df = ave.to_data_frame()
assert_true((df.columns == ave.ch_names).all())
df = ave.to_data_frame(index=None).reset_index('time')
assert_true(('time' in df.columns))
assert_array_equal(df.values[:, 1], (ave.data[0] * 10000000000000.0))
assert_array_equal(df.values[:, 3], (ave.data[2] * 1000000000000000.0)) | 8,547,556,037,848,418,000 | Test evoked Pandas exporter | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_to_data_frame | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | @requires_pandas
def test_to_data_frame():
ave = read_evokeds(fname, 0)
assert_raises(ValueError, ave.to_data_frame, picks=np.arange(400))
df = ave.to_data_frame()
assert_true((df.columns == ave.ch_names).all())
df = ave.to_data_frame(index=None).reset_index('time')
assert_true(('time' in df.columns))
assert_array_equal(df.values[:, 1], (ave.data[0] * 10000000000000.0))
assert_array_equal(df.values[:, 3], (ave.data[2] * 1000000000000000.0)) |
def test_evoked_proj():
'Test SSP proj operations\n '
for proj in [True, False]:
ave = read_evokeds(fname, condition=0, proj=proj)
assert_true(all(((p['active'] == proj) for p in ave.info['projs'])))
if proj:
assert_raises(ValueError, ave.add_proj, [], {'remove_existing': True})
assert_raises(ValueError, ave.del_proj, 0)
else:
projs = deepcopy(ave.info['projs'])
n_proj = len(ave.info['projs'])
ave.del_proj(0)
assert_true((len(ave.info['projs']) == (n_proj - 1)))
ave.add_proj(projs, remove_existing=False)
assert_true((len(ave.info['projs']) == ((2 * n_proj) - 1)))
ave.add_proj(projs, remove_existing=True)
assert_true((len(ave.info['projs']) == n_proj))
ave = read_evokeds(fname, condition=0, proj=False)
data = ave.data.copy()
ave.apply_proj()
assert_allclose(np.dot(ave._projector, data), ave.data) | -3,701,147,736,561,590,300 | Test SSP proj operations | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_evoked_proj | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | def test_evoked_proj():
'\n '
for proj in [True, False]:
ave = read_evokeds(fname, condition=0, proj=proj)
assert_true(all(((p['active'] == proj) for p in ave.info['projs'])))
if proj:
assert_raises(ValueError, ave.add_proj, [], {'remove_existing': True})
assert_raises(ValueError, ave.del_proj, 0)
else:
projs = deepcopy(ave.info['projs'])
n_proj = len(ave.info['projs'])
ave.del_proj(0)
assert_true((len(ave.info['projs']) == (n_proj - 1)))
ave.add_proj(projs, remove_existing=False)
assert_true((len(ave.info['projs']) == ((2 * n_proj) - 1)))
ave.add_proj(projs, remove_existing=True)
assert_true((len(ave.info['projs']) == n_proj))
ave = read_evokeds(fname, condition=0, proj=False)
data = ave.data.copy()
ave.apply_proj()
assert_allclose(np.dot(ave._projector, data), ave.data) |
def test_get_peak():
'Test peak getter\n '
evoked = read_evokeds(fname, condition=0, proj=True)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=1)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmax=0.9)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=0.02, tmax=0.01)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', mode='foo')
assert_raises(RuntimeError, evoked.get_peak, ch_type=None, mode='foo')
assert_raises(ValueError, evoked.get_peak, ch_type='misc', mode='foo')
(ch_idx, time_idx) = evoked.get_peak(ch_type='mag')
assert_true((ch_idx in evoked.ch_names))
assert_true((time_idx in evoked.times))
(ch_idx, time_idx) = evoked.get_peak(ch_type='mag', time_as_index=True)
assert_true((time_idx < len(evoked.times)))
data = np.array([[0.0, 1.0, 2.0], [0.0, (- 3.0), 0]])
times = np.array([0.1, 0.2, 0.3])
(ch_idx, time_idx) = _get_peak(data, times, mode='abs')
assert_equal(ch_idx, 1)
assert_equal(time_idx, 1)
(ch_idx, time_idx) = _get_peak((data * (- 1)), times, mode='neg')
assert_equal(ch_idx, 0)
assert_equal(time_idx, 2)
(ch_idx, time_idx) = _get_peak(data, times, mode='pos')
assert_equal(ch_idx, 0)
assert_equal(time_idx, 2)
assert_raises(ValueError, _get_peak, (data + 1000.0), times, mode='neg')
assert_raises(ValueError, _get_peak, (data - 1000.0), times, mode='pos') | 8,836,964,148,858,811,000 | Test peak getter | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_get_peak | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | def test_get_peak():
'\n '
evoked = read_evokeds(fname, condition=0, proj=True)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=1)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmax=0.9)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=0.02, tmax=0.01)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', mode='foo')
assert_raises(RuntimeError, evoked.get_peak, ch_type=None, mode='foo')
assert_raises(ValueError, evoked.get_peak, ch_type='misc', mode='foo')
(ch_idx, time_idx) = evoked.get_peak(ch_type='mag')
assert_true((ch_idx in evoked.ch_names))
assert_true((time_idx in evoked.times))
(ch_idx, time_idx) = evoked.get_peak(ch_type='mag', time_as_index=True)
assert_true((time_idx < len(evoked.times)))
data = np.array([[0.0, 1.0, 2.0], [0.0, (- 3.0), 0]])
times = np.array([0.1, 0.2, 0.3])
(ch_idx, time_idx) = _get_peak(data, times, mode='abs')
assert_equal(ch_idx, 1)
assert_equal(time_idx, 1)
(ch_idx, time_idx) = _get_peak((data * (- 1)), times, mode='neg')
assert_equal(ch_idx, 0)
assert_equal(time_idx, 2)
(ch_idx, time_idx) = _get_peak(data, times, mode='pos')
assert_equal(ch_idx, 0)
assert_equal(time_idx, 2)
assert_raises(ValueError, _get_peak, (data + 1000.0), times, mode='neg')
assert_raises(ValueError, _get_peak, (data - 1000.0), times, mode='pos') |
def test_drop_channels_mixin():
'Test channels-dropping functionality\n '
evoked = read_evokeds(fname, condition=0, proj=True)
drop_ch = evoked.ch_names[:3]
ch_names = evoked.ch_names[3:]
ch_names_orig = evoked.ch_names
dummy = evoked.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, evoked.ch_names)
assert_equal(len(ch_names_orig), len(evoked.data))
evoked.drop_channels(drop_ch)
assert_equal(ch_names, evoked.ch_names)
assert_equal(len(ch_names), len(evoked.data)) | -6,536,372,745,575,585,000 | Test channels-dropping functionality | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_drop_channels_mixin | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | def test_drop_channels_mixin():
'\n '
evoked = read_evokeds(fname, condition=0, proj=True)
drop_ch = evoked.ch_names[:3]
ch_names = evoked.ch_names[3:]
ch_names_orig = evoked.ch_names
dummy = evoked.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, evoked.ch_names)
assert_equal(len(ch_names_orig), len(evoked.data))
evoked.drop_channels(drop_ch)
assert_equal(ch_names, evoked.ch_names)
assert_equal(len(ch_names), len(evoked.data)) |
def test_pick_channels_mixin():
'Test channel-picking functionality\n '
evoked = read_evokeds(fname, condition=0, proj=True)
ch_names = evoked.ch_names[:3]
ch_names_orig = evoked.ch_names
dummy = evoked.pick_channels(ch_names, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, evoked.ch_names)
assert_equal(len(ch_names_orig), len(evoked.data))
evoked.pick_channels(ch_names)
assert_equal(ch_names, evoked.ch_names)
assert_equal(len(ch_names), len(evoked.data))
evoked = read_evokeds(fname, condition=0, proj=True)
assert_true(('meg' in evoked))
assert_true(('eeg' in evoked))
evoked.pick_types(meg=False, eeg=True)
assert_true(('meg' not in evoked))
assert_true(('eeg' in evoked))
assert_true((len(evoked.ch_names) == 60)) | 3,259,110,269,856,583,700 | Test channel-picking functionality | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_pick_channels_mixin | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | def test_pick_channels_mixin():
'\n '
evoked = read_evokeds(fname, condition=0, proj=True)
ch_names = evoked.ch_names[:3]
ch_names_orig = evoked.ch_names
dummy = evoked.pick_channels(ch_names, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, evoked.ch_names)
assert_equal(len(ch_names_orig), len(evoked.data))
evoked.pick_channels(ch_names)
assert_equal(ch_names, evoked.ch_names)
assert_equal(len(ch_names), len(evoked.data))
evoked = read_evokeds(fname, condition=0, proj=True)
assert_true(('meg' in evoked))
assert_true(('eeg' in evoked))
evoked.pick_types(meg=False, eeg=True)
assert_true(('meg' not in evoked))
assert_true(('eeg' in evoked))
assert_true((len(evoked.ch_names) == 60)) |
def test_equalize_channels():
'Test equalization of channels\n '
evoked1 = read_evokeds(fname, condition=0, proj=True)
evoked2 = evoked1.copy()
ch_names = evoked1.ch_names[2:]
evoked1.drop_channels(evoked1.ch_names[:1])
evoked2.drop_channels(evoked2.ch_names[1:2])
my_comparison = [evoked1, evoked2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names) | -7,844,826,099,472,365,000 | Test equalization of channels | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_equalize_channels | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | def test_equalize_channels():
'\n '
evoked1 = read_evokeds(fname, condition=0, proj=True)
evoked2 = evoked1.copy()
ch_names = evoked1.ch_names[2:]
evoked1.drop_channels(evoked1.ch_names[:1])
evoked2.drop_channels(evoked2.ch_names[1:2])
my_comparison = [evoked1, evoked2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names) |
def test_evoked_arithmetic():
'Test evoked arithmetic\n '
ev = read_evokeds(fname, condition=0)
ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
ev2 = EvokedArray((- np.ones_like(ev.data)), ev.info, ev.times[0], nave=10)
ev = (ev1 + ev2)
assert_equal(ev.nave, (ev1.nave + ev2.nave))
assert_allclose(ev.data, ((1.0 / 3.0) * np.ones_like(ev.data)))
ev = (ev1 - ev2)
assert_equal(ev.nave, (ev1.nave + ev2.nave))
assert_equal(ev.comment, ((ev1.comment + ' - ') + ev2.comment))
assert_allclose(ev.data, np.ones_like(ev1.data))
old_comment1 = ev1.comment
old_comment2 = ev2.comment
ev1.comment = None
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
ev = (ev1 - ev2)
assert_equal(ev.comment, 'unknown')
ev1.comment = old_comment1
ev2.comment = old_comment2
ev = combine_evoked([ev1, ev2], weights='equal')
assert_allclose(ev.data, np.zeros_like(ev1.data))
ev = combine_evoked([ev1, ev2], weights=[1, 0])
assert_equal(ev.nave, ev1.nave)
assert_allclose(ev.data, ev1.data)
ev = combine_evoked([ev1, ev2], weights=[1, (- 1)])
assert_allclose(ev.data, (2 * np.ones_like(ev1.data)))
assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])
(evoked1, evoked2) = read_evokeds(fname, condition=[0, 1], proj=True)
ch_names = evoked1.ch_names[2:]
evoked1.info['bads'] = ['EEG 008']
evoked1.drop_channels(evoked1.ch_names[:1])
evoked2.drop_channels(evoked2.ch_names[1:2])
gave = grand_average([evoked1, evoked2])
assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
assert_equal(ch_names, gave.ch_names)
assert_equal(gave.nave, 2) | 4,783,975,814,896,653,000 | Test evoked arithmetic | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_evoked_arithmetic | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | def test_evoked_arithmetic():
'\n '
ev = read_evokeds(fname, condition=0)
ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
ev2 = EvokedArray((- np.ones_like(ev.data)), ev.info, ev.times[0], nave=10)
ev = (ev1 + ev2)
assert_equal(ev.nave, (ev1.nave + ev2.nave))
assert_allclose(ev.data, ((1.0 / 3.0) * np.ones_like(ev.data)))
ev = (ev1 - ev2)
assert_equal(ev.nave, (ev1.nave + ev2.nave))
assert_equal(ev.comment, ((ev1.comment + ' - ') + ev2.comment))
assert_allclose(ev.data, np.ones_like(ev1.data))
old_comment1 = ev1.comment
old_comment2 = ev2.comment
ev1.comment = None
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
ev = (ev1 - ev2)
assert_equal(ev.comment, 'unknown')
ev1.comment = old_comment1
ev2.comment = old_comment2
ev = combine_evoked([ev1, ev2], weights='equal')
assert_allclose(ev.data, np.zeros_like(ev1.data))
ev = combine_evoked([ev1, ev2], weights=[1, 0])
assert_equal(ev.nave, ev1.nave)
assert_allclose(ev.data, ev1.data)
ev = combine_evoked([ev1, ev2], weights=[1, (- 1)])
assert_allclose(ev.data, (2 * np.ones_like(ev1.data)))
assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])
(evoked1, evoked2) = read_evokeds(fname, condition=[0, 1], proj=True)
ch_names = evoked1.ch_names[2:]
evoked1.info['bads'] = ['EEG 008']
evoked1.drop_channels(evoked1.ch_names[:1])
evoked2.drop_channels(evoked2.ch_names[1:2])
gave = grand_average([evoked1, evoked2])
assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
assert_equal(ch_names, gave.ch_names)
assert_equal(gave.nave, 2) |
def test_array_epochs():
'Test creating evoked from array\n '
tempdir = _TempDir()
rng = np.random.RandomState(42)
data1 = rng.randn(20, 60)
sfreq = 1000.0
ch_names = [('EEG %03d' % (i + 1)) for i in range(20)]
types = (['eeg'] * 20)
info = create_info(ch_names, sfreq, types)
evoked1 = EvokedArray(data1, info, tmin=(- 0.01))
tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
evoked1.save(tmp_fname)
evoked2 = read_evokeds(tmp_fname)[0]
data2 = evoked2.data
assert_allclose(data1, data2)
assert_allclose(evoked1.times, evoked2.times)
assert_equal(evoked1.first, evoked2.first)
assert_equal(evoked1.last, evoked2.last)
assert_equal(evoked1.kind, evoked2.kind)
assert_equal(evoked1.nave, evoked2.nave)
data3 = data1[np.newaxis, :, :]
events = np.c_[(10, 0, 1)]
evoked3 = EpochsArray(data3, info, events=events, tmin=(- 0.01)).average()
assert_allclose(evoked1.data, evoked3.data)
assert_allclose(evoked1.times, evoked3.times)
assert_equal(evoked1.first, evoked3.first)
assert_equal(evoked1.last, evoked3.last)
assert_equal(evoked1.kind, evoked3.kind)
assert_equal(evoked1.nave, evoked3.nave)
ch_names = [('EEG %03d' % (i + 1)) for i in range(19)]
types = (['eeg'] * 19)
info = create_info(ch_names, sfreq, types)
assert_raises(ValueError, EvokedArray, data1, info, tmin=(- 0.01)) | 1,354,860,884,603,699,000 | Test creating evoked from array | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_array_epochs | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | def test_array_epochs():
'\n '
tempdir = _TempDir()
rng = np.random.RandomState(42)
data1 = rng.randn(20, 60)
sfreq = 1000.0
ch_names = [('EEG %03d' % (i + 1)) for i in range(20)]
types = (['eeg'] * 20)
info = create_info(ch_names, sfreq, types)
evoked1 = EvokedArray(data1, info, tmin=(- 0.01))
tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
evoked1.save(tmp_fname)
evoked2 = read_evokeds(tmp_fname)[0]
data2 = evoked2.data
assert_allclose(data1, data2)
assert_allclose(evoked1.times, evoked2.times)
assert_equal(evoked1.first, evoked2.first)
assert_equal(evoked1.last, evoked2.last)
assert_equal(evoked1.kind, evoked2.kind)
assert_equal(evoked1.nave, evoked2.nave)
data3 = data1[np.newaxis, :, :]
events = np.c_[(10, 0, 1)]
evoked3 = EpochsArray(data3, info, events=events, tmin=(- 0.01)).average()
assert_allclose(evoked1.data, evoked3.data)
assert_allclose(evoked1.times, evoked3.times)
assert_equal(evoked1.first, evoked3.first)
assert_equal(evoked1.last, evoked3.last)
assert_equal(evoked1.kind, evoked3.kind)
assert_equal(evoked1.nave, evoked3.nave)
ch_names = [('EEG %03d' % (i + 1)) for i in range(19)]
types = (['eeg'] * 19)
info = create_info(ch_names, sfreq, types)
assert_raises(ValueError, EvokedArray, data1, info, tmin=(- 0.01)) |
def test_add_channels():
'Test evoked splitting / re-appending channel types\n '
evoked = read_evokeds(fname, condition=0)
evoked.info['buffer_size_sec'] = None
evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
evoked_meg = evoked.pick_types(meg=True, copy=True)
evoked_stim = evoked.pick_types(meg=False, stim=True, copy=True)
evoked_eeg_meg = evoked.pick_types(meg=True, eeg=True, copy=True)
evoked_new = evoked_meg.add_channels([evoked_eeg, evoked_stim], copy=True)
assert_true(all(((ch in evoked_new.ch_names) for ch in (evoked_stim.ch_names + evoked_meg.ch_names))))
evoked_new = evoked_meg.add_channels([evoked_eeg], copy=True)
assert_true(((ch in evoked_new.ch_names) for ch in evoked.ch_names))
assert_array_equal(evoked_new.data, evoked_eeg_meg.data)
assert_true(all(((ch not in evoked_new.ch_names) for ch in evoked_stim.ch_names)))
evoked_badsf = evoked_eeg.copy()
evoked_badsf.info['sfreq'] = 3.1415927
evoked_eeg = evoked_eeg.crop((- 0.1), 0.1)
assert_raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf])
assert_raises(AssertionError, evoked_meg.add_channels, [evoked_eeg])
assert_raises(ValueError, evoked_meg.add_channels, [evoked_meg])
assert_raises(AssertionError, evoked_meg.add_channels, evoked_badsf) | 8,884,444,190,310,556,000 | Test evoked splitting / re-appending channel types | python-packages/mne-python-0.10/mne/tests/test_evoked.py | test_add_channels | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python | def test_add_channels():
'\n '
evoked = read_evokeds(fname, condition=0)
evoked.info['buffer_size_sec'] = None
evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
evoked_meg = evoked.pick_types(meg=True, copy=True)
evoked_stim = evoked.pick_types(meg=False, stim=True, copy=True)
evoked_eeg_meg = evoked.pick_types(meg=True, eeg=True, copy=True)
evoked_new = evoked_meg.add_channels([evoked_eeg, evoked_stim], copy=True)
assert_true(all(((ch in evoked_new.ch_names) for ch in (evoked_stim.ch_names + evoked_meg.ch_names))))
evoked_new = evoked_meg.add_channels([evoked_eeg], copy=True)
assert_true(((ch in evoked_new.ch_names) for ch in evoked.ch_names))
assert_array_equal(evoked_new.data, evoked_eeg_meg.data)
assert_true(all(((ch not in evoked_new.ch_names) for ch in evoked_stim.ch_names)))
evoked_badsf = evoked_eeg.copy()
evoked_badsf.info['sfreq'] = 3.1415927
evoked_eeg = evoked_eeg.crop((- 0.1), 0.1)
assert_raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf])
assert_raises(AssertionError, evoked_meg.add_channels, [evoked_eeg])
assert_raises(ValueError, evoked_meg.add_channels, [evoked_meg])
assert_raises(AssertionError, evoked_meg.add_channels, evoked_badsf) |
def prob_mac_given_loc(self, mac, val, loc, positive):
'\n Determine the P(mac=val | loc) (positive)\n Determine the P(mac=val | ~loc) (not positive)\n '
name = '{}{}{}{}'.format(mac, val, loc, positive)
cached = cache.get(name)
if (cached != None):
return cached
P = 0.005
nameData = '{}{}{}'.format(mac, loc, positive)
cached = cache.get(nameData)
if (cached != None):
if (val in cached):
P = cached[val]
return P
db = sqlite3.connect(self.db_name)
c = db.cursor()
if positive:
c.execute('SELECT val,count FROM data WHERE loc = ? AND mac = ?', (loc, mac))
else:
c.execute('SELECT val,count FROM data WHERE loc != ? AND mac = ?', (loc, mac))
val_to_count = {}
for row in c.fetchall():
val_to_count[row[0]] = row[1]
db.close()
new_val_to_count = copy.deepcopy(val_to_count)
width = 3
for v in val_to_count:
for x in range(((- 1) * (width ** 3)), ((width ** 3) + 1)):
addend = int(round((100 * norm.pdf(0, loc=x, scale=width))))
if (addend <= 0):
continue
if ((v + x) not in new_val_to_count):
new_val_to_count[(v + x)] = 0
new_val_to_count[(v + x)] = (new_val_to_count[(v + x)] + addend)
total = 0
for v in new_val_to_count:
total += new_val_to_count[v]
for v in new_val_to_count:
new_val_to_count[v] = (new_val_to_count[v] / total)
P = 0.005
if (val in new_val_to_count):
P = new_val_to_count[val]
cache[name] = P
cache[nameData] = new_val_to_count
return P | 6,535,456,042,018,533,000 | Determine the P(mac=val | loc) (positive)
Determine the P(mac=val | ~loc) (not positive) | server/ai/src/naive_bayes.py | prob_mac_given_loc | ChuVal/Respaldo2 | python | def prob_mac_given_loc(self, mac, val, loc, positive):
'\n Determine the P(mac=val | loc) (positive)\n Determine the P(mac=val | ~loc) (not positive)\n '
name = '{}{}{}{}'.format(mac, val, loc, positive)
cached = cache.get(name)
if (cached != None):
return cached
P = 0.005
nameData = '{}{}{}'.format(mac, loc, positive)
cached = cache.get(nameData)
if (cached != None):
if (val in cached):
P = cached[val]
return P
db = sqlite3.connect(self.db_name)
c = db.cursor()
if positive:
c.execute('SELECT val,count FROM data WHERE loc = ? AND mac = ?', (loc, mac))
else:
c.execute('SELECT val,count FROM data WHERE loc != ? AND mac = ?', (loc, mac))
val_to_count = {}
for row in c.fetchall():
val_to_count[row[0]] = row[1]
db.close()
new_val_to_count = copy.deepcopy(val_to_count)
width = 3
for v in val_to_count:
for x in range(((- 1) * (width ** 3)), ((width ** 3) + 1)):
addend = int(round((100 * norm.pdf(0, loc=x, scale=width))))
if (addend <= 0):
continue
if ((v + x) not in new_val_to_count):
new_val_to_count[(v + x)] = 0
new_val_to_count[(v + x)] = (new_val_to_count[(v + x)] + addend)
total = 0
for v in new_val_to_count:
total += new_val_to_count[v]
for v in new_val_to_count:
new_val_to_count[v] = (new_val_to_count[v] / total)
P = 0.005
if (val in new_val_to_count):
P = new_val_to_count[val]
cache[name] = P
cache[nameData] = new_val_to_count
return P |
def tune_model(mod, params, tune_settings, target, model_name):
'\n Tune a model for a specified number of trials along with other tune settings.\n Tune settings are specified using a json configuration, as per the TVM tools readme.\n '
early_stopping = tune_settings['early_stopping']
number = tune_settings['number']
save_path = tune_settings['save_path']
save_name = tune_settings['save_name']
repeat = tune_settings['repeat']
debug = (tune_settings.get('debug_gadqn') or False)
trials = tune_settings['trials']
tuner = tune_settings['tuner']
target = tvm.target.Target(target)
tasks = autotvm.task.extract_from_program(mod['main'], target=target, target_host='llvm', params=params)
runner = autotvm.LocalRunner(number=number, repeat=repeat)
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(build_func='default'), runner=runner)
for (i, tsk) in enumerate(tasks):
prefix = ('[Task %2d/%2d] ' % ((i + 1), len(tasks)))
if (tuner in ('xgb', 'xgb-rank')):
tuner_obj = XGBTuner(tsk, loss_type='rank')
elif (tuner == 'xgb_knob'):
tuner_obj = XGBTuner(tsk, loss_type='rank', feature_type='knob')
elif (tuner == 'ga'):
tuner_obj = GATuner(tsk, pop_size=50)
elif (tuner == 'random'):
tuner_obj = RandomTuner(tsk)
elif (tuner == 'gridsearch'):
tuner_obj = GridSearchTuner(tsk)
elif ((tuner == 'ga-dqn') and debug):
tuner_obj = GADQNTunerDebug(tsk)
elif (tuner == 'ga-dqn'):
tuner_obj = GADQNTuner(tsk)
else:
raise ValueError(('invalid tuner: %s ' % tuner))
abs_path = Path((save_path + save_name)).resolve()
abs_path.mkdir(exist_ok=True, parents=True)
abs_path_str = str(abs_path)
tuner_obj.tune(n_trial=min(trials, len(tsk.config_space)), early_stopping=early_stopping, measure_option=measure_option, callbacks=[autotvm.callback.progress_bar(trials, prefix=prefix), autotvm.callback.log_to_file((abs_path_str + f'/tuning_record_model={model_name}.json'))])
if ((tuner == 'ga-dqn') and debug):
tuner_obj.save_model(save_path, (save_name + f'_model={model_name}_layer={i}'))
del tuner_obj | 9,174,764,952,180,181,000 | Tune a model for a specified number of trials along with other tune settings.
Tune settings are specified using a json configuration, as per the TVM tools readme. | tools/tune_model.py | tune_model | lhutton1/benchmark-tvm | python | def tune_model(mod, params, tune_settings, target, model_name):
'\n Tune a model for a specified number of trials along with other tune settings.\n Tune settings are specified using a json configuration, as per the TVM tools readme.\n '
early_stopping = tune_settings['early_stopping']
number = tune_settings['number']
save_path = tune_settings['save_path']
save_name = tune_settings['save_name']
repeat = tune_settings['repeat']
debug = (tune_settings.get('debug_gadqn') or False)
trials = tune_settings['trials']
tuner = tune_settings['tuner']
target = tvm.target.Target(target)
tasks = autotvm.task.extract_from_program(mod['main'], target=target, target_host='llvm', params=params)
runner = autotvm.LocalRunner(number=number, repeat=repeat)
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(build_func='default'), runner=runner)
for (i, tsk) in enumerate(tasks):
prefix = ('[Task %2d/%2d] ' % ((i + 1), len(tasks)))
if (tuner in ('xgb', 'xgb-rank')):
tuner_obj = XGBTuner(tsk, loss_type='rank')
elif (tuner == 'xgb_knob'):
tuner_obj = XGBTuner(tsk, loss_type='rank', feature_type='knob')
elif (tuner == 'ga'):
tuner_obj = GATuner(tsk, pop_size=50)
elif (tuner == 'random'):
tuner_obj = RandomTuner(tsk)
elif (tuner == 'gridsearch'):
tuner_obj = GridSearchTuner(tsk)
elif ((tuner == 'ga-dqn') and debug):
tuner_obj = GADQNTunerDebug(tsk)
elif (tuner == 'ga-dqn'):
tuner_obj = GADQNTuner(tsk)
else:
raise ValueError(('invalid tuner: %s ' % tuner))
abs_path = Path((save_path + save_name)).resolve()
abs_path.mkdir(exist_ok=True, parents=True)
abs_path_str = str(abs_path)
tuner_obj.tune(n_trial=min(trials, len(tsk.config_space)), early_stopping=early_stopping, measure_option=measure_option, callbacks=[autotvm.callback.progress_bar(trials, prefix=prefix), autotvm.callback.log_to_file((abs_path_str + f'/tuning_record_model={model_name}.json'))])
if ((tuner == 'ga-dqn') and debug):
tuner_obj.save_model(save_path, (save_name + f'_model={model_name}_layer={i}'))
del tuner_obj |
def tune_models(data):
'\n Auto tune all models referenced in the json configuration.\n '
target_string = data['target']
tune_settings = data['autotuner_settings']
for model in data['models']:
(trace, input_shapes) = get_model(model['name'], model['type'])
(mod, params) = relay.frontend.from_pytorch(trace, input_shapes)
print(f"Tuning model {model['name']}, using strategy {tune_settings['tuner']}")
tune_model(mod, params, tune_settings, target_string, model['name']) | 860,469,008,408,997,100 | Auto tune all models referenced in the json configuration. | tools/tune_model.py | tune_models | lhutton1/benchmark-tvm | python | def tune_models(data):
'\n \n '
target_string = data['target']
tune_settings = data['autotuner_settings']
for model in data['models']:
(trace, input_shapes) = get_model(model['name'], model['type'])
(mod, params) = relay.frontend.from_pytorch(trace, input_shapes)
print(f"Tuning model {model['name']}, using strategy {tune_settings['tuner']}")
tune_model(mod, params, tune_settings, target_string, model['name']) |
def example_1():
'\n THIS IS A LONG COMMENT AND should be wrapped to fit within a 72 \n character limit\n '
long_1 = 'LONG CODE LINES should be wrapped within 79 character to \n prevent page cutoff stuff'
long_2 = 'This IS a long string that looks gross and goes beyond \n what it should'
some_tuple = (1, 2, 3, 'a')
some_variable = {'long': long_1, 'other': [math.pi, 100, 200, 300, 9999292929292, long_2], 'more': {'inner': 'THIS whole logical line should be wrapped'}, 'data': [444, 5555, 222, 3, 3, 4, 4, 5, 5, 5, 5, 5, 5, 5]}
return (some_tuple, some_variable) | 5,549,236,925,018,196,000 | THIS IS A LONG COMMENT AND should be wrapped to fit within a 72
character limit | lambdata/code_review.py | example_1 | DevinJMantz/lambdata-25 | python | def example_1():
'\n THIS IS A LONG COMMENT AND should be wrapped to fit within a 72 \n character limit\n '
long_1 = 'LONG CODE LINES should be wrapped within 79 character to \n prevent page cutoff stuff'
long_2 = 'This IS a long string that looks gross and goes beyond \n what it should'
some_tuple = (1, 2, 3, 'a')
some_variable = {'long': long_1, 'other': [math.pi, 100, 200, 300, 9999292929292, long_2], 'more': {'inner': 'THIS whole logical line should be wrapped'}, 'data': [444, 5555, 222, 3, 3, 4, 4, 5, 5, 5, 5, 5, 5, 5]}
return (some_tuple, some_variable) |
def test_api_root_should_reply_200(self):
' GET /api/v1/ should return an hyperlink to the users view and return a successful status 200 OK.\n '
request = APIRequestFactory().get('/api/v1/')
user_list_view = UserViewSet.as_view({'get': 'list'})
response = user_list_view(request)
self.assertEqual(status.HTTP_200_OK, response.status_code) | 7,714,308,681,116,089,000 | GET /api/v1/ should return an hyperlink to the users view and return a successful status 200 OK. | users_django/users/tests/test_views.py | test_api_root_should_reply_200 | r-o-main/users-exercise | python | def test_api_root_should_reply_200(self):
' \n '
request = APIRequestFactory().get('/api/v1/')
user_list_view = UserViewSet.as_view({'get': 'list'})
response = user_list_view(request)
self.assertEqual(status.HTTP_200_OK, response.status_code) |
def test_list_all_users_should_retrieve_all_users_and_reply_200(self):
' GET /api/v1/users should return all the users (or empty if no users found)\n and return a successful status 200 OK.\n '
users = User.objects.all().order_by('id')
request = self.factory.get(reverse('v1:user-list'))
serializer = UserSerializer(users, many=True, context={'request': request})
user_list_view = UserViewSet.as_view({'get': 'list'})
response = user_list_view(request)
self.assertEqual(len(self.users), len(response.data['results']))
self.assertEqual(serializer.data, response.data['results'])
self.assertEqual(status.HTTP_200_OK, response.status_code) | -6,177,446,604,823,195,000 | GET /api/v1/users should return all the users (or empty if no users found)
and return a successful status 200 OK. | users_django/users/tests/test_views.py | test_list_all_users_should_retrieve_all_users_and_reply_200 | r-o-main/users-exercise | python | def test_list_all_users_should_retrieve_all_users_and_reply_200(self):
' GET /api/v1/users should return all the users (or empty if no users found)\n and return a successful status 200 OK.\n '
users = User.objects.all().order_by('id')
request = self.factory.get(reverse('v1:user-list'))
serializer = UserSerializer(users, many=True, context={'request': request})
user_list_view = UserViewSet.as_view({'get': 'list'})
response = user_list_view(request)
self.assertEqual(len(self.users), len(response.data['results']))
self.assertEqual(serializer.data, response.data['results'])
self.assertEqual(status.HTTP_200_OK, response.status_code) |
@staticmethod
def _min_norm_element_from2(v1v1, v1v2, v2v2):
'\n Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2\n d is the distance (objective) optimzed\n v1v1 = <x1,x1>\n v1v2 = <x1,x2>\n v2v2 = <x2,x2>\n '
if (v1v2 >= v1v1):
gamma = 0.999
cost = v1v1
return (gamma, cost)
if (v1v2 >= v2v2):
gamma = 0.001
cost = v2v2
return (gamma, cost)
gamma = ((- 1.0) * ((v1v2 - v2v2) / ((v1v1 + v2v2) - (2 * v1v2))))
cost = (v2v2 + (gamma * (v1v2 - v2v2)))
return (gamma, cost) | -4,625,356,207,219,539,000 | Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
d is the distance (objective) optimzed
v1v1 = <x1,x1>
v1v2 = <x1,x2>
v2v2 = <x2,x2> | utils/min_norm_solvers.py | _min_norm_element_from2 | DavidHidde/backdoors101 | python | @staticmethod
def _min_norm_element_from2(v1v1, v1v2, v2v2):
'\n Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2\n d is the distance (objective) optimzed\n v1v1 = <x1,x1>\n v1v2 = <x1,x2>\n v2v2 = <x2,x2>\n '
if (v1v2 >= v1v1):
gamma = 0.999
cost = v1v1
return (gamma, cost)
if (v1v2 >= v2v2):
gamma = 0.001
cost = v2v2
return (gamma, cost)
gamma = ((- 1.0) * ((v1v2 - v2v2) / ((v1v1 + v2v2) - (2 * v1v2))))
cost = (v2v2 + (gamma * (v1v2 - v2v2)))
return (gamma, cost) |
@staticmethod
def _min_norm_2d(vecs: list, dps):
'\n Find the minimum norm solution as combination of two points\n This is correct only in 2D\n ie. min_c |\\sum c_i x_i|_2^2 st. \\sum c_i = 1 , 1 >= c_1 >= 0\n for all i, c_i + c_j = 1.0 for some i, j\n '
dmin = 100000000.0
sol = 0
for i in range(len(vecs)):
for j in range((i + 1), len(vecs)):
if ((i, j) not in dps):
dps[(i, j)] = 0.0
for k in range(len(vecs[i])):
dps[(i, j)] += torch.dot(vecs[i][k].view((- 1)), vecs[j][k].view((- 1))).detach()
dps[(j, i)] = dps[(i, j)]
if ((i, i) not in dps):
dps[(i, i)] = 0.0
for k in range(len(vecs[i])):
dps[(i, i)] += torch.dot(vecs[i][k].view((- 1)), vecs[i][k].view((- 1))).detach()
if ((j, j) not in dps):
dps[(j, j)] = 0.0
for k in range(len(vecs[i])):
dps[(j, j)] += torch.dot(vecs[j][k].view((- 1)), vecs[j][k].view((- 1))).detach()
(c, d) = MGDASolver._min_norm_element_from2(dps[(i, i)], dps[(i, j)], dps[(j, j)])
if (d < dmin):
dmin = d
sol = [(i, j), c, d]
return (sol, dps) | 2,761,131,809,362,592,300 | Find the minimum norm solution as combination of two points
This is correct only in 2D
ie. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_1 >= 0
for all i, c_i + c_j = 1.0 for some i, j | utils/min_norm_solvers.py | _min_norm_2d | DavidHidde/backdoors101 | python | @staticmethod
def _min_norm_2d(vecs: list, dps):
'\n Find the minimum norm solution as combination of two points\n This is correct only in 2D\n ie. min_c |\\sum c_i x_i|_2^2 st. \\sum c_i = 1 , 1 >= c_1 >= 0\n for all i, c_i + c_j = 1.0 for some i, j\n '
dmin = 100000000.0
sol = 0
for i in range(len(vecs)):
for j in range((i + 1), len(vecs)):
if ((i, j) not in dps):
dps[(i, j)] = 0.0
for k in range(len(vecs[i])):
dps[(i, j)] += torch.dot(vecs[i][k].view((- 1)), vecs[j][k].view((- 1))).detach()
dps[(j, i)] = dps[(i, j)]
if ((i, i) not in dps):
dps[(i, i)] = 0.0
for k in range(len(vecs[i])):
dps[(i, i)] += torch.dot(vecs[i][k].view((- 1)), vecs[i][k].view((- 1))).detach()
if ((j, j) not in dps):
dps[(j, j)] = 0.0
for k in range(len(vecs[i])):
dps[(j, j)] += torch.dot(vecs[j][k].view((- 1)), vecs[j][k].view((- 1))).detach()
(c, d) = MGDASolver._min_norm_element_from2(dps[(i, i)], dps[(i, j)], dps[(j, j)])
if (d < dmin):
dmin = d
sol = [(i, j), c, d]
return (sol, dps) |
@staticmethod
def _projection2simplex(y):
'\n Given y, it solves argmin_z |y-z|_2 st \\sum z = 1 , 1 >= z_i >= 0 for all i\n '
m = len(y)
sorted_y = np.flip(np.sort(y), axis=0)
tmpsum = 0.0
tmax_f = ((np.sum(y) - 1.0) / m)
for i in range((m - 1)):
tmpsum += sorted_y[i]
tmax = ((tmpsum - 1) / (i + 1.0))
if (tmax > sorted_y[(i + 1)]):
tmax_f = tmax
break
return np.maximum((y - tmax_f), np.zeros(y.shape)) | -384,395,269,440,826,900 | Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i | utils/min_norm_solvers.py | _projection2simplex | DavidHidde/backdoors101 | python | @staticmethod
def _projection2simplex(y):
'\n Given y, it solves argmin_z |y-z|_2 st \\sum z = 1 , 1 >= z_i >= 0 for all i\n '
m = len(y)
sorted_y = np.flip(np.sort(y), axis=0)
tmpsum = 0.0
tmax_f = ((np.sum(y) - 1.0) / m)
for i in range((m - 1)):
tmpsum += sorted_y[i]
tmax = ((tmpsum - 1) / (i + 1.0))
if (tmax > sorted_y[(i + 1)]):
tmax_f = tmax
break
return np.maximum((y - tmax_f), np.zeros(y.shape)) |
@staticmethod
def find_min_norm_element(vecs: list):
'\n Given a list of vectors (vecs), this method finds the minimum norm\n element in the convex hull as min |u|_2 st. u = \\sum c_i vecs[i]\n and \\sum c_i = 1. It is quite geometric, and the main idea is the\n fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution\n lies in (0, d_{i,j})Hence, we find the best 2-task solution , and\n then run the projected gradient descent until convergence\n '
dps = {}
(init_sol, dps) = MGDASolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = (1 - init_sol[1])
if (n < 3):
return (sol_vec, init_sol[2])
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[(i, j)] = dps[(i, j)]
while (iter_count < MGDASolver.MAX_ITER):
grad_dir = ((- 1.0) * np.dot(grad_mat, sol_vec))
new_point = MGDASolver._next_point(sol_vec, grad_dir, n)
v1v1 = 0.0
v1v2 = 0.0
v2v2 = 0.0
for i in range(n):
for j in range(n):
v1v1 += ((sol_vec[i] * sol_vec[j]) * dps[(i, j)])
v1v2 += ((sol_vec[i] * new_point[j]) * dps[(i, j)])
v2v2 += ((new_point[i] * new_point[j]) * dps[(i, j)])
(nc, nd) = MGDASolver._min_norm_element_from2(v1v1.item(), v1v2.item(), v2v2.item())
new_sol_vec = ((nc * sol_vec) + ((1 - nc) * new_point))
change = (new_sol_vec - sol_vec)
if (np.sum(np.abs(change)) < MGDASolver.STOP_CRIT):
return (sol_vec, nd)
sol_vec = new_sol_vec | -7,322,903,186,992,737,000 | Given a list of vectors (vecs), this method finds the minimum norm
element in the convex hull as min |u|_2 st. u = \sum c_i vecs[i]
and \sum c_i = 1. It is quite geometric, and the main idea is the
fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution
lies in (0, d_{i,j})Hence, we find the best 2-task solution , and
then run the projected gradient descent until convergence | utils/min_norm_solvers.py | find_min_norm_element | DavidHidde/backdoors101 | python | @staticmethod
def find_min_norm_element(vecs: list):
'\n Given a list of vectors (vecs), this method finds the minimum norm\n element in the convex hull as min |u|_2 st. u = \\sum c_i vecs[i]\n and \\sum c_i = 1. It is quite geometric, and the main idea is the\n fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution\n lies in (0, d_{i,j})Hence, we find the best 2-task solution , and\n then run the projected gradient descent until convergence\n '
dps = {}
(init_sol, dps) = MGDASolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = (1 - init_sol[1])
if (n < 3):
return (sol_vec, init_sol[2])
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[(i, j)] = dps[(i, j)]
while (iter_count < MGDASolver.MAX_ITER):
grad_dir = ((- 1.0) * np.dot(grad_mat, sol_vec))
new_point = MGDASolver._next_point(sol_vec, grad_dir, n)
v1v1 = 0.0
v1v2 = 0.0
v2v2 = 0.0
for i in range(n):
for j in range(n):
v1v1 += ((sol_vec[i] * sol_vec[j]) * dps[(i, j)])
v1v2 += ((sol_vec[i] * new_point[j]) * dps[(i, j)])
v2v2 += ((new_point[i] * new_point[j]) * dps[(i, j)])
(nc, nd) = MGDASolver._min_norm_element_from2(v1v1.item(), v1v2.item(), v2v2.item())
new_sol_vec = ((nc * sol_vec) + ((1 - nc) * new_point))
change = (new_sol_vec - sol_vec)
if (np.sum(np.abs(change)) < MGDASolver.STOP_CRIT):
return (sol_vec, nd)
sol_vec = new_sol_vec |
@staticmethod
def find_min_norm_element_FW(vecs):
'\n Given a list of vectors (vecs), this method finds the minimum norm\n element in the convex hull\n as min |u|_2 st. u = \\sum c_i vecs[i] and \\sum c_i = 1.\n It is quite geometric, and the main idea is the fact that if\n d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies\n in (0, d_{i,j})Hence, we find the best 2-task solution, and then\n run the Frank Wolfe until convergence\n '
dps = {}
(init_sol, dps) = MGDASolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = (1 - init_sol[1])
if (n < 3):
return (sol_vec, init_sol[2])
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[(i, j)] = dps[(i, j)]
while (iter_count < MGDASolver.MAX_ITER):
t_iter = np.argmin(np.dot(grad_mat, sol_vec))
v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
v2v2 = grad_mat[(t_iter, t_iter)]
(nc, nd) = MGDASolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = (nc * sol_vec)
new_sol_vec[t_iter] += (1 - nc)
change = (new_sol_vec - sol_vec)
if (np.sum(np.abs(change)) < MGDASolver.STOP_CRIT):
return (sol_vec, nd)
sol_vec = new_sol_vec | 6,128,836,268,395,524,000 | Given a list of vectors (vecs), this method finds the minimum norm
element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if
d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies
in (0, d_{i,j})Hence, we find the best 2-task solution, and then
run the Frank Wolfe until convergence | utils/min_norm_solvers.py | find_min_norm_element_FW | DavidHidde/backdoors101 | python | @staticmethod
def find_min_norm_element_FW(vecs):
'\n Given a list of vectors (vecs), this method finds the minimum norm\n element in the convex hull\n as min |u|_2 st. u = \\sum c_i vecs[i] and \\sum c_i = 1.\n It is quite geometric, and the main idea is the fact that if\n d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies\n in (0, d_{i,j})Hence, we find the best 2-task solution, and then\n run the Frank Wolfe until convergence\n '
dps = {}
(init_sol, dps) = MGDASolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = (1 - init_sol[1])
if (n < 3):
return (sol_vec, init_sol[2])
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[(i, j)] = dps[(i, j)]
while (iter_count < MGDASolver.MAX_ITER):
t_iter = np.argmin(np.dot(grad_mat, sol_vec))
v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
v2v2 = grad_mat[(t_iter, t_iter)]
(nc, nd) = MGDASolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = (nc * sol_vec)
new_sol_vec[t_iter] += (1 - nc)
change = (new_sol_vec - sol_vec)
if (np.sum(np.abs(change)) < MGDASolver.STOP_CRIT):
return (sol_vec, nd)
sol_vec = new_sol_vec |
def __init__(self, cfg):
'Create configuration settings that may not already be set.\n\n The user can either define the relevant namespaces specifically for the\n mat_views plugin, or the mat_views plugin can draw on the settings in the\n bucardo section of the config. If neither exists, the script will throw an\n error.\n\n Keyword arguments:\n cfg: contents of the config file as a dictionary\n '
super(MatViews, self).__init__(cfg)
self._set_inheritable_params('mat_views') | 6,242,808,382,029,592,000 | Create configuration settings that may not already be set.
The user can either define the relevant namespaces specifically for the
mat_views plugin, or the mat_views plugin can draw on the settings in the
bucardo section of the config. If neither exists, the script will throw an
error.
Keyword arguments:
cfg: contents of the config file as a dictionary | plugins/mat_views/__init__.py | __init__ | emmadev/bucardo_wrapper | python | def __init__(self, cfg):
'Create configuration settings that may not already be set.\n\n The user can either define the relevant namespaces specifically for the\n mat_views plugin, or the mat_views plugin can draw on the settings in the\n bucardo section of the config. If neither exists, the script will throw an\n error.\n\n Keyword arguments:\n cfg: contents of the config file as a dictionary\n '
super(MatViews, self).__init__(cfg)
self._set_inheritable_params('mat_views') |
def refresh(self):
'Refresh materialized views.\n\n First, this method finds the namespaces being replicated, by referring to the\n config for schemas and tables.\n\n Then it finds any materialized views in the namespaces.\n\n Then it refreshes the materialized views.\n '
print('Finding materialized views.')
views = self._find_objects('m', self.repl_objects)
if views:
conn = psycopg2.connect(self.secondary_schema_owner_conn_pg_format)
for view in views:
print(f'Refreshing {view[0]}.{view[1]}')
query = sql.SQL('REFRESH MATERIALIZED VIEW {schema}.{table}').format(schema=sql.Identifier(view[0]), table=sql.Identifier(view[1]))
try:
with conn.cursor() as cur:
cur.execute(query)
conn.commit()
except Exception:
conn.close()
raise
conn.close()
print('Done refreshing views.')
else:
print('No materialized views found.') | 4,562,420,599,338,210,000 | Refresh materialized views.
First, this method finds the namespaces being replicated, by referring to the
config for schemas and tables.
Then it finds any materialized views in the namespaces.
Then it refreshes the materialized views. | plugins/mat_views/__init__.py | refresh | emmadev/bucardo_wrapper | python | def refresh(self):
'Refresh materialized views.\n\n First, this method finds the namespaces being replicated, by referring to the\n config for schemas and tables.\n\n Then it finds any materialized views in the namespaces.\n\n Then it refreshes the materialized views.\n '
print('Finding materialized views.')
views = self._find_objects('m', self.repl_objects)
if views:
conn = psycopg2.connect(self.secondary_schema_owner_conn_pg_format)
for view in views:
print(f'Refreshing {view[0]}.{view[1]}')
query = sql.SQL('REFRESH MATERIALIZED VIEW {schema}.{table}').format(schema=sql.Identifier(view[0]), table=sql.Identifier(view[1]))
try:
with conn.cursor() as cur:
cur.execute(query)
conn.commit()
except Exception:
conn.close()
raise
conn.close()
print('Done refreshing views.')
else:
print('No materialized views found.') |
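
The statement above is composed with psycopg2's `sql` module so that schema and view names are quoted as identifiers rather than interpolated as plain text. A standalone sketch of the same pattern (the DSN, schema and view names below are placeholders):

import psycopg2
from psycopg2 import sql

conn = psycopg2.connect('dbname=example user=example')   # placeholder DSN
stmt = sql.SQL('REFRESH MATERIALIZED VIEW {schema}.{table}').format(
    schema=sql.Identifier('reporting'),    # hypothetical schema name
    table=sql.Identifier('daily_totals'),  # hypothetical view name
)
with conn.cursor() as cur:
    cur.execute(stmt)
conn.commit()
conn.close()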
def step(self):
'In this function the robot will return to default pose, to\n be ready for the new command.\n '
origin = [0.4]
self.observation = torch.tensor(origin, dtype=self.precision, device=self.device) | -4,679,040,297,958,044,000 | In this function the robot will return to default pose, to
be ready for the new command. | environments/nao/pose_assumption.py | step | AroMorin/DNNOP | python | def step(self):
'In this function the robot will return to default pose, to\n be ready for the new command.\n '
origin = [0.4]
self.observation = torch.tensor(origin, dtype=self.precision, device=self.device) |
def evaluate(self, inference):
'Evaluates the predicted pose.'
self.reset_state()
values = self.process_inference(inference)
self.apply(values)
angles = self.get_joints()
self.calc_error(angles)
return self.error | -2,085,172,381,588,377,600 | Evaluates the predicted pose. | environments/nao/pose_assumption.py | evaluate | AroMorin/DNNOP | python | def evaluate(self, inference):
self.reset_state()
values = self.process_inference(inference)
self.apply(values)
angles = self.get_joints()
self.calc_error(angles)
return self.error |
def process_inference(self, inference):
'Ensures safety of the predicted angles.'
values = [a.item() for a in inference]
for (idx, value) in enumerate(values):
name = self.joints[idx]
limits = self.motion.getLimits(name)
min_angle = limits[0][0]
max_angle = limits[0][1]
max_vel = limits[0][2]
max_tor = limits[0][3]
value = self.cap_angle(value, min_angle, max_angle)
values[idx] = [value]
return values | 3,344,582,118,635,586,600 | Ensures safety of the predicted angles. | environments/nao/pose_assumption.py | process_inference | AroMorin/DNNOP | python | def process_inference(self, inference):
values = [a.item() for a in inference]
for (idx, value) in enumerate(values):
name = self.joints[idx]
limits = self.motion.getLimits(name)
min_angle = limits[0][0]
max_angle = limits[0][1]
max_vel = limits[0][2]
max_tor = limits[0][3]
value = self.cap_angle(value, min_angle, max_angle)
values[idx] = [value]
return values |
def apply(self, angles):
'Applies the pose to the robot.'
self.set_joints(angles) | -8,043,274,140,901,361,000 | Applies the pose to the robot. | environments/nao/pose_assumption.py | apply | AroMorin/DNNOP | python | def apply(self, angles):
self.set_joints(angles) |
def calc_error(self, angles):
'Calculate the error between predicted and target angles, and\n add the safety penalties.\n '
errors = [abs((x - y)) for (x, y) in zip(angles, self.target_angles)]
error = sum(errors)
error += self.penalty
self.error = torch.tensor(error) | -1,805,989,592,975,838,000 | Calculate the error between predicted and target angles, and
add the safety penalties. | environments/nao/pose_assumption.py | calc_error | AroMorin/DNNOP | python | def calc_error(self, angles):
'Calculate the error between predicted and target angles, and\n add the safety penalties.\n '
errors = [abs((x - y)) for (x, y) in zip(angles, self.target_angles)]
error = sum(errors)
error += self.penalty
self.error = torch.tensor(error) |
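
The fitness returned by `calc_error` is the L1 distance between predicted and target joint angles plus the accumulated safety penalty. A small worked example with illustrative numbers:

angles = [0.10, -0.45, 1.20]          # predicted joint angles (radians)
target_angles = [0.00, -0.50, 1.00]   # target pose
penalty = 0.2                         # e.g. accrued for clipped commands

errors = [abs(x - y) for x, y in zip(angles, target_angles)]
error = sum(errors) + penalty         # 0.10 + 0.05 + 0.20 + 0.2 = 0.55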
def parse_args(args=[], doc=False):
'\n Handle parsing of arguments and flags. Generates docs using help from `ArgParser`\n\n Args:\n args (list): argv passed to the binary\n doc (bool): If the function should generate and return manpage\n\n Returns:\n Processed args and a copy of the `ArgParser` object if not `doc` else a `string` containing the generated manpage\n '
parser = ArgParser(prog=__COMMAND__, description=f'{__COMMAND__} - {__DESCRIPTION__}')
parser.add_argument('--version', action='store_true', help=f'output version information and exit')
args = parser.parse_args(args)
arg_helps_with_dups = parser._actions
arg_helps = []
[arg_helps.append(x) for x in arg_helps_with_dups if (x not in arg_helps)]
NAME = f'''**NAME*/
{__COMMAND__} - {__DESCRIPTION__}'''
SYNOPSIS = f'''**SYNOPSIS*/
{__COMMAND__} [OPTION]... '''
DESCRIPTION = f'''**DESCRIPTION*/
{__DESCRIPTION__}
'''
for item in arg_helps:
if (len(item.option_strings) == 0):
if (item.nargs == '?'):
SYNOPSIS += f'[{item.dest.upper()}] '
else:
SYNOPSIS += f'{item.dest.upper()} '
elif (item.nargs == 0):
if (len(item.option_strings) == 1):
DESCRIPTION += f''' **{' '.join(item.option_strings)}*/ {item.help}
'''
else:
DESCRIPTION += f''' **{' '.join(item.option_strings)}*/
{item.help}
'''
elif (item.nargs == '+'):
DESCRIPTION += f''' **{' '.join(item.option_strings)}*/=[{item.dest.upper()}]...
{item.help}
'''
else:
DESCRIPTION += f''' **{' '.join(item.option_strings)}*/={item.dest.upper()}
{item.help}
'''
if doc:
return f'''{NAME}
{SYNOPSIS}
{DESCRIPTION}
'''
else:
return (args, parser) | 6,631,359,889,158,821,000 | Handle parsing of arguments and flags. Generates docs using help from `ArgParser`
Args:
args (list): argv passed to the binary
doc (bool): If the function should generate and return manpage
Returns:
Processed args and a copy of the `ArgParser` object if not `doc` else a `string` containing the generated manpage | client/blackhat/bin/installable/ifconfig.py | parse_args | stautonico/blackhat-simulator | python | def parse_args(args=[], doc=False):
'\n Handle parsing of arguments and flags. Generates docs using help from `ArgParser`\n\n Args:\n args (list): argv passed to the binary\n doc (bool): If the function should generate and return manpage\n\n Returns:\n Processed args and a copy of the `ArgParser` object if not `doc` else a `string` containing the generated manpage\n '
parser = ArgParser(prog=__COMMAND__, description=f'{__COMMAND__} - {__DESCRIPTION__}')
parser.add_argument('--version', action='store_true', help=f'output version information and exit')
args = parser.parse_args(args)
arg_helps_with_dups = parser._actions
arg_helps = []
[arg_helps.append(x) for x in arg_helps_with_dups if (x not in arg_helps)]
NAME = f'''**NAME*/
    {__COMMAND__} - {__DESCRIPTION__}'''
    SYNOPSIS = f'''**SYNOPSIS*/
    {__COMMAND__} [OPTION]... '''
    DESCRIPTION = f'''**DESCRIPTION*/
    {__DESCRIPTION__}
    '''
for item in arg_helps:
if (len(item.option_strings) == 0):
if (item.nargs == '?'):
SYNOPSIS += f'[{item.dest.upper()}] '
else:
SYNOPSIS += f'{item.dest.upper()} '
elif (item.nargs == 0):
if (len(item.option_strings) == 1):
DESCRIPTION += f''' **{' '.join(item.option_strings)}*/ {item.help}
'''
else:
DESCRIPTION += f''' **{' '.join(item.option_strings)}*/
{item.help}
'''
elif (item.nargs == '+'):
DESCRIPTION += f''' **{' '.join(item.option_strings)}*/=[{item.dest.upper()}]...
{item.help}
'''
else:
DESCRIPTION += f''' **{' '.join(item.option_strings)}*/={item.dest.upper()}
{item.help}
'''
if doc:
return f'''{NAME}
{SYNOPSIS}
{DESCRIPTION}
'''
else:
return (args, parser) |
def main(args: list, pipe: bool) -> Result:
'\n # TODO: Add docstring for manpage\n '
(args, parser) = parse_args(args)
if parser.error_message:
if (not args.version):
return output(f'{__COMMAND__}: {parser.error_message}', pipe, success=False)
if args.version:
return output(f'ifconfig (blackhat netutils) {__VERSION__}', pipe)
if (not args):
return output('', pipe)
else:
result = getifaddrs()
return output(result.data.ifa_addr, pipe) | -6,250,299,901,454,325,000 | # TODO: Add docstring for manpage | client/blackhat/bin/installable/ifconfig.py | main | stautonico/blackhat-simulator | python | def main(args: list, pipe: bool) -> Result:
'\n \n '
(args, parser) = parse_args(args)
if parser.error_message:
if (not args.version):
return output(f'{__COMMAND__}: {parser.error_message}', pipe, success=False)
if args.version:
return output(f'ifconfig (blackhat netutils) {__VERSION__}', pipe)
if (not args):
return output('', pipe)
else:
result = getifaddrs()
return output(result.data.ifa_addr, pipe) |
def __init__(self, tau_V, geometry='dusty', dust_type='mw', dust_distribution='clumpy'):
"\n Load the attenuation curves for a given geometry, dust type and\n dust distribution.\n\n Parameters\n ----------\n tau_V: float\n optical depth in V band\n\n geometry: string\n 'shell', 'cloudy' or 'dusty'\n\n dust_type: string\n 'mw' or 'smc'\n\n dust_distribution: string\n 'homogeneous' or 'clumpy'\n\n Returns\n -------\n Attx: np array (float)\n Att(x) attenuation curve [mag]\n\n "
self.geometry = geometry.lower()
self.dust_type = dust_type.lower()
self.dust_distribution = dust_distribution.lower()
data_path = pkg_resources.resource_filename('dust_attenuation', 'data/WG00/')
data = ascii.read(((data_path + self.geometry) + '.txt'), header_start=0)
if (self.dust_type == 'mw'):
start = 0
elif (self.dust_type == 'smc'):
start = 25
tau_colname = 'tau'
tau_att_colname = 'tau_att'
fsca_colname = 'f(sca)'
fdir_colname = 'f(dir)'
fesc_colname = 'f(esc)'
if (self.dust_distribution == 'clumpy'):
tau_att_colname += '_c'
fsca_colname += '_c'
fdir_colname += '_c'
fesc_colname += '_c'
elif (self.dust_distribution == 'homogeneous'):
tau_att_colname += '_h'
fsca_colname += '_h'
fdir_colname += '_h'
fesc_colname += '_h'
tau_att_list = []
tau_list = []
fsca_list = []
fdir_list = []
fesc_list = []
len_data = len(data['lambda'])
steps = 25
counter = start
while (counter < len_data):
tau_att_list.append(np.array(data[tau_att_colname][counter:(counter + steps)]))
tau_list.append(np.array(data[tau_colname][counter:(counter + steps)]))
fsca_list.append(np.array(data[fsca_colname][counter:(counter + steps)]))
fdir_list.append(np.array(data[fdir_colname][counter:(counter + steps)]))
fesc_list.append(np.array(data[fesc_colname][counter:(counter + steps)]))
counter += int((2 * steps))
tau_att_table = np.array(tau_att_list).T
tau_table = np.array(tau_list).T
fsca_table = np.array(fsca_list).T
fdir_table = np.array(fdir_list).T
fesc_table = np.array(fesc_list).T
wvl = np.array(data['lambda'][0:25])
self.wvl_grid = wvl
tau_V_grid = np.array([0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 7.0, 8.0, 9.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0])
tab = tabular_model(2, name='2D_table')
gridpoints = (wvl, tau_V_grid)
self.model = tab(gridpoints, lookup_table=tau_att_table, name='tau_att_WG00', bounds_error=False, fill_value=None, method='linear')
self.tau = tab(gridpoints, lookup_table=tau_table, name='tau_WG00', bounds_error=False, fill_value=None, method='linear')
self.fsca = tab(gridpoints, lookup_table=fsca_table, name='fsca_WG00', bounds_error=False, fill_value=None, method='linear')
self.fdir = tab(gridpoints, lookup_table=fdir_table, name='fdir_WG00', bounds_error=False, fill_value=None, method='linear')
self.fesc = tab(gridpoints, lookup_table=fesc_table, name='fesc_WG00', bounds_error=False, fill_value=None, method='linear')
super(WG00, self).__init__(tau_V=tau_V) | 1,800,130,098,260,879,000 | Load the attenuation curves for a given geometry, dust type and
dust distribution.
Parameters
----------
tau_V: float
optical depth in V band
geometry: string
'shell', 'cloudy' or 'dusty'
dust_type: string
'mw' or 'smc'
dust_distribution: string
'homogeneous' or 'clumpy'
Returns
-------
Attx: np array (float)
Att(x) attenuation curve [mag] | dust_attenuation/radiative_transfer.py | __init__ | gbrammer/dust_attenuation | python | def __init__(self, tau_V, geometry='dusty', dust_type='mw', dust_distribution='clumpy'):
"\n Load the attenuation curves for a given geometry, dust type and\n dust distribution.\n\n Parameters\n ----------\n tau_V: float\n optical depth in V band\n\n geometry: string\n 'shell', 'cloudy' or 'dusty'\n\n dust_type: string\n 'mw' or 'smc'\n\n dust_distribution: string\n 'homogeneous' or 'clumpy'\n\n Returns\n -------\n Attx: np array (float)\n Att(x) attenuation curve [mag]\n\n "
self.geometry = geometry.lower()
self.dust_type = dust_type.lower()
self.dust_distribution = dust_distribution.lower()
data_path = pkg_resources.resource_filename('dust_attenuation', 'data/WG00/')
data = ascii.read(((data_path + self.geometry) + '.txt'), header_start=0)
if (self.dust_type == 'mw'):
start = 0
elif (self.dust_type == 'smc'):
start = 25
tau_colname = 'tau'
tau_att_colname = 'tau_att'
fsca_colname = 'f(sca)'
fdir_colname = 'f(dir)'
fesc_colname = 'f(esc)'
if (self.dust_distribution == 'clumpy'):
tau_att_colname += '_c'
fsca_colname += '_c'
fdir_colname += '_c'
fesc_colname += '_c'
elif (self.dust_distribution == 'homogeneous'):
tau_att_colname += '_h'
fsca_colname += '_h'
fdir_colname += '_h'
fesc_colname += '_h'
tau_att_list = []
tau_list = []
fsca_list = []
fdir_list = []
fesc_list = []
len_data = len(data['lambda'])
steps = 25
counter = start
while (counter < len_data):
tau_att_list.append(np.array(data[tau_att_colname][counter:(counter + steps)]))
tau_list.append(np.array(data[tau_colname][counter:(counter + steps)]))
fsca_list.append(np.array(data[fsca_colname][counter:(counter + steps)]))
fdir_list.append(np.array(data[fdir_colname][counter:(counter + steps)]))
fesc_list.append(np.array(data[fesc_colname][counter:(counter + steps)]))
counter += int((2 * steps))
tau_att_table = np.array(tau_att_list).T
tau_table = np.array(tau_list).T
fsca_table = np.array(fsca_list).T
fdir_table = np.array(fdir_list).T
fesc_table = np.array(fesc_list).T
wvl = np.array(data['lambda'][0:25])
self.wvl_grid = wvl
tau_V_grid = np.array([0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 7.0, 8.0, 9.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0])
tab = tabular_model(2, name='2D_table')
gridpoints = (wvl, tau_V_grid)
self.model = tab(gridpoints, lookup_table=tau_att_table, name='tau_att_WG00', bounds_error=False, fill_value=None, method='linear')
self.tau = tab(gridpoints, lookup_table=tau_table, name='tau_WG00', bounds_error=False, fill_value=None, method='linear')
self.fsca = tab(gridpoints, lookup_table=fsca_table, name='fsca_WG00', bounds_error=False, fill_value=None, method='linear')
self.fdir = tab(gridpoints, lookup_table=fdir_table, name='fdir_WG00', bounds_error=False, fill_value=None, method='linear')
self.fesc = tab(gridpoints, lookup_table=fesc_table, name='fesc_WG00', bounds_error=False, fill_value=None, method='linear')
super(WG00, self).__init__(tau_V=tau_V) |
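
Typical use of the class, assuming the dust_attenuation package with its bundled WG00 data tables is installed (the parameter values and wavelengths below are illustrative):

import numpy as np
import astropy.units as u
from dust_attenuation.radiative_transfer import WG00

att_model = WG00(tau_V=1.0, geometry='shell',
                 dust_type='mw', dust_distribution='clumpy')
wavelengths = np.array([0.15, 0.3, 0.55, 1.0]) * u.micron
attenuation_mag = att_model(wavelengths)   # Att(x) in magnitudes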
def evaluate(self, x, tau_V):
'\n WG00 function\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n Attx: np array (float)\n Att(x) attenuation curve [mag]\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
n_x = len(x)
xinterp = (10000.0 * x)
yinterp = (tau_V * np.ones(n_x))
taux = self.model(xinterp, yinterp)
Attx = (1.086 * taux)
return Attx | 5,917,144,303,742,886,000 | WG00 function
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
Attx: np array (float)
Att(x) attenuation curve [mag]
Raises
------
ValueError
Input x values outside of defined range | dust_attenuation/radiative_transfer.py | evaluate | gbrammer/dust_attenuation | python | def evaluate(self, x, tau_V):
'\n WG00 function\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n Attx: np array (float)\n Att(x) attenuation curve [mag]\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
n_x = len(x)
xinterp = (10000.0 * x)
yinterp = (tau_V * np.ones(n_x))
taux = self.model(xinterp, yinterp)
Attx = (1.086 * taux)
return Attx |
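
The factor 1.086 in `evaluate` is the usual conversion from optical depth to magnitudes: attenuating the flux by e^(-tau) corresponds to A = 2.5 * log10(e^tau) = 2.5 * log10(e) * tau, and 2.5 * log10(e) is approximately 1.086, so Att(x) is roughly 1.086 * tau_att(x).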
def get_extinction(self, x, tau_V):
'\n Return the extinction at a given wavelength and\n V-band optical depth.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n ext: np array (float)\n ext(x) extinction curve [mag]\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
x = np.atleast_1d(x)
n_x = len(x)
xinterp = (10000.0 * x)
yinterp = (tau_V * np.ones(n_x))
return (self.tau(xinterp, yinterp) * 1.086) | 8,449,185,508,644,122,000 | Return the extinction at a given wavelength and
V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
ext: np array (float)
ext(x) extinction curve [mag]
Raises
------
ValueError
Input x values outside of defined range | dust_attenuation/radiative_transfer.py | get_extinction | gbrammer/dust_attenuation | python | def get_extinction(self, x, tau_V):
'\n Return the extinction at a given wavelength and\n V-band optical depth.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n ext: np array (float)\n ext(x) extinction curve [mag]\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
x = np.atleast_1d(x)
n_x = len(x)
xinterp = (10000.0 * x)
yinterp = (tau_V * np.ones(n_x))
return (self.tau(xinterp, yinterp) * 1.086) |
def get_fsca(self, x, tau_V):
'\n Return the scattered flux fraction at a given wavelength and\n V-band optical depth.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n fsca: np array (float)\n fsca(x) scattered flux fraction\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
x = np.atleast_1d(x)
n_x = len(x)
xinterp = (10000.0 * x)
yinterp = (tau_V * np.ones(n_x))
return self.fsca(xinterp, yinterp) | 5,405,936,841,851,005,000 | Return the scattered flux fraction at a given wavelength and
V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
fsca: np array (float)
fsca(x) scattered flux fraction
Raises
------
ValueError
Input x values outside of defined range | dust_attenuation/radiative_transfer.py | get_fsca | gbrammer/dust_attenuation | python | def get_fsca(self, x, tau_V):
'\n Return the scattered flux fraction at a given wavelength and\n V-band optical depth.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n fsca: np array (float)\n fsca(x) scattered flux fraction\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
x = np.atleast_1d(x)
n_x = len(x)
xinterp = (10000.0 * x)
yinterp = (tau_V * np.ones(n_x))
return self.fsca(xinterp, yinterp) |
def get_fdir(self, x, tau_V):
'\n Return the direct attenuated stellar flux fraction at a given\n wavelength and V-band optical depth.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n fsca: np array (float)\n fsca(x) scattered flux fraction\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
x = np.atleast_1d(x)
n_x = len(x)
xinterp = (10000.0 * x)
yinterp = (tau_V * np.ones(n_x))
return self.fdir(xinterp, yinterp) | -6,710,853,848,865,956,000 | Return the direct attenuated stellar flux fraction at a given
wavelength and V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
fdir: np array (float)
    fdir(x) direct attenuated flux fraction
Raises
------
ValueError
Input x values outside of defined range | dust_attenuation/radiative_transfer.py | get_fdir | gbrammer/dust_attenuation | python | def get_fdir(self, x, tau_V):
'\n Return the direct attenuated stellar flux fraction at a given\n wavelength and V-band optical depth.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n fsca: np array (float)\n fsca(x) scattered flux fraction\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
x = np.atleast_1d(x)
n_x = len(x)
xinterp = (10000.0 * x)
yinterp = (tau_V * np.ones(n_x))
return self.fdir(xinterp, yinterp) |
def get_fesc(self, x, tau_V):
'\n Return the total escaping flux fraction at a given wavelength and\n V-band optical depth.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n fsca: np array (float)\n fsca(x) scattered flux fraction\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
x = np.atleast_1d(x)
n_x = len(x)
xinterp = (10000.0 * x)
yinterp = (tau_V * np.ones(n_x))
return self.fesc(xinterp, yinterp) | 6,972,523,913,694,243,000 | Return the total escaping flux fraction at a given wavelength and
V-band optical depth.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
tau_V: float
optical depth in V band
Returns
-------
fesc: np array (float)
    fesc(x) total escaping flux fraction
Raises
------
ValueError
Input x values outside of defined range | dust_attenuation/radiative_transfer.py | get_fesc | gbrammer/dust_attenuation | python | def get_fesc(self, x, tau_V):
'\n Return the total escaping flux fraction at a given wavelength and\n V-band optical depth.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n tau_V: float\n optical depth in V band\n\n Returns\n -------\n fsca: np array (float)\n fsca(x) scattered flux fraction\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
x = np.atleast_1d(x)
n_x = len(x)
xinterp = (10000.0 * x)
yinterp = (tau_V * np.ones(n_x))
return self.fesc(xinterp, yinterp) |
def get_albedo(self, x):
'\n Return the albedo in function of wavelength for the corresponding\n dust type (SMC or MW). The albedo gives the probability a photon\n is scattered from a dust grain.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n Returns\n -------\n albedo: np array (float)\n alb(x) albedo\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
x = np.atleast_1d(x)
alb_MW = np.array([0.32, 0.409, 0.481, 0.526, 0.542, 0.536, 0.503, 0.432, 0.371, 0.389, 0.437, 0.47, 0.486, 0.499, 0.506, 0.498, 0.502, 0.491, 0.481, 0.5, 0.473, 0.457, 0.448, 0.424, 0.4])
alb_SMC = np.array([0.4, 0.449, 0.473, 0.494, 0.508, 0.524, 0.529, 0.528, 0.523, 0.52, 0.516, 0.511, 0.505, 0.513, 0.515, 0.498, 0.494, 0.489, 0.484, 0.493, 0.475, 0.465, 0.439, 0.417, 0.4])
if (self.dust_type == 'smc'):
albedo = alb_SMC
elif (self.dust_type == 'mw'):
albedo = alb_MW
tab = tabular_model(1, name='Tabular1D')
alb_fit = tab(self.wvl_grid, lookup_table=albedo, name='albedo', bounds_error=False, fill_value=None, method='linear')
xinterp = (10000.0 * x)
return alb_fit(xinterp) | 263,105,765,852,705,340 | Return the albedo in function of wavelength for the corresponding
dust type (SMC or MW). The albedo gives the probability a photon
is scattered from a dust grain.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
Returns
-------
albedo: np array (float)
alb(x) albedo
Raises
------
ValueError
Input x values outside of defined range | dust_attenuation/radiative_transfer.py | get_albedo | gbrammer/dust_attenuation | python | def get_albedo(self, x):
'\n Return the albedo in function of wavelength for the corresponding\n dust type (SMC or MW). The albedo gives the probability a photon\n is scattered from a dust grain.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n Returns\n -------\n albedo: np array (float)\n alb(x) albedo\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
x = np.atleast_1d(x)
alb_MW = np.array([0.32, 0.409, 0.481, 0.526, 0.542, 0.536, 0.503, 0.432, 0.371, 0.389, 0.437, 0.47, 0.486, 0.499, 0.506, 0.498, 0.502, 0.491, 0.481, 0.5, 0.473, 0.457, 0.448, 0.424, 0.4])
alb_SMC = np.array([0.4, 0.449, 0.473, 0.494, 0.508, 0.524, 0.529, 0.528, 0.523, 0.52, 0.516, 0.511, 0.505, 0.513, 0.515, 0.498, 0.494, 0.489, 0.484, 0.493, 0.475, 0.465, 0.439, 0.417, 0.4])
if (self.dust_type == 'smc'):
albedo = alb_SMC
elif (self.dust_type == 'mw'):
albedo = alb_MW
tab = tabular_model(1, name='Tabular1D')
alb_fit = tab(self.wvl_grid, lookup_table=albedo, name='albedo', bounds_error=False, fill_value=None, method='linear')
xinterp = (10000.0 * x)
return alb_fit(xinterp) |
def get_scattering_phase_function(self, x):
'\n Return the scattering phase function in function of wavelength for the\n corresponding dust type (SMC or MW). The scattering phase\n function gives the angle at which the photon scatters.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n Returns\n -------\n g: np array (float)\n g(x) scattering phase function\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
x = np.atleast_1d(x)
g_MW = np.array([0.8, 0.783, 0.767, 0.756, 0.745, 0.736, 0.727, 0.72, 0.712, 0.707, 0.702, 0.697, 0.691, 0.685, 0.678, 0.646, 0.624, 0.597, 0.563, 0.545, 0.533, 0.511, 0.48, 0.445, 0.42])
g_SMC = np.array([0.8, 0.783, 0.767, 0.756, 0.745, 0.736, 0.727, 0.72, 0.712, 0.707, 0.702, 0.697, 0.691, 0.685, 0.678, 0.646, 0.624, 0.597, 0.563, 0.545, 0.533, 0.511, 0.48, 0.445, 0.42])
if (self.dust_type == 'smc'):
g = g_SMC
elif (self.dust_type == 'mw'):
g = g_MW
tab = tabular_model(1, name='Tabular1D')
g_fit = tab(self.wvl_grid, lookup_table=g, name='albedo', bounds_error=False, fill_value=None, method='linear')
xinterp = (10000.0 * x)
return g_fit(xinterp) | -1,038,028,975,065,373,800 | Return the scattering phase function in function of wavelength for the
corresponding dust type (SMC or MW). The scattering phase
function gives the angle at which the photon scatters.
Parameters
----------
x: float
expects either x in units of wavelengths or frequency
or assumes wavelengths in [micron]
internally microns are used
Returns
-------
g: np array (float)
g(x) scattering phase function
Raises
------
ValueError
Input x values outside of defined range | dust_attenuation/radiative_transfer.py | get_scattering_phase_function | gbrammer/dust_attenuation | python | def get_scattering_phase_function(self, x):
'\n Return the scattering phase function in function of wavelength for the\n corresponding dust type (SMC or MW). The scattering phase\n function gives the angle at which the photon scatters.\n\n Parameters\n ----------\n x: float\n expects either x in units of wavelengths or frequency\n or assumes wavelengths in [micron]\n\n internally microns are used\n\n Returns\n -------\n g: np array (float)\n g(x) scattering phase function\n\n Raises\n ------\n ValueError\n Input x values outside of defined range\n '
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(x, u.micron, dtype=np.float64)
x = x_quant.value
_test_valid_x_range(x, self.x_range, 'WG00')
x = np.atleast_1d(x)
g_MW = np.array([0.8, 0.783, 0.767, 0.756, 0.745, 0.736, 0.727, 0.72, 0.712, 0.707, 0.702, 0.697, 0.691, 0.685, 0.678, 0.646, 0.624, 0.597, 0.563, 0.545, 0.533, 0.511, 0.48, 0.445, 0.42])
g_SMC = np.array([0.8, 0.783, 0.767, 0.756, 0.745, 0.736, 0.727, 0.72, 0.712, 0.707, 0.702, 0.697, 0.691, 0.685, 0.678, 0.646, 0.624, 0.597, 0.563, 0.545, 0.533, 0.511, 0.48, 0.445, 0.42])
if (self.dust_type == 'smc'):
g = g_SMC
elif (self.dust_type == 'mw'):
g = g_MW
tab = tabular_model(1, name='Tabular1D')
g_fit = tab(self.wvl_grid, lookup_table=g, name='albedo', bounds_error=False, fill_value=None, method='linear')
xinterp = (10000.0 * x)
return g_fit(xinterp) |
def load_raster_tile_lookup(iso3):
'\n Load in the preprocessed raster tile lookup.\n\n Parameters\n ----------\n iso3 : string\n Country iso3 code.\n\n Returns\n -------\n lookup : dict\n A lookup table containing raster tile boundary coordinates\n as the keys, and the file paths as the values.\n\n '
path = os.path.join(DATA_INTERMEDIATE, iso3, 'raster_lookup.csv')
data = pd.read_csv(path)
data = data.to_records('dicts')
lookup = {}
for item in data:
coords = (item['x1'], item['y1'], item['x2'], item['y2'])
lookup[coords] = item['path']
return lookup | 3,739,566,521,185,524,700 | Load in the preprocessed raster tile lookup.
Parameters
----------
iso3 : string
Country iso3 code.
Returns
-------
lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values. | scripts/los.py | load_raster_tile_lookup | edwardoughton/e3nb | python | def load_raster_tile_lookup(iso3):
'\n Load in the preprocessed raster tile lookup.\n\n Parameters\n ----------\n iso3 : string\n Country iso3 code.\n\n Returns\n -------\n lookup : dict\n A lookup table containing raster tile boundary coordinates\n as the keys, and the file paths as the values.\n\n '
path = os.path.join(DATA_INTERMEDIATE, iso3, 'raster_lookup.csv')
data = pd.read_csv(path)
data = data.to_records('dicts')
lookup = {}
for item in data:
coords = (item['x1'], item['y1'], item['x2'], item['y2'])
lookup[coords] = item['path']
return lookup |
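
The lookup CSV written during preprocessing is assumed to carry one row per DEM tile, with the bounding-box corners and the tile's file path; a sketch of that expected layout and the dictionary it becomes (the country code and file names are placeholders):

# raster_lookup.csv (assumed layout)
# x1,y1,x2,y2,path
# -81.0,-5.0,-80.0,-4.0,data/intermediate/XYZ/tile_0.tif
# -80.0,-5.0,-79.0,-4.0,data/intermediate/XYZ/tile_1.tif

lookup = {
    (-81.0, -5.0, -80.0, -4.0): 'data/intermediate/XYZ/tile_0.tif',
    (-80.0, -5.0, -79.0, -4.0): 'data/intermediate/XYZ/tile_1.tif',
}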
def generate_grid(iso3, side_length):
'\n Generate a spatial grid for the chosen country.\n '
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
if (not os.path.exists(directory)):
os.makedirs(directory)
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
return
filename = 'national_outline.shp'
path = os.path.join(DATA_INTERMEDIATE, iso3, filename)
country_outline = gpd.read_file(path, crs='epsg:4326')
country_outline.crs = 'epsg:4326'
country_outline = country_outline.to_crs('epsg:3857')
(xmin, ymin, xmax, ymax) = country_outline.total_bounds
polygons = manually_create_grid(xmin, ymin, xmax, ymax, side_length, side_length)
grid = gpd.GeoDataFrame({'geometry': polygons}, crs='epsg:3857')
intersection = gpd.overlay(grid, country_outline, how='intersection')
intersection.crs = 'epsg:3857'
intersection['area_km2'] = (intersection['geometry'].area / 1000000.0)
intersection = intersection.to_crs('epsg:4326')
intersection.to_file(path_output, crs='epsg:4326')
return intersection | -6,030,022,322,096,739,000 | Generate a spatial grid for the chosen country. | scripts/los.py | generate_grid | edwardoughton/e3nb | python | def generate_grid(iso3, side_length):
'\n \n '
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
if (not os.path.exists(directory)):
os.makedirs(directory)
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
return
filename = 'national_outline.shp'
path = os.path.join(DATA_INTERMEDIATE, iso3, filename)
country_outline = gpd.read_file(path, crs='epsg:4326')
country_outline.crs = 'epsg:4326'
country_outline = country_outline.to_crs('epsg:3857')
(xmin, ymin, xmax, ymax) = country_outline.total_bounds
polygons = manually_create_grid(xmin, ymin, xmax, ymax, side_length, side_length)
grid = gpd.GeoDataFrame({'geometry': polygons}, crs='epsg:3857')
intersection = gpd.overlay(grid, country_outline, how='intersection')
intersection.crs = 'epsg:3857'
intersection['area_km2'] = (intersection['geometry'].area / 1000000.0)
intersection = intersection.to_crs('epsg:4326')
intersection.to_file(path_output, crs='epsg:4326')
return intersection |
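
`manually_create_grid` is not shown in this section; a minimal sketch of what such a helper could look like, assuming a plain rectangular tiling of the bounding box in the projected CRS with side lengths in that CRS's units (the name and signature are taken from the call above):

from shapely.geometry import box

def manually_create_grid(xmin, ymin, xmax, ymax, length, width):
    # Tile the bounding box with length x width rectangles.
    polygons = []
    x = xmin
    while x < xmax:
        y = ymin
        while y < ymax:
            polygons.append(box(x, y, x + length, y + width))
            y += width
        x += length
    return polygons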
def find_tile(polygon, tile_lookup):
'\n\n Parameters\n ----------\n polygon : tuple\n The bounds of the modeling region.\n tile_lookup : dict\n A lookup table containing raster tile boundary coordinates\n as the keys, and the file paths as the values.\n\n Return\n ------\n output : list\n Contains the file path to the correct raster tile. Note:\n only the first element is returned and if there are more than\n one paths, an error is returned.\n\n '
output = []
poly_bbox = box(polygon[0], polygon[1], polygon[2], polygon[3])
for (key, value) in tile_lookup.items():
bbox = box(key[0], key[1], key[2], key[3])
if bbox.intersects(poly_bbox):
output.append(value)
if (len(output) == 1):
return output[0]
elif (len(output) > 1):
print('Problem with find_tile returning more than 1 path')
return output[0]
else:
print('Problem with find_tile: Unable to find raster path') | -5,881,771,837,956,214,000 | Parameters
----------
polygon : tuple
The bounds of the modeling region.
tile_lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
Return
------
output : list
Contains the file path to the correct raster tile. Note:
only the first element is returned and if there are more than
one paths, an error is returned. | scripts/los.py | find_tile | edwardoughton/e3nb | python | def find_tile(polygon, tile_lookup):
'\n\n Parameters\n ----------\n polygon : tuple\n The bounds of the modeling region.\n tile_lookup : dict\n A lookup table containing raster tile boundary coordinates\n as the keys, and the file paths as the values.\n\n Return\n ------\n output : list\n Contains the file path to the correct raster tile. Note:\n only the first element is returned and if there are more than\n one paths, an error is returned.\n\n '
output = []
poly_bbox = box(polygon[0], polygon[1], polygon[2], polygon[3])
for (key, value) in tile_lookup.items():
bbox = box(key[0], key[1], key[2], key[3])
if bbox.intersects(poly_bbox):
output.append(value)
if (len(output) == 1):
return output[0]
elif (len(output) > 1):
print('Problem with find_tile returning more than 1 path')
return output[0]
else:
print('Problem with find_tile: Unable to find raster path') |
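
Usage follows directly from the lookup structure returned by `load_raster_tile_lookup`; a small example with two hypothetical tiles:

from shapely.geometry import box

tile_lookup = {
    (-81.0, -5.0, -80.0, -4.0): 'tiles/tile_0.tif',  # hypothetical paths
    (-80.0, -5.0, -79.0, -4.0): 'tiles/tile_1.tif',
}
region = box(-80.9, -4.8, -80.2, -4.2)
path = find_tile(region.bounds, tile_lookup)  # -> 'tiles/tile_0.tif'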
def add_id_range_data_to_grid(iso3, tile_lookup, side_length):
'\n Query the Digital Elevation Model to get an estimated interdecile\n range for each grid square.\n\n '
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
filename = 'grid_final.shp'
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
return gpd.read_file(path_output, crs='epsg:4328')
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path = os.path.join(directory, filename)
grid = gpd.read_file(path, crs='epsg:4328')
output = []
for (idx, grid_tile) in grid.iterrows():
path_input = find_tile(grid_tile['geometry'].bounds, tile_lookup)
stats = next(gen_zonal_stats(grid_tile['geometry'], path_input, add_stats={'interdecile_range': interdecile_range}, nodata=0))
id_range_m = stats['interdecile_range']
output.append({'type': 'Feature', 'geometry': grid_tile['geometry'], 'properties': {'id_range_m': id_range_m, 'area_km2': grid_tile['area_km2']}})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
output = output.replace([np.inf, (- np.inf)], np.nan)
output = output[output.geometry.notnull()]
output.to_file(path_output, crs='epsg:4326')
return output | 8,463,154,447,399,699,000 | Query the Digital Elevation Model to get an estimated interdecile
range for each grid square. | scripts/los.py | add_id_range_data_to_grid | edwardoughton/e3nb | python | def add_id_range_data_to_grid(iso3, tile_lookup, side_length):
'\n Query the Digital Elevation Model to get an estimated interdecile\n range for each grid square.\n\n '
directory = os.path.join(DATA_INTERMEDIATE, iso3, 'grid')
filename = 'grid_final.shp'
path_output = os.path.join(directory, filename)
if os.path.exists(path_output):
return gpd.read_file(path_output, crs='epsg:4328')
filename = 'grid_{}_{}_km.shp'.format(side_length, side_length)
path = os.path.join(directory, filename)
grid = gpd.read_file(path, crs='epsg:4328')
output = []
for (idx, grid_tile) in grid.iterrows():
path_input = find_tile(grid_tile['geometry'].bounds, tile_lookup)
stats = next(gen_zonal_stats(grid_tile['geometry'], path_input, add_stats={'interdecile_range': interdecile_range}, nodata=0))
id_range_m = stats['interdecile_range']
output.append({'type': 'Feature', 'geometry': grid_tile['geometry'], 'properties': {'id_range_m': id_range_m, 'area_km2': grid_tile['area_km2']}})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
output = output.replace([np.inf, (- np.inf)], np.nan)
output = output[output.geometry.notnull()]
output.to_file(path_output, crs='epsg:4326')
return output |
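
`gen_zonal_stats` (from rasterstats) accepts custom statistics through `add_stats`, which is how the interdecile range defined in the next function is attached to every grid cell. A minimal standalone sketch, assuming rasterstats is installed (the shapefile and DEM names are placeholders):

import numpy as np
from rasterstats import zonal_stats

def interdecile_range(x):
    q90, q10 = np.percentile(x, [90, 10])
    return int(round(q90 - q10, 0))

stats = zonal_stats('grid_cells.shp', 'dem.tif',
                    add_stats={'interdecile_range': interdecile_range},
                    nodata=0)
print(stats[0]['interdecile_range'])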
def interdecile_range(x):
'\n Get range between bottom 10% and top 10% of values.\n\n This is from the Longley-Rice Irregular Terrain Model.\n\n Code here: https://github.com/edwardoughton/itmlogic\n Paper here: https://joss.theoj.org/papers/10.21105/joss.02266.pdf\n\n Parameters\n ----------\n x : list\n Terrain profile values.\n\n Returns\n -------\n interdecile_range : int\n The terrain irregularity parameter.\n\n '
(q90, q10) = np.percentile(x, [90, 10])
interdecile_range = int(round((q90 - q10), 0))
return interdecile_range | -8,710,541,322,283,852,000 | Get range between bottom 10% and top 10% of values.
This is from the Longley-Rice Irregular Terrain Model.
Code here: https://github.com/edwardoughton/itmlogic
Paper here: https://joss.theoj.org/papers/10.21105/joss.02266.pdf
Parameters
----------
x : list
Terrain profile values.
Returns
-------
interdecile_range : int
The terrain irregularity parameter. | scripts/los.py | interdecile_range | edwardoughton/e3nb | python | def interdecile_range(x):
'\n Get range between bottom 10% and top 10% of values.\n\n This is from the Longley-Rice Irregular Terrain Model.\n\n Code here: https://github.com/edwardoughton/itmlogic\n Paper here: https://joss.theoj.org/papers/10.21105/joss.02266.pdf\n\n Parameters\n ----------\n x : list\n Terrain profile values.\n\n Returns\n -------\n interdecile_range : int\n The terrain irregularity parameter.\n\n '
(q90, q10) = np.percentile(x, [90, 10])
interdecile_range = int(round((q90 - q10), 0))
return interdecile_range |
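
A quick worked example of the interdecile range on a synthetic ten-point terrain profile:

import numpy as np

profile = np.array([100, 105, 110, 120, 150, 200, 210, 215, 220, 300])
q90, q10 = np.percentile(profile, [90, 10])   # 228.0 and 104.5
print(int(round(q90 - q10, 0)))               # -> 124 (irregular terrain)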
def viewshed(point, path_input, path_output, tile_name, max_distance, crs):
'\n Perform a viewshed using GRASS.\n\n Parameters\n ---------\n point : tuple\n The point being queried.\n tile_lookup : dict\n A lookup table containing raster tile boundary coordinates\n as the keys, and the file paths as the values.\n path_output : string\n The directory path for the output folder.\n tile_name : string\n The name allocated to the viewshed tile.\n max_distance : int\n The maximum distance a path can be.\n crs : string\n The coordinate reference system in use.\n\n Returns\n -------\n grid : dataframe\n A geopandas dataframe containing the created grid.\n\n '
with Session(gisdb=path_output, location='location', create_opts=crs):
gcore.run_command('r.external', input=path_input, output=tile_name, overwrite=True)
gcore.run_command('r.external.out', directory='viewsheds', format='GTiff')
gcore.run_command('g.region', raster=tile_name)
gcore.run_command('r.viewshed', input=tile_name, output='{}.tif'.format(tile_name), coordinate=[point[0], point[1]], observer_elevation=30, target_elevation=30, memory=5000, overwrite=True, quiet=True, max_distance=max_distance) | -3,419,314,850,477,786,600 | Perform a viewshed using GRASS.
Parameters
---------
point : tuple
The point being queried.
tile_lookup : dict
A lookup table containing raster tile boundary coordinates
as the keys, and the file paths as the values.
path_output : string
The directory path for the output folder.
tile_name : string
The name allocated to the viewshed tile.
max_distance : int
The maximum distance a path can be.
crs : string
The coordinate reference system in use.
Returns
-------
grid : dataframe
A geopandas dataframe containing the created grid. | scripts/los.py | viewshed | edwardoughton/e3nb | python | def viewshed(point, path_input, path_output, tile_name, max_distance, crs):
'\n Perform a viewshed using GRASS.\n\n Parameters\n ---------\n point : tuple\n The point being queried.\n tile_lookup : dict\n A lookup table containing raster tile boundary coordinates\n as the keys, and the file paths as the values.\n path_output : string\n The directory path for the output folder.\n tile_name : string\n The name allocated to the viewshed tile.\n max_distance : int\n The maximum distance a path can be.\n crs : string\n The coordinate reference system in use.\n\n Returns\n -------\n grid : dataframe\n A geopandas dataframe containing the created grid.\n\n '
with Session(gisdb=path_output, location='location', create_opts=crs):
gcore.run_command('r.external', input=path_input, output=tile_name, overwrite=True)
gcore.run_command('r.external.out', directory='viewsheds', format='GTiff')
gcore.run_command('g.region', raster=tile_name)
gcore.run_command('r.viewshed', input=tile_name, output='{}.tif'.format(tile_name), coordinate=[point[0], point[1]], observer_elevation=30, target_elevation=30, memory=5000, overwrite=True, quiet=True, max_distance=max_distance) |
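
A hedged usage sketch of the function above (the point, paths, tile name and distance are placeholders; it assumes GRASS and the grass_session bindings imported by this module are available):

point = (-79.95, -4.35)   # lon, lat of the observer (illustrative)
viewshed(
    point,
    path_input='tiles/tile_0.tif',      # DEM tile covering the point
    path_output='viewshed_workspace',   # GRASS database directory
    tile_name='tile_0_viewshed',
    max_distance=40000,                 # in the units of the location CRS (illustrative)
    crs='epsg:4326',
)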
def check_los(path_input, point):
'\n Find potential LOS high points.\n\n Parameters\n ----------\n path_input : string\n File path for the digital elevation raster tile.\n point : tuple\n Coordinate point being queried.\n\n Returns\n -------\n los : string\n The Line of Sight (los) of the path queried.\n\n '
with rasterio.open(path_input) as src:
x = point[0]
y = point[1]
for val in src.sample([(x, y)]):
if np.isnan(val):
los = 'nlos'
return los
else:
los = 'clos'
return los | 3,052,230,217,628,284,400 | Find potential LOS high points.
Parameters
----------
path_input : string
File path for the digital elevation raster tile.
point : tuple
Coordinate point being queried.
Returns
-------
los : string
The Line of Sight (los) of the path queried. | scripts/los.py | check_los | edwardoughton/e3nb | python | def check_los(path_input, point):
'\n Find potential LOS high points.\n\n Parameters\n ----------\n path_input : string\n File path for the digital elevation raster tile.\n point : tuple\n Coordinate point being queried.\n\n Returns\n -------\n los : string\n The Line of Sight (los) of the path queried.\n\n '
with rasterio.open(path_input) as src:
x = point[0]
y = point[1]
for val in src.sample([(x, y)]):
if np.isnan(val):
los = 'nlos'
return los
else:
los = 'clos'
return los |
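
`check_los` only samples the viewshed raster produced above: a NaN at the receiver coordinates means the cell was not visible from the observer. The same idea as a compact standalone sketch with rasterio (the file name and coordinates are placeholders):

import numpy as np
import rasterio

def classify_point(viewshed_path, lon, lat):
    with rasterio.open(viewshed_path) as src:
        value = next(src.sample([(lon, lat)]))[0]
    return 'nlos' if np.isnan(value) else 'clos'

print(classify_point('viewshed_workspace/viewsheds/tile_0_viewshed.tif',
                     -79.90, -4.30))  # hypothetical receiver location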
def setUp(self):
'Called before each test.\n\n Performs setup.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n\n Returns:\n ``None``\n '
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp) | -659,573,350,972,982,100 | Called before each test.
Performs setup.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None`` | tests/unit/test_jlock.py | setUp | Bhav97/pylink | python | def setUp(self):
'Called before each test.\n\n Performs setup.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n\n Returns:\n ``None``\n '
assertRaisesRegexp = getattr(self, 'assertRaisesRegexp', None)
self.assertRaisesRegexp = getattr(self, 'assertRaisesRegex', assertRaisesRegexp) |
def tearDown(self):
'Called after each test.\n\n Performs teardown.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n\n Returns:\n ``None``\n '
pass | -903,176,742,624,613,600 | Called after each test.
Performs teardown.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None`` | tests/unit/test_jlock.py | tearDown | Bhav97/pylink | python | def tearDown(self):
'Called after each test.\n\n Performs teardown.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n\n Returns:\n ``None``\n '
pass |
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
'Tests initialization and deleting a ``JLock``.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n\n Returns:\n ``None``\n '
serial_no = 3735928559
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock | 4,304,005,466,927,635,500 | Tests initialization and deleting a ``JLock``.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None`` | tests/unit/test_jlock.py | test_jlock_init_and_delete | Bhav97/pylink | python | @mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_init_and_delete(self):
'Tests initialization and deleting a ``JLock``.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n\n Returns:\n ``None``\n '
serial_no = 3735928559
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
del lock |
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
'Tests trying to acquire when the lock exists for an active process.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n mock_open (Mock): mocked built-in open method\n mock_util (Mock): mocked ``psutil`` module\n mock_rm (Mock): mocked os remove method\n mock_wr (Mock): mocked os write method\n mock_op (Mock): mocked os open method\n mock_exists (Mock): mocked path exist method\n mock_close (Mock): mocked os file close method\n\n Returns:\n ``None``\n '
pid = 42
serial_no = 3735928559
mock_open.side_effect = [mock.mock_open(read_data=('%s\n' % pid)).return_value]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called() | -2,859,460,508,332,200,000 | Tests trying to acquire when the lock exists for an active process.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None`` | tests/unit/test_jlock.py | test_jlock_acquire_exists | Bhav97/pylink | python | @mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_exists(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
'Tests trying to acquire when the lock exists for an active process.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n mock_open (Mock): mocked built-in open method\n mock_util (Mock): mocked ``psutil`` module\n mock_rm (Mock): mocked os remove method\n mock_wr (Mock): mocked os write method\n mock_op (Mock): mocked os open method\n mock_exists (Mock): mocked path exist method\n mock_close (Mock): mocked os file close method\n\n Returns:\n ``None``\n '
pid = 42
serial_no = 3735928559
mock_open.side_effect = [mock.mock_open(read_data=('%s\n' % pid)).return_value]
mock_exists.side_effect = [True, True]
mock_util.pid_exists.return_value = True
mock_op.side_effect = [OSError(errno.EEXIST, '')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertFalse(lock.acquire())
self.assertFalse(lock.acquired)
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_with(pid)
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called() |
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
'Tests trying to acquire the lock but generating an os-level error.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n mock_open (Mock): mocked built-in open method\n mock_util (Mock): mocked ``psutil`` module\n mock_rm (Mock): mocked os remove method\n mock_wr (Mock): mocked os write method\n mock_op (Mock): mocked os open method\n mock_exists (Mock): mocked path exist method\n mock_close (Mock): mocked os file close method\n\n Returns:\n ``None``\n '
serial_no = 3735928559
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError((~ errno.EEXIST), 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called() | 8,056,188,146,607,140,000 | Tests trying to acquire the lock but generating an os-level error.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None`` | tests/unit/test_jlock.py | test_jlock_acquire_os_error | Bhav97/pylink | python | @mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_os_error(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
'Tests trying to acquire the lock but generating an os-level error.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n mock_open (Mock): mocked built-in open method\n mock_util (Mock): mocked ``psutil`` module\n mock_rm (Mock): mocked os remove method\n mock_wr (Mock): mocked os write method\n mock_op (Mock): mocked os open method\n mock_exists (Mock): mocked path exist method\n mock_close (Mock): mocked os file close method\n\n Returns:\n ``None``\n '
serial_no = 3735928559
mock_exists.side_effect = [False, False]
mock_op.side_effect = [OSError((~ errno.EEXIST), 'Message')]
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
with self.assertRaisesRegexp(OSError, 'Message'):
lock.acquire()
self.assertFalse(lock.acquired)
mock_open.assert_not_called()
mock_util.pid_exists.assert_not_called()
mock_op.assert_called_once()
mock_rm.assert_not_called()
mock_wr.assert_not_called() |
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
'Tests acquiring the lockfile when the current lockfile is invalid.\n\n        Args:\n          self (TestJLock): the ``TestJLock`` instance\n          mock_open (Mock): mocked built-in open method\n          mock_util (Mock): mocked ``psutil`` module\n          mock_rm (Mock): mocked os remove method\n          mock_wr (Mock): mocked os write method\n          mock_op (Mock): mocked os open method\n          mock_exists (Mock): mocked path exist method\n          mock_close (Mock): mocked os file close method\n\n        Returns:\n          ``None``\n        '
pid = 42
fd = 1
serial_no = 3735928559
mock_open.side_effect = [IOError()]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once() | -4,963,035,849,834,785,000 | Tests acquiring the lockfile when the current lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None`` | tests/unit/test_jlock.py | test_jlock_acquire_bad_file | Bhav97/pylink | python | @mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_bad_file(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
'Tests acquiring the lockfile when the current lockfile is invalid.\n\n        Args:\n          self (TestJLock): the ``TestJLock`` instance\n          mock_open (Mock): mocked built-in open method\n          mock_util (Mock): mocked ``psutil`` module\n          mock_rm (Mock): mocked os remove method\n          mock_wr (Mock): mocked os write method\n          mock_op (Mock): mocked os open method\n          mock_exists (Mock): mocked path exist method\n          mock_close (Mock): mocked os file close method\n\n        Returns:\n          ``None``\n        '
pid = 42
fd = 1
serial_no = 3735928559
mock_open.side_effect = [IOError()]
mock_exists.return_value = True
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_not_called()
mock_op.assert_called_once()
mock_wr.assert_called_once() |
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
'Tests acquiring the lockfile when the pid in the lockfile is invalid.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n mock_open (Mock): mocked built-in open method\n mock_util (Mock): mocked ``psutil`` module\n mock_rm (Mock): mocked os remove method\n mock_wr (Mock): mocked os write method\n mock_op (Mock): mocked os open method\n mock_exists (Mock): mocked path exist method\n mock_close (Mock): mocked os file close method\n\n Returns:\n ``None``\n '
fd = 1
serial_no = 3735928559
mock_open.side_effect = [mock.mock_open(read_data='dog\n').return_value]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once() | 3,479,992,910,060,522,500 | Tests acquiring the lockfile when the pid in the lockfile is invalid.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None`` | tests/unit/test_jlock.py | test_jlock_acquire_invalid_pid | Bhav97/pylink | python | @mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_invalid_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
'Tests acquiring the lockfile when the pid in the lockfile is invalid.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n mock_open (Mock): mocked built-in open method\n mock_util (Mock): mocked ``psutil`` module\n mock_rm (Mock): mocked os remove method\n mock_wr (Mock): mocked os write method\n mock_op (Mock): mocked os open method\n mock_exists (Mock): mocked path exist method\n mock_close (Mock): mocked os file close method\n\n Returns:\n ``None``\n '
fd = 1
serial_no = 3735928559
mock_open.side_effect = [mock.mock_open(read_data='dog\n').return_value]
mock_op.return_value = fd
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_not_called()
mock_rm.assert_called_once()
mock_op.assert_called_once()
mock_wr.assert_called_once() |
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
'Tests acquiring when the PID in the lockfile does not exist.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n mock_open (Mock): mocked built-in open method\n mock_util (Mock): mocked ``psutil`` module\n mock_rm (Mock): mocked os remove method\n mock_wr (Mock): mocked os write method\n mock_op (Mock): mocked os open method\n mock_exists (Mock): mocked path exist method\n mock_close (Mock): mocked os file close method\n\n Returns:\n ``None``\n '
fd = 1
serial_no = 3735928559
mock_open.side_effect = [mock.mock_open(read_data='42\n').return_value]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once() | 5,418,935,793,553,427,000 | Tests acquiring when the PID in the lockfile does not exist.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_open (Mock): mocked built-in open method
mock_util (Mock): mocked ``psutil`` module
mock_rm (Mock): mocked os remove method
mock_wr (Mock): mocked os write method
mock_op (Mock): mocked os open method
mock_exists (Mock): mocked path exist method
mock_close (Mock): mocked os file close method
Returns:
``None`` | tests/unit/test_jlock.py | test_jlock_acquire_old_pid | Bhav97/pylink | python | @mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.close')
@mock.patch('os.path.exists')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.remove')
@mock.patch('pylink.jlock.psutil')
@mock.patch('pylink.jlock.open')
def test_jlock_acquire_old_pid(self, mock_open, mock_util, mock_rm, mock_wr, mock_op, mock_exists, mock_close):
'Tests acquiring when the PID in the lockfile does not exist.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n mock_open (Mock): mocked built-in open method\n mock_util (Mock): mocked ``psutil`` module\n mock_rm (Mock): mocked os remove method\n mock_wr (Mock): mocked os write method\n mock_op (Mock): mocked os open method\n mock_exists (Mock): mocked path exist method\n mock_close (Mock): mocked os file close method\n\n Returns:\n ``None``\n '
fd = 1
serial_no = 3735928559
mock_open.side_effect = [mock.mock_open(read_data='42\n').return_value]
mock_op.return_value = fd
mock_util.pid_exists.return_value = False
lock = jlock.JLock(serial_no)
lock.release = mock.Mock()
self.assertFalse(lock.acquired)
self.assertTrue(lock.acquire())
self.assertTrue(lock.acquired)
mock_exists.assert_called_once()
mock_open.assert_called_once()
mock_util.pid_exists.assert_called_once_with(42)
mock_rm.assert_called()
mock_op.assert_called_once()
mock_wr.assert_called_once() |
@mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
'Tests releasing a held lock.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n mock_remove (Mock): mock file removal method\n mock_close (Mock): mocked close method\n mock_exists (Mock): mocked path exist method\n\n Returns:\n ``None``\n '
lock = jlock.JLock(3735928559)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired) | -1,303,112,917,660,697,900 | Tests releasing a held lock.
Args:
self (TestJLock): the ``TestJLock`` instance
mock_remove (Mock): mock file removal method
mock_close (Mock): mocked close method
mock_exists (Mock): mocked path exist method
Returns:
``None`` | tests/unit/test_jlock.py | test_jlock_release_acquired | Bhav97/pylink | python | @mock.patch('tempfile.tempdir', new='tmp')
@mock.patch('os.path.exists')
@mock.patch('os.close')
@mock.patch('os.remove')
def test_jlock_release_acquired(self, mock_remove, mock_close, mock_exists):
'Tests releasing a held lock.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n mock_remove (Mock): mock file removal method\n mock_close (Mock): mocked close method\n mock_exists (Mock): mocked path exist method\n\n Returns:\n ``None``\n '
lock = jlock.JLock(3735928559)
lock.acquired = True
lock.fd = 1
lock.path = os.sep
mock_exists.return_value = True
self.assertTrue(lock.release())
mock_remove.assert_called_once_with(os.sep)
mock_close.assert_called_once_with(1)
mock_exists.assert_called_once_with(os.sep)
self.assertEqual(False, lock.acquired) |
@mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
'Tests calling release when lock not held.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n\n Returns:\n ``None``\n '
lock = jlock.JLock(3735928559)
self.assertFalse(lock.release()) | -6,036,109,378,423,207,000 | Tests calling release when lock not held.
Args:
self (TestJLock): the ``TestJLock`` instance
Returns:
``None`` | tests/unit/test_jlock.py | test_jlock_release_not_held | Bhav97/pylink | python | @mock.patch('tempfile.tempdir', new='tmp')
def test_jlock_release_not_held(self):
'Tests calling release when lock not held.\n\n Args:\n self (TestJLock): the ``TestJLock`` instance\n\n Returns:\n ``None``\n '
lock = jlock.JLock(3735928559)
self.assertFalse(lock.release()) |
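
The six test records above pin down JLock's observable behaviour: acquire() writes a pid-bearing lockfile under the temp directory, refuses to acquire when a live process already holds it, and release() closes and removes the file. A minimal usage sketch follows; it assumes only what those tests confirm (the constructor takes a serial number and the module lives at pylink.jlock), and anything beyond acquire()/release() is an assumption rather than documented pylink API.

# Hedged usage sketch of pylink.jlock.JLock, inferred from the tests above.
from pylink import jlock

def run_with_emulator_lock(serial_no):
    lock = jlock.JLock(serial_no)            # one lockfile per emulator serial number
    if not lock.acquire():                   # returns False when another live process holds the lock
        raise RuntimeError('emulator %d is already locked' % serial_no)
    try:
        pass                                 # interact with the emulator here
    finally:
        lock.release()                       # closes the fd and removes the lockfile

if __name__ == '__main__':
    run_with_emulator_lock(3735928559)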
def calculate_mypypath() -> List[str]:
'Return MYPYPATH so that stubs have precedence over local sources.'
typeshed_root = None
count = 0
started = time.time()
for parent in itertools.chain(Path(__file__).parents, Path(mypy.api.__file__).parents, Path(os.__file__).parents):
count += 1
candidate = (((parent / 'lib') / 'mypy') / 'typeshed')
if candidate.is_dir():
typeshed_root = candidate
break
candidate = (parent / 'typeshed')
if candidate.is_dir():
typeshed_root = candidate
break
LOG.debug('Checked %d paths in %.2fs looking for typeshed. Found %s', count, (time.time() - started), typeshed_root)
if (not typeshed_root):
return []
stdlib_dirs = ('3.7', '3.6', '3.5', '3.4', '3.3', '3.2', '3', '2and3')
stdlib_stubs = [((typeshed_root / 'stdlib') / stdlib_dir) for stdlib_dir in stdlib_dirs]
third_party_dirs = ('3.7', '3.6', '3', '2and3')
third_party_stubs = [((typeshed_root / 'third_party') / tp_dir) for tp_dir in third_party_dirs]
return [str(p) for p in (stdlib_stubs + third_party_stubs)] | -3,410,128,251,006,485,000 | Return MYPYPATH so that stubs have precedence over local sources. | flake8_mypy.py | calculate_mypypath | ambv/flake8-mypy | python | def calculate_mypypath() -> List[str]:
typeshed_root = None
count = 0
started = time.time()
for parent in itertools.chain(Path(__file__).parents, Path(mypy.api.__file__).parents, Path(os.__file__).parents):
count += 1
candidate = (((parent / 'lib') / 'mypy') / 'typeshed')
if candidate.is_dir():
typeshed_root = candidate
break
candidate = (parent / 'typeshed')
if candidate.is_dir():
typeshed_root = candidate
break
LOG.debug('Checked %d paths in %.2fs looking for typeshed. Found %s', count, (time.time() - started), typeshed_root)
if (not typeshed_root):
return []
stdlib_dirs = ('3.7', '3.6', '3.5', '3.4', '3.3', '3.2', '3', '2and3')
stdlib_stubs = [((typeshed_root / 'stdlib') / stdlib_dir) for stdlib_dir in stdlib_dirs]
third_party_dirs = ('3.7', '3.6', '3', '2and3')
third_party_stubs = [((typeshed_root / 'third_party') / tp_dir) for tp_dir in third_party_dirs]
return [str(p) for p in (stdlib_stubs + third_party_stubs)] |
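
calculate_mypypath only returns a list of stub directories; how that list becomes the MYPYPATH environment variable is not shown in this record. The sketch below assumes the conventional os.pathsep join and is illustrative only.

# Illustrative only: turn the stub directory list into a MYPYPATH value.
import os

def mypypath_env(paths):
    # paths is the list of typeshed stub directories returned by calculate_mypypath()
    env = dict(os.environ)
    if paths:
        env['MYPYPATH'] = os.pathsep.join(paths)   # stubs take precedence over local sources
    return env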
@classmethod
def adapt_error(cls, e: Any) -> _Flake8Error:
'Adapts the extended error namedtuple to be compatible with Flake8.'
return e._replace(message=e.message.format(*e.vars))[:4] | 8,806,283,538,528,994,000 | Adapts the extended error namedtuple to be compatible with Flake8. | flake8_mypy.py | adapt_error | ambv/flake8-mypy | python | @classmethod
def adapt_error(cls, e: Any) -> _Flake8Error:
return e._replace(message=e.message.format(*e.vars))[:4] |
def omit_error(self, e: Error) -> bool:
'Returns True if error should be ignored.'
if (e.vars and (e.vars[0] == 'No parent module -- cannot perform relative import')):
return True
return bool(noqa(self.lines[(e.lineno - 1)])) | 50,287,199,794,713,950 | Returns True if error should be ignored. | flake8_mypy.py | omit_error | ambv/flake8-mypy | python | def omit_error(self, e: Error) -> bool:
if (e.vars and (e.vars[0] == 'No parent module -- cannot perform relative import')):
return True
return bool(noqa(self.lines[(e.lineno - 1)])) |
def generic_visit(self, node: ast.AST) -> None:
'Called if no explicit visitor function exists for a node.'
for (_field, value) in ast.iter_fields(node):
if self.should_type_check:
break
if isinstance(value, list):
for item in value:
if self.should_type_check:
break
if isinstance(item, ast.AST):
self.visit(item)
elif isinstance(value, ast.AST):
self.visit(value) | 8,189,229,941,107,114,000 | Called if no explicit visitor function exists for a node. | flake8_mypy.py | generic_visit | ambv/flake8-mypy | python | def generic_visit(self, node: ast.AST) -> None:
for (_field, value) in ast.iter_fields(node):
if self.should_type_check:
break
if isinstance(value, list):
for item in value:
if self.should_type_check:
break
if isinstance(item, ast.AST):
self.visit(item)
elif isinstance(value, ast.AST):
self.visit(value) |
def send_lineage(self, operator=None, inlets=None, outlets=None, context=None):
'\n Sends lineage metadata to a backend\n :param operator: the operator executing a transformation on the inlets and outlets\n :param inlets: the inlets to this operator\n :param outlets: the outlets from this operator\n :param context: the current context of the task instance\n '
raise NotImplementedError() | -4,835,420,171,956,280,000 | Sends lineage metadata to a backend
:param operator: the operator executing a transformation on the inlets and outlets
:param inlets: the inlets to this operator
:param outlets: the outlets from this operator
:param context: the current context of the task instance | airflow/lineage/backend/__init__.py | send_lineage | 1010data/incubator-airflow | python | def send_lineage(self, operator=None, inlets=None, outlets=None, context=None):
'\n Sends lineage metadata to a backend\n :param operator: the operator executing a transformation on the inlets and outlets\n :param inlets: the inlets to this operator\n :param outlets: the outlets from this operator\n :param context: the current context of the task instance\n '
raise NotImplementedError() |
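
Because send_lineage only raises NotImplementedError, a usable backend has to subclass and override it. The subclass below is a hypothetical illustration: the LineageBackend class name is inferred from the record's module path, and the logging behaviour is an assumption rather than Airflow's actual API.

# Hypothetical backend that logs lineage metadata instead of shipping it anywhere.
import logging

from airflow.lineage.backend import LineageBackend   # class name assumed from the module path

class LoggingLineageBackend(LineageBackend):
    def send_lineage(self, operator=None, inlets=None, outlets=None, context=None):
        # A real backend would push this metadata to an external lineage store.
        logging.getLogger(__name__).info(
            'lineage task=%s inlets=%s outlets=%s',
            getattr(operator, 'task_id', None), inlets, outlets)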
def distance_p2p(points_src, normals_src, points_tgt, normals_tgt):
' Computes minimal distances of each point in points_src to points_tgt.\n\n Args:\n points_src (numpy array): source points\n normals_src (numpy array): source normals\n points_tgt (numpy array): target points\n normals_tgt (numpy array): target normals\n '
kdtree = KDTree(points_tgt)
(dist, idx) = kdtree.query(points_src)
if ((normals_src is not None) and (normals_tgt is not None)):
normals_src = (normals_src / np.linalg.norm(normals_src, axis=(- 1), keepdims=True))
normals_tgt = (normals_tgt / np.linalg.norm(normals_tgt, axis=(- 1), keepdims=True))
normals_dot_product = (normals_tgt[idx] * normals_src).sum(axis=(- 1))
normals_dot_product = np.abs(normals_dot_product)
else:
normals_dot_product = np.array(([np.nan] * points_src.shape[0]), dtype=np.float32)
return (dist, normals_dot_product) | 6,655,076,472,768,128,000 | Computes minimal distances of each point in points_src to points_tgt.
Args:
points_src (numpy array): source points
normals_src (numpy array): source normals
points_tgt (numpy array): target points
normals_tgt (numpy array): target normals | src/eval.py | distance_p2p | hummat/convolutional_occupancy_networks | python | def distance_p2p(points_src, normals_src, points_tgt, normals_tgt):
' Computes minimal distances of each point in points_src to points_tgt.\n\n Args:\n points_src (numpy array): source points\n normals_src (numpy array): source normals\n points_tgt (numpy array): target points\n normals_tgt (numpy array): target normals\n '
kdtree = KDTree(points_tgt)
(dist, idx) = kdtree.query(points_src)
if ((normals_src is not None) and (normals_tgt is not None)):
normals_src = (normals_src / np.linalg.norm(normals_src, axis=(- 1), keepdims=True))
normals_tgt = (normals_tgt / np.linalg.norm(normals_tgt, axis=(- 1), keepdims=True))
normals_dot_product = (normals_tgt[idx] * normals_src).sum(axis=(- 1))
normals_dot_product = np.abs(normals_dot_product)
else:
normals_dot_product = np.array(([np.nan] * points_src.shape[0]), dtype=np.float32)
return (dist, normals_dot_product) |
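
A small usage sketch for distance_p2p on random data; it assumes numpy and the module-level KDTree import from the original file are available, and passes None for the normals so the dot-product channel comes back as NaN.

# Illustration: nearest-neighbour distances between two random point clouds.
import numpy as np

rng = np.random.RandomState(0)
points_src = rng.rand(100, 3).astype(np.float32)     # e.g. samples from a predicted mesh
points_tgt = rng.rand(200, 3).astype(np.float32)     # e.g. the ground-truth point cloud

dist, normals_dot = distance_p2p(points_src, None, points_tgt, None)
print('mean source-to-target distance:', dist.mean())  # normals_dot is all NaN here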
def distance_p2m(points, mesh):
' Compute minimal distances of each point in points to mesh.\n\n Args:\n points (numpy array): points array\n mesh (trimesh): mesh\n\n '
(_, dist, _) = trimesh.proximity.closest_point(mesh, points)
return dist | 5,079,051,146,742,759,000 | Compute minimal distances of each point in points to mesh.
Args:
points (numpy array): points array
mesh (trimesh): mesh | src/eval.py | distance_p2m | hummat/convolutional_occupancy_networks | python | def distance_p2m(points, mesh):
' Compute minimal distances of each point in points to mesh.\n\n Args:\n points (numpy array): points array\n mesh (trimesh): mesh\n\n '
(_, dist, _) = trimesh.proximity.closest_point(mesh, points)
return dist |
def get_threshold_percentage(dist, thresholds):
' Evaluates a point cloud.\n\n Args:\n dist (numpy array): calculated distance\n thresholds (numpy array): threshold values for the F-score calculation\n '
in_threshold = [(dist <= t).mean() for t in thresholds]
return in_threshold | -5,807,954,387,139,640,000 | Evaluates a point cloud.
Args:
dist (numpy array): calculated distance
thresholds (numpy array): threshold values for the F-score calculation | src/eval.py | get_threshold_percentage | hummat/convolutional_occupancy_networks | python | def get_threshold_percentage(dist, thresholds):
' Evaluates a point cloud.\n\n Args:\n dist (numpy array): calculated distance\n thresholds (numpy array): threshold values for the F-score calculation\n '
in_threshold = [(dist <= t).mean() for t in thresholds]
return in_threshold |
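
A short illustration of the per-threshold fractions; these curves are what eval_pointcloud later combines into precision, recall and F-score (index 9 corresponds to the threshold of roughly 0.01).

# Illustration: fraction of distances falling under each threshold value.
import numpy as np

thresholds = np.linspace(1.0 / 1000, 1, 1000)
dist = np.array([0.002, 0.008, 0.02, 0.3])
curve = get_threshold_percentage(dist, thresholds)
print(curve[9])   # share of points within roughly 0.01 -> 0.5 for this toy example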
def eval_mesh(self, mesh, pointcloud_tgt, normals_tgt, points_iou, occ_tgt, remove_wall=False):
' Evaluates a mesh.\n\n Args:\n mesh (trimesh): mesh which should be evaluated\n pointcloud_tgt (numpy array): target point cloud\n normals_tgt (numpy array): target normals\n points_iou (numpy_array): points tensor for IoU evaluation\n occ_tgt (numpy_array): GT occupancy values for IoU points\n '
if ((len(mesh.vertices) != 0) and (len(mesh.faces) != 0)):
if remove_wall:
(pointcloud, idx) = mesh.sample((2 * self.n_points), return_index=True)
eps = 0.007
(x_max, x_min) = (pointcloud_tgt[:, 0].max(), pointcloud_tgt[:, 0].min())
(y_max, y_min) = (pointcloud_tgt[:, 1].max(), pointcloud_tgt[:, 1].min())
(z_max, z_min) = (pointcloud_tgt[:, 2].max(), pointcloud_tgt[:, 2].min())
(x_max, x_min) = ((x_max + eps), (x_min - eps))
(y_max, y_min) = ((y_max + eps), (y_min - eps))
(z_max, z_min) = ((z_max + eps), (z_min - eps))
mask_x = ((pointcloud[:, 0] <= x_max) & (pointcloud[:, 0] >= x_min))
mask_y = (pointcloud[:, 1] >= y_min)
mask_z = ((pointcloud[:, 2] <= z_max) & (pointcloud[:, 2] >= z_min))
mask = ((mask_x & mask_y) & mask_z)
pointcloud_new = pointcloud[mask]
idx_new = np.random.randint(pointcloud_new.shape[0], size=self.n_points)
pointcloud = pointcloud_new[idx_new]
idx = idx[mask][idx_new]
else:
(pointcloud, idx) = mesh.sample(self.n_points, return_index=True)
pointcloud = pointcloud.astype(np.float32)
normals = mesh.face_normals[idx]
else:
pointcloud = np.empty((0, 3))
normals = np.empty((0, 3))
out_dict = self.eval_pointcloud(pointcloud, pointcloud_tgt, normals, normals_tgt)
if ((len(mesh.vertices) != 0) and (len(mesh.faces) != 0)):
occ = check_mesh_contains(mesh, points_iou)
if (occ_tgt.min() < 0):
occ_tgt = (occ_tgt <= 0).astype(np.float32)
out_dict['iou'] = compute_iou(occ, occ_tgt)
else:
out_dict['iou'] = 0.0
return out_dict | 6,281,606,503,414,471,000 | Evaluates a mesh.
Args:
mesh (trimesh): mesh which should be evaluated
pointcloud_tgt (numpy array): target point cloud
normals_tgt (numpy array): target normals
points_iou (numpy_array): points tensor for IoU evaluation
occ_tgt (numpy_array): GT occupancy values for IoU points | src/eval.py | eval_mesh | hummat/convolutional_occupancy_networks | python | def eval_mesh(self, mesh, pointcloud_tgt, normals_tgt, points_iou, occ_tgt, remove_wall=False):
' Evaluates a mesh.\n\n Args:\n mesh (trimesh): mesh which should be evaluated\n pointcloud_tgt (numpy array): target point cloud\n normals_tgt (numpy array): target normals\n points_iou (numpy_array): points tensor for IoU evaluation\n occ_tgt (numpy_array): GT occupancy values for IoU points\n '
if ((len(mesh.vertices) != 0) and (len(mesh.faces) != 0)):
if remove_wall:
(pointcloud, idx) = mesh.sample((2 * self.n_points), return_index=True)
eps = 0.007
(x_max, x_min) = (pointcloud_tgt[:, 0].max(), pointcloud_tgt[:, 0].min())
(y_max, y_min) = (pointcloud_tgt[:, 1].max(), pointcloud_tgt[:, 1].min())
(z_max, z_min) = (pointcloud_tgt[:, 2].max(), pointcloud_tgt[:, 2].min())
(x_max, x_min) = ((x_max + eps), (x_min - eps))
(y_max, y_min) = ((y_max + eps), (y_min - eps))
(z_max, z_min) = ((z_max + eps), (z_min - eps))
mask_x = ((pointcloud[:, 0] <= x_max) & (pointcloud[:, 0] >= x_min))
mask_y = (pointcloud[:, 1] >= y_min)
mask_z = ((pointcloud[:, 2] <= z_max) & (pointcloud[:, 2] >= z_min))
mask = ((mask_x & mask_y) & mask_z)
pointcloud_new = pointcloud[mask]
idx_new = np.random.randint(pointcloud_new.shape[0], size=self.n_points)
pointcloud = pointcloud_new[idx_new]
idx = idx[mask][idx_new]
else:
(pointcloud, idx) = mesh.sample(self.n_points, return_index=True)
pointcloud = pointcloud.astype(np.float32)
normals = mesh.face_normals[idx]
else:
pointcloud = np.empty((0, 3))
normals = np.empty((0, 3))
out_dict = self.eval_pointcloud(pointcloud, pointcloud_tgt, normals, normals_tgt)
if ((len(mesh.vertices) != 0) and (len(mesh.faces) != 0)):
occ = check_mesh_contains(mesh, points_iou)
if (occ_tgt.min() < 0):
occ_tgt = (occ_tgt <= 0).astype(np.float32)
out_dict['iou'] = compute_iou(occ, occ_tgt)
else:
out_dict['iou'] = 0.0
return out_dict |
@staticmethod
def eval_pointcloud(pointcloud, pointcloud_tgt, normals=None, normals_tgt=None, thresholds=np.linspace((1.0 / 1000), 1, 1000)):
' Evaluates a point cloud.\n\n Args:\n pointcloud (numpy array): predicted point cloud\n pointcloud_tgt (numpy array): target point cloud\n normals (numpy array): predicted normals\n normals_tgt (numpy array): target normals\n thresholds (numpy array): threshold values for the F-score calculation\n '
if (pointcloud.shape[0] == 0):
logger.warning('Empty pointcloud / mesh detected!')
out_dict = EMPTY_PCL_DICT.copy()
if ((normals is not None) and (normals_tgt is not None)):
out_dict.update(EMPTY_PCL_DICT_NORMALS)
return out_dict
pointcloud = np.asarray(pointcloud)
pointcloud_tgt = np.asarray(pointcloud_tgt)
(completeness, completeness_normals) = distance_p2p(pointcloud_tgt, normals_tgt, pointcloud, normals)
recall = get_threshold_percentage(completeness, thresholds)
completeness2 = (completeness ** 2)
completeness = completeness.mean()
completeness2 = completeness2.mean()
completeness_normals = completeness_normals.mean()
(accuracy, accuracy_normals) = distance_p2p(pointcloud, normals, pointcloud_tgt, normals_tgt)
precision = get_threshold_percentage(accuracy, thresholds)
accuracy2 = (accuracy ** 2)
accuracy = accuracy.mean()
accuracy2 = accuracy2.mean()
accuracy_normals = accuracy_normals.mean()
chamferL2 = (0.5 * (completeness2 + accuracy2))
normals_correctness = ((0.5 * completeness_normals) + (0.5 * accuracy_normals))
chamferL1 = (0.5 * (completeness + accuracy))
F = [(((2 * precision[i]) * recall[i]) / (precision[i] + recall[i])) for i in range(len(precision))]
out_dict = {'completeness': completeness, 'accuracy': accuracy, 'normals completeness': completeness_normals, 'normals accuracy': accuracy_normals, 'normals': normals_correctness, 'completeness2': completeness2, 'accuracy2': accuracy2, 'chamfer-L2': chamferL2, 'chamfer-L1': chamferL1, 'f-score': F[9], 'f-score-15': F[14], 'f-score-20': F[19]}
return out_dict | 1,854,094,332,186,122,500 | Evaluates a point cloud.
Args:
pointcloud (numpy array): predicted point cloud
pointcloud_tgt (numpy array): target point cloud
normals (numpy array): predicted normals
normals_tgt (numpy array): target normals
thresholds (numpy array): threshold values for the F-score calculation | src/eval.py | eval_pointcloud | hummat/convolutional_occupancy_networks | python | @staticmethod
def eval_pointcloud(pointcloud, pointcloud_tgt, normals=None, normals_tgt=None, thresholds=np.linspace((1.0 / 1000), 1, 1000)):
' Evaluates a point cloud.\n\n Args:\n pointcloud (numpy array): predicted point cloud\n pointcloud_tgt (numpy array): target point cloud\n normals (numpy array): predicted normals\n normals_tgt (numpy array): target normals\n thresholds (numpy array): threshold values for the F-score calculation\n '
if (pointcloud.shape[0] == 0):
logger.warning('Empty pointcloud / mesh detected!')
out_dict = EMPTY_PCL_DICT.copy()
if ((normals is not None) and (normals_tgt is not None)):
out_dict.update(EMPTY_PCL_DICT_NORMALS)
return out_dict
pointcloud = np.asarray(pointcloud)
pointcloud_tgt = np.asarray(pointcloud_tgt)
(completeness, completeness_normals) = distance_p2p(pointcloud_tgt, normals_tgt, pointcloud, normals)
recall = get_threshold_percentage(completeness, thresholds)
completeness2 = (completeness ** 2)
completeness = completeness.mean()
completeness2 = completeness2.mean()
completeness_normals = completeness_normals.mean()
(accuracy, accuracy_normals) = distance_p2p(pointcloud, normals, pointcloud_tgt, normals_tgt)
precision = get_threshold_percentage(accuracy, thresholds)
accuracy2 = (accuracy ** 2)
accuracy = accuracy.mean()
accuracy2 = accuracy2.mean()
accuracy_normals = accuracy_normals.mean()
chamferL2 = (0.5 * (completeness2 + accuracy2))
normals_correctness = ((0.5 * completeness_normals) + (0.5 * accuracy_normals))
chamferL1 = (0.5 * (completeness + accuracy))
F = [(((2 * precision[i]) * recall[i]) / (precision[i] + recall[i])) for i in range(len(precision))]
out_dict = {'completeness': completeness, 'accuracy': accuracy, 'normals completeness': completeness_normals, 'normals accuracy': accuracy_normals, 'normals': normals_correctness, 'completeness2': completeness2, 'accuracy2': accuracy2, 'chamfer-L2': chamferL2, 'chamfer-L1': chamferL1, 'f-score': F[9], 'f-score-15': F[14], 'f-score-20': F[19]}
return out_dict |
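
An end-to-end sketch of the point-cloud metrics on synthetic data. The enclosing class is not named in this record, so it is referred to below as MeshEvaluator purely as an assumption; eval_pointcloud being a staticmethod is confirmed by the record.

# Illustration: chamfer distance and F-score between a prediction and a noisy copy of it.
import numpy as np

rng = np.random.RandomState(0)
pointcloud_tgt = rng.rand(1000, 3).astype(np.float32)
pointcloud_pred = (pointcloud_tgt + 0.005 * rng.randn(1000, 3)).astype(np.float32)

# Normals are omitted, so the normal-consistency entries come back as NaN.
metrics = MeshEvaluator.eval_pointcloud(pointcloud_pred, pointcloud_tgt)
print(metrics['chamfer-L1'], metrics['f-score'])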
def test_assert_json(self):
'Expecting json response but content type is not valid.'
self.headers.append(('content-type', 'text/html; charset=UTF-8'))
self.client.get('auth/token')
self.assertRaises(AssertionError, (lambda : self.client.json)) | -6,025,931,481,044,628,000 | Expecting json response but content type is not valid. | src/wheezy/core/tests/test_httpclient.py | test_assert_json | akornatskyy/wheezy.core | python | def test_assert_json(self):
self.headers.append(('content-type', 'text/html; charset=UTF-8'))
self.client.get('auth/token')
self.assertRaises(AssertionError, (lambda : self.client.json)) |
def test_json(self):
'json response.'
patcher = patch.object(httpclient, 'json_loads')
mock_json_loads = patcher.start()
mock_json_loads.return_value = {}
self.headers.append(('content-type', 'application/json; charset=UTF-8'))
self.mock_response.read.return_value = '{}'.encode('utf-8')
self.client.get('auth/token')
assert ({} == self.client.json)
patcher.stop() | -6,126,482,337,211,894,000 | json response. | src/wheezy/core/tests/test_httpclient.py | test_json | akornatskyy/wheezy.core | python | def test_json(self):
patcher = patch.object(httpclient, 'json_loads')
mock_json_loads = patcher.start()
mock_json_loads.return_value = {}
self.headers.append(('content-type', 'application/json; charset=UTF-8'))
self.mock_response.read.return_value = '{}'.encode('utf-8')
self.client.get('auth/token')
assert ({} == self.client.json)
patcher.stop() |
def test_gzip(self):
'Ensure gzip decompression.'
self.headers.append(('content-encoding', 'gzip'))
self.mock_response.read.return_value = compress('test'.encode('utf-8'))
self.client.get('auth/token')
assert ('test' == self.client.content) | -4,188,835,249,121,999,000 | Ensure gzip decompression. | src/wheezy/core/tests/test_httpclient.py | test_gzip | akornatskyy/wheezy.core | python | def test_gzip(self):
self.headers.append(('content-encoding', 'gzip'))
self.mock_response.read.return_value = compress('test'.encode('utf-8'))
self.client.get('auth/token')
assert ('test' == self.client.content) |
def test_etag(self):
'ETag processing.'
self.headers.append(('etag', '"ca231fbc"'))
self.client.get('auth/token')
(method, path, body, headers) = self.mock_c.request.call_args[0]
assert ('If-None-Match' not in headers)
assert ('"ca231fbc"' == self.client.etags['/api/v1/auth/token'])
self.client.get('auth/token')
(method, path, body, headers) = self.mock_c.request.call_args[0]
assert ('"ca231fbc"' == headers['If-None-Match']) | 1,679,836,737,837,823,700 | ETag processing. | src/wheezy/core/tests/test_httpclient.py | test_etag | akornatskyy/wheezy.core | python | def test_etag(self):
self.headers.append(('etag', '"ca231fbc"'))
self.client.get('auth/token')
(method, path, body, headers) = self.mock_c.request.call_args[0]
assert ('If-None-Match' not in headers)
assert ('"ca231fbc"' == self.client.etags['/api/v1/auth/token'])
self.client.get('auth/token')
(method, path, body, headers) = self.mock_c.request.call_args[0]
assert ('"ca231fbc"' == headers['If-None-Match']) |
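
The four test records above establish the client's JSON parsing, gzip decompression and ETag caching. The sketch below shows the corresponding happy path; the HTTPClient import path and constructor argument are assumptions, since the tests only exercise get(), json, content and etags.

# Hedged sketch of the behaviour the tests above verify.
from wheezy.core.httpclient import HTTPClient   # import path assumed

client = HTTPClient('https://example.com/api/v1/')   # constructor argument is an assumption
client.get('auth/token')       # any response ETag is remembered in client.etags
token = client.json            # parsed JSON body; asserts an application/json content type
client.get('auth/token')       # this request carries If-None-Match with the cached ETag
print(client.etags)            # e.g. {'/api/v1/auth/token': '"ca231fbc"'}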
@classmethod
def is_usable(cls):
'\n Check whether this class is available for use.\n\n :return: Boolean determination of whether this implementation is usable.\n :rtype: bool\n\n '
return True | -7,418,314,823,100,840,000 | Check whether this class is available for use.
:return: Boolean determination of whether this implementation is usable.
:rtype: bool | python/smqtk/representation/descriptor_index/memory.py | is_usable | cdeepakroy/SMQTK | python | @classmethod
def is_usable(cls):
'\n Check whether this class is available for use.\n\n :return: Boolean determination of whether this implementation is usable.\n :rtype: bool\n\n '
return True |
@classmethod
def get_default_config(cls):
"\n Generate and return a default configuration dictionary for this class.\n This will be primarily used for generating what the configuration\n dictionary would look like for this class without instantiating it.\n\n By default, we observe what this class's constructor takes as arguments,\n turning those argument names into configuration dictionary keys. If any\n of those arguments have defaults, we will add those values into the\n configuration dictionary appropriately. The dictionary returned should\n only contain JSON compliant value types.\n\n It is not be guaranteed that the configuration dictionary returned\n from this method is valid for construction of an instance of this class.\n\n :return: Default configuration dictionary for the class.\n :rtype: dict\n\n "
c = super(MemoryDescriptorIndex, cls).get_default_config()
c['cache_element'] = plugin.make_config(get_data_element_impls())
return c | -460,871,247,947,447,100 | Generate and return a default configuration dictionary for this class.
This will be primarily used for generating what the configuration
dictionary would look like for this class without instantiating it.
By default, we observe what this class's constructor takes as arguments,
turning those argument names into configuration dictionary keys. If any
of those arguments have defaults, we will add those values into the
configuration dictionary appropriately. The dictionary returned should
only contain JSON compliant value types.
It is not guaranteed that the configuration dictionary returned
from this method is valid for construction of an instance of this class.
:return: Default configuration dictionary for the class.
:rtype: dict | python/smqtk/representation/descriptor_index/memory.py | get_default_config | cdeepakroy/SMQTK | python | @classmethod
def get_default_config(cls):
"\n Generate and return a default configuration dictionary for this class.\n This will be primarily used for generating what the configuration\n dictionary would look like for this class without instantiating it.\n\n By default, we observe what this class's constructor takes as arguments,\n turning those argument names into configuration dictionary keys. If any\n of those arguments have defaults, we will add those values into the\n configuration dictionary appropriately. The dictionary returned should\n only contain JSON compliant value types.\n\n It is not be guaranteed that the configuration dictionary returned\n from this method is valid for construction of an instance of this class.\n\n :return: Default configuration dictionary for the class.\n :rtype: dict\n\n "
c = super(MemoryDescriptorIndex, cls).get_default_config()
c['cache_element'] = plugin.make_config(get_data_element_impls())
return c |
@classmethod
def from_config(cls, config_dict, merge_default=True):
'\n Instantiate a new instance of this class given the configuration\n JSON-compliant dictionary encapsulating initialization arguments.\n\n :param config_dict: JSON compliant dictionary encapsulating\n a configuration.\n :type config_dict: dict\n\n :param merge_default: Merge the given configuration on top of the\n default provided by ``get_default_config``.\n :type merge_default: bool\n\n :return: Constructed instance from the provided config.\n :rtype: MemoryDescriptorIndex\n\n '
if merge_default:
config_dict = merge_dict(cls.get_default_config(), config_dict)
if (config_dict['cache_element'] and config_dict['cache_element']['type']):
e = plugin.from_plugin_config(config_dict['cache_element'], get_data_element_impls())
config_dict['cache_element'] = e
else:
config_dict['cache_element'] = None
return super(MemoryDescriptorIndex, cls).from_config(config_dict, False) | 8,910,833,205,713,397,000 | Instantiate a new instance of this class given the configuration
JSON-compliant dictionary encapsulating initialization arguments.
:param config_dict: JSON compliant dictionary encapsulating
a configuration.
:type config_dict: dict
:param merge_default: Merge the given configuration on top of the
default provided by ``get_default_config``.
:type merge_default: bool
:return: Constructed instance from the provided config.
:rtype: MemoryDescriptorIndex | python/smqtk/representation/descriptor_index/memory.py | from_config | cdeepakroy/SMQTK | python | @classmethod
def from_config(cls, config_dict, merge_default=True):
'\n Instantiate a new instance of this class given the configuration\n JSON-compliant dictionary encapsulating initialization arguments.\n\n :param config_dict: JSON compliant dictionary encapsulating\n a configuration.\n :type config_dict: dict\n\n :param merge_default: Merge the given configuration on top of the\n default provided by ``get_default_config``.\n :type merge_default: bool\n\n :return: Constructed instance from the provided config.\n :rtype: MemoryDescriptorIndex\n\n '
if merge_default:
config_dict = merge_dict(cls.get_default_config(), config_dict)
if (config_dict['cache_element'] and config_dict['cache_element']['type']):
e = plugin.from_plugin_config(config_dict['cache_element'], get_data_element_impls())
config_dict['cache_element'] = e
else:
config_dict['cache_element'] = None
return super(MemoryDescriptorIndex, cls).from_config(config_dict, False) |
def __init__(self, cache_element=None, pickle_protocol=(- 1)):
'\n Initialize a new in-memory descriptor index, or reload one from a\n cache.\n\n :param cache_element: Optional data element cache, loading an existing\n index if the element has bytes. If the given element is writable,\n new descriptors added to this index are cached to the element.\n :type cache_element: None | smqtk.representation.DataElement\n\n :param pickle_protocol: Pickling protocol to use when serializing index\n table to the optionally provided, writable cache element. We will\n use -1 by default (latest version, probably a binary form).\n :type pickle_protocol: int\n\n '
super(MemoryDescriptorIndex, self).__init__()
self._table = {}
self.cache_element = cache_element
self.pickle_protocol = pickle_protocol
if (cache_element and (not cache_element.is_empty())):
self._log.debug('Loading cached descriptor index table from %s element.', cache_element.__class__.__name__)
self._table = pickle.loads(cache_element.get_bytes()) | 9,142,104,922,851,511,000 | Initialize a new in-memory descriptor index, or reload one from a
cache.
:param cache_element: Optional data element cache, loading an existing
index if the element has bytes. If the given element is writable,
new descriptors added to this index are cached to the element.
:type cache_element: None | smqtk.representation.DataElement
:param pickle_protocol: Pickling protocol to use when serializing index
table to the optionally provided, writable cache element. We will
use -1 by default (latest version, probably a binary form).
:type pickle_protocol: int | python/smqtk/representation/descriptor_index/memory.py | __init__ | cdeepakroy/SMQTK | python | def __init__(self, cache_element=None, pickle_protocol=(- 1)):
'\n Initialize a new in-memory descriptor index, or reload one from a\n cache.\n\n :param cache_element: Optional data element cache, loading an existing\n index if the element has bytes. If the given element is writable,\n new descriptors added to this index are cached to the element.\n :type cache_element: None | smqtk.representation.DataElement\n\n :param pickle_protocol: Pickling protocol to use when serializing index\n table to the optionally provided, writable cache element. We will\n use -1 by default (latest version, probably a binary form).\n :type pickle_protocol: int\n\n '
super(MemoryDescriptorIndex, self).__init__()
self._table = {}
self.cache_element = cache_element
self.pickle_protocol = pickle_protocol
if (cache_element and (not cache_element.is_empty())):
self._log.debug('Loading cached descriptor index table from %s element.', cache_element.__class__.__name__)
self._table = pickle.loads(cache_element.get_bytes()) |
def clear(self):
"\n Clear this descriptor index's entries.\n "
self._table = {}
self.cache_table() | 8,491,712,948,916,743,000 | Clear this descriptor index's entries. | python/smqtk/representation/descriptor_index/memory.py | clear | cdeepakroy/SMQTK | python | def clear(self):
"\n \n "
self._table = {}
self.cache_table() |
def has_descriptor(self, uuid):
'\n Check if a DescriptorElement with the given UUID exists in this index.\n\n :param uuid: UUID to query for\n :type uuid: collections.Hashable\n\n :return: True if a DescriptorElement with the given UUID exists in this\n index, or False if not.\n :rtype: bool\n\n '
return (uuid in self._table) | -5,014,531,050,288,331,000 | Check if a DescriptorElement with the given UUID exists in this index.
:param uuid: UUID to query for
:type uuid: collections.Hashable
:return: True if a DescriptorElement with the given UUID exists in this
index, or False if not.
:rtype: bool | python/smqtk/representation/descriptor_index/memory.py | has_descriptor | cdeepakroy/SMQTK | python | def has_descriptor(self, uuid):
'\n Check if a DescriptorElement with the given UUID exists in this index.\n\n :param uuid: UUID to query for\n :type uuid: collections.Hashable\n\n :return: True if a DescriptorElement with the given UUID exists in this\n index, or False if not.\n :rtype: bool\n\n '
return (uuid in self._table) |
def add_descriptor(self, descriptor, no_cache=False):
'\n Add a descriptor to this index.\n\n Adding the same descriptor multiple times should not add multiple\n copies of the descriptor in the index.\n\n :param descriptor: Descriptor to index.\n :type descriptor: smqtk.representation.DescriptorElement\n\n :param no_cache: Do not cache the internal table if a file cache was\n provided. This would be used if adding many descriptors at a time,\n preventing a file write for every individual descriptor added.\n :type no_cache: bool\n\n '
self._table[descriptor.uuid()] = descriptor
if (not no_cache):
self.cache_table() | 2,048,474,242,797,334,000 | Add a descriptor to this index.
Adding the same descriptor multiple times should not add multiple
copies of the descriptor in the index.
:param descriptor: Descriptor to index.
:type descriptor: smqtk.representation.DescriptorElement
:param no_cache: Do not cache the internal table if a file cache was
provided. This would be used if adding many descriptors at a time,
preventing a file write for every individual descriptor added.
:type no_cache: bool | python/smqtk/representation/descriptor_index/memory.py | add_descriptor | cdeepakroy/SMQTK | python | def add_descriptor(self, descriptor, no_cache=False):
'\n Add a descriptor to this index.\n\n Adding the same descriptor multiple times should not add multiple\n copies of the descriptor in the index.\n\n :param descriptor: Descriptor to index.\n :type descriptor: smqtk.representation.DescriptorElement\n\n :param no_cache: Do not cache the internal table if a file cache was\n provided. This would be used if adding many descriptors at a time,\n preventing a file write for every individual descriptor added.\n :type no_cache: bool\n\n '
self._table[descriptor.uuid()] = descriptor
if (not no_cache):
self.cache_table() |
def add_many_descriptors(self, descriptors):
'\n Add multiple descriptors at one time.\n\n :param descriptors: Iterable of descriptor instances to add to this\n index.\n :type descriptors:\n collections.Iterable[smqtk.representation.DescriptorElement]\n\n '
added_something = False
for d in descriptors:
self.add_descriptor(d, no_cache=True)
added_something = True
if added_something:
self.cache_table() | 6,443,921,287,025,161,000 | Add multiple descriptors at one time.
:param descriptors: Iterable of descriptor instances to add to this
index.
:type descriptors:
collections.Iterable[smqtk.representation.DescriptorElement] | python/smqtk/representation/descriptor_index/memory.py | add_many_descriptors | cdeepakroy/SMQTK | python | def add_many_descriptors(self, descriptors):
'\n Add multiple descriptors at one time.\n\n :param descriptors: Iterable of descriptor instances to add to this\n index.\n :type descriptors:\n collections.Iterable[smqtk.representation.DescriptorElement]\n\n '
added_something = False
for d in descriptors:
self.add_descriptor(d, no_cache=True)
added_something = True
if added_something:
self.cache_table() |
def get_descriptor(self, uuid):
"\n Get the descriptor in this index that is associated with the given UUID.\n\n :param uuid: UUID of the DescriptorElement to get.\n :type uuid: collections.Hashable\n\n :raises KeyError: The given UUID doesn't associate to a\n DescriptorElement in this index.\n\n :return: DescriptorElement associated with the queried UUID.\n :rtype: smqtk.representation.DescriptorElement\n\n "
return self._table[uuid] | 1,490,188,453,307,830,000 | Get the descriptor in this index that is associated with the given UUID.
:param uuid: UUID of the DescriptorElement to get.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:return: DescriptorElement associated with the queried UUID.
:rtype: smqtk.representation.DescriptorElement | python/smqtk/representation/descriptor_index/memory.py | get_descriptor | cdeepakroy/SMQTK | python | def get_descriptor(self, uuid):
"\n Get the descriptor in this index that is associated with the given UUID.\n\n :param uuid: UUID of the DescriptorElement to get.\n :type uuid: collections.Hashable\n\n :raises KeyError: The given UUID doesn't associate to a\n DescriptorElement in this index.\n\n :return: DescriptorElement associated with the queried UUID.\n :rtype: smqtk.representation.DescriptorElement\n\n "
return self._table[uuid] |
def get_many_descriptors(self, uuids):
"\n Get an iterator over descriptors associated to given descriptor UUIDs.\n\n :param uuids: Iterable of descriptor UUIDs to query for.\n :type uuids: collections.Iterable[collections.Hashable]\n\n :raises KeyError: A given UUID doesn't associate with a\n DescriptorElement in this index.\n\n :return: Iterator of descriptors associated to given uuid values.\n :rtype: __generator[smqtk.representation.DescriptorElement]\n\n "
for uid in uuids:
(yield self._table[uid]) | -8,884,322,907,515,492,000 | Get an iterator over descriptors associated to given descriptor UUIDs.
:param uuids: Iterable of descriptor UUIDs to query for.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
:return: Iterator of descriptors associated to given uuid values.
:rtype: __generator[smqtk.representation.DescriptorElement] | python/smqtk/representation/descriptor_index/memory.py | get_many_descriptors | cdeepakroy/SMQTK | python | def get_many_descriptors(self, uuids):
"\n Get an iterator over descriptors associated to given descriptor UUIDs.\n\n :param uuids: Iterable of descriptor UUIDs to query for.\n :type uuids: collections.Iterable[collections.Hashable]\n\n :raises KeyError: A given UUID doesn't associate with a\n DescriptorElement in this index.\n\n :return: Iterator of descriptors associated to given uuid values.\n :rtype: __generator[smqtk.representation.DescriptorElement]\n\n "
for uid in uuids:
(yield self._table[uid]) |
def remove_descriptor(self, uuid, no_cache=False):
"\n Remove a descriptor from this index by the given UUID.\n\n :param uuid: UUID of the DescriptorElement to remove.\n :type uuid: collections.Hashable\n\n :raises KeyError: The given UUID doesn't associate to a\n DescriptorElement in this index.\n\n :param no_cache: Do not cache the internal table if a file cache was\n provided. This would be used if adding many descriptors at a time,\n preventing a file write for every individual descriptor added.\n :type no_cache: bool\n\n "
del self._table[uuid]
if (not no_cache):
self.cache_table() | 1,289,530,611,836,269,300 | Remove a descriptor from this index by the given UUID.
:param uuid: UUID of the DescriptorElement to remove.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:param no_cache: Do not cache the internal table if a file cache was
            provided. This would be used if removing many descriptors at a time,
            preventing a file write for every individual descriptor removed.
:type no_cache: bool | python/smqtk/representation/descriptor_index/memory.py | remove_descriptor | cdeepakroy/SMQTK | python | def remove_descriptor(self, uuid, no_cache=False):
"\n Remove a descriptor from this index by the given UUID.\n\n :param uuid: UUID of the DescriptorElement to remove.\n :type uuid: collections.Hashable\n\n :raises KeyError: The given UUID doesn't associate to a\n DescriptorElement in this index.\n\n :param no_cache: Do not cache the internal table if a file cache was\n provided. This would be used if adding many descriptors at a time,\n preventing a file write for every individual descriptor added.\n :type no_cache: bool\n\n "
del self._table[uuid]
if (not no_cache):
self.cache_table() |
def remove_many_descriptors(self, uuids):
"\n Remove descriptors associated to given descriptor UUIDs from this\n index.\n\n :param uuids: Iterable of descriptor UUIDs to remove.\n :type uuids: collections.Iterable[collections.Hashable]\n\n :raises KeyError: A given UUID doesn't associate with a\n DescriptorElement in this index.\n\n "
for uid in uuids:
self.remove_descriptor(uid, no_cache=True)
self.cache_table() | -6,591,105,128,639,121,000 | Remove descriptors associated to given descriptor UUIDs from this
index.
:param uuids: Iterable of descriptor UUIDs to remove.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index. | python/smqtk/representation/descriptor_index/memory.py | remove_many_descriptors | cdeepakroy/SMQTK | python | def remove_many_descriptors(self, uuids):
"\n Remove descriptors associated to given descriptor UUIDs from this\n index.\n\n :param uuids: Iterable of descriptor UUIDs to remove.\n :type uuids: collections.Iterable[collections.Hashable]\n\n :raises KeyError: A given UUID doesn't associate with a\n DescriptorElement in this index.\n\n "
for uid in uuids:
self.remove_descriptor(uid, no_cache=True)
self.cache_table() |
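
A usage sketch tying the MemoryDescriptorIndex records together. The DescriptorMemoryElement and DataMemoryElement imports and constructors are assumptions about SMQTK's companion in-memory classes; only the index methods themselves appear in the records above.

# Hedged sketch: build, query and cache an in-memory descriptor index.
from smqtk.representation.descriptor_index.memory import MemoryDescriptorIndex
from smqtk.representation.descriptor_element.local_elements import DescriptorMemoryElement  # assumed
from smqtk.representation.data_element.memory_element import DataMemoryElement              # assumed

cache = DataMemoryElement(readonly=False)      # writable cache -> the table is pickled into it
index = MemoryDescriptorIndex(cache_element=cache)

d = DescriptorMemoryElement('example', 0)      # (type string, uuid) constructor is an assumption
d.set_vector([0.1, 0.2, 0.3])
index.add_descriptor(d)                        # also rewrites the cache element

assert index.has_descriptor(0)
retrieved = index.get_descriptor(0)
index.remove_many_descriptors([0])             # one cache write after all removals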
def instructions(type_library: PushTypeLibrary):
'Return all core text instructions.'
i = []
for push_type in ['str', 'char']:
i.append(SimpleInstruction('{t}_concat'.format(t=push_type), _concat, input_stacks=[push_type, push_type], output_stacks=['str'], code_blocks=0, docstring='Concatenates the top two {t}s and pushes the resulting string.'.format(t=push_type)))
i.append(SimpleInstruction('str_insert_{t}'.format(t=push_type), _insert, input_stacks=['str', push_type, 'int'], output_stacks=['str'], code_blocks=0, docstring='Inserts {t} into the top str at index `n` and pushes\n the resulting string. The value for `n` is taken from the int stack.'.format(t=push_type)))
i.append(SimpleInstruction('{t}_from_first_char'.format(t=push_type), _first_char, input_stacks=['str'], output_stacks=[push_type], code_blocks=0, docstring='Pushes a {t} of the first character of the top string.'.format(t=push_type)))
i.append(SimpleInstruction('{t}_from_last_char'.format(t=push_type), _last_char, input_stacks=['str'], output_stacks=[push_type], code_blocks=0, docstring='Pushes a {t} of the last character of the top string.'.format(t=push_type)))
i.append(SimpleInstruction('{t}_from_nth_char'.format(t=push_type), _nth_char, input_stacks=['str', 'int'], output_stacks=[push_type], code_blocks=0, docstring='Pushes a {t} of the nth character of the top string. The top integer denotes nth position.'.format(t=push_type)))
i.append(SimpleInstruction('str_contains_{t}'.format(t=push_type), _contains, input_stacks=['str', push_type], output_stacks=['bool'], code_blocks=0, docstring='Pushes true if the next {t} is in the top string. Pushes false otherwise.'.format(t=push_type)))
i.append(SimpleInstruction('str_index_of_{t}'.format(t=push_type), _p_index, input_stacks=['str', push_type], output_stacks=['int'], code_blocks=0, docstring='Pushes the index of the next {t} in the top string. If not found, pushes -1.'.format(t=push_type)))
i.append(ProducesManyOfTypeInstruction('str_split_on_{t}'.format(t=push_type), _split_on, input_stacks=['str', push_type], output_stack='str', code_blocks=0, docstring='Pushes multiple strs produced by splitting the top str on the top {t}.'.format(t=push_type)))
i.append(SimpleInstruction('str_replace_first_{t}'.format(t=push_type), _replace_n, input_stacks=['str', push_type, push_type], output_stacks=['str'], code_blocks=0, docstring='Pushes the str produced by replacing the first occurrence of the\n        top {t} with the second {t}.'.format(t=push_type)))
i.append(SimpleInstruction('str_replace_n_{t}'.format(t=push_type), _replace_n, input_stacks=['str', push_type, push_type, 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes the str produced by replacing the first `n` occurrences of the\n        top {t} with the second {t}. The value for `n` is the top int.'.format(t=push_type)))
i.append(SimpleInstruction('str_replace_all_{t}'.format(t=push_type), _replace_all, input_stacks=['str', push_type, push_type], output_stacks=['str'], code_blocks=0, docstring='Pushes the str produced by replacing all occurrences of the\n        top {t} with the second {t}.'.format(t=push_type)))
i.append(SimpleInstruction('str_remove_first_{t}'.format(t=push_type), _remove_n, input_stacks=['str', push_type], output_stacks=['str'], code_blocks=0, docstring='Pushes the str produced by removing the first occurrence of the top {t}.'.format(t=push_type)))
i.append(SimpleInstruction('str_remove_n_{t}'.format(t=push_type), _remove_n, input_stacks=['str', push_type, 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes the str produced by removing the first `n` occurrences of the\n        top {t}. The value for `n` is the top int.'.format(t=push_type)))
i.append(SimpleInstruction('str_remove_all_{t}'.format(t=push_type), _remove_all, input_stacks=['str', push_type], output_stacks=['str'], code_blocks=0, docstring='Pushes the str produced by removing all occurrences of the top {t}.'.format(t=push_type)))
i.append(SimpleInstruction('str_occurrences_of_{t}'.format(t=push_type), _occurrences_of, input_stacks=['str', push_type], output_stacks=['int'], code_blocks=0, docstring='Pushes the number of times the top {t} occurs in the top str to the int stack.'.format(t=push_type)))
i.append(SimpleInstruction('str_reverse', _reverse, input_stacks=['str'], output_stacks=['str'], code_blocks=0, docstring='Takes the top string and pushes it reversed.'))
i.append(SimpleInstruction('str_head', _head, input_stacks=['str', 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes a string of the first `n` characters from the top string. The value\n for `n` is the top int mod the length of the string.'))
i.append(SimpleInstruction('str_tail', _tail, input_stacks=['str', 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes a string of the last `n` characters from the top string. The value\n for `n` is the top int mod the length of the string.'))
i.append(SimpleInstruction('str_append_char', _concat, input_stacks=['str', 'char'], output_stacks=['str'], code_blocks=0, docstring='Appends the top char to the top string and pushes the resulting string.'))
i.append(SimpleInstruction('str_rest', _rest, input_stacks=['str'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str without its first character.'))
i.append(SimpleInstruction('str_but_last', _but_last, input_stacks=['str'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str without its last character.'))
i.append(SimpleInstruction('str_drop', _drop, input_stacks=['str', 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str without its first `n` character. The value for `n`\n is the top int mod the length of the string.'))
i.append(SimpleInstruction('str_but_last_n', _but_last_n, input_stacks=['str', 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str without its last `n` character. The value for `n`\n is the top int mod the length of the string.'))
i.append(SimpleInstruction('str_length', _len, input_stacks=['str'], output_stacks=['int'], code_blocks=0, docstring='Pushes the length of the top str to the int stack.'))
i.append(SimpleInstruction('str_make_empty', _make_empty, input_stacks=[], output_stacks=['str'], code_blocks=0, docstring='Pushes an empty string.'))
i.append(SimpleInstruction('str_is_empty_string', _is_empty, input_stacks=['str'], output_stacks=['bool'], code_blocks=0, docstring='Pushes True if the top string is empty. Pushes False otherwise.'))
i.append(SimpleInstruction('str_remove_nth', _remove_nth, input_stacks=['str', 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str with the nth character removed.'))
i.append(SimpleInstruction('str_set_nth', _set_nth, input_stacks=['str', 'char', 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str with the nth character set to the top character.'))
i.append(SimpleInstruction('str_strip_whitespace', _strip_whitespace, input_stacks=['str'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str with trailing and leading whitespace stripped.'))
i.append(SimpleInstruction('char_is_whitespace', _is_whitespace, input_stacks=['char'], output_stacks=['bool'], code_blocks=0, docstring='Pushes True if the top Char is whitespace. Pushes False otherwise.'))
i.append(SimpleInstruction('char_is_letter', _is_letter, input_stacks=['char'], output_stacks=['bool'], code_blocks=0, docstring='Pushes True if the top Char is a letter. Pushes False otherwise.'))
i.append(SimpleInstruction('char_is_digit', _is_digit, input_stacks=['char'], output_stacks=['bool'], code_blocks=0, docstring='Pushes True if the top Char is a numeric digit. Pushes False otherwise.'))
for push_type in ['bool', 'int', 'float', 'char']:
i.append(SimpleInstruction('str_from_{t}'.format(t=push_type), _str_from_thing, input_stacks=[push_type], output_stacks=['str'], code_blocks=0, docstring='Pushes the top {t} converted into a str.'.format(t=push_type)))
i.append(SimpleInstruction('char_from_bool', _char_from_bool, input_stacks=['bool'], output_stacks=['char'], code_blocks=0, docstring='Pushes the char "T" if the top bool is True. If the top\n bool is False, pushes the char "F".'))
i.append(SimpleInstruction('char_from_ascii_int', _char_from_ascii, input_stacks=['int'], output_stacks=['char'], code_blocks=0, docstring='Pushes the top int converted into a Character by using the int mod 128 as an ascii value.'))
i.append(SimpleInstruction('char_from_float', _char_from_float, input_stacks=['float'], output_stacks=['char'], code_blocks=0, docstring='Pushes the top float converted into a Character by flooring\n the float to an int, taking the int mod 128, and using it as an ascii value.'))
i.append(ProducesManyOfTypeInstruction('chars_from_str', _all_chars, input_stacks=['str'], output_stack='char', code_blocks=0, docstring='Pushes each character of the top str to the char stack in reverse order.'))
return i | 4,267,998,663,476,276,700 | Return all core text instructions. | pyshgp/push/instructions/text.py | instructions | RedBeansAndRice/pyshgp | python | def instructions(type_library: PushTypeLibrary):
i = []
for push_type in ['str', 'char']:
i.append(SimpleInstruction('{t}_concat'.format(t=push_type), _concat, input_stacks=[push_type, push_type], output_stacks=['str'], code_blocks=0, docstring='Concatenates the top two {t}s and pushes the resulting string.'.format(t=push_type)))
i.append(SimpleInstruction('str_insert_{t}'.format(t=push_type), _insert, input_stacks=['str', push_type, 'int'], output_stacks=['str'], code_blocks=0, docstring='Inserts {t} into the top str at index `n` and pushes\n the resulting string. The value for `n` is taken from the int stack.'.format(t=push_type)))
i.append(SimpleInstruction('{t}_from_first_char'.format(t=push_type), _first_char, input_stacks=['str'], output_stacks=[push_type], code_blocks=0, docstring='Pushes a {t} of the first character of the top string.'.format(t=push_type)))
i.append(SimpleInstruction('{t}_from_last_char'.format(t=push_type), _last_char, input_stacks=['str'], output_stacks=[push_type], code_blocks=0, docstring='Pushes a {t} of the last character of the top string.'.format(t=push_type)))
i.append(SimpleInstruction('{t}_from_nth_char'.format(t=push_type), _nth_char, input_stacks=['str', 'int'], output_stacks=[push_type], code_blocks=0, docstring='Pushes a {t} of the nth character of the top string. The top integer denotes nth position.'.format(t=push_type)))
i.append(SimpleInstruction('str_contains_{t}'.format(t=push_type), _contains, input_stacks=['str', push_type], output_stacks=['bool'], code_blocks=0, docstring='Pushes true if the next {t} is in the top string. Pushes false otherwise.'.format(t=push_type)))
i.append(SimpleInstruction('str_index_of_{t}'.format(t=push_type), _p_index, input_stacks=['str', push_type], output_stacks=['int'], code_blocks=0, docstring='Pushes the index of the next {t} in the top string. If not found, pushes -1.'.format(t=push_type)))
i.append(ProducesManyOfTypeInstruction('str_split_on_{t}'.format(t=push_type), _split_on, input_stacks=['str', push_type], output_stack='str', code_blocks=0, docstring='Pushes multiple strs produced by splitting the top str on the top {t}.'.format(t=push_type)))
i.append(SimpleInstruction('str_replace_first_{t}'.format(t=push_type), _replace_n, input_stacks=['str', push_type, push_type], output_stacks=['str'], code_blocks=0, docstring='Pushes the str produced by replacing the first occurrence of the\n top {t} with the second {t}.'.format(t=push_type)))
i.append(SimpleInstruction('str_replace_n_{t}'.format(t=push_type), _replace_n, input_stacks=['str', push_type, push_type, 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes the str produced by replacing the first `n` occurrences of the\n top {t} with the second {t}. The value for `n` is the top int.'.format(t=push_type)))
i.append(SimpleInstruction('str_replace_all_{t}'.format(t=push_type), _replace_all, input_stacks=['str', push_type, push_type], output_stacks=['str'], code_blocks=0, docstring='Pushes the str produced by replacing all occurrences of the\n top {t} with the second {t}.'.format(t=push_type)))
i.append(SimpleInstruction('str_remove_first_{t}'.format(t=push_type), _remove_n, input_stacks=['str', push_type], output_stacks=['str'], code_blocks=0, docstring='Pushes the str produced by removing the first occurrence of the top {t}.'.format(t=push_type)))
i.append(SimpleInstruction('str_remove_n_{t}'.format(t=push_type), _remove_n, input_stacks=['str', push_type, 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes the str produced by removing the first `n` occurrences of the\n top {t}. The value for `n` is the top int.'.format(t=push_type)))
i.append(SimpleInstruction('str_remove_all_{t}'.format(t=push_type), _remove_all, input_stacks=['str', push_type], output_stacks=['str'], code_blocks=0, docstring='Pushes the str produced by removing all occurrences of the top {t}.'.format(t=push_type)))
i.append(SimpleInstruction('str_occurrences_of_{t}'.format(t=push_type), _occurrences_of, input_stacks=['str', push_type], output_stacks=['int'], code_blocks=0, docstring='Pushes the number of times the top {t} occurs in the top str to the int stack.'.format(t=push_type)))
i.append(SimpleInstruction('str_reverse', _reverse, input_stacks=['str'], output_stacks=['str'], code_blocks=0, docstring='Takes the top string and pushes it reversed.'))
i.append(SimpleInstruction('str_head', _head, input_stacks=['str', 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes a string of the first `n` characters from the top string. The value\n for `n` is the top int mod the length of the string.'))
i.append(SimpleInstruction('str_tail', _tail, input_stacks=['str', 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes a string of the last `n` characters from the top string. The value\n for `n` is the top int mod the length of the string.'))
i.append(SimpleInstruction('str_append_char', _concat, input_stacks=['str', 'char'], output_stacks=['str'], code_blocks=0, docstring='Appends the top char to the top string and pushes the resulting string.'))
i.append(SimpleInstruction('str_rest', _rest, input_stacks=['str'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str without its first character.'))
i.append(SimpleInstruction('str_but_last', _but_last, input_stacks=['str'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str without its last character.'))
i.append(SimpleInstruction('str_drop', _drop, input_stacks=['str', 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str without its first `n` characters. The value for `n`\n is the top int mod the length of the string.'))
i.append(SimpleInstruction('str_but_last_n', _but_last_n, input_stacks=['str', 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str without its last `n` characters. The value for `n`\n is the top int mod the length of the string.'))
i.append(SimpleInstruction('str_length', _len, input_stacks=['str'], output_stacks=['int'], code_blocks=0, docstring='Pushes the length of the top str to the int stack.'))
i.append(SimpleInstruction('str_make_empty', _make_empty, input_stacks=[], output_stacks=['str'], code_blocks=0, docstring='Pushes an empty string.'))
i.append(SimpleInstruction('str_is_empty_string', _is_empty, input_stacks=['str'], output_stacks=['bool'], code_blocks=0, docstring='Pushes True if the top string is empty. Pushes False otherwise.'))
i.append(SimpleInstruction('str_remove_nth', _remove_nth, input_stacks=['str', 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str with the nth character removed.'))
i.append(SimpleInstruction('str_set_nth', _set_nth, input_stacks=['str', 'char', 'int'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str with the nth character set to the top character.'))
i.append(SimpleInstruction('str_strip_whitespace', _strip_whitespace, input_stacks=['str'], output_stacks=['str'], code_blocks=0, docstring='Pushes the top str with trailing and leading whitespace stripped.'))
i.append(SimpleInstruction('char_is_whitespace', _is_whitespace, input_stacks=['char'], output_stacks=['bool'], code_blocks=0, docstring='Pushes True if the top Char is whitespace. Pushes False otherwise.'))
i.append(SimpleInstruction('char_is_letter', _is_letter, input_stacks=['char'], output_stacks=['bool'], code_blocks=0, docstring='Pushes True if the top Char is a letter. Pushes False otherwise.'))
i.append(SimpleInstruction('char_is_digit', _is_digit, input_stacks=['char'], output_stacks=['bool'], code_blocks=0, docstring='Pushes True if the top Char is a numeric digit. Pushes False otherwise.'))
for push_type in ['bool', 'int', 'float', 'char']:
i.append(SimpleInstruction('str_from_{t}'.format(t=push_type), _str_from_thing, input_stacks=[push_type], output_stacks=['str'], code_blocks=0, docstring='Pushes the top {t} converted into a str.'.format(t=push_type)))
i.append(SimpleInstruction('char_from_bool', _char_from_bool, input_stacks=['bool'], output_stacks=['char'], code_blocks=0, docstring='Pushes the char "T" if the top bool is True. If the top\n bool is False, pushes the char "F".'))
i.append(SimpleInstruction('char_from_ascii_int', _char_from_ascii, input_stacks=['int'], output_stacks=['char'], code_blocks=0, docstring='Pushes the top int converted into a Character by using the int mod 128 as an ascii value.'))
i.append(SimpleInstruction('char_from_float', _char_from_float, input_stacks=['float'], output_stacks=['char'], code_blocks=0, docstring='Pushes the top float converted into a Character by flooring\n the float to an int, taking the int mod 128, and using it as an ascii value.'))
i.append(ProducesManyOfTypeInstruction('chars_from_str', _all_chars, input_stacks=['str'], output_stack='char', code_blocks=0, docstring='Pushes each character of the top str to the char stack in reverse order.'))
return i |
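The instruction docstrings above double as a behavioural spec for the string operations. A minimal plain-Python sketch of a few of those semantics follows; the helper names are illustrative only and this is not pyshgp's own implementation.
# Plain-Python sketch of the string semantics described in the docstrings above.
# Illustrative helper names only; not the pyshgp internals.
def replace_n(s, old, new, n):
    # str_replace_n_*: replace the first `n` occurrences of `old` with `new`.
    return s.replace(old, new, n)

def head(s, n):
    # str_head: first `n` characters, where `n` is taken mod the string length.
    return s[:n % len(s)] if s else s

def char_from_ascii(i):
    # char_from_ascii_int: interpret the int mod 128 as an ASCII code.
    return chr(i % 128)

print(replace_n('banana', 'a', 'o', 2))  # 'bonona'
print(head('hello', 7))                  # 'he' (7 mod 5 == 2)
print(char_from_ascii(193))              # 'A' (193 mod 128 == 65)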
@pytest.fixture(params=['proxy', 'layered', 'memory', 'file', 'serialised'])
def cache(request):
'Return cache.'
if (request.param == 'proxy'):
cache = ftrack_api.cache.ProxyCache(ftrack_api.cache.MemoryCache())
elif (request.param == 'layered'):
cache = ftrack_api.cache.LayeredCache([ftrack_api.cache.MemoryCache()])
elif (request.param == 'memory'):
cache = ftrack_api.cache.MemoryCache()
elif (request.param == 'file'):
cache_path = os.path.join(tempfile.gettempdir(), '{0}.dbm'.format(uuid.uuid4().hex))
cache = ftrack_api.cache.FileCache(cache_path)
def cleanup():
'Cleanup.'
try:
os.remove(cache_path)
except OSError:
os.remove((cache_path + '.db'))
request.addfinalizer(cleanup)
elif (request.param == 'serialised'):
cache = ftrack_api.cache.SerialisedCache(ftrack_api.cache.MemoryCache(), encode=(lambda value: value), decode=(lambda value: value))
else:
raise ValueError('Unrecognised cache fixture type {0!r}'.format(request.param))
return cache | 5,505,313,335,095,566,000 | Return cache. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | cache | Mikfr83/OpenPype | python | @pytest.fixture(params=['proxy', 'layered', 'memory', 'file', 'serialised'])
def cache(request):
if (request.param == 'proxy'):
cache = ftrack_api.cache.ProxyCache(ftrack_api.cache.MemoryCache())
elif (request.param == 'layered'):
cache = ftrack_api.cache.LayeredCache([ftrack_api.cache.MemoryCache()])
elif (request.param == 'memory'):
cache = ftrack_api.cache.MemoryCache()
elif (request.param == 'file'):
cache_path = os.path.join(tempfile.gettempdir(), '{0}.dbm'.format(uuid.uuid4().hex))
cache = ftrack_api.cache.FileCache(cache_path)
def cleanup():
'Cleanup.'
try:
os.remove(cache_path)
except OSError:
os.remove((cache_path + '.db'))
request.addfinalizer(cleanup)
elif (request.param == 'serialised'):
cache = ftrack_api.cache.SerialisedCache(ftrack_api.cache.MemoryCache(), encode=(lambda value: value), decode=(lambda value: value))
else:
raise ValueError('Unrecognised cache fixture type {0!r}'.format(request.param))
return cache |
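The fixture above builds each backend from ftrack_api.cache. A minimal sketch of the get/set contract that the tests below exercise, using only classes the fixture itself constructs, might look like this:
# Minimal sketch of the cache contract checked by the tests that follow.
# Uses only classes the fixture above constructs; a miss raises KeyError.
import ftrack_api.cache

memory_cache = ftrack_api.cache.MemoryCache()
memory_cache.set('key', 'value')
assert memory_cache.get('key') == 'value'
try:
    memory_cache.get('missing')
except KeyError:
    pass  # missing keys raise KeyError, as test_get_missing_key expects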
def function(mutable, x, y=2):
'Function for testing.'
mutable['called'] = True
return {'result': (x + y)} | 7,300,508,250,387,626,000 | Function for testing. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | function | Mikfr83/OpenPype | python | def function(mutable, x, y=2):
mutable['called'] = True
return {'result': (x + y)} |
def assert_memoised_call(memoiser, function, expected, args=None, kw=None, memoised=True):
'Assert *function* call via *memoiser* was *memoised*.'
mapping = {'called': False}
if (args is not None):
args = ((mapping,) + args)
else:
args = (mapping,)
result = memoiser.call(function, args, kw)
assert (result == expected)
assert (mapping['called'] is not memoised) | -5,157,968,186,569,180,000 | Assert *function* call via *memoiser* was *memoised*. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | assert_memoised_call | Mikfr83/OpenPype | python | def assert_memoised_call(memoiser, function, expected, args=None, kw=None, memoised=True):
mapping = {'called': False}
if (args is not None):
args = ((mapping,) + args)
else:
args = (mapping,)
result = memoiser.call(function, args, kw)
assert (result == expected)
assert (mapping['called'] is not memoised) |
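A hypothetical use of the two helpers above: the Memoiser class name is an assumption about ftrack_api.cache; only the .call(callable, args, kw) signature is taken from assert_memoised_call itself.
# Hypothetical usage of `function` and `assert_memoised_call` from above.
# ftrack_api.cache.Memoiser is assumed; the helper only requires an object
# exposing .call(callable, args, kw).
memoiser = ftrack_api.cache.Memoiser()
# First call: nothing cached yet, so the wrapped function actually runs.
assert_memoised_call(memoiser, function, expected={'result': 3}, args=(1,), memoised=False)
# Second identical call: served from the cache, so the function is not run again.
assert_memoised_call(memoiser, function, expected={'result': 3}, args=(1,), memoised=True)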
def test_get(cache):
'Retrieve item from cache.'
cache.set('key', 'value')
assert (cache.get('key') == 'value') | -4,670,795,253,040,756,000 | Retrieve item from cache. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_get | Mikfr83/OpenPype | python | def test_get(cache):
cache.set('key', 'value')
assert (cache.get('key') == 'value') |
def test_get_missing_key(cache):
'Fail to retrieve missing item from cache.'
with pytest.raises(KeyError):
cache.get('key') | -7,415,425,646,362,388,000 | Fail to retrieve missing item from cache. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_get_missing_key | Mikfr83/OpenPype | python | def test_get_missing_key(cache):
with pytest.raises(KeyError):
cache.get('key') |
def test_set(cache):
'Set item in cache.'
with pytest.raises(KeyError):
cache.get('key')
cache.set('key', 'value')
assert (cache.get('key') == 'value') | 5,439,127,552,633,369,000 | Set item in cache. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_set | Mikfr83/OpenPype | python | def test_set(cache):
with pytest.raises(KeyError):
cache.get('key')
cache.set('key', 'value')
assert (cache.get('key') == 'value') |
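Because the cache fixture is parametrised, each of the tests above runs once per backend. An equivalent standalone round-trip against two of those backends, constructed exactly as in the fixture, would be:
# Standalone set/get round-trip against two of the backends the fixture builds.
for backend in (
    ftrack_api.cache.MemoryCache(),
    ftrack_api.cache.SerialisedCache(
        ftrack_api.cache.MemoryCache(),
        encode=lambda value: value,
        decode=lambda value: value,
    ),
):
    backend.set('key', 'value')
    assert backend.get('key') == 'value'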