code | docstring | text |
---|---|---|
stringlengths 75–104k | stringlengths 1–46.9k | stringlengths 164–112k |
def zoom(self, factor):
"""
Zoom the camera view by a factor.
"""
camera = self.ren.GetActiveCamera()
camera.Zoom(factor)
self.ren_win.Render() | Zoom the camera view by a factor. | Below is the instruction that describes the task:
### Input:
Zoom the camera view by a factor.
### Response:
def zoom(self, factor):
"""
Zoom the camera view by a factor.
"""
camera = self.ren.GetActiveCamera()
camera.Zoom(factor)
self.ren_win.Render() |
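The zoom helper above assumes a VTK renderer (`self.ren`) and render window (`self.ren_win`) wired together by the surrounding class. A minimal sketch of that wiring, using names from VTK's Python bindings (the scene setup itself is a hypothetical placeholder, not the class's own constructor):

```python
import vtk

ren = vtk.vtkRenderer()
ren_win = vtk.vtkRenderWindow()
ren_win.AddRenderer(ren)          # stands in for the class's self.ren / self.ren_win pair

camera = ren.GetActiveCamera()
camera.Zoom(1.5)                  # factor > 1 zooms in, factor < 1 zooms out
ren_win.Render()                  # redraw so the new view is visible
```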
def parse_rna(rna, s2bins, min_rna):
"""
parse [16,23]SfromHMM.py output
- rna_cov[scaffold] = [0, 0, []] # [bases, length, ranges]
"""
rna_cov = {}
for seq in parse_fasta(rna):
# check that length passes threshold
length = len(seq[1])
if length < min_rna:
continue
# check if sequence is binned
s = seq[0].split('>')[1].split()[0]
if s not in s2bins:
continue
if s not in rna_cov:
rna_cov[s] = [0, 0, []]
position = [int(i) for i in seq[0].rsplit('pos=', 1)[1].split()[0].split('-')]
rna_cov[s][2].append(position)
rna_cov[s][1] += length
return rna_cov | parse [16,23]SfromHMM.py output
- rna_cov[scaffold] = [0, 0, []] # [bases, length, ranges] | Below is the instruction that describes the task:
### Input:
parse [16,23]SfromHMM.py output
- rna_cov[scaffold] = [0, 0, []] # [bases, length, ranges]
### Response:
def parse_rna(rna, s2bins, min_rna):
"""
parse [16,23]SfromHMM.py output
- rna_cov[scaffold] = [0, 0, []] # [bases, length, ranges]
"""
rna_cov = {}
for seq in parse_fasta(rna):
# check that length passes threshold
length = len(seq[1])
if length < min_rna:
continue
# check if sequence is binned
s = seq[0].split('>')[1].split()[0]
if s not in s2bins:
continue
if s not in rna_cov:
rna_cov[s] = [0, 0, []]
position = [int(i) for i in seq[0].rsplit('pos=', 1)[1].split()[0].split('-')]
rna_cov[s][2].append(position)
rna_cov[s][1] += length
return rna_cov |
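The scaffold name and `pos=start-end` range are pulled straight out of the FASTA description line; a small sketch on a hypothetical header (the exact header layout is an assumption inferred from the string handling above):

```python
# Hypothetical [16,23]SfromHMM.py-style FASTA header.
header = '>scaffold_12 16S_rRNA pos=1050-2580'

scaffold = header.split('>')[1].split()[0]                                        # 'scaffold_12'
start, end = [int(i) for i in header.rsplit('pos=', 1)[1].split()[0].split('-')]  # 1050, 2580
print(scaffold, start, end)
```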
def _get_callable_full_name(self, fob, fin, uobj):
"""Get full path [module, class (if applicable), function name] of callable."""
# Check if object is a class property
name = self._property_search(fob)
if name:
del fob, fin, uobj
return name
if os.path.isfile(fin):
lineno = fob.f_lineno
ret = self._callables_obj.get_callable_from_line(fin, lineno)
del fob, fin, uobj, name, lineno
return ret
# Code executed in doctests does not have an actual callable object
# exec-based callables do not have a valid file name
fname = uobj and _get_func_code(uobj).co_filename
if (not fname) or (fname and (not os.path.isfile(fname))):
del fob, fin, uobj, name, fname
return "dynamic"
code_id = (
inspect.getfile(uobj).replace(".pyc", ".py"),
inspect.getsourcelines(uobj)[1],
)
self._callables_obj.trace([code_id[0]])
ret = self._callables_obj.reverse_callables_db[code_id]
del fob, fin, uobj, name, fname, code_id
return ret | Get full path [module, class (if applicable), function name] of callable. | Below is the instruction that describes the task:
### Input:
Get full path [module, class (if applicable), function name] of callable.
### Response:
def _get_callable_full_name(self, fob, fin, uobj):
"""Get full path [module, class (if applicable), function name] of callable."""
# Check if object is a class property
name = self._property_search(fob)
if name:
del fob, fin, uobj
return name
if os.path.isfile(fin):
lineno = fob.f_lineno
ret = self._callables_obj.get_callable_from_line(fin, lineno)
del fob, fin, uobj, name, lineno
return ret
# Code executed in doctests does not have an actual callable object
# exec-based callables do not have a valid file name
fname = uobj and _get_func_code(uobj).co_filename
if (not fname) or (fname and (not os.path.isfile(fname))):
del fob, fin, uobj, name, fname
return "dynamic"
code_id = (
inspect.getfile(uobj).replace(".pyc", ".py"),
inspect.getsourcelines(uobj)[1],
)
self._callables_obj.trace([code_id[0]])
ret = self._callables_obj.reverse_callables_db[code_id]
del fob, fin, uobj, name, fname, code_id
return ret |
def run_timeit(self, stmt, setup):
""" Create the function call statement as a string used for timeit. """
_timer = timeit.Timer(stmt=stmt, setup=setup)
trials = _timer.repeat(self.timeit_repeat, self.timeit_number)
self.time_average_seconds = sum(trials) / len(trials) / self.timeit_number
# Convert into reasonable time units
time_avg = convert_time_units(self.time_average_seconds)
return time_avg | Create the function call statement as a string used for timeit. | Below is the instruction that describes the task:
### Input:
Create the function call statement as a string used for timeit.
### Response:
def run_timeit(self, stmt, setup):
""" Create the function call statement as a string used for timeit. """
_timer = timeit.Timer(stmt=stmt, setup=setup)
trials = _timer.repeat(self.timeit_repeat, self.timeit_number)
self.time_average_seconds = sum(trials) / len(trials) / self.timeit_number
# Convert into reasonable time units
time_avg = convert_time_units(self.time_average_seconds)
return time_avg |
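Outside the class, the same timing logic can be reproduced with the standard library alone; a standalone sketch with hypothetical `repeat`/`number` settings and a throwaway statement:

```python
import timeit

timeit_repeat, timeit_number = 5, 1000
timer = timeit.Timer(stmt='sorted(data)', setup='data = list(range(1000, 0, -1))')
trials = timer.repeat(timeit_repeat, timeit_number)        # total seconds per repeat
avg_seconds = sum(trials) / len(trials) / timeit_number    # average seconds per single call
print('%.6f s per call' % avg_seconds)
```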
def weekday(year_or_num, month=None, day=None, full=False):
"""Simple tag - returns the weekday of the given (year, month, day) or of given (weekday_number).
Usage (in template):
{% weekday 2014 3 3 %}
Result: Mon
Return abbreviation by default. To return full name: pass full=True
{% weekday 2014 3 3 full=True %}
Result: Monday
When only number of weekday is given then 0 is considered as "Monday"
{% weekday 0 full=True %}
Result: Monday
"""
if any([month, day]) and not all([month, day]):
raise TemplateSyntaxError("weekday accepts 1 or 3 arguments plus optional 'full' argument")
try:
if all([year_or_num, month, day]):
weekday_num = date(*map(int, (year_or_num, month, day))).weekday()
else:
weekday_num = year_or_num
if full:
return WEEKDAYS[weekday_num]
else:
return WEEKDAYS_ABBR[weekday_num]
except Exception:
return | Simple tag - returns the weekday of the given (year, month, day) or of given (weekday_number).
Usage (in template):
{% weekday 2014 3 3 %}
Result: Mon
Return abbreviation by default. To return full name: pass full=True
{% weekday 2014 3 3 full=True %}
Result: Monday
When only number of weekday is given then 0 is considered as "Monday"
{% weekday 0 full=True %}
Result: Monday | Below is the instruction that describes the task:
### Input:
Simple tag - returns the weekday of the given (year, month, day) or of given (weekday_number).
Usage (in template):
{% weekday 2014 3 3 %}
Result: Mon
Return abbreviation by default. To return full name: pass full=True
{% weekday 2014 3 3 full=True %}
Result: Monday
When only number of weekday is given then 0 is considered as "Monday"
{% weekday 0 full=True %}
Result: Monday
### Response:
def weekday(year_or_num, month=None, day=None, full=False):
"""Simple tag - returns the weekday of the given (year, month, day) or of given (weekday_number).
Usage (in template):
{% weekday 2014 3 3 %}
Result: Mon
Return abbreviation by default. To return full name: pass full=True
{% weekday 2014 3 3 full=True %}
Result: Monday
When only number of weekday is given then 0 is considered as "Monday"
{% weekday 0 full=True %}
Result: Monday
"""
if any([month, day]) and not all([month, day]):
raise TemplateSyntaxError("weekday accepts 1 or 3 arguments plus optional 'full' argument")
try:
if all([year_or_num, month, day]):
weekday_num = date(*map(int, (year_or_num, month, day))).weekday()
else:
weekday_num = year_or_num
if full:
return WEEKDAYS[weekday_num]
else:
return WEEKDAYS_ABBR[weekday_num]
except Exception:
return |
def thaw_decrypt(vault_client, src_file, tmp_dir, opt):
"""Decrypts the encrypted ice file"""
if not os.path.isdir(opt.secrets):
LOG.info("Creating secret directory %s", opt.secrets)
os.mkdir(opt.secrets)
zip_file = "%s/aomi.zip" % tmp_dir
if opt.gpg_pass_path:
gpg_path_bits = opt.gpg_pass_path.split('/')
gpg_path = '/'.join(gpg_path_bits[0:len(gpg_path_bits) - 1])
gpg_field = gpg_path_bits[len(gpg_path_bits) - 1]
resp = vault_client.read(gpg_path)
gpg_pass = None
if resp and 'data' in resp and gpg_field in resp['data']:
gpg_pass = resp['data'][gpg_field]
if not gpg_pass:
raise aomi.exceptions.GPG("Unable to retrieve GPG password")
LOG.debug("Retrieved GPG password from Vault")
if not decrypt(src_file, zip_file, passphrase=gpg_pass):
raise aomi.exceptions.GPG("Unable to gpg")
else:
raise aomi.exceptions.VaultData("Unable to retrieve GPG password")
else:
if not decrypt(src_file, zip_file):
raise aomi.exceptions.GPG("Unable to gpg")
return zip_file | Decrypts the encrypted ice file | Below is the instruction that describes the task:
### Input:
Decrypts the encrypted ice file
### Response:
def thaw_decrypt(vault_client, src_file, tmp_dir, opt):
"""Decrypts the encrypted ice file"""
if not os.path.isdir(opt.secrets):
LOG.info("Creating secret directory %s", opt.secrets)
os.mkdir(opt.secrets)
zip_file = "%s/aomi.zip" % tmp_dir
if opt.gpg_pass_path:
gpg_path_bits = opt.gpg_pass_path.split('/')
gpg_path = '/'.join(gpg_path_bits[0:len(gpg_path_bits) - 1])
gpg_field = gpg_path_bits[len(gpg_path_bits) - 1]
resp = vault_client.read(gpg_path)
gpg_pass = None
if resp and 'data' in resp and gpg_field in resp['data']:
gpg_pass = resp['data'][gpg_field]
if not gpg_pass:
raise aomi.exceptions.GPG("Unable to retrieve GPG password")
LOG.debug("Retrieved GPG password from Vault")
if not decrypt(src_file, zip_file, passphrase=gpg_pass):
raise aomi.exceptions.GPG("Unable to gpg")
else:
raise aomi.exceptions.VaultData("Unable to retrieve GPG password")
else:
if not decrypt(src_file, zip_file):
raise aomi.exceptions.GPG("Unable to gpg")
return zip_file |
def scantree(path: str) -> Generator:
"""Recursively yield DirEntry objects for given directory."""
for entry in os.scandir(path):
if entry.is_dir(follow_symlinks=False):
yield from scantree(entry.path)
else:
if entry.name.endswith('.py'):
yield entry | Recursively yield DirEntry objects for given directory. | Below is the instruction that describes the task:
### Input:
Recursively yield DirEntry objects for given directory.
### Response:
def scantree(path: str) -> Generator:
"""Recursively yield DirEntry objects for given directory."""
for entry in os.scandir(path):
if entry.is_dir(follow_symlinks=False):
yield from scantree(entry.path)
else:
if entry.name.endswith('.py'):
yield entry |
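A usage sketch for the generator above; the directory is a placeholder, and the two imports are the ones the `scantree` definition itself relies on (`os.scandir` and the `Generator` annotation):

```python
import os
from typing import Generator   # needed by the scantree definition above

# Walk the current directory tree and report every .py file found.
for entry in scantree('.'):
    print(entry.path, entry.stat().st_size, 'bytes')
```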
def traverse(self, predicate=lambda i, d: True,
prune=lambda i, d: False, depth=-1, branch_first=True,
visit_once=True, ignore_self=1, as_edge=False):
""":return: iterator yielding of items found when traversing self
:param predicate: f(i,d) returns False if item i at depth d should not be included in the result
:param prune:
f(i,d) return True if the search should stop at item i at depth d.
Item i will not be returned.
:param depth:
define at which level the iteration should not go deeper
if -1, there is no limit
if 0, you would effectively only get self, the root of the iteration
i.e. if 1, you would only get the first level of predecessors/successors
:param branch_first:
if True, items will be returned branch first, otherwise depth first
:param visit_once:
if True, items will only be returned once, although they might be encountered
several times. Loops are prevented that way.
:param ignore_self:
if True, self will be ignored and automatically pruned from
the result. Otherwise it will be the first item to be returned.
If as_edge is True, the source of the first edge is None
:param as_edge:
if True, return a pair of items, first being the source, second the
destination, i.e. tuple(src, dest) with the edge spanning from
source to destination"""
visited = set()
stack = Deque()
stack.append((0, self, None)) # self is always depth level 0
def addToStack(stack, item, branch_first, depth):
lst = self._get_intermediate_items(item)
if not lst:
return
if branch_first:
stack.extendleft((depth, i, item) for i in lst)
else:
reviter = ((depth, lst[i], item) for i in range(len(lst) - 1, -1, -1))
stack.extend(reviter)
# END addToStack local method
while stack:
d, item, src = stack.pop() # depth of item, item, item_source
if visit_once and item in visited:
continue
if visit_once:
visited.add(item)
rval = (as_edge and (src, item)) or item
if prune(rval, d):
continue
skipStartItem = ignore_self and (item is self)
if not skipStartItem and predicate(rval, d):
yield rval
# only continue to next level if this is appropriate !
nd = d + 1
if depth > -1 and nd > depth:
continue
addToStack(stack, item, branch_first, nd) | :return: iterator yielding of items found when traversing self
:param predicate: f(i,d) returns False if item i at depth d should not be included in the result
:param prune:
f(i,d) return True if the search should stop at item i at depth d.
Item i will not be returned.
:param depth:
define at which level the iteration should not go deeper
if -1, there is no limit
if 0, you would effectively only get self, the root of the iteration
i.e. if 1, you would only get the first level of predecessors/successors
:param branch_first:
if True, items will be returned branch first, otherwise depth first
:param visit_once:
if True, items will only be returned once, although they might be encountered
several times. Loops are prevented that way.
:param ignore_self:
if True, self will be ignored and automatically pruned from
the result. Otherwise it will be the first item to be returned.
If as_edge is True, the source of the first edge is None
:param as_edge:
if True, return a pair of items, first being the source, second the
destination, i.e. tuple(src, dest) with the edge spanning from
source to destination | Below is the instruction that describes the task:
### Input:
:return: iterator yielding of items found when traversing self
:param predicate: f(i,d) returns False if item i at depth d should not be included in the result
:param prune:
f(i,d) return True if the search should stop at item i at depth d.
Item i will not be returned.
:param depth:
define at which level the iteration should not go deeper
if -1, there is no limit
if 0, you would effectively only get self, the root of the iteration
i.e. if 1, you would only get the first level of predecessors/successors
:param branch_first:
if True, items will be returned branch first, otherwise depth first
:param visit_once:
if True, items will only be returned once, although they might be encountered
several times. Loops are prevented that way.
:param ignore_self:
if True, self will be ignored and automatically pruned from
the result. Otherwise it will be the first item to be returned.
If as_edge is True, the source of the first edge is None
:param as_edge:
if True, return a pair of items, first being the source, second the
destination, i.e. tuple(src, dest) with the edge spanning from
source to destination
### Response:
def traverse(self, predicate=lambda i, d: True,
prune=lambda i, d: False, depth=-1, branch_first=True,
visit_once=True, ignore_self=1, as_edge=False):
""":return: iterator yielding of items found when traversing self
:param predicate: f(i,d) returns False if item i at depth d should not be included in the result
:param prune:
f(i,d) return True if the search should stop at item i at depth d.
Item i will not be returned.
:param depth:
define at which level the iteration should not go deeper
if -1, there is no limit
if 0, you would effectively only get self, the root of the iteration
i.e. if 1, you would only get the first level of predecessors/successors
:param branch_first:
if True, items will be returned branch first, otherwise depth first
:param visit_once:
if True, items will only be returned once, although they might be encountered
several times. Loops are prevented that way.
:param ignore_self:
if True, self will be ignored and automatically pruned from
the result. Otherwise it will be the first item to be returned.
If as_edge is True, the source of the first edge is None
:param as_edge:
if True, return a pair of items, first being the source, second the
destination, i.e. tuple(src, dest) with the edge spanning from
source to destination"""
visited = set()
stack = Deque()
stack.append((0, self, None)) # self is always depth level 0
def addToStack(stack, item, branch_first, depth):
lst = self._get_intermediate_items(item)
if not lst:
return
if branch_first:
stack.extendleft((depth, i, item) for i in lst)
else:
reviter = ((depth, lst[i], item) for i in range(len(lst) - 1, -1, -1))
stack.extend(reviter)
# END addToStack local method
while stack:
d, item, src = stack.pop() # depth of item, item, item_source
if visit_once and item in visited:
continue
if visit_once:
visited.add(item)
rval = (as_edge and (src, item)) or item
if prune(rval, d):
continue
skipStartItem = ignore_self and (item is self)
if not skipStartItem and predicate(rval, d):
yield rval
# only continue to next level if this is appropriate !
nd = d + 1
if depth > -1 and nd > depth:
continue
addToStack(stack, item, branch_first, nd) |
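The breadth-first versus depth-first behaviour comes entirely from how children are pushed onto the deque (`extendleft` queues siblings behind nodes already waiting, `extend(reversed(...))` stacks children on top). A self-contained toy that reproduces just that stack discipline, with a hypothetical child table standing in for `_get_intermediate_items`:

```python
from collections import deque

children = {'root': ['a', 'b'], 'a': ['a1', 'a2'], 'b': ['b1']}

def walk(branch_first):
    order, stack = [], deque(['root'])
    while stack:
        node = stack.pop()                 # always pop from the right
        order.append(node)
        kids = children.get(node, [])
        if branch_first:
            stack.extendleft(kids)         # siblings drain before their children
        else:
            stack.extend(reversed(kids))   # children drain before siblings
    return order

print(walk(True))    # ['root', 'a', 'b', 'a1', 'a2', 'b1']  -> breadth first
print(walk(False))   # ['root', 'a', 'a1', 'a2', 'b', 'b1']  -> depth first
```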
def data_collate(batch:ItemsList)->Tensor:
"Convert `batch` items to tensor data."
return torch.utils.data.dataloader.default_collate(to_data(batch)) | Convert `batch` items to tensor data. | Below is the instruction that describes the task:
### Input:
Convert `batch` items to tensor data.
### Response:
def data_collate(batch:ItemsList)->Tensor:
"Convert `batch` items to tensor data."
return torch.utils.data.dataloader.default_collate(to_data(batch)) |
def _get_coarse_dataset(self, key, info):
"""Get the coarse dataset refered to by `key` from the XML data."""
angles = self.root.find('.//Tile_Angles')
if key in ['solar_zenith_angle', 'solar_azimuth_angle']:
elts = angles.findall(info['xml_tag'] + '/Values_List/VALUES')
return np.array([[val for val in elt.text.split()] for elt in elts],
dtype=np.float)
elif key in ['satellite_zenith_angle', 'satellite_azimuth_angle']:
arrays = []
elts = angles.findall(info['xml_tag'] + '[@bandId="1"]')
for elt in elts:
items = elt.findall(info['xml_item'] + '/Values_List/VALUES')
arrays.append(np.array([[val for val in item.text.split()] for item in items],
dtype=np.float))
return np.nanmean(np.dstack(arrays), -1)
else:
return | Get the coarse dataset referred to by `key` from the XML data. | Below is the instruction that describes the task:
### Input:
Get the coarse dataset referred to by `key` from the XML data.
### Response:
def _get_coarse_dataset(self, key, info):
"""Get the coarse dataset refered to by `key` from the XML data."""
angles = self.root.find('.//Tile_Angles')
if key in ['solar_zenith_angle', 'solar_azimuth_angle']:
elts = angles.findall(info['xml_tag'] + '/Values_List/VALUES')
return np.array([[val for val in elt.text.split()] for elt in elts],
dtype=np.float)
elif key in ['satellite_zenith_angle', 'satellite_azimuth_angle']:
arrays = []
elts = angles.findall(info['xml_tag'] + '[@bandId="1"]')
for elt in elts:
items = elt.findall(info['xml_item'] + '/Values_List/VALUES')
arrays.append(np.array([[val for val in item.text.split()] for item in items],
dtype=np.float))
return np.nanmean(np.dstack(arrays), -1)
else:
return |
def invalid_ipa_characters(unicode_string, indices=False):
"""
Return the list of Unicode characters
in the given Unicode string
that are not IPA valid.
Return ``None`` if ``unicode_string`` is ``None``.
:param str unicode_string: the Unicode string to be parsed
:param bool indices: if ``True``, return a list of pairs (index, invalid character),
instead of a list of str (characters).
:rtype: list of str or list of (int, str)
"""
if unicode_string is None:
return None
if indices:
return [(i, unicode_string[i]) for i in range(len(unicode_string)) if unicode_string[i] not in UNICODE_TO_IPA]
return set([c for c in unicode_string if c not in UNICODE_TO_IPA]) | Return the list of Unicode characters
in the given Unicode string
that are not IPA valid.
Return ``None`` if ``unicode_string`` is ``None``.
:param str unicode_string: the Unicode string to be parsed
:param bool indices: if ``True``, return a list of pairs (index, invalid character),
instead of a list of str (characters).
:rtype: list of str or list of (int, str) | Below is the instruction that describes the task:
### Input:
Return the list of Unicode characters
in the given Unicode string
that are not IPA valid.
Return ``None`` if ``unicode_string`` is ``None``.
:param str unicode_string: the Unicode string to be parsed
:param bool indices: if ``True``, return a list of pairs (index, invalid character),
instead of a list of str (characters).
:rtype: list of str or list of (int, str)
### Response:
def invalid_ipa_characters(unicode_string, indices=False):
"""
Return the list of Unicode characters
in the given Unicode string
that are not IPA valid.
Return ``None`` if ``unicode_string`` is ``None``.
:param str unicode_string: the Unicode string to be parsed
:param bool indices: if ``True``, return a list of pairs (index, invalid character),
instead of a list of str (characters).
:rtype: list of str or list of (int, str)
"""
if unicode_string is None:
return None
if indices:
return [(i, unicode_string[i]) for i in range(len(unicode_string)) if unicode_string[i] not in UNICODE_TO_IPA]
return set([c for c in unicode_string if c not in UNICODE_TO_IPA]) |
def fit(self, X, y, X_val=None, y_val=None):
"""Train a network with the quasi-Newton method.
Args:
X (np.array of float): feature matrix for training
y (np.array of float): target values for training
X_val (np.array of float): feature matrix for validation
y_val (np.array of float): target values for validation
"""
y = y.reshape((len(y), 1))
if sparse.issparse(X):
X = X.tocsr()
if X_val is not None:
n_val = len(y_val)
y_val = y_val.reshape((n_val, 1))
# Set initial weights randomly.
self.i = X.shape[1]
self.l1 = self.l1 / self.i
self.w = (np.random.rand((self.i + 2) * self.h + 1) - .5) * 1e-6
self.w_opt = self.w
self.n_opt = 0
logger.info('training ...')
n_obs = X.shape[0]
batch = self.b
n_epoch = self.n
idx = list(range(n_obs))
self.auc_opt = .5
start = time.time()
print('\tEPOCH TRAIN VALID BEST TIME (m)')
print('\t--------------------------------------------')
# Before training
p = self.predict_raw(X)
auc = roc_auc_score(y, p)
auc_val = auc
if X_val is not None:
p_val = self.predict_raw(X_val)
auc_val = roc_auc_score(y_val, p_val)
print('\t{:3d}: {:.6f} {:.6f} {:.6f} {:.2f}'.format(
0, auc, auc_val, self.auc_opt,
(time.time() - start) / SEC_PER_MIN))
# Use 'while' instead of 'for' to increase n_epoch if the validation
# error keeps improving at the end of n_epoch
epoch = 1
while epoch <= n_epoch:
# Shuffle inputs every epoch - it helps avoiding the local optimum
# when batch < n_obs.
np.random.shuffle(idx)
# Find the optimal weights for batch input examples.
# If batch == 1, it's the stochastic optimization, which is slow
# but uses minimal memory. If batch == n_obs, it's the batch
# optimization, which is fast but uses maximum memory.
# Otherwise, it's the mini-batch optimization, which balances the
# speed and space trade-offs.
for i in range(int(n_obs / batch) + 1):
if (i + 1) * batch > n_obs:
sub_idx = idx[batch * i:n_obs]
else:
sub_idx = idx[batch * i:batch * (i + 1)]
x = X[sub_idx]
neg_idx = [n_idx for n_idx, n_y in enumerate(y[sub_idx]) if n_y == 0.]
pos_idx = [p_idx for p_idx, p_y in enumerate(y[sub_idx]) if p_y == 1.]
x0 = x[neg_idx]
x1 = x[pos_idx]
# Update weights to minimize the cost function using the
# quasi-Newton method (L-BFGS-B), where:
# func -- cost function
# jac -- jacobian (derivative of the cost function)
# maxiter -- number of iterations for L-BFGS-B
ret = minimize(self.func,
self.w,
args=(x0, x1),
method='L-BFGS-B',
jac=self.fprime,
options={'maxiter': 5})
self.w = ret.x
p = self.predict_raw(X)
auc = roc_auc_score(y, p)
auc_val = auc
if X_val is not None:
p_val = self.predict_raw(X_val)
auc_val = roc_auc_score(y_val, p_val)
if auc_val > self.auc_opt:
self.auc_opt = auc_val
self.w_opt = self.w
self.n_opt = epoch
# If validation auc is still improving after n_epoch,
# try 5 more epochs
if epoch == n_epoch:
n_epoch += 5
print('\t{:3d}: {:.6f} {:.6f} {:.6f} {:.2f}'.format(
epoch, auc, auc_val, self.auc_opt,
(time.time() - start) / SEC_PER_MIN))
epoch += 1
if X_val is not None:
print('Optimal epoch is {0} ({1:.6f})'.format(self.n_opt,
self.auc_opt))
self.w = self.w_opt
logger.info('done training') | Train a network with the quasi-Newton method.
Args:
X (np.array of float): feature matrix for training
y (np.array of float): target values for training
X_val (np.array of float): feature matrix for validation
y_val (np.array of float): target values for validation | Below is the instruction that describes the task:
### Input:
Train a network with the quasi-Newton method.
Args:
X (np.array of float): feature matrix for training
y (np.array of float): target values for training
X_val (np.array of float): feature matrix for validation
y_val (np.array of float): target values for validation
### Response:
def fit(self, X, y, X_val=None, y_val=None):
"""Train a network with the quasi-Newton method.
Args:
X (np.array of float): feature matrix for training
y (np.array of float): target values for training
X_val (np.array of float): feature matrix for validation
y_val (np.array of float): target values for validation
"""
y = y.reshape((len(y), 1))
if sparse.issparse(X):
X = X.tocsr()
if X_val is not None:
n_val = len(y_val)
y_val = y_val.reshape((n_val, 1))
# Set initial weights randomly.
self.i = X.shape[1]
self.l1 = self.l1 / self.i
self.w = (np.random.rand((self.i + 2) * self.h + 1) - .5) * 1e-6
self.w_opt = self.w
self.n_opt = 0
logger.info('training ...')
n_obs = X.shape[0]
batch = self.b
n_epoch = self.n
idx = list(range(n_obs))
self.auc_opt = .5
start = time.time()
print('\tEPOCH TRAIN VALID BEST TIME (m)')
print('\t--------------------------------------------')
# Before training
p = self.predict_raw(X)
auc = roc_auc_score(y, p)
auc_val = auc
if X_val is not None:
p_val = self.predict_raw(X_val)
auc_val = roc_auc_score(y_val, p_val)
print('\t{:3d}: {:.6f} {:.6f} {:.6f} {:.2f}'.format(
0, auc, auc_val, self.auc_opt,
(time.time() - start) / SEC_PER_MIN))
# Use 'while' instead of 'for' to increase n_epoch if the validation
# error keeps improving at the end of n_epoch
epoch = 1
while epoch <= n_epoch:
# Shuffle inputs every epoch - it helps avoiding the local optimum
# when batch < n_obs.
np.random.shuffle(idx)
# Find the optimal weights for batch input examples.
# If batch == 1, it's the stochastic optimization, which is slow
# but uses minimal memory. If batch == n_obs, it's the batch
# optimization, which is fast but uses maximum memory.
# Otherwise, it's the mini-batch optimization, which balances the
# speed and space trade-offs.
for i in range(int(n_obs / batch) + 1):
if (i + 1) * batch > n_obs:
sub_idx = idx[batch * i:n_obs]
else:
sub_idx = idx[batch * i:batch * (i + 1)]
x = X[sub_idx]
neg_idx = [n_idx for n_idx, n_y in enumerate(y[sub_idx]) if n_y == 0.]
pos_idx = [p_idx for p_idx, p_y in enumerate(y[sub_idx]) if p_y == 1.]
x0 = x[neg_idx]
x1 = x[pos_idx]
# Update weights to minimize the cost function using the
# quasi-Newton method (L-BFGS-B), where:
# func -- cost function
# jac -- jacobian (derivative of the cost function)
# maxiter -- number of iterations for L-BFGS-B
ret = minimize(self.func,
self.w,
args=(x0, x1),
method='L-BFGS-B',
jac=self.fprime,
options={'maxiter': 5})
self.w = ret.x
p = self.predict_raw(X)
auc = roc_auc_score(y, p)
auc_val = auc
if X_val is not None:
p_val = self.predict_raw(X_val)
auc_val = roc_auc_score(y_val, p_val)
if auc_val > self.auc_opt:
self.auc_opt = auc_val
self.w_opt = self.w
self.n_opt = epoch
# If validation auc is still improving after n_epoch,
# try 5 more epochs
if epoch == n_epoch:
n_epoch += 5
print('\t{:3d}: {:.6f} {:.6f} {:.6f} {:.2f}'.format(
epoch, auc, auc_val, self.auc_opt,
(time.time() - start) / SEC_PER_MIN))
epoch += 1
if X_val is not None:
print('Optimal epoch is {0} ({1:.6f})'.format(self.n_opt,
self.auc_opt))
self.w = self.w_opt
logger.info('done training') |
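The mini-batch slicing in the inner loop is easier to see in isolation; a toy run with hypothetical sizes, printing only the index windows it produces:

```python
n_obs, batch = 10, 4
idx = list(range(n_obs))
for i in range(int(n_obs / batch) + 1):
    if (i + 1) * batch > n_obs:
        sub_idx = idx[batch * i:n_obs]            # last, possibly short, batch
    else:
        sub_idx = idx[batch * i:batch * (i + 1)]
    print(i, sub_idx)
# 0 [0, 1, 2, 3]
# 1 [4, 5, 6, 7]
# 2 [8, 9]
```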
def callable_name(callable_obj):
"""
Attempt to return a meaningful name identifying a callable or generator
"""
try:
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
return callable_obj.__name__
elif (isinstance(callable_obj, param.Parameterized)
and 'operation' in callable_obj.params()):
return callable_obj.operation.__name__
elif isinstance(callable_obj, partial):
return str(callable_obj)
elif inspect.isfunction(callable_obj): # functions and staticmethods
return callable_obj.__name__
elif inspect.ismethod(callable_obj): # instance and class methods
meth = callable_obj
if sys.version_info < (3,0):
owner = meth.im_class if meth.im_self is None else meth.im_self
else:
owner = meth.__self__
if meth.__name__ == '__call__':
return type(owner).__name__
return '.'.join([owner.__name__, meth.__name__])
elif isinstance(callable_obj, types.GeneratorType):
return callable_obj.__name__
else:
return type(callable_obj).__name__
except:
return str(callable_obj) | Attempt to return a meaningful name identifying a callable or generator | Below is the instruction that describes the task:
### Input:
Attempt to return a meaningful name identifying a callable or generator
### Response:
def callable_name(callable_obj):
"""
Attempt to return a meaningful name identifying a callable or generator
"""
try:
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
return callable_obj.__name__
elif (isinstance(callable_obj, param.Parameterized)
and 'operation' in callable_obj.params()):
return callable_obj.operation.__name__
elif isinstance(callable_obj, partial):
return str(callable_obj)
elif inspect.isfunction(callable_obj): # functions and staticmethods
return callable_obj.__name__
elif inspect.ismethod(callable_obj): # instance and class methods
meth = callable_obj
if sys.version_info < (3,0):
owner = meth.im_class if meth.im_self is None else meth.im_self
else:
owner = meth.__self__
if meth.__name__ == '__call__':
return type(owner).__name__
return '.'.join([owner.__name__, meth.__name__])
elif isinstance(callable_obj, types.GeneratorType):
return callable_obj.__name__
else:
return type(callable_obj).__name__
except:
return str(callable_obj) |
def get_booster_stats(ctx, currency):
"""Prints out price stats for booster packs available in Steam user inventory."""
username = ctx.obj['username']
inventory = User(username)._get_inventory_raw()
boosters = {}
for item in inventory['rgDescriptions'].values():
is_booster = False
tags = item['tags']
for tag in tags:
if tag['internal_name'] == TAG_ITEM_CLASS_BOOSTER:
is_booster = True
break
if not is_booster:
continue
appid = item['market_fee_app']
title = item['name']
boosters[appid] = title
if not boosters:
click.secho('User `%s` has no booster packs' % username, fg='red', err=True)
return
for appid, title in boosters.items():
click.secho('Found booster: `%s`' % title, fg='blue')
print_card_prices(appid, currency) | Prints out price stats for booster packs available in Steam user inventory. | Below is the instruction that describes the task:
### Input:
Prints out price stats for booster packs available in Steam user inventory.
### Response:
def get_booster_stats(ctx, currency):
"""Prints out price stats for booster packs available in Steam user inventory."""
username = ctx.obj['username']
inventory = User(username)._get_inventory_raw()
boosters = {}
for item in inventory['rgDescriptions'].values():
is_booster = False
tags = item['tags']
for tag in tags:
if tag['internal_name'] == TAG_ITEM_CLASS_BOOSTER:
is_booster = True
break
if not is_booster:
continue
appid = item['market_fee_app']
title = item['name']
boosters[appid] = title
if not boosters:
click.secho('User `%s` has no booster packs' % username, fg='red', err=True)
return
for appid, title in boosters.items():
click.secho('Found booster: `%s`' % title, fg='blue')
print_card_prices(appid, currency) |
def get_supported_connections(self):
"""Returns the number of supported simultaneous BLE connections.
The BLED112 is capable of supporting up to 8 simultaneous BLE connections.
However, the default firmware image has a limit of just 3 devices, which
is a lot easier to run up against. This method retrieves the current value
of this setting.
Returns:
int. The number of supported simultaneous connections, or -1 on error
"""
if self.supported_connections != -1:
return self.supported_connections
if self.api is None:
return -1
self._set_state(self._STATE_DONGLE_COMMAND)
self.api.ble_cmd_system_get_connections()
self._wait_for_state(self._STATE_DONGLE_COMMAND)
return self.supported_connections | Returns the number of supported simultaneous BLE connections.
The BLED112 is capable of supporting up to 8 simultaneous BLE connections.
However, the default firmware image has a limit of just 3 devices, which
is a lot easier to run up against. This method retrieves the current value
of this setting.
Returns:
int. The number of supported simultaneous connections, or -1 on error | Below is the instruction that describes the task:
### Input:
Returns the number of supported simultaneous BLE connections.
The BLED112 is capable of supporting up to 8 simultaneous BLE connections.
However, the default firmware image has a limit of just 3 devices, which
is a lot easier to run up against. This method retrieves the current value
of this setting.
Returns:
int. The number of supported simultaneous connections, or -1 on error
### Response:
def get_supported_connections(self):
"""Returns the number of supported simultaneous BLE connections.
The BLED112 is capable of supporting up to 8 simultaneous BLE connections.
However, the default firmware image has a limit of just 3 devices, which
is a lot easier to run up against. This method retrieves the current value
of this setting.
Returns:
int. The number of supported simultaneous connections, or -1 on error
"""
if self.supported_connections != -1:
return self.supported_connections
if self.api is None:
return -1
self._set_state(self._STATE_DONGLE_COMMAND)
self.api.ble_cmd_system_get_connections()
self._wait_for_state(self._STATE_DONGLE_COMMAND)
return self.supported_connections |
def accounts(self):
"""
A list of structures describing apps and pages owned by this user.
"""
response = self.graph.get('%s/accounts' % self.id)
accounts = []
for item in response['data']:
account = Structure(
page = Page(
id = item['id'],
name = item['name'],
category = item['category']
),
access_token = item['access_token'],
permissions = item['perms']
)
accounts.append(account)
return accounts | A list of structures describing apps and pages owned by this user. | Below is the instruction that describes the task:
### Input:
A list of structures describing apps and pages owned by this user.
### Response:
def accounts(self):
"""
A list of structures describing apps and pages owned by this user.
"""
response = self.graph.get('%s/accounts' % self.id)
accounts = []
for item in response['data']:
account = Structure(
page = Page(
id = item['id'],
name = item['name'],
category = item['category']
),
access_token = item['access_token'],
permissions = item['perms']
)
accounts.append(account)
return accounts |
def _load_point(big_endian, type_bytes, data_bytes):
"""
Convert byte data for a Point to a GeoJSON `dict`.
:param bool big_endian:
If `True`, interpret the ``data_bytes`` in big endian order, else
little endian.
:param str type_bytes:
4-byte integer (as a binary string) indicating the geometry type
(Point) and the dimensions (2D, Z, M or ZM). For consistency, these
bytes are expected to always be in big endian order, regardless of the
value of ``big_endian``.
:param str data_bytes:
Coordinate data in a binary string.
:returns:
GeoJSON `dict` representing the Point geometry.
"""
endian_token = '>' if big_endian else '<'
if type_bytes == WKB_2D['Point']:
coords = struct.unpack('%sdd' % endian_token,
as_bin_str(take(16, data_bytes)))
elif type_bytes == WKB_Z['Point']:
coords = struct.unpack('%sddd' % endian_token,
as_bin_str(take(24, data_bytes)))
elif type_bytes == WKB_M['Point']:
# NOTE: The use of XYM-type geometries is quite rare. In the interest
# of removing ambiguity, we will treat all XYM geometries as XYZM when
# generating the GeoJSON. A default Z value of `0.0` will be given in
# this case.
coords = list(struct.unpack('%sddd' % endian_token,
as_bin_str(take(24, data_bytes))))
coords.insert(2, 0.0)
elif type_bytes == WKB_ZM['Point']:
coords = struct.unpack('%sdddd' % endian_token,
as_bin_str(take(32, data_bytes)))
return dict(type='Point', coordinates=list(coords)) | Convert byte data for a Point to a GeoJSON `dict`.
:param bool big_endian:
If `True`, interpret the ``data_bytes`` in big endian order, else
little endian.
:param str type_bytes:
4-byte integer (as a binary string) indicating the geometry type
(Point) and the dimensions (2D, Z, M or ZM). For consistency, these
bytes are expected to always be in big endian order, regardless of the
value of ``big_endian``.
:param str data_bytes:
Coordinate data in a binary string.
:returns:
GeoJSON `dict` representing the Point geometry. | Below is the instruction that describes the task:
### Input:
Convert byte data for a Point to a GeoJSON `dict`.
:param bool big_endian:
If `True`, interpret the ``data_bytes`` in big endian order, else
little endian.
:param str type_bytes:
4-byte integer (as a binary string) indicating the geometry type
(Point) and the dimensions (2D, Z, M or ZM). For consistency, these
bytes are expected to always be in big endian order, regardless of the
value of ``big_endian``.
:param str data_bytes:
Coordinate data in a binary string.
:returns:
GeoJSON `dict` representing the Point geometry.
### Response:
def _load_point(big_endian, type_bytes, data_bytes):
"""
Convert byte data for a Point to a GeoJSON `dict`.
:param bool big_endian:
If `True`, interpret the ``data_bytes`` in big endian order, else
little endian.
:param str type_bytes:
4-byte integer (as a binary string) indicating the geometry type
(Point) and the dimensions (2D, Z, M or ZM). For consistency, these
bytes are expected to always be in big endian order, regardless of the
value of ``big_endian``.
:param str data_bytes:
Coordinate data in a binary string.
:returns:
GeoJSON `dict` representing the Point geometry.
"""
endian_token = '>' if big_endian else '<'
if type_bytes == WKB_2D['Point']:
coords = struct.unpack('%sdd' % endian_token,
as_bin_str(take(16, data_bytes)))
elif type_bytes == WKB_Z['Point']:
coords = struct.unpack('%sddd' % endian_token,
as_bin_str(take(24, data_bytes)))
elif type_bytes == WKB_M['Point']:
# NOTE: The use of XYM-type geometries is quite rare. In the interest
# of removing ambiguity, we will treat all XYM geometries as XYZM when
# generating the GeoJSON. A default Z value of `0.0` will be given in
# this case.
coords = list(struct.unpack('%sddd' % endian_token,
as_bin_str(take(24, data_bytes))))
coords.insert(2, 0.0)
elif type_bytes == WKB_ZM['Point']:
coords = struct.unpack('%sdddd' % endian_token,
as_bin_str(take(32, data_bytes)))
return dict(type='Point', coordinates=list(coords)) |
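The helpers `take` and `as_bin_str` are internal to the WKB module, but the 2D branch reduces to a plain `struct.unpack`; a worked example on hand-packed little-endian coordinate bytes (so `big_endian` would be `False`):

```python
import struct

# 16 bytes of coordinate data for POINT (1.0 2.0), little-endian doubles.
data_bytes = struct.pack('<dd', 1.0, 2.0)

endian_token = '<'
coords = struct.unpack('%sdd' % endian_token, data_bytes)
print(dict(type='Point', coordinates=list(coords)))   # {'type': 'Point', 'coordinates': [1.0, 2.0]}
```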
def _assign_to_null(obj, path, value, force=True):
"""
value IS ASSIGNED TO obj[self.path][key]
path IS AN ARRAY OF PROPERTY NAMES
force=False IF YOU PREFER TO use setDefault()
"""
try:
if obj is Null:
return
if _get(obj, CLASS) is NullType:
d = _get(obj, "__dict__")
o = d[OBJ]
p = d["__key__"]
s = [p]+path
return _assign_to_null(o, s, value)
path0 = path[0]
if len(path) == 1:
if force:
obj[path0] = value
else:
_setdefault(obj, path0, value)
return
old_value = obj.get(path0)
if old_value == None:
if value == None:
return
else:
obj[path0] = old_value = {}
_assign_to_null(old_value, path[1:], value)
except Exception as e:
raise e | value IS ASSIGNED TO obj[self.path][key]
path IS AN ARRAY OF PROPERTY NAMES
force=False IF YOU PREFER TO use setDefault() | Below is the instruction that describes the task:
### Input:
value IS ASSIGNED TO obj[self.path][key]
path IS AN ARRAY OF PROPERTY NAMES
force=False IF YOU PREFER TO use setDefault()
### Response:
def _assign_to_null(obj, path, value, force=True):
"""
value IS ASSIGNED TO obj[self.path][key]
path IS AN ARRAY OF PROPERTY NAMES
force=False IF YOU PREFER TO use setDefault()
"""
try:
if obj is Null:
return
if _get(obj, CLASS) is NullType:
d = _get(obj, "__dict__")
o = d[OBJ]
p = d["__key__"]
s = [p]+path
return _assign_to_null(o, s, value)
path0 = path[0]
if len(path) == 1:
if force:
obj[path0] = value
else:
_setdefault(obj, path0, value)
return
old_value = obj.get(path0)
if old_value == None:
if value == None:
return
else:
obj[path0] = old_value = {}
_assign_to_null(old_value, path[1:], value)
except Exception as e:
raise e |
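Stripped of the `Null`/`NullType` special cases (which belong to the surrounding module), the recursion is just "walk the path, creating empty dicts as needed"; a simplified standalone sketch with hypothetical names, operating on plain dicts only:

```python
def assign_path(obj, path, value):
    head, rest = path[0], path[1:]
    if not rest:
        obj[head] = value                 # final key: assign (the force=True branch)
        return
    if obj.get(head) is None:
        obj[head] = {}                    # create the missing intermediate level
    assign_path(obj[head], rest, value)

doc = {'a': {'x': 1}}
assign_path(doc, ['a', 'b', 'c'], 42)
print(doc)                                # {'a': {'x': 1, 'b': {'c': 42}}}
```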
def auto_detect(self, args):
"""Check for already Slackware binary packages exist
"""
suffixes = [
".tgz",
".txz",
".tbz",
".tlz"
]
if (not args[0].startswith("-") and args[0] not in self.commands and
args[0].endswith(tuple(suffixes))):
packages, not_found = [], []
for pkg in args:
if pkg.endswith(tuple(suffixes)):
if os.path.isfile(pkg):
packages.append(pkg)
else:
not_found.append(pkg)
if packages:
Auto(packages).select()
if not_found:
for ntf in not_found:
self.msg.pkg_not_found("", ntf, "Not installed", "")
raise SystemExit() | Check for already Slackware binary packages exist | Below is the instruction that describes the task:
### Input:
Check for already Slackware binary packages exist
### Response:
def auto_detect(self, args):
"""Check for already Slackware binary packages exist
"""
suffixes = [
".tgz",
".txz",
".tbz",
".tlz"
]
if (not args[0].startswith("-") and args[0] not in self.commands and
args[0].endswith(tuple(suffixes))):
packages, not_found = [], []
for pkg in args:
if pkg.endswith(tuple(suffixes)):
if os.path.isfile(pkg):
packages.append(pkg)
else:
not_found.append(pkg)
if packages:
Auto(packages).select()
if not_found:
for ntf in not_found:
self.msg.pkg_not_found("", ntf, "Not installed", "")
raise SystemExit() |
def LMA(XY,ParIni):
"""
input: list of x and y values [[x_1, y_1], [x_2, y_2], ....], and a tuple containing an initial guess (a, b, r)
which is acquired by using an algebraic circle fit (TaubinSVD)
output: a, b, r. a and b are the center of the fitting circle, and r is the radius
% Geometric circle fit (minimizing orthogonal distances)
% based on the Levenberg-Marquardt scheme in the
% "algebraic parameters" A,B,C,D with constraint B*B+C*C-4*A*D=1
% N. Chernov and C. Lesort, "Least squares fitting of circles",
% J. Math. Imag. Vision, Vol. 23, 239-251 (2005)
"""
factorUp=10
factorDown=0.04
lambda0=0.01
epsilon=0.000001
IterMAX = 50
AdjustMax = 20
Xshift=0
Yshift=0
dX=1
dY=0;
n = len(XY); # number of data points
anew = ParIni[0] + Xshift
bnew = ParIni[1] + Yshift
Anew = old_div(1.,(2.*ParIni[2]))
aabb = anew*anew + bnew*bnew
Fnew = (aabb - ParIni[2]*ParIni[2])*Anew
Tnew = numpy.arccos(old_div(-anew,numpy.sqrt(aabb)))
if bnew > 0:
Tnew = 2*numpy.pi - Tnew
VarNew = VarCircle(XY,ParIni)
VarLambda = lambda0;
finish = 0;
for it in range(0,IterMAX):
Aold = Anew
Fold = Fnew
Told = Tnew
VarOld = VarNew
H = numpy.sqrt(1+4*Aold*Fold);
aold = -H*numpy.cos(Told)/(Aold+Aold) - Xshift;
bold = -H*numpy.sin(Told)/(Aold+Aold) - Yshift;
Rold = old_div(1,abs(Aold+Aold));
DD = 1 + 4*Aold*Fold;
D = numpy.sqrt(DD);
CT = numpy.cos(Told);
ST = numpy.sin(Told);
H11=0;
H12=0;
H13=0;
H22=0;
H23=0;
H33=0;
F1=0;
F2=0;
F3=0;
for i in range(0,n):
Xi = XY[i,0] + Xshift;
Yi = XY[i,1] + Yshift;
Zi = Xi*Xi + Yi*Yi;
Ui = Xi*CT + Yi*ST;
Vi =-Xi*ST + Yi*CT;
ADF = Aold*Zi + D*Ui + Fold;
SQ = numpy.sqrt(4*Aold*ADF + 1);
DEN = SQ + 1;
Gi = 2*ADF/DEN;
FACT = 2/DEN*(1 - Aold*Gi/SQ);
DGDAi = FACT*(Zi + 2*Fold*Ui/D) - Gi*Gi/SQ;
DGDFi = FACT*(2*Aold*Ui/D + 1);
DGDTi = FACT*D*Vi;
H11 = H11 + DGDAi*DGDAi;
H12 = H12 + DGDAi*DGDFi;
H13 = H13 + DGDAi*DGDTi;
H22 = H22 + DGDFi*DGDFi;
H23 = H23 + DGDFi*DGDTi;
H33 = H33 + DGDTi*DGDTi;
F1 = F1 + Gi*DGDAi;
F2 = F2 + Gi*DGDFi;
F3 = F3 + Gi*DGDTi;
for adjust in range(1,AdjustMax):
# Cholesky decomposition
G11 = numpy.sqrt(H11 + VarLambda);
G12 = old_div(H12,G11)
G13 = old_div(H13,G11)
G22 = numpy.sqrt(H22 + VarLambda - G12*G12);
G23 = old_div((H23 - G12*G13),G22);
G33 = numpy.sqrt(H33 + VarLambda - G13*G13 - G23*G23);
D1 = old_div(F1,G11);
D2 = old_div((F2 - G12*D1),G22);
D3 = old_div((F3 - G13*D1 - G23*D2),G33);
dT = old_div(D3,G33);
dF = old_div((D2 - G23*dT),G22)
dA = old_div((D1 - G12*dF - G13*dT),G11)
# updating the parameters
Anew = Aold - dA;
Fnew = Fold - dF;
Tnew = Told - dT;
if 1+4*Anew*Fnew < epsilon and VarLambda>1:
Xshift = Xshift + dX;
Yshift = Yshift + dY;
H = numpy.sqrt(1+4*Aold*Fold);
aTemp = -H*numpy.cos(Told)/(Aold+Aold) + dX;
bTemp = -H*numpy.sin(Told)/(Aold+Aold) + dY;
rTemp = old_div(1,abs(Aold+Aold));
Anew = old_div(1,(rTemp + rTemp));
aabb = aTemp*aTemp + bTemp*bTemp;
Fnew = (aabb - rTemp*rTemp)*Anew;
Tnew = numpy.arccos(old_div(-aTemp,numpy.sqrt(aabb)));
if bTemp > 0:
Tnew = 2*numpy.pi - Tnew;
VarNew = VarOld;
break;
if 1+4*Anew*Fnew < epsilon:
VarLambda = VarLambda * factorUp;
continue;
DD = 1 + 4*Anew*Fnew;
D = numpy.sqrt(DD);
CT = numpy.cos(Tnew);
ST = numpy.sin(Tnew);
GG = 0;
for i in range(0, n):
Xi = XY[i,0] + Xshift;
Yi = XY[i,1] + Yshift;
Zi = Xi*Xi + Yi*Yi;
Ui = Xi*CT + Yi*ST;
ADF = Anew*Zi + D*Ui + Fnew;
SQ = numpy.sqrt(4*Anew*ADF + 1);
DEN = SQ + 1;
Gi = 2*ADF/DEN;
GG = GG + Gi*Gi;
VarNew = old_div(GG,(n-3));
H = numpy.sqrt(1+4*Anew*Fnew);
anew = -H*numpy.cos(Tnew)/(Anew+Anew) - Xshift;
bnew = -H*numpy.sin(Tnew)/(Anew+Anew) - Yshift;
Rnew = old_div(1,abs(Anew+Anew));
if VarNew <= VarOld:
progress = old_div((abs(anew-aold) + abs(bnew-bold) + abs(Rnew-Rold)),(Rnew+Rold));
if progress < epsilon:
Aold = Anew;
Fold = Fnew;
Told = Tnew;
VarOld = VarNew # %#ok<NASGU>
finish = 1;
break;
VarLambda = VarLambda * factorDown
break
else: # % no improvement
VarLambda = VarLambda * factorUp;
continue;
if finish == 1:
break
H = numpy.sqrt(1+4*Aold*Fold);
result_a = -H*numpy.cos(Told)/(Aold+Aold) - Xshift;
result_b = -H*numpy.sin(Told)/(Aold+Aold) - Yshift;
result_r = old_div(1,abs(Aold+Aold));
return result_a, result_b, result_r | input: list of x and y values [[x_1, y_1], [x_2, y_2], ....], and a tuple containing an initial guess (a, b, r)
which is acquired by using an algebraic circle fit (TaubinSVD)
output: a, b, r. a and b are the center of the fitting circle, and r is the radius
% Geometric circle fit (minimizing orthogonal distances)
% based on the Levenberg-Marquardt scheme in the
% "algebraic parameters" A,B,C,D with constraint B*B+C*C-4*A*D=1
% N. Chernov and C. Lesort, "Least squares fitting of circles",
% J. Math. Imag. Vision, Vol. 23, 239-251 (2005) | Below is the instruction that describes the task:
### Input:
input: list of x and y values [[x_1, y_1], [x_2, y_2], ....], and a tuple containing an initial guess (a, b, r)
which is acquired by using an algebraic circle fit (TaubinSVD)
output: a, b, r. a and b are the center of the fitting circle, and r is the radius
% Geometric circle fit (minimizing orthogonal distances)
% based on the Levenberg-Marquardt scheme in the
% "algebraic parameters" A,B,C,D with constraint B*B+C*C-4*A*D=1
% N. Chernov and C. Lesort, "Least squares fitting of circles",
% J. Math. Imag. Vision, Vol. 23, 239-251 (2005)
### Response:
def LMA(XY,ParIni):
"""
input: list of x and y values [[x_1, y_1], [x_2, y_2], ....], and a tuple containing an initial guess (a, b, r)
which is acquired by using an algebraic circle fit (TaubinSVD)
output: a, b, r. a and b are the center of the fitting circle, and r is the radius
% Geometric circle fit (minimizing orthogonal distances)
% based on the Levenberg-Marquardt scheme in the
% "algebraic parameters" A,B,C,D with constraint B*B+C*C-4*A*D=1
% N. Chernov and C. Lesort, "Least squares fitting of circles",
% J. Math. Imag. Vision, Vol. 23, 239-251 (2005)
"""
factorUp=10
factorDown=0.04
lambda0=0.01
epsilon=0.000001
IterMAX = 50
AdjustMax = 20
Xshift=0
Yshift=0
dX=1
dY=0;
n = len(XY); # number of data points
anew = ParIni[0] + Xshift
bnew = ParIni[1] + Yshift
Anew = old_div(1.,(2.*ParIni[2]))
aabb = anew*anew + bnew*bnew
Fnew = (aabb - ParIni[2]*ParIni[2])*Anew
Tnew = numpy.arccos(old_div(-anew,numpy.sqrt(aabb)))
if bnew > 0:
Tnew = 2*numpy.pi - Tnew
VarNew = VarCircle(XY,ParIni)
VarLambda = lambda0;
finish = 0;
for it in range(0,IterMAX):
Aold = Anew
Fold = Fnew
Told = Tnew
VarOld = VarNew
H = numpy.sqrt(1+4*Aold*Fold);
aold = -H*numpy.cos(Told)/(Aold+Aold) - Xshift;
bold = -H*numpy.sin(Told)/(Aold+Aold) - Yshift;
Rold = old_div(1,abs(Aold+Aold));
DD = 1 + 4*Aold*Fold;
D = numpy.sqrt(DD);
CT = numpy.cos(Told);
ST = numpy.sin(Told);
H11=0;
H12=0;
H13=0;
H22=0;
H23=0;
H33=0;
F1=0;
F2=0;
F3=0;
for i in range(0,n):
Xi = XY[i,0] + Xshift;
Yi = XY[i,1] + Yshift;
Zi = Xi*Xi + Yi*Yi;
Ui = Xi*CT + Yi*ST;
Vi =-Xi*ST + Yi*CT;
ADF = Aold*Zi + D*Ui + Fold;
SQ = numpy.sqrt(4*Aold*ADF + 1);
DEN = SQ + 1;
Gi = 2*ADF/DEN;
FACT = 2/DEN*(1 - Aold*Gi/SQ);
DGDAi = FACT*(Zi + 2*Fold*Ui/D) - Gi*Gi/SQ;
DGDFi = FACT*(2*Aold*Ui/D + 1);
DGDTi = FACT*D*Vi;
H11 = H11 + DGDAi*DGDAi;
H12 = H12 + DGDAi*DGDFi;
H13 = H13 + DGDAi*DGDTi;
H22 = H22 + DGDFi*DGDFi;
H23 = H23 + DGDFi*DGDTi;
H33 = H33 + DGDTi*DGDTi;
F1 = F1 + Gi*DGDAi;
F2 = F2 + Gi*DGDFi;
F3 = F3 + Gi*DGDTi;
for adjust in range(1,AdjustMax):
# Cholesky decomposition
G11 = numpy.sqrt(H11 + VarLambda);
G12 = old_div(H12,G11)
G13 = old_div(H13,G11)
G22 = numpy.sqrt(H22 + VarLambda - G12*G12);
G23 = old_div((H23 - G12*G13),G22);
G33 = numpy.sqrt(H33 + VarLambda - G13*G13 - G23*G23);
D1 = old_div(F1,G11);
D2 = old_div((F2 - G12*D1),G22);
D3 = old_div((F3 - G13*D1 - G23*D2),G33);
dT = old_div(D3,G33);
dF = old_div((D2 - G23*dT),G22)
dA = old_div((D1 - G12*dF - G13*dT),G11)
# updating the parameters
Anew = Aold - dA;
Fnew = Fold - dF;
Tnew = Told - dT;
if 1+4*Anew*Fnew < epsilon and VarLambda>1:
Xshift = Xshift + dX;
Yshift = Yshift + dY;
H = numpy.sqrt(1+4*Aold*Fold);
aTemp = -H*numpy.cos(Told)/(Aold+Aold) + dX;
bTemp = -H*numpy.sin(Told)/(Aold+Aold) + dY;
rTemp = old_div(1,abs(Aold+Aold));
Anew = old_div(1,(rTemp + rTemp));
aabb = aTemp*aTemp + bTemp*bTemp;
Fnew = (aabb - rTemp*rTemp)*Anew;
Tnew = numpy.arccos(old_div(-aTemp,numpy.sqrt(aabb)));
if bTemp > 0:
Tnew = 2*numpy.pi - Tnew;
VarNew = VarOld;
break;
if 1+4*Anew*Fnew < epsilon:
VarLambda = VarLambda * factorUp;
continue;
DD = 1 + 4*Anew*Fnew;
D = numpy.sqrt(DD);
CT = numpy.cos(Tnew);
ST = numpy.sin(Tnew);
GG = 0;
for i in range(0, n):
Xi = XY[i,0] + Xshift;
Yi = XY[i,1] + Yshift;
Zi = Xi*Xi + Yi*Yi;
Ui = Xi*CT + Yi*ST;
ADF = Anew*Zi + D*Ui + Fnew;
SQ = numpy.sqrt(4*Anew*ADF + 1);
DEN = SQ + 1;
Gi = 2*ADF/DEN;
GG = GG + Gi*Gi;
VarNew = old_div(GG,(n-3));
H = numpy.sqrt(1+4*Anew*Fnew);
anew = -H*numpy.cos(Tnew)/(Anew+Anew) - Xshift;
bnew = -H*numpy.sin(Tnew)/(Anew+Anew) - Yshift;
Rnew = old_div(1,abs(Anew+Anew));
if VarNew <= VarOld:
progress = old_div((abs(anew-aold) + abs(bnew-bold) + abs(Rnew-Rold)),(Rnew+Rold));
if progress < epsilon:
Aold = Anew;
Fold = Fnew;
Told = Tnew;
VarOld = VarNew # %#ok<NASGU>
finish = 1;
break;
VarLambda = VarLambda * factorDown
break
else: # % no improvement
VarLambda = VarLambda * factorUp;
continue;
if finish == 1:
break
H = numpy.sqrt(1+4*Aold*Fold);
result_a = -H*numpy.cos(Told)/(Aold+Aold) - Xshift;
result_b = -H*numpy.sin(Told)/(Aold+Aold) - Yshift;
result_r = old_div(1,abs(Aold+Aold));
return result_a, result_b, result_r |
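`LMA` expects the data as an (n, 2) NumPy array plus an algebraic initial guess `(a, b, r)` (the docstring suggests `TaubinSVD`). A sketch of building test data and a simple Kåsa least-squares fit standing in for that initial guess; the commented call at the end assumes `LMA` and its helpers (`VarCircle`, `old_div`) are in scope:

```python
import numpy

# Noisy points on a hypothetical circle centred at (3, -2) with radius 5.
numpy.random.seed(0)
t = numpy.linspace(0, 2 * numpy.pi, 50)
XY = numpy.column_stack((3 + 5 * numpy.cos(t), -2 + 5 * numpy.sin(t)))
XY += numpy.random.normal(scale=0.05, size=XY.shape)

# Kasa algebraic fit: solve x^2 + y^2 = 2*a*x + 2*b*y + c, then r = sqrt(c + a^2 + b^2).
A = numpy.column_stack((2 * XY[:, 0], 2 * XY[:, 1], numpy.ones(len(XY))))
rhs = XY[:, 0] ** 2 + XY[:, 1] ** 2
(a0, b0, c0) = numpy.linalg.lstsq(A, rhs, rcond=None)[0]
ParIni = (a0, b0, numpy.sqrt(c0 + a0 ** 2 + b0 ** 2))

# a, b, r = LMA(XY, ParIni)   # geometric refinement with the function above
```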
def process_cbn_jgif_file(file_name):
"""Return a PybelProcessor by processing a CBN JGIF JSON file.
Parameters
----------
file_name : str
The path to a CBN JGIF JSON file.
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
bp.statements.
"""
with open(file_name, 'r') as jgf:
return process_pybel_graph(pybel.from_cbn_jgif(json.load(jgf))) | Return a PybelProcessor by processing a CBN JGIF JSON file.
Parameters
----------
file_name : str
The path to a CBN JGIF JSON file.
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
bp.statements. | Below is the instruction that describes the task:
### Input:
Return a PybelProcessor by processing a CBN JGIF JSON file.
Parameters
----------
file_name : str
The path to a CBN JGIF JSON file.
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
bp.statements.
### Response:
def process_cbn_jgif_file(file_name):
"""Return a PybelProcessor by processing a CBN JGIF JSON file.
Parameters
----------
file_name : str
The path to a CBN JGIF JSON file.
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
bp.statements.
"""
with open(file_name, 'r') as jgf:
return process_pybel_graph(pybel.from_cbn_jgif(json.load(jgf))) |
def dict_match(d, key, default=None):
"""Like __getitem__ but works as if the keys() are all filename patterns.
Returns the value of any dict key that matches the passed key.
Args:
d (dict): A dict with filename patterns as keys
key (str): A key potentially matching any of the keys
default (object): The object to return if no pattern matched the
passed in key
Returns:
object: The dict value where the dict key matched the passed in key.
Or default if there was no match.
"""
if key in d and "[" not in key:
return d[key]
else:
for pattern, value in iteritems(d):
if fnmatchcase(key, pattern):
return value
return default | Like __getitem__ but works as if the keys() are all filename patterns.
Returns the value of any dict key that matches the passed key.
Args:
d (dict): A dict with filename patterns as keys
key (str): A key potentially matching any of the keys
default (object): The object to return if no pattern matched the
passed in key
Returns:
object: The dict value where the dict key matched the passed in key.
Or default if there was no match. | Below is the instruction that describes the task:
### Input:
Like __getitem__ but works as if the keys() are all filename patterns.
Returns the value of any dict key that matches the passed key.
Args:
d (dict): A dict with filename patterns as keys
key (str): A key potentially matching any of the keys
default (object): The object to return if no pattern matched the
passed in key
Returns:
object: The dict value where the dict key matched the passed in key.
Or default if there was no match.
### Response:
def dict_match(d, key, default=None):
"""Like __getitem__ but works as if the keys() are all filename patterns.
Returns the value of any dict key that matches the passed key.
Args:
d (dict): A dict with filename patterns as keys
key (str): A key potentially matching any of the keys
default (object): The object to return if no pattern matched the
passed in key
Returns:
object: The dict value where the dict key matched the passed in key.
Or default if there was no match.
"""
if key in d and "[" not in key:
return d[key]
else:
for pattern, value in iteritems(d):
if fnmatchcase(key, pattern):
return value
return default |
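A usage sketch with a hypothetical pattern table; the function also needs `fnmatchcase` and an `iteritems` helper in scope (a minimal stand-in is defined here for Python 3):

```python
from fnmatch import fnmatchcase

def iteritems(d):        # minimal stand-in for the compat helper dict_match expects
    return d.items()

handlers = {'*.flac': 'lossless', '*.mp[34]': 'lossy', 'cover.jpg': 'artwork'}
print(dict_match(handlers, 'cover.jpg'))    # 'artwork'  (exact key containing no '[')
print(dict_match(handlers, 'track01.mp3'))  # 'lossy'    (matches the '*.mp[34]' pattern)
print(dict_match(handlers, 'notes.txt'))    # None       (no pattern matched; default returned)
```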
def set(self, value: dict):
"""
Set the state, completely over-writing the previous value.
.. caution::
This kind of operation usually leads to a data race.
Please take good care while using this.
Use the :py:func:`atomic` decorator if you're feeling anxious.
"""
self._s_request_reply({Msgs.cmd: Cmds.set_state, Msgs.info: value}) | Set the state, completely over-writing the previous value.
.. caution::
This kind of operation usually leads to a data race.
Please take good care while using this.
Use the :py:func:`atomic` decorator if you're feeling anxious. | Below is the instruction that describes the task:
### Input:
Set the state, completely over-writing the previous value.
.. caution::
This kind of operation usually leads to a data race.
Please take good care while using this.
Use the :py:func:`atomic` decorator if you're feeling anxious.
### Response:
def set(self, value: dict):
"""
Set the state, completely over-writing the previous value.
.. caution::
This kind of operation usually leads to a data race.
Please take good care while using this.
Use the :py:func:`atomic` decorator if you're feeling anxious.
"""
self._s_request_reply({Msgs.cmd: Cmds.set_state, Msgs.info: value}) |
def generate_schema_mapping(resolver, schema_uri, depth=1):
""" Try and recursively iterate a JSON schema and to generate an ES mapping
that encapsulates it. """
visitor = SchemaVisitor({'$ref': schema_uri}, resolver)
return _generate_schema_mapping(visitor, set(), depth) | Try and recursively iterate a JSON schema and to generate an ES mapping
that encapsulates it. | Below is the instruction that describes the task:
### Input:
Try and recursively iterate a JSON schema and to generate an ES mapping
that encapsulates it.
### Response:
def generate_schema_mapping(resolver, schema_uri, depth=1):
""" Try and recursively iterate a JSON schema and to generate an ES mapping
that encapsulates it. """
visitor = SchemaVisitor({'$ref': schema_uri}, resolver)
return _generate_schema_mapping(visitor, set(), depth) |
def manage_service_check_result_brok(self, b): # pylint: disable=too-many-branches
"""A service check result brok has just arrived ..."""
host_name = b.data.get('host_name', None)
service_description = b.data.get('service_description', None)
if not host_name or not service_description:
return
service_id = host_name+"/"+service_description
logger.debug("service check result: %s", service_id)
# If host and service initial status broks have not been received, ignore ...
if not self.ignore_unknown and host_name not in self.hosts_cache:
logger.warning("received service check result for an unknown host: %s", service_id)
return
if service_id not in self.services_cache and not self.ignore_unknown:
logger.warning("received service check result for an unknown service: %s", service_id)
return
# Decode received metrics
metrics = self.get_metrics_from_perfdata(service_description, b.data['perf_data'])
if not metrics:
logger.debug("no metrics to send ...")
return
# If checks latency is ignored
if self.ignore_latency_limit >= b.data['latency'] > 0:
check_time = int(b.data['last_chk']) - int(b.data['latency'])
else:
check_time = int(b.data['last_chk'])
# Custom hosts variables
hname = sanitize_name(host_name)
if host_name in self.hosts_cache:
if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None):
hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname))
if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None):
hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname))
# Custom services variables
desc = sanitize_name(service_description)
if service_id in self.services_cache:
if self.services_cache[service_id].get('_GRAPHITE_POST', None):
desc = ".".join((desc, self.services_cache[service_id].get('_GRAPHITE_POST', None)))
# Graphite data source
if self.graphite_data_source:
path = '.'.join((hname, self.graphite_data_source, desc))
else:
path = '.'.join((hname, desc))
# Realm as a prefix
if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None):
path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path))
realm_name = None
if host_name in self.hosts_cache:
realm_name = self.hosts_cache[host_name].get('realm_name', None)
# Send metrics
self.send_to_tsdb(realm_name, host_name, service_description, metrics, check_time, path) | A service check result brok has just arrived ... | Below is the the instruction that describes the task:
### Input:
A service check result brok has just arrived ...
### Response:
def manage_service_check_result_brok(self, b): # pylint: disable=too-many-branches
"""A service check result brok has just arrived ..."""
host_name = b.data.get('host_name', None)
service_description = b.data.get('service_description', None)
if not host_name or not service_description:
return
service_id = host_name+"/"+service_description
logger.debug("service check result: %s", service_id)
# If host and service initial status broks have not been received, ignore ...
if not self.ignore_unknown and host_name not in self.hosts_cache:
logger.warning("received service check result for an unknown host: %s", service_id)
return
if service_id not in self.services_cache and not self.ignore_unknown:
logger.warning("received service check result for an unknown service: %s", service_id)
return
# Decode received metrics
metrics = self.get_metrics_from_perfdata(service_description, b.data['perf_data'])
if not metrics:
logger.debug("no metrics to send ...")
return
# If checks latency is ignored
if self.ignore_latency_limit >= b.data['latency'] > 0:
check_time = int(b.data['last_chk']) - int(b.data['latency'])
else:
check_time = int(b.data['last_chk'])
# Custom hosts variables
hname = sanitize_name(host_name)
if host_name in self.hosts_cache:
if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None):
hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname))
if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None):
hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname))
# Custom services variables
desc = sanitize_name(service_description)
if service_id in self.services_cache:
if self.services_cache[service_id].get('_GRAPHITE_POST', None):
desc = ".".join((desc, self.services_cache[service_id].get('_GRAPHITE_POST', None)))
# Graphite data source
if self.graphite_data_source:
path = '.'.join((hname, self.graphite_data_source, desc))
else:
path = '.'.join((hname, desc))
# Realm as a prefix
if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None):
path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path))
realm_name = None
if host_name in self.hosts_cache:
realm_name = self.hosts_cache[host_name].get('realm_name', None)
# Send metrics
self.send_to_tsdb(realm_name, host_name, service_description, metrics, check_time, path) |
def get_passenger_cpu_usage(self, dict_stats):
"""
Execute % top; and return STDOUT.
"""
try:
proc1 = subprocess.Popen(
["top", "-b", "-n", "2"],
stdout=subprocess.PIPE)
(std_out, std_err) = proc1.communicate()
except OSError:
return (-1)
re_lspaces = re.compile("^\s*")
re_digit = re.compile("^\d")
overall_cpu = 0
for raw_line in std_out.splitlines():
line = re_lspaces.sub("", raw_line)
if not re_digit.match(line):
continue
line_splitted = line.split()
if line_splitted[0] in dict_stats["apache_procs"]:
overall_cpu += float(line_splitted[8])
elif line_splitted[0] in dict_stats["nginx_procs"]:
overall_cpu += float(line_splitted[8])
elif line_splitted[0] in dict_stats["passenger_procs"]:
overall_cpu += float(line_splitted[8])
return overall_cpu | Execute % top; and return STDOUT. | Below is the the instruction that describes the task:
### Input:
Execute % top; and return STDOUT.
### Response:
def get_passenger_cpu_usage(self, dict_stats):
"""
Execute % top; and return STDOUT.
"""
try:
proc1 = subprocess.Popen(
["top", "-b", "-n", "2"],
stdout=subprocess.PIPE)
(std_out, std_err) = proc1.communicate()
except OSError:
return (-1)
re_lspaces = re.compile("^\s*")
re_digit = re.compile("^\d")
overall_cpu = 0
for raw_line in std_out.splitlines():
line = re_lspaces.sub("", raw_line)
if not re_digit.match(line):
continue
line_splitted = line.split()
if line_splitted[0] in dict_stats["apache_procs"]:
overall_cpu += float(line_splitted[8])
elif line_splitted[0] in dict_stats["nginx_procs"]:
overall_cpu += float(line_splitted[8])
elif line_splitted[0] in dict_stats["passenger_procs"]:
overall_cpu += float(line_splitted[8])
return overall_cpu |
def _protocolObjectGenerator(self, request, numObjects, getByIndexMethod):
"""
Returns a generator over the results for the specified request, from
a set of protocol objects of the specified size. The objects are
returned by call to the specified method, which must take a single
integer as an argument. The returned generator yields a sequence of
(object, nextPageToken) pairs, which allows this iteration to be picked
up at any point.
"""
currentIndex = 0
if request.page_token:
currentIndex, = paging._parsePageToken(
request.page_token, 1)
while currentIndex < numObjects:
object_ = getByIndexMethod(currentIndex)
currentIndex += 1
nextPageToken = None
if currentIndex < numObjects:
nextPageToken = str(currentIndex)
yield object_, nextPageToken | Returns a generator over the results for the specified request, from
a set of protocol objects of the specified size. The objects are
returned by call to the specified method, which must take a single
integer as an argument. The returned generator yields a sequence of
(object, nextPageToken) pairs, which allows this iteration to be picked
up at any point. | Below is the the instruction that describes the task:
### Input:
Returns a generator over the results for the specified request, from
a set of protocol objects of the specified size. The objects are
returned by call to the specified method, which must take a single
integer as an argument. The returned generator yields a sequence of
(object, nextPageToken) pairs, which allows this iteration to be picked
up at any point.
### Response:
def _protocolObjectGenerator(self, request, numObjects, getByIndexMethod):
"""
Returns a generator over the results for the specified request, from
a set of protocol objects of the specified size. The objects are
returned by call to the specified method, which must take a single
integer as an argument. The returned generator yields a sequence of
(object, nextPageToken) pairs, which allows this iteration to be picked
up at any point.
"""
currentIndex = 0
if request.page_token:
currentIndex, = paging._parsePageToken(
request.page_token, 1)
while currentIndex < numObjects:
object_ = getByIndexMethod(currentIndex)
currentIndex += 1
nextPageToken = None
if currentIndex < numObjects:
nextPageToken = str(currentIndex)
yield object_, nextPageToken |
def get_import_prefixes_from_env(env):
"""
Obtain current Python import prefixes (for `import_by_name`)
from ``document.env``
"""
prefixes = [None]
currmodule = env.temp_data.get('py:module')
if currmodule:
prefixes.insert(0, currmodule)
currclass = env.temp_data.get('py:class')
if currclass:
if currmodule:
prefixes.insert(0, currmodule + "." + currclass)
else:
prefixes.insert(0, currclass)
return prefixes | Obtain current Python import prefixes (for `import_by_name`)
from ``document.env`` | Below is the the instruction that describes the task:
### Input:
Obtain current Python import prefixes (for `import_by_name`)
from ``document.env``
### Response:
def get_import_prefixes_from_env(env):
"""
Obtain current Python import prefixes (for `import_by_name`)
from ``document.env``
"""
prefixes = [None]
currmodule = env.temp_data.get('py:module')
if currmodule:
prefixes.insert(0, currmodule)
currclass = env.temp_data.get('py:class')
if currclass:
if currmodule:
prefixes.insert(0, currmodule + "." + currclass)
else:
prefixes.insert(0, currclass)
return prefixes |
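A small sketch of the resulting prefix order, using a stand-in object with a `temp_data` dict in place of a real Sphinx build environment; the module and class names are hypothetical:
from types import SimpleNamespace

env = SimpleNamespace(temp_data={"py:module": "mypkg.core", "py:class": "Widget"})
print(get_import_prefixes_from_env(env))
# ['mypkg.core.Widget', 'mypkg.core', None] -- most specific prefix first, None last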
def _getSyntaxByLanguageName(self, syntaxName):
"""Get syntax by its name. Name is defined in the xml file
"""
xmlFileName = self._syntaxNameToXmlFileName[syntaxName]
return self._getSyntaxByXmlFileName(xmlFileName) | Get syntax by its name. Name is defined in the xml file | Below is the the instruction that describes the task:
### Input:
Get syntax by its name. Name is defined in the xml file
### Response:
def _getSyntaxByLanguageName(self, syntaxName):
"""Get syntax by its name. Name is defined in the xml file
"""
xmlFileName = self._syntaxNameToXmlFileName[syntaxName]
return self._getSyntaxByXmlFileName(xmlFileName) |
def shell(self):
"""
Opens a Django focussed Python shell.
Essentially the equivalent of running `manage.py shell`.
"""
r = self.local_renderer
if '@' in self.genv.host_string:
r.env.shell_host_string = self.genv.host_string
else:
r.env.shell_host_string = '{user}@{host_string}'
r.env.shell_default_dir = self.genv.shell_default_dir_template
r.env.shell_interactive_djshell_str = self.genv.interactive_shell_template
r.run_or_local('ssh -t -i {key_filename} {shell_host_string} "{shell_interactive_djshell_str}"') | Opens a Django focussed Python shell.
Essentially the equivalent of running `manage.py shell`. | Below is the the instruction that describes the task:
### Input:
Opens a Django focussed Python shell.
Essentially the equivalent of running `manage.py shell`.
### Response:
def shell(self):
"""
Opens a Django focussed Python shell.
Essentially the equivalent of running `manage.py shell`.
"""
r = self.local_renderer
if '@' in self.genv.host_string:
r.env.shell_host_string = self.genv.host_string
else:
r.env.shell_host_string = '{user}@{host_string}'
r.env.shell_default_dir = self.genv.shell_default_dir_template
r.env.shell_interactive_djshell_str = self.genv.interactive_shell_template
r.run_or_local('ssh -t -i {key_filename} {shell_host_string} "{shell_interactive_djshell_str}"') |
def do_request(self, method, url, data=None, headers=None, params=None):
"""
Handle API requests / responses transport
:param method: HTTP method to use as string
:param data: Any data as PyDict (will be converted to XML string)
:param headers: Any data as PyDict
:return: If response is XML then an xml.etree.ElementTree else the raw content
:raise: Any unsuccessful HTTP response code if fail_mode=raise
"""
response_content = None
if data:
if headers:
headers.update({'Content-Type': 'application/xml'})
else:
headers = {'Content-Type': 'application/xml'}
if self._debug:
print md.parseString(data).toprettyxml()
response = self._session.request(method, url, headers=headers, params=params, data=data)
if 'content-type' in response.headers:
if response.headers['content-type'].find('application/xml') != -1:
response_content = xmloperations.xml_to_dict(et.fromstring(response.content))
elif response.headers['content-type'].find('application/json') != -1:
response_content = json.loads(response.content)
else:
response_content = response.content
response_odict = OrderedDict([('status', response.status_code), ('body', response_content),
('location', None), ('objectId', None), ('Etag', None)])
if 'location' in response.headers:
response_odict['location'] = response.headers['location']
response_odict['objectId'] = response.headers['location'].split('/')[-1]
if 'Etag' in response.headers:
response_odict['Etag'] = response.headers['Etag']
if response.status_code not in [200, 201, 202, 204]:
if self.fail_mode == 'exit':
sys.exit('receive bad status code {}\n{}'.format(response.status_code, response_content))
elif self.fail_mode == 'raise':
raise NsxError(response.status_code, response_content)
elif self.fail_mode == 'continue':
pass
return response_odict | Handle API requests / responses transport
:param method: HTTP method to use as string
:param data: Any data as PyDict (will be converted to XML string)
:param headers: Any data as PyDict
:return: If response is XML then an xml.etree.ElementTree else the raw content
:raise: Any unsuccessful HTTP response code if fail_mode=raise | Below is the the instruction that describes the task:
### Input:
Handle API requests / responses transport
:param method: HTTP method to use as string
:param data: Any data as PyDict (will be converted to XML string)
:param headers: Any data as PyDict
:return: If response is XML then an xml.etree.ElementTree else the raw content
:raise: Any unsuccessful HTTP response code if fail_mode=raise
### Response:
def do_request(self, method, url, data=None, headers=None, params=None):
"""
Handle API requests / responses transport
:param method: HTTP method to use as string
:param data: Any data as PyDict (will be converted to XML string)
:param headers: Any data as PyDict
:return: If response is XML then an xml.etree.ElementTree else the raw content
:raise: Any unsuccessful HTTP response code if fail_mode=raise
"""
response_content = None
if data:
if headers:
headers.update({'Content-Type': 'application/xml'})
else:
headers = {'Content-Type': 'application/xml'}
if self._debug:
print md.parseString(data).toprettyxml()
response = self._session.request(method, url, headers=headers, params=params, data=data)
if 'content-type' in response.headers:
if response.headers['content-type'].find('application/xml') != -1:
response_content = xmloperations.xml_to_dict(et.fromstring(response.content))
elif response.headers['content-type'].find('application/json') != -1:
response_content = json.loads(response.content)
else:
response_content = response.content
response_odict = OrderedDict([('status', response.status_code), ('body', response_content),
('location', None), ('objectId', None), ('Etag', None)])
if 'location' in response.headers:
response_odict['location'] = response.headers['location']
response_odict['objectId'] = response.headers['location'].split('/')[-1]
if 'Etag' in response.headers:
response_odict['Etag'] = response.headers['Etag']
if response.status_code not in [200, 201, 202, 204]:
if self.fail_mode == 'exit':
sys.exit('receive bad status code {}\n{}'.format(response.status_code, response_content))
elif self.fail_mode == 'raise':
raise NsxError(response.status_code, response_content)
elif self.fail_mode == 'continue':
pass
return response_odict |
def can_manage(user, semester=None, pool=None, any_pool=False):
"""
Whether a user is allowed to manage a workshift semester. This includes the
current workshift managers, that semester's workshift managers, and site
superusers.
"""
if semester and user in semester.workshift_managers.all():
return True
if Manager and Manager.objects.filter(
incumbent__user=user, workshift_manager=True,
).count() > 0:
return True
if pool and pool.managers.filter(incumbent__user=user).count() > 0:
return True
if any_pool and WorkshiftPool.objects.filter(
managers__incumbent__user=user,
):
return True
return user.is_superuser or user.is_staff | Whether a user is allowed to manage a workshift semester. This includes the
current workshift managers, that semester's workshift managers, and site
superusers. | Below is the the instruction that describes the task:
### Input:
Whether a user is allowed to manage a workshift semester. This includes the
current workshift managers, that semester's workshift managers, and site
superusers.
### Response:
def can_manage(user, semester=None, pool=None, any_pool=False):
"""
Whether a user is allowed to manage a workshift semester. This includes the
current workshift managers, that semester's workshift managers, and site
superusers.
"""
if semester and user in semester.workshift_managers.all():
return True
if Manager and Manager.objects.filter(
incumbent__user=user, workshift_manager=True,
).count() > 0:
return True
if pool and pool.managers.filter(incumbent__user=user).count() > 0:
return True
if any_pool and WorkshiftPool.objects.filter(
managers__incumbent__user=user,
):
return True
return user.is_superuser or user.is_staff |
def _set_upload_status(self, file_data_object, upload_status):
""" Set file_data_object.file_resource.upload_status
"""
uuid = file_data_object['uuid']
return self.connection.update_data_object(
uuid,
{'uuid': uuid, 'value': { 'upload_status': upload_status}}
) | Set file_data_object.file_resource.upload_status | Below is the the instruction that describes the task:
### Input:
Set file_data_object.file_resource.upload_status
### Response:
def _set_upload_status(self, file_data_object, upload_status):
""" Set file_data_object.file_resource.upload_status
"""
uuid = file_data_object['uuid']
return self.connection.update_data_object(
uuid,
{'uuid': uuid, 'value': { 'upload_status': upload_status}}
) |
def need_completion_refresh(queries):
"""Determines if the completion needs a refresh by checking if the sql
statement is an alter, create, drop or change db."""
tokens = {
'use', '\\u',
'create',
'drop'
}
for query in sqlparse.split(queries):
try:
first_token = query.split()[0]
if first_token.lower() in tokens:
return True
except Exception:
return False | Determines if the completion needs a refresh by checking if the sql
statement is an alter, create, drop or change db. | Below is the the instruction that describes the task:
### Input:
Determines if the completion needs a refresh by checking if the sql
statement is an alter, create, drop or change db.
### Response:
def need_completion_refresh(queries):
"""Determines if the completion needs a refresh by checking if the sql
statement is an alter, create, drop or change db."""
tokens = {
'use', '\\u',
'create',
'drop'
}
for query in sqlparse.split(queries):
try:
first_token = query.split()[0]
if first_token.lower() in tokens:
return True
except Exception:
return False |
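A quick sketch of the check, assuming `sqlparse` is installed as the function requires; note that when nothing matches, the function falls through and returns None, which is falsy:
print(need_completion_refresh("use mydb"))                           # True
print(need_completion_refresh("CREATE TABLE t (id int); SELECT 1"))  # True, the CREATE statement triggers it
print(need_completion_refresh("SELECT * FROM t"))                    # None, no refresh needed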
def _set_mixed_moments_to_zero(self, closed_central_moments, n_counter):
r"""
In univariate case, set the cross-terms to 0.
:param closed_central_moments: matrix of closed central moment
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a matrix of new closed central moments with cross-terms equal to 0
"""
positive_n_counter = [n for n in n_counter if n.order > 1]
if self.is_multivariate:
return closed_central_moments
else:
return [0 if n.is_mixed else ccm for n,ccm in zip(positive_n_counter, closed_central_moments)] | r"""
In univariate case, set the cross-terms to 0.
:param closed_central_moments: matrix of closed central moment
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a matrix of new closed central moments with cross-terms equal to 0 | Below is the the instruction that describes the task:
### Input:
r"""
In univariate case, set the cross-terms to 0.
:param closed_central_moments: matrix of closed central moment
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a matrix of new closed central moments with cross-terms equal to 0
### Response:
def _set_mixed_moments_to_zero(self, closed_central_moments, n_counter):
r"""
In univariate case, set the cross-terms to 0.
:param closed_central_moments: matrix of closed central moment
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a matrix of new closed central moments with cross-terms equal to 0
"""
positive_n_counter = [n for n in n_counter if n.order > 1]
if self.is_multivariate:
return closed_central_moments
else:
return [0 if n.is_mixed else ccm for n,ccm in zip(positive_n_counter, closed_central_moments)] |
def update(self):
"""Update the status of the range setting."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_charging_params(self._id)
if data and (time.time() - self.__manual_update_time > 60):
self.__maxrange_state = data['charge_to_max_range'] | Update the status of the range setting. | Below is the the instruction that describes the task:
### Input:
Update the status of the range setting.
### Response:
def update(self):
"""Update the status of the range setting."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_charging_params(self._id)
if data and (time.time() - self.__manual_update_time > 60):
self.__maxrange_state = data['charge_to_max_range'] |
def _all_resources(self):
"""Return the complete collection of resources as a list of
dictionaries.
:rtype: :class:`sandman2.model.Model`
"""
queryset = self.__model__.query
args = {k: v for (k, v) in request.args.items() if k not in ('page', 'export')}
limit = None
if args:
filters = []
order = []
for key, value in args.items():
if value.startswith('%'):
filters.append(getattr(self.__model__, key).like(str(value), escape='/'))
elif key == 'sort':
direction = desc if value.startswith('-') else asc
order.append(direction(getattr(self.__model__, value.lstrip('-'))))
elif key == 'limit':
limit = int(value)
elif hasattr(self.__model__, key):
filters.append(getattr(self.__model__, key) == value)
else:
raise BadRequestException('Invalid field [{}]'.format(key))
queryset = queryset.filter(*filters).order_by(*order)
if 'page' in request.args:
resources = queryset.paginate(page=int(request.args['page']), per_page=limit).items
else:
queryset = queryset.limit(limit)
resources = queryset.all()
return [r.to_dict() for r in resources] | Return the complete collection of resources as a list of
dictionaries.
:rtype: :class:`sandman2.model.Model` | Below is the the instruction that describes the task:
### Input:
Return the complete collection of resources as a list of
dictionaries.
:rtype: :class:`sandman2.model.Model`
### Response:
def _all_resources(self):
"""Return the complete collection of resources as a list of
dictionaries.
:rtype: :class:`sandman2.model.Model`
"""
queryset = self.__model__.query
args = {k: v for (k, v) in request.args.items() if k not in ('page', 'export')}
limit = None
if args:
filters = []
order = []
for key, value in args.items():
if value.startswith('%'):
filters.append(getattr(self.__model__, key).like(str(value), escape='/'))
elif key == 'sort':
direction = desc if value.startswith('-') else asc
order.append(direction(getattr(self.__model__, value.lstrip('-'))))
elif key == 'limit':
limit = int(value)
elif hasattr(self.__model__, key):
filters.append(getattr(self.__model__, key) == value)
else:
raise BadRequestException('Invalid field [{}]'.format(key))
queryset = queryset.filter(*filters).order_by(*order)
if 'page' in request.args:
resources = queryset.paginate(page=int(request.args['page']), per_page=limit).items
else:
queryset = queryset.limit(limit)
resources = queryset.all()
return [r.to_dict() for r in resources] |
def qualifier_encoded(self):
"""Union[str, bytes]: The qualifier encoded in binary.
The type is ``str`` (Python 2.x) or ``bytes`` (Python 3.x). The module
will handle base64 encoding for you.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.%28key%29.bigtableOptions.columnFamilies.columns.qualifierEncoded
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.bigtableOptions.columnFamilies.columns.qualifierEncoded
"""
prop = self._properties.get("qualifierEncoded")
if prop is None:
return None
return base64.standard_b64decode(_to_bytes(prop)) | Union[str, bytes]: The qualifier encoded in binary.
The type is ``str`` (Python 2.x) or ``bytes`` (Python 3.x). The module
will handle base64 encoding for you.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.%28key%29.bigtableOptions.columnFamilies.columns.qualifierEncoded
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.bigtableOptions.columnFamilies.columns.qualifierEncoded | Below is the the instruction that describes the task:
### Input:
Union[str, bytes]: The qualifier encoded in binary.
The type is ``str`` (Python 2.x) or ``bytes`` (Python 3.x). The module
will handle base64 encoding for you.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.%28key%29.bigtableOptions.columnFamilies.columns.qualifierEncoded
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.bigtableOptions.columnFamilies.columns.qualifierEncoded
### Response:
def qualifier_encoded(self):
"""Union[str, bytes]: The qualifier encoded in binary.
The type is ``str`` (Python 2.x) or ``bytes`` (Python 3.x). The module
will handle base64 encoding for you.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.%28key%29.bigtableOptions.columnFamilies.columns.qualifierEncoded
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.bigtableOptions.columnFamilies.columns.qualifierEncoded
"""
prop = self._properties.get("qualifierEncoded")
if prop is None:
return None
return base64.standard_b64decode(_to_bytes(prop)) |
def list(self, path=None, with_metadata=False, include_partitions=False):
'''get a list of all of the files in the repository'''
return self.upstream.list(
path,
with_metadata=with_metadata,
include_partitions=include_partitions) | get a list of all of the files in the repository | Below is the the instruction that describes the task:
### Input:
get a list of all of the files in the repository
### Response:
def list(self, path=None, with_metadata=False, include_partitions=False):
'''get a list of all of the files in the repository'''
return self.upstream.list(
path,
with_metadata=with_metadata,
include_partitions=include_partitions) |
def showEvent(self, event):
""" Overrides showEvent to update the viewport margins """
super(CodeEdit, self).showEvent(event)
self.panels.refresh() | Overrides showEvent to update the viewport margins | Below is the the instruction that describes the task:
### Input:
Overrides showEvent to update the viewport margins
### Response:
def showEvent(self, event):
""" Overrides showEvent to update the viewport margins """
super(CodeEdit, self).showEvent(event)
self.panels.refresh() |
def _GetISO8601String(self, structure):
"""Normalize date time parsed format to an ISO 8601 date time string.
The date and time values in Apache access log files are formatted as:
"[18/Sep/2011:19:18:28 -0400]".
Args:
structure (pyparsing.ParseResults): structure of tokens derived from a
line of a text file.
Returns:
str: ISO 8601 date time string.
Raises:
ValueError: if the structure cannot be converted into a date time string.
"""
time_offset = structure.time_offset
month = timelib.MONTH_DICT.get(structure.month.lower(), 0)
try:
time_offset_hours = int(time_offset[1:3], 10)
time_offset_minutes = int(time_offset[3:5], 10)
except (IndexError, TypeError, ValueError) as exception:
raise ValueError(
'unable to parse time zone offset with error: {0!s}.'.format(
exception))
try:
date_time_string = (
'{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.000000'
'{6:s}{7:02d}:{8:02d}').format(
structure.year, month, structure.day, structure.hours,
structure.minutes, structure.seconds, time_offset[0],
time_offset_hours, time_offset_minutes)
except ValueError as exception:
raise ValueError(
'unable to format date time string with error: {0!s}.'.format(
exception))
return date_time_string | Normalize date time parsed format to an ISO 8601 date time string.
The date and time values in Apache access log files are formatted as:
"[18/Sep/2011:19:18:28 -0400]".
Args:
structure (pyparsing.ParseResults): structure of tokens derived from a
line of a text file.
Returns:
str: ISO 8601 date time string.
Raises:
ValueError: if the structure cannot be converted into a date time string. | Below is the the instruction that describes the task:
### Input:
Normalize date time parsed format to an ISO 8601 date time string.
The date and time values in Apache access log files are formatted as:
"[18/Sep/2011:19:18:28 -0400]".
Args:
structure (pyparsing.ParseResults): structure of tokens derived from a
line of a text file.
Returns:
str: ISO 8601 date time string.
Raises:
ValueError: if the structure cannot be converted into a date time string.
### Response:
def _GetISO8601String(self, structure):
"""Normalize date time parsed format to an ISO 8601 date time string.
The date and time values in Apache access log files are formatted as:
"[18/Sep/2011:19:18:28 -0400]".
Args:
structure (pyparsing.ParseResults): structure of tokens derived from a
line of a text file.
Returns:
str: ISO 8601 date time string.
Raises:
ValueError: if the structure cannot be converted into a date time string.
"""
time_offset = structure.time_offset
month = timelib.MONTH_DICT.get(structure.month.lower(), 0)
try:
time_offset_hours = int(time_offset[1:3], 10)
time_offset_minutes = int(time_offset[3:5], 10)
except (IndexError, TypeError, ValueError) as exception:
raise ValueError(
'unable to parse time zone offset with error: {0!s}.'.format(
exception))
try:
date_time_string = (
'{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.000000'
'{6:s}{7:02d}:{8:02d}').format(
structure.year, month, structure.day, structure.hours,
structure.minutes, structure.seconds, time_offset[0],
time_offset_hours, time_offset_minutes)
except ValueError as exception:
raise ValueError(
'unable to format date time string with error: {0!s}.'.format(
exception))
return date_time_string |
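A standalone sketch of the same formatting applied to the docstring's example timestamp "[18/Sep/2011:19:18:28 -0400]", with the MONTH_DICT lookup replaced by the literal month number:
time_offset = "-0400"
iso = "{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.000000{6:s}{7:02d}:{8:02d}".format(
    2011, 9, 18, 19, 18, 28,
    time_offset[0], int(time_offset[1:3], 10), int(time_offset[3:5], 10))
print(iso)  # 2011-09-18T19:18:28.000000-04:00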
def snake_case(a_string):
"""Returns a snake cased version of a string.
:param a_string: any :class:`str` object.
Usage:
>>> snake_case('FooBar')
"foo_bar"
"""
partial = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', a_string)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', partial).lower() | Returns a snake cased version of a string.
:param a_string: any :class:`str` object.
Usage:
>>> snake_case('FooBar')
"foo_bar" | Below is the the instruction that describes the task:
### Input:
Returns a snake cased version of a string.
:param a_string: any :class:`str` object.
Usage:
>>> snake_case('FooBar')
"foo_bar"
### Response:
def snake_case(a_string):
"""Returns a snake cased version of a string.
:param a_string: any :class:`str` object.
Usage:
>>> snake_case('FooBar')
"foo_bar"
"""
partial = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', a_string)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', partial).lower() |
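Beyond the doctest above, the two-pass substitution also splits runs of capitals; two extra illustrative calls:
print(snake_case("HTTPResponseCode"))  # 'http_response_code'
print(snake_case("already_snake"))     # unchanged: 'already_snake'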
def desaturate(self):
"""Desaturates the layer, making it grayscale.
Instantly removes all color information from the layer,
        while maintaining its alpha channel.
"""
alpha = self.img.split()[3]
self.img = self.img.convert("L")
self.img = self.img.convert("RGBA")
self.img.putalpha(alpha) | Desaturates the layer, making it grayscale.
Instantly removes all color information from the layer,
        while maintaining its alpha channel. | Below is the the instruction that describes the task:
### Input:
Desaturates the layer, making it grayscale.
Instantly removes all color information from the layer,
while maintaining its alpha channel.
### Response:
def desaturate(self):
"""Desaturates the layer, making it grayscale.
Instantly removes all color information from the layer,
        while maintaining its alpha channel.
"""
alpha = self.img.split()[3]
self.img = self.img.convert("L")
self.img = self.img.convert("RGBA")
self.img.putalpha(alpha) |
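A standalone sketch of the same idea with Pillow, operating on a plain RGBA image rather than a layer object; the image is synthetic:
from PIL import Image

img = Image.new("RGBA", (4, 4), (255, 0, 0, 128))  # semi-transparent red
alpha = img.split()[3]                             # keep the alpha channel
gray = img.convert("L").convert("RGBA")            # drop the color information
gray.putalpha(alpha)                               # restore the transparency
print(gray.getpixel((0, 0)))                       # roughly (76, 76, 76, 128)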
def step1ab(self):
"""step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
"""
if self.b[self.k] == "s":
if self.ends("sses"):
self.k = self.k - 2
elif self.ends("ies"):
self.setto("i")
elif self.b[self.k - 1] != "s":
self.k = self.k - 1
if self.ends("eed"):
if self.m() > 0:
self.k = self.k - 1
elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
self.k = self.j
if self.ends("at"):
self.setto("ate")
elif self.ends("bl"):
self.setto("ble")
elif self.ends("iz"):
self.setto("ize")
elif self.doublec(self.k):
self.k = self.k - 1
ch = self.b[self.k]
if ch == "l" or ch == "s" or ch == "z":
self.k = self.k + 1
elif self.m() == 1 and self.cvc(self.k):
self.setto("e") | step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet | Below is the the instruction that describes the task:
### Input:
step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
### Response:
def step1ab(self):
"""step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
"""
if self.b[self.k] == "s":
if self.ends("sses"):
self.k = self.k - 2
elif self.ends("ies"):
self.setto("i")
elif self.b[self.k - 1] != "s":
self.k = self.k - 1
if self.ends("eed"):
if self.m() > 0:
self.k = self.k - 1
elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
self.k = self.j
if self.ends("at"):
self.setto("ate")
elif self.ends("bl"):
self.setto("ble")
elif self.ends("iz"):
self.setto("ize")
elif self.doublec(self.k):
self.k = self.k - 1
ch = self.b[self.k]
if ch == "l" or ch == "s" or ch == "z":
self.k = self.k + 1
elif self.m() == 1 and self.cvc(self.k):
self.setto("e") |
def _MakeCacheInvariant(self, urn, age):
"""Returns an invariant key for an AFF4 object.
The object will be cached based on this key. This function is specifically
extracted to ensure that we encapsulate all security critical aspects of the
AFF4 object so that objects do not leak across security boundaries.
Args:
urn: The urn of the object.
age: The age policy used to build this object. Should be one of
ALL_TIMES, NEWEST_TIME or a range.
Returns:
A key into the cache.
"""
precondition.AssertType(urn, Text)
return "%s:%s" % (urn, self.ParseAgeSpecification(age)) | Returns an invariant key for an AFF4 object.
The object will be cached based on this key. This function is specifically
extracted to ensure that we encapsulate all security critical aspects of the
AFF4 object so that objects do not leak across security boundaries.
Args:
urn: The urn of the object.
age: The age policy used to build this object. Should be one of
ALL_TIMES, NEWEST_TIME or a range.
Returns:
A key into the cache. | Below is the the instruction that describes the task:
### Input:
Returns an invariant key for an AFF4 object.
The object will be cached based on this key. This function is specifically
extracted to ensure that we encapsulate all security critical aspects of the
AFF4 object so that objects do not leak across security boundaries.
Args:
urn: The urn of the object.
age: The age policy used to build this object. Should be one of
ALL_TIMES, NEWEST_TIME or a range.
Returns:
A key into the cache.
### Response:
def _MakeCacheInvariant(self, urn, age):
"""Returns an invariant key for an AFF4 object.
The object will be cached based on this key. This function is specifically
extracted to ensure that we encapsulate all security critical aspects of the
AFF4 object so that objects do not leak across security boundaries.
Args:
urn: The urn of the object.
age: The age policy used to build this object. Should be one of
ALL_TIMES, NEWEST_TIME or a range.
Returns:
A key into the cache.
"""
precondition.AssertType(urn, Text)
return "%s:%s" % (urn, self.ParseAgeSpecification(age)) |
def execute(self, input_data):
''' Execute method '''
my_ssdeep = input_data['meta_deep']['ssdeep']
my_md5 = input_data['meta_deep']['md5']
# For every PE sample in the database compute my ssdeep fuzzy match
sample_set = self.workbench.generate_sample_set('exe')
results = self.workbench.set_work_request('meta_deep', sample_set, ['md5','ssdeep'])
sim_list = []
for result in results:
if result['md5'] != my_md5:
sim_list.append({'md5':result['md5'], 'sim':ssd.compare(my_ssdeep, result['ssdeep'])})
# Sort and return the sim_list (with some logic for threshold)
sim_list.sort(key=itemgetter('sim'), reverse=True)
output_list = [sim for sim in sim_list if sim['sim'] > 0]
return {'md5': my_md5, 'sim_list':output_list} | Execute method | Below is the the instruction that describes the task:
### Input:
Execute method
### Response:
def execute(self, input_data):
''' Execute method '''
my_ssdeep = input_data['meta_deep']['ssdeep']
my_md5 = input_data['meta_deep']['md5']
# For every PE sample in the database compute my ssdeep fuzzy match
sample_set = self.workbench.generate_sample_set('exe')
results = self.workbench.set_work_request('meta_deep', sample_set, ['md5','ssdeep'])
sim_list = []
for result in results:
if result['md5'] != my_md5:
sim_list.append({'md5':result['md5'], 'sim':ssd.compare(my_ssdeep, result['ssdeep'])})
# Sort and return the sim_list (with some logic for threshold)
sim_list.sort(key=itemgetter('sim'), reverse=True)
output_list = [sim for sim in sim_list if sim['sim'] > 0]
return {'md5': my_md5, 'sim_list':output_list} |
def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a wait_time condition into
this object.
'''
super(WaitTime, self).parse_xml_node(node)
wait_time_nodes = node.getElementsByTagNameNS(RTS_NS, 'WaitTime')
if wait_time_nodes.length != 1:
raise InvalidParticipantNodeError
self.wait_time = int(wait_time_nodes[0].getAttributeNS(RTS_NS,
'waitTime'))
return self | Parse an xml.dom Node object representing a wait_time condition into
this object. | Below is the the instruction that describes the task:
### Input:
Parse an xml.dom Node object representing a wait_time condition into
this object.
### Response:
def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a wait_time condition into
this object.
'''
super(WaitTime, self).parse_xml_node(node)
wait_time_nodes = node.getElementsByTagNameNS(RTS_NS, 'WaitTime')
if wait_time_nodes.length != 1:
raise InvalidParticipantNodeError
self.wait_time = int(wait_time_nodes[0].getAttributeNS(RTS_NS,
'waitTime'))
return self |
def toggle_value(request, name):
"""
For manual shortcut links to perform toggle actions
"""
obj = service.system.namespace.get(name, None)
if not obj or service.read_only:
raise Http404
new_status = obj.status = not obj.status
if service.redirect_from_setters:
return HttpResponseRedirect(reverse('set_ready', args=(name, new_status)))
else:
return set_ready(request, name, new_status) | For manual shortcut links to perform toggle actions | Below is the the instruction that describes the task:
### Input:
For manual shortcut links to perform toggle actions
### Response:
def toggle_value(request, name):
"""
For manual shortcut links to perform toggle actions
"""
obj = service.system.namespace.get(name, None)
if not obj or service.read_only:
raise Http404
new_status = obj.status = not obj.status
if service.redirect_from_setters:
return HttpResponseRedirect(reverse('set_ready', args=(name, new_status)))
else:
return set_ready(request, name, new_status) |
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, collections.Mapping):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data | Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run. | Below is the the instruction that describes the task:
### Input:
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
### Response:
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, collections.Mapping):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data |
def generate_request_header(self, response, host, is_preemptive=False):
"""
Generates the GSSAPI authentication token with kerberos.
If any GSSAPI step fails, raise KerberosExchangeError
with failure detail.
"""
# Flags used by kerberos module.
gssflags = kerberos.GSS_C_MUTUAL_FLAG | kerberos.GSS_C_SEQUENCE_FLAG
if self.delegate:
gssflags |= kerberos.GSS_C_DELEG_FLAG
try:
kerb_stage = "authGSSClientInit()"
# contexts still need to be stored by host, but hostname_override
# allows use of an arbitrary hostname for the kerberos exchange
# (eg, in cases of aliased hosts, internal vs external, CNAMEs
# w/ name-based HTTP hosting)
kerb_host = self.hostname_override if self.hostname_override is not None else host
kerb_spn = "{0}@{1}".format(self.service, kerb_host)
result, self.context[host] = kerberos.authGSSClientInit(kerb_spn,
gssflags=gssflags, principal=self.principal)
if result < 1:
raise EnvironmentError(result, kerb_stage)
# if we have a previous response from the server, use it to continue
# the auth process, otherwise use an empty value
negotiate_resp_value = '' if is_preemptive else _negotiate_value(response)
kerb_stage = "authGSSClientStep()"
# If this is set pass along the struct to Kerberos
if self.cbt_struct:
result = kerberos.authGSSClientStep(self.context[host],
negotiate_resp_value,
channel_bindings=self.cbt_struct)
else:
result = kerberos.authGSSClientStep(self.context[host],
negotiate_resp_value)
if result < 0:
raise EnvironmentError(result, kerb_stage)
kerb_stage = "authGSSClientResponse()"
gss_response = kerberos.authGSSClientResponse(self.context[host])
return "Negotiate {0}".format(gss_response)
except kerberos.GSSError as error:
log.exception(
"generate_request_header(): {0} failed:".format(kerb_stage))
log.exception(error)
raise KerberosExchangeError("%s failed: %s" % (kerb_stage, str(error.args)))
except EnvironmentError as error:
# ensure we raised this for translation to KerberosExchangeError
# by comparing errno to result, re-raise if not
if error.errno != result:
raise
message = "{0} failed, result: {1}".format(kerb_stage, result)
log.error("generate_request_header(): {0}".format(message))
raise KerberosExchangeError(message) | Generates the GSSAPI authentication token with kerberos.
If any GSSAPI step fails, raise KerberosExchangeError
with failure detail. | Below is the the instruction that describes the task:
### Input:
Generates the GSSAPI authentication token with kerberos.
If any GSSAPI step fails, raise KerberosExchangeError
with failure detail.
### Response:
def generate_request_header(self, response, host, is_preemptive=False):
"""
Generates the GSSAPI authentication token with kerberos.
If any GSSAPI step fails, raise KerberosExchangeError
with failure detail.
"""
# Flags used by kerberos module.
gssflags = kerberos.GSS_C_MUTUAL_FLAG | kerberos.GSS_C_SEQUENCE_FLAG
if self.delegate:
gssflags |= kerberos.GSS_C_DELEG_FLAG
try:
kerb_stage = "authGSSClientInit()"
# contexts still need to be stored by host, but hostname_override
# allows use of an arbitrary hostname for the kerberos exchange
# (eg, in cases of aliased hosts, internal vs external, CNAMEs
# w/ name-based HTTP hosting)
kerb_host = self.hostname_override if self.hostname_override is not None else host
kerb_spn = "{0}@{1}".format(self.service, kerb_host)
result, self.context[host] = kerberos.authGSSClientInit(kerb_spn,
gssflags=gssflags, principal=self.principal)
if result < 1:
raise EnvironmentError(result, kerb_stage)
# if we have a previous response from the server, use it to continue
# the auth process, otherwise use an empty value
negotiate_resp_value = '' if is_preemptive else _negotiate_value(response)
kerb_stage = "authGSSClientStep()"
# If this is set pass along the struct to Kerberos
if self.cbt_struct:
result = kerberos.authGSSClientStep(self.context[host],
negotiate_resp_value,
channel_bindings=self.cbt_struct)
else:
result = kerberos.authGSSClientStep(self.context[host],
negotiate_resp_value)
if result < 0:
raise EnvironmentError(result, kerb_stage)
kerb_stage = "authGSSClientResponse()"
gss_response = kerberos.authGSSClientResponse(self.context[host])
return "Negotiate {0}".format(gss_response)
except kerberos.GSSError as error:
log.exception(
"generate_request_header(): {0} failed:".format(kerb_stage))
log.exception(error)
raise KerberosExchangeError("%s failed: %s" % (kerb_stage, str(error.args)))
except EnvironmentError as error:
# ensure we raised this for translation to KerberosExchangeError
# by comparing errno to result, re-raise if not
if error.errno != result:
raise
message = "{0} failed, result: {1}".format(kerb_stage, result)
log.error("generate_request_header(): {0}".format(message))
raise KerberosExchangeError(message) |
def bbox(self):
"""
The bounding box ``(ymin, xmin, ymax, xmax)`` of the minimal
rectangular region containing the source segment.
"""
# (stop - 1) to return the max pixel location, not the slice index
return (self._slice[0].start, self._slice[1].start,
self._slice[0].stop - 1, self._slice[1].stop - 1) * u.pix | The bounding box ``(ymin, xmin, ymax, xmax)`` of the minimal
rectangular region containing the source segment. | Below is the the instruction that describes the task:
### Input:
The bounding box ``(ymin, xmin, ymax, xmax)`` of the minimal
rectangular region containing the source segment.
### Response:
def bbox(self):
"""
The bounding box ``(ymin, xmin, ymax, xmax)`` of the minimal
rectangular region containing the source segment.
"""
# (stop - 1) to return the max pixel location, not the slice index
return (self._slice[0].start, self._slice[1].start,
self._slice[0].stop - 1, self._slice[1].stop - 1) * u.pix |
def context_list2dict(context_list: typing.Sequence[typing.Any]) -> Context:
"""Converts a list of objects (functions, classes, or modules) to a
dictionary mapping the object names to the objects.
"""
return {obj.__name__.split(".")[-1]: obj for obj in context_list} | Converts a list of objects (functions, classes, or modules) to a
dictionary mapping the object names to the objects. | Below is the the instruction that describes the task:
### Input:
Converts a list of objects (functions, classes, or modules) to a
dictionary mapping the object names to the objects.
### Response:
def context_list2dict(context_list: typing.Sequence[typing.Any]) -> Context:
"""Converts a list of objects (functions, classes, or modules) to a
dictionary mapping the object names to the objects.
"""
return {obj.__name__.split(".")[-1]: obj for obj in context_list} |
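A short usage sketch, mapping a module, a function, and a class to their bare names:
import json
import math

ctx = context_list2dict([math, json.dumps, dict])
print(sorted(ctx))       # ['dict', 'dumps', 'math']
print(ctx["dumps"]({}))  # '{}'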
def tabLayout(self):
''' For all tabs, specify the number of buttons in a row '''
self.childWindow.column += 1
if self.childWindow.column > Layout.BUTTONS_NUMBER:
self.childWindow.column = 0
self.childWindow.row += 1 | For all tabs, specify the number of buttons in a row | Below is the the instruction that describes the task:
### Input:
For all tabs, specify the number of buttons in a row
### Response:
def tabLayout(self):
''' For all tabs, specify the number of buttons in a row '''
self.childWindow.column += 1
if self.childWindow.column > Layout.BUTTONS_NUMBER:
self.childWindow.column = 0
self.childWindow.row += 1 |
def save(glob_str, base_path=None, policy="live"):
""" Ensure all files matching *glob_str* are synced to wandb with the policy specified.
base_path: the base path to run the glob relative to
policy:
live: upload the file as it changes, overwriting the previous version
end: only upload file when the run ends
"""
global _saved_files
if run is None:
raise ValueError(
"You must call `wandb.init` before calling save")
if policy not in ("live", "end"):
raise ValueError(
'Only "live" and "end" policies are currently supported.')
if base_path is None:
base_path = os.path.dirname(glob_str)
if isinstance(glob_str, bytes):
glob_str = glob_str.decode('utf-8')
wandb_glob_str = os.path.relpath(glob_str, base_path)
if "../" in wandb_glob_str:
raise ValueError(
"globs can't walk above base_path")
if (glob_str, base_path, policy) in _saved_files:
return []
if glob_str.startswith("gs://") or glob_str.startswith("s3://"):
termlog(
"%s is a cloud storage url, can't save file to wandb." % glob_str)
run.send_message(
{"save_policy": {"glob": wandb_glob_str, "policy": policy}})
files = []
for path in glob.glob(glob_str):
file_name = os.path.relpath(path, base_path)
abs_path = os.path.abspath(path)
wandb_path = os.path.join(run.dir, file_name)
util.mkdir_exists_ok(os.path.dirname(wandb_path))
# We overwrite existing symlinks because namespaces can change in Tensorboard
if os.path.islink(wandb_path) and abs_path != os.readlink(wandb_path):
os.remove(wandb_path)
os.symlink(abs_path, wandb_path)
elif not os.path.exists(wandb_path):
os.symlink(abs_path, wandb_path)
files.append(wandb_path)
_saved_files.add((glob_str, base_path, policy))
return files | Ensure all files matching *glob_str* are synced to wandb with the policy specified.
base_path: the base path to run the glob relative to
policy:
live: upload the file as it changes, overwriting the previous version
end: only upload file when the run ends | Below is the the instruction that describes the task:
### Input:
Ensure all files matching *glob_str* are synced to wandb with the policy specified.
base_path: the base path to run the glob relative to
policy:
live: upload the file as it changes, overwriting the previous version
end: only upload file when the run ends
### Response:
def save(glob_str, base_path=None, policy="live"):
""" Ensure all files matching *glob_str* are synced to wandb with the policy specified.
base_path: the base path to run the glob relative to
policy:
live: upload the file as it changes, overwriting the previous version
end: only upload file when the run ends
"""
global _saved_files
if run is None:
raise ValueError(
"You must call `wandb.init` before calling save")
if policy not in ("live", "end"):
raise ValueError(
'Only "live" and "end" policies are currently supported.')
if base_path is None:
base_path = os.path.dirname(glob_str)
if isinstance(glob_str, bytes):
glob_str = glob_str.decode('utf-8')
wandb_glob_str = os.path.relpath(glob_str, base_path)
if "../" in wandb_glob_str:
raise ValueError(
"globs can't walk above base_path")
if (glob_str, base_path, policy) in _saved_files:
return []
if glob_str.startswith("gs://") or glob_str.startswith("s3://"):
termlog(
"%s is a cloud storage url, can't save file to wandb." % glob_str)
run.send_message(
{"save_policy": {"glob": wandb_glob_str, "policy": policy}})
files = []
for path in glob.glob(glob_str):
file_name = os.path.relpath(path, base_path)
abs_path = os.path.abspath(path)
wandb_path = os.path.join(run.dir, file_name)
util.mkdir_exists_ok(os.path.dirname(wandb_path))
# We overwrite existing symlinks because namespaces can change in Tensorboard
if os.path.islink(wandb_path) and abs_path != os.readlink(wandb_path):
os.remove(wandb_path)
os.symlink(abs_path, wandb_path)
elif not os.path.exists(wandb_path):
os.symlink(abs_path, wandb_path)
files.append(wandb_path)
_saved_files.add((glob_str, base_path, policy))
return files |
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
return spaces + ', '.join(v.pretty_str() for v in self.variables) | Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation. | Below is the the instruction that describes the task:
### Input:
Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
### Response:
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
return spaces + ', '.join(v.pretty_str() for v in self.variables) |
def time_since(self, mtype):
'''return the time since the last message of type mtype was received'''
if not mtype in self.messages:
return time.time() - self.start_time
return time.time() - self.messages[mtype]._timestamp | return the time since the last message of type mtype was received | Below is the the instruction that describes the task:
### Input:
return the time since the last message of type mtype was received
### Response:
def time_since(self, mtype):
'''return the time since the last message of type mtype was received'''
if not mtype in self.messages:
return time.time() - self.start_time
return time.time() - self.messages[mtype]._timestamp |
def compute_total(self, precision=None):
'''
Gets the total of the invoice with a defined decimal precision
@param precision: int Number of decimal places
@return: Decimal
'''
return quantize(sum([group.compute_total(precision) for group
in self.__groups]), places=precision) or ZERO | Gets the total of the invoice with a defined decimal precision
@param precision: int Number of decimal places
@return: Decimal | Below is the the instruction that describes the task:
### Input:
Gets the total of the invoice with a defined decimal precision
@param precision: int Number of decimal places
@return: Decimal
### Response:
def compute_total(self, precision=None):
'''
Gets the total of the invoice with a defined decimal precision
@param precision: int Number of decimal places
@return: Decimal
'''
return quantize(sum([group.compute_total(precision) for group
in self.__groups]), places=precision) or ZERO |
def update_status(self, helper, status):
""" update the helper """
if status:
self.status(status[0])
# if the status is ok, add it to the long output
if status[0] == 0:
self.add_long_output(status[1])
# if the status is not ok, add it to the summary
else:
self.add_summary(status[1]) | update the helper | Below is the the instruction that describes the task:
### Input:
update the helper
### Response:
def update_status(self, helper, status):
""" update the helper """
if status:
self.status(status[0])
# if the status is ok, add it to the long output
if status[0] == 0:
self.add_long_output(status[1])
# if the status is not ok, add it to the summary
else:
self.add_summary(status[1]) |
def get_classification_node(self, project, structure_group, path=None, depth=None):
"""GetClassificationNode.
Gets the classification node for a given node path.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int depth: Depth of children to fetch.
:rtype: :class:`<WorkItemClassificationNode> <azure.devops.v5_0.work_item_tracking.models.WorkItemClassificationNode>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
query_parameters = {}
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
response = self._send(http_method='GET',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemClassificationNode', response) | GetClassificationNode.
Gets the classification node for a given node path.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int depth: Depth of children to fetch.
:rtype: :class:`<WorkItemClassificationNode> <azure.devops.v5_0.work_item_tracking.models.WorkItemClassificationNode>` | Below is the the instruction that describes the task:
### Input:
GetClassificationNode.
Gets the classification node for a given node path.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int depth: Depth of children to fetch.
:rtype: :class:`<WorkItemClassificationNode> <azure.devops.v5_0.work_item_tracking.models.WorkItemClassificationNode>`
### Response:
def get_classification_node(self, project, structure_group, path=None, depth=None):
"""GetClassificationNode.
Gets the classification node for a given node path.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int depth: Depth of children to fetch.
:rtype: :class:`<WorkItemClassificationNode> <azure.devops.v5_0.work_item_tracking.models.WorkItemClassificationNode>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
query_parameters = {}
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
response = self._send(http_method='GET',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemClassificationNode', response) |
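A hedged usage sketch (not from the source): it assumes the azure-devops Python client with a Connection built from an organization URL and a personal access token; the organization, project, token, and area path below are placeholders.
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

# Placeholder organization URL and personal access token.
credentials = BasicAuthentication('', 'my-personal-access-token')
connection = Connection(base_url='https://dev.azure.com/my-org', creds=credentials)
wit_client = connection.clients.get_work_item_tracking_client()
# 'areas' walks the area-path tree; 'iterations' would walk sprints instead.
node = wit_client.get_classification_node('MyProject', 'areas', path='Team A', depth=2)
print(node.name, [child.name for child in (node.children or [])])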
def angle2d(self):
"""determine the angle of this point on a circle, measured in radians (presume values represent a Vector)"""
if self.x==0:
if self.y<0: return math.pi/2.0*3
elif self.y>0: return math.pi/2.0
else: return 0
elif self.y==0:
if self.x<0: return math.pi
#elif self.x>0: return 0
else: return 0
ans = math.atan( self.y / self.x )
if self.x > 0:
if self.y>0: return ans
else: return ans + math.pi*2.0
else: return ans + math.pi | determine the angle of this point on a circle, measured in radians (presume values represent a Vector) | Below is the the instruction that describes the task:
### Input:
determine the angle of this point on a circle, measured in radians (presume values represent a Vector)
### Response:
def angle2d(self):
"""determine the angle of this point on a circle, measured in radians (presume values represent a Vector)"""
if self.x==0:
if self.y<0: return math.pi/2.0*3
elif self.y>0: return math.pi/2.0
else: return 0
elif self.y==0:
if self.x<0: return math.pi
#elif self.x>0: return 0
else: return 0
ans = math.atan( self.y / self.x )
if self.x > 0:
if self.y>0: return ans
else: return ans + math.pi*2.0
else: return ans + math.pi |
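A self-contained sketch of the quadrant handling: the Point2D wrapper below is hypothetical, and only the angle2d logic is adapted from the snippet above.
import math

class Point2D(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    # angle2d logic adapted from the snippet above.
    def angle2d(self):
        if self.x == 0:
            if self.y < 0: return math.pi / 2.0 * 3
            elif self.y > 0: return math.pi / 2.0
            else: return 0
        elif self.y == 0:
            if self.x < 0: return math.pi
            else: return 0
        ans = math.atan(self.y / self.x)
        if self.x > 0:
            if self.y > 0: return ans
            else: return ans + math.pi * 2.0
        else: return ans + math.pi

print(Point2D(0, 1).angle2d())    # pi/2 (straight up)
print(Point2D(-1, 0).angle2d())   # pi (pointing left)
print(Point2D(1, -1).angle2d())   # 7*pi/4 (fourth quadrant)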
def d2BinaryRochedx2(r, D, q, F):
"""
Computes second derivative of the potential with respect to x.
@param r: relative radius vector (3 components)
@param D: instantaneous separation
@param q: mass ratio
@param F: synchronicity parameter
"""
return (2*r[0]*r[0]-r[1]*r[1]-r[2]*r[2])/(r[0]*r[0]+r[1]*r[1]+r[2]*r[2])**2.5 +\
q*(2*(r[0]-D)*(r[0]-D)-r[1]*r[1]-r[2]*r[2])/((r[0]-D)*(r[0]-D)+r[1]*r[1]+r[2]*r[2])**2.5 +\
F*F*(1+q) | Computes second derivative of the potential with respect to x.
@param r: relative radius vector (3 components)
@param D: instantaneous separation
@param q: mass ratio
@param F: synchronicity parameter | Below is the the instruction that describes the task:
### Input:
Computes second derivative of the potential with respect to x.
@param r: relative radius vector (3 components)
@param D: instantaneous separation
@param q: mass ratio
@param F: synchronicity parameter
### Response:
def d2BinaryRochedx2(r, D, q, F):
"""
Computes second derivative of the potential with respect to x.
@param r: relative radius vector (3 components)
@param D: instantaneous separation
@param q: mass ratio
@param F: synchronicity parameter
"""
return (2*r[0]*r[0]-r[1]*r[1]-r[2]*r[2])/(r[0]*r[0]+r[1]*r[1]+r[2]*r[2])**2.5 +\
q*(2*(r[0]-D)*(r[0]-D)-r[1]*r[1]-r[2]*r[2])/((r[0]-D)*(r[0]-D)+r[1]*r[1]+r[2]*r[2])**2.5 +\
F*F*(1+q) |
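For illustration only, a runnable evaluation at an arbitrary point; the numeric values of r, D, q and F below are made up, not taken from the source.
def d2BinaryRochedx2(r, D, q, F):
    # Same expression as above, reproduced so the snippet runs standalone.
    return (2*r[0]*r[0]-r[1]*r[1]-r[2]*r[2])/(r[0]*r[0]+r[1]*r[1]+r[2]*r[2])**2.5 + \
           q*(2*(r[0]-D)*(r[0]-D)-r[1]*r[1]-r[2]*r[2])/((r[0]-D)*(r[0]-D)+r[1]*r[1]+r[2]*r[2])**2.5 + \
           F*F*(1+q)

r = (0.3, 0.1, 0.05)   # relative radius vector (x, y, z)
print(d2BinaryRochedx2(r, D=1.0, q=0.5, F=1.0))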
def createCertRequest(pkey, digest="sha256"):
"""
Create a certificate request.
Arguments: pkey - The key to associate with the request
digest - Digestion method to use for signing, default is sha256
**name - The name of the subject of the request, possible
arguments are:
C - Country name
ST - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN - Common name
emailAddress - E-mail address
Returns: The certificate request in an X509Req object
"""
req = crypto.X509Req()
req.get_subject().C = "FR"
req.get_subject().ST = "IDF"
req.get_subject().L = "Paris"
req.get_subject().O = "RedHat" # noqa
req.get_subject().OU = "DCI"
req.get_subject().CN = "DCI-remoteCI"
req.set_pubkey(pkey)
req.sign(pkey, digest)
return req | Create a certificate request.
Arguments: pkey - The key to associate with the request
digest - Digestion method to use for signing, default is sha256
**name - The name of the subject of the request, possible
arguments are:
C - Country name
ST - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN - Common name
emailAddress - E-mail address
Returns: The certificate request in an X509Req object | Below is the the instruction that describes the task:
### Input:
Create a certificate request.
Arguments: pkey - The key to associate with the request
digest - Digestion method to use for signing, default is sha256
**name - The name of the subject of the request, possible
arguments are:
C - Country name
ST - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN - Common name
emailAddress - E-mail address
Returns: The certificate request in an X509Req object
### Response:
def createCertRequest(pkey, digest="sha256"):
"""
Create a certificate request.
Arguments: pkey - The key to associate with the request
digest - Digestion method to use for signing, default is sha256
**name - The name of the subject of the request, possible
arguments are:
C - Country name
ST - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN - Common name
emailAddress - E-mail address
Returns: The certificate request in an X509Req object
"""
req = crypto.X509Req()
req.get_subject().C = "FR"
req.get_subject().ST = "IDF"
req.get_subject().L = "Paris"
req.get_subject().O = "RedHat" # noqa
req.get_subject().OU = "DCI"
req.get_subject().CN = "DCI-remoteCI"
req.set_pubkey(pkey)
req.sign(pkey, digest)
return req |
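A hedged pyOpenSSL usage sketch: generate an RSA key, build the request with the function above, and dump it as PEM. The 2048-bit key size and the printing are illustrative assumptions.
from OpenSSL import crypto

# createCertRequest from the snippet above is assumed to be in scope.
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)   # key size chosen for the example
req = createCertRequest(pkey, digest="sha256")
print(crypto.dump_certificate_request(crypto.FILETYPE_PEM, req).decode())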
def get_int(byte_array, signed=True):
"""
Gets the specified integer from its byte array.
This should be used by this module alone, as it works with big endian.
:param byte_array: the byte array representing the integer.
:param signed: whether the number is signed or not.
:return: the integer representing the given byte array.
"""
return int.from_bytes(byte_array, byteorder='big', signed=signed) | Gets the specified integer from its byte array.
This should be used by this module alone, as it works with big endian.
:param byte_array: the byte array representing the integer.
:param signed: whether the number is signed or not.
:return: the integer representing the given byte array. | Below is the the instruction that describes the task:
### Input:
Gets the specified integer from its byte array.
This should be used by this module alone, as it works with big endian.
:param byte_array: the byte array representing the integer.
:param signed: whether the number is signed or not.
:return: the integer representing the given byte array.
### Response:
def get_int(byte_array, signed=True):
"""
Gets the specified integer from its byte array.
This should be used by this module alone, as it works with big endian.
:param byte_array: the byte array representing the integer.
:param signed: whether the number is signed or not.
:return: the integer representing the given byte array.
"""
return int.from_bytes(byte_array, byteorder='big', signed=signed) |
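A quick self-contained check of the big-endian and signed behaviour; the sample byte strings are illustrative.
def get_int(byte_array, signed=True):
    return int.from_bytes(byte_array, byteorder='big', signed=signed)

print(get_int(b'\x01\x00'))             # 256: big-endian interpretation
print(get_int(b'\xff'))                 # -1: single 0xff byte read as signed
print(get_int(b'\xff', signed=False))   # 255: same byte read as unsigned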
def make_elb_json(self):
"""Render the JSON template with arguments.
Returns:
str: Rendered ELB template.
"""
env = self.env
region = self.region
elb_settings = self.properties['elb']
LOG.debug('Block ELB Settings:\n%s', pformat(elb_settings))
health_settings = elb_settings['health']
elb_subnet_purpose = elb_settings.get('subnet_purpose', 'internal')
region_subnets = get_subnets(target='elb', purpose=elb_subnet_purpose, env=env, region=region)
region_subnets.pop("subnet_ids", None)
# CAVEAT: Setting the ELB to public, you must use a public subnet,
# otherwise AWS complains about missing IGW on subnet.
if elb_subnet_purpose == 'internal':
is_internal = 'true'
else:
is_internal = 'false'
target = elb_settings.get('target', 'HTTP:80/health')
health = splay_health(target)
listeners = format_listeners(elb_settings=elb_settings, env=self.env, region=region)
idle_timeout = elb_settings.get('idle_timeout', None)
access_log = elb_settings.get('access_log', {})
connection_draining_timeout = elb_settings.get('connection_draining_timeout', None)
security_groups = DEFAULT_ELB_SECURITYGROUPS[env]
security_groups.append(self.app)
security_groups.extend(self.properties['security_group']['elb_extras'])
security_groups = remove_duplicate_sg(security_groups)
template_kwargs = {
'access_log': json.dumps(access_log),
'app_name': self.app,
'availability_zones': json.dumps(region_subnets),
'connection_draining_timeout': json.dumps(connection_draining_timeout),
'env': env,
'hc_string': target,
'health_interval': health_settings['interval'],
'health_path': health.path,
'health_port': health.port,
'health_protocol': health.proto,
'health_timeout': health_settings['timeout'],
'healthy_threshold': health_settings['threshold'],
'idle_timeout': json.dumps(idle_timeout),
'isInternal': is_internal,
'listeners': json.dumps(listeners),
'region_zones': json.dumps(region_subnets[region]),
'region': region,
'security_groups': json.dumps(security_groups),
'subnet_type': elb_subnet_purpose,
'unhealthy_threshold': health_settings['unhealthy_threshold'],
'vpc_id': get_vpc_id(env, region),
}
rendered_template = get_template(template_file='infrastructure/elb_data.json.j2', **template_kwargs)
return rendered_template | Render the JSON template with arguments.
Returns:
str: Rendered ELB template. | Below is the the instruction that describes the task:
### Input:
Render the JSON template with arguments.
Returns:
str: Rendered ELB template.
### Response:
def make_elb_json(self):
"""Render the JSON template with arguments.
Returns:
str: Rendered ELB template.
"""
env = self.env
region = self.region
elb_settings = self.properties['elb']
LOG.debug('Block ELB Settings:\n%s', pformat(elb_settings))
health_settings = elb_settings['health']
elb_subnet_purpose = elb_settings.get('subnet_purpose', 'internal')
region_subnets = get_subnets(target='elb', purpose=elb_subnet_purpose, env=env, region=region)
region_subnets.pop("subnet_ids", None)
# CAVEAT: Setting the ELB to public, you must use a public subnet,
# otherwise AWS complains about missing IGW on subnet.
if elb_subnet_purpose == 'internal':
is_internal = 'true'
else:
is_internal = 'false'
target = elb_settings.get('target', 'HTTP:80/health')
health = splay_health(target)
listeners = format_listeners(elb_settings=elb_settings, env=self.env, region=region)
idle_timeout = elb_settings.get('idle_timeout', None)
access_log = elb_settings.get('access_log', {})
connection_draining_timeout = elb_settings.get('connection_draining_timeout', None)
security_groups = DEFAULT_ELB_SECURITYGROUPS[env]
security_groups.append(self.app)
security_groups.extend(self.properties['security_group']['elb_extras'])
security_groups = remove_duplicate_sg(security_groups)
template_kwargs = {
'access_log': json.dumps(access_log),
'app_name': self.app,
'availability_zones': json.dumps(region_subnets),
'connection_draining_timeout': json.dumps(connection_draining_timeout),
'env': env,
'hc_string': target,
'health_interval': health_settings['interval'],
'health_path': health.path,
'health_port': health.port,
'health_protocol': health.proto,
'health_timeout': health_settings['timeout'],
'healthy_threshold': health_settings['threshold'],
'idle_timeout': json.dumps(idle_timeout),
'isInternal': is_internal,
'listeners': json.dumps(listeners),
'region_zones': json.dumps(region_subnets[region]),
'region': region,
'security_groups': json.dumps(security_groups),
'subnet_type': elb_subnet_purpose,
'unhealthy_threshold': health_settings['unhealthy_threshold'],
'vpc_id': get_vpc_id(env, region),
}
rendered_template = get_template(template_file='infrastructure/elb_data.json.j2', **template_kwargs)
return rendered_template |
def serve(project, port, no_watch):
""" Serve application in development mode """
header("Serving application in development mode ... ")
print("- Project: %s " % project)
print("")
print("- Port: %s" % port)
print("")
module = import_module(project, True)
extra_files = []
if not no_watch:
extra_dirs = [CWD,]
extra_files = extra_dirs[:]
for extra_dir in extra_dirs:
for dirname, dirs, files in os.walk(extra_dir):
for filename in files:
filename = os.path.join(dirname, filename)
if os.path.isfile(filename):
extra_files.append(filename)
module.app.run(debug=True,
host='0.0.0.0',
port=port,
extra_files=extra_files) | Serve application in development mode | Below is the the instruction that describes the task:
### Input:
Serve application in development mode
### Response:
def serve(project, port, no_watch):
""" Serve application in development mode """
header("Serving application in development mode ... ")
print("- Project: %s " % project)
print("")
print("- Port: %s" % port)
print("")
module = import_module(project, True)
extra_files = []
if not no_watch:
extra_dirs = [CWD,]
extra_files = extra_dirs[:]
for extra_dir in extra_dirs:
for dirname, dirs, files in os.walk(extra_dir):
for filename in files:
filename = os.path.join(dirname, filename)
if os.path.isfile(filename):
extra_files.append(filename)
module.app.run(debug=True,
host='0.0.0.0',
port=port,
extra_files=extra_files) |
def init(options):
""" Initialize some defaults """
# Set matplotlib's backend so LIVVkit can plot to files.
import matplotlib
matplotlib.use('agg')
livvkit.output_dir = os.path.abspath(options.out_dir)
livvkit.index_dir = livvkit.output_dir
livvkit.verify = True if options.verify is not None else False
livvkit.validate = True if options.validate is not None else False
livvkit.publish = options.publish
# Get a list of bundles that provide model specific implementations
available_bundles = [mod for imp, mod, ispkg in pkgutil.iter_modules(bundles.__path__)]
if options.verify is not None:
livvkit.model_dir = os.path.normpath(options.verify[0])
livvkit.bench_dir = os.path.normpath(options.verify[1])
if not os.path.isdir(livvkit.model_dir):
print("")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(" UH OH!")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(" Your comparison directory does not exist; please check")
print(" the path:")
print("\n"+livvkit.model_dir+"\n\n")
sys.exit(1)
if not os.path.isdir(livvkit.bench_dir):
print("")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(" UH OH!")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(" Your benchmark directory does not exist; please check")
print(" the path:")
print("\n"+livvkit.bench_dir+"\n\n")
sys.exit(1)
livvkit.model_bundle = os.path.basename(livvkit.model_dir)
livvkit.bench_bundle = os.path.basename(livvkit.bench_dir)
if livvkit.model_bundle in available_bundles:
livvkit.numerics_model_config = os.path.join(
livvkit.bundle_dir, livvkit.model_bundle, "numerics.json")
livvkit.numerics_model_module = importlib.import_module(
".".join(["livvkit.bundles", livvkit.model_bundle, "numerics"]))
livvkit.verification_model_config = os.path.join(
livvkit.bundle_dir, livvkit.model_bundle, "verification.json")
livvkit.verification_model_module = importlib.import_module(
".".join(["livvkit.bundles", livvkit.model_bundle, "verification"]))
livvkit.performance_model_config = os.path.join(
livvkit.bundle_dir, livvkit.model_bundle, "performance.json")
# NOTE: This isn't used right now...
# livvkit.performance_model_module = importlib.import_module(
# ".".join(["livvkit.bundles", livvkit.model_bundle, "performance"]))
else:
# TODO: Should implement some error checking here...
livvkit.verify = False
if options.validate is not None:
livvkit.validation_model_configs = options.validate
if not (livvkit.verify or livvkit.validate) and not options.serve:
print("")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(" UH OH!")
print("----------------------------------------------------------")
print(" No verification or validation tests found/submitted!")
print("")
print(" Use either one or both of the --verify and")
print(" --validate options to run tests. For more ")
print(" information use the --help option, view the README")
print(" or check https://livvkit.github.io/Docs/")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print("")
sys.exit(1)
return options | Initialize some defaults | Below is the the instruction that describes the task:
### Input:
Initialize some defaults
### Response:
def init(options):
""" Initialize some defaults """
# Set matplotlib's backend so LIVVkit can plot to files.
import matplotlib
matplotlib.use('agg')
livvkit.output_dir = os.path.abspath(options.out_dir)
livvkit.index_dir = livvkit.output_dir
livvkit.verify = True if options.verify is not None else False
livvkit.validate = True if options.validate is not None else False
livvkit.publish = options.publish
# Get a list of bundles that provide model specific implementations
available_bundles = [mod for imp, mod, ispkg in pkgutil.iter_modules(bundles.__path__)]
if options.verify is not None:
livvkit.model_dir = os.path.normpath(options.verify[0])
livvkit.bench_dir = os.path.normpath(options.verify[1])
if not os.path.isdir(livvkit.model_dir):
print("")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(" UH OH!")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(" Your comparison directory does not exist; please check")
print(" the path:")
print("\n"+livvkit.model_dir+"\n\n")
sys.exit(1)
if not os.path.isdir(livvkit.bench_dir):
print("")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(" UH OH!")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(" Your benchmark directory does not exist; please check")
print(" the path:")
print("\n"+livvkit.bench_dir+"\n\n")
sys.exit(1)
livvkit.model_bundle = os.path.basename(livvkit.model_dir)
livvkit.bench_bundle = os.path.basename(livvkit.bench_dir)
if livvkit.model_bundle in available_bundles:
livvkit.numerics_model_config = os.path.join(
livvkit.bundle_dir, livvkit.model_bundle, "numerics.json")
livvkit.numerics_model_module = importlib.import_module(
".".join(["livvkit.bundles", livvkit.model_bundle, "numerics"]))
livvkit.verification_model_config = os.path.join(
livvkit.bundle_dir, livvkit.model_bundle, "verification.json")
livvkit.verification_model_module = importlib.import_module(
".".join(["livvkit.bundles", livvkit.model_bundle, "verification"]))
livvkit.performance_model_config = os.path.join(
livvkit.bundle_dir, livvkit.model_bundle, "performance.json")
# NOTE: This isn't used right now...
# livvkit.performance_model_module = importlib.import_module(
# ".".join(["livvkit.bundles", livvkit.model_bundle, "performance"]))
else:
# TODO: Should implement some error checking here...
livvkit.verify = False
if options.validate is not None:
livvkit.validation_model_configs = options.validate
if not (livvkit.verify or livvkit.validate) and not options.serve:
print("")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(" UH OH!")
print("----------------------------------------------------------")
print(" No verification or validation tests found/submitted!")
print("")
print(" Use either one or both of the --verify and")
print(" --validate options to run tests. For more ")
print(" information use the --help option, view the README")
print(" or check https://livvkit.github.io/Docs/")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print("")
sys.exit(1)
return options |
def update_shortlink(self, shortlink_id, callback_uri=None,
description=None):
"""Update existing shortlink registration
Arguments:
shortlink_id:
Shortlink id assigned by mCASH
"""
arguments = {'callback_uri': callback_uri,
'description': description}
return self.do_req('PUT',
self.merchant_api_base_url + '/shortlink/' +
shortlink_id + '/', arguments) | Update existing shortlink registration
Arguments:
shortlink_id:
Shortlink id assigned by mCASH | Below is the the instruction that describes the task:
### Input:
Update existing shortlink registration
Arguments:
shortlink_id:
Shortlink id assigned by mCASH
### Response:
def update_shortlink(self, shortlink_id, callback_uri=None,
description=None):
"""Update existing shortlink registration
Arguments:
shortlink_id:
Shortlink id assigned by mCASH
"""
arguments = {'callback_uri': callback_uri,
'description': description}
return self.do_req('PUT',
self.merchant_api_base_url + '/shortlink/' +
shortlink_id + '/', arguments) |
def create_participant(worker_id, hit_id, assignment_id, mode):
"""Create a participant.
This route will be hit very early on as any nodes the participant creates
will be defined in reference to the participant object.
You must specify the worker_id, hit_id, assignment_id and mode in the url.
"""
# check this worker hasn't already taken part
parts = models.Participant.query.filter_by(worker_id=worker_id).all()
if parts:
print "participant already exists!"
return Response(status=200)
# make the participant
participant = models.Participant(worker_id=worker_id,
assignment_id=assignment_id,
hit_id=hit_id,
mode=mode)
session.add(participant)
session.commit()
# make a psiturk participant too, for now
from psiturk.models import Participant as PsiturkParticipant
psiturk_participant = PsiturkParticipant(workerid=worker_id,
assignmentid=assignment_id,
hitid=hit_id)
session_psiturk.add(psiturk_participant)
session_psiturk.commit()
# return the data
return success_response(field="participant",
data=participant.__json__(),
request_type="participant post") | Create a participant.
This route will be hit very early on as any nodes the participant creates
will be defined in reference to the participant object.
You must specify the worker_id, hit_id, assignment_id and mode in the url. | Below is the the instruction that describes the task:
### Input:
Create a participant.
This route will be hit very early on as any nodes the participant creates
will be defined in reference to the participant object.
You must specify the worker_id, hit_id, assignment_id and mode in the url.
### Response:
def create_participant(worker_id, hit_id, assignment_id, mode):
"""Create a participant.
This route will be hit very early on as any nodes the participant creates
will be defined in reference to the participant object.
You must specify the worker_id, hit_id, assignment_id and mode in the url.
"""
# check this worker hasn't already taken part
parts = models.Participant.query.filter_by(worker_id=worker_id).all()
if parts:
print "participant already exists!"
return Response(status=200)
# make the participant
participant = models.Participant(worker_id=worker_id,
assignment_id=assignment_id,
hit_id=hit_id,
mode=mode)
session.add(participant)
session.commit()
# make a psiturk participant too, for now
from psiturk.models import Participant as PsiturkParticipant
psiturk_participant = PsiturkParticipant(workerid=worker_id,
assignmentid=assignment_id,
hitid=hit_id)
session_psiturk.add(psiturk_participant)
session_psiturk.commit()
# return the data
return success_response(field="participant",
data=participant.__json__(),
request_type="participant post") |
def _init_items(self):
"""
.---------------.
| Ctrl | Screen |
|------| |
| Code | |
| | |
"""
root = self._root
root.resizable(0, 0)
frm_control = tk.Frame(root, bg='#bbb')
frm_control.grid(column=0, row=0, padx=5, sticky=tk.NW)
frm_screen = tk.Frame(root, bg='#aaa')
frm_screen.grid(column=1, row=0)
frm_screenshot = tk.Frame(frm_control)
frm_screenshot.grid(column=0, row=0, sticky=tk.W)
tk.Label(frm_control, text='-'*30).grid(column=0, row=1, sticky=tk.EW)
frm_code = tk.Frame(frm_control)
frm_code.grid(column=0, row=2, sticky=tk.EW)
self._btn_refresh = tk.Button(frm_screenshot, textvariable=self._refresh_text, command=self._refresh_screen)
self._btn_refresh.grid(column=0, row=0, sticky=tk.W)
# tk.Button(frm_screenshot, text="Wakeup", command=self._device.wakeup).grid(column=0, row=1, sticky=tk.W)
tk.Button(frm_screenshot, text=u"保存选中区域", command=self._save_crop).grid(column=0, row=1, sticky=tk.W)
# tk.Button(frm_screenshot, text="保存截屏", command=self._save_screenshot).grid(column=0, row=2, sticky=tk.W)
frm_checkbtns = tk.Frame(frm_screenshot)
frm_checkbtns.grid(column=0, row=3, sticky=(tk.W, tk.E))
tk.Checkbutton(frm_checkbtns, text="Auto refresh", variable=self._auto_refresh_var, command=self._run_check_refresh).grid(column=0, row=0, sticky=tk.W)
tk.Checkbutton(frm_checkbtns, text="UI detect", variable=self._uiauto_detect_var).grid(column=1, row=0, sticky=tk.W)
frm_code_editor = tk.Frame(frm_code)
frm_code_editor.grid(column=0, row=0, sticky=(tk.W, tk.E))
tk.Label(frm_code_editor, text='Generated code').grid(column=0, row=0, sticky=tk.W)
tk.Entry(frm_code_editor, textvariable=self._gencode_text, width=30).grid(column=0, row=1, sticky=tk.W)
tk.Label(frm_code_editor, text='Save file name').grid(column=0, row=2, sticky=tk.W)
tk.Entry(frm_code_editor, textvariable=self._genfile_name, width=30).grid(column=0, row=3, sticky=tk.W)
tk.Label(frm_code_editor, text='Extension name').grid(column=0, row=4, sticky=tk.W)
tk.Entry(frm_code_editor, textvariable=self._fileext_text, width=30).grid(column=0, row=5, sticky=tk.W)
frm_code_btns = tk.Frame(frm_code)
frm_code_btns.grid(column=0, row=2, sticky=(tk.W, tk.E))
tk.Button(frm_code_btns, text='Run', command=self._run_code).grid(column=0, row=0, sticky=tk.W)
self._btn_runedit = tk.Button(frm_code_btns, state=tk.DISABLED, text='Insert and Run', command=self._run_and_insert)
self._btn_runedit.grid(column=1, row=0, sticky=tk.W)
tk.Button(frm_code, text='Select File', command=self._run_selectfile).grid(column=0, row=4, sticky=tk.W)
tk.Label(frm_code, textvariable=self._attachfile_text).grid(column=0, row=5, sticky=tk.W)
tk.Button(frm_code, text='Reset', command=self._reset).grid(column=0, row=6, sticky=tk.W)
self.canvas = tk.Canvas(frm_screen, bg="blue", bd=0, highlightthickness=0, relief='ridge')
self.canvas.grid(column=0, row=0, padx=10, pady=10)
self.canvas.bind("<Button-1>", self._stroke_start)
self.canvas.bind("<B1-Motion>", self._stroke_move)
self.canvas.bind("<B1-ButtonRelease>", self._stroke_done)
self.canvas.bind("<Motion>", self._mouse_move) | .---------------.
| Ctrl | Screen |
|------| |
| Code | |
| | | | Below is the the instruction that describes the task:
### Input:
.---------------.
| Ctrl | Screen |
|------| |
| Code | |
| | |
### Response:
def _init_items(self):
"""
.---------------.
| Ctrl | Screen |
|------| |
| Code | |
| | |
"""
root = self._root
root.resizable(0, 0)
frm_control = tk.Frame(root, bg='#bbb')
frm_control.grid(column=0, row=0, padx=5, sticky=tk.NW)
frm_screen = tk.Frame(root, bg='#aaa')
frm_screen.grid(column=1, row=0)
frm_screenshot = tk.Frame(frm_control)
frm_screenshot.grid(column=0, row=0, sticky=tk.W)
tk.Label(frm_control, text='-'*30).grid(column=0, row=1, sticky=tk.EW)
frm_code = tk.Frame(frm_control)
frm_code.grid(column=0, row=2, sticky=tk.EW)
self._btn_refresh = tk.Button(frm_screenshot, textvariable=self._refresh_text, command=self._refresh_screen)
self._btn_refresh.grid(column=0, row=0, sticky=tk.W)
# tk.Button(frm_screenshot, text="Wakeup", command=self._device.wakeup).grid(column=0, row=1, sticky=tk.W)
tk.Button(frm_screenshot, text=u"保存选中区域", command=self._save_crop).grid(column=0, row=1, sticky=tk.W)
# tk.Button(frm_screenshot, text="保存截屏", command=self._save_screenshot).grid(column=0, row=2, sticky=tk.W)
frm_checkbtns = tk.Frame(frm_screenshot)
frm_checkbtns.grid(column=0, row=3, sticky=(tk.W, tk.E))
tk.Checkbutton(frm_checkbtns, text="Auto refresh", variable=self._auto_refresh_var, command=self._run_check_refresh).grid(column=0, row=0, sticky=tk.W)
tk.Checkbutton(frm_checkbtns, text="UI detect", variable=self._uiauto_detect_var).grid(column=1, row=0, sticky=tk.W)
frm_code_editor = tk.Frame(frm_code)
frm_code_editor.grid(column=0, row=0, sticky=(tk.W, tk.E))
tk.Label(frm_code_editor, text='Generated code').grid(column=0, row=0, sticky=tk.W)
tk.Entry(frm_code_editor, textvariable=self._gencode_text, width=30).grid(column=0, row=1, sticky=tk.W)
tk.Label(frm_code_editor, text='Save file name').grid(column=0, row=2, sticky=tk.W)
tk.Entry(frm_code_editor, textvariable=self._genfile_name, width=30).grid(column=0, row=3, sticky=tk.W)
tk.Label(frm_code_editor, text='Extension name').grid(column=0, row=4, sticky=tk.W)
tk.Entry(frm_code_editor, textvariable=self._fileext_text, width=30).grid(column=0, row=5, sticky=tk.W)
frm_code_btns = tk.Frame(frm_code)
frm_code_btns.grid(column=0, row=2, sticky=(tk.W, tk.E))
tk.Button(frm_code_btns, text='Run', command=self._run_code).grid(column=0, row=0, sticky=tk.W)
self._btn_runedit = tk.Button(frm_code_btns, state=tk.DISABLED, text='Insert and Run', command=self._run_and_insert)
self._btn_runedit.grid(column=1, row=0, sticky=tk.W)
tk.Button(frm_code, text='Select File', command=self._run_selectfile).grid(column=0, row=4, sticky=tk.W)
tk.Label(frm_code, textvariable=self._attachfile_text).grid(column=0, row=5, sticky=tk.W)
tk.Button(frm_code, text='Reset', command=self._reset).grid(column=0, row=6, sticky=tk.W)
self.canvas = tk.Canvas(frm_screen, bg="blue", bd=0, highlightthickness=0, relief='ridge')
self.canvas.grid(column=0, row=0, padx=10, pady=10)
self.canvas.bind("<Button-1>", self._stroke_start)
self.canvas.bind("<B1-Motion>", self._stroke_move)
self.canvas.bind("<B1-ButtonRelease>", self._stroke_done)
self.canvas.bind("<Motion>", self._mouse_move) |
def cumulative_blame(self, branch='master', limit=None, skip=None, num_datapoints=None, committer=True,
ignore_globs=None, include_globs=None):
"""
Returns the blame at every revision of interest. Index is a datetime, column per committer, with number of lines
blamed to each committer at each timestamp as data.
:param branch: (optional, default 'master') the branch to work in
:param limit: (optional, default None), the maximum number of revisions to return, None for no limit
:param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping.
:param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used
:param committer: (optional, default=True) true if committer should be reported, false if author
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
revs = self.revs(branch=branch, limit=limit, skip=skip, num_datapoints=num_datapoints)
# get the commit history to stub out committers (hacky and slow)
if sys.version_info.major == 2:
committers = set([x.committer.name for x in self.repo.iter_commits(branch, max_count=sys.maxsize)])
else:
committers = {x.committer.name for x in self.repo.iter_commits(branch, max_count=sys.maxsize)}
for y in committers:
revs[y] = 0
if self.verbose:
print('Beginning processing for cumulative blame:')
# now populate that table with some actual values
for idx, row in revs.iterrows():
if self.verbose:
print('%s. [%s] getting blame for rev: %s' % (
str(idx), datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'), row.rev,))
blame = self.blame(rev=row.rev, committer=committer, ignore_globs=ignore_globs, include_globs=include_globs)
for y in committers:
try:
loc = blame.loc[y, 'loc']
revs.set_value(idx, y, loc)
except KeyError:
pass
del revs['rev']
revs['date'] = to_datetime(revs['date'].map(datetime.datetime.fromtimestamp))
revs.set_index(keys=['date'], drop=True, inplace=True)
revs = revs.fillna(0.0)
# drop 0 cols
for col in revs.columns.values:
if col != 'col':
if revs[col].sum() == 0:
del revs[col]
# drop 0 rows
keep_idx = []
committers = [x for x in revs.columns.values if x != 'date']
for idx, row in revs.iterrows():
if sum([row[x] for x in committers]) > 0:
keep_idx.append(idx)
revs = revs.ix[keep_idx]
return revs | Returns the blame at every revision of interest. Index is a datetime, column per committer, with number of lines
blamed to each committer at each timestamp as data.
:param branch: (optional, default 'master') the branch to work in
:param limit: (optional, default None), the maximum number of revisions to return, None for no limit
:param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping.
:param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used
:param committer: (optional, default=True) true if committer should be reported, false if author
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame | Below is the the instruction that describes the task:
### Input:
Returns the blame at every revision of interest. Index is a datetime, column per committer, with number of lines
blamed to each committer at each timestamp as data.
:param branch: (optional, default 'master') the branch to work in
:param limit: (optional, default None), the maximum number of revisions to return, None for no limit
:param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping.
:param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used
:param committer: (optional, default=True) true if committer should be reported, false if author
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
### Response:
def cumulative_blame(self, branch='master', limit=None, skip=None, num_datapoints=None, committer=True,
ignore_globs=None, include_globs=None):
"""
Returns the blame at every revision of interest. Index is a datetime, column per committer, with number of lines
blamed to each committer at each timestamp as data.
:param branch: (optional, default 'master') the branch to work in
:param limit: (optional, default None), the maximum number of revisions to return, None for no limit
:param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping.
:param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used
:param committer: (optional, default=True) true if committer should be reported, false if author
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
revs = self.revs(branch=branch, limit=limit, skip=skip, num_datapoints=num_datapoints)
# get the commit history to stub out committers (hacky and slow)
if sys.version_info.major == 2:
committers = set([x.committer.name for x in self.repo.iter_commits(branch, max_count=sys.maxsize)])
else:
committers = {x.committer.name for x in self.repo.iter_commits(branch, max_count=sys.maxsize)}
for y in committers:
revs[y] = 0
if self.verbose:
print('Beginning processing for cumulative blame:')
# now populate that table with some actual values
for idx, row in revs.iterrows():
if self.verbose:
print('%s. [%s] getting blame for rev: %s' % (
str(idx), datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'), row.rev,))
blame = self.blame(rev=row.rev, committer=committer, ignore_globs=ignore_globs, include_globs=include_globs)
for y in committers:
try:
loc = blame.loc[y, 'loc']
revs.set_value(idx, y, loc)
except KeyError:
pass
del revs['rev']
revs['date'] = to_datetime(revs['date'].map(datetime.datetime.fromtimestamp))
revs.set_index(keys=['date'], drop=True, inplace=True)
revs = revs.fillna(0.0)
# drop 0 cols
for col in revs.columns.values:
if col != 'col':
if revs[col].sum() == 0:
del revs[col]
# drop 0 rows
keep_idx = []
committers = [x for x in revs.columns.values if x != 'date']
for idx, row in revs.iterrows():
if sum([row[x] for x in committers]) > 0:
keep_idx.append(idx)
revs = revs.ix[keep_idx]
return revs |
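A hedged usage sketch assuming the Repository class exposed by git-pandas; the repository path and glob filter are placeholders.
from gitpandas import Repository   # assumed import from the git-pandas package

repo = Repository(working_dir='/path/to/some/git/repo')   # placeholder path
blame = repo.cumulative_blame(branch='master',
                              num_datapoints=20,        # 20 evenly spaced revisions
                              committer=True,
                              include_globs=['*.py'])   # only blame Python files
print(blame.tail())   # one column per committer, indexed by commit date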
def targets_for_class(self, target, classname):
"""Search which targets from `target`'s transitive dependencies contain `classname`."""
targets_with_class = set()
for target in target.closure():
for one_class in self._target_classes(target):
if classname in one_class:
targets_with_class.add(target)
break
return targets_with_class | Search which targets from `target`'s transitive dependencies contain `classname`. | Below is the the instruction that describes the task:
### Input:
Search which targets from `target`'s transitive dependencies contain `classname`.
### Response:
def targets_for_class(self, target, classname):
"""Search which targets from `target`'s transitive dependencies contain `classname`."""
targets_with_class = set()
for target in target.closure():
for one_class in self._target_classes(target):
if classname in one_class:
targets_with_class.add(target)
break
return targets_with_class |
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, maskMap):
"""
WMS Dataset File Read from File Method
"""
# Assign file extension attribute to file object
self.fileExtension = extension
if isinstance(maskMap, RasterMapFile) and maskMap.fileExtension == 'msk':
# Vars from mask map
columns = maskMap.columns
rows = maskMap.rows
upperLeftX = maskMap.west
upperLeftY = maskMap.north
# Derive the cell size (GSSHA cells are square, so it is the same in both directions)
cellSizeX = int(abs(maskMap.west - maskMap.east) / columns)
cellSizeY = -1 * cellSizeX
# Dictionary of keywords/cards and parse function names
KEYWORDS = {'DATASET': wdc.datasetHeaderChunk,
'TS': wdc.datasetScalarTimeStepChunk}
# Open file and read plain text into text field
with open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse header chunk first
header = wdc.datasetHeaderChunk('DATASET', chunks['DATASET'][0])
# Parse each time step chunk and aggregate
timeStepRasters = []
for chunk in chunks['TS']:
timeStepRasters.append(wdc.datasetScalarTimeStepChunk(chunk, columns, header['numberCells']))
# Set WMS dataset file properties
self.name = header['name']
self.numberCells = header['numberCells']
self.numberData = header['numberData']
self.objectID = header['objectID']
if header['type'] == 'BEGSCL':
self.objectType = header['objectType']
self.type = self.SCALAR_TYPE
elif header['type'] == 'BEGVEC':
self.vectorType = header['objectType']
self.type = self.VECTOR_TYPE
# Create WMS raster dataset files for each raster
for timeStep, timeStepRaster in enumerate(timeStepRasters):
# Create new WMS raster dataset file object
wmsRasterDatasetFile = WMSDatasetRaster()
# Set the wms dataset for this WMS raster dataset file
wmsRasterDatasetFile.wmsDataset = self
# Set the time step and timestamp and other properties
wmsRasterDatasetFile.iStatus = timeStepRaster['iStatus']
wmsRasterDatasetFile.timestamp = timeStepRaster['timestamp']
wmsRasterDatasetFile.timeStep = timeStep + 1
# If spatial is enabled create PostGIS rasters
if spatial:
# Process the values/cell array
wmsRasterDatasetFile.raster = RasterLoader.makeSingleBandWKBRaster(session,
columns, rows,
upperLeftX, upperLeftY,
cellSizeX, cellSizeY,
0, 0,
spatialReferenceID,
timeStepRaster['cellArray'])
# Otherwise, set the raster text properties
else:
wmsRasterDatasetFile.rasterText = timeStepRaster['rasterText']
# Add current file object to the session
session.add(self)
else:
log.warning("Could not read {0}. Mask Map must be supplied "
"to read WMS Datasets.".format(filename)) | WMS Dataset File Read from File Method | Below is the the instruction that describes the task:
### Input:
WMS Dataset File Read from File Method
### Response:
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, maskMap):
"""
WMS Dataset File Read from File Method
"""
# Assign file extension attribute to file object
self.fileExtension = extension
if isinstance(maskMap, RasterMapFile) and maskMap.fileExtension == 'msk':
# Vars from mask map
columns = maskMap.columns
rows = maskMap.rows
upperLeftX = maskMap.west
upperLeftY = maskMap.north
# Derive the cell size (GSSHA cells are square, so it is the same in both directions)
cellSizeX = int(abs(maskMap.west - maskMap.east) / columns)
cellSizeY = -1 * cellSizeX
# Dictionary of keywords/cards and parse function names
KEYWORDS = {'DATASET': wdc.datasetHeaderChunk,
'TS': wdc.datasetScalarTimeStepChunk}
# Open file and read plain text into text field
with open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse header chunk first
header = wdc.datasetHeaderChunk('DATASET', chunks['DATASET'][0])
# Parse each time step chunk and aggregate
timeStepRasters = []
for chunk in chunks['TS']:
timeStepRasters.append(wdc.datasetScalarTimeStepChunk(chunk, columns, header['numberCells']))
# Set WMS dataset file properties
self.name = header['name']
self.numberCells = header['numberCells']
self.numberData = header['numberData']
self.objectID = header['objectID']
if header['type'] == 'BEGSCL':
self.objectType = header['objectType']
self.type = self.SCALAR_TYPE
elif header['type'] == 'BEGVEC':
self.vectorType = header['objectType']
self.type = self.VECTOR_TYPE
# Create WMS raster dataset files for each raster
for timeStep, timeStepRaster in enumerate(timeStepRasters):
# Create new WMS raster dataset file object
wmsRasterDatasetFile = WMSDatasetRaster()
# Set the wms dataset for this WMS raster dataset file
wmsRasterDatasetFile.wmsDataset = self
# Set the time step and timestamp and other properties
wmsRasterDatasetFile.iStatus = timeStepRaster['iStatus']
wmsRasterDatasetFile.timestamp = timeStepRaster['timestamp']
wmsRasterDatasetFile.timeStep = timeStep + 1
# If spatial is enabled create PostGIS rasters
if spatial:
# Process the values/cell array
wmsRasterDatasetFile.raster = RasterLoader.makeSingleBandWKBRaster(session,
columns, rows,
upperLeftX, upperLeftY,
cellSizeX, cellSizeY,
0, 0,
spatialReferenceID,
timeStepRaster['cellArray'])
# Otherwise, set the raster text properties
else:
wmsRasterDatasetFile.rasterText = timeStepRaster['rasterText']
# Add current file object to the session
session.add(self)
else:
log.warning("Could not read {0}. Mask Map must be supplied "
"to read WMS Datasets.".format(filename)) |
def get_valid_har(har_data):
""" Return list of valid HAR entries.
:rtype: list
"""
new_entries = []
entries = har_data.get('log', {}).get('entries', [])
logger.debug('[+] Detected %(n)d entries in HAR', {'n': len(entries)})
for entry in entries:
url = entry['request']['url']
if not is_url_allowed(url):
continue
response = entry['response']['content']
if not is_valid_mimetype(response):
continue
if response.get('text'):
charset = get_charset(response)
response['text'] = base64.b64decode(response['text']).decode(charset)
else:
response['text'] = ''
new_entries.append(entry)
logger.debug('[+] Added URL: %(url)s ...', {'url': url[:100]})
return new_entries | Return list of valid HAR entries.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Return list of valid HAR entries.
:rtype: list
### Response:
def get_valid_har(har_data):
""" Return list of valid HAR entries.
:rtype: list
"""
new_entries = []
entries = har_data.get('log', {}).get('entries', [])
logger.debug('[+] Detected %(n)d entries in HAR', {'n': len(entries)})
for entry in entries:
url = entry['request']['url']
if not is_url_allowed(url):
continue
response = entry['response']['content']
if not is_valid_mimetype(response):
continue
if response.get('text'):
charset = get_charset(response)
response['text'] = base64.b64decode(response['text']).decode(charset)
else:
response['text'] = ''
new_entries.append(entry)
logger.debug('[+] Added URL: %(url)s ...', {'url': url[:100]})
return new_entries |
def warm(self):
"""
Returns a 2-tuple:
[0]: Number of images successfully pre-warmed
[1]: A list of paths on the storage class associated with the
VersatileImageField field being processed by `self` of
files that could not be successfully seeded.
"""
num_images_pre_warmed = 0
failed_to_create_image_path_list = []
total = self.queryset.count() * len(self.size_key_list)
for a, instance in enumerate(self.queryset, start=1):
for b, size_key in enumerate(self.size_key_list, start=1):
success, url_or_filepath = self._prewarm_versatileimagefield(
size_key,
reduce(getattr, self.image_attr.split("."), instance)
)
if success is True:
num_images_pre_warmed += 1
if self.verbose:
cli_progress_bar(num_images_pre_warmed, total)
else:
failed_to_create_image_path_list.append(url_or_filepath)
if a * b == total:
stdout.write('\n')
stdout.flush()
return (num_images_pre_warmed, failed_to_create_image_path_list) | Returns a 2-tuple:
[0]: Number of images successfully pre-warmed
[1]: A list of paths on the storage class associated with the
VersatileImageField field being processed by `self` of
files that could not be successfully seeded. | Below is the the instruction that describes the task:
### Input:
Returns a 2-tuple:
[0]: Number of images successfully pre-warmed
[1]: A list of paths on the storage class associated with the
VersatileImageField field being processed by `self` of
files that could not be successfully seeded.
### Response:
def warm(self):
"""
Returns a 2-tuple:
[0]: Number of images successfully pre-warmed
[1]: A list of paths on the storage class associated with the
VersatileImageField field being processed by `self` of
files that could not be successfully seeded.
"""
num_images_pre_warmed = 0
failed_to_create_image_path_list = []
total = self.queryset.count() * len(self.size_key_list)
for a, instance in enumerate(self.queryset, start=1):
for b, size_key in enumerate(self.size_key_list, start=1):
success, url_or_filepath = self._prewarm_versatileimagefield(
size_key,
reduce(getattr, self.image_attr.split("."), instance)
)
if success is True:
num_images_pre_warmed += 1
if self.verbose:
cli_progress_bar(num_images_pre_warmed, total)
else:
failed_to_create_image_path_list.append(url_or_filepath)
if a * b == total:
stdout.write('\n')
stdout.flush()
return (num_images_pre_warmed, failed_to_create_image_path_list) |
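A hedged sketch of how this warmer is typically driven in django-versatileimagefield; the model, field name, and rendition key set below are placeholders.
from versatileimagefield.image_warmer import VersatileImageFieldWarmer
from myapp.models import Person   # hypothetical model with a VersatileImageField 'headshot'

warmer = VersatileImageFieldWarmer(
    instance_or_queryset=Person.objects.all(),
    rendition_key_set='person_headshot',   # key set assumed to exist in settings
    image_attr='headshot',
    verbose=True,
)
num_created, failed_paths = warmer.warm()
print('warmed %d renditions, %d failures' % (num_created, len(failed_paths)))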
def not_all(*validation_func, # type: ValidationFuncs
**kwargs
):
# type: (...) -> Callable
"""
An alias for not_(and_(validators)).
:param validation_func: the base validation function or list of base validation functions to use. A callable, a
tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists
are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit
`_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead
of callables, they will be transformed to functions automatically.
:param catch_all: an optional boolean flag. By default, only Failure are silently caught and turned into
a 'ok' result. Turning this flag to True will assume that all exceptions should be caught and turned to a
'ok' result
:return:
"""
catch_all = pop_kwargs(kwargs, [('catch_all', False)])
# in case this is a list, create a 'and_' around it (otherwise and_ will return the validation function without
# wrapping it)
main_validator = and_(*validation_func)
return not_(main_validator, catch_all=catch_all) | An alias for not_(and_(validators)).
:param validation_func: the base validation function or list of base validation functions to use. A callable, a
tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists
are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit
`_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead
of callables, they will be transformed to functions automatically.
:param catch_all: an optional boolean flag. By default, only Failure are silently caught and turned into
a 'ok' result. Turning this flag to True will assume that all exceptions should be caught and turned to a
'ok' result
:return: | Below is the the instruction that describes the task:
### Input:
An alias for not_(and_(validators)).
:param validation_func: the base validation function or list of base validation functions to use. A callable, a
tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists
are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit
`_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead
of callables, they will be transformed to functions automatically.
:param catch_all: an optional boolean flag. By default, only Failure are silently caught and turned into
a 'ok' result. Turning this flag to True will assume that all exceptions should be caught and turned to a
'ok' result
:return:
### Response:
def not_all(*validation_func, # type: ValidationFuncs
**kwargs
):
# type: (...) -> Callable
"""
An alias for not_(and_(validators)).
:param validation_func: the base validation function or list of base validation functions to use. A callable, a
tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists
are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit
`_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead
of callables, they will be transformed to functions automatically.
:param catch_all: an optional boolean flag. By default, only Failure are silently caught and turned into
a 'ok' result. Turning this flag to True will assume that all exceptions should be caught and turned to a
'ok' result
:return:
"""
catch_all = pop_kwargs(kwargs, [('catch_all', False)])
# in case this is a list, create a 'and_' around it (otherwise and_ will return the validation function without
# wrapping it)
main_validator = and_(*validation_func)
return not_(main_validator, catch_all=catch_all) |
def escape_keywords(arr):
"""append _ to all python keywords"""
for i in arr:
i = i if i not in kwlist else i + '_'
i = i if '-' not in i else i.replace('-', '_')
yield i | append _ to all python keywords | Below is the the instruction that describes the task:
### Input:
append _ to all python keywords
### Response:
def escape_keywords(arr):
"""append _ to all python keywords"""
for i in arr:
i = i if i not in kwlist else i + '_'
i = i if '-' not in i else i.replace('-', '_')
yield i |
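A self-contained run; the input names are illustrative.
from keyword import kwlist

def escape_keywords(arr):
    for i in arr:
        i = i if i not in kwlist else i + '_'
        i = i if '-' not in i else i.replace('-', '_')
        yield i

print(list(escape_keywords(['class', 'max-width', 'total'])))
# -> ['class_', 'max_width', 'total']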
def print_flush(*args, **kwargs):
""" Like `print()`, except the file is `.flush()`ed afterwards. """
kwargs.setdefault('file', sys.stdout)
print(*args, **kwargs)
kwargs['file'].flush() | Like `print()`, except the file is `.flush()`ed afterwards. | Below is the the instruction that describes the task:
### Input:
Like `print()`, except the file is `.flush()`ed afterwards.
### Response:
def print_flush(*args, **kwargs):
""" Like `print()`, except the file is `.flush()`ed afterwards. """
kwargs.setdefault('file', sys.stdout)
print(*args, **kwargs)
kwargs['file'].flush() |
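A minimal self-contained example; the messages are illustrative. Flushing matters when stdout is piped or block-buffered.
import sys

def print_flush(*args, **kwargs):
    kwargs.setdefault('file', sys.stdout)
    print(*args, **kwargs)
    kwargs['file'].flush()

print_flush('progress: 42%', end='\r')    # visible immediately, even without a newline
print_flush('error!', file=sys.stderr)    # any file-like object with flush() works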
def format_output(data, fmt='table'): # pylint: disable=R0911,R0912
"""Given some data, will format it for console output.
:param data: One of: String, Table, FormattedItem, List, Tuple,
SequentialOutput
:param string fmt (optional): One of: table, raw, json, python
"""
if isinstance(data, utils.string_types):
if fmt in ('json', 'jsonraw'):
return json.dumps(data)
return data
# responds to .prettytable()
if hasattr(data, 'prettytable'):
if fmt == 'table':
return str(format_prettytable(data))
elif fmt == 'raw':
return str(format_no_tty(data))
# responds to .to_python()
if hasattr(data, 'to_python'):
if fmt == 'json':
return json.dumps(
format_output(data, fmt='python'),
indent=4,
cls=CLIJSONEncoder)
elif fmt == 'jsonraw':
return json.dumps(format_output(data, fmt='python'),
cls=CLIJSONEncoder)
elif fmt == 'python':
return data.to_python()
# responds to .formatted
if hasattr(data, 'formatted'):
if fmt == 'table':
return data.formatted
# responds to .separator
if hasattr(data, 'separator'):
output = [format_output(d, fmt=fmt) for d in data if d]
return str(SequentialOutput(data.separator, output))
# is iterable
if isinstance(data, list) or isinstance(data, tuple):
output = [format_output(d, fmt=fmt) for d in data]
if fmt == 'python':
return output
return format_output(listing(output, separator=os.linesep))
# fallback, convert this odd object to a string
return data | Given some data, will format it for console output.
:param data: One of: String, Table, FormattedItem, List, Tuple,
SequentialOutput
:param string fmt (optional): One of: table, raw, json, python | Below is the the instruction that describes the task:
### Input:
Given some data, will format it for console output.
:param data: One of: String, Table, FormattedItem, List, Tuple,
SequentialOutput
:param string fmt (optional): One of: table, raw, json, python
### Response:
def format_output(data, fmt='table'): # pylint: disable=R0911,R0912
"""Given some data, will format it for console output.
:param data: One of: String, Table, FormattedItem, List, Tuple,
SequentialOutput
:param string fmt (optional): One of: table, raw, json, python
"""
if isinstance(data, utils.string_types):
if fmt in ('json', 'jsonraw'):
return json.dumps(data)
return data
# responds to .prettytable()
if hasattr(data, 'prettytable'):
if fmt == 'table':
return str(format_prettytable(data))
elif fmt == 'raw':
return str(format_no_tty(data))
# responds to .to_python()
if hasattr(data, 'to_python'):
if fmt == 'json':
return json.dumps(
format_output(data, fmt='python'),
indent=4,
cls=CLIJSONEncoder)
elif fmt == 'jsonraw':
return json.dumps(format_output(data, fmt='python'),
cls=CLIJSONEncoder)
elif fmt == 'python':
return data.to_python()
# responds to .formatted
if hasattr(data, 'formatted'):
if fmt == 'table':
return data.formatted
# responds to .separator
if hasattr(data, 'separator'):
output = [format_output(d, fmt=fmt) for d in data if d]
return str(SequentialOutput(data.separator, output))
# is iterable
if isinstance(data, list) or isinstance(data, tuple):
output = [format_output(d, fmt=fmt) for d in data]
if fmt == 'python':
return output
return format_output(listing(output, separator=os.linesep))
# fallback, convert this odd object to a string
return data |
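A few illustrative calls, assuming the surrounding CLI formatting module and its helpers are importable; the results follow directly from the branches above:
format_output("1.2.3", fmt='table')        # plain strings pass straight through -> '1.2.3'
format_output("1.2.3", fmt='json')         # -> '"1.2.3"'
format_output(["a", "b"], fmt='python')    # lists are formatted element-wise -> ['a', 'b']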
def headers_to_use(self):
'''
Defines features of columns to be used in multiqc table
'''
headers = OrderedDict()
headers['total.reads'] = {
'title': 'Total reads',
'description': 'Total number of reads',
'format': '{:,.0f}',
'scale': 'Greys'
}
headers['total.gigabases'] = {
'title': 'Total bases (GB)',
'description': 'Total bases',
'format': '{:,.2f}',
'scale': 'Blues'
}
headers['N50.length'] = {
'title': 'Reads N50',
'description': 'Minimum read length needed to cover 50% of all reads',
'format': '{:,.0f}',
'scale': 'Purples',
}
headers['mean.q'] = {
'title': 'Mean Q score',
'description': 'Mean quality of reads',
'min': 0,
'max': 15,
'format': '{:,.1f}',
'hidden': True,
'scale': 'Greens',
}
headers['median.q'] = {
'title': 'Median Q score',
'description': 'Median quality of reads',
'min': 0,
'max': 15,
'format': '{:,.1f}',
'scale': 'Greens',
}
headers['mean.length'] = {
'title': 'Mean length (bp)',
'description': 'Mean read length',
'format': '{:,.0f}',
'hidden': True,
'scale': 'Blues',
}
headers['median.length'] = {
'title': 'Median length (bp)',
'description': 'Median read length',
'format': '{:,.0f}',
'scale': 'Blues',
}
# Add row ID to avoid duplicates
for k in headers:
h_id = re.sub('[^0-9a-zA-Z]+', '_', headers[k]['title'])
headers[k]['rid'] = "rid_{}".format(h_id)
return headers | Defines features of columns to be used in multiqc table | Below is the instruction that describes the task:
### Input:
Defines features of columns to be used in multiqc table
### Response:
def headers_to_use(self):
'''
Defines features of columns to be used in multiqc table
'''
headers = OrderedDict()
headers['total.reads'] = {
'title': 'Total reads',
'description': 'Total number of reads',
'format': '{:,.0f}',
'scale': 'Greys'
}
headers['total.gigabases'] = {
'title': 'Total bases (GB)',
'description': 'Total bases',
'format': '{:,.2f}',
'scale': 'Blues'
}
headers['N50.length'] = {
'title': 'Reads N50',
'description': 'Minimum read length needed to cover 50% of all reads',
'format': '{:,.0f}',
'scale': 'Purples',
}
headers['mean.q'] = {
'title': 'Mean Q score',
'description': 'Mean quality of reads',
'min': 0,
'max': 15,
'format': '{:,.1f}',
'hidden': True,
'scale': 'Greens',
}
headers['median.q'] = {
'title': 'Median Q score',
'description': 'Median quality of reads',
'min': 0,
'max': 15,
'format': '{:,.1f}',
'scale': 'Greens',
}
headers['mean.length'] = {
'title': 'Mean length (bp)',
'description': 'Mean read length',
'format': '{:,.0f}',
'hidden': True,
'scale': 'Blues',
}
headers['median.length'] = {
'title': 'Median length (bp)',
'description': 'Median read length',
'format': '{:,.0f}',
'scale': 'Blues',
}
# Add row ID to avoid duplicates
for k in headers:
h_id = re.sub('[^0-9a-zA-Z]+', '_', headers[k]['title'])
headers[k]['rid'] = "rid_{}".format(h_id)
return headers |
def is_iterable(obj):
"""
Are we being asked to look up a list of things, instead of a single thing?
We check for the `__iter__` attribute so that this can cover types that
don't have to be known by this module, such as NumPy arrays.
Strings, however, should be considered as atomic values to look up, not
iterables. The same goes for tuples, since they are immutable and therefore
valid entries.
We don't need to check for the Python 2 `unicode` type, because it doesn't
have an `__iter__` attribute anyway.
"""
return (
hasattr(obj, "__iter__")
and not isinstance(obj, str)
and not isinstance(obj, tuple)
) | Are we being asked to look up a list of things, instead of a single thing?
We check for the `__iter__` attribute so that this can cover types that
don't have to be known by this module, such as NumPy arrays.
Strings, however, should be considered as atomic values to look up, not
iterables. The same goes for tuples, since they are immutable and therefore
valid entries.
We don't need to check for the Python 2 `unicode` type, because it doesn't
have an `__iter__` attribute anyway. | Below is the instruction that describes the task:
### Input:
Are we being asked to look up a list of things, instead of a single thing?
We check for the `__iter__` attribute so that this can cover types that
don't have to be known by this module, such as NumPy arrays.
Strings, however, should be considered as atomic values to look up, not
iterables. The same goes for tuples, since they are immutable and therefore
valid entries.
We don't need to check for the Python 2 `unicode` type, because it doesn't
have an `__iter__` attribute anyway.
### Response:
def is_iterable(obj):
"""
Are we being asked to look up a list of things, instead of a single thing?
We check for the `__iter__` attribute so that this can cover types that
don't have to be known by this module, such as NumPy arrays.
Strings, however, should be considered as atomic values to look up, not
iterables. The same goes for tuples, since they are immutable and therefore
valid entries.
We don't need to check for the Python 2 `unicode` type, because it doesn't
have an `__iter__` attribute anyway.
"""
return (
hasattr(obj, "__iter__")
and not isinstance(obj, str)
and not isinstance(obj, tuple)
) |
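A few self-contained examples of the rules encoded above:
is_iterable([1, 2, 3])     # True
is_iterable({"a", "b"})    # True  (any object with __iter__)
is_iterable("abc")         # False (strings are atomic lookup values)
is_iterable(("a", "b"))    # False (tuples are valid entries)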
def p_color(self, p):
""" color : css_color
| css_color t_ws
"""
try:
p[0] = Color().fmt(p[1])
if len(p) > 2:
p[0] = [p[0], p[2]]
except ValueError:
self.handle_error('Illegal color value `%s`' % p[1], p.lineno(1),
'W')
p[0] = p[1] | color : css_color
| css_color t_ws | Below is the instruction that describes the task:
### Input:
color : css_color
| css_color t_ws
### Response:
def p_color(self, p):
""" color : css_color
| css_color t_ws
"""
try:
p[0] = Color().fmt(p[1])
if len(p) > 2:
p[0] = [p[0], p[2]]
except ValueError:
self.handle_error('Illegal color value `%s`' % p[1], p.lineno(1),
'W')
p[0] = p[1] |
def make_node(cls, lineno, s, lower, upper):
""" Creates a node for a string slice. S is the string expression Tree.
Lower and upper are the bounds, if lower & upper are constants, and
s is also constant, then a string constant is returned.
If lower > upper, an empty string is returned.
"""
if lower is None or upper is None or s is None:
return None
if not check_type(lineno, Type.string, s):
return None
lo = up = None
base = NUMBER(api.config.OPTIONS.string_base.value, lineno=lineno)
lower = TYPECAST.make_node(gl.SYMBOL_TABLE.basic_types[gl.STR_INDEX_TYPE],
BINARY.make_node('MINUS', lower, base, lineno=lineno,
func=lambda x, y: x - y), lineno)
upper = TYPECAST.make_node(gl.SYMBOL_TABLE.basic_types[gl.STR_INDEX_TYPE],
BINARY.make_node('MINUS', upper, base, lineno=lineno,
func=lambda x, y: x - y), lineno)
if lower is None or upper is None:
return None
if is_number(lower):
lo = lower.value
if lo < gl.MIN_STRSLICE_IDX:
lower.value = lo = gl.MIN_STRSLICE_IDX
if is_number(upper):
up = upper.value
if up > gl.MAX_STRSLICE_IDX:
upper.value = up = gl.MAX_STRSLICE_IDX
if is_number(lower, upper):
if lo > up:
return STRING('', lineno)
if s.token == 'STRING': # A constant string? Recalculate it now
up += 1
st = s.value.ljust(up) # Procrustean filled (right)
return STRING(st[lo:up], lineno)
# a$(0 TO INF.) = a$
if lo == gl.MIN_STRSLICE_IDX and up == gl.MAX_STRSLICE_IDX:
return s
return cls(s, lower, upper, lineno) | Creates a node for a string slice. S is the string expression Tree.
Lower and upper are the bounds, if lower & upper are constants, and
s is also constant, then a string constant is returned.
If lower > upper, an empty string is returned. | Below is the instruction that describes the task:
### Input:
Creates a node for a string slice. S is the string expression Tree.
Lower and upper are the bounds, if lower & upper are constants, and
s is also constant, then a string constant is returned.
If lower > upper, an empty string is returned.
### Response:
def make_node(cls, lineno, s, lower, upper):
""" Creates a node for a string slice. S is the string expression Tree.
Lower and upper are the bounds, if lower & upper are constants, and
s is also constant, then a string constant is returned.
If lower > upper, an empty string is returned.
"""
if lower is None or upper is None or s is None:
return None
if not check_type(lineno, Type.string, s):
return None
lo = up = None
base = NUMBER(api.config.OPTIONS.string_base.value, lineno=lineno)
lower = TYPECAST.make_node(gl.SYMBOL_TABLE.basic_types[gl.STR_INDEX_TYPE],
BINARY.make_node('MINUS', lower, base, lineno=lineno,
func=lambda x, y: x - y), lineno)
upper = TYPECAST.make_node(gl.SYMBOL_TABLE.basic_types[gl.STR_INDEX_TYPE],
BINARY.make_node('MINUS', upper, base, lineno=lineno,
func=lambda x, y: x - y), lineno)
if lower is None or upper is None:
return None
if is_number(lower):
lo = lower.value
if lo < gl.MIN_STRSLICE_IDX:
lower.value = lo = gl.MIN_STRSLICE_IDX
if is_number(upper):
up = upper.value
if up > gl.MAX_STRSLICE_IDX:
upper.value = up = gl.MAX_STRSLICE_IDX
if is_number(lower, upper):
if lo > up:
return STRING('', lineno)
if s.token == 'STRING': # A constant string? Recalculate it now
up += 1
st = s.value.ljust(up) # Procrustean filled (right)
return STRING(st[lo:up], lineno)
# a$(0 TO INF.) = a$
if lo == gl.MIN_STRSLICE_IDX and up == gl.MAX_STRSLICE_IDX:
return s
return cls(s, lower, upper, lineno) |
def _run_cromwell(args):
"""Run CWL with Cromwell.
"""
main_file, json_file, project_name = _get_main_and_json(args.directory)
work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "cromwell_work"))
final_dir = utils.safe_makedir(os.path.join(work_dir, "final"))
if args.no_container:
_remove_bcbiovm_path()
log_file = os.path.join(work_dir, "%s-cromwell.log" % project_name)
metadata_file = os.path.join(work_dir, "%s-metadata.json" % project_name)
option_file = os.path.join(work_dir, "%s-options.json" % project_name)
cromwell_opts = {"final_workflow_outputs_dir": final_dir,
"default_runtime_attributes": {"bootDiskSizeGb": 20}}
with open(option_file, "w") as out_handle:
json.dump(cromwell_opts, out_handle)
cmd = ["cromwell", "-Xms1g", "-Xmx%s" % _estimate_runner_memory(json_file),
"run", "--type", "CWL",
"-Dconfig.file=%s" % hpc.create_cromwell_config(args, work_dir, json_file)]
cmd += hpc.args_to_cromwell_cl(args)
cmd += ["--metadata-output", metadata_file, "--options", option_file,
"--inputs", json_file, main_file]
with utils.chdir(work_dir):
_run_tool(cmd, not args.no_container, work_dir, log_file)
if metadata_file and utils.file_exists(metadata_file):
with open(metadata_file) as in_handle:
metadata = json.load(in_handle)
if metadata["status"] == "Failed":
_cromwell_debug(metadata)
sys.exit(1)
else:
_cromwell_move_outputs(metadata, final_dir) | Run CWL with Cromwell. | Below is the instruction that describes the task:
### Input:
Run CWL with Cromwell.
### Response:
def _run_cromwell(args):
"""Run CWL with Cromwell.
"""
main_file, json_file, project_name = _get_main_and_json(args.directory)
work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "cromwell_work"))
final_dir = utils.safe_makedir(os.path.join(work_dir, "final"))
if args.no_container:
_remove_bcbiovm_path()
log_file = os.path.join(work_dir, "%s-cromwell.log" % project_name)
metadata_file = os.path.join(work_dir, "%s-metadata.json" % project_name)
option_file = os.path.join(work_dir, "%s-options.json" % project_name)
cromwell_opts = {"final_workflow_outputs_dir": final_dir,
"default_runtime_attributes": {"bootDiskSizeGb": 20}}
with open(option_file, "w") as out_handle:
json.dump(cromwell_opts, out_handle)
cmd = ["cromwell", "-Xms1g", "-Xmx%s" % _estimate_runner_memory(json_file),
"run", "--type", "CWL",
"-Dconfig.file=%s" % hpc.create_cromwell_config(args, work_dir, json_file)]
cmd += hpc.args_to_cromwell_cl(args)
cmd += ["--metadata-output", metadata_file, "--options", option_file,
"--inputs", json_file, main_file]
with utils.chdir(work_dir):
_run_tool(cmd, not args.no_container, work_dir, log_file)
if metadata_file and utils.file_exists(metadata_file):
with open(metadata_file) as in_handle:
metadata = json.load(in_handle)
if metadata["status"] == "Failed":
_cromwell_debug(metadata)
sys.exit(1)
else:
_cromwell_move_outputs(metadata, final_dir) |
def conversations(self, getsrcdst=None, draw = True, **kargs):
"""Graphes a conversations between sources and destinations and display it
(using graphviz)
getsrcdst: a function that takes an element of the list and return the source and dest
by defaults, return source and destination IP
if networkx library is available returns a DiGraph, or draws it if draw = True otherwise graphviz is used
format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
target: output filename. If None, matplotlib is used to display
prog: which graphviz program to use"""
if getsrcdst is None:
getsrcdst = lambda x:(x['IP'].src, x['IP'].dst)
conv = {}
for p in self.res:
p = self._elt2pkt(p)
try:
c = getsrcdst(p)
except:
#XXX warning()
continue
conv[c] = conv.get(c,0)+1
if NETWORKX: # networkx is available
gr = nx.DiGraph()
for s,d in conv:
if s not in gr:
gr.add_node(s)
if d not in gr:
gr.add_node(d)
gr.add_edge(s, d)
if draw:
return do_graph(gr, **kargs)
else:
return gr
else:
gr = 'digraph "conv" {\n'
for s,d in conv:
gr += '\t "%s" -> "%s"\n' % (s,d)
gr += "}\n"
return do_graph(gr, **kargs) | Graphes a conversations between sources and destinations and display it
(using graphviz)
getsrcdst: a function that takes an element of the list and return the source and dest
by defaults, return source and destination IP
if networkx library is available returns a DiGraph, or draws it if draw = True otherwise graphviz is used
format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
target: output filename. If None, matplotlib is used to display
prog: which graphviz program to use | Below is the instruction that describes the task:
### Input:
Graphes a conversations between sources and destinations and display it
(using graphviz)
getsrcdst: a function that takes an element of the list and return the source and dest
by defaults, return source and destination IP
if networkx library is available returns a DiGraph, or draws it if draw = True otherwise graphviz is used
format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
target: output filename. If None, matplotlib is used to display
prog: which graphviz program to use
### Response:
def conversations(self, getsrcdst=None, draw = True, **kargs):
"""Graphes a conversations between sources and destinations and display it
(using graphviz)
getsrcdst: a function that takes an element of the list and return the source and dest
by defaults, return source and destination IP
if networkx library is available returns a DiGraph, or draws it if draw = True otherwise graphviz is used
format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
target: output filename. If None, matplotlib is used to display
prog: which graphviz program to use"""
if getsrcdst is None:
getsrcdst = lambda x:(x['IP'].src, x['IP'].dst)
conv = {}
for p in self.res:
p = self._elt2pkt(p)
try:
c = getsrcdst(p)
except:
#XXX warning()
continue
conv[c] = conv.get(c,0)+1
if NETWORKX: # networkx is available
gr = nx.DiGraph()
for s,d in conv:
if s not in gr:
gr.add_node(s)
if d not in gr:
gr.add_node(d)
gr.add_edge(s, d)
if draw:
return do_graph(gr, **kargs)
else:
return gr
else:
gr = 'digraph "conv" {\n'
for s,d in conv:
gr += '\t "%s" -> "%s"\n' % (s,d)
gr += "}\n"
return do_graph(gr, **kargs) |
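A hedged usage sketch, assuming a scapy-style PacketList and a hypothetical capture file; `target` is forwarded to do_graph as documented in the docstring above:
pkts = rdpcap("capture.pcap")              # hypothetical capture file
pkts.conversations(target="conv.svg")      # render the source/destination graph to a file
pkts.conversations(draw=False)             # returns a networkx DiGraph if networkx is installed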
def post_event_unpublish(self, id, **data):
"""
POST /events/:id/unpublish/
Unpublishes an event. In order for a free event to be unpublished, it must not have any pending or completed orders,
even if the event is in the past. In order for a paid event to be unpublished, it must not have any pending or completed
orders, unless the event has been completed and paid out. Returns a boolean indicating success or failure of the
unpublish.
"""
return self.post("/events/{0}/unpublish/".format(id), data=data) | POST /events/:id/unpublish/
Unpublishes an event. In order for a free event to be unpublished, it must not have any pending or completed orders,
even if the event is in the past. In order for a paid event to be unpublished, it must not have any pending or completed
orders, unless the event has been completed and paid out. Returns a boolean indicating success or failure of the
unpublish. | Below is the instruction that describes the task:
### Input:
POST /events/:id/unpublish/
Unpublishes an event. In order for a free event to be unpublished, it must not have any pending or completed orders,
even if the event is in the past. In order for a paid event to be unpublished, it must not have any pending or completed
orders, unless the event has been completed and paid out. Returns a boolean indicating success or failure of the
unpublish.
### Response:
def post_event_unpublish(self, id, **data):
"""
POST /events/:id/unpublish/
Unpublishes an event. In order for a free event to be unpublished, it must not have any pending or completed orders,
even if the event is in the past. In order for a paid event to be unpublished, it must not have any pending or completed
orders, unless the event has been completed and paid out. Returns a boolean indicating success or failure of the
unpublish.
"""
return self.post("/events/{0}/unpublish/".format(id), data=data) |
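A hedged example call, assuming `client` is an already-authenticated Eventbrite API client exposing this access method; the event id is only illustrative:
result = client.post_event_unpublish('1234567890')   # hypothetical event id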
def save_raw(self):
"""
Save the model to a in memory buffer represetation
Returns
-------
a in memory buffer represetation of the model
"""
length = ctypes.c_ulong()
cptr = ctypes.POINTER(ctypes.c_char)()
_check_call(_LIB.XGBoosterGetModelRaw(self.handle,
ctypes.byref(length),
ctypes.byref(cptr)))
return ctypes2buffer(cptr, length.value) | Save the model to a in memory buffer represetation
Returns
-------
a in memory buffer represetation of the model | Below is the instruction that describes the task:
### Input:
Save the model to a in memory buffer represetation
Returns
-------
a in memory buffer represetation of the model
### Response:
def save_raw(self):
"""
Save the model to a in memory buffer represetation
Returns
-------
a in memory buffer represetation of the model
"""
length = ctypes.c_ulong()
cptr = ctypes.POINTER(ctypes.c_char)()
_check_call(_LIB.XGBoosterGetModelRaw(self.handle,
ctypes.byref(length),
ctypes.byref(cptr)))
return ctypes2buffer(cptr, length.value) |
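An illustrative round trip, assuming `bst` is a trained xgboost Booster; the method returns an in-memory buffer that can be written to disk:
raw = bst.save_raw()
with open("model.bin", "wb") as fh:   # hypothetical file name
    fh.write(raw)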
def _acceptpeak(peak, amp, definitive_peaks, spk1, rr_buffer):
"""
Private function intended to insert a new RR interval in the buffer.
----------
Parameters
----------
peak : int
Sample where the peak under analysis is located.
amp : int
Amplitude of the peak under analysis.
definitive_peaks : list
List with the definitive_peaks stored until the present instant.
spk1 : float
Actual value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named signal peak).
rr_buffer : list
Data structure that stores the duration of the last eight RR intervals.
Returns
-------
definitive_peaks_out : list
Definitive peaks list.
spk1 : float
Updated value of SPK1 parameter.
rr_buffer : list
Buffer after appending a new RR interval and excluding the oldest one.
"""
definitive_peaks_out = definitive_peaks
definitive_peaks_out = numpy.append(definitive_peaks_out, peak)
spk1 = 0.125 * amp + 0.875 * spk1 # spk1 is the running estimate of the signal peak
if len(definitive_peaks_out) > 1:
rr_buffer.pop(0)
rr_buffer += [definitive_peaks_out[-1] - definitive_peaks_out[-2]]
return numpy.array(definitive_peaks_out), spk1, rr_buffer | Private function intended to insert a new RR interval in the buffer.
----------
Parameters
----------
peak : int
Sample where the peak under analysis is located.
amp : int
Amplitude of the peak under analysis.
definitive_peaks : list
List with the definitive_peaks stored until the present instant.
spk1 : float
Actual value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named signal peak).
rr_buffer : list
Data structure that stores the duration of the last eight RR intervals.
Returns
-------
definitive_peaks_out : list
Definitive peaks list.
spk1 : float
Updated value of SPK1 parameter.
rr_buffer : list
Buffer after appending a new RR interval and excluding the oldest one. | Below is the instruction that describes the task:
### Input:
Private function intended to insert a new RR interval in the buffer.
----------
Parameters
----------
peak : int
Sample where the peak under analysis is located.
amp : int
Amplitude of the peak under analysis.
definitive_peaks : list
List with the definitive_peaks stored until the present instant.
spk1 : float
Actual value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named signal peak).
rr_buffer : list
Data structure that stores the duration of the last eight RR intervals.
Returns
-------
definitive_peaks_out : list
Definitive peaks list.
spk1 : float
Updated value of SPK1 parameter.
rr_buffer : list
Buffer after appending a new RR interval and excluding the oldest one.
### Response:
def _acceptpeak(peak, amp, definitive_peaks, spk1, rr_buffer):
"""
Private function intended to insert a new RR interval in the buffer.
----------
Parameters
----------
peak : int
Sample where the peak under analysis is located.
amp : int
Amplitude of the peak under analysis.
definitive_peaks : list
List with the definitive_peaks stored until the present instant.
spk1 : float
Actual value of SPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm
(named signal peak).
rr_buffer : list
Data structure that stores the duration of the last eight RR intervals.
Returns
-------
definitive_peaks_out : list
Definitive peaks list.
spk1 : float
Updated value of SPK1 parameter.
rr_buffer : list
Buffer after appending a new RR interval and excluding the oldest one.
"""
definitive_peaks_out = definitive_peaks
definitive_peaks_out = numpy.append(definitive_peaks_out, peak)
spk1 = 0.125 * amp + 0.875 * spk1 # spk1 is the running estimate of the signal peak
if len(definitive_peaks_out) > 1:
rr_buffer.pop(0)
rr_buffer += [definitive_peaks_out[-1] - definitive_peaks_out[-2]]
return numpy.array(definitive_peaks_out), spk1, rr_buffer |
def get_notebook_object(self, notebook_id):
"""Get the NotebookNode representation of a notebook by notebook_id."""
path = self.find_path(notebook_id)
if not os.path.isfile(path):
raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
info = os.stat(path)
last_modified = datetime.datetime.utcfromtimestamp(info.st_mtime)
with open(path,'r') as f:
s = f.read()
try:
# v1 and v2 and json in the .ipynb files.
nb = current.reads(s, u'json')
except:
raise web.HTTPError(500, u'Unreadable JSON notebook.')
# Always use the filename as the notebook name.
nb.metadata.name = os.path.splitext(os.path.basename(path))[0]
return last_modified, nb | Get the NotebookNode representation of a notebook by notebook_id. | Below is the instruction that describes the task:
### Input:
Get the NotebookNode representation of a notebook by notebook_id.
### Response:
def get_notebook_object(self, notebook_id):
"""Get the NotebookNode representation of a notebook by notebook_id."""
path = self.find_path(notebook_id)
if not os.path.isfile(path):
raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
info = os.stat(path)
last_modified = datetime.datetime.utcfromtimestamp(info.st_mtime)
with open(path,'r') as f:
s = f.read()
try:
# v1 and v2 and json in the .ipynb files.
nb = current.reads(s, u'json')
except:
raise web.HTTPError(500, u'Unreadable JSON notebook.')
# Always use the filename as the notebook name.
nb.metadata.name = os.path.splitext(os.path.basename(path))[0]
return last_modified, nb |
def visit_Capture(self, node: parsing.Capture) -> [ast.stmt] or ast.expr:
"""Generates python code to capture text consumed by a clause.
#If all clauses can be inlined
self.beginTag('tagname') and clause and self.endTag('tagname')
if not self.beginTag('tagname'):
return False
<code for the clause>
if not self.endTag('tagname'):
return False
"""
begintag = ast.Attribute(
ast.Name('self', ast.Load()), 'beginTag', ast.Load())
endtag = ast.Attribute(
ast.Name('self', ast.Load()), 'endTag', ast.Load())
begin = ast.Call(begintag, [ast.Str(node.tagname)], [], None, None)
end = ast.Call(endtag, [ast.Str(node.tagname)], [], None, None)
result = [begin, self.visit(node.pt), end]
for clause in result:
if not isinstance(clause, ast.expr):
break
else:
return ast.BoolOp(ast.And(), result)
res = []
for stmt in map(self._clause, result):
res.extend(stmt)
return res | Generates python code to capture text consumed by a clause.
#If all clauses can be inlined
self.beginTag('tagname') and clause and self.endTag('tagname')
if not self.beginTag('tagname'):
return False
<code for the clause>
if not self.endTag('tagname'):
return False | Below is the instruction that describes the task:
### Input:
Generates python code to capture text consumed by a clause.
#If all clauses can be inlined
self.beginTag('tagname') and clause and self.endTag('tagname')
if not self.beginTag('tagname'):
return False
<code for the clause>
if not self.endTag('tagname'):
return False
### Response:
def visit_Capture(self, node: parsing.Capture) -> [ast.stmt] or ast.expr:
"""Generates python code to capture text consumed by a clause.
#If all clauses can be inlined
self.beginTag('tagname') and clause and self.endTag('tagname')
if not self.beginTag('tagname'):
return False
<code for the clause>
if not self.endTag('tagname'):
return False
"""
begintag = ast.Attribute(
ast.Name('self', ast.Load()), 'beginTag', ast.Load())
endtag = ast.Attribute(
ast.Name('self', ast.Load()), 'endTag', ast.Load())
begin = ast.Call(begintag, [ast.Str(node.tagname)], [], None, None)
end = ast.Call(endtag, [ast.Str(node.tagname)], [], None, None)
result = [begin, self.visit(node.pt), end]
for clause in result:
if not isinstance(clause, ast.expr):
break
else:
return ast.BoolOp(ast.And(), result)
res = []
for stmt in map(self._clause, result):
res.extend(stmt)
return res |
def notify(self, n: int = 1) -> None:
"""Wake ``n`` waiters."""
waiters = [] # Waiters we plan to run right now.
while n and self._waiters:
waiter = self._waiters.popleft()
if not waiter.done(): # Might have timed out.
n -= 1
waiters.append(waiter)
for waiter in waiters:
future_set_result_unless_cancelled(waiter, True) | Wake ``n`` waiters. | Below is the instruction that describes the task:
### Input:
Wake ``n`` waiters.
### Response:
def notify(self, n: int = 1) -> None:
"""Wake ``n`` waiters."""
waiters = [] # Waiters we plan to run right now.
while n and self._waiters:
waiter = self._waiters.popleft()
if not waiter.done(): # Might have timed out.
n -= 1
waiters.append(waiter)
for waiter in waiters:
future_set_result_unless_cancelled(waiter, True) |
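A hedged sketch of the waiter/notifier pattern, assuming this method belongs to a tornado-style Condition used inside coroutines:
condition = Condition()           # the class this method belongs to (assumed)
async def consumer():
    await condition.wait()        # parks a waiter
async def producer():
    condition.notify(1)           # wakes exactly one parked waiter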
def parse_from_file(filename, nodata=False):
"""Parse df message from file.
@filename - path to file
@nodata - do not load data
@return - [binary header, metadata, binary data]
"""
header = None
with open(filename, "rb") as file:
header = read_machine_header(file)
meta_raw = file.read(header['meta_len'])
meta = __parse_meta(meta_raw, header)
data = b''
if not nodata:
data = __decompress(meta, file.read(header['data_len']))
return header, meta, data | Parse df message from file.
@filename - path to file
@nodata - do not load data
@return - [binary header, metadata, binary data] | Below is the instruction that describes the task:
### Input:
Parse df message from file.
@filename - path to file
@nodata - do not load data
@return - [binary header, metadata, binary data]
### Response:
def parse_from_file(filename, nodata=False):
"""Parse df message from file.
@filename - path to file
@nodata - do not load data
@return - [binary header, metadata, binary data]
"""
header = None
with open(filename, "rb") as file:
header = read_machine_header(file)
meta_raw = file.read(header['meta_len'])
meta = __parse_meta(meta_raw, header)
data = b''
if not nodata:
data = __decompress(meta, file.read(header['data_len']))
return header, meta, data |
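An illustrative call with a hypothetical file name; `nodata=True` skips decompressing the payload:
header, meta, data = parse_from_file("measurement.df")
header, meta, _ = parse_from_file("measurement.df", nodata=True)   # metadata only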
def back_propagation(self, delta_arr):
'''
Back propagation.
Args:
delta_output_arr: Delta.
Returns:
Tuple data.
- decoder's `list` of gradations,
- encoder's `np.ndarray` of Delta,
- encoder's `list` of gradations.
'''
re_encoder_delta_arr, delta_hidden_arr, re_encoder_grads_list = self.__retrospective_encoder.hidden_back_propagate(
delta_arr[:, -1]
)
re_encoder_grads_list.insert(0, None)
re_encoder_grads_list.insert(0, None)
observed_arr, encoded_arr, decoded_arr, re_encoded_arr = self.__inferenced_tuple
delta_arr = self.__encoder_decoder_controller.computable_loss.compute_delta(
decoded_arr,
observed_arr
)
delta_arr[:, -1] += re_encoder_delta_arr[:, -1]
decoder_grads_list, encoder_delta_arr, encoder_grads_list = self.__encoder_decoder_controller.back_propagation(
delta_arr
)
return re_encoder_grads_list, decoder_grads_list, encoder_delta_arr, encoder_grads_list | Back propagation.
Args:
delta_output_arr: Delta.
Returns:
Tuple data.
- decoder's `list` of gradations,
- encoder's `np.ndarray` of Delta,
- encoder's `list` of gradations. | Below is the instruction that describes the task:
### Input:
Back propagation.
Args:
delta_output_arr: Delta.
Returns:
Tuple data.
- decoder's `list` of gradations,
- encoder's `np.ndarray` of Delta,
- encoder's `list` of gradations.
### Response:
def back_propagation(self, delta_arr):
'''
Back propagation.
Args:
delta_output_arr: Delta.
Returns:
Tuple data.
- decoder's `list` of gradations,
- encoder's `np.ndarray` of Delta,
- encoder's `list` of gradations.
'''
re_encoder_delta_arr, delta_hidden_arr, re_encoder_grads_list = self.__retrospective_encoder.hidden_back_propagate(
delta_arr[:, -1]
)
re_encoder_grads_list.insert(0, None)
re_encoder_grads_list.insert(0, None)
observed_arr, encoded_arr, decoded_arr, re_encoded_arr = self.__inferenced_tuple
delta_arr = self.__encoder_decoder_controller.computable_loss.compute_delta(
decoded_arr,
observed_arr
)
delta_arr[:, -1] += re_encoder_delta_arr[:, -1]
decoder_grads_list, encoder_delta_arr, encoder_grads_list = self.__encoder_decoder_controller.back_propagation(
delta_arr
)
return re_encoder_grads_list, decoder_grads_list, encoder_delta_arr, encoder_grads_list |
def chunk(self, maxSize):
"""Splits the `Collection` into _maxSize_ size or smaller `Collections`
# Parameters
_maxSize_ : `int`
> The maximum number of elements in a retuned `Collection`
# Returns
`list [Collection]`
> A list of `Collections` that if all merged (`|` operator) would create the original
"""
chunks = []
currentSize = maxSize + 1
for i in self:
if currentSize >= maxSize:
currentSize = 0
chunks.append(type(self)({i}, name = 'Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart = True))
else:
chunks[-1].add(i)
currentSize += 1
return chunks | Splits the `Collection` into _maxSize_ size or smaller `Collections`
# Parameters
_maxSize_ : `int`
> The maximum number of elements in a retuned `Collection`
# Returns
`list [Collection]`
> A list of `Collections` that if all merged (`|` operator) would create the original | Below is the instruction that describes the task:
### Input:
Splits the `Collection` into _maxSize_ size or smaller `Collections`
# Parameters
_maxSize_ : `int`
> The maximum number of elements in a retuned `Collection`
# Returns
`list [Collection]`
> A list of `Collections` that if all merged (`|` operator) would create the original
### Response:
def chunk(self, maxSize):
"""Splits the `Collection` into _maxSize_ size or smaller `Collections`
# Parameters
_maxSize_ : `int`
> The maximum number of elements in a retuned `Collection`
# Returns
`list [Collection]`
> A list of `Collections` that if all merged (`|` operator) would create the original
"""
chunks = []
currentSize = maxSize + 1
for i in self:
if currentSize >= maxSize:
currentSize = 0
chunks.append(type(self)({i}, name = 'Chunk-{}-of-{}'.format(len(chunks), self.name), quietStart = True))
else:
chunks[-1].add(i)
currentSize += 1
return chunks |
def csrf(request):
"""
Context processor that provides a CSRF token, or the string 'NOTPROVIDED'
if it has not been provided by either a view decorator or the middleware
"""
def _get_val():
token = get_token(request)
if token is None:
# In order to be able to provide debugging info in the
# case of misconfiguration, we use a sentinel value
# instead of returning an empty dict.
return 'NOTPROVIDED'
else:
token = force_bytes(token, encoding='latin-1')
key = force_bytes(
get_random_string(len(token)),
encoding='latin-1'
)
value = b64_encode(xor(token, key))
return force_text(b'$'.join((key, value)), encoding='latin-1')
_get_val = lazy(_get_val, text_type)
return {'csrf_token': _get_val()} | Context processor that provides a CSRF token, or the string 'NOTPROVIDED'
if it has not been provided by either a view decorator or the middleware | Below is the instruction that describes the task:
### Input:
Context processor that provides a CSRF token, or the string 'NOTPROVIDED'
if it has not been provided by either a view decorator or the middleware
### Response:
def csrf(request):
"""
Context processor that provides a CSRF token, or the string 'NOTPROVIDED'
if it has not been provided by either a view decorator or the middleware
"""
def _get_val():
token = get_token(request)
if token is None:
# In order to be able to provide debugging info in the
# case of misconfiguration, we use a sentinel value
# instead of returning an empty dict.
return 'NOTPROVIDED'
else:
token = force_bytes(token, encoding='latin-1')
key = force_bytes(
get_random_string(len(token)),
encoding='latin-1'
)
value = b64_encode(xor(token, key))
return force_text(b'$'.join((key, value)), encoding='latin-1')
_get_val = lazy(_get_val, text_type)
return {'csrf_token': _get_val()} |
def _codec_can_decode_with_surrogatepass(codec, _cache={}):
"""Returns if a codec supports the surrogatepass error handler when
decoding. Some codecs were broken in Python <3.4
"""
try:
return _cache[codec]
except KeyError:
try:
u"\ud83d".encode(
codec, _surrogatepass).decode(codec, _surrogatepass)
except UnicodeDecodeError:
_cache[codec] = False
else:
_cache[codec] = True
return _cache[codec] | Returns if a codec supports the surrogatepass error handler when
decoding. Some codecs were broken in Python <3.4 | Below is the instruction that describes the task:
### Input:
Returns if a codec supports the surrogatepass error handler when
decoding. Some codecs were broken in Python <3.4
### Response:
def _codec_can_decode_with_surrogatepass(codec, _cache={}):
"""Returns if a codec supports the surrogatepass error handler when
decoding. Some codecs were broken in Python <3.4
"""
try:
return _cache[codec]
except KeyError:
try:
u"\ud83d".encode(
codec, _surrogatepass).decode(codec, _surrogatepass)
except UnicodeDecodeError:
_cache[codec] = False
else:
_cache[codec] = True
return _cache[codec] |
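Illustrative calls (the helper is private, so this is only a sketch of its behaviour):
_codec_can_decode_with_surrogatepass("utf-8")       # True on modern Python 3
_codec_can_decode_with_surrogatepass("utf-16-le")   # may be False on interpreters older than 3.4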
def intersect(d1, d2):
"""Intersect dictionaries d1 and d2 by key *and* value."""
return dict((k, d1[k]) for k in d1 if k in d2 and d1[k] == d2[k]) | Intersect dictionaries d1 and d2 by key *and* value. | Below is the instruction that describes the task:
### Input:
Intersect dictionaries d1 and d2 by key *and* value.
### Response:
def intersect(d1, d2):
"""Intersect dictionaries d1 and d2 by key *and* value."""
return dict((k, d1[k]) for k in d1 if k in d2 and d1[k] == d2[k]) |
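A one-line example of the key-and-value intersection:
intersect({'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'b': 9})   # -> {'a': 1}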
def _recursive_get(self, key, dic=None):
"""
Gets contents of requirement key recursively so users can search for
specific keys inside nested requirement dicts.
:param key: key or dot separated string of keys to look for.
:param dic: Optional dictionary to use in the search.
If not provided, self._requirements is used.
:return: results of search or None
"""
return recursive_search(key, dic) if dic else recursive_search(key, self._requirements) | Gets contents of requirement key recursively so users can search for
specific keys inside nested requirement dicts.
:param key: key or dot separated string of keys to look for.
:param dic: Optional dictionary to use in the search.
If not provided, self._requirements is used.
:return: results of search or None | Below is the instruction that describes the task:
### Input:
Gets contents of requirement key recursively so users can search for
specific keys inside nested requirement dicts.
:param key: key or dot separated string of keys to look for.
:param dic: Optional dictionary to use in the search.
If not provided, self._requirements is used.
:return: results of search or None
### Response:
def _recursive_get(self, key, dic=None):
"""
Gets contents of requirement key recursively so users can search for
specific keys inside nested requirement dicts.
:param key: key or dot separated string of keys to look for.
:param dic: Optional dictionary to use in the search.
If not provided, self._requirements is used.
:return: results of search or None
"""
return recursive_search(key, dic) if dic else recursive_search(key, self._requirements) |
def _fill(self, direction, limit=None):
"""
Shared function for `pad` and `backfill` to call Cython method.
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill
"""
# Need int value for Cython
if limit is None:
limit = -1
return self._get_cythonized_result('group_fillna_indexer',
self.grouper, needs_mask=True,
cython_dtype=np.int64,
result_is_index=True,
direction=direction, limit=limit) | Shared function for `pad` and `backfill` to call Cython method.
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill | Below is the instruction that describes the task:
### Input:
Shared function for `pad` and `backfill` to call Cython method.
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill
### Response:
def _fill(self, direction, limit=None):
"""
Shared function for `pad` and `backfill` to call Cython method.
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill
"""
# Need int value for Cython
if limit is None:
limit = -1
return self._get_cythonized_result('group_fillna_indexer',
self.grouper, needs_mask=True,
cython_dtype=np.int64,
result_is_index=True,
direction=direction, limit=limit) |
def defaults(self):
""" component default component
.. Note:: default components is just an indication for user and the
views, except if the Block is required. If required then default is
selected if nothing explisitely selected.
"""
default = self._defaults
# if require and no default, the first component as default
if not len(default) and self.required and len(self._components):
default = [six.next(six.itervalues(self._components)).name]
return default | component default component
.. Note:: default components is just an indication for user and the
views, except if the Block is required. If required then default is
selected if nothing explisitely selected. | Below is the instruction that describes the task:
### Input:
component default component
.. Note:: default components is just an indication for user and the
views, except if the Block is required. If required then default is
selected if nothing explisitely selected.
### Response:
def defaults(self):
""" component default component
.. Note:: default components is just an indication for user and the
views, except if the Block is required. If required then default is
selected if nothing explisitely selected.
"""
default = self._defaults
# if require and no default, the first component as default
if not len(default) and self.required and len(self._components):
default = [six.next(six.itervalues(self._components)).name]
return default |
def MapFields(function, key=True):
"""
Transformation factory that maps `function` on the values of a row.
It can be applied either to
1. all columns (`key=True`),
2. no column (`key=False`), or
3. a subset of columns by passing a callable, which takes column name and returns `bool`
(same as the parameter `function` in `filter`).
:param function: callable
:param key: bool or callable
:return: callable
"""
@use_raw_input
def _MapFields(bag):
try:
factory = type(bag)._make
except AttributeError:
factory = type(bag)
if callable(key):
try:
fields = bag._fields
except AttributeError as e:
raise UnrecoverableAttributeError(
'This transformation works only on objects with named'
' fields (namedtuple, BagType, ...).') from e
return factory(
function(value) if key(key_) else value for key_, value in zip(fields, bag)
)
elif key:
return factory(function(value) for value in bag)
else:
return NOT_MODIFIED
return _MapFields | Transformation factory that maps `function` on the values of a row.
It can be applied either to
1. all columns (`key=True`),
2. no column (`key=False`), or
3. a subset of columns by passing a callable, which takes column name and returns `bool`
(same as the parameter `function` in `filter`).
:param function: callable
:param key: bool or callable
:return: callable | Below is the instruction that describes the task:
### Input:
Transformation factory that maps `function` on the values of a row.
It can be applied either to
1. all columns (`key=True`),
2. no column (`key=False`), or
3. a subset of columns by passing a callable, which takes column name and returns `bool`
(same as the parameter `function` in `filter`).
:param function: callable
:param key: bool or callable
:return: callable
### Response:
def MapFields(function, key=True):
"""
Transformation factory that maps `function` on the values of a row.
It can be applied either to
1. all columns (`key=True`),
2. no column (`key=False`), or
3. a subset of columns by passing a callable, which takes column name and returns `bool`
(same as the parameter `function` in `filter`).
:param function: callable
:param key: bool or callable
:return: callable
"""
@use_raw_input
def _MapFields(bag):
try:
factory = type(bag)._make
except AttributeError:
factory = type(bag)
if callable(key):
try:
fields = bag._fields
except AttributeError as e:
raise UnrecoverableAttributeError(
'This transformation works only on objects with named'
' fields (namedtuple, BagType, ...).') from e
return factory(
function(value) if key(key_) else value for key_, value in zip(fields, bag)
)
elif key:
return factory(function(value) for value in bag)
else:
return NOT_MODIFIED
return _MapFields |
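A hedged example of the factory, e.g. wired into a bonobo-style graph; the column name 'name' is only illustrative:
upper_names = MapFields(str.upper, key=lambda col: col == 'name')  # uppercase just the 'name' field
strip_all = MapFields(str.strip)                                   # key=True: apply to every field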