def cookies(self, url):
"""
Return cookies that are matching the path and are still valid
:param url:
:return:
"""
part = urlparse(url)
#if part.port:
# _domain = "%s:%s" % (part.hostname, part.port)
#else:
_domain = part.hostname
cookie_dict = {}
now = utc_now()
for _, a in list(self.cookiejar._cookies.items()):
for _, b in a.items():
for cookie in list(b.values()):
# print(cookie)
if cookie.expires and cookie.expires <= now:
continue
if not re.search("%s$" % cookie.domain, _domain):
continue
if not re.match(cookie.path, part.path):
continue
cookie_dict[cookie.name] = cookie.value
    return cookie_dict
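# Illustrative sketch (not from the original module): the same domain-suffix /
# path-prefix matching on plain stand-in cookie objects. Note re.escape() is
# used here so dots in the domain match literally, a hedge the original regex
# does not take.
import re
from collections import namedtuple
from urllib.parse import urlparse

FakeCookie = namedtuple('FakeCookie', 'name value domain path')
jar = [FakeCookie('sid', 'abc', 'example.com', '/api'),
       FakeCookie('tok', 'xyz', 'other.org', '/')]
part = urlparse('https://www.example.com/api/data')
matched = {c.name: c.value for c in jar
           if re.search(re.escape(c.domain) + '$', part.hostname)
           and part.path.startswith(c.path)}
print(matched)  # {'sid': 'abc'}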
def _format_variants(self, variant, index, case_obj, add_all_info=False):
"""Return a Variant object
Format variant make a variant that includes enough information for
the variant view.
If add_all_info then all transcripts will be parsed
Args:
variant (cython2.Variant): A variant object
index (int): The index of the variant
case_obj (puzzle.models.Case): A case object
"""
header_line = self.head.header
# Get the individual ids for individuals in vcf file
vcf_individuals = set([ind_id for ind_id in self.head.individuals])
#Create a info dict:
info_dict = dict(variant.INFO)
chrom = variant.CHROM
if chrom.startswith('chr') or chrom.startswith('CHR'):
chrom = chrom[3:]
variant_obj = Variant(
CHROM=chrom,
POS=variant.POS,
ID=variant.ID,
REF=variant.REF,
ALT=variant.ALT[0],
QUAL=variant.QUAL,
FILTER=variant.FILTER,
)
variant_obj._set_variant_id()
logger.debug("Creating a variant object of variant {0}".format(
variant_obj.variant_id))
variant_obj.index = index
logger.debug("Updating index to: {0}".format(
index))
########### Get the coordinates for the variant ##############
variant_obj.start = variant.start
variant_obj.stop = variant.end
    #SV variants need to be handled a bit differently since they can be huge;
    #it would take too much compute to parse all vep/snpeff entries for these.
if self.variant_type == 'sv':
variant_obj.stop = int(info_dict.get('END', variant_obj.POS))
self._add_sv_coordinates(variant_obj)
variant_obj.sv_type = info_dict.get('SVTYPE')
        # Special for the FindSV software:
        # SV-specific tag for the number of occurrences
occurances = info_dict.get('OCC')
if occurances:
logger.debug("Updating occurances to: {0}".format(
occurances))
variant_obj['occurances'] = float(occurances)
variant_obj.add_frequency('OCC', occurances)
else:
self._add_thousand_g(variant_obj, info_dict)
self._add_cadd_score(variant_obj, info_dict)
self._add_genetic_models(variant_obj, info_dict)
self._add_transcripts(variant_obj, info_dict)
self._add_exac(variant_obj, info_dict)
self._add_hgnc_symbols(variant_obj)
if add_all_info:
self._add_genotype_calls(variant_obj, str(variant), case_obj)
self._add_compounds(variant_obj, info_dict)
self._add_gmaf(variant_obj, info_dict)
self._add_genes(variant_obj)
##### Add consequences ####
self._add_consequences(variant_obj, str(variant))
self._add_most_severe_consequence(variant_obj)
self._add_impact_severity(variant_obj)
self._add_rank_score(variant_obj, info_dict)
variant_obj.set_max_freq()
    return variant_obj
def most_by_mask(self, mask, y, mult):
""" Extracts the first 4 most correct/incorrect indexes from the ordered list of probabilities
Arguments:
mask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with shape (num_of_samples,) which contains True where class==selected_class, and False everywhere else
y (int): the selected class
mult (int): sets the ordering; -1 descending, 1 ascending
Returns:
idxs (ndarray): An array of indexes of length 4
"""
idxs = np.where(mask)[0]
cnt = min(4, len(idxs))
    return idxs[np.argsort(mult * self.probs[idxs, y])[:cnt]]
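# Illustrative sketch (standalone, mirrors most_by_mask with assumed toy data):
import numpy as np

probs = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4], [0.3, 0.7], [0.55, 0.45]])
mask = probs.argmax(axis=1) == 1          # samples predicted as class 1
idxs = np.where(mask)[0]
cnt = min(4, len(idxs))
# mult=-1 sorts descending (most confident first); mult=1 sorts ascending
print(idxs[np.argsort(-1 * probs[idxs, 1])[:cnt]])   # [1 3]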
def typedef(
self,
name=None,
function=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns reference to typedef declaration, that is matched
defined criteria"""
return (
self._find_single(
self._impl_matchers[scopedef_t.typedef],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.typedef],
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
    )
def _get_role_arn():
"""
Return role arn from X-Role-ARN header,
lookup role arn from source IP,
or fall back to command line default.
"""
role_arn = bottle.request.headers.get('X-Role-ARN')
if not role_arn:
role_arn = _lookup_ip_role_arn(bottle.request.environ.get('REMOTE_ADDR'))
if not role_arn:
role_arn = _role_arn
    return role_arn
def _adjust_scrollbars(self):
""" Expands the vertical scrollbar beyond the range set by Qt.
"""
# This code is adapted from _q_adjustScrollbars in qplaintextedit.cpp
# and qtextedit.cpp.
document = self._control.document()
scrollbar = self._control.verticalScrollBar()
viewport_height = self._control.viewport().height()
if isinstance(self._control, QtGui.QPlainTextEdit):
maximum = max(0, document.lineCount() - 1)
step = viewport_height / self._control.fontMetrics().lineSpacing()
else:
# QTextEdit does not do line-based layout and blocks will not in
# general have the same height. Therefore it does not make sense to
# attempt to scroll in line height increments.
maximum = document.size().height()
step = viewport_height
diff = maximum - scrollbar.maximum()
scrollbar.setRange(0, maximum)
scrollbar.setPageStep(step)
# Compensate for undesirable scrolling that occurs automatically due to
# maximumBlockCount() text truncation.
if diff < 0 and document.blockCount() == document.maximumBlockCount():
        scrollbar.setValue(scrollbar.value() + diff)
def _import(self, name):
''' Import namespace package '''
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
    return mod
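# Illustrative note (standalone): the loop above re-implements what
# importlib.import_module does, since __import__('a.b') returns the top-level
# package 'a' and child modules must be walked via getattr.
import importlib

mod_a = importlib.import_module('os.path')
mod_b = __import__('os.path')               # returns the `os` package
for comp in 'os.path'.split('.')[1:]:
    mod_b = getattr(mod_b, comp)
assert mod_a is mod_b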
def filter_results(source, results, aggressive):
"""Filter out spurious reports from pycodestyle.
    If aggressive is greater than zero, possibly unsafe fixes (E711, E712,
    etc.) are allowed; higher levels enable more of them.
"""
non_docstring_string_line_numbers = multiline_string_lines(
source, include_docstrings=False)
all_string_line_numbers = multiline_string_lines(
source, include_docstrings=True)
commented_out_code_line_numbers = commented_out_code_lines(source)
has_e901 = any(result['id'].lower() == 'e901' for result in results)
for r in results:
issue_id = r['id'].lower()
if r['line'] in non_docstring_string_line_numbers:
if issue_id.startswith(('e1', 'e501', 'w191')):
continue
if r['line'] in all_string_line_numbers:
if issue_id in ['e501']:
continue
# We must offset by 1 for lines that contain the trailing contents of
# multiline strings.
if not aggressive and (r['line'] + 1) in all_string_line_numbers:
            # Do not modify multiline strings in non-aggressive mode. Removing
            # trailing whitespace could break doctests.
if issue_id.startswith(('w29', 'w39')):
continue
if aggressive <= 0:
if issue_id.startswith(('e711', 'e72', 'w6')):
continue
if aggressive <= 1:
if issue_id.startswith(('e712', 'e713', 'e714')):
continue
if aggressive <= 2:
            if issue_id.startswith('e704'):
continue
if r['line'] in commented_out_code_line_numbers:
if issue_id.startswith(('e26', 'e501')):
continue
# Do not touch indentation if there is a token error caused by
# incomplete multi-line statement. Otherwise, we risk screwing up the
# indentation.
if has_e901:
if issue_id.startswith(('e1', 'e7')):
continue
        yield r
def add(name, gid=None, **kwargs):
'''
Add the specified group
CLI Example:
.. code-block:: bash
salt '*' group.add foo 3456
'''
### NOTE: **kwargs isn't used here but needs to be included in this
### function for compatibility with the group.present state
if info(name):
raise CommandExecutionError(
'Group \'{0}\' already exists'.format(name)
)
if salt.utils.stringutils.contains_whitespace(name):
raise SaltInvocationError('Group name cannot contain whitespace')
if name.startswith('_'):
raise SaltInvocationError(
'Salt will not create groups beginning with underscores'
)
if gid is not None and not isinstance(gid, int):
raise SaltInvocationError('gid must be an integer')
# check if gid is already in use
gid_list = _list_gids()
if six.text_type(gid) in gid_list:
raise CommandExecutionError(
'gid \'{0}\' already exists'.format(gid)
)
cmd = ['dseditgroup', '-o', 'create']
if gid:
        cmd.extend(['-i', str(gid)])
cmd.append(name)
    return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
def load_cPkl(fpath, verbose=None, n=None):
"""
Loads a pickled file with optional verbosity.
Aims for compatibility between python2 and python3.
TestPickleExtentsSimple:
>>> def makedata_simple():
>>> data = np.empty((500, 2 ** 20), dtype=np.uint8) + 1
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_simple()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleExtentsComplex:
>>> def makedata_complex():
>>> rng = np.random.RandomState(42)
>>> item1 = np.empty((100, 2 ** 20), dtype=np.uint8) + 1
>>> item2 = [np.empty((10, 2 ** 10), dtype=np.uint8) + 1
>>> for a in range(1000)]
>>> item3 = {a: np.empty(int(rng.rand() * 10), dtype=np.int16) + 1
>>> for a in range(100)}
>>> item4 = {np.int32(a): np.empty((int(rng.rand() * 10), 2), dtype=np.float64) + 1
>>> for a in range(200)}
>>> data = {'item1': item1, 'item2': item2,
>>> 'item3': item3, 'item4': item4}
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_complex()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data2 = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleCacher:
>>> memtrack = ut.MemoryTracker()
>>> cacher = ut.Cacher('tmp', cache_dir='.', cfgstr='foo')
>>> data3 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> data4 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> memtrack = ut.MemoryTracker()
>>> fpath = '/home/joncrall/Desktop/smkcache/inva_PZ_Master1VUUIDS((5616)vxihbjwtggyovrto)_vpgwpcafbjkkpjdf.cPkl'
>>> print(ut.get_file_nBytes_str(fpath))
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
def makedata_complex():
data = np.empty((1000, 2 ** 20), dtype=np.uint8)
data[:] = 0
return data
"""
verbose = _rectify_verb_read(verbose)
if verbose:
print('[util_io] * load_cPkl(%r)' % (util_path.tail(fpath, n=n),))
try:
with open(fpath, 'rb') as file_:
data = pickle.load(file_)
except UnicodeDecodeError:
if six.PY3:
# try to open python2 pickle
with open(fpath, 'rb') as file_:
data = pickle.load(file_, encoding='latin1')
else:
raise
except ValueError as ex:
if six.PY2:
if ex.message == 'unsupported pickle protocol: 4':
raise ValueError(
'unsupported Python3 pickle protocol 4 '
'in Python2 for fpath=%r' % (fpath,))
else:
raise
else:
raise
    return data
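# Illustrative sketch (standalone round-trip, not using the utool helpers):
# protocol 2 pickles load on both Python 2 and 3, and encoding='latin1' is the
# fallback that lets Python 3 read Python 2 pickles, as in load_cPkl above.
import os
import pickle
import tempfile

payload = {'a': [1, 2, 3], 'b': 'text'}
fpath = os.path.join(tempfile.mkdtemp(), 'tmp.pkl')
with open(fpath, 'wb') as file_:
    pickle.dump(payload, file_, protocol=2)
with open(fpath, 'rb') as file_:
    assert pickle.load(file_) == payload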
def _open_dataset(self, urlpath):
"""Open dataset using dask and use pattern fields to set new columns
"""
import dask.dataframe
if self.pattern is None:
self._dataframe = dask.dataframe.read_csv(
urlpath, storage_options=self._storage_options,
**self._csv_kwargs)
return
if not (DASK_VERSION >= '0.19.0'):
raise ValueError("Your version of dask is '{}'. "
"The ability to include filenames in read_csv output "
"(``include_path_column``) was added in 0.19.0, so "
"pattern urlpaths are not supported.".format(DASK_VERSION))
drop_path_column = 'include_path_column' not in self._csv_kwargs
path_column = self._path_column()
self._dataframe = dask.dataframe.read_csv(
urlpath, storage_options=self._storage_options, **self._csv_kwargs)
# add the new columns to the dataframe
self._set_pattern_columns(path_column)
if drop_path_column:
            self._dataframe = self._dataframe.drop([path_column], axis=1)
def _set_no_advertise(self, v, load=False):
"""
Setter method for no_advertise, mapped from YANG variable /interface/fortygigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/prefix/lifetime/no_advertise (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_no_advertise is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_no_advertise() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="no-advertise", rest_name="no-advertise", parent=self, choice=(u'ch-valid-type', u'ca-no-advertise'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Do not advertise prefix', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """no_advertise must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="no-advertise", rest_name="no-advertise", parent=self, choice=(u'ch-valid-type', u'ca-no-advertise'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Do not advertise prefix', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='empty', is_config=True)""",
})
self.__no_advertise = t
if hasattr(self, '_set'):
        self._set()
def fetch(cls, client, symbol):
"""
fetch data for stock
"""
assert(type(symbol) is str)
url = ("https://api.robinhood.com/instruments/?symbol={0}".
format(symbol))
data = client.get(url)
return data["results"][0] | 0.007018 |
def monitoring_plot(ind, shap_values, features, feature_names=None):
""" Create a SHAP monitoring plot.
(Note this function is preliminary and subject to change!!)
A SHAP monitoring plot is meant to display the behavior of a model
over time. Often the shap_values given to this plot explain the loss
of a model, so changes in a feature's impact on the model's loss over
time can help in monitoring the model's performance.
Parameters
----------
ind : int
Index of the feature to plot.
shap_values : numpy.array
Matrix of SHAP values (# samples x # features)
features : numpy.array or pandas.DataFrame
Matrix of feature values (# samples x # features)
feature_names : list
Names of the features (length # features)
"""
if str(type(features)).endswith("'pandas.core.frame.DataFrame'>"):
if feature_names is None:
feature_names = features.columns
features = features.values
pl.figure(figsize=(10,3))
ys = shap_values[:,ind]
xs = np.arange(len(ys))#np.linspace(0, 12*2, len(ys))
pvals = []
inc = 50
for i in range(inc, len(ys)-inc, inc):
#stat, pval = scipy.stats.mannwhitneyu(v[:i], v[i:], alternative="two-sided")
stat, pval = scipy.stats.ttest_ind(ys[:i], ys[i:])
pvals.append(pval)
min_pval = np.min(pvals)
min_pval_ind = np.argmin(pvals)*inc + inc
if min_pval < 0.05 / shap_values.shape[1]:
pl.axvline(min_pval_ind, linestyle="dashed", color="#666666", alpha=0.2)
pl.scatter(xs, ys, s=10, c=features[:,ind], cmap=colors.red_blue)
pl.xlabel("Sample index")
pl.ylabel(truncate_text(feature_names[ind], 30) + "\nSHAP value", size=13)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
cb = pl.colorbar()
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
cb.set_label(truncate_text(feature_names[ind], 30), size=13)
    pl.show()
def iter_links(self):
"""An iterator over the working links in the machine.
Generates a series of (x, y, link) tuples.
"""
for x in range(self.width):
for y in range(self.height):
for link in Links:
if (x, y, link) in self:
                    yield (x, y, link)
def get_tripIs_active_in_range(self, start, end):
"""
Obtain from the (standard) GTFS database, list of trip_IDs (and other trip_related info)
that are active between given 'start' and 'end' times.
    The start time of a trip is determined by the departure time at the first stop of the trip.
The end time of a trip is determined by the arrival time at the last stop of the trip.
Parameters
----------
start, end : int
the start and end of the time interval in unix time seconds
Returns
-------
active_trips : pandas.DataFrame with columns
trip_I, day_start_ut, start_time_ut, end_time_ut, shape_id
"""
to_select = "trip_I, day_start_ut, start_time_ut, end_time_ut, shape_id "
query = "SELECT " + to_select + \
"FROM day_trips " \
"WHERE " \
"(end_time_ut > {start_ut} AND start_time_ut < {end_ut})".format(start_ut=start, end_ut=end)
    return pd.read_sql_query(query, self.conn)
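# Illustrative sketch (assumes a toy day_trips table; uses '?' placeholders,
# a safer alternative to the str.format interpolation above):
import sqlite3
import pandas as pd

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE day_trips (trip_I INT, day_start_ut INT, "
             "start_time_ut INT, end_time_ut INT, shape_id TEXT)")
conn.execute("INSERT INTO day_trips VALUES (1, 0, 100, 200, 'shp1')")
query = ("SELECT trip_I, day_start_ut, start_time_ut, end_time_ut, shape_id "
         "FROM day_trips WHERE (end_time_ut > ? AND start_time_ut < ?)")
print(pd.read_sql_query(query, conn, params=(150, 300)))   # trip 1 overlaps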
def identify(self, header):
"""Identifies a signature and returns the appropriate Signer object.
This is done by reading an authorization header and matching it to signature characteristics.
None is returned if the authorization header does not match the format of any signature identified by this identifier.
Keyword arguments:
header -- the Authorization header of a request.
"""
for ver, signer in self.signers.items():
if signer.matches(header):
return signer
    return None
def get_csv_rows_for_installed(
old_csv_rows, # type: Iterable[List[str]]
installed, # type: Dict[str, str]
changed, # type: set
generated, # type: List[str]
lib_dir, # type: str
):
# type: (...) -> List[InstalledCSVRow]
"""
:param installed: A map from archive RECORD path to installation RECORD
path.
"""
installed_rows = [] # type: List[InstalledCSVRow]
for row in old_csv_rows:
if len(row) > 3:
logger.warning(
'RECORD line has more than three elements: {}'.format(row)
)
# Make a copy because we are mutating the row.
row = list(row)
old_path = row[0]
new_path = installed.pop(old_path, old_path)
row[0] = new_path
if new_path in changed:
digest, length = rehash(new_path)
row[1] = digest
row[2] = length
installed_rows.append(tuple(row))
for f in generated:
digest, length = rehash(f)
installed_rows.append((normpath(f, lib_dir), digest, str(length)))
for f in installed:
installed_rows.append((installed[f], '', ''))
    return installed_rows
def cafferesnet101(num_classes=1000, pretrained='imagenet'):
"""Constructs a ResNet-101 model.
    Args:
        pretrained (str or None): If 'imagenet', returns a model pre-trained on ImageNet
    """
model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['cafferesnet101'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
    return model
def start(self):
"""Start the worker (emits sig_started signal with worker as arg)."""
if not self._started:
self.sig_started.emit(self)
        self._started = True
def add_user_role(self, user, role):
"""Add role to given user.
Args:
user (string): User name.
role (string): Role to assign.
Raises:
requests.HTTPError on failure.
"""
self.service.add_user_role(
user, role,
        self.url_prefix, self.auth, self.session, self.session_send_opts)
def undefine(self):
"""Undefine the Template.
Python equivalent of the CLIPS undeftemplate command.
The object becomes unusable after this method has been called.
"""
if lib.EnvUndeftemplate(self._env, self._tpl) != 1:
        raise CLIPSError(self._env)
def get_ISI_ratio(sorting, sampling_frequency, unit_ids=None, save_as_property=True):
'''This function calculates the ratio between the frequency of spikes present
within 0- to 2-ms (refractory period) interspike interval (ISI) and those at 0- to 20-ms
interval. It then returns the ratios and also adds a property, ISI_ratio, for
the passed in sorting extractor. Taken from:
"Large-scale, high-density (up to 512 channels) recording of local circuits
in behaving animals" - Antal Berényi, et al.
    Parameters
    ----------
    sorting: SortingExtractor
        SortingExtractor for the results file being analyzed
    sampling_frequency: float
        The sampling frequency of the recording
    unit_ids: list
        List of unit ids for which to get ISI ratios
    save_as_property: boolean
        If True, this will save the ISI_ratio as a property in the given
        sorting extractor.
    Returns
    -------
    ISI_ratios: list of floats
        A list of ratios for each unit passed into this function. Each ratio is
        the ratio between the frequency of spikes present within 0- to 2-ms ISI
        and those at 0- to 20-ms interval for the corresponding spike train.
    '''
ISI_ratios = []
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
for unit_id in unit_ids:
unit_spike_train = sorting.get_unit_spike_train(unit_id)
ref_frame_period = sampling_frequency*0.002
long_interval = sampling_frequency*0.02
ISIs = np.diff(unit_spike_train)
num_ref_violations = float(sum(ISIs<ref_frame_period))
num_longer_interval = float(sum(ISIs<long_interval))
ISI_ratio = num_ref_violations / num_longer_interval
if save_as_property:
sorting.set_unit_property(unit_id, 'ISI_ratio', ISI_ratio)
ISI_ratios.append(ISI_ratio)
    return ISI_ratios
def heuristic_cost(self, start, target):
""" assumes start and target are an (x,y) grid """
(x1, y1) = start
(x2, y2) = target
    return abs(x1 - x2) + abs(y1 - y2)
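# Illustrative check (standalone): this is the Manhattan (L1) distance, an
# admissible A* heuristic for 4-connected grid movement.
def manhattan(start, target):
    (x1, y1), (x2, y2) = start, target
    return abs(x1 - x2) + abs(y1 - y2)

assert manhattan((0, 0), (3, 4)) == 7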
def get_safe(self, section, key, default=None):
"""
Attempt to get a configuration value from a certain section
in a ``cfg`` object but returning None if not found. Avoids the need
to be doing try/except {ConfigParser Exceptions} every time.
"""
try:
return self.get(section, key)
except (configparser.NoSectionError, configparser.NoOptionError):
        return default
def estimation_required(func, *args, **kw):
"""
    Decorator that checks the self._estimated flag on an Estimator instance, raising a ValueError if the decorated
    function is called before estimator.estimate() has been called.
    If combined with @property, the property annotation must come first in the decorator chain, i.e.,
@property
@estimation_required
def func(self):
....
"""
self = args[0] if len(args) > 0 else None
if self and hasattr(self, '_estimated') and not self._estimated:
raise ValueError("Tried calling %s on %s which requires the estimator to be estimated."
% (func.__name__, self.__class__.__name__))
    return func(*args, **kw)
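# Illustrative sketch (standalone): the (func, *args, **kw) signature suggests
# the original is wrapped with the `decorator` package. A plain functools
# equivalent of the same guard, with an assumed toy estimator:
import functools

def requires_estimation(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kw):
        if hasattr(self, '_estimated') and not self._estimated:
            raise ValueError("Tried calling %s on %s which requires the "
                             "estimator to be estimated."
                             % (func.__name__, self.__class__.__name__))
        return func(self, *args, **kw)
    return wrapper

class ToyEstimator(object):
    def __init__(self):
        self._estimated = False
    def estimate(self):
        self._estimated = True
        return self
    @property
    @requires_estimation
    def result(self):
        return 42

print(ToyEstimator().estimate().result)   # 42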
def get_http_method_arg_name(self):
"""
Return the HTTP function to call and the params/data argument name
"""
if self.method == 'get':
arg_name = 'params'
else:
arg_name = 'data'
    return getattr(requests, self.method), arg_name
def plot(self, **kargs):
"""Plot the data set, using the sampling information to set the x-axis
correctly."""
from pylab import plot, linspace, xlabel, ylabel, grid
time = linspace(1*self.dt, self.N*self.dt, self.N)
plot(time, self.data, **kargs)
xlabel('Time')
ylabel('Amplitude')
    grid(True)
def next(self):
"""Request next data container.
This function call is blocking.
Returns
-------
data : dict
The data for this train, keyed by source name.
meta : dict
The metadata for this train, keyed by source name.
This dictionary is populated for protocol version 1.0 and 2.2.
For other protocol versions, metadata information is available in
`data` dict.
Raises
------
TimeoutError
If timeout is reached before receiving data.
"""
if self._pattern == zmq.REQ and not self._recv_ready:
self._socket.send(b'next')
self._recv_ready = True
try:
msg = self._socket.recv_multipart(copy=False)
except zmq.error.Again:
raise TimeoutError(
'No data received from {} in the last {} ms'.format(
self._socket.getsockopt_string(zmq.LAST_ENDPOINT),
self._socket.getsockopt(zmq.RCVTIMEO)))
self._recv_ready = False
    return self._deserialize(msg)
def wikidata_get(identifier):
"""
https://www.wikidata.org/wiki/Special:EntityData/P248.json
"""
url = 'https://www.wikidata.org/wiki/Special:EntityData/{}.json'.format(identifier)
#logging.info(url)
    return json.loads(requests.get(url).content)
def by_sql(cls, sql, engine_or_session):
"""
Query with sql statement or texture sql.
"""
ses, auto_close = ensure_session(engine_or_session)
result = ses.query(cls).from_statement(sql).all()
if auto_close:
ses.close()
    return result
def index_of_item(self, item):
"""Get the index for the given TreeItem
:param item: the treeitem to query
:type item: :class:`TreeItem`
:returns: the index of the item
:rtype: :class:`QtCore.QModelIndex`
:raises: ValueError
"""
# root has an invalid index
if item == self._root:
return QtCore.QModelIndex()
# find all parents to get their index
parents = [item]
i = item
while True:
parent = i.parent()
# break if parent is root because we got all parents we need
if parent == self._root:
break
if parent is None:
# No new parent but last parent wasn't root!
# This means that the item was not in the model!
return QtCore.QModelIndex()
# a new parent was found and we are still not at root
# search further until we get to root
i = parent
parents.append(parent)
    # walk back down from the root, building the model index level by level
index = QtCore.QModelIndex()
for treeitem in reversed(parents):
parent = treeitem.parent()
row = parent.childItems.index(treeitem)
index = self.index(row, 0, index)
    return index
def log_results():
"""This is the callback that is run once the Async task is finished. It
takes the output from grep and logs it."""
    import logging

    from furious.context import get_current_async
    # Get the recently finished Async object. ("async" is a reserved word in
    # Python 3.7+, so bind it to a different name.)
    async_task = get_current_async()
    # Pull out the result data and log it.
    for result in async_task.result:
        logging.info(result)
def give(self, terrain, num=1):
"""
Add a certain number of resources to the trade from giver->getter
:param terrain: resource type, models.Terrain
:param num: number to add, int
:return: None
"""
for _ in range(num):
logging.debug('terrain={}'.format(terrain))
        self._give.append(terrain)
def get_all_project_owners(project_ids=None, **kwargs):
"""
Get the project owner entries for all the requested projects.
If the project_ids argument is None, return all the owner entries
for ALL projects
"""
projowner_qry = db.DBSession.query(ProjectOwner)
if project_ids is not None:
projowner_qry = projowner_qry.filter(ProjectOwner.project_id.in_(project_ids))
project_owners_i = projowner_qry.all()
    return [JSONObject(project_owner_i) for project_owner_i in project_owners_i]
def __float_window(window_spec):
'''Decorator function for windows with fractional input.
This function guarantees that for fractional `x`, the following hold:
1. `__float_window(window_function)(x)` has length `np.ceil(x)`
2. all values from `np.floor(x)` are set to 0.
For integer-valued `x`, there should be no change in behavior.
'''
def _wrap(n, *args, **kwargs):
'''The wrapped window'''
n_min, n_max = int(np.floor(n)), int(np.ceil(n))
window = get_window(window_spec, n_min)
if len(window) < n_max:
window = np.pad(window, [(0, n_max - len(window))],
mode='constant')
window[n_min:] = 0.0
return window
    return _wrap
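# Illustrative sketch (assumes the get_window referenced above resolves to
# scipy.signal.get_window or a compatible wrapper):
from scipy.signal import get_window  # assumed provider

hann_frac = __float_window('hann')
w = hann_frac(10.5)
print(len(w))   # 11 == np.ceil(10.5)
print(w[10])    # 0.0 -- indices from np.floor(10.5) onward are zeroed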
def rotate_z(self, angle):
"""
Rotates mesh about the z-axis.
Parameters
----------
angle : float
Angle in degrees to rotate about the z-axis.
"""
    axis_rotation(self.points, angle, inplace=True, axis='z')
def check(self, var):
"""Return True if the variable matches this type, and False otherwise."""
if self._class is None: self._init()
    return self._class and self._checker(var, self._class)
def find_serial_devices(serial_matcher="ED"):
"""
Finds a list of USB devices where the serial number (partially) matches the given string.
:param str serial_matcher (optional):
only device IDs starting with this string are returned
:rtype: List[str]
"""
objWMIService = win32com.client.Dispatch("WbemScripting.SWbemLocator")
    objSWbemServices = objWMIService.ConnectServer(".", r"root\cimv2")
items = objSWbemServices.ExecQuery("SELECT * FROM Win32_USBControllerDevice")
ids = (item.Dependent.strip('"')[-8:] for item in items)
    return [e for e in ids if e.startswith(serial_matcher)]
def spec(self):
"""Return a dict with values that can be fed directly into SelectiveRowGenerator"""
return dict(
headers=self.header_lines,
start=self.start_line,
comments=self.comment_lines,
end=self.end_line
    )
def return_tip(self) -> 'InstrumentContext':
"""
    If a tip is currently attached to the pipette, then it will return the
    tip to its location in the tiprack.
It will not reset tip tracking so the well flag will remain False.
:returns: This instance
"""
if not self.hw_pipette['has_tip']:
self._log.warning('Pipette has no tip to return')
loc = self._last_tip_picked_up_from
if not isinstance(loc, Well):
raise TypeError('Last tip location should be a Well but it is: '
'{}'.format(loc))
bot = loc.bottom()
bot = bot._replace(point=bot.point._replace(z=bot.point.z + 10))
self.drop_tip(bot)
    return self
def get_user_roles(self, user):
"""get permissions of a user"""
memberShipRecords = AuthMembership.objects(creator=self.client, user=user).only('groups')
results = []
for each in memberShipRecords:
for group in each.groups:
results.append({'role':group.role})
    return results
def update_share(self, share_id, **kwargs):
"""Updates a given share
:param share_id: (int) Share ID
:param perms: (int) update permissions (see share_file_with_user() below)
:param password: (string) updated password for public link Share
:param public_upload: (boolean) enable/disable public upload for public shares
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned
"""
perms = kwargs.get('perms', None)
password = kwargs.get('password', None)
public_upload = kwargs.get('public_upload', None)
if (isinstance(perms, int)) and (perms > self.OCS_PERMISSION_ALL):
perms = None
if not (perms or password or (public_upload is not None)):
return False
if not isinstance(share_id, int):
return False
data = {}
if perms:
data['permissions'] = perms
if isinstance(password, six.string_types):
data['password'] = password
if (public_upload is not None) and (isinstance(public_upload, bool)):
data['publicUpload'] = str(public_upload).lower()
res = self._make_ocs_request(
'PUT',
self.OCS_SERVICE_SHARE,
'shares/' + str(share_id),
data=data
)
if res.status_code == 200:
return True
    raise HTTPResponseError(res)
def LSL(self, a):
"""
Shifts all bits of accumulator A or B or memory location M one place to
the left. Bit zero is loaded with a zero. Bit seven of accumulator A or
B or memory location M is shifted into the C (carry) bit.
This is a duplicate assembly-language mnemonic for the single machine
instruction ASL.
source code forms: LSL Q; LSLA; LSLB
CC bits "HNZVC": naaas
"""
r = a << 1
self.clear_NZVC()
self.update_NZVC_8(a, a, r)
    return r
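# Illustrative sketch (standalone 8-bit arithmetic, independent of the CPU
# emulator above): logical shift left moves bit 7 into carry and zero-fills
# bit 0.
a = 0b11000001
r = (a << 1) & 0xFF        # 0b10000010
carry = (a >> 7) & 1       # old bit 7 -> 1
print(bin(r), carry)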
def get_available_palettes(chosen_palette):
''' Given a chosen palette, returns tuple of those available,
or None when not found.
Because palette support of a particular level is almost always a
superset of lower levels, this should return all available palettes.
    Returns:
        tuple, None: the available palettes, or None when not found.
    '''
result = None
try:
result = ALL_PALETTES[:ALL_PALETTES.index(chosen_palette)+1]
except ValueError:
pass
    return result
def set_sys(layout):
'''
Set current system keyboard setting
CLI Example:
.. code-block:: bash
salt '*' keyboard.set_sys dvorak
'''
if salt.utils.path.which('localectl'):
__salt__['cmd.run']('localectl set-keymap {0}'.format(layout))
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed']('/etc/sysconfig/keyboard',
'^LAYOUT=.*',
'LAYOUT={0}'.format(layout))
elif 'Debian' in __grains__['os_family']:
__salt__['file.sed']('/etc/default/keyboard',
'^XKBLAYOUT=.*',
'XKBLAYOUT={0}'.format(layout))
elif 'Gentoo' in __grains__['os_family']:
__salt__['file.sed']('/etc/conf.d/keymaps',
'^keymap=.*',
'keymap={0}'.format(layout))
    return layout
def eeg_to_df(eeg, index=None, include="all", exclude=None, hemisphere="both", central=True):
"""
Convert mne Raw or Epochs object to dataframe or dict of dataframes.
DOCS INCOMPLETE :(
"""
if isinstance(eeg, mne.Epochs):
data = {}
if index is None:
index = range(len(eeg))
for epoch_index, epoch in zip(index, eeg.get_data()):
epoch = pd.DataFrame(epoch.T)
epoch.columns = eeg.ch_names
epoch.index = eeg.times
selection = eeg_select_electrodes(eeg, include=include, exclude=exclude, hemisphere=hemisphere, central=central)
data[epoch_index] = epoch[selection]
else: # it might be a Raw object
data = eeg.get_data().T
data = pd.DataFrame(data)
data.columns = eeg.ch_names
data.index = eeg.times
    return data
def set_mtime(self, filename, mtime, size):
"""Store real file mtime in meta data.
This is needed on FTP targets, because FTP servers don't allow to set
file mtime, but use to the upload time instead.
We also record size and upload time, so we can detect if the file was
changed by other means and we have to discard our meta data.
"""
ut = time.time() # UTC time stamp
if self.target.server_time_ofs:
# We add the estimated time offset, so the stored 'u' time stamp matches
# better the mtime value that the server will generate for that file
ut += self.target.server_time_ofs
self.list[filename] = {"m": mtime, "s": size, "u": ut}
if self.PRETTY:
self.list[filename].update(
{"mtime_str": pretty_stamp(mtime), "uploaded_str": pretty_stamp(ut)}
)
# print("set_mtime", self.list[filename])
        self.modified_list = True
def countok(self):
"""
Boolean array showing which stars pass all count constraints.
A "count constraint" is a constraint that affects the number of stars.
"""
ok = np.ones(len(self.stars)).astype(bool)
for name in self.constraints:
c = self.constraints[name]
if c.name not in self.selectfrac_skip:
ok &= c.ok
    return ok
def parse_commit(parts):
'''Accept a parsed single commit. Some of the named groups
require further processing, so parse those groups.
Return a dictionary representing the completely parsed
commit.
'''
commit = {}
commit['commit'] = parts['commit']
commit['tree'] = parts['tree']
parent_block = parts['parents']
commit['parents'] = [
parse_parent_line(parentline)
for parentline in
parent_block.splitlines()
]
commit['author'] = parse_author_line(parts['author'])
commit['committer'] = parse_committer_line(parts['committer'])
message_lines = [
parse_message_line(msgline)
for msgline in
parts['message'].split("\n")
]
commit['message'] = "\n".join(
msgline
for msgline in
message_lines
if msgline is not None
)
commit['changes'] = [
parse_numstat_line(numstat)
for numstat in
parts['numstats'].splitlines()
]
    return commit
def boundplot(results, dims, it=None, idx=None, prior_transform=None,
periodic=None, ndraws=5000, color='gray', plot_kwargs=None,
labels=None, label_kwargs=None, max_n_ticks=5,
use_math_text=False, show_live=False, live_color='darkviolet',
live_kwargs=None, span=None, fig=None):
"""
Return the bounding distribution used to propose either (1) live points
at a given iteration or (2) a specific dead point during
the course of a run, projected onto the two dimensions specified
by `dims`.
Parameters
----------
results : :class:`~dynesty.results.Results` instance
A :class:`~dynesty.results.Results` instance from a nested
sampling run.
dims : length-2 tuple
The dimensions used to plot the bounding.
it : int, optional
If provided, returns the bounding distribution at the specified
iteration of the nested sampling run. **Note that this option and
`idx` are mutually exclusive.**
idx : int, optional
If provided, returns the bounding distribution used to propose the
dead point at the specified iteration of the nested sampling run.
**Note that this option and `it` are mutually exclusive.**
prior_transform : func, optional
The function transforming samples within the unit cube back to samples
in the native model space. If provided, the transformed bounding
distribution will be plotted in the native model space.
periodic : iterable, optional
A list of indices for parameters with periodic boundary conditions.
These parameters *will not* have their positions constrained to be
within the unit cube, enabling smooth behavior for parameters
that may wrap around the edge. It is assumed that their periodicity
is dealt with in the `prior_transform`.
Default is `None` (i.e. no periodic boundary conditions).
ndraws : int, optional
The number of random samples to draw from the bounding distribution
when plotting. Default is `5000`.
color : str, optional
The color of the points randomly sampled from the bounding
distribution. Default is `'gray'`.
plot_kwargs : dict, optional
Extra keyword arguments used when plotting the bounding draws.
labels : iterable with shape (ndim,), optional
A list of names for each parameter. If not provided, the default name
used when plotting will follow :math:`x_i` style.
label_kwargs : dict, optional
Extra keyword arguments that will be sent to the
`~matplotlib.axes.Axes.set_xlabel` and
`~matplotlib.axes.Axes.set_ylabel` methods.
max_n_ticks : int, optional
Maximum number of ticks allowed. Default is `5`.
use_math_text : bool, optional
Whether the axis tick labels for very large/small exponents should be
displayed as powers of 10 rather than using `e`. Default is `False`.
show_live : bool, optional
Whether the live points at a given iteration (for `it`) or
associated with the bounding (for `idx`) should be highlighted.
Default is `False`. In the dynamic case, only the live points
associated with the batch used to construct the relevant bound
are plotted.
live_color : str, optional
The color of the live points. Default is `'darkviolet'`.
live_kwargs : dict, optional
Extra keyword arguments used when plotting the live points.
span : iterable with shape (2,), optional
A list where each element is a length-2 tuple containing
lower and upper bounds. Default is `None` (no bound).
fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
If provided, overplot the draws onto the provided figure.
Otherwise, by default an internal figure is generated.
Returns
-------
bounding_plot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
Output plot of the bounding distribution.
"""
# Initialize values.
if plot_kwargs is None:
plot_kwargs = dict()
if label_kwargs is None:
label_kwargs = dict()
if live_kwargs is None:
live_kwargs = dict()
# Check that either `idx` or `it` has been specified.
if (it is None and idx is None) or (it is not None and idx is not None):
raise ValueError("You must specify either an iteration or an index!")
# Set defaults.
plot_kwargs['marker'] = plot_kwargs.get('marker', 'o')
plot_kwargs['linestyle'] = plot_kwargs.get('linestyle', 'None')
plot_kwargs['markersize'] = plot_kwargs.get('markersize', 1)
plot_kwargs['alpha'] = plot_kwargs.get('alpha', 0.4)
live_kwargs['marker'] = live_kwargs.get('marker', 'o')
live_kwargs['linestyle'] = live_kwargs.get('linestyle', 'None')
live_kwargs['markersize'] = live_kwargs.get('markersize', 1)
# Extract bounding distributions.
try:
bounds = results['bound']
except:
raise ValueError("No bounds were saved in the results!")
nsamps = len(results['samples'])
# Gather non-periodic boundary conditions.
if periodic is not None:
nonperiodic = np.ones(bounds[0].n, dtype='bool')
nonperiodic[periodic] = False
else:
nonperiodic = None
if it is not None:
if it >= nsamps:
raise ValueError("The iteration requested goes beyond the "
"number of iterations in the run.")
# Extract bound iterations.
try:
bound_iter = np.array(results['bound_iter'])
except:
raise ValueError("Cannot reconstruct the bound used at the "
"specified iteration since bound "
"iterations were not saved in the results.")
# Find bound at the specified iteration.
if it == 0:
pidx = 0
else:
pidx = bound_iter[it]
else:
if idx >= nsamps:
raise ValueError("The index requested goes beyond the "
"number of samples in the run.")
try:
samples_bound = results['samples_bound']
except:
raise ValueError("Cannot reconstruct the bound used to "
"compute the specified dead point since "
"sample bound indices were not saved "
"in the results.")
# Grab relevant bound.
pidx = samples_bound[idx]
# Get desired bound.
bound = bounds[pidx]
# Do we want to show the live points at the specified iteration?
# If so, we need to rewind our bound to check.
# (We could also go forward; this is an arbitrary choice.)
if show_live:
try:
# We can only reconstruct the run if the final set of live points
# were added to the results. This is true by default for dynamic
            # nested sampling runs but not guaranteed for standard runs.
nlive = results['nlive']
niter = results['niter']
if nsamps - niter != nlive:
raise ValueError("Cannot reconstruct bound because the "
"final set of live points are not included "
"in the results.")
# Grab our final set of live points (with proper IDs).
samples = results['samples_u']
samples_id = results['samples_id']
ndim = samples.shape[1]
live_u = np.empty((nlive, ndim))
live_u[samples_id[-nlive:]] = samples[-nlive:]
# Find generating bound ID if necessary.
if it is None:
it = results['samples_it'][idx]
# Run our sampling backwards.
for i in range(1, niter - it + 1):
r = -(nlive + i)
uidx = samples_id[r]
live_u[uidx] = samples[r]
except:
# In the dynamic sampling case, we will show the live points used
# during the batch associated with a particular iteration/bound.
batch = results['samples_batch'][it] # select batch
nbatch = results['batch_nlive'][batch] # nlive in the batch
bsel = results['samples_batch'] == batch # select batch
niter_eff = sum(bsel) - nbatch # "effective" iterations in batch
# Grab our final set of live points (with proper IDs).
samples = results['samples_u'][bsel]
samples_id = results['samples_id'][bsel]
samples_id -= min(samples_id) # re-index to start at zero
ndim = samples.shape[1]
live_u = np.empty((nbatch, ndim))
live_u[samples_id[-nbatch:]] = samples[-nbatch:]
# Find generating bound ID if necessary.
if it is None:
it = results['samples_it'][idx]
it_eff = sum(bsel[:it+1]) # effective iteration in batch
# Run our sampling backwards.
for i in range(1, niter_eff - it_eff + 1):
r = -(nbatch + i)
uidx = samples_id[r]
live_u[uidx] = samples[r]
# Draw samples from the bounding distribution.
try:
# If bound is "fixed", go ahead and draw samples from it.
psamps = bound.samples(ndraws)
except:
# If bound is based on the distribution of live points at a
# specific iteration, we need to reconstruct what those were.
if not show_live:
try:
# Only reconstruct the run if we haven't done it already.
nlive = results['nlive']
niter = results['niter']
if nsamps - niter != nlive:
raise ValueError("Cannot reconstruct bound because the "
"final set of live points are not "
"included in the results.")
# Grab our final set of live points (with proper IDs).
samples = results['samples_u']
samples_id = results['samples_id']
ndim = samples.shape[1]
live_u = np.empty((nlive, ndim))
live_u[samples_id[-nlive:]] = samples[-nlive:]
# Run our sampling backwards.
if it is None:
it = results['samples_it'][idx]
for i in range(1, niter - it + 1):
r = -(nlive + i)
uidx = samples_id[r]
live_u[uidx] = samples[r]
except:
raise ValueError("Live point tracking currently not "
"implemented for dynamic sampling results.")
# Construct a KDTree to speed up nearest-neighbor searches.
kdtree = spatial.KDTree(live_u)
# Draw samples.
psamps = bound.samples(ndraws, live_u, kdtree=kdtree)
# Projecting samples to input dimensions and possibly
# the native model space.
if prior_transform is None:
x1, x2 = psamps[:, dims].T
if show_live:
l1, l2 = live_u[:, dims].T
else:
# Remove points outside of the unit cube as appropriate.
sel = [unitcheck(point, nonperiodic) for point in psamps]
vsamps = np.array(list(map(prior_transform, psamps[sel])))
x1, x2 = vsamps[:, dims].T
if show_live:
lsamps = np.array(list(map(prior_transform, live_u)))
l1, l2 = lsamps[:, dims].T
# Setting up default plot layout.
if fig is None:
fig, axes = pl.subplots(1, 1, figsize=(6, 6))
else:
fig, axes = fig
try:
axes.plot()
except:
raise ValueError("Provided axes do not match the required shape "
"for plotting samples.")
# Plotting.
axes.plot(x1, x2, color=color, zorder=1, **plot_kwargs)
if show_live:
axes.plot(l1, l2, color=live_color, zorder=2, **live_kwargs)
# Setup axes
if span is not None:
axes.set_xlim(span[0])
axes.set_ylim(span[1])
if max_n_ticks == 0:
axes.xaxis.set_major_locator(NullLocator())
axes.yaxis.set_major_locator(NullLocator())
else:
axes.xaxis.set_major_locator(MaxNLocator(max_n_ticks))
axes.yaxis.set_major_locator(MaxNLocator(max_n_ticks))
# Label axes.
sf = ScalarFormatter(useMathText=use_math_text)
axes.xaxis.set_major_formatter(sf)
axes.yaxis.set_major_formatter(sf)
if labels is not None:
axes.set_xlabel(labels[0], **label_kwargs)
axes.set_ylabel(labels[1], **label_kwargs)
else:
axes.set_xlabel(r"$x_{"+str(dims[0]+1)+"}$", **label_kwargs)
axes.set_ylabel(r"$x_{"+str(dims[1]+1)+"}$", **label_kwargs)
    return fig, axes
def is_empty(self):
'''
Return `True` if form is valid and contains an empty lookup.
'''
return (self.is_valid() and
not self.simple_lookups and
not self.complex_conditions and
            not self.extra_conditions)
def client_receives_without_validation(self, *parameters):
"""Receive a message with template defined using `New Message`.
Message template has to be defined with `New Message` before calling
this.
Optional parameters:
- `name` the client name (default is the latest used) example: `name=Client 1`
- `timeout` for receiving message. example: `timeout=0.1`
    - `latest` if set to True, get the latest message from the buffer instead of the first. Default is False. Example: `latest=True`
Examples:
| ${msg} = | Client receives without validation |
| ${msg} = | Client receives without validation | name=Client1 | timeout=5 |
"""
with self._receive(self._clients, *parameters) as (msg, _, _):
        return msg
def replace_find(self, focus_replace_text=False, replace_all=False):
"""Replace and find"""
if (self.editor is not None):
replace_text = to_text_string(self.replace_text.currentText())
search_text = to_text_string(self.search_text.currentText())
re_pattern = None
# Check regexp before proceeding
if self.re_button.isChecked():
try:
re_pattern = re.compile(search_text)
# Check if replace_text can be substituted in re_pattern
# Fixes issue #7177
re_pattern.sub(replace_text, '')
except re.error:
# Do nothing with an invalid regexp
return
case = self.case_button.isChecked()
first = True
cursor = None
while True:
if first:
# First found
seltxt = to_text_string(self.editor.get_selected_text())
cmptxt1 = search_text if case else search_text.lower()
cmptxt2 = seltxt if case else seltxt.lower()
if re_pattern is None:
has_selected = self.editor.has_selected_text()
if has_selected and cmptxt1 == cmptxt2:
# Text was already found, do nothing
pass
else:
if not self.find(changed=False, forward=True,
rehighlight=False):
break
else:
if len(re_pattern.findall(cmptxt2)) > 0:
pass
else:
if not self.find(changed=False, forward=True,
rehighlight=False):
break
first = False
wrapped = False
position = self.editor.get_position('cursor')
position0 = position
cursor = self.editor.textCursor()
cursor.beginEditBlock()
else:
position1 = self.editor.get_position('cursor')
if is_position_inf(position1,
position0 + len(replace_text) -
len(search_text) + 1):
# Identify wrapping even when the replace string
# includes part of the search string
wrapped = True
if wrapped:
if position1 == position or \
is_position_sup(position1, position):
# Avoid infinite loop: replace string includes
# part of the search string
break
if position1 == position0:
# Avoid infinite loop: single found occurrence
break
position0 = position1
if re_pattern is None:
cursor.removeSelectedText()
cursor.insertText(replace_text)
else:
seltxt = to_text_string(cursor.selectedText())
cursor.removeSelectedText()
cursor.insertText(re_pattern.sub(replace_text, seltxt))
if self.find_next():
found_cursor = self.editor.textCursor()
cursor.setPosition(found_cursor.selectionStart(),
QTextCursor.MoveAnchor)
cursor.setPosition(found_cursor.selectionEnd(),
QTextCursor.KeepAnchor)
else:
break
if not replace_all:
break
if cursor is not None:
cursor.endEditBlock()
if focus_replace_text:
            self.replace_text.setFocus()
def readerForDoc(cur, URL, encoding, options):
"""Create an xmltextReader for an XML in-memory document. The
parsing flags @options are a combination of xmlParserOption. """
ret = libxml2mod.xmlReaderForDoc(cur, URL, encoding, options)
if ret is None:raise treeError('xmlReaderForDoc() failed')
    return xmlTextReader(_obj=ret)
def _parse(self, infile):
"""Actually parse the config file."""
temp_list_values = self.list_values
if self.unrepr:
self.list_values = False
comment_list = []
done_start = False
this_section = self
maxline = len(infile) - 1
cur_index = -1
reset_comment = False
while cur_index < maxline:
if reset_comment:
comment_list = []
cur_index += 1
line = infile[cur_index]
sline = line.strip()
# do we have anything on the line ?
if not sline or sline.startswith('#'):
reset_comment = False
comment_list.append(line)
continue
if not done_start:
# preserve initial comment
self.initial_comment = comment_list
comment_list = []
done_start = True
reset_comment = True
# first we check if it's a section marker
mat = self._sectionmarker.match(line)
if mat is not None:
# is a section line
(indent, sect_open, sect_name, sect_close, comment) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
cur_depth = sect_open.count('[')
if cur_depth != sect_close.count(']'):
self._handle_error("Cannot compute the section depth at line %s.",
NestingError, infile, cur_index)
continue
if cur_depth < this_section.depth:
# the new section is dropping back to a previous level
try:
parent = self._match_depth(this_section,
cur_depth).parent
except SyntaxError:
self._handle_error("Cannot compute nesting level at line %s.",
NestingError, infile, cur_index)
continue
elif cur_depth == this_section.depth:
# the new section is a sibling of the current section
parent = this_section.parent
elif cur_depth == this_section.depth + 1:
# the new section is a child the current section
parent = this_section
else:
self._handle_error("Section too nested at line %s.",
NestingError, infile, cur_index)
sect_name = self._unquote(sect_name)
if sect_name in parent:
self._handle_error('Duplicate section name at line %s.',
DuplicateError, infile, cur_index)
continue
# create the new section
this_section = Section(
parent,
cur_depth,
self,
name=sect_name)
parent[sect_name] = this_section
parent.inline_comments[sect_name] = comment
parent.comments[sect_name] = comment_list
continue
#
# it's not a section marker,
# so it should be a valid ``key = value`` line
mat = self._keyword.match(line)
if mat is None:
# it neither matched as a keyword
# or a section marker
self._handle_error(
'Invalid line at line "%s".',
ParseError, infile, cur_index)
else:
# is a keyword value
# value will include any inline comment
(indent, key, value) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
# check for a multiline value
if value[:3] in ['"""', "'''"]:
try:
value, comment, cur_index = self._multiline(
value, infile, cur_index, maxline)
except SyntaxError:
self._handle_error(
'Parse error in value at line %s.',
ParseError, infile, cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if type(e) == UnknownType:
msg = 'Unknown name or type in value at line %s.'
else:
msg = 'Parse error in value at line %s.'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if isinstance(e, UnknownType):
msg = 'Unknown name or type in value at line %s.'
else:
msg = 'Parse error in value at line %s.'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
# extract comment and lists
try:
(value, comment) = self._handle_value(value)
except SyntaxError:
self._handle_error(
'Parse error in value at line %s.',
ParseError, infile, cur_index)
continue
#
key = self._unquote(key)
if key in this_section:
self._handle_error(
'Duplicate keyword name at line %s.',
DuplicateError, infile, cur_index)
continue
# add the key.
# we set unrepr because if we have got this far we will never
# be creating a new section
this_section.__setitem__(key, value, unrepr=True)
this_section.inline_comments[key] = comment
this_section.comments[key] = comment_list
continue
#
if self.indent_type is None:
# no indentation used, set the type accordingly
self.indent_type = ''
# preserve the final comment
if not self and not self.initial_comment:
self.initial_comment = comment_list
elif not reset_comment:
self.final_comment = comment_list
    self.list_values = temp_list_values
def build_method_map(self, prototype, prefix=''):
""" Add prototype methods to the dispatcher.
Parameters
----------
prototype : object or dict
Initial method mapping.
If given prototype is a dictionary then all callable objects will
be added to dispatcher.
If given prototype is an object then all public methods will
be used.
prefix: string, optional
Prefix of methods
"""
if not isinstance(prototype, dict):
prototype = dict((method, getattr(prototype, method))
for method in dir(prototype)
if not method.startswith('_'))
for attr, method in prototype.items():
if callable(method):
            self[prefix + attr] = method
def colorpalette(self, colorpalette):
"""
Set the colorpalette which should be used
"""
if isinstance(colorpalette, str): # we assume it's a path to a color file
colorpalette = colors.parse_colors(colorpalette)
    self._colorpalette = colors.sanitize_color_palette(colorpalette)
def urlencode(txt):
"""Url encode a path."""
if isinstance(txt, unicode):
txt = txt.encode('utf-8')
    return urllib.quote_plus(txt)
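# Illustrative note (assumes Python 3): there the str/bytes dance above is
# unnecessary, since urllib.parse.quote_plus encodes str as UTF-8 itself.
from urllib.parse import quote_plus

print(quote_plus('a path/with spaces & ünicode'))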
def _format_response(rows, fields, unique_col_names):
"""This function will look at the data column of rows and extract the specified fields. It
will also dedup changes where the specified fields have not changed. The list of rows should
be ordered by the compound primary key which versioning pivots around and be in ascending
version order.
This function will return a list of dictionaries where each dictionary has the following
schema:
{
'updated_at': timestamp of the change,
'version': version number for the change,
'data': a nested dictionary containing all keys specified in fields and values
corresponding to values in the user table.
}
Note that some versions may be omitted in the output for the same key if the specified fields
were not changed between versions.
    :param rows: a list of dictionaries representing rows from the ArchiveTable.
    :param fields: a list of strings of fields to be extracted from the archived row.
    :param unique_col_names: the column names forming the compound primary key
        that versioning pivots around.
    """
output = []
old_id = None
for row in rows:
id_ = {k: row[k] for k in unique_col_names}
formatted = {k: row[k] for k in row if k != 'data'}
if id_ != old_id: # new unique versioned row
data = row['data']
formatted['data'] = {k: data.get(k) for k in fields}
output.append(formatted)
else:
data = row['data']
pruned_data = {k: data.get(k) for k in fields}
if (
pruned_data != output[-1]['data'] or
row['deleted'] != output[-1]['deleted']
):
formatted['data'] = pruned_data
output.append(formatted)
old_id = id_
    return output
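# Illustrative sketch (toy archive rows; assumes the function above is in
# scope): version 2 is deduped away because the tracked field did not change.
rows = [
    {'id': 1, 'version': 1, 'deleted': False, 'data': {'name': 'a', 'age': 1}},
    {'id': 1, 'version': 2, 'deleted': False, 'data': {'name': 'a', 'age': 2}},
    {'id': 1, 'version': 3, 'deleted': False, 'data': {'name': 'b', 'age': 2}},
]
out = _format_response(rows, ['name'], ['id'])
print([o['version'] for o in out])   # [1, 3]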
def create_driver_script(name, create=None): # noqa: E501
"""Create a new script
Create a new script # noqa: E501
:param name: Get status of a driver with this name
:type name: str
:param create: The data needed to create this script
:type create: dict | bytes
:rtype: Response
"""
if connexion.request.is_json:
create = Create1.from_dict(connexion.request.get_json()) # noqa: E501
response = errorIfUnauthorized(role='developer')
if response:
return response
else:
response = ApitaxResponse()
driver: Driver = LoadedDrivers.getDriver(name)
driver.saveDriverScript(create.script.name, create.script.content)
    return Response(status=200, body=response.getResponseBody())
def create_default_views(self, create_datastore_views=False):
# type: (bool) -> None
"""Create default resource views for all resources in dataset
Args:
create_datastore_views (bool): Whether to try to create resource views that point to the datastore
Returns:
None
"""
package = deepcopy(self.data)
if self.resources:
package['resources'] = self._convert_hdxobjects(self.resources)
data = {'package': package, 'create_datastore_views': create_datastore_views}
        self._write_to_hdx('create_default_views', data, 'package')
def push_record_set(self, **kwargs):
"""
Push build config set record to Brew.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.push_record_set(callback=callback_function)
        :param function callback: The callback function
            for an asynchronous request. (optional)
:param BuildConfigSetRecordPushRequestRest body:
:return: list[ResultRest]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.push_record_set_with_http_info(**kwargs)
else:
(data) = self.push_record_set_with_http_info(**kwargs)
return data | 0.00293 |
def _preprocess_data(self, data):
"""
At this point, the data either has a `read` attribute (e.g. a file
object or a StringIO) or is a string that is a JSON document.
If self.chunksize, we prepare the data for the `__next__` method.
Otherwise, we read it into memory for the `read` method.
"""
if hasattr(data, 'read') and not self.chunksize:
data = data.read()
if not hasattr(data, 'read') and self.chunksize:
data = StringIO(data)
return data | 0.00369 |
def style_ansi(raw_code, lang=None):
""" actual code hilite """
    lexer = None
    if lang:
        try:
            lexer = get_lexer_by_name(lang)
        except ValueError:
            print(col(R, 'Lexer for %s not found' % lang))
            lexer = None
    if not lexer:
        try:
            if guess_lexer:
                lexer = pyg_guess_lexer(raw_code)
        except Exception:
            pass
if not lexer:
lexer = get_lexer_by_name(def_lexer)
tokens = lex(raw_code, lexer)
cod = []
for t, v in tokens:
if not v:
continue
_col = code_hl_tokens.get(t)
if _col:
cod.append(col(v, _col))
else:
cod.append(v)
return ''.join(cod) | 0.002766 |
def squad_v2_f1(y_true: List[List[str]], y_predicted: List[str]) -> float:
""" Calculates F-1 score between y_true and y_predicted
F-1 score uses the best matching y_true answer
The same as in SQuAD-v2.0
Args:
y_true: list of correct answers (correct answers are represented by list of strings)
y_predicted: list of predicted answers
Returns:
F-1 score : float
"""
f1_total = 0.0
for ground_truth, prediction in zip(y_true, y_predicted):
prediction_tokens = normalize_answer(prediction).split()
f1s = []
for gt in ground_truth:
gt_tokens = normalize_answer(gt).split()
if len(gt_tokens) == 0 or len(prediction_tokens) == 0:
f1s.append(float(gt_tokens == prediction_tokens))
continue
common = Counter(prediction_tokens) & Counter(gt_tokens)
num_same = sum(common.values())
if num_same == 0:
f1s.append(0.0)
continue
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(gt_tokens)
f1 = (2 * precision * recall) / (precision + recall)
f1s.append(f1)
f1_total += max(f1s)
return 100 * f1_total / len(y_true) if len(y_true) > 0 else 0 | 0.001505 |
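A usage sketch, assuming squad_v2_f1 and its normalize_answer dependency (SQuAD-style lowercasing plus punctuation and article removal) are in scope:

y_true = [['the cat', 'a cat'], ['blue']]
y_predicted = ['the cat sat', 'red']
print(squad_v2_f1(y_true, y_predicted))
# first pair overlaps on 'cat' (f1 ~ 0.67), second pair has no overlap,
# so the result is roughly 33.3 under SQuAD-style normalization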
def get(name):
"""
Returns a matcher instance by class or alias name.
Arguments:
name (str): matcher class name or alias.
Returns:
matcher: found matcher instance, otherwise ``None``.
"""
for matcher in matchers:
if matcher.__name__ == name or getattr(matcher, 'name', None) == name:
return matcher | 0.002778 |
def get_pole(continent,age):
"""
    Returns the rotation pole and angle for the specified continent and age,
    assuming a fixed Africa.
    Parameters
    ----------
    continent :
        aus : Australia
        eur : Eurasia
        mad : Madagascar
        [nwaf,congo] : NW Africa [choose one]
        col : Colombia
        grn : Greenland
        nam : North America
        par : Paraguay
        eant : East Antarctica
        ind : India
        [neaf,kala] : NE Africa [choose one]
        [sac,sam] : South America [choose one]
        ib : Iberia
        saf : South Africa
    age : int
        age in Ma; must match one of the tabulated 5 Myr steps
    Returns
    -------
    [pole longitude, pole latitude, rotation angle] : for the continent at the specified age
"""
age=int(age)
if continent=='congo':continent='nwaf'
if continent=='kala':continent='neaf'
if continent=='sam':continent='sac'
if continent=='ant':continent='eant'
if continent=='af':
return [0,0,0] # assume africa fixed
if continent=='aus':
cont= [[5,9.7,54.3,-3.3],[10,10.4,52.8,-6.2],[15,11.5,49.8,-9.0],[20,12.4,48.0,-11.8],[25,12.9,48.3,-15.0],[30,12.8,49.9,-18.1],[35,13.5,50.8,-20.9],[40,14.1,52.7,-22.1],[45,14.4,54.7,-22.9],[50,14.7,56.5,-23.6],[55,14.0,57.3,-24.7],[60,12.9,57.9,-25.7],[65,13.6,58.8,-26.3],[70,17.3,60.2,-26.3],[75,19.8,63.3,-26.7],[80,20.5,68.5,-26.6],[85,19.8,74.6,-26.9],[90,17.7,80.9,-28.9],[95,15.9,86.2,-31.1],[100,18.4,89.3,-30.7],[105,17.9,95.6,-32.6],[110,17.3,101.0,-34.8],[115,16.8,105.6,-37.4],[120,16.4,109.4,-40.3],[125,15.7,110.3,-42.3],[130,15.9,111.6,-44.4],[135,15.9,113.1,-46.6],[140,15.6,113.7,-48.3],[145,15.0,113.1,-50.5],[150,15.5,113.5,-52.5],[155,17.6,115.7,-54.3],[160,19.5,117.8,-56.2],[165,19.5,117.8,-56.2],[170,19.5,117.8,-56.2],[175,19.5,117.8,-56.2],[180,19.5,117.8,-56.2],[185,19.5,117.8,-56.2],[190,19.5,117.8,-56.2],[195,19.5,117.8,-56.2],[200,19.5,117.8,-56.2],[205,19.5,117.8,-56.2],[210,19.5,117.8,-56.2],[215,19.5,117.8,-56.2],[220,19.5,117.8,-56.2],[225,19.5,117.8,-56.2],[230,19.5,117.8,-56.2],[235,19.5,117.8,-56.2],[240,19.5,117.8,-56.2],[245,19.5,117.8,-56.2],[250,19.5,117.8,-56.2],[255,19.5,117.8,-56.2],[260,19.5,117.8,-56.2],[265,19.5,117.8,-56.2],[270,19.5,117.8,-56.2],[275,19.5,117.8,-56.2],[280,19.5,117.8,-56.2],[285,19.5,117.8,-56.2],[290,19.5,117.8,-56.2],[295,19.5,117.8,-56.2],[300,19.5,117.8,-56.2],[305,19.5,117.8,-56.2],[310,19.5,117.8,-56.2],[315,19.5,117.8,-56.2],[320,19.5,117.8,-56.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='eur':
cont= [[5,17.9,-27.1,0.6],[10,18.4,-26.3,1.2],[15,18.9,-24.6,1.8],[20,17.2,-22.7,2.4],[25,20.7,-19.0,3.0],[30,24.9,-19.5,4.3],[35,27.2,-19.3,5.8],[40,28.7,-18.5,7.5],[45,30.3,-18.2,9.0],[50,30.8,-16.7,10.0],[55,32.7,-15.4,11.3],[60,34.8,-15.7,12.6],[65,36.0,-15.8,13.6],[70,35.4,-16.1,14.9],[75,35.5,-15.7,15.5],[80,36.1,-15.2,16.9],[85,37.0,-14.2,18.8],[90,39.6,-13.7,21.9],[95,39.8,-13.7,25.2],[100,40.2,-12.5,28.5],[105,41.6,-11.2,31.7],[110,42.6,-9.8,34.5],[115,43.4,-8.5,37.3],[120,44.5,-6.9,40.3],[125,45.3,-6.3,42.0],[130,45.9,-5.7,43.0],[135,46.6,-5.3,44.0],[140,47.3,-4.9,45.2],[145,47.8,-4.8,46.4],[150,48.6,-4.0,47.9],[155,49.8,-2.2,50.0],[160,50.6,-1.2,52.1],[165,51.4,-0.3,54.2],[170,52.1,0.6,56.3],[175,52.9,1.9,59.6],[180,53.0,2.0,60.0],[185,53.0,2.0,60.4],[190,53.1,2.1,60.8],[195,53.2,2.2,61.1],[200,53.3,2.2,61.5],[205,53.2,2.6,59.7],[210,53.1,2.9,57.8],[215,53.1,3.3,55.9],[220,52.9,3.6,53.6],[225,52.7,4.0,51.4],[230,52.4,4.4,49.1],[235,52.2,4.8,46.8],[240,51.9,5.3,44.5],[245,51.9,5.3,44.5],[250,51.9,5.3,44.5],[255,51.9,5.3,44.5],[260,51.9,5.3,44.5],[265,51.9,5.3,44.5],[270,51.9,5.3,44.5],[275,51.9,5.3,44.5],[280,51.9,5.3,44.5],[285,51.9,5.3,44.5],[290,51.9,5.3,44.5],[295,51.9,5.3,44.5],[300,51.9,5.3,44.5],[305,51.9,5.3,44.5],[310,51.9,5.3,44.5],[315,51.9,5.3,44.5],[320,51.9,5.3,44.5]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='mad':
cont= [[5,90.0,0.0,0.0],[10,90.0,0.0,0.0],[15,90.0,0.0,0.0],[20,90.0,0.0,0.0],[25,90.0,0.0,0.0],[30,90.0,0.0,0.0],[35,90.0,0.0,0.0],[40,90.0,0.0,0.0],[45,90.0,0.0,0.0],[50,90.0,0.0,0.0],[55,90.0,0.0,0.0],[60,90.0,0.0,0.0],[65,90.0,0.0,0.0],[70,90.0,0.0,0.0],[75,90.0,0.0,0.0],[80,90.0,0.0,0.0],[85,90.0,0.0,0.0],[90,90.0,0.0,0.0],[95,90.0,0.0,0.0],[100,90.0,0.0,0.0],[105,90.0,0.0,0.0],[110,90.0,0.0,0.0],[115,90.0,0.0,0.0],[120,90.0,0.0,0.0],[125,2.6,-63.3,1.8],[130,2.6,-63.3,3.9],[135,1.5,-57.6,5.7],[140,1.0,-55.9,7.2],[145,0.6,118.3,-8.9],[150,4.5,119.8,-10.9],[155,10.6,130.1,-13.0],[160,14.8,137.5,-15.4],[165,14.8,137.5,-15.4],[170,14.8,137.5,-15.4],[175,14.8,137.5,-15.4],[180,14.8,137.5,-15.4],[185,14.8,137.5,-15.4],[190,14.8,137.5,-15.4],[195,14.8,137.5,-15.4],[200,14.8,137.5,-15.4],[205,14.8,137.5,-15.4],[210,14.8,137.5,-15.4],[215,14.8,137.5,-15.4],[220,14.8,137.5,-15.4],[225,14.8,137.5,-15.4],[230,14.8,137.5,-15.4],[235,14.8,137.5,-15.4],[240,14.8,137.5,-15.4],[245,14.8,137.5,-15.4],[250,14.8,137.5,-15.4],[255,14.8,137.5,-15.4],[260,14.8,137.5,-15.4],[265,14.8,137.5,-15.4],[270,14.8,137.5,-15.4],[275,14.8,137.5,-15.4],[280,14.8,137.5,-15.4],[285,14.8,137.5,-15.4],[290,14.8,137.5,-15.4],[295,14.8,137.5,-15.4],[300,14.8,137.5,-15.4],[305,14.8,137.5,-15.4],[310,14.8,137.5,-15.4],[315,14.8,137.5,-15.4],[320,14.8,137.5,-15.4]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='nwaf':
cont= [[5,90.0,0.0,0.0],[10,90.0,0.0,0.0],[15,90.0,0.0,0.0],[20,90.0,0.0,0.0],[25,90.0,0.0,0.0],[30,90.0,0.0,0.0],[35,90.0,0.0,0.0],[40,90.0,0.0,0.0],[45,90.0,0.0,0.0],[50,90.0,0.0,0.0],[55,90.0,0.0,0.0],[60,90.0,0.0,0.0],[65,90.0,0.0,0.0],[70,90.0,0.0,0.0],[75,90.0,0.0,0.0],[80,90.0,0.0,0.0],[85,19.6,6.7,0.0],[90,16.6,6.7,-0.2],[95,16.5,6.7,-0.4],[100,16.5,6.7,-0.5],[105,16.5,6.7,-0.7],[110,16.5,6.7,-0.8],[115,16.5,6.7,-1.0],[120,16.5,6.7,-1.1],[125,16.5,6.7,-1.2],[130,16.5,6.7,-1.2],[135,16.5,6.7,-1.2],[140,16.5,6.7,-1.2],[145,16.5,6.7,-1.2],[150,16.5,6.7,-1.2],[155,16.5,6.7,-1.2],[160,16.5,6.7,-1.2],[165,16.5,6.7,-1.2],[170,16.5,6.7,-1.2],[175,16.5,6.7,-1.2],[180,16.5,6.7,-1.2],[185,16.5,6.7,-1.2],[190,16.5,6.7,-1.2],[195,16.5,6.7,-1.2],[200,16.5,6.7,-1.2],[205,16.5,6.7,-1.2],[210,16.5,6.7,-1.2],[215,16.5,6.7,-1.2],[220,16.5,6.7,-1.2],[225,16.5,6.7,-1.2],[230,16.5,6.7,-1.2],[235,16.5,6.7,-1.2],[240,16.5,6.7,-1.2],[245,16.5,6.7,-1.2],[250,16.5,6.7,-1.2],[255,16.5,6.7,-1.2],[260,16.5,6.7,-1.2],[265,16.5,6.7,-1.2],[270,16.5,6.7,-1.2],[275,16.5,6.7,-1.2],[280,16.5,6.7,-1.2],[285,16.5,6.7,-1.2],[290,16.5,6.7,-1.2],[295,16.5,6.7,-1.2],[300,16.5,6.7,-1.2],[305,16.5,6.7,-1.2],[310,16.5,6.7,-1.2],[315,16.5,6.7,-1.2],[320,16.5,6.7,-1.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='col':
cont= [[5,62.1,-40.2,1.6],[10,61.8,-40.3,3.3],[15,59.6,-38.1,5.4],[20,58.5,-37.1,7.5],[25,57.7,-36.4,9.6],[30,56.7,-34.5,11.9],[35,56.5,-33.4,14.3],[40,57.1,-32.6,16.6],[45,57.0,-31.4,18.6],[50,58.2,-31.2,20.5],[55,60.7,-31.9,22.0],[60,62.5,-32.8,23.3],[65,63.7,-33.5,24.6],[70,63.5,-33.4,26.1],[75,63.2,-33.9,28.6],[80,62.7,-34.3,31.5],[85,61.2,-34.3,34.4],[90,59.1,-34.5,37.3],[95,57.2,-34.7,40.3],[100,55.7,-34.8,43.3],[105,54.3,-34.9,46.4],[110,53.1,-35.0,49.5],[115,52.2,-35.0,51.7],[120,51.6,-35.0,52.8],[125,50.7,-33.9,54.0],[130,48.5,-33.4,55.4],[135,47.5,-33.3,56.0],[140,47.5,-33.3,56.1],[145,47.5,-33.3,56.1],[150,47.5,-33.3,56.2],[155,47.5,-33.3,56.2],[160,47.5,-33.3,56.2],[165,47.5,-33.3,56.2],[170,47.5,-33.3,56.2],[175,47.5,-33.3,56.2],[180,47.5,-33.3,56.2],[185,47.5,-33.3,56.2],[190,47.5,-33.3,56.2],[195,47.5,-33.3,56.2],[200,47.5,-33.3,56.2],[205,47.5,-33.3,56.2],[210,47.5,-33.3,56.2],[215,47.5,-33.3,56.2],[220,47.5,-33.3,56.2],[225,47.5,-33.3,56.2],[230,47.5,-33.3,56.2],[235,47.5,-33.3,56.2],[240,47.5,-33.3,56.2],[245,47.5,-33.3,56.2],[250,47.5,-33.3,56.2],[255,47.5,-33.3,56.2],[260,47.5,-33.3,56.2],[265,47.5,-33.3,56.2],[270,47.5,-33.3,56.2],[275,47.5,-33.3,56.2],[280,47.5,-33.3,56.2],[285,47.5,-33.3,56.2],[290,47.5,-33.3,56.2],[295,47.5,-33.3,56.2],[300,47.5,-33.3,56.2],[305,47.5,-33.3,56.2],[310,47.5,-33.3,56.2],[315,47.5,-33.3,56.2],[320,47.5,-33.3,56.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='grn':
cont= [[5,80.9,22.8,1.3],[10,80.9,22.9,2.6],[15,80.9,23.2,4.1],[20,80.6,24.4,5.5],[25,79.5,28.1,6.8],[30,77.3,12.5,8.6],[35,74.8,7.2,10.2],[40,72.6,9.5,11.5],[45,71.4,11.4,12.7],[50,71.0,20.7,14.2],[55,71.8,29.6,16.8],[60,71.9,30.5,17.5],[65,71.3,32.9,17.6],[70,69.8,29.0,17.9],[75,69.0,26.6,18.5],[80,67.6,21.0,19.8],[85,66.3,16.4,21.5],[90,65.9,11.5,24.2],[95,64.2,5.5,26.9],[100,62.7,2.8,30.1],[105,62.4,1.6,33.3],[110,62.1,0.9,36.5],[115,61.8,0.5,39.7],[120,61.8,0.8,43.1],[125,61.9,1.0,44.9],[130,62.2,1.3,46.0],[135,62.4,1.6,47.1],[140,62.7,1.6,48.4],[145,62.9,1.3,49.7],[150,63.2,1.8,51.4],[155,63.7,3.6,53.8],[160,64.1,4.2,56.0],[165,64.4,4.8,58.3],[170,64.7,5.3,60.6],[175,64.8,6.0,64.1],[180,64.9,6.0,64.5],[185,64.9,5.9,64.9],[190,65.0,5.9,65.4],[195,65.0,5.8,65.8],[200,65.1,5.8,66.2],[205,65.1,5.7,66.7],[210,65.2,5.7,67.1],[215,65.2,5.6,67.5],[220,65.2,5.6,67.5],[225,65.2,5.6,67.5],[230,65.2,5.6,67.5],[235,65.2,5.6,67.5],[240,65.2,5.6,67.5],[245,65.2,5.6,67.5],[250,65.2,5.6,67.5],[255,65.2,5.6,67.5],[260,65.2,5.6,67.5],[265,65.2,5.6,67.5],[270,65.2,5.6,67.5],[275,65.2,5.6,67.5],[280,65.2,5.6,67.5],[285,65.2,5.6,67.5],[290,65.2,5.6,67.5],[295,65.2,5.6,67.5],[300,65.2,5.6,67.5],[305,65.2,5.6,67.5],[310,65.2,5.6,67.5],[315,65.2,5.6,67.5],[320,65.2,5.6,67.5]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='nam':
cont= [[5,80.9,22.8,1.3],[10,80.9,22.9,2.6],[15,80.9,23.2,4.1],[20,80.6,24.4,5.5],[25,79.5,28.1,6.8],[30,77.3,12.5,8.6],[35,75.4,3.5,10.5],[40,74.5,-1.1,12.6],[45,74.3,-4.3,14.6],[50,75.9,-3.5,16.2],[55,79.8,4.1,17.6],[60,81.6,5.1,19.1],[65,82.6,3.2,20.7],[70,81.6,-6.5,22.4],[75,80.4,-13.1,24.6],[80,78.2,-18.8,27.5],[85,76.2,-21.3,30.5],[90,74.6,-23.0,33.8],[95,72.0,-24.7,36.9],[100,70.0,-24.0,40.2],[105,69.1,-23.3,43.6],[110,68.3,-22.6,47.0],[115,67.6,-21.8,50.4],[120,67.1,-20.4,53.9],[125,67.0,-19.7,55.6],[130,67.0,-19.1,56.7],[135,67.1,-18.7,57.9],[140,67.2,-18.4,59.2],[145,67.1,-18.3,60.5],[150,67.3,-17.6,62.2],[155,67.6,-15.5,64.6],[160,67.6,-14.5,66.8],[165,67.7,-13.6,69.1],[170,67.8,-12.8,71.4],[175,67.7,-11.5,74.8],[180,67.7,-11.5,75.3],[185,67.7,-11.5,75.7],[190,67.7,-11.5,76.1],[195,67.7,-11.5,76.6],[200,67.7,-11.5,77.0],[205,67.7,-11.5,77.4],[210,67.7,-11.5,77.9],[215,67.7,-11.5,78.3],[220,67.7,-11.5,78.3],[225,67.7,-11.5,78.3],[230,67.7,-11.5,78.3],[235,67.7,-11.5,78.3],[240,67.7,-11.5,78.3],[245,67.7,-11.5,78.3],[250,67.7,-11.5,78.3],[255,67.7,-11.5,78.3],[260,67.7,-11.5,78.3],[265,67.7,-11.5,78.3],[270,67.7,-11.5,78.3],[275,67.7,-11.5,78.3],[280,67.7,-11.5,78.3],[285,67.7,-11.5,78.3],[290,67.7,-11.5,78.3],[295,67.7,-11.5,78.3],[300,67.7,-11.5,78.3],[305,67.7,-11.5,78.3],[310,67.7,-11.5,78.3],[315,67.7,-11.5,78.3],[320,67.7,-11.5,78.3]]
for rec in cont:
if int(age)==int(rec[0]):
pole= [rec[1],rec[2],rec[3]]
return pole
if continent=='par':
cont= [[5,62.1,-40.2,1.6],[10,61.8,-40.3,3.3],[15,59.6,-38.1,5.4],[20,58.5,-37.1,7.5],[25,57.7,-36.4,9.6],[30,56.7,-34.5,11.9],[35,56.5,-33.4,14.3],[40,57.1,-32.6,16.6],[45,57.0,-31.4,18.6],[50,58.2,-31.2,20.5],[55,60.7,-31.9,22.0],[60,62.5,-32.8,23.3],[65,63.7,-33.5,24.6],[70,63.5,-33.4,26.1],[75,63.2,-33.9,28.6],[80,62.7,-34.3,31.5],[85,61.2,-34.3,34.4],[90,59.1,-34.5,37.3],[95,57.2,-34.7,40.3],[100,55.7,-34.8,43.3],[105,54.3,-34.9,46.4],[110,53.1,-35.0,49.5],[115,52.2,-35.0,51.7],[120,51.6,-35.0,52.8],[125,50.7,-33.9,54.0],[130,48.5,-33.4,55.4],[135,47.5,-33.3,56.0],[140,47.5,-33.3,56.1],[145,47.5,-33.3,56.1],[150,47.5,-33.3,56.2],[155,47.5,-33.3,56.2],[160,47.5,-33.3,56.2],[165,47.5,-33.3,56.2],[170,47.5,-33.3,56.2],[175,47.5,-33.3,56.2],[180,47.5,-33.3,56.2],[185,47.5,-33.3,56.2],[190,47.5,-33.3,56.2],[195,47.5,-33.3,56.2],[200,47.5,-33.3,56.2],[205,47.5,-33.3,56.2],[210,47.5,-33.3,56.2],[215,47.5,-33.3,56.2],[220,47.5,-33.3,56.2],[225,47.5,-33.3,56.2],[230,47.5,-33.3,56.2],[235,47.5,-33.3,56.2],[240,47.5,-33.3,56.2],[245,47.5,-33.3,56.2],[250,47.5,-33.3,56.2],[255,47.5,-33.3,56.2],[260,47.5,-33.3,56.2],[265,47.5,-33.3,56.2],[270,47.5,-33.3,56.2],[275,47.5,-33.3,56.2],[280,47.5,-33.3,56.2],[285,47.5,-33.3,56.2],[290,47.5,-33.3,56.2],[295,47.5,-33.3,56.2],[300,47.5,-33.3,56.2],[305,47.5,-33.3,56.2],[310,47.5,-33.3,56.2],[315,47.5,-33.3,56.2],[320,47.5,-33.3,56.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='eant':
cont= [[5,8.2,-49.4,0.8],[10,8.2,-49.4,1.5],[15,9.8,-48.4,2.1],[20,10.7,-47.9,2.8],[25,11.4,-48.2,3.8],[30,11.8,-48.3,4.8],[35,12.5,-46.1,6.0],[40,13.6,-41.5,7.4],[45,11.1,-41.1,8.5],[50,9.1,-40.9,9.6],[55,9.4,-43.5,10.3],[60,10.6,-47.4,10.8],[65,8.1,-47.7,11.3],[70,0.4,-43.3,12.2],[75,3.7,138.9,-13.8],[80,2.7,142.7,-16.1],[85,0.6,144.7,-18.8],[90,1.4,-37.0,22.3],[95,2.9,-38.3,25.8],[100,3.1,146.5,-26.8],[105,5.5,148.9,-30.3],[110,7.4,150.7,-33.9],[115,9.0,152.3,-37.6],[120,10.3,153.6,-41.3],[125,9.4,152.4,-43.0],[130,9.1,151.5,-45.3],[135,8.6,150.9,-47.6],[140,8.0,150.1,-49.2],[145,7.3,148.1,-50.7],[150,7.4,147.1,-52.6],[155,9.0,148.0,-55.4],[160,10.5,148.8,-58.2],[165,10.5,148.8,-58.2],[170,10.5,148.8,-58.2],[175,10.5,148.8,-58.2],[180,10.5,148.8,-58.2],[185,10.5,148.8,-58.2],[190,10.5,148.8,-58.2],[195,10.5,148.8,-58.2],[200,10.5,148.8,-58.2],[205,10.5,148.8,-58.2],[210,10.5,148.8,-58.2],[215,10.5,148.8,-58.2],[220,10.5,148.8,-58.2],[225,10.5,148.8,-58.2],[230,10.5,148.8,-58.2],[235,10.5,148.8,-58.2],[240,10.5,148.8,-58.2],[245,10.5,148.8,-58.2],[250,10.5,148.8,-58.2],[255,10.5,148.8,-58.2],[260,10.5,148.8,-58.2],[265,10.5,148.8,-58.2],[270,10.5,148.8,-58.2],[275,10.5,148.8,-58.2],[280,10.4,148.8,-58.2],[285,10.5,148.8,-58.2],[290,10.5,148.8,-58.2],[295,10.5,148.8,-58.2],[300,10.5,148.8,-58.2],[305,10.4,148.8,-58.2],[310,10.5,148.8,-58.2],[315,10.5,148.8,-58.2],[320,10.5,148.8,-58.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='ind':
cont= [[5,22.7,32.9,-2.3],[10,23.8,33.1,-4.6],[15,27.1,27.4,-6.0],[20,29.6,23.9,-7.5],[25,25.1,33.2,-10.3],[30,22.5,38.5,-13.3],[35,22.6,41.3,-15.9],[40,25.5,42.7,-17.4],[45,24.2,40.1,-19.7],[50,24.0,34.2,-23.5],[55,22.1,29.2,-28.3],[60,19.5,25.2,-34.4],[65,19.0,21.9,-40.2],[70,20.5,18.9,-44.4],[75,21.8,18.2,-47.3],[80,22.3,18.2,-49.1],[85,21.8,22.1,-53.8],[90,20.0,27.5,-58.8],[95,20.7,28.1,-57.8],[100,21.3,28.8,-56.8],[105,21.9,29.6,-55.9],[110,22.6,30.3,-54.9],[115,23.3,31.1,-54.0],[120,24.0,32.0,-53.1],[125,23.4,34.8,-55.2],[130,21.2,36.2,-60.1],[135,21.2,36.2,-61.6],[140,21.9,37.5,-61.5],[145,22.6,39.0,-62.5],[150,24.1,40.4,-62.9],[155,26.9,41.2,-61.6],[160,29.8,42.1,-60.5],[165,29.8,42.1,-60.5],[170,29.8,42.1,-60.5],[175,29.8,42.1,-60.5],[180,29.8,42.1,-60.5],[185,29.8,42.1,-60.5],[190,29.8,42.1,-60.5],[195,29.8,42.1,-60.5],[200,29.8,42.1,-60.5],[205,29.8,42.1,-60.5],[210,29.8,42.1,-60.5],[215,29.8,42.1,-60.5],[220,29.8,42.1,-60.5],[225,29.8,42.1,-60.5],[230,29.8,42.1,-60.5],[235,29.8,42.1,-60.5],[240,29.8,42.1,-60.5],[245,29.8,42.1,-60.5],[250,29.8,42.1,-60.5],[255,29.8,42.1,-60.5],[260,29.8,42.1,-60.5],[265,29.8,42.1,-60.5],[270,29.8,42.1,-60.5],[275,29.8,42.1,-60.5],[280,29.8,42.1,-60.5],[285,29.8,42.1,-60.5],[290,29.8,42.1,-60.5],[295,29.8,42.1,-60.5],[300,29.8,42.1,-60.5],[305,29.8,42.1,-60.5],[310,29.8,42.1,-60.5],[315,29.8,42.1,-60.5],[320,29.8,42.1,-60.5]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='neaf':
cont= [[5,90.0,0.0,0.0],[10,90.0,0.0,0.0],[15,90.0,0.0,0.0],[20,90.0,0.0,0.0],[25,90.0,0.0,0.0],[30,90.0,0.0,0.0],[35,90.0,0.0,0.0],[40,90.0,0.0,0.0],[45,90.0,0.0,0.0],[50,90.0,0.0,0.0],[55,90.0,0.0,0.0],[60,90.0,0.0,0.0],[65,90.0,0.0,0.0],[70,90.0,0.0,0.0],[75,90.0,0.0,0.0],[80,90.0,0.0,0.0],[85,27.9,-61.4,0.0],[90,39.8,-61.4,-0.1],[95,40.8,-61.4,-0.2],[100,40.3,-61.4,-0.3],[105,40.6,-61.4,-0.4],[110,40.6,-61.4,-0.5],[115,40.5,-61.4,-0.6],[120,40.5,-61.4,-0.7],[125,40.5,-61.4,-0.7],[130,40.5,-61.4,-0.7],[135,40.5,-61.4,-0.7],[140,40.5,-61.4,-0.7],[145,40.5,-61.4,-0.7],[150,40.5,-61.4,-0.7],[155,40.5,-61.4,-0.7],[160,40.5,-61.4,-0.7],[165,40.5,-61.4,-0.7],[170,40.5,-61.4,-0.7],[175,40.5,-61.4,-0.7],[180,40.5,-61.4,-0.7],[185,40.5,-61.4,-0.7],[190,40.5,-61.4,-0.7],[195,40.5,-61.4,-0.7],[200,40.5,-61.4,-0.7],[205,40.5,-61.4,-0.7],[210,40.5,-61.4,-0.7],[215,40.5,-61.4,-0.7],[220,40.5,-61.4,-0.7],[225,40.5,-61.4,-0.7],[230,40.5,-61.4,-0.7],[235,40.5,-61.4,-0.7],[240,40.5,-61.4,-0.7],[245,40.4,-61.4,-0.7],[250,40.4,-61.4,-0.7],[255,40.4,-61.4,-0.7],[260,40.4,-61.4,-0.7],[265,40.4,-61.4,-0.7],[270,40.4,-61.4,-0.7],[275,40.4,-61.4,-0.7],[280,40.4,-61.4,-0.7],[285,40.4,-61.4,-0.7],[290,40.4,-61.4,-0.7],[295,40.4,-61.4,-0.7],[300,40.4,-61.4,-0.7],[305,40.4,-61.4,-0.7],[310,40.4,-61.4,-0.7],[315,40.4,-61.4,-0.7],[320,40.4,-61.4,-0.7]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='sac':
cont= [[5,62.1,-40.2,1.6],[10,61.8,-40.3,3.3],[15,59.6,-38.1,5.4],[20,58.5,-37.1,7.5],[25,57.7,-36.4,9.6],[30,56.7,-34.5,11.9],[35,56.5,-33.4,14.3],[40,57.1,-32.6,16.6],[45,57.0,-31.4,18.6],[50,58.2,-31.2,20.5],[55,60.7,-31.9,22.0],[60,62.5,-32.8,23.3],[65,63.7,-33.5,24.6],[70,63.5,-33.4,26.1],[75,63.2,-33.9,28.6],[80,62.7,-34.3,31.5],[85,61.2,-34.3,34.4],[90,59.1,-34.5,37.3],[95,57.2,-34.7,40.3],[100,55.7,-34.8,43.3],[105,54.3,-34.9,46.4],[110,53.1,-35.0,49.5],[115,52.2,-35.0,51.7],[120,51.6,-35.0,52.8],[125,50.7,-33.9,54.0],[130,50.1,-32.8,54.9],[135,50.0,-32.5,55.1],[140,50.0,-32.5,55.1],[145,50.0,-32.5,55.1],[150,50.0,-32.5,55.1],[155,50.0,-32.5,55.1],[160,50.0,-32.5,55.1],[165,50.0,-32.5,55.1],[170,50.0,-32.5,55.1],[175,50.0,-32.5,55.1],[180,50.0,-32.5,55.1],[185,50.0,-32.5,55.1],[190,50.0,-32.5,55.1],[195,50.0,-32.5,55.1],[200,50.0,-32.5,55.1],[205,50.0,-32.5,55.1],[210,50.0,-32.5,55.1],[215,50.0,-32.5,55.1],[220,50.0,-32.5,55.1],[225,50.0,-32.5,55.1],[230,50.0,-32.5,55.1],[235,50.0,-32.5,55.1],[240,50.0,-32.5,55.1],[245,50.0,-32.5,55.1],[250,50.0,-32.5,55.1],[255,50.0,-32.5,55.1],[260,50.0,-32.5,55.1],[265,50.0,-32.5,55.1],[270,50.0,-32.5,55.1],[275,50.0,-32.5,55.1],[280,50.0,-32.5,55.1],[285,50.0,-32.5,55.1],[290,50.0,-32.5,55.1],[295,50.0,-32.5,55.1],[300,50.0,-32.5,55.1],[305,50.0,-32.5,55.1],[310,50.0,-32.5,55.1],[315,50.0,-32.5,55.1],[320,50.0,-32.5,55.1]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='ib':
cont= [[5,0,0,0],[10,0,0,0],[15,77.93,59.14,.12],[20,77.93,59.14,.24],[25,77.93,59.14,.24],[30,-31.21,166.79,1.73],[35,-31.21,166.79,1.73],[40,-27,160,1.73],[45,-23.85,157.12,1.72],[50,-20.6,157.88,2.1],[55,-20.72,162.4,2.61],[60,-16,164,3],[65,-12.95,165.77,3.1],[70,-16.45,167.49,3.1],[75,-16.45,167.49,3.1],[80,-37.17,169,8.04],[85,-38.86,169.85,10.28],[90,-42.64,173.2,16.56],[95,-43.,174.,20],[100,-43.,174.,25],[105,-43.,174.,30],[110,-43.,174.,35],[115,-43.,174.,35],[120,-43.,174.,44.77],[120,-43.86,174.17,44.77],[125,-43.86,174.17,44.77],[130,-46.19,177.47,45.91],[135,-46.19,177.47,45.91],[140,-46.19,177.47,45.91],[145,-46.19,177.47,45.91],[150,-46.19,177.47,45.91],[155,-47.12,179.45,46.29],[160,-47.12,179.45,46.29],[165,-47.12,179.45,46.29],[170,-47.55,180.35,50.62],[175,-46.8,181.1,50.33],[180,-46.8,181.1,50.33],[185,-46.8,181.1,50.33],[190,-46.8,181.1,50.33],[195,-46.8,181.1,50.33],[200,-46.8,181.1,50.33],[205,-46.8,181.1,50.33],[210,-46.8,181.1,50.33],[215,-46.8,181.1,50.33],[220,-46.8,181.1,50.33],[225,-46.8,181.1,50.33],[230,-46.8,181.1,50.33],[235,-46.8,181.1,50.33],[240,-46.8,181.1,50.33],[245,-46.8,181.1,50.33],[250,-46.8,181.1,50.33],[255,-46.8,181.1,50.33],[260,-46.8,181.1,50.33],[265,-46.8,181.1,50.33],[270,-46.8,181.1,50.33],[275,-46.8,181.1,50.33],[280,-46.8,181.1,50.33],[285,-46.8,181.1,50.33],[290,-46.8,181.1,50.33],[295,-46.8,181.1,50.33],[300,-46.8,181.1,50.33],[305,-46.8,181.1,50.33],[310,-46.8,181.1,50.33],[315,-46.8,181.1,50.33],[320,-46.8,181.1,50.33]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='saf':
cont= [[0,0,56.0,2.2],[5,0,57.6,2.5],[10,0,53.9,2.5],[15,0,66.5,3.0],[20,0,75.5,4.7],[25,0,84.1,6.8],[30,0,95.8,7.9],[35,0,98.8,8.7],[40,0,107.5,9.2],[45,0,110.9,10.3],[50,0,111.6,13.2],[55,0,115.7,13.9],[60,0,123.5,15.7],[65,0,127.8,17.5],[70,0,137.2,17.5],[75,0,140.3,19.2],[80,0,138.1,19.3],[85,0,142.9,19.6],[90,0,144.7,20.5],[95,0,144.3,20.8],[100,0,150.8,22.3],[105,0,160.2,26.9],[110,0,169.2,32.1],[115,0,170.3,35.6],[120,0,171.3,36.2],[125,0,172.1,37.5],[130,0,170.0,39.4],[135,0,172.6,42.1],[140,0,163.1,40.8],[145,0,155.2,38.1],[150,0,155.0,34.8],[155,0,155.0,33.2],[160,0,157.0,30.7],[165,0,159.5,32.5],[170,0,167.6,28.8],[175,0,167.8,27.7],[180,0,167.4,25.9],[185,0,168.4,21.6],[190,0,158.8,18.2],[195,0,147.9,17.8],[200,0,144.4,19.2],[205,0,137.4,20.7],[210,0,133.6,23.1],[215,0,129.9,26.4],[220,0,127.2,27.2],[225,0,128.0,29.4],[230,0,130.0,31.4],[235,0,133.6,35.3],[240,0,137.4,36.5],[245,0,143.1,39.6],[250,0,145.4,40.4],[255,0,145.6,41.8],[260,0,144.8,41.9],[265,0,141.6,47.1],[270,0,140.3,46.8],[275,0,138.2,51.1],[280,0,138.6,51.6],[285,0,136.5,51.8],[290,0,135.8,52.8],[295,0,136.8,53.5],[300,0,136.9,55.4],[305,0,138.9,56.3],[310,0,139.9,59.5],[315,0,138.9,60.8],[320,0,132.5,61.6]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
return 'NONE' | 0.17147 |
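A usage sketch, assuming get_pole is in scope; ages are looked up in the 5 Myr steps tabulated above:

print(get_pole('aus', 30))   # -> [12.8, 49.9, -18.1], Australia at 30 Ma
print(get_pole('af', 100))   # -> [0, 0, 0], Africa is held fixed
print(get_pole('aus', 12))   # -> 'NONE', age must match a tabulated 5 Myr step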
def _run_vqsr(in_file, ref_file, vrn_files, sensitivity_cutoff, filter_type, data):
"""Run variant quality score recalibration.
"""
cutoffs = ["100.0", "99.99", "99.98", "99.97", "99.96", "99.95", "99.94", "99.93", "99.92", "99.91",
"99.9", "99.8", "99.7", "99.6", "99.5", "99.0", "98.0", "90.0"]
if sensitivity_cutoff not in cutoffs:
cutoffs.append(sensitivity_cutoff)
cutoffs.sort()
broad_runner = broad.runner_from_config(data["config"])
gatk_type = broad_runner.gatk_type()
base = utils.splitext_plus(in_file)[0]
recal_file = ("%s-vqsrrecal.vcf.gz" % base) if gatk_type == "gatk4" else ("%s.recal" % base)
tranches_file = "%s.tranches" % base
plot_file = "%s-plots.R" % base
if not utils.file_exists(recal_file):
with file_transaction(data, recal_file, tranches_file, plot_file) as (tx_recal, tx_tranches, tx_plot_file):
params = ["-T", "VariantRecalibrator",
"-R", ref_file,
"--mode", filter_type]
if gatk_type == "gatk4":
params += ["--variant", in_file, "--output", tx_recal,
"--tranches-file", tx_tranches, "--rscript-file", tx_plot_file]
else:
params += ["--input", in_file, "--recal_file", tx_recal,
"--tranches_file", tx_tranches, "--rscript_file", tx_plot_file]
params += _get_vqsr_training(filter_type, vrn_files, gatk_type)
resources = config_utils.get_resources("gatk_variant_recalibrator", data["config"])
opts = resources.get("options", [])
if not opts:
for cutoff in cutoffs:
opts += ["-tranche", str(cutoff)]
for a in _get_vqsr_annotations(filter_type, data):
opts += ["-an", a]
params += opts
cores = dd.get_cores(data)
memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None
try:
broad_runner.new_resources("gatk-vqsr")
broad_runner.run_gatk(params, log_error=False, memscale=memscale, parallel_gc=True)
            except Exception:  # Can fail to run if not enough values are present to train.
return None, None
if gatk_type == "gatk4":
vcfutils.bgzip_and_index(recal_file, data["config"])
return recal_file, tranches_file | 0.004906 |
def delete_after_days(self, bucket, key, days):
"""更新文件生命周期
Returns:
一个dict变量,返回结果类似:
[
{ "code": <HttpCode int>, "data": <Data> },
{ "code": <HttpCode int> },
{ "code": <HttpCode int> },
{ "code": <HttpCode int> },
{ "code": <HttpCode int>, "data": { "error": "<ErrorMessage string>" } },
...
]
一个ResponseInfo对象
Args:
bucket: 目标资源空间
key: 目标资源文件名
days: 指定天数
"""
resource = entry(bucket, key)
return self.__rs_do('deleteAfterDays', resource, days) | 0.004213 |
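A hedged usage sketch following the qiniu SDK's typical shape (Auth, BucketManager, and the credentials and names below are illustrative assumptions):

from qiniu import Auth, BucketManager

q = Auth('<access_key>', '<secret_key>')
bucket_manager = BucketManager(q)
# schedule 'my-key' in 'my-bucket' for deletion after 7 days
ret, info = bucket_manager.delete_after_days('my-bucket', 'my-key', '7')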
def step(self, observation, argmax_sampling=False):
""" Select actions based on model's output """
policy_params, q = self(observation)
actions = self.action_head.sample(policy_params, argmax_sampling=argmax_sampling)
# log probability - we can do that, because we support only discrete action spaces
logprobs = self.action_head.logprob(actions, policy_params)
return {
'actions': actions,
'q': q,
'logprobs': policy_params,
'action:logprobs': logprobs
} | 0.007143 |
def get_item_sh_fields(self, identity=None, item_date=None, sh_id=None,
rol='author'):
""" Get standard SH fields from a SH identity """
eitem_sh = self.__get_item_sh_fields_empty(rol)
if identity:
# Use the identity to get the SortingHat identity
sh_ids = self.get_sh_ids(identity, self.get_connector_name())
eitem_sh[rol + "_id"] = sh_ids.get('id', '')
eitem_sh[rol + "_uuid"] = sh_ids.get('uuid', '')
eitem_sh[rol + "_name"] = identity.get('name', '')
eitem_sh[rol + "_user_name"] = identity.get('username', '')
eitem_sh[rol + "_domain"] = self.get_identity_domain(identity)
elif sh_id:
# Use the SortingHat id to get the identity
eitem_sh[rol + "_id"] = sh_id
eitem_sh[rol + "_uuid"] = self.get_uuid_from_id(sh_id)
else:
# No data to get a SH identity. Return an empty one.
return eitem_sh
        # If the identity does not exist, return an empty identity
if rol + "_uuid" not in eitem_sh or not eitem_sh[rol + "_uuid"]:
return self.__get_item_sh_fields_empty(rol, undefined=True)
# Get the SH profile to use first this data
profile = self.get_profile_sh(eitem_sh[rol + "_uuid"])
if profile:
# If name not in profile, keep its old value (should be empty or identity's name field value)
eitem_sh[rol + "_name"] = profile.get('name', eitem_sh[rol + "_name"])
email = profile.get('email', None)
if email:
eitem_sh[rol + "_domain"] = self.get_email_domain(email)
eitem_sh[rol + "_gender"] = profile.get('gender', self.unknown_gender)
eitem_sh[rol + "_gender_acc"] = profile.get('gender_acc', 0)
elif not profile and sh_id:
logger.warning("Can't find SH identity profile: %s", sh_id)
# Ensure we always write gender fields
if not eitem_sh.get(rol + "_gender"):
eitem_sh[rol + "_gender"] = self.unknown_gender
eitem_sh[rol + "_gender_acc"] = 0
eitem_sh[rol + "_org_name"] = self.get_enrollment(eitem_sh[rol + "_uuid"], item_date)
eitem_sh[rol + "_bot"] = self.is_bot(eitem_sh[rol + '_uuid'])
return eitem_sh | 0.002988 |
def create_git_release(self, tag, name, message, draft=False, prerelease=False, target_commitish=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/releases <http://developer.github.com/v3/repos/releases>`_
:param tag: string
:param name: string
:param message: string
:param draft: bool
:param prerelease: bool
:param target_commitish: string or :class:`github.Branch.Branch` or :class:`github.Commit.Commit` or :class:`github.GitCommit.GitCommit`
:rtype: :class:`github.GitRelease.GitRelease`
"""
assert isinstance(tag, (str, unicode)), tag
assert isinstance(name, (str, unicode)), name
assert isinstance(message, (str, unicode)), message
assert isinstance(draft, bool), draft
assert isinstance(prerelease, bool), prerelease
assert target_commitish is github.GithubObject.NotSet or isinstance(target_commitish, (str, unicode, github.Branch.Branch, github.Commit.Commit, github.GitCommit.GitCommit)), target_commitish
post_parameters = {
"tag_name": tag,
"name": name,
"body": message,
"draft": draft,
"prerelease": prerelease,
}
if isinstance(target_commitish, (str, unicode)):
post_parameters["target_commitish"] = target_commitish
elif isinstance(target_commitish, github.Branch.Branch):
post_parameters["target_commitish"] = target_commitish.name
elif isinstance(target_commitish, (github.Commit.Commit, github.GitCommit.GitCommit)):
post_parameters["target_commitish"] = target_commitish.sha
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/releases",
input=post_parameters
)
return github.GitRelease.GitRelease(self._requester, headers, data, completed=True) | 0.004145 |
def tag_and_push(context):
"""Tags your git repo with the new version number"""
tag_option = '--annotate'
if probe.has_signing_key(context):
tag_option = '--sign'
shell.dry_run(
TAG_TEMPLATE % (tag_option, context.new_version, context.new_version),
context.dry_run,
)
shell.dry_run('git push --tags', context.dry_run) | 0.002725 |
async def emit(self, name):
"""
Add a callback to the event named 'name'.
Returns this object for chained 'on' calls.
"""
for cb in self._event_list[name]:
if isawaitable(cb):
await cb
else:
cb() | 0.006873 |
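A minimal self-contained emitter sketch (class and method names are illustrative) showing the same dispatch: coroutine objects are awaited, plain callables are invoked:

import asyncio
from inspect import isawaitable

class Emitter:
    def __init__(self):
        self._event_list = {}

    def on(self, name, cb):
        self._event_list.setdefault(name, []).append(cb)
        return self

    async def emit(self, name):
        for cb in self._event_list.get(name, []):
            if isawaitable(cb):  # a registered coroutine object
                await cb
            else:                # a plain callable
                cb()

async def main():
    em = Emitter()
    em.on('ready', lambda: print('sync callback'))

    async def greet():
        print('async callback')

    em.on('ready', greet())  # register the coroutine object itself
    await em.emit('ready')

asyncio.run(main())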
def sde(self):
"""
Return the state space representation of the covariance.
Note! For Sparse GP inference too small or two high values of lengthscale
lead to instabilities. This is because Qc are too high or too low
and P_inf are not full rank. This effect depends on approximatio order.
For N = 10. lengthscale must be in (0.8,8). For other N tests must be conducted.
N=6: (0.06,31)
Variance should be within reasonable bounds as well, but its dependence is linear.
The above facts do not take into accout regularization.
"""
#import pdb; pdb.set_trace()
if self.approx_order is not None:
N = self.approx_order
else:
            N = 10  # approximation order (number of terms in the exponent series expansion)
roots_rounding_decimals = 6
fn = np.math.factorial(N)
p_lengthscale = float( self.lengthscale )
p_variance = float(self.variance)
kappa = 1.0/2.0/p_lengthscale**2
Qc = np.array( ((p_variance*np.sqrt(np.pi/kappa)*fn*(4*kappa)**N,),) )
eps = 1e-12
if (float(Qc) > 1.0/eps) or (float(Qc) < eps):
            warnings.warn("""sde_RBF kernel: the noise variance Qc is either very large or very small.
                It influences the conditioning of P_inf: {0:e}""".format(float(Qc)) )
pp1 = np.zeros((2*N+1,)) # array of polynomial coefficients from higher power to lower
for n in range(0, N+1): # (2N+1) - number of polynomial coefficients
pp1[2*(N-n)] = fn*(4.0*kappa)**(N-n)/np.math.factorial(n)*(-1)**n
pp = sp.poly1d(pp1)
roots = sp.roots(pp)
neg_real_part_roots = roots[np.round(np.real(roots) ,roots_rounding_decimals) < 0]
aa = sp.poly1d(neg_real_part_roots, r=True).coeffs
F = np.diag(np.ones((N-1,)),1)
F[-1,:] = -aa[-1:0:-1]
L= np.zeros((N,1))
L[N-1,0] = 1
H = np.zeros((1,N))
H[0,0] = 1
# Infinite covariance:
Pinf = lyap(F, -np.dot(L,np.dot( Qc[0,0],L.T)))
Pinf = 0.5*(Pinf + Pinf.T)
# Allocating space for derivatives
dF = np.empty([F.shape[0],F.shape[1],2])
dQc = np.empty([Qc.shape[0],Qc.shape[1],2])
dPinf = np.empty([Pinf.shape[0],Pinf.shape[1],2])
# Derivatives:
dFvariance = np.zeros(F.shape)
dFlengthscale = np.zeros(F.shape)
dFlengthscale[-1,:] = -aa[-1:0:-1]/p_lengthscale * np.arange(-N,0,1)
dQcvariance = Qc/p_variance
dQclengthscale = np.array(( (p_variance*np.sqrt(2*np.pi)*fn*2**N*p_lengthscale**(-2*N)*(1-2*N),),))
dPinf_variance = Pinf/p_variance
lp = Pinf.shape[0]
coeff = np.arange(1,lp+1).reshape(lp,1) + np.arange(1,lp+1).reshape(1,lp) - 2
coeff[np.mod(coeff,2) != 0] = 0
dPinf_lengthscale = -1/p_lengthscale*Pinf*coeff
dF[:,:,0] = dFvariance
dF[:,:,1] = dFlengthscale
dQc[:,:,0] = dQcvariance
dQc[:,:,1] = dQclengthscale
dPinf[:,:,0] = dPinf_variance
dPinf[:,:,1] = dPinf_lengthscale
P0 = Pinf.copy()
dP0 = dPinf.copy()
if self.balance:
# Benefits of this are not very sound. Helps only in one case:
# SVD Kalman + RBF kernel
import GPy.models.state_space_main as ssm
(F, L, Qc, H, Pinf, P0, dF, dQc, dPinf,dP0) = ssm.balance_ss_model(F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0 )
return (F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0) | 0.021189 |
def _find_corresponding_multicol_key(key, keys_multicol):
"""Find the corresponding multicolumn key."""
for mk in keys_multicol:
if key.startswith(mk) and 'of' in key:
return mk
return None | 0.004525 |
def extract_exception(*args):
"""
Extracts the exception from given arguments or from :func:`sys.exc_info`.
:param \*args: Arguments.
:type \*args: \*
:return: Extracted exception.
:rtype: tuple
"""
cls, instance, trcback = sys.exc_info()
    exceptions = list(filter(lambda x: issubclass(type(x), BaseException), args))
    trcbacks = list(filter(lambda x: issubclass(type(x), types.TracebackType), args))
cls, instance = (type(exceptions[0]), exceptions[0]) if exceptions else (cls, instance)
trcback = trcbacks[0] if trcbacks else trcback
return cls, instance, trcback | 0.008224 |
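A usage sketch inside an exception handler, assuming extract_exception is in scope:

try:
    1 / 0
except ZeroDivisionError:
    cls, instance, trcback = extract_exception()
    print(cls.__name__)   # -> ZeroDivisionError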
def RRX_C(value, carry, width):
"""
The ARM RRX (rotate right with extend and with carry) operation.
:param value: Value to shift
:type value: int or long or BitVec
    :param carry: Carry-in bit
:param int width: Width of the value
:return: Resultant value and carry result
    :rtype: tuple
"""
carry_out = Bit(value, 0)
result = (value >> 1) | (carry << (width - 1))
return (result, carry_out) | 0.002193 |
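A worked example mirroring the body above with plain ints (the Bit stand-in below reproduces the least-significant-bit extraction assumed by the function):

def Bit(value, idx):  # stand-in for the module's bit extractor
    return (value >> idx) & 1

value, carry, width = 0b1011, 1, 4
carry_out = Bit(value, 0)                         # 1: the bit shifted out
result = (value >> 1) | (carry << (width - 1))    # carry rotates into the top bit
print(bin(result), carry_out)                     # -> 0b1101 1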
def _set_link_fault_signaling(self, v, load=False):
"""
Setter method for link_fault_signaling, mapped from YANG variable /interface/ethernet/link_fault_signaling (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_fault_signaling is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_fault_signaling() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=link_fault_signaling.link_fault_signaling, is_container='container', presence=False, yang_name="link-fault-signaling", rest_name="link-fault-signaling", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure a link-fault-signaling', u'callpoint': u'Lfs', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-lfs', defining_module='brocade-lfs', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_fault_signaling must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=link_fault_signaling.link_fault_signaling, is_container='container', presence=False, yang_name="link-fault-signaling", rest_name="link-fault-signaling", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure a link-fault-signaling', u'callpoint': u'Lfs', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-lfs', defining_module='brocade-lfs', yang_type='container', is_config=True)""",
})
self.__link_fault_signaling = t
if hasattr(self, '_set'):
self._set() | 0.004873 |
def imrescale(img, scale, return_scale=False, interpolation='bilinear'):
"""Resize image while keeping the aspect ratio.
Args:
img (ndarray): The input image.
scale (float or tuple[int]): The scaling factor or maximum size.
If it is a float number, then the image will be rescaled by this
factor, else if it is a tuple of 2 integers, then the image will
be rescaled as large as possible within the scale.
return_scale (bool): Whether to return the scaling factor besides the
rescaled image.
interpolation (str): Same as :func:`resize`.
Returns:
ndarray: The rescaled image.
"""
h, w = img.shape[:2]
if isinstance(scale, (float, int)):
if scale <= 0:
raise ValueError(
'Invalid scale {}, must be positive.'.format(scale))
scale_factor = scale
elif isinstance(scale, tuple):
max_long_edge = max(scale)
max_short_edge = min(scale)
scale_factor = min(max_long_edge / max(h, w),
max_short_edge / min(h, w))
else:
raise TypeError(
'Scale must be a number or tuple of int, but got {}'.format(
type(scale)))
new_size = _scale_size((w, h), scale_factor)
rescaled_img = imresize(img, new_size, interpolation=interpolation)
if return_scale:
return rescaled_img, scale_factor
else:
return rescaled_img | 0.000678 |
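A usage sketch with a NumPy image, assuming imrescale and its imresize/_scale_size helpers are in scope (mmcv-style):

import numpy as np

img = np.zeros((400, 600, 3), dtype=np.uint8)
big = imrescale(img, 2.0)  # doubled: roughly (800, 1200, 3)
small, s = imrescale(img, (256, 256), return_scale=True)
# scale_factor = min(256/600, 256/400) ~ 0.427, so small is roughly (171, 256, 3)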
def detach(self) -> iostream.IOStream:
"""Take control of the underlying stream.
Returns the underlying `.IOStream` object and stops all further
HTTP processing. May only be called during
`.HTTPMessageDelegate.headers_received`. Intended for implementing
protocols like websockets that tunnel over an HTTP handshake.
"""
self._clear_callbacks()
stream = self.stream
self.stream = None # type: ignore
if not self._finish_future.done():
future_set_result_unless_cancelled(self._finish_future, None)
return stream | 0.003257 |
def _check_datetime(self, node):
""" Check that a datetime was infered.
If so, emit boolean-datetime warning.
"""
try:
infered = next(node.infer())
except astroid.InferenceError:
return
if isinstance(infered, Instance) and infered.qname() == "datetime.time":
self.add_message("boolean-datetime", node=node) | 0.007692 |
def reload(script, input, output):
"""
    reloads the generator script when the script files
    or the input files change
"""
script = Path(script).expand().abspath()
output = Path(output).expand().abspath()
input = input if isinstance(input, (list, tuple)) else [input]
output.makedirs_p()
_script_reload(script, input, output) | 0.002786 |
def insert_local_var(self, vname, vtype, position):
"Inserts a new local variable"
index = self.insert_id(vname, SharedData.KINDS.LOCAL_VAR, [SharedData.KINDS.LOCAL_VAR, SharedData.KINDS.PARAMETER], vtype)
self.table[index].attribute = position | 0.01107 |
def read_property_from_xml(root, path):
"""
Get the text from an XML property.
    Whitespace, tabs and newlines are trimmed
:param root: container in which we search
:type root: ElementTree.Element
:param path: path to search in root
:type path: str
:return: the text of the element at the given path
:rtype: str, None
"""
element = root.find(path, XML_NS)
try:
return element.text.strip(' \t\n\r')
except AttributeError:
return None | 0.001992 |
def drag_and_drop(self, droppable):
"""
Performs drag a element to another elmenet.
Currently works only on Chrome driver.
"""
self.scroll_to()
ActionChains(self.parent.driver).drag_and_drop(self._element, droppable._element).perform() | 0.010563 |
def fqn(o):
"""Returns the fully qualified class name of an object or a class
:param o: object or class
:return: class name
"""
parts = []
if isinstance(o, (str, bytes)):
return o
if not hasattr(o, '__module__'):
raise ValueError('Invalid argument `%s`' % o)
parts.append(o.__module__)
if isclass(o):
parts.append(o.__name__)
elif isinstance(o, types.FunctionType):
parts.append(o.__name__)
else:
parts.append(o.__class__.__name__)
return '.'.join(parts) | 0.001842 |
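A few illustrative calls, assuming fqn is in scope:

print(fqn(dict))              # -> 'builtins.dict' (a class)
print(fqn({}))                # -> 'builtins.dict' (an instance)
print(fqn(fqn))               # -> '<module>.fqn' (a function)
print(fqn('already.a.name'))  # -> 'already.a.name' (strings pass through)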
def get_inventory_text(self):
"""Return the inventory information from the device."""
inventory_text = None
if self.inventory_cmd:
try:
inventory_text = self.device.send(self.inventory_cmd, timeout=120)
self.log('Inventory collected')
except CommandError:
self.log('Unable to collect inventory')
else:
self.log('No inventory command for {}'.format(self.platform))
return inventory_text | 0.005894 |
def next(self):
"""
This method is deprecated, a holdover from when queries were iterators,
rather than iterables.
@return: one element of massaged data.
"""
if self._selfiter is None:
warnings.warn(
"Calling 'next' directly on a query is deprecated. "
"Perhaps you want to use iter(query).next(), or something "
"more expressive like store.findFirst or store.findOrCreate?",
DeprecationWarning, stacklevel=2)
self._selfiter = self.__iter__()
return self._selfiter.next() | 0.003247 |
def set_variable(self, name, type_, size):
"""
Register variable of name and type_, with a (multidimensional) size.
:param name: variable name as it appears in code
:param type_: may be any key from Kernel.datatypes_size (typically float or double)
:param size: either None for scalars or an n-tuple of ints for an n-dimensional array
"""
assert type_ in self.datatypes_size, 'only float and double variables are supported'
if self.datatype is None:
self.datatype = type_
else:
assert type_ == self.datatype, 'mixing of datatypes within a kernel is not supported.'
assert type(size) in [tuple, type(None)], 'size has to be defined as tuple or None'
self.variables[name] = (type_, size) | 0.008772 |
def geometry(obj):
"""
Apply ``vtkGeometryFilter``.
"""
gf = vtk.vtkGeometryFilter()
gf.SetInputData(obj)
gf.Update()
return gf.GetOutput() | 0.005988 |
def delete_edge(self, tail_node_or_ID, head_node_or_ID):
""" Removes an edge from the graph. Returns the deleted edge or None.
"""
if isinstance(tail_node_or_ID, Node):
tail_node = tail_node_or_ID
else:
tail_node = self.get_node(tail_node_or_ID)
if isinstance(head_node_or_ID, Node):
head_node = head_node_or_ID
else:
head_node = self.get_node(head_node_or_ID)
if (tail_node is None) or (head_node is None):
return None
for i, edge in enumerate(self.edges):
if (edge.tail_node == tail_node) and (edge.head_node == head_node):
edge = self.edges.pop(i)
return edge
return None | 0.002653 |
def inform(self, reading):
"""Inform strategy creator of the sensor status."""
try:
self._inform_callback(self._sensor, reading)
except Exception:
log.exception('Unhandled exception trying to send {!r} '
'for sensor {!r} of type {!r}'
.format(reading, self._sensor.name, self._sensor.type)) | 0.007692 |
def noisered(self, profile_path, amount=0.5):
'''Reduce noise in the audio signal by profiling and filtering.
This effect is moderately effective at removing consistent
background noise such as hiss or hum.
Parameters
----------
profile_path : str
Path to a noise profile file.
This file can be generated using the `noiseprof` effect.
amount : float, default=0.5
How much noise should be removed is specified by amount. Should
be between 0 and 1. Higher numbers will remove more noise but
present a greater likelihood of removing wanted components of
the audio signal.
See Also
--------
noiseprof
'''
if not os.path.exists(profile_path):
raise IOError(
"profile_path {} does not exist.".format(profile_path))
if not is_number(amount) or amount < 0 or amount > 1:
raise ValueError("amount must be a number between 0 and 1.")
effect_args = [
'noisered',
profile_path,
'{:f}'.format(amount)
]
self.effects.extend(effect_args)
self.effects_log.append('noisered')
return self | 0.001567 |
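A hedged usage sketch following pysox's typical shape (file names are illustrative; noiseprof is the companion effect mentioned in the docstring):

import sox

profiler = sox.Transformer()
profiler.noiseprof('noise_only.wav', 'noise.prof')  # build the noise profile first

cleaner = sox.Transformer()
cleaner.noisered('noise.prof', amount=0.3)
cleaner.build('noisy_input.wav', 'clean_output.wav')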
def run_pod(self, pod, startup_timeout=120, get_logs=True):
# type: (Pod, int, bool) -> Tuple[State, Optional[str]]
"""
Launches the pod synchronously and waits for completion.
Args:
pod (Pod):
startup_timeout (int): Timeout for startup of the pod (if pod is pending for
too long, considers task a failure
"""
resp = self.run_pod_async(pod)
curr_time = dt.now()
if resp.status.start_time is None:
while self.pod_not_started(pod):
delta = dt.now() - curr_time
if delta.seconds >= startup_timeout:
raise AirflowException("Pod took too long to start")
time.sleep(1)
self.log.debug('Pod not yet started')
return self._monitor_pod(pod, get_logs) | 0.004751 |
def add_constraint(self, constraint, variables=tuple()):
"""Add a constraint.
Args:
constraint (function/iterable/:obj:`.Constraint`):
Constraint definition in one of the supported formats:
1. Function, with input arguments matching the order and
:attr:`~.ConstraintSatisfactionProblem.vartype` type of the `variables`
argument, that evaluates True when the constraint is satisfied.
2. List explicitly specifying each allowed configuration as a tuple.
3. :obj:`.Constraint` object built either explicitly or by :mod:`dwavebinarycsp.factories`.
variables(iterable):
Variables associated with the constraint. Not required when `constraint` is
a :obj:`.Constraint` object.
Examples:
This example defines a function that evaluates True when the constraint is satisfied.
The function's input arguments match the order and type of the `variables` argument.
>>> import dwavebinarycsp
>>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
>>> def all_equal(a, b, c): # works for both dwavebinarycsp.BINARY and dwavebinarycsp.SPIN
... return (a == b) and (b == c)
>>> csp.add_constraint(all_equal, ['a', 'b', 'c'])
>>> csp.check({'a': 0, 'b': 0, 'c': 0})
True
>>> csp.check({'a': 0, 'b': 0, 'c': 1})
False
This example explicitly lists allowed configurations.
>>> import dwavebinarycsp
>>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)
>>> eq_configurations = {(-1, -1), (1, 1)}
>>> csp.add_constraint(eq_configurations, ['v0', 'v1'])
>>> csp.check({'v0': -1, 'v1': +1})
False
>>> csp.check({'v0': -1, 'v1': -1})
True
This example uses a :obj:`.Constraint` object built by :mod:`dwavebinarycsp.factories`.
>>> import dwavebinarycsp
>>> import dwavebinarycsp.factories.constraint.gates as gates
>>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
>>> csp.add_constraint(gates.and_gate(['a', 'b', 'c'])) # add an AND gate
>>> csp.add_constraint(gates.xor_gate(['a', 'c', 'd'])) # add an XOR gate
>>> csp.check({'a': 1, 'b': 0, 'c': 0, 'd': 1})
True
"""
if isinstance(constraint, Constraint):
if variables and (tuple(variables) != constraint.variables):
raise ValueError("mismatched variables and Constraint")
elif isinstance(constraint, Callable):
constraint = Constraint.from_func(constraint, variables, self.vartype)
elif isinstance(constraint, Iterable):
constraint = Constraint.from_configurations(constraint, variables, self.vartype)
else:
raise TypeError("Unknown constraint type given")
self.constraints.append(constraint)
for v in constraint.variables:
self.variables[v].append(constraint) | 0.005573 |
def write_image(self, stream, image_format="svg", **kwargs):
"""
Writes the phase diagram to an image in a stream.
Args:
stream:
stream to write to. Can be a file stream or a StringIO stream.
image_format
format for image. Can be any of matplotlib supported formats.
Defaults to svg for best results for vector graphics.
\\*\\*kwargs: Pass through to get_plot functino.
"""
plt = self.get_plot(**kwargs)
f = plt.gcf()
f.set_size_inches((12, 10))
plt.savefig(stream, format=image_format) | 0.00314 |
def indexed_sum_over_const(cls, ops, kwargs):
r'''Execute an indexed sum over a term that does not depend on the
summation indices
.. math::
\sum_{j=1}^{N} a = N a
>>> a = symbols('a')
>>> i, j = (IdxSym(s) for s in ('i', 'j'))
>>> unicode(Sum(i, 1, 2)(a))
'2 a'
>>> unicode(Sum(j, 1, 2)(Sum(i, 1, 2)(a * i)))
'∑_{i=1}^{2} 2 i a'
'''
term, *ranges = ops
new_ranges = []
new_term = term
for r in ranges:
if r.index_symbol not in term.free_symbols:
try:
new_term *= len(r)
except TypeError:
new_ranges.append(r)
else:
new_ranges.append(r)
if len(new_ranges) == 0:
return new_term
else:
return (new_term, ) + tuple(new_ranges), kwargs | 0.001239 |
def _check_panel(self, length):
"""
Check that given fixed panel length evenly divides index.
Parameters
----------
length : int
Fixed length with which to subdivide index
"""
n = len(self.index)
if divmod(n, length)[1] != 0:
raise ValueError("Panel length '%g' must evenly divide length of series '%g'"
% (length, n))
if n == length:
raise ValueError("Panel length '%g' cannot be length of series '%g'"
% (length, n)) | 0.006838 |