text (stringlengths 78 to 104k) | score (float64 0 to 0.18)
---|---|
def get_urls(self):
""" Extend the admin urls for the CompetitionEntryAdmin model
to be able to invoke a CSV export view on the admin model """
urls = super(CompetitionEntryAdmin, self).get_urls()
csv_urls = patterns('',
url(
r'^exportcsv/$',
self.admin_site.admin_view(self.csv_export),
name='competition-csv-export'
)
)
return csv_urls + urls | 0.006397 |
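The csv_export view wired up above is not included in this snippet; a minimal sketch of such an admin view, assuming a generic dump of all model fields (the field handling is illustrative, not the project's actual implementation), could look like:
import csv
from django.http import HttpResponse

def csv_export(self, request):
    """Stream every row of the admin's model as a CSV attachment (illustrative only)."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="competition_entries.csv"'
    writer = csv.writer(response)
    field_names = [f.name for f in self.model._meta.fields]
    writer.writerow(field_names)
    for entry in self.model.objects.all():
        writer.writerow([getattr(entry, name) for name in field_names])
    return response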
def prepare_weighted_spans(targets, # type: List[TargetExplanation]
preserve_density=None, # type: Optional[bool]
):
# type: (...) -> List[Optional[List[PreparedWeightedSpans]]]
""" Return weighted spans prepared for rendering.
Calculate a separate weight range for each different weighted
span (for each different index): each target has the same number
of weighted spans.
"""
targets_char_weights = [
[get_char_weights(ws, preserve_density=preserve_density)
for ws in t.weighted_spans.docs_weighted_spans]
if t.weighted_spans else None
for t in targets] # type: List[Optional[List[np.ndarray]]]
max_idx = max_or_0(len(ch_w or []) for ch_w in targets_char_weights)
targets_char_weights_not_None = [
cw for cw in targets_char_weights
if cw is not None] # type: List[List[np.ndarray]]
spans_weight_ranges = [
max_or_0(
abs(x) for char_weights in targets_char_weights_not_None
for x in char_weights[idx])
for idx in range(max_idx)]
return [
[PreparedWeightedSpans(ws, char_weights, weight_range)
for ws, char_weights, weight_range in zip(
t.weighted_spans.docs_weighted_spans, # type: ignore
t_char_weights,
spans_weight_ranges)]
if t_char_weights is not None else None
for t, t_char_weights in zip(targets, targets_char_weights)] | 0.002009 |
def kube_node_status_ready(self, metric, scraper_config):
""" The ready status of a cluster node (legacy)"""
service_check_name = scraper_config['namespace'] + '.node.ready'
for sample in metric.samples:
node_tag = self._label_to_tag("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_positive,
tags=[node_tag] + scraper_config['custom_tags'],
) | 0.005386 |
def perform_command(self):
"""
Perform command and return the appropriate exit code.
:rtype: int
"""
if len(self.actual_arguments) < 4:
return self.print_help()
text_format = gf.safe_unicode(self.actual_arguments[0])
if text_format == u"list":
text = gf.safe_unicode(self.actual_arguments[1])
elif text_format in TextFileFormat.ALLOWED_VALUES:
text = self.actual_arguments[1]
if not self.check_input_file(text):
return self.ERROR_EXIT_CODE
else:
return self.print_help()
l1_id_regex = self.has_option_with_value(u"--l1-id-regex")
l2_id_regex = self.has_option_with_value(u"--l2-id-regex")
l3_id_regex = self.has_option_with_value(u"--l3-id-regex")
id_regex = self.has_option_with_value(u"--id-regex")
class_regex = self.has_option_with_value(u"--class-regex")
sort = self.has_option_with_value(u"--sort")
parameters = {
gc.PPN_TASK_IS_TEXT_MUNPARSED_L1_ID_REGEX: l1_id_regex,
gc.PPN_TASK_IS_TEXT_MUNPARSED_L2_ID_REGEX: l2_id_regex,
gc.PPN_TASK_IS_TEXT_MUNPARSED_L3_ID_REGEX: l3_id_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_CLASS_REGEX: class_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_ID_REGEX: id_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_ID_SORT: sort,
}
if (text_format == TextFileFormat.MUNPARSED) and ((l1_id_regex is None) or (l2_id_regex is None) or (l3_id_regex is None)):
self.print_error(u"You must specify --l1-id-regex and --l2-id-regex and --l3-id-regex for munparsed format")
return self.ERROR_EXIT_CODE
if (text_format == TextFileFormat.UNPARSED) and (id_regex is None) and (class_regex is None):
self.print_error(u"You must specify --id-regex and/or --class-regex for unparsed format")
return self.ERROR_EXIT_CODE
language = gf.safe_unicode(self.actual_arguments[2])
audio_file_path = self.actual_arguments[3]
if not self.check_input_file(audio_file_path):
return self.ERROR_EXIT_CODE
text_file = self.get_text_file(text_format, text, parameters)
if text_file is None:
self.print_error(u"Unable to build a TextFile from the given parameters")
return self.ERROR_EXIT_CODE
elif len(text_file) == 0:
self.print_error(u"No text fragments found")
return self.ERROR_EXIT_CODE
text_file.set_language(language)
self.print_info(u"Read input text with %d fragments" % (len(text_file)))
self.print_info(u"Reading audio...")
try:
audio_file_mfcc = AudioFileMFCC(audio_file_path, rconf=self.rconf, logger=self.logger)
except AudioFileConverterError:
self.print_error(u"Unable to call the ffmpeg executable '%s'" % (self.rconf[RuntimeConfiguration.FFMPEG_PATH]))
self.print_error(u"Make sure the path to ffmpeg is correct")
return self.ERROR_EXIT_CODE
except (AudioFileUnsupportedFormatError, AudioFileNotInitializedError):
self.print_error(u"Cannot read file '%s'" % (audio_file_path))
self.print_error(u"Check that its format is supported by ffmpeg")
return self.ERROR_EXIT_CODE
except Exception as exc:
self.print_error(u"An unexpected error occurred while reading the audio file:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
self.print_info(u"Reading audio... done")
self.print_info(u"Running VAD...")
audio_file_mfcc.run_vad()
self.print_info(u"Running VAD... done")
min_head = gf.safe_float(self.has_option_with_value(u"--min-head"), None)
max_head = gf.safe_float(self.has_option_with_value(u"--max-head"), None)
min_tail = gf.safe_float(self.has_option_with_value(u"--min-tail"), None)
max_tail = gf.safe_float(self.has_option_with_value(u"--max-tail"), None)
self.print_info(u"Detecting audio interval...")
start_detector = SD(audio_file_mfcc, text_file, rconf=self.rconf, logger=self.logger)
start, end = start_detector.detect_interval(min_head, max_head, min_tail, max_tail)
self.print_info(u"Detecting audio interval... done")
self.print_result(audio_file_mfcc.audio_length, start, end)
return self.NO_ERROR_EXIT_CODE | 0.003794 |
def _regular_gate(self, gate, lines, ctrl_lines, used_lines):
"""
Draw a regular gate.
:param string gate: Gate to draw.
:param lines: Lines the gate acts on.
:type: list[int]
:param ctrl_lines: Control lines.
:type: list[int]
:param used_lines: The lines that are actually involved in the gate.
:type: list[int]
:return: LaTeX string drawing a regular gate at the given location.
:rtype: string
"""
imax = max(lines)
imin = min(lines)
delta_pos = self._gate_offset(gate)
gate_width = self._gate_width(gate)
gate_height = self._gate_height(gate)
name = self._gate_name(gate)
lines = list(range(imin, imax + 1))
tex_str = ""
pos = self.pos[lines[0]]
node_str = "\n\\node[none] ({}) at ({},-{}) {{}};"
for l in lines:
node1 = node_str.format(self._op(l), pos, l)
if l in used_lines:
tex_str += self._phase(l, pos)
node2 = ("\n\\node[none,minimum height={}cm,outer sep=0] ({}) at"
" ({},-{}) {{}};").format(gate_height, self._op(l, offset=1), pos + gate_width / 2., l)
node3 = node_str.format(self._op(l, offset=2), pos + gate_width, l)
tex_str += node1 + node2 + node3
tex_str += ("\n\\draw[operator,edgestyle,outer sep={width}cm]"
" ([yshift={half_height}cm]{op1})"
" rectangle ([yshift=-{half_height}cm]{op2}) node[pos=.5]{{\\verb|{name}|}};"
).format(width=gate_width, op1=self._op(imin), op2=self._op(imax, offset=2),
half_height=.5 * gate_height, name=name)
for l in lines:
self.pos[l] = pos + gate_width / 2.
self.op_count[l] += 3
for l in range(min(ctrl_lines + lines), max(ctrl_lines + lines) + 1):
self.pos[l] = pos + delta_pos + gate_width
return tex_str | 0.004582 |
def _handle_return(state: GlobalState) -> None:
"""
Adds all the annotations into the state which correspond to the
locations in the memory returned by RETURN opcode.
:param state: The Global State
"""
stack = state.mstate.stack
try:
offset, length = get_concrete_int(stack[-1]), get_concrete_int(stack[-2])
except TypeError:
return
for element in state.mstate.memory[offset : offset + length]:
if not isinstance(element, Expression):
continue
for annotation in element.annotations:
if isinstance(annotation, OverUnderflowAnnotation):
state.annotate(
OverUnderflowStateAnnotation(
annotation.overflowing_state,
annotation.operator,
annotation.constraint,
)
) | 0.004073 |
def _FracInt(x,y,z,a,b,c,tau,n):
"""Returns
(1 - x^2/(tau+a) - y^2/(tau+b) - z^2/(tau+c))^n / sqrt((tau+a)*(tau+b)*(tau+c))
"""
denom = np.sqrt((a + tau)*(b + tau)*(c + tau))
return (1. - x**2/(a + tau) - y**2/(b + tau) - z**2/(c + tau))**n / denom | 0.021277 |
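As a quick sanity check of the formula (the parameter values below are arbitrary), setting x = y = z = 0 and n = 0 leaves only the 1/sqrt((tau+a)(tau+b)(tau+c)) prefactor:
import numpy as np

# with x = y = z = 0 and n = 0 the bracketed term equals 1, so only the prefactor remains
val = _FracInt(0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 0.0, 0)
assert np.isclose(val, 1.0 / np.sqrt(1.0 * 2.0 * 3.0))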
def get_action(self, action=None):
""" Returns action to take after call """
if action:
self.action = action
if self.action not in AjaxResponseAction.choices:
raise ValueError(
"Invalid action selected: '{}'".format(self.action))
return self.action | 0.006211 |
def update_link(self):
"""
redirects all links to self (the new linked object)
"""
name = repr(self)
if not name:
return self
l = self.__class__._get_links()
to_be_changed = list()
if name in l:
for wal in l[name]:
if wal.ref_obj and self is not wal():
to_be_changed.append((wal.ref_obj, wal.attr))
for o, a in to_be_changed:
setattr(o, a, self)
self.clean_up_link_dict()
return self | 0.005495 |
async def replace_dialog(self, dialog_id: str, options: object = None) -> DialogTurnResult:
"""
Ends the active dialog and starts a new dialog in its place. This is particularly useful
for creating loops or redirecting to another dialog.
:param dialog_id: ID of the dialog to search for.
:param options: (Optional) additional argument(s) to pass to the new dialog.
:return:
"""
# End the current dialog and giving the reason.
await self.end_active_dialog(DialogReason.ReplaceCalled)
# Start replacement dialog
return await self.begin_dialog(dialog_id, options) | 0.007704 |
def get_credentials():
"""
Returns the user's stored API key if a valid credentials file is found.
Raises CredentialsError if no valid credentials file is found.
"""
try:
netrc_path = netrc.path()
auths = netrc(netrc_path).authenticators(
urlparse(solvebio.api_host).netloc)
except (IOError, TypeError, NetrcParseError) as e:
raise CredentialsError(
'Could not open credentials file: ' + str(e))
if auths:
# auths = (login, account, password)
return auths[2]
else:
return None | 0.001721 |
def get_managed(
name,
template,
source,
source_hash,
source_hash_name,
user,
group,
mode,
attrs,
saltenv,
context,
defaults,
skip_verify=False,
**kwargs):
'''
Return the managed file data for file.managed
name
location where the file lives on the server
template
template format
source
managed source file
source_hash
hash of the source file
source_hash_name
When ``source_hash`` refers to a remote file, this specifies the
filename to look for in that file.
.. versionadded:: 2016.3.5
user
Owner of file
group
Group owner of file
mode
Permissions of file
attrs
Attributes of file
.. versionadded:: 2018.3.0
context
Variables to add to the template context
defaults
Default values for context_dict
skip_verify
If ``True``, hash verification of remote file sources (``http://``,
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
argument will be ignored.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' file.get_managed /etc/httpd/conf.d/httpd.conf jinja salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' None root root '755' base None None
'''
# Copy the file to the minion and templatize it
sfn = ''
source_sum = {}
def _get_local_file_source_sum(path):
'''
DRY helper for getting the source_sum value from a locally cached
path.
'''
return {'hsum': get_hash(path, form='sha256'), 'hash_type': 'sha256'}
# If we have a source defined, let's figure out what the hash is
if source:
urlparsed_source = _urlparse(source)
if urlparsed_source.scheme in salt.utils.files.VALID_PROTOS:
parsed_scheme = urlparsed_source.scheme
else:
parsed_scheme = ''
parsed_path = os.path.join(
urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)
unix_local_source = parsed_scheme in ('file', '')
if parsed_scheme == '':
parsed_path = sfn = source
if not os.path.exists(sfn):
msg = 'Local file source {0} does not exist'.format(sfn)
return '', {}, msg
elif parsed_scheme == 'file':
sfn = parsed_path
if not os.path.exists(sfn):
msg = 'Local file source {0} does not exist'.format(sfn)
return '', {}, msg
if parsed_scheme and parsed_scheme.lower() in string.ascii_lowercase:
parsed_path = ':'.join([parsed_scheme, parsed_path])
parsed_scheme = 'file'
if parsed_scheme == 'salt':
source_sum = __salt__['cp.hash_file'](source, saltenv)
if not source_sum:
return '', {}, 'Source file {0} not found in saltenv \'{1}\''.format(source, saltenv)
elif not source_hash and unix_local_source:
source_sum = _get_local_file_source_sum(parsed_path)
elif not source_hash and source.startswith(os.sep):
# This should happen on Windows
source_sum = _get_local_file_source_sum(source)
else:
if not skip_verify:
if source_hash:
try:
source_sum = get_source_sum(name,
source,
source_hash,
source_hash_name,
saltenv)
except CommandExecutionError as exc:
return '', {}, exc.strerror
else:
msg = (
'Unable to verify upstream hash of source file {0}, '
'please set source_hash or set skip_verify to True'
.format(salt.utils.url.redact_http_basic_auth(source))
)
return '', {}, msg
if source and (template or parsed_scheme in salt.utils.files.REMOTE_PROTOS):
# Check if we have the template or remote file cached
cache_refetch = False
cached_dest = __salt__['cp.is_cached'](source, saltenv)
if cached_dest and (source_hash or skip_verify):
htype = source_sum.get('hash_type', 'sha256')
cached_sum = get_hash(cached_dest, form=htype)
if skip_verify:
# previously: `if skip_verify or cached_sum == source_sum['hsum']:`,
# but the hash comparison is redundant here; a mismatch is handled by the elif below
sfn = cached_dest
source_sum = {'hsum': cached_sum, 'hash_type': htype}
elif cached_sum != source_sum.get('hsum', __opts__['hash_type']):
cache_refetch = True
else:
sfn = cached_dest
# If we didn't have the template or remote file, or the file has been
# updated and the cache has to be refreshed, download the file.
if not sfn or cache_refetch:
try:
sfn = __salt__['cp.cache_file'](
source,
saltenv,
source_hash=source_sum.get('hsum'))
except Exception as exc:
# A 404 or other error code may raise an exception, catch it
# and return a comment that will fail the calling state.
_source = salt.utils.url.redact_http_basic_auth(source)
return '', {}, 'Failed to cache {0}: {1}'.format(_source, exc)
# If cache failed, sfn will be False, so do a truth check on sfn first
# as invoking os.path.exists() on a bool raises a TypeError.
if not sfn or not os.path.exists(sfn):
_source = salt.utils.url.redact_http_basic_auth(source)
return sfn, {}, 'Source file \'{0}\' not found'.format(_source)
if sfn == name:
raise SaltInvocationError(
'Source file cannot be the same as destination'
)
if template:
if template in salt.utils.templates.TEMPLATE_REGISTRY:
context_dict = defaults if defaults else {}
if context:
context_dict = salt.utils.dictupdate.merge(context_dict, context)
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
sfn,
name=name,
source=source,
user=user,
group=group,
mode=mode,
attrs=attrs,
saltenv=saltenv,
context=context_dict,
salt=__salt__,
pillar=__pillar__,
grains=__opts__['grains'],
opts=__opts__,
**kwargs)
else:
return sfn, {}, ('Specified template format {0} is not supported'
).format(template)
if data['result']:
sfn = data['data']
hsum = get_hash(sfn, form='sha256')
source_sum = {'hash_type': 'sha256',
'hsum': hsum}
else:
__clean_tmp(sfn)
return sfn, {}, data['data']
return sfn, source_sum, '' | 0.00092 |
def delete(self, *args, **kwargs):
"""
Delete the image, along with any generated thumbnails.
"""
source_cache = self.get_source_cache()
# First, delete any related thumbnails.
self.delete_thumbnails(source_cache)
# Next, delete the source image.
super(ThumbnailerFieldFile, self).delete(*args, **kwargs)
# Finally, delete the source cache entry.
if source_cache and source_cache.pk is not None:
source_cache.delete() | 0.003929 |
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional `pattern` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``'test'``.
The `errors=` keyword argument controls behavior when an
error occurs. The default is ``'strict'``, which causes an
exception. The other allowed values are ``'warn'`` (which
reports the error via :func:`warnings.warn()`), and ``'ignore'``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir | 0.001474 |
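For instance, collecting every directory whose name ends in 'test' while only warning about unreadable directories might look like this (the import location of the path.py Path class is an assumption):
from path import Path  # path.py package; import location assumed

test_dirs = list(Path('.').walkdirs('*test', errors='warn'))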
def parsebam(self):
"""
Parse the dictionaries of the sorted bam files extracted using pysam
"""
# Threading is actually the worst - need multiprocessing to make this work at all
logging.info('Parsing BAM files')
# The sample objects are too big to get pickled. To hack our way around this, try to dump the sample object to
# json, and have the processing function turn the object into a dictionary.
json_files = list()
with tempfile.TemporaryDirectory() as tmpdir:
best_assemblies = list()
sample_names = list()
for sample in self.runmetadata:
json_name = os.path.join(tmpdir, '{sn}.json'.format(sn=sample.name))
best_assemblies.append(sample.general.bestassemblyfile)
sample_names.append(sample.name)
with open(json_name, 'w') as f:
json.dump(sample[self.analysistype].dump(), f, sort_keys=True, indent=4)
json_files.append(json_name)
p = multiprocessing.Pool(processes=self.cpus)
analysis_type_list = [self.analysistype] * len(self.runmetadata)
iupac_list = [self.iupac] * len(self.runmetadata)
cutoff_list = [self.cutoff] * len(self.runmetadata)
depth_list = [self.averagedepth] * len(self.runmetadata)
allow_soft_clip_list = [self.allow_soft_clips] * len(self.runmetadata)
sample_results = p.starmap(Sippr.parse_one_sample,
zip(json_files, sample_names, best_assemblies, analysis_type_list,
iupac_list, cutoff_list, depth_list, allow_soft_clip_list))
p.close()
p.join()
# Since we had to json-ize the sample objects, we now need to update the metadata for everything.
for sample in self.runmetadata:
sample[self.analysistype].faidict = dict()
sample[self.analysistype].results = dict()
sample[self.analysistype].avgdepth = dict()
sample[self.analysistype].resultssnp = dict()
sample[self.analysistype].snplocations = dict()
sample[self.analysistype].resultsgap = dict()
sample[self.analysistype].gaplocations = dict()
sample[self.analysistype].sequences = dict()
sample[self.analysistype].maxcoverage = dict()
sample[self.analysistype].mincoverage = dict()
sample[self.analysistype].standarddev = dict()
# Figure out which of the sample results to use.
for sample_result in sample_results:
if sample_result['name'] == sample.name:
sample[self.analysistype].faidict = sample_result['faidict']
sample[self.analysistype].results = sample_result['results']
sample[self.analysistype].avgdepth = sample_result['avgdepth']
sample[self.analysistype].resultssnp = sample_result['resultssnp']
sample[self.analysistype].snplocations = sample_result['snplocations']
sample[self.analysistype].resultsgap = sample_result['resultsgap']
sample[self.analysistype].gaplocations = sample_result['gaplocations']
sample[self.analysistype].sequences = sample_result['sequences']
sample[self.analysistype].maxcoverage = sample_result['maxcoverage']
sample[self.analysistype].mincoverage = sample_result['mincoverage']
sample[self.analysistype].standarddev = sample_result['standarddev']
logging.info('Done parsing BAM files') | 0.006019 |
def user_deleted_from_site_event(event):
""" Remove deleted user from all the workspaces where he
is a member """
userid = event.principal
catalog = api.portal.get_tool('portal_catalog')
query = {'object_provides': WORKSPACE_INTERFACE}
query['workspace_members'] = userid
workspaces = [
IWorkspace(b._unrestrictedGetObject())
for b in catalog.unrestrictedSearchResults(query)
]
for workspace in workspaces:
workspace.remove_from_team(userid) | 0.001972 |
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
Join items with other Panel either on major and minor axes column.
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}
How to handle indexes of the two objects. Default: 'left'
for joining on index, None otherwise
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.core.reshape.concat import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True) | 0.001024 |
def _upload_code(s3_conn, bucket, prefix, name, contents, content_hash,
payload_acl):
"""Upload a ZIP file to S3 for use by Lambda.
The key used for the upload will be unique based on the checksum of the
contents. No changes will be made if the contents in S3 already match the
expected contents.
Args:
s3_conn (botocore.client.S3): S3 connection to use for operations.
bucket (str): name of the bucket to create.
prefix (str): S3 prefix to prepend to the constructed key name for
the uploaded file
name (str): desired name of the Lambda function. Will be used to
construct a key name for the uploaded file.
contents (str): byte string with the content of the file upload.
content_hash (str): md5 hash of the contents to be uploaded.
payload_acl (str): The canned S3 object ACL to be applied to the
uploaded payload
Returns:
troposphere.awslambda.Code: CloudFormation Lambda Code object,
pointing to the uploaded payload in S3.
Raises:
botocore.exceptions.ClientError: any error from boto3 is passed
through.
"""
logger.debug('lambda: ZIP hash: %s', content_hash)
key = '{}lambda-{}-{}.zip'.format(prefix, name, content_hash)
if _head_object(s3_conn, bucket, key):
logger.info('lambda: object %s already exists, not uploading', key)
else:
logger.info('lambda: uploading object %s', key)
s3_conn.put_object(Bucket=bucket, Key=key, Body=contents,
ContentType='application/zip',
ACL=payload_acl)
return Code(S3Bucket=bucket, S3Key=key) | 0.000583 |
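A hedged example of how this helper might be invoked; the bucket name, prefix, function name and ACL below are placeholders rather than values from the codebase:
import hashlib

import boto3

payload = open('function.zip', 'rb').read()
code = _upload_code(
    boto3.client('s3'),
    bucket='my-deploy-bucket',
    prefix='lambdas/',
    name='my-function',
    contents=payload,
    content_hash=hashlib.md5(payload).hexdigest(),
    payload_acl='authenticated-read',
)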
def load_module(self, name):
"""
Load the ``pygal.maps.name`` module from the previously
loaded plugin
"""
if name not in sys.modules:
sys.modules[name] = getattr(maps, name.split('.')[2])
return sys.modules[name] | 0.007326 |
def model(self) -> 'modeltools.Model':
"""The |Model| object handled by the actual |Element| object.
Directly after their initialisation, elements do not know
which model they require:
>>> from hydpy import Element
>>> hland = Element('hland', outlets='outlet')
>>> hland.model
Traceback (most recent call last):
...
AttributeError: The model object of element `hland` has been \
requested but not been prepared so far.
During scripting and when working interactively in the Python
shell, it is often convenient to assign a |model| directly.
>>> from hydpy.models.hland_v1 import *
>>> parameterstep('1d')
>>> hland.model = model
>>> hland.model.name
'hland_v1'
>>> del hland.model
>>> hasattr(hland, 'model')
False
For the "usual" approach to prepare models, please see the method
|Element.init_model|.
The following examples show that assigning |Model| objects
to property |Element.model| creates some connections required by
the respective model type automatically. These
examples should be relevant for developers only.
The following |hbranch| model branches a single input value
(from node `inp`) to multiple outputs (nodes `out1` and `out2`):
>>> from hydpy import Element, Node, reverse_model_wildcard_import
>>> reverse_model_wildcard_import()
>>> element = Element('a_branch',
... inlets='branch_input',
... outlets=('branch_output_1', 'branch_output_2'))
>>> inp = element.inlets.branch_input
>>> out1, out2 = element.outlets
>>> from hydpy.models.hbranch import *
>>> parameterstep()
>>> xpoints(0.0, 3.0)
>>> ypoints(branch_output_1=[0.0, 1.0], branch_output_2=[0.0, 2.0])
>>> parameters.update()
>>> element.model = model
To show that the inlet and outlet connections are built properly,
we assign a new value to the inlet node `inp` and verify that the
suitable fractions of this value are passed to the outlet nodes
`out1` and `out2` by calling method |Model.doit|:
>>> inp.sequences.sim = 999.0
>>> model.doit(0)
>>> fluxes.input
input(999.0)
>>> out1.sequences.sim
sim(333.0)
>>> out2.sequences.sim
sim(666.0)
"""
model = vars(self).get('model')
if model:
return model
raise AttributeError(
f'The model object of element `{self.name}` has '
f'been requested but not been prepared so far.') | 0.00073 |
def get(self, session):
'''taobao.aftersale.get: query the user's after-sales service templates.
Returns only the title and id of the after-sales service templates configured by the user.'''
request = TOPRequest('taobao.aftersale.get')
self.create(self.execute(request, session))
return self.after_sales | 0.012195 |
def parse(self, buffer, inlineparent = None):
'''
Compatible to Parser.parse()
'''
size = 0
v = []
for i in range(0, self.size): # @UnusedVariable
r = self.innerparser.parse(buffer[size:], None)
if r is None:
return None
v.append(r[0])
size += r[1]
return (v, size) | 0.010363 |
def compute_trans(expnums, ccd, version, prefix=None, default="WCS"):
"""
Pull the astrometric header for each image, compute an x/y transform and compare to trans.jmp
this one overides trans.jmp if they are very different.
@param expnums:
@param ccd:
@param version:
@param prefix:
@return: None
"""
wcs_dict = {}
for expnum in expnums:
try:
# TODO This assumes that the image is already N/E flipped.
# If compute_trans is called after the image is retrieved from archive then we get the disk version.
filename = storage.get_image(expnum, ccd, version, prefix=prefix)
this_wcs = wcs.WCS(fits.open(filename)[0].header)
except Exception as err:
logging.warning("WCS Trans compute failed. {}".format(str(err)))
return
wcs_dict[expnum] = this_wcs
x0 = wcs_dict[expnums[0]].header['NAXIS1'] / 2.0
y0 = wcs_dict[expnums[0]].header['NAXIS2'] / 2.0
(ra0, dec0) = wcs_dict[expnums[0]].xy2sky(x0, y0)
result = ""
for expnum in expnums:
filename = storage.get_file(expnum, ccd, version, ext='.trans.jmp', prefix=prefix)
jmp_trans = open(filename, 'r').readline().split()
(x, y) = wcs_dict[expnum].sky2xy(ra0, dec0)
x1 = float(jmp_trans[0]) + float(jmp_trans[1]) * x + float(jmp_trans[2]) * y
y1 = float(jmp_trans[3]) + float(jmp_trans[4]) * x + float(jmp_trans[5]) * y
dr = math.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)
if dr > 0.5:
result += "WARNING: WCS-JMP transforms mis-matched {} reverting to using {}.\n".format(expnum, default)
if default == "WCS":
uri = storage.dbimages_uri(expnum, ccd, version, ext='.trans.jmp', prefix=prefix)
filename = os.path.basename(uri)
trans = open(filename, 'w')
trans.write("{:5.2f} 1. 0. {:5.2f} 0. 1.\n".format(x0 - x, y0 - y))
trans.close()
else:
result += "WCS-JMP transforms match {}\n".format(expnum)
return result | 0.007187 |
def parse_panel_app_panel(panel_info, hgnc_map, institute='cust000', panel_type='clinical'):
"""Parse a PanelApp panel
Args:
panel_info(dict)
hgnc_map(dict): Map from symbol to hgnc ids
institute(str)
panel_type(str)
Returns:
gene_panel(dict)
"""
date_format = "%Y-%m-%dT%H:%M:%S.%f"
gene_panel = {}
gene_panel['version'] = float(panel_info['version'])
gene_panel['date'] = get_date(panel_info['Created'][:-1], date_format=date_format)
gene_panel['display_name'] = panel_info['SpecificDiseaseName']
gene_panel['institute'] = institute
gene_panel['panel_type'] = panel_type
LOG.info("Parsing panel %s", gene_panel['display_name'])
gene_panel['genes'] = []
nr_low_confidence = 1
nr_genes = 0
for nr_genes, gene in enumerate(panel_info['Genes'],1):
gene_info = parse_panel_app_gene(gene, hgnc_map)
if not gene_info:
nr_low_confidence += 1
continue
gene_panel['genes'].append(gene_info)
LOG.info("Number of genes in panel %s", nr_genes)
LOG.info("Number of low confidence genes in panel %s", nr_low_confidence)
return gene_panel | 0.009772 |
def compute_and_save_video_metrics(
output_dirs, problem_name, video_length, frame_shape):
"""Compute and saves the video metrics."""
statistics, all_results = compute_video_metrics_from_png_files(
output_dirs, problem_name, video_length, frame_shape)
for results, output_dir in zip(all_results, output_dirs):
save_results(results, output_dir, problem_name)
parent_dir = os.path.join(output_dirs[0], os.pardir)
final_dir = os.path.join(parent_dir, "decode")
tf.gfile.MakeDirs(parent_dir)
save_results(statistics, final_dir, problem_name) | 0.015873 |
def get_blocks(self, block_structure=None):
"""For a reducible circuit, get a sequence of subblocks that when
concatenated again yield the original circuit. The block structure
given has to be compatible with the circuits actual block structure,
i.e. it can only be more coarse-grained.
Args:
block_structure (tuple): The block structure according to which the
subblocks are generated (default = ``None``, corresponds to the
circuit's own block structure)
Returns:
A tuple of subblocks that the circuit consists of.
Raises:
.IncompatibleBlockStructures
"""
if block_structure is None:
block_structure = self.block_structure
try:
return self._get_blocks(block_structure)
except IncompatibleBlockStructures as e:
raise e | 0.002188 |
def well(self, well_x=1, well_y=1):
"""ScanWellData of specific well.
Parameters
----------
well_x : int
well_y : int
Returns
-------
lxml.objectify.ObjectifiedElement
"""
xpath = './ScanWellData'
xpath += _xpath_attrib('WellX', well_x)
xpath += _xpath_attrib('WellY', well_y)
# assume we find only one
return self.well_array.find(xpath) | 0.004435 |
def style(text, fg=None, bg=None, bold=None, dim=None, underline=None,
blink=None, reverse=None, reset=True):
"""Styles a text with ANSI styles and returns the new string. By
default the styling is self contained which means that at the end
of the string a reset code is issued. This can be prevented by
passing ``reset=False``.
Examples::
click.echo(click.style('Hello World!', fg='green'))
click.echo(click.style('ATTENTION!', blink=True))
click.echo(click.style('Some things', reverse=True, fg='cyan'))
Supported color names:
* ``black`` (might be a gray)
* ``red``
* ``green``
* ``yellow`` (might be an orange)
* ``blue``
* ``magenta``
* ``cyan``
* ``white`` (might be light gray)
* ``reset`` (reset the color code only)
.. versionadded:: 2.0
:param text: the string to style with ansi codes.
:param fg: if provided this will become the foreground color.
:param bg: if provided this will become the background color.
:param bold: if provided this will enable or disable bold mode.
:param dim: if provided this will enable or disable dim mode. This is
badly supported.
:param underline: if provided this will enable or disable underline.
:param blink: if provided this will enable or disable blinking.
:param reverse: if provided this will enable or disable inverse
rendering (foreground becomes background and the
other way round).
:param reset: by default a reset-all code is added at the end of the
string which means that styles do not carry over. This
can be disabled to compose styles.
"""
bits = []
if fg:
try:
bits.append('\033[%dm' % (_ansi_colors.index(fg) + 30))
except ValueError:
raise TypeError('Unknown color %r' % fg)
if bg:
try:
bits.append('\033[%dm' % (_ansi_colors.index(bg) + 40))
except ValueError:
raise TypeError('Unknown color %r' % bg)
if bold is not None:
bits.append('\033[%dm' % (1 if bold else 22))
if dim is not None:
bits.append('\033[%dm' % (2 if dim else 22))
if underline is not None:
bits.append('\033[%dm' % (4 if underline else 24))
if blink is not None:
bits.append('\033[%dm' % (5 if blink else 25))
if reverse is not None:
bits.append('\033[%dm' % (7 if reverse else 27))
bits.append(text)
if reset:
bits.append(_ansi_reset_all)
return ''.join(bits) | 0.000384 |
def _format_data(self, data):
"""
Sort the data in blue wavelengths to red, and ignore any spectra that
have entirely non-finite or negative fluxes.
"""
return [spectrum for spectrum in \
sorted(data if isinstance(data, (list, tuple)) else [data],
key=lambda x: x.disp[0]) if np.any(np.isfinite(spectrum.flux))] | 0.010554 |
def delete(cls, uuid):
"""Delete a workflow."""
to_delete = Workflow.query.get(uuid)
db.session.delete(to_delete) | 0.014599 |
def _node_to_msg(store_load, node):
"""Maps a given node and a store_load constant to the message that is understood by
the storage service.
"""
if node.v_is_leaf:
if store_load == STORE:
return pypetconstants.LEAF
elif store_load == LOAD:
return pypetconstants.LEAF
elif store_load == REMOVE:
return pypetconstants.DELETE
else:
if store_load == STORE:
return pypetconstants.GROUP
elif store_load == LOAD:
return pypetconstants.GROUP
elif store_load == REMOVE:
return pypetconstants.DELETE | 0.004298 |
def update_primary(hdu_in, hdu=None):
""" 'Update' a primary HDU
This checks whether hdu exists and creates it from hdu_in if it does not.
If hdu does exist, this adds the data in hdu_in to hdu
"""
if hdu is None:
hdu = fits.PrimaryHDU(data=hdu_in.data, header=hdu_in.header)
else:
hdu.data += hdu_in.data
return hdu | 0.002833 |
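For example, co-adding the primary HDUs of several FITS files into one accumulated HDU (the file names are placeholders):
from astropy.io import fits

stacked = None
for fname in ['image_a.fits', 'image_b.fits']:
    with fits.open(fname) as hdul:
        stacked = update_primary(hdul[0], stacked)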
def parse_translation(f, lineno):
"""Read a single translation entry from the file F and return a
tuple with the comments, msgid and msgstr. The comments is returned
as a list of lines which do not end in new-lines. The msgid and
msgstr are strings, possibly with embedded newlines"""
line = f.readline()
def get_line(f, line, need_keys, lineno, default='""'):
line = line.rstrip()
if not line:
return lineno, need_keys[0], default, line
key, value = line.split(' ', 1)
# Parse msgid
if key not in need_keys:
print 'Error Line, need %r: %d, line=%r' % (need_keys, lineno, line)
raise RuntimeError("parse error")
v = value
while 1:
line = f.readline()
line = line.rstrip()
lineno += 1
if not line or line[0] != '"':
break
v += '\n' + line[:]
return lineno, key, v, line
# Parse comments
comments = []
while 1:
if not line:
return lineno, None, None, None
if line.strip() == '':
return lineno, comments, None, None
elif line[0] == '#':
comments.append(line[:-1])
else:
break
line = f.readline()
lineno += 1
lineno, key, msgid, line = get_line(f, line, ['msgid'], lineno)
lineno, key, value, line = get_line(f, line, ['msgid_plural', 'msgstr'], lineno)
if key == 'msgid_plural':
msgid = (msgid, value)
lineno, key, v1, line = get_line(f, line, ['msgstr[0]'], lineno)
lineno, key, v2, line = get_line(f, line, ['msgstr[1]'], lineno)
msgstr = (v1, v2)
else:
msgstr = value
if line != '':
print 'File: %s Error Line: %s' % (f.name, line)
raise RuntimeError("parse error")
return lineno, comments, msgid, msgstr | 0.002542 |
def inputs(form_args):
"""
Creates list of input elements
"""
element = []
for name, value in form_args.items():
element.append(
'<input type="hidden" name="{}" value="{}"/>'.format(name, value))
return "\n".join(element) | 0.003774 |
def push_to_registry(image, repository, tag, project):
"""
:param image: DockerImage, image to push
:param repository: str, new name of image
:param tag: str, new tag of image
:param project: str, oc project
:return: DockerImage, new docker image
"""
return image.push("%s/%s/%s" % (get_internal_registry_ip(), project, repository), tag=tag) | 0.005362 |
def delete_unused_subjects():
"""Delete any unused subjects from the database.
This is not strictly required as any unused subjects will automatically be reused if
needed in the future.
"""
# This causes Django to create a single join (check with query.query)
query = d1_gmn.app.models.Subject.objects.all()
query = query.filter(scienceobject_submitter__isnull=True)
query = query.filter(scienceobject_rights_holder__isnull=True)
query = query.filter(eventlog__isnull=True)
query = query.filter(permission__isnull=True)
query = query.filter(whitelistforcreateupdatedelete__isnull=True)
logger.debug('Deleting {} unused subjects:'.format(query.count()))
for s in query.all():
logging.debug(' {}'.format(s.subject))
query.delete() | 0.002509 |
def build_signature(self, user_api_key, user_secret, request):
"""Return the signature for the request."""
path = request.get_full_path()
sent_signature = request.META.get(
self.header_canonical('Authorization'))
signature_headers = self.get_headers_from_signature(sent_signature)
unsigned = self.build_dict_to_sign(request, signature_headers)
# Sign string and compare.
signer = HeaderSigner(
key_id=user_api_key, secret=user_secret,
headers=signature_headers, algorithm=self.ALGORITHM)
signed = signer.sign(unsigned, method=request.method, path=path)
return signed['authorization'] | 0.00289 |
def update_H(self, mean_field, l):
"""Updates the spin hamiltonian and recalculates its eigenbasis"""
self.H_s = self.spin_hamiltonian(mean_field, l)
try:
self.eig_energies, self.eig_states = diagonalize(self.H_s)
except np.linalg.linalg.LinAlgError:
np.savez('errorhamil', H=self.H_s, fiel=mean_field, lamb=l)
raise
except ValueError:
np.savez('errorhamil', H=self.H_s, fiel=mean_field, lamb=l)
print(mean_field, l)
raise | 0.005607 |
def read_cluster_role_binding(self, name, **kwargs):
"""
read the specified ClusterRoleBinding
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_cluster_role_binding(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ClusterRoleBinding (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1ClusterRoleBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_cluster_role_binding_with_http_info(name, **kwargs)
else:
(data) = self.read_cluster_role_binding_with_http_info(name, **kwargs)
return data | 0.004149 |
def build_columns(self, X, verbose=False):
"""construct the model matrix columns for the term
Parameters
----------
X : array-like
Input dataset with n rows
verbose : bool
whether to show warnings
Returns
-------
scipy sparse array with n rows
"""
X[:, self.feature][:, np.newaxis]
splines = b_spline_basis(X[:, self.feature],
edge_knots=self.edge_knots_,
spline_order=self.spline_order,
n_splines=self.n_splines,
sparse=True,
periodic=self.basis in ['cp'],
verbose=verbose)
if self.by is not None:
splines = splines.multiply(X[:, self.by][:, np.newaxis])
return splines | 0.002188 |
def _loc_to_features(loc):
"""Converts a location string "{Half}, {YardLine}" into a tuple of those
values, the second being an int.
:loc: The string from the play by play table representing location.
:returns: A tuple that separates out the values, making them missing
(np.nan) when necessary.
"""
r = (np.nan, np.nan)
if loc:
if isinstance(loc, basestring):
loc = loc.strip()
if ' ' in loc:
r = loc.split()
r[0] = r[0].lower()
r[1] = int(r[1])
else:
r = (np.nan, int(loc))
elif isinstance(loc, float):
return (np.nan, 50)
else:
r = (np.nan, np.nan)
return r | 0.001408 |
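Rough illustrations of the conversion (team abbreviation and yard line are arbitrary); note that the branch that splits on a space returns a list while the other branches return tuples:
print(_loc_to_features('DEN 35'))  # -> ['den', 35]
print(_loc_to_features('50'))      # -> (nan, 50)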
def set_default_symbols(self):
"""Set self.symbols based on self.numbers and the periodic table."""
self.symbols = tuple(periodic[n].symbol for n in self.numbers) | 0.011236 |
def knapsack_iterative(items, maxweight):
# Knapsack requires integral weights
weights = [t[1] for t in items]
max_exp = max([number_of_decimals(w_) for w_ in weights])
coeff = 10 ** max_exp
# Adjust weights to be integral
int_maxweight = int(maxweight * coeff)
int_items = [(v, int(w * coeff), idx) for v, w, idx in items]
"""
items = int_items
maxweight = int_maxweight
"""
return knapsack_iterative_int(int_items, int_maxweight) | 0.002088 |
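knapsack_iterative_int is not shown here; a minimal sketch of the integer 0/1 knapsack dynamic program it presumably performs (returning the best total value and the chosen items) could be:
def knapsack_iterative_int(items, maxweight):
    # items: list of (value, int_weight, index) tuples; classic O(n * maxweight) DP table
    dp = [[0] * (maxweight + 1) for _ in range(len(items) + 1)]
    for i, (value, weight, _) in enumerate(items, start=1):
        for w in range(maxweight + 1):
            dp[i][w] = dp[i - 1][w]
            if weight <= w:
                dp[i][w] = max(dp[i][w], dp[i - 1][w - weight] + value)
    # backtrack to recover which items were taken
    chosen, w = [], maxweight
    for i in range(len(items), 0, -1):
        if dp[i][w] != dp[i - 1][w]:
            chosen.append(items[i - 1])
            w -= items[i - 1][1]
    return dp[len(items)][maxweight], chosen[::-1]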
def traverse(self, traverser, **kwargs):
"""
Implementation of mandatory interface for traversing the whole rule tree.
This method will call the implementation of :py:func:`pynspect.rules.RuleTreeTraverser.function`
method with reference to ``self`` instance as first argument and with the
list of results from traversing all child argument nodes as second argument. The optional ``kwargs``
are passed down to traverser callback as additional arguments and can be
used to provide additional data or context.
:param pynspect.rules.RuleTreeTraverser traverser: Traverser object providing appropriate interface.
:param dict kwargs: Additional optional keyword arguments to be passed down to traverser callback.
"""
atr = []
for arg in self.args:
atr.append(arg.traverse(traverser, **kwargs))
return traverser.function(self, atr, **kwargs) | 0.009688 |
def _handle_sigusr2(self, signum: int, frame: Any) -> None:
"""Drop current task."""
logger.warning("Catched SIGUSR2")
if self.current_task:
logger.warning("Dropping current task...")
raise Discard | 0.008163 |
def check_time_coordinate(self, ds):
'''
Check variables defining time are valid under CF
CF §4.4 Variables representing time must always explicitly include the
units attribute; there is no default value.
The units attribute takes a string value formatted as per the
recommendations in the Udunits package.
The acceptable units for time are listed in the udunits.dat file. The
most commonly used of these strings (and their abbreviations) includes
day (d), hour (hr, h), minute (min) and second (sec, s). Plural forms
are also acceptable. The reference time string (appearing after the
identifier since) may include date alone; date and time; or date, time,
and time zone. The reference time is required. A reference time in year
0 has a special meaning (see Section 7.4, "Climatological Statistics").
Recommend that the unit year be used with caution. It is not a calendar
year. For similar reasons the unit month should also be used with
caution.
A time coordinate is identifiable from its units string alone.
Optionally, the time coordinate may be indicated additionally by
providing the standard_name attribute with an appropriate value, and/or
the axis attribute with the value T.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
'''
ret_val = []
for name in cfutil.get_time_variables(ds):
variable = ds.variables[name]
# Has units
has_units = hasattr(variable, 'units')
if not has_units:
result = Result(BaseCheck.HIGH,
False,
self.section_titles['4.4'],
['%s does not have units' % name])
ret_val.append(result)
continue
# Correct and identifiable units
result = Result(BaseCheck.HIGH,
True,
self.section_titles['4.4'])
ret_val.append(result)
correct_units = util.units_temporal(variable.units)
reasoning = None
if not correct_units:
reasoning = ['%s does not have correct time units' % name]
result = Result(BaseCheck.HIGH,
correct_units,
self.section_titles['4.4'],
reasoning)
ret_val.append(result)
return ret_val | 0.00076 |
def _node_errors(builder, env, tlist, slist):
"""Validate that the lists of target and source nodes are
legal for this builder and environment. Raise errors or
issue warnings as appropriate.
"""
# First, figure out if there are any errors in the way the targets
# were specified.
for t in tlist:
if t.side_effect:
raise UserError("Multiple ways to build the same target were specified for: %s" % t)
if t.has_explicit_builder():
# Check for errors when the environments are different
# No error if environments are the same Environment instance
if (not t.env is None and not t.env is env and
# Check OverrideEnvironment case - no error if wrapped Environments
# are the same instance, and overrides lists match
not (getattr(t.env, '__subject', 0) is getattr(env, '__subject', 1) and
getattr(t.env, 'overrides', 0) == getattr(env, 'overrides', 1) and
not builder.multi)):
action = t.builder.action
t_contents = t.builder.action.get_contents(tlist, slist, t.env)
contents = builder.action.get_contents(tlist, slist, env)
if t_contents == contents:
msg = "Two different environments were specified for target %s,\n\tbut they appear to have the same action: %s" % (t, action.genstring(tlist, slist, t.env))
SCons.Warnings.warn(SCons.Warnings.DuplicateEnvironmentWarning, msg)
else:
try:
msg = "Two environments with different actions were specified for the same target: %s\n(action 1: %s)\n(action 2: %s)" % (t,t_contents.decode('utf-8'),contents.decode('utf-8'))
except UnicodeDecodeError as e:
msg = "Two environments with different actions were specified for the same target: %s"%t
raise UserError(msg)
if builder.multi:
if t.builder != builder:
msg = "Two different builders (%s and %s) were specified for the same target: %s" % (t.builder.get_name(env), builder.get_name(env), t)
raise UserError(msg)
# TODO(batch): list constructed each time!
if t.get_executor().get_all_targets() != tlist:
msg = "Two different target lists have a target in common: %s (from %s and from %s)" % (t, list(map(str, t.get_executor().get_all_targets())), list(map(str, tlist)))
raise UserError(msg)
elif t.sources != slist:
msg = "Multiple ways to build the same target were specified for: %s (from %s and from %s)" % (t, list(map(str, t.sources)), list(map(str, slist)))
raise UserError(msg)
if builder.single_source:
if len(slist) > 1:
raise UserError("More than one source given for single-source builder: targets=%s sources=%s" % (list(map(str,tlist)), list(map(str,slist)))) | 0.006121 |
def ver_cmp(ver1, ver2):
"""
Compare lago versions
Args:
ver1(str): version string
ver2(str): version string
Returns:
Return negative if ver1<ver2, zero if ver1==ver2, positive if
ver1>ver2.
"""
return cmp(
pkg_resources.parse_version(ver1), pkg_resources.parse_version(ver2)
) | 0.002874 |
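cmp is a Python 2 builtin; under Python 3 an equivalent formulation of the same comparison could be written as:
import pkg_resources

def ver_cmp_py3(ver1, ver2):
    v1 = pkg_resources.parse_version(ver1)
    v2 = pkg_resources.parse_version(ver2)
    return (v1 > v2) - (v1 < v2)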
def get_cluster_nodes(self):
"""
return list with all nodes in cluster
"""
if not hasattr(self, '_cluster_nodes_cache'):
server, port = self._servers[0].split(':')
try:
self._cluster_nodes_cache = (
get_cluster_info(server, port,
self._ignore_cluster_errors)['nodes'])
except (socket.gaierror, socket.timeout) as err:
raise Exception('Cannot connect to cluster {0} ({1})'.format(
self._servers[0], err
))
return self._cluster_nodes_cache | 0.003135 |
def save_to_object(self):
"""Saves the current model state to a Python object. It also
saves to disk but does not return the checkpoint path.
Returns:
Object holding checkpoint data.
"""
tmpdir = tempfile.mkdtemp("save_to_object", dir=self.logdir)
checkpoint_prefix = self.save(tmpdir)
data = {}
base_dir = os.path.dirname(checkpoint_prefix)
for path in os.listdir(base_dir):
path = os.path.join(base_dir, path)
if path.startswith(checkpoint_prefix):
with open(path, "rb") as f:
data[os.path.basename(path)] = f.read()
out = io.BytesIO()
data_dict = pickle.dumps({
"checkpoint_name": os.path.basename(checkpoint_prefix),
"data": data,
})
if len(data_dict) > 10e6: # getting pretty large
logger.info("Checkpoint size is {} bytes".format(len(data_dict)))
out.write(data_dict)
shutil.rmtree(tmpdir)
return out.getvalue() | 0.001889 |
def task_log(self):
"""Get task log.
:rtype: str
:returns: The task log as a string.
"""
if self.task_id is None:
raise ValueError('task_id is None')
return self.get_task_log(self.task_id, self.session, self.request_kwargs) | 0.010526 |
def user_loc_value_to_instance_string(axis_tag, user_loc):
"""Return the Glyphs UI string (from the instance dropdown) that is
closest to the provided user location.
>>> user_loc_value_to_instance_string('wght', 430)
'Normal'
>>> user_loc_value_to_instance_string('wdth', 150)
'Extra Expanded'
"""
codes = {}
if axis_tag == "wght":
codes = WEIGHT_CODES
elif axis_tag == "wdth":
codes = WIDTH_CODES
else:
raise NotImplementedError
class_ = user_loc_value_to_class(axis_tag, user_loc)
return min(
sorted((code, class_) for code, class_ in codes.items() if code is not None),
key=lambda item: abs(item[1] - class_),
)[0] | 0.002801 |
def create_agent_signer(user_id):
"""Sign digest with existing GPG keys using gpg-agent tool."""
sock = connect_to_agent(env=os.environ)
keygrip = get_keygrip(user_id)
def sign(digest):
"""Sign the digest and return an ECDSA/RSA/DSA signature."""
return sign_digest(sock=sock, keygrip=keygrip, digest=digest)
return sign | 0.002793 |
def esinw(b, orbit, solve_for=None, **kwargs):
"""
Create a constraint for esinw in an orbit.
If 'esinw' does not exist in the orbit, it will be created
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter str orbit: the label of the orbit in which this
constraint should be built
:parameter str solve_for: if 'esinw' should not be the derived/constrained
parameter, provide which other parameter should be derived
(ie 'ecc', 'per0')
:returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments
that were passed to this function)
"""
orbit_ps = _get_system_ps(b, orbit)
metawargs = orbit_ps.meta
metawargs.pop('qualifier')
esinw_def = FloatParameter(qualifier='esinw', value=0.0, default_unit=u.dimensionless_unscaled, limits=(-1.0,1.0), description='Eccentricity times sin of argument of periastron')
esinw, created = b.get_or_create('esinw', esinw_def, **metawargs)
ecc = b.get_parameter(qualifier='ecc', **metawargs)
per0 = b.get_parameter(qualifier='per0', **metawargs)
if solve_for in [None, esinw]:
lhs = esinw
rhs = ecc * sin(per0)
elif solve_for == ecc:
lhs = ecc
rhs = esinw / sin(per0)
elif solve_for == per0:
lhs = per0
#rhs = arcsin(esinw/ecc)
rhs = esinw2per0(ecc, esinw)
else:
raise NotImplementedError
return lhs, rhs, {'orbit': orbit} | 0.003408 |
def eli_hanley(T, MW, Tc, Vc, Zc, omega, Cvm):
r'''Estimates the thermal conductivity of a gas as a function of
temperature using the reference fluid method of Ely and Hanley [1]_ as
shown in [2]_.
.. math::
\lambda = \lambda^* + \frac{\eta^*}{MW}(1.32)\left(C_v - \frac{3R}{2}\right)
Tr = \text{min}(Tr, 2)
\theta = 1 + (\omega-0.011)\left(0.56553 - 0.86276\ln Tr - \frac{0.69852}{Tr}\right)
\psi = [1 + (\omega - 0.011)(0.38560 - 1.1617\ln Tr)]\frac{0.288}{Z_c}
f = \frac{T_c}{190.4}\theta
h = \frac{V_c}{9.92E-5}\psi
T_0 = T/f
\eta_0^*(T_0)= \sum_{n=1}^9 C_n T_0^{(n-4)/3}
\theta_0 = 1944 \eta_0
\lambda^* = \lambda_0 H
\eta^* = \eta^*_0 H \frac{MW}{16.04}
H = \left(\frac{16.04}{MW}\right)^{0.5}f^{0.5}/h^{2/3}
Parameters
----------
T : float
Temperature of the gas [K]
MW : float
Molecular weight of the gas [g/mol]
Tc : float
Critical temperature of the gas [K]
Vc : float
Critical volume of the gas [m^3/mol]
Zc : float
Critical compressibility of the gas []
omega : float
Acentric factor of the gas [-]
Cvm : float
Molar constant volume heat capacity of the gas [J/mol/K]
Returns
-------
kg : float
Estimated gas thermal conductivity [W/m/k]
Notes
-----
Reference fluid is Methane.
MW internally converted to kg/g-mol.
Examples
--------
2-methylbutane at low pressure, 373.15 K. Matches the calculation in [2]_.
>>> eli_hanley(T=373.15, MW=72.151, Tc=460.4, Vc=3.06E-4, Zc=0.267,
... omega=0.227, Cvm=135.9)
0.02247951789135337
References
----------
.. [1] Ely, James F., and H. J. M. Hanley. "Prediction of Transport
Properties. 2. Thermal Conductivity of Pure Fluids and Mixtures."
Industrial & Engineering Chemistry Fundamentals 22, no. 1 (February 1,
1983): 90-97. doi:10.1021/i100009a016.
.. [2] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E.
Properties of Gases and Liquids. McGraw-Hill Companies, 1987.
'''
Cs = [2.907741307E6, -3.312874033E6, 1.608101838E6, -4.331904871E5,
7.062481330E4, -7.116620750E3, 4.325174400E2, -1.445911210E1, 2.037119479E-1]
Tr = T/Tc
if Tr > 2: Tr = 2
theta = 1 + (omega - 0.011)*(0.56553 - 0.86276*log(Tr) - 0.69852/Tr)
psi = (1 + (omega-0.011)*(0.38560 - 1.1617*log(Tr)))*0.288/Zc
f = Tc/190.4*theta
h = Vc/9.92E-5*psi
T0 = T/f
eta0 = 1E-7*sum([Ci*T0**((i+1. - 4.)/3.) for i, Ci in enumerate(Cs)])
k0 = 1944*eta0
H = (16.04/MW)**0.5*f**0.5*h**(-2/3.)
etas = eta0*H*MW/16.04
ks = k0*H
return ks + etas/(MW/1000.)*1.32*(Cvm - 1.5*R) | 0.002161 |
def required_validator(validator, req, instance, schema):
"""Swagger 1.2 expects `required` to be a bool in the Parameter object, but
a list of properties in a Model object.
"""
if schema.get('paramType'):
if req is True and not instance:
return [ValidationError("%s is required" % schema['name'])]
return []
return _validators.required_draft4(validator, req, instance, schema) | 0.002353 |
def _read_section(self):
"""Read and return an entire section"""
lines = [self._last[self._last.find(":")+1:]]
self._last = self._f.readline()
while len(self._last) > 0 and len(self._last[0].strip()) == 0:
lines.append(self._last)
self._last = self._f.readline()
return lines | 0.0059 |
def _json_to_term_model(term_data):
"""
Returns a term model created from the passed json data.
param: term_data loaded json data
"""
strptime = datetime.strptime
day_format = "%Y-%m-%d"
datetime_format = "%Y-%m-%dT%H:%M:%S"
term = TermModel()
term.year = term_data["Year"]
term.quarter = term_data["Quarter"]
term.last_day_add = parse_sws_date(term_data["LastAddDay"])
term.first_day_quarter = parse_sws_date(term_data["FirstDay"])
term.last_day_instruction = parse_sws_date(term_data["LastDayOfClasses"])
term.last_day_drop = parse_sws_date(term_data["LastDropDay"])
term.census_day = parse_sws_date(term_data["CensusDay"])
if term_data["ATermLastDay"] is not None:
term.aterm_last_date = parse_sws_date(term_data["ATermLastDay"])
if term_data["BTermFirstDay"] is not None:
term.bterm_first_date = parse_sws_date(term_data["BTermFirstDay"])
if term_data["LastAddDayATerm"] is not None:
term.aterm_last_day_add = parse_sws_date(term_data["LastAddDayATerm"])
if term_data["LastAddDayBTerm"] is not None:
term.bterm_last_day_add = parse_sws_date(term_data["LastAddDayBTerm"])
term.last_final_exam_date = parse_sws_date(term_data["LastFinalExamDay"])
try:
term.grading_period_open = strptime(
term_data["GradingPeriodOpen"], datetime_format)
except (TypeError, ValueError):
logger.warn('Malformed term_data["GradingPeriodOpen"]: {}'.format(
term_data["GradingPeriodOpen"]))
term.grading_period_open = strptime(
'{}T08:00:00'.format(term_data['LastFinalExamDay']),
datetime_format)
if term_data["GradingPeriodOpenATerm"] is not None:
term.aterm_grading_period_open = strptime(
term_data["GradingPeriodOpenATerm"], datetime_format)
try:
term.grading_period_close = strptime(
term_data["GradingPeriodClose"], datetime_format)
except (TypeError, ValueError):
logger.warn('Malformed term_data["GradingPeriodClose"]: {}'.format(
term_data["GradingPeriodClose"]))
term.grading_period_close = strptime(
'{}T17:00:00'.format(term_data['LastFinalExamDay']),
datetime_format)
try:
term.grade_submission_deadline = strptime(
term_data["GradeSubmissionDeadline"], datetime_format)
except (TypeError, ValueError):
logger.warn(
'Malformed term_data["GradeSubmissionDeadline"]: {}'.format(
term_data["GradeSubmissionDeadline"]))
term.grade_submission_deadline = strptime(
'{}T17:00:00'.format(term_data['LastFinalExamDay']),
datetime_format)
if term_data["RegistrationServicesStart"] is not None:
term.registration_services_start = parse_sws_date(
term_data["RegistrationServicesStart"])
if term_data["RegistrationPeriods"][0]["StartDate"] is not None:
term.registration_period1_start = parse_sws_date(
term_data["RegistrationPeriods"][0]["StartDate"])
if term_data["RegistrationPeriods"][0]["EndDate"] is not None:
term.registration_period1_end = parse_sws_date(
term_data["RegistrationPeriods"][0]["EndDate"])
if term_data["RegistrationPeriods"][1]["StartDate"] is not None:
term.registration_period2_start = parse_sws_date(
term_data["RegistrationPeriods"][1]["StartDate"])
if term_data["RegistrationPeriods"][1]["EndDate"] is not None:
term.registration_period2_end = parse_sws_date(
term_data["RegistrationPeriods"][1]["EndDate"])
if term_data["RegistrationPeriods"][2]["StartDate"] is not None:
term.registration_period3_start = parse_sws_date(
term_data["RegistrationPeriods"][2]["StartDate"])
if term_data["RegistrationPeriods"][2]["EndDate"] is not None:
term.registration_period3_end = parse_sws_date(
term_data["RegistrationPeriods"][2]["EndDate"])
term.time_schedule_construction = {}
for campus in term_data["TimeScheduleConstruction"]:
term.time_schedule_construction[campus.lower()] = True if (
term_data["TimeScheduleConstruction"][campus]) else False
term.time_schedule_published = {}
for campus in term_data["TimeSchedulePublished"]:
term.time_schedule_published[campus.lower()] = True if (
term_data["TimeSchedulePublished"][campus]) else False
term.clean_fields()
return term | 0.000221 |
def get_firmware_version(self):
"""Call PN532 GetFirmwareVersion function and return a tuple with the IC,
Ver, Rev, and Support values.
"""
response = self.call_function(PN532_COMMAND_GETFIRMWAREVERSION, 4)
if response is None:
raise RuntimeError('Failed to detect the PN532! Make sure there is sufficient power (use a 1 amp or greater power supply), the PN532 is wired correctly to the device, and the solder joints on the PN532 headers are solidly connected.')
return (response[0], response[1], response[2], response[3]) | 0.006861 |
def format_sensor(self, sensor):
""" Format a sensor value. If pango is enabled color is per sensor. """
current_val = sensor.current
if self.pango_enabled:
percentage = self.percentage(sensor.current, sensor.critical)
if self.dynamic_color:
color = self.colors[int(percentage)]
return self.format_pango(color, current_val)
return current_val | 0.004651 |
def _write_dihedral_information(gsd_file, structure):
"""Write the dihedrals in the system.
Parameters
----------
gsd_file :
The file object of the GSD file being written
structure : parmed.Structure
Parmed structure object holding system information
"""
gsd_file.dihedrals.N = len(structure.rb_torsions)
unique_dihedral_types = set()
for dihedral in structure.rb_torsions:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = ('-'.join((t1, t2, t3, t4)))
else:
dihedral_type = ('-'.join((t4, t3, t2, t1)))
unique_dihedral_types.add(dihedral_type)
unique_dihedral_types = sorted(list(unique_dihedral_types), key=natural_sort)
gsd_file.dihedrals.types = unique_dihedral_types
dihedral_typeids = []
dihedral_groups = []
for dihedral in structure.rb_torsions:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = ('-'.join((t1, t2, t3, t4)))
else:
dihedral_type = ('-'.join((t4, t3, t2, t1)))
dihedral_typeids.append(unique_dihedral_types.index(dihedral_type))
dihedral_groups.append((dihedral.atom1.idx, dihedral.atom2.idx,
dihedral.atom3.idx, dihedral.atom4.idx))
gsd_file.dihedrals.typeid = dihedral_typeids
gsd_file.dihedrals.group = dihedral_groups | 0.001225 |
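The type-string canonicalization used twice above can be factored out and checked in isolation. A minimal sketch follows; the helper name canonical_dihedral_type and the atom-type strings are illustrative, and plain lexicographic sorting stands in for natural_sort:

def canonical_dihedral_type(t1, t2, t3, t4, key=None):
    # Keep the orientation whose inner pair (t2, t3) is already sorted so a
    # dihedral and its reverse map to the same type string.
    if [t2, t3] == sorted([t2, t3], key=key):
        return '-'.join((t1, t2, t3, t4))
    return '-'.join((t4, t3, t2, t1))

# A torsion listed as a-c-b-d and its reverse d-b-c-a collapse to one type.
assert canonical_dihedral_type('a', 'c', 'b', 'd') == canonical_dihedral_type('d', 'b', 'c', 'a')
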
def _calc_inst_pmf(self):
"""Calculate the epsilon-greedy instrumental distribution"""
# Easy vars
t = self.t_
epsilon = self.epsilon
alpha = self.alpha
preds = self._preds_avg_in_strata
weights = self.strata.weights_[:,np.newaxis]
p1 = self._BB_model.theta_[:,np.newaxis]
p0 = 1 - p1
if t==0:
F = self._F_guess[self.opt_class]
else:
F = self._estimate[t - 1, self.opt_class]
# Fill in non-finite estimates with the initial guess
nonfinite = ~np.isfinite(F)
F[nonfinite] = self._F_guess[self.opt_class][nonfinite]
# Calculate optimal instrumental pmf
sqrt_arg = np.sum(preds * (alpha**2 * F**2 * p0 + (1 - F)**2 * p1) + \
(1 - preds) * (1 - alpha)**2 * F**2 * p1, \
axis=1, keepdims=True) #: sum is over classifiers
inst_pmf = weights * np.sqrt(sqrt_arg)
# Normalize
inst_pmf /= np.sum(inst_pmf)
# Epsilon-greedy: (1 - epsilon) q + epsilon * p
inst_pmf *= (1 - epsilon)
inst_pmf += epsilon * weights
if self.record_inst_hist:
self._inst_pmf[:,t] = inst_pmf.ravel()
else:
self._inst_pmf = inst_pmf.ravel() | 0.00687 |
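The closing epsilon-greedy mixture is the key step: q_eps = (1 - epsilon) * q + epsilon * p, where q is the (normalised) optimal instrumental pmf and p the stratum-weight pmf, so no stratum ever gets zero sampling probability. A self-contained sketch of just that step; the array values are made up:

import numpy as np

def epsilon_greedy_pmf(q, p, epsilon=0.1):
    """Blend an optimal instrumental pmf q with a fallback pmf p."""
    q = q / q.sum()                      # normalise q
    return (1 - epsilon) * q + epsilon * p

weights = np.array([0.5, 0.3, 0.2])      # stratum weights, already a pmf
raw_q = np.array([0.9, 0.05, 0.05])      # unnormalised "optimal" pmf
pmf = epsilon_greedy_pmf(raw_q, weights)
assert np.isclose(pmf.sum(), 1.0) and (pmf > 0).all()
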
def parse_coach_bsites_inf(infile):
"""Parse the Bsites.inf output file of COACH and return a list of rank-ordered binding site predictions
Bsites.inf contains the summary of COACH clustering results after all other prediction algorithms have finished
For each site (cluster), there are three lines:
- Line 1: site number, c-score of coach prediction, cluster size
- Line 2: algorithm, PDB ID, ligand ID, center of binding site (cartesian coordinates),
c-score of the algorithm's prediction, binding residues from single template
- Line 3: Statistics of ligands in the cluster
C-score information:
- "In our training data, a prediction with C-score>0.35 has average false positive and false negative rates below
0.16 and 0.13, respectively." (https://zhanglab.ccmb.med.umich.edu/COACH/COACH.pdf)
Args:
infile (str): Path to Bsites.inf
Returns:
list: Ranked list of dictionaries, keys defined below
- ``site_num``: cluster which is the consensus binding site
- ``c_score``: confidence score of the cluster prediction
- ``cluster_size``: number of predictions within this cluster
- ``algorithm``: main? algorithm used to make the prediction
- ``pdb_template_id``: PDB ID of the template used to make the prediction
- ``pdb_template_chain``: chain of the PDB which has the ligand
- ``pdb_ligand``: predicted ligand to bind
- ``binding_location_coords``: centroid of the predicted ligand position in the homology model
- ``c_score_method``: confidence score for the main algorithm
- ``binding_residues``: predicted residues to bind the ligand
- ``ligand_cluster_counts``: number of predictions per ligand
"""
bsites_results = []
with open(infile) as pp:
lines = list(filter(None, (line.rstrip() for line in pp)))
for i in range(len(lines) // 3):
bsites_site_dict = {}
line1 = lines[i * 3].split('\t')
line2 = lines[i * 3 + 1].split('\t')
line3 = lines[i * 3 + 2]
bsites_site_dict['site_num'] = line1[0]
bsites_site_dict['c_score'] = float(line1[1])
bsites_site_dict['cluster_size'] = line1[2]
bsites_site_dict['algorithm'] = line2[0]
bsites_site_dict['pdb_template_id'] = line2[1][:4]
bsites_site_dict['pdb_template_chain'] = line2[1][4]
bsites_site_dict['pdb_ligand'] = line2[2]
bsites_site_dict['binding_location_coords'] = tuple(float(x) for x in line2[3].split())
# TODO: what's the difference between this c-score and the cluster's c-score?
# how is the cluster's c-score computed? it's not the average c-score of all methods
# also why are some COFACTOR c-scores >1?
# 160411 - seems like the COFACTOR "BS-score" is being reported here, not its c-score...
tmp_split = line2[4].split(' :')
bsites_site_dict['c_score_method'] = tmp_split[0]
bsites_site_dict['binding_residues'] = tmp_split[1]
bsites_site_dict['ligand_cluster_counts'] = line3
bsites_results.append(bsites_site_dict)
return bsites_results | 0.004916 |
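A hedged usage sketch: the three-line block below is invented to match the tab-separated layout the parser expects (it is not real COACH output), but it exercises every field that gets extracted:

example_block = (
    "1\t0.42\t35\n"                                             # site, c-score, cluster size
    "COFACTOR\t1abcA\tZN\t10.1 12.3 -5.4\t0.45 :H63,H67,E92\n"  # method, template, ligand, centroid, score/residues
    "ZN:30 MG:5\n"                                              # ligand counts in the cluster
)
with open("Bsites.inf", "w") as f:                              # hypothetical file
    f.write(example_block)
sites = parse_coach_bsites_inf("Bsites.inf")
print(sites[0]["pdb_template_id"], sites[0]["pdb_template_chain"], sites[0]["pdb_ligand"])  # 1abc A ZN
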
def _is_autocomplete_valid(cur_commands, alias_command):
"""
Determine whether autocomplete can be performed at the current state.
Args:
parser: The current CLI parser.
cur_commands: The current commands typed in the console.
alias_command: The alias command.
Returns:
True if autocomplete can be performed.
"""
parent_command = ' '.join(cur_commands[1:])
with open(GLOBAL_ALIAS_TAB_COMP_TABLE_PATH, 'r') as tab_completion_table_file:
try:
tab_completion_table = json.loads(tab_completion_table_file.read())
return alias_command in tab_completion_table and parent_command in tab_completion_table[alias_command]
except Exception: # pylint: disable=broad-except
return False | 0.003812 |
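The lookup itself is plain dictionary membership against a JSON table mapping each alias to the parent commands under which it may complete. A minimal illustration with hypothetical contents — this is not the real table written by the alias extension:

# Hypothetical shape of the tab-completion table consulted above.
tab_completion_table = {
    "mn": ["", "network vnet"],        # alias "mn" completes at the root and under "network vnet"
}
cur_commands = ["az", "network", "vnet"]
alias_command = "mn"
parent_command = " ".join(cur_commands[1:])                    # "network vnet"
print(alias_command in tab_completion_table and
      parent_command in tab_completion_table[alias_command])   # True
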
def unlock(self, pwd):
""" Unlock the wallet database
"""
if self.store.is_encrypted():
return self.store.unlock(pwd) | 0.013072 |
def azimuth(self, point):
"""
Compute the azimuth (in decimal degrees) between this point
and the given point.
:param point:
Destination point.
:type point:
Instance of :class:`Point`
:returns:
The azimuth, value in a range ``[0, 360)``.
:rtype:
float
"""
return geodetic.azimuth(self.longitude, self.latitude,
point.longitude, point.latitude) | 0.004032 |
def update(self, iterations=10):
""" Iterates the graph layout and updates node positions.
"""
# The graph fades in when initially constructed.
self.alpha += 0.05
self.alpha = min(self.alpha, 1.0)
# Iterates over the graph's layout.
# Each step the graph's bounds are recalculated
# and a number of iterations are processed,
# more and more as the layout progresses.
if self.layout.i == 0:
self.layout.prepare()
self.layout.i += 1
elif self.layout.i == 1:
self.layout.iterate()
elif self.layout.i < self.layout.n:
            n = min(iterations, self.layout.i // 10 + 1)
for i in range(n):
self.layout.iterate()
# Calculate the absolute center of the graph.
        min_, max_ = self.layout.bounds
        self.x = _ctx.WIDTH - max_.x*self.d - min_.x*self.d
        self.y = _ctx.HEIGHT - max_.y*self.d - min_.y*self.d
self.x /= 2
self.y /= 2
return not self.layout.done | 0.007246 |
def lookup_type(storable_type):
"""
Look for the Python type that corresponds to a storable type name.
"""
if storable_type.startswith('Python'):
_, module_name = storable_type.split('.', 1)
else:
module_name = storable_type
#type_name, module_name = \
names = [ _name[::-1] for _name in module_name[::-1].split('.', 1) ]
if names[1:]:
type_name, module_name = names
else:
type_name = names[0]
return eval(type_name)
try:
module = importlib.import_module(module_name)
python_type = getattr(module, type_name)
except (ImportError, AttributeError):
python_type = None
return python_type | 0.005747 |
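The double reversal above is simply a way to split on the last dot (type name first, remaining module path second); a small demonstration of that step on a hypothetical name:

module_name = "pkg.sub.MyType"          # hypothetical storable/module path
names = [_name[::-1] for _name in module_name[::-1].split('.', 1)]
assert names == ['MyType', 'pkg.sub']   # same result as module_name.rsplit('.', 1)[::-1]
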
def get_actns(learn, hook:Hook, dl:DataLoader, pool=AdaptiveConcatPool2d, pool_dim:int=4, **kwargs):
"Gets activations at the layer specified by `hook`, applies `pool` of dim `pool_dim` and concatenates"
print('Getting activations...')
actns = []
learn.model.eval()
with torch.no_grad():
for (xb,yb) in progress_bar(dl):
learn.model(xb)
actns.append((hook.stored).cpu())
if pool:
pool = pool(pool_dim)
return pool(torch.cat(actns)).view(len(dl.x),-1)
else: return torch.cat(actns).view(len(dl.x),-1) | 0.0208 |
def input_file(self):
"""Returns the input file name, with a default relative path
"""
return path.join(path.dirname(__file__), 'data', 'tgs{:s}.tsv'.format(self.number)) | 0.015464 |
def get_url(self, agent_id, media_id):
"""
        Get the download URL of a permanent media asset.
        For details, see
        https://qydev.weixin.qq.com/wiki/index.php?title=%E8%8E%B7%E5%8F%96%E6%B0%B8%E4%B9%85%E7%B4%A0%E6%9D%90
        :param agent_id: ID of the enterprise application
        :param media_id: Media file ID
        :return: Download URL of the permanent media asset
"""
parts = (
'https://qyapi.weixin.qq.com/cgi-bin/material/get',
'?access_token=',
self.access_token,
'&media_id=',
media_id,
'&agentid=',
agent_id,
)
return ''.join(parts) | 0.00346 |
def detach(self):
"""
Detaches this volume from any device it may be attached to. If it
is not attached, nothing happens.
"""
attachments = self.attachments
if not attachments:
# Not attached; no error needed, just return
return
# A volume can only be attached to one device at a time, but for some
# reason this is a list instead of a singular value
att = attachments[0]
instance_id = att["server_id"]
attachment_id = att["id"]
try:
self._nova_volumes.delete_server_volume(instance_id, attachment_id)
except Exception as e:
raise exc.VolumeDetachmentFailed("%s" % e) | 0.002782 |
def parse(self, xmp):
"""Run parser and return a dictionary of all the parsed metadata."""
tree = etree.fromstring(xmp)
rdf_tree = tree.find(RDF_NS + 'RDF')
meta = defaultdict(dict)
for desc in rdf_tree.findall(RDF_NS + 'Description'):
            for el in desc:
ns, tag = self._parse_tag(el)
value = self._parse_value(el)
meta[ns][tag] = value
return dict(meta) | 0.004228 |
def generate_csr(self, domain='', r=None):
"""
Creates a certificate signing request to be submitted to a formal
certificate authority to generate a certificate.
Note, the provider may say the CSR must be created on the target server,
but this is not necessary.
"""
r = r or self.local_renderer
r.env.domain = domain or r.env.domain
role = self.genv.ROLE or ALL
site = self.genv.SITE or self.genv.default_site
print('self.genv.default_site:', self.genv.default_site, file=sys.stderr)
print('site.csr0:', site, file=sys.stderr)
ssl_dst = 'roles/%s/ssl' % (role,)
print('ssl_dst:', ssl_dst)
if not os.path.isdir(ssl_dst):
os.makedirs(ssl_dst)
for site, site_data in self.iter_sites():
print('site.csr1:', site, file=sys.stderr)
assert r.env.domain, 'No SSL domain defined.'
r.env.ssl_base_dst = '%s/%s' % (ssl_dst, r.env.domain.replace('*.', ''))
r.env.ssl_csr_year = date.today().year
r.local('openssl req -nodes -newkey rsa:{ssl_length} '
'-subj "/C={ssl_country}/ST={ssl_state}/L={ssl_city}/O={ssl_organization}/CN={ssl_domain}" '
'-keyout {ssl_base_dst}.{ssl_csr_year}.key -out {ssl_base_dst}.{ssl_csr_year}.csr') | 0.006677 |
def __tableStringParser(self, tableString):
"""
Will parse and check tableString parameter for any invalid strings.
Args:
tableString (str): Standard table string with header and decisions.
Raises:
ValueError: tableString is empty.
ValueError: One of the header element is not unique.
ValueError: Missing data value.
ValueError: Missing parent data.
Returns:
Array of header and decisions::
print(return)
[
['headerVar1', ... ,'headerVarN'],
[
['decisionValue1', ... ,'decisionValueN'],
[<row2 strings>],
...
[<rowN strings>]
]
]
"""
error = []
header = []
decisions = []
if tableString.split() == []:
error.append('Table variable is empty!')
else:
tableString = tableString.split('\n')
newData = []
for element in tableString:
if element.strip():
newData.append(element)
for element in newData[0].split():
if not element in header:
header.append(element)
else:
error.append('Header element: ' + element + ' is not unique!')
for i, tableString in enumerate(newData[2:]):
split = tableString.split()
if len(split) == len(header):
decisions.append(split)
else:
error.append('Row: {}==> missing: {} data'.format(
str(i).ljust(4),
str(len(header) - len(split)).ljust(2))
)
if error:
view.Tli.showErrors('TableStringError', error)
else:
return [header, decisions] | 0.037216 |
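A hedged sketch of the input format implied by the code: a whitespace-separated header row, a second row that is skipped (a ruler/separator), then one decision row per line. The variable names and values below are invented:

# Hypothetical table string accepted by __tableStringParser.
table = (
    "weather temperature decision\n"
    "------- ----------- --------\n"      # row at index 1 is skipped by newData[2:]
    "sunny   hot         beach\n"
    "rainy   cold        stay_home\n"
)
# Expected return value:
# [['weather', 'temperature', 'decision'],
#  [['sunny', 'hot', 'beach'], ['rainy', 'cold', 'stay_home']]]
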
def log_startup_info():
"""Log info about the current environment."""
LOG.always("Starting mongo-connector version: %s", __version__)
if "dev" in __version__:
LOG.warning(
"This is a development version (%s) of mongo-connector", __version__
)
LOG.always("Python version: %s", sys.version)
LOG.always("Platform: %s", platform.platform())
if hasattr(pymongo, "__version__"):
pymongo_version = pymongo.__version__
else:
pymongo_version = pymongo.version
LOG.always("pymongo version: %s", pymongo_version)
if not pymongo.has_c():
LOG.warning(
"pymongo version %s was installed without the C extensions. "
'"InvalidBSON: Date value out of range" errors may occur if '
"there are documents with BSON Datetimes that represent times "
"outside of Python's datetime limit.",
pymongo.__version__,
) | 0.002119 |
def setDefaultApplicationForMimeType(self, pchAppKey, pchMimeType):
"""Adds this mime-type to the list of supported mime types for this application"""
fn = self.function_table.setDefaultApplicationForMimeType
result = fn(pchAppKey, pchMimeType)
return result | 0.010309 |
def draw_edges(self):
"""
Draws edges to screen.
"""
if self.backend == "matplotlib":
for i, (n1, n2) in enumerate(self.edges):
x1, y1 = self.locs[n1]
x2, y2 = self.locs[n2]
color = self.edge_colors[i]
line = Line2D(
xdata=[x1, x2],
ydata=[y1, y2],
color=color,
zorder=0,
alpha=0.3,
)
self.ax.add_line(line)
elif self.backend == "altair":
marker_attrs = dict()
marker_attrs["color"] = "black" # MAGICNUMBER
marker_attrs["strokeWidth"] = 1 # MAGICNUMBER
self.edge_chart = (
alt.Chart(self.edge_df)
.mark_line(**marker_attrs)
.encode(
alt.X(f"{self.node_lon}:Q"),
alt.Y(f"{self.node_lat}:Q"),
detail="edge",
)
) | 0.001918 |
def read_csv(filename, keys=None, convert_types=False, **kwargs):
'''
Read a CSV in canonical form: ::
<agent_id, t_step, key, value, value_type>
'''
df = pd.read_csv(filename)
if convert_types:
df = convert_types_slow(df)
if keys:
df = df[df['key'].isin(keys)]
df = process_one(df)
return df | 0.002857 |
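A small, self-contained example of the canonical long form and the key-filtering step (column values are invented; process_one is not reproduced here):

import io
import pandas as pd

csv_text = (                                   # <agent_id, t_step, key, value, value_type>
    "agent_id,t_step,key,value,value_type\n"
    "a0,0,state,idle,str\n"
    "a0,1,state,infected,str\n"
    "a1,0,age,32,int\n"
)
df = pd.read_csv(io.StringIO(csv_text))
print(df[df['key'].isin(['state'])])           # same filter used when keys are given
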
def unflatten(self, obj):
"""
Translate substitutions dictionary into objects.
"""
obj.substitutions = [
dict(from_id=key, to_id=value)
for key, value in getattr(obj, "substitutions", {}).items()
] | 0.007634 |
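The transformation is just a flat mapping turned into a list of from/to records; a standalone sketch with made-up ids:

substitutions = {"old-1": "new-1", "old-2": "new-2"}       # hypothetical ids
unflattened = [dict(from_id=key, to_id=value) for key, value in substitutions.items()]
print(unflattened)   # [{'from_id': 'old-1', 'to_id': 'new-1'}, {'from_id': 'old-2', 'to_id': 'new-2'}]
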
def get_dataset(self, key, info):
"""Read data from file and return the corresponding projectables."""
if key.name in ['longitude', 'latitude']:
logger.debug('Reading coordinate arrays.')
if self.lons is None or self.lats is None:
self.lons, self.lats = self.get_lonlats()
if key.name == 'latitude':
proj = Dataset(self.lats, id=key, **info)
else:
proj = Dataset(self.lons, id=key, **info)
else:
data = self.get_sds_variable(key.name)
proj = Dataset(data, id=key, **info)
return proj | 0.00313 |
def evaluate(self, data, env):
"""
Evaluate statement
Parameters
----------
        data : pandas.DataFrame
            Data in whose namespace the statement will be
            evaluated. Typically, this is a group dataframe.
        env : environment
            Environment in which the statement is evaluated.
        Returns
        -------
        out : object
            Result of the evaluation.
"""
def n():
"""
Return number of rows in groups
This function is part of the public API
"""
return len(data)
if isinstance(self.stmt, str):
# Add function n() that computes the
# size of the group data to the inner namespace.
if self._has_n_func:
namespace = dict(data, n=n)
else:
namespace = data
# Avoid obvious keywords e.g if a column
# is named class
if self.stmt not in KEYWORDS:
value = env.eval(
self.stmt,
source_name='Expression.evaluate',
inner_namespace=namespace)
else:
value = namespace[self.stmt]
elif callable(self.stmt):
value = self.stmt(data)
else:
value = self.stmt
return value | 0.001514 |
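The essential behaviour — evaluate a string expression with the group dataframe's columns as the namespace plus an n() group-size helper — can be sketched with plain eval. This is an illustration only and stands in for the environment object used above:

import pandas as pd

def evaluate_stmt(stmt, data):
    """Evaluate stmt with the dataframe columns (and n()) as its namespace."""
    namespace = dict(data)                    # column name -> Series
    namespace['n'] = lambda: len(data)        # group-size helper, as above
    return eval(stmt, {}, namespace)          # plain eval; no custom environment

group = pd.DataFrame({'x': [1, 2, 3], 'y': [10, 20, 30]})
print(evaluate_stmt('x + y', group))          # element-wise Series sum
print(evaluate_stmt('n()', group))            # 3
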
def get_variable(name, temp_s):
'''
    Create a zero-initialized tf.Variable with the given name and shape.
'''
return tf.Variable(tf.zeros(temp_s), name=name) | 0.008 |
def block_code(self):
inputs = self._get_all_input_values()
outputs = {}
"""
self.f = self.user_function(**inputs)
try:
outputs = self.f.send(inputs)
except StopIteration:
self.terminate()
"""
if self.first_time:
self.f = self.user_function(**inputs)
outputs = self.f.next()
self.first_time = False
else:
try:
outputs = self.f.send(inputs)
except StopIteration:
self.terminate()
if outputs:
for key in outputs.keys():
self.set_output_data(key, outputs[key])
if 'previous_outputs' in self.output_channels.keys():
self.output_channels['previous_outputs'].set_value(Data(self.time, copy.deepcopy(outputs))) | 0.003538 |
def solve_T(self, P, V, quick=True):
r'''Method to calculate `T` from a specified `P` and `V` for the PRSV
EOS. Uses `Tc`, `a`, `b`, `kappa0` and `kappa` as well, obtained from
the class's namespace.
Parameters
----------
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
quick : bool, optional
Whether to use a SymPy cse-derived expression (somewhat faster) or
individual formulas.
Returns
-------
T : float
Temperature, [K]
Notes
-----
        Not guaranteed to produce a solution. There are actually two solutions,
one much higher than normally desired; it is possible the solver could
converge on this.
'''
Tc, a, b, kappa0, kappa1 = self.Tc, self.a, self.b, self.kappa0, self.kappa1
if quick:
x0 = V - b
R_x0 = R/x0
x3 = (100.*(V*(V + b) + b*x0))
x4 = 10.*kappa0
kappa110 = kappa1*10.
kappa17 = kappa1*7.
def to_solve(T):
x1 = T/Tc
x2 = x1**0.5
return (T*R_x0 - a*((x4 - (kappa110*x1 - kappa17)*(x2 + 1.))*(x2 - 1.) - 10.)**2/x3) - P
else:
def to_solve(T):
P_calc = R*T/(V - b) - a*((kappa0 + kappa1*(sqrt(T/Tc) + 1)*(-T/Tc + 7/10))*(-sqrt(T/Tc) + 1) + 1)**2/(V*(V + b) + b*(V - b))
return P_calc - P
return newton(to_solve, Tc*0.5) | 0.006394 |
def _internal_request(self, request_obj, url, method, **kwargs):
""" Internal handling of requests. Handles Exceptions.
:param request_obj: a requests session.
:param str url: url to send request to
:param str method: type of request (get/put/post/patch/delete)
:param kwargs: extra params to send to the request api
:return: Response of the request
:rtype: requests.Response
"""
method = method.lower()
if method not in self._allowed_methods:
raise ValueError('Method must be one of the allowed ones')
if method == 'get':
kwargs.setdefault('allow_redirects', True)
elif method in ['post', 'put', 'patch']:
if 'headers' not in kwargs:
kwargs['headers'] = {}
if kwargs.get('headers') is not None and kwargs['headers'].get(
'Content-type') is None:
kwargs['headers']['Content-type'] = 'application/json'
if 'data' in kwargs and kwargs['data'] is not None and kwargs['headers'].get(
'Content-type') == 'application/json':
kwargs['data'] = json.dumps(kwargs['data']) # convert to json
request_done = False
token_refreshed = False
while not request_done:
self._check_delay() # sleeps if needed
try:
log.info('Requesting ({}) URL: {}'.format(method.upper(), url))
log.info('Request parameters: {}'.format(kwargs))
# auto_retry will occur inside this function call if enabled
response = request_obj.request(method, url,
**kwargs)
response.raise_for_status() # raise 4XX and 5XX error codes.
log.info('Received response ({}) from URL {}'.format(
response.status_code, response.url))
request_done = True
return response
except TokenExpiredError as e:
# Token has expired, try to refresh the token and try again on the next loop
if not self.token_backend.token.is_long_lived:
raise e
if token_refreshed:
# Refresh token done but still TokenExpiredError raise
raise RuntimeError('Token Refresh Operation not working')
log.info('Oauth Token is expired, fetching a new token')
self.refresh_token()
log.info('New oauth token fetched')
token_refreshed = True
except (ConnectionError, ProxyError, SSLError, Timeout) as e:
# We couldn't connect to the target url, raise error
log.debug('Connection Error calling: {}.{}'
''.format(url, ('Using proxy: {}'.format(self.proxy)
if self.proxy else '')))
raise e # re-raise exception
except HTTPError as e:
# Server response with 4XX or 5XX error status codes
# try to extract the error message:
try:
error = response.json()
error_message = error.get('error', {}).get('message', '')
except ValueError:
error_message = ''
status_code = int(e.response.status_code / 100)
if status_code == 4:
# Client Error
# Logged as error. Could be a library error or Api changes
log.error('Client Error: {} | Error Message: {}'.format(str(e), error_message))
else:
# Server Error
log.debug('Server Error: {}'.format(str(e)))
if self.raise_http_errors:
if error_message:
raise HTTPError('{} | Error Message: {}'.format(e.args[0], error_message), response=response) from None
else:
raise e
else:
return e.response
except RequestException as e:
# catch any other exception raised by requests
log.debug('Request Exception: {}'.format(str(e)))
raise e | 0.001379 |
def do_thread(self, arg):
"""th(read) [threadnumber]
Without argument, display a summary of all active threads.
The summary prints for each thread:
1. the thread number assigned by pdb
2. the thread name
3. the python thread identifier
4. the current stack frame summary for that thread
An asterisk '*' to the left of the pdb thread number indicates the
current thread, a plus sign '+' indicates the thread being traced by
pdb.
With a pdb thread number as argument, make this thread the current
thread. The 'where', 'up' and 'down' commands apply now to the frame
stack of this thread. The current scope is now the frame currently
executed by this thread at the time the command is issued and the
'list', 'll', 'args', 'p', 'pp', 'source' and 'interact' commands are
run in the context of that frame. Note that this frame may bear no
relationship (for a non-deadlocked thread) to that thread's current
activity by the time you are examining the frame.
This command does not stop the thread.
"""
# Import the threading module in the main interpreter to get an
# enumeration of the main interpreter threads.
if PY3:
try:
import threading
except ImportError:
import dummy_threading as threading
else:
# Do not use relative import detection to avoid the RuntimeWarning:
# Parent module 'pdb_clone' not found while handling absolute
# import.
try:
threading = __import__('threading', level=0)
except ImportError:
threading = __import__('dummy_threading', level=0)
if not self.pdb_thread:
self.pdb_thread = threading.current_thread()
if not self.current_thread:
self.current_thread = self.pdb_thread
current_frames = sys._current_frames()
tlist = sorted(threading.enumerate(), key=attrgetter('name', 'ident'))
try:
self._do_thread(arg, current_frames, tlist)
finally:
# For some reason this local must be explicitly deleted in order
# to release the subinterpreter.
del current_frames | 0.001275 |
def getDone(self, done):
"""
        :desc: Fetches the IDs of all ToDos with the given done state
        :param bool done: done or undone?
        :returns: A list of matching IDs
        :rtype: list
"""
doneItems = self.noteDB['todo'] \
.find({"done": done}) \
.sort("date", pymongo.DESCENDING)
IDs = [ii['ID'] for ii in doneItems]
return IDs | 0.00464 |
def add_dataset(self, name=None, label=None,
x_column_label=None, y_column_label=None, index=None, control=False):
"""Add a dataset to a specific plot.
This method adds a dataset to a plot. Its functional use is imperative
to the plot generation. It handles adding new files as well
as indexing to files that are added to other plots.
        All Args default to None. However, these are not the defaults
        in the code. See DataImportContainer attributes for defaults in code.
Args:
name (str, optional): Name (path) for file.
Required if reading from a file (at least one).
Required if file_name is not in "general". Must be ".txt" or ".hdf5".
Can include path from working directory.
label (str, optional): Column label in the dataset corresponding to desired SNR value.
Required if reading from a file (at least one).
x_column_label/y_column_label (str, optional): Column label from input file identifying
x/y values. This can override setting in "general". Default
is `x`/`y`.
index (int, optional): Index of plot with preloaded data.
Required if not loading a file.
control (bool, optional): If True, this dataset is set to the control.
This is needed for Ratio plots. It sets
the baseline. Default is False.
Raises:
            ValueError: If no options are passed. This means no file indication
                nor index.
"""
if name is None and label is None and index is None:
raise ValueError("Attempting to add a dataset without"
+ "supplying index or file information.")
if index is None:
trans_dict = DataImportContainer()
if name is not None:
trans_dict.file_name = name
if label is not None:
trans_dict.label = label
if x_column_label is not None:
trans_dict.x_column_label = x_column_label
if y_column_label is not None:
trans_dict.y_column_label = y_column_label
if control:
self.control = trans_dict
else:
# need to append file to file list.
if 'file' not in self.__dict__:
self.file = []
self.file.append(trans_dict)
else:
if control:
self.control = DataImportContainer()
self.control.index = index
else:
# need to append index to index list.
if 'indices' not in self.__dict__:
self.indices = []
self.indices.append(index)
return | 0.002782 |
def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in itervalues(LEXERS):
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime) | 0.001919 |
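A short usage example, assuming Pygments' standard registry in which 'text/x-python' is mapped to the Python lexer:

from pygments.lexers import get_lexer_for_mimetype
from pygments.util import ClassNotFound

try:
    lexer = get_lexer_for_mimetype('text/x-python')
    print(lexer.name)                          # "Python"
except ClassNotFound:
    print('no lexer registered for this mimetype')
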
def compute_dprime(n_Hit=None, n_Miss=None, n_FA=None, n_CR=None):
"""
Computes the d', beta, aprime, b''d and c parameters based on the signal detection theory (SDT). **Feel free to help me expand the documentation of this function with details and interpretation guides.**
Parameters
----------
n_Hit : int
Number of hits.
n_Miss : int
Number of misses.
n_FA : int
Number of false alarms.
n_CR : int
Number of correct rejections.
Returns
----------
parameters : dict
A dictionary with the parameters (see details).
Example
----------
>>> import neurokit as nk
>>>
>>> nk.compute_dprime(n_Hit=7, n_Miss=4, n_FA=6, n_CR=6)
Notes
----------
*Details*
    The Signal Detection Theory (often abbreviated as SDT) is used in very different domains, from psychology (psychophysics, perception, memory) and medical diagnostics (do the symptoms match a known diagnosis, or can they be dismissed as irrelevant?) to statistical decision making (do the data indicate that the experiment has an effect or not?). It evolved from the development of communications and radar equipment in the first half of the 20th century and was brought into psychology as an attempt to understand some features of human behavior that were not well explained by traditional models. SDT is, indeed, used to analyze data coming from experiments where the task is to categorize ambiguous stimuli which can be generated either by a known process (called the *signal*) or be obtained by chance (called the *noise* in the SDT framework). Based on the number of hits, misses, false alarms and correct rejections, it estimates two main parameters from the experimental data: **d' (d-prime, the discriminability index)** and C (a variant of which is called beta). Non-parametric variants are aprime and b''d (bppd).
- **dprime**: The sensitivity index. Indicates the strength of the signal (relative to the noise). More specifically, it is the standardized difference between the means of the Signal Present and Signal Absent distributions.
- **beta**: Response bias index.
- **aprime**: Non-parametric sensitivity index.
- **bppd**: Non-parametric response bias index.
- **c**: Response bias index.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- scipy
*See Also*
- `neuropsychology <https://www.rdocumentation.org/packages/neuropsychology/topics/dprime>`_
- http://lindeloev.net/calculating-d-in-python-and-php/
"""
# Ratios
hit_rate = n_Hit/(n_Hit + n_Miss)
fa_rate = n_FA/(n_FA + n_CR)
# Adjusted ratios
hit_rate_adjusted = (n_Hit+ 0.5)/((n_Hit+ 0.5) + n_Miss + 1)
fa_rate_adjusted = (n_FA+ 0.5)/((n_FA+ 0.5) + n_CR + 1)
# dprime
dprime = scipy.stats.norm.ppf(hit_rate_adjusted) - scipy.stats.norm.ppf(fa_rate_adjusted)
# beta
zhr = scipy.stats.norm.ppf(hit_rate_adjusted)
zfar = scipy.stats.norm.ppf(fa_rate_adjusted)
beta = np.exp(-zhr*zhr/2 + zfar*zfar/2)
# aprime
a = 1/2+((hit_rate-fa_rate)*(1+hit_rate-fa_rate) / (4*hit_rate*(1-fa_rate)))
b = 1/2-((fa_rate-hit_rate)*(1+fa_rate-hit_rate) / (4*fa_rate*(1-hit_rate)))
if fa_rate > hit_rate:
aprime = b
elif fa_rate < hit_rate:
aprime = a
else:
aprime = 0.5
# bppd
bppd = ((1-hit_rate)*(1-fa_rate)-hit_rate*fa_rate) / ((1-hit_rate)*(1-fa_rate)+hit_rate*fa_rate)
# c
c = -(scipy.stats.norm.ppf(hit_rate_adjusted) + scipy.stats.norm.ppf(fa_rate_adjusted))/2
parameters = dict(dprime=dprime, beta=beta, aprime=aprime, bppd=bppd, c=c)
return(parameters) | 0.00435 |
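A numerical walk-through of the core d' step for the counts in the docstring example, using the same adjusted rates as the function body (scipy's norm.ppf is the z-transform):

from scipy.stats import norm

n_Hit, n_Miss, n_FA, n_CR = 7, 4, 6, 6
hit_rate_adj = (n_Hit + 0.5) / ((n_Hit + 0.5) + n_Miss + 1)   # 7.5 / 12.5 = 0.60
fa_rate_adj = (n_FA + 0.5) / ((n_FA + 0.5) + n_CR + 1)        # 6.5 / 13.5 ~ 0.481
dprime = norm.ppf(hit_rate_adj) - norm.ppf(fa_rate_adj)       # z(H) - z(FA) ~ 0.30
print(round(dprime, 3))
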
def _parse_snapshot_hits(self, file_obj):
"""Parse and store snapshot hits."""
for _ in range(self.n_snapshot_hits):
dom_id, pmt_id = unpack('<ib', file_obj.read(5))
tdc_time = unpack('>I', file_obj.read(4))[0]
tot = unpack('<b', file_obj.read(1))[0]
self.snapshot_hits.append((dom_id, pmt_id, tdc_time, tot)) | 0.005362 |
def set_exception(self, exc_class, exc_info, exc_stack):
"""Set an exception as the result of this operation.
        Args:
            exc_class (object): The exception type.
            exc_info (object): The exception value (instance).
            exc_stack (object): The traceback associated with the exception.
        """
if self.is_finished():
raise InternalError("set_exception called on finished AsynchronousResponse",
result=self._result, exception=self._exception)
self._exception = (exc_class, exc_info, exc_stack)
self.finish() | 0.006383 |
def inverse(self):
"""Invert all instructions."""
for index, instruction in enumerate(self.instructions):
self.instructions[index] = instruction.inverse()
return self | 0.009901 |
def save(self, commit=True):
"""
Save the model instance with the correct Auth Group based on the user_level question
"""
instance = super(UserCreationForm, self).save(commit=commit)
random_password = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))
instance.set_password(random_password)
instance.save()
email_form = PasswordResetForm({'email': self.cleaned_data['email']})
email_form.is_valid()
email_form.save(email_template_name='accounts/welcome_email.html')
return instance | 0.006645 |
def build(values):
"""Build a tree from `list representation`_ and return its root node.
.. _list representation:
https://en.wikipedia.org/wiki/Binary_tree#Arrays
:param values: List representation of the binary tree, which is a list of
node values in breadth-first order starting from the root (current
node). If a node is at index i, its left child is always at 2i + 1,
right child at 2i + 2, and parent at floor((i - 1) / 2). None indicates
absence of a node at that index. See example below for an illustration.
:type values: [int | float | None]
:return: Root node of the binary tree.
:rtype: binarytree.Node
:raise binarytree.exceptions.NodeNotFoundError: If the list representation
is malformed (e.g. a parent node is missing).
**Example**:
.. doctest::
>>> from binarytree import build
>>>
>>> root = build([1, 2, 3, None, 4])
>>>
>>> print(root)
<BLANKLINE>
__1
/ \\
2 3
\\
4
<BLANKLINE>
.. doctest::
>>> from binarytree import build
>>>
>>> root = build([None, 2, 3]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
NodeNotFoundError: parent node missing at index 0
"""
nodes = [None if v is None else Node(v) for v in values]
for index in range(1, len(nodes)):
node = nodes[index]
if node is not None:
parent_index = (index - 1) // 2
parent = nodes[parent_index]
if parent is None:
raise NodeNotFoundError(
'parent node missing at index {}'.format(parent_index))
setattr(parent, 'left' if index % 2 else 'right', node)
return nodes[0] if nodes else None | 0.000537 |
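The index arithmetic behind the list representation, spelled out for one node of the docstring example (pure arithmetic, no library call):

values = [1, 2, 3, None, 4]
i = 4                                          # index of the node holding 4
parent = (i - 1) // 2                          # -> 1, the node holding 2
left, right = 2 * parent + 1, 2 * parent + 2   # -> 3 (absent) and 4
assert values[parent] == 2 and values[left] is None and values[right] == 4
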
def to_molden(cartesian_list, buf=None, sort_index=True,
overwrite=True, float_format='{:.6f}'.format):
"""Write a list of Cartesians into a molden file.
    .. note:: Since it permanently writes a file, this function
        is strictly speaking **not side-effect free**.
The list to be written is of course not changed.
Args:
cartesian_list (list):
buf (str): StringIO-like, optional buffer to write to
sort_index (bool): If sort_index is true, the Cartesian
is sorted by the index before writing.
overwrite (bool): May overwrite existing files.
float_format (one-parameter function): Formatter function
to apply to column’s elements if they are floats.
The result of this function must be a unicode string.
Returns:
formatted : string (or unicode, depending on data and options)
"""
if sort_index:
cartesian_list = [molecule.sort_index() for molecule in cartesian_list]
give_header = ("[MOLDEN FORMAT]\n"
+ "[N_GEO]\n"
+ str(len(cartesian_list)) + "\n"
+ '[GEOCONV]\n'
+ 'energy\n{energy}'
+ 'max-force\n{max_force}'
+ 'rms-force\n{rms_force}'
+ '[GEOMETRIES] (XYZ)\n').format
values = len(cartesian_list) * '1\n'
energy = [str(m.metadata.get('energy', 1)) for m in cartesian_list]
energy = '\n'.join(energy) + '\n'
header = give_header(energy=energy, max_force=values, rms_force=values)
coordinates = [x.to_xyz(sort_index=sort_index, float_format=float_format)
for x in cartesian_list]
output = header + '\n'.join(coordinates)
if buf is not None:
if overwrite:
with open(buf, mode='w') as f:
f.write(output)
else:
with open(buf, mode='x') as f:
f.write(output)
else:
return output | 0.000502 |
def get_border_phase(self, idn=0, idr=0):
"""Return one of nine border fields
Parameters
----------
idn: int
Index for refractive index.
One of -1 (left), 0 (center), 1 (right)
idr: int
Index for radius.
One of -1 (left), 0 (center), 1 (right)
"""
assert idn in [-1, 0, 1]
assert idr in [-1, 0, 1]
n = self.sphere_index + self.dn * idn
r = self.radius + self.dr * idr
# convert to array indices
idn += 1
idr += 1
# find out whether we need to compute a new border field
if self._n_border[idn, idr] == n and self._r_border[idn, idr] == r:
if self.verbose > 3:
print("Using cached border phase (n{}, r{})".format(idn, idr))
# return previously computed field
pha = self._border_pha[(idn, idr)]
else:
if self.verbose > 3:
print("Computing border phase (n{}, r{})".format(idn, idr))
kwargs = self.model_kwargs.copy()
kwargs["radius"] = r
kwargs["sphere_index"] = n
kwargs["center"] = [self.posx_offset, self.posy_offset]
tb = time.time()
pha = self.sphere_method(**kwargs).pha
if self.verbose > 2:
print("Border phase computation time:",
self.sphere_method.__module__, time.time() - tb)
self._border_pha[(idn, idr)] = pha
self._n_border[idn, idr] = n
self._r_border[idn, idr] = r
return pha | 0.001241 |
def imagetransformer_sep_channels_12l_16h_imagenet_large():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_hidden_layers = 12
hparams.batch_size = 1
hparams.filter_size = 2048
hparams.num_heads = 16
hparams.learning_rate_warmup_steps = 16000
hparams.sampling_method = "random"
hparams.learning_rate = 0.1
return hparams | 0.028721 |
def prepare_worker_options(self):
"""
Prepare (and return as a dict) all options to be passed to the worker
"""
worker_options = dict()
for option_name in (self.options.worker_class.parameters):
option = getattr(self.options, option_name)
if option is not None:
worker_options[option_name] = option
return worker_options | 0.004902 |