text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that the result won't be renamed to the final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path) | 0.011628 |
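A minimal usage sketch for the context manager above, assuming the function is wrapped with contextlib.contextmanager (as its yield-based body implies) and that new_uid and make_parent_dirs are available; the report.csv path is hypothetical.

# Hypothetical usage: write into the temporary path; on normal exit the
# temporary file is atomically renamed to the destination.
with atomic_output_file("report.csv", make_parents=True) as tmp_path:
    with open(tmp_path, "w") as f:
        f.write("col_a,col_b\n1,2\n")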
def get_context_data(self, **kwargs):
"""Add context data to view"""
context = super().get_context_data(**kwargs)
context.update({
'title': self.get_title(),
'ajax_url': self.get_ajax_url(),
'download_url': self.get_download_url(),
'create_url': self.get_create_url(),
})
return context | 0.005348 |
def is_instance_of(self, some_class):
"""Asserts that val is an instance of the given class."""
try:
if not isinstance(self.val, some_class):
if hasattr(self.val, '__name__'):
t = self.val.__name__
elif hasattr(self.val, '__class__'):
t = self.val.__class__.__name__
else:
t = 'unknown'
self._err('Expected <%s:%s> to be instance of class <%s>, but was not.' % (self.val, t, some_class.__name__))
except TypeError:
raise TypeError('given arg must be a class')
return self | 0.004608 |
def sort_topologically(dag):
"""Sort the dag breath first topologically.
Only the nodes inside the dag are returned, i.e. the nodes that are also keys.
Returns:
a topological ordering of the DAG.
Raises:
an error if this is not possible (graph is not valid).
"""
dag = copy.deepcopy(dag)
sorted_nodes = []
independent_nodes = deque(get_independent_nodes(dag))
while independent_nodes:
node = independent_nodes.popleft()
sorted_nodes.append(node)
# this alters the dag so that we are sure we are visiting the nodes only once
downstream_nodes = dag[node]
while downstream_nodes:
downstream_node = downstream_nodes.pop(0)
if downstream_node not in dag:
continue
if not has_dependencies(downstream_node, dag):
independent_nodes.append(downstream_node)
if len(sorted_nodes) != len(dag.keys()):
raise ValueError('graph is not acyclic')
return sorted_nodes | 0.002913 |
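For comparison, a self-contained sketch of the same breadth-first (Kahn-style) approach without the get_independent_nodes/has_dependencies helpers; the helper-free version and the example graph are illustrative, not the library's API.

from collections import deque

def kahn_sort(dag):
    # dag maps node -> list of downstream nodes; all nodes appear as keys.
    indegree = {node: 0 for node in dag}
    for downstream in dag.values():
        for node in downstream:
            if node in indegree:
                indegree[node] += 1
    queue = deque(node for node, deg in indegree.items() if deg == 0)
    ordered = []
    while queue:
        node = queue.popleft()
        ordered.append(node)
        for downstream in dag[node]:
            if downstream not in indegree:
                continue
            indegree[downstream] -= 1
            if indegree[downstream] == 0:
                queue.append(downstream)
    if len(ordered) != len(dag):
        raise ValueError('graph is not acyclic')
    return ordered

# kahn_sort({'a': ['b', 'c'], 'b': ['c'], 'c': []}) -> ['a', 'b', 'c']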
def _activity_or_list(value):
"""Tries to convert value to :class:`tincan.ActivityList`
:setter type: :class:`tincan.ActivityList`
:rtype: :class:`tincan.ActivityList`
"""
result = value
if value is not None and not isinstance(value, ActivityList):
try:
result = ActivityList([Activity(value)])
except (TypeError, AttributeError):
result = ActivityList(value)
return result | 0.004115 |
def next(self):
"""Return the next transaction object.
StopIteration will be propagated from self.csvreader.next()
"""
try:
return self.dict_to_xn(self.csvreader.next())
except MetadataException:
# row was metadata; proceed to next row
return next(self) | 0.006061 |
def create_ticket(self, ticket=None, **kwargs):
"""
Create a new ``Ticket``. Additional arguments are passed to the
``create()`` function. Return the newly created ``Ticket``.
"""
if not ticket:
ticket = self.create_ticket_str()
if 'service' in kwargs:
kwargs['service'] = clean_service_url(kwargs['service'])
if 'expires' not in kwargs:
expires = now() + timedelta(seconds=self.model.TICKET_EXPIRE)
kwargs['expires'] = expires
t = self.create(ticket=ticket, **kwargs)
logger.debug("Created %s %s" % (t.name, t.ticket))
return t | 0.003049 |
def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None):
"""Convert a list of objects to a timedelta index object."""
if isinstance(arg, (list, tuple)) or not hasattr(arg, 'dtype'):
# This is needed only to ensure that in the case where we end up
# returning arg (errors == "ignore"), and where the input is a
# generator, we return a useful list-like instead of a
# used-up generator
arg = np.array(list(arg), dtype=object)
try:
value = sequence_to_td64ns(arg, unit=unit,
errors=errors, copy=False)[0]
except ValueError:
if errors == 'ignore':
return arg
else:
# This else-block accounts for the cases when errors='raise'
# and errors='coerce'. If errors == 'raise', these errors
# should be raised. If errors == 'coerce', we shouldn't
# expect any errors to be raised, since all parsing errors
# cause coercion to pd.NaT. However, if an error / bug is
# introduced that causes an Exception to be raised, we would
# like to surface it.
raise
if box:
from pandas import TimedeltaIndex
value = TimedeltaIndex(value, unit='ns', name=name)
return value | 0.000757 |
def load_grouped_actions(spec, default_group=None, key_prefix="actions", pop_keys=False, expr_parser=None):
"""Instanciates actions from a dict. Will look for a key name key_prefix and
for key starting with key_prefix followed by a dot and a group name. A group
name can be any string and will can be used later to filter actions.
Values associated to these keys should be lists that will be loaded using load_actions()
"""
actions = ActionList()
if expr_parser is None:
expr_parser = ExpressionParser()
for key in spec.keys():
if key != key_prefix and not key.startswith(key_prefix + "."):
continue
group = default_group
if "." in key:
(_, group) = key.split(".")
actions.extend(load_actions(spec[key], group, expr_parser))
if pop_keys:
spec.pop(key)
return actions | 0.005643 |
def cdf(arr, pos=None):
'''
Return the cumulative density function of a given array or
its intensity at a given position (0-1)
'''
r = (arr.min(), arr.max())
hist, bin_edges = np.histogram(arr, bins=2 * int(r[1] - r[0]), range=r)
hist = np.asfarray(hist) / hist.sum()
cdf = np.cumsum(hist)
if pos is None:
return cdf
i = np.argmax(cdf > pos)
return bin_edges[i] | 0.002342 |
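A quick standalone sketch of what the helper computes, using plain numpy on hypothetical data.

import numpy as np

arr = np.array([0.0, 1.0, 2.0, 2.0, 4.0])
hist, edges = np.histogram(arr, bins=8, range=(arr.min(), arr.max()))
cum = np.cumsum(hist / hist.sum())   # monotone curve ending at 1.0
# cdf(arr) above returns this curve; cdf(arr, 0.5) returns the bin edge
# where the cumulative density first exceeds 0.5.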
def not_user_filter(config, message, fasnick=None, *args, **kw):
""" Everything except a particular user
Use this rule to exclude messages that are associated with one or more
users. Specify several users by separating them with a comma ','.
"""
fasnick = kw.get('fasnick', fasnick)
if not fasnick:
return False
fasnick = (fasnick or []) and fasnick.split(',')
valid = True
for nick in fasnick:
if nick.strip() in fmn.rules.utils.msg2usernames(message, **config):
valid = False
break
return valid | 0.001724 |
def generate_vector_color_map(self):
"""Generate color stops array for use with match expression in mapbox template"""
vector_stops = []
# if join data specified as filename or URL, parse JSON to list of Python dicts
if type(self.data) == str:
self.data = geojson_to_dict_list(self.data)
# loop through features in self.data to create join-data map
for row in self.data:
# map color to JSON feature using color_property
color = color_map(row[self.color_property], self.color_stops, self.color_default)
# link to vector feature using data_join_property (from JSON object)
vector_stops.append([row[self.data_join_property], color])
return vector_stops | 0.008951 |
def create_mssql_pyodbc(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.mssql_pyodbc), **kwargs
) | 0.010526 |
def vcs_rbridge_config_input_vcs_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcs_rbridge_config = ET.Element("vcs_rbridge_config")
config = vcs_rbridge_config
input = ET.SubElement(vcs_rbridge_config, "input")
vcs_id = ET.SubElement(input, "vcs-id")
vcs_id.text = kwargs.pop('vcs_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004237 |
def get_slide_context(self):
"""Return the context dict for rendering this slide."""
return {
'title': self.title,
'level': self.level,
'content': self.content,
'classes': self.classes,
'slide_classes': self._filter_classes(exclude='content-'),
'content_classes': self._filter_classes(include='content-'),
'slide_number': self.slide_number,
'config': self._translator.builder.config,
'id': self.id,
} | 0.003752 |
def interpolate_data(self, data, times, resampled_times):
""" Interpolates data feature
:param data: Array in a shape of t x nobs, where nobs = h x w x n
:type data: numpy.ndarray
:param times: Array of reference times in seconds relative to the first timestamp
:type times: numpy.array
:param resampled_times: Array of reference times in seconds relative to the first timestamp in the
initial timestamp array.
:type resampled_times: numpy.array
:return: Array of interpolated values
:rtype: numpy.ndarray
"""
if True in np.unique(np.isnan(data)):
raise ValueError('Data must not contain any masked/invalid pixels or NaN values')
interp_func = self.get_interpolation_function(times, data)
time_mask = (resampled_times >= np.min(times)) & (resampled_times <= np.max(times))
new_data = np.full((resampled_times.size,) + data.shape[1:], np.nan, dtype=data.dtype)
new_data[time_mask] = interp_func(resampled_times[time_mask])
return new_data | 0.006329 |
def check(self, value, major):
"""Check whether the value in the set of allowed values.
Raise a ValueError if it is not.
"""
if self._case_insensitive:
value = value.lower()
values = self._valid_values_lower
caseflag = " (case-insensitive)"
else:
values = self._valid_values
caseflag = ""
if value not in values:
raise ValueError("Discrete value '%s' is not one of %s%s."
% (value, list(self._values), caseflag)) | 0.003552 |
def phon(self, cls='current', previousdelimiter="", strict=False,correctionhandling=CorrectionHandling.CURRENT):
"""Get the phonetic representation associated with this element (of the specified class)
The phonetic content will be constructed from child-elements wherever possible, as they are more specific.
If no phonetic content can be obtained from the children and the element has itself phonetic content associated with
it, then that will be used.
Parameters:
cls (str): The class of the phonetic content to obtain, defaults to ``current``.
retaintokenisation (bool): If set, the space attribute on words will be ignored, otherwise it will be adhered to and phonetic content will be detokenised as much as possible. Defaults to ``False``.
previousdelimiter (str): Can be set to a delimiter that was last output, useful when chaining calls to :meth:`phon`. Defaults to an empty string.
strict (bool): Set this if you are strictly interested in the phonetic content explicitly associated with the element, without recursing into children. Defaults to ``False``.
correctionhandling: Specifies what phonetic content to retrieve when corrections are encountered. The default is ``CorrectionHandling.CURRENT``, which will retrieve the corrected/current phonetic content. You can set this to ``CorrectionHandling.ORIGINAL`` if you want the phonetic content prior to correction, and ``CorrectionHandling.EITHER`` if you don't care.
Example::
word.phon()
Returns:
The phonetic content of the element (``unicode`` instance in Python 2, ``str`` in Python 3)
Raises:
:class:`NoSuchPhon`: if no phonetic content is found at all.
See also:
:meth:`phoncontent`: Retrieves the phonetic content as an element rather than a string
:meth:`text`
:meth:`textcontent`
"""
if strict:
return self.phoncontent(cls,correctionhandling).phon()
if self.PHONCONTAINER:
s = ""
for e in self:
if isstring(e):
s += e
else:
try:
if s: s += e.TEXTDELIMITER #We use TEXTDELIMITER for phon too
except AttributeError:
pass
s += e.phon()
return s
elif not self.SPEAKABLE: #only speakable elements can hold phonetic content
raise NoSuchPhon
else:
#Get text from children first
delimiter = ""
s = ""
for e in self:
if e.SPEAKABLE and not isinstance(e, PhonContent) and not isinstance(e,String):
try:
s += e.phon(cls, delimiter,False,correctionhandling)
#delimiter will be buffered and only printed upon next iteration, this prevents the delimiter being outputted at the end of a sequence and to be compounded with other delimiters
delimiter = e.gettextdelimiter() #We use TEXTDELIMITER for phon too
except NoSuchPhon:
#No text, that's okay, just continue
continue
if not s and self.hasphon(cls):
s = self.phoncontent(cls,correctionhandling).phon()
if s and previousdelimiter:
return previousdelimiter + s
elif s:
return s
else:
#No text found at all :`(
raise NoSuchPhon | 0.00955 |
def set(context="notebook", style="darkgrid", palette="deep",
font="sans-serif", font_scale=1, color_codes=False, rc=None):
"""Set aesthetic parameters in one step.
Each set of parameters can be set directly or temporarily, see the
referenced functions below for more information.
Parameters
----------
context : string or dict
Plotting context parameters, see :func:`plotting_context`
style : string or dict
Axes style parameters, see :func:`axes_style`
palette : string or sequence
Color palette, see :func:`color_palette`
font : string
Font family, see matplotlib font manager.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
rc : dict or None
Dictionary of rc parameter mappings to override the above.
"""
mpl.rcParams = {}
set_context(context, font_scale)
set_style(style, rc={"font.family": font})
if rc is not None:
mpl.rcParams.update(rc)
return mpl.rcParams | 0.000803 |
def _parse_config_file_impl(filename):
"""
Format for the file is:
credentials:
email: ...
api_key: ...
:param filename: The filename to parse
:return: A tuple with:
- email
- api_key
"""
api_key = None
email = None
try:
doc = yaml.load(file(filename).read())
email = doc['credentials']['email']
api_key = doc['credentials']['api_key']
except (KeyError, TypeError):
print(INVALID_FILE)
return None, None
except yaml.scanner.ScannerError, e:
print(SYNTAX_ERROR_FILE % (e.problem, e.problem_mark.line))
return None, None
# Just in case, we don't want the auth to fail because of a space
email = email.strip()
api_key = api_key.strip()
if not is_valid_api_key(api_key):
cli_logger.debug(INVALID_UUID)
api_key = None
if not is_valid_email(email):
cli_logger.debug('Invalid email address: %s' % email)
email = None
return email, api_key | 0.003707 |
def stop(self):
"""
Stops the service.
"""
if self.log_file != PIPE and not (self.log_file == DEVNULL and _HAS_NATIVE_DEVNULL):
try:
self.log_file.close()
except Exception:
pass
if self.process is None:
return
try:
self.send_remote_shutdown_command()
except TypeError:
pass
try:
if self.process:
for stream in [self.process.stdin,
self.process.stdout,
self.process.stderr]:
try:
stream.close()
except AttributeError:
pass
self.process.terminate()
self.process.wait()
self.process.kill()
self.process = None
except OSError:
pass | 0.003165 |
def show_keyword_version_message(sender, keyword_version, inasafe_version):
"""Show a message indicating that the keywords version is mismatch
.. versionadded: 3.2
:param sender: Sender of the message signal. Default to Any object.
:type sender: object
:param keyword_version: The version of the layer's keywords
:type keyword_version: str
:param inasafe_version: The version of the InaSAFE
:type inasafe_version: str
.. note:: The print button will be disabled if this method is called.
"""
LOGGER.debug('Showing Mismatch Version Message')
message = generate_input_error_message(
tr('Layer Keyword\'s Version Mismatch:'),
m.Paragraph(
tr(
'Your layer\'s keyword\'s version ({layer_version}) does not '
'match with your InaSAFE version ({inasafe_version}). If you '
'wish to use it as an exposure, hazard, or aggregation layer '
'in an analysis, please use the keyword wizard to update the '
'keywords. You can open the wizard by clicking on '
'the ').format(
layer_version=keyword_version,
inasafe_version=inasafe_version),
m.Image(
'file:///%s/img/icons/'
'show-keyword-wizard.svg' % resources_path(),
**SMALL_ICON_STYLE),
tr(
' icon in the toolbar.'))
)
send_static_message(sender, message) | 0.000668 |
def zk_client(host, scheme, credential):
""" returns a connected (and possibly authenticated) ZK client """
if not re.match(r".*:\d+$", host):
host = "%s:%d" % (host, DEFAULT_ZK_PORT)
client = KazooClient(hosts=host)
client.start()
if scheme != "":
client.add_auth(scheme, credential)
return client | 0.002924 |
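A standalone connection sketch using kazoo directly, assuming the kazoo package is installed; the host string and credentials are hypothetical.

from kazoo.client import KazooClient

client = KazooClient(hosts="zk1.example.com:2181")
client.start()                              # blocks until the session is established
client.add_auth("digest", "user:secret")    # optional scheme/credential pair, as above
# ... use client, then:
client.stop()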
def construct_formset(self):
"""
Returns an instance of the inline formset
"""
if not self.inline_model or not self.parent_model:
msg = "Parent and Inline models are required in {}".format(self.__class__.__name__)
raise NotModelException(msg)
return inlineformset_factory(
self.parent_model,
self.inline_model,
**self.get_factory_kwargs()) | 0.006818 |
def update(self, other):
""" Merge two XmlCtsWorkMetadata Objects.
- The original (left) object keeps its parent.
- An added document overwrites a text if it already exists.
:param other: XmlCtsWorkMetadata object
:type other: CtsWorkMetadata
:return: XmlCtsWorkMetadata Object
:rtype XmlCtsWorkMetadata:
"""
if not isinstance(other, CtsWorkMetadata):
raise TypeError("Cannot add %s to CtsWorkMetadata" % type(other))
elif self.urn != other.urn:
raise InvalidURN("Cannot add CtsWorkMetadata %s to CtsWorkMetadata %s " % (self.urn, other.urn))
for urn, text in other.children.items():
self.texts[urn] = text
self.texts[urn].parent = self
self.texts[urn].resource = None
return self | 0.003619 |
def zero_year_special_case(from_date, to_date, start, end):
"""strptime does not resolve a 0000 year, we must handle this."""
if start == 'pos' and end == 'pos':
# always interval from earlier to later
if from_date.startswith('0000') and not to_date.startswith('0000'):
return True
# always interval from later to earlier
if not from_date.startswith('0000') and to_date.startswith('0000'):
return False
# an interval from 0000-MM-DD/0000-MM-DD ??? PARSE !!!
if from_date.startswith('0000') and to_date.startswith('0000'):
# fill from date assuming first subsequent date object if missing
# missing m+d, assume jan 1
if len(from_date) == 4:
fm, fd = 1, 1
# missing d, assume the 1st
elif len(from_date) == 7:
fm, fd = int(from_date[5:7]), 1
# not missing any date objects
elif len(from_date) == 10:
fm, fd = int(from_date[5:7]), int(from_date[8:10])
# fill to date assuming first subsequent date object if missing
# missing m+d, assume jan 1
if len(to_date) == 4:
tm, td = 1, 1
# missing d, assume the 1st
elif len(to_date) == 7:
tm, td = int(to_date[5:7]), 1
# not missing any date objects
elif len(to_date) == 10:
tm, td = int(to_date[5:7]), int(to_date[8:10])
# equality check
if from_date == to_date:
return True
# compare the dates
if fm <= tm:
if fd <= td:
return True
else:
return False
else:
return False
# these cases are always one way or the other
# "-0000" is an invalid edtf
elif start == 'neg' and end == 'neg':
return False
# False unless start is not "0000"
elif start == 'neg' and end == 'pos':
if from_date.startswith("0000"):
return False
else:
return True | 0.000465 |
def combine_tax_scales(node):
"""
Combine all the MarginalRateTaxScales in the node into a single MarginalRateTaxScale.
"""
combined_tax_scales = None
for child_name in node:
child = node[child_name]
if not isinstance(child, AbstractTaxScale):
log.info('Skipping {} with value {} because it is not a tax scale'.format(child_name, child))
continue
if combined_tax_scales is None:
combined_tax_scales = MarginalRateTaxScale(name = child_name)
combined_tax_scales.add_bracket(0, 0)
combined_tax_scales.add_tax_scale(child)
return combined_tax_scales | 0.007622 |
def huji_sample(orient_file, meths='FS-FD:SO-POM:SO-SUN', location_name='unknown',
samp_con="1", ignore_dip=True, data_model_num=3,
samp_file="samples.txt", site_file="sites.txt",
dir_path=".", input_dir_path=""):
"""
Convert HUJI sample file to MagIC file(s)
Parameters
----------
orient_file : str
input file name
meths : str
colon-delimited sampling methods, default FS-FD:SO-POM:SO-SUN
for more options, see info below
location_name : str
location name, default "unknown"
samp_con : str
sample/site naming convention, default '1', see info below
ignore_dip : bool
set sample az/dip to 0, default True
data_model_num : int
MagIC data model 2 or 3, default 3
samp_file : str
sample file name to output (default : samples.txt)
site_file : str
site file name to output (default : sites.txt)
dir_path : str
output directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
Returns
--------
type - Tuple : (True or False indicating if conversion was successful, file name written)
Info
--------
Sampling method codes:
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
SO-MAG orientation with magnetic compass
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
"""
try:
samp_con, Z = samp_con.split("-")
except ValueError:
samp_con = samp_con
Z = 1
version_num = pmag.get_version()
if data_model_num == 2:
loc_col = "er_location_name"
site_col = "er_site_name"
samp_col = "er_sample_name"
citation_col = "er_citation_names"
class_col = "site_class"
lithology_col = "site_lithology"
definition_col = "site_definition"
type_col = "site_type"
sample_bed_dip_direction_col = "sample_bed_dip_direction"
sample_bed_dip_col = "sample_bed_dip"
site_bed_dip_direction_col = "site_bed_dip_direction"
site_bed_dip_col = "site_bed_dip"
sample_dip_col = "sample_dip"
sample_az_col = "sample_azimuth"
sample_lat_col = "sample_lat"
sample_lon_col = "sample_lon"
site_lat_col = "site_lat"
site_lon_col = "site_lon"
meth_col = "magic_method_codes"
software_col = "magic_software_packages"
else:
loc_col = "location"
site_col = "site"
samp_col = "sample"
citation_col = "citations"
class_col = "class"
lithology_col = "lithology"
definition_col = "definition"
type_col = "type"
sample_bed_dip_direction_col = 'bed_dip_direction'
sample_bed_dip_col = 'bed_dip'
site_bed_dip_direction_col = 'bed_dip_direction'
site_bed_dip_col = "bed_dip"
sample_dip_col = "dip"
sample_az_col = "azimuth"
sample_lat_col = "lat"
sample_lon_col = "lon"
site_lat_col = "lat"
site_lon_col = "lon"
meth_col = "method_codes"
software_col = "software_packages"
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
site_file = pmag.resolve_file_name(site_file, dir_path)
orient_file = pmag.resolve_file_name(orient_file, input_dir_path)
print("-I- reading in: {}".format(orient_file))
#
# read in file to convert
#
with open(orient_file, 'r') as azfile:
AzDipDat = azfile.readlines()
SampOut = []
SiteOut = []
for line in AzDipDat[1:]:
orec = line.split()
if len(orec) > 1:
labaz, labdip = pmag.orient(float(orec[1]), float(orec[2]), '3')
bed_dip_dir = (orec[3])
bed_dip = (orec[4])
SampRec = {}
SiteRec = {}
SampRec[loc_col] = location_name
SampRec[citation_col] = "This study"
SiteRec[loc_col] = location_name
SiteRec[citation_col] = "This study"
SiteRec[class_col] = ""
SiteRec[lithology_col] = ""
SiteRec[type_col] = ""
SiteRec[definition_col] = "s"
#
# parse information common to all orientation methods
#
SampRec[samp_col] = orec[0]
SampRec[sample_bed_dip_direction_col] = orec[3]
SampRec[sample_bed_dip_col] = orec[4]
SiteRec[site_bed_dip_direction_col] = orec[3]
SiteRec[site_bed_dip_col] = orec[4]
if not ignore_dip:
SampRec[sample_dip_col] = '%7.1f' % (labdip)
SampRec[sample_az_col] = '%7.1f' % (labaz)
else:
SampRec[sample_dip_col] = '0'
SampRec[sample_az_col] = '0'
SampRec[sample_lat_col] = orec[5]
SampRec[sample_lon_col] = orec[6]
SiteRec[site_lat_col] = orec[5]
SiteRec[site_lon_col] = orec[6]
SampRec[meth_col] = meths
# parse out the site name
site = pmag.parse_site(orec[0], samp_con, Z)
SampRec[site_col] = site
SampRec[software_col] = version_num
SiteRec[site_col] = site
SiteRec[software_col] = version_num
SampOut.append(SampRec)
SiteOut.append(SiteRec)
if data_model_num == 2:
pmag.magic_write(samp_file, SampOut, "er_samples")
pmag.magic_write(site_file, SiteOut, "er_sites")
else:
pmag.magic_write(samp_file, SampOut, "samples")
pmag.magic_write(site_file, SiteOut, "sites")
print("Sample info saved in ", samp_file)
print("Site info saved in ", site_file)
return True, samp_file | 0.000897 |
def is_cached(self):
"""Returns true if this rule is already cached."""
# TODO: cache by target+hash, not per file.
try:
for item in self.rule.output_files:
log.info(item)
self.cachemgr.in_cache(item, self._metahash())
except cache.CacheMiss:
log.info('[%s]: Not cached.', self.address)
return False
else:
log.info('[%s]: found in cache.', self.address)
return True | 0.004024 |
def display(self, *amplExpressions):
"""
Writes the outcome of the AMPL statement to the current OutputHandler.
.. code-block:: ampl
display e1, e2, .., en;
where e1, ..., en are the strings passed to the procedure.
Args:
amplExpressions: Expressions to be evaluated.
"""
exprs = list(map(str, amplExpressions))
lock_and_call(
lambda: self._impl.displayLst(exprs, len(exprs)),
self._lock
) | 0.003899 |
def to_py(o, keyword_fn: Callable[[kw.Keyword], Any] = _kw_name):
"""Recursively convert Lisp collections into Python collections."""
if isinstance(o, ISeq):
return _to_py_list(o, keyword_fn=keyword_fn)
elif not isinstance(
o, (IPersistentList, IPersistentMap, IPersistentSet, IPersistentVector)
):
return o
else: # pragma: no cover
return _to_py_backup(o, keyword_fn=keyword_fn) | 0.002315 |
def _year_month_delta_from_elements(elements):
"""
Return a tuple of (years, months) from a dict of date elements.
Accepts a dict containing any of the following:
- years
- months
Example:
>>> _year_month_delta_from_elements({'years': 2, 'months': 14})
(3, 2)
"""
return divmod(
(int(elements.get('years', 0)) * MONTHS_IN_YEAR) +
elements.get('months', 0),
MONTHS_IN_YEAR
) | 0.002232 |
def metastability(alpha, T, right_eigenvectors, square_map, pi):
"""Return the metastability PCCA+ objective function.
Parameters
----------
alpha : ndarray
Parameters of objective function (e.g. flattened A)
T : csr sparse matrix
Transition matrix
right_eigenvectors : ndarray
The right eigenvectors.
square_map : ndarray
Mapping from square indices (i,j) to flat indices (k).
pi : ndarray
Equilibrium Populations of transition matrix.
Returns
-------
obj : float
The objective function
Notes
-------
metastability: try to make metastable fuzzy state decomposition.
Defined in ref. [2].
"""
num_micro, num_eigen = right_eigenvectors.shape
A, chi, mapping = calculate_fuzzy_chi(alpha, square_map,
right_eigenvectors)
# If current point is infeasible or leads to degenerate lumping.
if (len(np.unique(mapping)) != right_eigenvectors.shape[1] or
has_constraint_violation(A, right_eigenvectors)):
return -1.0 * np.inf
obj = 0.0
# Calculate metastabilty of the lumped model. Eqn 4.20 in LAA.
for i in range(num_eigen):
obj += np.dot(T.dot(chi[:, i]), pi * chi[:, i]) / np.dot(chi[:, i], pi)
return obj | 0.000757 |
def update_subtask(self, subtask_id, revision, title=None, completed=None):
'''
Updates the subtask with the given ID
See https://developer.wunderlist.com/documentation/endpoints/subtask for detailed parameter information
Returns:
Subtask with given ID with properties and revision updated
'''
return subtasks_endpoint.update_subtask(self, subtask_id, revision, title=title, completed=completed) | 0.00883 |
def capture_pywarnings(handler):
"""Log python system warnings."""
logger = logging.getLogger('py.warnings')
# Check for previously installed handlers.
for h in logger.handlers:
if isinstance(h, handler.__class__):
return
logger.addHandler(handler)
logger.setLevel(logging.WARNING) | 0.005602 |
def close(self):
"""Close this metrics repository."""
for reporter in self._reporters:
reporter.close()
self._metrics.clear() | 0.012346 |
def _parse_date(self, date_string):
"""Parse the date_string and return a datetime object as UTC."""
date = datetime.strptime(date_string, self._DATE_FORMAT)
self.date = self._TZ.localize(date).astimezone(pytz.UTC) | 0.008403 |
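A minimal standalone sketch of the same localize-then-convert pattern, assuming pytz is installed; the format string and timezone are hypothetical.

from datetime import datetime
import pytz

naive = datetime.strptime("2021-03-01 12:30:00", "%Y-%m-%d %H:%M:%S")
eastern = pytz.timezone("US/Eastern")
aware_utc = eastern.localize(naive).astimezone(pytz.UTC)
# localize() attaches the source timezone without shifting the wall-clock time;
# astimezone(pytz.UTC) then converts that instant to UTC.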
def format_context(
context: Context, formatter: typing.Union[str, Formatter] = "full"
) -> str:
"""Output the a context dictionary as a string."""
if not context:
return ""
if callable(formatter):
formatter_func = formatter
else:
if formatter in CONTEXT_FORMATTERS:
formatter_func = CONTEXT_FORMATTERS[formatter]
else:
raise ValueError(f'Invalid context format: "{formatter}"')
return formatter_func(context) | 0.002041 |
def _dryrun_cell(args, cell_body):
"""Implements the BigQuery cell magic used to dry run BQ queries.
The supported syntax is:
%%bq dryrun [-q|--sql <query identifier>]
[<YAML or JSON cell_body or inline SQL>]
Args:
args: the argument following '%bq dryrun'.
cell_body: optional contents of the cell interpreted as YAML or JSON.
Returns:
The response wrapped in a DryRunStats object
"""
query = _get_query_argument(args, cell_body, google.datalab.utils.commands.notebook_environment())
if args['verbose']:
print(query.sql)
context = google.datalab.utils._utils._construct_context_for_args(args)
result = query.dry_run(context=context)
return bigquery._query_stats.QueryStats(
total_bytes=result['totalBytesProcessed'], is_cached=result['cacheHit']) | 0.010025 |
def teardown_app_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. Such a
function is executed when tearing down each request, even if outside of
the blueprint.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(None, []).append(f))
return f | 0.008547 |
def base64_process(**kwargs):
"""
Process base64 file io
"""
str_fileToSave = ""
str_fileToRead = ""
str_action = "encode"
data = None
for k,v in kwargs.items():
if k == 'action': str_action = v
if k == 'payloadBytes': data = v
if k == 'payloadFile': str_fileToRead = v
if k == 'saveToFile': str_fileToSave = v
# if k == 'sourcePath': str_sourcePath = v
if str_action == "encode":
# Encode the contents of the file at targetPath as ASCII for transmission
if len(str_fileToRead):
with open(str_fileToRead, 'rb') as f:
data = f.read()
f.close()
data_b64 = base64.b64encode(data)
with open(str_fileToSave, 'wb') as f:
f.write(data_b64)
f.close()
return {
'msg': 'Encode successful',
'fileProcessed': str_fileToSave,
'status': True
# 'encodedBytes': data_b64
}
if str_action == "decode":
# if len(data) % 4:
# not a multiple of 4, add padding:
# data += '=' * (4 - len(data) % 4)
# adding 3 padding = will never succumb to the TypeError and will always produce the same result.
# https://gist.github.com/perrygeo/ee7c65bb1541ff6ac770
bytes_decoded = base64.b64decode(data + "===")
with open(str_fileToSave, 'wb') as f:
f.write(bytes_decoded)
f.close()
return {
'msg': 'Decode successful',
'fileProcessed': str_fileToSave,
'status': True
# 'decodedBytes': bytes_decoded
} | 0.012848 |
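A standalone sketch of the encode/decode round trip this helper wraps, using only the standard base64 module; no file I/O, and the payload is hypothetical.

import base64

payload = b"hello world"
encoded = base64.b64encode(payload)   # ASCII-safe bytes suitable for transmission
decoded = base64.b64decode(encoded)   # the helper above appends "===" to guard against missing padding
assert decoded == payload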
def django_include(context, template_name, **kwargs):
'''
Mako tag to include a Django template within the current DMP (Mako) template.
Since this is a Django template, it is searched for using the Django search
algorithm (instead of the DMP app-based concept).
See https://docs.djangoproject.com/en/2.1/topics/templates/.
The current context is sent to the included template, which makes all context
variables available to the Django template. Any additional kwargs are added
to the context.
'''
try:
djengine = engines['django']
except KeyError as e:
raise TemplateDoesNotExist("Django template engine not configured in settings, so template cannot be found: {}".format(template_name)) from e
djtemplate = djengine.get_template(template_name)
djcontext = {}
djcontext.update(context)
djcontext.update(kwargs)
return djtemplate.render(djcontext, context['request']) | 0.004228 |
def create_server(initialize=True):
"""Create a server"""
with provider() as p:
host_string = p.create_server()
if initialize:
env.host_string = host_string
initialize_server() | 0.004464 |
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if we would hold negative shares of asset after completing this
order.
"""
if portfolio.positions[asset].amount + amount < 0:
self.handle_violation(asset, amount, algo_datetime) | 0.01737 |
def _put(self, *args, **kwargs):
"""
A wrapper for putting things. It will also json encode your 'data' parameter
:returns: The response of your put
:rtype: dict
:raises: This will raise a
:class:`NewRelicAPIServerException<newrelic_api.exceptions.NewRelicAPIServerException>`
if there is an error from New Relic
"""
if 'data' in kwargs:
kwargs['data'] = json.dumps(kwargs['data'])
response = requests.put(*args, **kwargs)
if not response.ok:
raise NewRelicAPIServerException('{}: {}'.format(response.status_code, response.text))
return response.json() | 0.005857 |
def K2findCampaigns_main(args=None):
"""Exposes K2findCampaigns to the command line."""
parser = argparse.ArgumentParser(
description="Check if a celestial coordinate is "
"(or was) observable by any past or future "
"observing campaign of NASA's K2 mission.")
parser.add_argument('ra', nargs=1, type=float,
help="Right Ascension in decimal degrees (J2000).")
parser.add_argument('dec', nargs=1, type=float,
help="Declination in decimal degrees (J2000).")
parser.add_argument('-p', '--plot', action='store_true',
help="Produce a plot showing the target position "
"with respect to all K2 campaigns.")
args = parser.parse_args(args)
ra, dec = args.ra[0], args.dec[0]
campaigns = findCampaigns(ra, dec)
# Print the result
if len(campaigns):
print(Highlight.GREEN + "Success! The target is on silicon "
"during K2 campaigns {0}.".format(campaigns) + Highlight.END)
else:
print(Highlight.RED + "Sorry, the target is not on silicon "
"during any K2 campaign." + Highlight.END)
# Print the pixel positions
for c in campaigns:
printChannelColRow(c, ra, dec)
# Make a context plot if the user requested so
if args.plot:
save_context_plots(ra, dec, "Your object") | 0.000685 |
def paragraph_sub(match):
"""Captures paragraphs."""
text = re.sub(r' \n', r'\n<br/>\n', match.group(0).strip())
return '<p>{}</p>'.format(text) | 0.006369 |
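A self-contained sketch of wiring a replacement function like this into re.sub; the block-matching pattern and the sample text are hypothetical.

import re

source = "first line \nsecond line"
# Pass the function above as the replacement callback; each matched paragraph
# is stripped, hard line breaks become <br/>, and the result is wrapped in <p>.
html = re.sub(r'.+(?:\n.+)*', paragraph_sub, source)
# -> '<p>first line\n<br/>\nsecond line</p>'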
def print_pack(document_loader, # type: Loader
processobj, # type: CommentedMap
uri, # type: Text
metadata # type: Dict[Text, Any]
): # type: (...) -> Text
"""Return a CWL serialization of the CWL document in JSON."""
packed = pack(document_loader, processobj, uri, metadata)
if len(packed["$graph"]) > 1:
return json_dumps(packed, indent=4)
return json_dumps(packed["$graph"][0], indent=4) | 0.003984 |
def explain_template_loading_attempts(app, template, attempts):
"""
This should help developers understand what failed. Mostly the same as
:func:`flask.debughelpers.explain_template_loading_attempts`, except here we've
extended it to support showing what :class:`UnchainedJinjaLoader` is doing.
"""
from flask import Flask, Blueprint
from flask.debughelpers import _dump_loader_info
from flask.globals import _request_ctx_stack
template, expected_priors = parse_template(template)
info = [f'Locating {pretty_num(expected_priors + 1)} template "{template}":']
total_found = 0
blueprint = None
reqctx = _request_ctx_stack.top
if reqctx is not None and reqctx.request.blueprint is not None:
blueprint = reqctx.request.blueprint
for idx, (loader, srcobj, triple) in enumerate(attempts):
if isinstance(srcobj, Flask):
src_info = 'application "%s"' % srcobj.import_name
elif isinstance(srcobj, Blueprint):
src_info = 'blueprint "%s" (%s)' % (srcobj.name,
srcobj.import_name)
else:
src_info = repr(srcobj)
info.append('% 5d: trying loader of %s' % (
idx + 1, src_info))
for line in _dump_loader_info(loader):
info.append(' %s' % line)
if triple is None:
detail = 'no match'
else:
if total_found < expected_priors:
action = 'skipping'
elif total_found == expected_priors:
action = 'using'
else:
action = 'ignoring'
detail = '%s (%r)' % (action, triple[1] or '<string>')
total_found += 1
info.append(' -> %s' % detail)
seems_fishy = False
if total_found < expected_priors:
info.append('Error: the template could not be found.')
seems_fishy = True
if blueprint is not None and seems_fishy:
info.append(' The template was looked up from an endpoint that '
'belongs to the blueprint "%s".' % blueprint)
info.append(' Maybe you did not place a template in the right folder?')
info.append(' See http://flask.pocoo.org/docs/blueprints/#templates')
app.logger.info('\n'.join(info)) | 0.00172 |
def intersect_leaderboards(self, destination, keys, aggregate='SUM'):
'''
Intersect leaderboards given by keys with this leaderboard into a named destination leaderboard.
@param destination [String] Destination leaderboard name.
@param keys [Array] Leaderboards to be merged with the current leaderboard.
@param aggregate [String] How scores are combined for members present in multiple leaderboards ('SUM', 'MIN' or 'MAX').
'''
keys.insert(0, self.leaderboard_name)
self.redis_connection.zinterstore(destination, keys, aggregate) | 0.007407 |
def point(self, x, y, char):
"""Create a point on ASCII canvas.
Args:
x (int): x coordinate. Should be >= 0 and < number of columns in
the canvas.
y (int): y coordinate. Should be >= 0 an < number of lines in the
canvas.
char (str): character to place in the specified point on the
canvas.
"""
assert len(char) == 1
assert x >= 0
assert x < self.cols
assert y >= 0
assert y < self.lines
self.canvas[y][x] = char | 0.003515 |
def to_fits(self):
"""
Converts a `~regions.ShapeList` to a `~astropy.table.Table` object.
"""
max_length_coord = 1
coord_x = []
coord_y = []
shapes = []
radius = []
rotangle_deg = []
components = []
reg_reverse_mapping = {value: key for key, value in
reg_mapping['FITS_REGION'].items()}
reg_reverse_mapping['rectangle'] = 'ROTBOX'
reg_reverse_mapping['circleannulus'] = 'ANNULUS'
reg_reverse_mapping['ellipseannulus'] = 'ELLIPTANNULUS'
for num, shape in enumerate(self):
shapes.append(reg_reverse_mapping[shape.region_type])
if shape.region_type == 'polygon':
max_length_coord = max(len(shape.coord)/2, max_length_coord)
coord = [x.value for x in shape.coord]
coord_x.append(coord[::2])
coord_y.append(coord[1::2])
radius.append(0)
rotangle_deg.append(0)
else:
coord_x.append(shape.coord[0].value)
coord_y.append(shape.coord[1].value)
if shape.region_type in ['circle', 'circleannulus', 'point']:
radius.append([float(val) for val in shape.coord[2:]])
rotangle_deg.append(0)
else:
radius.append([float(x) for x in shape.coord[2:-1]])
rotangle_deg.append(shape.coord[-1].to('deg').value)
tag = shape.meta.get('tag', '')
if tag.isdigit():
components.append(int(tag))
else:
components.append(num + 1)
# pad every value with zeros at the end to make sure that all values
# in the column have the same length.
for i in range(len(self)):
if np.isscalar(coord_x[i]):
coord_x[i] = np.array([coord_x[i]])
if np.isscalar(coord_y[i]):
coord_y[i] = np.array([coord_y[i]])
if np.isscalar(radius[i]):
radius[i] = np.array([radius[i]])
coord_x[i] = np.pad(coord_x[i], (0, int(max_length_coord - len(coord_x[i]))),
'constant', constant_values=(0, 0))
coord_y[i] = np.pad(coord_y[i], (0, int(max_length_coord - len(coord_y[i]))),
'constant', constant_values=(0, 0))
radius[i] = np.pad(radius[i], (0, 4 - len(radius[i])), 'constant',
constant_values=(0, 0))
table = Table([coord_x, coord_y, shapes, radius, rotangle_deg, components],
names=('X', 'Y', 'SHAPE', 'R', 'ROTANG', 'COMPONENT'))
table['X'].unit = 'pix'
table['Y'].unit = 'pix'
table['R'].unit = 'pix'
table['ROTANG'].unit = 'deg'
return table | 0.002415 |
def isoncurve(self, p):
"""
verifies if a point is on the curve
"""
return p.iszero() or p.y ** 2 == p.x ** 3 + self.a * p.x + self.b | 0.012121 |
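A self-contained sketch of the same curve-membership test, here over a small prime field (a common setting); the curve parameters and test point are made up.

# Check y^2 == x^3 + a*x + b (mod p) for a toy curve; parameters are hypothetical.
p, a, b = 97, 2, 3

def on_curve(x, y):
    return (y * y - (x * x * x + a * x + b)) % p == 0

# x=0 gives x^3 + 2x + 3 = 3, and 10^2 = 100 = 3 (mod 97), so (0, 10) lies on the curve.
assert on_curve(0, 10)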
def load_vpatt(filename1, filename2):
"""Loads a VectorPatternUniform pattern that is saved between two files.
"""
with open(filename1) as f:
lines = f.readlines()
lst = lines[0].split(',')
patt1 = np.zeros([int(lst[0]), int(lst[1])],
dtype=np.complex128)
lines.pop(0)
for line in lines:
lst = line.split(',')
n = int(lst[0])
m = int(lst[1])
re = float(lst[2])
im = float(lst[3])
patt1[n, m] = re + 1j * im
with open(filename2) as f:
lines2 = f.readlines()
lst = lines2[0].split(',')
patt2 = np.zeros([int(lst[0]), int(lst[1])],
dtype=np.complex128)
lines2.pop(0)
for line in lines2:
lst = line.split(',')
n = int(lst[0])
m = int(lst[1])
re = float(lst[2])
im = float(lst[3])
patt2[n, m] = re + 1j * im
return sp.VectorPatternUniform(patt1, patt2) | 0.004753 |
def divisions(self):
"""
Recursively get all the text divisions that are directly part of this element. If an element contains parts, or text without a tag, those will be returned in order, wrapped in a TextDivision.
"""
from .placeholder_division import PlaceholderDivision
placeholder = None
for item in self.__parts_and_divisions:
if item.tag == 'part':
if not placeholder:
placeholder = PlaceholderDivision()
placeholder.parts.append(item)
else:
if placeholder:
yield placeholder
placeholder = None
yield item
if placeholder:
yield placeholder | 0.005222 |
def get_tow_guide(session, vehicle_index):
"""Get tow guide information."""
profile = get_profile(session)
_validate_vehicle(vehicle_index, profile)
return session.post(TOW_URL, {
'vin': profile['vehicles'][vehicle_index]['vin']
}).json() | 0.003759 |
def _fix_outputs(self, op, outputs):
"""A workaround to handle dropout or similar operator that have more than one out
in ONNX.
"""
if op == 'Dropout':
assert len(outputs) == 2, "ONNX has two outputs for the dropout layer."
outputs = outputs[:-1]
return outputs | 0.012422 |
def run_filters(actions,
prepare=None, finalize=None,
input_stream=None, output_stream=None,
doc=None,
**kwargs):
"""
Receive a Pandoc document from the input stream (default is stdin),
walk through it applying the functions in *actions* to each element,
and write it back to the output stream (default is stdout).
Notes:
- It receives and writes the Pandoc documents as JSON--encoded strings;
this is done through the :func:`.load` and :func:`.dump` functions.
- It walks through the document once for every function in *actions*,
so the actions are applied sequentially.
- By default, it will read from stdin and write to stdout,
but these can be modified.
- It can also apply functions to the entire document at the beginning and
end; this allows for global operations on the document.
- If ``doc`` is a :class:`.Doc` instead of ``None``, ``run_filters``
will return the document instead of writing it to the output stream.
:param actions: sequence of functions; each function takes (element, doc)
as argument, so a valid header would be ``def action(elem, doc):``
:type actions: [:class:`function`]
:param prepare: function executed at the beginning;
right after the document is received and parsed
:type prepare: :class:`function`
:param finalize: function executed at the end;
right before the document is converted back to JSON and written to stdout.
:type finalize: :class:`function`
:param input_stream: text stream used as input
(default is :data:`sys.stdin`)
:param output_stream: text stream used as output
(default is :data:`sys.stdout`)
:param doc: ``None`` unless running panflute as a filter, in which case this will be a :class:`.Doc` element
:type doc: ``None`` | :class:`.Doc`
:param \*kwargs: keyword arguments will be passed through to the *action*
functions (so they can actually receive more than just two arguments
(*element* and *doc*)
"""
load_and_dump = (doc is None)
if load_and_dump:
doc = load(input_stream=input_stream)
if prepare is not None:
prepare(doc)
for action in actions:
if kwargs:
action = partial(action, **kwargs)
doc = doc.walk(action, doc)
if finalize is not None:
finalize(doc)
if load_and_dump:
dump(doc, output_stream=output_stream)
else:
return(doc) | 0.001188 |
def get_object(brain_object_uid, default=_marker):
"""Get the full content object
:param brain_object_uid: A catalog brain or content object or uid
:type brain_object_uid: PortalObject/ATContentType/DexterityContentType
/CatalogBrain/basestring
:returns: The full object
"""
if is_uid(brain_object_uid):
return get_object_by_uid(brain_object_uid)
if not is_object(brain_object_uid):
if default is _marker:
fail("{} is not supported.".format(repr(brain_object_uid)))
return default
if is_brain(brain_object_uid):
return brain_object_uid.getObject()
return brain_object_uid | 0.001524 |
def get_art(cache_dir, size, client):
"""Get the album art."""
song = client.currentsong()
if len(song) < 2:
print("album: Nothing currently playing.")
return
file_name = f"{song['artist']}_{song['album']}_{size}.jpg".replace("/", "")
file_name = cache_dir / file_name
if file_name.is_file():
shutil.copy(file_name, cache_dir / "current.jpg")
print("album: Found cached art.")
else:
print("album: Downloading album art...")
brainz.init()
album_art = brainz.get_cover(song, size)
if album_art:
util.bytes_to_file(album_art, cache_dir / file_name)
util.bytes_to_file(album_art, cache_dir / "current.jpg")
print(f"album: Swapped art to {song['artist']}, {song['album']}.") | 0.001244 |
def get_filtered_path(path_to_image, filename_key, storage):
"""
Return the 'filtered path'
"""
containing_folder, filename = os.path.split(path_to_image)
filtered_filename = get_filtered_filename(filename, filename_key)
path_to_return = os.path.join(*[
containing_folder,
VERSATILEIMAGEFIELD_FILTERED_DIRNAME,
filtered_filename
])
# Removing spaces so this path is memcached key friendly
path_to_return = path_to_return.replace(' ', '')
return path_to_return | 0.001908 |
def redirect_to_ssl(self):
"""Redirect incoming requests to HTTPS."""
# Should we redirect?
criteria = [
request.is_secure,
current_app.debug,
current_app.testing,
request.headers.get('X-Forwarded-Proto', 'http') == 'https'
]
if not any(criteria) and not self.skip:
if request.url.startswith('http://'):
url = request.url.replace('http://', 'https://', 1)
code = 302
if self.permanent:
code = 301
r = redirect(url, code=code)
return r | 0.00315 |
def fromseconds(cls, seconds):
"""Return a |Period| instance based on a given number of seconds."""
try:
seconds = int(seconds)
except TypeError:
seconds = int(seconds.flatten()[0])
return cls(datetime.timedelta(0, int(seconds))) | 0.007018 |
def save_pickle(obj, outfile, protocol=2):
"""Save the object as a pickle file
Args:
outfile (str): Filename
protocol (int): Pickle protocol to use. Default is 2 to remain compatible with Python 2
Returns:
str: Path to pickle file
"""
with open(outfile, 'wb') as f:
pickle.dump(obj, f, protocol=protocol)
return outfile | 0.005277 |
def components(self):
"""
Returns full :class:`dict` of :class:`Component` instances, after
a successful :meth:`build`
:return: dict of named :class:`Component` instances
:rtype: :class:`dict`
For more information read about the :ref:`parts_assembly-build-cycle` .
"""
if self._components is None:
self.build(recursive=False)
return self._components | 0.004598 |
def get_grid_mapping_variables(ds):
'''
Returns a list of grid mapping variables
:param netCDF4.Dataset ds: An open netCDF4 Dataset
'''
grid_mapping_variables = []
for ncvar in ds.get_variables_by_attributes(grid_mapping=lambda x: x is not None):
if ncvar.grid_mapping in ds.variables:
grid_mapping_variables.append(ncvar.grid_mapping)
return grid_mapping_variables | 0.004831 |
def _expectation(p, constant_mean, none, kern, feat, nghp=None):
"""
Compute the expectation:
expectation[n] = <m(x_n)^T K_{x_n, Z}>_p(x_n)
- m(x_i) = c :: Constant function
- K_{.,.} :: Kernel function
:return: NxQxM
"""
with params_as_tensors_for(constant_mean):
c = constant_mean(p.mu) # NxQ
eKxz = expectation(p, (kern, feat), nghp=nghp) # NxM
return c[..., None] * eKxz[:, None, :] | 0.002188 |
def last_time_non_ok_or_up(self):
"""Get the last time the service was in a non-OK state
:return: the nearest last time the service was not ok
:rtype: int
"""
non_ok_times = [x for x in [self.last_time_warning,
self.last_time_critical,
self.last_time_unknown]
if x > self.last_time_ok]
if not non_ok_times:
last_time_non_ok = 0 # todo: program_start would be better?
else:
last_time_non_ok = min(non_ok_times)
return last_time_non_ok | 0.003231 |
def recent(self):
"""
Retrieve a selection of conversations with the most recent activity, and store them in the cache.
Each conversation is only retrieved once, so subsequent calls will retrieve older conversations.
Returns:
:class:`SkypeChat` list: collection of recent conversations
"""
url = "{0}/users/ME/conversations".format(self.skype.conn.msgsHost)
params = {"startTime": 0,
"view": "msnp24Equivalent",
"targetType": "Passport|Skype|Lync|Thread"}
resp = self.skype.conn.syncStateCall("GET", url, params, auth=SkypeConnection.Auth.RegToken).json()
chats = {}
for json in resp.get("conversations", []):
cls = SkypeSingleChat
if "threadProperties" in json:
info = self.skype.conn("GET", "{0}/threads/{1}".format(self.skype.conn.msgsHost, json.get("id")),
auth=SkypeConnection.Auth.RegToken,
params={"view": "msnp24Equivalent"}).json()
json.update(info)
cls = SkypeGroupChat
chats[json.get("id")] = self.merge(cls.fromRaw(self.skype, json))
return chats | 0.005564 |
def build_variable_font(
self,
designspace,
output_path=None,
output_dir=None,
master_bin_dir=None,
ttf=True,
):
"""Build OpenType variable font from masters in a designspace."""
assert not (output_path and output_dir), "mutually exclusive args"
ext = "ttf" if ttf else "otf"
if hasattr(designspace, "__fspath__"):
designspace = designspace.__fspath__()
if isinstance(designspace, basestring):
designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace)
if master_bin_dir is None:
master_bin_dir = self._output_dir(ext, interpolatable=True)
finder = partial(_varLib_finder, directory=master_bin_dir)
else:
assert all(isinstance(s.font, TTFont) for s in designspace.sources)
finder = lambda s: s # noqa: E731
if output_path is None:
output_path = (
os.path.splitext(os.path.basename(designspace.path))[0] + "-VF"
)
output_path = self._output_path(
output_path, ext, is_variable=True, output_dir=output_dir
)
logger.info("Building variable font " + output_path)
font, _, _ = varLib.build(designspace, finder)
font.save(output_path) | 0.003712 |
def generateVariant(self, referenceName, position, randomNumberGenerator):
"""
Generate a random variant for the specified position using the
specified random number generator. This generator should be seeded
with a value that is unique to this position so that the same variant
will always be produced regardless of the order it is generated in.
"""
variant = self._createGaVariant()
variant.reference_name = referenceName
variant.start = position
variant.end = position + 1 # SNPs only for now
bases = ["A", "C", "G", "T"]
ref = randomNumberGenerator.choice(bases)
variant.reference_bases = ref
alt = randomNumberGenerator.choice(
[base for base in bases if base != ref])
variant.alternate_bases.append(alt)
randChoice = randomNumberGenerator.randint(0, 2)
if randChoice == 0:
variant.filters_applied = False
elif randChoice == 1:
variant.filters_applied = True
variant.filters_passed = True
else:
variant.filters_applied = True
variant.filters_passed = False
variant.filters_failed.append('q10')
for callSet in self.getCallSets():
call = variant.calls.add()
call.call_set_id = callSet.getId()
# for now, the genotype is either [0,1], [1,1] or [1,0] with equal
# probability; probably will want to do something more
# sophisticated later.
randomChoice = randomNumberGenerator.choice(
[[0, 1], [1, 0], [1, 1]])
call.genotype.extend(randomChoice)
# TODO What is a reasonable model for generating these likelihoods?
# Are these log-scaled? Spec does not say.
call.genotype_likelihood.extend([-100, -100, -100])
variant.id = self.getVariantId(variant)
return variant | 0.00102 |
def generate_safemode_windows():
"""Produce batch file to run QML in safe-mode
Usage:
$ python -c "import compat;compat.generate_safemode_windows()"
$ run.bat
"""
try:
import pyblish
import pyblish_qml
import PyQt5
except ImportError:
return sys.stderr.write(
"Run this in a terminal with access to "
"the Pyblish libraries and PyQt5.\n")
template = r"""@echo off
:: Clear all environment variables
@echo off
if exist ".\backup_env.bat" del ".\backup_env.bat"
for /f "tokens=1* delims==" %%a in ('set') do (
echo set %%a=%%b>> .\backup_env.bat
set %%a=
)
:: Set only the bare essentials
set PATH={PyQt5}
set PATH=%PATH%;{python}
set PYTHONPATH={pyblish}
set PYTHONPATH=%PYTHONPATH%;{pyblish_qml}
set PYTHONPATH=%PYTHONPATH%;{PyQt5}
set SystemRoot=C:\Windows
:: Run Pyblish
python -m pyblish_qml
:: Restore environment
backup_env.bat
"""
values = {}
for lib in (pyblish, pyblish_qml, PyQt5):
values[lib.__name__] = os.path.dirname(os.path.dirname(lib.__file__))
values["python"] = os.path.dirname(sys.executable)
with open("run.bat", "w") as f:
print("Writing %s" % template.format(**values))
f.write(template.format(**values)) | 0.000738 |
def wraps(__fn, **kw):
"""Like ``functools.wraps``, with support for annotations."""
kw['assigned'] = kw.get('assigned', WRAPPER_ASSIGNMENTS)
return functools.wraps(__fn, **kw) | 0.01 |
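A minimal sketch of the stdlib behaviour this wrapper builds on: functools.wraps copying metadata from the wrapped function, with annotations included when '__annotations__' is in the assigned tuple (as it is in modern functools.WRAPPER_ASSIGNMENTS). The logged decorator is hypothetical.

import functools

def logged(fn):
    @functools.wraps(fn)              # copies __name__, __doc__, __annotations__, ...
    def wrapper(*args, **kwargs):
        print("calling", fn.__name__)
        return fn(*args, **kwargs)
    return wrapper

@logged
def add(x: int, y: int) -> int:
    """Add two integers."""
    return x + y

# add.__name__ == 'add' and add.__annotations__ is preserved on the wrapper.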
def get_sorted_attachments(self):
"""Returns a sorted list of analysis info dictionaries
"""
inf = float("inf")
order = self.get_attachments_order()
attachments = self.get_attachments()
def att_cmp(att1, att2):
_n1 = att1.get('UID')
_n2 = att2.get('UID')
_i1 = _n1 in order and order.index(_n1) + 1 or inf
_i2 = _n2 in order and order.index(_n2) + 1 or inf
return cmp(_i1, _i2)
sorted_attachments = sorted(attachments, cmp=att_cmp)
return sorted_attachments | 0.003431 |
def unregister(self, model):
"""
Unregister a permission handler from the model
Parameters
----------
model : django model class
A django model class
Raises
------
KeyError
Raise when the model have not registered in registry yet.
"""
if model not in self._registry:
raise KeyError("A permission handler class have not been "
"registered for '%s' yet" % model)
# remove from registry
del self._registry[model] | 0.003509 |
def generator(self, output, target):
"Evaluate the `output` with the critic then uses `self.loss_funcG` to combine it with `target`."
fake_pred = self.gan_model.critic(output)
return self.loss_funcG(fake_pred, target, output) | 0.012048 |
def calc_conf_intervals(self,
conf_percentage,
interval_type='all',
init_vals=None,
epsilon=abc.EPSILON,
**fit_kwargs):
"""
Calculates percentile, bias-corrected and accelerated, and approximate
bootstrap confidence intervals.
Parameters
----------
conf_percentage : scalar in the interval (0.0, 100.0).
Denotes the confidence-level for the returned endpoints. For
instance, to calculate a 95% confidence interval, pass `95`.
interval_type : str in {'all', 'pi', 'bca', 'abc'}, optional.
Denotes the type of confidence intervals that should be calculated.
'all' results in all types of confidence intervals being
calculated. 'pi' means 'percentile intervals', 'bca' means
'bias-corrected and accelerated', and 'abc' means 'approximate
bootstrap confidence' intervals. Default == 'all'.
init_vals : 1D ndarray.
The initial values used to estimate the one's choice model.
epsilon : positive float, optional.
Should denote the 'very small' value being used to calculate the
desired finite difference approximations to the various influence
functions for the 'abc' intervals. Should be close to zero.
Default == sys.float_info.epsilon.
fit_kwargs : additional keyword arguments, optional.
Should contain any additional kwargs used to alter the default
behavior of `model_obj.fit_mle` and thereby enforce conformity with
how the MLE was obtained. Will be passed directly to
`model_obj.fit_mle` when calculating the 'abc' intervals.
Returns
-------
None. Will store the confidence intervals on their respective model
objects: `self.percentile_interval`, `self.bca_interval`,
`self.abc_interval`, or all of these objects.
"""
if interval_type == 'pi':
self.calc_percentile_interval(conf_percentage)
elif interval_type == 'bca':
self.calc_bca_interval(conf_percentage)
elif interval_type == 'abc':
self.calc_abc_interval(conf_percentage,
init_vals,
epsilon=epsilon,
**fit_kwargs)
elif interval_type == 'all':
print("Calculating Percentile Confidence Intervals")
sys.stdout.flush()
self.calc_percentile_interval(conf_percentage)
print("Calculating BCa Confidence Intervals")
sys.stdout.flush()
self.calc_bca_interval(conf_percentage)
# Note we don't print a user message here since that is done in
# self.calc_abc_interval().
self.calc_abc_interval(conf_percentage,
init_vals,
epsilon=epsilon,
**fit_kwargs)
# Get the alpha % for the given confidence percentage.
alpha = bc.get_alpha_from_conf_percentage(conf_percentage)
# Get lists of the interval type names and the endpoint names
interval_type_names = ['percentile_interval',
'BCa_interval',
'ABC_interval']
endpoint_names = ['{:.3g}%'.format(alpha / 2.0),
'{:.3g}%'.format(100 - alpha / 2.0)]
# Create the column names for the dataframe of confidence intervals
multi_index_names =\
list(itertools.product(interval_type_names, endpoint_names))
df_column_index = pd.MultiIndex.from_tuples(multi_index_names)
# Create the dataframe containing all confidence intervals
self.all_intervals = pd.concat([self.percentile_interval,
self.bca_interval,
self.abc_interval],
axis=1,
ignore_index=True)
# Store the column names for the combined confidence intervals
self.all_intervals.columns = df_column_index
self.all_intervals.index = self.mle_params.index
else:
msg =\
"interval_type MUST be in `['pi', 'bca', 'abc', 'all']`"
raise ValueError(msg)
return None | 0.001506 |
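A hypothetical call pattern, assuming `boot` is an already-fitted instance of this bootstrap class and `initial_values` are the starting values used for the original MLE:

boot.calc_conf_intervals(95, interval_type='all', init_vals=initial_values)
print(boot.all_intervals)        # MultiIndex dataframe with all three interval types
print(boot.percentile_interval)  # or inspect a single interval type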
def get(self, node, path):
"""
Return instance at `path`.
An example module tree:
>>> from anytree import Node
>>> top = Node("top", parent=None)
>>> sub0 = Node("sub0", parent=top)
>>> sub0sub0 = Node("sub0sub0", parent=sub0)
>>> sub0sub1 = Node("sub0sub1", parent=sub0)
>>> sub1 = Node("sub1", parent=top)
A resolver using the `name` attribute:
>>> r = Resolver('name')
Relative paths:
>>> r.get(top, "sub0/sub0sub0")
Node('/top/sub0/sub0sub0')
>>> r.get(sub1, "..")
Node('/top')
>>> r.get(sub1, "../sub0/sub0sub1")
Node('/top/sub0/sub0sub1')
>>> r.get(sub1, ".")
Node('/top/sub1')
>>> r.get(sub1, "")
Node('/top/sub1')
>>> r.get(top, "sub2")
Traceback (most recent call last):
...
anytree.resolver.ChildResolverError: Node('/top') has no child sub2. Children are: 'sub0', 'sub1'.
Absolute paths:
>>> r.get(sub0sub0, "/top")
Node('/top')
>>> r.get(sub0sub0, "/top/sub0")
Node('/top/sub0')
>>> r.get(sub0sub0, "/")
Traceback (most recent call last):
...
anytree.resolver.ResolverError: root node missing. root is '/top'.
>>> r.get(sub0sub0, "/bar")
Traceback (most recent call last):
...
anytree.resolver.ResolverError: unknown root node '/bar'. root is '/top'.
"""
node, parts = self.__start(node, path)
for part in parts:
if part == "..":
node = node.parent
elif part in ("", "."):
pass
else:
node = self.__get(node, part)
return node | 0.002248 |
def GeneralGuinier(q, G, Rg, s):
"""Generalized Guinier scattering
Inputs:
-------
``q``: independent variable
``G``: factor
``Rg``: radius of gyration
``s``: dimensionality parameter (can be 1, 2, 3)
Formula:
--------
``G/q**(3-s)*exp(-(q^2*Rg^2)/s)``
"""
return G / q ** (3 - s) * np.exp(-(q * Rg) ** 2 / s) | 0.002639 |
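A quick numerical check of the formula, assuming numpy is imported as np (as the body requires); the q grid and parameters are illustrative:

import numpy as np

q = np.array([0.01, 0.05, 0.1])             # scattering vector magnitudes
I = GeneralGuinier(q, G=1.0, Rg=20.0, s=3)  # with s=3, q**(3-s) == 1, so this reduces to
                                            # the classic Guinier law G*exp(-q^2*Rg^2/3)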
def FileTransfer(*args, **kwargs):
"""Factory function selects the proper SCP class and creates object based on device_type."""
if len(args) >= 1:
device_type = args[0].device_type
else:
device_type = kwargs["ssh_conn"].device_type
if device_type not in scp_platforms:
raise ValueError(
"Unsupported SCP device_type: "
"currently supported platforms are: {}".format(scp_platforms_str)
)
FileTransferClass = FILE_TRANSFER_MAP[device_type]
return FileTransferClass(*args, **kwargs) | 0.003578 |
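The factory dispatches on device_type through a module-level map; a generic sketch of that pattern with illustrative class names and keys (the real map and platform strings live elsewhere in the library):

class CiscoFileTransfer:
    pass

class JuniperFileTransfer:
    pass

FILE_TRANSFER_MAP = {
    "cisco_ios": CiscoFileTransfer,
    "juniper_junos": JuniperFileTransfer,
}
scp_platforms = set(FILE_TRANSFER_MAP)
scp_platforms_str = ", ".join(sorted(scp_platforms))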
def comment_sync(self, comment):
"""Update comments to host and notify subscribers"""
self.host.update(key="comment", value=comment)
self.host.emit("commented", comment=comment) | 0.00995 |
def plot(X, marker='.', kind='plot', title=None, fig='current', ax=None,
**kwargs):
'''General plotting function that aims to cover most common cases.
X : numpy array of 1d, 2d, or 3d points, with one point per row.
marker : passed to the underlying plotting function
kind : one of {plot, scatter} that controls the plot type.
title : if given, used as the axis title
fig : a matplotlib.Figure, or one of {current, new}. Only used when ax=None.
ax : a matplotlib.Axes object, or None
All other keyword arguments are passed on to the underlying plotting function.
'''
X = np.asanyarray(X)
if X.ndim not in (1,2) or (X.ndim == 2 and X.shape[1] not in (1,2,3)):
raise ValueError('Input data must be rows of 1, 2, or 3 dimensional points')
is_3d = X.ndim == 2 and X.shape[1] == 3
is_1d = X.ndim == 1 or X.shape[1] == 1
ax = _get_axis(fig, ax, is_3d)
# XXX: support old-style scatter=True kwarg usage
if kwargs.get('scatter', False):
kind = 'scatter'
del kwargs['scatter']
# Do the plotting
    if kind == 'scatter':
if is_1d:
ax.scatter(np.arange(len(X)), X, marker=marker, **kwargs)
elif is_3d:
ax.scatter(X[:,0], X[:,1], X[:,2], marker=marker, **kwargs)
else:
ax.scatter(X[:,0], X[:,1], marker=marker, **kwargs)
    elif kind == 'plot':
if is_1d:
ax.plot(X, marker, **kwargs)
elif is_3d:
ax.plot(X[:,0], X[:,1], X[:,2], marker, **kwargs)
else:
ax.plot(X[:,0], X[:,1], marker, **kwargs)
else:
raise ValueError('Unsupported kind: %r' % kind)
if title:
ax.set_title(title)
return plt.show | 0.022388 |
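Illustrative calls, assuming numpy and matplotlib are imported as the function body expects:

import numpy as np

pts = np.random.randn(100, 2)                        # 100 two-dimensional points
plot(pts, marker='o', kind='scatter', title='demo')  # scatter plot of 2-D points
plot(np.sin(np.linspace(0, 6, 50)), kind='plot')     # 1-D data plotted against its index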
def root_parent(self, category=None):
""" Returns the topmost parent of the current category. """
return next(filter(lambda c: c.is_root, self.hierarchy())) | 0.011628 |
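The next(filter(...)) idiom returns the first element matching the predicate and raises StopIteration when nothing matches; a standalone illustration:

items = [3, 8, 12, 5]
first_big = next(filter(lambda n: n > 10, items))          # 12
first_huge = next(filter(lambda n: n > 100, items), None)  # None instead of StopIteration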
def image_query_record(self, imagename=None):
"""Query the image record from database, if imagename is None, all
of the image records will be returned, otherwise only the specified
image record will be returned."""
if imagename:
with get_image_conn() as conn:
result = conn.execute("SELECT * FROM image WHERE "
"imagename=?", (imagename,))
image_list = result.fetchall()
if not image_list:
obj_desc = "Image with name: %s" % imagename
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID=self._module_id)
else:
with get_image_conn() as conn:
result = conn.execute("SELECT * FROM image")
image_list = result.fetchall()
        # Map each image record to a dict whose keys are the field names in
        # the image DB
image_keys_list = ['imagename', 'imageosdistro', 'md5sum',
'disk_size_units', 'image_size_in_bytes', 'type',
'comments']
image_result = []
for item in image_list:
image_item = dict(zip(image_keys_list, item))
image_result.append(image_item)
return image_result | 0.003698 |
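The row-to-dict mapping at the end pairs a fixed key list with each SQLite row via zip; the same idiom in isolation, with illustrative values:

keys = ['imagename', 'imageosdistro', 'md5sum']
row = ('rhel79-img', 'rhel7.9', '0123abcd')
record = dict(zip(keys, row))
# {'imagename': 'rhel79-img', 'imageosdistro': 'rhel7.9', 'md5sum': '0123abcd'}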
def _magic_parser(stream, magic):
"""
Parse the section with the SCF cycle
Returns:
dict where the key are the name of columns and
the values are list of numbers. Note if no section was found.
.. warning::
The parser is very fragile and should be replaced by YAML.
"""
#Example (SCF cycle, similar format is used for phonons):
#
# iter Etot(hartree) deltaE(h) residm vres2
# ETOT 1 -8.8604027880849 -8.860E+00 2.458E-02 3.748E+00
# At SCF step 5 vres2 = 3.53E-08 < tolvrs= 1.00E-06 =>converged.
in_doc, fields = 0, None
for line in stream:
line = line.strip()
if line.startswith(magic):
keys = line.split()
fields = OrderedDict((k, []) for k in keys)
if fields is not None:
#print(line)
in_doc += 1
if in_doc == 1:
continue
# End of the section.
if not line: break
tokens = list(map(float, line.split()[1:]))
assert len(tokens) == len(keys)
for l, v in zip(fields.values(), tokens):
l.append(v)
# Convert values to numpy arrays.
if fields:
return OrderedDict([(k, np.array(v)) for k, v in fields.items()])
else:
return None | 0.004471 |
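A self-contained check of the parser above, feeding it the sample section from the comment through io.StringIO; the magic string is the prefix of the header line:

import io

sample = """\
 iter   Etot(hartree)      deltaE(h)  residm     vres2
 ETOT  1  -8.8604027880849    -8.860E+00 2.458E-02  3.748E+00

"""
fields = _magic_parser(io.StringIO(sample), magic="iter")
# fields["Etot(hartree)"] -> array([-8.8604...]); one array per header column.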
def download_parsed(self, days=60):
"""Downloaded OFX response parsed by :py:meth:`OfxParser.parse`
:param days: Number of days to look back at
:type days: integer
:rtype: :py:class:`ofxparser.Ofx`
"""
if IS_PYTHON_2:
return OfxParser.parse(
self.download(days=days)
)
else:
return OfxParser.parse(
BytesIO(self.download(days=days).read().encode())
) | 0.004107 |
def urisplit(uristring):
"""Split a well-formed URI reference string into a tuple with five
components corresponding to a URI's general structure::
<scheme>://<authority>/<path>?<query>#<fragment>
"""
if isinstance(uristring, bytes):
result = SplitResultBytes
else:
result = SplitResultUnicode
return result(*result.RE.match(uristring).groups()) | 0.002545 |
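For comparison, the standard library performs a similar five-way split into scheme, authority (netloc), path, query, and fragment, though its edge-case semantics differ slightly:

from urllib.parse import urlsplit

parts = urlsplit("https://example.com/a/b?x=1#top")
# SplitResult(scheme='https', netloc='example.com', path='/a/b', query='x=1', fragment='top')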
def spin_thread(self, interval=1):
"""call Client.spin() in a background thread on some regular interval
This helps ensure that messages don't pile up too much in the zmq queue
while you are working on other things, or just leaving an idle terminal.
It also helps limit potential padding of the `received` timestamp
on AsyncResult objects, used for timings.
Parameters
----------
interval : float, optional
The interval on which to spin the client in the background thread
(simply passed to time.sleep).
Notes
-----
For precision timing, you may want to use this method to put a bound
on the jitter (in seconds) in `received` timestamps used
in AsyncResult.wall_time.
"""
if self._spin_thread is not None:
self.stop_spin_thread()
self._stop_spinning.clear()
self._spin_thread = Thread(target=self._spin_every, args=(interval,))
self._spin_thread.daemon = True
self._spin_thread.start() | 0.008803 |
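The background loop that `_spin_every` runs is not shown here; a minimal sketch of such a loop built on threading.Event (an assumption, not the library's actual implementation):

import time
from threading import Event

class _Spinner:
    def __init__(self):
        self._stop_spinning = Event()

    def spin(self):
        pass  # drain any pending zmq messages here

    def _spin_every(self, interval=1):
        # Runs in the daemon thread until stop_spin_thread() sets the event.
        while not self._stop_spinning.is_set():
            self.spin()
            time.sleep(interval)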
def cp(hdfs_src, hdfs_dst):
"""Copy a file
:param hdfs_src: Source (str)
:param hdfs_dst: Destination (str)
:raises: IOError: If unsuccessful
"""
cmd = "hadoop fs -cp %s %s" % (hdfs_src, hdfs_dst)
rcode, stdout, stderr = _checked_hadoop_fs_command(cmd) | 0.003559 |
def replace_tax_rate_by_id(cls, tax_rate_id, tax_rate, **kwargs):
"""Replace TaxRate
Replace all attributes of TaxRate
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_tax_rate_by_id(tax_rate_id, tax_rate, async=True)
>>> result = thread.get()
:param async bool
:param str tax_rate_id: ID of taxRate to replace (required)
:param TaxRate tax_rate: Attributes of taxRate to replace (required)
:return: TaxRate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs)
else:
(data) = cls._replace_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs)
return data | 0.004965 |
def gen_locale(locale, **kwargs):
'''
Generate a locale. Options:
.. versionadded:: 2014.7.0
:param locale: Any locale listed in /usr/share/i18n/locales or
/usr/share/i18n/SUPPORTED for Debian and Gentoo based distributions,
which require the charmap to be specified as part of the locale
when generating it.
verbose
Show extra warnings about errors that are normally ignored.
CLI Example:
.. code-block:: bash
salt '*' locale.gen_locale en_US.UTF-8
salt '*' locale.gen_locale 'en_IE.UTF-8 UTF-8' # Debian/Gentoo only
'''
on_debian = __grains__.get('os') == 'Debian'
on_ubuntu = __grains__.get('os') == 'Ubuntu'
on_gentoo = __grains__.get('os_family') == 'Gentoo'
on_suse = __grains__.get('os_family') == 'Suse'
on_solaris = __grains__.get('os_family') == 'Solaris'
if on_solaris: # all locales are pre-generated
return locale in __salt__['locale.list_avail']()
locale_info = salt.utils.locales.split_locale(locale)
locale_search_str = '{0}_{1}'.format(locale_info['language'], locale_info['territory'])
    # if the charmap has not been supplied, normalize by appending it
if not locale_info['charmap'] and not on_ubuntu:
locale_info['charmap'] = locale_info['codeset']
locale = salt.utils.locales.join_locale(locale_info)
if on_debian or on_gentoo: # file-based search
search = '/usr/share/i18n/SUPPORTED'
valid = __salt__['file.search'](search,
'^{0}$'.format(locale),
flags=re.MULTILINE)
else: # directory-based search
if on_suse:
search = '/usr/share/locale'
else:
search = '/usr/share/i18n/locales'
try:
valid = locale_search_str in os.listdir(search)
except OSError as ex:
log.error(ex)
raise CommandExecutionError(
"Locale \"{0}\" is not available.".format(locale))
if not valid:
log.error(
'The provided locale "%s" is not found in %s', locale, search)
return False
if os.path.exists('/etc/locale.gen'):
__salt__['file.replace'](
'/etc/locale.gen',
r'^\s*#\s*{0}\s*$'.format(locale),
'{0}\n'.format(locale),
append_if_not_found=True
)
elif on_ubuntu:
__salt__['file.touch'](
'/var/lib/locales/supported.d/{0}'.format(locale_info['language'])
)
__salt__['file.replace'](
'/var/lib/locales/supported.d/{0}'.format(locale_info['language']),
locale,
locale,
append_if_not_found=True
)
if salt.utils.path.which('locale-gen'):
cmd = ['locale-gen']
if on_gentoo:
cmd.append('--generate')
if on_ubuntu:
cmd.append(salt.utils.locales.normalize_locale(locale))
else:
cmd.append(locale)
elif salt.utils.path.which('localedef'):
cmd = ['localedef', '--force', '-i', locale_search_str, '-f', locale_info['codeset'],
'{0}.{1}'.format(locale_search_str,
locale_info['codeset']),
kwargs.get('verbose', False) and '--verbose' or '--quiet']
else:
raise CommandExecutionError(
'Command "locale-gen" or "localedef" was not found on this system.')
res = __salt__['cmd.run_all'](cmd)
if res['retcode']:
log.error(res['stderr'])
if kwargs.get('verbose'):
return res
else:
return res['retcode'] == 0 | 0.001092 |
def enumerate_joint_ask(X, e, P):
"""Return a probability distribution over the values of the variable X,
given the {var:val} observations e, in the JointProbDist P. [Section 13.3]
>>> P = JointProbDist(['X', 'Y'])
>>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[2,1] = 0.125
>>> enumerate_joint_ask('X', dict(Y=1), P).show_approx()
'0: 0.667, 1: 0.167, 2: 0.167'
"""
assert X not in e, "Query variable must be distinct from evidence"
Q = ProbDist(X) # probability distribution for X, initially empty
Y = [v for v in P.variables if v != X and v not in e] # hidden vars.
for xi in P.values(X):
Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)
return Q.normalize() | 0.004208 |
def get_dict(self, only_attributes=None, exclude_attributes=None, df_format=False):
"""Get a dictionary of this object's attributes. Optional format for storage in a Pandas DataFrame.
Args:
only_attributes (str, list): Attributes that should be returned. If not provided, all are returned.
exclude_attributes (str, list): Attributes that should be excluded.
df_format (bool): If dictionary values should be formatted for a dataframe
(everything possible is transformed into strings, int, or float -
if something can't be transformed it is excluded)
Returns:
dict: Dictionary of attributes
"""
# Choose attributes to return, return everything in the object if a list is not specified
if not only_attributes:
keys = list(self.__dict__.keys())
else:
keys = ssbio.utils.force_list(only_attributes)
# Remove keys you don't want returned
if exclude_attributes:
exclude_attributes = ssbio.utils.force_list(exclude_attributes)
for x in exclude_attributes:
if x in keys:
keys.remove(x)
# Copy attributes into a new dictionary
df_dict = {}
for k, orig_v in self.__dict__.items():
if k in keys:
v = deepcopy(orig_v)
if df_format:
                    if v and not isinstance(v, (str, int, float, bool)):
try:
df_dict[k] = ssbio.utils.force_string(deepcopy(v))
except TypeError:
log.warning('{}: excluding attribute from dict, cannot transform into string'.format(k))
elif not v and not isinstance(v, int) and not isinstance(v, float):
df_dict[k] = None
else:
df_dict[k] = deepcopy(v)
else:
df_dict[k] = deepcopy(v)
return df_dict | 0.005324 |
def rewrite_subject_group(root, subjects, subject_group_type, overwrite=True):
"add or rewrite subject tags inside subj-group tags"
parent_tag_name = 'subj-group'
tag_name = 'subject'
wrap_tag_name = 'article-categories'
tag_attribute = 'subj-group-type'
# the parent tag where it should be found
xpath_parent = './/front/article-meta/article-categories'
    # the wrapping tag in case article-categories does not exist
xpath_article_meta = './/front/article-meta'
# the xpath to find the subject tags we are interested in
xpath = './/{parent_tag_name}[@{tag_attribute}="{group_type}"]'.format(
parent_tag_name=parent_tag_name,
tag_attribute=tag_attribute,
group_type=subject_group_type)
count = 0
# get the parent tag
parent_tag = root.find(xpath_parent)
if parent_tag is None:
# parent tag not found, add one
wrap_tag = root.find(xpath_article_meta)
article_categories_tag = SubElement(wrap_tag, wrap_tag_name)
parent_tag = article_categories_tag
insert_index = 0
# iterate all tags to find the index of the first tag we are interested in
if parent_tag is not None:
for tag_index, tag in enumerate(parent_tag.findall('*')):
if tag.tag == parent_tag_name and tag.get(tag_attribute) == subject_group_type:
insert_index = tag_index
if overwrite is True:
# if overwriting use the first one found
break
# if not overwriting, use the last one found + 1
if overwrite is not True:
insert_index += 1
# remove the tag if overwriting the existing values
if overwrite is True:
# remove all the tags
for tag in root.findall(xpath):
parent_tag.remove(tag)
# add the subjects
for subject in subjects:
subj_group_tag = Element(parent_tag_name)
subj_group_tag.set(tag_attribute, subject_group_type)
subject_tag = SubElement(subj_group_tag, tag_name)
subject_tag.text = subject
parent_tag.insert(insert_index, subj_group_tag)
count += 1
insert_index += 1
return count | 0.000905 |
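An end-to-end check, assuming Element and SubElement come from xml.etree.ElementTree (as the tag-building code implies); the XML skeleton is illustrative JATS-like markup:

from xml.etree.ElementTree import fromstring

xml = ("<article><front><article-meta>"
       "<article-categories/>"
       "</article-meta></front></article>")
root = fromstring(xml)
added = rewrite_subject_group(root, ["Neuroscience", "Cell Biology"],
                              "heading", overwrite=True)
# added == 2; each subject ends up wrapped as
# <subj-group subj-group-type="heading"><subject>...</subject></subj-group>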
def squash(self, a, b):
"""
Returns a generator that squashes two iterables into one.
```
        ['this', 'that'], [' and', ' or'] => ['this and', 'this or', 'that and', 'that or']
```
"""
return ((''.join(x) if isinstance(x, tuple) else x) for x in itertools.product(a, b)) | 0.012232 |
def set_checked(self, checked):
""" Properly check the correct radio button.
"""
if not checked:
self.widget.clearCheck()
else:
#: Checked is a reference to the radio declaration
#: so we need to get the ID of it
rb = checked.proxy.widget
if not rb:
return
self.widget.check(rb.getId()) | 0.004914 |
def get_file_range(ase, offsets, timeout=None):
# type: (blobxfer.models.azure.StorageEntity,
# blobxfer.models.download.Offsets, int) -> bytes
"""Retrieve file range
:param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity
:param blobxfer.models.download.Offsets offsets: download offsets
:param int timeout: timeout
:rtype: bytes
:return: content for file range
"""
dir, fpath, _ = parse_file_path(ase.name)
return ase.client._get_file(
share_name=ase.container,
directory_name=dir,
file_name=fpath,
start_range=offsets.range_start,
end_range=offsets.range_end,
validate_content=False, # HTTPS takes care of integrity during xfer
timeout=timeout,
snapshot=ase.snapshot,
).content | 0.001232 |
def parse_args():
"""Define and parse command line arguments"""
parser = argparse.ArgumentParser(
        description='''Convert a set of files to a single h5features file; input files
        can be npz (numpy) or mat (MATLAB) files.''')
parser.add_argument('file', nargs='+', type=str,
help='File to convert in the h5features format')
parser.add_argument('-o', '--output', default='features.h5', type=str,
help='''The output h5features file to write on
(default is %(default)s)''')
parser.add_argument('-g', '--group', default='h5features', type=str,
help='''The group to write in the output file
(default is %(default)s)''')
parser.add_argument('--chunk', default=0.1, type=float,
help='''size of a file chunk in MB
(default is %(default)s)''')
return parser.parse_args() | 0.002051 |
def main(argv=None):
""" Invoke the cosmic ray evaluation.
:param argv: the command line arguments
"""
signal.signal(
signal.SIGINT,
lambda *args: sys.exit(_SIGNAL_EXIT_CODE_BASE + signal.SIGINT))
if hasattr(signal, 'SIGINFO'):
signal.signal(
getattr(signal, 'SIGINFO'),
lambda *args: report_progress(sys.stderr))
try:
return docopt_subcommands.main(
commands=dsc,
argv=argv,
doc_template=DOC_TEMPLATE,
exit_at_end=False)
except docopt.DocoptExit as exc:
print(exc, file=sys.stderr)
return ExitCode.USAGE
except FileNotFoundError as exc:
print(exc, file=sys.stderr)
return ExitCode.NO_INPUT
except PermissionError as exc:
print(exc, file=sys.stderr)
return ExitCode.NO_PERM
except cosmic_ray.config.ConfigError as exc:
print(repr(exc), file=sys.stderr)
if exc.__cause__ is not None:
print(exc.__cause__, file=sys.stderr)
return ExitCode.CONFIG
except subprocess.CalledProcessError as exc:
print('Error in subprocess', file=sys.stderr)
print(exc, file=sys.stderr)
return exc.returncode | 0.000805 |
def load():
"""Loads the libdmtx shared library.
"""
if 'Windows' == platform.system():
# Possible scenarios here
# 1. Run from source, DLLs are in pylibdmtx directory
# cdll.LoadLibrary() imports DLLs in repo root directory
# 2. Wheel install into CPython installation
# cdll.LoadLibrary() imports DLLs in package directory
# 3. Wheel install into virtualenv
# cdll.LoadLibrary() imports DLLs in package directory
# 4. Frozen
# cdll.LoadLibrary() imports DLLs alongside executable
fname = _windows_fname()
try:
libdmtx = cdll.LoadLibrary(fname)
except OSError:
libdmtx = cdll.LoadLibrary(
str(Path(__file__).parent.joinpath(fname))
)
else:
# Assume a shared library on the path
path = find_library('dmtx')
if not path:
raise ImportError('Unable to find dmtx shared library')
libdmtx = cdll.LoadLibrary(path)
return libdmtx | 0.000935 |
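A minimal illustration of the non-Windows branch using only the standard library; whether a dmtx shared object is present depends on the host system:

from ctypes import cdll
from ctypes.util import find_library

path = find_library('dmtx')   # e.g. 'libdmtx.so.0' on Linux, or None if not installed
libdmtx = cdll.LoadLibrary(path) if path else None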