def get_resolv_dns():
"""
Returns the dns servers configured in /etc/resolv.conf
"""
result = []
try:
for line in open('/etc/resolv.conf', 'r'):
if line.startswith('nameserver'):
result.append(line.strip().split(' ')[1])
except FileNotFoundError:
pass
return result | 0.002967 |
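A quick usage sketch for the helper above (assuming a typical /etc/resolv.conf with nameserver lines; on systems without that file it simply returns an empty list):

servers = get_resolv_dns()
if servers:
    print("resolvers:", ", ".join(servers))
else:
    print("no resolvers found (or /etc/resolv.conf is missing)")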
def parse(self, only_known = False):
'''Ensure all sources are ready to be queried.
Parses ``sys.argv`` with the contained ``argparse.ArgumentParser`` and
sets ``parsed`` to True if ``only_known`` is False. Once ``parsed`` is
set to True, it is inadvisable to add more parameters (cf.
``add_parameter``). Also, if ``parsed`` is not set to True, retrieving
items (cf. ``__getitem__``) will result in a warning that values are
being retrieved from an unparsed Parameters.
**Arguments**
:``only_known``: If True, do not error or fail when unknown parameters
are encountered.
.. note::
If ``only_known`` is True, the ``--help`` and
``-h`` options on the command line (``sys.argv``)
will be ignored during parsing as it is unexpected
that these parameters' default behavior would be
desired at this stage of execution.
'''
self.parsed = not only_known or self.parsed
logger.info('parsing parameters')
logger.debug('sys.argv: %s', sys.argv)
if only_known:
args = [ _ for _ in copy.copy(sys.argv) if not re.match('-h|--help', _) ]
self._group_parsers['default'].parse_known_args(args = args, namespace = self._argument_namespace)
else:
self._group_parsers['default'].parse_args(namespace = self._argument_namespace) | 0.009554 |
def get_comments(self, issue_id):
"""Retrieve all the comments of a given issue.
:param issue_id: ID of the issue
"""
url = urijoin(self.base_url, self.RESOURCE, self.VERSION_API, self.ISSUE, issue_id, self.COMMENT)
comments = self.get_items(DEFAULT_DATETIME, url, expand_fields=False)
return comments | 0.008547 |
def getPublicKeys(self, current=False):
""" Return all installed public keys
:param bool current: If true, returns only keys for currently
connected blockchain
"""
pubkeys = self.store.getPublicKeys()
if not current:
return pubkeys
pubs = []
for pubkey in pubkeys:
# Filter those keys not for our network
if pubkey[: len(self.prefix)] == self.prefix:
pubs.append(pubkey)
return pubs | 0.003854 |
def save_screenshot(driver, name):
"""
Save a screenshot of the browser.
The location of the screenshot can be configured
by the environment variable `SCREENSHOT_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name for the screenshot, which will be used in the output file name.
Returns:
None
"""
if hasattr(driver, 'save_screenshot'):
screenshot_dir = os.environ.get('SCREENSHOT_DIR')
if not screenshot_dir:
LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot')
return
elif not os.path.exists(screenshot_dir):
os.makedirs(screenshot_dir)
image_name = os.path.join(screenshot_dir, name + '.png')
driver.save_screenshot(image_name)
else:
msg = (
u"Browser does not support screenshots. "
u"Could not save screenshot '{name}'"
).format(name=name)
LOGGER.warning(msg) | 0.00273 |
def show_info(self):
"""
displays the doc string of the selected element
"""
sender = self.sender()
tree = sender.parent()
index = tree.selectedIndexes()
info = ''
if index != []:
index = index[0]
name = str(index.model().itemFromIndex(index).text())
if name in set(list(self.elements_from_file.keys()) + list(self.elements_selected.keys())):
probe_name = None
instrument_name = name
else:
instrument_name = str(index.model().itemFromIndex(index).parent().text())
probe_name = name
module = __import__('pylabcontrol.instruments', fromlist=[instrument_name])
if probe_name is None:
info = getattr(module, instrument_name).__doc__
else:
if probe_name in list(getattr(module, instrument_name)._PROBES.keys()):
info = getattr(module, instrument_name)._PROBES[probe_name]
if info is not None:
self.lbl_info.setText(info) | 0.006335 |
def _filter_netcdf4_metadata(self, mdata_dict, coltype, remove=False):
"""Filter metadata properties to be consistent with netCDF4.
Notes
-----
remove is forced to True if coltype is consistent with a string type
Parameters
----------
mdata_dict : dict
Dictionary equivalent to Meta object info
coltype : type
Type provided by _get_data_info
remove : boolean (False)
Removes FillValue and associated parameters disallowed for strings
Returns
-------
dict
Modified as needed for netCDF4
"""
# Coerce boolean types to integers
for key in mdata_dict:
if type(mdata_dict[key]) == bool:
mdata_dict[key] = int(mdata_dict[key])
if (coltype == type(' ')) or (coltype == type(u' ')):
remove = True
# print ('coltype', coltype, remove, type(coltype), )
if u'_FillValue' in mdata_dict.keys():
# make sure _FillValue is the same type as the data
if remove:
mdata_dict.pop('_FillValue')
else:
mdata_dict['_FillValue'] = np.array(mdata_dict['_FillValue']).astype(coltype)
if u'FillVal' in mdata_dict.keys():
# make sure FillVal is the same type as the data
if remove:
mdata_dict.pop('FillVal')
else:
mdata_dict['FillVal'] = np.array(mdata_dict['FillVal']).astype(coltype)
return mdata_dict | 0.005636 |
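A standalone sketch of the same filtering idea, outside the class (assumes numpy; the names here are hypothetical): booleans are coerced to ints and fill values are dropped for string columns, mirroring the method above.

import numpy as np

def filter_netcdf4_metadata(mdata, coltype, remove=False):
    """Standalone version of the coercion/removal logic above (illustrative only)."""
    mdata = dict(mdata)
    for key, value in mdata.items():
        if isinstance(value, bool):
            mdata[key] = int(value)  # netCDF4 attributes have no boolean type
    if coltype is str:
        remove = True  # string variables cannot carry a numeric fill value
    for fill_key in ('_FillValue', 'FillVal'):
        if fill_key in mdata:
            if remove:
                mdata.pop(fill_key)
            else:
                mdata[fill_key] = np.array(mdata[fill_key]).astype(coltype)
    return mdata

print(filter_netcdf4_metadata({'_FillValue': -999, 'flag': True}, coltype=np.int64))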
def __create(self, client_id, client_secret, calls, **kwargs):
"""Call documentation: `/batch/create
<https://www.wepay.com/developer/reference/batch#create>`_, plus extra
keyword parameter:
:keyword str access_token: will be used instead of instance's
``access_token``
"""
params = {
'client_id': client_id,
'client_secret': client_secret,
'calls': calls
}
return self.make_call(self.__create, params, kwargs) | 0.005639 |
def run(name,
cmd,
no_start=False,
preserve_state=True,
stdin=None,
python_shell=True,
output_loglevel='debug',
use_vt=False,
path=None,
ignore_retcode=False,
chroot_fallback=False,
keep_env='http_proxy,https_proxy,no_proxy'):
'''
.. versionadded:: 2015.8.0
Run :mod:`cmd.run <salt.modules.cmdmod.run>` within a container
.. warning::
Many shell builtins do not work, failing with stderr similar to the
following:
.. code-block:: bash
lxc_container: No such file or directory - failed to exec 'command'
The same error will be displayed in stderr if the command being run
does not exist. If no output is returned using this function, try using
:mod:`lxc.run_stderr <salt.modules.lxc.run_stderr>` or
:mod:`lxc.run_all <salt.modules.lxc.run_all>`.
name
Name of the container in which to run the command
cmd
Command to run
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
no_start : False
If the container is not running, don't start it
preserve_state : True
After running the command, return the container to its previous state
stdin : None
Standard input to be used for the command
output_loglevel : debug
Level at which to log the output from the command. Set to ``quiet`` to
suppress logging.
use_vt : False
Use SaltStack's utils.vt to stream output to console. Assumes
``output=all``.
chroot_fallback
if the container is not running, try to run the command using chroot
default: false
keep_env : http_proxy,https_proxy,no_proxy
A list of env vars to preserve. May be passed as comma-delimited list.
CLI Example:
.. code-block:: bash
salt myminion lxc.run mycontainer 'ifconfig -a'
'''
return _run(name,
cmd,
path=path,
output=None,
no_start=no_start,
preserve_state=preserve_state,
stdin=stdin,
python_shell=python_shell,
output_loglevel=output_loglevel,
use_vt=use_vt,
ignore_retcode=ignore_retcode,
chroot_fallback=chroot_fallback,
keep_env=keep_env) | 0.000404 |
def overlap(self, feature, stranded: bool=False):
"""Determine if a feature's position overlaps with the entry
Args:
feature (class): GFF3Entry object
stranded (bool): if True, features on different strands are not considered to overlap [default: False]
Returns:
bool: True if features overlap, else False
"""
# If stranded, features on incompatible strands cannot overlap
feature_strand = feature.strand
strand = self.strand
if stranded and ((strand == '.') or (strand == '+' and \
feature_strand in ['-', '.']) or (strand == '-' and \
feature_strand in ['+', '.'])):
return False
iv_1 = set(range(feature.start, feature.end + 1))
iv_2 = set(range(self.start, self.end + 1))
if len(iv_1.intersection(iv_2)) > 0:
return True
else:
return False | 0.009677 |
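The set-based intersection above costs memory proportional to the interval lengths; for a pure overlap test a constant-time comparison is equivalent. A minimal sketch (generic start/end values, not tied to GFF3Entry):

def intervals_overlap(a_start, a_end, b_start, b_end):
    # Closed intervals overlap iff each one starts at or before the other's end.
    return a_start <= b_end and b_start <= a_end

assert intervals_overlap(1, 10, 10, 20)       # a shared endpoint counts, as with the sets
assert not intervals_overlap(1, 5, 6, 9)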
def list_dataset_uris(cls, base_uri, config_path):
"""Return list containing URIs in base_uri."""
parsed_uri = generous_parse_uri(base_uri)
irods_path = parsed_uri.path
uri_list = []
logger.info("irods_path: '{}'".format(irods_path))
for dir_path in _ls_abspaths(irods_path):
logger.info("dir path: '{}'".format(dir_path))
base, uuid = os.path.split(dir_path)
base_uri = "irods:{}".format(base)
uri = cls.generate_uri(
name=None,
uuid=uuid,
base_uri=base_uri
)
storage_broker = cls(uri, config_path)
if storage_broker.has_admin_metadata():
uri_list.append(uri)
return uri_list | 0.002538 |
def to_ds9(self, coordsys='fk5', fmt='.6f', radunit='deg'):
"""
Converts a list of ``regions.Shape`` objects to ds9 region strings.
Parameters
----------
coordsys : str
This overrides the coordinate system frame for all regions.
fmt : str
A python string format defining the output precision.
Default is .6f, which is accurate to 0.0036 arcseconds.
radunit : str
This denotes the unit of the radius.
Returns
-------
region_string : str
ds9 region string
Examples
--------
TODO
"""
valid_symbols_reverse = {y: x for x, y in valid_symbols_ds9.items()}
ds9_strings = {
'circle': '{0}circle({1:FMT},{2:FMT},{3:FMT}RAD)',
'circleannulus': '{0}annulus({1:FMT},{2:FMT},{3:FMT}RAD,{4:FMT}RAD)',
'ellipse': '{0}ellipse({1:FMT},{2:FMT},{3:FMT}RAD,{4:FMT}RAD,{5:FMT})',
'rectangle': '{0}box({1:FMT},{2:FMT},{3:FMT}RAD,{4:FMT}RAD,{5:FMT})',
'polygon': '{0}polygon({1})',
'point': '{0}point({1:FMT},{2:FMT})',
'line': '{0}line({1:FMT},{2:FMT},{3:FMT},{4:FMT})',
'text': '{0}text({1:FMT},{2:FMT})'
}
output = '# Region file format: DS9 astropy/regions\n'
if radunit == 'arcsec':
# what's this for?
if coordsys in coordsys_mapping['DS9'].values():
radunitstr = '"'
else:
raise ValueError('Radius unit arcsec not valid for coordsys {}'.format(coordsys))
else:
radunitstr = ''
for key, val in ds9_strings.items():
ds9_strings[key] = val.replace("FMT", fmt).replace("RAD", radunitstr)
output += '{}\n'.format(coordsys)
for shape in self:
shape.check_ds9()
shape.meta = to_ds9_meta(shape.meta)
# if unspecified, include is True.
include = "-" if shape.include in (False, '-') else ""
if 'point' in shape.meta:
shape.meta['point'] = valid_symbols_reverse[shape.meta['point']]
if 'symsize' in shape.meta:
shape.meta['point'] += " {}".format(shape.meta.pop('symsize'))
meta_str = " ".join("{0}={1}".format(key, val) for key, val in
shape.meta.items() if key not in ('include', 'tag', 'comment', 'font', 'text'))
if 'tag' in shape.meta:
meta_str += " " + " ".join(["tag={0}".format(tag) for tag in shape.meta['tag']])
if 'font' in shape.meta:
meta_str += " " + 'font="{0}"'.format(shape.meta['font'])
if shape.meta.get('text', '') != '':
meta_str += " " + 'text={' + shape.meta['text'] + '}'
if 'comment' in shape.meta:
meta_str += " " + shape.meta['comment']
coord = []
if coordsys not in ['image', 'physical']:
for val in shape.coord:
if isinstance(val, Angle):
coord.append(float(val.value))
else:
if radunit == '' or radunit is None:
coord.append(float(val.value))
else:
coord.append(float(val.to(radunit).value))
if shape.region_type in ['ellipse', 'rectangle'] and len(shape.coord) % 2 == 1:
coord[-1] = float(shape.coord[-1].to('deg').value)
else:
for val in shape.coord:
if isinstance(val, u.Quantity):
coord.append(float(val.value))
else:
coord.append(float(val))
if shape.region_type in ['polygon', 'line']:
coord = [x+1 for x in coord]
else:
coord[0] += 1
coord[1] += 1
if shape.region_type == 'polygon':
val = "{0:" + fmt + "}"
temp = [val.format(x) for x in coord]
coord = ",".join(temp)
line = ds9_strings['polygon'].format(include, coord)
elif shape.region_type == 'ellipse':
coord[2:] = [x / 2 for x in coord[2:]]
if len(coord) % 2 == 1:
coord[-1] *= 2
line = ds9_strings['ellipse'].format(include, *coord)
else:
line = ds9_strings[shape.region_type].format(include, *coord)
if meta_str.strip():
output += "{0} # {1}\n".format(line, meta_str)
else:
output += "{0}\n".format(line)
return output | 0.002292 |
def assure_image(fnc):
"""
Converts an image ID passed as the 'image' parameter to an Image object.
"""
@wraps(fnc)
def _wrapped(self, img, *args, **kwargs):
if not isinstance(img, Image):
# Must be the ID
img = self._manager.get(img)
return fnc(self, img, *args, **kwargs)
return _wrapped | 0.002849 |
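A minimal usage sketch with hypothetical stand-ins for Image and the manager (the real classes come from the surrounding library); run in one module after the decorator, a plain ID is resolved before the wrapped method sees it:

from functools import wraps  # required by assure_image if this sketch is run standalone

class Image:  # hypothetical stand-in for the library's Image class
    def __init__(self, id):
        self.id = id

class FakeClient:  # hypothetical client whose manager resolves IDs to Image objects
    class _Manager:
        def get(self, img_id):
            return Image(img_id)

    def __init__(self):
        self._manager = self._Manager()

    @assure_image
    def delete_image(self, img):
        return "deleting image %s" % img.id

client = FakeClient()
print(client.delete_image("abc123"))         # a plain ID is resolved to an Image first
print(client.delete_image(Image("xyz789")))  # an Image instance passes straight through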
def add_circle(self,
center_lat=None,
center_lng=None,
radius=None,
**kwargs):
""" Adds a circle dict to the Map.circles attribute
The circle in a sphere is called "spherical cap" and is defined in the
Google Maps API by at least the center coordinates and its radius, in
meters. A circle has color and opacity both for the border line and the
inside area.
It accepts a circle dict representation as well.
Args:
center_lat (float): The circle center latitude
center_lng (float): The circle center longitude
radius (float): The circle radius, in meters
.. _Circle:
https://developers.google.com/maps/documentation/javascript/reference#Circle
"""
kwargs.setdefault('center', {})
if center_lat is not None:
kwargs['center']['lat'] = center_lat
if center_lng is not None:
kwargs['center']['lng'] = center_lng
if radius:
kwargs['radius'] = radius
if set(('lat', 'lng')) != set(kwargs['center'].keys()):
raise AttributeError('circle center coordinates required')
if 'radius' not in kwargs:
raise AttributeError('circle radius definition required')
kwargs.setdefault('stroke_color', '#FF0000')
kwargs.setdefault('stroke_opacity', .8)
kwargs.setdefault('stroke_weight', 2)
kwargs.setdefault('fill_color', '#FF0000')
kwargs.setdefault('fill_opacity', .3)
self.circles.append(kwargs) | 0.003697 |
def _perform_type_validation(self, path, typ, value, results):
"""
Validates a given value to match specified type.
The type can be defined as a Schema, type, a type name or [[TypeCode]].
When type is a Schema, it executes validation recursively against that Schema.
:param path: a dot notation path to the value.
:param typ: a type to match the value type
:param value: a value to be validated.
:param results: a list with validation results to add new results.
"""
# If type it not defined then skip
if typ is None:
return
# Perform validation against schema
if isinstance(typ, Schema):
schema = typ
schema._perform_validation(path, value, results)
return
# If value is null then skip
value = ObjectReader.get_value(value)
if value is None:
return
name = path if path is not None else "value"
value_type = type(value)
# Match types
if TypeMatcher.match_type(typ, value_type):
return
# Generate type mismatch error
results.append(
ValidationResult(
path,
ValidationResultType.Error,
"TYPE_MISMATCH",
name + " type must be " + self._type_to_string(typ) + " but found " + self._type_to_string(value_type),
typ,
value_type
)
) | 0.005288 |
async def _get(self):
"""
Read from the input queue.
If Queue raises (like Timeout or Empty), stat won't be changed.
"""
input_bag = await self.input.get()
# Store or check input type
if self._input_type is None:
self._input_type = type(input_bag)
elif type(input_bag) != self._input_type:
try:
if self._input_type == tuple:
input_bag = self._input_type(input_bag)
else:
input_bag = self._input_type(*input_bag)
except Exception as exc:
raise UnrecoverableTypeError(
"Input type changed to incompatible type between calls to {!r}.\nGot {!r} which is not of type {!r}.".format(
self.wrapped, input_bag, self._input_type
)
) from exc
# Store or check input length, which is a soft fallback in case we're just using tuples
if self._input_length is None:
self._input_length = len(input_bag)
elif len(input_bag) != self._input_length:
raise UnrecoverableTypeError(
"Input length changed between calls to {!r}.\nExpected {} but got {}: {!r}.".format(
self.wrapped, self._input_length, len(input_bag), input_bag
)
)
self.increment("in") # XXX should that go before type check ?
return input_bag | 0.003356 |
def authenticate(username, password):
"""Authenticate with a DC/OS cluster and return an ACS token.
return: ACS token
"""
url = _gen_url('acs/api/v1/auth/login')
creds = {
'uid': username,
'password': password
}
response = dcos.http.request('post', url, json=creds)
if response.status_code == 200:
return response.json()['token']
else:
return None | 0.002392 |
def match(self):
"""
*match the transients against the sherlock-catalogues according to the search algorithm and return matches alongside the predicted classification(s)*
**Return:**
- ``classification`` -- the crossmatch results and classifications assigned to the transients
See the class docstring for usage.
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``match`` method')
classifications = []
# COUNT NUMBER OF TRANSIENT TO CROSSMATCH
numberOfTransients = len(self.transients)
count = 0
# GRAB SEARCH ALGORITHM
sa = self.settings["search algorithm"]
# FOR EACH TRANSIENT SOURCE IN THE LIST ...
allCatalogueMatches = []
catalogueMatches = []
nonSynonymTransients = self.transients[:]
# SYNONYM SEARCHES
# ITERATE THROUGH SEARCH ALGORITHM IN ORDER
# PRESENTED IN THE SETTINGS FILE
brightnessFilters = ["bright", "faint", "general"]
for search_name, searchPara in sa.iteritems():
for bf in brightnessFilters:
if bf not in searchPara:
continue
if "synonym" not in searchPara[bf] or searchPara[bf]["synonym"] == False:
continue
self.log.debug(""" searching: %(search_name)s""" % locals())
if "physical radius kpc" in searchPara[bf]:
# THE PHYSICAL SEPARATION SEARCHES
self.log.debug(
'checking physical distance crossmatches in %(search_name)s' % locals())
catalogueMatches = self.physical_separation_crossmatch_against_catalogue(
objectList=self.transients,
searchPara=searchPara,
search_name=search_name + " physical",
brightnessFilter=bf,
classificationType="synonym"
)
else:
# THE ANGULAR SEPARATION SEARCHES
self.log.debug(
'Crossmatching against %(search_name)s' % locals())
# RENAMED from searchCatalogue
catalogueMatches = self.angular_crossmatch_against_catalogue(
objectList=self.transients,
searchPara=searchPara,
search_name=search_name + " angular",
brightnessFilter=bf,
classificationType="synonym"
)
# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND
if catalogueMatches:
allCatalogueMatches = allCatalogueMatches + catalogueMatches
synonymIDs = []
synonymIDs[:] = [xm["transient_object_id"]
for xm in allCatalogueMatches]
nonSynonymTransients = []
nonSynonymTransients[:] = [
t for t in self.transients if t["id"] not in synonymIDs]
# ASSOCIATION SEARCHES
# ITERATE THROUGH SEARCH ALGORITHM IN ORDER
# PRESENTED IN THE SETTINGS FILE
if len(nonSynonymTransients) > 0:
for search_name, searchPara in sa.iteritems():
self.log.debug(""" searching: %(search_name)s""" % locals())
for bf in brightnessFilters:
if bf not in searchPara:
continue
if "association" not in searchPara[bf] or searchPara[bf]["association"] == False:
continue
if "physical radius kpc" in searchPara[bf]:
# THE PHYSICAL SEPARATION SEARCHES
self.log.debug(
'checking physical distance crossmatches in %(search_name)s' % locals())
catalogueMatches = self.physical_separation_crossmatch_against_catalogue(
objectList=nonSynonymTransients,
searchPara=searchPara,
search_name=search_name + " physical",
brightnessFilter=bf,
classificationType="association"
)
else:
# THE ANGULAR SEPARATION SEARCHES
self.log.debug(
'Crossmatching against %(search_name)s' % locals())
# RENAMED from searchCatalogue
catalogueMatches = self.angular_crossmatch_against_catalogue(
objectList=nonSynonymTransients,
searchPara=searchPara,
search_name=search_name + " angular",
brightnessFilter=bf,
classificationType="association"
)
# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND
if catalogueMatches:
allCatalogueMatches = allCatalogueMatches + catalogueMatches
catalogueMatches = []
associationIDs = []
associationIDs[:] = [xm["transient_object_id"]
for xm in allCatalogueMatches]
nonAssociationTransients = []
nonAssociationTransients[:] = [
t for t in self.transients if t["id"] not in associationIDs]
# ANNOTATION SEARCHES
# ITERATE THROUGH SEARCH ALGORITHM IN ORDER
# PRESENTED IN THE SETTINGS FILE
brightnessFilters = ["bright", "faint", "general"]
for search_name, searchPara in sa.iteritems():
for bf in brightnessFilters:
if bf not in searchPara:
continue
if "annotation" not in searchPara[bf] or searchPara[bf]["annotation"] == False:
continue
self.log.debug(""" searching: %(search_name)s""" % locals())
if "physical radius kpc" in searchPara[bf]:
# THE PHYSICAL SEPARATION SEARCHES
self.log.debug(
'checking physical distance crossmatches in %(search_name)s' % locals())
if bf in searchPara:
catalogueMatches = self.physical_separation_crossmatch_against_catalogue(
objectList=nonAssociationTransients,
searchPara=searchPara,
search_name=search_name + " physical",
brightnessFilter=bf,
classificationType="annotation"
)
else:
# THE ANGULAR SEPARATION SEARCHES
self.log.debug(
'Crossmatching against %(search_name)s' % locals())
# RENAMED from searchCatalogue
if bf in searchPara:
catalogueMatches = self.angular_crossmatch_against_catalogue(
objectList=nonAssociationTransients,
searchPara=searchPara,
search_name=search_name + " angular",
brightnessFilter=bf,
classificationType="annotation"
)
# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND
if catalogueMatches:
allCatalogueMatches = allCatalogueMatches + catalogueMatches
self.log.debug('completed the ``match`` method')
return allCatalogueMatches | 0.00272 |
def user_search_results(self):
""" Add [member] to a user title if user is a member
of current workspace
"""
results = super(SharingView, self).user_search_results()
ws = IWorkspace(self.context)
roles_mapping = ws.available_groups
roles = roles_mapping.get(self.context.participant_policy.title())
for result in results:
if result["id"] in ws.members:
groups = ws.get(result["id"]).groups
for role in roles:
result["roles"][role] = "acquired"
if "Admins" in groups:
title = "administrator"
result["roles"]["TeamManager"] = "acquired"
else:
title = "member"
result["title"] = "%s [%s]" % (result["title"], title)
return results | 0.002291 |
def get_embedding_weights_from_file(word_dict, file_path, ignore_case=False):
"""Load pre-trained embeddings from a text file.
Each line in the file should look like this:
word feature_dim_1 feature_dim_2 ... feature_dim_n
The `feature_dim_i` should be a floating point number.
:param word_dict: A dict that maps words to indices.
:param file_path: The location of the text file containing the pre-trained embeddings.
:param ignore_case: Whether ignoring the case of the words.
:return weights: A numpy array.
"""
pre_trained = {}
with codecs.open(file_path, 'r', 'utf8') as reader:
for line in reader:
line = line.strip()
if not line:
continue
parts = line.split()
if ignore_case:
parts[0] = parts[0].lower()
pre_trained[parts[0]] = list(map(float, parts[1:]))
embd_dim = len(next(iter(pre_trained.values())))
weights = [[0.0] * embd_dim for _ in range(max(word_dict.values()) + 1)]
for word, index in word_dict.items():
if not word:
continue
if ignore_case:
word = word.lower()
if word in pre_trained:
weights[index] = pre_trained[word]
else:
weights[index] = numpy.random.random((embd_dim,)).tolist()
return numpy.asarray(weights) | 0.001449 |
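An end-to-end sketch of the loader above using a throwaway embeddings file (file contents and words are made up; assumes the module's codecs/numpy imports):

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    emb_path = os.path.join(tmp, 'toy_embeddings.txt')
    with open(emb_path, 'w', encoding='utf8') as fh:
        fh.write('hello 0.1 0.2 0.3\n')
        fh.write('world 0.4 0.5 0.6\n')
    word_dict = {'': 0, 'hello': 1, 'world': 2, 'unknown': 3}
    weights = get_embedding_weights_from_file(word_dict, emb_path)
    print(weights.shape)  # (4, 3): row 0 stays zero, 'unknown' gets random values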
def gradient(self):
"""Gradient of the compositon according to the chain rule."""
func = self.left
op = self.right
class FunctionalCompositionGradient(Operator):
"""Gradient of the compositon according to the chain rule."""
def __init__(self):
"""Initialize a new instance."""
super(FunctionalCompositionGradient, self).__init__(
op.domain, op.domain, linear=False)
def _call(self, x):
"""Apply the gradient operator to the given point."""
return op.derivative(x).adjoint(func.gradient(op(x)))
def derivative(self, x):
"""The derivative in point ``x``.
This is only defined if the inner operator is linear.
"""
if not op.is_linear:
raise NotImplementedError('derivative only implemented '
'for linear operators.')
else:
return (op.adjoint * func.gradient * op).derivative(x)
return FunctionalCompositionGradient() | 0.001779 |
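The chain rule used in `_call` is, for a linear inner operator A, grad(f o A)(x) = A^T grad f(Ax) (the adjoint of the derivative applied to the outer gradient). A small numpy sanity check of that identity (illustrative, not part of the library):

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(3, 2))        # linear "operator" as a matrix
x = rng.normal(size=2)
f = lambda y: 0.5 * np.dot(y, y)   # f(y) = ||y||^2 / 2, so grad f(y) = y

chain_rule_grad = A.T @ (A @ x)    # A^T grad f(Ax)

# Central-difference check of d/dx f(A x)
eps = 1e-6
numeric = np.array([(f(A @ (x + eps * e)) - f(A @ (x - eps * e))) / (2 * eps)
                    for e in np.eye(2)])
print(np.allclose(chain_rule_grad, numeric, atol=1e-5))  # True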
def _get_download_or_cache(filename, data_home=None,
url=SESAR_RRLYRAE_URL,
force_download=False):
"""Private utility to download and/or load data from disk cache."""
# Import here so astroML is not required at package level
from astroML.datasets.tools import get_data_home
if data_home is None:
data_home = get_data_home(data_home)
data_home = os.path.join(data_home, 'Sesar2010')
if not os.path.exists(data_home):
os.makedirs(data_home)
src_url = SESAR_RRLYRAE_URL + filename
save_loc = os.path.join(data_home, filename)
if force_download or not os.path.exists(save_loc):
fhandle = urlopen(src_url)
with open(save_loc, 'wb') as cache:
cache.write(fhandle.read())
return save_loc | 0.001218 |
def pilot_PLL(xr,fq,fs,loop_type,Bn,zeta):
"""
theta, phi_error = pilot_PLL(xr,fq,fs,loop_type,Bn,zeta)
Mark Wickert, April 2014
"""
T = 1/float(fs)
# Set the VCO gain in Hz/V
Kv = 1.0
# Design a lowpass filter to remove the double freq term
Norder = 5
b_lp,a_lp = signal.butter(Norder,2*(fq/2.)/float(fs))
fstate = np.zeros(Norder) # LPF state vector
Kv = 2*np.pi*Kv # convert Kv in Hz/v to rad/s/v
if loop_type == 1:
# First-order loop parameters
fn = Bn
Kt = 2*np.pi*fn # loop natural frequency in rad/s
elif loop_type == 2:
# Second-order loop parameters
fn = 1/(2*np.pi)*2*Bn/(zeta + 1/(4*zeta)) # given Bn in Hz
Kt = 4*np.pi*zeta*fn # loop natural frequency in rad/s
a = np.pi*fn/zeta
else:
raise ValueError('Loop type must be 1 or 2')
# Initialize integration approximation filters
filt_in_last = 0
filt_out_last = 0
vco_in_last = 0
vco_out = 0
vco_out_last = 0
# Initialize working and final output vectors
n = np.arange(0,len(xr))
theta = np.zeros(len(xr))
ev = np.zeros(len(xr))
phi_error = np.zeros(len(xr))
# Normalize total power in an attempt to make the 19kHz sinusoid
# component have amplitude ~1.
#xr = xr/(2/3*std(xr));
# Begin the simulation loop
for kk in range(len(n)):
# Sinusoidal phase detector (simple multiplier)
phi_error[kk] = 2*xr[kk]*np.sin(vco_out)
# LPF to remove double frequency term
phi_error[kk],fstate = signal.lfilter(b_lp,a_lp,np.array([phi_error[kk]]),zi=fstate)
pd_out = phi_error[kk]
#pd_out = 0
# Loop gain
gain_out = Kt/Kv*pd_out # apply VCO gain at VCO
# Loop filter
if loop_type == 2:
filt_in = a*gain_out
filt_out = filt_out_last + T/2.*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
filt_out = filt_out + gain_out
else:
filt_out = gain_out
# VCO
vco_in = filt_out + fq/(Kv/(2*np.pi)) # bias to quiescent freq.
vco_out = vco_out_last + T/2.*(vco_in + vco_in_last)
vco_in_last = vco_in
vco_out_last = vco_out
vco_out = Kv*vco_out # apply Kv
# Measured loop signals
ev[kk] = filt_out
theta[kk] = np.mod(vco_out,2*np.pi); # The vco phase mod 2pi
return theta,phi_error | 0.011844 |
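A hedged usage sketch for the loop above (parameter values are only illustrative; assumes numpy and scipy.signal are imported as the function requires): track a clean 19 kHz pilot tone sampled at 96 kHz with a second-order loop.

import numpy as np

fs = 96000.0
fq = 19000.0
t = np.arange(0, 0.05, 1 / fs)               # 50 ms of signal
xr = np.cos(2 * np.pi * fq * t)              # noiseless pilot tone
theta, phi_error = pilot_PLL(xr, fq, fs, loop_type=2, Bn=10.0, zeta=0.707)
print(theta.shape, float(np.abs(phi_error[-1])))  # phase estimate and residual error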
def explicitLogout(self, session):
"""
Handle a user-requested logout.
Here we override guard's behaviour for the logout action to delete the
persistent session. In this case the user has explicitly requested a
logout, so the persistent session must be deleted to require the user
to log in on the next request.
@type session: L{nevow.guard.GuardSession}
@param session: The session of the user logging out.
"""
guard.SessionWrapper.explicitLogout(self, session)
self.removeSessionWithKey(session.uid) | 0.003378 |
def get_all_comments_of_incoming(self, incoming_id):
"""
Get all comments of incoming
This will iterate over all pages until it gets all elements.
So if the rate limit is exceeded it will throw an Exception and you will get nothing
:param incoming_id: the incoming id
:return: list
"""
return self._iterate_through_pages(
get_function=self.get_comments_of_incoming_per_page,
resource=INCOMING_COMMENTS,
**{'incoming_id': incoming_id}
) | 0.005545 |
def __is_valid(loc_data):
"""Determine if this can be valid data (not all 0's)."""
for key, [value, func] in SENSOR_TYPES.items():
if (key != CONDITION and key != STATIONNAME and key != MEASURED):
if (func is not None):
sens_data = loc_data.get(value)
if func(sens_data) != 0:
return True | 0.002717 |
def fetch():
"""
Fetches the latest exchange rate info from the European Central Bank. These
rates need to be used for displaying invoices since some countries require
local currency be quoted. Also useful to store the GBP rate of the VAT
collected at time of purchase to prevent fluctuations in exchange rates from
significantly altering the amount of tax due the HMRC (if you are using them
for VAT MOSS).
:return:
A dict with string keys that are currency codes and values that are
Decimals of the exchange rate with the base (1.0000) being the Euro
(EUR). The following currencies are included, based on this library
being built for EU and Norway VAT, plus USD for the author:
- BGN
- CZK
- DKK
- EUR
- GBP
- HUF
- HRK
- NOK
- PLN
- RON
- SEK
- USD
"""
response = urlopen('https://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml')
_, params = cgi.parse_header(response.headers['Content-Type'])
if 'charset' in params:
encoding = params['charset']
else:
encoding = 'utf-8'
return_xml = response.read().decode(encoding)
# Example return data
#
# <gesmes:Envelope xmlns:gesmes="http://www.gesmes.org/xml/2002-08-01" xmlns="http://www.ecb.int/vocabulary/2002-08-01/eurofxref">
# <gesmes:subject>Reference rates</gesmes:subject>
# <gesmes:Sender>
# <gesmes:name>European Central Bank</gesmes:name>
# </gesmes:Sender>
# <Cube>
# <Cube time="2015-01-09">
# <Cube currency="USD" rate="1.1813"/>
# <Cube currency="JPY" rate="140.81"/>
# <Cube currency="BGN" rate="1.9558"/>
# <Cube currency="CZK" rate="28.062"/>
# <Cube currency="DKK" rate="7.4393"/>
# <Cube currency="GBP" rate="0.77990"/>
# <Cube currency="HUF" rate="317.39"/>
# <Cube currency="PLN" rate="4.2699"/>
# <Cube currency="RON" rate="4.4892"/>
# <Cube currency="SEK" rate="9.4883"/>
# <Cube currency="CHF" rate="1.2010"/>
# <Cube currency="NOK" rate="9.0605"/>
# <Cube currency="HRK" rate="7.6780"/>
# <Cube currency="RUB" rate="72.8910"/>
# <Cube currency="TRY" rate="2.7154"/>
# <Cube currency="AUD" rate="1.4506"/>
# <Cube currency="BRL" rate="3.1389"/>
# <Cube currency="CAD" rate="1.3963"/>
# <Cube currency="CNY" rate="7.3321"/>
# <Cube currency="HKD" rate="9.1593"/>
# <Cube currency="IDR" rate="14925.34"/>
# <Cube currency="ILS" rate="4.6614"/>
# <Cube currency="INR" rate="73.6233"/>
# <Cube currency="KRW" rate="1290.29"/>
# <Cube currency="MXN" rate="17.3190"/>
# <Cube currency="MYR" rate="4.2054"/>
# <Cube currency="NZD" rate="1.5115"/>
# <Cube currency="PHP" rate="53.090"/>
# <Cube currency="SGD" rate="1.5789"/>
# <Cube currency="THB" rate="38.846"/>
# <Cube currency="ZAR" rate="13.6655"/>
# </Cube>
# </Cube>
# </gesmes:Envelope>
# If we don't explicitly recode to UTF-8, ElementTree stupidly uses
# ascii on Python 2.7
envelope = ElementTree.fromstring(return_xml.encode('utf-8'))
namespaces = {
'gesmes': 'http://www.gesmes.org/xml/2002-08-01',
'eurofxref': 'http://www.ecb.int/vocabulary/2002-08-01/eurofxref'
}
date_elements = envelope.findall('./eurofxref:Cube/eurofxref:Cube[@time]', namespaces)
if not date_elements:
# Fail loudly if the XML seems to have changed
raise WebServiceError('Unable to find <Cube time=""> tag in ECB XML')
date = date_elements[0].get('time')
if not isinstance(date, str_cls):
date = date.decode('utf-8')
currency_elements = envelope.findall('./eurofxref:Cube/eurofxref:Cube/eurofxref:Cube[@currency][@rate]', namespaces)
if not currency_elements:
# Fail loudly if the XML seems to have changed
raise WebServiceError('Unable to find <Cube currency="" rate=""> tags in ECB XML')
rates = {
'EUR': Decimal('1.0000')
}
applicable_currencies = {
'BGN': True,
'CZK': True,
'DKK': True,
'EUR': True,
'GBP': True,
'HRK': True,
'HUF': True,
'NOK': True,
'PLN': True,
'RON': True,
'SEK': True,
'USD': True
}
for currency_element in currency_elements:
code = currency_element.attrib.get('currency')
if code not in applicable_currencies:
continue
rate = currency_element.attrib.get('rate')
rates[code] = Decimal(rate)
return (date, rates) | 0.00159 |
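A short sketch of using the returned rates (hard-coded sample values so it runs offline): every rate is quoted per 1 EUR, so converting between two non-euro currencies goes through the euro.

from decimal import Decimal

# Illustrative stand-in for the second element returned by fetch()
rates = {'EUR': Decimal('1.0000'), 'GBP': Decimal('0.7799'), 'USD': Decimal('1.1813')}

amount_usd = Decimal('100.00')
amount_eur = amount_usd / rates['USD']       # USD -> EUR
amount_gbp = amount_eur * rates['GBP']       # EUR -> GBP
print(amount_gbp.quantize(Decimal('0.01')))  # roughly 66.02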
def session(self, auth=None):
"""Get a dict of the current authenticated user's session information.
:param auth: Tuple of username and password.
:type auth: Optional[Tuple[str,str]]
:rtype: User
"""
url = '{server}{auth_url}'.format(**self._options)
if isinstance(self._session.auth, tuple) or auth:
if not auth:
auth = self._session.auth
username, password = auth
authentication_data = {'username': username, 'password': password}
r = self._session.post(url, data=json.dumps(authentication_data))
else:
r = self._session.get(url)
user = User(self._options, self._session, json_loads(r))
return user | 0.002628 |
def path(self, value):
"""
Setter for 'path' property
Args:
value (str): Absolute path to scan
"""
if not value.endswith('/'):
self._path = '{v}/'.format(v=value)
else:
self._path = value | 0.052381 |
def p_ObjectSyntax(self, p):
"""ObjectSyntax : SimpleSyntax
| conceptualTable
| row
| entryType
| ApplicationSyntax
| typeTag SimpleSyntax"""
n = len(p)
if n == 2:
p[0] = p[1]
elif n == 3:
p[0] = p[2] | 0.005319 |
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
"""Convert the job description to init params that can be handled by the class constructor
Args:
job_details: the returned job details from a describe_training_job API call.
model_channel_name (str): Name of the channel where pre-trained model data will be downloaded
Returns:
dictionary: The transformed init_params
"""
init_params = super(Estimator, cls)._prepare_init_params_from_job_description(job_details, model_channel_name)
init_params['image_name'] = init_params.pop('image')
return init_params | 0.010145 |
def predict_mappings(self, mappings):
"""
This function is used to predict the remote ports that a NAT
will map a local connection to. It requires the NAT type to
be determined before use. Current support for preserving and
delta type mapping behaviour.
"""
if self.nat_type not in self.predictable_nats:
msg = "Can't predict mappings for non-predictable NAT type."
raise Exception(msg)
for mapping in mappings:
mapping["bound"] = mapping["sock"].getsockname()[1]
if self.nat_type == "preserving":
mapping["remote"] = mapping["source"]
if self.nat_type == "delta":
max_port = 65535
mapping["remote"] = int(mapping["source"]) + self.delta
# Overflow or underflow = wrap port around.
if mapping["remote"] > max_port:
mapping["remote"] -= max_port
if mapping["remote"] < 0:
mapping["remote"] = max_port - -mapping["remote"]
# Unknown error.
if mapping["remote"] < 1 or mapping["remote"] > max_port:
mapping["remote"] = 1
mapping["remote"] = str(mapping["remote"])
return mappings | 0.001481 |
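A standalone sketch of the delta-mapping arithmetic above: the predicted remote port is the source port plus the measured delta, wrapped back into the 1-65535 range (hypothetical helper, not part of the class):

def predict_delta_port(source_port, delta, max_port=65535):
    remote = source_port + delta
    if remote > max_port:       # wrap over the top of the port range
        remote -= max_port
    if remote < 0:              # wrap under the bottom of the port range
        remote = max_port - -remote
    if remote < 1 or remote > max_port:
        remote = 1
    return remote

print(predict_delta_port(65530, 10))  # wraps to 5
print(predict_delta_port(20, -30))    # wraps to 65525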
def color(self, code):
"""
When color is given as a number, apply that color to the content
While this is designed to support 256 color terminals, Windows will approximate
this with 16 colors
"""
def func(content=''):
return self._apply_color(u'38;5;%d' % code, content)
return func | 0.008547 |
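A minimal standalone sketch of the escape sequence being built above: '38;5;<code>' selects a 256-color foreground and '\x1b[0m' resets it (only meaningful on terminals with 256-color support):

def colorize(code, content):
    # Hypothetical helper mirroring the '38;5;<code>' foreground escape
    return '\x1b[38;5;%dm%s\x1b[0m' % (code, content)

print(colorize(160, 'red-ish'), colorize(40, 'green-ish'))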
def _Backward3_T_Ps(P, s):
"""Backward equation for region 3, T=f(P,s)
Parameters
----------
P : float
Pressure, [MPa]
s : float
Specific entropy, [kJ/kgK]
Returns
-------
T : float
Temperature, [K]
"""
sc = 4.41202148223476
if s <= sc:
T = _Backward3a_T_Ps(P, s)
else:
T = _Backward3b_T_Ps(P, s)
return T | 0.002506 |
def gather_cache(self):
'''
Gather the specified data from the minion data cache
'''
cache = {'grains': {}, 'pillar': {}}
if self.grains or self.pillar:
if self.opts.get('minion_data_cache'):
minions = self.cache.list('minions')
if not minions:
return cache
for minion in minions:
total = self.cache.fetch('minions/{0}'.format(minion), 'data')
if 'pillar' in total:
if self.pillar_keys:
for key in self.pillar_keys:
if key in total['pillar']:
cache['pillar'].setdefault(minion, {})[key] = total['pillar'][key]
else:
cache['pillar'][minion] = total['pillar']
else:
cache['pillar'][minion] = {}
if 'grains' in total:
if self.grain_keys:
for key in self.grain_keys:
if key in total['grains']:
cache['grains'].setdefault(minion, {})[key] = total['grains'][key]
else:
cache['grains'][minion] = total['grains']
else:
cache['grains'][minion] = {}
return cache | 0.003463 |
def smeft_toarray(wc_name, wc_dict):
"""Construct a numpy array with Wilson coefficient values from a
dictionary of label-value pairs corresponding to the non-redundant
elements."""
shape = smeftutil.C_keys_shape[wc_name]
C = np.zeros(shape, dtype=complex)
for k, v in wc_dict.items():
if k.split('_')[0] != wc_name:
continue
indices = k.split('_')[-1] # e.g. '1213'
indices = tuple(int(s) - 1 for s in indices) # e.g. (1, 2, 1, 3)
C[indices] = v
C = smeftutil.symmetrize({wc_name: C})[wc_name]
return C | 0.001721 |
def uncancel_invoice(self, invoice_id):
"""
Uncancels an invoice
:param invoice_id: the invoice id
"""
return self._create_put_request(
resource=INVOICES,
billomat_id=invoice_id,
command=UNCANCEL,
) | 0.007018 |
def _handle_fetch_response(self, request, send_time, response):
"""The callback for fetch completion"""
fetch_offsets = {}
for topic, partitions in request.topics:
for partition_data in partitions:
partition, offset = partition_data[:2]
fetch_offsets[TopicPartition(topic, partition)] = offset
partitions = set([TopicPartition(topic, partition_data[0])
for topic, partitions in response.topics
for partition_data in partitions])
metric_aggregator = FetchResponseMetricAggregator(self._sensors, partitions)
# randomized ordering should improve balance for short-lived consumers
random.shuffle(response.topics)
for topic, partitions in response.topics:
random.shuffle(partitions)
for partition_data in partitions:
tp = TopicPartition(topic, partition_data[0])
completed_fetch = CompletedFetch(
tp, fetch_offsets[tp],
response.API_VERSION,
partition_data[1:],
metric_aggregator
)
self._completed_fetches.append(completed_fetch)
if response.API_VERSION >= 1:
self._sensors.fetch_throttle_time_sensor.record(response.throttle_time_ms)
self._sensors.fetch_latency.record((time.time() - send_time) * 1000) | 0.002745 |
async def log_transaction(self, **params):
"""Writing transaction to database
"""
if params.get("message"):
params = json.loads(params.get("message", "{}"))
if not params:
return {"error":400, "reason":"Missed required fields"}
coinid = params.get("coinid")
if not coinid in ["QTUM", "PUT"]:
return {"error":400, "reason": "Missed or invalid coinid"}
database = client[settings.TXS]
source_collection = database[coinid]
await source_collection.find_one_and_update({"txid":params.get("txid")},{"$set":{
"blocknumber":params.get("blocknumber"),
"blockhash":params.get("blockhash"),
"gasLimit":params.get("gasLimit"),
"gasPrice":params.get("gasPrice"),
}})
return {"success":True} | 0.047814 |
def register(self, func, singleton=False, threadlocal=False, name=None):
"""
Register a dependency function
"""
func._giveme_singleton = singleton
func._giveme_threadlocal = threadlocal
if name is None:
name = func.__name__
self._registered[name] = func
return func | 0.005848 |
def absolute_values(df, *, column: str, new_column: str = None):
"""
Get the absolute numeric value of each element of a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column
*optional :*
- `new_column` (*str*): name of the column containing the result.
By default, no new column will be created and `column` will be replaced.
---
### Example
**Input**
| ENTITY | VALUE_1 | VALUE_2 |
|:------:|:-------:|:-------:|
| A | -1.512 | -1.504 |
| A | 0.432 | 0.14 |
```cson
absolute_values:
column: 'VALUE_1'
new_column: 'Pika'
```
**Output**
| ENTITY | VALUE_1 | VALUE_2 | Pika |
|:------:|:-------:|:-------:|:-----:|
| A | -1.512 | -1.504 | 1.512 |
| A | 0.432 | 0.14 | 0.432 |
"""
new_column = new_column or column
df[new_column] = abs(df[column])
return df | 0.00105 |
def addChild(self, item):
"""
Adds a new child item to this item.
:param item | <XGanttWidgetItem>
"""
super(XGanttWidgetItem, self).addChild(item)
item.sync() | 0.016807 |
def read(self, size = None):
"""Reads a given number of characters from the response.
:param size: The number of characters to read, or "None" to read the
entire response.
:type size: ``integer`` or "None"
"""
r = self._buffer
self._buffer = b''
if size is not None:
size -= len(r)
r = r + self._response.read(size)
return r | 0.009479 |
def dskrb2(vrtces, plates, corsys, corpar):
"""
Determine range bounds for a set of triangular plates to
be stored in a type 2 DSK segment.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskrb2_c.html
:param vrtces: Vertices
:type vrtces: NxM-Element Array of floats
:param plates: Plates
:type plates: NxM-Element Array of ints
:param corsys: DSK coordinate system code
:type corsys: int
:param corpar: DSK coordinate system parameters
:type corpar: N-Element Array of floats
:return: Lower and Upper bound on range of third coordinate
:rtype: tuple
"""
nv = ctypes.c_int(len(vrtces))
vrtces = stypes.toDoubleMatrix(vrtces)
np = ctypes.c_int(len(plates))
plates = stypes.toIntMatrix(plates)
corsys = ctypes.c_int(corsys)
corpar = stypes.toDoubleVector(corpar)
mncor3 = ctypes.c_double(0.0)
mxcor3 = ctypes.c_double(0.0)
libspice.dskrb2_c(nv, vrtces, np, plates, corsys, corpar, ctypes.byref(mncor3), ctypes.byref(mxcor3))
return mncor3.value, mxcor3.value | 0.003717 |
def subprocess_func(func, pipe, logger, mem_in_mb, cpu_time_limit_in_s, wall_time_limit_in_s, num_procs, grace_period_in_s, tmp_dir, *args, **kwargs):
# simple signal handler to catch the signals for time limits
def handler(signum, frame):
# logs message with level debug on this logger
logger.debug("signal handler: %i"%signum)
if (signum == signal.SIGXCPU):
# when process reaches soft limit --> a SIGXCPU signal is sent (it normally terminates the process)
raise(CpuTimeoutException)
elif (signum == signal.SIGALRM):
# SIGALRM is sent to process when the specified time limit to an alarm function elapses (when real or clock time elapses)
logger.debug("timeout")
raise(TimeoutException)
raise AnythingException
# temporary directory to store stdout and stderr
if tmp_dir is not None:
logger.debug('Redirecting output of the function to files. Access them via the stdout and stderr attributes of the wrapped function.')
stdout = open(os.path.join(tmp_dir, 'std.out'), 'a', buffering=1)
sys.stdout=stdout
stderr = open(os.path.join(tmp_dir, 'std.err'), 'a', buffering=1)
sys.stderr=stderr
# catching all signals at this point turned out to interfere with the subprocess (e.g. using ROS)
signal.signal(signal.SIGALRM, handler)
signal.signal(signal.SIGXCPU, handler)
signal.signal(signal.SIGQUIT, handler)
# code to catch EVERY catchable signal (even X11 related ones ... )
# only use for debugging/testing as this seems to be too intrusive.
"""
for i in [x for x in dir(signal) if x.startswith("SIG")]:
try:
signum = getattr(signal,i)
print("register {}, {}".format(signum, i))
signal.signal(signum, handler)
except:
print("Skipping %s"%i)
"""
# set the memory limit
if mem_in_mb is not None:
# byte --> megabyte
mem_in_b = mem_in_mb*1024*1024
# the maximum area (in bytes) of address space which may be taken by the process.
resource.setrlimit(resource.RLIMIT_AS, (mem_in_b, mem_in_b))
# for now: don't allow the function to spawn subprocesses itself.
#resource.setrlimit(resource.RLIMIT_NPROC, (1, 1))
# Turns out, this is quite restrictive, so we don't use this option by default
if num_procs is not None:
resource.setrlimit(resource.RLIMIT_NPROC, (num_procs, num_procs))
# schedule an alarm in specified number of seconds
if wall_time_limit_in_s is not None:
signal.alarm(wall_time_limit_in_s)
if cpu_time_limit_in_s is not None:
# From the Linux man page:
# When the process reaches the soft limit, it is sent a SIGXCPU signal.
# The default action for this signal is to terminate the process.
# However, the signal can be caught, and the handler can return control
# to the main program. If the process continues to consume CPU time,
# it will be sent SIGXCPU once per second until the hard limit is reached,
# at which time it is sent SIGKILL.
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time_limit_in_s,cpu_time_limit_in_s+grace_period_in_s))
# the actual function call
try:
logger.debug("call function")
return_value = ((func(*args, **kwargs), 0))
logger.debug("function returned properly: {}".format(return_value))
except MemoryError:
return_value = (None, MemorylimitException)
except OSError as e:
if (e.errno == 11):
return_value = (None, SubprocessException)
else:
return_value = (None, AnythingException)
except CpuTimeoutException:
return_value = (None, CpuTimeoutException)
except TimeoutException:
return_value = (None, TimeoutException)
except AnythingException as e:
return_value = (None, AnythingException)
except:
raise
logger.debug("Some wired exception occured!")
finally:
try:
logger.debug("return value: {}".format(return_value))
pipe.send(return_value)
pipe.close()
except:
# this part should only fail if the parent process is already dead, so there is not much to do anymore :)
pass
finally:
# recursively kill all children
p = psutil.Process()
for child in p.children(recursive=True):
child.kill() | 0.029799 |
def get_all_credit_notes(self, params=None):
"""
Get all credit notes
This will iterate over all pages until it gets all elements.
So if the rate limit is exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
"""
if not params:
params = {}
return self._iterate_through_pages(self.get_credit_notes_per_page, resource=CREDIT_NOTES, **{'params': params}) | 0.008282 |
def define_plugin_entries(groups):
"""
helper to all groups for plugins
"""
result = dict()
for group, modules in groups:
tempo = []
for module_name, names in modules:
tempo.extend([define_plugin_entry(name, module_name)
for name in names])
result[group] = tempo
return result | 0.002747 |
def pullup(self, pin, enabled):
"""Turn on the pull-up resistor for the specified pin if enabled is True,
otherwise turn off the pull-up resistor.
"""
self._validate_channel(pin)
if enabled:
self.gppu[int(pin/8)] |= 1 << (int(pin%8))
else:
self.gppu[int(pin/8)] &= ~(1 << (int(pin%8)))
self._write_gppu() | 0.013021 |
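The register math above packs one bit per pin into 8-bit registers; a standalone sketch of setting or clearing bit pin % 8 of byte pin // 8 (hypothetical helper):

def set_pullup_bit(registers, pin, enabled):
    byte_index, bit = pin // 8, pin % 8
    if enabled:
        registers[byte_index] |= 1 << bit
    else:
        registers[byte_index] &= ~(1 << bit)
    return registers

print(set_pullup_bit([0x00, 0x00], 10, True))  # pin 10 -> byte 1, bit 2 -> [0, 4]
print(set_pullup_bit([0xFF, 0xFF], 3, False))  # pin 3  -> byte 0, bit 3 -> [247, 255]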
def new_thing(self, name, **stats):
"""Create a new thing, located here, and return it."""
return self.character.new_thing(
name, self.name, **stats
) | 0.010753 |
def bulk(iterable, index=INDEX_NAME, doc_type=DOC_TYPE, action='index'):
"""
Wrapper of elasticsearch's bulk method
Converts an interable of models to document operations and submits them to
Elasticsearch. Returns a count of operations when done.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.bulk
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
"""
actions = compact(dict_to_op(
to_dict(model),
index_name=INDEX_NAME,
doc_type=DOC_TYPE,
op_type=action,
) for model in iterable)
# fail fast if there are no actions
if not actions:
return 0
items, _ = es_bulk(es_conn, actions, doc_type=doc_type, index=index)
return items | 0.001264 |
def data_response(self):
"""
Returns the 1d array of the data elements being fitted (with masking applied)
:return: 1d numpy array
"""
d = []
for i in range(self._num_bands):
if self._compute_bool[i] is True:
d_i = self._imageModel_list[i].data_response
if d == []:
d = d_i
else:
d = np.append(d, d_i)
return d | 0.006397 |
def raise_event_handler_log_entry(self, command):
"""Raise SERVICE EVENT HANDLER entry (critical level)
Format is : "SERVICE EVENT HANDLER: *host_name*;*self.get_name()*;*state*;*state_type*
;*attempt*;*command.get_name()*"
Example : "SERVICE EVENT HANDLER: server;Load;UP;HARD;1;notify-by-rss"
:param command: Handler launched
:type command: alignak.objects.command.Command
:return: None
"""
if not self.__class__.log_event_handlers:
return
log_level = 'info'
if self.state == 'WARNING':
log_level = 'warning'
if self.state == 'CRITICAL':
log_level = 'error'
brok = make_monitoring_log(
log_level, "SERVICE EVENT HANDLER: %s;%s;%s;%s;%s;%s" % (
self.host_name, self.get_name(),
self.state, self.state_type,
self.attempt, command.get_name()
)
)
self.broks.append(brok) | 0.00297 |
def report_run():
"""Reports data for a run for a release candidate."""
build = g.build
release, run = _get_or_create_run(build)
db.session.refresh(run, lockmode='update')
current_url = request.form.get('url', type=str)
current_image = request.form.get('image', type=str)
current_log = request.form.get('log', type=str)
current_config = request.form.get('config', type=str)
ref_url = request.form.get('ref_url', type=str)
ref_image = request.form.get('ref_image', type=str)
ref_log = request.form.get('ref_log', type=str)
ref_config = request.form.get('ref_config', type=str)
diff_failed = request.form.get('diff_failed', type=str)
diff_image = request.form.get('diff_image', type=str)
diff_log = request.form.get('diff_log', type=str)
distortion = request.form.get('distortion', default=None, type=float)
run_failed = request.form.get('run_failed', type=str)
if current_url:
run.url = current_url
if current_image:
run.image = current_image
if current_log:
run.log = current_log
if current_config:
run.config = current_config
if current_image or current_log or current_config:
logging.info('Saving run data: build_id=%r, release_name=%r, '
'release_number=%d, run_name=%r, url=%r, '
'image=%r, log=%r, config=%r, run_failed=%r',
build.id, release.name, release.number, run.name,
run.url, run.image, run.log, run.config, run_failed)
if ref_url:
run.ref_url = ref_url
if ref_image:
run.ref_image = ref_image
if ref_log:
run.ref_log = ref_log
if ref_config:
run.ref_config = ref_config
if ref_image or ref_log or ref_config:
logging.info('Saved reference data: build_id=%r, release_name=%r, '
'release_number=%d, run_name=%r, ref_url=%r, '
'ref_image=%r, ref_log=%r, ref_config=%r',
build.id, release.name, release.number, run.name,
run.ref_url, run.ref_image, run.ref_log, run.ref_config)
if diff_image:
run.diff_image = diff_image
if diff_log:
run.diff_log = diff_log
if distortion:
run.distortion = distortion
if diff_image or diff_log:
logging.info('Saved pdiff: build_id=%r, release_name=%r, '
'release_number=%d, run_name=%r, diff_image=%r, '
'diff_log=%r, diff_failed=%r, distortion=%r',
build.id, release.name, release.number, run.name,
run.diff_image, run.diff_log, diff_failed, distortion)
if run.image and run.diff_image:
run.status = models.Run.DIFF_FOUND
elif run.image and run.ref_image and not run.diff_log:
run.status = models.Run.NEEDS_DIFF
elif run.image and run.ref_image and not diff_failed:
run.status = models.Run.DIFF_NOT_FOUND
elif run.image and not run.ref_config:
run.status = models.Run.NO_DIFF_NEEDED
elif run_failed or diff_failed:
run.status = models.Run.FAILED
else:
# NOTE: Intentionally do not transition state here in the default case.
# We allow multiple background workers to be writing to the same Run in
# parallel updating its various properties.
pass
# TODO: Verify the build has access to both the current_image and
# the reference_sha1sum so they can't make a diff from a black image
# and still see private data in the diff image.
if run.status == models.Run.NEEDS_DIFF:
task_id = '%s:%s:%s' % (run.id, run.image, run.ref_image)
logging.info('Enqueuing pdiff task=%r', task_id)
work_queue.add(
constants.PDIFF_QUEUE_NAME,
payload=dict(
build_id=build.id,
release_name=release.name,
release_number=release.number,
run_name=run.name,
run_sha1sum=run.image,
reference_sha1sum=run.ref_image,
),
build_id=build.id,
release_id=release.id,
run_id=run.id,
source='report_run',
task_id=task_id)
# Flush the run so querying for Runs in _check_release_done_processing
# will find the new run too and we won't deadlock.
db.session.add(run)
db.session.flush()
_check_release_done_processing(release)
db.session.commit()
signals.run_updated_via_api.send(
app, build=build, release=release, run=run)
logging.info('Updated run: build_id=%r, release_name=%r, '
'release_number=%d, run_name=%r, status=%r',
build.id, release.name, release.number, run.name, run.status)
return flask.jsonify(success=True) | 0.000207 |
def fetch_metadata(url, path, maxage=600):
"""
:param url: metadata remote location
:param path: metadata file name
:param maxage: if max age of existing metadata file (s) is exceeded,
the file will be fetched from the remote location
"""
fetch = False
if not os.path.isfile(path):
fetch = True
logger.debug("metadata file %s not found", path)
elif (os.path.getmtime(path) + maxage) < time.time():
fetch = True
logger.debug("metadata file %s from %s is more than %s s old",
path,
strftime("%Y-%m-%d %H:%M:%S", time.localtime(os.path.getmtime(path))),
maxage)
else:
logger.debug("metadata file %s is less than %s s old", path, maxage)
if fetch:
f=urllib.URLopener()
try:
f.retrieve(url, path)
logger.debug("downloaded metadata from %s into %s", url, path)
except:
logger.debug("downloaded metadata from %s failed: %s",
url, sys.exc_info()[0]) | 0.003724 |
def soldOutForRole(event, role):
'''
This tag allows one to determine whether any event is sold out for any
particular role.
'''
if not isinstance(event, Event) or not isinstance(role, DanceRole):
return None
return event.soldOutForRole(role) | 0.003559 |
def fields_for_model(self, model, include_fk=False, fields=None,
exclude=None, base_fields=None, dict_cls=dict):
"""
Overridden to correctly name hybrid_property fields, eg given::
class User(db.Model):
_password = db.Column('password', db.String)
@db.hybrid_property
def password(self):
return self._password
@password.setter
def password(self, password):
self._password = hash_password(password)
In this case upstream marshmallow_sqlalchemy uses '_password' for the
field name, but we use 'password', as would be expected because it's
the attribute name used for the public interface of the Model. In order
for this logic to work, the column name must be specified and it must be
the same as the hybrid property name. Otherwise we just fallback to the
upstream naming convention.
"""
# this prevents an error when building the docs
if not hasattr(model, '__mapper__'):
return
result = dict_cls()
base_fields = base_fields or {}
for prop in model.__mapper__.iterate_properties:
if self._should_exclude_field(prop, fields=fields, exclude=exclude):
continue
attr_name = prop.key
if hasattr(prop, 'columns'):
if not include_fk:
# Only skip a column if there is no overridden column
# which does not have a Foreign Key.
for column in prop.columns:
if not column.foreign_keys:
break
else:
continue
col_name = prop.columns[0].name
if attr_name != col_name and hasattr(model, col_name):
attr_name = col_name
field = base_fields.get(attr_name) or self.property2field(prop)
if field:
result[attr_name] = field
return result | 0.00235 |
def _hijack_target(self):
"""Replaces the target method on the target object with the proxy method."""
if self._target.is_class_or_module():
setattr(self._target.obj, self._method_name, self)
elif self._attr.kind == 'property':
proxy_property = ProxyProperty(
double_name(self._method_name),
self._original_method,
)
setattr(self._target.obj.__class__, self._method_name, proxy_property)
self._target.obj.__dict__[double_name(self._method_name)] = self
else:
self._target.obj.__dict__[self._method_name] = self
if self._method_name in ['__call__', '__enter__', '__exit__']:
self._target.hijack_attr(self._method_name) | 0.005161 |
def chdir(directory):
"""Change the current working directory.
Args:
directory (str): Directory to go to.
"""
directory = os.path.abspath(directory)
logger.info("chdir -> %s" % directory)
try:
if not os.path.isdir(directory):
logger.error(
"chdir -> %s failed! Directory does not exist!", directory
)
return False
os.chdir(directory)
return True
except Exception as e:
logger.error("chdir -> %s failed! %s" % (directory, e))
return False | 0.001767 |
def templatesCollector(text, open, close):
"""leaves related articles and wikitables in place"""
others = []
spans = [i for i in findBalanced(text, open, close)]
spanscopy = copy(spans)
for i in range(len(spans)):
start, end = spans[i]
o = text[start:end]
ol = o.lower()
if 'vaata|' in ol or 'wikitable' in ol:
spanscopy.remove(spans[i])
continue
others.append(o)
text = dropSpans(spanscopy, text)
return text, others | 0.001957 |
def com_google_fonts_check_metadata_canonical_filename(font_metadata,
canonical_filename,
is_variable_font):
"""METADATA.pb: Filename is set canonically?"""
if is_variable_font:
valid_varfont_suffixes = [
("Roman-VF", "Regular"),
("Italic-VF", "Italic"),
]
for valid_suffix, style in valid_varfont_suffixes:
if style in canonical_filename:
canonical_filename = valid_suffix.join(canonical_filename.split(style))
if canonical_filename != font_metadata.filename:
yield FAIL, ("METADATA.pb: filename field (\"{}\")"
" does not match "
"canonical name \"{}\".".format(font_metadata.filename,
canonical_filename))
else:
yield PASS, "Filename in METADATA.pb is set canonically." | 0.006515 |
def completion(ctx):
'''Generate bash completion script'''
header(completion.__doc__)
with ctx.cd(ROOT):
ctx.run('_bumpr_COMPLETE=source bumpr > bumpr-complete.sh', pty=True)
success('Completion generated in bumpr-complete.sh') | 0.003984 |
def list(self, path, mimetype=None):
"""Yield two-tuples for all files found in the directory given by
``path`` parameter. Result can be filtered by the second parameter,
``mimetype``, that must be a MIME type of assets compiled source code.
Each tuple has :class:`~gears.asset_attributes.AssetAttributes`
instance for found file path as first item, and absolute path to this
file as second item.
Usage example::
# Yield all files from 'js/templates' directory.
environment.list('js/templates/*')
# Yield only files that are in 'js/templates' directory and have
# 'application/javascript' MIME type of compiled source code.
environment.list('js/templates/*', mimetype='application/javascript')
"""
basename_pattern = os.path.basename(path)
if path.endswith('**'):
paths = [path]
else:
paths = AssetAttributes(self, path).search_paths
paths = map(lambda p: p if p.endswith('*') else p + '*', paths)
results = unique(self._list_paths(paths), lambda x: x[0])
for logical_path, absolute_path in results:
asset_attributes = AssetAttributes(self, logical_path)
if mimetype is not None and asset_attributes.mimetype != mimetype:
continue
basename = os.path.basename(asset_attributes.path_without_suffix)
if not fnmatch(basename, basename_pattern) and basename != 'index':
continue
yield asset_attributes, absolute_path | 0.001868 |
def run(argv=None): # pragma: no cover
"""Run the HTTP server
Usage:
httpserver [options] [<folder>]
Options::
-h,--host=<hostname> What host name to serve (default localhost)
-a,--bindaddress=<address> Address to bind to (default 127.0.0.1)
-p,--port=<port> Port to listen on (default 8080)
-v,--verbose Increase verbosity to INFO messages
-d,--debug Increase verbosity to DEBUG messages
--help Print this help message
--version Print the version
To serve /path/to/www on all (ipv4) addresses for host myserver
on port 80::
httpserver -a 0.0.0.0 -p 80 -h myserver /path/to/www
"""
import sys
import os
import docopt
import textwrap
# Check for the version
if not sys.version_info >= (3, 4):
print('This python version is not supported. Please use python 3.4')
exit(1)
argv = argv or sys.argv[1:]
# remove some RST formatting
docblock = run.__doc__.replace('::', ':')
args = docopt.docopt(textwrap.dedent(docblock), argv)
if args['--version']:
print("httpserver version {} by {}".format(
__version__,
__author__))
exit(0)
# Set up logging
level = logging.WARNING
if args['--verbose']:
level = logging.INFO
if args['--debug']:
level = logging.DEBUG
logging.basicConfig(level=level)
logger = logging.getLogger('run method')
logger.debug('CLI args: %s' % args)
bindaddr = args['--bindaddress'] or '127.0.0.1'
port = args['--port'] or '8080'
folder = args['<folder>'] or os.getcwd()
hostname = args['--host'] or 'localhost'
_start_server(bindaddr, port, hostname, folder) | 0.000547 |
def _get_resource_id_from_stack(cfn_client, stack_name, logical_id):
"""
Given the LogicalID of a resource, call AWS CloudFormation to get physical ID of the resource within
the specified stack.
Parameters
----------
cfn_client
CloudFormation client provided by AWS SDK
stack_name : str
Name of the stack to query
logical_id : str
LogicalId of the resource
Returns
-------
str
Physical ID of the resource
Raises
------
samcli.commands.exceptions.UserException
If the stack or resource does not exist
"""
LOG.debug("Getting resource's PhysicalId from AWS CloudFormation stack. StackName=%s, LogicalId=%s",
stack_name, logical_id)
try:
response = cfn_client.describe_stack_resource(StackName=stack_name, LogicalResourceId=logical_id)
LOG.debug("Response from AWS CloudFormation %s", response)
return response["StackResourceDetail"]["PhysicalResourceId"]
except botocore.exceptions.ClientError as ex:
LOG.debug("Unable to fetch resource name from CloudFormation Stack: "
"StackName=%s, ResourceLogicalId=%s, Response=%s", stack_name, logical_id, ex.response)
# The exception message already has a well formatted error message that we can surface to user
raise UserException(str(ex)) | 0.005316 |
def all_but_blocks(names, data, newline="\n", remove_empty_next=True,
remove_comments=True):
"""
Multiline string from a list of strings data, removing every
block with any of the given names, as well as their delimiters.
Removes the empty lines after BLOCK_END when ``remove_empty_next``
is True. Returns a joined string with the given newline, or a
line generator if it's None. If desired, this function use
``commentless`` internally to remove the remaining comments.
"""
@allow_implicit_stop
def remove_blocks(name, iterable):
start, end = BLOCK_START % name, BLOCK_END % name
it = iter(iterable)
while True:
line = next(it)
while line != start:
yield line
line = next(it)
it = tail(itertools.dropwhile(not_eq(end), it))
if remove_empty_next:
it = itertools.dropwhile(lambda el: not el.strip(), it)
if isinstance(names, str):
names = [names]
processors = [functools.partial(remove_blocks, name) for name in names]
if remove_comments:
processors.append(commentless)
gen = functools.reduce(lambda result, func: func(result),
processors, data)
return gen if newline is None else newline.join(gen) | 0.000747 |
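A minimal usage sketch of the block removal above, assuming the module's BLOCK_START/BLOCK_END templates render to plain marker lines such as "# [secret]" and "# [/secret]" (hypothetical values; the real templates are defined elsewhere in the module):
# Illustration only: the marker strings below are assumed, not the module's real templates.
lines = [
    "keep this line",
    "# [secret]",      # assumed BLOCK_START % "secret"
    "hidden line",
    "# [/secret]",     # assumed BLOCK_END % "secret"
    "",
    "keep this too",
]
print(all_but_blocks("secret", lines, remove_comments=False))
# keep this line
# keep this too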
def language_model(self, verbose=True):
        """ builds a Tamil bigram letter model """
        # use a generator in corpus
        prev = None
        for next_letter in self.corpus.next_tamil_letter():
            # update frequency from corpus
            if prev:
                self.letter2[prev][next_letter] += 1
                if verbose:
                    print(prev)
                    print(next_letter)
                    print(self.letter2[prev][next_letter])
            prev = next_letter  # update always
return | 0.018248 |
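A small standalone sketch (separate from the class above) of how a nested bigram count table like self.letter2 can be normalized into conditional probabilities P(next | prev):
def bigram_probabilities(letter2):
    """letter2 maps prev -> {next: count}; returns prev -> {next: probability}."""
    probs = {}
    for prev, nexts in letter2.items():
        total = sum(nexts.values())
        if total:
            probs[prev] = {nxt: count / total for nxt, count in nexts.items()}
    return probs

print(bigram_probabilities({"a": {"b": 3, "c": 1}}))   # {'a': {'b': 0.75, 'c': 0.25}}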
def partital_dict(self, with_name=True):
"""Returns the name as a dict, but with only the items that are
particular to a PartitionName."""
d = self._dict(with_name=False)
d = {k: d.get(k) for k, _, _ in PartialPartitionName._name_parts if d.get(k, False)}
if 'format' in d and d['format'] == Name.DEFAULT_FORMAT:
del d['format']
d['name'] = self.name
return d | 0.006944 |
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
values = index._values
if not isinstance(index, (PeriodIndex, DatetimeIndex)):
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
# TODO(https://github.com/pandas-dev/pandas/issues/24206)
# Push this into maybe_upcast_putmask?
# We can't pass EAs there right now. Looks a bit
# complicated.
# So we unbox the ndarray_values, op, re-box.
values_type = type(values)
values_dtype = values.dtype
if issubclass(values_type, DatetimeLikeArray):
values = values._data
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
if issubclass(values_type, DatetimeLikeArray):
values = values_type(values, dtype=values_dtype)
return values
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.codes)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj | 0.000318 |
def _get_categorical_score(
self,
profile: List,
negated_classes: List,
categories: List,
negation_weight: Optional[float] = 1,
ic_map: Optional[Dict[str, float]] = None) -> float:
"""
The average of the simple scores across a list of categories
"""
if ic_map is None:
ic_map = self.ic_store.get_profile_ic(profile + negated_classes)
scores = []
for cat in categories:
if cat not in self.ic_store.category_statistics:
raise ValueError("statistics for {} not indexed".format(cat))
pos_profile = [cls for cls in profile
if cls in self.ic_store.category_statistics[cat].descendants]
neg_profile = [cls for cls in negated_classes
if cls in self.ic_store.category_statistics[cat].descendants]
# Note that we're deviating from the publication
# to match the reference java implementation where
# mean_max_ic is replaced by max_max_ic
scores.append(self._get_simple_score(
pos_profile, neg_profile,
self.ic_store.category_statistics[cat].mean_mean_ic,
self.ic_store.category_statistics[cat].max_max_ic,
self.ic_store.category_statistics[cat].mean_sum_ic,
negation_weight, ic_map
))
return mean(scores) | 0.002694 |
def _load_prefix_binding(self):
"""
Load the prefix key binding.
"""
pymux = self.pymux
# Remove previous binding.
if self._prefix_binding:
self.custom_key_bindings.remove_binding(self._prefix_binding)
# Create new Python binding.
@self.custom_key_bindings.add(*self._prefix, filter=
~(HasPrefix(pymux) | has_focus(COMMAND) | has_focus(PROMPT) |
WaitsForConfirmation(pymux)))
def enter_prefix_handler(event):
" Enter prefix mode. "
pymux.get_client_state().has_prefix = True
self._prefix_binding = enter_prefix_handler | 0.006033 |
def bootstrap(name, user=None):
'''
Bootstraps a frontend distribution.
Will execute 'bower install' on the specified directory.
user
The user to run Bower with
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Directory \'{0}\' is set to be bootstrapped'.format(
name)
return ret
try:
call = __salt__['bower.install'](pkg=None, dir=name, runas=user)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = False
ret['comment'] = 'Error bootstrapping \'{0}\': {1}'.format(name, err)
return ret
if not call:
ret['result'] = True
ret['comment'] = 'Directory is already bootstrapped'
return ret
ret['result'] = True
ret['changes'] = {name: 'Bootstrapped'}
ret['comment'] = 'Directory was successfully bootstrapped'
return ret | 0.001012 |
def _import_symbol(import_path, setting_name):
"""
Import a class or function by name.
"""
mod_name, class_name = import_path.rsplit('.', 1)
# import module
try:
mod = import_module(mod_name)
cls = getattr(mod, class_name)
except ImportError as e:
__, __, exc_traceback = sys.exc_info()
frames = traceback.extract_tb(exc_traceback)
if len(frames) > 1:
raise # import error is a level deeper.
raise ImproperlyConfigured("{0} does not point to an existing class: {1}".format(setting_name, import_path))
except AttributeError:
raise ImproperlyConfigured("{0} does not point to an existing class: {1}".format(setting_name, import_path))
return cls | 0.003989 |
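A minimal usage sketch; the second argument is only used to name the setting in the error message (the setting name here is made up):
encoder_cls = _import_symbol('json.JSONEncoder', 'EXAMPLE_ENCODER_SETTING')
print(encoder_cls)   # <class 'json.encoder.JSONEncoder'>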
def _muaprocessnew(self):
"""Moves all 'new' files into cur, correctly flagging"""
foldername = self._foldername("new")
files = self.filesystem.listdir(foldername)
for filename in files:
if filename == "":
continue
curfilename = self._foldername(joinpath("new", filename))
newfilename = joinpath(
self._cur,
"%s:2,%s" % (filename, "")
)
self.filesystem.rename(curfilename, newfilename) | 0.003781 |
def lock(tmp_dir, timeout=NOT_SET, min_wait=None, max_wait=None, verbosity=1):
"""Obtain lock.
Obtain lock access by creating a given temporary directory (whose base
will be created if needed, but will not be deleted after the lock is
removed). If access is refused by the same lock owner during more than
'timeout' seconds, then the current lock is overridden. If timeout is
None, then no timeout is performed.
The lock is performed by creating a 'lock' file in 'tmp_dir' that
contains a unique id identifying the owner of the lock (the process
id, followed by a random string).
When there is already a lock, the process sleeps for a random amount
of time between min_wait and max_wait seconds before trying again.
If 'verbosity' is >= 1, then a message will be displayed when we need
to wait for the lock. If it is set to a value >1, then this message
will be displayed each time we re-check for the presence of the lock.
Otherwise it is displayed only when we notice the lock's owner has
changed.
Parameters
----------
    tmp_dir : str
Lock directory that will be created when acquiring the lock.
timeout : int
Time (in seconds) to wait before replacing an existing lock.
min_wait : int
Minimum time (in seconds) to wait before trying again to get the
lock.
max_wait : int
Maximum time (in seconds) to wait before trying again to get the
lock (default 2 * min_wait).
verbosity : int
Amount of feedback displayed to screen (default 1).
"""
if min_wait is None:
min_wait = MIN_WAIT
if max_wait is None:
max_wait = min_wait * 2
if timeout is NOT_SET:
timeout = TIMEOUT
# Create base of lock directory if required.
base_lock = os.path.dirname(tmp_dir)
if not os.path.isdir(base_lock):
try:
os.makedirs(base_lock)
except OSError:
# Someone else was probably trying to create it at the same time.
# We wait two seconds just to make sure the following assert does
# not fail on some NFS systems.
time.sleep(2)
assert os.path.isdir(base_lock)
# Variable initialization.
lock_file = os.path.join(tmp_dir, 'lock')
random.seed()
my_pid = os.getpid()
no_display = (verbosity == 0)
nb_error = 0
    # The number of times we have slept while there were no errors.
    # Used to skip the waiting message the first time and to display it less
    # frequently afterwards, so we do not generate as much email about it.
nb_wait = 0
# Acquire lock.
while True:
try:
last_owner = 'no_owner'
time_start = time.time()
other_dead = False
while os.path.isdir(tmp_dir):
try:
with open(lock_file) as f:
read_owner = f.readlines()[0].strip()
# The try is transition code for old locks.
# It may be removed when people have upgraded.
try:
other_host = read_owner.split('_')[2]
except IndexError:
other_host = () # make sure it isn't equal to any host
if other_host == hostname:
try:
# Just check if the other process still exist.
os.kill(int(read_owner.split('_')[0]), 0)
except OSError:
other_dead = True
except AttributeError:
pass # os.kill does not exist on windows
except Exception:
read_owner = 'failure'
if other_dead:
if not no_display:
msg = "process '%s'" % read_owner.split('_')[0]
logger.warning("Overriding existing lock by dead %s "
"(I am process '%s')", msg, my_pid)
get_lock.unlocker.unlock()
continue
if last_owner == read_owner:
if (timeout is not None and
time.time() - time_start >= timeout):
# Timeout exceeded or locking process dead.
if not no_display:
if read_owner == 'failure':
msg = 'unknown process'
else:
msg = "process '%s'" % read_owner.split('_')[0]
logger.warning("Overriding existing lock by %s "
"(I am process '%s')", msg, my_pid)
get_lock.unlocker.unlock()
continue
else:
last_owner = read_owner
time_start = time.time()
no_display = (verbosity == 0)
if not no_display and nb_wait > 0:
if read_owner == 'failure':
msg = 'unknown process'
else:
msg = "process '%s'" % read_owner.split('_')[0]
logger.info("Waiting for existing lock by %s (I am "
"process '%s')", msg, my_pid)
logger.info("To manually release the lock, delete %s",
tmp_dir)
if verbosity <= 1:
no_display = True
nb_wait += 1
time.sleep(random.uniform(min_wait, max_wait))
try:
os.mkdir(tmp_dir)
except OSError:
# Error while creating the directory: someone else
# must have tried at the exact same time.
nb_error += 1
if nb_error < 10:
continue
else:
raise
# Safety check: the directory should be here.
assert os.path.isdir(tmp_dir)
# Write own id into lock file.
unique_id = refresh_lock(lock_file)
# Verify we are really the lock owner (this should not be needed,
# but better be safe than sorry).
with open(lock_file) as f:
owner = f.readlines()[0].strip()
if owner != unique_id:
# Too bad, try again.
continue
else:
# We got the lock, hoorray!
return
except Exception as e:
# If something wrong happened, we try again.
logger.warning("Something wrong happened: %s %s", type(e), e)
nb_error += 1
if nb_error > 10:
raise
time.sleep(random.uniform(min_wait, max_wait))
continue | 0.000144 |
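A simplified standalone sketch of the ownership check used above (create the directory, write a unique id into the lock file, read it back, and only treat the lock as acquired if the id is still ours); this illustrates the idea and is not the module's refresh_lock()/unlocker API:
import os
import random

def try_acquire(tmp_dir):
    # Illustration of the lock-file ownership pattern, not the real implementation.
    lock_file = os.path.join(tmp_dir, 'lock')
    try:
        os.mkdir(tmp_dir)                      # fails if someone else holds the lock
    except OSError:
        return None
    unique_id = '%d_%d' % (os.getpid(), random.randrange(10 ** 9))
    with open(lock_file, 'w') as f:
        f.write(unique_id + '\n')
    with open(lock_file) as f:                 # re-read to verify we really own it
        owner = f.readline().strip()
    return unique_id if owner == unique_id else None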
def rmse(a, b):
"""Returns the root mean square error betwwen a and b
"""
return np.sqrt(np.square(a - b).mean()) | 0.008 |
def _truncated_power_method(self, A, x0, k, max_iter=10000, thresh=1e-8):
'''
given a matrix A, an initial guess x0, and a maximum cardinality k,
find the best k-sparse approximation to its dominant eigenvector
References
----------
[1] Yuan, X-T. and Zhang, T. "Truncated Power Method for Sparse Eigenvalue Problems."
Journal of Machine Learning Research. Vol. 14. 2013.
http://www.jmlr.org/papers/volume14/yuan13a/yuan13a.pdf
'''
xts = [x0]
for t in range(max_iter):
xts.append(self._normalize(self._truncate(np.dot(A, xts[-1]), k)))
if np.linalg.norm(xts[-1] - xts[-2]) < thresh: break
return xts[-1] | 0.005517 |
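A self-contained sketch of the same truncated power iteration on a small symmetric matrix; the inline truncate step stands in for the class's _truncate/_normalize helpers:
import numpy as np

def truncated_power_method(A, x0, k, max_iter=1000, thresh=1e-8):
    def truncate(v, k):
        out = np.zeros_like(v)
        top = np.argsort(np.abs(v))[-k:]       # keep only the k largest-magnitude entries
        out[top] = v[top]
        return out
    x = x0 / np.linalg.norm(x0)
    for _ in range(max_iter):
        x_new = truncate(A @ x, k)
        x_new = x_new / np.linalg.norm(x_new)
        if np.linalg.norm(x_new - x) < thresh:
            break
        x = x_new
    return x_new

A = np.diag([5.0, 3.0, 1.0])
print(truncated_power_method(A, np.ones(3), k=1))   # approximately [1. 0. 0.]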
def check(self, **kwargs): # pragma: no cover
"""Calls the TimeZoneField's custom checks."""
errors = super(TimeZoneField, self).check(**kwargs)
errors.extend(self._check_timezone_max_length_attribute())
errors.extend(self._check_choices_attribute())
return errors | 0.006536 |
def project(*descs, root_file=None):
"""
Make a new project, using recursion and alias resolution.
Use this function in preference to calling Project() directly.
"""
load.ROOT_FILE = root_file
desc = merge.merge(merge.DEFAULT_PROJECT, *descs)
path = desc.get('path', '')
if root_file:
project_path = os.path.dirname(root_file)
if path:
path += ':' + project_path
else:
path = project_path
with load.extender(path):
desc = recurse.recurse(desc)
project = construct.construct(**desc)
project.desc = desc
return project | 0.0016 |
def _validate_alias_file_content(alias_file_path, url=''):
"""
Make sure the alias name and alias command in the alias file is in valid format.
Args:
        alias_file_path: The alias file path to import aliases from.
        url: Optional URL shown in error messages in place of the file path.
"""
alias_table = get_config_parser()
try:
alias_table.read(alias_file_path)
for alias_name, alias_command in reduce_alias_table(alias_table):
_validate_alias_name(alias_name)
_validate_alias_command(alias_command)
_validate_alias_command_level(alias_name, alias_command)
_validate_pos_args_syntax(alias_name, alias_command)
except Exception as exception: # pylint: disable=broad-except
error_msg = CONFIG_PARSING_ERROR % AliasManager.process_exception_message(exception)
error_msg = error_msg.replace(alias_file_path, url or alias_file_path)
raise CLIError(error_msg) | 0.003378 |
def fit_predict(self, X, y=None, **kwargs):
"""Compute cluster centroids and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X, **kwargs).predict(X, **kwargs) | 0.007067 |
def rename(self, src, dst):
"""
Rename key ``src`` to ``dst``
"""
with self.pipe as pipe:
return pipe.rename(self.redis_key(src), self.redis_key(dst)) | 0.010309 |
def parse(readDataInstance):
"""
Returns a new L{ExportTableEntry} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{ExportTableEntry} object.
@rtype: L{ExportTableEntry}
@return: A new L{ExportTableEntry} object.
"""
exportEntry = ExportTableEntry()
exportEntry.functionRva.value = readDataInstance.readDword()
exportEntry.nameOrdinal.value = readDataInstance.readWord()
exportEntry.nameRva.value = readDataInstance.readDword()
exportEntry.name.value = readDataInstance.readString()
return exportEntry | 0.007194 |
def _schedule(self, delay: float, event: Callable, *args: Any, **kwargs: Any) -> int:
"""
Schedules a one-time event to be run along the simulation. The event is scheduled relative to current simulator
        time, so delay is expected to be a positive simulation time interval. The `event` parameter corresponds to a
        callable object (e.g. a function): it will be called so as to "execute" the event, with the positional and
        keyword parameters that follow `event` in the call to `_schedule()` (note that the values of these arguments are
        evaluated when `_schedule()` is called, not when the event is executed). Once this event function returns, the
simulation carries on to the next event, or stops if none remain.
Remark that this method is private, and is meant for internal usage by the :py:class:`Simulator` and
:py:class:`Process` classes, and helper functions of this module.
:return: Unique identifier for the scheduled event.
"""
if _logger is not None:
self._log(
DEBUG,
"schedule",
delay=delay,
fn=event,
args=args,
kwargs=kwargs,
counter=self._counter,
__now=self.now()
)
delay = float(delay)
if delay < 0.0:
raise ValueError("Delay must be positive.")
# Use counter to strictly order events happening at the same simulated time. This gives a total order on events,
# working around the heap queue not yielding a stable ordering.
id_event = self._counter
heappush(self._events, _Event(self._ts_now + delay, id_event, event, *args, **kwargs))
self._counter += 1
return id_event | 0.006087 |
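A small standalone sketch of why the (timestamp, counter) pair gives a stable total order for events scheduled at the same simulated time (plain tuples here stand in for the module's _Event objects):
import heapq

events = []
for counter, name in enumerate(("first", "second", "third")):
    heapq.heappush(events, (10.0, counter, name))   # identical timestamps
while events:
    ts, _, name = heapq.heappop(events)
    print(ts, name)   # pops in insertion order: first, second, third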
def docopt_arguments():
""" Creates beautiful command-line interfaces.
See https://github.com/docopt/docopt """
doc = """Projy: Create templated project.
Usage: projy <template> <project> [<substitution>...]
projy -i | --info <template>
projy -l | --list
projy -h | --help
projy -v | --version
Options:
-i, --info Print information on a specific template.
-l, --list Print available template list.
-h, --help Show this help message and exit.
-v, --version Show program's version number and exit.
"""
from projy.docopt import docopt
return docopt(doc, argv=sys.argv[1:], version='0.1') | 0.001399 |
def _syndromes(self, r, k=None):
'''Given the received codeword r in the form of a Polynomial object,
computes the syndromes and returns the syndrome polynomial.
        Mathematically, it's essentially equivalent to a Fourier transform (Chien search being the inverse).
'''
n = self.n
if not k: k = self.k
# Note the + [GF2int(0)] : we add a 0 coefficient for the lowest degree (the constant). This effectively shifts the syndrome, and will shift every computations depending on the syndromes (such as the errors locator polynomial, errors evaluator polynomial, etc. but not the errors positions).
# This is not necessary as anyway syndromes are defined such as there are only non-zero coefficients (the only 0 is the shift of the constant here) and subsequent computations will/must account for the shift by skipping the first iteration (eg, the often seen range(1, n-k+1)), but you can also avoid prepending the 0 coeff and adapt every subsequent computations to start from 0 instead of 1.
return Polynomial( [r.evaluate( GF2int(self.generator)**(l+self.fcr) ) for l in _range(n-k-1, -1, -1)] + [GF2int(0)], keep_zero=True ) | 0.010067 |
def get_field_errors(self, field):
"""
Return server side errors. Shall be overridden by derived forms to add their
extra errors for AngularJS.
"""
identifier = format_html('{0}[\'{1}\']', self.form_name, field.name)
errors = self.errors.get(field.html_name, [])
return self.error_class([SafeTuple(
(identifier, self.field_error_css_classes, '$pristine', '$pristine', 'invalid', e)) for e in errors]) | 0.008547 |
def main(*argv):
""" main driver of program """
try:
# Inputs
#
adminUsername = argv[0]
adminPassword = argv[1]
siteURL = argv[2]
groupTitle = argv[3]
groupTags = argv[4]
description = argv[5]
access = argv[6]
# Logic
#
# Connect to the site
#
sh = arcrest.AGOLTokenSecurityHandler(adminUsername, adminPassword)
admin = arcrest.manageorg.Administration(url=siteURL,
securityHandler=sh,
initialize=True)
community = admin.community
# Create Group
#
res = community.createGroup(title=groupTitle,
tags=groupTags,
description=description,
snippet="",
phone="",
access=access,
sortField="title",
sortOrder="asc",
isViewOnly=False,
isInvitationOnly=False,
thumbnail=None)
arcpy.SetParameterAsText(7, str(res))
except arcpy.ExecuteError:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror)
arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
    except FunctionError as f_e:
messages = f_e.args[0]
arcpy.AddError("error in function: %s" % messages["function"])
arcpy.AddError("error on line: %s" % messages["line"])
arcpy.AddError("error in file name: %s" % messages["filename"])
arcpy.AddError("with error message: %s" % messages["synerror"])
arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
except:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror) | 0.001748 |
def get_levels(version=None):
'''get_levels returns a dictionary of levels (key) and values (dictionaries with
descriptions and regular expressions for files) for the user.
:param version: the version of singularity to use (default is 2.2)
    :param include_files: files to add to the level, only relevant if
'''
valid_versions = ['2.3','2.2']
if version is None:
version = "2.3"
version = str(version)
if version not in valid_versions:
bot.error("Unsupported version %s, valid versions are %s" %(version,
",".join(valid_versions)))
levels_file = os.path.abspath(os.path.join(get_installdir(),
'analysis',
'reproduce',
'data',
'reproduce_levels.json'))
levels = read_json(levels_file)
if version == "2.2":
# Labels not added until 2.3
del levels['LABELS']
levels = make_levels_set(levels)
return levels | 0.007513 |
def streamToFile(self, filename, keepInMemory=False, writeRate=1):
        """Starts a stream to a file. Every line must be committed (l.commit()) to be appended to the file.
        If keepInMemory is set to True, the parser will keep a version of the whole CSV in memory; writeRate is the number
        of lines that must be committed before an automatic save is triggered.
        """
        if len(self.legend) < 1:
            raise ValueError("There's no legend defined")
        try:
            os.remove(filename)
        except OSError:
            pass  # the file may not exist yet
self.streamFile = open(filename, "a")
self.writeRate = writeRate
self.streamBuffer = []
self.keepInMemory = keepInMemory
self.streamFile.write(self.strLegend + "\n") | 0.044053 |
def location(self, pos):
"""Formats the location of the given SeqPos as:
filename:line:col:
"""
result = ''
if self.filename:
result += self.filename + ':'
if pos:
result += str(pos)
return result | 0.016529 |
def report(rel):
"""Fires if the machine is running Fedora."""
if "Fedora" in rel.product:
return make_pass("IS_FEDORA", product=rel.product)
else:
return make_fail("IS_NOT_FEDORA", product=rel.product) | 0.004329 |
def get_tile_images_by_rect(self, rect):
""" Speed up data access
More efficient because data is accessed and cached locally
"""
def rev(seq, start, stop):
if start < 0:
start = 0
return enumerate(seq[start:stop + 1], start)
x1, y1, x2, y2 = rect_to_bb(rect)
images = self.tmx.images
layers = self.tmx.layers
at = self._animated_tile
tracked_gids = self._tracked_gids
anim_map = self._animation_map
track = bool(self._animation_queue)
for l in self.tmx.visible_tile_layers:
for y, row in rev(layers[l].data, y1, y2):
for x, gid in [i for i in rev(row, x1, x2) if i[1]]:
# since the tile has been queried, assume it wants to be checked
# for animations sometime in the future
if track and gid in tracked_gids:
anim_map[gid].positions.add((x, y, l))
try:
# animated, so return the correct frame
yield x, y, l, at[(x, y, l)]
except KeyError:
# not animated, so return surface from data, if any
yield x, y, l, images[gid] | 0.003058 |
def __ComputeEndByte(self, start, end=None, use_chunks=True):
"""Compute the last byte to fetch for this request.
This is all based on the HTTP spec for Range and
Content-Range.
Note that this is potentially confusing in several ways:
* the value for the last byte is 0-based, eg "fetch 10 bytes
from the beginning" would return 9 here.
* if we have no information about size, and don't want to
use the chunksize, we'll return None.
See the tests for more examples.
Args:
start: byte to start at.
end: (int or None, default: None) Suggested last byte.
use_chunks: (bool, default: True) If False, ignore self.chunksize.
Returns:
Last byte to use in a Range header, or None.
"""
end_byte = end
if start < 0 and not self.total_size:
return end_byte
if use_chunks:
alternate = start + self.chunksize - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
if self.total_size:
alternate = self.total_size - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
return end_byte | 0.001441 |
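A standalone restatement of the same rule (a 0-based inclusive end byte, capped by the chunk size and by the known total size), handy for sanity-checking Range headers:
def compute_end_byte(start, end=None, chunksize=None, total_size=None):
    # Mirrors the logic above as a free function for illustration.
    end_byte = end
    if start < 0 and not total_size:
        return end_byte
    if chunksize is not None:
        alt = start + chunksize - 1
        end_byte = alt if end_byte is None else min(end_byte, alt)
    if total_size:
        alt = total_size - 1
        end_byte = alt if end_byte is None else min(end_byte, alt)
    return end_byte

print(compute_end_byte(0, chunksize=10))                 # 9
print(compute_end_byte(0, chunksize=10, total_size=5))   # 4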
def collectintargz(target, source, env):
""" Puts all source files into a tar.gz file. """
# the rpm tool depends on a source package, until this is changed
# this hack needs to be here that tries to pack all sources in.
sources = env.FindSourceFiles()
# filter out the target we are building the source list for.
sources = [s for s in sources if s not in target]
# find the .spec file for rpm and add it since it is not necessarily found
# by the FindSourceFiles function.
sources.extend( [s for s in source if str(s).rfind('.spec')!=-1] )
# sort to keep sources from changing order across builds
sources.sort()
# as the source contains the url of the source package this rpm package
# is built from, we extract the target name
tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
try:
tarball = env['SOURCE_URL'].split('/')[-1]
except KeyError as e:
raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )
tarball = src_targz.package(env, source=sources, target=tarball,
PACKAGEROOT=env['PACKAGEROOT'], )
return (target, tarball) | 0.005877 |
def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs):
'''
Install the named fileset(s)/rpm package(s).
name
The name of the fileset or rpm package to be installed.
refresh
Whether or not to update the yum database before executing.
Multiple Package Installation Options:
pkgs
A list of filesets and/or rpm packages to install.
Must be passed as a python list. The ``name`` parameter will be
ignored if this option is passed.
version
Install a specific version of a fileset/rpm package.
(Unused at present).
test
Verify that command functions correctly:
Returns a dict containing the new fileset(s)/rpm package(s) names and versions:
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm
salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True
salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff
salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte
salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base
salt '*' pkg.install pkgs='["foo", "bar"]'
'''
targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
if not targets:
return {}
if pkgs:
        log.debug('Installing these fileset(s)/rpm package(s) %s: %s', name, targets)
# Get a list of the currently installed pkgs.
old = list_pkgs()
# Install the fileset or rpm package(s)
errors = []
for target in targets:
filename = os.path.basename(target)
if filename.endswith('.rpm'):
if _is_installed_rpm(filename.split('.aix')[0]):
continue
cmdflags = ' -Uivh '
if test:
cmdflags += ' --test'
cmd = ['/usr/bin/rpm', cmdflags, target]
out = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
else:
if _is_installed(target):
continue
cmd = '/usr/sbin/installp -acYXg'
if test:
cmd += 'p'
cmd += ' -d '
dirpath = os.path.dirname(target)
cmd += dirpath +' '+ filename
out = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
if 0 != out['retcode']:
errors.append(out['stderr'])
    # Get a list of the packages after the install
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
'Problems encountered installing filesets(s)/package(s)',
info={
'changes': ret,
'errors': errors
}
)
# No error occurred
if test:
return 'Test succeeded.'
return ret | 0.003269 |
def create_small_thumbnail(self, token, item_id):
"""
Create a 100x100 small thumbnail for the given item. It is used for
preview purpose and displayed in the 'preview' and 'thumbnails'
sidebar sections.
:param token: A valid token for the user in question.
:type token: string
:param item_id: The item on which to set the thumbnail.
:type item_id: int | long
:returns: The item object (with the new thumbnail id) and the path
where the newly created thumbnail is stored.
:rtype: dict
"""
parameters = dict()
parameters['token'] = token
parameters['itemId'] = item_id
response = self.request(
'midas.thumbnailcreator.create.small.thumbnail', parameters)
return response | 0.002433 |
def reward_scope(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor],
next_state: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:
'''Returns the complete reward fluent scope for the
current `state`, `action` fluents, and `next_state` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
next_state (Sequence[tf.Tensor]): The next state fluents.
Returns:
A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
'''
scope = {}
scope.update(self.non_fluents_scope())
scope.update(self.state_scope(state))
scope.update(self.action_scope(action))
scope.update(self.next_state_scope(next_state))
return scope | 0.006719 |
def receiver(self, value):
"""Set receiver instance."""
assert isinstance(value, Receiver)
self.receiver_id = value.receiver_id | 0.013245 |
def circleconvert(amount, currentformat, newformat):
"""
Convert a circle measurement.
:type amount: number
:param amount: The number to convert.
:type currentformat: string
:param currentformat: The format of the provided value.
:type newformat: string
:param newformat: The intended format of the value.
>>> circleconvert(45, "radius", "diameter")
90
"""
# If the same format was provided
if currentformat.lower() == newformat.lower():
# Return the provided value
return amount
# If the lowercase version of the current format is 'radius'
if currentformat.lower() == 'radius':
# If the lowercase version of the new format is 'diameter'
if newformat.lower() == 'diameter':
# Return the converted value
return amount * 2
# If the lowercase version of the new format is 'circumference'
elif newformat.lower() == 'circumference':
# Return the converted value
return amount * 2 * math.pi
# Raise a warning
raise ValueError("Invalid new format provided.")
# If the lowercase version of the current format is 'diameter'
elif currentformat.lower() == 'diameter':
# If the lowercase version of the new format is 'radius'
if newformat.lower() == 'radius':
# Return the converted value
return amount / 2
# If the lowercase version of the new format is 'circumference'
elif newformat.lower() == 'circumference':
# Return the converted value
return amount * math.pi
# Raise a warning
raise ValueError("Invalid new format provided.")
# If the lowercase version of the current format is 'circumference'
elif currentformat.lower() == 'circumference':
# If the lowercase version of the new format is 'radius'
if newformat.lower() == 'radius':
# Return the converted value
return amount / math.pi / 2
# If the lowercase version of the new format is 'diameter'
elif newformat.lower() == 'diameter':
# Return the converted value
return amount / math.pi | 0.000452 |
def rewrite(self, block_address, new_bytes):
"""
Rewrites block with new bytes, keeping the old ones if None is passed. Tag and auth must be set - does auth.
Returns error state.
"""
if not self.is_tag_set_auth():
return True
error = self.do_auth(block_address)
if not error:
(error, data) = self.rfid.read(block_address)
if not error:
for i in range(len(new_bytes)):
                    if new_bytes[i] is not None:
if self.debug:
print("Changing pos " + str(i) + " with current value " + str(data[i]) + " to " + str(new_bytes[i]))
data[i] = new_bytes[i]
error = self.rfid.write(block_address, data)
if self.debug:
print("Writing " + str(data) + " to " + self.sector_string(block_address))
return error | 0.006336 |