def quadratic_2d(data):
"""
Compute the quadratic estimate of the centroid in a 2d-array.
Args:
data (2darray): two dimensional data array
Returns:
center (tuple): centroid estimate on the row and column directions,
respectively
"""
arg_data_max = np.argmax(data)
i, j = np.unravel_index(arg_data_max, data.shape)
z_ = data[i-1:i+2, j-1:j+2]
# our quadratic function is defined as
# f(x, y | a, b, c, d, e, f) := a + b * x + c * y + d * x^2 + e * xy + f * y^2
# therefore, the best-fit coefficients are given as
# note that they are unique and the uncertainty in each of them (#TODO) can be
# computed following the derivations done by Vakili & Hogg (2016) and
# Teague & Foreman-Mackey (2018)
try:
a = (-z_[0,0] + 2*z_[0,1] - z_[0,2] + 2*z_[1,0] + 5*z_[1,1] + 2*z_[1,2] -
z_[2,0] + 2*z_[2,1] - z_[2,2]) / 9
b = (-z_[0,0] - z_[0,1] - z_[0,2] + z_[2,0] + z_[2,1] + z_[2,2]) / 6
c = (-z_[0,0] + z_[0,2] - z_[1,0] + z_[1,2] - z_[2,0] + z_[2,2]) / 6
d = (z_[0,0] + z_[0,1] + z_[0,2] - z_[1,0]*2 - z_[1,1]*2 - z_[1,2]*2 +
z_[2,0] + z_[2,1] + z_[2,2])/6
e = (z_[0,0] - z_[0,2] - z_[2,0] + z_[2,2]) * .25
f = (z_[0,0] - 2 * z_[0,1] + z_[0,2] + z_[1,0] - 2 * z_[1,1] + z_[1,2] +
z_[2,0] - 2 * z_[2,1] + z_[2,2]) / 6
except IndexError:
return (i, j)
# see https://en.wikipedia.org/wiki/Quadratic_function
det = 4 * d * f - e ** 2
xm = - (2 * f * b - c * e) / det
ym = - (2 * d * c - b * e) / det
return (i+xm, j+ym)
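A minimal usage sketch (assuming numpy is imported as np, which the function body already requires): sample an exactly quadratic surface whose peak lies off the integer grid; the fit recovers it to sub-pixel precision.

import numpy as np

yy, xx = np.mgrid[0:9, 0:9]
data = -(yy - 4.3) ** 2 - (xx - 3.8) ** 2   # true peak at (4.3, 3.8)
row, col = quadratic_2d(data)
print(row, col)   # -> (4.3, 3.8) up to floating-point error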
def write_partial_map(filename, data, nside, coord=None, nest=False,
header=None, dtype=None, **kwargs):
"""
Partial HEALPix maps are used to efficiently store maps of the sky by only
writing out the pixels that contain data.
Three-dimensional data can be saved by supplying a distance modulus array
which is stored in a separate extension.
Parameters:
-----------
filename : output file name
data : dictionary or recarray of data to write (must contain 'PIXEL')
nside : healpix nside of data
coord : 'G'alactic, 'C'elestial, 'E'cliptic
nest : True for NESTED pixel ordering, False for RING
header : dict of additional header records to write
kwargs : Passed to fitsio.write
Returns:
--------
None
"""
# ADW: Do we want to make everything uppercase?
if isinstance(data,dict):
names = list(data.keys())
else:
names = data.dtype.names
if 'PIXEL' not in names:
msg = "'PIXEL' column not found."
raise ValueError(msg)
hdr = header_odict(nside=nside,coord=coord,nest=nest)
fitshdr = fitsio.FITSHDR(list(hdr.values()))
if header is not None:
for k,v in header.items():
fitshdr.add_record({'name':k,'value':v})
logger.info("Writing %s"%filename)
fitsio.write(filename,data,extname='PIX_DATA',header=fitshdr,clobber=True)
def check_tool_version(tool, required_version, blacklisted_versions=None, binary=False):
"""
This will ensure that the installed version of `tool` is at least `required_version`.
:param str tool: The tool under review
:param str required_version: The version of the tool required by ProTECT
:param list blacklisted_versions: Versions of the tool blacklisted by ProTECT
:param bool binary: Is the tool a binary
:return: None
"""
if binary:
try:
installed_version = subprocess.check_output([tool, '--version'],
stderr=subprocess.STDOUT)
except OSError as err:
if err.errno == errno.ENOENT:
raise RuntimeError('Is %s installed as a binary and present on your $PATH?' % tool)
else:
raise
installed_version = installed_version.rstrip()
else:
try:
module = __import__(tool + '.version')
except ImportError:
raise RuntimeError('Is %s installed as a library, and is it accessible in the same '
'environment as ProTECT?' % tool)
try:
installed_version = getattr(module, 'version').version
except AttributeError:
raise RuntimeError('Does %s have a version.py?' % tool)
if type(parse_version(installed_version)) == _LegacyVersion:
print('Detecting that the installed version of "%s"(%s) is probably based off a git commit '
'and assuming this build is for testing purposes. If this is not the case, please '
'try again with a valid version of "%s".' % (tool, installed_version, tool))
elif parse_version(installed_version) < parse_version(required_version):
raise RuntimeError('%s was detected to be version (%s) but ProTECT requires (%s)' %
(tool, installed_version, required_version))
if blacklisted_versions is not None:
if parse_version(installed_version) in [parse_version(v) for v in blacklisted_versions]:
raise RuntimeError('The version of %s was detected to be on the blacklist (%s).' %
(tool, installed_version))
def parse(self):
"""
Convert line to shape object
"""
log.debug(self)
self.parse_composite()
self.split_line()
self.convert_coordinates()
self.convert_meta()
self.make_shape()
log.debug(self)
def admin_tools_render_menu_css(context, menu=None):
"""
Template tag that renders the menu css files; it takes an optional
``Menu`` instance as its unique argument. If not given, the menu will be
retrieved with the ``get_admin_menu`` function.
"""
if menu is None:
menu = get_admin_menu(context)
context.update({
'template': 'admin_tools/menu/css.html',
'css_files': menu.Media.css,
})
return context
def rehome(old, new, struct):
"""
Replace the old absolute path prefix with the new one throughout the structure ("re-home" it)
"""
if old == new:
return
if isinstance(struct, list):
for item in struct:
rehome(old, new, item)
elif isinstance(struct, dict):
for key, val in struct.iteritems():
if isinstance(val, (dict, list)):
rehome(old, new, val)
elif "conf" in key:
continue
elif "orig" in key:
continue
elif "root" in key or "path" in key:
struct[key] = struct[key].replace(old, new)
def region_from_segment(image, segment):
"""given a segment (rectangle) and an image, returns it's corresponding subimage"""
x, y, w, h = segment
return image[y:y + h, x:x + w]
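A short usage sketch, assuming the image is a numpy array in row-major (y, x) order, as produced by e.g. OpenCV:

import numpy as np

image = np.zeros((100, 200, 3), dtype=np.uint8)   # stand-in for a loaded frame
segment = (10, 20, 50, 40)                        # x, y, width, height
sub = region_from_segment(image, segment)
print(sub.shape)                                  # (40, 50, 3): rows = h, cols = w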
def GET_parameteritemtypes(self) -> None:
"""Get the types of all current exchange items supposed to change
the values of |Parameter| objects."""
for item in state.parameteritems:
self._outputs[item.name] = self._get_itemtype(item)
def save(self, *args, **kwargs):
"""Customized to generate an image from the pdf file."""
# open image from pdf
img = Image(filename=self.file.path + '[0]')
# make new filename
filename = os.path.basename(self.file.path).split('.')[:-1]
if type(filename) == list:
filename = ''.join(filename)
# TODO: Would be better to compute this path from the upload_to
# setting which is already set on the model field
image_dir = os.path.join(
django_settings.MEDIA_ROOT, UPLOAD_TO_DIR)
if not os.path.exists(image_dir):
os.makedirs(image_dir)
image_path = os.path.join(
image_dir, '{}.jpg'.format(filename))
tmp_image_path = os.path.join(
image_dir, '{}.tmp.jpg'.format(filename))
# we remove the old image before we save because the cover might have
# changed when we upload a new PDF file - even when that file has the
# same filename as the old one
try:
os.remove(image_path)
except OSError:
# file is already gone
pass
# and we also remove the thumbnails
old_files = glob.glob('{}.*'.format(image_path))
for old_file in old_files:
try:
os.remove(old_file)
except OSError:
pass
# save as image under a temporary filename so that we can read it with
# File()
img.save(filename=tmp_image_path)
# attach it to image field
# open in binary mode: the temp file holds JPEG bytes, not text
with open(tmp_image_path, 'rb') as f:
self.image.save('{}.jpg'.format(filename), File(f), save=False)
super(PDFPluginModel, self).save(*args, **kwargs)
# remove temp file
try:
os.remove(tmp_image_path)
except OSError:
pass
def draw_if_interactive():
"""
This should be overridden in a windowing environment if drawing
should be done in interactive python mode
"""
DEBUG_MSG("draw_if_interactive()", 1, None)
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
def _matchremove_verb_endings(self, word):
"""Remove the verb endings"""
i_verb_endings = ['iuntur',
'erunt',
'untur',
'iunt',
'unt']
bi_verb_endings = ['beris',
'bor',
'bo']
eri_verb_endings = ['ero']
verb_endings = ['mini',
'ntur',
'stis',
'mur',
'mus',
'ris',
'sti',
'tis',
'tur',
'ns',
'nt',
'ri',
'm',
'r',
's',
't']
# replace i verb endings with i
for ending in i_verb_endings:
if word.endswith(ending):
word = re.sub(r'{0}$'.format(ending), 'i', word)
return word
# replace bi verb endings with bi
for ending in bi_verb_endings:
if word.endswith(ending):
word = re.sub(r'{0}$'.format(ending), 'bi', word)
return word
# replace eri verb endings with eri
for ending in eri_verb_endings:
if word.endswith(ending):
word = re.sub(r'{0}$'.format(ending), 'eri', word)
return word
# otherwise, remove general verb endings
for ending in verb_endings:
if word.endswith(ending):
word = re.sub(r'{0}$'.format(ending), '', word)
break
return word
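For illustration, the expected behavior on a few ordinary Latin verb forms, hand-traced through the branches above (the method is assumed bound to a stemmer instance):

# word        -> stem     branch taken
# 'amabo'     -> 'amabi'  (bi_verb_endings: 'bo' replaced by 'bi')
# 'monuerunt' -> 'monui'  (i_verb_endings: 'erunt' replaced by 'i')
# 'laudantur' -> 'lauda'  (general endings: 'ntur' stripped)
# 'amamus'    -> 'ama'    (general endings: 'mus' stripped)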
def find_elements(self, by, value, view_cls=None):
# type: (By, Any, Optional[type]) -> List[View]
"""
Find one or more elements matching condition
:param by: Type of condition
:param value: Condition value
:param view_cls: Optional custom class to wrap returned elements
:return: List of matching web elements wrapped in a view
"""
if view_cls is None:
view_cls = View
def get_elements():
results = []
try:
results = self.root.find_elements(by, value)
except NoSuchElementException:
pass
finally:
return results
def get_element_at_index(i):
return lambda: get_elements()[i]
return [view_cls(get_element_at_index(i)) for i, element in enumerate(get_elements())]
def branches(directory=None, verbose=False):
"""Show current branch points"""
config = current_app.extensions['migrate'].migrate.get_config(directory)
if alembic_version >= (0, 7, 0):
command.branches(config, verbose=verbose)
else:
command.branches(config)
def index_run(record_path, keep_json, check_duplicate):
"""
Convert raw JSON records into sqlite3 DB.
Normally RASH launches a daemon that takes care of indexing.
See ``rash daemon --help``.
"""
from .config import ConfigStore
from .indexer import Indexer
cfstore = ConfigStore()
indexer = Indexer(cfstore, check_duplicate, keep_json, record_path)
indexer.index_all()
def __get_item_sh_fields_empty(self, rol, undefined=False):
""" Return a SH identity with all fields to empty_field """
# If empty_field is None, the fields do not appear in index patterns
empty_field = '' if not undefined else '-- UNDEFINED --'
return {
rol + "_id": empty_field,
rol + "_uuid": empty_field,
rol + "_name": empty_field,
rol + "_user_name": empty_field,
rol + "_domain": empty_field,
rol + "_gender": empty_field,
rol + "_gender_acc": None,
rol + "_org_name": empty_field,
rol + "_bot": False
}
def rmse(self, relative_to='AME2003'):
"""Calculate root mean squared error
Parameters
----------
relative_to : string,
a valid mass table name.
Examples
--------
>>> template = '{0:10}|{1:^6.2f}|{2:^6.2f}|{3:^6.2f}'
>>> print 'Model     ', 'AME95 ', 'AME03 ', 'AME12 '  # Table header
>>> for name in Table.names:
...     print template.format(name, Table(name).rmse(relative_to='AME1995'),
...                           Table(name).rmse(relative_to='AME2003'),
...                           Table(name).rmse(relative_to='AME2012'))
Model AME95 AME03 AME12
AME2003 | 0.13 | 0.00 | 0.13
AME2003all| 0.42 | 0.40 | 0.71
AME2012 | 0.16 | 0.13 | 0.00
AME2012all| 0.43 | 0.43 | 0.69
AME1995 | 0.00 | 0.13 | 0.16
AME1995all| 0.00 | 0.17 | 0.21
DUZU | 0.52 | 0.52 | 0.76
FRDM95 | 0.79 | 0.78 | 0.95
KTUY05 | 0.78 | 0.77 | 1.03
ETFSI12 | 0.84 | 0.84 | 1.04
HFB14 | 0.84 | 0.83 | 1.02
"""
error = self.error(relative_to=relative_to)
return math.sqrt((error.df ** 2).mean())
def _run_task_internal(self, task):
''' run a particular module step in a playbook '''
hosts = self._list_available_hosts()
self.inventory.restrict_to(hosts)
runner = cirruscluster.ext.ansible.runner.Runner(
pattern=task.play.hosts, inventory=self.inventory, module_name=task.module_name,
module_args=task.module_args, forks=self.forks,
remote_pass=self.remote_pass, module_path=self.module_path,
timeout=self.timeout, remote_user=task.play.remote_user,
remote_port=task.play.remote_port, module_vars=task.module_vars,
private_key_file=self.private_key_file,
private_key=self.private_key,
setup_cache=self.SETUP_CACHE, basedir=task.play.basedir,
conditional=task.only_if, callbacks=self.runner_callbacks,
sudo=task.sudo, sudo_user=task.sudo_user,
transport=task.transport, sudo_pass=task.sudo_pass, is_playbook=True
)
if task.async_seconds == 0:
results = runner.run()
else:
results, poller = runner.run_async(task.async_seconds)
self.stats.compute(results)
if task.async_poll_interval > 0:
# if not polling, playbook requested fire and forget, so don't poll
results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
contacted = results.get('contacted',{})
dark = results.get('dark', {})
self.inventory.lift_restriction()
if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
return None
return results
def append(self, el):
"""
Idiosyncratic method for adding an element to a list
"""
if self.value is None:
self.value = [el]
else:
self.value.append(el)
def getDisplayName(self):
"""Provides a name for display purpose respecting the alias"""
if self.alias == "":
return self.name
return self.name + " as " + self.alias
def unregister(self, signal):
"""
Unregisters an existing signal
:param signal: Name of the signal
"""
if signal in self.signals.keys():
del(self.signals[signal])
self.__log.debug("Signal %s unregisterd" % signal)
else:
self.__log.debug("Signal %s does not exist and could not be unregistered.") | 0.007895 |
def hill_climbing_stochastic(problem, iterations_limit=0, viewer=None):
'''
Stochastic hill climbing.
If iterations_limit is specified, the algorithm will end after that
number of iterations. Else, it will continue until it can't find a
better node than the current one.
Requires: SearchProblem.actions, SearchProblem.result, and
SearchProblem.value.
'''
return _local_search(problem,
_random_best_expander,
iterations_limit=iterations_limit,
fringe_size=1,
stop_when_no_better=iterations_limit==0,
viewer=viewer)
def count(self, page_size=10, vtimeout=10, callback=None):
"""
Utility function to count the number of messages in a queue.
Note: This function now calls GetQueueAttributes to obtain
an 'approximate' count of the number of messages in a queue.
"""
def counted(a):
if callable(callback):
callback(int(a['ApproximateNumberOfMessages']))
self.get_attributes('ApproximateNumberOfMessages', callback=counted)
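A minimal sketch of the callback-based call; `queue` here is a hypothetical instance of the surrounding SQS Queue class:

def print_count(n):
    print('approximately %d messages in queue' % n)

queue.count(callback=print_count)   # invokes print_count with the approximate count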
def make_output_directory(output_path: str) -> str:
"""
Creates the parent directory or directories for the specified output path
if they do not already exist to prevent incomplete directory path errors
during copying/writing operations.
:param output_path:
The path of the destination file or directory that will be written.
:return:
The absolute path to the output directory that was created if missing
or already existed.
"""
output_directory = os.path.dirname(environ.paths.clean(output_path))
if not os.path.exists(output_directory):
os.makedirs(output_directory)
return output_directory
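One caveat: the exists/makedirs pair is not atomic, so a concurrent writer can create the directory between the two calls and raise a spurious error. A race-free sketch using exist_ok (Python 3.2+); os.path.abspath stands in for the environ.paths.clean helper above, which is an assumption:

import os

def make_output_directory_safe(output_path: str) -> str:
    # exist_ok makes the call idempotent even under concurrent creation
    output_directory = os.path.dirname(os.path.abspath(output_path))
    os.makedirs(output_directory, exist_ok=True)
    return output_directory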
def beam_best_first(problem, beam_size=100, iterations_limit=0, viewer=None):
'''
Beam search best first.
beam_size is the size of the beam.
If iterations_limit is specified, the algorithm will end after that
number of iterations. Else, it will continue until it can't find a
better node than the current one.
Requires: SearchProblem.actions, SearchProblem.result, and
SearchProblem.value.
'''
return _local_search(problem,
_first_expander,
iterations_limit=iterations_limit,
fringe_size=beam_size,
random_initial_states=True,
stop_when_no_better=iterations_limit==0,
viewer=viewer)
def process_allow_action(processors, action, argument):
"""Process allow action."""
for processor in processors:
processor(action, argument)
db.session.commit()
def _function(self, x, a, b, c, d, s):
"""Lorentzian asymmetric function
x: frequency coordinate
a: peak position
b: half width
c: area proportional parameter
d: base line
s: asymmetry parameter
"""
return c/(np.pi*self._g_a(x, a, b, s)*(1.0+((x-a)/(self._g_a(x, a, b, s)))**2))+d
def read_request_line(self, request_line):
""" Read HTTP-request line
:param request_line: line to parse
for HTTP/0.9 it is: GET <Request-URI>
for HTTP/1.0 and 1.1 it is: <METHOD> <Request-URI> HTTP/<HTTP-Version>, where HTTP-Version
is 1.0 or 1.1
for HTTP/2: binary headers are used
"""
request = self.__request_cls.parse_request_line(self, request_line)
protocol_version = self.protocol_version()
if protocol_version == '0.9':
if request.method() != 'GET':
raise Exception('HTTP/0.9 standard violation')
elif protocol_version == '1.0' or protocol_version == '1.1':
pass
elif protocol_version == '2':
pass
else:
raise RuntimeError('Unsupported HTTP-protocol')
def write(self, brightness):
"""Set the brightness of the LED to `brightness`.
`brightness` can be a boolean for on/off, or integer value for a
specific brightness.
Args:
brightness (bool, int): Brightness value to set.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `brightness` type is not bool or int.
ValueError: if `brightness` value is out of range.
"""
if not isinstance(brightness, (bool, int)):
raise TypeError("Invalid brightness type, should be bool or int.")
if isinstance(brightness, bool):
brightness = self._max_brightness if brightness else 0
else:
if not 0 <= brightness <= self._max_brightness:
raise ValueError("Invalid brightness value, should be between 0 and %d." % self._max_brightness)
# Write value
try:
os.write(self._fd, b"%d\n" % brightness)
except OSError as e:
raise LEDError(e.errno, "Writing LED brightness: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise LEDError(e.errno, "Rewinding LED brightness: " + e.strerror) | 0.002461 |
def merge_kwargs(self, kwargs):
"""these kwargs come from the @arg decorator, they are then merged into any
keyword arguments that were automatically generated from the main function
introspection"""
if kwargs:
self.parser_kwargs.update(kwargs)
#self.parser_kwargs['dest'] = self.name
self.parser_kwargs.setdefault('dest', self.name)
# special handling of any passed in values
if 'default' in kwargs:
# NOTE -- this doesn't use .set_default() because that is meant to
# parse from the function definition so it actually has different syntax
# than what the .set_default() method does. eg, @arg("--foo", default=[1, 2]) means
# that the default value should be an array with 1 and 2 in it, where main(foo=[1, 2])
# means foo should be constrained to choices=[1, 2]
self.parser_kwargs["default"] = kwargs["default"]
self.parser_kwargs["required"] = False
elif 'action' in kwargs:
if kwargs['action'] in set(['store_false', 'store_true']):
self.parser_kwargs['required'] = False
elif kwargs['action'] in set(['version']):
self.parser_kwargs.pop('required', False)
else:
self.parser_kwargs.setdefault("required", True) | 0.005869 |
def entityId(self, partial, channel=None):
'''Get an entity's full id provided a partial one.
Raises EntityNotFound if partial cannot be resolved.
@param partial The partial id (e.g. mysql, precise/mysql).
@param channel Optional channel name.
'''
url = '{}/{}/meta/any'.format(self.url, _get_path(partial))
data = self._get(_add_channel(url, channel))
return data.json()['Id']
def Cpsm(self):
r'''Solid-phase heat capacity of the mixture at its current temperature
and composition, in units of [J/mol/K]. For calculation of this property
at other temperatures or compositions, or specifying manually the
method used to calculate it, and more - see the object oriented
interface :obj:`thermo.heat_capacity.HeatCapacitySolidMixture`; each
Mixture instance creates one to actually perform the calculations.
Examples
--------
>>> Mixture(['silver', 'platinum'], ws=[0.95, 0.05]).Cpsm
25.32745796347474
'''
return self.HeatCapacitySolidMixture(self.T, self.P, self.zs, self.ws)
def blurred_image_of_planes_from_1d_images_and_convolver(total_planes, image_plane_image_1d_of_planes,
image_plane_blurring_image_1d_of_planes, convolver,
map_to_scaled_array):
"""For a tracer, extract the image-plane image of every plane and blur it with the PSF.
If none of the galaxies in a plane have a light profile or pixelization (and thus don't have an image) a *None* \
is used.
Parameters
----------
total_planes : int
The total number of planes that blurred images are computed for.
image_plane_image_1d_of_planes : [ndarray]
For every plane, the 1D image-plane image.
image_plane_blurring_image_1d_of_planes : [ndarray]
For every plane, the 1D image-plane blurring image.
convolver : hyper.ccd.convolution.ConvolverImage
Class which performs the PSF convolution of a masked image in 1D.
map_to_scaled_array : func
A function which maps a masked image from 1D to 2D.
"""
blurred_image_of_planes = []
for plane_index in range(total_planes):
# If all entries are zero, there was no light profile / pixelization
if np.count_nonzero(image_plane_image_1d_of_planes[plane_index]) > 0:
blurred_image_1d_of_plane = blurred_image_1d_from_1d_unblurred_and_blurring_images(
unblurred_image_1d=image_plane_image_1d_of_planes[plane_index],
blurring_image_1d=image_plane_blurring_image_1d_of_planes[plane_index],
convolver=convolver)
blurred_image_of_plane = map_to_scaled_array(array_1d=blurred_image_1d_of_plane)
blurred_image_of_planes.append(blurred_image_of_plane)
else:
blurred_image_of_planes.append(None)
return blurred_image_of_planes
def add_maildir(self, maildir_path):
""" Load up a maildir and compute hash for each mail found. """
maildir_path = self.canonical_path(maildir_path)
logger.info("Opening maildir at {} ...".format(maildir_path))
# Maildir parser requires a string, not a unicode, as path.
maildir = Maildir(str(maildir_path), factory=None, create=False)
# Group folders by hash.
logger.info("{} mails found.".format(len(maildir)))
if self.conf.progress:
bar = ProgressBar(widgets=[Percentage(), Bar()],
max_value=len(maildir), redirect_stderr=True,
redirect_stdout=True)
else:
def bar(x):
return x
for mail_id in bar(maildir.iterkeys()):
self.stats['mail_found'] += 1
mail_path = self.canonical_path(os.path.join(
maildir._path, maildir._lookup(mail_id)))
mail = Mail(mail_path, self.conf)
try:
mail_hash = mail.hash_key
except (InsufficientHeadersError, MissingMessageID) as expt:
logger.warning(
"Rejecting {}: {}".format(mail_path, expt.args[0]))
self.stats['mail_rejected'] += 1
else:
logger.debug(
"Hash is {} for mail {!r}.".format(mail_hash, mail_id))
# Use a set to deduplicate entries pointing to the same file.
self.mails.setdefault(mail_hash, set()).add(mail_path)
self.stats['mail_kept'] += 1
def isLoggedOn(rh, userid):
"""
Determine whether a virtual machine is logged on.
Input:
Request Handle:
userid being queried
Output:
Dictionary containing the following:
overallRC - overall return code, 0: success, non-zero: failure
rc - 0: if we got status. Otherwise, it is the
error return code from the commands issued.
rs - Based on rc value. For rc==0, rs is:
0: if we determined it is logged on.
1: if we determined it is logged off.
"""
rh.printSysLog("Enter vmUtils.isLoggedOn, userid: " + userid)
results = {
'overallRC': 0,
'rc': 0,
'rs': 0,
}
cmd = ["sudo", "/sbin/vmcp", "query", "user", userid]
strCmd = ' '.join(cmd)
rh.printSysLog("Invoking: " + strCmd)
try:
subprocess.check_output(
cmd,
close_fds=True,
stderr=subprocess.STDOUT)
except CalledProcessError as e:
search_pattern = '(^HCP\w\w\w045E|^HCP\w\w\w361E)'.encode()
match = re.search(search_pattern, e.output)
if match:
# Not logged on
results['rs'] = 1
else:
# Abnormal failure
rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
e.returncode, e.output))
results = msgs.msg['0415'][0]
results['rs'] = e.returncode
except Exception as e:
# All other exceptions.
results = msgs.msg['0421'][0]
rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
type(e).__name__, str(e)))
rh.printSysLog("Exit vmUtils.isLoggedOn, overallRC: " +
str(results['overallRC']) + " rc: " + str(results['rc']) +
" rs: " + str(results['rs']))
return results
def worst_decimal(d1, d2):
"""
Given two Decimals, return a 9-filled decimal with enough digits before the
decimal point and enough after it (scale) to accommodate numbers like either.
>>> worst_decimal(Decimal('762.1'), Decimal('-1.983'))
Decimal('999.999')
"""
(d1b4, d1after) = _places_b4_and_after_decimal(d1)
(d2b4, d2after) = _places_b4_and_after_decimal(d2)
return Decimal('9' * max(d1b4, d2b4) + '.' + '9' * max(d1after, d2after))
def port_is_open():
"""
Determine if the default port and user is open for business.
"""
with settings(hide('aborts'), warn_only=True ):
try:
if env.verbosity:
print "Testing node for previous installation on port %s:"% env.port
distribution = lsb_release()
except KeyboardInterrupt:
if env.verbosity:
print >> sys.stderr, "\nStopped."
sys.exit(1)
except: #No way to catch the failing connection without catchall?
return False
if distribution.distributor_id <> 'Ubuntu':
print env.host, 'WARNING: Woven has only been tested on Ubuntu >= 10.04. It may not work as expected on',distribution.description
return True
def _link_to_img(self):
"""
Generates a link to the user's Gravatar.
>>> Gravatar('[email protected]')._link_to_img()
'http://www.gravatar.com/avatar/16b87da510d278999c892cdbdd55c1b6?s=80&r=g'
"""
# make sure options are valid
if self.rating.lower() not in RATINGS:
raise InvalidRatingError(self.rating)
if not (MIN_SIZE <= self.size <= MAX_SIZE):
raise InvalidSizeError(self.size)
url = ''
if self.secure:
url = SECURE_BASE_URL
else:
url = BASE_URL
options = {'s' : self.size, 'r' : self.rating}
if self.default is not None:
options['d'] = self.default
url += self.hash + '?' + urlencode(options)
return url
def AddArguments(cls, argument_group):
"""Adds command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'--user', dest='username', type=str, action='store',
default=cls._DEFAULT_USERNAME, metavar='USERNAME', required=False,
help='The username used to connect to the database.')
argument_group.add_argument(
'--password', dest='password', type=str, action='store',
default=cls._DEFAULT_PASSWORD, metavar='PASSWORD', help=(
'The password for the database user.'))
argument_group.add_argument(
'--db_name', '--db-name', dest='db_name', action='store',
type=str, default=cls._DEFAULT_NAME, required=False, help=(
'The name of the database to connect to.'))
server_config.ServerArgumentsHelper.AddArguments(argument_group)
def _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2):
"""Round-off x to cand1 or to cand2 in an unbiased way.
Cand1 and cand2 are the same shape as x.
For every element of x, the corresponding elements of cand1 and cand2 should
be the two closest bfloat16 values to x. Order does not matter.
cand1 and cand2 must differ from each other.
Args:
x: A float32 Tensor.
noise: A Tensor broadcastable to the shape of x containing
random uniform values in [0.0, 1.0].
cand1: A bfloat16 Tensor the same shape as x.
cand2: A bfloat16 Tensor the same shape as x.
Returns:
A bfloat16 Tensor.
"""
cand1_f = tf.to_float(cand1)
cand2_f = tf.to_float(cand2)
step_size = cand2_f - cand1_f
fpart = (x - cand1_f) / step_size
ret = tf.where(tf.greater(fpart, noise), cand2, cand1)
return ret
def run_outdated(cls, options):
"""Print outdated user packages."""
latest_versions = sorted(
cls.find_packages_latest_versions(cls.options),
key=lambda p: p[0].project_name.lower())
for dist, latest_version, typ in latest_versions:
if latest_version > dist.parsed_version:
if options.all:
pass
elif options.pinned:
if cls.can_be_updated(dist, latest_version):
continue
elif not options.pinned:
if not cls.can_be_updated(dist, latest_version):
continue
elif options.update:
print(dist.project_name if options.brief else
'Updating %s to Latest: %s [%s]' %
(cls.output_package(dist), latest_version, typ))
main(['install', '--upgrade'] + ([
'--user'
] if ENABLE_USER_SITE else []) + [dist.key])
continue
print(dist.project_name if options.brief else
'%s - Latest: %s [%s]' %
(cls.output_package(dist), latest_version, typ))
def GetSizeHint(self, context=None, **unused_kwargs):
"""Retrieves a hint about the size.
Args:
context (Optional[DataTypeMapContext]): data type map context, used to
determine the size hint.
Returns:
int: hint of the number of bytes needed from the byte stream or None.
"""
context_state = getattr(context, 'state', {})
elements_data_size = self.GetByteSize()
if elements_data_size:
return elements_data_size
try:
elements_data_size = self._CalculateElementsDataSize(context)
except errors.MappingError:
pass
if elements_data_size is None and self._HasElementsTerminator():
size_hints = context_state.get('size_hints', {})
size_hint = size_hints.get(self._data_type_definition.name, None)
elements_data_size = 0
if size_hint:
elements_data_size = size_hint.byte_size
if not size_hint or not size_hint.is_complete:
elements_data_size += self._element_data_type_definition.GetByteSize()
return elements_data_size
def from_log(cls, log,
cutoff=None,
components=None,
legend=None,
legend_field=None,
field=None,
right=False,
basis=None,
source='Log'):
"""
Turn a 1D array into a striplog, given a cutoff.
Args:
log (array-like): A 1D array or a list of integers.
cutoff (number or array-like): The log value(s) at which to bin
the log. Optional.
components (array-like): A list of components. Use this or
``legend``.
legend (``Legend``): A legend object. Use this or ``components``.
legend_field (str): If you're not trying to match against
components, then you can match the log values to this field in
the Decors.
field (str): The field in the Interval's ``data`` to store the log
values as.
right (bool): Which side of the cutoff to send things that are
equal to, i.e. right on, the cutoff.
basis (array-like): A depth basis for the log, so striplog knows
where to put the boundaries.
source (str): The source of the data. Default 'Log'.
Returns:
Striplog: The ``striplog`` object.
"""
if (components is None) and (legend is None) and (field is None):
m = 'You must provide a list of components, a legend, or a field.'
raise StriplogError(m)
if (legend is not None) and (legend_field is None):
try: # To treat it like a legend.
components = [deepcopy(decor.component) for decor in legend]
except AttributeError: # It's just a list of components.
pass
if legend_field is not None:
field_values = [getattr(d, legend_field, 0) for d in legend]
components = [Component() for i in range(int(max(field_values)+1))]
for i, decor in enumerate(legend):
components[i] = deepcopy(decor.component)
if cutoff is not None:
# First make sure we have enough components.
try:
n = len(cutoff)
except TypeError:
n = 1
if len(components) < n+1:
m = 'For n cutoffs, you need to provide at least'
m += 'n+1 components.'
raise StriplogError(m)
# Digitize.
try: # To use cutoff as a list.
a = np.digitize(log, cutoff, right)
except ValueError: # It's just a number.
a = np.digitize(log, [cutoff], right)
else:
a = np.copy(log)
tops, values = utils.tops_from_loglike(a)
if basis is None:
m = 'You must provide a depth or elevation basis.'
raise StriplogError(m)
list_of_Intervals = cls.__intervals_from_tops(tops,
values,
basis,
components,
field=field
)
return cls(list_of_Intervals, source=source)
def mkstemp(self, suffix, prefix, directory=None):
"""
Generate temp file name in artifacts base dir
and close temp file handle
"""
if not directory:
directory = self.artifacts_dir
fd, fname = tempfile.mkstemp(suffix, prefix, directory)
os.close(fd)
os.chmod(fname, 0o644) # FIXME: chmod to parent dir's mode?
return fname
def populate_state_register(all_seq: [list], sr: state.StateRegister) -> Edge:
""" function that create a state for all instance
of MatchExpr in the given list and connect each others.
"""
# Basic State
s0 = state.State(sr)
# loop on himself
s0.matchDefault(s0)
# this is default
sr.set_default_state(s0)
# use Edge to store connection
e0 = Edge(s0)
for seq in all_seq:
r = ref(e0)
# merge all sequences into one tree automata
populate_from_sequence(seq, r, sr)
# return edge for debug purpose
return e0
def component_by_tags(self, tags):
"""Retrieve components by tags.
:param tags: List of tags
:type tags: list
:return: List of ReportComponentsMetadata
:rtype: list[ReportComponentsMetadata]
.. versionadded:: 4.0
"""
tags_keys = [t['key'] for t in tags]
filtered = [
c for c in self.components
if set(tags_keys).issubset([ct['key'] for ct in c.tags])]
return filtered
def write(self, ncfile) -> None:
"""Write the data to the given NetCDF file.
See the general documentation on classes |NetCDFVariableDeep|
and |NetCDFVariableAgg| for some examples.
"""
self: NetCDFVariableBase
self.insert_subdevices(ncfile)
dimensions = self.dimensions
array = self.array
for dimension, length in zip(dimensions[2:], array.shape[2:]):
create_dimension(ncfile, dimension, length)
create_variable(ncfile, self.name, 'f8', dimensions)
ncfile[self.name][:] = array
def wrap(cls, meth):
'''
Wraps a connection opening method in this class.
'''
async def inner(*args, **kwargs):
sock = await meth(*args, **kwargs)
return cls(sock)
return inner
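A self-contained sketch of how such a wrapper is meant to be used; the WrappedSock class and open_plain coroutine are hypothetical stand-ins:

import asyncio

class WrappedSock:
    def __init__(self, sock):
        self.sock = sock

    @classmethod
    def wrap(cls, meth):
        # same pattern as above: await the opener, then wrap its result
        async def inner(*args, **kwargs):
            sock = await meth(*args, **kwargs)
            return cls(sock)
        return inner

async def open_plain(host, port):
    _reader, writer = await asyncio.open_connection(host, port)
    return writer   # stands in for the raw socket returned by `meth`

open_wrapped = WrappedSock.wrap(open_plain)
# asyncio.run(open_wrapped('example.com', 80)) -> a WrappedSock instance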
def broadcast_indices(x, minv, ndim, axis):
"""Calculate index values to properly broadcast index array within data array.
See usage in interp.
"""
ret = []
for dim in range(ndim):
if dim == axis:
ret.append(minv)
else:
broadcast_slice = [np.newaxis] * ndim
broadcast_slice[dim] = slice(None)
dim_inds = np.arange(x.shape[dim])
ret.append(dim_inds[tuple(broadcast_slice)])
return tuple(ret)
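A concrete check (assuming numpy as np; argmin's keepdims argument needs numpy >= 1.22): broadcast the argmin indices along axis=1 back into a full index tuple and pick out the minima.

import numpy as np

x = np.random.rand(2, 5, 4)
minv = np.argmin(x, axis=1, keepdims=True)         # shape (2, 1, 4)
inds = broadcast_indices(x, minv, x.ndim, axis=1)
assert np.allclose(x[inds], x.min(axis=1, keepdims=True))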
def _init_metadata(self):
"""stub"""
self._attempts_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'attempts'),
'element_label': 'Attempts',
'instructions': 'Max number of student attempts',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_object_values': [0],
'syntax': 'INTEGER',
'object_set': [],
'minimum_integer': None,
'maximum_integer': None,
'integer_set': []
}
self._weight_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'weight'),
'element_label': 'Weight',
'instructions': 'Weight of the item when calculating grades',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_object_values': [1.0],
'syntax': 'DECIMAL',
'object_set': [],
'decimal_scale': None,
'minimum_decimal': None,
'maximum_decimal': None,
'decimal_set': []
}
# self._rerandomize_metadata = {
# 'element_id': Id(self.my_osid_object_form._authority,
# self.my_osid_object_form._namespace,
# 'rerandomize'),
# 'element_label': 'Randomize',
# 'instructions': 'How to rerandomize the parameters',
# 'required': False,
# 'read_only': False,
# 'linked': False,
# 'array': False,
# 'default_object_values': ['never'],
# 'syntax': 'STRING',
# 'minimum_string_length': None,
# 'maximum_string_length': None,
# 'string_set': []
# }
self._showanswer_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'showanswer'),
'element_label': 'Show answer',
'instructions': 'When to show the answer to the student',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_object_values': ['closed'],
'syntax': 'STRING',
'minimum_string_length': None,
'maximum_string_length': None,
'string_set': []
}
self._markdown_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'markdown'),
'element_label': 'Studio markdown',
'instructions': 'Studio markdown representation of the problem',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_object_values': [''],
'syntax': 'STRING',
'minimum_string_length': None,
'maximum_string_length': None,
'string_set': []
}
def leave_classdef(self, cnode):
"""close a class node:
check that instance attributes are defined in __init__ and check
access to existent members
"""
# check access to existent members on non metaclass classes
if self._ignore_mixin and cnode.name[-5:].lower() == "mixin":
# We are in a mixin class. No need to try to figure out if
# something is missing, since it is most likely that it will
# miss.
return
accessed = self._accessed.accessed(cnode)
if cnode.type != "metaclass":
self._check_accessed_members(cnode, accessed)
# checks attributes are defined in an allowed method such as __init__
if not self.linter.is_message_enabled("attribute-defined-outside-init"):
return
defining_methods = self.config.defining_attr_methods
current_module = cnode.root()
for attr, nodes in cnode.instance_attrs.items():
# skip nodes which are not in the current module and it may screw up
# the output, while it's not worth it
nodes = [
n
for n in nodes
if not isinstance(n.statement(), (astroid.Delete, astroid.AugAssign))
and n.root() is current_module
]
if not nodes:
continue # error detected by typechecking
# check if any method attr is defined in is a defining method
if any(node.frame().name in defining_methods for node in nodes):
continue
# check attribute is defined in a parent's __init__
for parent in cnode.instance_attr_ancestors(attr):
attr_defined = False
# check if any parent method attr is defined in is a defining method
for node in parent.instance_attrs[attr]:
if node.frame().name in defining_methods:
attr_defined = True
if attr_defined:
# we're done :)
break
else:
# check attribute is defined as a class attribute
try:
cnode.local_attr(attr)
except astroid.NotFoundError:
for node in nodes:
if node.frame().name not in defining_methods:
# If the attribute was set by a call in any
# of the defining methods, then don't emit
# the warning.
if _called_in_methods(
node.frame(), cnode, defining_methods
):
continue
self.add_message(
"attribute-defined-outside-init", args=attr, node=node
)
def moment_inertia(self):
"""
The analytic inertia tensor of the sphere primitive.
Returns
-------
tensor: (3,3) float, 3D inertia tensor
"""
tensor = inertia.sphere_inertia(mass=self.volume,
radius=self.primitive.radius)
return tensor
def _recv_thread(self):
"""
Internal thread to iterate over source messages and dispatch callbacks.
"""
for msg, metadata in self._source:
if msg.msg_type:
self._call(msg, **metadata)
# Break any upstream iterators
for sink in self._sinks:
i = sink()
if i is not None:
i.breakiter()
self._dead = True
def delete(self, request, **resources):
""" Default DELETE method. Allow bulk delete.
:return django.http.response: empty response
"""
resource = resources.get(self._meta.name)
if not resource:
raise HttpError("Bad request", status=status.HTTP_404_NOT_FOUND)
for o in as_tuple(resource):
o.delete()
return HttpResponse("") | 0.004914 |
def unique_def_name(self, stmt, inrpc=False):
"""Mangle the name of `stmt` (typedef or grouping).
Return the mangled name and dictionary where the definition is
to be installed. The `inrpc` flag indicates when we are inside
an RPC, in which case the name gets the "__rpc" suffix.
"""
module = stmt.main_module()
name = ""
while True:
pref = stmt.arg if stmt.arg else stmt.keyword
name = "__" + pref + name
if stmt.keyword == "grouping": name = "_" + name
if stmt.parent.parent is None: break
stmt = stmt.parent
defs = (self.global_defs
if stmt.keyword in ("grouping", "typedef")
else self.local_defs)
if inrpc: name += "__rpc"
return (module.arg + name, defs)
def run(self):
"""Executed on startup of application"""
self.api = self.context.get("cls")(self.context)
self.context["inst"].append(self) # Adapters used by strategies
for call, calldata in self.context.get("calls", {}).items():
def loop():
"""Loop on event scheduler, calling calls"""
while not self.stopped.wait(calldata.get("delay", None)):
self.call(call, calldata.get("arguments", None))
self.thread[call] = Process(target=loop)
self.thread[call].start()
def _download_pastebin(self):
"""Download content from Pastebin itself."""
paste_id = self.url.split("/")[-1]
url = "https://pastebin.com/raw/" + paste_id
return self._download_raw(url)
def get_unspents(address, bitcoind):
"""
Used for testing only!
Get the spendable transaction outputs, also known as UTXOs or
unspent transaction outputs.
NOTE: this will only return unspents if the address provided is present
in the bitcoind server.
"""
assert BLOCKSTACK_TEST, 'get_unspents can only be used in test mode!'
addresses = [address]
min_confirmations = 0
max_confirmation = 2000000000 # just a very large number for max
unspents = bitcoind.listunspent(min_confirmations, max_confirmation, addresses)
if BLOCKSTACK_TEST and len(unspents) == 0:
try:
bitcoind.importaddress(str(address))
unspents = bitcoind.listunspent(min_confirmations, max_confirmation, addresses)
except Exception as e:
return format_unspents([])
return format_unspents(unspents)
def save_partial(self, obj):
"""Partial objects do not serialize correctly in python2.x -- this fixes the bugs"""
self.save_reduce(_genpartial, (obj.func, obj.args, obj.keywords))
def kmip_version(self, value):
"""
Set the KMIP version for the client.
Args:
value (KMIPVersion): A KMIPVersion enumeration
Return:
None
Raises:
ValueError: if value is not a KMIPVersion enumeration
Example:
>>> client.kmip_version = enums.KMIPVersion.KMIP_1_1
>>>
"""
if isinstance(value, enums.KMIPVersion):
self._kmip_version = value
else:
raise ValueError("KMIP version must be a KMIPVersion enumeration") | 0.003509 |
def build_bot(config_file=None):
"""Parse a config and return a SeshetBot instance. After, the bot can be run
simply by calling .connect() and then .start()
Optional arguments:
config_file - valid file path or ConfigParser instance
If config_file is None, will read default config defined in this module.
"""
from . import bot
config = ConfigParser(interpolation=None)
if config_file is None:
config.read_string(default_config)
elif isinstance(config_file, ConfigParser):
config = config_file
else:
config.read(config_file)
# shorter names
db_conf = config['database']
conn_conf = config['connection']
client_conf = config['client']
log_conf = config['logging']
verbosity = config['debug']['verbosity'].lower() or 'notset'
debug_file = config['debug']['file'] or None
# add more as they're used
if db_conf.getboolean('use_db'):
db = DAL(db_conf['db_string'])
build_db_tables(db)
log_file = None
log_fmts = {}
else:
db = None
log_file = log_conf.pop('file')
log_fmts = dict(log_conf)
# debug logging
debug_lvls = {'notset': 0,
'debug': 10,
'info': 20,
'warning': 30,
'error': 40,
'critical': 50,
}
lvl = int(debug_lvls[verbosity])
seshetbot = bot.SeshetBot(client_conf['nickname'], db, debug_file, lvl)
# connection info for connect()
seshetbot.default_host = conn_conf['server']
seshetbot.default_port = int(conn_conf['port'])
seshetbot.default_channel = conn_conf['channels'].split(',')
seshetbot.default_use_ssl = conn_conf.getboolean('ssl')
# client info
seshetbot.user = client_conf['user']
seshetbot.real_name = client_conf['realname']
# logging info
seshetbot.log_file = log_file
seshetbot.log_formats = log_fmts
seshetbot.locale = dict(config['locale'])
return seshetbot
def set_node_status(node_id, status, **kwargs):
"""
Set the status of a node to 'X'
"""
user_id = kwargs.get('user_id')
try:
node_i = db.DBSession.query(Node).filter(Node.id == node_id).one()
except NoResultFound:
raise ResourceNotFoundError("Node %s not found"%(node_id))
node_i.network.check_write_permission(user_id)
node_i.status = status
for link in node_i.links_to:
link.status = status
for link in node_i.links_from:
link.status = status
db.DBSession.flush()
return node_i
def execute(self, fragment, pretty_format=True):
"""
Run or aggregate a query fragment
Concat the fragment to any stored fragments. If they form a complete
query, run it and return the result. If not, store them and return
None.
"""
self.fragments = (self.fragments + "\n" + fragment).lstrip()
try:
line_parser.parseString(self.fragments)
except ParseException:
pass
else:
self.last_query = self.fragments.strip()
self.fragments = ""
return super(FragmentEngine, self).execute(self.last_query, pretty_format)
return None
def ProcessFile(filename, vlevel, extra_check_functions=None):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
_BackupFilters()
if not ProcessConfigOverrides(filename):
_RestoreFilters()
return
lf_lines = []
crlf_lines = []
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
# Remove trailing '\r'.
# The -1 accounts for the extra trailing blank line we get from split()
for linenum in range(len(lines) - 1):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
crlf_lines.append(linenum + 1)
else:
lf_lines.append(linenum + 1)
except IOError:
_cpplint_state.PrintError(
"Skipping input '%s': Can't open for reading\n" % filename)
_RestoreFilters()
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in GetAllExtensions():
# bazel 0.5.1> uses four distinct generated files that gives a warning
# we suppress the warning for these files
bazel_gen_files = set([
"external/local_config_cc/libtool",
"external/local_config_cc/make_hashed_objlist.py",
"external/local_config_cc/wrapped_ar",
"external/local_config_cc/wrapped_clang",
"external/local_config_cc/xcrunwrapper.sh",
])
if not filename in bazel_gen_files:
_cpplint_state.PrintError('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(GetAllExtensions())))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
# If end-of-line sequences are a mix of LF and CR-LF, issue
# warnings on the lines with CR.
#
# Don't issue any warnings if all lines are uniformly LF or CR-LF,
# since critique can handle these just fine, and the style guide
# doesn't dictate a particular end of line sequence.
#
# We can't depend on os.linesep to determine what the desired
# end-of-line sequence should be, since that will return the
# server-side end-of-line sequence.
if lf_lines and crlf_lines:
# Warn on every line with CR. An alternative approach might be to
# check whether the file is mostly CRLF or just LF, and warn on the
# minority, we bias toward LF here since most tools prefer LF.
for linenum in crlf_lines:
Error(filename, linenum, 'whitespace/newline', 1,
'Unexpected \\r (^M) found; better to use only \\n')
_RestoreFilters()
def get_data_port_by_id(self, data_port_id):
"""Search for the given data port id in the data ports of the state
The method tries to find a data port in the input and output data ports as well as in the scoped variables.
:param data_port_id: the unique id of the data port
:return: the data port with the searched id or None if not found
"""
data_port = super(ContainerState, self).get_data_port_by_id(data_port_id)
if data_port:
return data_port
if data_port_id in self.scoped_variables:
return self.scoped_variables[data_port_id]
return None
def get_live_url(con_pool,
method,
host,
url,
headers,
retries=1,
redirect=True,
body=None,
service_name=None):
"""
Return a connection from the pool and perform an HTTP request.
:param con_pool:
is the http connection pool associated with the service
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param host:
the url of the server host.
    :param url:
        the request path (and query string) to fetch
    :param headers:
        headers to include with the request
    :param retries:
        number of retries to attempt
    :param redirect:
        whether to follow redirects
    :param body:
        the POST, PUT, PATCH body of the request
    :param service_name:
        service label attached to the emitted request signals
"""
timeout = con_pool.timeout.read_timeout
start_time = time.time()
response = con_pool.urlopen(method, url, body=body,
headers=headers, redirect=redirect,
retries=retries, timeout=timeout)
request_time = time.time() - start_time
rest_request.send(sender='restclients',
url=url,
request_time=request_time,
hostname=socket.gethostname(),
service_name=service_name)
rest_request_passfail.send(sender='restclients',
url=url,
success=True,
hostname=socket.gethostname(),
service_name=service_name)
return response
def cli(env, **args):
"""Order/create a dedicated server."""
mgr = SoftLayer.HardwareManager(env.client)
# Get the SSH keys
ssh_keys = []
for key in args.get('key'):
resolver = SoftLayer.SshKeyManager(env.client).resolve_ids
key_id = helpers.resolve_id(resolver, key, 'SshKey')
ssh_keys.append(key_id)
order = {
'hostname': args['hostname'],
'domain': args['domain'],
'size': args['size'],
'location': args.get('datacenter'),
'ssh_keys': ssh_keys,
'post_uri': args.get('postinstall'),
'os': args['os'],
'hourly': args.get('billing') == 'hourly',
'port_speed': args.get('port_speed'),
'no_public': args.get('no_public') or False,
'extras': args.get('extra'),
}
# Do not create hardware server with --test or --export
do_create = not (args['export'] or args['test'])
output = None
if args.get('test'):
result = mgr.verify_order(**order)
table = formatting.Table(['Item', 'cost'])
table.align['Item'] = 'r'
table.align['cost'] = 'r'
total = 0.0
for price in result['prices']:
total += float(price.get('recurringFee', 0.0))
rate = "%.2f" % float(price['recurringFee'])
table.add_row([price['item']['description'], rate])
table.add_row(['Total monthly cost', "%.2f" % total])
output = []
output.append(table)
output.append(formatting.FormattedItem(
'',
' -- ! Prices reflected here are retail and do not '
'take account level discounts and are not guaranteed.'))
if args['export']:
export_file = args.pop('export')
template.export_to_template(export_file, args,
exclude=['wait', 'test'])
env.fout('Successfully exported options to a template file.')
return
if do_create:
if not (env.skip_confirmations or formatting.confirm(
"This action will incur charges on your account. "
"Continue?")):
raise exceptions.CLIAbort('Aborting dedicated server order.')
result = mgr.place_order(**order)
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
table.add_row(['id', result['orderId']])
table.add_row(['created', result['orderDate']])
output = table
env.fout(output)
def Enumerate():
"""See base class."""
# Init a HID manager
hid_mgr = iokit.IOHIDManagerCreate(None, None)
if not hid_mgr:
raise errors.OsHidError('Unable to obtain HID manager reference')
iokit.IOHIDManagerSetDeviceMatching(hid_mgr, None)
# Get devices from HID manager
device_set_ref = iokit.IOHIDManagerCopyDevices(hid_mgr)
if not device_set_ref:
raise errors.OsHidError('Failed to obtain devices from HID manager')
num = iokit.CFSetGetCount(device_set_ref)
devices = (IO_HID_DEVICE_REF * num)()
iokit.CFSetGetValues(device_set_ref, devices)
# Retrieve and build descriptor dictionaries for each device
descriptors = []
for dev in devices:
d = base.DeviceDescriptor()
d.vendor_id = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_VENDOR_ID)
d.product_id = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_PRODUCT_ID)
d.product_string = GetDeviceStringProperty(dev,
HID_DEVICE_PROPERTY_PRODUCT)
d.usage = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_PRIMARY_USAGE)
d.usage_page = GetDeviceIntProperty(
dev, HID_DEVICE_PROPERTY_PRIMARY_USAGE_PAGE)
d.report_id = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_REPORT_ID)
d.path = GetDevicePath(dev)
descriptors.append(d.ToPublicDict())
# Clean up CF objects
cf.CFRelease(device_set_ref)
cf.CFRelease(hid_mgr)
return descriptors
def run_chunked(self):
'''
Make the salt client call in the new chunked multi-call fashion
'''
files, empty_dirs = self._list_files()
dest = self.opts['dest']
gzip = self.opts['gzip']
tgt = self.opts['tgt']
timeout = self.opts['timeout']
selected_target_option = self.opts.get('selected_target_option')
dest_is_dir = bool(empty_dirs) \
or len(files) > 1 \
or bool(re.search(r'[\\/]$', dest))
reader = salt.utils.gzip_util.compress_file \
if gzip \
else salt.utils.itertools.read_file
_res = salt.utils.minions.CkMinions(self.opts).check_minions(
tgt,
tgt_type=selected_target_option or 'glob')
minions = _res['minions']
local = salt.client.get_local_client(self.opts['conf_file'])
def _get_remote_path(fn_):
if fn_ in self.opts['src']:
# This was a filename explicitly passed on the CLI
return os.path.join(dest, os.path.basename(fn_)) \
if dest_is_dir \
else dest
else:
for path in self.opts['src']:
relpath = os.path.relpath(fn_, path + os.sep)
if relpath.startswith(parent):
# File is not within this dir
continue
return os.path.join(dest, os.path.basename(path), relpath)
else: # pylint: disable=useless-else-on-loop
# Should not happen
log.error('Failed to find remote path for %s', fn_)
return None
ret = {}
parent = '..' + os.sep
for fn_, mode in six.iteritems(files):
remote_path = _get_remote_path(fn_)
index = 1
failed = {}
for chunk in reader(fn_, chunk_size=self.opts['salt_cp_chunk_size']):
chunk = base64.b64encode(salt.utils.stringutils.to_bytes(chunk))
append = index > 1
log.debug(
'Copying %s to %starget \'%s\' as %s%s',
fn_,
'{0} '.format(selected_target_option)
if selected_target_option
else '',
tgt,
remote_path,
' (chunk #{0})'.format(index) if append else ''
)
args = [
tgt,
'cp.recv_chunked',
[remote_path, chunk, append, gzip, mode],
timeout,
]
if selected_target_option is not None:
args.append(selected_target_option)
result = local.cmd(*args)
if not result:
# Publish failed
msg = (
'Publish failed.{0} It may be necessary to '
'decrease salt_cp_chunk_size (current value: '
'{1})'.format(
' File partially transferred.' if index > 1 else '',
self.opts['salt_cp_chunk_size'],
)
)
for minion in minions:
ret.setdefault(minion, {})[remote_path] = msg
break
for minion_id, minion_ret in six.iteritems(result):
ret.setdefault(minion_id, {})[remote_path] = minion_ret
# Catch first error message for a given minion, we will
# rewrite the results after we're done iterating through
# the chunks.
if minion_ret is not True and minion_id not in failed:
failed[minion_id] = minion_ret
index += 1
for minion_id, msg in six.iteritems(failed):
ret[minion_id][remote_path] = msg
for dirname in empty_dirs:
remote_path = _get_remote_path(dirname)
log.debug(
'Creating empty dir %s on %starget \'%s\'',
dirname,
'{0} '.format(selected_target_option) # pylint: disable=str-format-in-logging
if selected_target_option
else '',
tgt,
)
args = [tgt, 'cp.recv_chunked', [remote_path, None], timeout]
if selected_target_option is not None:
args.append(selected_target_option)
for minion_id, minion_ret in six.iteritems(local.cmd(*args)):
ret.setdefault(minion_id, {})[remote_path] = minion_ret
return ret
def aggregate_variable(estimate, id):
"""
Aggregate census table variables by a custom label.
"""
estimates = [
variable.estimates.get(division__id=id).estimate
for variable in estimate.variable.label.variables.all()
]
method = estimate.variable.label.aggregation
if method == "s":
aggregate = sum(estimates)
elif method == "a":
aggregate = statistics.mean(estimates)
elif method == "m":
aggregate = statistics.median(estimates)
else:
aggregate = None
return aggregate
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if os.path.isfile(name): return name
for spath in lookup:
fname = os.path.join(spath, name)
if os.path.isfile(fname):
return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
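To make the lookup order concrete: for a loader class defining extensions = ['tpl', 'html'] and exposing the classmethod above (the names here are hypothetical), with ./views/base.tpl present on disk:

#   Loader.search('base', lookup=['./views', '/srv/app/views'])
# checks 'base' itself, then './views/base', then './views/base.tpl' (hit)
# and returns it; if nothing matches anywhere, the loop falls through and
# the method returns an implicit None.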
def loads(s, object_pairs_hook=dict):
"""
Parse the contents of the string ``s`` as a simple line-oriented
``.properties`` file and return a `dict` of the key-value pairs.
``s`` may be either a text string or bytes string. If it is a bytes
string, its contents are decoded as Latin-1.
By default, the key-value pairs extracted from ``s`` are combined into a
`dict` with later occurrences of a key overriding previous occurrences of
the same key. To change this behavior, pass a callable as the
``object_pairs_hook`` argument; it will be called with one argument, a
generator of ``(key, value)`` pairs representing the key-value entries in
``s`` (including duplicates) in order of occurrence. `loads` will then
return the value returned by ``object_pairs_hook``.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param string s: the string from which to read the ``.properties`` document
:param callable object_pairs_hook: class or function for combining the
key-value pairs
:rtype: `dict` of text strings or the return value of ``object_pairs_hook``
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input
"""
fp = BytesIO(s) if isinstance(s, binary_type) else StringIO(s)
return load(fp, object_pairs_hook=object_pairs_hook) | 0.000692 |
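A hedged usage sketch: with the default hook, later duplicates win, while a custom `object_pairs_hook` sees every pair in order of occurrence.

from collections import OrderedDict

loads("a=1\nb=2\na=3")
# -> {'a': '3', 'b': '2'}   (the later occurrence of 'a' overrides the earlier one)

loads("a=1\nb=2", object_pairs_hook=OrderedDict)
# -> OrderedDict([('a', '1'), ('b', '2')])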
def get_metadata_as_csv(fname):
""" Gets all metadata and puts into CSV format """
    q = chr(34)  # double-quote character used to wrap every field
    d = ","      # field delimiter
res = q + fname + q + d
res = res + q + os.path.basename(fname) + q + d
res = res + q + os.path.dirname(fname) + q + d
try:
res = res + q + str(os.path.getsize(fname)) + q + d
img = Image.open(fname)
# get the image's width and height in pixels
width, height = img.size
res = res + q + str(width) + q + d
res = res + q + str(height) + q + d
res = res + q + str(img.format) + q + d
res = res + q + str(img.palette) + q + d
stat = ImageStat.Stat(img)
#print(fname, width, height)
#res = res + q + str(stat.extrema) + q + d
res = res + q + List2String(stat.count, ",") + q + d
res = res + q + List2String(stat.sum, ",") + q + d
res = res + q + List2String(stat.sum2, ",") + q + d
res = res + q + List2String(stat.mean, ",") + q + d
res = res + q + List2String(stat.median, ",") + q + d
res = res + q + List2String(stat.rms, ",") + q + d
res = res + q + List2String(stat.var, ",") + q + d
res = res + q + List2String(stat.stddev, ",") + q + d
exif_data = get_exif_data(img)
(lat, lon) = get_lat_lon(exif_data)
res = res + q + str(lat) + q + d
res = res + q + str(lon) + q + d
except Exception as ex:
print('problem reading image file metadata in ', fname, str(ex))
return res | 0.00266 |
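A hedged sketch of driving the row builder over a folder of images; the header columns below are an assumption inferred from the concatenation order above:

import glob

def write_image_catalog(folder, out_csv):
    # Column order follows the field order in get_metadata_as_csv.
    header = ('"path","name","dir","bytes","width","height","format",'
              '"palette","count","sum","sum2","mean","median","rms",'
              '"var","stddev","lat","lon"\n')
    with open(out_csv, 'w') as out:
        out.write(header)
        for fname in glob.glob(folder + '/*.jpg'):
            out.write(get_metadata_as_csv(fname) + '\n')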
def build(self):
"""
Build the entire application
Returns
-------
dict
Returns the path to where each resource was built as a map of resource's LogicalId to the path string
"""
result = {}
for lambda_function in self._functions_to_build:
LOG.info("Building resource '%s'", lambda_function.name)
result[lambda_function.name] = self._build_function(lambda_function.name,
lambda_function.codeuri,
lambda_function.runtime)
return result | 0.008915 |
def p_single_statement_systemcall(self, p):
'single_statement : systemcall SEMICOLON'
p[0] = SingleStatement(p[1], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | 0.010695 |
def set_switch_state(self, state):
"""Set the switch state, also update local state."""
self.set_service_value(
self.switch_service,
'Target',
'newTargetValue',
state)
self.set_cache_value('Status', state) | 0.00722 |
def store_attribute(self, key, value):
"""Store blame info we are interested in."""
        if key in ('summary', 'filename', 'previous'):
return
attr = key.replace('-', '_')
if key.endswith('-time'):
value = int(value)
setattr(self, attr, value) | 0.006289 |
def control_system_state_encode(self, time_usec, x_acc, y_acc, z_acc, x_vel, y_vel, z_vel, x_pos, y_pos, z_pos, airspeed, vel_variance, pos_variance, q, roll_rate, pitch_rate, yaw_rate):
'''
The smoothed, monotonic system state used to feed the control loops of
the system.
time_usec : Timestamp (micros since boot or Unix epoch) (uint64_t)
x_acc : X acceleration in body frame (float)
y_acc : Y acceleration in body frame (float)
z_acc : Z acceleration in body frame (float)
x_vel : X velocity in body frame (float)
y_vel : Y velocity in body frame (float)
z_vel : Z velocity in body frame (float)
x_pos : X position in local frame (float)
y_pos : Y position in local frame (float)
z_pos : Z position in local frame (float)
airspeed : Airspeed, set to -1 if unknown (float)
vel_variance : Variance of body velocity estimate (float)
pos_variance : Variance in local position (float)
q : The attitude, represented as Quaternion (float)
roll_rate : Angular rate in roll axis (float)
pitch_rate : Angular rate in pitch axis (float)
yaw_rate : Angular rate in yaw axis (float)
'''
return MAVLink_control_system_state_message(time_usec, x_acc, y_acc, z_acc, x_vel, y_vel, z_vel, x_pos, y_pos, z_pos, airspeed, vel_variance, pos_variance, q, roll_rate, pitch_rate, yaw_rate) | 0.006208 |
def _get_common_cores(resources):
"""Retrieve the most common configured number of cores in the input file.
"""
all_cores = []
for vs in resources.values():
        cores = vs.get("cores")
        if cores:
            all_cores.append(int(cores))
return collections.Counter(all_cores).most_common(1)[0][0] | 0.003012 |
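A worked example of the `Counter` selection above, with a hypothetical resources mapping:

import collections

resources = {
    "bwa":      {"cores": 16, "memory": "2g"},
    "samtools": {"cores": 16},
    "gatk":     {"cores": 8},
    "fastqc":   {},   # no cores configured; skipped
}
cores = [int(v["cores"]) for v in resources.values() if v.get("cores")]
collections.Counter(cores).most_common(1)[0][0]  # -> 16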
def authn_request(self, context, entity_id):
"""
Do an authorization request on idp with given entity id.
This is the start of the authorization.
:type context: satosa.context.Context
:type entity_id: str
:rtype: satosa.response.Response
:param context: The current context
:param entity_id: Target IDP entity id
:return: response to the user agent
"""
# If IDP blacklisting is enabled and the selected IDP is blacklisted,
# stop here
if self.idp_blacklist_file:
with open(self.idp_blacklist_file) as blacklist_file:
blacklist_array = json.load(blacklist_file)['blacklist']
if entity_id in blacklist_array:
satosa_logging(logger, logging.DEBUG, "IdP with EntityID {} is blacklisted".format(entity_id), context.state, exc_info=False)
raise SATOSAAuthenticationError(context.state, "Selected IdP is blacklisted for this backend")
kwargs = {}
authn_context = self.construct_requested_authn_context(entity_id)
if authn_context:
kwargs['requested_authn_context'] = authn_context
try:
binding, destination = self.sp.pick_binding(
"single_sign_on_service", None, "idpsso", entity_id=entity_id)
satosa_logging(logger, logging.DEBUG, "binding: %s, destination: %s" % (binding, destination),
context.state)
acs_endp, response_binding = self.sp.config.getattr("endpoints", "sp")["assertion_consumer_service"][0]
req_id, req = self.sp.create_authn_request(
destination, binding=response_binding, **kwargs)
relay_state = util.rndstr()
ht_args = self.sp.apply_binding(binding, "%s" % req, destination, relay_state=relay_state)
satosa_logging(logger, logging.DEBUG, "ht_args: %s" % ht_args, context.state)
except Exception as exc:
satosa_logging(logger, logging.DEBUG, "Failed to construct the AuthnRequest for state", context.state,
exc_info=True)
raise SATOSAAuthenticationError(context.state, "Failed to construct the AuthnRequest") from exc
if self.sp.config.getattr('allow_unsolicited', 'sp') is False:
if req_id in self.outstanding_queries:
errmsg = "Request with duplicate id {}".format(req_id)
satosa_logging(logger, logging.DEBUG, errmsg, context.state)
raise SATOSAAuthenticationError(context.state, errmsg)
self.outstanding_queries[req_id] = req
context.state[self.name] = {"relay_state": relay_state}
return make_saml_response(binding, ht_args) | 0.0036 |
def in_casapy(helper, vis=None):
"""This function is run inside the weirdo casapy IPython environment! A
strange set of modules is available, and the
`pwkit.environments.casa.scripting` system sets up a very particular
environment to allow encapsulated scripting.
"""
import numpy as np, sys
from correct_ant_posns import correct_ant_posns
info = correct_ant_posns(vis, False)
if len(info) != 3 or info[0] != 0 or not len(info[1]):
helper.die('failed to fetch VLA antenna positions; got %r', info)
antenna = info[1]
parameter = info[2]
with open(helper.temppath('info.npy'), 'wb') as f:
np.save(f, antenna)
np.save(f, parameter) | 0.002841 |
def positiveint(value):
"""
:param value: input string
:returns: positive integer
"""
i = int(not_empty(value))
if i < 0:
raise ValueError('integer %d < 0' % i)
return i | 0.004878 |
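Usage sketch, assuming `not_empty` merely rejects blank input:

positiveint('42')    # -> 42
positiveint(' 7 ')   # -> 7 (int() tolerates surrounding whitespace)
positiveint('-3')    # raises ValueError: integer -3 < 0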
def create_DOM_node_from_dict(d, name, parent_node):
"""
Dumps dict data to an ``xml.etree.ElementTree.SubElement`` DOM subtree
object and attaches it to the specified DOM parent node. The created
subtree object is named after the specified name. If the supplied dict is
``None`` no DOM node is created for it as well as no DOM subnodes are
generated for eventual ``None`` values found inside the dict
:param d: the input dictionary
:type d: dict
:param name: the name for the DOM subtree to be created
:type name: str
:param parent_node: the parent DOM node the newly created subtree must be
attached to
:type parent_node: ``xml.etree.ElementTree.Element`` or derivative objects
    :returns: the created ``xml.etree.ElementTree.Element`` subtree, or
        ``None`` if ``d`` is ``None``
"""
if d is not None:
root_dict_node = ET.SubElement(parent_node, name)
for key, value in d.items():
if value is not None:
node = ET.SubElement(root_dict_node, key)
node.text = str(value)
return root_dict_node | 0.000921 |
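A hedged usage sketch: build a small document and serialize it. Element order follows dict insertion order on Python 3.7+, and the `None` value is skipped as the docstring describes.

import xml.etree.ElementTree as ET

root = ET.Element('weather')
create_DOM_node_from_dict({'temp': 21.5, 'humidity': None, 'wind': 3},
                          'observation', root)
ET.tostring(root)
# -> b'<weather><observation><temp>21.5</temp><wind>3</wind></observation></weather>'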
def create_object(self, subject_id, image_group_id, properties, fmri_data_id=None):
"""Create an experiment object for the subject and image group. Objects
are referenced by their identifier. The reference to a functional data
object is optional.
Raises ValueError if no valid experiment name is given in property list.
Parameters
----------
subject_id : string
Unique identifier of subject
image_group_id : string
Unique identifier of image group
properties : Dictionary
Set of experiment properties. Is required to contain at least the
experiment name
fmri_data_id : string, optional
Unique identifier of functional MRI data object
Returns
-------
ExperimentHandle
Handle for created experiment object in database
"""
# Ensure that experiment name is given in property list.
        if datastore.PROPERTY_NAME not in properties:
raise ValueError('missing experiment name')
elif properties[datastore.PROPERTY_NAME] is None:
raise ValueError('invalid experiment name')
# Create a new object identifier.
identifier = str(uuid.uuid4()).replace('-','')
# Create object handle and store it in database before returning it
obj = ExperimentHandle(
identifier,
properties,
subject_id,
image_group_id,
fmri_data_id=fmri_data_id
)
self.insert_object(obj)
return obj | 0.003752 |
def pivot_filter(pivot_array, predicates, ty=None):
"""
    Returns a new pivot array containing only the rows whose corresponding
    entry in the passed-in predicate set is true; the filter is applied to
    the index column and to every pivot column.
    Args:
        pivot_array (WeldObject / Numpy.ndarray): Input pivot array
predicates (WeldObject / Numpy.ndarray<bool>): Predicate set
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
pivot_array_var = weld_obj.update(pivot_array)
if isinstance(pivot_array, WeldObject):
pivot_array_var = pivot_array.obj_id
weld_obj.dependencies[pivot_array_var] = pivot_array
predicates_var = weld_obj.update(predicates)
if isinstance(predicates, WeldObject):
predicates_var = predicates.obj_id
weld_obj.dependencies[predicates_var] = predicates
weld_template = """
let index_filtered =
result(
for(
zip(%(array)s.$0, %(predicates)s),
appender,
|b, i, e| if (e.$1, merge(b, e.$0), b)
)
);
let pivot_filtered =
map(
%(array)s.$1,
|x|
result(
for(
zip(x, %(predicates)s),
appender,
|b, i, e| if (e.$1, merge(b, e.$0), b)
)
)
);
{index_filtered, pivot_filtered, %(array)s.$2}
"""
weld_obj.weld_code = weld_template % {
"array": pivot_array_var,
"predicates": predicates_var}
return weld_obj | 0.000642 |
def update(self, status=values.unset, announce_url=values.unset,
announce_method=values.unset):
"""
Update the ConferenceInstance
:param ConferenceInstance.UpdateStatus status: The new status of the resource
:param unicode announce_url: The URL we should call to announce something into the conference
        :param unicode announce_method: The HTTP method used to call announce_url
:returns: Updated ConferenceInstance
:rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance
"""
data = values.of({'Status': status, 'AnnounceUrl': announce_url, 'AnnounceMethod': announce_method, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return ConferenceInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
) | 0.007143 |
def reset_bars(self):
"""
Set all progress bars to zero and hide them.
"""
self.url_progressbar.reset()
self.url_progressbar.show([0, 0])
self.issn_progressbar.reset()
self.urlbox_error.reset()
self.issnbox_error.reset()
InputController._reset_typeaheads() | 0.006042 |
def add_partition(self, p):
"""Add a partition identity as a child of a dataset identity."""
if not self.partitions:
self.partitions = {}
self.partitions[p.vid] = p | 0.009901 |
def query(anchore_config, image, imagefile, include_allanchore, module):
"""
Image IDs can be specified as hash ids, repo names (e.g. centos), or tags (e.g. centos:latest).
Execute the specified query (module) with any parameters it requires. Modules are scripts in a specific location.
Each query has its own parameters and outputs.
Examples using pre-defined queries:
'anchore query --image nginx:latest list-packages all'
'anchore query has-package wget'
'anchore query --image nginx:latest list-files-detail all'
'anchore query cve-scan all'
"""
global config, imagelist, nav
ecode = 0
success = True
config = anchore_config
if module:
if image and imagefile:
raise click.BadOptionUsage('Can only use one of --image, --imagefile')
try:
imagedict = build_image_list(anchore_config, image, imagefile, not (image or imagefile), include_allanchore)
imagelist = imagedict.keys()
try:
ret = anchore_utils.discover_imageIds(imagelist)
except ValueError as err:
raise err
else:
#imagelist = ret.keys()
imagelist = ret
except Exception as err:
anchore_print_err("could not load input images")
sys.exit(1)
try:
nav = init_nav_contexts()
result = nav.run_query(list(module))
if result:
anchore_utils.print_result(config, result)
if nav.check_for_warnings(result):
ecode = 2
except:
anchore_print_err("query operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode) | 0.004049 |
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
        try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
environment = get_spontaneous_environment(
options.get('block_start_string', BLOCK_START_STRING),
options.get('block_end_string', BLOCK_END_STRING),
options.get('variable_start_string', VARIABLE_START_STRING),
options.get('variable_end_string', VARIABLE_END_STRING),
options.get('comment_start_string', COMMENT_START_STRING),
options.get('comment_end_string', COMMENT_END_STRING),
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
str(options.get('trim_blocks', TRIM_BLOCKS)).lower() in \
('1', 'on', 'yes', 'true'),
NEWLINE_SEQUENCE, frozenset(extensions),
# fill with defaults so that environments are shared
        # with other spontaneous environments. The rest of the
# arguments are optimizer, undefined, finalize, autoescape,
# loader, cache size, auto reloading setting and the
# bytecode cache
True, Undefined, None, False, None, 0, False, None
)
source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError as e:
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno) | 0.001016 |
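A hedged usage sketch; the template text and keyword set here are hypothetical, and the tuple shape matches the docstring above:

from io import BytesIO

src = BytesIO(b"<p>{{ _('Hello, %(user)s!') }}</p>")
for lineno, funcname, message, comments in babel_extract(
        src, ('_', 'gettext', 'ngettext'), [], {}):
    print(lineno, funcname, message)
# -> 1 _ Hello, %(user)s!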
def get_org_smarthost(self, orgid, serverid):
"""Get an organization smarthost"""
return self.api_call(
ENDPOINTS['orgsmarthosts']['get'],
dict(orgid=orgid, serverid=serverid)) | 0.009259 |
def show(self, eq, value=None):
"""Show equation or variable array along with the names"""
        if eq in ['f', 'x']:
            key = 'unamex'
        elif eq in ['g', 'y']:
            key = 'unamey'
        else:
            raise ValueError('unknown equation or variable array: %s' % eq)
if value:
value = list(value)
else:
value = list(self.__dict__[eq])
out = ''
for name, val, idx in zip(self.system.varname.__dict__[key], value,
range(len(value))):
out += '{:20s} [{:>12.4f}] {:g}\n'.format(name, val, idx)
return out | 0.003584 |
def get_line_configuration_message(self, line_number):
"""Return the cnfLine content without id for the line.
:param int line_number: the number of the line
:rtype: bytes
:return: a cnfLine message without id as defined in :ref:`cnfLine`
"""
if line_number not in self._line_configuration_message_cache:
line_bytes = self.get_bytes(line_number)
if line_bytes is not None:
line_bytes = bytes([line_number & 255]) + line_bytes
line_bytes += bytes([self.is_last(line_number)])
line_bytes += crc8(line_bytes).digest()
self._line_configuration_message_cache[line_number] = line_bytes
del line_bytes
        # .get() so that lines whose bytes were unavailable (and therefore
        # never cached) fall through to the empty-line fallback below
        line = self._line_configuration_message_cache.get(line_number)
if line is None:
# no need to cache a lot of empty lines
line = (bytes([line_number & 255]) +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01')
line += crc8(line).digest()
return line | 0.001745 |
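The framing is: one length-limited line id byte, the payload, a last-line flag byte, and a CRC-8 trailer. A standalone sketch of that layout, assuming the same `crc8` helper used above (the pypi `crc8` package accepts initial bytes like hashlib):

from crc8 import crc8

def frame_line(line_number, payload, is_last):
    # id byte + payload + last-line flag, then the CRC-8 of everything so far
    msg = bytes([line_number & 255]) + payload + bytes([1 if is_last else 0])
    return msg + crc8(msg).digest()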
def remove_available_work_units(self, work_spec_name, work_unit_names):
'''Remove some work units in the available queue.
If `work_unit_names` is :const:`None` (which must be passed
explicitly), all available work units in `work_spec_name` are
removed; otherwise only the specific named work units will be.
:param str work_spec_name: name of the work spec
:param list work_unit_names: names of the work units, or
:const:`None` for all in `work_spec_name`
:return: number of work units removed
'''
return self._remove_some_work_units(
work_spec_name, work_unit_names, priority_max=time.time()) | 0.002903 |
def get_gene_onto(pdb_id):
"""Return ligands of given PDB_ID
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
Returns
-------
out : dict
A dictionary containing the gene ontology information associated with the entry
Examples
--------
>>> gene_info = get_gene_onto('4Z0L')
>>> print(gene_info['term'][0])
{'@chainId': 'A',
'@id': 'GO:0001516',
'@structureId': '4Z0L',
'detail': {'@definition': 'The chemical reactions and pathways resulting '
'in the formation of prostaglandins, any of a '
'group of biologically active metabolites which '
'contain a cyclopentane ring.',
'@name': 'prostaglandin biosynthetic process',
'@ontology': 'B',
'@synonyms': 'prostaglandin anabolism, prostaglandin '
'biosynthesis, prostaglandin formation, '
'prostaglandin synthesis'}}
"""
out = get_info(pdb_id, url_root = 'http://www.rcsb.org/pdb/rest/goTerms?structureId=')
out = to_dict(out)
if not out['goTerms']:
return None
out = remove_at_sign(out['goTerms'])
return out | 0.004573 |
def start_if_necessary(self):
"""
        Starts the listening and processing threads if they weren't already running.
"""
self.lock.acquire()
try:
if not self.listening:
self.init()
self.listening = True
self.listening_thread = Thread(target=self.listen)
self.listening_thread.daemon = True
self.listening_thread.start()
self.processing_thread = Thread(target=self.process)
self.processing_thread.daemon = True
self.processing_thread.start()
finally:
self.lock.release() | 0.003135 |
def checkArgs(args):
"""Checks the arguments and options.
    :param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
    :class:`sys.stderr`, and the program exits with code 1.
"""
if not os.path.isfile(args.ibs_related):
msg = "{}: no such file".format(args.ibs_related)
raise ProgramError(msg)
return True | 0.001745 |
def rescale_grad(self, scale=None, param_name=None):
""" Rescale the gradient of provided parameters by a certain scale """
if scale is None or param_name is None:
return
param_idx = self._exec_group.param_names.index(param_name)
grad_vals = self._exec_group.grad_arrays[param_idx]
for grad in grad_vals:
grad[:] *= scale | 0.005195 |
def unquote_plus(string, encoding='utf-8', errors='replace'):
"""Like unquote(), but also replace plus signs by spaces, as required for
unquoting HTML form values.
unquote_plus('%7e/abc+def') -> '~/abc def'
"""
string = string.replace('+', ' ')
return unquote(string, encoding, errors) | 0.003226 |
def _poll_update_interval(self):
""" update the polling interval to be used next iteration """
# Increase by 1 second every 3 polls
if old_div(self.poll_count, 3) > self.poll_interval_level:
self.poll_interval_level += 1
self.poll_interval_s += 1
self.logger.info(
"Increased polling interval to %d seconds", self.poll_interval_s
) | 0.00716 |
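The arithmetic yields a staircase backoff. A worked trace, assuming `poll_interval_s` starts at 1 and `poll_interval_level` at 0:

# poll_count       0 1 2 | 3 4 5 | 6 7 8
# poll_count // 3  0 0 0 | 1 1 1 | 2 2 2
# interval level   0 0 0 | 1 1 1 | 2 2 2
# interval (s)     1 1 1 | 2 2 2 | 3 3 3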
def _type_translation(self, str_type):
"""
Internal method to translate the named CRITs TLO type to a URL
specific string.
"""
if str_type == 'Indicator':
return 'indicators'
if str_type == 'Domain':
return 'domains'
if str_type == 'IP':
return 'ips'
if str_type == 'Sample':
return 'samples'
if str_type == 'Event':
return 'events'
if str_type == 'Actor':
return 'actors'
if str_type == 'Email':
return 'emails'
if str_type == 'Backdoor':
return 'backdoors'
raise CRITsInvalidTypeError('Invalid object type specified: '
'{}'.format(str_type)) | 0.002564 |
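A behavior-equivalent, more compact variant would replace the if-chain with a module-level mapping; this is a design sketch, not the library's actual code:

_TYPE_URLS = {
    'Indicator': 'indicators', 'Domain': 'domains', 'IP': 'ips',
    'Sample': 'samples', 'Event': 'events', 'Actor': 'actors',
    'Email': 'emails', 'Backdoor': 'backdoors',
}

def _type_translation(self, str_type):
    try:
        return _TYPE_URLS[str_type]
    except KeyError:
        raise CRITsInvalidTypeError('Invalid object type specified: '
                                    '{}'.format(str_type))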