text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def temp_environ():
"""Allow the ability to set os.environ temporarily"""
environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(environ) | 0.004831 |
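A minimal usage sketch follows, assuming the generator above is wrapped with `contextlib.contextmanager` (the decorator is not shown in the snippet):

```python
# Sketch only: the contextmanager decorator is an assumption not shown above.
import contextlib
import os

@contextlib.contextmanager
def temp_environ():
    """Allow os.environ to be set temporarily."""
    environ = dict(os.environ)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(environ)

with temp_environ():
    os.environ["MY_FLAG"] = "1"   # visible only inside this block
# the original environment is restored here
```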
def download(self, **kwargs):
"""If table contains Gravity Spy triggers `EventTable`
Parameters
----------
nproc : `int`, optional, default: 1
number of CPUs to use for parallel file reading
download_path : `str` optional, default: 'download'
Specify where the images end up.
download_durs : `list` optional, default: [0.5, 1.0, 2.0, 4.0]
Specify exactly which durations you want to download
default is to download all the available GSpy durations.
kwargs: Optional training_set and labelled_samples args
that will download images in a special way
./"ml_label"/"sample_type"/"image"
Returns
-------
Folder containing omega scans sorted by label
"""
from six.moves.urllib.request import urlopen
import os
# back to pandas
try:
images_db = self.to_pandas()
except ImportError as exc:
exc.args = ('pandas is required to download triggers',)
raise
# Remove any broken links
images_db = images_db.loc[images_db.url1 != '']
training_set = kwargs.pop('training_set', 0)
labelled_samples = kwargs.pop('labelled_samples', 0)
download_location = kwargs.pop('download_path',
os.path.join('download'))
duration_values = np.array([0.5, 1.0, 2.0, 4.0])
download_durs = kwargs.pop('download_durs', duration_values)
duration_idx = []
for idur in download_durs:
duration_idx.append(np.argwhere(duration_values == idur)[0][0])
duration_values = duration_values[duration_idx]
duration_values = np.array([duration_values]).astype(str)
# labelled_samples are only available when requesting the trainingset* tables
if labelled_samples:
if 'sample_type' not in images_db.columns:
raise ValueError('You have requested ml_labelled Samples '
'for a Table which does not have '
'this column. Did you fetch a '
'trainingset* table?')
# If someone wants labelled samples they are
# definitely asking for the training set but
# may have forgotten
if labelled_samples and not training_set:
training_set = 1
# Let us check what columns are needed
cols_for_download = ['url1', 'url2', 'url3', 'url4']
cols_for_download = [cols_for_download[idx] for idx in duration_idx]
cols_for_download_ext = ['ml_label', 'sample_type',
'ifo', 'gravityspy_id']
if not training_set:
images_db['ml_label'] = ''
if not labelled_samples:
images_db['sample_type'] = ''
if not os.path.isdir(download_location):
os.makedirs(download_location)
if training_set:
for ilabel in images_db.ml_label.unique():
if labelled_samples:
for itype in images_db.sample_type.unique():
if not os.path.isdir(os.path.join(
download_location,
ilabel, itype)):
os.makedirs(os.path.join(download_location,
ilabel, itype))
else:
if not os.path.isdir(os.path.join(download_location,
ilabel)):
os.makedirs(os.path.join(download_location,
ilabel))
images_for_download = images_db[cols_for_download]
images = images_for_download.as_matrix().flatten()
images_for_download_ext = images_db[cols_for_download_ext]
duration = np.atleast_2d(
duration_values.repeat(
len(images_for_download_ext), 0).flatten(
)).T
images_for_download_ext = images_for_download_ext.as_matrix(
).repeat(len(cols_for_download), 0)
images_for_for_download_path = np.array([[download_location]]).repeat(
len(images_for_download_ext), 0)
images = np.hstack((np.atleast_2d(images).T,
images_for_download_ext, duration,
images_for_for_download_path))
def get_image(url):
name = url[3] + '_' + url[4] + '_spectrogram_' + url[5] + '.png'
outfile = os.path.join(url[6], url[1], url[2], name)
with open(outfile, 'wb') as fout:
fout.write(urlopen(url[0]).read())
# calculate maximum number of processes
nproc = min(kwargs.pop('nproc', 1), len(images))
# define multiprocessing method
def _download_single_image(url):
try:
return url, get_image(url)
except Exception as exc: # pylint: disable=broad-except
if nproc == 1:
raise
else:
return url, exc
# read files
output = mp_utils.multiprocess_with_queues(
nproc, _download_single_image, images)
# raise exceptions (from multiprocessing, single process raises inline)
for f, x in output:
if isinstance(x, Exception):
x.args = ('Failed to read %s: %s' % (f, str(x)),)
raise x | 0.000352 |
def inspect(self, w):
""" Get the value of a wirevector in the last simulation cycle.
:param w: the name of the WireVector to inspect
(passing in a WireVector instead of a name is deprecated)
:return: value of w in the current step of simulation
Will throw KeyError if w does not exist in the simulation.
"""
wire = self.block.wirevector_by_name.get(w, w)
return self.value[wire] | 0.004454 |
def _encode_label(label):
"""Convert a tuple label into a list. Works recursively on nested tuples."""
if isinstance(label, tuple):
return [_encode_label(v) for v in label]
return label | 0.009756 |
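A quick, self-contained check of the recursion (the helper is restated so the example runs on its own):

```python
def _encode_label(label):
    # tuples become lists, recursively; everything else passes through
    if isinstance(label, tuple):
        return [_encode_label(v) for v in label]
    return label

print(_encode_label(("a", ("b", "c"), 1)))  # -> ['a', ['b', 'c'], 1]
```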
def current(cls, with_exception=True):
"""
Returns the current database context.
"""
if with_exception and len(cls.stack) == 0:
raise NoContext()
return cls.stack.top() | 0.009091 |
def deleteuser(self, user_id):
"""
Deletes a user. Available only for administrators.
This is an idempotent function: calling it for a non-existent user id
still returns a status code 200 OK.
The JSON response differs depending on whether the user was actually deleted:
in the former case the user is returned, in the latter it is not.
.. warning:: This method is deprecated; please use :func:`gitlab.Gitlab.delete_user` instead.
:param user_id: The ID of the user
:return: True if it deleted, False if it couldn't
"""
deleted = self.delete_user(user_id)
if deleted is False:
return False
else:
return True | 0.00551 |
def dnsrepr2names(x):
"""
Take as input a DNS encoded string (possibly compressed)
and returns a list of DNS names contained in it.
If the provided string is already in printable format
(does not end with a null character), a one-element list
is returned. The result is always a list.
"""
res = []
cur = b""
while x:
tmp_len = orb(x[0])
x = x[1:]
if not tmp_len:
if cur and cur[-1:] == b'.':
cur = cur[:-1]
res.append(cur)
cur = b""
if x and orb(x[0]) == 0: # single component
x = x[1:]
continue
if tmp_len & 0xc0: # XXX TODO : work on that -- arno
raise Exception("DNS message can't be compressed at this point!")
cur += x[:tmp_len] + b"."
x = x[tmp_len:]
return res | 0.001176 |
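A self-contained sketch of the wire format the parser above expects; `orb` here is a stand-in for scapy's helper of the same name, and the loop is a simplified restatement without compression handling:

```python
def orb(b):
    # stand-in for scapy's orb: byte value -> int
    return b if isinstance(b, int) else ord(b)

def parse_names(x):
    # simplified version of the loop above (no compression pointers)
    res, cur = [], b""
    while x:
        n, x = orb(x[0]), x[1:]
        if not n:
            res.append(cur[:-1])  # drop the trailing dot
            cur = b""
            continue
        cur, x = cur + x[:n] + b".", x[n:]
    return res

# each label is length-prefixed and every name ends with a null byte
print(parse_names(b"\x07example\x03com\x00\x03www\x07example\x03com\x00"))
# [b'example.com', b'www.example.com']
```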
def connect_to_database_odbc_sqlserver(self,
odbc_connection_string: str = None,
dsn: str = None,
database: str = None,
user: str = None,
password: str = None,
server: str = "localhost",
driver: str = "{SQL Server}",
autocommit: bool = True) -> None:
"""Connects to an SQL Server database via ODBC."""
self.connect(engine=ENGINE_SQLSERVER, interface=INTERFACE_ODBC,
odbc_connection_string=odbc_connection_string,
dsn=dsn,
database=database, user=user, password=password,
host=server, driver=driver,
autocommit=autocommit) | 0.010142 |
def get_hostname(ip_addr, cl_args):
'''
get host name of remote host
'''
if is_self(ip_addr):
return get_self_hostname()
cmd = "hostname"
ssh_cmd = ssh_remote_execute(cmd, ip_addr, cl_args)
pid = subprocess.Popen(ssh_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return_code = pid.wait()
output = pid.communicate()
if return_code != 0:
Log.error("Failed to get hostname for remote host %s with output:\n%s" % (ip_addr, output))
sys.exit(-1)
return output[0].strip("\n") | 0.018395 |
def save_image(self, outname):
"""
Save the image data.
This is probably only useful if the image data has been blanked.
Parameters
----------
outname : str
Name for the output file.
"""
hdu = self.global_data.img.hdu
hdu.data = self.global_data.img._pixels
hdu.header["ORIGIN"] = "Aegean {0}-({1})".format(__version__, __date__)
# delete some axes that we aren't going to need
for c in ['CRPIX3', 'CRPIX4', 'CDELT3', 'CDELT4', 'CRVAL3', 'CRVAL4', 'CTYPE3', 'CTYPE4']:
if c in hdu.header:
del hdu.header[c]
hdu.writeto(outname, overwrite=True)
self.log.info("Wrote {0}".format(outname))
return | 0.003979 |
def export_wavefront(mesh,
include_normals=True,
include_texture=True):
"""
Export a mesh as a Wavefront OBJ file
Parameters
-----------
mesh: Trimesh object
Returns
-----------
export: str, string of OBJ format output
"""
# store the multiple options for formatting
# a vertex index for a face
face_formats = {('v',): '{}',
('v', 'vn'): '{}//{}',
('v', 'vt'): '{}/{}',
('v', 'vn', 'vt'): '{}/{}/{}'}
# we are going to reference face_formats with this
face_type = ['v']
export = 'v '
export += util.array_to_string(mesh.vertices,
col_delim=' ',
row_delim='\nv ',
digits=8) + '\n'
if include_normals and 'vertex_normals' in mesh._cache:
# if vertex normals are stored in cache export them
# these will have been autogenerated if they have ever been called
face_type.append('vn')
export += 'vn '
export += util.array_to_string(mesh.vertex_normals,
col_delim=' ',
row_delim='\nvn ',
digits=8) + '\n'
if (include_texture and
'vertex_texture' in mesh.metadata and
len(mesh.metadata['vertex_texture']) == len(mesh.vertices)):
# if vertex texture exists and is the right shape export here
face_type.append('vt')
export += 'vt '
export += util.array_to_string(mesh.metadata['vertex_texture'],
col_delim=' ',
row_delim='\nvt ',
digits=8) + '\n'
# the format for a single vertex reference of a face
face_format = face_formats[tuple(face_type)]
faces = 'f ' + util.array_to_string(mesh.faces + 1,
col_delim=' ',
row_delim='\nf ',
value_format=face_format)
# add the exported faces to the export
export += faces
return export | 0.000441 |
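A hedged usage sketch: the high-level `mesh.export(file_type='obj')` call shown here is assumed to route to an OBJ exporter like the one above, and it requires the `trimesh` package.

```python
import trimesh

# build a unit box and serialise it to Wavefront OBJ text
mesh = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
obj_text = mesh.export(file_type="obj")
with open("box.obj", "w") as handle:
    handle.write(obj_text)
```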
def _spiral(width, height):
"""Spiral generator.
Parameters
----------
width : `int`
Spiral width.
height : `int`
Spiral height.
Returns
-------
`generator` of (`int`, `int`)
Points.
"""
if width == 1:
for y in range(height - 1, -1, -1):
yield 0, y
return
if height == 1:
for x in range(width - 1, -1, -1):
yield x, 0
return
if width <= height:
x0 = width // 2
if width % 2:
for y in range(height - 1 - x0, x0 - 1, -1):
yield x0, y
x0 -= 1
y0 = x0
else:
y0 = height // 2
if height % 2:
for x in range(width - 1 - y0, y0 - 1, -1):
yield x, y0
y0 -= 1
x0 = y0
while x0 >= 0:
x1 = width - x0 - 1
y1 = height - y0 - 1
for y in range(y0 + 1, y1):
yield x0, y
for x in range(x0, x1):
yield x, y1
for y in range(y1, y0, -1):
yield x1, y
for x in range(x1, x0 - 1, -1):
yield x, y0
x0 -= 1
y0 -= 1 | 0.001479 |
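A quick check of the walk for a small grid, assuming `_spiral` as defined above is in scope; it should visit every cell of a 3x3 grid exactly once.

```python
points = list(_spiral(3, 3))
assert len(points) == 9 and len(set(points)) == 9
print(points)
# [(1, 1), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0), (1, 0), (0, 0)]
```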
def set_version(self, version):
"""
Set the version subfield (RFC 2459, section 4.1.2.1) of the certificate
request.
:param int version: The version number.
:return: ``None``
"""
set_result = _lib.X509_REQ_set_version(self._req, version)
_openssl_assert(set_result == 1) | 0.00597 |
def void(self):
''' Voids the invoice if it is valid to do so. '''
if self.invoice.total_payments() > 0:
raise ValidationError("Invoices with payments must be refunded.")
elif self.invoice.is_refunded:
raise ValidationError("Refunded invoices may not be voided.")
if self.invoice.is_paid:
self._release_cart()
self._mark_void() | 0.00495 |
def as_bool(self, key):
"""
Express given key's value as a boolean type.
Typically, this is used for ``ssh_config``'s pseudo-boolean values
which are either ``"yes"`` or ``"no"``. In such cases, ``"yes"`` yields
``True`` and any other value becomes ``False``.
.. note::
If (for whatever reason) the stored value is already boolean in
nature, it's simply returned.
.. versionadded:: 2.5
"""
val = self[key]
if isinstance(val, bool):
return val
return val.lower() == "yes" | 0.00335 |
def read_until(self, s, echo=None):
"""
Read until a certain string is encountered.
Args:
s(bytes): The string to wait for.
echo(bool): Whether to write the read data to stdout.
Returns:
bytes: The data up to and including *s*.
Raises:
EOFError: If the channel was closed.
"""
s_len = len(s)
buf = self.read(s_len, echo)
while buf[-s_len:] != s:
buf += self.read(1, echo)
return buf | 0.003781 |
def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
"""
if not self._is_open:
raise IOError('Not opened.')
return self._fsapfs_file_entry.read(size=size) | 0.003478 |
def generate_window(length, window=None, dtype='float64'):
"""Generate a time-domain window for use in a LAL FFT
Parameters
----------
length : `int`
length of window in samples.
window : `str`, `tuple`
name of window to generate, default: ``('kaiser', 24)``. Give
`str` for simple windows, or tuple of ``(name, *args)`` for
complicated windows
dtype : :class:`numpy.dtype`
numeric type of window, default `numpy.dtype(numpy.float64)`
Returns
-------
`window` : `REAL8Window` or similar
time-domain window to use for FFT
"""
from ...utils.lal import (find_typed_function, to_lal_type_str)
if window is None:
window = ('kaiser', 24)
# generate key for caching window
laltype = to_lal_type_str(dtype)
key = (length, str(window), laltype)
# find existing window
try:
return LAL_WINDOWS[key]
# or create one
except KeyError:
# parse window as name and arguments, e.g. ('kaiser', 24)
if isinstance(window, (list, tuple)):
window, beta = window
else:
beta = 0
window = canonical_name(window)
# create window
create = find_typed_function(dtype, 'CreateNamed', 'Window')
LAL_WINDOWS[key] = create(window, beta, length)
return LAL_WINDOWS[key] | 0.00073 |
def patch_jwt_settings():
"""Patch rest_framework_jwt authentication settings from allauth"""
defaults = api_settings.defaults
defaults['JWT_PAYLOAD_GET_USER_ID_HANDLER'] = (
__name__ + '.get_user_id_from_payload_handler')
if 'allauth.socialaccount' not in settings.INSTALLED_APPS:
return
from allauth.socialaccount.models import SocialApp
try:
app = SocialApp.objects.get(provider='helsinki')
except SocialApp.DoesNotExist:
return
defaults['JWT_SECRET_KEY'] = app.secret
defaults['JWT_AUDIENCE'] = app.client_id | 0.001715 |
def form_invalid(self, form):
"""This is what's called when the form is invalid."""
ip = get_user_ip(self.request)
if settings.CONTACT_FORM_USE_SIGNALS:
contact_form_invalid.send(
sender=self,
event=self.invalid_event,
ip=ip,
site=self.site,
sender_name=form['sender_name'],
sender_email=form['sender_email']
)
return super(ContactFormView, self).form_invalid(form) | 0.003861 |
def all_function_definitions(function):
"""
Obtains a list of representing this function and any base definitions
:param function: The function to obtain all definitions at and beneath.
:return: Returns a list composed of the provided function definition and any base definitions.
"""
return [function] + [f for c in function.contract.inheritance
for f in c.functions_and_modifiers_not_inherited
if f.full_name == function.full_name] | 0.003945 |
def prep_bam_inputs(out_dir, sample, call_file, bam_file):
"""Prepare expected input BAM files from pre-aligned.
"""
base = utils.splitext_plus(os.path.basename(bam_file))[0]
with open(call_file) as in_handle:
for cur_hla in (x.strip() for x in in_handle):
out_file = os.path.join(utils.safe_makedir(os.path.join(out_dir, base)),
"%s.type.%s.filtered.bam" % (base, cur_hla))
if not os.path.exists(out_file):
cmd = ["samtools", "view", "-b","-o", out_file, bam_file, cur_hla]
subprocess.check_call(cmd) | 0.008104 |
def parent(self, parent_object, limit_parent_language=True):
"""
Return all content items which are associated with a given parent object.
"""
lookup = get_parent_lookup_kwargs(parent_object)
# Filter the items by default, giving the expected "objects for this parent" items
# when the parent already holds the language state.
if limit_parent_language:
language_code = get_parent_language_code(parent_object)
if language_code:
lookup['language_code'] = language_code
return self.filter(**lookup) | 0.006656 |
def insert_empty_rows(self, y: int, amount: int = 1) -> None:
"""Insert a number of rows after the given row."""
def transform_rows(
column: Union[int, float],
row: Union[int, float]
) -> Tuple[Union[int, float], Union[int, float]]:
return column, row + (amount if row >= y else 0)
self._transform_coordinates(transform_rows) | 0.004988 |
def do_op(field, op, value):
''' used for comparisons '''
if op==NOOP:
return True
if field==None:
if value==None:
return True
else:
return False
if value==None:
return False
if op==LESS:
return (field < value)
if op==LESSorEQUAL:
return (field <= value)
if op==GREATERorEQUAL:
return (field >= value)
if op==GREATER:
return (field > value)
# for the EQUAL and NOT_EQUAL conditions, additional factors are considered.
# for EQUAL,
# if they don't match AND the types don't match,
# then the STR of the field and value is also tried
if op==EQUAL:
if (field == value):
return True
if type(field)==type(value):
return False
try:
field = str(field)
value = str(value)
return (field == value)
except:
return False
# for NOT_EQUAL,
# if they match, then report False
# if they don't match AND the types don't match,
# then the STR equivalents are also tried.
if op==NOT_EQUAL:
if (field == value):
return False
if type(field)==type(value):
return True
try:
field = str(field)
value = str(value)
return (field != value)
except:
return True
return False | 0.01324 |
def register_by_email(self, email, sender=None, request=None, **kwargs):
"""
Returns a User object filled with dummy data and not active, and sends
an invitation email.
"""
try:
user = self.user_model.objects.get(email=email)
except self.user_model.DoesNotExist:
user = self.user_model.objects.create(
username=self.get_username(),
email=email,
password=self.user_model.objects.make_random_password(),
)
user.is_active = False
user.save()
self.send_activation(user, sender, **kwargs)
return user | 0.002999 |
def attach_file(self, path, mimetype=None):
"""Attache a file from the filesystem."""
filename = os.path.basename(path)
content = open(path, "rb").read()
self.attach(filename, content, mimetype) | 0.008696 |
def guess_process(query_str, process_map):
"""
Function to guess processes based on strings that are not available in
process_map. If the string has typos and is somewhat similar (50%) to any
process available in flowcraft it will print info to the terminal,
suggesting the most similar processes available in flowcraft.
Parameters
----------
query_str: str
The string of the process with potential typos
process_map:
The dictionary that contains all the available processes
"""
save_list = []
# loops between the processes available in process_map
for process in process_map:
similarity = SequenceMatcher(None, process, query_str)
# checks if similarity between the process and the query string is
# higher than 50%
if similarity.ratio() > 0.5:
save_list.append(process)
# checks if any process is stored in save_list
if save_list:
logger.info(colored_print(
"Maybe you meant:\n\t{}".format("\n\t".join(save_list)), "white"))
logger.info(colored_print("Hint: check the available processes by using "
"the '-l' or '-L' flag.", "white")) | 0.000823 |
def generate_template_arc_tree(self):
'''
Generate a seven point template in this arc. Arc must be empty.
'''
arc_root = self.arc_root_node
if not arc_root:
arc_root = ArcElementNode.add_root(
arc_element_type='root',
description='root of arc %s' % self.name,
arc=self
)
if arc_root.get_children():
raise ArcIntegrityError(_("This arc already has elements. You cannot build a template on top of it"))
for key, value in ARC_NODE_ELEMENT_DEFINITIONS.items():
if value['milestone']:
arc_root.add_child(arc_element_type=key, description=value['template_description'])
arc_root.refresh_from_db()
return ArcElementNode.objects.get(pk=arc_root.pk).get_children().count() | 0.005848 |
def add_disk_encryption_password(self, id_p, password, clear_on_suspend):
"""Adds a password used for hard disk encryption/decryption.
in id_p of type str
The identifier used for the password. Must match the identifier
used when the encrypted medium was created.
in password of type str
The password.
in clear_on_suspend of type bool
Flag whether to clear the password on VM suspend (due to a suspending host
for example). The password must be supplied again before the VM can resume.
raises :class:`VBoxErrorPasswordIncorrect`
The password provided wasn't correct for at least one disk using the provided
ID.
"""
if not isinstance(id_p, basestring):
raise TypeError("id_p can only be an instance of type basestring")
if not isinstance(password, basestring):
raise TypeError("password can only be an instance of type basestring")
if not isinstance(clear_on_suspend, bool):
raise TypeError("clear_on_suspend can only be an instance of type bool")
self._call("addDiskEncryptionPassword",
in_p=[id_p, password, clear_on_suspend]) | 0.007212 |
def getrdfdata():
"""Downloads Project Gutenberg RDF catalog.
Yields:
xml.etree.ElementTree.Element: An etext meta-data definition.
"""
if not os.path.exists(RDFFILES):
_, _ = urllib.urlretrieve(RDFURL, RDFFILES)
with tarfile.open(RDFFILES) as archive:
for tarinfo in archive:
yield ElementTree.parse(archive.extractfile(tarinfo)) | 0.02907 |
def add_resource(
self,
base_rule,
base_view,
alternate_view=None,
alternate_rule=None,
id_rule=None,
app=None,
):
"""Add route or routes for a resource.
:param str base_rule: The URL rule for the resource. This will be
prefixed by the API prefix.
:param base_view: Class-based view for the resource.
:param alternate_view: If specified, an alternate class-based view for
the resource. Usually, this will be a detail view, when the base
view is a list view.
:param alternate_rule: If specified, the URL rule for the alternate
view. This will be prefixed by the API prefix. This is mutually
exclusive with id_rule, and must not be specified if alternate_view
is not specified.
:type alternate_rule: str or None
:param id_rule: If specified, a suffix to append to base_rule to get
the alternate view URL rule. If alternate_view is specified, and
alternate_rule is not, then this defaults to '<id>'. This is
mutually exclusive with alternate_rule, and must not be specified
if alternate_view is not specified.
:type id_rule: str or None
:param app: If specified, the application to which to add the route(s).
Otherwise, this will be the bound application, if present.
"""
if alternate_view:
if not alternate_rule:
id_rule = id_rule or DEFAULT_ID_RULE
alternate_rule = posixpath.join(base_rule, id_rule)
else:
assert id_rule is None
else:
assert alternate_rule is None
assert id_rule is None
app = self._get_app(app)
endpoint = self._get_endpoint(base_view, alternate_view)
# Store the view rules for reference. Doesn't support multiple routes
# mapped to same view.
views = app.extensions['resty'].views
base_rule_full = '{}{}'.format(self.prefix, base_rule)
base_view_func = base_view.as_view(endpoint)
if not alternate_view:
app.add_url_rule(base_rule_full, view_func=base_view_func)
views[base_view] = Resource(base_view, base_rule_full)
return
alternate_rule_full = '{}{}'.format(self.prefix, alternate_rule)
alternate_view_func = alternate_view.as_view(endpoint)
@functools.wraps(base_view_func)
def view_func(*args, **kwargs):
if flask.request.url_rule.rule == base_rule_full:
return base_view_func(*args, **kwargs)
else:
return alternate_view_func(*args, **kwargs)
app.add_url_rule(
base_rule_full, view_func=view_func, endpoint=endpoint,
methods=base_view.methods,
)
app.add_url_rule(
alternate_rule_full, view_func=view_func, endpoint=endpoint,
methods=alternate_view.methods,
)
views[base_view] = Resource(base_view, base_rule_full)
views[alternate_view] = Resource(alternate_view, alternate_rule_full) | 0.000942 |
def map(cls, x, palette, limits, na_value=None):
"""
Map values to a discrete palette
Parameters
----------
palette : callable ``f(x)``
palette to use
x : array_like
Continuous values to scale
na_value : object
Value to use for missing values.
Returns
-------
out : array_like
Values mapped onto a palette
"""
n = len(limits)
pal = palette(n)[match(x, limits)]
try:
pal[pd.isnull(x)] = na_value
except TypeError:
pal = [v if not pd.isnull(v) else na_value for v in pal]
return pal | 0.002928 |
def has_edge_citation(self, u: BaseEntity, v: BaseEntity, key: str) -> bool:
"""Check if the given edge has a citation."""
return self._has_edge_attr(u, v, key, CITATION) | 0.010753 |
def get(self, key, defaultValue=None):
"""Get the configured value for some key, or return a default otherwise."""
if defaultValue is None: # Py4J doesn't call the right get() if we pass None
if self._jconf is not None:
if not self._jconf.contains(key):
return None
return self._jconf.get(key)
else:
if key not in self._conf:
return None
return self._conf[key]
else:
if self._jconf is not None:
return self._jconf.get(key, defaultValue)
else:
return self._conf.get(key, defaultValue) | 0.005772 |
def poisson_source(rate, iterable, target):
"""Send events at random times with uniform probability.
Args:
rate: The average number of events to send per second.
iterable: A series of items which will be sent to the target one by one.
target: The target coroutine or sink.
Returns:
An iterator over any remaining items.
"""
if rate <= 0.0:
raise ValueError("poisson_source rate {} is not positive".format(rate))
it = iter(iterable)
for item in it:
duration = random.expovariate(rate)
sleep(duration)
try:
target.send(item)
except StopIteration:
return prepend(item, it)
return empty_iter() | 0.002778 |
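The inter-arrival sampling used above can be sanity-checked on its own: `random.expovariate(rate)` draws exponential gaps whose mean is 1/rate.

```python
import random

rate = 5.0  # events per second
gaps = [random.expovariate(rate) for _ in range(10_000)]
print(sum(gaps) / len(gaps))  # should be close to 1 / rate = 0.2
```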
def stretch_pattern(image_source):
"""!
@brief Returns stretched content as 1-dimension (gray colored) matrix with size of input image.
@param[in] image_source (Image): PIL Image instance.
@return (list, Image) Stretched image as gray colored matrix and source image.
"""
wsize, hsize = image_source.size
# Crop digit exactly
(ws, hs, we, he) = gray_pattern_borders(image_source)
image_source = image_source.crop((ws, hs, we, he))
# Stretch it to initial sizes
image_source = image_source.resize((wsize, hsize), Image.ANTIALIAS)
# Transform image to simple array
data = [pixel for pixel in image_source.getdata()]
image_pattern = rgb2gray(data)
return (image_pattern, image_source) | 0.021118 |
def convert_context_to_csv(self, context):
"""Convert the context dictionary into a CSV file."""
content = []
date_headers = context['date_headers']
headers = ['Name']
headers.extend([date.strftime('%m/%d/%Y') for date in date_headers])
headers.append('Total')
content.append(headers)
summaries = context['summaries']
summary = summaries.get(self.export, [])
for rows, totals in summary:
for name, user_id, hours in rows:
data = [name]
data.extend(hours)
content.append(data)
total = ['Totals']
total.extend(totals)
content.append(total)
return content | 0.002703 |
def get_resource_inst(self, path, environ):
"""Return info dictionary for path.
See get_resource_inst()
"""
# TODO: calling exists() makes directory browsing VERY slow.
# At least compared to PyFileServer, which simply used string
# functions to get display_type and displayTypeComment
self._count_get_resource_inst += 1
if not self.exists(path, environ):
return None
_tableName, primKey = self._split_path(path)
is_collection = primKey is None
return MySQLBrowserResource(self, path, is_collection, environ) | 0.003221 |
def com_google_fonts_check_vendor_id(ttFont, registered_vendor_ids):
"""Checking OS/2 achVendID."""
SUGGEST_MICROSOFT_VENDORLIST_WEBSITE = (
" You should set it to your own 4 character code,"
" and register that code with Microsoft at"
" https://www.microsoft.com"
"/typography/links/vendorlist.aspx")
vid = ttFont['OS/2'].achVendID
bad_vids = ['UKWN', 'ukwn', 'PfEd']
if vid is None:
yield WARN, Message("not set", "OS/2 VendorID is not set." +
SUGGEST_MICROSOFT_VENDORLIST_WEBSITE)
elif vid in bad_vids:
yield WARN, Message("bad", ("OS/2 VendorID is '{}',"
" a font editor default.").format(vid) +
SUGGEST_MICROSOFT_VENDORLIST_WEBSITE)
elif vid not in registered_vendor_ids.keys():
yield WARN, Message("unknown", ("OS/2 VendorID value '{}' is not"
" a known registered id.").format(vid) +
SUGGEST_MICROSOFT_VENDORLIST_WEBSITE)
else:
yield PASS, f"OS/2 VendorID '{vid}' looks good!" | 0.010889 |
def client_info(self, client):
"""
Get client info. Uses GET to /clients/<client> interface.
:Args:
* *client*: (str) Client's ID
:Returns: (dict) Client dictionary
"""
client = self._client_id(client)
response = self._get(url.clients_id.format(id=client))
self._check_response(response, 200)
return self._create_response(response) | 0.004785 |
def build(self, builder):
"""Build XML by appending to builder"""
params = dict(SubjectKey=self.subject_key)
params['mdsol:SubjectKeyType'] = self.subject_key_type
if self.transaction_type is not None:
params["TransactionType"] = self.transaction_type
# mixins
self.mixin()
self.mixin_params(params)
builder.start("SubjectData", params)
# Ask children
if self.audit_record is not None:
self.audit_record.build(builder)
if self.siteref:
self.siteref.build(builder)
else:
builder.start("SiteRef", {'LocationOID': self.sitelocationoid})
builder.end("SiteRef")
for event in self.study_events:
event.build(builder)
if self.signature is not None:
self.signature.build(builder)
for annotation in self.annotations:
annotation.build(builder)
builder.end("SubjectData") | 0.002016 |
def compute_distance(self, other, default=None):
"""
Compute the minimal distance between the line string and `other`.
Parameters
----------
other : tuple of number \
or imgaug.augmentables.kps.Keypoint \
or imgaug.augmentables.LineString
Other object to which to compute the distance.
default
Value to return if this line string or `other` contain no points.
Returns
-------
float
Distance to `other`, or `default` if no distance could be computed.
"""
# FIXME this computes distance pointwise, does not have to be identical
# with the actual min distance (e.g. edge center to other's point)
distances = self.compute_pointwise_distances(other, default=[])
if len(distances) == 0:
return default
return min(distances) | 0.003236 |
def video(request, obj_id):
"""Handles a request based on method and calls the appropriate function"""
obj = Video.objects.get(pk=obj_id)
if request.method == 'POST':
return post(request, obj)
elif request.method == 'PUT':
getPutData(request)
return put(request, obj)
elif request.method == 'DELETE':
getPutData(request)
return delete(request, obj) | 0.002451 |
def singleton(*args, **kwargs):
'''
a lazy init singleton pattern.
usage:
``` py
@singleton()
class X: ...
```
`args` and `kwargs` will pass to ctor of `X` as args.
'''
def decorator(cls: type) -> Callable[[], object]:
if issubclass(type(cls), _SingletonMetaClassBase):
raise TypeError('cannot inherit from another singleton class.')
box = _Box()
factory = None
lock = Lock()
def metaclass_call(_):
if box.value is None:
with lock:
if box.value is None:
instance = cls(*args, **kwargs)
instance.__class__ = factory
box.value = (instance, ) # use tuple to handle `cls()` return `None`
return box.value[0]
def _is_init(*_):
return box.value is not None
SingletonMetaClass = type('SingletonMetaClass', (type(cls), _SingletonMetaClassBase), {
'__slots__': (),
'__call__': metaclass_call
})
factory = SingletonMetaClass(cls.__name__, (cls, ), {
'__slots__': (),
'_is_init': _is_init
})
return update_wrapper(factory, cls, updated=())
return decorator | 0.003098 |
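A behavioural sketch, assuming `singleton` (and its private helpers such as `_Box` and `_SingletonMetaClassBase`) is importable from the module above:

```python
@singleton()
class Config:
    def __init__(self):
        self.loaded = True

a = Config()
b = Config()
assert a is b                         # every "construction" returns the one lazily-built instance
assert isinstance(a, Config) and a.loaded
```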
def edit(
request,
slug,
rev_id=None,
template_name='wakawaka/edit.html',
extra_context=None,
wiki_page_form=WikiPageForm,
wiki_delete_form=DeleteWikiPageForm,
):
"""
Displays the form for editing and deleting a page.
"""
# Get the page for slug and get a specific revision, if given
try:
queryset = WikiPage.objects.all()
page = queryset.get(slug=slug)
rev = page.current
initial = {'content': page.current.content}
# Do not allow editing wiki pages if the user has no permission
if not request.user.has_perms(
('wakawaka.change_wikipage', 'wakawaka.change_revision')
):
return HttpResponseForbidden(
ugettext('You don\'t have permission to edit pages.')
)
if rev_id:
# There is a specific revision, fetch this
rev_specific = Revision.objects.get(pk=rev_id)
if rev.pk != rev_specific.pk:
rev = rev_specific
rev.is_not_current = True
initial = {
'content': rev.content,
'message': _('Reverted to "%s"' % rev.message),
}
# This page does not exist, create a dummy page
# Note that it's not saved here
except WikiPage.DoesNotExist:
# Do not allow adding wiki pages if the user has no permission
if not request.user.has_perms(
('wakawaka.add_wikipage', 'wakawaka.add_revision')
):
return HttpResponseForbidden(
ugettext('You don\'t have permission to add wiki pages.')
)
page = WikiPage(slug=slug)
page.is_initial = True
rev = None
initial = {
'content': _('Describe your new page %s here...' % slug),
'message': _('Initial revision'),
}
# Don't display the delete form if the user has no permission
delete_form = None
# The user has permission, then do
if request.user.has_perm(
'wakawaka.delete_wikipage'
) or request.user.has_perm('wakawaka.delete_revision'):
delete_form = wiki_delete_form(request)
if request.method == 'POST' and request.POST.get('delete'):
delete_form = wiki_delete_form(request, request.POST)
if delete_form.is_valid():
return delete_form.delete_wiki(request, page, rev)
# Page add/edit form
form = wiki_page_form(initial=initial)
if request.method == 'POST':
form = wiki_page_form(data=request.POST)
if form.is_valid():
# Check if the content is changed, except there is a rev_id and the
# user possibly only reverted the HEAD to it
if (
not rev_id
and initial['content'] == form.cleaned_data['content']
):
form.errors['content'] = (_('You have made no changes!'),)
# Save the form and redirect to the page view
else:
try:
# Check that the page already exist
queryset = WikiPage.objects.all()
page = queryset.get(slug=slug)
except WikiPage.DoesNotExist:
# Must be a new one, create that page
page = WikiPage(slug=slug)
page.save()
form.save(request, page)
kwargs = {'slug': page.slug}
redirect_to = reverse('wakawaka_page', kwargs=kwargs)
messages.success(
request,
ugettext('Your changes to %s were saved' % page.slug),
)
return HttpResponseRedirect(redirect_to)
template_context = {
'form': form,
'delete_form': delete_form,
'page': page,
'rev': rev,
}
template_context.update(extra_context or {})
return render(request, template_name, template_context) | 0.000249 |
def extract_all_gold_standard_data(data_dir, nprocesses=1,
overwrite=False, **kwargs):
"""
Extract the gold standard block-level content and comment percentages from a
directory of labeled data (only those for which the gold standard blocks are
not found), and save results to corresponding files in a block-level
gold standard directory under ``data_dir``.
Args:
data_dir (str): Directory on disk containing subdirectories for all
training data, including raw html files and gold standard content +
comments text files
nprocesses (int): If > 1, use a :class:`multiprocessing.Pool` to
parallelize the extractions
overwrite (bool): If True, overwrite existing gold-standard blocks files.
**kwargs: passed into :func:`extract_gold_standard_blocks`
See Also:
:func:`extract_gold_standard_blocks`
"""
use_pool = nprocesses > 1
if use_pool:
pool = multiprocessing.Pool(processes=nprocesses)
# get the set of files that have already been block corrected
# so that we don't block correct them again
if overwrite is False:
gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME)
if not os.path.isdir(gs_blocks_dir):
os.mkdir(gs_blocks_dir)
gs_blocks_filenames = get_filenames(
gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT))
gs_blocks_fileroots = {
re.search(r'(.+)' + re.escape(GOLD_STANDARD_BLOCKS_EXT), gs_blocks_filename).group(1)
for gs_blocks_filename in gs_blocks_filenames}
else:
gs_blocks_fileroots = set()
# extract the block-level gold parse from
# the set of files to be block corrected
gs_dir = os.path.join(data_dir, GOLD_STANDARD_DIRNAME)
gs_filenames = get_filenames(
gs_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_EXT))
for i, gs_filename in enumerate(gs_filenames):
gs_fileroot = re.search(r'(.+)' + re.escape(GOLD_STANDARD_EXT), gs_filename).group(1)
if gs_fileroot in gs_blocks_fileroots:
continue
if i % 100 == 0:
print('Extracting gold standard blocks for file "{}"'.format(gs_filename))
if use_pool:
pool.apply_async(extract_gold_standard_blocks, (data_dir, gs_fileroot), kwargs)
else:
extract_gold_standard_blocks(data_dir, gs_fileroot, **kwargs)
# close out our pool
if use_pool:
pool.close()
pool.join() | 0.003476 |
def ApplyEdits(self, adds=None, updates=None, deletes=None):
"""This operation adds, updates and deletes features to the associated
feature layer or table in a single call (POST only). The apply edits
operation is performed on a feature service layer resource. The
result of this operation are 3 arrays of edit results (for adds,
updates and deletes respectively). Each edit result identifies a
single feature and indicates whether the edit was successful or not. If
not, it also includes an error code and an error description."""
add_str, update_str = None, None
if adds:
add_str = ",".join(json.dumps(
feature._json_struct_for_featureset)
for feature in adds)
if updates:
update_str = ",".join(json.dumps(
feature._json_struct_for_featureset)
for feature in updates)
return self._get_subfolder("./applyEdits", JsonPostResult,
{'adds':
add_str,
'updates':
update_str,
'deletes':
deletes
}) | 0.007286 |
def get_equivalent_atoms(self, tolerance=0.3):
"""Returns sets of equivalent atoms with symmetry operations
Args:
tolerance (float): Tolerance to generate the full set of symmetry
operations.
Returns:
dict: The returned dictionary has two possible keys:
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
that maps atom ``i`` unto ``j``.
"""
PA = self._get_point_group_analyzer(tolerance=tolerance)
eq = PA.get_equivalent_atoms()
self._convert_eq(eq)
return eq | 0.002315 |
def init(context, reset, force):
"""Setup the database."""
store = Store(context.obj['database'], context.obj['root'])
existing_tables = store.engine.table_names()
if force or reset:
if existing_tables and not force:
message = f"Delete existing tables? [{', '.join(existing_tables)}]"
click.confirm(click.style(message, fg='yellow'), abort=True)
store.drop_all()
elif existing_tables:
click.echo(click.style("Database already exists, use '--reset'", fg='red'))
context.abort()
store.create_all()
message = f"Success! New tables: {', '.join(store.engine.table_names())}"
click.echo(click.style(message, fg='green')) | 0.002845 |
def check(self, var):
"""Check whether the provided value is a valid enum constant."""
if not isinstance(var, _str_type): return False
return _enum_mangle(var) in self._consts | 0.015075 |
def transform_around(matrix, point):
"""
Given a transformation matrix, apply its rotation
around a point in space.
Parameters
----------
matrix: (4,4) or (3, 3) float, transformation matrix
point: (3,) or (2,) float, point in space
Returns
---------
result: (4,4) transformation matrix
"""
point = np.asanyarray(point)
matrix = np.asanyarray(matrix)
dim = len(point)
if matrix.shape != (dim + 1,
dim + 1):
raise ValueError('matrix must be (d+1, d+1)')
translate = np.eye(dim + 1)
translate[:dim, dim] = -point
result = np.dot(matrix, translate)
translate[:dim, dim] = point
result = np.dot(translate, result)
return result | 0.001342 |
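A numeric sanity check of the construction above (numpy only): rotating 90 degrees about the point (1, 1) in 2D should map (2, 1) to (1, 2).

```python
import numpy as np

theta = np.pi / 2  # 90 degrees, counter-clockwise
matrix = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
point = np.array([1.0, 1.0])

# same translate-rotate-translate construction as transform_around()
translate = np.eye(3)
translate[:2, 2] = -point
result = np.dot(matrix, translate)
translate[:2, 2] = point
result = np.dot(translate, result)

moved = result @ np.array([2.0, 1.0, 1.0])
print(np.round(moved[:2], 6))  # [1. 2.]
```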
def send_template_message(self, user_id, template_id, data, url=''):
"""
Send a template message.
For details see http://mp.weixin.qq.com/wiki/17/304c1885ea66dbedf7dc170d84999a9d.html
:param user_id: User ID; this is the source of the `Message` you received.
:param template_id: Template ID.
:param data: Data used to render the template.
:param url: Optional link for the template message.
:return: The returned JSON payload.
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/template/send",
data={
"touser": user_id,
"template_id": template_id,
"url": url,
"data": data
}
) | 0.004559 |
def make_json_serializable(doc: Dict):
"""
Make the document JSON serializable. This is a poor man's implementation that handles dates and nothing else.
This method modifies the given document in place.
Args:
doc: A Python Dictionary, typically a CDR object.
Returns: None
"""
for k, v in doc.items():
# datetime.datetime is a subclass of datetime.date, so test it first,
# otherwise datetimes would lose their time component
if isinstance(v, datetime.datetime):
doc[k] = v.isoformat()
elif isinstance(v, datetime.date):
doc[k] = v.strftime("%Y-%m-%d") | 0.005405 |
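A quick demonstration, assuming the helper above (with the `datetime` check performed before the `date` check) is in scope:

```python
import datetime
import json

doc = {"id": 1,
       "crawled": datetime.date(2020, 1, 31),
       "seen": datetime.datetime(2020, 1, 31, 12, 30)}
make_json_serializable(doc)
print(json.dumps(doc))
# {"id": 1, "crawled": "2020-01-31", "seen": "2020-01-31T12:30:00"}
```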
def update(self, label=None, name=None, disabled=None, metadata=None,
monitoring_zones_poll=None, timeout=None, period=None,
target_alias=None, target_hostname=None, target_receiver=None):
"""
Updates an existing check with any of the parameters.
"""
self.manager.update(self, label=label, name=name,
disabled=disabled, metadata=metadata,
monitoring_zones_poll=monitoring_zones_poll, timeout=timeout,
period=period, target_alias=target_alias,
target_hostname=target_hostname,
target_receiver=target_receiver) | 0.013975 |
def manifests_parse(self):
'''parse manifests present on system'''
self.manifests = []
for manifest_path in self.find_manifests():
if self.manifest_path_is_old(manifest_path):
print("fw: Manifest (%s) is old; consider 'manifest download'" % (manifest_path))
manifest = self.manifest_parse(manifest_path)
if self.semver_major(manifest["format-version"]) != 1:
print("fw: Manifest (%s) has major version %d; MAVProxy only understands version 1" % (manifest_path,manifest["format-version"]))
continue
self.manifests.append(manifest) | 0.007704 |
def read_file_header(fd, endian):
"""Read mat 5 file header of the file fd.
Returns a dict with header values.
"""
fields = [
('description', 's', 116),
('subsystem_offset', 's', 8),
('version', 'H', 2),
('endian_test', 's', 2)
]
hdict = {}
for name, fmt, num_bytes in fields:
data = fd.read(num_bytes)
hdict[name] = unpack(endian, fmt, data)
hdict['description'] = hdict['description'].strip()
v_major = hdict['version'] >> 8
v_minor = hdict['version'] & 0xFF
hdict['__version__'] = '%d.%d' % (v_major, v_minor)
return hdict | 0.00161 |
def list(gandi, limit, format):
""" List webaccelerators """
options = {
'items_per_page': limit,
}
result = gandi.webacc.list(options)
if format:
output_json(gandi, format, result)
return result
output_keys = ['name', 'state', 'ssl']
for num, webacc in enumerate(result):
if num:
gandi.separator_line('-', 4)
webacc['ssl'] = 'Enabled' if webacc['ssl_enable'] else 'Disable'
output_generic(gandi, webacc, output_keys, justify=14)
gandi.echo('Vhosts :')
for vhost in webacc['vhosts']:
output_vhosts = ['vhost', 'ssl']
vhost['vhost'] = vhost['name']
vhost['ssl'] = 'Disable' if vhost['cert_id'] is None else 'Enabled'
output_sub_generic(gandi, vhost, output_vhosts,
justify=14)
gandi.echo('')
gandi.echo('Backends :')
for server in webacc['servers']:
try:
ip = gandi.ip.info(server['ip'])
iface = gandi.iface.info(ip['iface_id'])
vm_info = gandi.iaas.info(iface['vm_id'])
server['name'] = vm_info['hostname']
output_servers = ['name', 'ip', 'port', 'state']
except Exception:
warningmsg = ('\tBackend with ip address %s no longer '
'exists.\n\tYou should remove it.'
% server['ip'])
gandi.echo(warningmsg)
output_servers = ['ip', 'port', 'state']
output_sub_generic(gandi, server, output_servers,
justify=14)
gandi.echo('')
return result | 0.000581 |
def parse_references(self, markup):
""" Returns a list of references found in the markup.
References appear inline as <ref> footnotes,
http:// external links, or {{cite}} citations.
We replace them with (1)-style footnotes.
Additional references data is gathered in
parse_paragraph_references() when we parse paragraphs.
References can also appear in image descriptions,
tables and taxoboxes, so they might not always pop up in a paragraph.
The plain() method finally replaces (1) by [1].
"""
references = []
# A Wikipedia reference note looks like:
# <ref>In 1946, [[ENIAC]] consumed an estimated 174 kW.
# By comparison, a typical personal computer may use around 400 W;
# over four hundred times less. {{Ref harvard|kempf1961|Kempf 1961|a}}</ref>
m = re.findall(self.re["reference"], markup)
for reference in m:
reference = re.sub("<ref> {0,1}cite", "<ref>{{cite", reference)
if not reference.strip().startswith("[http://") and \
not re.search("\{\{cite", reference):
r = WikipediaReference()
r.note = self.plain(re.sub("</{0,1}ref.*?>", "", reference))
if r.note != "":
references.append(r)
p = " "+self.ref+"("+str(len(references))+")"
markup = markup.replace(reference, p, 1)
else:
# References containing a citation or url
# are better handled by the next patterns.
pass
# A Wikipedia citation looks like:
# {{cite journal
# | last = Einstein
# | first = Albert
# | authorlink = Albert Einstein
# | title = Sidelights on Relativity (Geometry and Experience)
# | publisher = P. Dutton., Co
# | date = 1923}}
m = re.findall(self.re["citation"], markup)
for citation in m:
c = citation.replace("\n", "")
r = WikipediaReference()
for key in r.__dict__.keys():
value = re.search("\| {0,1}"+key+"(.*?)[\|}]", c)
if value:
value = value.group(1)
value = value.replace("link", "")
value = value.strip().strip(" =[]")
value = self.plain(value)
setattr(r, key, value)
if r.first != "" and r.last != "":
r.author = r.first + " " + r.last
references.append(r)
p = " "+self.ref+"("+str(len(references))+")"
markup = markup.replace(citation, p, 1)
# A Wikipedia embedded url looks like:
# [http://www.pbs.org/wnet/hawking/html/home.html ''Stephen Hawking's Universe'']
m = re.findall(self.re["url"], markup)
for url in m:
r = WikipediaReference()
i = url.find(" ")
if i > 0:
r.url = url[:i].strip()
r.note = self.plain(url[i:])
else:
r.url = url.strip()
references.append(r)
p = r.note+" "+self.ref+"("+str(len(references))+")"
markup = markup.replace("["+url+"]", p, 1)
# Since we parsed all citations first and then all notes and urls,
# the ordering will not be correct in the markup,
# e.g. (1) (11) (12) (2) (3).
sorted = []
m = re.findall(self.ref+"\(([0-9]*)\)", markup)
for i in m:
sorted.append(references[int(i)-1])
markup = markup.replace(
self.ref+"("+i+")",
self.ref+"**("+str(len(sorted))+")"
)
markup = markup.replace(self.ref+"**", self.ref)
for r in references:
if r not in sorted:
sorted.append(r)
references = sorted
return references, markup.strip() | 0.006439 |
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the source (e.g. .cc) file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
fileinfo_cc = FileInfo(filename_cc)
if not fileinfo_cc.Extension().lstrip('.') in GetNonHeaderExtensions():
return (False, '')
fileinfo_h = FileInfo(filename_h)
if not fileinfo_h.Extension().lstrip('.') in GetHeaderExtensions():
return (False, '')
filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())
if matched_test_suffix:
filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
filename_h = filename_h[:-(len(fileinfo_h.Extension()))]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path | 0.007894 |
def augment_observation(
observation, reward, cum_reward, frame_index, bar_color=None,
header_height=27
):
"""Augments an observation with debug info."""
img = PIL_Image().new(
"RGB", (observation.shape[1], header_height,)
)
draw = PIL_ImageDraw().Draw(img)
draw.text(
(1, 0), "c:{:3}, r:{:3}".format(int(cum_reward), int(reward)),
fill=(255, 0, 0)
)
draw.text(
(1, 15), "f:{:3}".format(int(frame_index)),
fill=(255, 0, 0)
)
header = np.copy(np.asarray(img))
del img
if bar_color is not None:
header[0, :, :] = bar_color
return np.concatenate([header, observation], axis=0) | 0.015649 |
def add_detection_pattern(self, m):
"""This method add the detection patterns to the QR code. This lets
the scanner orient the pattern. It is required for all QR codes.
The detection pattern consists of three boxes located at the upper
left, upper right, and lower left corners of the matrix. Also, two
special lines called the timing pattern is also necessary. Finally,
a single black pixel is added just above the lower left black box.
"""
#Draw outer black box
for i in range(7):
inv = -(i+1)
for j in [0,6,-1,-7]:
m[j][i] = 1
m[i][j] = 1
m[inv][j] = 1
m[j][inv] = 1
#Draw inner white box
for i in range(1, 6):
inv = -(i+1)
for j in [1, 5, -2, -6]:
m[j][i] = 0
m[i][j] = 0
m[inv][j] = 0
m[j][inv] = 0
#Draw inner black box
for i in range(2, 5):
for j in range(2, 5):
inv = -(i+1)
m[i][j] = 1
m[inv][j] = 1
m[j][inv] = 1
#Draw white border
for i in range(8):
inv = -(i+1)
for j in [7, -8]:
m[i][j] = 0
m[j][i] = 0
m[inv][j] = 0
m[j][inv] = 0
#To keep the code short, it draws an extra box
#in the lower right corner, this removes it.
for i in range(-8, 0):
for j in range(-8, 0):
m[i][j] = ' '
#Add the timing pattern
bit = itertools.cycle([1,0])
for i in range(8, (len(m)-8)):
b = next(bit)
m[i][6] = b
m[6][i] = b
#Add the extra black pixel
m[-8][8] = 1 | 0.007559 |
def conv2d(x_input, w_matrix):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME') | 0.011299 |
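A hedged usage sketch in the same TensorFlow 1.x graph style as the snippet (these APIs were removed from TF 2.x eager mode):

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 28, 28, 1])                # NHWC input
w = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))  # 5x5 kernel, 32 filters
h = conv2d(x, w)  # SAME padding + stride 1 keeps the spatial size: [None, 28, 28, 32]
```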
def _find_value(ret_dict, key, path=None):
'''
PRIVATE METHOD
Traverses a dictionary of dictionaries/lists to find key
and return the value stored.
TODO:// this method doesn't really work very well, and it's not really
very useful in its current state. The purpose for this method is
to simplify parsing the JSON output so you can just pass the key
you want to find and have it return the value.
ret : dict<str,obj>
The dictionary to search through. Typically this will be a dict
returned from solr.
key : str
The key (str) to find in the dictionary
Return: list<dict<str,obj>>::
[{path:path, value:value}]
'''
if path is None:
path = key
else:
path = "{0}:{1}".format(path, key)
ret = []
for ikey, val in six.iteritems(ret_dict):
if ikey == key:
ret.append({path: val})
if isinstance(val, list):
for item in val:
if isinstance(item, dict):
ret = ret + _find_value(item, key, path)
if isinstance(val, dict):
ret = ret + _find_value(val, key, path)
return ret | 0.000835 |
def add_nodes(self, nodes): # noqa: D302
r"""
Add nodes to tree.
:param nodes: Node(s) to add with associated data. If there are
several list items in the argument with the same node
name the resulting node data is a list with items
corresponding to the data of each entry in the argument
with the same node name, in their order of appearance,
in addition to any existing node data if the node is
already present in the tree
:type nodes: :ref:`NodesWithData`
:raises:
* RuntimeError (Argument \`nodes\` is not valid)
* ValueError (Illegal node name: *[node_name]*)
For example:
.. =[=cog
.. import docs.support.incfile
.. docs.support.incfile.incfile('ptrie_example.py', cog.out)
.. =]=
.. code-block:: python
# ptrie_example.py
import ptrie
def create_tree():
tobj = ptrie.Trie()
tobj.add_nodes([
{'name':'root.branch1', 'data':5},
{'name':'root.branch1', 'data':7},
{'name':'root.branch2', 'data':[]},
{'name':'root.branch1.leaf1', 'data':[]},
{'name':'root.branch1.leaf1.subleaf1', 'data':333},
{'name':'root.branch1.leaf2', 'data':'Hello world!'},
{'name':'root.branch1.leaf2.subleaf2', 'data':[]},
])
return tobj
.. =[=end=]=
.. code-block:: python
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.get_data('root.branch1')
[5, 7]
"""
self._validate_nodes_with_data(nodes)
nodes = nodes if isinstance(nodes, list) else [nodes]
# Create root node (if needed)
if not self.root_name:
self._set_root_name(nodes[0]["name"].split(self._node_separator)[0].strip())
self._root_hierarchy_length = len(
self.root_name.split(self._node_separator)
)
self._create_node(name=self.root_name, parent="", children=[], data=[])
# Process new data
for node_dict in nodes:
name, data = node_dict["name"], node_dict["data"]
if name not in self._db:
# Validate node name (root of new node same as tree root)
if not name.startswith(self.root_name + self._node_separator):
raise ValueError("Illegal node name: {0}".format(name))
self._create_intermediate_nodes(name)
self._db[name]["data"] += copy.deepcopy(
data
if isinstance(data, list) and data
else ([] if isinstance(data, list) else [data])
) | 0.001247 |
def game_events(game_id, innings_endpoint=False):
"""Return list of Inning objects for game matching the game id.
Using `inning_endpoints=True` will result in objects with
additional, undocumented data properties, but also objects
that may be missing properties expected by the user.
`innings_endpoint`: bool, use more detailed `innings` API endpoint
"""
data = mlbgame.events.game_events(game_id, innings_endpoint)
return [mlbgame.events.Inning(data[x], x) for x in data] | 0.001976 |
def pydict2xmlstring(metadata_dict, **kwargs):
"""Create an XML string from a metadata dictionary."""
ordering = kwargs.get('ordering', UNTL_XML_ORDER)
root_label = kwargs.get('root_label', 'metadata')
root_namespace = kwargs.get('root_namespace', None)
elements_namespace = kwargs.get('elements_namespace', None)
namespace_map = kwargs.get('namespace_map', None)
root_attributes = kwargs.get('root_attributes', None)
# Set any root namespace and namespace map.
if root_namespace and namespace_map:
root = Element(root_namespace + root_label, nsmap=namespace_map)
elif namespace_map:
root = Element(root_label, nsmap=namespace_map)
else:
root = Element(root_label)
# Set any root element attributes.
if root_attributes:
for key, value in root_attributes.items():
root.attrib[key] = value
# Create an XML structure from field list.
for metadata_key in ordering:
if metadata_key in metadata_dict:
for element in metadata_dict[metadata_key]:
if 'content' in element and 'qualifier' in element:
create_dict_subelement(
root,
metadata_key,
element['content'],
attribs={'qualifier': element['qualifier']},
namespace=elements_namespace,
)
elif 'content' in element and 'role' in element:
create_dict_subelement(
root,
metadata_key,
element['content'],
attribs={'role': element['role']},
namespace=elements_namespace,
)
elif 'content' in element and 'scheme' in element:
create_dict_subelement(
root,
metadata_key,
element['content'],
attribs={'scheme': element['scheme']},
namespace=elements_namespace,
)
elif 'content' in element:
create_dict_subelement(
root,
metadata_key,
element['content'],
namespace=elements_namespace,
)
# Create the XML tree.
return '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring(
root,
pretty_print=True
) | 0.00039 |
def _discarded_reads1_out_file_name(self):
"""Checks if file name is set for discarded reads1 output.
Returns absolute path."""
if self.Parameters['-3'].isOn():
discarded_reads1 = self._absolute(str(self.Parameters['-3'].Value))
else:
raise ValueError(
"No discarded-reads1 (flag -3) output path specified")
return discarded_reads1 | 0.004831 |
def _rule_id(self, id: int) -> str:
"""
Convert an integer into a gorule key id.
"""
if id is None or id == 0 or id >= 10000000:
return "other"
return "gorule-{:0>7}".format(id) | 0.008696 |
def get_revision(self, id, revision_number, project=None, expand=None):
"""GetRevision.
[Preview API] Returns a fully hydrated work item for the requested revision
:param int id:
:param int revision_number:
:param str project: Project ID or project name
:param str expand:
:rtype: :class:`<WorkItem> <azure.devops.v5_1.work-item-tracking.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
if revision_number is not None:
route_values['revisionNumber'] = self._serialize.url('revision_number', revision_number, 'int')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='a00c85a5-80fa-4565-99c3-bcd2181434bb',
version='5.1-preview.3',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response) | 0.006042 |
def atan(x):
""" tan(x)
Trigonometric arc tan function.
"""
_math = infer_math(x)
if _math is math:
return _math.atan(x)
else:
return _math.arctan(x) | 0.005291 |
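Assuming `infer_math` returns the stdlib `math` module for scalars and `numpy` for array inputs, the same call covers both cases; a small sketch:

import numpy as np

atan(1.0)                   # scalar path -> math.atan, ~0.7853981633974483
atan(np.array([0.0, 1.0]))  # array path  -> numpy.arctan, array([0., 0.78539816])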
def to_json(value, **kwargs):
"""Convert a PNG Image to base64-encoded JSON
to_json assumes that value has passed validation.
"""
b64rep = base64.b64encode(value.read())
value.seek(0)
jsonrep = '{preamble}{b64}'.format(
preamble=PNG_PREAMBLE,
b64=b64rep.decode(),
)
return jsonrep | 0.00542 |
def get_default_project_directory():
"""
Return the default location for the project directory
    depending on the operating system
"""
server_config = Config.instance().get_section_config("Server")
path = os.path.expanduser(server_config.get("projects_path", "~/GNS3/projects"))
path = os.path.normpath(path)
try:
os.makedirs(path, exist_ok=True)
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Could not create project directory: {}".format(e))
return path | 0.005629 |
def apply(self, docs, clear=True, parallelism=None, progress_bar=True):
"""Run the MentionExtractor.
:Example: To extract mentions from a set of training documents using
4 cores::
mention_extractor.apply(train_docs, parallelism=4)
:param docs: Set of documents to extract from.
:param clear: Whether or not to clear the existing Mentions
beforehand.
:type clear: bool
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the
MentionExtractor if it is provided.
:type parallelism: int
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
:type progress_bar: bool
"""
super(MentionExtractor, self).apply(
docs, clear=clear, parallelism=parallelism, progress_bar=progress_bar
) | 0.003058 |
def check_ups_estimated_minutes_remaining(the_session, the_helper, the_snmp_value):
"""
OID .1.3.6.1.2.1.33.1.2.3.0
MIB excerpt
An estimate of the time to battery charge depletion
under the present load conditions if the utility power
is off and remains off, or if it were to be lost and
remain off.
"""
the_helper.add_metric(
label=the_helper.options.type,
value=the_snmp_value,
uom="minutes")
the_helper.set_summary("Remaining runtime on battery is {} minutes".format(the_snmp_value)) | 0.005445 |
async def _send_sack(self):
"""
Build and send a selective acknowledgement (SACK) chunk.
"""
gaps = []
gap_next = None
for tsn in sorted(self._sack_misordered):
pos = (tsn - self._last_received_tsn) % SCTP_TSN_MODULO
if tsn == gap_next:
gaps[-1][1] = pos
else:
gaps.append([pos, pos])
gap_next = tsn_plus_one(tsn)
sack = SackChunk()
sack.cumulative_tsn = self._last_received_tsn
sack.advertised_rwnd = max(0, self._advertised_rwnd)
sack.duplicates = self._sack_duplicates[:]
sack.gaps = [tuple(x) for x in gaps]
await self._send_chunk(sack)
self._sack_duplicates.clear()
self._sack_needed = False | 0.002528 |
def _asa_task(q, masks, stft, sample_width, frame_rate, nsamples_for_each_fft):
"""
Worker for the ASA algorithm's multiprocessing step.
"""
    # Convert each mask to (1 or 0) rather than (ID or 0).
    # Rebind the list: reassigning the loop variable would leave `masks` unchanged.
    masks = [np.where(mask > 0, 1, 0) for mask in masks]
# Multiply the masks against STFTs
masks = [mask * stft for mask in masks]
nparrs = []
dtype_dict = {1: np.int8, 2: np.int16, 4: np.int32}
dtype = dtype_dict[sample_width]
for m in masks:
_times, nparr = signal.istft(m, frame_rate, nperseg=nsamples_for_each_fft)
nparr = nparr.astype(dtype)
nparrs.append(nparr)
for m in nparrs:
q.put(m)
q.put("DONE") | 0.00289 |
def create_inspect_template(
self,
parent,
inspect_template=None,
template_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates an InspectTemplate for re-using frequently used configuration
for inspecting content, images, and storage.
See https://cloud.google.com/dlp/docs/creating-templates to learn more.
Example:
>>> from google.cloud import dlp_v2
>>>
>>> client = dlp_v2.DlpServiceClient()
>>>
>>> parent = client.organization_path('[ORGANIZATION]')
>>>
>>> response = client.create_inspect_template(parent)
Args:
parent (str): The parent resource name, for example projects/my-project-id or
organizations/my-org-id.
inspect_template (Union[dict, ~google.cloud.dlp_v2.types.InspectTemplate]): The InspectTemplate to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dlp_v2.types.InspectTemplate`
template_id (str): The template id can contain uppercase and lowercase letters, numbers,
and hyphens; that is, it must match the regular expression:
``[a-zA-Z\\d-_]+``. The maximum length is 100 characters. Can be empty
to allow the system to generate one.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dlp_v2.types.InspectTemplate` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_inspect_template" not in self._inner_api_calls:
self._inner_api_calls[
"create_inspect_template"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_inspect_template,
default_retry=self._method_configs["CreateInspectTemplate"].retry,
default_timeout=self._method_configs["CreateInspectTemplate"].timeout,
client_info=self._client_info,
)
request = dlp_pb2.CreateInspectTemplateRequest(
parent=parent, inspect_template=inspect_template, template_id=template_id
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_inspect_template"](
request, retry=retry, timeout=timeout, metadata=metadata
) | 0.003197 |
def listSerialPorts():
"""
http://pyserial.readthedocs.io/en/latest/shortintro.html
This calls the command line tool from pyserial to list the available
serial ports.
"""
cmd = 'python -m serial.tools.list_ports'
err, ret = commands.getstatusoutput(cmd)
if not err:
r = ret.split('\n')
ret = []
for line in r:
if line.find('/dev/') >= 0:
line = line.replace(' ', '')
ret.append(line)
return err, ret | 0.037559 |
def _load_old_defaults(self, old_version):
"""Read old defaults"""
old_defaults = cp.ConfigParser()
if check_version(old_version, '3.0.0', '<='):
path = get_module_source_path('spyder')
else:
path = osp.dirname(self.filename())
path = osp.join(path, 'defaults')
old_defaults.read(osp.join(path, 'defaults-'+old_version+'.ini'))
return old_defaults | 0.006865 |
def datetime_string(time_zone=False):
"""
Return a string representing the current date and time,
in ``YYYY-MM-DDThh:mm:ss`` or ``YYYY-MM-DDThh:mm:ss+hh:mm`` format
:param boolean time_zone: if ``True``, add the time zone offset.
:rtype: string
"""
time = datetime.datetime.now()
template = u"%04d-%02d-%02dT%02d:%02d:%02d"
if time_zone:
template += u"+00:00"
return template % (
time.year,
time.month,
time.day,
time.hour,
time.minute,
time.second
) | 0.001812 |
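For an assumed local time the two forms look like this; note that the suffix is the literal `+00:00` regardless of the actual local offset, because the template only ever appends that fixed string:

datetime_string()                # '2019-05-01T13:45:07'
datetime_string(time_zone=True)  # '2019-05-01T13:45:07+00:00'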
def train(hparams, *args):
"""Train your awesome model.
:param hparams: The arguments to run the model with.
"""
# Initialize experiments and track all the hyperparameters
exp = Experiment(
name=hparams.test_tube_exp_name,
# Location to save the metrics.
save_dir=hparams.log_path,
# The experiment version is optional, but using the one
# from SLURM means the exp will not collide with other
# versions if SLURM runs multiple at once.
version=hparams.hpc_exp_number,
autosave=False,
)
exp.argparse(hparams)
# Pretend to train.
x = hparams.x_val
for train_step in range(0, 100):
y = hparams.y_val
out = x * y
exp.log({'fake_err': out.item()}) # Log metrics.
# Save exp when done.
exp.save() | 0.002407 |
def Flemmer_Banks(Re):
r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_ as described in [2]_.
.. math::
C_D = \frac{24}{Re}10^E
E = 0.383Re^{0.356}-0.207Re^{0.396} - \frac{0.143}{1+(\log_{10} Re)^2}
Parameters
----------
Re : float
Reynolds number of the sphere, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 2E5
Examples
--------
>>> Flemmer_Banks(200.)
0.7849169609270039
References
----------
.. [1] Flemmer, R. L. C., and C. L. Banks. "On the Drag Coefficient of a
Sphere." Powder Technology 48, no. 3 (November 1986): 217-21.
doi:10.1016/0032-5910(86)80044-4.
.. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045.
'''
E = 0.383*Re**0.356 - 0.207*Re**0.396 - 0.143/(1 + (log10(Re))**2)
Cd = 24./Re*10**E
return Cd | 0.000835 |
def types_from_module(pb_module):
'''
Return protobuf class types from an imported generated module.
'''
types = pb_module.DESCRIPTOR.message_types_by_name
return [getattr(pb_module, name) for name in types] | 0.00431 |
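A usage sketch with the protobuf tutorial's generated module standing in for a real one (the module name is an assumption):

# Assumes `protoc --python_out=. addressbook.proto` has been run.
import addressbook_pb2

for message_cls in types_from_module(addressbook_pb2):
    print(message_cls.DESCRIPTOR.full_name)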
def validate_one(func_name):
"""
Validate the docstring for the given func_name
Parameters
----------
    func_name : str
        Name of the function whose docstring will be evaluated
        (e.g. 'pandas.read_csv').
Returns
-------
dict
A dictionary containing all the information obtained from validating
the docstring.
"""
doc = Docstring(func_name)
errs, wrns, examples_errs = get_validation_data(doc)
return {'type': doc.type,
'docstring': doc.clean_doc,
'deprecated': doc.deprecated,
'file': doc.source_file_name,
'file_line': doc.source_file_def_line,
'github_link': doc.github_url,
'errors': errs,
'warnings': wrns,
'examples_errors': examples_errs} | 0.00125 |
def peek_pointers_in_data(self, data, peekSize = 16, peekStep = 1):
"""
Tries to guess which values in the given data are valid pointers,
and reads some data from them.
@see: L{peek}
@type data: str
@param data: Binary data to find pointers in.
@type peekSize: int
@param peekSize: Number of bytes to read from each pointer found.
@type peekStep: int
@param peekStep: Expected data alignment.
            Typically you specify 1 when data alignment is unknown,
or 4 when you expect data to be DWORD aligned.
Any other value may be specified.
@rtype: dict( str S{->} str )
@return: Dictionary mapping stack offsets to the data they point to.
"""
result = dict()
ptrSize = win32.sizeof(win32.LPVOID)
if ptrSize == 4:
ptrFmt = '<L'
else:
ptrFmt = '<Q'
if len(data) > 0:
for i in compat.xrange(0, len(data), peekStep):
packed = data[i:i+ptrSize]
if len(packed) == ptrSize:
address = struct.unpack(ptrFmt, packed)[0]
## if not address & (~0xFFFF): continue
peek_data = self.peek(address, peekSize)
if peek_data:
result[i] = peek_data
return result | 0.007077 |
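The scanning step is just a sliding `struct.unpack` over the buffer; a minimal standalone sketch of that part, with no debugger attached and 4-byte pointers assumed:

import struct

def find_candidate_pointers(data, ptr_size=4, step=1):
    """Return {offset: value} for every slice that unpacks as a pointer-sized integer."""
    fmt = '<L' if ptr_size == 4 else '<Q'
    candidates = {}
    for offset in range(0, len(data), step):
        packed = data[offset:offset + ptr_size]
        if len(packed) == ptr_size:
            candidates[offset] = struct.unpack(fmt, packed)[0]
    return candidates

buf = struct.pack('<LL', 0x00401000, 0x7FFE0000)
print(find_candidate_pointers(buf, step=4))  # {0: 4198400, 4: 2147352576}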
def get_color(value, alpha):
"""Return color depending on value type"""
color = QColor()
for typ in COLORS:
if isinstance(value, typ):
color = QColor(COLORS[typ])
color.setAlphaF(alpha)
return color | 0.004082 |
def _generate_help_dicts(config_cls, _prefix=None):
"""
Generate dictionaries for use in building help strings.
Every dictionary includes the keys...
var_name: The env var that should be set to populate the value.
required: A bool, True if the var is required, False if it's optional.
Conditionally, the following are included...
default: Included if an optional variable has a default set
help_str: Included if the var uses the help kwarg to provide additional
context for the value.
Conditional key inclusion is meant to differentiate between exclusion
vs explicitly setting a value to None.
"""
help_dicts = []
if _prefix is None:
_prefix = config_cls._prefix
for a in attr.fields(config_cls):
try:
ce = a.metadata[CNF_KEY]
except KeyError:
continue
if ce.sub_cls is None: # Base case for "leaves".
if ce.name is None:
var_name = "_".join((_prefix, a.name)).upper()
else:
var_name = ce.name
req = ce.default == RAISE
help_dict = {"var_name": var_name, "required": req}
if not req:
help_dict["default"] = ce.default
if ce.help is not None:
help_dict["help_str"] = ce.help
help_dicts.append(help_dict)
else: # Construct the new prefix and recurse.
help_dicts += _generate_help_dicts(
ce.sub_cls, _prefix="_".join((_prefix, a.name)).upper()
)
return help_dicts | 0.00063 |
def get(m, k, default=None):
"""Return the value of k in m. Return default if k not found in m."""
if isinstance(m, IAssociative):
return m.entry(k, default=default)
try:
return m[k]
except (KeyError, IndexError, TypeError) as e:
logger.debug("Ignored %s: %s", type(e).__name__, e)
return default | 0.002899 |
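Behaviour on the plain-Python fallback path (the `IAssociative` branch is skipped here); a few quick cases:

get({"a": 1}, "a")             # 1
get({"a": 1}, "b", default=0)  # 0
get([10, 20, 30], 1)           # 20
get(None, "a")                 # None -- the TypeError is swallowed and the default returned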
def write_tables(target, tables, append=False, overwrite=False, **kwargs):
"""Write an LIGO_LW table to file
Parameters
----------
target : `str`, `file`, :class:`~ligo.lw.ligolw.Document`
the file or document to write into
tables : `list`, `tuple` of :class:`~ligo.lw.table.Table`
the tables to write
append : `bool`, optional, default: `False`
if `True`, append to an existing file/table, otherwise `overwrite`
overwrite : `bool`, optional, default: `False`
if `True`, delete an existing instance of the table type, otherwise
append new rows
**kwargs
other keyword arguments to pass to
:func:`~ligo.lw.utils.load_filename`, or
:func:`~ligo.lw.utils.load_fileobj` as appropriate
"""
from ligo.lw.ligolw import (Document, LIGO_LW, LIGOLWContentHandler)
from ligo.lw import utils as ligolw_utils
# allow writing directly to XML
if isinstance(target, (Document, LIGO_LW)):
xmldoc = target
# open existing document, if possible
elif append:
xmldoc = open_xmldoc(
target, contenthandler=kwargs.pop('contenthandler',
LIGOLWContentHandler))
# fail on existing document and not overwriting
elif (not overwrite and isinstance(target, string_types) and
os.path.isfile(target)):
raise IOError("File exists: {}".format(target))
else: # or create a new document
xmldoc = Document()
# convert table to format
write_tables_to_document(xmldoc, tables, overwrite=overwrite)
# write file
if isinstance(target, string_types):
kwargs.setdefault('gz', target.endswith('.gz'))
ligolw_utils.write_filename(xmldoc, target, **kwargs)
elif isinstance(target, FILE_LIKE):
kwargs.setdefault('gz', target.name.endswith('.gz'))
ligolw_utils.write_fileobj(xmldoc, target, **kwargs) | 0.000513 |
def get_version(module):
"""
Attempts to read a version attribute from the given module that
could be specified via several different names and formats.
"""
version_names = ["__version__", "get_version", "version"]
version_names.extend([name.upper() for name in version_names])
for name in version_names:
try:
version = getattr(module, name)
except AttributeError:
continue
if callable(version):
version = version()
try:
version = ".".join([str(i) for i in version.__iter__()])
except AttributeError:
pass
return version | 0.001524 |
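A sketch of two of the fallbacks it handles, using throwaway namespace objects in place of real modules:

import types

mod_a = types.SimpleNamespace(VERSION=(2, 0, 1))
mod_b = types.SimpleNamespace(get_version=lambda: (0, 9))

get_version(mod_a)  # '2.0.1' -- the tuple is joined with dots
get_version(mod_b)  # '0.9'   -- the callable is called first, then joined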
def locked(self):
"""Context generator for `with` statement, yields thread-safe connection.
:return: thread-safe connection
:rtype: pydbal.connection.Connection
"""
conn = self._get_connection()
try:
self._lock(conn)
yield conn
finally:
self._unlock(conn) | 0.008621 |
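Intended usage is a `with` statement; a sketch assuming `pool` is an instance of the enclosing class (with the method wrapped by `contextlib.contextmanager`), and the query call on the yielded connection is purely illustrative:

with pool.locked() as conn:
    # The connection is locked to this thread until the block exits,
    # then released in the finally clause above.
    conn.query("SELECT 1")  # hypothetical call on the yielded connection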
def dumps(self, msg, use_bin_type=False):
'''
Run the correct dumps serialization format
:param use_bin_type: Useful for Python 3 support. Tells msgpack to
differentiate between 'str' and 'bytes' types
by encoding them differently.
Since this changes the wire protocol, this
option should not be used outside of IPC.
'''
def ext_type_encoder(obj):
if isinstance(obj, six.integer_types):
# msgpack can't handle the very long Python longs for jids
# Convert any very long longs to strings
return six.text_type(obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
# msgpack doesn't support datetime.datetime and datetime.date datatypes.
# So here we have converted these types to custom datatype
# This is msgpack Extended types numbered 78
return msgpack.ExtType(78, salt.utils.stringutils.to_bytes(
obj.strftime('%Y%m%dT%H:%M:%S.%f')))
# The same for immutable types
elif isinstance(obj, immutabletypes.ImmutableDict):
return dict(obj)
elif isinstance(obj, immutabletypes.ImmutableList):
return list(obj)
elif isinstance(obj, (set, immutabletypes.ImmutableSet)):
# msgpack can't handle set so translate it to tuple
return tuple(obj)
elif isinstance(obj, CaseInsensitiveDict):
return dict(obj)
            # No known exception types matched. Let msgpack raise its own.
return obj
try:
if msgpack.version >= (0, 4, 0):
# msgpack only supports 'use_bin_type' starting in 0.4.0.
# Due to this, if we don't need it, don't pass it at all so
# that under Python 2 we can still work with older versions
# of msgpack.
return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,
use_bin_type=use_bin_type,
_msgpack_module=msgpack)
else:
return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,
_msgpack_module=msgpack)
except (OverflowError, msgpack.exceptions.PackValueError):
            # msgpack<=0.4.6 doesn't call the ext encoder on very long integers and raises the error instead.
# Convert any very long longs to strings and call dumps again.
def verylong_encoder(obj, context):
# Make sure we catch recursion here.
objid = id(obj)
if objid in context:
return '<Recursion on {} with id={}>'.format(type(obj).__name__, id(obj))
context.add(objid)
if isinstance(obj, dict):
for key, value in six.iteritems(obj.copy()):
obj[key] = verylong_encoder(value, context)
return dict(obj)
elif isinstance(obj, (list, tuple)):
obj = list(obj)
for idx, entry in enumerate(obj):
obj[idx] = verylong_encoder(entry, context)
return obj
# A value of an Integer object is limited from -(2^63) upto (2^64)-1 by MessagePack
# spec. Here we care only of JIDs that are positive integers.
if isinstance(obj, six.integer_types) and obj >= pow(2, 64):
return six.text_type(obj)
else:
return obj
msg = verylong_encoder(msg, set())
if msgpack.version >= (0, 4, 0):
return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,
use_bin_type=use_bin_type,
_msgpack_module=msgpack)
else:
return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,
_msgpack_module=msgpack) | 0.001394 |
def load_result(result_file, options, run_set_id=None, columns=None,
columns_relevant_for_diff=set()):
"""
Completely handle loading a single result file.
@param result_file the file to parse
@param options additional options
@param run_set_id the identifier of the run set
@param columns the list of columns
@param columns_relevant_for_diff a set of columns that is relevant for
the diff table
@return a fully ready RunSetResult instance or None
"""
xml = parse_results_file(result_file, run_set_id=run_set_id, ignore_errors=options.ignore_errors)
if xml is None:
return None
result = RunSetResult.create_from_xml(
result_file, xml, columns=columns, all_columns=options.all_columns,
columns_relevant_for_diff=columns_relevant_for_diff)
result.collect_data(options.correct_only)
return result | 0.00216 |
def dump_dict(self):
"""Returns a dictionary representation of the structure."""
dump_dict = dict()
dump_dict['Structure'] = self.name
# Refer to the __set_format__ method for an explanation
# of the following construct.
for keys in self.__keys__:
for key in keys:
val = getattr(self, key)
if isinstance(val, (int, long)):
if key == 'TimeDateStamp' or key == 'dwTimeStamp':
try:
val = '0x%-8X [%s UTC]' % (val, time.asctime(time.gmtime(val)))
except ValueError as e:
val = '0x%-8X [INVALID TIME]' % val
else:
val = ''.join(chr(d) if chr(d) in string.printable
else "\\x%02x" % d for d in
[ord(c) if not isinstance(c, int) else c for c in val])
dump_dict[key] = {'FileOffset': self.__field_offsets__[key] + self.__file_offset__,
'Offset': self.__field_offsets__[key],
'Value': val}
return dump_dict | 0.00493 |
def list_nodes_full(call=None):
'''
Return a list of the VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f '
'or --function.'
)
ret = {}
location = get_location()
params = {
'Action': 'DescribeInstanceStatus',
'RegionId': location,
'PageSize': '50'
}
result = query(params=params)
log.debug(
        'Total %s instances found in Region %s',
result['TotalCount'], location
)
if 'Code' in result or result['TotalCount'] == 0:
return ret
    # The Aliyun API returns at most 100 instances (50 per page).
    result_instancestatus = result['InstanceStatuses']['InstanceStatus']
    if result['TotalCount'] > 50:
        params['PageNumber'] = '2'
        result = query(params=params)
        # InstanceStatus is a list, so extend it rather than calling update().
        result_instancestatus.extend(result['InstanceStatuses']['InstanceStatus'])
for node in result_instancestatus:
instanceId = node.get('InstanceId', '')
params = {
'Action': 'DescribeInstanceAttribute',
'InstanceId': instanceId
}
items = query(params=params)
if 'Code' in items:
log.warning('Query instance:%s attribute failed', instanceId)
continue
name = items['InstanceName']
ret[name] = {
'id': items['InstanceId'],
'name': name,
'image': items['ImageId'],
'size': 'TODO',
'state': items['Status']
}
for item in items:
value = items[item]
if value is not None:
value = six.text_type(value)
if item == "PublicIpAddress":
ret[name]['public_ips'] = items[item]['IpAddress']
if item == "InnerIpAddress" and 'private_ips' not in ret[name]:
ret[name]['private_ips'] = items[item]['IpAddress']
if item == 'VpcAttributes':
vpc_ips = items[item]['PrivateIpAddress']['IpAddress']
if vpc_ips:
ret[name]['private_ips'] = vpc_ips
ret[name][item] = value
provider = __active_provider_name__ or 'aliyun'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
__opts__['update_cachedir'] = True
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret | 0.00082 |
def serialize(self, elt, sw, pyobj, name=None, orig=None, **kw):
'''elt -- the current DOMWrapper element
sw -- soapWriter object
pyobj -- python object to serialize
'''
if pyobj is not None and type(pyobj) not in _seqtypes:
raise EvaluateException, 'expecting a list or None'
objid = _get_idstr(pyobj)
ns,n = self.get_name(name, objid)
el = elt.createAppendElement(ns, n)
if self.nillable is True and pyobj is None:
self.serialize_as_nil(el)
return None
tc = self.itemTypeCode
s = StringIO(); sep = ' '
for item in pyobj:
s.write(tc.get_formatted_content(item))
s.write(sep)
el.createAppendTextNode(s.getvalue()) | 0.008805 |
def calcRapRperi(self,**kwargs):
"""
NAME:
calcRapRperi
PURPOSE:
calculate the apocenter and pericenter radii
INPUT:
OUTPUT:
(rperi,rap)
HISTORY:
2010-12-01 - Written - Bovy (NYU)
"""
if hasattr(self,'_rperirap'): #pragma: no cover
return self._rperirap
EL= self.calcEL(**kwargs)
E, L= EL
if self._vR == 0. and m.fabs(self._vT - vcirc(self._pot,self._R,use_physical=False)) < _EPS: #We are on a circular orbit
rperi= self._R
rap = self._R
elif self._vR == 0. and self._vT > vcirc(self._pot,self._R,use_physical=False): #We are exactly at pericenter
rperi= self._R
if self._gamma != 0.:
startsign= _rapRperiAxiEq(self._R+10.**-8.,E,L,self._pot)
startsign/= m.fabs(startsign)
else: startsign= 1.
rend= _rapRperiAxiFindStart(self._R,E,L,self._pot,rap=True,
startsign=startsign)
rap= optimize.brentq(_rapRperiAxiEq,rperi+0.00001,rend,
args=(E,L,self._pot))
# fprime=_rapRperiAxiDeriv)
elif self._vR == 0. and self._vT < vcirc(self._pot,self._R,use_physical=False): #We are exactly at apocenter
rap= self._R
if self._gamma != 0.:
startsign= _rapRperiAxiEq(self._R-10.**-8.,E,L,self._pot)
startsign/= m.fabs(startsign)
else: startsign= 1.
rstart= _rapRperiAxiFindStart(self._R,E,L,self._pot,
startsign=startsign)
if rstart == 0.: rperi= 0.
else:
rperi= optimize.brentq(_rapRperiAxiEq,rstart,rap-0.000001,
args=(E,L,self._pot))
# fprime=_rapRperiAxiDeriv)
else:
if self._gamma != 0.:
startsign= _rapRperiAxiEq(self._R,E,L,self._pot)
startsign/= m.fabs(startsign)
else:
startsign= 1.
rstart= _rapRperiAxiFindStart(self._R,E,L,self._pot,
startsign=startsign)
if rstart == 0.: rperi= 0.
else:
try:
rperi= optimize.brentq(_rapRperiAxiEq,rstart,self._R,
(E,L,self._pot),
maxiter=200)
except RuntimeError: #pragma: no cover
raise UnboundError("Orbit seems to be unbound")
rend= _rapRperiAxiFindStart(self._R,E,L,self._pot,rap=True,
startsign=startsign)
rap= optimize.brentq(_rapRperiAxiEq,self._R,rend,
(E,L,self._pot))
self._rperirap= (rperi,rap)
return self._rperirap | 0.030959 |
def delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name):
"""
Parameters:
- db_name
- tbl_name
- part_name
- col_name
"""
self.send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name)
return self.recv_delete_partition_column_statistics() | 0.009146 |
def topsDF(symbols=None, token='', version=''):
'''TOPS provides IEX’s aggregated best quoted bid and offer position in near real time for all securities on IEX’s displayed limit order book.
TOPS is ideal for developers needing both quote and trade data.
https://iexcloud.io/docs/api/#tops
Args:
        symbols (string); Ticker(s) to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.io.json.json_normalize(tops(symbols, token, version))
_toDatetime(df)
_reindex(df, 'symbol')
return df | 0.0033 |