text (string, 78-104k characters) | score (float64, 0-0.18) |
---|---|
def median(items):
"""Note: modifies the input list!"""
items.sort()
k = len(items)//2
if len(items) % 2 == 0:
return (items[k] + items[k - 1]) / 2
else:
return items[k] | 0.004878 |
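Because median() sorts its argument in place, callers that need to keep the original ordering should pass a copy; a small usage sketch (the values are illustrative):

# Usage sketch; assumes the median() defined above is importable.
data = [7, 1, 5, 3]
m = median(list(data))        # pass a copy so the caller's ordering survives
assert m == 4                 # even length: (3 + 5) / 2
assert data == [7, 1, 5, 3]   # original list untouched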
def check_base_suggested_attributes(self, dataset):
'''
Check the global suggested attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:creator_type = "" ; //........................................ SUGGESTED - Specifies type of creator with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:creator_institution = "" ; //................................. SUGGESTED - The institution of the creator; should uniquely identify the creator's institution. (ACDD)
:publisher_type = "" ; //...................................... SUGGESTED - Specifies type of publisher with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:publisher_institution = "" ; //............................... SUGGESTED - The institution that presented the data file or equivalent product to users; should uniquely identify the institution. (ACDD)
:program = "" ; //............................................. SUGGESTED - The overarching program(s) of which the dataset is a part. (ACDD)
:contributor_name = "" ; //.................................... SUGGESTED - The name of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:contributor_role = "" ; //.................................... SUGGESTED - The role of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:geospatial_lat_units = "degrees_north" ; //.................. SUGGESTED - Units for the latitude axis described in "geospatial_lat_min" and "geospatial_lat_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_lon_units = "degrees_east"; //..................... SUGGESTED - Units for the longitude axis described in "geospatial_lon_min" and "geospatial_lon_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_vertical_units = "" ; //........................... SUGGESTED - Units for the vertical axis described in "geospatial_vertical_min" and "geospatial_vertical_max" attributes. The default is EPSG:4979. (ACDD)
:date_modified = "" ; //....................................... SUGGESTED - The date on which the data was last modified. Note that this applies just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_issued = "" ; //......................................... SUGGESTED - The date on which this data (including all modifications) was formally issued (i.e., made available to a wider audience). Note that these apply just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_metadata_modified = "" ; //.............................. SUGGESTED - The date on which the metadata was last modified. Use ISO 8601:2004 for date and time. (ACDD)
:product_version = "" ; //..................................... SUGGESTED - Version identifier of the data file or product as assigned by the data creator. (ACDD)
:keywords_vocabulary = "" ; //................................. SUGGESTED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". Example: 'GCMD:GCMD Keywords' ACDD)
:platform = "" ; //............................................ SUGGESTED - Name of the platform(s) that supported the sensor data used to create this data set or product. Platforms can be of any type, including satellite, ship, station, aircraft or other. (ACDD)
:platform_vocabulary = "" ; //................................. SUGGESTED - Controlled vocabulary for the names used in the "platform" attribute . Example: ‘NASA/GCMD Platform Keywords Version 8.1’ (ACDD)
:instrument = "" ; //.......................................... SUGGESTED - Name of the contributing instrument(s) or sensor(s) used to create this data set or product. (ACDD)
:instrument_vocabulary = "" ; //............................... SUGGESTED - Controlled vocabulary for the names used in the "instrument" attribute. Example: ‘NASA/GCMD Instrument Keywords Version 8.1’ (ACDD)
:cdm_data_type = "Point" ; //.................................. SUGGESTED - The data type, as derived from Unidata's Common Data Model Scientific Data types and understood by THREDDS. (ACDD)
:metadata_link = "" ; //....................................... SUGGESTED - A URL that gives the location of more complete metadata. A persistent URL is recommended for this attribute. (ACDD)
:references = "" ; //.......................................... SUGGESTED - Published or web-based references that describe the data or methods used to produce it. Recommend URIs (such as a URL or DOI) for papers or other references. (CF)
'''
suggested_ctx = TestCtx(BaseCheck.LOW, 'Suggested global attributes')
# Do any of the variables define platform ?
platform_name = getattr(dataset, 'platform', '')
suggested_ctx.assert_true(platform_name != '', 'platform should exist and point to a term in :platform_vocabulary.')
cdm_data_type = getattr(dataset, 'cdm_data_type', '')
suggested_ctx.assert_true(cdm_data_type.lower() in ['grid', 'image', 'point', 'radial', 'station', 'swath', 'trajectory'],
'cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}'.format(cdm_data_type))
# Parse dates, check for ISO 8601
for attr in ['date_modified', 'date_issued', 'date_metadata_modified']:
attr_value = getattr(dataset, attr, '')
try:
parse_datetime(attr_value)
suggested_ctx.assert_true(True, '') # Score it True!
except ISO8601Error:
suggested_ctx.assert_true(False, '{} should exist and be in ISO-8601 format (example: 2017-01-01T00:00:00Z), currently: {}'.format(attr, attr_value))
units = getattr(dataset, 'geospatial_lat_units', '').lower()
suggested_ctx.assert_true(units == 'degrees_north', 'geospatial_lat_units attribute should be degrees_north: {}'.format(units))
units = getattr(dataset, 'geospatial_lon_units', '').lower()
suggested_ctx.assert_true(units == 'degrees_east', 'geospatial_lon_units attribute should be degrees_east: {}'.format(units))
contributor_name = getattr(dataset, 'contributor_name', '')
contributor_role = getattr(dataset, 'contributor_role', '')
names = contributor_name.split(',')
roles = contributor_role.split(',')
suggested_ctx.assert_true(contributor_name != '', 'contributor_name should exist and not be empty.')
suggested_ctx.assert_true(contributor_role != '', 'contributor_role should exist and not be empty.')
suggested_ctx.assert_true(len(names) == len(roles), 'length of contributor names matches length of roles')
return suggested_ctx.to_result() | 0.005034 |
def crypto_sign_ed25519_sk_to_curve25519(secret_key_bytes):
"""
Converts a secret Ed25519 key (encoded as bytes ``secret_key_bytes``) to
a secret Curve25519 key as bytes.
Raises a ValueError if ``secret_key_bytes`` is not of length
``crypto_sign_SECRETKEYBYTES``
:param secret_key_bytes: bytes
:rtype: bytes
"""
if len(secret_key_bytes) != crypto_sign_SECRETKEYBYTES:
raise exc.ValueError("Invalid curve secret key")
curve_secret_key_len = crypto_sign_curve25519_BYTES
curve_secret_key = ffi.new("unsigned char[]", curve_secret_key_len)
rc = lib.crypto_sign_ed25519_sk_to_curve25519(curve_secret_key,
secret_key_bytes)
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return ffi.buffer(curve_secret_key, curve_secret_key_len)[:] | 0.001125 |
def with_header(self, key, value):
"""Sets a header on the request and returns the request itself.
The header key will be canonicalized before use. (see also: canonicalize_header)
Keyword arguments:
key -- the header's name
value -- the string value for the header
"""
self.header[canonicalize_header(key)] = value
return self | 0.007673 |
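Because with_header() returns the request itself, calls chain naturally. A hedged sketch follows; the Request constructor and the exact canonical form produced by canonicalize_header are assumptions, not part of the snippet:

# Hypothetical usage: canonicalize_header is assumed to title-case names,
# e.g. "content-type" -> "Content-Type".
req = Request().with_header("content-type", "application/json").with_header("x-request-id", "abc123")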
def __get_reserve_details(self, account_id, **kwargs):
"""Call documentation: `/account/get_reserve_details
<https://www.wepay.com/developer/reference/account#reserve>`_, plus extra
keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to its value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {
'account_id': account_id
}
return self.make_call(self.__get_reserve_details, params, kwargs) | 0.00565 |
def _convert_hexstr_base(hexstr, base):
r"""
Packs a long hexstr into a shorter length string with a larger base.
Args:
hexstr (str): string of hexadecimal symbols to convert
base (list): symbols of the conversion base
Example:
>>> print(_convert_hexstr_base('ffffffff', _ALPHABET_26))
nxmrlxv
>>> print(_convert_hexstr_base('0', _ALPHABET_26))
0
>>> print(_convert_hexstr_base('-ffffffff', _ALPHABET_26))
-nxmrlxv
>>> print(_convert_hexstr_base('aafffff1', _ALPHABET_16))
aafffff1
Sympy:
>>> import sympy as sy
>>> # Determine the length savings with lossless conversion
>>> consts = dict(hexbase=16, hexlen=256, baselen=27)
>>> symbols = sy.symbols('hexbase, hexlen, baselen, newlen')
>>> hexbase, hexlen, baselen, newlen = symbols
>>> eqn = sy.Eq(16 ** hexlen, baselen ** newlen)
>>> newlen_ans = sy.solve(eqn, newlen)[0].subs(consts).evalf()
>>> print('newlen_ans = %r' % (newlen_ans,))
>>> # for a 26 char base we can get 216
>>> print('Required length for lossless conversion newlen = %r' % (newlen_ans,))
>>> def info(base, len):
... bits = base ** len
... print('base = %r' % (base,))
... print('len = %r' % (len,))
... print('bits = %r' % (bits,))
>>> info(16, 256)
>>> info(27, 16)
>>> info(27, 64)
>>> info(27, 216)
"""
if base is _ALPHABET_16:
# already in hex, no conversion needed
return hexstr
baselen = len(base)
x = int(hexstr, 16) # first convert to base 16
if x == 0:
return '0'
sign = 1 if x > 0 else -1
x *= sign
digits = []
while x:
digits.append(base[x % baselen])
x //= baselen
if sign < 0:
digits.append('-')
digits.reverse()
newbase_str = ''.join(digits)
return newbase_str | 0.001018 |
def check_blas_config():
""" checks to see if using OpenBlas/Intel MKL. If so, warn if the number of threads isn't set
to 1 (causes severe perf issues when training - can be 10x slower) """
# don't warn repeatedly
global _checked_blas_config
if _checked_blas_config:
return
_checked_blas_config = True
if np.__config__.get_info('openblas_info') and os.environ.get('OPENBLAS_NUM_THREADS') != '1':
logging.warning("OpenBLAS detected. It is highly recommended to set the environment variable "
"'export OPENBLAS_NUM_THREADS=1' to disable its internal multithreading")
if np.__config__.get_info('blas_mkl_info') and os.environ.get('MKL_NUM_THREADS') != '1':
logging.warning("Intel MKL BLAS detected. It is highly recommended to set the environment "
"variable 'export MKL_NUM_THREADS=1' to disable its internal "
"multithreading") | 0.008448 |
def startprocessmonitor(self, process_name, interval=2):
"""
Start memory and CPU monitoring, with the time interval between
each process scan
@param process_name: Process name, ex: firefox-bin.
@type process_name: string
@param interval: Time interval between each process scan
@type interval: double
@return: 1 on success
@rtype: integer
"""
if process_name in self._process_stats:
# Stop previously running instance
# At any point, only one process name can be tracked
# If an instance already exists, then stop it
self._process_stats[process_name].stop()
# Create an instance of process stat
self._process_stats[process_name] = ProcessStats(process_name, interval)
# start monitoring the process
self._process_stats[process_name].start()
return 1 | 0.003236 |
def get_first_model_with_resource_name(cls, resource_name):
""" Get the first model corresponding to a resource_name
Args:
resource_name: the resource name
"""
models = cls.get_models_with_resource_name(resource_name)
if len(models) > 0:
return models[0]
return None | 0.005714 |
def erase_line(method=EraseMethod.ALL, file=sys.stdout):
""" Erase a line, or part of a line. See `method` argument below.
Cursor position does not change.
Esc[<method>K
Arguments:
method : One of these possible values:
EraseMethod.END or 0:
Clear from cursor to the end of the line.
EraseMethod.START or 1:
Clear from cursor to the start of the line.
EraseMethod.ALL or 2:
Clear the entire line.
Default: EraseMethod.ALL (2)
"""
erase.line(method).write(file=file) | 0.001445 |
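For the default EraseMethod.ALL, the sequence written to the stream is the standard ANSI "erase in line" escape; a rough standalone equivalent (a sketch, assuming an ANSI-capable terminal, with the cursor position left unchanged):

import sys

# CSI 2 K clears the entire current line without moving the cursor.
sys.stdout.write("\x1b[2K")
sys.stdout.flush()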
def main(in_base, out_base, compiled_files, source_files, outfile=None,
showasm=None, showast=False, do_verify=False,
showgrammar=False, raise_on_error=False,
do_linemaps=False, do_fragments=False):
"""
in_base base directory for input files
out_base base directory for output files (ignored when outfile is given)
files list of filenames to be uncompyled (relative to in_base)
outfile write output to this filename (overwrites out_base)
For redirecting output to
- <filename> outfile=<filename> (out_base is ignored)
- files below out_base out_base=...
- stdout out_base=None, outfile=None
"""
tot_files = okay_files = failed_files = verify_failed_files = 0
current_outfile = outfile
linemap_stream = None
for source_path in source_files:
compiled_files.append(compile_file(source_path))
for filename in compiled_files:
infile = os.path.join(in_base, filename)
# print("XXX", infile)
if not os.path.exists(infile):
sys.stderr.write("File '%s' doesn't exist. Skipped\n"
% infile)
continue
if do_linemaps:
linemap_stream = infile + '.pymap'
pass
# print (infile, file=sys.stderr)
if outfile: # outfile was given as parameter
outstream = _get_outstream(outfile)
elif out_base is None:
outstream = sys.stdout
if do_linemaps:
linemap_stream = sys.stdout
if do_verify:
prefix = os.path.basename(filename) + '-'
if prefix.endswith('.py'):
prefix = prefix[:-len('.py')]
# Unbuffer output if possible
buffering = -1 if sys.stdout.isatty() else 0
if PYTHON_VERSION >= 3.5:
t = tempfile.NamedTemporaryFile(mode='w+b',
buffering=buffering,
suffix='.py',
prefix=prefix)
else:
t = tempfile.NamedTemporaryFile(mode='w+b',
suffix='.py',
prefix=prefix)
current_outfile = t.name
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', buffering)
tee = subprocess.Popen(["tee", current_outfile],
stdin=subprocess.PIPE)
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
else:
if filename.endswith('.pyc'):
current_outfile = os.path.join(out_base, filename[0:-1])
else:
current_outfile = os.path.join(out_base, filename) + '_dis'
pass
pass
outstream = _get_outstream(current_outfile)
# print(current_outfile, file=sys.stderr)
# Try to uncompile the input file
try:
deparsed = decompile_file(infile, outstream, showasm, showast, showgrammar,
linemap_stream, do_fragments)
if do_fragments:
for d in deparsed:
last_mod = None
offsets = d.offsets
for e in sorted([k for k in offsets.keys() if isinstance(k[1], int)]):
if e[0] != last_mod:
line = '=' * len(e[0])
outstream.write("%s\n%s\n%s\n" % (line, e[0], line))
last_mod = e[0]
info = offsets[e]
extractInfo = d.extract_node_info(info)
outstream.write("%s" % info.node.format().strip() + "\n")
outstream.write(extractInfo.selectedLine + "\n")
outstream.write(extractInfo.markerLine + "\n\n")
pass
pass
tot_files += 1
except (ValueError, SyntaxError, ParserError, pysource.SourceWalkerError) as e:
sys.stdout.write("\n")
sys.stderr.write("\n# file %s\n# %s\n" % (infile, e))
failed_files += 1
tot_files += 1
except KeyboardInterrupt:
if outfile:
outstream.close()
os.remove(outfile)
sys.stdout.write("\n")
sys.stderr.write("\nLast file: %s " % (infile))
raise
# except:
# failed_files += 1
# if current_outfile:
# outstream.close()
# os.rename(current_outfile, current_outfile + '_failed')
# else:
# sys.stderr.write("\n# %s" % sys.exc_info()[1])
# sys.stderr.write("\n# Can't uncompile %s\n" % infile)
else: # uncompile successful
if current_outfile:
outstream.close()
if do_verify:
try:
msg = verify.compare_code_with_srcfile(infile,
current_outfile,
do_verify)
if not current_outfile:
if not msg:
print('\n# okay decompiling %s' % infile)
okay_files += 1
else:
verify_failed_files += 1
print('\n# %s\n\t%s' % (infile, msg))
pass
else:
okay_files += 1
pass
except verify.VerifyCmpError as e:
print(e)
verify_failed_files += 1
os.rename(current_outfile, current_outfile + '_unverified')
sys.stderr.write("### Error Verifying %s\n" % filename)
sys.stderr.write(str(e) + "\n")
if not outfile:
if raise_on_error:
raise
pass
pass
pass
else:
okay_files += 1
pass
elif do_verify:
sys.stderr.write("\n### uncompile successful, but no file to compare against\n")
pass
else:
okay_files += 1
if not current_outfile:
mess = '\n# okay decompiling'
# mem_usage = __memUsage()
print(mess, infile)
if current_outfile:
sys.stdout.write("%s\r" %
status_msg(do_verify, tot_files, okay_files, failed_files,
verify_failed_files, do_verify))
try:
# FIXME: Something is weird with Pypy here
sys.stdout.flush()
except:
pass
if current_outfile:
sys.stdout.write("\n")
try:
# FIXME: Something is weird with Pypy here
sys.stdout.flush()
except:
pass
pass
return (tot_files, okay_files, failed_files, verify_failed_files) | 0.001722 |
def create_section(self, name, overwrite=True):
"""
create and return a new sub-section of this manifest, with the
given Name attribute. If a sub-section already exists with
that name, it will be lost unless overwrite is False in which
case the existing sub-section will be returned.
"""
if overwrite:
sect = ManifestSection(name)
self.sub_sections[name] = sect
else:
sect = self.sub_sections.get(name, None)
if sect is None:
sect = ManifestSection(name)
self.sub_sections[name] = sect
return sect | 0.003067 |
def challenge_hash(peer_challenge, authenticator_challenge, username):
"""ChallengeHash"""
sha_hash = hashlib.sha1()
sha_hash.update(peer_challenge)
sha_hash.update(authenticator_challenge)
sha_hash.update(username)
return sha_hash.digest()[:8] | 0.003731 |
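A minimal usage sketch for the MS-CHAPv2 ChallengeHash step; the 16-byte challenges and the username must already be byte strings (the values below are illustrative):

# Usage sketch; assumes the challenge_hash() defined above is importable.
peer_challenge = bytes(range(16))
authenticator_challenge = bytes(range(16, 32))
digest = challenge_hash(peer_challenge, authenticator_challenge, b"alice")
assert len(digest) == 8   # first 8 bytes of the SHA-1 digest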
def make_full_qualified_url(self, path: str) -> str:
""" append application url to path"""
return self.application_uri.rstrip('/') + '/' + path.lstrip('/') | 0.011696 |
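Stripping and re-adding the slash makes the join insensitive to how either side is written; a quick sketch with a hypothetical base URI:

application_uri = "https://api.example.com/app/"   # hypothetical base
full = application_uri.rstrip('/') + '/' + "/users/42".lstrip('/')
assert full == "https://api.example.com/app/users/42"
assert full == application_uri.rstrip('/') + '/' + "users/42".lstrip('/')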
def prepare_fixed_decimal(data, schema):
"""Converts decimal.Decimal to fixed length bytes array"""
if not isinstance(data, decimal.Decimal):
return data
scale = schema.get('scale', 0)
size = schema['size']
# based on https://github.com/apache/avro/pull/82/
sign, digits, exp = data.as_tuple()
if -exp > scale:
raise ValueError(
'Scale provided in schema does not match the decimal')
delta = exp + scale
if delta > 0:
digits = digits + (0,) * delta
unscaled_datum = 0
for digit in digits:
unscaled_datum = (unscaled_datum * 10) + digit
bits_req = unscaled_datum.bit_length() + 1
size_in_bits = size * 8
offset_bits = size_in_bits - bits_req
mask = 2 ** size_in_bits - 1
bit = 1
for i in range(bits_req):
mask ^= bit
bit <<= 1
if bits_req < 8:
bytes_req = 1
else:
bytes_req = bits_req // 8
if bits_req % 8 != 0:
bytes_req += 1
tmp = MemoryIO()
if sign:
unscaled_datum = (1 << bits_req) - unscaled_datum
unscaled_datum = mask | unscaled_datum
for index in range(size - 1, -1, -1):
bits_to_write = unscaled_datum >> (8 * index)
tmp.write(mk_bits(bits_to_write & 0xff))
else:
for i in range(offset_bits // 8):
tmp.write(mk_bits(0))
for index in range(bytes_req - 1, -1, -1):
bits_to_write = unscaled_datum >> (8 * index)
tmp.write(mk_bits(bits_to_write & 0xff))
return tmp.getvalue() | 0.000634 |
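The bytes produced are the unscaled integer of the decimal, stored big-endian in two's complement and padded to the fixed size. A standalone sketch of that encoding using int.to_bytes, with scale=2 and size=8 as illustrative values:

import decimal

# Decimal('123.45') with scale 2 has unscaled value 12345 (0x3039),
# stored big-endian two's complement in 8 bytes.
value = decimal.Decimal("123.45")
unscaled = int(value.scaleb(2))
assert unscaled.to_bytes(8, "big", signed=True) == b"\x00\x00\x00\x00\x00\x00\x30\x39"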
def rotateAboutVectorMatrix(vec, theta_deg):
"""Construct the matrix that rotates vector a about
vector vec by an angle of theta_deg degrees
Taken from
http://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle
Input:
theta_deg (float) Angle through which vectors should be
rotated in degrees
Returns:
A matrix
To rotate a vector, premultiply by this matrix.
To rotate the coord sys underneath the vector, post multiply
"""
ct = np.cos(np.radians(theta_deg))
st = np.sin(np.radians(theta_deg))
# Ensure vector has normal length
vec /= np.linalg.norm(vec)
assert( np.all( np.isfinite(vec)))
# compute the three terms
term1 = ct * np.eye(3)
ucross = np.zeros( (3,3))
ucross[0] = [0, -vec[2], vec[1]]
ucross[1] = [vec[2], 0, -vec[0]]
ucross[2] = [-vec[1], vec[0], 0]
term2 = st*ucross
ufunny = np.zeros( (3,3))
for i in range(0,3):
for j in range(i,3):
ufunny[i,j] = vec[i]*vec[j]
ufunny[j,i] = ufunny[i,j]
term3 = (1-ct) * ufunny
return term1 + term2 + term3 | 0.010453 |
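A quick check of the Rodrigues construction above: rotating the x unit vector by 90 degrees about z should give the y unit vector when premultiplying, as the docstring describes:

import numpy as np

# Usage sketch; assumes the rotateAboutVectorMatrix() defined above is importable.
axis = np.array([0.0, 0.0, 1.0])          # float array, so in-place normalization works
R = rotateAboutVectorMatrix(axis, 90.0)
rotated = R @ np.array([1.0, 0.0, 0.0])   # premultiply to rotate the vector
assert np.allclose(rotated, [0.0, 1.0, 0.0])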
def readdatacommlst(idfname):
"""read the idf file"""
# iddfile = sys.path[0] + '/EplusCode/Energy+.idd'
iddfile = 'Energy+.idd'
# iddfile = './EPlusInterfaceFunctions/E1.idd' # TODO : can the path name be not hard coded
iddtxt = open(iddfile, 'r').read()
block, commlst, commdct = parse_idd.extractidddata(iddfile)
theidd = eplusdata.Idd(block, 2)
data = eplusdata.Eplusdata(theidd, idfname)
return data, commlst | 0.004444 |
def upload_resumable(self, fd, filesize, filehash, unit_hash, unit_id,
unit_size, quick_key=None, action_on_duplicate=None,
mtime=None, version_control=None, folder_key=None,
filedrop_key=None, path=None, previous_hash=None):
"""upload/resumable
http://www.mediafire.com/developers/core_api/1.3/upload/#resumable
"""
action = 'upload/resumable'
headers = {
'x-filesize': str(filesize),
'x-filehash': filehash,
'x-unit-hash': unit_hash,
'x-unit-id': str(unit_id),
'x-unit-size': str(unit_size)
}
params = QueryParams({
'quick_key': quick_key,
'action_on_duplicate': action_on_duplicate,
'mtime': mtime,
'version_control': version_control,
'folder_key': folder_key,
'filedrop_key': filedrop_key,
'path': path,
'previous_hash': previous_hash
})
upload_info = {
"fd": fd,
"filename": "chunk"
}
return self.request(action, params, action_token_type="upload",
upload_info=upload_info, headers=headers) | 0.003934 |
def appkit_mouse_process(pipe):
"""Single subprocess for reading mouse events on Mac using older AppKit."""
# pylint: disable=import-error,too-many-locals
# Note Objective C does not support a Unix style fork.
# So these imports have to be inside the child subprocess since
# otherwise the child process cannot use them.
# pylint: disable=no-member, no-name-in-module
from Foundation import NSObject
from AppKit import NSApplication, NSApp
from Cocoa import (NSEvent, NSLeftMouseDownMask,
NSLeftMouseUpMask, NSRightMouseDownMask,
NSRightMouseUpMask, NSMouseMovedMask,
NSLeftMouseDraggedMask,
NSRightMouseDraggedMask, NSMouseEnteredMask,
NSMouseExitedMask, NSScrollWheelMask,
NSOtherMouseDownMask, NSOtherMouseUpMask)
from PyObjCTools import AppHelper
import objc
class MacMouseSetup(NSObject):
"""Setup the handler."""
@objc.python_method
def init_with_handler(self, handler):
"""
Init method that receives the write end of the pipe.
"""
# ALWAYS call the super's designated initializer.
# Also, make sure to re-bind "self" just in case it
# returns something else!
# pylint: disable=self-cls-assignment
self = super(MacMouseSetup, self).init()
self.handler = handler
# Unlike Python's __init__, initializers MUST return self,
# because they are allowed to return any object!
return self
# pylint: disable=invalid-name, unused-argument
def applicationDidFinishLaunching_(self, notification):
"""Bind the listen method as the handler for mouse events."""
mask = (NSLeftMouseDownMask | NSLeftMouseUpMask |
NSRightMouseDownMask | NSRightMouseUpMask |
NSMouseMovedMask | NSLeftMouseDraggedMask |
NSRightMouseDraggedMask | NSScrollWheelMask |
NSMouseEnteredMask | NSMouseExitedMask |
NSOtherMouseDownMask | NSOtherMouseUpMask)
NSEvent.addGlobalMonitorForEventsMatchingMask_handler_(
mask, self.handler)
class MacMouseListener(AppKitMouseBaseListener):
"""Loosely emulate Evdev mouse behaviour on the Macs.
Listen for key events then buffer them in a pipe.
"""
def install_handle_input(self):
"""Install the hook."""
self.app = NSApplication.sharedApplication()
# pylint: disable=no-member
delegate = MacMouseSetup.alloc().init_with_handler(
self.handle_input)
NSApp().setDelegate_(delegate)
AppHelper.runEventLoop()
def __del__(self):
"""Stop the listener on deletion."""
AppHelper.stopEventLoop()
# pylint: disable=unused-variable
mouse = MacMouseListener(pipe, events=[]) | 0.000327 |
def lsfiles(root=".", **kwargs):
"""
Return only files from a directory listing.
Arguments:
root (str): Path to directory. Can be relative or absolute.
**kwargs: Any additional arguments to be passed to ls().
Returns:
list of str: A list of file paths.
Raises:
OSError: If root directory does not exist.
"""
paths = ls(root=root, **kwargs)
if isfile(root):
return paths
return [_path for _path in paths if isfile(path(root, _path))] | 0.001946 |
def _getEventsByDay(self, request, firstDay, lastDay):
"""
Return the events in this site for the dates given, grouped by day.
"""
home = request.site.root_page
return getAllEventsByDay(request, firstDay, lastDay, home=home) | 0.007576 |
def bootstrap_ts(y, func, B=1000, b=3):
""" Bootstrap a timeseries using a window size:b. """
beta_star = np.empty(B)
z = y
z_star = np.empty(len(z))
for boot_i in range(B):
for block_i, start in enumerate(np.random.randint(len(z) - b + 1, size=len(z) // b)):
z_star[block_i * b:(block_i + 1) * b] = z[start:start + b]
beta_star[boot_i] = func(z_star)
return beta_star | 0.004728 |
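A usage sketch: block-bootstrapping the mean of a series with block size 3 (B, b and the series length are illustrative; the length is a multiple of b so the blocks tile the resampled series exactly):

import numpy as np

# Usage sketch; assumes the bootstrap_ts() defined above is importable.
y = np.random.randn(300)
boot_means = bootstrap_ts(y, np.mean, B=200, b=3)
assert boot_means.shape == (200,)
low, high = np.percentile(boot_means, [2.5, 97.5])   # e.g. a rough 95% interval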
def create_header_from_telpars(telpars):
"""
Create a list of fits header items from GTC telescope pars.
The GTC telescope server gives a list of string describing
FITS header items such as RA, DEC, etc.
Arguments
---------
telpars : list
list returned by server call to getTelescopeParams
"""
# pars is a list of strings describing tel info in FITS
# style, each entry in the list is a different class of
# thing (weather, telescope, instrument etc).
# first, we munge it into a single list of strings, each one
# describing a single item whilst also stripping whitespace
pars = [val.strip() for val in (';').join(telpars).split(';')
if val.strip() != '']
# apply parse_hstring to everything in pars
with warnings.catch_warnings():
warnings.simplefilter('ignore', fits.verify.VerifyWarning)
hdr = fits.Header(map(parse_hstring, pars))
return hdr | 0.001047 |
def gross_lev(positions):
"""
Calculates the gross leverage of a strategy.
Parameters
----------
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
pd.Series
Gross leverage.
"""
exposure = positions.drop('cash', axis=1).abs().sum(axis=1)
return exposure / positions.sum(axis=1) | 0.00237 |
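A worked example: one long and one short position plus cash gives gross exposure |50| + |-30| = 80 on a total portfolio value of 100, i.e. gross leverage 0.8 (the column names and date are placeholders):

import pandas as pd

# Sketch; assumes the gross_lev() defined above is importable.
positions = pd.DataFrame({"AAPL": [50.0], "MSFT": [-30.0], "cash": [80.0]}, index=pd.to_datetime(["2019-01-02"]))
assert gross_lev(positions).iloc[0] == 0.8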
def _depth_first_search(self, target_id, layer_id_list, node_list):
"""Search for all the layers and nodes down the path.
A recursive function to search all the layers and nodes between the node in the node_list
and the node with target_id."""
assert len(node_list) <= self.n_nodes
u = node_list[-1]
if u == target_id:
return True
for v, layer_id in self.adj_list[u]:
layer_id_list.append(layer_id)
node_list.append(v)
if self._depth_first_search(target_id, layer_id_list, node_list):
return True
layer_id_list.pop()
node_list.pop()
return False | 0.004267 |
def send_result(self, additional_dict):
'''
Send a result to the RPC client
:param additional_dict: the dictionary with the response
'''
self.send_response(200)
self.send_header("Content-type", "application/json")
response = {
'jsonrpc': self.req_rpc_version,
'id': self.req_id,
}
response.update(additional_dict)
jresponse = json.dumps(response).encode()
self.send_header("Content-length", str(len(jresponse)))
self.end_headers()
self.wfile.write(jresponse)
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
"""Correct off-by-one error in GFDL instantaneous model data.
Instantaneous data that is outputted by GFDL models is generally off by
one timestep. For example, a netCDF file that is supposed to
correspond to 6 hourly data for the month of January, will have its
last time value be in February.
"""
if time_offset is not None:
time = times.apply_time_offset(da[TIME_STR], **time_offset)
da[TIME_STR] = time
else:
if DataAttrs['dtype_in_time'] == 'inst':
if DataAttrs['intvl_in'].endswith('hr'):
offset = -1 * int(DataAttrs['intvl_in'][0])
else:
offset = 0
time = times.apply_time_offset(da[TIME_STR], hours=offset)
da[TIME_STR] = time
return da | 0.002169 |
def import_schema(self, definitions, d):
"""Import schema as <types/> content."""
if not definitions.types:
root = Element("types", ns=wsdlns)
definitions.root.insert(root)
types = Types(root, definitions)
definitions.types.append(types)
else:
types = definitions.types[-1]
types.root.append(d.root)
log.debug("imported (XSD):\n%s", d.root) | 0.004545 |
def overall_CEN_calc(classes, TP, TOP, P, CEN_dict, modified=False):
"""
Calculate Overall_CEN (Overall confusion entropy).
:param classes: classes
:type classes : list
:param TP: true positive dict for all classes
:type TP : dict
:param TOP: test outcome positive
:type TOP : dict
:param P: condition positive
:type P : dict
:param CEN_dict: CEN dictionary for each class
:type CEN_dict : dict
:param modified : modified mode flag
:type modified : bool
:return: Overall_CEN(MCEN) as float
"""
try:
result = 0
for i in classes:
result += (convex_combination(classes, TP, TOP, P, i, modified) *
CEN_dict[i])
return result
except Exception:
return "None" | 0.001263 |
def _defragment_mountpoint(mountpoint):
'''
Defragment only one BTRFS mountpoint.
'''
out = __salt__['cmd.run_all']("btrfs filesystem defragment -f {0}".format(mountpoint))
return {
'mount_point': mountpoint,
'passed': not out['stderr'],
'log': out['stderr'] or False,
'range': False,
} | 0.005848 |
def hmset_dict(self, key, *args, **kwargs):
"""Set multiple hash fields to multiple values.
dict can be passed as first positional argument:
>>> await redis.hmset_dict(
... 'key', {'field1': 'value1', 'field2': 'value2'})
or keyword arguments can be used:
>>> await redis.hmset_dict(
... 'key', field1='value1', field2='value2')
or dict argument can be mixed with kwargs:
>>> await redis.hmset_dict(
... 'key', {'field1': 'value1'}, field2='value2')
.. note:: ``dict`` and ``kwargs`` are not merged into a single dictionary;
if both are specified and share the same key(s), ``kwargs`` will win:
>>> await redis.hmset_dict('key', {'foo': 'bar'}, foo='baz')
>>> await redis.hget('key', 'foo', encoding='utf-8')
'baz'
"""
if not args and not kwargs:
raise TypeError("args or kwargs must be specified")
pairs = ()
if len(args) > 1:
raise TypeError("single positional argument allowed")
elif len(args) == 1:
if not isinstance(args[0], dict):
raise TypeError("args[0] must be dict")
elif not args[0] and not kwargs:
raise ValueError("args[0] is empty dict")
pairs = chain.from_iterable(args[0].items())
kwargs_pairs = chain.from_iterable(kwargs.items())
return wait_ok(self.execute(
b'HMSET', key, *chain(pairs, kwargs_pairs))) | 0.001308 |
def init_app(self, app, entry_point_group='invenio_oauth2server.scopes',
**kwargs):
"""Flask application initialization.
:param app: An instance of :class:`flask.Flask`.
:param entry_point_group: The entrypoint group name to load plugins.
(Default: ``'invenio_oauth2server.scopes'``)
"""
self.init_config(app)
state = _OAuth2ServerState(app, entry_point_group=entry_point_group)
app.extensions['invenio-oauth2server'] = state
return state | 0.005618 |
def make_random_histogram(center=0.0, stdev=default_stdev, length=default_feature_dim, num_bins=default_num_bins):
"Returns a sequence of histogram density values that sum to 1.0"
hist, bin_edges = np.histogram(get_distr(center, stdev, length),
range=edge_range, bins=num_bins, density=True)
# to ensure they sum to 1.0
hist = hist / sum(hist)
if len(hist) < 2:
raise ValueError('Invalid histogram')
return hist, bin_edges | 0.006098 |
def ls(self, path, offset=None, amount=None):
"""
Return list of files/directories. Each item is a dict.
Keys: 'path', 'creationdate', 'displayname', 'length', 'lastmodified', 'isDir'.
"""
def parseContent(content):
result = []
root = ET.fromstring(content)
for response in root.findall('.//d:response', namespaces=self.namespaces):
node = {
'path': response.find("d:href", namespaces=self.namespaces).text,
'creationdate': response.find("d:propstat/d:prop/d:creationdate", namespaces=self.namespaces).text,
'displayname': response.find("d:propstat/d:prop/d:displayname", namespaces=self.namespaces).text,
'lastmodified': response.find("d:propstat/d:prop/d:getlastmodified", namespaces=self.namespaces).text,
'isDir': response.find("d:propstat/d:prop/d:resourcetype/d:collection", namespaces=self.namespaces) is not None
}
if not node['isDir']:
node['length'] = response.find("d:propstat/d:prop/d:getcontentlength", namespaces=self.namespaces).text
node['etag'] = response.find("d:propstat/d:prop/d:getetag", namespaces=self.namespaces).text
node['type'] = response.find("d:propstat/d:prop/d:getcontenttype", namespaces=self.namespaces).text
result.append(node)
return result
url = path
if (offset is not None) and (amount is not None):
url += "?offset={offset}&amount={amount}".format(offset=offset, amount=amount)
resp = self._sendRequest("PROPFIND", url, {'Depth': '1'})
if resp.status_code == 207:
return parseContent(resp.content)
else:
raise YaDiskException(resp.status_code, resp.content) | 0.008466 |
def populated_column_map(self):
'''Return the _column_map without unused optional fields'''
column_map = []
cls = self.model
for csv_name, field_pattern in cls._column_map:
# Separate the local field name from foreign columns
if '__' in field_pattern:
field_name = field_pattern.split('__', 1)[0]
else:
field_name = field_pattern
# Handle point fields
point_match = re_point.match(field_name)
if point_match:
field = None
else:
field = cls._meta.get_field(field_name)
# Only add optional columns if they are used in the records
if field and field.blank and not field.has_default():
kwargs = {field_name: get_blank_value(field)}
if self.exclude(**kwargs).exists():
column_map.append((csv_name, field_pattern))
else:
column_map.append((csv_name, field_pattern))
return column_map | 0.001867 |
def _from_wkt(string, wkttype=None, strict=False):
"""
Internal method for parsing wkt, with minor differences depending on ogc or esri style.
Arguments:
- *string*: The OGC or ESRI WKT representation as a string.
- *wkttype* (optional): How to parse the WKT string, as either 'ogc', 'esri', or None. If None, tries to autodetect the wkt type before parsing (default).
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- A CS instance of the indicated type.
"""
# TODO
# - Make function for finding next elem by name, instead of knowing its arg index position
# - Maybe verify elem arg name
# make sure valid wkttype
if wkttype: wkttype = wkttype.lower()
assert wkttype in ("ogc","esri",None)
# remove newlines and multi spaces
string = " ".join(string.split())
# parse arguments into components
def _consume_bracket(chars, char):
"char must be the opening bracket"
consumed = ""
depth = 1
while char and depth > 0:
consumed += char
char = next(chars, None)
# update depth level
if char == "[":
depth += 1
elif char == "]":
depth -= 1
consumed += char # consume the last closing char too
return consumed
def _consume_quote(chars, char, quotechar):
"char and quotechar must be the opening quote char"
consumed = ""
# consume the first opening char
consumed += char
char = next(chars, None)
# consume inside
while char and char != quotechar:
consumed += char
char = next(chars, None)
# consume the last closing char too
consumed += char
return consumed
def _next_elem(chars, char):
"char must be the first char of the text that precedes brackets"
header = ""
# skip until next header
while not char.isalpha():
char = next(chars, None)
# first consume the element text header
while char.isalpha():
header += char
char = next(chars, None)
# skip until next brackets (in case of spaces)
while char != "[":
char = next(chars, None)
# then consume the element bracket contents
if char == "[":
content = _consume_bracket(chars, char)
char = next(chars, None)
# split content into args list
content = content[1:-1] # remove enclosing brackets
content = _split_except(content)
# recursively load all subelems
for i,item in enumerate(content):
if isinstance(item, str) and "[" in item:
chars = (char for char in item)
char = next(chars)
item = _next_elem(chars, char)
content[i] = item
return header, content
def _clean_value(string):
string = string.strip()
try: string = float(string)
except: pass
return string
def _split_except(string):
"split the string on every comma, except not while inside quotes or square brackets"
chars = (char for char in string)
char = next(chars)
items = []
consumed = ""
while char:
# dont split on quotes, just consume it
if char in ("'", '"'):
consumed += _consume_quote(chars, char, char)
# dont split inside brackets, just consume it
elif char == "[":
consumed += _consume_bracket(chars, char)
# new splitchar found, add what has been consumed so far as an item, reset, and start consuming until next splitchar
elif char == ",":
consumed = _clean_value(consumed)
items.append(consumed)
consumed = ""
# consume normal char
elif char:
consumed += char
# next
char = next(chars, None)
# append last item too
consumed = _clean_value(consumed)
items.append(consumed)
return items
# load into nested tuples and arglists
crstuples = []
chars = (char for char in string)
char = next(chars)
while char:
header,content = _next_elem(chars, char)
crstuples.append((header, content))
char = next(chars, None)
# autodetect wkttype if not specified
if not wkttype:
topheader,topcontent = crstuples[0]
if topheader == "PROJCS":
geogcsheader,geogcscontent = topcontent[1]
elif topheader == "GEOGCS":
geogcsheader,geogcscontent = topheader,topcontent
# datum elem should be second under geogcs
datumheader, datumcontent = geogcscontent[1]
datumname = datumcontent[0].upper().strip('"')
# esri wkt datums all use "D_" before the datum name
if datumname.startswith("D_"):
wkttype = "esri"
else:
wkttype = "ogc"
# parse into actual crs objects
def _parse_top(header, content):
"procedure for parsing the toplevel crs element and all its children"
if header.upper() == "PROJCS":
# find name
csname = content[0].strip('"')
# find geogcs elem (by running parse again)
subheader, subcontent = content[1]
geogcs = _parse_top(subheader, subcontent)
# find projection elem
for part in content:
if isinstance(part, tuple):
subheader,subcontent = part
if subheader == "PROJECTION":
break
projname = subcontent[0].strip('"')
projclass = projections.find(projname, "%s_wkt" % wkttype, strict)
if projclass:
proj = projclass()
else:
raise NotImplementedError("Unsupported projection: The specified projection name %r could not be found in the list of supported projections" % projname)
# find params
params = []
for part in content:
if isinstance(part, tuple):
subheader,subcontent = part
if subheader == "PARAMETER":
name, value = subcontent[0].strip('"'), subcontent[1]
itemclass = parameters.find(name, "%s_wkt" % wkttype, strict)
if itemclass:
item = itemclass(value)
params.append(item)
# find unit
for part in content:
if isinstance(part, tuple):
subheader,subcontent = part
if subheader == "UNIT":
break
unitname,value = subcontent[0].strip('"'), subcontent[1]
unitclass = units.find(unitname, "%s_wkt" % wkttype, strict)
if unitclass:
unit = unitclass()
else:
unit = units.Unknown()
unit.unitmultiplier.value = value # override default multiplier
linunit = unit
# find twin axis maybe
## if len(content) >= 6:
## twinax = (parameters.Axis(
## else:
## twinax = None
# put it all together
projcs = containers.ProjCS(csname, geogcs, proj, params, linunit) #, twinax)
return projcs
elif header.upper() == "GEOGCS":
# name
csname = content[0].strip('"')
# datum
subheader, subcontent = content[1]
## datum name
datumname = subcontent[0].strip('"')
datumclass = datums.find(datumname, "%s_wkt" % wkttype, strict)
if datumclass:
datum = datumclass()
else:
datum = datums.Unknown()
## datum ellipsoid
subsubheader, subsubcontent = subcontent[1]
ellipsname = subsubcontent[0].strip('"')
ellipsclass = ellipsoids.find(ellipsname, "%s_wkt" % wkttype, strict)
if ellipsclass:
ellipsoid = ellipsclass()
else:
ellipsoid = ellipsoids.Unknown()
ellipsoid.semimaj_ax = parameters.SemiMajorRadius(subsubcontent[1])
if subsubcontent[2] == 0:
# WKT falsely sets inverse flattening to 0 for spheroids
# but actually it cannot be 0, it is the flattening that is 0
ellipsoid.flat = parameters.Flattening(subsubcontent[2])
else:
ellipsoid.inv_flat = parameters.InverseFlattening(subsubcontent[2])
## datum shift
if wkttype == "ogc":
for subsubheader,subsubcontent in subcontent[1:]:
if subsubheader == "TOWGS84":
datumshift = parameters.DatumShift(subsubcontent)
break
else:
datumshift = None
elif wkttype == "esri":
# not used in esri wkt
datumshift = None
## put it all together
datum.ellips = ellipsoid
datum.datumshift = datumshift
# prime mer
subheader, subcontent = content[2]
prime_mer = parameters.PrimeMeridian(subcontent[1])
# angunit
subheader, subcontent = content[3]
unitname,value = subcontent[0].strip('"'), subcontent[1]
unitclass = units.find(unitname, "%s_wkt" % wkttype, strict)
if unitclass:
unit = unitclass()
else:
unit = units.Unknown()
unit.unitmultiplier.value = value # override default multiplier
angunit = unit
# twin axis
# ...
# put it all together
geogcs = containers.GeogCS(csname, datum, prime_mer, angunit, twin_ax=None)
return geogcs
# toplevel collection
header, content = crstuples[0]
crs = _parse_top(header, content)
# use args to create crs
return crs | 0.006887 |
def load(kls, url, getter=None, parser=None, url_load_hook=None, sep=consts.private.SCOPE_SEPARATOR, prim=None, mime_codec=None, resolver=None):
""" load json as a raw App
:param str url: url of path of Swagger API definition
:param getter: customized Getter
:type getter: sub class/instance of Getter
:param parser: the parser to parse the loaded json.
:type parser: pyswagger.base.Context
:param dict app_cache: the cache shared by related App
:param func url_load_hook: hook to patch the url to load json
:param str sep: scope-separater used in this App
:param prim pyswagger.primitives.Primitive: factory for primitives in Swagger
:param mime_codec pyswagger.primitives.MimeCodec: MIME codec
:param resolver: pyswagger.resolve.Resolver: customized resolver used as default when none is provided when resolving
:return: the created App object
:rtype: App
:raises ValueError: if url is wrong
:raises NotImplementedError: the swagger version is not supported.
"""
logger.info('load with [{0}]'.format(url))
url = utils.normalize_url(url)
app = kls(url, url_load_hook=url_load_hook, sep=sep, prim=prim, mime_codec=mime_codec, resolver=resolver)
app.__raw, app.__version = app.load_obj(url, getter=getter, parser=parser)
if app.__version not in ['1.2', '2.0']:
raise NotImplementedError('Unsupported Version: {0}'.format(app.__version))
# update scheme if any
p = six.moves.urllib.parse.urlparse(url)
if p.scheme:
app.schemes.append(p.scheme)
return app | 0.004737 |
def write(self, proto):
"""
:param proto: capnp TwoGramModelProto message builder
"""
super(TwoGramModel, self).writeBaseToProto(proto.modelBase)
proto.reset = self._reset
proto.learningEnabled = self._learningEnabled
proto.prevValues = self._prevValues
self._encoder.write(proto.encoder)
proto.hashToValueDict = [{"hash": h, "value": v}
for h, v in self._hashToValueDict.items()]
twoGramDicts = []
for items in self._twoGramDicts:
twoGramArr = []
for prev, values in items.iteritems():
buckets = [{"index": index, "count": count}
for index, count in values.iteritems()]
if prev is None:
prev = -1
twoGramArr.append({"value": prev, "buckets": buckets})
twoGramDicts.append(twoGramArr)
proto.twoGramDicts = twoGramDicts | 0.005747 |
def _del_subscription(self, a_filter, session):
"""
Delete a session subscription on a given topic
:param a_filter:
:param session:
:return:
"""
deleted = 0
try:
subscriptions = self._subscriptions[a_filter]
for index, (sub_session, qos) in enumerate(subscriptions):
if sub_session.client_id == session.client_id:
self.logger.debug("Removing subscription on topic '%s' for client %s" %
(a_filter, format_client_message(session=session)))
subscriptions.pop(index)
deleted += 1
break
except KeyError:
# Unsubscribe topic not found in current subscribed topics
pass
finally:
return deleted | 0.004651 |
def plot_vs(fignum, Xs, c, ls):
"""
plots vertical lines at Xs values
Parameters
_________
fignum : matplotlib figure number
Xs : list of X values for lines
c : color for lines
ls : linestyle for lines
"""
fig = plt.figure(num=fignum)
for xv in Xs:
bounds = plt.axis()
plt.axvline(
x=xv, ymin=bounds[2], ymax=bounds[3], linewidth=1, color=c, linestyle=ls) | 0.004662 |
def get(self, remotepath, localpath, callback=None):
"""
Copy a remote file (C{remotepath}) from the SFTP server to the local
host as C{localpath}. Any exception raised by operations will be
passed through. This method is primarily provided as a convenience.
@param remotepath: the remote file to copy
@type remotepath: str
@param localpath: the destination path on the local host
@type localpath: str
@param callback: optional callback function that accepts the bytes
transferred so far and the total bytes to be transferred
(since 1.7.4)
@type callback: function(int, int)
@since: 1.4
"""
fr = self.file(remotepath, 'rb')
file_size = self.stat(remotepath).st_size
fr.prefetch()
try:
fl = file(localpath, 'wb')
try:
size = 0
while True:
data = fr.read(32768)
if len(data) == 0:
break
fl.write(data)
size += len(data)
if callback is not None:
callback(size, file_size)
finally:
fl.close()
finally:
fr.close()
s = os.stat(localpath)
if s.st_size != size:
raise IOError('size mismatch in get! %d != %d' % (s.st_size, size)) | 0.002058 |
def _update_collection(self, ctx):
"""
Bulk update
"""
assert isinstance(ctx, ResourceQueryContext)
models = []
for row in ctx.data:
models.append(self._update_one_simple(row.pop('id'), row, ctx))
return models | 0.007143 |
def _load_client_secrets(self, filename):
"""Loads client secrets from the given filename."""
client_type, client_info = clientsecrets.loadfile(filename)
if client_type != clientsecrets.TYPE_WEB:
raise ValueError(
'The flow specified in {0} is not supported.'.format(
client_type))
self.client_id = client_info['client_id']
self.client_secret = client_info['client_secret'] | 0.004329 |
def to_json(self):
"""
Returns a json-compatible object from the constraint that can be saved using the json module.
Example
--------
>>> import json
>>> with open("path_to_file.json", "w") as outfile:
...     json.dump(constraint.to_json(), outfile)
"""
if self.indicator_variable is None:
indicator = None
else:
indicator = self.indicator_variable.name
json_obj = {
"name": self.name,
"expression": expr_to_json(self.expression),
"lb": self.lb,
"ub": self.ub,
"indicator_variable": indicator,
"active_when": self.active_when
}
return json_obj | 0.004027 |
def _get_required_param(self, param_name):
"""Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
errors.NotEnoughArgumentsError: if parameter is not specified.
"""
value = self.request.get(param_name)
if not value:
raise errors.NotEnoughArgumentsError(param_name + " not specified")
return value | 0.004717 |
def all(self, predicate=bool):
'''Determine if all elements in the source sequence satisfy a condition.
All of the source sequence will be consumed.
Note: This method uses immediate execution.
Args:
predicate (callable): An optional single argument function used to
test each elements. If omitted, the bool() function is used
resulting in the elements being tested directly.
Returns:
True if all elements in the sequence meet the predicate condition,
otherwise False.
Raises:
ValueError: If the Queryable is closed()
TypeError: If predicate is not callable.
'''
if self.closed():
raise ValueError("Attempt to call all() on a closed Queryable.")
if not is_callable(predicate):
raise TypeError("all() parameter predicate={0} is "
"not callable".format(repr(predicate)))
return all(self.select(predicate)) | 0.002904 |
def a_urls(html):
'''
return normalized urls found in the 'a' tag
'''
soup = BeautifulSoup(html, 'lxml')
for node in soup.find_all('a'):
try:
href = node['href']
except KeyError:
continue
yield norm_url(href) | 0.003623 |
def find( self, flags = 0 ):
"""
Looks through the text document based on the current criteria. The \
inputted flags will be merged with the generated search flags.
:param flags | <QTextDocument.FindFlag>
:return <bool> | success
"""
# check against the web and text views
if ( not (self._textEdit or self._webView) ):
fg = QColor('darkRed')
bg = QColor('red').lighter(180)
palette = self.palette()
palette.setColor(palette.Text, fg)
palette.setColor(palette.Base, bg)
self._searchEdit.setPalette(palette)
self._searchEdit.setToolTip( 'No Text Edit is linked.' )
return False
if ( self._caseSensitiveCheckbox.isChecked() ):
flags |= QTextDocument.FindCaseSensitively
if ( self._textEdit and self._wholeWordsCheckbox.isChecked() ):
flags |= QTextDocument.FindWholeWords
terms = self._searchEdit.text()
if ( terms != self._lastText ):
self._lastCursor = QTextCursor()
if ( self._regexCheckbox.isChecked() ):
terms = QRegExp(terms)
palette = self.palette()
# search on a text edit
if ( self._textEdit ):
cursor = self._textEdit.document().find(terms,
self._lastCursor,
QTextDocument.FindFlags(flags))
found = not cursor.isNull()
self._lastCursor = cursor
self._textEdit.setTextCursor(cursor)
elif ( QWebPage ):
flags = QWebPage.FindFlags(flags)
flags |= QWebPage.FindWrapsAroundDocument
found = self._webView.findText(terms, flags)
self._lastText = self._searchEdit.text()
if ( not terms or found ):
fg = palette.color(palette.Text)
bg = palette.color(palette.Base)
else:
fg = QColor('darkRed')
bg = QColor('red').lighter(180)
palette.setColor(palette.Text, fg)
palette.setColor(palette.Base, bg)
self._searchEdit.setPalette(palette)
return found | 0.020251 |
def list_tables(source):
# pylint: disable=line-too-long
"""List the names of all tables in this file(s)
Parameters
----------
source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list`
one or more open files, file paths, or LIGO_LW `Document`s
Examples
--------
>>> from gwpy.io.ligolw import list_tables
>>> print(list_tables('H1-LDAS_STRAIN-968654552-10.xml.gz'))
['process', 'process_params', 'sngl_burst', 'search_summary', 'segment_definer', 'segment_summary', 'segment']
""" # noqa: E501
try:
from ligo.lw.ligolw import (Document, Stream)
except ImportError: # no python-ligo-lw
from glue.ligolw.ligolw import Document, Stream
# read file object
if isinstance(source, Document):
xmldoc = source
else:
filt = get_filtering_contenthandler(Stream)
xmldoc = read_ligolw(source, contenthandler=filt)
# get list of table names
tables = []
for tbl in xmldoc.childNodes[0].childNodes:
try:
tables.append(tbl.TableName(tbl.Name))
except AttributeError: # not a table
continue
return tables | 0.000855 |
def flatten_array(grid):
"""
Takes a multi-dimensional array and returns a 1 dimensional array with the
same contents.
"""
grid = [grid[i][j] for i in range(len(grid)) for j in range(len(grid[i]))]
while type(grid[0]) is list:
grid = flatten_array(grid)
return grid | 0.003322 |
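A small usage sketch for uniformly nested grids; because the stopping test only inspects the first element, ragged nesting is not guaranteed to flatten fully:

# Usage sketch; assumes the flatten_array() defined above is importable.
assert flatten_array([[1, 2], [3, 4], [5]]) == [1, 2, 3, 4, 5]
assert flatten_array([[[1, 2]], [[3, 4]]]) == [1, 2, 3, 4]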
def from_base58_seed(cls, base58_seed):
"""Generate a :class:`Keypair` object via Base58 encoded seed.
.. deprecated:: 0.1.7
Base58 address encoding is DEPRECATED! Use this method only for
transition to strkey encoding.
:param str base58_seed: A base58 encoded encoded secret seed.
:return: A new :class:`Keypair` derived from the secret seed.
"""
warnings.warn(
"Base58 address encoding is DEPRECATED! Use this method only for "
"transition to strkey encoding.", DeprecationWarning)
raw_seed = b58decode_check(base58_seed)[1:]
return cls.from_raw_seed(raw_seed) | 0.002963 |
def _add_right(self, d):
'''
Adds the provided domino to the right end of the board.
:param Domino d: domino to add
:return: None
:raises EndsMismatchException: if the values do not match
'''
if not self:
self._left = d.first
self._right = d.second
elif d.first == self.right_end():
self._right = d.second
elif d.second == self.right_end():
self._right = d.first
else:
raise dominoes.EndsMismatchException(
'{} cannot be added to the right of'
' the board - values do not match!'.format(d)
)
self._length += 1 | 0.002845 |
def _compute_forearc_backarc_term(self, C, sites, dists):
"""
Computes the forearc/backarc scaling term given by equation (4).
"""
f_faba = np.zeros_like(dists.rhypo)
# Term only applies to backarc sites (F_FABA = 0. for forearc)
max_dist = dists.rhypo[sites.backarc]
max_dist[max_dist < 85.0] = 85.0
f_faba[sites.backarc] = C['theta7'] +\
(C['theta8'] * np.log(max_dist / 40.0))
return f_faba | 0.004193 |
def dbsafe_encode(value, compress_object=False):
"""
We use deepcopy() here to avoid a problem with cPickle, where dumps
can generate different character streams for same lookup value if
they are referenced differently.
The reason this is important is because we do all of our lookups as
simple string matches, thus the character streams must be the same
for the lookups to work properly. See tests.py for more information.
"""
if not compress_object:
value = b64encode(dumps(deepcopy(value)))
else:
value = b64encode(compress(dumps(deepcopy(value))))
return PickledObject(value) | 0.001563 |
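Decoding is simply the mirror of the pipeline: base64-decode, optionally decompress, then unpickle. A hedged sketch of a matching decoder, assuming dumps/b64encode/compress in the snippet come from pickle, base64 and zlib as the names suggest:

from base64 import b64decode
from pickle import loads
from zlib import decompress

def dbsafe_decode_sketch(value, compress_object=False):
    """Hypothetical counterpart to dbsafe_encode(): undo base64,
    then (optionally) zlib compression, then unpickle."""
    raw = b64decode(value)
    if compress_object:
        raw = decompress(raw)
    return loads(raw)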
def delete_service_definition(self, service_type, identifier):
"""DeleteServiceDefinition.
[Preview API]
:param str service_type:
:param str identifier:
"""
route_values = {}
if service_type is not None:
route_values['serviceType'] = self._serialize.url('service_type', service_type, 'str')
if identifier is not None:
route_values['identifier'] = self._serialize.url('identifier', identifier, 'str')
self._send(http_method='DELETE',
location_id='d810a47d-f4f4-4a62-a03f-fa1860585c4c',
version='5.0-preview.1',
route_values=route_values) | 0.005806 |
def validate_votes(self, validators_H):
"set of validators may change between heights"
assert self.sender
if not self.round_lockset.num_eligible_votes == len(validators_H):
raise InvalidProposalError('round_lockset num_eligible_votes mismatch')
for v in self.round_lockset:
if v.sender not in validators_H:
raise InvalidProposalError('invalid signer') | 0.007092 |
def to(self, space):
"""
Convert color to a different color space.
:param str space: Name of the color space.
:rtype: Color
:returns: A new spectra.Color in the given color space.
"""
if space == self.space: return self
new_color = convert_color(self.color_object, COLOR_SPACES[space])
return self.__class__(space, *new_color.get_value_tuple()) | 0.007177 |
def sharp_round(data, density, kskip, xc, yc, s2m, s4m, nxk, nyk,
datamin, datamax):
"""
sharp_round -- Compute first estimate of the roundness and sharpness of the
detected objects.
A Python translation of the AP_SHARP_ROUND IRAF/DAOFIND function.
"""
# Compute the first estimate of roundness:
sum2 = np.sum(s2m*density)
sum4 = np.sum(s4m*abs(density))
if sum2 == 0.0:
round = 0.0
elif sum4 <= 0.0: # eps?
round = None
else:
round = 2.0 * sum2 / sum4
# Eliminate the sharpness test if the central pixel is bad:
mid_data_pix = data[yc, xc]
mid_dens_pix = density[yc, xc]
if mid_data_pix > datamax:
return True, round, None
if mid_data_pix < datamin:
return False, round, None
########################
# Sharpness statistics:
satur = np.max(kskip*data) > datamax
# Exclude pixels (create a mask) outside the [datamin, datamax] range:
uskip = np.where((data >= datamin) & (data <= datamax), 1, 0)
# Update the mask with the "skipped" values from the convolution kernel:
uskip *= kskip
# Also, exclude central pixel:
uskip[yc, xc] = 0
npixels = np.sum(uskip)
if (npixels < 1 or mid_dens_pix <= 0.0):
return satur, round, None
sharp = (mid_data_pix - np.sum(uskip*data)/npixels) / mid_dens_pix
#sharp = (mid_data_pix - np.mean(uskip*data)) / mid_dens_pix
return satur, round, sharp | 0.002039 |
def as_tuple(obj):
" Given obj return a tuple "
if not obj:
return tuple()
if isinstance(obj, (tuple, set, list)):
return tuple(obj)
if hasattr(obj, '__iter__') and not isinstance(obj, dict):
return obj
return obj, | 0.003817 |
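A few concrete cases make the branches clear (note that on Python 3 a plain string has __iter__ and is therefore returned unchanged rather than wrapped):

# Usage sketch; assumes the as_tuple() defined above is importable.
assert as_tuple(None) == ()
assert as_tuple([1, 2]) == (1, 2)
assert as_tuple(3) == (3,)
assert as_tuple({"a": 1}) == ({"a": 1},)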
def dca(adata,
mode='denoise',
ae_type='zinb-conddisp',
normalize_per_cell=True,
scale=True,
log1p=True,
# network args
hidden_size=(64, 32, 64),
hidden_dropout=0.,
batchnorm=True,
activation='relu',
init='glorot_uniform',
network_kwds={},
# training args
epochs=300,
reduce_lr=10,
early_stop=15,
batch_size=32,
optimizer='rmsprop',
random_state=0,
threads=None,
verbose=False,
training_kwds={},
return_model=False,
return_info=False,
copy=False
):
"""Deep count autoencoder [Eraslan18]_.
Fits a count autoencoder to the raw count data given in the anndata object
in order to denoise the data and to capture hidden representation of
cells in low dimensions. Type of the autoencoder and return values are
determined by the parameters.
.. note::
More information and bug reports `here <https://github.com/theislab/dca>`__.
Parameters
----------
adata : :class:`~anndata.AnnData`
An anndata file with `.raw` attribute representing raw counts.
mode : `str`, optional. `denoise`(default), or `latent`.
`denoise` overwrites `adata.X` with denoised expression values.
In `latent` mode DCA adds `adata.obsm['X_dca']` to given adata
        object. This matrix represents the latent representation of cells computed by DCA.
ae_type : `str`, optional. `zinb-conddisp`(default), `zinb`, `nb-conddisp` or `nb`.
        Type of the autoencoder. Return values and the architecture are
        determined by the type, e.g. `nb` does not provide dropout
        probabilities. Types that end with "-conddisp" assume that dispersion is mean-dependent.
normalize_per_cell : `bool`, optional. Default: `True`.
If true, library size normalization is performed using
the `sc.pp.normalize_per_cell` function in Scanpy and saved into adata
        object. The mean layer re-introduces library size differences by
scaling the mean value of each cell in the output layer. See the
manuscript for more details.
scale : `bool`, optional. Default: `True`.
If true, the input of the autoencoder is centered using
`sc.pp.scale` function of Scanpy. Note that the output is kept as raw
counts as loss functions are designed for the count data.
log1p : `bool`, optional. Default: `True`.
If true, the input of the autoencoder is log transformed with a
pseudocount of one using `sc.pp.log1p` function of Scanpy.
hidden_size : `tuple` or `list`, optional. Default: (64, 32, 64).
Width of hidden layers.
hidden_dropout : `float`, `tuple` or `list`, optional. Default: 0.0.
Probability of weight dropout in the autoencoder (per layer if list
or tuple).
batchnorm : `bool`, optional. Default: `True`.
If true, batch normalization is performed.
activation : `str`, optional. Default: `relu`.
Activation function of hidden layers.
init : `str`, optional. Default: `glorot_uniform`.
Initialization method used to initialize weights.
network_kwds : `dict`, optional.
Additional keyword arguments for the autoencoder.
epochs : `int`, optional. Default: 300.
Number of total epochs in training.
reduce_lr : `int`, optional. Default: 10.
Reduces learning rate if validation loss does not improve in given number of epochs.
early_stop : `int`, optional. Default: 15.
Stops training if validation loss does not improve in given number of epochs.
batch_size : `int`, optional. Default: 32.
Number of samples in the batch used for SGD.
optimizer : `str`, optional. Default: "rmsprop".
Type of optimization method used for training.
random_state : `int`, optional. Default: 0.
Seed for python, numpy and tensorflow.
threads : `int` or None, optional. Default: None
Number of threads to use in training. All cores are used by default.
verbose : `bool`, optional. Default: `False`.
If true, prints additional information about training and architecture.
training_kwds : `dict`, optional.
Additional keyword arguments for the training process.
return_model : `bool`, optional. Default: `False`.
If true, trained autoencoder object is returned. See "Returns".
return_info : `bool`, optional. Default: `False`.
If true, all additional parameters of DCA are stored in `adata.obsm` such as dropout
probabilities (obsm['X_dca_dropout']) and estimated dispersion values
        (obsm['X_dca_dispersion']), if the autoencoder is of type
zinb or zinb-conddisp.
copy : `bool`, optional. Default: `False`.
If true, a copy of anndata is returned.
Returns
-------
If `copy` is true and `return_model` is false, AnnData object is returned.
In "denoise" mode, `adata.X` is overwritten with the denoised values. In "latent" mode, latent\
low dimensional representation of cells are stored in `adata.obsm['X_dca']` and `adata.X`\
is not modified. Note that these values are not corrected for library size effects.
If `return_info` is true, all estimated distribution parameters are stored in AnnData such as:
- `.obsm["X_dca_dropout"]` which is the mixture coefficient (pi) of the zero component\
in ZINB, i.e. dropout probability (only if `ae_type` is `zinb` or `zinb-conddisp`).
- `.obsm["X_dca_dispersion"]` which is the dispersion parameter of NB.
- `.uns["dca_loss_history"]` which stores the loss history of the training. See `.history`\
  attribute of the Keras History class for more details.
Finally, the raw counts are stored in `.raw` attribute of AnnData object.
If `return_model` is given, trained model is returned. When both `copy` and `return_model`\
are true, a tuple of anndata and model is returned in that order.
"""
try:
from dca.api import dca
except ImportError:
raise ImportError('Please install dca package (>= 0.2.1) via `pip install dca`')
return dca(adata,
mode=mode,
ae_type=ae_type,
normalize_per_cell=normalize_per_cell,
scale=scale,
log1p=log1p,
hidden_size=hidden_size,
hidden_dropout=hidden_dropout,
batchnorm=batchnorm,
activation=activation,
init=init,
network_kwds=network_kwds,
epochs=epochs,
reduce_lr=reduce_lr,
early_stop=early_stop,
batch_size=batch_size,
optimizer=optimizer,
random_state=random_state,
threads=threads,
verbose=verbose,
training_kwds=training_kwds,
               return_info=return_info,
               return_model=return_model,
               copy=copy) | 0.005559
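A hedged usage sketch for the wrapper above. It assumes the `dca` and `scanpy` packages are installed and that the AnnData object holds raw counts; the example dataset is only illustrative:

    import scanpy as sc

    adata = sc.datasets.pbmc3k()                        # example AnnData with raw counts
    denoised = dca(adata, mode='denoise', copy=True)    # returns a denoised copy of adata
    dca(adata, mode='latent')                           # stores the latent space in adata.obsm['X_dca']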
def plugins():
"""Returns a tuple of the plugin classes registered with the python style checker.
:rtype: tuple of :class:`pants.contrib.python.checks.checker.common.CheckstylePlugin` subtypes
"""
return (
ClassFactoring,
ConstantLogic,
ExceptStatements,
FutureCompatibility,
ImportOrder,
Indentation,
MissingContextManager,
NewStyleClasses,
Newlines,
PrintStatements,
TrailingWhitespace,
PEP8VariableNames,
PyflakesChecker,
PyCodeStyleChecker,
) | 0.009747 |
def current_portfolio_weights(self):
"""
Compute each asset's weight in the portfolio by calculating its held
value divided by the total value of all positions.
Each equity's value is its price times the number of shares held. Each
futures contract's value is its unit price times number of shares held
times the multiplier.
"""
position_values = pd.Series({
asset: (
position.last_sale_price *
position.amount *
asset.price_multiplier
)
for asset, position in self.positions.items()
})
return position_values / self.portfolio_value | 0.002821 |
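A small worked example mirroring the computation above (numbers are illustrative, not from the source): 10 shares of an equity at $5 with multiplier 1 and 2 futures contracts at $100 with multiplier 50 give position values of 50 and 10,000 against a 10,050 portfolio value.

    import pandas as pd

    position_values = pd.Series({"EQUITY": 10 * 5.0 * 1, "FUTURE": 2 * 100.0 * 50})
    portfolio_value = 10050.0
    weights = position_values / portfolio_value   # EQUITY ~= 0.005, FUTURE ~= 0.995
    print(weights)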
def get_spam_checker(backend_path):
"""
Return the selected spam checker backend.
"""
try:
backend_module = import_module(backend_path)
backend = getattr(backend_module, 'backend')
except (ImportError, AttributeError):
warnings.warn('%s backend cannot be imported' % backend_path,
RuntimeWarning)
backend = None
except ImproperlyConfigured as e:
warnings.warn(str(e), RuntimeWarning)
backend = None
return backend | 0.001953 |
def ColorWithHue(self, hue):
'''Create a new instance based on this one with a new hue.
Parameters:
:hue:
The hue of the new color [0...360].
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 1, 0.5).ColorWithHue(60)
(1.0, 1.0, 0.0, 1.0)
>>> Color.NewFromHsl(30, 1, 0.5).ColorWithHue(60).hsl
(60, 1, 0.5)
'''
h, s, l = self.__hsl
return Color((hue, s, l), 'hsl', self.__a, self.__wref) | 0.004329 |
def pattern(self):
"""
        Return the pattern used to check whether a field name can be accepted by
        this dynamic field. A default pattern ('^fieldname_(.+)$') is used if none
        was set when the field was initialized.
        """
if self.dynamic_version_of is not None:
return self.dynamic_version_of.pattern
if not self._pattern:
self._pattern = re.compile('^%s_(.+)$' % self.name)
return self._pattern | 0.00655 |
def get_magnitude_scaling(self, C, mag):
"""
Returns the magnitude scaling term
"""
d_m = mag - self.CONSTANTS["Mh"]
if mag < self.CONSTANTS["Mh"]:
return C["e1"] + C["b1"] * d_m + C["b2"] * (d_m ** 2.0)
else:
return C["e1"] + C["b3"] * d_m | 0.00641 |
def poisson_ll_2(p1, p2):
"""
Calculates Poisson LL(p1|p2).
"""
p1_1 = p1 + eps
p2_1 = p2 + eps
return np.sum(-p2_1 + p1_1*np.log(p2_1)) | 0.00625 |
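In formula form (eps is a small module-level constant assumed to be defined elsewhere in this file):

    \mathrm{LL}(p_1 \mid p_2) = \sum_i \Big[ -(p_{2,i} + \varepsilon) + (p_{1,i} + \varepsilon)\,\log(p_{2,i} + \varepsilon) \Big]

This is the Poisson log-likelihood of counts p_1 under rates p_2, up to the \log(p_{1,i}!) term, which does not depend on p_2.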
def slice_hidden(x, hidden_size, num_blocks):
"""Slice encoder hidden state under num_blocks.
Args:
x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].
hidden_size: Dimension of the latent space.
num_blocks: Number of blocks in DVQ.
Returns:
Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim].
"""
batch_size, latent_dim, _ = common_layers.shape_list(x)
block_dim = hidden_size // num_blocks
x_sliced = tf.reshape(x,
shape=[batch_size, latent_dim, num_blocks, block_dim])
return x_sliced | 0.010256 |
def sample_stats_to_xarray(self):
"""Extract sample_stats from posterior."""
posterior = self.posterior
posterior_model = self.posterior_model
# copy dims and coords
dims = deepcopy(self.dims) if self.dims is not None else {}
coords = deepcopy(self.coords) if self.coords is not None else {}
# log_likelihood
log_likelihood = self.log_likelihood
if log_likelihood is not None:
if isinstance(log_likelihood, str) and log_likelihood in dims:
dims["log_likelihood"] = dims.pop(log_likelihood)
data = get_sample_stats_stan3(
posterior, model=posterior_model, log_likelihood=log_likelihood
)
return dict_to_dataset(data, library=self.stan, coords=coords, dims=dims) | 0.003672 |
def get_open_files(self):
"""Return files opened by process as a list of namedtuples."""
# XXX - C implementation available on FreeBSD >= 8 only
# else fallback on lsof parser
if hasattr(_psutil_bsd, "get_process_open_files"):
rawlist = _psutil_bsd.get_process_open_files(self.pid)
return [nt_openfile(path, fd) for path, fd in rawlist]
else:
lsof = _psposix.LsofParser(self.pid, self._process_name)
return lsof.get_process_open_files() | 0.003817 |
def to_dict(self):
"""
Converts object into a dictionary.
"""
data = {
'id': self.id,
'referenceId': self.reference_id,
'type': self.type,
'displayName': self.display_name,
'remoteUrl': self.remote_url}
        # Iterate over a snapshot of the keys: popping from the dict while
        # iterating over it directly raises a RuntimeError on Python 3.
        for key in list(data.keys()):
            if data[key] is None:
                data.pop(key)
return data | 0.007317 |
def getmethattr(obj, meth):
"""
Returns either the variable value or method invocation
"""
if hasmethod(obj, meth):
return getattr(obj, meth)()
elif hasvar(obj, meth):
return getattr(obj, meth)
return None | 0.022026 |
def set_split_extents_by_tile_max_bytes(self):
"""
Sets split extents (:attr:`split_begs`
        and :attr:`split_ends`) calculated
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
"""
self.tile_shape = \
calculate_tile_shape_for_max_bytes(
array_shape=self.array_shape,
array_itemsize=self.array_itemsize,
max_tile_bytes=self.max_tile_bytes,
max_tile_shape=self.max_tile_shape,
sub_tile_shape=self.sub_tile_shape,
halo=self.halo
)
self.set_split_extents_by_tile_shape() | 0.002853 |
def debug_print_tree( self, spacing='' ):
''' *Debug only* method for outputting the tree. '''
print (spacing+" "+str(self.word_id)+" "+str(self.text))
if (self.children):
spacing=spacing+" "
for child in self.children:
child.debug_print_tree(spacing) | 0.018987 |
def panic(self, *args):
"""
        Creates a fatal error and exits.
"""
self._err("fatal", *args)
if self.test_errs_mode is False: # pragma: no cover
sys.exit(1) | 0.009756 |
def set_scrollbar_position(self, position):
"""Set scrollbar positions"""
# Scrollbars will be restored after the expanded state
self._scrollbar_positions = position
if self._to_be_loaded is not None and len(self._to_be_loaded) == 0:
self.restore_scrollbar_positions() | 0.006309 |
def physical(self):
"""
get the physical samples values
Returns
-------
phys : Signal
new *Signal* with physical values
"""
if not self.raw or self.conversion is None:
samples = self.samples.copy()
else:
samples = self.conversion.convert(self.samples)
return Signal(
samples,
self.timestamps.copy(),
unit=self.unit,
name=self.name,
conversion=self.conversion,
raw=False,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=self.invalidation_bits,
source=self.source,
encoding=self.encoding,
) | 0.00232 |
def list_wegobjecten_by_straat(self, straat):
'''
List all `wegobjecten` in a :class:`Straat`
:param straat: The :class:`Straat` for which the `wegobjecten` \
are wanted.
:rtype: A :class:`list` of :class:`Wegobject`
'''
try:
id = straat.id
except AttributeError:
id = straat
def creator():
res = crab_gateway_request(
self.client, 'ListWegobjectenByStraatnaamId', id
)
try:
return [
Wegobject(
r.IdentificatorWegobject,
r.AardWegobject
)for r in res.WegobjectItem
]
except AttributeError:
return []
if self.caches['short'].is_configured:
key = 'ListWegobjectenByStraatnaamId#%s' % (id)
wegobjecten = self.caches['short'].get_or_create(key, creator)
else:
wegobjecten = creator()
for r in wegobjecten:
r.set_gateway(self)
return wegobjecten | 0.001776 |
def _MultiNotifyQueue(self, queue, notifications, mutation_pool=None):
"""Does the actual queuing."""
notification_list = []
now = rdfvalue.RDFDatetime.Now()
for notification in notifications:
if not notification.first_queued:
notification.first_queued = (
self.frozen_timestamp or rdfvalue.RDFDatetime.Now())
else:
diff = now - notification.first_queued
if diff.seconds >= self.notification_expiry_time:
# This notification has been around for too long, we drop it.
logging.debug("Dropping notification: %s", str(notification))
continue
notification_list.append(notification)
mutation_pool.CreateNotifications(
self.GetNotificationShard(queue), notification_list) | 0.008997 |
def get_frac_coords_from_lll(self, lll_frac_coords: Vector3Like) -> np.ndarray:
"""
Given fractional coordinates in the lll basis, returns corresponding
fractional coordinates in the lattice basis.
"""
return dot(lll_frac_coords, self.lll_mapping) | 0.006969 |
def plot(self):
"""
Plot
"""
self.before_plot()
self.do_plot_and_bestfit()
self.after_plot()
self.do_label()
self.after_label()
self.save()
self.close()
return self.outputdict | 0.007605 |
def _wiggle_interval(value, wiggle=0.5 ** 44):
r"""Check if ``value`` is in :math:`\left[0, 1\right]`.
Allows a little bit of wiggle room outside the interval. Any value
within ``wiggle`` of ``0.0` will be converted to ``0.0` and similar
for ``1.0``.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
value (float): Value to check in interval.
wiggle (Optional[float]): The amount of wiggle room around the
            endpoints ``0.0`` and ``1.0``.
Returns:
Tuple[float, bool]: Pair of
* The ``value`` if it's in the interval, or ``0`` or ``1``
if the value lies slightly outside. If the ``value`` is
too far outside the unit interval, will be NaN.
* Boolean indicating if the ``value`` is inside the unit interval.
"""
if -wiggle < value < wiggle:
return 0.0, True
elif wiggle <= value <= 1.0 - wiggle:
return value, True
elif 1.0 - wiggle < value < 1.0 + wiggle:
return 1.0, True
else:
return np.nan, False | 0.000879 |
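Example outputs with the default wiggle of 0.5**44 (about 5.7e-14), following directly from the branches above:

    _wiggle_interval(1e-14)        # -> (0.0, True)   just outside 0, within the wiggle room
    _wiggle_interval(0.5)          # -> (0.5, True)   comfortably inside [0, 1]
    _wiggle_interval(1.0 + 1e-14)  # -> (1.0, True)   just outside 1, within the wiggle room
    _wiggle_interval(1.5)          # -> (nan, False)  too far outside the unit interval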
def visible_object_layers(self):
""" This must return layer objects
This is not required for custom data formats.
:return: Sequence of pytmx object layers/groups
"""
return (layer for layer in self.tmx.visible_layers
if isinstance(layer, pytmx.TiledObjectGroup)) | 0.00625 |
def initialize(address='127.0.0.1:27017', database_name='hfos', instance_name="default", reload=False):
"""Initializes the database connectivity, schemata and finally object models"""
global schemastore
global l10n_schemastore
global objectmodels
global collections
global dbhost
global dbport
global dbname
global instance
global initialized
if initialized and not reload:
hfoslog('Already initialized and not reloading.', lvl=warn, emitter="DB", frame_ref=2)
return
dbhost = address.split(':')[0]
dbport = int(address.split(":")[1]) if ":" in address else 27017
dbname = database_name
db_log("Using database:", dbname, '@', dbhost, ':', dbport)
try:
client = pymongo.MongoClient(host=dbhost, port=dbport)
db = client[dbname]
db_log("Database: ", db.command('buildinfo'), lvl=debug)
except Exception as e:
db_log("No database available! Check if you have mongodb > 3.0 "
"installed and running as well as listening on port 27017 "
"of localhost. (Error: %s) -> EXIT" % e, lvl=critical)
sys.exit(5)
warmongo.connect(database_name)
schemastore = _build_schemastore_new()
l10n_schemastore = _build_l10n_schemastore(schemastore)
objectmodels = _build_model_factories(schemastore)
collections = _build_collections(schemastore)
instance = instance_name
initialized = True | 0.002749 |
def get_success_url(self):
"""
By default we use the referer that was stuffed in our
form when it was created
"""
if self.success_url:
# if our smart url references an object, pass that in
if self.success_url.find('@') > 0:
return smart_url(self.success_url, self.object)
else:
return smart_url(self.success_url, None)
elif 'loc' in self.form.cleaned_data:
return self.form.cleaned_data['loc']
raise ImproperlyConfigured("No redirect location found, override get_success_url to not use redirect urls") | 0.004695 |
def init_app_context():
"""Initialize app context for Invenio 2.x."""
try:
from invenio.base.factory import create_app
app = create_app()
app.test_request_context('/').push()
app.preprocess_request()
except ImportError:
pass | 0.003623 |
def _coerceSingleRepetition(self, dataSet):
"""
Make a new liveform with our parameters, and get it to coerce our data
for us.
"""
# make a liveform because there is some logic in _coerced
form = LiveForm(lambda **k: None, self.parameters, self.name)
return form.fromInputs(dataSet) | 0.005917 |
def FoldByteStream(self, mapped_value, **unused_kwargs): # pylint: disable=redundant-returns-doc
"""Folds the data type into a byte stream.
Args:
mapped_value (object): mapped value.
Returns:
bytes: byte stream.
Raises:
FoldingError: if the data type definition cannot be folded into
the byte stream.
"""
raise errors.FoldingError(
'Unable to fold {0:s} data type into byte stream'.format(
self._data_type_definition.TYPE_INDICATOR)) | 0.003929 |
def one_to_many(clsname, **kw):
"""Use an event to build a one-to-many relationship on a class.
This makes use of the :meth:`.References._reference_table` method
to generate a full foreign key relationship from the remote table.
"""
@declared_attr
def o2m(cls):
cls._references((clsname, cls.__name__))
return relationship(clsname, **kw)
return o2m | 0.002538 |
def finish_directory_parse(self):
# type: () -> None
'''
A method to finish up the parsing of this UDF File Entry directory.
In particular, this method checks to see if it is in sorted order for
future use.
Parameters:
None.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF File Entry not initialized')
if self.icb_tag.file_type != 4:
raise pycdlibexception.PyCdlibInternalError('Can only finish_directory for a directory') | 0.008403 |
def align_chunk_with_ner(tmp_ner_path, i_chunk, tmp_done_path):
'''
iterate through the i_chunk and tmp_ner_path to generate a new
Chunk with body.ner
'''
o_chunk = Chunk()
input_iter = i_chunk.__iter__()
ner = ''
stream_id = None
all_ner = xml.dom.minidom.parse(open(tmp_ner_path))
for raw_ner in all_ner.getElementsByTagName('FILENAME'):
stream_item = input_iter.next()
## get stream_id out of the XML
stream_id = raw_ner.attributes.get('docid').value
assert stream_id and stream_id == stream_item.stream_id, \
'%s != %s\nner=%r' % (stream_id, stream_item.stream_id, ner)
tagger_id = 'lingpipe'
tagging = Tagging()
tagging.tagger_id = tagger_id
## get this one file out of its FILENAME tags
tagged_doc = list(lingpipe.files(raw_ner.toxml()))[0][1]
tagging.raw_tagging = tagged_doc
tagging.generation_time = streamcorpus.make_stream_time()
stream_item.body.taggings[tagger_id] = tagging
sentences = list(lingpipe.sentences(tagged_doc))
## make JS labels on individual tokens
assert stream_item.ratings[0].mentions, stream_item.stream_id
john_smith_label = Label()
john_smith_label.annotator = stream_item.ratings[0].annotator
john_smith_label.target_id = stream_item.ratings[0].target_id
# first map all corefchains to their words
equiv_ids = collections.defaultdict(lambda: set())
for sent in sentences:
for tok in sent.tokens:
if tok.entity_type is not None:
equiv_ids[tok.equiv_id].add(cleanse(tok.token))
## find all the chains that are John Smith
johnsmiths = set()
for equiv_id, names in equiv_ids.items():
## detect 'smith' in 'smithye'
_names = cleanse(' '.join(names))
if 'john' in _names and 'smith' in _names:
johnsmiths.add(equiv_id)
print len(johnsmiths)
## now apply the label
for sent in sentences:
for tok in sent.tokens:
if tok.equiv_id in johnsmiths:
tok.labels = [john_smith_label]
stream_item.body.sentences[tagger_id] = sentences
o_chunk.add(stream_item)
## put the o_chunk bytes into the specified file
open(tmp_done_path, 'wb').write(str(o_chunk))
## replace this with log.info()
print 'created %s' % tmp_done_path | 0.004743 |
def QueryInfoKey(key):
"""This calls the Windows RegQueryInfoKey function in a Unicode safe way."""
regqueryinfokey = advapi32["RegQueryInfoKeyW"]
regqueryinfokey.restype = ctypes.c_long
regqueryinfokey.argtypes = [
ctypes.c_void_p, ctypes.c_wchar_p, LPDWORD, LPDWORD, LPDWORD, LPDWORD,
LPDWORD, LPDWORD, LPDWORD, LPDWORD, LPDWORD,
ctypes.POINTER(FileTime)
]
null = LPDWORD()
num_sub_keys = ctypes.wintypes.DWORD()
num_values = ctypes.wintypes.DWORD()
ft = FileTime()
rc = regqueryinfokey(key.handle, ctypes.c_wchar_p(), null, null,
ctypes.byref(num_sub_keys), null, null,
ctypes.byref(num_values), null, null, null,
ctypes.byref(ft))
if rc != ERROR_SUCCESS:
raise ctypes.WinError(2)
last_modified = ft.dwLowDateTime | (ft.dwHighDateTime << 32)
last_modified = last_modified // 10000000 - WIN_UNIX_DIFF_MSECS
return (num_sub_keys.value, num_values.value, last_modified) | 0.014127 |
def completerTree( self ):
"""
Returns the completion tree for this instance.
:return <QTreeWidget>
"""
if not self._completerTree:
self._completerTree = QTreeWidget(self)
self._completerTree.setWindowFlags(Qt.Popup)
self._completerTree.setAlternatingRowColors( True )
self._completerTree.installEventFilter(self)
self._completerTree.itemClicked.connect( self.acceptCompletion )
self._completerTree.setRootIsDecorated(False)
self._completerTree.header().hide()
return self._completerTree | 0.015152 |
def get_admin():
'''
Return the actual admin from token file
'''
if os.path.isfile(LOGIN_FILENAME):
with open(LOGIN_FILENAME, 'r') as token_file:
old_login, old_password = token_file.read().splitlines()[:2]
return old_login, old_password
else:
return None, None | 0.003077 |
def update_exif_GEXIV2(oldfile,newfile):
"""Transfers oldfile's exif to newfile's exif and
updates the width/height EXIF fields"""
# Requires gexiv2 and pygobject package in gentoo
# (USE=introspection)
try:
from gi.repository import GExiv2
    except ImportError:
print("Couldn't import GExiv2")
print("Are you sure you have GExiv2 installed?")
print("See this page: http://goo.gl/0bhDGx")
print("For gentoo, emerge media-libs/gexiv2 with introspection USE flag")
return False
    # exif of original image
exif = GExiv2.Metadata(oldfile)
# exif of resized image
newExif = GExiv2.Metadata(newfile)
# Figure out dimensions
imgresize = Image.open(newfile)
    # save all exif data of original image to resized image
for tag in exif.get_exif_tags():
newExif[tag] = exif[tag]
# edit exif data - size
newExif['Exif.Photo.PixelXDimension'] = str(imgresize.size[0])
newExif['Exif.Photo.PixelYDimension'] = str(imgresize.size[1])
# FIXME: Doesn't work with PENTAX JPG
# Error is: gi._glib.GError: Unsupported data area offset type
newExif.save_file()
return True | 0.007621 |
def useful_mimetype(text):
"""Check to see if the given mime type is a MIME type
which is useful in terms of how to treat this file.
"""
if text is None:
return False
mimetype = normalize_mimetype(text)
return mimetype not in [DEFAULT, PLAIN, None] | 0.003571 |
def scroll_to(self, selector, by=By.CSS_SELECTOR,
timeout=settings.SMALL_TIMEOUT):
''' Fast scroll to destination '''
if self.demo_mode:
self.slow_scroll_to(selector, by=by, timeout=timeout)
return
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
try:
self.__scroll_to_element(element)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
self.__scroll_to_element(element) | 0.003623 |
def memory_write(self, start_position: int, size: int, value: bytes) -> None:
"""
Write ``value`` to memory at ``start_position``. Require that ``len(value) == size``.
"""
return self._memory.write(start_position, size, value) | 0.011628 |
def _setBatchSystemEnvVars(self):
"""
Sets the environment variables required by the job store and those passed on command line.
"""
for envDict in (self._jobStore.getEnv(), self.config.environment):
for k, v in iteritems(envDict):
self._batchSystem.setEnv(k, v) | 0.009317 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'role') and self.role is not None:
_dict['role'] = self.role
return _dict | 0.00627 |