text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def get_matrix(theta, phi, psi, dx, dy, dz,
matrix=np.zeros((4, 4), dtype=DTYPE),
angles=np.zeros(3, dtype=DTYPE)):
"""
Build the rotation-translation matrix.
It has the form:
[ | dx ]
[ R | dy ]
[ | dz ]
[ 0 0 0 | 1 ]
"""
    # NB: the default `matrix` and `angles` arrays are created only once (when the
    # function is defined/compiled) and are overwritten in place on every call.
angles[0] = theta
angles[1] = phi
angles[2] = psi
cx, cy, cz = np.cos(angles)
sx, sy, sz = np.sin(angles)
rotation = matrix[:3, :3]
rotation.flat = (cx * cz - sx * cy * sz,
cx * sz + sx * cy * cz, sx * sy,
-sx * cz - cx * cy * sz,
-sx * sz + cx * cy * cz, cx * sy,
sy * sz,
-sy * cz, cy)
# Translation component
matrix[:3, 3] = dx, dy, dz
matrix[3, 3] = 1.
return matrix | 0.003742 |
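A minimal usage sketch for get_matrix above (assuming the module-level DTYPE is a float dtype such as np.float64). Because the default matrix buffer is reused across calls, the result is copied when it has to persist:
import numpy as np

# Euler angles (pi/2, 0, 0) plus a translation of (1, 2, 3).
M = get_matrix(theta=np.pi / 2, phi=0.0, psi=0.0, dx=1.0, dy=2.0, dz=3.0)

# The default `matrix` buffer is shared between calls, so copy the result if it
# must survive the next call to get_matrix.
M_saved = M.copy()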
def _pack(self, content):
"""pack the content using serializer and compressor"""
if self.serializer:
content = self.serializer.serialize(content)
if self.compressor:
content = self.compressor.compress(content)
return content | 0.007143 |
def is_valid_identifier(identifier):
"""Checks if the given identifier is valid or not. A valid
    identifier may consist of the following characters with a
    maximum length of 100 characters and a minimum of 1 character.
Valid characters for an identifier,
- A to Z
- a to z
- 0 to 9
- _ (underscore)
    - - (hyphen)
"""
if not isinstance(identifier, basestring):
return False
if len(identifier) > 100 or len(identifier) < 1:
return False
condensed_form = set(list(identifier.lower()))
return condensed_form.issubset(VALID_IDENTIFIER_SET) | 0.00161 |
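VALID_IDENTIFIER_SET is defined elsewhere in the module; a hypothetical sketch of how it could look, consistent with the docstring above (the identifier is lower-cased before the check, so lowercase letters suffice):
import string

# Hypothetical definition of the module-level constant used by is_valid_identifier.
VALID_IDENTIFIER_SET = set(string.ascii_lowercase + string.digits + '_-')

# Example checks (under Python 2, where `basestring` exists):
# is_valid_identifier('my-task_01')  -> True
# is_valid_identifier('bad id!')     -> False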
def audit(func):
"""
Record a Flask route function in the audit log.
Generates a JSON record in the Flask log for every request.
"""
@wraps(func)
def wrapper(*args, **kwargs):
options = AuditOptions(
include_request_body=DEFAULT_INCLUDE_REQUEST_BODY,
include_response_body=DEFAULT_INCLUDE_RESPONSE_BODY,
include_path=True,
include_query_string=True,
)
with logging_levels():
return _audit_request(options, func, None, *args, **kwargs)
return wrapper | 0.001773 |
def list_targets_by_instance(self, instance_id, target_list=None):
"""Returns a list of FloatingIpTarget objects of FIP association.
:param instance_id: ID of target VM instance
:param target_list: (optional) a list returned by list_targets().
If specified, looking up is done against the specified list
to save extra API calls to a back-end. Otherwise target list
is retrieved from a back-end inside the method.
"""
if target_list is not None:
# We assume that target_list was returned by list_targets()
# so we can assume checks for subnet reachability and IP version
# have been done already. We skip all checks here.
return [target for target in target_list
if target['instance_id'] == instance_id]
else:
ports = self._target_ports_by_instance(instance_id)
reachable_subnets = self._get_reachable_subnets(
ports, fetch_router_ports=True)
name = self._get_server_name(instance_id)
targets = []
for p in ports:
for ip in p.fixed_ips:
if ip['subnet_id'] not in reachable_subnets:
continue
# Floating IPs can only target IPv4 addresses.
if netaddr.IPAddress(ip['ip_address']).version != 4:
continue
targets.append(FloatingIpTarget(p, ip['ip_address'], name))
return targets | 0.001284 |
def evaluatePotentials(Pot,R,z,phi=None,t=0.,dR=0,dphi=0):
"""
NAME:
evaluatePotentials
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - potential or list of potentials (dissipative forces in such a list are ignored)
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (can be Quantity)
t - time (can be Quantity)
       dR=, dphi= - if set to non-zero integers, return the dR, dphi-th derivative instead
OUTPUT:
Phi(R,z)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
"""
return _evaluatePotentials(Pot,R,z,phi=phi,t=t,dR=dR,dphi=dphi) | 0.020243 |
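A usage sketch, assuming galpy is installed; the potential and coordinates are illustrative values in galpy's natural units:
from galpy.potential import MiyamotoNagaiPotential

mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
phi_value = evaluatePotentials(mp, R=1., z=0.1)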
def _set_strip_header(self, v, load=False):
"""
Setter method for strip_header, mapped from YANG variable /interface/ethernet/strip_header (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_strip_header is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_strip_header() directly.
YANG Description: Vxlan header stripping configuration
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'strip-802-1br': {'value': 1}, u'strip-vn-tag': {'value': 2}, u'strip-vxlan': {'value': 3}},), is_leaf=True, yang_name="strip-header", rest_name="strip-header", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VxLan header stripping configuration', u'cli-drop-node-name': None, u'display-when': u'((/local-node/swbd-number = "2000") or (/local-node/swbd-number = "4000"))'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """strip_header must be of a type compatible with enumeration""",
'defined-type': "brocade-interface:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'strip-802-1br': {'value': 1}, u'strip-vn-tag': {'value': 2}, u'strip-vxlan': {'value': 3}},), is_leaf=True, yang_name="strip-header", rest_name="strip-header", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VxLan header stripping configuration', u'cli-drop-node-name': None, u'display-when': u'((/local-node/swbd-number = "2000") or (/local-node/swbd-number = "4000"))'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='enumeration', is_config=True)""",
})
self.__strip_header = t
if hasattr(self, '_set'):
self._set() | 0.004129 |
def compress_artifact_if_supported(artifact_path):
"""Compress artifacts with GZip if they're known to be supported.
    This replaces the given artifact file with a gzip-compressed version of it.
Args:
artifact_path (str): the path to compress
Returns:
content_type, content_encoding (tuple): Type and encoding of the file. Encoding equals 'gzip' if compressed.
"""
content_type, encoding = guess_content_type_and_encoding(artifact_path)
log.debug('"{}" is encoded with "{}" and has mime/type "{}"'.format(artifact_path, encoding, content_type))
if encoding is None and content_type in _GZIP_SUPPORTED_CONTENT_TYPE:
log.info('"{}" can be gzip\'d. Compressing...'.format(artifact_path))
with open(artifact_path, 'rb') as f_in:
text_content = f_in.read()
with gzip.open(artifact_path, 'wb') as f_out:
f_out.write(text_content)
encoding = 'gzip'
log.info('"{}" compressed'.format(artifact_path))
else:
log.debug('"{}" is not supported for compression.'.format(artifact_path))
return content_type, encoding | 0.0036 |
def decode(self, query):
"""I transform query parameters into an L{OpenIDRequest}.
If the query does not seem to be an OpenID request at all, I return
C{None}.
@param query: The query parameters as a dictionary with each
key mapping to one value.
@type query: dict
@raises ProtocolError: When the query does not seem to be a valid
OpenID request.
@returntype: L{OpenIDRequest}
"""
if not query:
return None
try:
message = Message.fromPostArgs(query)
except InvalidOpenIDNamespace, err:
# It's useful to have a Message attached to a ProtocolError, so we
# override the bad ns value to build a Message out of it. Kinda
# kludgy, since it's made of lies, but the parts that aren't lies
# are more useful than a 'None'.
query = query.copy()
query['openid.ns'] = OPENID2_NS
message = Message.fromPostArgs(query)
raise ProtocolError(message, str(err))
mode = message.getArg(OPENID_NS, 'mode')
if not mode:
fmt = "No mode value in message %s"
raise ProtocolError(message, text=fmt % (message,))
handler = self._handlers.get(mode, self.defaultDecoder)
return handler(message, self.server.op_endpoint) | 0.001441 |
def set_scale_limits(self, scale_min, scale_max):
"""Set scale limits.
Parameters
----------
scale_min, scale_max : float
Minimum and maximum scale limits, respectively.
"""
# TODO: force scale to within limits if already outside?
self.t_.set(scale_min=scale_min, scale_max=scale_max) | 0.00565 |
def write(self, title, data, output=None):
'''
        Add data to the currently opened section.
:return:
'''
if not isinstance(data, (dict, list, tuple)):
data = {'raw-content': str(data)}
output = output or self.__default_outputter
if output != 'null':
try:
if isinstance(data, dict) and 'return' in data:
data = data['return']
content = self._printout(data, output)
except Exception: # Fall-back to just raw YAML
content = None
else:
content = None
if content is None:
data = json.loads(json.dumps(data))
if isinstance(data, dict) and data.get('return'):
data = data.get('return')
content = yaml.safe_dump(data, default_flow_style=False, indent=4)
self.__current_section.append({title: content}) | 0.002123 |
def update_database(adapter, variant_file=None, sv_file=None, family_file=None, family_type='ped',
skip_case_id=False, gq_treshold=None, case_id=None, max_window = 3000):
"""Update a case in the database
Args:
adapter: Connection to database
variant_file(str): Path to variant file
sv_file(str): Path to sv variant file
family_file(str): Path to family file
family_type(str): Format of family file
skip_case_id(bool): If no case information should be added to variants
        gq_treshold(int): If set, only variants with sufficient genotype quality (GQ) are considered
case_id(str): If different case id than the one in family file should be used
max_window(int): Specify the max size for sv windows
Returns:
nr_inserted(int)
"""
vcf_files = []
nr_variants = None
vcf_individuals = None
if variant_file:
vcf_info = check_vcf(variant_file)
nr_variants = vcf_info['nr_variants']
variant_type = vcf_info['variant_type']
vcf_files.append(variant_file)
        # Get the individuals that are present in the vcf file
vcf_individuals = vcf_info['individuals']
nr_sv_variants = None
sv_individuals = None
if sv_file:
vcf_info = check_vcf(sv_file, 'sv')
nr_sv_variants = vcf_info['nr_variants']
vcf_files.append(sv_file)
sv_individuals = vcf_info['individuals']
    # If a gq treshold is used, the variants need to have GQ
for _vcf_file in vcf_files:
# Get a cyvcf2.VCF object
vcf = get_vcf(_vcf_file)
if gq_treshold:
if not vcf.contains('GQ'):
LOG.warning('Set gq-treshold to 0 or add info to vcf {0}'.format(_vcf_file))
raise SyntaxError('GQ is not defined in vcf header')
# Get a ped_parser.Family object from family file
family = None
family_id = None
if family_file:
with open(family_file, 'r') as family_lines:
family = get_case(
family_lines=family_lines,
family_type=family_type
)
family_id = family.family_id
# There has to be a case_id or a family at this stage.
case_id = case_id or family_id
    # Convert information to a loqusdb Case object
case_obj = build_case(
case=family,
case_id=case_id,
vcf_path=variant_file,
vcf_individuals=vcf_individuals,
nr_variants=nr_variants,
vcf_sv_path=sv_file,
sv_individuals=sv_individuals,
nr_sv_variants=nr_sv_variants,
)
existing_case = adapter.case(case_obj)
if not existing_case:
raise CaseError("Case {} does not exist in database".format(case_obj['case_id']))
# Update the existing case in database
case_obj = load_case(
adapter=adapter,
case_obj=case_obj,
update=True,
)
nr_inserted = 0
    # If the case was successfully added we can store the variants
for file_type in ['vcf_path','vcf_sv_path']:
variant_type = 'snv'
if file_type == 'vcf_sv_path':
variant_type = 'sv'
if case_obj.get(file_type) is None:
continue
vcf_obj = get_vcf(case_obj[file_type])
try:
nr_inserted += load_variants(
adapter=adapter,
vcf_obj=vcf_obj,
case_obj=case_obj,
skip_case_id=skip_case_id,
gq_treshold=gq_treshold,
max_window=max_window,
variant_type=variant_type,
)
except Exception as err:
# If something went wrong do a rollback
LOG.warning(err)
delete(
adapter=adapter,
case_obj=case_obj,
update=True,
existing_case=existing_case,
)
raise err
return nr_inserted | 0.005043 |
def backend():
"""
:return:
A unicode string of the backend being used: "openssl", "osx", "win",
"winlegacy"
"""
if _module_values['backend'] is not None:
return _module_values['backend']
with _backend_lock:
if _module_values['backend'] is not None:
return _module_values['backend']
if sys.platform == 'win32':
# Windows XP was major version 5, Vista was 6
if sys.getwindowsversion()[0] < 6:
_module_values['backend'] = 'winlegacy'
else:
_module_values['backend'] = 'win'
elif sys.platform == 'darwin':
_module_values['backend'] = 'osx'
else:
_module_values['backend'] = 'openssl'
return _module_values['backend'] | 0.001242 |
def check_lazy_load_sectie(f):
'''
Decorator function to lazy load a :class:`Sectie`.
'''
def wrapper(self):
sectie = self
if (getattr(sectie, '_%s' % f.__name__, None) is None):
log.debug('Lazy loading Sectie %s in Afdeling %d', sectie.id, sectie.afdeling.id)
sectie.check_gateway()
s = sectie.gateway.get_sectie_by_id_and_afdeling(
sectie.id, sectie.afdeling.id
)
sectie._centroid = s._centroid
sectie._bounding_box = s._bounding_box
return f(self)
return wrapper | 0.003339 |
def write(self,
features=None,
outfile=None,
format=0,
is_leaf_fn=None,
format_root_node=False,
dist_formatter=None,
support_formatter=None,
name_formatter=None):
"""
Returns the newick representation of current node. Several
arguments control the way in which extra data is shown for
every node:
Parameters:
-----------
features:
a list of feature names to be exported using the Extended Newick
Format (i.e. features=["name", "dist"]). Use an empty list to
export all available features in each node (features=[])
outfile:
writes the output to a given file
format:
defines the newick standard used to encode the tree.
format_root_node:
If True, it allows features and branch information from root node
to be exported as a part of the newick text string. For newick
compatibility reasons, this is False by default.
is_leaf_fn:
See :func:`TreeNode.traverse` for documentation.
**Example:**
t.get_newick(features=["species","name"], format=1)
"""
nw = write_newick(self, features=features,
format=format,
is_leaf_fn=is_leaf_fn,
format_root_node=format_root_node,
dist_formatter=dist_formatter,
support_formatter=support_formatter,
name_formatter=name_formatter)
if outfile is not None:
with open(outfile, "w") as OUT:
OUT.write(nw)
else:
return nw | 0.013514 |
def FloatGreaterThanEqualToZero(x):
"""If *x* is a float >= 0, returns it, otherwise raises and error.
>>> print('%.1f' % FloatGreaterThanEqualToZero('1.5'))
1.5
>>> print('%.1f' % FloatGreaterThanEqualToZero('-1.1'))
Traceback (most recent call last):
...
ValueError: -1.1 not float greater than or equal to zero
"""
try:
x = float(x)
except:
raise ValueError("%r not float greater than or equal to zero" % x)
if x >= 0:
return x
else:
raise ValueError("%r not float greater than or equal to zero" % x) | 0.003401 |
def run_parallel_map_providers_query(data, queue=None):
'''
This function will be called from another process when building the
providers map.
'''
salt.utils.crypt.reinit_crypto()
cloud = Cloud(data['opts'])
try:
with salt.utils.context.func_globals_inject(
cloud.clouds[data['fun']],
__active_provider_name__=':'.join([
data['alias'],
data['driver']
])
):
return (
data['alias'],
data['driver'],
salt.utils.data.simple_types_filter(
cloud.clouds[data['fun']]()
)
)
except Exception as err:
log.debug(
'Failed to execute \'%s()\' while querying for running nodes: %s',
data['fun'], err, exc_info_on_loglevel=logging.DEBUG
)
# Failed to communicate with the provider, don't list any nodes
return data['alias'], data['driver'], () | 0.000991 |
def remove_input(urls, preserves, verbose = False):
"""
Attempt to delete all files identified by the URLs in urls except
any that are the same as the files in the preserves list.
"""
for path in map(url2path, urls):
if any(os.path.samefile(path, preserve) for preserve in preserves):
continue
if verbose:
print >>sys.stderr, "removing \"%s\" ..." % path
try:
os.remove(path)
except:
pass | 0.041162 |
def generate_contact_list(config, args):
"""TODO: Docstring for generate_contact_list.
:param config: the config object to use
:type config: config.Config
:param args: the command line arguments
:type args: argparse.Namespace
:returns: the contacts for further processing (TODO)
:rtype: list(TODO)
"""
# fill contact list
vcard_list = []
if "uid" in args and args.uid:
# If an uid was given we use it to find the contact.
logging.debug("args.uid=%s", args.uid)
# set search terms to the empty query to prevent errors in
# phone and email actions
args.search_terms = ".*"
vcard_list = get_contacts(args.addressbook, args.uid, method="uid")
# We require that the uid given can uniquely identify a contact.
if not vcard_list:
sys.exit("Found no contact for {}uid {}".format(
"source " if args.action == "merge" else "", args.uid))
elif len(vcard_list) != 1:
print("Found multiple contacts for {}uid {}".format(
"source " if args.action == "merge" else "", args.uid))
for vcard in vcard_list:
print(" {}: {}".format(vcard, vcard.get_uid()))
sys.exit(1)
else:
# No uid was given so we try to use the search terms to select a
# contact.
if "source_search_terms" in args:
# exception for merge command
if args.source_search_terms:
args.search_terms = args.source_search_terms
else:
args.search_terms = ".*"
elif "search_terms" in args:
if args.search_terms:
args.search_terms = args.search_terms
else:
args.search_terms = ".*"
else:
            # If no search terms were given on the command line we match
# everything with the empty search pattern.
args.search_terms = ".*"
logging.debug("args.search_terms=%s", args.search_terms)
vcard_list = get_contact_list_by_user_selection(
args.addressbook, args.search_terms,
args.strict_search if "strict_search" in args else False)
return vcard_list | 0.000446 |
def get_order(tre):
"""
return tree order
"""
anode = tre.tree&">A"
sister = anode.get_sisters()[0]
sisters = (anode.name[1:], sister.name[1:])
others = [i for i in list("ABCD") if i not in sisters]
return sorted(sisters) + sorted(others) | 0.007407 |
def run_board(args):
"""
Run main entry for AutoMLBoard.
Args:
args: args parsed from command line
"""
init_config(args)
# backend service, should import after django settings initialized
from backend.collector import CollectorService
service = CollectorService(
args.logdir,
args.reload_interval,
standalone=False,
log_level=args.log_level)
service.run()
# frontend service
logger.info("Try to start automlboard on port %s\n" % args.port)
command = [
os.path.join(root_path, "manage.py"), "runserver",
"0.0.0.0:%s" % args.port, "--noreload"
]
execute_from_command_line(command) | 0.001441 |
def finalize(self):
""" Is called before destruction (when closing).
            Can be used to clean up resources.
"""
logger.debug("Finalizing: {}".format(self))
# Disconnect signals
self.collector.sigContentsChanged.disconnect(self.collectorContentsChanged)
self._configTreeModel.sigItemChanged.disconnect(self.configContentsChanged)
self.sigInspectorChanged.disconnect(self.inspectorSelectionPane.updateFromInspectorRegItem)
self.customContextMenuRequested.disconnect(self.showContextMenu) | 0.008961 |
def transform(self, trajs_tuple, y=None):
"""Featurize a several trajectories.
Parameters
----------
traj_list : list(mdtraj.Trajectory)
Trajectories to be featurized.
Returns
-------
features : list(np.ndarray), length = len(traj_list)
The featurized trajectories. features[i] is the featurized
version of traj_list[i] and has shape
(n_samples_i, n_features)
"""
return [self.partial_transform(traj_zip)
for traj_zip in zip(*trajs_tuple)] | 0.00346 |
def build_self_reference(filename, clean_wcs=False):
""" This function creates a reference, undistorted WCS that can be used to
apply a correction to the WCS of the input file.
Parameters
----------
filename : str
Filename of image which will be corrected, and which will form the basis
of the undistorted WCS.
clean_wcs : bool
Specify whether or not to return the WCS object without any distortion
information, or any history of the original input image. This converts
the output from `utils.output_wcs()` into a pristine `~stwcs.wcsutils.HSTWCS` object.
Returns
-------
customwcs : `stwcs.wcsutils.HSTWCS`
HSTWCS object which contains the undistorted WCS representing the entire
field-of-view for the input image.
Examples
--------
This function can be used with the following syntax to apply a shift/rot/scale
change to the same image:
>>> import buildref
>>> from drizzlepac import updatehdr
>>> filename = "jce501erq_flc.fits"
>>> wcslin = buildref.build_self_reference(filename)
>>> updatehdr.updatewcs_with_shift(filename, wcslin, xsh=49.5694,
... ysh=19.2203, rot = 359.998, scale = 0.9999964)
"""
if 'sipwcs' in filename:
sciname = 'sipwcs'
else:
sciname = 'sci'
wcslin = build_reference_wcs([filename], sciname=sciname)
if clean_wcs:
wcsbase = wcslin.wcs
customwcs = build_hstwcs(wcsbase.crval[0], wcsbase.crval[1], wcsbase.crpix[0],
wcsbase.crpix[1], wcslin._naxis1, wcslin._naxis2,
wcslin.pscale, wcslin.orientat)
else:
customwcs = wcslin
return customwcs | 0.004018 |
def load(self, data, size=None):
"""Data is cffi array"""
self.bind()
if size is None:
# ffi's sizeof understands arrays
size = sizeof(data)
if size == self.buffer_size:
# same size - no need to allocate new buffer, just copy
glBufferSubData(
self.array_type,
0,
size,
to_raw_pointer(data)
)
else:
# buffer size has changed - need to allocate new buffer in the GPU
glBufferData(
self.array_type,
size,
to_raw_pointer(data),
self.draw_type
)
self.buffer_size = size
self.unbind() | 0.002625 |
def constant_fold(code, silent=True, ignore_errors=True):
"""Constant-folds simple expressions like 2 3 + to 5.
Args:
code: Code in non-native types.
silent: Flag that controls whether to print optimizations made.
ignore_errors: Whether to raise exceptions on found errors.
"""
# Loop until we haven't done any optimizations. E.g., "2 3 + 5 *" will be
# optimized to "5 5 *" and in the next iteration to 25. Yes, this is
# extremely slow, big-O wise. We'll fix that some other time. (TODO)
arithmetic = list(map(instructions.lookup, [
instructions.add,
instructions.bitwise_and,
instructions.bitwise_or,
instructions.bitwise_xor,
instructions.div,
instructions.equal,
instructions.greater,
instructions.less,
instructions.mod,
instructions.mul,
instructions.sub,
]))
divzero = map(instructions.lookup, [
instructions.div,
instructions.mod,
])
lookup = instructions.lookup
def isfunction(op):
try:
instructions.lookup(op)
return True
except KeyError:
return False
def isconstant(op):
return op is None or interpreter.isconstant(op, quoted=True) or not isfunction(op)
keep_running = True
while keep_running:
keep_running = False
        # Find two consecutive numbers and an arithmetic operator
for i, a in enumerate(code):
b = code[i+1] if i+1 < len(code) else None
c = code[i+2] if i+2 < len(code) else None
# Constant fold arithmetic operations (TODO: Move to check-func)
if interpreter.isnumber(a, b) and c in arithmetic:
# Although we can detect division by zero at compile time, we
# don't report it here, because the surrounding system doesn't
# handle that very well. So just leave it for now. (NOTE: If
# we had an "error" instruction, we could actually transform
# the expression to an error, or exit instruction perhaps)
if b==0 and c in divzero:
if ignore_errors:
continue
else:
raise errors.CompileError(ZeroDivisionError(
"Division by zero"))
# Calculate result by running on a machine (lambda vm: ... is
# embedded pushes, see compiler)
result = interpreter.Machine([lambda vm: vm.push(a), lambda vm:
vm.push(b), instructions.lookup(c)]).run().top
del code[i:i+3]
code.insert(i, result)
if not silent:
print("Optimizer: Constant-folded %s %s %s to %s" % (a,b,c,result))
keep_running = True
break
# Translate <constant> dup to <constant> <constant>
if isconstant(a) and b == lookup(instructions.dup):
code[i+1] = a
if not silent:
print("Optimizer: Translated %s %s to %s %s" % (a,b,a,a))
keep_running = True
break
# Dead code removal: <constant> drop
if isconstant(a) and b == lookup(instructions.drop):
del code[i:i+2]
if not silent:
print("Optimizer: Removed dead code %s %s" % (a,b))
keep_running = True
break
if a == lookup(instructions.nop):
del code[i]
if not silent:
print("Optimizer: Removed dead code %s" % a)
keep_running = True
break
# Dead code removal: <integer> cast_int
if isinstance(a, int) and b == lookup(instructions.cast_int):
del code[i+1]
if not silent:
print("Optimizer: Translated %s %s to %s" % (a,b,a))
keep_running = True
break
# Dead code removal: <float> cast_float
if isinstance(a, float) and b == lookup(instructions.cast_float):
del code[i+1]
if not silent:
print("Optimizer: Translated %s %s to %s" % (a,b,a))
keep_running = True
break
# Dead code removal: <string> cast_str
if isinstance(a, str) and b == lookup(instructions.cast_str):
del code[i+1]
if not silent:
print("Optimizer: Translated %s %s to %s" % (a,b,a))
keep_running = True
break
# Dead code removal: <boolean> cast_bool
if isinstance(a, bool) and b == lookup(instructions.cast_bool):
del code[i+1]
if not silent:
print("Optimizer: Translated %s %s to %s" % (a,b,a))
keep_running = True
break
# <c1> <c2> swap -> <c2> <c1>
if isconstant(a) and isconstant(b) and c == lookup(instructions.swap):
del code[i:i+3]
code = code[:i] + [b, a] + code[i:]
if not silent:
print("Optimizer: Translated %s %s %s to %s %s" %
(a,b,c,b,a))
keep_running = True
break
# a b over -> a b a
if isconstant(a) and isconstant(b) and c == lookup(instructions.over):
code[i+2] = a
if not silent:
print("Optimizer: Translated %s %s %s to %s %s %s" %
(a,b,c,a,b,a))
keep_running = True
break
# "123" cast_int -> 123
if interpreter.isstring(a) and b == lookup(instructions.cast_int):
try:
number = int(a)
del code[i:i+2]
code.insert(i, number)
if not silent:
print("Optimizer: Translated %s %s to %s" % (a, b,
number))
keep_running = True
break
except ValueError:
pass
if isconstant(a) and b == lookup(instructions.cast_str):
del code[i:i+2]
code.insert(i, str(a)) # TODO: Try-except here
if not silent:
print("Optimizer: Translated %s %s to %s" % (a, b, str(a)))
keep_running = True
break
if isconstant(a) and b == lookup(instructions.cast_bool):
del code[i:i+2]
code.insert(i, bool(a)) # TODO: Try-except here
if not silent:
print("Optimizer: Translated %s %s to %s" % (a, b, bool(a)))
keep_running = True
break
if isconstant(a) and b == lookup(instructions.cast_float):
try:
v = float(a)
del code[i:i+2]
code.insert(i, v)
if not silent:
print("Optimizer: Translated %s %s to %s" % (a, b, v))
keep_running = True
break
except ValueError:
pass
return code | 0.004952 |
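constant_fold depends on the project's instructions and interpreter modules; a self-contained sketch of the same peephole constant-folding idea over a toy stack program (numbers are operands, '+', '-' and '*' are operators) looks like this:
import operator

_OPS = {'+': operator.add, '-': operator.sub, '*': operator.mul}

def fold_constants(code):
    # Repeatedly collapse <number> <number> <operator> triples until stable.
    changed = True
    while changed:
        changed = False
        for i in range(len(code) - 2):
            a, b, c = code[i], code[i + 1], code[i + 2]
            if isinstance(a, (int, float)) and isinstance(b, (int, float)) and c in _OPS:
                code[i:i + 3] = [_OPS[c](a, b)]
                changed = True
                break
    return code

# fold_constants([2, 3, '+', 5, '*'])  ->  [25]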
def plot_cells(cell_1, cell_2, cell_3):
"""Plots three cells"""
fig, ((ax1, ax2, ax3)) = plt.subplots(1, 3, figsize=(12, 5))
for ax in [ax1, ax2, ax3]:
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
ax1.set_title("Type 1")
ax1.imshow(cell_1)
ax2.set_title("Type 2")
ax2.imshow(cell_2)
ax3.set_title("Type 3")
ax3.imshow(cell_3)
return ax1, ax2, ax3 | 0.002404 |
def open_external_editor(filename=None, sql=None):
"""Open external editor, wait for the user to type in their query, return
the query.
:return: list with one tuple, query as first element.
"""
message = None
filename = filename.strip().split(' ', 1)[0] if filename else None
sql = sql or ''
MARKER = '# Type your query above this line.\n'
# Populate the editor buffer with the partial sql (if available) and a
# placeholder comment.
query = click.edit(u'{sql}\n\n{marker}'.format(sql=sql, marker=MARKER),
filename=filename, extension='.sql')
if filename:
try:
with open(filename, encoding='utf-8') as f:
query = f.read()
except IOError:
message = 'Error reading file: %s.' % filename
if query is not None:
query = query.split(MARKER, 1)[0].rstrip('\n')
else:
# Don't return None for the caller to deal with.
# Empty string is ok.
query = sql
return (query, message) | 0.000959 |
def to_bytes(self):
'''
Return packed byte representation of the TCP header.
'''
header = self._make_header(self._checksum)
return header + self._options.to_bytes() | 0.009804 |
def cmd(send, msg, args):
"""Gets previous nicks.
Syntax: {command} <nick>
"""
if not msg:
with args['handler'].data_lock:
users = list(args['handler'].channels[args['target']].users()) if args['target'] != 'private' else [args['nick']]
msg = choice(users)
chain = get_chain(args['db'], msg)
if chain:
send(" -> ".join(chain))
else:
send("%s has never changed their nick." % msg) | 0.004415 |
def process_frames_face(self, frames):
"""
        Preprocess frames using the face detector and extract mouth regions
"""
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(self.face_predictor_path)
mouth_frames = self.get_frames_mouth(detector, predictor, frames)
self.face = np.array(frames)
self.mouth = np.array(mouth_frames)
if mouth_frames[0] is not None:
self.set_data(mouth_frames) | 0.004283 |
def _delete_membership(self, pipeline=None):
"""Removes the id of the object to the set of all objects of the
same class.
"""
Set(self._key['all'], pipeline=pipeline).remove(self.id) | 0.009346 |
def _log_exception(self, exception):
"""
Logs an exception.
:param Exception exception: The exception.
:rtype: None
"""
self._io.error(str(exception).strip().split(os.linesep)) | 0.00885 |
def get_fun(fun):
'''
Return the most recent jobs that have executed the named function
'''
conn, mdb = _get_conn(ret=None)
ret = {}
rdata = mdb.saltReturns.find_one({'fun': fun}, {'_id': 0})
if rdata:
ret = rdata
return ret | 0.003788 |
def sync_map_leaves(self, fragment_type=None):
"""
Return the list of non-empty leaves
in the sync map associated with the task.
If ``fragment_type`` has been specified,
return only leaves of that fragment type.
:param int fragment_type: type of fragment to return
:rtype: list
.. versionadded:: 1.7.0
"""
if (self.sync_map is None) or (self.sync_map.fragments_tree is None):
return []
return [f for f in self.sync_map.leaves(fragment_type)] | 0.003676 |
def setDisabledPenColor(self, color):
"""
Sets the pen color to be used when drawing this node as disabled.
:param color | <QColor>
"""
color = QColor(color)
if self._palette is None:
self._palette = XNodePalette(self._scenePalette)
self._palette.setColor(self._palette.Disabled,
self._palette.NodeForeground,
color)
self.setDirty() | 0.008114 |
def build_on_event(self, runnable, regime, on_event):
"""
Build OnEvent event handler code.
@param on_event: OnEvent event handler object
@type on_event: lems.model.dynamics.OnEvent
@return: Generated OnEvent code
@rtype: list(string)
"""
on_event_code = []
if self.debug: on_event_code += ['print("Maybe handling something for %s ("+str(id(self))+")")'%(runnable.id),
'print("EICs ("+str(id(self))+"): "+str(self.event_in_counters))']
on_event_code += ['count = self.event_in_counters[\'{0}\']'.\
format(on_event.port),
'while count > 0:',
' print(" Handling event")' if self.debug else '',
' count -= 1']
for action in on_event.actions:
code = self.build_action(runnable, regime, action)
for line in code:
on_event_code += [' ' + line]
on_event_code += ['self.event_in_counters[\'{0}\'] = 0'.\
format(on_event.port),]
return on_event_code | 0.010152 |
def from_gpx(gpx_segment):
""" Creates a segment from a GPX format.
No preprocessing is done.
Arguments:
gpx_segment (:obj:`gpxpy.GPXTrackSegment`)
Return:
:obj:`Segment`
"""
points = []
for point in gpx_segment.points:
points.append(Point.from_gpx(point))
return Segment(points) | 0.005249 |
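A usage sketch, assuming gpxpy is installed, from_gpx is exposed as a @staticmethod of Segment, and 'track.gpx' is an illustrative filename:
import gpxpy

with open('track.gpx') as handle:
    gpx = gpxpy.parse(handle)

segment = Segment.from_gpx(gpx.tracks[0].segments[0])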
async def stop_slaves(self, timeout=1):
"""Stop all the slaves by sending a stop-message to their managers.
:param int timeout:
Timeout for connecting to each manager. If a connection can not
be made before the timeout expires, the resulting error for that
particular manager is logged, but the stopping of other managers
is not halted.
"""
for addr in self.addrs:
try:
r_manager = await self.env.connect(addr, timeout=timeout)
await r_manager.stop()
except:
self._log(logging.WARNING, "Could not stop {}".format(addr)) | 0.004464 |
def rows(self, csv=False):
"""
Returns each row based on the selected criteria.
"""
# Store the index of each field against its ID for building each
# entry row with columns in the correct order. Also store the IDs of
# fields with a type of FileField or Date-like for special handling of
# their values.
field_indexes = {}
file_field_ids = []
date_field_ids = []
for field in self.form_fields:
if self.cleaned_data["field_%s_export" % field.id]:
field_indexes[field.id] = len(field_indexes)
if field.is_a(fields.FILE):
file_field_ids.append(field.id)
elif field.is_a(*fields.DATES):
date_field_ids.append(field.id)
num_columns = len(field_indexes)
include_entry_time = self.cleaned_data["field_0_export"]
if include_entry_time:
num_columns += 1
# Get the field entries for the given form and filter by entry_time
# if specified.
field_entries = FieldEntry.objects.filter(
entry__form=self.form).order_by(
"-entry__id").select_related("entry")
if self.cleaned_data["field_0_filter"] == FILTER_CHOICE_BETWEEN:
time_from = self.cleaned_data["field_0_from"]
time_to = self.cleaned_data["field_0_to"]
if time_from and time_to:
field_entries = field_entries.filter(
entry__entry_time__range=(time_from, time_to))
# Loop through each field value ordered by entry, building up each
# entry as a row. Use the ``valid_row`` flag for marking a row as
# invalid if it fails one of the filtering criteria specified.
current_entry = None
current_row = None
valid_row = True
for field_entry in field_entries:
if field_entry.entry_id != current_entry:
# New entry, write out the current row and start a new one.
if valid_row and current_row is not None:
if not csv:
current_row.insert(0, current_entry)
yield current_row
current_entry = field_entry.entry_id
current_row = [""] * num_columns
valid_row = True
if include_entry_time:
current_row[-1] = field_entry.entry.entry_time
field_value = field_entry.value or ""
# Check for filter.
field_id = field_entry.field_id
filter_type = self.cleaned_data.get("field_%s_filter" % field_id)
filter_args = None
if filter_type:
if filter_type == FILTER_CHOICE_BETWEEN:
f, t = "field_%s_from" % field_id, "field_%s_to" % field_id
filter_args = [self.cleaned_data[f], self.cleaned_data[t]]
else:
field_name = "field_%s_contains" % field_id
filter_args = self.cleaned_data[field_name]
if filter_args:
filter_args = [filter_args]
if filter_args:
# Convert dates before checking filter.
if field_id in date_field_ids:
y, m, d = field_value.split(" ")[0].split("-")
dte = date(int(y), int(m), int(d))
filter_args.append(dte)
else:
filter_args.append(field_value)
filter_func = FILTER_FUNCS[filter_type]
if not filter_func(*filter_args):
valid_row = False
# Create download URL for file fields.
if field_entry.value and field_id in file_field_ids:
url = reverse("admin:form_file", args=(field_entry.id,))
field_value = self.request.build_absolute_uri(url)
if not csv:
parts = (field_value, split(field_entry.value)[1])
field_value = mark_safe("<a href=\"%s\">%s</a>" % parts)
# Only use values for fields that were selected.
try:
current_row[field_indexes[field_id]] = field_value
except KeyError:
pass
# Output the final row.
if valid_row and current_row is not None:
if not csv:
current_row.insert(0, current_entry)
yield current_row | 0.000443 |
def _add_to_submit_args(s):
"""Adds string s to the PYSPARK_SUBMIT_ARGS env var"""
new_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "") + (" %s" % s)
os.environ["PYSPARK_SUBMIT_ARGS"] = new_args
return new_args | 0.004425 |
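An illustration of the accumulating behaviour (the package coordinate is hypothetical):
import os

os.environ["PYSPARK_SUBMIT_ARGS"] = "--master local[2]"
_add_to_submit_args("--packages org.example:demo:1.0")
# os.environ["PYSPARK_SUBMIT_ARGS"] is now
# "--master local[2] --packages org.example:demo:1.0"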
def admin_print_styles_single(request):
""" Returns all books with any version of the given print style.
Returns the print_style, recipe type, num books using the print_style,
along with a dictionary of the book, author, revision date, recipe,
tag of the print_style, and a link to the content.
"""
style = request.matchdict['style']
# do db search to get file id and other info on the print_style
with db_connect(cursor_factory=DictCursor) as db_conn:
with db_conn.cursor() as cursor:
if style != '(custom)':
cursor.execute("""
SELECT fileid, recipe_type, title
FROM default_print_style_recipes
WHERE print_style=%s
""", vars=(style,))
info = cursor.fetchall()
if len(info) < 1:
current_recipe = None
recipe_type = None
status = None
else:
current_recipe = info[0]['fileid']
recipe_type = info[0]['recipe_type']
status = 'current'
cursor.execute("""\
SELECT name, authors, lm.revised, lm.recipe, psr.tag,
f.sha1 as hash, psr.commit_id, uuid,
ident_hash(uuid, major_version, minor_version)
FROM modules as lm
LEFT JOIN print_style_recipes as psr
ON (psr.print_style = lm.print_style and
psr.fileid = lm.recipe)
LEFT JOIN files f ON psr.fileid = f.fileid
WHERE lm.print_style=%s
AND portal_type='Collection'
AND ARRAY [major_version, minor_version] = (
SELECT max(ARRAY[major_version,minor_version])
FROM modules WHERE lm.uuid = uuid)
ORDER BY psr.tag DESC;
""", vars=(style,))
else:
current_recipe = '(custom)'
recipe_type = '(custom)'
cursor.execute("""\
SELECT name, authors, lm.revised, lm.recipe, NULL as tag,
f.sha1 as hash, NULL as commit_id, uuid,
ident_hash(uuid, major_version, minor_version)
FROM modules as lm
JOIN files f ON lm.recipe = f.fileid
WHERE portal_type='Collection'
AND NOT EXISTS (
SELECT 1 from print_style_recipes psr
WHERE psr.fileid = lm.recipe)
AND ARRAY [major_version, minor_version] = (
SELECT max(ARRAY[major_version,minor_version])
FROM modules WHERE lm.uuid = uuid)
ORDER BY uuid, recipe, revised DESC;
""", vars=(style,))
status = '(custom)'
collections = []
for row in cursor.fetchall():
recipe = row['recipe']
if (status != '(custom)' and
current_recipe is not None and
recipe != current_recipe):
status = 'stale'
collections.append({
'title': row['name'].decode('utf-8'),
'authors': row['authors'],
'revised': row['revised'],
'recipe': row['hash'],
'recipe_link': request.route_path('get-resource',
hash=row['hash']),
'tag': row['tag'],
'ident_hash': row['ident_hash'],
'link': request.route_path('get-content',
ident_hash=row['ident_hash']),
'status': status,
'status_link': request.route_path(
'admin-content-status-single', uuid=row['uuid']),
})
return {'number': len(collections),
'collections': collections,
'print_style': style,
'recipe_type': recipe_type} | 0.000235 |
def encodePathElement(element):
"""Encode a URL path element according to RFC3986."""
return urllib.parse.quote(
(
element.encode('utf-8')
if isinstance(element, str)
else str(element)
if isinstance(element, int)
else element
),
safe=d1_common.const.URL_PATHELEMENT_SAFE_CHARS,
) | 0.002667 |
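The safe-character set lives in d1_common.const and is not shown here; a stand-alone sketch of the same quoting behaviour with an assumed safe set:
import urllib.parse

ASSUMED_SAFE_CHARS = ":@$!()',~*&="  # assumption, not the actual constant

print(urllib.parse.quote("a/b c".encode("utf-8"), safe=ASSUMED_SAFE_CHARS))
# -> "a%2Fb%20c"  ('/' is escaped because it is not in the safe set)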
def p_expression_uxnor(self, p):
'expression : XNOR expression %prec UXNOR'
p[0] = Uxnor(p[2], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | 0.011976 |
def _find_from_file(full_doc, from_file_keyword):
"""
Finds a line in <full_doc> like
<from_file_keyword> <colon> <path>
and return path
"""
path = None
for line in full_doc.splitlines():
if from_file_keyword in line:
parts = line.strip().split(':')
if len(parts) == 2 and parts[0].strip() == from_file_keyword:
path = parts[1].strip()
break
return path | 0.002188 |
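A usage sketch with an illustrative keyword and docstring:
doc = """Run the job.

config-file: /etc/myapp/settings.yaml
"""
path = _find_from_file(doc, "config-file")
# path == "/etc/myapp/settings.yaml"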
def validate(mcs, bases, attributes):
"""Check attributes."""
if bases[0] is object:
return None
mcs.check_model_cls(attributes)
mcs.check_include_exclude(attributes)
mcs.check_properties(attributes) | 0.007968 |
def project_top_dir(self, *args) -> str:
""" Project top-level directory """
return os.path.join(self.project_dir, *args) | 0.014599 |
def add_dimension(cls, columns, dimension, dim_pos, values, vdim):
"""
Adding value dimensions not currently supported by iris interface.
Adding key dimensions not possible on dense interfaces.
"""
if not vdim:
raise Exception("Cannot add key dimension to a dense representation.")
raise NotImplementedError | 0.008174 |
def perfect_platonic_per_pixel(N, R, scale=11, pos=None, zscale=1.0, returnpix=None):
"""
Create a perfect platonic sphere of a given radius R by supersampling by a
factor scale on a grid of size N. Scale must be odd.
We are able to perfectly position these particles up to 1/scale. Therefore,
let's only allow those types of shifts for now, but return the actual position
used for the placement.
"""
# enforce odd scale size
if scale % 2 != 1:
scale += 1
if pos is None:
# place the default position in the center of the grid
pos = np.array([(N-1)/2.0]*3)
# limit positions to those that are exact on the size 1./scale
# positions have the form (d = divisions):
# p = N + m/d
s = 1.0/scale
f = zscale**2
i = pos.astype('int')
p = i + s*((pos - i)/s).astype('int')
pos = p + 1e-10 # unfortunately needed to break ties
# make the output arrays
image = np.zeros((N,)*3)
x,y,z = np.meshgrid(*(xrange(N),)*3, indexing='ij')
# for each real pixel in the image, integrate a bunch of superres pixels
for x0,y0,z0 in zip(x.flatten(),y.flatten(),z.flatten()):
# short-circuit things that are just too far away!
ddd = np.sqrt(f*(x0-pos[0])**2 + (y0-pos[1])**2 + (z0-pos[2])**2)
if ddd > R + 4:
image[x0,y0,z0] = 0.0
continue
# otherwise, build the local mesh and count the volume
xp,yp,zp = np.meshgrid(
*(np.linspace(i-0.5+s/2, i+0.5-s/2, scale, endpoint=True) for i in (x0,y0,z0)),
indexing='ij'
)
ddd = np.sqrt(f*(xp-pos[0])**2 + (yp-pos[1])**2 + (zp-pos[2])**2)
if returnpix is not None and returnpix == [x0,y0,z0]:
outpix = 1.0 * (ddd < R)
vol = (1.0*(ddd < R) + 0.0*(ddd == R)).sum()
image[x0,y0,z0] = vol / float(scale**3)
#vol_true = 4./3*np.pi*R**3
#vol_real = image.sum()
#print vol_true, vol_real, (vol_true - vol_real)/vol_true
if returnpix:
return image, pos, outpix
return image, pos | 0.011505 |
def moveToReplayContext(self, r):
'set the sheet/row/col to the values in the replay row. return sheet'
if not r.sheet:
# assert not r.col and not r.row
return self # any old sheet should do, row/column don't matter
try:
sheetidx = int(r.sheet)
vs = vd().sheets[sheetidx]
except ValueError:
vs = vd().getSheet(r.sheet) or error('no sheet named %s' % r.sheet)
if r.row:
try:
rowidx = int(r.row)
except ValueError:
rowidx = indexMatch(vs.rows, lambda r,vs=vs,k=r.row: keystr(vs.rowkey(r)) == k)
if rowidx is None:
error('no "%s" row' % r.row)
if options.replay_movement:
while vs.cursorRowIndex != rowidx:
vs.cursorRowIndex += 1 if (rowidx - vs.cursorRowIndex) > 0 else -1
while not self.delay(0.5):
pass
else:
vs.cursorRowIndex = rowidx
if r.col:
try:
vcolidx = int(r.col)
except ValueError:
vcolidx = indexMatch(vs.visibleCols, lambda c,name=r.col: name == c.name)
if vcolidx is None:
error('no "%s" column' % r.col)
if options.replay_movement:
while vs.cursorVisibleColIndex != vcolidx:
vs.cursorVisibleColIndex += 1 if (vcolidx - vs.cursorVisibleColIndex) > 0 else -1
while not self.delay(0.5):
pass
assert vs.cursorVisibleColIndex == vcolidx
else:
vs.cursorVisibleColIndex = vcolidx
return vs | 0.005734 |
def set_cache_buster(redis, path, hash):
"""Sets the cache buster value for a given file path"""
redis.hset("cache-buster:{}:v3".format(oz.settings["s3_bucket"]), path, hash) | 0.010989 |
def getftype(self, name):
"""Returns the python type for the specified field name. The field list is
cached so multiple calls do not invoke a getFields request each time.
@param name(string) The name of the SOLR field
@returns Python type of the field.
"""
fields = self.getFields()
try:
fld = fields['fields'][name]
except Exception:
return str
if fld['type'] in ['string', 'text', 'stext', 'text_ws']:
return str
if fld['type'] in ['sint', 'integer', 'long', 'slong']:
return int
if fld['type'] in ['sdouble', 'double', 'sfloat', 'float']:
return float
if fld['type'] in ['boolean']:
return bool
return fld['type'] | 0.003774 |
def demo(host, port):
"""Basic demo of the monitoring capabilities."""
# logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
stl = AsyncSatel(host,
port,
loop,
[1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 25, 26, 27, 28, 29, 30],
[8, 9, 10]
)
loop.run_until_complete(stl.connect())
loop.create_task(stl.arm("3333", 1))
loop.create_task(stl.disarm("3333"))
loop.create_task(stl.keep_alive())
loop.create_task(stl.monitor_status())
loop.run_forever()
loop.close() | 0.001462 |
def read(self, vals):
"""Read values.
Args:
vals (list): list of strings representing values
"""
i = 0
if len(vals[i]) == 0:
self.ground_temperature_depth = None
else:
self.ground_temperature_depth = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_soil_conductivity = None
else:
self.depth_soil_conductivity = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_soil_density = None
else:
self.depth_soil_density = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_soil_specific_heat = None
else:
self.depth_soil_specific_heat = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_january_average_ground_temperature = None
else:
self.depth_january_average_ground_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_february_average_ground_temperature = None
else:
self.depth_february_average_ground_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_march_average_ground_temperature = None
else:
self.depth_march_average_ground_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_april_average_ground_temperature = None
else:
self.depth_april_average_ground_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_may_average_ground_temperature = None
else:
self.depth_may_average_ground_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_june_average_ground_temperature = None
else:
self.depth_june_average_ground_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_july_average_ground_temperature = None
else:
self.depth_july_average_ground_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_august_average_ground_temperature = None
else:
self.depth_august_average_ground_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_september_average_ground_temperature = None
else:
self.depth_september_average_ground_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_october_average_ground_temperature = None
else:
self.depth_october_average_ground_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_november_average_ground_temperature = None
else:
self.depth_november_average_ground_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.depth_december_average_ground_temperature = None
else:
self.depth_december_average_ground_temperature = vals[i]
i += 1 | 0.000654 |
def add_nodes(self, nodes, nesting=1):
"""
Adds edges showing dependencies between source files listed in
the nodes.
"""
hopNodes = set() # nodes in this hop
hopEdges = [] # edges in this hop
# get nodes and edges for this hop
for i, n in zip(range(len(nodes)), nodes):
r, g, b = rainbowcolour(i, len(nodes))
colour = '#%02X%02X%02X' % (r, g, b)
for ne in n.efferent:
if ne not in self.added:
hopNodes.add(ne)
hopEdges.append((ne, n, 'solid', colour))
# add nodes and edges to the graph if maximum number of nodes is not
# exceeded
self.add_to_graph(hopNodes, hopEdges, nesting) | 0.002635 |
def _rfc3339_nanos_to_datetime(dt_str):
"""Convert a nanosecond-precision timestamp to a native datetime.
.. note::
Python datetimes do not support nanosecond precision; this function
therefore truncates such values to microseconds.
:type dt_str: str
:param dt_str: The string to convert.
:rtype: :class:`datetime.datetime`
:returns: The datetime object created from the string.
:raises ValueError: If the timestamp does not match the RFC 3339
regular expression.
"""
with_nanos = _RFC3339_NANOS.match(dt_str)
if with_nanos is None:
raise ValueError(
"Timestamp: %r, does not match pattern: %r"
% (dt_str, _RFC3339_NANOS.pattern)
)
bare_seconds = datetime.datetime.strptime(
with_nanos.group("no_fraction"), _RFC3339_NO_FRACTION
)
fraction = with_nanos.group("nanos")
if fraction is None:
micros = 0
else:
scale = 9 - len(fraction)
nanos = int(fraction) * (10 ** scale)
micros = nanos // 1000
return bare_seconds.replace(microsecond=micros, tzinfo=UTC) | 0.000878 |
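An illustration of the nanosecond-to-microsecond truncation performed above (the _RFC3339_NANOS regex itself is defined elsewhere in the module):
fraction = "123456789"      # nanosecond digits captured by the regex
scale = 9 - len(fraction)   # 0
nanos = int(fraction) * (10 ** scale)
micros = nanos // 1000      # 123456 -- the last three digits are dropped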
def build_kernel(self):
"""Build the MNN kernel.
Build a mutual nearest neighbors kernel.
Returns
-------
K : kernel matrix, shape=[n_samples, n_samples]
symmetric matrix with ones down the diagonal
            with non-negative entries.
"""
tasklogger.log_start("subgraphs")
self.subgraphs = []
from .api import Graph
# iterate through sample ids
for i, idx in enumerate(self.samples):
tasklogger.log_debug("subgraph {}: sample {}, "
"n = {}, knn = {}".format(
i, idx, np.sum(self.sample_idx == idx),
self.knn))
# select data for sample
data = self.data_nu[self.sample_idx == idx]
# build a kNN graph for cells within sample
graph = Graph(data, n_pca=None,
knn=self.knn,
decay=self.decay,
bandwidth=self.bandwidth,
distance=self.distance,
thresh=self.thresh,
verbose=self.verbose,
random_state=self.random_state,
n_jobs=self.n_jobs,
kernel_symm='+',
initialize=True)
self.subgraphs.append(graph) # append to list of subgraphs
tasklogger.log_complete("subgraphs")
tasklogger.log_start("MNN kernel")
if self.thresh > 0 or self.decay is None:
K = sparse.lil_matrix(
(self.data_nu.shape[0], self.data_nu.shape[0]))
else:
K = np.zeros([self.data_nu.shape[0], self.data_nu.shape[0]])
for i, X in enumerate(self.subgraphs):
K = set_submatrix(K, self.sample_idx == self.samples[i],
self.sample_idx == self.samples[i], X.K)
within_batch_norm = np.array(np.sum(X.K, 1)).flatten()
for j, Y in enumerate(self.subgraphs):
if i == j:
continue
tasklogger.log_start(
"kernel from sample {} to {}".format(self.samples[i],
self.samples[j]))
Kij = Y.build_kernel_to_data(
X.data_nu,
knn=self.knn)
between_batch_norm = np.array(np.sum(Kij, 1)).flatten()
scale = np.minimum(1, within_batch_norm /
between_batch_norm) * self.beta
if sparse.issparse(Kij):
Kij = Kij.multiply(scale[:, None])
else:
Kij = Kij * scale[:, None]
K = set_submatrix(K, self.sample_idx == self.samples[i],
self.sample_idx == self.samples[j], Kij)
tasklogger.log_complete(
"kernel from sample {} to {}".format(self.samples[i],
self.samples[j]))
tasklogger.log_complete("MNN kernel")
return K | 0.000622 |
def handle(send, msg, args):
"""Get titles for urls.
Generate a short url. Get the page title.
"""
worker = args["handler"].workers
result = worker.run_pool(get_urls, [msg])
try:
urls = result.get(5)
except multiprocessing.TimeoutError:
worker.restart_pool()
send("Url regex timed out.", target=args["config"]["core"]["ctrlchan"])
return
for url in urls:
# Prevent botloops
if (args["db"].query(Urls).filter(Urls.url == url, Urls.time > datetime.now() - timedelta(seconds=10)).count() > 1):
return
if url.startswith("https://twitter.com"):
tid = url.split("/")[-1]
twitter_api = get_api(args["config"])
status = twitter_api.GetStatus(tid)
text = status.text.replace("\n", " / ")
send("** {} (@{}) on Twitter: {}".format(status.user.name, status.user.screen_name, text))
return
imgkey = args["config"]["api"]["googleapikey"]
title = urlutils.get_title(url, imgkey)
shortkey = args["config"]["api"]["bitlykey"]
short = urlutils.get_short(url, shortkey)
last = args["db"].query(Urls).filter(Urls.url == url).order_by(Urls.time.desc()).first()
if args["config"]["feature"].getboolean("linkread"):
if last is not None:
lasttime = last.time.strftime("%H:%M:%S on %Y-%m-%d")
send("Url %s previously posted at %s by %s -- %s" % (short, lasttime, last.nick, title))
else:
send("** %s - %s" % (title, short))
args["db"].add(Urls(url=url, title=title, nick=args["nick"], time=datetime.now())) | 0.003557 |
def _init_create_child(self):
"""
Initialize the base class :attr:`create_child` and
:attr:`create_child_args` according to whether we need a PTY or not.
"""
if self._requires_pty():
self.create_child = mitogen.parent.hybrid_tty_create_child
else:
self.create_child = mitogen.parent.create_child
self.create_child_args = {
'stderr_pipe': True,
} | 0.004376 |
def flatten4d3d(x):
"""Flatten a 4d-tensor into a 3d-tensor by joining width and height."""
xshape = shape_list(x)
result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]])
return result | 0.024272 |
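shape_list comes from the surrounding library; a NumPy sketch of the equivalent reshape, assuming a batch of 2 feature maps of height 3, width 4 and depth 5:
import numpy as np

x = np.zeros((2, 3, 4, 5))
flat = x.reshape(x.shape[0], x.shape[1] * x.shape[2], x.shape[3])
# flat.shape == (2, 12, 5)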
def _upload_file(self, file_name, full_path, quiet, request, resources):
""" Helper function to upload a single file
Parameters
==========
file_name: name of the file to upload
full_path: path to the file to upload
request: the prepared request
resources: optional file metadata
quiet: suppress verbose output
:return: True - upload unsuccessful; False - upload successful
"""
if not quiet:
print('Starting upload for file ' + file_name)
content_length = os.path.getsize(full_path)
token = self.dataset_upload_file(full_path, quiet)
if token is None:
if not quiet:
print('Upload unsuccessful: ' + file_name)
return True
if not quiet:
print('Upload successful: ' + file_name + ' (' +
File.get_size(content_length) + ')')
upload_file = DatasetUploadFile()
upload_file.token = token
if resources:
for item in resources:
if file_name == item.get('path'):
upload_file.description = item.get('description')
if 'schema' in item:
fields = self.get_or_default(item['schema'], 'fields',
[])
processed = []
count = 0
for field in fields:
processed.append(self.process_column(field))
processed[count].order = count
count += 1
upload_file.columns = processed
request.files.append(upload_file)
return False | 0.001115 |
def participants(self, **kwargs):
"""List the participants.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: The list of participants
"""
path = '%s/%s/participants' % (self.manager.path, self.get_id())
return self.manager.gitlab.http_get(path, **kwargs) | 0.002304 |
def main():
""" Main function for command line usage """
usage = "usage: %(prog)s [options] "
description = "Merge a set of Fermi-LAT files."
parser = argparse.ArgumentParser(usage=usage, description=description)
parser.add_argument('-o', '--output', default=None, type=str,
help='Output file.')
parser.add_argument('--clobber', default=False, action='store_true',
help='Overwrite output file.')
parser.add_argument('files', nargs='+', default=None,
help='List of input files.')
args = parser.parse_args()
proj, f, hdu = fits_utils.read_projection_from_fits(args.files[0])
if isinstance(proj, WCS):
hdulist = merge_utils.merge_wcs_counts_cubes(args.files)
elif isinstance(proj, HPX):
hdulist = merge_utils.merge_hpx_counts_cubes(args.files)
else:
raise TypeError("Could not read projection from file %s" %
args.files[0])
if args.output:
hdulist.writeto(args.output, clobber=args.clobber, output_verify='silentfix') | 0.001821 |
def can_be_(self, state):
"""Check if machine can transit to given state."""
translator = self._meta['translator']
state = translator.translate(state)
if self._meta['complete']:
return True
if self.actual_state is None:
return True
transitions = self._meta['transitions'][self.actual_state]
return state in transitions | 0.00274 |
def get_imported_namespaces(self,
must_have_imported_data_type=False,
consider_annotations=False,
consider_annotation_types=False):
# type: (bool, bool, bool) -> typing.List[ApiNamespace]
"""
Returns a list of Namespace objects. A namespace is a member of this
list if it is imported by the current namespace and a data type is
referenced from it. Namespaces are in ASCII order by name.
Args:
must_have_imported_data_type (bool): If true, result does not
include namespaces that were not imported for data types.
consider_annotations (bool): If false, result does not include
namespaces that were only imported for annotations
consider_annotation_types (bool): If false, result does not
include namespaces that were only imported for annotation types.
Returns:
List[Namespace]: A list of imported namespaces.
"""
imported_namespaces = []
for imported_namespace, reason in self._imported_namespaces.items():
if must_have_imported_data_type and not reason.data_type:
continue
if (not consider_annotations) and not (
reason.data_type or reason.alias or reason.annotation_type
):
continue
if (not consider_annotation_types) and not (
reason.data_type or reason.alias or reason.annotation
):
continue
imported_namespaces.append(imported_namespace)
imported_namespaces.sort(key=lambda n: n.name)
return imported_namespaces | 0.003962 |
def exportChatInviteLink(self, chat_id):
""" See: https://core.telegram.org/bots/api#exportchatinvitelink """
p = _strip(locals())
return self._api_request('exportChatInviteLink', _rectify(p)) | 0.009259 |
def _isDissipative(obj):
"""
NAME:
_isDissipative
PURPOSE:
Determine whether this combination of potentials and forces is Dissipative
INPUT:
obj - Potential/DissipativeForce instance or list of such instances
OUTPUT:
True or False depending on whether the object is dissipative
HISTORY:
2018-03-16 - Written - Bovy (UofT)
"""
from .Potential import flatten
obj= flatten(obj)
isList= isinstance(obj,list)
if isList:
isCons= [not isinstance(p,DissipativeForce) for p in obj]
nonCons= not numpy.prod(numpy.array(isCons))
else:
nonCons= isinstance(obj,DissipativeForce)
return nonCons | 0.014265 |
def static_file(filename, root,
mimetype=True,
download=False,
charset='UTF-8',
etag=None):
""" Open a file in a safe way and return an instance of :exc:`HTTPResponse`
that can be sent back to the client.
:param filename: Name or path of the file to send, relative to ``root``.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Provide the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset for files with a ``text/*`` mime-type.
(default: UTF-8)
:param etag: Provide a pre-computed ETag header. If set to ``False``,
ETag handling is disabled. (default: auto-generate ETag header)
While checking user input is always a good idea, this function provides
additional protection against malicious ``filename`` parameters from
breaking out of the ``root`` directory and leaking sensitive information
to an attacker.
Read-protected files or files outside of the ``root`` directory are
answered with ``403 Access Denied``. Missing files result in a
``404 Not Found`` response. Conditional requests (``If-Modified-Since``,
``If-None-Match``) are answered with ``304 Not Modified`` whenever
possible. ``HEAD`` and ``Range`` requests (used by download managers to
check or continue partial downloads) are also handled automatically.
"""
root = os.path.join(os.path.abspath(root), '')
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype is True:
if download and download is not True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if (mimetype[:5] == 'text/' or mimetype == 'application/javascript')\
and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download is True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
headers['Last-Modified'] = email.utils.formatdate(stats.st_mtime,
usegmt=True)
headers['Date'] = email.utils.formatdate(time.time(), usegmt=True)
getenv = request.environ.get
if etag is None:
etag = '%d:%d:%d:%d:%s' % (stats.st_dev, stats.st_ino, stats.st_mtime,
clen, filename)
etag = hashlib.sha1(tob(etag)).hexdigest()
if etag:
headers['ETag'] = etag
check = getenv('HTTP_IF_NONE_MATCH')
if check and check == etag:
return HTTPResponse(status=304, **headers)
ims = getenv('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
range_header = getenv('HTTP_RANGE')
if range_header:
ranges = list(parse_range_header(range_header, clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers) | 0.00196 |
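A minimal usage sketch of how static_file is typically wired into a route (assumes bottle is installed and a ./static directory exists next to the script; the route path and directory name are illustrative only):

from bottle import Bottle, static_file

app = Bottle()

@app.route('/static/<filepath:path>')
def serve_static(filepath):
    # download=False (the default) serves the file inline; pass a string to
    # force a "Save as..." dialog with a custom filename instead.
    return static_file(filepath, root='./static')

# app.run(host='localhost', port=8080)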
def get_topic_for_path(channel, chan_path_tuple):
"""
    Given channel (dict) that contains a hierarchy of TopicNode dicts, we walk
    the path given in `chan_path_tuple` to find the corresponding TopicNode.
"""
assert chan_path_tuple[0] == channel['dirname'], 'Wrong channeldir'
chan_path_list = list(chan_path_tuple)
chan_path_list.pop(0) # skip the channel name
if len(chan_path_list) == 0:
return channel
current = channel
for subtopic in chan_path_list:
current = list(filter(lambda d: 'dirname' in d and d['dirname'] == subtopic, current['children']))[0]
return current | 0.004688 |
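A minimal sketch of the walk on a toy channel dict (the dict below is hypothetical test data, not real channel output; it assumes get_topic_for_path above is importable):

channel = {
    'dirname': 'channeldir',
    'children': [
        {'dirname': 'topic-a',
         'children': [{'dirname': 'subtopic-1', 'children': []}]},
    ],
}

# The first element of the path tuple must match the channel's dirname.
node = get_topic_for_path(channel, ('channeldir', 'topic-a', 'subtopic-1'))
print(node['dirname'])  # subtopic-1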
def get_process_parser(self, process_id_or_name):
"""
Returns the ProcessParser for the given process ID or name. It matches
by name first.
"""
if process_id_or_name in self.process_parsers_by_name:
return self.process_parsers_by_name[process_id_or_name]
else:
return self.process_parsers[process_id_or_name] | 0.005263 |
def adjustButtons( self ):
"""
Adjusts the placement of the buttons for this line edit.
"""
y = 1
for btn in self.buttons():
btn.setIconSize(self.iconSize())
btn.setFixedSize(QSize(self.height() - 2, self.height() - 2))
# adjust the location for the left buttons
left_buttons = self._buttons.get(Qt.AlignLeft, [])
x = (self.cornerRadius() / 2.0) + 2
for btn in left_buttons:
btn.move(x, y)
x += btn.width()
# adjust the location for the right buttons
right_buttons = self._buttons.get(Qt.AlignRight, [])
w = self.width()
bwidth = sum([btn.width() for btn in right_buttons])
bwidth += (self.cornerRadius() / 2.0) + 1
for btn in right_buttons:
btn.move(w - bwidth, y)
bwidth -= btn.width()
self._buttonWidth = sum([btn.width() for btn in self.buttons()])
self.adjustTextMargins() | 0.013195 |
def get_batch_unlock_gain(
channel_state: NettingChannelState,
) -> UnlockGain:
"""Collect amounts for unlocked/unclaimed locks and onchain unlocked locks.
Note: this function does not check expiry, so the values make only sense during settlement.
Returns:
gain_from_partner_locks: locks amount received and unlocked on-chain
gain_from_our_locks: locks amount which are unlocked or unclaimed
"""
gain_from_partner_locks = TokenAmount(sum(
unlock.lock.amount
for unlock in channel_state.partner_state.secrethashes_to_onchain_unlockedlocks.values()
))
"""
The current participant will gain from unlocking its own locks when:
- The partner never managed to provide the secret to unlock the locked amount.
- The partner provided the secret to claim the locked amount but the current
participant node never sent out the unlocked balance proof and the partner
did not unlock the lock on-chain.
"""
our_locked_locks_amount = sum(
lock.amount
for lock in channel_state.our_state.secrethashes_to_lockedlocks.values()
)
our_unclaimed_locks_amount = sum(
lock.amount for lock in channel_state.our_state.secrethashes_to_unlockedlocks.values()
)
gain_from_our_locks = TokenAmount(our_locked_locks_amount + our_unclaimed_locks_amount)
return UnlockGain(
from_partner_locks=gain_from_partner_locks,
from_our_locks=gain_from_our_locks,
) | 0.006052 |
def SetClipboardText(text: str) -> bool:
"""
Return bool, True if succeed otherwise False.
"""
if ctypes.windll.user32.OpenClipboard(0):
ctypes.windll.user32.EmptyClipboard()
textByteLen = (len(text) + 1) * 2
hClipboardData = ctypes.windll.kernel32.GlobalAlloc(0, textByteLen) # GMEM_FIXED=0
hDestText = ctypes.windll.kernel32.GlobalLock(hClipboardData)
ctypes.cdll.msvcrt.wcsncpy(ctypes.c_wchar_p(hDestText), ctypes.c_wchar_p(text), textByteLen // 2)
ctypes.windll.kernel32.GlobalUnlock(hClipboardData)
# system owns hClipboardData after calling SetClipboardData,
# application can not write to or free the data once ownership has been transferred to the system
ctypes.windll.user32.SetClipboardData(13, hClipboardData) # CF_TEXT=1, CF_UNICODETEXT=13
ctypes.windll.user32.CloseClipboard()
return True
return False | 0.005411 |
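A minimal usage sketch (Windows only, since it goes through user32/kernel32; assumes the function above is importable):

if SetClipboardText('copied via ctypes'):
    print('clipboard updated')
else:
    print('could not open the clipboard')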
def update_progress(self, progress, prefix=''):
"""
Print a progress bar for longer-running scripts.
The progress value is a value between 0.0 and 1.0. If a prefix is
present, it will be printed before the progress bar.
"""
total_length = 40
if progress == 1.:
sys.stderr.write('\r' + ' ' * (total_length + len(prefix) + 50))
sys.stderr.write('\n')
sys.stderr.flush()
else:
bar_length = int(round(total_length * progress))
sys.stderr.write('\r%s [%s%s] %.1f %% '
% (prefix, '=' * bar_length,
' ' * (total_length - bar_length),
progress * 100))
sys.stderr.flush() | 0.002522 |
def _async_raise(tid, exctype):
"""
raises the exception, performs cleanup if needed
    Reference: https://www.oschina.net/question/172446_2159505
"""
tid = ctypes.c_long(tid)
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
print('force close: {} {}'.format(tid, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) | 0.003778 |
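A hedged usage sketch of the typical pattern: inject SystemExit into a worker thread to ask it to stop. This relies on CPython internals and only takes effect the next time the target thread executes Python bytecode (it assumes _async_raise above is importable):

import threading
import time

def worker():
    try:
        while True:
            time.sleep(0.1)
    except SystemExit:
        pass  # clean up and exit

t = threading.Thread(target=worker)
t.start()
time.sleep(0.5)
_async_raise(t.ident, SystemExit)
t.join()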
def _pfp__process_metadata(self):
"""Process the metadata once the entire struct has been
declared.
"""
if self._pfp__metadata_processor is None:
return
metadata_info = self._pfp__metadata_processor()
if isinstance(metadata_info, list):
for metadata in metadata_info:
if metadata["type"] == "watch":
self._pfp__set_watch(
metadata["watch_fields"],
metadata["update_func"],
*metadata["func_call_info"]
)
elif metadata["type"] == "packed":
del metadata["type"]
self._pfp__set_packer(**metadata)
if self._pfp__can_unpack():
self._pfp__unpack_data(self.raw_data) | 0.002326 |
def contractString(self, contract, seperator="_"):
""" returns string from contract tuple """
localSymbol = ""
contractTuple = contract
if type(contract) != tuple:
localSymbol = contract.m_localSymbol
contractTuple = self.contract_to_tuple(contract)
# build identifier
try:
if contractTuple[1] in ("OPT", "FOP"):
# if contractTuple[5]*100 - int(contractTuple[5]*100):
# strike = contractTuple[5]
# else:
# strike = "{0:.2f}".format(contractTuple[5])
strike = '{:0>5d}'.format(int(contractTuple[5])) + \
format(contractTuple[5], '.3f').split('.')[1]
contractString = (contractTuple[0] + str(contractTuple[4]) +
contractTuple[6][0] + strike, contractTuple[1])
# contractTuple[6], str(strike).replace(".", ""))
elif contractTuple[1] == "FUT":
exp = ' ' # default
# round expiry day to expiry month
if localSymbol != "":
# exp = localSymbol[2:3]+str(contractTuple[4][:4])
exp = localSymbol[2:3] + self.localSymbolExpiry[localSymbol][:4]
if ' ' in exp:
exp = str(contractTuple[4])[:6]
exp = dataTypes["MONTH_CODES"][int(exp[4:6])] + str(int(exp[:4]))
contractString = (contractTuple[0] + exp, contractTuple[1])
elif contractTuple[1] == "CASH":
contractString = (contractTuple[0] + contractTuple[3], contractTuple[1])
else: # STK
contractString = (contractTuple[0], contractTuple[1])
# construct string
contractString = seperator.join(
str(v) for v in contractString).replace(seperator + "STK", "")
except Exception:
contractString = contractTuple[0]
return contractString.replace(" ", "_").upper() | 0.004812 |
def find_one(self, query):
"""Find one wrapper with conversion to dictionary
:param dict query: A Mongo query
"""
mongo_response = yield self.collection.find_one(query)
raise Return(self._obj_cursor_to_dictionary(mongo_response)) | 0.007407 |
def calculate_basic_cost(self, d1, d2):
"""
Calculates assignment cost between two cells.
"""
distance = euclidean_dist(d1.center, d2.center) / self.scale
area_change = 1 - min(d1.area, d2.area) / max(d1.area, d2.area)
return distance + self.parameters_cost_initial["area_weight"] * area_change | 0.008721 |
def delete_workflow(self, workflow_id):
"""
Delete a workflow from the database
:param workflow_id:
:return: None
"""
deleted = False
with switch_db(WorkflowDefinitionModel, "hyperstream"):
workflows = WorkflowDefinitionModel.objects(workflow_id=workflow_id)
if len(workflows) == 1:
workflows[0].delete()
deleted = True
else:
logging.debug("Workflow with id {} does not exist".format(workflow_id))
with switch_db(WorkflowStatusModel, "hyperstream"):
workflows = WorkflowStatusModel.objects(workflow_id=workflow_id)
if len(workflows) == 1:
workflows[0].delete()
deleted = True
else:
logging.debug("Workflow status with id {} does not exist".format(workflow_id))
if workflow_id in self.workflows:
del self.workflows[workflow_id]
deleted = True
if deleted:
logging.info("Deleted workflow with id {}".format(workflow_id)) | 0.004525 |
def download_satellite_image(self, metaimage, x=None, y=None, zoom=None, palette=None):
"""
Downloads the satellite image described by the provided metadata. In case the satellite image is a tile, then
tile coordinates and zoom must be provided. An optional palette ID can be provided, if supported by the
downloaded preset (currently only NDVI is supported)
:param metaimage: the satellite image's metadata, in the form of a `MetaImage` subtype instance
:type metaimage: a `pyowm.agroapi10.imagery.MetaImage` subtype
:param x: x tile coordinate (only needed in case you are downloading a tile image)
:type x: int or `None`
:param y: y tile coordinate (only needed in case you are downloading a tile image)
:type y: int or `None`
:param zoom: zoom level (only needed in case you are downloading a tile image)
:type zoom: int or `None`
:param palette: ID of the color palette of the downloaded images. Values are provided by `pyowm.agroapi10.enums.PaletteEnum`
:type palette: str or `None`
:return: a `pyowm.agroapi10.imagery.SatelliteImage` instance containing both image's metadata and data
"""
if palette is not None:
assert isinstance(palette, str)
params = dict(paletteid=palette)
else:
palette = PaletteEnum.GREEN
params = dict()
# polygon PNG
if isinstance(metaimage, MetaPNGImage):
prepared_url = metaimage.url
status, data = self.http_client.get_png(
prepared_url, params=params)
img = Image(data, metaimage.image_type)
return SatelliteImage(metaimage, img, downloaded_on=timeutils.now(timeformat='unix'), palette=palette)
# GeoTIF
elif isinstance(metaimage, MetaGeoTiffImage):
prepared_url = metaimage.url
status, data = self.http_client.get_geotiff(
prepared_url, params=params)
img = Image(data, metaimage.image_type)
return SatelliteImage(metaimage, img, downloaded_on=timeutils.now(timeformat='unix'), palette=palette)
# tile PNG
elif isinstance(metaimage, MetaTile):
assert x is not None
assert y is not None
assert zoom is not None
prepared_url = self._fill_url(metaimage.url, x, y, zoom)
status, data = self.http_client.get_png(
prepared_url, params=params)
img = Image(data, metaimage.image_type)
tile = Tile(x, y, zoom, None, img)
return SatelliteImage(metaimage, tile, downloaded_on=timeutils.now(timeformat='unix'), palette=palette)
else:
raise ValueError("Cannot download: unsupported MetaImage subtype") | 0.004954 |
def version(self):
""" Return kernel and btrfs version. """
return dict(
buttersink=theVersion,
btrfs=self.butterStore.butter.btrfsVersion,
linux=platform.platform(),
) | 0.008772 |
def import_entities(self, entities):
"""Upload entity objects.
Args:
entities: iterable of firecloud.Entity objects.
"""
edata = Entity.create_payload(entities)
r = fapi.upload_entities(self.namespace, self.name,
edata, self.api_url)
fapi._check_response_code(r, 201) | 0.00554 |
def poly_to_power_basis(bezier_coeffs):
"""Convert a B |eacute| zier curve to polynomial in power basis.
.. note::
This assumes, but does not verify, that the "B |eacute| zier
degree" matches the true degree of the curve. Callers can
guarantee this by calling :func:`.full_reduce`.
Args:
bezier_coeffs (numpy.ndarray): A 1D array of coefficients in
the Bernstein basis.
Returns:
numpy.ndarray: 1D array of coefficients in monomial basis.
Raises:
.UnsupportedDegree: If the degree of the curve is not among
0, 1, 2 or 3.
"""
num_coeffs, = bezier_coeffs.shape
if num_coeffs == 1:
return bezier_coeffs
elif num_coeffs == 2:
# C0 (1 - s) + C1 s = C0 + (C1 - C0) s
coeff0, coeff1 = bezier_coeffs
return np.asfortranarray([coeff0, coeff1 - coeff0])
elif num_coeffs == 3:
# C0 (1 - s)^2 + C1 2 (1 - s) s + C2 s^2
# = C0 + 2(C1 - C0) s + (C2 - 2 C1 + C0) s^2
coeff0, coeff1, coeff2 = bezier_coeffs
return np.asfortranarray(
[coeff0, 2.0 * (coeff1 - coeff0), coeff2 - 2.0 * coeff1 + coeff0]
)
elif num_coeffs == 4:
        # C0 (1 - s)^3 + C1 3 (1 - s)^2 s + C2 3 (1 - s) s^2 + C3 s^3
# = C0 + 3(C1 - C0) s + 3(C2 - 2 C1 + C0) s^2 +
# (C3 - 3 C2 + 3 C1 - C0) s^3
coeff0, coeff1, coeff2, coeff3 = bezier_coeffs
return np.asfortranarray(
[
coeff0,
3.0 * (coeff1 - coeff0),
3.0 * (coeff2 - 2.0 * coeff1 + coeff0),
coeff3 - 3.0 * coeff2 + 3.0 * coeff1 - coeff0,
]
)
else:
raise _helpers.UnsupportedDegree(
num_coeffs - 1, supported=(0, 1, 2, 3)
) | 0.000555 |
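A quick numerical check of the quadratic branch using only numpy: the power-basis coefficients derived above must reproduce the Bernstein form C0 (1 - s)^2 + 2 C1 (1 - s) s + C2 s^2 for every s (a self-contained sketch, not part of the library):

import numpy as np

c0, c1, c2 = 1.0, 3.0, 2.0
power = [c0, 2.0 * (c1 - c0), c2 - 2.0 * c1 + c0]  # same formula as the branch above

s = np.linspace(0.0, 1.0, 5)
bernstein = c0 * (1 - s) ** 2 + 2.0 * c1 * (1 - s) * s + c2 * s ** 2
monomial = power[0] + power[1] * s + power[2] * s ** 2
assert np.allclose(bernstein, monomial)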
def _trj_store_meta_data(self, traj):
""" Stores general information about the trajectory in the hdf5file.
The `info` table will contain the name of the trajectory, it's timestamp, a comment,
the length (aka the number of single runs), and the current version number of pypet.
Also prepares the desired overview tables and fills the `run` table with dummies.
"""
# Description of the `info` table
descriptiondict = {'name': pt.StringCol(pypetconstants.HDF5_STRCOL_MAX_LOCATION_LENGTH,
pos=0),
'time': pt.StringCol(len(traj.v_time), pos=1),
'timestamp': pt.FloatCol(pos=3),
'comment': pt.StringCol(pypetconstants.HDF5_STRCOL_MAX_COMMENT_LENGTH,
pos=4),
'length': pt.IntCol(pos=2),
'version': pt.StringCol(pypetconstants.HDF5_STRCOL_MAX_NAME_LENGTH,
pos=5),
'python': pt.StringCol(pypetconstants.HDF5_STRCOL_MAX_NAME_LENGTH,
pos=5)}
# 'loaded_from' : pt.StringCol(pypetconstants.HDF5_STRCOL_MAX_LOCATION_LENGTH)}
infotable = self._all_get_or_create_table(where=self._overview_group, tablename='info',
description=descriptiondict,
expectedrows=len(traj))
insert_dict = self._all_extract_insert_dict(traj, infotable.colnames)
self._all_add_or_modify_row(traj.v_name, insert_dict, infotable, index=0,
flags=(HDF5StorageService.ADD_ROW,
HDF5StorageService.MODIFY_ROW))
# Description of the `run` table
rundescription_dict = {'name': pt.StringCol(pypetconstants.HDF5_STRCOL_MAX_NAME_LENGTH,
pos=1),
'time': pt.StringCol(len(traj.v_time), pos=2),
'timestamp': pt.FloatCol(pos=3),
'idx': pt.IntCol(pos=0),
'completed': pt.IntCol(pos=8),
'parameter_summary': pt.StringCol(
pypetconstants.HDF5_STRCOL_MAX_COMMENT_LENGTH,
pos=6),
'short_environment_hexsha': pt.StringCol(7, pos=7),
'finish_timestamp': pt.FloatCol(pos=4),
'runtime': pt.StringCol(
pypetconstants.HDF5_STRCOL_MAX_RUNTIME_LENGTH,
pos=5)}
runtable = self._all_get_or_create_table(where=self._overview_group,
tablename='runs',
description=rundescription_dict)
hdf5_description_dict = {'complib': pt.StringCol(7, pos=0),
'complevel': pt.IntCol(pos=1),
'shuffle': pt.BoolCol(pos=2),
'fletcher32': pt.BoolCol(pos=3),
'pandas_format': pt.StringCol(7, pos=4),
'encoding': pt.StringCol(11, pos=5)}
pos = 7
for name, table_name in HDF5StorageService.NAME_TABLE_MAPPING.items():
hdf5_description_dict[table_name] = pt.BoolCol(pos=pos)
pos += 1
# Store the hdf5 properties in an overview table
hdf5_description_dict.update({'purge_duplicate_comments': pt.BoolCol(pos=pos + 2),
'results_per_run': pt.IntCol(pos=pos + 3),
'derived_parameters_per_run': pt.IntCol(pos=pos + 4)})
hdf5table = self._all_get_or_create_table(where=self._overview_group,
tablename='hdf5_settings',
description=hdf5_description_dict)
insert_dict = {}
for attr_name in self.ATTR_LIST:
insert_dict[attr_name] = getattr(self, attr_name)
for attr_name, table_name in self.NAME_TABLE_MAPPING.items():
insert_dict[table_name] = getattr(self, attr_name)
for attr_name, name in self.PR_ATTR_NAME_MAPPING.items():
insert_dict[name] = getattr(self, attr_name)
self._all_add_or_modify_row(traj.v_name, insert_dict, hdf5table, index=0,
flags=(HDF5StorageService.ADD_ROW,
HDF5StorageService.MODIFY_ROW))
# Fill table with dummy entries starting from the current table size
actual_rows = runtable.nrows
self._trj_fill_run_table(traj, actual_rows, len(traj._run_information))
# stop != len(traj) to allow immediate post-proc with QUEUE wrapping
# Store the annotations and comment of the trajectory node
self._grp_store_group(traj, store_data=pypetconstants.STORE_DATA,
with_links=False,
recursive=False,
_hdf5_group=self._trajectory_group)
        # Store the list of explored parameters
self._trj_store_explorations(traj)
# Prepare the exploration tables
# Prepare the overview tables
tostore_tables = []
for name, table_name in HDF5StorageService.NAME_TABLE_MAPPING.items():
# Check if we want the corresponding overview table
# If the trajectory does not contain information about the table
# we assume it should be created.
if getattr(self, name):
tostore_tables.append(table_name)
self._srvc_make_overview_tables(tostore_tables, traj) | 0.004274 |
def trim_sparse(M, n_std=3, s_min=None, s_max=None):
"""Apply the trimming procedure to a sparse matrix.
"""
try:
from scipy.sparse import coo_matrix
except ImportError as e:
print(str(e))
print("I am peforming dense normalization by default.")
return trim_dense(M.todense())
r = M.tocoo()
sparsity = np.array(r.sum(axis=1)).flatten()
mean = np.mean(sparsity)
std = np.std(sparsity)
if s_min is None:
s_min = mean - n_std * std
if s_max is None:
s_max = mean + n_std * std
f = (sparsity > s_min) * (sparsity < s_max)
indices = [u for u in range(len(r.data)) if f[r.row[u]] and f[r.col[u]]]
rows = np.array([r.row[i] for i in indices])
cols = np.array([r.col[j] for j in indices])
data = np.array([r.data[k] for k in indices])
N = coo_matrix((data, (rows, cols)))
return N | 0.001125 |
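A minimal usage sketch on a toy contact-map-like matrix (assumes numpy, scipy and the trim_sparse function above are importable; the Poisson data is made up for illustration):

import numpy as np
from scipy.sparse import coo_matrix

rng = np.random.default_rng(0)
dense = rng.poisson(5, size=(20, 20))
dense[3, :] = 0                      # an artificially empty row, far below the mean coverage
M = coo_matrix(dense)

# Entries whose row or column index falls outside mean +/- 1 std of coverage are dropped.
trimmed = trim_sparse(M, n_std=1)
print(trimmed.nnz, 'entries kept out of', M.nnz)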
def AgregarConceptosBasicosMercadoInterno(self, kg_produccion_gb, precio_por_kg_produccion_gb,
kg_produccion_pr, precio_por_kg_produccion_pr,
kg_crecimiento_gb, precio_por_kg_crecimiento_gb,
kg_crecimiento_pr, precio_por_kg_crecimiento_pr,
**kwargs):
"Agrega balance litros y porcentajes sólidos (mercado interno)"
d = {'kgProduccionGB': kg_produccion_gb,
'precioPorKgProduccionGB': precio_por_kg_produccion_gb,
'kgProduccionPR': kg_produccion_pr,
'precioPorKgProduccionPR': precio_por_kg_produccion_pr,
'kgCrecimientoGB': kg_crecimiento_gb,
'precioPorKgCrecimientoGB': precio_por_kg_crecimiento_gb,
'kgCrecimientoPR':kg_crecimiento_pr,
'precioPorKgCrecimientoPR': precio_por_kg_crecimiento_pr}
self.solicitud['conceptosBasicosMercadoInterno'] = d
return True | 0.017674 |
def split_code_and_text_blocks(source_file):
"""Return list with source file separated into code and text blocks.
Returns
-------
blocks : list of (label, content)
List where each element is a tuple with the label ('text' or 'code'),
and content string of block.
"""
docstring, rest_of_content = get_docstring_and_rest(source_file)
blocks = [('text', docstring)]
pattern = re.compile(
r'(?P<header_line>^#{20,}.*)\s(?P<text_content>(?:^#.*\s)*)',
flags=re.M)
pos_so_far = 0
for match in re.finditer(pattern, rest_of_content):
match_start_pos, match_end_pos = match.span()
code_block_content = rest_of_content[pos_so_far:match_start_pos]
text_content = match.group('text_content')
sub_pat = re.compile('^#', flags=re.M)
text_block_content = dedent(re.sub(sub_pat, '', text_content))
if code_block_content.strip():
blocks.append(('code', code_block_content))
if text_block_content.strip():
blocks.append(('text', text_block_content))
pos_so_far = match_end_pos
remaining_content = rest_of_content[pos_so_far:]
if remaining_content.strip():
blocks.append(('code', remaining_content))
return blocks | 0.000781 |
def import_users(self, users):
""" Import users, returns import result (http://confluence.jetbrains.net/display/YTD2/Import+Users)
Example: importUsers([{'login':'vadim', 'fullName':'vadim', 'email':'[email protected]', 'jabber':'[email protected]'},
{'login':'maxim', 'fullName':'maxim', 'email':'[email protected]', 'jabber':'[email protected]'}])
"""
if len(users) <= 0:
return
known_attrs = ('login', 'fullName', 'email', 'jabber')
xml = '<list>\n'
for u in users:
xml += ' <user ' + "".join(k + '=' + quoteattr(u[k]) + ' ' for k in u if k in known_attrs) + '/>\n'
xml += '</list>'
# TODO: convert response xml into python objects
if isinstance(xml, str):
xml = xml.encode('utf-8')
return self._req_xml('PUT', '/import/users', xml, 400).toxml() | 0.006764 |
def actual_query(self):
"""Extract the actual query (not pattern) from operations."""
if not self._actual_query:
# trigger evaluation of operation
if (self.operation in ['query', 'getmore', 'update', 'remove'] or
self.command in ['count', 'findandmodify']):
self._actual_query = self._find_pattern('query: ', actual=True)
elif self.command == 'find':
self._actual_query = self._find_pattern('filter: ',
actual=True)
return self._actual_query | 0.003273 |
def _nextJob(self, job, nextRound=True):
"""
Given a completed job, start the next job in the round, or return None
:param nextRound: whether to start jobs from the next round if the current round is completed.
:return: the newly started Job, or None if no job was started
"""
jobInfo = job.info()
assert jobInfo['state'] == 'FINISHED'
roundEnd = False
if jobInfo['type'] == 'INJECT':
nextCommand = 'GENERATE'
elif jobInfo['type'] == 'GENERATE':
nextCommand = 'FETCH'
elif jobInfo['type'] == 'FETCH':
nextCommand = 'PARSE'
elif jobInfo['type'] == 'PARSE':
nextCommand = 'UPDATEDB'
elif jobInfo['type'] == 'UPDATEDB':
nextCommand = 'INVERTLINKS'
elif jobInfo['type'] == 'INVERTLINKS':
nextCommand = 'DEDUP'
elif jobInfo['type'] == 'DEDUP':
if self.enable_index:
nextCommand = 'INDEX'
else:
roundEnd = True
elif jobInfo['type'] == 'INDEX':
roundEnd = True
else:
raise NutchException("Unrecognized job type {}".format(jobInfo['type']))
if roundEnd:
if nextRound and self.currentRound < self.totalRounds:
nextCommand = 'GENERATE'
self.currentRound += 1
else:
return None
return self.jobClient.create(nextCommand) | 0.002686 |
def list_views():
"""
List all registered views
"""
echo_header("List of registered views")
for view in current_app.appbuilder.baseviews:
click.echo(
"View:{0} | Route:{1} | Perms:{2}".format(
view.__class__.__name__, view.route_base, view.base_permissions
)
) | 0.002941 |
def cluster(x, cluster='KMeans', n_clusters=3, ndims=None, format_data=True):
"""
Performs clustering analysis and returns a list of cluster labels
Parameters
----------
x : A Numpy array, Pandas Dataframe or list of arrays/dfs
The data to be clustered. You can pass a single array/df or a list.
If a list is passed, the arrays will be stacked and the clustering
will be performed across all lists (i.e. not within each list).
cluster : str or dict
        Model to use to discover clusters. Supported algorithms are: KMeans,
MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration,
SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a
string, but for finer control of the model parameters, pass as a
dictionary, e.g. reduce={'model' : 'KMeans', 'params' : {'max_iter' : 100}}.
See scikit-learn specific model docs for details on parameters supported for
each model.
n_clusters : int
Number of clusters to discover. Not required for HDBSCAN.
format_data : bool
Whether or not to first call the format_data function (default: True).
ndims : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
cluster_labels : list
An list of cluster labels
"""
    if cluster is None:
        return x
    elif (isinstance(cluster, six.string_types) and cluster == 'HDBSCAN') or \
        (isinstance(cluster, dict) and cluster['model'] == 'HDBSCAN'):
        if not _has_hdbscan:
            raise ImportError('HDBSCAN is not installed. Please install hdbscan>=0.8.11')
    if ndims is not None:
        warnings.warn('The ndims argument is now deprecated. Ignoring dimensionality reduction step.')
if format_data:
x = formatter(x, ppca=True)
# if reduce is a string, find the corresponding model
if isinstance(cluster, six.string_types):
model = models[cluster]
if cluster != 'HDBSCAN':
model_params = {
'n_clusters' : n_clusters
}
else:
model_params = {}
# if its a dict, use custom params
elif type(cluster) is dict:
if isinstance(cluster['model'], six.string_types):
model = models[cluster['model']]
model_params = cluster['params']
# initialize model
model = model(**model_params)
# fit the model
model.fit(np.vstack(x))
# return the labels
return list(model.labels_) | 0.004262 |
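A minimal usage sketch (assumes the cluster function above and its scikit-learn/hypertools dependencies are importable): two well-separated Gaussian blobs should come back with exactly two distinct labels.

import numpy as np

blob_a = np.random.randn(50, 3)
blob_b = np.random.randn(50, 3) + 10.0   # shifted far away from blob_a
labels = cluster(np.vstack([blob_a, blob_b]), cluster='KMeans', n_clusters=2)
print(len(labels), 'labels,', len(set(labels)), 'clusters')   # 100 labels, 2 clusters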
def find_tendril(cls, proto, addr):
"""
Finds the tendril corresponding to the protocol and address
tuple. Returns the Tendril object, or raises KeyError if the
tendril is not tracked.
The address tuple is the tuple of the local address and the
remote address for the tendril.
"""
# First, normalize the proto
proto = proto.lower()
# Now, find and return the tendril
return cls._tendrils[proto][addr] | 0.004065 |
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
DEBUG_MSG("set_joinstyle()", 1, self)
self.select()
GraphicsContextBase.set_joinstyle(self, js)
self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect() | 0.005376 |
def upload(c, directory, index=None, sign=False, dry_run=False):
"""
Upload (potentially also signing) all artifacts in ``directory``.
:param str index:
Custom upload index/repository name.
By default, uses whatever the invoked ``pip`` is configured to use.
Modify your ``pypirc`` file to add new named repositories.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts.
"""
# Obtain list of archive filenames, then ensure any wheels come first
# so their improved metadata is what PyPI sees initially (otherwise, it
# only honors the sdist's lesser data).
archives = list(
itertools.chain.from_iterable(
glob(os.path.join(directory, "dist", "*.{0}".format(extension)))
for extension in ("whl", "tar.gz")
)
)
# Sign each archive in turn
# TODO: twine has a --sign option; but the below is still nice insofar
# as it lets us dry-run, generate for web upload when pypi's API is
# being cranky, etc. Figure out which is better.
if sign:
prompt = "Please enter GPG passphrase for signing: "
input_ = StringIO(getpass.getpass(prompt) + "\n")
gpg_bin = find_gpg(c)
if not gpg_bin:
sys.exit(
"You need to have one of `gpg`, `gpg1` or `gpg2` "
"installed to GPG-sign!"
)
for archive in archives:
cmd = "{0} --detach-sign -a --passphrase-fd 0 {{0}}".format(
gpg_bin
) # noqa
c.run(cmd.format(archive), in_stream=input_)
input_.seek(0) # So it can be replayed by subsequent iterations
# Upload
parts = ["twine", "upload"]
    if index:
        parts.append("--repository {0}".format(index))
paths = archives[:]
if sign:
paths.append(os.path.join(directory, "dist", "*.asc"))
parts.extend(paths)
cmd = " ".join(parts)
if dry_run:
print("Would publish via: {0}".format(cmd))
print("Files that would be published:")
c.run("ls -l {0}".format(" ".join(paths)))
else:
c.run(cmd) | 0.000419 |
def color_val(color):
"""Convert various input to color tuples.
Args:
color (:obj:`Color`/str/tuple/int/ndarray): Color inputs
Returns:
tuple[int]: A tuple of 3 integers indicating BGR channels.
"""
if is_str(color):
return Color[color].value
elif isinstance(color, Color):
return color.value
elif isinstance(color, tuple):
assert len(color) == 3
for channel in color:
assert channel >= 0 and channel <= 255
return color
elif isinstance(color, int):
assert color >= 0 and color <= 255
return color, color, color
elif isinstance(color, np.ndarray):
assert color.ndim == 1 and color.size == 3
assert np.all((color >= 0) & (color <= 255))
color = color.astype(np.uint8)
return tuple(color)
else:
raise TypeError('Invalid type for color: {}'.format(type(color))) | 0.001082 |
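A short usage sketch of the accepted input forms (assumes numpy, the Color enum and color_val above are importable; the exact BGR values returned for named colors come from that enum):

import numpy as np

print(color_val('green'))                # name looked up in the Color enum
print(color_val((255, 0, 0)))            # explicit BGR tuple is passed through unchanged
print(color_val(128))                    # single int broadcast to (128, 128, 128)
print(color_val(np.array([0, 0, 255])))  # 1D ndarray of 3 values converted to a tuple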
def _argspec(func):
""" For a callable, get the full argument spec
:type func: Callable
:rtype: list[data.ArgumentSpec]
"""
assert isinstance(func, collections.Callable), 'Argument must be a callable'
try: sp = inspect.getargspec(func) if six.PY2 else inspect.getfullargspec(func)
except TypeError:
# inspect.getargspec() fails for built-in functions
return []
# Collect arguments with defaults
ret = []
defaults_start = len(sp.args) - len(sp.defaults) if sp.defaults else len(sp.args)
for i, name in enumerate(sp.args):
arg = data.ArgumentSpec(name)
if i >= defaults_start:
arg['default'] = sp.defaults[i - defaults_start]
ret.append(arg)
# *args, **kwargs
if sp.varargs:
ret.append(data.ArgumentSpec(sp.varargs, varargs=True))
if six.PY2:
if sp.keywords:
ret.append(data.ArgumentSpec(sp.keywords, keywords=True))
else:
if sp.varkw:
ret.append(data.ArgumentSpec(sp.varkw, keywords=True))
# TODO: support Python 3: kwonlyargs, kwonlydefaults, annotations
# Finish
return ret | 0.005204 |
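A minimal sketch of what _argspec reports for an ordinary function (assumes _argspec and the supporting data.ArgumentSpec class above are importable):

def sample(a, b=2, *args, **kwargs):
    pass

for spec in _argspec(sample):
    # One ArgumentSpec per argument: 'a', 'b' (with default=2),
    # then the varargs and keywords entries for *args / **kwargs.
    print(spec)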
def _rewrite_where(self, q):
"""
Rewrite field names inside WHERE tree.
"""
if isinstance(q, Lookup):
self._rewrite_col(q.lhs)
if isinstance(q, Node):
for child in q.children:
self._rewrite_where(child) | 0.007092 |
def aghmean(nums):
"""Return arithmetic-geometric-harmonic mean.
Iterates over arithmetic, geometric, & harmonic means until they
converge to a single value (rounded to 12 digits), following the
method described in :cite:`Raissouli:2009`.
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
The arithmetic-geometric-harmonic mean of nums
Examples
--------
>>> aghmean([1, 2, 3, 4])
2.198327159900212
>>> aghmean([1, 2])
1.4142135623731884
>>> aghmean([0, 5, 1000])
335.0
"""
m_a = amean(nums)
m_g = gmean(nums)
m_h = hmean(nums)
if math.isnan(m_a) or math.isnan(m_g) or math.isnan(m_h):
return float('nan')
while round(m_a, 12) != round(m_g, 12) and round(m_g, 12) != round(
m_h, 12
):
m_a, m_g, m_h = (
(m_a + m_g + m_h) / 3,
(m_a * m_g * m_h) ** (1 / 3),
3 / (1 / m_a + 1 / m_g + 1 / m_h),
)
return m_a | 0.000979 |