text (string, 78–104k characters) | score (float64, 0–0.18)
---|---|
def is_unstructured(self):
"""A boolean for each array whether it is unstructured or not"""
return [
arr.psy.decoder.is_unstructured(arr)
if not isinstance(arr, ArrayList) else
arr.is_unstructured
for arr in self] | 0.00722 |
def get_url(url_base, tenant_id, user, password, type, region):
"""It get the url for a concrete service
:param url_base: keystone url
:param tenand_id: the id of the tenant
:param user: the user
:param paassword: the password
:param type: the type of service
:param region: the region
"""
url = 'http://' + url_base + '/v2.0/tokens'
headers = {'Accept': 'application/json'}
payload = {'auth': {'tenantName': '' + tenant_id + '',
'passwordCredentials':
{'username': '' + user + '',
'password': '' + password + ''}}}
try:
response = requests.post(url, headers=headers,
data=json.dumps(payload))
response_json = response.json()
services = response_json['access']['serviceCatalog']
except Exception as e:
        raise Exception('Error obtaining the service catalog: ' + str(e))
for service in services:
if service['type'] == type and service['region'] == region:
for endpoint in service['endpoints']:
return endpoint['publicURL'] | 0.001756 |
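A usage sketch for the helper above; the host, tenant, user and region are made-up placeholder values, and a reachable Keystone v2 endpoint is assumed since the call performs a real HTTP request.
# Illustrative only: all values below are hypothetical.
glance_url = get_url('keystone.example.com:5000', 'demo-tenant', 'demo-user',
                     'secret', 'image', 'RegionOne')
print(glance_url)  # public endpoint of the matching service, or None if no match is found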
def deny(ip,
port=None,
proto='tcp',
direction='in',
port_origin='d',
ip_origin='d',
ttl=None,
comment=''):
'''
    Add a rule to the csf denied hosts
See :func:`_access_rule`.
1- Deny an IP:
CLI Example:
.. code-block:: bash
salt '*' csf.deny 127.0.0.1
salt '*' csf.deny 127.0.0.1 comment="Too localhosty"
'''
return _access_rule('deny', ip, port, proto, direction, port_origin, ip_origin, comment) | 0.018072 |
def pages(site_id):
"""Pages already crawled."""
start = int(flask.request.args.get("start", 0))
end = int(flask.request.args.get("end", start + 90))
reql = rr.table("pages").between(
[site_id, 1, r.minval], [site_id, r.maxval, r.maxval],
index="least_hops").order_by(index="least_hops")[start:end]
logging.debug("querying rethinkdb: %s", reql)
pages_ = reql.run()
return flask.jsonify(pages=list(pages_)) | 0.002188 |
def proxify_elt(elt, bases=None, _dict=None, public=False):
"""Proxify input elt.
:param elt: elt to proxify.
:param bases: elt class base classes. If None, use elt type.
:param dict _dict: specific elt class content to use.
    :param bool public: if True (default False), proxify only public members
        (where the name does not start with the character '_').
:return: proxified element.
:raises: TypeError if elt does not implement all routines of bases and
_dict.
"""
# ensure _dict is a dictionary
proxy_dict = {} if _dict is None else _dict.copy()
# set of proxified attribute names which are proxified during bases parsing
# and avoid to proxify them twice during _dict parsing
proxified_attribute_names = set()
# ensure bases is a tuple of types
if bases is None:
bases = (elt if isclass(elt) else elt.__class__,)
if isinstance(bases, string_types):
bases = (lookup(bases),)
elif isclass(bases):
bases = (bases,)
else:
bases = tuple(bases)
# fill proxy_dict with routines of bases
for base in bases:
# exclude object
if base is object:
continue
for name, member in getmembers(base, isroutine):
            # skip private members (leading underscore) when only public ones are wanted
            if public and name.startswith('_'):
                continue
eltmember = getattr(elt, name, None)
if eltmember is None:
raise TypeError(
'Wrong elt {0}. Must implement {1} ({2}) of {3}.'.
format(elt, name, member, base)
)
# proxify member if member is not a constructor
if name not in ['__new__', '__init__']:
# get routine from proxy_dict or eltmember
routine = proxy_dict.get(name, eltmember)
# exclude object methods
if getattr(routine, '__objclass__', None) is not object:
# get routine proxy
routine_proxy = proxify_routine(routine)
if ismethod(routine_proxy):
routine_proxy = get_method_function(routine_proxy)
# update proxy_dict
proxy_dict[name] = routine_proxy
# and save the proxified attribute flag
proxified_attribute_names.add(name)
# proxify proxy_dict
for name in proxy_dict:
value = proxy_dict[name]
if not hasattr(elt, name):
raise TypeError(
'Wrong elt {0}. Must implement {1} ({2}).'.format(
elt, name, value
)
)
if isroutine(value):
# if member has not already been proxified
if name not in proxified_attribute_names:
# proxify it
value = proxify_routine(value)
proxy_dict[name] = value
# set default constructors if not present in proxy_dict
if '__new__' not in proxy_dict:
proxy_dict['__new__'] = object.__new__
if '__init__' not in proxy_dict:
proxy_dict['__init__'] = object.__init__
# generate a new proxy class
cls = type('Proxy', bases, proxy_dict)
# instantiate proxy cls
result = cls if isclass(elt) else cls()
# bind elt to proxy
setattr(result, __PROXIFIED__, elt)
return result | 0.000294 |
def build(self):
"""Builds the barcode pattern from 'self.upc'
        :return: The pattern, wrapped in a one-element list
        :rtype: List
"""
code = _upc.EDGE[:]
for i, number in enumerate(self.upc[0:6]):
code += _upc.CODES['L'][int(number)]
code += _upc.MIDDLE
for number in self.upc[6:]:
code += _upc.CODES['R'][int(number)]
code += _upc.EDGE
return [code] | 0.004556 |
def _locate_java(self, s):
'''If JAVA_HOME is in the environ, return $JAVA_HOME/bin/s. Otherwise,
return s.
'''
if 'JAVA_HOME' in self.buildozer.environ:
return join(self.buildozer.environ['JAVA_HOME'], 'bin', s)
else:
return s | 0.006873 |
def find_single_file_project(self): # type: () -> List[str]
"""
Take first non-setup.py python file. What a mess.
:return:
"""
# TODO: use package_dirs
packaged_dirs = ""
try:
# Right now only returns 1st.
packaged_dirs = self.extract_package_dir()
    except Exception:
pass
likely_src_folders = [".", "src/"]
if packaged_dirs:
likely_src_folders.append(packaged_dirs)
candidates = []
for likely_src in likely_src_folders:
if not os.path.isdir(likely_src):
continue
files = [f for f in os.listdir(likely_src) if os.path.isfile(f)]
# BUG: doesn't deal with src/foo/bar.py
for file in files:
if file.endswith("setup.py") or file == "setup":
continue # duh
if file.endswith(".py"):
candidate = file.replace(".py", "")
if candidate != "setup":
candidates.append(candidate)
else:
if self.file_opener.is_python_inside(file):
candidates.append(file)
return candidates | 0.002414 |
def get_state_all(self):
"""Returns all device states"""
state_dict = {}
for device in self.get_device_names().keys():
state_dict[device] = self.get_state(device)
return state_dict | 0.008929 |
def validate_multiindex(self, obj):
"""validate that we can store the multi-index; reset and return the
new object
"""
levels = [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(obj.index.names)]
try:
return obj.reset_index(), levels
except ValueError:
raise ValueError("duplicate names/columns in the multi-index when "
"storing as a table") | 0.004167 |
def make_vertical_bar(percentage, width=1):
"""
Draws a vertical bar made of unicode characters.
    :param percentage: A value between 0 and 100
:param width: How many characters wide the bar should be.
:returns: Bar as a String
"""
bar = ' _▁▂▃▄▅▆▇█'
percentage //= 10
percentage = int(percentage)
if percentage < 0:
output = bar[0]
elif percentage >= len(bar):
output = bar[-1]
else:
output = bar[percentage]
return output * width | 0.001996 |
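A quick usage sketch of the bar helper above: the glyph is chosen by integer-dividing the percentage by 10 and indexing into the glyph string, so values outside 0-100 are clamped to the first or last glyph.
for pct in (0, 35, 75, 100):
    print(pct, make_vertical_bar(pct, width=2))
# 0 -> '  ', 35 -> '▂▂', 75 -> '▆▆', 100 -> '██'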
def removed(name,
user=None,
env=None):
'''
Verify that given package is not installed.
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
try:
installed_pkgs = __salt__['cabal.list'](
user=user, installed=True, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error looking up \'{0}\': {1}'.format(name, err)
        return ret
if name not in installed_pkgs:
ret['result'] = True
ret['comment'] = 'Package \'{0}\' is not installed'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Package \'{0}\' is set to be removed'.format(name)
return ret
if __salt__['cabal.uninstall'](pkg=name, user=user, env=env):
ret['result'] = True
ret['changes'][name] = 'Removed'
ret['comment'] = 'Package \'{0}\' was successfully removed'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Error removing package \'{0}\''.format(name)
return ret | 0.001771 |
def get_orm_columns(cls: Type) -> List[Column]:
"""
Gets :class:`Column` objects from an SQLAlchemy ORM class.
Does not provide their attribute names.
"""
mapper = inspect(cls) # type: Mapper
# ... returns InstanceState if called with an ORM object
# http://docs.sqlalchemy.org/en/latest/orm/session_state_management.html#session-object-states # noqa
# ... returns Mapper if called with an ORM class
# http://docs.sqlalchemy.org/en/latest/orm/mapping_api.html#sqlalchemy.orm.mapper.Mapper # noqa
colmap = mapper.columns # type: OrderedProperties
return colmap.values() | 0.001603 |
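A minimal usage sketch, assuming SQLAlchemy 1.x-style declarative imports; the model below is made up purely for illustration.
from sqlalchemy import Column, Integer, Text
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Person(Base):  # hypothetical model
    __tablename__ = 'person'
    pk = Column('pk', Integer, primary_key=True, autoincrement=True)
    name = Column('name', Text)

for col in get_orm_columns(Person):
    print(col.name, col.type)  # -> pk INTEGER, name TEXT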
def _apply_hard_disk(unit_number, key, operation, disk_label=None, size=None,
unit='GB', controller_key=None, thin_provision=None,
eagerly_scrub=None, datastore=None, filename=None):
'''
Returns a vim.vm.device.VirtualDeviceSpec object specifying to add/edit
a virtual disk device
unit_number
Add network adapter to this address
key
Device key number
operation
        Action which should be done on the device: add or edit
disk_label
Label of the new disk, can be overridden
size
Size of the disk
unit
Unit of the size, can be GB, MB, KB
controller_key
        Unique number of the controller key
thin_provision
Boolean for thin provision
eagerly_scrub
Boolean for eagerly scrubbing
datastore
Datastore name where the disk will be located
filename
Full file name of the vm disk
'''
log.trace('Configuring hard disk %s size=%s, unit=%s, controller_key=%s, '
'thin_provision=%s, eagerly_scrub=%s, datastore=%s, filename=%s',
disk_label, size, unit, controller_key, thin_provision,
eagerly_scrub, datastore, filename)
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.device = vim.vm.device.VirtualDisk()
disk_spec.device.key = key
disk_spec.device.unitNumber = unit_number
disk_spec.device.deviceInfo = vim.Description()
if size:
convert_size = salt.utils.vmware.convert_to_kb(unit, size)
disk_spec.device.capacityInKB = convert_size['size']
if disk_label:
disk_spec.device.deviceInfo.label = disk_label
if thin_provision is not None or eagerly_scrub is not None:
disk_spec.device.backing = \
vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
disk_spec.device.backing.diskMode = 'persistent'
if thin_provision is not None:
disk_spec.device.backing.thinProvisioned = thin_provision
if eagerly_scrub is not None and eagerly_scrub != 'None':
disk_spec.device.backing.eagerlyScrub = eagerly_scrub
if controller_key:
disk_spec.device.controllerKey = controller_key
if operation == 'add':
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
disk_spec.device.backing.fileName = '[{0}] {1}'.format(
salt.utils.vmware.get_managed_object_name(datastore), filename)
disk_spec.fileOperation = \
vim.vm.device.VirtualDeviceSpec.FileOperation.create
elif operation == 'edit':
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
return disk_spec | 0.000373 |
def emit(self, event, *args, **kwargs):
"""Send out an event and call it's associated functions
:param event: Name of the event to trigger
"""
for func in self._registered_events[event].values():
func(*args, **kwargs) | 0.007634 |
def run_commands(commands, # type: List[Union[str, List[str], Dict[str, Union[str, List[str]]]]]
directory, # type: str
env=None # type: Optional[Dict[str, Union[str, int]]]
): # noqa
# type: (...) -> None
"""Run list of commands."""
if env is None:
env = os.environ.copy()
for step in commands:
if isinstance(step, (list, six.string_types)):
execution_dir = directory
raw_command = step
elif step.get('command'): # dictionary
execution_dir = os.path.join(directory,
step.get('cwd')) if step.get('cwd') else directory # noqa pylint: disable=line-too-long
raw_command = step['command']
else:
raise AttributeError("Invalid command step: %s" % step)
command_list = raw_command.split(' ') if isinstance(raw_command, six.string_types) else raw_command # noqa pylint: disable=line-too-long
if platform.system().lower() == 'windows':
command_list = fix_windows_command_list(command_list)
with change_dir(execution_dir):
check_call(command_list, env=env) | 0.001665 |
def symbolic_rotation_matrix(phi, theta, symbolic_psi):
"""Retourne une matrice de rotation où psi est symbolique"""
return sympy.Matrix(Rz_matrix(phi)) * sympy.Matrix(Rx_matrix(theta)) * symbolic_Rz_matrix(symbolic_psi) | 0.008772 |
def main():
'''Entry point'''
if len(sys.argv) == 1:
print("Usage: tyler [filename]")
sys.exit(0)
filename = sys.argv[1]
if not os.path.isfile(filename):
print("Specified file does not exists")
sys.exit(8)
my_tyler = Tyler(filename=filename)
while True:
try:
for line in my_tyler:
print(line)
time.sleep(1)
except KeyboardInterrupt:
print("Quit signal received")
sys.exit(0) | 0.001953 |
def cublasDgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
"""
Matrix-matrix product for real general matrix.
"""
status = _libcublas.cublasDgemm_v2(handle,
_CUBLAS_OP[transa],
_CUBLAS_OP[transb], m, n, k,
ctypes.byref(ctypes.c_double(alpha)),
int(A), lda, int(B), ldb,
ctypes.byref(ctypes.c_double(beta)),
int(C), ldc)
cublasCheckStatus(status) | 0.004762 |
def put(self, key, value):
    '''Stores the object `value` named by `key`.
DirectoryTreeDatastore stores a directory entry.
'''
super(DirectoryTreeDatastore, self).put(key, value)
str_key = str(key)
# ignore root
if str_key == '/':
return
# retrieve directory, to add entry
dir_key = key.parent.instance('directory')
directory = self.directory(dir_key)
# ensure key is in directory
if str_key not in directory:
directory.append(str_key)
super(DirectoryTreeDatastore, self).put(dir_key, directory) | 0.007018 |
def get_goa_gene_sets(go_annotations):
"""Generate a list of gene sets from a collection of GO annotations.
Each gene set corresponds to all genes annotated with a certain GO term.
"""
go_term_genes = OrderedDict()
term_ids = {}
for ann in go_annotations:
term_ids[ann.go_term.id] = ann.go_term
try:
go_term_genes[ann.go_term.id].append(ann.db_symbol)
except KeyError:
go_term_genes[ann.go_term.id] = [ann.db_symbol]
go_term_genes = OrderedDict(sorted(go_term_genes.items()))
gene_sets = []
for tid, genes in go_term_genes.items():
go_term = term_ids[tid]
gs = GeneSet(id=tid, name=go_term.name, genes=genes,
source='GO',
collection=go_term.domain_short,
description=go_term.definition)
gene_sets.append(gs)
gene_sets = GeneSetCollection(gene_sets)
return gene_sets | 0.002105 |
def _get_proto():
'''
Checks configuration to see whether the user has SSL turned on. Default is:
.. code-block:: yaml
use_ssl: True
'''
use_ssl = config.get_cloud_config_value(
'use_ssl',
get_configured_provider(),
__opts__,
search_global=False,
default=True
)
if use_ssl is True:
return 'https'
return 'http' | 0.0025 |
def wrap_code(code: str, args: str = '') -> ast.Module:
"""
Compiles Python code into an async function or generator,
and automatically adds return if the function body is a single evaluation.
Also adds inline import expression support.
"""
if sys.version_info >= (3, 7):
user_code = import_expression.parse(code, mode='exec')
injected = ''
else:
injected = code
mod = import_expression.parse(CORO_CODE.format(args, textwrap.indent(injected, ' ' * 8)), mode='exec')
definition = mod.body[-1] # async def ...:
assert isinstance(definition, ast.AsyncFunctionDef)
try_block = definition.body[-1] # try:
assert isinstance(try_block, ast.Try)
if sys.version_info >= (3, 7):
try_block.body.extend(user_code.body)
else:
ast.increment_lineno(mod, -16) # bring line numbers back in sync with repl
ast.fix_missing_locations(mod)
is_asyncgen = any(isinstance(node, ast.Yield) for node in ast.walk(try_block))
last_expr = try_block.body[-1]
# if the last part isn't an expression, ignore it
if not isinstance(last_expr, ast.Expr):
return mod
# if the last expression is not a yield
if not isinstance(last_expr.value, ast.Yield):
# copy the expression into a return/yield
if is_asyncgen:
# copy the value of the expression into a yield
yield_stmt = ast.Yield(last_expr.value)
ast.copy_location(yield_stmt, last_expr)
# place the yield into its own expression
yield_expr = ast.Expr(yield_stmt)
ast.copy_location(yield_expr, last_expr)
# place the yield where the original expression was
try_block.body[-1] = yield_expr
else:
# copy the expression into a return
return_stmt = ast.Return(last_expr.value)
ast.copy_location(return_stmt, last_expr)
# place the return where the original expression was
try_block.body[-1] = return_stmt
return mod | 0.001943 |
def background_noise(self):
"""
Gaussian sigma of noise level per pixel (in counts per second)
:return: sqrt(variance) of background noise level
"""
if self._background_noise is None:
return data_util.bkg_noise(self.read_noise, self._exposure_time, self.sky_brightness, self.pixel_scale,
num_exposures=self._num_exposures)
else:
return self._background_noise | 0.008584 |
def read_cifar10(filename_queue):
"""Reads and parses examples from CIFAR10 data files.
Recommendation: if you want N-way read parallelism, call this function
N times. This will give you N independent Readers reading different
files & positions within those files, which will give better mixing of
examples.
Args:
filename_queue: A queue of strings with the filenames to read from.
Returns:
An object representing a single example, with the following fields:
height: number of rows in the result (32)
width: number of columns in the result (32)
depth: number of color channels in the result (3)
key: a scalar string Tensor describing the filename & record number
for this example.
label: an int32 Tensor with the label in the range 0..9.
uint8image: a [height, width, depth] uint8 Tensor with the image data
"""
class CIFAR10Record(object):
pass
result = CIFAR10Record()
# Dimensions of the images in the CIFAR-10 dataset.
# See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
# input format.
label_bytes = 1 # 2 for CIFAR-100
result.height = 32
result.width = 32
result.depth = 3
image_bytes = result.height * result.width * result.depth
# Every record consists of a label followed by the image, with a
# fixed number of bytes for each.
record_bytes = label_bytes + image_bytes
# Read a record, getting filenames from the filename_queue. No
# header or footer in the CIFAR-10 format, so we leave header_bytes
# and footer_bytes at their default of 0.
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
result.key, value = reader.read(filename_queue)
# Convert from a string to a vector of uint8 that is record_bytes long.
record_bytes = tf.decode_raw(value, tf.uint8)
# The first bytes represent the label, which we convert from uint8->int32.
result.label = tf.cast(
tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
depth_major = tf.reshape(
tf.strided_slice(record_bytes, [label_bytes],
[label_bytes + image_bytes]),
[result.depth, result.height, result.width])
# Convert from [depth, height, width] to [height, width, depth].
result.uint8image = tf.transpose(depth_major, [1, 2, 0])
return result | 0.01219 |
def _get_notmuch_thread(self, tid):
"""returns :class:`notmuch.database.Thread` with given id"""
query = self.query('thread:' + tid)
try:
return next(query.search_threads())
except StopIteration:
errmsg = 'no thread with id %s exists!' % tid
raise NonexistantObjectError(errmsg) | 0.00578 |
def to_ranges(lst):
"""
Convert a list of numbers to a list of ranges::
>>> numbers = [1,2,3,5,6]
>>> list(to_ranges(numbers))
[(1, 3), (5, 6)]
"""
for a, b in itertools.groupby(enumerate(lst), lambda t: t[1] - t[0]):
b = list(b)
yield b[0][1], b[-1][1] | 0.003344 |
def inspect_node_neighborhood(nlinks, msinds, node_msindex):
"""
    Get information about one node in the graph
    :param nlinks: neighborhood edges
    :param msinds: indexes in 3d image
    :param node_msindex: int, multiscale index of selected voxel
    :return: node_neighbor_edges, node_neighbor_seeds
"""
# seed_indexes = np.nonzero(node_seed)
# selected_inds = msinds[seed_indexes]
# node_msindex = selected_inds[0]
node_neighbor_edges = get_neighborhood_edes(nlinks, node_msindex)
node_neighbor_seeds = np.zeros_like(msinds, dtype=np.int8)
for neighboor_ind in np.unique(node_neighbor_edges[:, :2].ravel()):
node_neighbor_ind = np.where(msinds == neighboor_ind)
node_neighbor_seeds[node_neighbor_ind] = 2
node_neighbor_seeds[np.where(msinds == node_msindex)] = 1
# node_coordinates = np.unravel_index(selected_voxel_ind, msinds.shape)
# node_neighbor_coordinates = np.unravel_index(np.unique(node_neighbor_edges[:, :2].ravel()), msinds.shape)
return node_neighbor_edges, node_neighbor_seeds | 0.001852 |
def deep(symbol=None, token='', version=''):
'''DEEP is used to receive real-time depth of book quotations direct from IEX.
The depth of book quotations received via DEEP provide an aggregated size of resting displayed orders at a price and side,
and do not indicate the size or number of individual orders at any price level.
Non-displayed orders and non-displayed portions of reserve orders are not represented in DEEP.
DEEP also provides last trade price and size information. Trades resulting from either displayed or non-displayed orders matching on IEX will be reported. Routed executions will not be reported.
https://iexcloud.io/docs/api/#deep
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
_raiseIfNotStr(symbol)
if symbol:
return _getJson('deep?symbols=' + symbol, token, version)
return _getJson('deep', token, version) | 0.005994 |
def compatibility_rank(self, supported):
"""Rank the wheel against the supported tags. Smaller ranks are more
compatible!
:param supported: A list of compatibility tags that the current
Python implemenation can run.
"""
preferences = []
for tag in self.compatibility_tags:
try:
preferences.append(supported.index(tag))
# Tag not present
except ValueError:
pass
if len(preferences):
return (min(preferences), self.arity)
return (_big_number, 0) | 0.003333 |
def get_profile_for_user(user):
"""
Returns site-specific profile for this user. Raises
``ProfileNotConfigured`` if ``settings.ACCOUNTS_PROFILE_MODEL`` is not
set, and ``ImproperlyConfigured`` if the corresponding model can't
be found.
"""
if not hasattr(user, '_yacms_profile'):
# Raises ProfileNotConfigured if not bool(ACCOUNTS_PROFILE_MODEL)
profile_model = get_profile_model()
profile_manager = profile_model._default_manager.using(user._state.db)
user_field = get_profile_user_fieldname(profile_model, user.__class__)
profile, created = profile_manager.get_or_create(**{user_field: user})
profile.user = user
user._yacms_profile = profile
return user._yacms_profile | 0.001312 |
def meantsubpool(d, data_read):
""" Wrapper for mean visibility subtraction in time.
Doesn't work when called from pipeline using multiprocessing pool.
"""
logger.info('Subtracting mean visibility in time...')
data_read = numpyview(data_read_mem, 'complex64', datashape(d))
tsubpart = partial(rtlib.meantsub, data_read)
blranges = [(d['nbl'] * t/d['nthread'], d['nbl']*(t+1)/d['nthread']) for t in range(d['nthread'])]
with closing(mp.Pool(1, initializer=initreadonly, initargs=(data_read_mem,))) as tsubpool:
        tsubpool.map(tsubpart, blranges)
def _handle_pagerange(pagerange):
"""
Yields start and end pages from DfR pagerange field.
Parameters
----------
pagerange : str or unicode
DfR-style pagerange, e.g. "pp. 435-444".
Returns
-------
start : str
Start page.
end : str
End page.
"""
try:
pr = re.compile("pp\.\s([0-9]+)\-([0-9]+)")
start, end = re.findall(pr, pagerange)[0]
except IndexError:
start = end = 0
return unicode(start), unicode(end) | 0.007843 |
def calc_run(request):
"""
Run a calculation.
:param request:
a `django.http.HttpRequest` object.
If the request has the attribute `hazard_job_id`, the results of the
specified hazard calculations will be re-used as input by the risk
calculation.
The request also needs to contain the files needed to perform the
calculation. They can be uploaded as separate files, or zipped
together.
"""
hazard_job_id = request.POST.get('hazard_job_id')
if hazard_job_id:
hazard_job_id = int(hazard_job_id)
candidates = ("job_risk.ini", "job.ini")
else:
candidates = ("job_hazard.ini", "job_haz.ini", "job.ini")
result = safely_call(_prepare_job, (request, candidates))
if result.tb_str:
return HttpResponse(json.dumps(result.tb_str.splitlines()),
content_type=JSON, status=500)
inifiles = result.get()
if not inifiles:
msg = 'Could not find any file of the form %s' % str(candidates)
logging.error(msg)
return HttpResponse(content=json.dumps([msg]), content_type=JSON,
status=500)
user = utils.get_user(request)
try:
job_id, pid = submit_job(inifiles[0], user, hazard_job_id)
except Exception as exc: # no job created, for instance missing .xml file
# get the exception message
exc_msg = str(exc)
logging.error(exc_msg)
response_data = exc_msg.splitlines()
status = 500
else:
response_data = dict(job_id=job_id, status='created', pid=pid)
status = 200
return HttpResponse(content=json.dumps(response_data), content_type=JSON,
status=status) | 0.000571 |
def __button_action(self, data=None):
"""Button action event"""
if any(not x for x in (self._ename.value, self._p1.value, self._p2.value, self._file.value)):
print("Missing one of the required fields (event name, player names, file name)")
return
self.__p1chars = []
self.__p2chars = []
options = Namespace()
self.__history.append(self.__save_form())
options.ename = self._ename.value
if self._ename_min.value:
options.ename_min = self._ename_min.value
else:
options.ename_min = options.ename
options.pID = self._pID.value
options.mtype = self._mtype.value
options.mmid = options.mtype
options.p1 = self._p1.value
options.p2 = self._p2.value
options.p1char = self._p1char.value
options.p2char = self._p2char.value
options.bracket = self._bracket.value
isadir = os.path.isdir(self._file.value)
if isadir:
options.file = max([os.path.join(self._file.value, f) for f in os.listdir(self._file.value) if os.path.isfile(os.path.join(self._file.value, f))], key=os.path.getmtime)
else:
options.file = self._file.value
options.tags = self._tags.value
options.msuffix = self._msuffix.value
options.mprefix = self._mprefix.value
options.privacy = self._privacy.value
options.descrip = self._description.value
options.titleformat = self._titleformat.value
if self._p1sponsor.value:
options.p1 = " | ".join((self._p1sponsor.value, options.p1))
if self._p2sponsor.value:
options.p2 = " | ".join((self._p2sponsor.value, options.p2))
options.ignore = False
self.__reset_match(False, isadir)
self.__add_to_qview(options)
self._queueref.append(options)
if consts.firstrun:
thr = threading.Thread(target=self.__worker)
thr.daemon = True
thr.start()
consts.firstrun = False | 0.002425 |
def _unbytes(bytestr):
"""
Returns a bytestring from the human-friendly string returned by `_bytes`.
>>> _unbytes('123456')
'\x12\x34\x56'
"""
return ''.join(chr(int(bytestr[k:k + 2], 16))
for k in range(0, len(bytestr), 2)) | 0.003731 |
def reset_default_props(**kwargs):
"""Reset properties to initial cycle point"""
global _DEFAULT_PROPS
pcycle = plt.rcParams['axes.prop_cycle']
_DEFAULT_PROPS = {
'color': itertools.cycle(_get_standard_colors(**kwargs))
if len(kwargs) > 0 else itertools.cycle([x['color'] for x in pcycle]),
'marker': itertools.cycle(['o', 'x', '.', '+', '*']),
'linestyle': itertools.cycle(['-', '--', '-.', ':']),
} | 0.002212 |
def show_instances(server, cim_class):
"""
Display the instances of the CIM_Class defined by cim_class. If the
namespace is None, use the interop namespace. Search all namespaces for
instances except for CIM_RegisteredProfile
"""
if cim_class == 'CIM_RegisteredProfile':
for inst in server.profiles:
print(inst.tomof())
return
for ns in server.namespaces:
try:
insts = server.conn.EnumerateInstances(cim_class, namespace=ns)
if len(insts):
print('INSTANCES OF %s ns=%s' % (cim_class, ns))
for inst in insts:
print(inst.tomof())
except pywbem.Error as er:
if er.status_code != pywbem.CIM_ERR_INVALID_CLASS:
print('%s namespace %s Enumerate failed for conn=%s\n'
'exception=%s'
% (cim_class, ns, server, er)) | 0.001079 |
def calculate_correlations(tetra_z):
"""Returns dataframe of Pearson correlation coefficients.
- tetra_z - dictionary of Z-scores, keyed by sequence ID
Calculates Pearson correlation coefficient from Z scores for each
tetranucleotide. This is done longhand here, which is fast enough,
but for robustness we might want to do something else... (TODO).
Note that we report a correlation by this method, rather than a
percentage identity.
"""
orgs = sorted(tetra_z.keys())
correlations = pd.DataFrame(index=orgs, columns=orgs,
dtype=float).fillna(1.0)
for idx, org1 in enumerate(orgs[:-1]):
for org2 in orgs[idx+1:]:
assert sorted(tetra_z[org1].keys()) == sorted(tetra_z[org2].keys())
tets = sorted(tetra_z[org1].keys())
zscores = [[tetra_z[org1][t] for t in tets],
[tetra_z[org2][t] for t in tets]]
zmeans = [sum(zscore)/len(zscore) for zscore in zscores]
zdiffs = [[z - zmeans[0] for z in zscores[0]],
[z - zmeans[1] for z in zscores[1]]]
diffprods = sum([zdiffs[0][i] * zdiffs[1][i] for i in
range(len(zdiffs[0]))])
zdiffs2 = [sum([z * z for z in zdiffs[0]]),
sum([z * z for z in zdiffs[1]])]
correlations[org1][org2] = diffprods / \
math.sqrt(zdiffs2[0] * zdiffs2[1])
correlations[org2][org1] = correlations[org1][org2]
return correlations | 0.001271 |
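A toy example of the correlation computation above, using hand-made Z-scores for two genomes over two tetranucleotides (pandas and math are assumed to be imported in the defining module).
# The two toy profiles are perfectly correlated, so the off-diagonal
# entries of the returned DataFrame come out as 1.0.
tetra_z = {
    'genomeA': {'AAAA': 1.0, 'AAAC': -1.0},
    'genomeB': {'AAAA': 0.5, 'AAAC': -0.5},
}
print(calculate_correlations(tetra_z))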
def canonical_circulation(elements: T, key: Optional[Callable[[T], bool]] = None) -> T:
"""Get get a canonical representation of the ordered collection by finding its minimum circulation with the
given sort key
"""
return min(get_circulations(elements), key=key) | 0.010791 |
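``get_circulations`` is not shown in this snippet; the hedged sketch below assumes it simply yields every rotation of the input sequence, which makes the canonical form identical for any rotation of the same cycle.
def get_circulations(elements):
    # assumed helper: yield each rotation of the sequence
    for i in range(len(elements)):
        yield elements[i:] + elements[:i]

print(canonical_circulation((3, 1, 2), key=tuple))  # (1, 2, 3)
print(canonical_circulation((2, 3, 1), key=tuple))  # (1, 2, 3) as well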
def read(self, file, nbytes):
"""Read nbytes characters from file while running Tk mainloop"""
if not capable.OF_GRAPHICS:
raise RuntimeError("Cannot run this command without graphics")
if isinstance(file, int):
fd = file
else:
# Otherwise, assume we have Python file object
try:
fd = file.fileno()
except:
raise TypeError("file must be an integer or a filehandle/socket")
init_tk_default_root() # harmless if already done
self.widget = TKNTR._default_root
if not self.widget:
# no Tk widgets yet, so no need for mainloop
            # (shouldn't happen now with init_tk_default_root)
s = []
while nbytes>0:
snew = os.read(fd, nbytes) # returns bytes in PY3K
if snew:
if PY3K: snew = snew.decode('ascii','replace')
s.append(snew)
nbytes -= len(snew)
else:
# EOF -- just return what we have so far
break
return "".join(s)
else:
self.nbytes = nbytes
self.value = []
self.widget.tk.createfilehandler(fd,
TKNTR.READABLE | TKNTR.EXCEPTION,
self._read)
try:
self.widget.mainloop()
finally:
self.widget.tk.deletefilehandler(fd)
return "".join(self.value) | 0.006984 |
def bind_unix_socket(path):
""" Returns a unix file socket bound on (path). """
assert path
bindsocket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
os.unlink(path)
except OSError:
if os.path.exists(path):
raise
try:
bindsocket.bind(path)
except socket.error:
logger.error("Couldn't bind socket on %s", path)
return None
logger.info('Listening on %s', path)
bindsocket.listen(0)
return bindsocket | 0.001996 |
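A short usage sketch for the helper above: bind a socket, serve one client, and clean up. The socket path is illustrative, and the helper relies on a module-level logger being configured.
server = bind_unix_socket('/tmp/demo.sock')  # hypothetical path
if server is not None:
    conn, _ = server.accept()      # blocks until a client connects
    conn.sendall(b'hello\n')
    conn.close()
    server.close()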
def evdev_device(self):
"""
Return our corresponding evdev device object
"""
devices = [evdev.InputDevice(fn) for fn in evdev.list_devices()]
for device in devices:
if device.name == self.evdev_device_name:
return device
raise Exception("%s: could not find evdev device '%s'" % (self, self.evdev_device_name)) | 0.007752 |
def approle_token(vault_client, role_id, secret_id):
"""Returns a vault token based on the role and seret id"""
resp = vault_client.auth_approle(role_id, secret_id)
if 'auth' in resp and 'client_token' in resp['auth']:
return resp['auth']['client_token']
else:
raise aomi.exceptions.AomiCredentials('invalid approle') | 0.002865 |
def determineFrom(cls, challenge, password):
"""
Create a nonce and use it, along with the given challenge and password,
to generate the parameters for a response.
@return: A C{dict} suitable to be used as the keyword arguments when
calling this command.
"""
nonce = secureRandom(16)
response = _calcResponse(challenge, nonce, password)
return dict(cnonce=nonce, response=response) | 0.004367 |
def is_readable(value, **kwargs):
"""Indicate whether ``value`` is a readable file.
.. caution::
**Use of this validator is an anti-pattern and should be used with caution.**
Validating the readability of a file *before* attempting to read it
exposes your code to a bug called
`TOCTOU <https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use>`_.
This particular class of bug can expose your code to **security vulnerabilities**
and so this validator should only be used if you are an advanced user.
A better pattern to use when reading from a file is to apply the principle of
EAFP ("easier to ask forgiveness than permission"), and simply attempt to
    read from the file using a ``try ... except`` block:
.. code-block:: python
try:
with open('path/to/filename.txt', mode = 'r') as file_object:
# read from file here
except (OSError, IOError) as error:
            # Handle an error if unable to read.
:param value: The value to evaluate.
:type value: Path-like object
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
validators.readable(value, **kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True | 0.003253 |
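Usage is a simple boolean check; the actual work is delegated to the surrounding package's ``validators.readable`` call, so these calls assume that package is importable.
print(is_readable('/etc/hostname'))   # True on most Linux systems, if the file is readable
print(is_readable('/no/such/file'))   # False: the underlying validator raises, so we return False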
def attach(self, container_id=None, sudo=False):
'''attach to a container instance based on container_id
Parameters
==========
        container_id: the container_id to attach to
sudo: whether to issue the command with sudo (or not)
a container started with sudo will belong to the root user
If started by a user, the user needs to control deleting it
Returns
=======
        return_code: the return code from the attach command. 0 indicates a
        successful attach, 255 indicates not.
'''
sudo = self._get_sudo(sudo)
container_id = self.get_container_id(container_id)
        # singularity oci attach
cmd = self._init_command('attach')
# Add the container_id
cmd.append(container_id)
        # Attach to the container; the return code goes to the user (message to screen)
return self._run_and_return(cmd, sudo) | 0.00111 |
def metadata(self, file_path, params=None):
"""
:params:
title: string
keywords: array
extra_metadata: array
temporal_coverage: coverage object
spatial_coverage: coverage object
:return:
file metadata object (200 status code)
"""
url_base = self.hs.url_base
url = "{url_base}/resource/{pid}/files/metadata/{file_path}/".format(url_base=url_base,
pid=self.pid,
file_path=file_path)
if params is None:
r = self.hs._request('GET', url)
else:
headers = {}
headers["Content-Type"] = "application/json"
r = self.hs._request("PUT", url, data=json.dumps(params), headers=headers)
return r | 0.008395 |
def send(self, fail_silently=False):
"""
Sends the sms message
"""
if not self.to:
# Don't bother creating the connection if there's nobody to send to
return 0
res = self.get_connection(fail_silently).send_messages([self])
sms_post_send.send(sender=self, to=self.to, from_phone=self.from_phone, body=self.body)
return res | 0.007481 |
def collect_yarn_application_diagnostics(self, *application_ids):
"""
DEPRECATED: use create_yarn_application_diagnostics_bundle on the Yarn service. Deprecated since v10.
Collects the Diagnostics data for Yarn applications.
@param application_ids: An array of strings containing the ids of the
yarn applications.
@return: Reference to the submitted command.
@since: API v8
"""
args = dict(applicationIds = application_ids)
return self._cmd('yarnApplicationDiagnosticsCollection', api_version=8, data=args) | 0.008741 |
def pOparapar(self,Opar,apar,tdisrupt=None):
"""
NAME:
pOparapar
PURPOSE:
return the probability of a given parallel (frequency,angle) offset pair
INPUT:
Opar - parallel frequency offset (array) (can be Quantity)
apar - parallel angle offset along the stream (scalar) (can be Quantity)
OUTPUT:
p(Opar,apar)
HISTORY:
2015-12-07 - Written - Bovy (UofT)
"""
if _APY_LOADED and isinstance(Opar,units.Quantity):
Opar= Opar.to(1/units.Gyr).value\
/bovy_conversion.freq_in_Gyr(self._vo,self._ro)
if _APY_LOADED and isinstance(apar,units.Quantity):
apar= apar.to(units.rad).value
if tdisrupt is None: tdisrupt= self._tdisrupt
if isinstance(Opar,(int,float,numpy.float32,numpy.float64)):
Opar= numpy.array([Opar])
out= numpy.zeros(len(Opar))
# Compute ts
ts= apar/Opar
# Evaluate
out[(ts < tdisrupt)*(ts >= 0.)]=\
numpy.exp(-0.5*(Opar[(ts < tdisrupt)*(ts >= 0.)]-self._meandO)**2.\
/self._sortedSigOEig[2])/\
numpy.sqrt(self._sortedSigOEig[2])
return out | 0.021944 |
def weakref_proxy(obj):
"""returns either a weakref.proxy for the object, or if object is already a proxy,
returns itself."""
if type(obj) in weakref.ProxyTypes:
return obj
else:
return weakref.proxy(obj) | 0.008475 |
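An illustration of the idempotent behaviour described in the docstring above.
import weakref

class Node(object):
    pass

n = Node()
p = weakref_proxy(n)            # wraps the object in a weakref.proxy
assert weakref_proxy(p) is p    # an existing proxy is returned unchanged
assert isinstance(p, weakref.ProxyTypes)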
def determine_version(self, request, *args, **kwargs):
"""
If versioning is being used, then determine any API version for the
incoming request. Returns a two-tuple of (version, versioning_scheme)
"""
if self.versioning_class is None:
return (None, None)
scheme = self.versioning_class()
return (scheme.determine_version(request, *args, **kwargs), scheme) | 0.004728 |
def append_vobject(self, vtodo, project=None):
"""Add a task from vObject to Taskwarrior
vtodo -- the iCalendar to add
project -- the project to add (see get_filesnames() as well)
"""
if project:
project = basename(project)
return self.to_task(vtodo.vtodo, project) | 0.006154 |
def repopulateWinowMenu(self, actionGroup):
""" Clear the window menu and fills it with the actions of the actionGroup
"""
for action in self.windowMenu.actions():
self.windowMenu.removeAction(action)
for action in actionGroup.actions():
self.windowMenu.addAction(action) | 0.009146 |
def ListChildren(self, limit=None, age=NEWEST_TIME):
"""Yields RDFURNs of all the children of this object.
Args:
limit: Total number of items we will attempt to retrieve.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range in microseconds.
Yields:
RDFURNs instances of each child.
"""
# Just grab all the children from the index.
for predicate, timestamp in data_store.DB.AFF4FetchChildren(
self.urn, timestamp=Factory.ParseAgeSpecification(age), limit=limit):
urn = self.urn.Add(predicate)
urn.age = rdfvalue.RDFDatetime(timestamp)
yield urn | 0.007599 |
def argsort(*args, **kwargs):
"""
like np.argsort but for lists
Args:
*args: multiple lists to sort by
**kwargs:
reverse (bool): sort order is descending if True else acscending
CommandLine:
python -m utool.util_list argsort
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> result = ut.argsort({'a': 3, 'b': 2, 'c': 100})
>>> print(result)
"""
if len(args) == 1 and isinstance(args[0], dict):
dict_ = args[0]
index_list = list(dict_.keys())
value_list = list(dict_.values())
return sortedby2(index_list, value_list)
else:
index_list = list(range(len(args[0])))
return sortedby2(index_list, *args, **kwargs) | 0.001274 |
def _load_site_scons_dir(topdir, site_dir_name=None):
"""Load the site_scons dir under topdir.
Prepends site_scons to sys.path, imports site_scons/site_init.py,
and prepends site_scons/site_tools to default toolpath."""
if site_dir_name:
err_if_not_found = True # user specified: err if missing
else:
site_dir_name = "site_scons"
err_if_not_found = False
site_dir = os.path.join(topdir, site_dir_name)
if not os.path.exists(site_dir):
if err_if_not_found:
raise SCons.Errors.UserError("site dir %s not found."%site_dir)
return
site_init_filename = "site_init.py"
site_init_modname = "site_init"
site_tools_dirname = "site_tools"
# prepend to sys.path
sys.path = [os.path.abspath(site_dir)] + sys.path
site_init_file = os.path.join(site_dir, site_init_filename)
site_tools_dir = os.path.join(site_dir, site_tools_dirname)
if os.path.exists(site_init_file):
import imp, re
try:
try:
fp, pathname, description = imp.find_module(site_init_modname,
[site_dir])
# Load the file into SCons.Script namespace. This is
# opaque and clever; m is the module object for the
# SCons.Script module, and the exec ... in call executes a
# file (or string containing code) in the context of the
# module's dictionary, so anything that code defines ends
# up adding to that module. This is really short, but all
# the error checking makes it longer.
try:
m = sys.modules['SCons.Script']
except Exception as e:
fmt = 'cannot import site_init.py: missing SCons.Script module %s'
raise SCons.Errors.InternalError(fmt % repr(e))
try:
sfx = description[0]
modname = os.path.basename(pathname)[:-len(sfx)]
site_m = {"__file__": pathname, "__name__": modname, "__doc__": None}
re_special = re.compile("__[^_]+__")
for k in list(m.__dict__.keys()):
if not re_special.match(k):
site_m[k] = m.__dict__[k]
# This is the magic.
exec(compile(fp.read(), fp.name, 'exec'), site_m)
except KeyboardInterrupt:
raise
except Exception as e:
fmt = '*** Error loading site_init file %s:\n'
sys.stderr.write(fmt % repr(site_init_file))
raise
else:
for k in site_m:
if not re_special.match(k):
m.__dict__[k] = site_m[k]
except KeyboardInterrupt:
raise
except ImportError as e:
fmt = '*** cannot import site init file %s:\n'
sys.stderr.write(fmt % repr(site_init_file))
raise
finally:
if fp:
fp.close()
if os.path.exists(site_tools_dir):
# prepend to DefaultToolpath
SCons.Tool.DefaultToolpath.insert(0, os.path.abspath(site_tools_dir)) | 0.001481 |
def query_recent(num=8, **kwargs):
'''
query recent posts.
'''
order_by_create = kwargs.get('order_by_create', False)
kind = kwargs.get('kind', None)
if order_by_create:
if kind:
recent_recs = TabPost.select().where(
(TabPost.kind == kind) & (TabPost.valid == 1)
).order_by(
TabPost.time_create.desc()
).limit(num)
else:
recent_recs = TabPost.select().where(
TabPost.valid == 1
).order_by(
TabPost.time_create.desc()
).limit(num)
else:
if kind:
recent_recs = TabPost.select().where(
(TabPost.kind == kind) & (TabPost.valid == 1)
).order_by(
TabPost.time_update.desc()
).limit(num)
else:
recent_recs = TabPost.select().where(
TabPost.valid == 1
).order_by(
TabPost.time_update.desc()
).limit(num)
return recent_recs | 0.001698 |
def _evolve(self, state, qargs=None):
"""Evolve a quantum state by the operator.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions.
"""
state = self._format_state(state)
if qargs is None:
if state.shape[0] != self._input_dim:
raise QiskitError(
"Operator input dimension is not equal to state dimension."
)
if state.ndim == 1:
# Return evolved statevector
return np.dot(self.data, state)
# Return evolved density matrix
return np.dot(
np.dot(self.data, state), np.transpose(np.conj(self.data)))
# Subsystem evolution
return self._evolve_subsystem(state, qargs) | 0.001768 |
def find_triangles(self):
"""
Finds all the triangles present in the given model
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.inference import Mplp
>>> mm = MarkovModel()
>>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
>>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
... ('x4', 'x7'), ('x5', 'x7')])
>>> phi = [DiscreteFactor(edge, [2, 2], np.random.rand(4)) for edge in mm.edges()]
>>> mm.add_factors(*phi)
>>> mplp = Mplp(mm)
>>> mplp.find_triangles()
"""
return list(filter(lambda x: len(x) == 3, nx.find_cliques(self.model))) | 0.003425 |
def new_crew_member(self, program, role, fullname, givenname, surname):
"""Callback run for each new crew member entry. 'fullname' is a
derived full-name, based on the presence of 'givenname' and/or
'surname'.
"""
if self.__v_crew_member:
# [Crew: EP000036710112, Actor, Estelle Parsons]
print("[Crew: %s, %s, %s]" % (program, role, fullname)) | 0.007335 |
def detectOperaMobile(self):
"""Return detection of an Opera browser for a mobile device
Detects Opera Mobile or Opera Mini.
"""
return UAgentInfo.engineOpera in self.__userAgent \
and (UAgentInfo.mini in self.__userAgent
or UAgentInfo.mobi in self.__userAgent) | 0.009317 |
def extinction_query(lon, lat,
coordtype='equatorial',
sizedeg=5.0,
forcefetch=False,
cachedir='~/.astrobase/dust-cache',
verbose=True,
timeout=10.0,
jitter=5.0):
'''This queries the 2MASS DUST service to find the extinction parameters
for the given `lon`, `lat`.
Parameters
----------
lon,lat: float
These are decimal right ascension and declination if `coordtype =
'equatorial'`. These are are decimal Galactic longitude and latitude if
`coordtype = 'galactic'`.
coordtype : {'equatorial','galactic'}
Sets the type of coordinates passed in as `lon`, `lat`.
sizedeg : float
This is the width of the image returned by the DUST service. This can
usually be left as-is if you're interested in the extinction only.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our request.
jitter : float
This is used to control the scale of the random wait in seconds before
starting the query. Useful in parallelized situations.
Returns
-------
dict
A dict of the following form is returned::
{'Amag':{dict of extinction A_v values for several mag systems},
'table': array containing the full extinction table,
'tablefile': the path to the full extinction table file on disk,
'provenance': 'cached' or 'new download',
'request': string repr of the request made to 2MASS DUST}
'''
dustparams = DUST_PARAMS.copy()
# convert the lon, lat to the required format
# and generate the param dict
if coordtype == 'equatorial':
locstr = '%.3f %.3f Equ J2000' % (lon, lat)
elif coordtype == 'galactic':
locstr = '%.3f %.3f gal' % (lon, lat)
else:
LOGERROR('unknown coordinate type: %s' % coordtype)
return None
dustparams['locstr'] = locstr
dustparams['regSize'] = '%.3f' % sizedeg
# see if the cachedir exists
if '~' in cachedir:
cachedir = os.path.expanduser(cachedir)
if not os.path.exists(cachedir):
os.makedirs(cachedir)
# generate the cachekey and cache filename
cachekey = '%s - %.1f' % (locstr, sizedeg)
cachekey = hashlib.sha256(cachekey.encode()).hexdigest()
cachefname = os.path.join(cachedir, '%s.txt' % cachekey)
provenance = 'cache'
# if this does not exist in cache or if we're forcefetching, do the query
if forcefetch or (not os.path.exists(cachefname)):
time.sleep(random.randint(1,jitter))
provenance = 'new download'
try:
if verbose:
LOGINFO('submitting 2MASS DUST request for '
'lon = %.3f, lat = %.3f, type = %s, size = %.1f' %
(lon, lat, coordtype, sizedeg))
req = requests.get(DUST_URL, dustparams, timeout=timeout)
req.raise_for_status()
resp = req.text
# see if we got an extinction table URL in the response
tableurl = DUST_REGEX.search(resp)
# if we did, download it to the cache directory
if tableurl:
tableurl = tableurl.group(0)
req2 = requests.get(tableurl, timeout=timeout)
# write the table to the cache directory
with open(cachefname,'wb') as outfd:
outfd.write(req2.content)
tablefname = cachefname
else:
LOGERROR('could not get extinction parameters for '
'%s (%.3f, %.3f) with size = %.1f' % (coordtype,
lon,lat,sizedeg))
LOGERROR('error from DUST service follows:\n%s' % resp)
return None
except requests.exceptions.Timeout as e:
LOGERROR('DUST request timed out for '
'%s (%.3f, %.3f) with size = %.1f' % (coordtype,
lon,lat,sizedeg))
return None
except Exception as e:
LOGEXCEPTION('DUST request failed for '
'%s (%.3f, %.3f) with size = %.1f' % (coordtype,
lon,lat,sizedeg))
return None
# if this result is available in the cache, get it from there
else:
if verbose:
LOGINFO('getting cached 2MASS DUST result for '
'lon = %.3f, lat = %.3f, coordtype = %s, size = %.1f' %
(lon, lat, coordtype, sizedeg))
tablefname = cachefname
#
# now we should have the extinction table in some form
#
# read and parse the extinction table using astropy.Table
extinction_table = Table.read(tablefname, format='ascii.ipac')
# get the columns we need
filters = np.array(extinction_table['Filter_name'])
a_sf11_byfilter = np.array(extinction_table['A_SandF'])
a_sfd98_byfilter = np.array(extinction_table['A_SFD'])
# generate the output dict
extdict = {'Amag':{x:{'sf11':y, 'sfd98':z} for
x,y,z in zip(filters,a_sf11_byfilter,a_sfd98_byfilter)},
'table':np.array(extinction_table),
'tablefile':os.path.abspath(cachefname),
'provenance':provenance,
'request':'%s (%.3f, %.3f) with size = %.1f' % (coordtype,
lon,lat,
sizedeg)}
return extdict | 0.003946 |
def predict(self, quadruplets):
"""Predicts the ordering between sample distances in input quadruplets.
For each quadruplet, returns 1 if the quadruplet is in the right order (
first pair is more similar than second pair), and -1 if not.
Parameters
----------
quadruplets : array-like, shape=(n_quadruplets, 4, n_features) or
(n_quadruplets, 4)
3D Array of quadruplets to predict, with each row corresponding to four
points, or 2D array of indices of quadruplets if the metric learner
uses a preprocessor.
Returns
-------
prediction : `numpy.ndarray` of floats, shape=(n_constraints,)
Predictions of the ordering of pairs, for each quadruplet.
"""
check_is_fitted(self, 'transformer_')
quadruplets = check_input(quadruplets, type_of_inputs='tuples',
preprocessor=self.preprocessor_,
estimator=self, tuple_size=self._tuple_size)
return np.sign(self.decision_function(quadruplets)) | 0.000965 |
def _update_limits_from_api(self):
"""
Call the service's API action to retrieve limit/quota information, and
update AwsLimit objects in ``self.limits`` with this information.
"""
logger.debug('Setting DirectoryService limits from API')
self.connect()
resp = self.conn.get_directory_limits()
directory_limits = resp['DirectoryLimits']
self.limits['CloudOnlyDirectories']._set_api_limit(
directory_limits['CloudOnlyDirectoriesLimit']
)
self.limits['CloudOnlyMicrosoftAD']._set_api_limit(
directory_limits['CloudOnlyMicrosoftADLimit']
)
self.limits['ConnectedDirectories']._set_api_limit(
directory_limits['ConnectedDirectoriesLimit']
) | 0.002558 |
def add(x1, x2, output_shape=None, name=None):
"""Binary addition with broadcsting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
if not isinstance(x2, Tensor):
return ScalarAddOperation(x1, x2).outputs[0]
with tf.name_scope(name, default_name="add"):
x1, x2 = binary_arguments_to_tensors(x1, x2)
return AddOperation(
x1, x2, output_shape=_infer_binary_broadcast_shape(
x1.shape, x2.shape, output_shape)).outputs[0] | 0.008432 |
def extreme_temperature_range(tasmax, tasmin, freq='YS'):
r"""Extreme intra-period temperature range.
The maximum of max temperature (TXx) minus the minimum of min temperature (TNn) for the given time period.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature values [℃] or [K]
tasmin : xarray.DataArray
Minimum daily temperature values [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Extreme intra-period temperature range for the given time period.
Notes
-----
Let :math:`TX_{ij}` and :math:`TN_{ij}` be the daily maximum and minimum temperature at day :math:`i`
of period :math:`j`. Then the extreme temperature range in period :math:`j` is:
.. math::
ETR_j = max(TX_{ij}) - min(TN_{ij})
"""
tx_max = tasmax.resample(time=freq).max(dim='time')
tn_min = tasmin.resample(time=freq).min(dim='time')
out = tx_max - tn_min
out.attrs['units'] = tasmax.units
return out | 0.00381 |
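A hedged, self-contained sketch with a synthetic one-year daily series in Kelvin; the result has one value per resampling period.
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('2000-01-01', periods=365, freq='D')
tasmax = xr.DataArray(290 + 10 * np.random.rand(365), dims='time',
                      coords={'time': time}, attrs={'units': 'K'})
tasmin = xr.DataArray(270 + 10 * np.random.rand(365), dims='time',
                      coords={'time': time}, attrs={'units': 'K'})
etr = extreme_temperature_range(tasmax, tasmin, freq='YS')
print(etr.values)  # max(TX) - min(TN) over the year, in K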
def parse_headers(self, http_code):
""" Parse http-code (like 'Header-X: foo\r\nHeader-Y: bar\r\n') and retrieve (save) HTTP-headers
:param http_code: code to parse
:return: None
"""
if self.__ro_flag:
raise RuntimeError('Read-only object changing attempt')
self.__headers = WHTTPHeaders.import_headers(http_code) | 0.030395 |
def gateways_info():
"""Returns gateways data.
"""
data = netifaces.gateways()
results = {'default': {}}
with suppress(KeyError):
results['ipv4'] = data[netifaces.AF_INET]
results['default']['ipv4'] = data['default'][netifaces.AF_INET]
with suppress(KeyError):
results['ipv6'] = data[netifaces.AF_INET6]
results['default']['ipv6'] = data['default'][netifaces.AF_INET6]
return results | 0.002247 |
def rpccall(pvname, request=None, rtype=None):
"""Decorator marks a client proxy method.
:param str pvname: The PV name, which will be formated using the 'format' argument of the proxy class constructor.
:param request: A pvRequest string or :py:class:`p4p.Value` passed to eg. :py:meth:`p4p.client.thread.Context.rpc`.
The method to be decorated must have all keyword arguments,
where the keywords are type code strings or :class:`~p4p.Type`.
"""
def wrapper(fn):
fn._call_PV = pvname
fn._call_Request = request
fn._reply_Type = rtype
return fn
return wrapper | 0.004785 |
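The decorator above only records metadata on the method; a hedged sketch follows, where the PV name and pvRequest string are made up for illustration.
class DemoProxy(object):
    @rpccall('DEMO:ADD', request='field()')  # hypothetical PV name / pvRequest
    def add(lhs='d', rhs='d'):
        pass  # never executed; the proxy machinery reads the attached attributes

print(DemoProxy.add._call_PV)       # 'DEMO:ADD'
print(DemoProxy.add._call_Request)  # 'field()'
print(DemoProxy.add._reply_Type)    # None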
def _task_idle_ticks(seconds_per_cycle):
""" 计算下次周期的沉睡时间 """
t = time_ticks()
while True:
t += seconds_per_cycle
yield max(t - time_ticks(), 0) | 0.011628 |
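A hedged usage sketch, assuming ``time_ticks`` in the defining module is a monotonic clock such as time.monotonic; each next() yields the time left in the current fixed-length cycle, so the loop runs on a steady cadence regardless of how long the work takes.
import time

idle = _task_idle_ticks(seconds_per_cycle=0.5)
for _ in range(3):
    pass                    # placeholder for one cycle of work
    time.sleep(next(idle))  # sleep only for whatever remains of the 0.5 s cycle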
def wsgi_middleware(self, app, cors=False):
"""WSGI middlewares that wraps the given ``app`` and serves
actual image files. ::
fs_store = HttpExposedFileSystemStore('userimages', 'images/')
app = fs_store.wsgi_middleware(app)
:param app: the wsgi app to wrap
:type app: :class:`~typing.Callable`\ [[],
:class:`~typing.Iterable`\ [:class:`bytes`]]
:returns: the another wsgi app that wraps ``app``
:rtype: :class:`StaticServerMiddleware`
"""
_app = StaticServerMiddleware(app, '/' + self.prefix, self.path,
cors=self.cors)
def app(environ, start_response):
if not hasattr(self, 'host_url'):
self.host_url = (environ['wsgi.url_scheme'] + '://' +
environ['HTTP_HOST'] + '/')
return _app(environ, start_response)
return app | 0.004211 |
def dset_copy(dset,to_dir):
'''robust way to copy a dataset (including AFNI briks)'''
if nl.is_afni(dset):
dset_strip = re.sub(r'\.(HEAD|BRIK)?(\.(gz|bz))?','',dset)
for dset_file in [dset_strip + '.HEAD'] + glob.glob(dset_strip + '.BRIK*'):
if os.path.exists(dset_file):
shutil.copy(dset_file,to_dir)
else:
if os.path.exists(dset):
shutil.copy(dset,to_dir)
else:
nl.notify('Warning: couldn\'t find file %s to copy to %s' %(dset,to_dir),level=nl.level.warning) | 0.019784 |
def _traverse_repos(self, callback, repo_name=None):
'''
Traverse through all repo files and apply the functionality provided in
the callback to them
'''
repo_files = []
if os.path.exists(self.opts['spm_repos_config']):
repo_files.append(self.opts['spm_repos_config'])
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk('{0}.d'.format(self.opts['spm_repos_config'])):
for repo_file in filenames:
if not repo_file.endswith('.repo'):
continue
repo_files.append(repo_file)
for repo_file in repo_files:
repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
with salt.utils.files.fopen(repo_path) as rph:
repo_data = salt.utils.yaml.safe_load(rph)
for repo in repo_data:
if repo_data[repo].get('enabled', True) is False:
continue
if repo_name is not None and repo != repo_name:
continue
callback(repo, repo_data[repo]) | 0.003481 |
def notify_update_image(self, x, y, width, height, image):
"""Informs about an update and provides 32bpp bitmap.
in x of type int
in y of type int
in width of type int
in height of type int
in image of type str
Array with 32BPP image data.
"""
if not isinstance(x, baseinteger):
raise TypeError("x can only be an instance of type baseinteger")
if not isinstance(y, baseinteger):
raise TypeError("y can only be an instance of type baseinteger")
if not isinstance(width, baseinteger):
raise TypeError("width can only be an instance of type baseinteger")
if not isinstance(height, baseinteger):
raise TypeError("height can only be an instance of type baseinteger")
if not isinstance(image, list):
raise TypeError("image can only be an instance of type list")
for a in image[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
self._call("notifyUpdateImage",
in_p=[x, y, width, height, image]) | 0.004132 |
def get_deploy_data(self):
'''
Gets any default data attached to the current deploy, if any.
'''
if self.state and self.state.deploy_data:
return self.state.deploy_data
return {} | 0.008621 |
def trim_sample(data):
"""Trim from a sample with the provided trimming method.
    Supported methods: read_through.
"""
data = utils.to_single_data(data)
trim_reads = dd.get_trim_reads(data)
# this block is to maintain legacy configuration files
if not trim_reads:
logger.info("Skipping trimming of %s." % dd.get_sample_name(data))
else:
if "skewer" in dd.get_tools_on(data) or trim_reads == "skewer":
trim_adapters = skewer.trim_adapters
else:
trim_adapters = trim.trim_adapters
out_files = trim_adapters(data)
data["files"] = out_files
return [[data]] | 0.001543 |
def std_check_in(dataset, name, allowed_vals):
"""
    Returns 0 if the attribute is not present, 1 if it is present but its value is not in allowed_vals, and 2 if the value is allowed
"""
if not hasattr(dataset, name):
return 0
ret_val = 1
if getattr(dataset, name) in allowed_vals:
ret_val += 1
return ret_val | 0.006601 |
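A small illustration of the three return codes, using a hypothetical stand-in object with a single ``units`` attribute:
class _DS:
    # stand-in "dataset" object, for illustration only
    units = "m"
assert std_check_in(_DS(), "missing", {"m"}) == 0      # attribute absent
assert std_check_in(_DS(), "units", {"km"}) == 1       # present, value not allowed
assert std_check_in(_DS(), "units", {"m", "km"}) == 2  # present and allowed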
def _generate_G_points(self, kpoint):
"""
Helper function to generate G-points based on nbmax.
        This function iterates over possible G-point values and determines
        whether the corresponding plane-wave energy is below the cutoff
        ``encut``. Valid values are appended to
the output array. This function should not be called outside of
initialization.
Args:
kpoint (np.array): the array containing the current k-point value
Returns:
a list containing valid G-points
"""
gpoints = []
for i in range(2 * self._nbmax[2] + 1):
i3 = i - 2 * self._nbmax[2] - 1 if i > self._nbmax[2] else i
for j in range(2 * self._nbmax[1] + 1):
j2 = j - 2 * self._nbmax[1] - 1 if j > self._nbmax[1] else j
for k in range(2 * self._nbmax[0] + 1):
k1 = k - 2 * self._nbmax[0] - 1 if k > self._nbmax[0] else k
G = np.array([k1, j2, i3])
v = kpoint + G
g = np.linalg.norm(np.dot(v, self.b))
E = g ** 2 / self._C
if E < self.encut:
gpoints.append(G)
return np.array(gpoints, dtype=np.float64) | 0.002412 |
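A hedged restatement of the cutoff test inside the loop above, assuming ``self._C`` absorbs the hbar**2 / (2 * m_e) prefactor and unit conversion so that ``g ** 2 / C`` is the plane-wave kinetic energy in the same units as ``encut``:
import numpy as np
def keeps_g(kpoint, G, b, C, encut):
    # |k + G| in reciprocal space, using the reciprocal lattice matrix b
    g = np.linalg.norm(np.dot(kpoint + G, b))
    # keep the plane wave only if its kinetic energy is below the cutoff
    return g ** 2 / C < encut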
def spatial_clip(catalog, corners, mindepth=None, maxdepth=None):
"""
Clip the catalog to a spatial box, can be irregular.
Can only be irregular in 2D, depth must be between bounds.
:type catalog: :class:`obspy.core.catalog.Catalog`
:param catalog: Catalog to clip.
:type corners: :class:`matplotlib.path.Path`
:param corners: Corners to clip the catalog to
:type mindepth: float
:param mindepth: Minimum depth for earthquakes in km.
:type maxdepth: float
:param maxdepth: Maximum depth for earthquakes in km.
.. Note::
Corners is expected to be a :class:`matplotlib.path.Path` in the form
of tuples of (lat, lon) in decimal degrees.
"""
cat_out = catalog.copy()
if mindepth is not None:
        # Iterate over a snapshot: removing events from cat_out.events while
        # iterating cat_out directly would skip the event after each removal.
        for event in list(cat_out):
try:
origin = _get_origin(event)
except IOError:
continue
if origin.depth < mindepth * 1000:
cat_out.events.remove(event)
if maxdepth is not None:
        for event in list(cat_out):
try:
origin = _get_origin(event)
except IOError:
continue
if origin.depth > maxdepth * 1000:
cat_out.events.remove(event)
    for event in list(cat_out):
try:
origin = _get_origin(event)
except IOError:
continue
if not corners.contains_point((origin.latitude, origin.longitude)):
cat_out.events.remove(event)
return cat_out | 0.000655 |
def organization_tags(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/tags#show-tags"
api_path = "/api/v2/organizations/{id}/tags.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | 0.007663 |
def envs(backend=None, sources=False):
'''
Return the available fileserver environments. If no backend is provided,
then the environments for all configured backends will be returned.
backend
Narrow fileserver backends to a subset of the enabled ones.
.. versionchanged:: 2015.5.0
If all passed backends start with a minus sign (``-``), then these
backends will be excluded from the enabled backends. However, if
there is a mix of backends with and without a minus sign (ex:
``backend=-roots,git``) then the ones starting with a minus
sign will be disregarded.
Additionally, fileserver backends can now be passed as a
comma-separated list. In earlier versions, they needed to be passed
as a python list (ex: ``backend="['roots', 'git']"``)
CLI Example:
.. code-block:: bash
salt-run fileserver.envs
salt-run fileserver.envs backend=roots,git
salt-run fileserver.envs git
'''
fileserver = salt.fileserver.Fileserver(__opts__)
return sorted(fileserver.envs(back=backend, sources=sources)) | 0.000859 |
def main():
"""
Main function.
"""
msg = ''
try:
songs = parse_argv()
if not songs:
msg = 'No songs specified'
except ValueError as error:
msg = str(error)
if msg:
logger.error('%s: Error: %s', sys.argv[0], msg)
return 1
logger.debug('Running with %s', songs)
try:
run(songs)
except KeyboardInterrupt:
print('Interrupted')
return 1
return 0 | 0.002165 |
def no_duplicates_sections2d(sections2d, prt=None):
"""Check for duplicate header GO IDs in the 2-D sections variable."""
no_dups = True
ctr = cx.Counter()
for _, hdrgos in sections2d:
for goid in hdrgos:
ctr[goid] += 1
for goid, cnt in ctr.most_common():
if cnt == 1:
break
no_dups = False
if prt is not None:
prt.write("**SECTIONS WARNING FOUND: {N:3} {GO}\n".format(N=cnt, GO=goid))
return no_dups | 0.004065 |
def set_harddisk_sleep(minutes):
'''
    Set the amount of idle time until the harddisk sleeps. Pass "Never" or "Off"
to never sleep.
:param minutes: Can be an integer between 1 and 180 or "Never" or "Off"
:ptype: int, str
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' power.set_harddisk_sleep 120
salt '*' power.set_harddisk_sleep off
'''
value = _validate_sleep(minutes)
cmd = 'systemsetup -setharddisksleep {0}'.format(value)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(
str(value),
get_harddisk_sleep,
) | 0.002849 |
def launch_satellite(cli):
"""Deploys a new satellite app over any existing app"""
cli.info("Launching skypipe satellite:")
finish = wait_for(" Pushing to dotCloud")
# destroy any existing satellite
destroy_satellite(cli)
# create new satellite app
url = '/applications'
try:
cli.user.post(url, {
'name': APPNAME,
'flavor': 'sandbox'
})
except RESTAPIError as e:
if e.code == 409:
cli.die('Application "{0}" already exists.'.format(APPNAME))
else:
cli.die('Creating application "{0}" failed: {1}'.format(APPNAME, e))
class args: application = APPNAME
#cli._connect(args)
# push satellite code
protocol = 'rsync'
url = '/applications/{0}/push-endpoints{1}'.format(APPNAME, '')
endpoint = cli._select_endpoint(cli.user.get(url).items, protocol)
class args: path = satellite_path
cli.push_with_rsync(args, endpoint)
# tell dotcloud to deploy, then wait for it to finish
revision = None
clean = False
url = '/applications/{0}/deployments'.format(APPNAME)
response = cli.user.post(url, {'revision': revision, 'clean': clean})
deploy_trace_id = response.trace_id
deploy_id = response.item['deploy_id']
original_stdout = sys.stdout
finish = wait_for(" Waiting for deployment", finish, original_stdout)
try:
sys.stdout = StringIO()
res = cli._stream_deploy_logs(APPNAME, deploy_id,
deploy_trace_id=deploy_trace_id, follow=True)
if res != 0:
return res
except KeyboardInterrupt:
cli.error('You\'ve closed your log stream with Ctrl-C, ' \
'but the deployment is still running in the background.')
cli.error('If you aborted because of an error ' \
'(e.g. the deployment got stuck), please e-mail\n' \
'[email protected] and mention this trace ID: {0}'
.format(deploy_trace_id))
cli.error('If you want to continue following your deployment, ' \
'try:\n{0}'.format(
cli._fmt_deploy_logs_command(deploy_id)))
cli.die()
except RuntimeError:
# workaround for a bug in the current dotcloud client code
pass
finally:
sys.stdout = original_stdout
finish = wait_for(" Satellite coming online", finish)
endpoint = lookup_endpoint(cli)
ok = client.check_skypipe_endpoint(endpoint, 120)
finish.set()
time.sleep(0.1) # sigh, threads
if ok:
return endpoint
else:
cli.die("Satellite failed to come online") | 0.007173 |
def attach(self, id, filename, url):
"""Add an attachmemt to record from url
:param id: ID of record
:param filename: File name of attachment
:param url: Public url to download file from.
"""
Attachment = self.client.model('ir.attachment')
return Attachment.add_attachment_from_url(
filename, url, '%s,%s' % (self.model_name, id)
) | 0.004902 |
def create(model_config, epochs, optimizer, model, source, storage, scheduler=None, callbacks=None, max_grad_norm=None):
""" Vel factory function """
return SimpleTrainCommand(
epochs=epochs,
model_config=model_config,
model_factory=model,
optimizer_factory=optimizer,
scheduler_factory=scheduler,
source=source,
storage=storage,
callbacks=callbacks,
max_grad_norm=max_grad_norm
) | 0.00431 |
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes | 0.004367 |
def validate_configuration(self):
""" Runs :meth:`arca.DockerBackend.validate_configuration` and checks extra:
* ``box`` format
* ``provider`` format
* ``use_registry_name`` is set and ``registry_pull_only`` is not enabled.
"""
super().validate_configuration()
if self.use_registry_name is None:
raise ArcaMisconfigured("Use registry name setting is required for VagrantBackend")
if not re.match(r"^[a-z]+/[a-zA-Z0-9\-_]+$", self.box):
raise ArcaMisconfigured("Provided Vagrant box is not valid")
if not re.match(r"^[a-z_]+$", self.provider):
raise ArcaMisconfigured("Provided Vagrant provider is not valid")
if self.registry_pull_only:
raise ArcaMisconfigured("Push must be enabled for VagrantBackend") | 0.005981 |
def set_variable(self, name, expression_or_value, write=True):
"""Set the variable to an expression or value defined by expression_or_value.
Example
>>> df.set_variable("a", 2.)
>>> df.set_variable("b", "a**2")
>>> df.get_variable("b")
'a**2'
>>> df.evaluate_variable("b")
4.0
:param name: Name of the variable
:param write: write variable to meta file
        :param expression_or_value: value or expression
"""
self.variables[name] = expression_or_value | 0.005505 |
def subscribe(self, feedUrl):
"""
Adds a feed to the top-level subscription list
        Subscribing seems idempotent; you can subscribe multiple times
        without error.
        Returns True or throws HTTPError.
"""
response = self.httpPost(
ReaderUrl.SUBSCRIPTION_EDIT_URL,
{'ac':'subscribe', 's': feedUrl})
# FIXME - need better return API
if response and 'OK' in response:
return True
else:
return False | 0.005837 |
async def enable(self, reason=None):
"""Resumes normal operation
Parameters:
reason (str): Reason of enabling
Returns:
bool: ``True`` on success
"""
params = {"enable": False, "reason": reason}
response = await self._api.put("/v1/agent/maintenance", params=params)
return response.status == 200 | 0.005333 |
def from_abinit_ixc(cls, ixc):
"""Build the object from Abinit ixc (integer)"""
ixc = int(ixc)
if ixc >= 0:
return cls(**cls.abinitixc_to_libxc[ixc])
else:
# libxc notation employed in Abinit: a six-digit number in the form XXXCCC or CCCXXX
#ixc = str(ixc)
#assert len(ixc[1:]) == 6
#first, last = ixc[1:4], ixc[4:]
ixc = abs(ixc)
first = ixc // 1000
last = ixc - first * 1000
x, c = LibxcFunc(int(first)), LibxcFunc(int(last))
if not x.is_x_kind: x, c = c, x # Swap
assert x.is_x_kind and c.is_c_kind
return cls(x=x, c=c) | 0.010014 |
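A short worked example of the negative-``ixc`` decoding above; the value -101130 (libxc exchange 101 combined with correlation 130) is an assumed illustration, and the ``LibxcFunc`` lookup and x/c swap are omitted:
ixc = abs(-101130)
first = ixc // 1000         # -> 101, the first three digits (XXX)
last = ixc - first * 1000   # -> 130, the last three digits (CCC)
assert (first, last) == (101, 130)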
def filter_(input_, filename='<internal>', state='INITIAL'):
""" Filter the input string thought the preprocessor.
result is appended to OUTPUT global str
"""
global CURRENT_DIR
prev_dir = CURRENT_DIR
CURRENT_FILE.append(filename)
CURRENT_DIR = os.path.dirname(CURRENT_FILE[-1])
LEXER.input(input_, filename)
LEXER.lex.begin(state)
parser.parse(lexer=LEXER, debug=OPTIONS.Debug.value > 2)
CURRENT_FILE.pop()
CURRENT_DIR = prev_dir | 0.002088 |
def keywords(self) -> Set[str]:
"""A set of all keywords of all handled devices.
In addition to attribute access via device names, |Nodes| and
|Elements| objects allow for attribute access via keywords,
allowing for an efficient search of certain groups of devices.
        Let us use the example from above, where the nodes `na` and `nb`
        have no keywords, while each of the other three nodes belongs both
        to one of `group_a` or `group_b` and to one of `group_1` or `group_2`:
>>> from hydpy import Node, Nodes
>>> nodes = Nodes('na',
... Node('nb', variable='W'),
... Node('nc', keywords=('group_a', 'group_1')),
... Node('nd', keywords=('group_a', 'group_2')),
... Node('ne', keywords=('group_b', 'group_1')))
>>> nodes
Nodes("na", "nb", "nc", "nd", "ne")
>>> sorted(nodes.keywords)
['group_1', 'group_2', 'group_a', 'group_b']
If you are interested in inspecting all devices belonging to
        `group_1`, select them via this keyword:
>>> subgroup = nodes.group_1
>>> subgroup
Nodes("nc", "ne")
You can further restrict the search by also selecting the devices
        belonging to `group_b`, which holds only for node "ne", in the given
example:
>>> subsubgroup = subgroup.group_b
>>> subsubgroup
Node("ne", variable="Q",
keywords=["group_1", "group_b"])
Note that the keywords already used for building a device subgroup
are not informative anymore (as they hold for each device) and are
thus not shown anymore:
>>> sorted(subgroup.keywords)
['group_a', 'group_b']
The latter might be confusing if you intend to work with a device
subgroup for a longer time. After copying the subgroup, all
keywords of the contained devices are available again:
>>> from copy import copy
>>> newgroup = copy(subgroup)
>>> sorted(newgroup.keywords)
['group_1', 'group_a', 'group_b']
"""
return set(keyword for device in self
for keyword in device.keywords if
keyword not in self._shadowed_keywords) | 0.000868 |
def load_transport(self, url):
'''
For remote communication. Sets the communication dispatcher of the host
at the address and port specified.
The scheme must be http if using a XMLRPC dispatcher.
amqp for RabbitMQ communications.
This methos is internal. Automatically called when creating the host.
:param str. url: URL where to bind the host. Must be provided in
the tipical form: 'scheme://address:port/hierarchical_path'
'''
aurl = urlparse(url)
addrl = aurl.netloc.split(':')
self.addr = addrl[0], addrl[1]
self.transport = aurl.scheme
self.host_url = aurl
if aurl.scheme == 'http':
self.launch_actor('http', rpcactor.RPCDispatcher(url, self, 'rpc'))
elif aurl.scheme == 'amqp':
self.launch_actor('amqp', rpcactor.RPCDispatcher(url, self,
'rabbit')) | 0.002049 |
def structureChunk(keywords, resultDict, lines):
"""
Parse Weir and Culvert Structures Method
"""
chunks = pt.chunk(keywords, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Strip and split the line (only one item in each list)
schunk = chunk[0].strip().split()
# Extract values and assign to appropriate key in resultDict
resultDict[key.lower()] = schunk[1]
return resultDict | 0.001742 |
def _login(self):
"""
Login using username / password and get the first auth token
"""
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "application/json"
}
url = self.api_base_url + "account/directlogin"
data = {'Email': self.username,
'Password': self.password,
'RememberMe': 'True'}
response = requests.post(url, data=data, headers=headers, timeout=10)
if response.status_code != 200:
return False
self.login_data = response.json()
if not self.login_data['isSuccess']:
self.login_data = None
return False
if ('token' in self.login_data
and 'accessToken' in self.login_data['token']):
self.home_id = self.login_data['token']['currentHomeId']
self.user_id = self.login_data['token']['userId']
return True
self.login_data = None
return False | 0.001942 |