def get_account_state(self, address, **kwargs):
""" Returns the account state information associated with a specific address.
    :param address: a 34-character address (e.g. AJBENSwajTzQtwyJFkiJSv7MAaaMc7DsRz)
:type address: str
:return: dictionary containing the account state information
:rtype: dict
"""
    return self._call(JSONRPCMethods.GET_ACCOUNT_STATE.value, params=[address, ], **kwargs)
def equals(self, other):
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if not isinstance(other, MultiIndex):
other_vals = com.values_from_object(ensure_index(other))
return array_equivalent(self._ndarray_values, other_vals)
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
self_codes = self_codes[self_codes != -1]
self_values = algos.take_nd(np.asarray(self.levels[i]._values),
self_codes, allow_fill=False)
other_codes = other.codes[i]
other_codes = other_codes[other_codes != -1]
other_values = algos.take_nd(
np.asarray(other.levels[i]._values),
other_codes, allow_fill=False)
        # since we use NaT for both datetime64 and timedelta64,
        # a level may be typed, say, timedelta64 in self (i.e. it
        # holds values other than NaT) but datetime64 in other
        # (where it is all NaT); nevertheless the two are equivalent
if len(self_values) == 0 and len(other_values) == 0:
continue
if not array_equivalent(self_values, other_values):
return False
    return True
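# Illustrative usage of the comparison above (a hedged sketch assuming a
# pandas environment; not part of the original source): two MultiIndex
# objects with the same labeling compare equal even if built separately.
import pandas as pd

left = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)])
right = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)])
assert left.equals(right)
assert not left.equals(pd.MultiIndex.from_tuples([("a", 1), ("c", 3)]))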
def _get_remote_import_paths(self, pkg, gopath=None):
"""Returns the remote import paths declared by the given remote Go `pkg`.
NB: This only includes production code imports, no test code imports.
"""
import_listing = self.import_oracle.list_imports(pkg, gopath=gopath)
return [imp for imp in import_listing.imports
if (not self.import_oracle.is_go_internal_import(imp) and
# We assume relative imports are local to the package and skip attempts to
# recursively resolve them.
                not self._is_relative(imp))]
def pearson(logu, name=None):
"""The Pearson Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Pearson Csiszar-function is:
```none
f(u) = (u - 1)**2
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
pearson_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
"""
with tf.compat.v1.name_scope(name, "pearson", [logu]):
logu = tf.convert_to_tensor(value=logu, name="logu")
        return tf.square(tf.math.expm1(logu))
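# Quick numeric check of the function above (illustrative; assumes TensorFlow
# and the pearson() definition above are available): at u = 2 we expect
# f(u) = (u - 1)**2 = 1, and indeed expm1(log 2) = 1.
import numpy as np
import tensorflow as tf

logu = tf.constant(np.log(2.0))
print(float(pearson(logu)))  # 1.0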
def do_until(self, arg):
"""unt(il) [lineno]
Without argument, continue execution until the line with a
number greater than the current one is reached. With a line
number, continue execution until a line with a number greater
or equal to that is reached. In both cases, also stop when
the current frame returns.
"""
if arg:
try:
lineno = int(arg)
except ValueError:
self.error('Error in argument: %r' % arg)
return
if lineno <= self.curframe.f_lineno:
self.error('"until" line number is smaller than current '
'line number')
return
else:
lineno = None
self.set_until(self.curframe, lineno)
self.set_sigint_handler()
    return 1
def _get_mean(self, sites, C, ln_y_ref, exp1, exp2, v1):
"""
Add site effects to an intensity.
Implements eq. 5
"""
# we do not support estimating of basin depth and instead
# rely on it being available (since we require it).
z1pt0 = sites.z1pt0
# we consider random variables being zero since we want
# to find the exact mean value.
eta = epsilon = 0
ln_y = (
# first line of eq. 13b
ln_y_ref + C['phi1'] *
np.log(np.clip(sites.vs30, -np.inf, v1) / 1130)
# second line
+ C['phi2'] * (exp1 - exp2)
* np.log((np.exp(ln_y_ref) + C['phi4']) / C['phi4'])
# third line
+ C['phi5']
* (1.0 - 1.0 / np.cosh(
C['phi6'] * (z1pt0 - C['phi7']).clip(0, np.inf)))
+ C['phi8'] / np.cosh(0.15 * (z1pt0 - 15).clip(0, np.inf))
# fourth line
+ eta + epsilon
)
    return ln_y
def regular_half(circuit: circuits.Circuit) -> circuits.Circuit:
"""Return only the Clifford part of a circuit. See
convert_and_separate_circuit().
Args:
circuit: A Circuit with the gate set {SingleQubitCliffordGate,
PauliInteractionGate, PauliStringPhasor}.
Returns:
A Circuit with SingleQubitCliffordGate and PauliInteractionGate gates.
It also contains MeasurementGates if the given
circuit contains measurements.
"""
return circuits.Circuit(
ops.Moment(op
for op in moment.operations
if not isinstance(op, ops.PauliStringPhasor))
        for moment in circuit)
def calculate_border_width(self):
"""
Calculate the width of the menu border. This will be the width of the maximum allowable
dimensions (usually the screen size), minus the left and right margins and the newline character.
For example, given a maximum width of 80 characters, with left and right margins both
set to 1, the border width would be 77 (80 - 1 - 1 - 1 = 77).
Returns:
int: the menu border width in columns.
"""
    return self.max_dimension.width - self.margins.left - self.margins.right - 1
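# Worked example for the arithmetic above (a sketch using hypothetical
# stand-ins for the real dimension/margin objects, which are assumptions):
from collections import namedtuple

_Dim = namedtuple('_Dim', 'width')
_Margins = namedtuple('_Margins', 'left right')

max_dimension, margins = _Dim(80), _Margins(1, 1)
print(max_dimension.width - margins.left - margins.right - 1)  # 77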
def as_dict(self):
""" returns a dictionary view of the option
:returns: the option converted in a dict
:rtype: dict
"""
info = {}
info["type"] = self.__class__.__name__
info["help"] = self.help
info["default"] = self.default
info["multi"] = self.multi
info["uniq"] = self.uniq
info["choices"] = self.choices
    # TODO: recursive call on the attrs
#info["attrs"] = self.attrs
    return info
def delete(self, file_id):
"""Delete a file from GridFS by ``"_id"``.
Deletes all data belonging to the file with ``"_id"``:
`file_id`.
.. warning:: Any processes/threads reading from the file while
this method is executing will likely see an invalid/corrupt
file. Care should be taken to avoid concurrent reads to a file
while it is being deleted.
.. note:: Deletes of non-existent files are considered successful
since the end result is the same: no file with that _id remains.
:Parameters:
- `file_id`: ``"_id"`` of the file to delete
.. versionchanged:: 3.1
``delete`` no longer ensures indexes.
"""
self.__files.delete_one({"_id": file_id})
    self.__chunks.delete_many({"files_id": file_id})
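# Hedged usage sketch (requires a running MongoDB server and pymongo, so it
# is left commented out; the names are the standard pymongo/gridfs API):
# import gridfs
# from pymongo import MongoClient
# fs = gridfs.GridFS(MongoClient().my_database)
# file_id = fs.put(b"payload")
# fs.delete(file_id)  # removes both the files document and its chunks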
def catalog_to_cells(catalog, radius, order, include_fallback=True, **kwargs):
"""
Convert a catalog to a set of cells.
This function is intended to be used via `catalog_to_moc` but
is available for separate usage. It takes the same arguments
as that function.
This function uses the Healpy `query_disc` function to get a list
of cells for each item in the catalog in turn. Additional keyword
arguments, if specified, are passed to `query_disc`. This can include,
for example, `inclusive` (set to `True` to include cells overlapping
the radius as well as those with centers within it) and `fact`
(to control sampling when `inclusive` is specified).
If cells at the given order are bigger than the given radius, then
`query_disc` may find none inside the radius. In this case,
if `include_fallback` is `True` (the default), the cell at each
position is included.
If the given radius is zero (or smaller) then Healpy `query_disc`
is not used -- instead the fallback position is used automatically.
"""
nside = 2 ** order
# Ensure catalog is in ICRS coordinates.
catalog = catalog.icrs
# Ensure radius is in radians.
if isinstance(radius, Quantity):
radius = radius.to(radian).value
else:
radius = radius * pi / (180.0 * 3600.0)
# Convert coordinates to position vectors.
phi = catalog.ra.radian
theta = (pi / 2) - catalog.dec.radian
vectors = ang2vec(theta, phi)
# Ensure we can iterate over vectors (it might be a single position).
if catalog.isscalar:
vectors = [vectors]
# Query for a list of cells for each catalog position.
cells = set()
for vector in vectors:
if radius > 0.0:
# Try "disc" query.
vector_cells = query_disc(nside, vector, radius, nest=True, **kwargs)
if vector_cells.size > 0:
cells.update(vector_cells.tolist())
continue
elif not include_fallback:
continue
# The query didn't find anything -- include the cell at the
# given position at least.
cell = vec2pix(nside, vector[0], vector[1], vector[2], nest=True)
cells.add(cell.item())
    return cells
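# Hedged usage sketch for the function above (assumes astropy and healpy are
# installed and catalog_to_cells is importable; the values are illustrative):
# from astropy.coordinates import SkyCoord
# from astropy.units import arcsec
# catalog = SkyCoord(ra=10.68, dec=41.27, unit='deg')
# cells = catalog_to_cells(catalog, radius=30 * arcsec, order=10)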
def set_instance_erred(self, instance, error_message):
""" Mark instance as erred and save error message """
instance.set_erred()
instance.error_message = error_message
    instance.save(update_fields=['state', 'error_message'])
def _discover_toc(zf, opf_xmldoc, opf_filepath):
'''
Returns a list of objects: {title: str, src: str, level: int, index: int}
'''
toc = None
# ePub 3.x
tag = find_tag(opf_xmldoc, 'item', 'properties', 'nav')
if tag and 'href' in tag.attributes.keys():
filepath = unquote(tag.attributes['href'].value)
# The xhtml file path is relative to the OPF file
base_dir = os.path.dirname(opf_filepath)
# print('- Reading Nav file: {}/{}'.format(base_dir, filepath))
npath = os.path.normpath(os.path.join(base_dir, filepath))
nav_content = zf.read(npath)
toc_xmldoc = minidom.parseString(nav_content)
_toc = []
for n in toc_xmldoc.getElementsByTagName('a'):
if n.firstChild and ('href' in n.attributes.keys()):
href = unquote(n.attributes['href'].value)
# Discarding CFI links
if '.html' in href or '.xhtml' in href:
title = n.firstChild.nodeValue
# try the second node too (maybe the first child is an empty span)
if not title and n.firstChild.firstChild:
title = n.firstChild.firstChild.nodeValue
title = title.strip() if title else None
if title:
level = -1
parentNode = n.parentNode
                        avoid_infinite_loop = 0  # guard against infinite loops in malformed epub files
while parentNode and parentNode.nodeName != 'nav' and avoid_infinite_loop < 50:
if parentNode.nodeName == 'ol': # count the depth of the a link related to ol items
level += 1
parentNode = parentNode.parentNode
avoid_infinite_loop += 1
level = max(level, 0) # root level is 0, not -1
_toc.append({'title': title, 'src': href, 'level': level})
if _toc:
toc = _toc
if not toc:
# ePub 2.x
tag = find_tag(opf_xmldoc, 'item', 'id', 'ncx')
if not tag:
tag = find_tag(opf_xmldoc, 'item', 'id', 'ncxtoc')
if tag and 'href' in tag.attributes.keys():
filepath = unquote(tag.attributes['href'].value)
# The ncx file path is relative to the OPF file
base_dir = os.path.dirname(opf_filepath)
# print('- Reading NCX file: {}/{}'.format(base_dir, filepath))
npath = os.path.normpath(os.path.join(base_dir, filepath))
ncx_content = zf.read(npath)
toc_xmldoc = minidom.parseString(ncx_content)
def read_nav_point(nav_point_node, level = 0):
items = []
item = {'title': None, 'src': None, 'level': level}
children_points = []
for item_node in nav_point_node.childNodes:
if item_node.nodeName in ('navLabel', 'ncx:navLabel'):
try:
text = item_node.getElementsByTagName('text')[0].firstChild
except IndexError:
try:
text = item_node.getElementsByTagName('ncx:text')[0].firstChild
except IndexError:
text = None
item['title'] = text.nodeValue.strip() if text and text.nodeValue else None
elif item_node.nodeName in ('content', 'ncx:content'):
if item_node.hasAttribute('src'):
item['src'] = item_node.attributes['src'].value
elif item_node.nodeName in ('navPoint', 'ncx:navPoint'):
children_points.append(item_node)
if item['title']:
items.append(item)
for child_node in children_points:
subitems = read_nav_point(child_node, level=level + 1)
items.extend(subitems)
return items
def read_nav_map(toc_xmldoc, level=0):
items = []
try:
nav_map_node = toc_xmldoc.getElementsByTagName('navMap')[0]
except IndexError:
# Some ebooks use the ncx: namespace so try that too
try:
nav_map_node = toc_xmldoc.getElementsByTagName('ncx:navMap')[0]
except IndexError:
print('Failed reading TOC')
return items
for nav_point in nav_map_node.childNodes:
if nav_point.nodeName in ('navPoint', 'ncx:navPoint'):
subitems = read_nav_point(nav_point, level=level)
items.extend(subitems)
return items
toc = read_nav_map(toc_xmldoc)
# add indexes
if toc:
for i, t in enumerate(toc):
t['index'] = i
    return toc
def series(identifier=None, **kwargs):
"""Get an economic data series."""
if identifier:
kwargs['series_id'] = identifier
if 'release' in kwargs:
kwargs.pop('release')
path = 'release'
elif 'releases' in kwargs:
kwargs.pop('releases')
path = 'release'
else:
path = None
    return Fred().series(path, **kwargs)
def unmapped(name,
config='/etc/crypttab',
persist=True,
immediate=False):
'''
Ensure that a device is unmapped
name
The name to ensure is not mapped
config
Set an alternative location for the crypttab, if the map is persistent,
Default is ``/etc/crypttab``
persist
Set if the map should be removed from the crypttab. Default is ``True``
immediate
Set if the device should be unmapped immediately. Default is ``False``.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if immediate:
# Get the active crypt mounts. If ours is not listed already, no action is necessary.
active = __salt__['cryptdev.active']()
if name in active.keys():
# Close the map using cryptsetup.
if __opts__['test']:
ret['result'] = None
                ret['comment'] = 'Device would be unmapped immediately'
else:
cryptsetup_result = __salt__['cryptdev.close'](name)
if cryptsetup_result:
ret['changes']['cryptsetup'] = 'Device unmapped using cryptsetup'
else:
ret['changes']['cryptsetup'] = 'Device failed to unmap using cryptsetup'
ret['result'] = False
if persist and not __opts__['test']:
crypttab_result = __salt__['cryptdev.rm_crypttab'](name, config=config)
if crypttab_result:
if crypttab_result == 'change':
ret['changes']['crypttab'] = 'Entry removed from {0}'.format(config)
else:
ret['changes']['crypttab'] = 'Unable to remove entry in {0}'.format(config)
ret['result'] = False
    return ret
def add_alias(agent, prefix, alias):
"""Adds an alias mapping with a contract.
It has high latency but gives some kind of guarantee."""
return _broadcast(agent, AddMappingManager, RecordType.record_CNAME,
                      prefix, alias)
def check_initial_subdomain(cls, subdomain_rec):
"""
Verify that a first-ever subdomain record is well-formed.
* n must be 0
* the subdomain must not be independent of its domain
"""
if subdomain_rec.n != 0:
return False
if subdomain_rec.independent:
return False
    return True
def plot_eeg_erp(all_epochs, conditions=None, times=None, include="all", exclude=None, hemisphere="both", central=True, name=None, colors=None, gfp=False, ci=0.95, ci_alpha=0.333, invert_y=False, linewidth=1, linestyle="-", filter_hfreq=None):
"""
DOCS INCOMPLETE :(
"""
# Preserve original
all_epochs_current = all_epochs.copy()
# Filter using Savitzky-Golay polynomial method
if (filter_hfreq is not None) and (isinstance(filter_hfreq, int)):
for participant, epochs in all_epochs_current.items():
all_epochs_current[participant] = epochs.savgol_filter(filter_hfreq, copy=True)
# Crop
if isinstance(times, list) and len(times) == 2:
for participant, epochs in all_epochs_current.items():
all_epochs_current[participant] = epochs.copy().crop(times[0], times[1])
# Transform to evokeds
all_evokeds = eeg_to_all_evokeds(all_epochs_current, conditions=conditions)
data = {}
for participant, epochs in all_evokeds.items():
for condition, epoch in epochs.items():
data[condition] = []
for participant, epochs in all_evokeds.items():
for condition, epoch in epochs.items():
data[condition].append(epoch)
conditions = list(data.keys())
# Line styles
if isinstance(linestyle, str):
linestyle = [linestyle] * len(conditions)
elif isinstance(linestyle, list) and len(linestyle) >= len(conditions):
pass
elif isinstance(linestyle, dict) and len(linestyle.keys()) >= len(conditions):
linestyle = [linestyle[cond] for cond in conditions]
else:
print("NeuroKit Warning: plot_eeg_erp(): linestyle must be either a str, a list or a dict.")
# Colors
if isinstance(colors, str):
colors = {condition: colors for condition in conditions}
elif isinstance(colors, list) and len(colors) >= len(conditions):
        colors = {condition: colors[index] for index, condition in enumerate(conditions)}
elif isinstance(colors, dict) and len(colors.keys()) >= len(conditions):
pass
elif colors is None:
pass
else:
print("NeuroKit Warning: plot_eeg_erp(): colors must be either a str, a list, a dict or None.")
# Modify styles
styles = {}
for index, condition in enumerate(conditions):
styles[condition] = {"linewidth": linewidth, "linestyle": linestyle[index]}
# Select electrodes
picks = mne.pick_types(epoch.info, eeg=True, selection=eeg_select_electrodes(epoch, include=include, exclude=exclude, hemisphere=hemisphere, central=central))
# Plot
try:
plot = mne.viz.plot_compare_evokeds(data, picks=picks, colors=colors, styles=styles, title=name, gfp=gfp, ci=ci, invert_y=invert_y, ci_alpha=ci_alpha)
except TypeError:
print("NeuroKit Warning: plot_eeg_erp(): You're using a version of mne that does not support ci_alpha or ci_method parameters. Leaving defaults.")
plot = mne.viz.plot_compare_evokeds(data, picks=picks, colors=colors, styles=styles, title=name, gfp=gfp, ci=ci, invert_y=invert_y)
    return plot
def list(options):
"""
show all currently running jobs
"""
configuration = config.get_default()
app_url = configuration['app_url']
    if options.deployment is not None:
deployment_name = options.deployment
else:
deployment_name = configuration['deployment_name']
client_id = configuration['client_id']
client_secret = configuration['client_secret']
token_manager = auth.TokenManager(client_id=client_id,
client_secret=client_secret,
app_url=app_url)
jobs = data_engine.get_jobs(deployment_name,
token_manager=token_manager,
app_url=app_url)
if len(jobs) == 0:
error('No running jobs')
else:
        _print_jobs(jobs, token_manager, app_url, options)
def interleave_longest(*iterables):
"""Return a new iterable yielding from each iterable in turn,
skipping any that are exhausted.
>>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7, 3, 8]
This function produces the same output as :func:`roundrobin`, but may
perform better for some inputs (in particular when the number of iterables
is large).
"""
i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
    return (x for x in i if x is not _marker)
def _tool_to_dict(tool, records, remapped):
"""Parse a tool definition into a cwl2wdl style dictionary.
"""
requirements = _requirements_to_dict(tool.requirements + tool.hints)
inputs = []
outputs = []
for inp in tool.tool["inputs"]:
ready_inp, records = _input_to_dict(inp, records, remapped)
inputs.append(ready_inp)
for outp in tool.tool["outputs"]:
ready_outp, records = _output_to_dict(outp, records)
outputs.append(ready_outp)
out = {"name": _id_to_name(tool.tool["id"]),
"baseCommand": " ".join(tool.tool["baseCommand"]),
"arguments": [_arg_to_dict(a, requirements) for a in tool.tool["arguments"]],
"inputs": inputs,
"outputs": outputs,
"requirements": requirements,
"stdin": None, "stdout": None}
    return out, records
def close(self):
"""Force close all Channels and cancel all Operations
"""
if self._Q is not None:
for T in self._T:
self._Q.interrupt()
for n, T in enumerate(self._T):
_log.debug('Join Context worker %d', n)
T.join()
_log.debug('Joined Context workers')
self._Q, self._T = None, None
        super(Context, self).close()
def make_links(self, base_url='..'):
"""
Substitute intrasite links to documentation for other parts of
the program.
"""
ford.sourceform.set_base_url(base_url)
for src in self.allfiles:
        src.make_links(self)
def handle_offchain_secretreveal(
target_state: TargetTransferState,
state_change: ReceiveSecretReveal,
channel_state: NettingChannelState,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
) -> TransitionResult[TargetTransferState]:
""" Validates and handles a ReceiveSecretReveal state change. """
valid_secret = is_valid_secret_reveal(
state_change=state_change,
transfer_secrethash=target_state.transfer.lock.secrethash,
secret=state_change.secret,
)
has_transfer_expired = channel.is_transfer_expired(
transfer=target_state.transfer,
affected_channel=channel_state,
block_number=block_number,
)
if valid_secret and not has_transfer_expired:
channel.register_offchain_secret(
channel_state=channel_state,
secret=state_change.secret,
secrethash=state_change.secrethash,
)
route = target_state.route
message_identifier = message_identifier_from_prng(pseudo_random_generator)
target_state.state = TargetTransferState.OFFCHAIN_SECRET_REVEAL
target_state.secret = state_change.secret
recipient = route.node_address
reveal = SendSecretReveal(
recipient=recipient,
channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE,
message_identifier=message_identifier,
secret=target_state.secret,
)
iteration = TransitionResult(target_state, [reveal])
else:
# TODO: event for byzantine behavior
iteration = TransitionResult(target_state, list())
    return iteration
def zeroscreen(self, focus_stage=None):
"""
Remove all points containing data below zero (which are impossible!)
"""
if focus_stage is None:
focus_stage = self.focus_stage
for s in self.data.values():
ind = np.ones(len(s.Time), dtype=bool)
for v in s.data[focus_stage].values():
ind = ind & (nominal_values(v) > 0)
for k in s.data[focus_stage].keys():
s.data[focus_stage][k][~ind] = unc.ufloat(np.nan, np.nan)
self.set_focus(focus_stage)
    return
def check_validation_level(validation_level):
"""
Validate the given validation level
:type validation_level: ``int``
:param validation_level: validation level (see :class:`hl7apy.consts.VALIDATION_LEVEL`)
:raises: :exc:`hl7apy.exceptions.UnknownValidationLevel` if the given validation level is unsupported
"""
if validation_level not in (VALIDATION_LEVEL.QUIET, VALIDATION_LEVEL.STRICT, VALIDATION_LEVEL.TOLERANT):
        raise UnknownValidationLevel
def get_sorted_dependencies(service_model):
"""
Returns list of application models in topological order.
It is used in order to correctly delete dependent resources.
"""
app_models = list(service_model._meta.app_config.get_models())
dependencies = {model: set() for model in app_models}
relations = (
relation
for model in app_models
for relation in model._meta.related_objects
if relation.on_delete in (models.PROTECT, models.CASCADE)
)
for rel in relations:
dependencies[rel.model].add(rel.related_model)
    return stable_topological_sort(app_models, dependencies)
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
if 'decorate' in config:
depr("The 'decorate' parameter was renamed to 'apply'") # 0.9
plugins += makelist(config.pop('decorate'))
if config.pop('no_hooks', False):
depr("The no_hooks parameter is no longer used. Add 'hooks' to the"\
" list of skipped plugins instead.") # 0.9
skiplist.append('hooks')
static = config.get('static', False) # depr 0.9
def decorator(callback):
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
cfg = dict(rule=rule, method=verb, callback=callback,
name=name, app=self, config=config,
apply=plugins, skip=skiplist)
self.routes.append(cfg)
cfg['id'] = self.routes.index(cfg)
self.router.add(rule, verb, cfg['id'], name=name, static=static)
if DEBUG: self.ccache[cfg['id']] = self._build_callback(cfg)
return callback
    return decorator(callback) if callback else decorator
def get_variable_nodes(self):
"""
Returns variable nodes present in the graph.
Before calling this method make sure that all the factors are added
properly.
Examples
--------
>>> from pgmpy.models import FactorGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = FactorGraph()
>>> G.add_nodes_from(['a', 'b', 'c'])
>>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi2 = DiscreteFactor(['b', 'c'], [2, 2], np.random.rand(4))
>>> G.add_nodes_from([phi1, phi2])
>>> G.add_factors(phi1, phi2)
>>> G.add_edges_from([('a', phi1), ('b', phi1),
... ('b', phi2), ('c', phi2)])
>>> G.get_variable_nodes()
    ['a', 'b', 'c']
"""
self.check_model()
variable_nodes = set([x for factor in self.factors for x in factor.scope()])
    return list(variable_nodes)
def from_epw(cls, buffer_or_path):
"""
Parameters
----------
buffer_or_path: buffer or path containing epw format.
Returns
-------
WeatherData instance.
"""
from .epw_parse import parse_epw
_, buffer = to_buffer(buffer_or_path)
with buffer as f:
        return parse_epw(f)
def width_normalize(self, width):
""" Handle a width style, which can be a fractional number
representing a percentage of available width or positive integers
which indicate a fixed width. """
if width is not None:
        if 0 < width < 1:
return int(width * self.usable_width)
else:
            return int(width)
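# Minimal sketch of the two width styles above (the _Demo class and its
# usable_width value are assumptions for illustration only):
class _Demo:
    usable_width = 100
    width_normalize = width_normalize  # borrow the method defined above

print(_Demo().width_normalize(0.25))  # fractional -> 25 (25% of 100 columns)
print(_Demo().width_normalize(40))    # integer    -> 40 (fixed width)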
def initialize_new_session():
"""Check session and initialize if necessary
Before every request, check the user session. If no session exists, add
one and provide temporary locations for images
"""
if 'image_uid_counter' in session and 'image_list' in session:
logger.debug('images are already being tracked')
else:
# reset image list counter for the session
session['image_uid_counter'] = 0
session['image_list'] = []
if 'img_input_dir' in session and 'img_output_dir' in session:
logger.debug('temporary image directories already exist')
else:
# make image upload directory
session['img_input_dir'] = mkdtemp()
        session['img_output_dir'] = mkdtemp()
def decode_network(objects):
"""Return root object from ref-containing obj table entries"""
def resolve_ref(obj, objects=objects):
if isinstance(obj, Ref):
# first entry is 1
return objects[obj.index - 1]
else:
return obj
# Reading the ObjTable backwards somehow makes more sense.
for i in xrange(len(objects)-1, -1, -1):
obj = objects[i]
if isinstance(obj, Container):
obj.update((k, resolve_ref(v)) for (k, v) in obj.items())
elif isinstance(obj, Dictionary):
obj.value = dict(
(resolve_ref(field), resolve_ref(value))
for (field, value) in obj.value.items()
)
elif isinstance(obj, dict):
obj = dict(
(resolve_ref(field), resolve_ref(value))
for (field, value) in obj.items()
)
elif isinstance(obj, list):
obj = [resolve_ref(field) for field in obj]
elif isinstance(obj, Form):
for field in obj.value:
value = getattr(obj, field)
value = resolve_ref(value)
setattr(obj, field, value)
elif isinstance(obj, ContainsRefs):
obj.value = [resolve_ref(field) for field in obj.value]
objects[i] = obj
for obj in objects:
if isinstance(obj, Form):
obj.built()
root = objects[0]
    return root
def coordinate(self, panes=[], index=0):
"""
Update pane coordinate tuples based on their height and width relative to other panes
within the dimensions of the current window.
We account for panes with a height of 1 where the bottom coordinates are the same as the top.
Account for floating panes and self-coordinating panes adjacent to panes set to EXPAND.
Coordinates are of the form:
[
((top-left-from-top, top-left-from-left),
(top-right-from-top, top-right-from-left)),
((bottom-left-from-top, bottom-left-from-left),
(bottom-right-from-top, bottom-right-from-left))
]
We can then use these to determine things such as whether corners are inverted and how
many characters may be drawn
"""
y = 0 # height
for i, element in enumerate(self.panes):
x = 0 # width
if isinstance(element, list):
current_height = 0
for j, pane in enumerate(element):
if pane.hidden: continue
current_width = pane.width
current_height = pane.height
upper = ((y, x), (y, x+current_width))
lower = ((y+(current_height if current_height > 1 else 0), x),
(y+(current_height if current_height > 1 else 0), x+current_width))
pane.coords = [upper, lower]
x += current_width
y += (current_height+1 if current_height > 1 else 1)
else:
if element.hidden: continue
current_width = element.width
current_height = element.height
upper = ((y, x), (y, x+current_width))
lower = ((y+(current_height if current_height > 1 else 0), x),
(y+(current_height if current_height > 1 else 0), x+current_width))
element.coords = [upper, lower]
y += (current_height+1 if current_height > 1 else 1)
if self.debug:
coordinates = "Coordinates: " + str([p.coords for p in self])
if len(coordinates) > self.width:
coordinates = coordinates[:self.width - 3]
coordinates += '...'
        self.addstr(self.height-3, 0, coordinates)
def get_effort_streams(self, effort_id, types=None, resolution=None,
series_type=None):
"""
    Returns the streams for an effort.
http://strava.github.io/api/v3/streams/#effort
Streams represent the raw data of the uploaded file. External
applications may only access this information for activities owned
by the authenticated athlete.
Streams are available in 11 different types. If the stream is not
available for a particular activity it will be left out of the request
results.
Streams types are: time, latlng, distance, altitude, velocity_smooth,
heartrate, cadence, watts, temp, moving, grade_smooth
http://strava.github.io/api/v3/streams/#effort
:param effort_id: The ID of effort.
:type effort_id: int
:param types: (optional) A list of the the types of streams to fetch.
:type types: list
:param resolution: (optional, default is 'all') indicates desired number
of data points. 'low' (100), 'medium' (1000),
'high' (10000) or 'all'.
:type resolution: str
:param series_type: (optional, default is 'distance'. Relevant only if
using resolution either 'time' or 'distance'.
Used to index the streams if the stream is being
reduced.
:type series_type: str
    :return: A dictionary of :class:`stravalib.model.Stream` from the effort.
:rtype: :py:class:`dict`
"""
    # streams are a comma-separated list
if types is not None:
types = ",".join(types)
params = {}
if resolution is not None:
params["resolution"] = resolution
if series_type is not None:
params["series_type"] = series_type
result_fetcher = functools.partial(self.protocol.get,
'/segment_efforts/{id}/streams/{types}'.format(id=effort_id, types=types),
**params)
streams = BatchedResultsIterator(entity=model.Stream,
bind_client=self,
result_fetcher=result_fetcher)
# Pack streams into dictionary
    return {i.type: i for i in streams}
def claim(self, ttl, grace, count=None):
"""
Claims up to `count` unclaimed messages from this queue. If count is
not specified, the default is to claim 10 messages.
The `ttl` parameter specifies how long the server should wait before
releasing the claim. The ttl value MUST be between 60 and 43200 seconds.
The `grace` parameter is the message grace period in seconds. The value
of grace MUST be between 60 and 43200 seconds. The server extends the
lifetime of claimed messages to be at least as long as the lifetime of
the claim itself, plus a specified grace period to deal with crashed
workers (up to 1209600 or 14 days including claim lifetime). If a
claimed message would normally live longer than the grace period, its
expiration will not be adjusted.
    Returns a QueueClaim object, whose 'messages' attribute contains the
list of QueueMessage objects representing the claimed messages.
"""
if count is None:
qs = ""
else:
qs = "?limit=%s" % count
uri = "/%s%s" % (self.uri_base, qs)
body = {"ttl": ttl,
"grace": grace,
}
resp, resp_body = self.api.method_post(uri, body=body)
if resp.status_code == 204:
# Nothing available to claim
return None
# Get the claim ID from the first message in the list.
href = resp_body[0]["href"]
claim_id = href.split("claim_id=")[-1]
    return self.get(claim_id)
def _populate_attributes(self, config, record, context, data):
"""
Use a record found in LDAP to populate attributes.
"""
search_return_attributes = config['search_return_attributes']
for attr in search_return_attributes.keys():
if attr in record["attributes"]:
if record["attributes"][attr]:
data.attributes[search_return_attributes[attr]] = record["attributes"][attr]
satosa_logging(
logger,
logging.DEBUG,
"Setting internal attribute {} with values {}".format(
search_return_attributes[attr],
record["attributes"][attr]
),
context.state
)
else:
satosa_logging(
logger,
logging.DEBUG,
"Not setting internal attribute {} because value {} is null or empty".format(
search_return_attributes[attr],
record["attributes"][attr]
),
context.state
                )
def communicate_through(self, file):
"""Setup communication through a file.
:rtype: AYABInterface.communication.Communication
"""
if self._communication is not None:
raise ValueError("Already communicating.")
self._communication = communication = Communication(
file, self._get_needle_positions,
self._machine, [self._on_message_received],
right_end_needle=self.right_end_needle,
left_end_needle=self.left_end_needle)
    return communication
def wait_until_not_visible(self, timeout=None):
"""Search element and wait until it is not visible
:param timeout: max time to wait
:returns: page element instance
"""
try:
self.utils.wait_until_element_not_visible(self, timeout)
except TimeoutException as exception:
parent_msg = " and parent locator '{}'".format(self.parent) if self.parent else ''
msg = "Page element of type '%s' with locator %s%s is still visible after %s seconds"
timeout = timeout if timeout else self.utils.get_explicitly_wait()
self.logger.error(msg, type(self).__name__, self.locator, parent_msg, timeout)
exception.msg += "\n {}".format(msg % (type(self).__name__, self.locator, parent_msg, timeout))
raise exception
    return self
def websocket_url_for_server_url(url):
''' Convert an ``http(s)`` URL for a Bokeh server websocket endpoint into
the appropriate ``ws(s)`` URL
Args:
url (str):
An ``http(s)`` URL
Returns:
str:
The corresponding ``ws(s)`` URL ending in ``/ws``
Raises:
ValueError:
If the input URL is not of the proper form.
'''
if url.startswith("http:"):
reprotocoled = "ws" + url[4:]
elif url.startswith("https:"):
reprotocoled = "wss" + url[5:]
else:
raise ValueError("URL has unknown protocol " + url)
if reprotocoled.endswith("/"):
return reprotocoled + "ws"
else:
        return reprotocoled + "/ws"
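# Quick check of the rewriting rules above (the URLs are illustrative):
print(websocket_url_for_server_url("http://localhost:5006"))
# -> ws://localhost:5006/ws
print(websocket_url_for_server_url("https://example.org/app/"))
# -> wss://example.org/app/ws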
def convert_dirs(base_dir, hdf_name, complib=None, complevel=0):
"""
    Convert a nested set of directories to tables in an HDF5 store.
"""
print('Converting directories in {}'.format(base_dir))
dirs = glob.glob(os.path.join(base_dir, '*'))
dirs = {d for d in dirs if os.path.basename(d) in DIRECTORIES}
if not dirs:
        raise RuntimeError('No directories found matching known data.')
store = pd.HDFStore(
hdf_name, mode='w', complevel=complevel, complib=complib)
for dirpath in dirs:
dirname = os.path.basename(dirpath)
print(dirname)
df = cache_to_df(dirpath)
if dirname == 'travel_data':
keys = ['from_zone_id', 'to_zone_id']
elif dirname == 'annual_employment_control_totals':
keys = ['sector_id', 'year', 'home_based_status']
elif dirname == 'annual_job_relocation_rates':
keys = ['sector_id']
elif dirname == 'annual_household_control_totals':
keys = ['year']
elif dirname == 'annual_household_relocation_rates':
keys = ['age_of_head_max', 'age_of_head_min',
'income_min', 'income_max']
elif dirname == 'building_sqft_per_job':
keys = ['zone_id', 'building_type_id']
elif dirname == 'counties':
keys = ['county_id']
elif dirname == 'development_event_history':
keys = ['building_id']
elif dirname == 'target_vacancies':
keys = ['building_type_id', 'year']
else:
keys = [dirname[:-1] + '_id']
if dirname != 'annual_household_relocation_rates':
df = df.set_index(keys)
for colname in df.columns:
if df[colname].dtype == np.float64:
df[colname] = df[colname].astype(np.float32)
elif df[colname].dtype == np.int64:
df[colname] = df[colname].astype(np.int32)
else:
df[colname] = df[colname]
df.info()
print(os.linesep)
store.put(dirname, df)
    store.close()
def set_coordinates(atoms, V, title="", decimals=8):
"""
    Format coordinates V with corresponding atoms as a string in XYZ format.
Parameters
----------
atoms : list
List of atomic types
V : array
(N,3) matrix of atomic coordinates
title : string (optional)
Title of molecule
decimals : int (optional)
number of decimals for the coordinates
Return
------
output : str
Molecule in XYZ format
"""
N, D = V.shape
fmt = "{:2s}" + (" {:15."+str(decimals)+"f}")*3
out = list()
out += [str(N)]
out += [title]
for i in range(N):
atom = atoms[i]
atom = atom[0].upper() + atom[1:]
out += [fmt.format(atom, V[i, 0], V[i, 1], V[i, 2])]
return "\n".join(out) | 0.001271 |
def get_option_def(self, opt):
"""return the dictionary defining an option given its name"""
assert self.options
for option in self.options:
if option[0] == opt:
return option[1]
raise optparse.OptionError(
"no such option %s in section %r" % (opt, self.name), opt
    )
def method(*args, **kwargs):
"""Annotate an actor method.
.. code-block:: python
@ray.remote
class Foo(object):
@ray.method(num_return_vals=2)
def bar(self):
return 1, 2
f = Foo.remote()
_, _ = f.bar.remote()
Args:
num_return_vals: The number of object IDs that should be returned by
invocations of this actor method.
"""
assert len(args) == 0
assert len(kwargs) == 1
assert "num_return_vals" in kwargs
num_return_vals = kwargs["num_return_vals"]
def annotate_method(method):
method.__ray_num_return_vals__ = num_return_vals
return method
    return annotate_method
def Kurt(poly, dist=None, fisher=True, **kws):
"""
Kurtosis operator.
    Element by element 4th order statistics of a distribution or polynomial.
Args:
poly (Poly, Dist):
Input to take kurtosis on.
dist (Dist):
Defines the space the skewness is taken on. It is ignored if
``poly`` is a distribution.
fisher (bool):
If True, Fisher's definition is used (Normal -> 0.0). If False,
Pearson's definition is used (normal -> 3.0)
Returns:
(numpy.ndarray):
Element for element variance along ``poly``, where
``skewness.shape==poly.shape``.
Examples:
>>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
>>> print(numpy.around(chaospy.Kurt(dist), 4))
[6. 0.]
>>> print(numpy.around(chaospy.Kurt(dist, fisher=False), 4))
[9. 3.]
>>> x, y = chaospy.variable(2)
>>> poly = chaospy.Poly([1, x, y, 10*x*y])
>>> print(numpy.around(chaospy.Kurt(poly, dist), 4))
[nan 6. 0. 15.]
"""
if isinstance(poly, distributions.Dist):
x = polynomials.variable(len(poly))
poly, dist = x, poly
else:
poly = polynomials.Poly(poly)
if fisher:
adjust = 3
else:
adjust = 0
shape = poly.shape
poly = polynomials.flatten(poly)
m1 = E(poly, dist)
m2 = E(poly**2, dist)
m3 = E(poly**3, dist)
m4 = E(poly**4, dist)
out = (m4-4*m3*m1 + 6*m2*m1**2 - 3*m1**4) /\
(m2**2-2*m2*m1**2+m1**4) - adjust
out = numpy.reshape(out, shape)
    return out
def dumps(self):
r"""Turn the Latex Object into a string in Latex format."""
string = ""
if self.row_height is not None:
row_height = Command('renewcommand', arguments=[
NoEscape(r'\arraystretch'),
self.row_height])
string += row_height.dumps() + '%\n'
if self.col_space is not None:
col_space = Command('setlength', arguments=[
NoEscape(r'\tabcolsep'),
self.col_space])
string += col_space.dumps() + '%\n'
    return string + super().dumps()
def get_library(self, username, status=None):
"""Fetches a users library.
:param str username: The user to get the library from.
:param str status: only return the items with the supplied status.
Can be one of `currently-watching`, `plan-to-watch`, `completed`,
`on-hold` or `dropped`.
:returns: List of Library objects.
"""
r = self._query_('/users/%s/library' % username, 'GET',
params={'status': status})
results = [LibraryEntry(item) for item in r.json()]
    return results
def patch(self, id_or_uri, operation, path, value, timeout=-1, custom_headers=None):
"""
Uses the PATCH to update a resource.
Only one operation can be performed in each PATCH call.
Args:
id_or_uri: Can be either the resource ID or the resource URI.
operation: Patch operation
path: Path
value: Value
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
Updated resource.
"""
patch_request_body = [{'op': operation, 'path': path, 'value': value}]
return self.patch_request(id_or_uri=id_or_uri,
body=patch_request_body,
timeout=timeout,
                              custom_headers=custom_headers)
def find_obj_by_tag(self, tag):
"""Search through all the objects in the world and
return the first instance whose tag matches the specified string."""
for obj in self.__up_objects:
if obj.tag == tag:
return obj
for obj in self.__draw_objects:
if obj.tag == tag:
return obj
    return None
def u_base(self, theta, phi, lam, q):
"""Apply U to q."""
    return self.append(UBase(theta, phi, lam), [q], [])
def analyzeAP(Y,dY,I,rate,verbose=False):
"""
given a sweep and a time point, return the AP array for that AP.
APs will be centered in time by their maximum upslope.
"""
Ims = int(rate/1000) #Is per MS
IsToLook=5*Ims #TODO: clarify this, ms until downslope is over
upslope=np.max(dY[I:I+IsToLook]) #maximum rise velocity
upslopeI=np.where(dY[I:I+IsToLook]==upslope)[0][0]+I
I=upslopeI #center sweep at the upslope
downslope=np.min(dY[I:I+IsToLook]) #maximum fall velocity
downslopeI=np.where(dY[I:I+IsToLook]==downslope)[0][0]+I
peak=np.max(Y[I:I+IsToLook]) #find peak value (mV)
peakI=np.where(Y[I:I+IsToLook]==peak)[0][0]+I #find peak I
    thresholdI=I-np.where(dY[I:I-IsToLook:-1]<10)[0] #detect <10V/S, scanning backwards from the upslope
if not len(thresholdI):
return False
thresholdI=thresholdI[0]
threshold=Y[thresholdI] # mV where >10mV/S
height=peak-threshold # height (mV) from threshold to peak
halfwidthPoint=np.average((threshold,peak))
halfwidth=np.where(Y[I-IsToLook:I+IsToLook]>halfwidthPoint)[0]
if not len(halfwidth):
return False #doesn't look like a real AP
halfwidthI1=halfwidth[0]+I-IsToLook
halfwidthI2=halfwidth[-1]+I-IsToLook
if Y[halfwidthI1-1]>halfwidthPoint or Y[halfwidthI2+1]>halfwidthPoint:
return False #doesn't look like a real AP
halfwidth=len(halfwidth)/rate*1000 #now in MS
riseTime=(peakI-thresholdI)*1000/rate # time (ms) from threshold to peak
IsToLook=100*Ims #TODO: max prediction until AHP reaches nadir
AHPchunk=np.diff(Y[downslopeI:downslopeI+IsToLook]) #first inflection
AHPI=np.where(AHPchunk>0)[0]
if len(AHPI)==0:
AHPI=np.nan
else:
AHPI=AHPI[0]+downslopeI
AHPchunk=Y[AHPI:AHPI+IsToLook]
if max(AHPchunk)>threshold: #if another AP is coming, cut it out
AHPchunk=AHPchunk[:np.where(AHPchunk>threshold)[0][0]]
if len(AHPchunk):
AHP=np.nanmin(AHPchunk)
AHPI=np.where(AHPchunk==AHP)[0][0]+AHPI
AHPheight=threshold-AHP # AHP magnitude from threshold (mV)
IsToLook=500*Ims #TODO: max prediction until AHP reaches threshold
AHPreturn=np.average((AHP,threshold)) #half of threshold
AHPreturnI=np.where(Y[AHPI:AHPI+IsToLook]>AHPreturn)[0]
if len(AHPreturnI): #not having a clean decay won't cause AP to crash
AHPreturnI=AHPreturnI[0]+AHPI
AHPrisetime=(AHPreturnI-AHPI)*2/rate*1000 #predicted return time (ms)
AHPupslope=AHPheight/AHPrisetime #mV/ms = V/S
AHPreturnFullI=(AHPreturnI-AHPI)*2+AHPI
else: #make them nan so you can do averages later
AHPreturnI,AHPrisetime,AHPupslope=np.nan,np.nan,np.nan
downslope=np.nan
#fasttime (10V/S to 10V/S) #TODO:
#dpp (deriv peak to peak) #TODO:
sweepI,sweepT=I,I/rate # clean up variable names
del IsToLook,I, Y, dY, Ims, AHPchunk, verbose #delete what we don't need
    return locals()
def displayAll(elapsed, display_amt, est_end, nLoops, count, numPrints):
'''Displays time if verbose is true and count is within the display amount'''
if numPrints > nLoops:
display_amt = 1
else:
display_amt = round(nLoops / numPrints)
if count % display_amt == 0:
avg = elapsed / count
est_end = round(avg * nLoops)
(disp_elapsed,
disp_avg,
disp_est) = timeUnit(int(round(elapsed)),
int(round(avg)),
int(round(est_end)))
print "%s%%" % str(round(count / float(nLoops) * 100)), "@" + str(count),
totalTime = disp_est[0]
unit = disp_est[1]
if str(unit) == "secs":
remain = totalTime - round(elapsed)
remainUnit = "secs"
elif str(unit) == "mins":
remain = totalTime - round(elapsed) / 60
remainUnit = "mins"
elif str(unit) == "hr":
remain = totalTime - round(elapsed) / 3600
remainUnit = "hr"
print "ETA: %s %s" % (str(remain), remainUnit)
print
    return
def write_how_many(self, file):
""" Writes component numbers to a table.
"""
report = CaseReport(self.case)
# Map component labels to attribute names
components = [("Bus", "n_buses"), ("Generator", "n_generators"),
("Committed Generator", "n_online_generators"),
("Load", "n_loads"), ("Fixed Load", "n_fixed_loads"),
("Despatchable Load", "n_online_vloads"), ("Shunt", "n_shunts"),
("Branch", "n_branches"), ("Transformer", "n_transformers"),
("Inter-tie", "n_interties"), ("Area", "n_areas")
]
# Column 1 width
longest = max([len(c[0]) for c in components])
col1_header = "Object"
col1_width = longest
col2_header = "Quantity"
col2_width = len(col2_header)
# Row separator
sep = "="*col1_width + " " + "="*col2_width + "\n"
# Row headers
file.write(sep)
file.write(col1_header.center(col1_width))
file.write(" ")
file.write("%s\n" % col2_header.center(col2_width))
file.write(sep)
# Rows
    for label, attr in components:
        col2_value = str(getattr(report, attr))
        file.write("%s %s\n" %
                   (label.ljust(col1_width), col2_value.rjust(col2_width)))
    file.write(sep)
    file.write("\n")
    del report
def _setup_cmd(self,mode='cloud-in-cells'):
"""
The purpose here is to create a more finely binned
background CMD to sample from.
"""
# Only setup once...
if hasattr(self,'bkg_lambda'): return
logger.info("Setup color...")
# In the limit theta->0: 2*pi*(1-cos(theta)) -> pi*theta**2
# (Remember to convert from sr to deg^2)
#solid_angle_roi = sr2deg(2*np.pi*(1-np.cos(np.radians(self.roi_radius))))
solid_angle_roi = self.roi.area_pixel*len(self.roi.pixels)
# Large CMD bins cause problems when simulating
config = Config(self.config)
config['color']['n_bins'] *= 5 #10
config['mag']['n_bins'] *= 1 #2
#config['mask']['minimum_solid_angle'] = 0
roi = ugali.analysis.loglike.createROI(config,self.roi.lon,self.roi.lat)
mask = ugali.analysis.loglike.createMask(config,roi)
self.bkg_centers_color = roi.centers_color
self.bkg_centers_mag = roi.centers_mag
# Background CMD has units: [objs / deg^2 / mag^2]
cmd_background = mask.backgroundCMD(self.catalog,mode)
self.bkg_lambda=cmd_background*solid_angle_roi*roi.delta_color*roi.delta_mag
np.sum(self.bkg_lambda)
# Clean up
    del config, roi, mask
def profile_dir(name):
"""Return path to FF profile for a given profile name or path."""
if name:
possible_path = Path(name)
if possible_path.exists():
return possible_path
profiles = list(read_profiles())
try:
if name:
profile = next(p for p in profiles if p.name == name)
else:
profile = next(p for p in profiles if p.default)
except StopIteration:
raise ProfileNotFoundError(name)
    return profile.path
def format_specifications(specifications):
# type: (Iterable[str]) -> List[str]
"""
Transforms the interfaces names into URI strings, with the interface
implementation language as a scheme.
:param specifications: Specifications to transform
:return: The transformed names
"""
transformed = set()
for original in specifications:
try:
lang, spec = _extract_specification_parts(original)
transformed.add(_format_specification(lang, spec))
except ValueError:
# Ignore invalid specifications
pass
    return list(transformed)
def print_context_names(ctx, param, value):
"""Print all possible types."""
if not value or ctx.resilient_parsing:
return
click.echo('\n'.join(_context_names()))
    ctx.exit()
def get_urls(self):
"""
Returns the urls for the model.
"""
urls = super(IPAdmin, self).get_urls()
my_urls = patterns(
'',
url(r'^batch_process_ips/$', self.admin_site.admin_view(self.batch_process_ips_view), name='batch_process_ips_view')
)
    return my_urls + urls
def _get_namespace_tag(self, tag):
"""Return the given C{tag} with the namespace prefix added, if any."""
if self._namespace is not None:
tag = "{%s}%s" % (self._namespace, tag)
    return tag
def parse_startup_message(self):
"""results in an OmapiStartupMessage
>>> d = b"\\0\\0\\0\\x64\\0\\0\\0\\x18"
>>> next(InBuffer(d).parse_startup_message()).validate()
"""
    return parse_map(lambda args: OmapiStartupMessage(*args), parse_chain(self.parse_net32int, lambda _: self.parse_net32int()))
def scan_file(fullpath, relpath, assign_id):
""" Scan a file for the index
fullpath -- The full path to the file
relpath -- The path to the file, relative to its base directory
assign_id -- Whether to assign an ID to the file if not yet assigned
This calls into various modules' scanner functions; the expectation is that
the scan_file function will return a truthy value if it was scanned
successfully, False if it failed, and None if there is nothing to scan.
"""
logger.debug("Scanning file: %s (%s) %s", fullpath, relpath, assign_id)
def do_scan():
""" helper function to do the scan and gather the result """
_, ext = os.path.splitext(fullpath)
try:
if ext in ENTRY_TYPES:
logger.info("Scanning entry: %s", fullpath)
return entry.scan_file(fullpath, relpath, assign_id)
if ext in CATEGORY_TYPES:
logger.info("Scanning meta info: %s", fullpath)
return category.scan_file(fullpath, relpath)
return None
except: # pylint: disable=bare-except
logger.exception("Got error parsing %s", fullpath)
return False
result = do_scan()
if result is False and not assign_id:
logger.info("Scheduling fixup for %s", fullpath)
THREAD_POOL.submit(scan_file, fullpath, relpath, True)
else:
logger.debug("%s complete", fullpath)
if result:
set_fingerprint(fullpath)
        SCHEDULED_FILES.remove(fullpath)
def update_metadata(self, path, max_versions=None, cas_required=None, mount_point=DEFAULT_MOUNT_POINT):
"""Updates the max_versions of cas_required setting on an existing path.
Supported methods:
POST: /{mount_point}/metadata/{path}. Produces: 204 (empty body)
:param path: Path
:type path: str | unicode
:param max_versions: The number of versions to keep per key. If not set, the backend's configured max version is
used. Once a key has more than the configured allowed versions the oldest version will be permanently
deleted.
:type max_versions: int
:param cas_required: If true the key will require the cas parameter to be set on all write requests. If false,
the backend's configuration will be used.
:type cas_required: bool
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = {}
if max_versions is not None:
params['max_versions'] = max_versions
if cas_required is not None:
if not isinstance(cas_required, bool):
error_msg = 'bool expected for cas_required param, {type} received'.format(type=type(cas_required))
raise exceptions.ParamValidationError(error_msg)
params['cas_required'] = cas_required
api_path = '/v1/{mount_point}/metadata/{path}'.format(mount_point=mount_point, path=path)
return self._adapter.post(
url=api_path,
json=params,
    )
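# Hedged usage sketch (assumes an authenticated hvac-style client that
# exposes this class as client.secrets.kv.v2, as in hvac's KV v2 API;
# the path and values are illustrative):
# client.secrets.kv.v2.update_metadata(
#     path='app/config',
#     max_versions=5,
#     cas_required=True,
# )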
def train(self):
"""
Train the pair layer and pooling layer.
"""
for iDriving, cDriving in enumerate(self.drivingOperandSDRs):
minicolumnSDR = self.minicolumnSDRs[iDriving]
self.pairLayerProximalConnections.associate(minicolumnSDR, cDriving)
for iContext, cContext in enumerate(self.contextOperandSDRs):
iResult = (iContext + iDriving) % self.numLocations
cResult = self.resultSDRs[iResult]
self.pairLayer.compute(minicolumnSDR, basalInput=cContext)
cPair = self.pairLayer.getWinnerCells()
            self.poolingLayer.associate(cResult, cPair)
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
'''
Returns the rate of events with M > mag_value
:param float slip:
Slip rate in mm/yr
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
:param float beta:
Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
'''
delta_m = mmax - mag_value
a_2 = self._get_a2_value(bbar, dbar, slip / 10., beta, mmax)
    return a_2 * (np.exp(bbar * delta_m) - 1.0) * (delta_m > 0.0)
def uncancel_offer(self, offer_id):
"""
    Uncancels an offer
:param offer_id: the offer id
:return Response
"""
return self._create_put_request(
resource=OFFERS,
billomat_id=offer_id,
command=UNCANCEL,
    )
def count(self, param, must=[APIKEY, START_TIME, END_TIME]):
    '''Count the number of SMS messages sent.
        Parameter   Type     Required  Description                    Example
        apikey      String   yes       unique user identifier         9b11127a9701975c734b8aee81ee3526
        start_time  String   yes       SMS sending start time         2013-08-11 00:00:00
        end_time    String   yes       SMS sending end time           2013-08-12 00:00:00
        mobile      String   no        mobile number to query         15205201314
        page_num    Integer  no        page number, defaults to 1     1
        page_size   Integer  no        items per page, at most 100    20
    Args:
        param:
    Results:
        Result
    '''
r = self.verify_param(param, must)
if not r.is_succ():
return r
h = CommonResultHandler(lambda rsp: int(rsp[TOTAL]) if TOTAL in rsp else 0)
    return self.path('count.json').post(param, h, r)
def template_cylinder_annulus(height, outer_radius, inner_radius=0):
r"""
This method generates an image array of a disc-ring. It is useful for
passing to Cubic networks as a ``template`` to make circular-shaped 2D
networks.
Parameters
----------
height : int
The height of the cylinder
outer_radius : int
Number of nodes in the outer radius of the cylinder
inner_radius : int
Number of the nodes in the inner radius of the annulus. A value of 0
will result in a solid cylinder.
Returns
-------
A Numpy array containing 1's to demarcate the disc-ring, and 0's
elsewhere.
"""
img = _template_sphere_disc(dim=2, outer_radius=outer_radius,
inner_radius=inner_radius)
img = sp.tile(sp.atleast_3d(img), reps=height)
    return img
def port_str_arrange(ports):
""" Gives a str in the format (always tcp listed first).
T:<tcp ports/portrange comma separated>U:<udp ports comma separated>
"""
b_tcp = ports.find("T")
b_udp = ports.find("U")
if (b_udp != -1 and b_tcp != -1) and b_udp < b_tcp:
return ports[b_tcp:] + ports[b_udp:b_tcp]
    return ports
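# Illustrative check of the reordering above: a UDP-first string is rewritten
# so the TCP block leads; a TCP-first string is returned unchanged.
print(port_str_arrange("U:53T:22,80"))   # -> T:22,80U:53
print(port_str_arrange("T:22,80U:53"))   # -> T:22,80U:53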
def clear_instance(cls):
"""unset _instance for this class and singleton parents.
"""
if not cls.initialized():
return
for subclass in cls._walk_mro():
if isinstance(subclass._instance, cls):
# only clear instances that are instances
# of the calling class
            subclass._instance = None
def _ReadParserPresetsFromFile(self):
"""Reads the parser presets from the presets.yaml file.
Raises:
BadConfigOption: if the parser presets file cannot be read.
"""
self._presets_file = os.path.join(
self._data_location, self._PRESETS_FILE_NAME)
if not os.path.isfile(self._presets_file):
raise errors.BadConfigOption(
'No such parser presets file: {0:s}.'.format(self._presets_file))
try:
parsers_manager.ParsersManager.ReadPresetsFromFile(self._presets_file)
except errors.MalformedPresetError as exception:
raise errors.BadConfigOption(
'Unable to read presets from file with error: {0!s}'.format(
            exception))
def flip(self):
"""
Flip colors of a node and its children.
"""
left = self.left._replace(red=not self.left.red)
right = self.right._replace(red=not self.right.red)
top = self._replace(left=left, right=right, red=not self.red)
    return top
def matched_filter(template, data, psd=None, low_frequency_cutoff=None,
high_frequency_cutoff=None, sigmasq=None):
""" Return the complex snr.
Return the complex snr, along with its associated normalization of the
template, matched filtered against the data.
Parameters
----------
template : TimeSeries or FrequencySeries
The template waveform
data : TimeSeries or FrequencySeries
The strain data to be filtered.
psd : FrequencySeries
The noise weighting of the filter.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the filter calculation. If None, begin at the
first frequency after DC.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the filter calculation. If None, continue to
the Nyquist frequency.
sigmasq : {None, float}, optional
The template normalization. If None, this value is calculated
internally.
Returns
-------
snr : TimeSeries
A time series containing the complex snr.
"""
snr, _, norm = matched_filter_core(template, data, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff, h_norm=sigmasq)
return snr * norm | 0.003042 |
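A hedged usage sketch in the PyCBC style (the strain series data and the psd are assumed to exist already; the approximant and masses are illustrative):
from pycbc.waveform import get_td_waveform

hp, _ = get_td_waveform(approximant='SEOBNRv4_opt', mass1=10, mass2=10,
                        delta_t=1.0 / 4096, f_lower=20)
hp.resize(len(data))  # template and data must share length and delta_t
snr = matched_filter(hp, data, psd=psd, low_frequency_cutoff=20)
print(abs(snr).max())  # peak magnitude of the complex SNR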
def get_tip_labels(self, idx=None):
"""
Returns tip labels in the order they will be plotted on the tree, i.e.,
starting from zero axis and counting up by units of 1 (bottom to top
in right-facing trees; left to right in down-facing). If 'idx' is
indicated then a list of tip labels descended from that node will be
returned, instead of all tip labels. This is useful in combination
with other functions that select nodes/clades of the tree based on a
list of tip labels. You can use the toytree draw() command with
tip_labels='idx' or tip_labels=True to see idx labels plotted on nodes.
Parameters:
idx (int): index label of a node.
Example:
# select a clade of the tree and use it for rooting.
tiplist = tre.get_descendants_from_idx(21)
tre.root(names=tiplist)
"""
if idx is None:  # explicit None check so that node index 0 is treated as a valid idx
return self.treenode.get_leaf_names()[::-1]
else:
treenode = self.treenode.search_nodes(idx=idx)[0]
return treenode.get_leaf_names()[::-1] | 0.008029 |
def _symm_current(C):
"""To get rid of NaNs produced by _scalar2array, symmetrize operators
where C_ijkl = C_klij"""
nans = np.isnan(C)
C[nans] = np.einsum('klij', C)[nans]
return C | 0.004975 |
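A tiny self-contained check of the einsum trick: np.einsum('klij', C) is the transpose with out[i, j, k, l] = C[k, l, i, j], so each NaN takes its symmetry partner's value:
import numpy as np

C = np.arange(16.0).reshape(2, 2, 2, 2)
C[0, 1, 1, 0] = np.nan
_symm_current(C)
print(C[0, 1, 1, 0], C[1, 0, 0, 1])  # 9.0 9.0 -- the NaN was filled from C[1, 0, 0, 1]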
def derive_toctree_rst(self, current_file):
"""
Generate the rst content::
.. toctree::
args ...
example.rst
...
:param current_file:
:return:
"""
TAB = " " * 4
lines = list()
lines.append(".. toctree::")
for opt in TocTree.option_spec:
value = self.options.get(opt)
if value is not None:
lines.append(("{}:{}: {}".format(TAB, opt, value)).rstrip())
lines.append("")
append_ahead = "append_ahead" in self.options
if append_ahead:
for line in list(self.content):
lines.append(TAB + line)
article_folder = ArticleFolder(dir_path=Path(current_file).parent.abspath)
for af in article_folder.sub_article_folders:
line = "{}{} <{}>".format(TAB, af.title, af.rel_path)
lines.append(line)
append_behind = not append_ahead
if append_behind:
for line in list(self.content):
lines.append(TAB + line)
lines.append("")
return "\n".join(lines) | 0.002586 |
def get_servers():
'''
Get list of configured NTP servers
CLI Example:
.. code-block:: bash
salt '*' ntp.get_servers
'''
cmd = ['w32tm', '/query', '/configuration']
lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
for line in lines:
try:
if line.startswith('NtpServer:'):
_, ntpsvrs = line.rsplit(' (', 1)[0].split(':', 1)
return sorted(ntpsvrs.split())
except ValueError:
return False
return False | 0.001855 |
def get_prior(self, twig=None, **kwargs):
"""
[NOT IMPLEMENTED]
:raises NotImplementedError: because it isn't
"""
raise NotImplementedError
kwargs['context'] = 'prior'
return self.filter(twig=twig, **kwargs) | 0.007576 |
def get_value_matched_by_regex(field_name, regex_matches, string):
"""Ensure value stored in regex group exists."""
try:
value = regex_matches.group(field_name)
if value is not None:
return value
except IndexError:
pass
raise MissingFieldError(string, field_name) | 0.003165 |
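A short usage sketch (MissingFieldError is assumed to be defined elsewhere in the same module):
import re

matches = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)', '3.11')
print(get_value_matched_by_regex('major', matches, '3.11'))  # -> 3
get_value_matched_by_regex('patch', matches, '3.11')         # raises MissingFieldError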
def show(self, resolve_mac=True):
"""Print list of available network interfaces in human readable form"""
print("%s %s %s %s" % ("INDEX".ljust(5), "IFACE".ljust(35), "IP".ljust(15), "MAC"))
for iface_name in sorted(self.data.keys()):
dev = self.data[iface_name]
mac = dev.mac
if resolve_mac and iface_name != LOOPBACK_NAME:
mac = conf.manufdb._resolve_MAC(mac)
print("%s %s %s %s" % (str(dev.win_index).ljust(5), str(dev.name).ljust(35), str(dev.ip).ljust(15), mac) ) | 0.008834 |
def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover
r"""
Read text from clipboard and pass to read_csv. See read_csv for the
full argument list
Parameters
----------
sep : str, default '\s+'
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
Returns
-------
parsed : DataFrame
"""
encoding = kwargs.pop('encoding', 'utf-8')
# only utf-8 is valid for passed value because that's what clipboard
# supports
if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
raise NotImplementedError(
'reading from clipboard only supports utf-8 encoding')
from pandas.io.clipboard import clipboard_get
from pandas.io.parsers import read_csv
text = clipboard_get()
# Try to decode (if needed, as "text" might already be a string here).
try:
text = text.decode(kwargs.get('encoding')
or get_option('display.encoding'))
except AttributeError:
pass
# Excel copies into clipboard with \t separation
# inspect no more than the first 10 lines; if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split('\n')[:-1][:10]
# Need to remove leading white space, since read_csv
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = {x.lstrip().count('\t') for x in lines}
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
sep = '\t'
# Edge case where sep is specified to be None, return to default
if sep is None and kwargs.get('delim_whitespace') is None:
sep = r'\s+'
# Regex separator currently only works with python engine.
# Default to python if separator is multi-character (regex)
if len(sep) > 1 and kwargs.get('engine') is None:
kwargs['engine'] = 'python'
elif len(sep) > 1 and kwargs.get('engine') == 'c':
warnings.warn('read_clipboard with regex separator does not work'
' properly with c engine')
return read_csv(StringIO(text), sep=sep, **kwargs) | 0.000456 |
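A minimal usage sketch (requires a populated system clipboard; the copied content is hypothetical):
# After copying a tab-separated range from a spreadsheet:
df = read_clipboard()           # sep is inferred as '\t' from the pasted lines
df2 = read_clipboard(sep=',')   # or force an explicit separator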
def create_open(cls, *args, **kwargs):
"""
:return:
a delegator function that calls the ``cls`` constructor whose arguments being
a seed tuple followed by supplied ``*args`` and ``**kwargs``, then returns
a looping function that uses the object's ``listener`` to wait for messages
and invokes instance method ``open``, ``on_message``, and ``on_close`` accordingly.
By default, a thread wrapping that looping function is spawned.
"""
def f(seed_tuple):
j = cls(seed_tuple, *args, **kwargs)
def wait_loop():
bot, msg, seed = seed_tuple
try:
handled = j.open(msg, seed)
if not handled:
j.on_message(msg)
while 1:
msg = j.listener.wait()
j.on_message(msg)
# These exceptions are "normal" exits.
except (exception.IdleTerminate, exception.StopListening) as e:
j.on_close(e)
# Any other exceptions are accidents. **Print it out.**
# This is to prevent swallowing exceptions in the case that on_close()
# gets overridden but fails to account for unexpected exceptions.
except Exception as e:
traceback.print_exc()
j.on_close(e)
return wait_loop
return f | 0.004326 |
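A hedged wiring sketch in the telepot DelegatorBot style (MyHandler, the token, and the exact pairing are illustrative; telepot pairs a seed-calculating function with a delegate producer like the one returned above):
import telepot
from telepot.delegate import per_chat_id

bot = telepot.DelegatorBot('BOT_TOKEN', [
    (per_chat_id(), MyHandler.create_open(timeout=10)),  # MyHandler subclasses the class above
])
bot.message_loop(run_forever=True)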
def create_drop_query(self, tokens):
"""
Parse tokens of drop query
:param tokens: A list of InfluxDB query tokens
"""
if not tokens[Keyword.SERIES]:
return None
return DropQuery(self.parse_keyword(Keyword.SERIES, tokens)) | 0.007092 |
def delete_arrays(self, period = None):
"""
If ``period`` is ``None``, remove all known values of the variable.
If ``period`` is not ``None``, remove only the values for periods included in ``period`` (e.g. if period is "2017", values for "2017-01", "2017-07", etc. would be removed)
"""
self._memory_storage.delete(period)
if self._disk_storage:
self._disk_storage.delete(period) | 0.011111 |
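Usage per the docstring (variable stands for an instance of the enclosing class):
variable.delete_arrays()        # forget every stored value of the variable
variable.delete_arrays('2017')  # forget 2017 and any sub-period such as '2017-01'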
def put(self, url, body=None, **kwargs):
"""
Send a PUT request.
:param str url: Sub URL for the request. You MUST NOT include the base URL or the API version prefix.
:param dict body: (optional) Dictionary of body attributes that will be wrapped with envelope and json encoded.
:param dict **kwargs: (optional) Other parameters which are directly passed to :func:`requests.request`.
:return: Tuple of three elements: (http status code, headers, response - either parsed json or plain text)
:rtype: tuple
"""
return self.request('put', url, body=body, **kwargs) | 0.009434 |
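A usage sketch (the client instance and the endpoint are illustrative; the three-tuple shape follows the docstring):
status, headers, payload = client.put('tickets/42', body={'status': 'closed'})
if status == 200:
    print(payload)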
def diam_swamee(FlowRate, HeadLossFric, Length, Nu, PipeRough):
"""Return the inner diameter of a pipe.
The Swamee Jain equation is dimensionally correct and returns the
inner diameter of a pipe given the flow rate and the head loss due
to shear on the pipe walls. The Swamee Jain equation does NOT take
minor losses into account. This equation ONLY applies to turbulent
flow.
"""
#Checking input validity
ut.check_range([FlowRate, ">0", "Flow rate"], [Length, ">0", "Length"],
[HeadLossFric, ">0", "Headloss due to friction"],
[Nu, ">0", "Nu"], [PipeRough, "0-1", "Pipe roughness"])
a = ((PipeRough ** 1.25)
* ((Length * FlowRate**2)
/ (gravity.magnitude * HeadLossFric)
)**4.75
)
b = (Nu * FlowRate**9.4
* (Length / (gravity.magnitude * HeadLossFric)) ** 5.2
)
return 0.66 * (a+b)**0.04 | 0.003205 |
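A usage sketch in SI units (the values are illustrative; ut and gravity are this module's own imports):
d = diam_swamee(FlowRate=0.006,      # m^3/s
                HeadLossFric=0.4,    # m
                Length=40,           # m
                Nu=1.0e-6,           # m^2/s, kinematic viscosity of water
                PipeRough=1.0e-4)    # m
print(d)  # inner diameter in meters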
def getmtime(self, path):
"""Returns the modification time of the fake file.
Args:
path: the path to fake file.
Returns:
(int, float) the modification time of the fake file
in number of seconds since the epoch.
Raises:
OSError: if the file does not exist.
"""
try:
file_obj = self.filesystem.resolve(path)
return file_obj.st_mtime
except IOError:
self.filesystem.raise_os_error(errno.ENOENT, winerror=3) | 0.003578 |
def unblock_signals(self):
"""Let the combos listen for event changes again."""
self.aggregation_layer_combo.blockSignals(False)
self.exposure_layer_combo.blockSignals(False)
self.hazard_layer_combo.blockSignals(False) | 0.008 |
def insertDatastore(self, index, store):
'''Inserts datastore `store` into this collection at `index`.'''
if not isinstance(store, Datastore):
raise TypeError("stores must be of type %s" % Datastore)
self._stores.insert(index, store) | 0.007937 |
def unpack_from(self, buf, offset=0 ):
'''
Unpacks data from 'buf' and returns a dict of named fields. The
fields can be post-processed by extending the _post_unpack() method.
'''
data = super(Struct, self).unpack_from(buf, offset)
items = dict(zip(self.fields, data))
return self._post_unpack(items) | 0.016713 |
def annotate(self, content, includeCat=None, excludeCat=None, minLength=None, longestOnly=None, includeAbbrev=None, includeAcronym=None, includeNumbers=None, output='text/plain; charset=utf-8'):
""" Annotate text from: /annotations
Arguments:
content: The content to annotate
includeCat: A set of categories to include
excludeCat: A set of categories to exclude
minLength: The minimum number of characters in annotated entities
longestOnly: Should only the longest entity be returned for an overlapping group
includeAbbrev: Should abbreviations be included
includeAcronym: Should acronyms be included
includeNumbers: Should numbers be included
outputs:
text/plain; charset=utf-8
"""
kwargs = {'content':content, 'includeCat':includeCat, 'excludeCat':excludeCat, 'minLength':minLength, 'longestOnly':longestOnly, 'includeAbbrev':includeAbbrev, 'includeAcronym':includeAcronym, 'includeNumbers':includeNumbers}
kwargs = {k:dumps(v) if builtins.type(v) is dict else v for k, v in kwargs.items()}
param_rest = self._make_rest(None, **kwargs)
url = self._basePath + ('/annotations').format(**kwargs)
requests_params = kwargs
output = self._get('GET', url, requests_params, output)
return output if output else None | 0.010571 |
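A usage sketch (client is assumed to be an instance of the enclosing SciGraph wrapper class):
text = client.annotate(content='Parkinson disease is a progressive disorder',
                       longestOnly=True, includeAbbrev=False)
print(text)  # plain-text annotation result, per the declared output type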
def memory_read(self, start_position: int, size: int) -> memoryview:
"""
Read and return a view of ``size`` bytes from memory starting at ``start_position``.
"""
return self._memory.read(start_position, size) | 0.0125 |
def load_manifest(check_name):
"""
Load the manifest file into a dictionary
"""
manifest_path = os.path.join(get_root(), check_name, 'manifest.json')
if file_exists(manifest_path):
return json.loads(read_file(manifest_path))
return {} | 0.003759 |
def _print_throbber(self):
'''Print an indefinite progress bar.'''
self._print('[')
for position in range(self._bar_width):
self._print('O' if position == self._throbber_index else ' ')
self._print(']')
self._throbber_index = next(self._throbber_iter) | 0.006536 |
def manage_all(self, *args, **kwargs):
"""
Runs manage() across all unique site default databases.
"""
for site, site_data in self.iter_unique_databases(site='all'):
if self.verbose:
print('-'*80, file=sys.stderr)
print('site:', site, file=sys.stderr)
if self.env.available_sites_by_host:
hostname = self.current_hostname
sites_on_host = self.env.available_sites_by_host.get(hostname, [])
if sites_on_host and site not in sites_on_host:
self.vprint('skipping site:', site, sites_on_host, file=sys.stderr)
continue
self.manage(*args, **kwargs) | 0.005479 |
def _stripe_object_to_refunds(cls, target_cls, data, charge):
"""
Retrieves Refunds for a charge
:param target_cls: The target class to instantiate per refund.
:type target_cls: ``Refund``
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param charge: The charge object that refunds are for.
:type charge: ``djstripe.models.Charge``
:return: A list of Refund objects created or retrieved for the charge.
"""
refunds = data.get("refunds")
if not refunds:
return []
refund_objs = []
for refund_data in refunds.get("data", []):
item, _ = target_cls._get_or_create_from_stripe_object(refund_data, refetch=False)
refund_objs.append(item)
return refund_objs | 0.031343 |
def menu_clean(menu_config):
"""
Make sure that only the menu item with the largest weight is active.
If a child of a menu item is active, the parent should be active too.
:param menu:
:return:
"""
max_weight = -1
for _, value in list(menu_config.items()):
if value["submenu"]:
for _, v in list(value["submenu"].items()):
if v["active"]:
# parent inherits the weight of the active child
value["active"] = True
value["active_weight"] = v["active_weight"]
if value["active"]:
max_weight = max(value["active_weight"], max_weight)
if max_weight > 0:
# one of the items is active: make items with lesser weight inactive
for _, value in list(menu_config.items()):
if value["active"] and value["active_weight"] < max_weight:
value["active"] = False
return menu_config | 0.001038 |
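A tiny worked example (the dict shape is inferred from the keys the function reads):
menu = {
    'home': {'submenu': {}, 'active': True, 'active_weight': 1},
    'docs': {'submenu': {'api': {'active': True, 'active_weight': 5}},
             'active': False, 'active_weight': 0},
}
menu = menu_clean(menu)
print(menu['home']['active'], menu['docs']['active'])  # False True -- 'docs' inherited weight 5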
def chhome(name, home, **kwargs):
'''
Change the home directory of the user
CLI Example:
.. code-block:: bash
salt '*' user.chhome foo /Users/foo
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
persist = kwargs.pop('persist', False)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
if persist:
log.info('Ignoring unsupported \'persist\' argument to user.chhome')
pre_info = info(name)
if not pre_info:
raise CommandExecutionError('User \'{0}\' does not exist'.format(name))
if home == pre_info['home']:
return True
_dscl(
['/Users/{0}'.format(name), 'NFSHomeDirectory',
pre_info['home'], home],
ctype='change'
)
# dscl buffers changes, sleep 1 second before checking if new value
# matches desired value
time.sleep(1)
return info(name).get('home') == home | 0.00111 |
def initialize_shade(self, shade_name, shade_color, alpha):
"""This method will create semi-transparent surfaces with a specified
color. The surface can be toggled on and off.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Inputs:
Shade_name - String of the name that you want to associate with the
surface
Shade_color - An rgb tuple of the color of the shade
Alpha - Level of transparency of the shade (0-255 with 150 being a
good middle value)
(doc string updated ver 0.1)
"""
# Create the pygame surface
self.shades[shade_name] = [0, pygame.Surface(self.image.get_size())]
# Fill the surface with a solid color or an image
if type(shade_color) == str:
background = pygame.image.load(shade_color).convert()
background = pygame.transform.scale(background,
(self.image.get_width(),
self.image.get_height()))
self.shades[shade_name][1].blit(background, (0, 0))
# Otherwise the background should contain an rgb value
else:
self.shades[shade_name][1].fill(shade_color)
# Set the alpha value for the shade
self.shades[shade_name][1].set_alpha(alpha) | 0.001427 |
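Usage per the docstring (sprite stands for an instance of the enclosing class; the image path is hypothetical):
sprite.initialize_shade('night', (0, 0, 40), 150)  # solid dark-blue shade, medium transparency
sprite.initialize_shade('fog', 'art/fog.png', 90)  # image-based shade, scaled to the sprite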
def map(cls, obj, mode='data', backend=None):
"""
Applies compositor operations to any HoloViews element or container
using the map method.
"""
from .overlay import CompositeOverlay
element_compositors = [c for c in cls.definitions if len(c._pattern_spec) == 1]
overlay_compositors = [c for c in cls.definitions if len(c._pattern_spec) > 1]
if overlay_compositors:
obj = obj.map(lambda obj: cls.collapse_element(obj, mode=mode, backend=backend),
[CompositeOverlay])
element_patterns = [c.pattern for c in element_compositors]
if element_compositors and obj.traverse(lambda x: x, element_patterns):
obj = obj.map(lambda obj: cls.collapse_element(obj, mode=mode, backend=backend),
element_patterns)
return obj | 0.006889 |
def show(parent=None, targets=[], modal=None, auto_publish=False, auto_validate=False):
"""Attempt to show GUI
Requires install() to have been run first, and
a live instance of Pyblish QML in the background.
Arguments:
parent (None, optional): Deprecated
targets (list, optional): Publishing targets
modal (bool, optional): Block interactions to parent
"""
# Get modal mode from environment
if modal is None:
modal = bool(os.environ.get("PYBLISH_QML_MODAL", False))
# Automatically install if not already installed.
install(modal)
show_settings = settings.to_dict()
show_settings['autoPublish'] = auto_publish
show_settings['autoValidate'] = auto_validate
# Show existing GUI
if _state.get("currentServer"):
server = _state["currentServer"]
proxy = ipc.server.Proxy(server)
try:
proxy.show(show_settings)
return server
except IOError:
# The running instance has already been closed.
_state.pop("currentServer")
if not host.is_headless():
host.splash()
try:
service = ipc.service.Service()
server = ipc.server.Server(service, targets=targets, modal=modal)
except Exception:
# If for some reason, the GUI fails to show.
traceback.print_exc()
return host.desplash()
proxy = ipc.server.Proxy(server)
proxy.show(show_settings)
# Store reference to server for future calls
_state["currentServer"] = server
log.info("Success. QML server available as "
"pyblish_qml.api.current_server()")
server.listen()
return server | 0.00118 |