text (string, lengths 78–104k) | score (float64, 0–0.18) |
---|---|
def strict_dependencies(self, dep_context):
"""
:param dep_context: A DependencyContext with configuration for the request.
:return: targets that this target "strictly" depends on. This set of dependencies contains
only directly declared dependencies, with two exceptions:
1) aliases are expanded transitively
      2) the strict_dependencies of targets exported by targets in
         strict_dependencies (transitively).
:rtype: list of Target
"""
strict_deps = self._cached_strict_dependencies_map.get(dep_context, None)
if strict_deps is None:
default_predicate = self._closure_dep_predicate({self}, **dep_context.target_closure_kwargs)
# TODO(#5977): this branch needs testing!
if not default_predicate:
def default_predicate(*args, **kwargs):
return True
def dep_predicate(source, dependency):
if not default_predicate(source, dependency):
return False
# Always expand aliases.
if type(source) in dep_context.alias_types:
return True
# Traverse other dependencies if they are exported.
if source._dep_is_exported(dependency):
return True
return False
dep_addresses = [d.address for d in self.dependencies
if default_predicate(self, d)
]
result = self._build_graph.transitive_subgraph_of_addresses_bfs(
addresses=dep_addresses,
dep_predicate=dep_predicate
)
strict_deps = OrderedSet()
for declared in result:
if type(declared) in dep_context.alias_types:
continue
if isinstance(declared, dep_context.types_with_closure):
strict_deps.update(declared.closure(
bfs=True,
**dep_context.target_closure_kwargs))
strict_deps.add(declared)
strict_deps = list(strict_deps)
self._cached_strict_dependencies_map[dep_context] = strict_deps
return strict_deps | 0.010505 |
def kill_process(self):
"""Stop the process"""
if self.process.poll() is not None:
return
try:
if hasattr(self.process, 'terminate'):
self.process.terminate()
elif os.name != 'nt':
os.kill(self.process.pid, 9)
else:
import ctypes
handle = int(self.process._handle)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
except OSError:
pass | 0.003929 |
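# Hedged usage sketch (not part of the original snippet): kill_process assumes
# `self.process` is a subprocess.Popen handle; the same termination pattern standalone:
import subprocess
import sys

proc = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(60)'])
if proc.poll() is None:      # poll() returns None while the child is still running
    proc.terminate()         # Popen.terminate() works on both POSIX and Windows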
def search(query, stats):
""" Perform issue search for given stats instance """
log.debug("Search query: {0}".format(query))
issues = []
# Fetch data from the server in batches of MAX_RESULTS issues
for batch in range(MAX_BATCHES):
response = stats.parent.session.get(
"{0}/rest/api/latest/search?{1}".format(
stats.parent.url, urllib.urlencode({
"jql": query,
"fields": "summary,comment",
"maxResults": MAX_RESULTS,
"startAt": batch * MAX_RESULTS})))
data = response.json()
log.debug("Batch {0} result: {1} fetched".format(
batch, listed(data["issues"], "issue")))
log.data(pretty(data))
issues.extend(data["issues"])
# If all issues fetched, we're done
if len(issues) >= data["total"]:
break
# Return the list of issue objects
return [Issue(issue, prefix=stats.parent.prefix) for issue in issues] | 0.001815 |
def change_exteditor(self):
"""Change external editor path"""
path, valid = QInputDialog.getText(self, _('External editor'),
_('External editor executable path:'),
QLineEdit.Normal,
self.get_option('external_editor/path'))
if valid:
self.set_option('external_editor/path', to_text_string(path)) | 0.007229 |
def add(self, *args, **kwargs):
"""Add the instance tied to the field for the given "value" (via `args`) to the index
For the parameters, see ``BaseIndex.add``
"""
check_uniqueness = kwargs.get('check_uniqueness', True)
key = self.get_storage_key(*args)
if self.field.unique and check_uniqueness:
self.check_uniqueness(key=key, *args)
# Do index => create a key to be able to retrieve parent pk with
        # current field value
pk = self.instance.pk.get()
logger.debug("adding %s to index %s" % (pk, key))
self.connection.sadd(key, pk)
self._indexed_values.add(tuple(args)) | 0.004405 |
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
        :return: The HTTP response as JSON; raises `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json() | 0.002621 |
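# Hedged usage sketch (client construction and resource names are assumptions):
# execute_get builds ".../posts?limit=5&include=tags,authors" with JSON headers
# and either the bearer token or client_id/client_secret appended.
# client = GhostClient(base_url='https://blog.example.com/ghost/api/v0.1',
#                      client_id='ghost-admin', client_secret='...')
# posts = client.execute_get('posts', limit=5, include=['tags', 'authors'])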
def bind_type(python_value):
"""Return a Gibica type derived from a Python type."""
binding_table = {'bool': Bool, 'int': Int, 'float': Float}
if python_value is None:
return NoneType()
python_type = type(python_value)
gibica_type = binding_table.get(python_type.__name__)
if gibica_type is None:
raise TypeError('Impossible to recognize underlying type.')
return gibica_type(python_value) | 0.002288 |
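# Hedged usage sketch (assumes the Bool/Int/Float/NoneType wrappers take one value):
# bind_type(True)   -> Bool(True)
# bind_type(42)     -> Int(42)
# bind_type(3.14)   -> Float(3.14)
# bind_type(None)   -> NoneType()
# bind_type('text') -> raises TypeError('Impossible to recognize underlying type.')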
def setInputFormatText( self, text ):
"""
Sets the input format text for this widget to the given value.
:param text | <str>
"""
try:
self._inputFormat = XLineEdit.InputFormat[nativestring(text)]
except KeyError:
pass | 0.016502 |
def _print_unique_links_with_status_codes(page_url, soup):
""" Finds all unique links in the html of the page source
and then prints out those links with their status codes.
Format: ["link" -> "status_code"] (per line)
Page links include those obtained from:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src".
"""
links = _get_unique_links(page_url, soup)
for link in links:
status_code = _get_link_status_code(link)
print(link, " -> ", status_code) | 0.001887 |
def chain_split(*splits: Iterable[Callable[..., Any]]) -> Callable[[BaseChain], Iterable[BaseChain]]: # noqa: E501
"""
Construct and execute multiple concurrent forks of the chain.
Any number of forks may be executed. For each fork, provide an iterable of
commands.
Returns the resulting chain objects for each fork.
.. code-block:: python
chain_a, chain_b = build(
mining_chain,
chain_split(
(mine_block(extra_data=b'chain-a'), mine_block()),
(mine_block(extra_data=b'chain-b'), mine_block(), mine_block()),
),
)
"""
if not splits:
raise ValidationError("Cannot use `chain_split` without providing at least one split")
@functools.wraps(chain_split)
@to_tuple
def _chain_split(chain: BaseChain) -> Iterable[BaseChain]:
for split_fns in splits:
result = build(
chain,
*split_fns,
)
yield result
return _chain_split | 0.002887 |
def split(self):
"""
Split the graph into sub-graphs. Only connected objects belong to the
same graph. `split` yields copies of the Graph object. Shallow copies
are used that only replicate the meta-information, but share the same
object list ``self.objects``.
>>> from pympler.refgraph import ReferenceGraph
>>> a = 42
>>> b = 'spam'
>>> c = {a: b}
>>> t = (1,2,3)
>>> rg = ReferenceGraph([a,b,c,t])
>>> for subgraph in rg.split():
... print subgraph.index
0
1
"""
self._annotate_groups()
index = 0
for group in range(self._max_group):
subgraph = copy(self)
subgraph.metadata = self.metadata[:]
subgraph.edges = self.edges.copy()
if subgraph._filter_group(group):
subgraph.total_size = sum([x.size for x in subgraph.metadata])
subgraph.index = index
index += 1
yield subgraph | 0.001914 |
def copy_snapshot(kwargs=None, call=None):
'''
Copy a snapshot
'''
if call != 'function':
log.error(
'The copy_snapshot function must be called with -f or --function.'
)
return False
if 'source_region' not in kwargs:
log.error('A source_region must be specified to copy a snapshot.')
return False
if 'source_snapshot_id' not in kwargs:
log.error('A source_snapshot_id must be specified to copy a snapshot.')
return False
if 'description' not in kwargs:
kwargs['description'] = ''
params = {'Action': 'CopySnapshot'}
if 'source_region' in kwargs:
params['SourceRegion'] = kwargs['source_region']
if 'source_snapshot_id' in kwargs:
params['SourceSnapshotId'] = kwargs['source_snapshot_id']
if 'description' in kwargs:
params['Description'] = kwargs['description']
log.debug(params)
data = aws.query(params,
return_url=True,
location=get_location(),
provider=get_provider(),
opts=__opts__,
sigver='4')
return data | 0.000847 |
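# Hedged usage sketch (provider name is an assumption): copy_snapshot is exposed
# as a salt-cloud provider function, so it is typically invoked with -f:
# salt-cloud -f copy_snapshot my-ec2-config source_region=us-east-1 \
#     source_snapshot_id=snap-0123456789abcdef0 description='nightly copy'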
def get_ip_interface_input_request_type_get_request_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_ip_interface = ET.Element("get_ip_interface")
config = get_ip_interface
input = ET.SubElement(get_ip_interface, "input")
request_type = ET.SubElement(input, "request-type")
get_request = ET.SubElement(request_type, "get-request")
interface_name = ET.SubElement(get_request, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004559 |
def set_table(self, schema, **kwargs):
"""
add the table to the db
schema -- Schema() -- contains all the information about the table
"""
with self.connection(**kwargs) as connection:
kwargs['connection'] = connection
if self.has_table(str(schema), **kwargs): return True
try:
with self.transaction(**kwargs):
self._set_table(schema, **kwargs)
for index_name, index in schema.indexes.items():
self.set_index(
schema,
name=index.name,
fields=index.fields,
connection=connection,
**index.options
)
except InterfaceError:
# check to see if this table now exists, it might have been created
# in another thread
if not self.has_table(schema, **kwargs):
raise | 0.003784 |
def resolve_url(self, catalog_url):
"""Resolve the url of the dataset when reading latest.xml.
Parameters
----------
catalog_url : str
The catalog url to be resolved
"""
if catalog_url != '':
resolver_base = catalog_url.split('catalog.xml')[0]
resolver_url = resolver_base + self.url_path
resolver_xml = session_manager.urlopen(resolver_url)
tree = ET.parse(resolver_xml)
root = tree.getroot()
if 'name' in root.attrib:
self.catalog_name = root.attrib['name']
else:
self.catalog_name = 'No name found'
resolved_url = ''
found = False
for child in root.iter():
if not found:
tag_type = child.tag.split('}')[-1]
if tag_type == 'dataset':
if 'urlPath' in child.attrib:
ds = Dataset(child)
resolved_url = ds.url_path
found = True
if found:
return resolved_url
else:
log.warning('no dataset url path found in latest.xml!') | 0.001599 |
def embed_ising(source_h, source_J, embedding, target_adjacency, chain_strength=1.0):
"""Embed an Ising problem onto a target graph.
Args:
source_h (dict[variable, bias]/list[bias]):
Linear biases of the Ising problem. If a list, the list's indices are used as
variable labels.
source_J (dict[(variable, variable), bias]):
Quadratic biases of the Ising problem.
embedding (dict):
Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...},
where s is a source-model variable and t is a target-model variable.
target_adjacency (dict/:class:`networkx.Graph`):
Adjacency of the target graph as a dict of form {t: Nt, ...},
where t is a target-graph variable and Nt is its set of neighbours.
chain_strength (float, optional):
Magnitude of the quadratic bias (in SPIN-space) applied between variables to form a chain. Note
that the energy penalty of chain breaks is 2 * `chain_strength`.
Returns:
tuple: A 2-tuple:
dict[variable, bias]: Linear biases of the target Ising problem.
dict[(variable, variable), bias]: Quadratic biases of the target Ising problem.
Examples:
This example embeds a fully connected :math:`K_3` graph onto a square target graph.
Embedding is accomplished by an edge contraction operation on the target graph: target-nodes
2 and 3 are chained to represent source-node c.
>>> import dimod
>>> import networkx as nx
>>> # Ising problem for a triangular source graph
>>> h = {}
>>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}
>>> # Target graph is a square graph
>>> target = nx.cycle_graph(4)
>>> # Embedding from source to target graph
>>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
>>> # Embed the Ising problem
>>> target_h, target_J = dimod.embed_ising(h, J, embedding, target)
>>> target_J[(0, 1)] == J[('a', 'b')]
True
>>> target_J # doctest: +SKIP
{(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0}
This example embeds a fully connected :math:`K_3` graph onto the target graph
of a dimod reference structured sampler, `StructureComposite`, using the dimod reference
`ExactSolver` sampler with a square graph specified. Target-nodes 2 and 3 are chained to
represent source-node c.
>>> import dimod
>>> # Ising problem for a triangular source graph
>>> h = {}
>>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}
>>> # Structured dimod sampler with a structure defined by a square graph
>>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)])
>>> # Embedding from source to target graph
>>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
>>> # Embed the Ising problem
>>> target_h, target_J = dimod.embed_ising(h, J, embedding, sampler.adjacency)
>>> # Sample
>>> samples = sampler.sample_ising(target_h, target_J)
>>> for sample in samples.samples(n=3, sorted_by='energy'): # doctest: +SKIP
... print(sample)
...
{0: 1, 1: -1, 2: -1, 3: -1}
{0: 1, 1: 1, 2: -1, 3: -1}
{0: -1, 1: 1, 2: -1, 3: -1}
"""
source_bqm = dimod.BinaryQuadraticModel.from_ising(source_h, source_J)
target_bqm = embed_bqm(source_bqm, embedding, target_adjacency, chain_strength=chain_strength)
target_h, target_J, __ = target_bqm.to_ising()
return target_h, target_J | 0.004586 |
def trajectory(self,
horizon: int,
initial_state: Optional[StateTensor] = None) -> TrajectoryOutput:
'''Returns the ops for the trajectory generation with given `horizon`
and `initial_state`.
The simulation returns states, actions and interms as a
sequence of tensors (i.e., all representations are factored).
The reward is a batch sized tensor.
        The trajectory output is a tuple: (initial_state, states, actions, interms, rewards).
If initial state is None, use default compiler's initial state.
Note:
All tensors have shape: (batch_size, horizon, fluent_shape).
Except initial state that has shape: (batch_size, fluent_shape).
Args:
horizon (int): The number of simulation timesteps.
initial_state (Optional[Sequence[tf.Tensor]]): The initial state tensors.
Returns:
Tuple[StateTensor, StatesTensor, ActionsTensor, IntermsTensor, tf.Tensor]: Trajectory output tuple.
'''
if initial_state is None:
initial_state = self._cell.initial_state()
with self.graph.as_default():
self.inputs = self.timesteps(horizon)
outputs, _ = tf.nn.dynamic_rnn(
self._cell,
self.inputs,
initial_state=initial_state,
dtype=tf.float32,
scope="trajectory")
states, actions, interms, rewards = outputs
# fluent types
state_dtype = map(rddl2tf.utils.range_type_to_dtype, self._cell._compiler.rddl.state_range_type)
states = self._output(states, state_dtype)
interm_dtype = map(rddl2tf.utils.range_type_to_dtype, self._cell._compiler.rddl.interm_range_type)
interms = self._output(interms, interm_dtype)
action_dtype = map(rddl2tf.utils.range_type_to_dtype, self._cell._compiler.rddl.action_range_type)
actions = self._output(actions, action_dtype)
outputs = (initial_state, states, actions, interms, rewards)
return outputs | 0.004535 |
def show_raslog_output_show_all_raslog_raslog_entries_index(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_raslog = ET.Element("show_raslog")
config = show_raslog
output = ET.SubElement(show_raslog, "output")
show_all_raslog = ET.SubElement(output, "show-all-raslog")
raslog_entries = ET.SubElement(show_all_raslog, "raslog-entries")
index = ET.SubElement(raslog_entries, "index")
index.text = kwargs.pop('index')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003247 |
def old_roles_manager(self):
'''Return old roles, grouped first by term, then by chamber,
then by type.'''
wrapper = self._old_role_wrapper
chamber_getter = operator.methodcaller('get', 'chamber')
for term, roles in self.get('old_roles', {}).items():
chamber_roles = defaultdict(lambda: defaultdict(list))
for chamber, roles in itertools.groupby(roles, chamber_getter):
for role in roles:
role = wrapper(role)
typeslug = role['type'].lower().replace(' ', '_')
chamber_roles[chamber][typeslug].append(role)
yield term, chamber_roles | 0.002928 |
def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='',
python2_bin='python2', python3_bin='python3'):
'''
Generate the salt-min tarball and print the location of the tarball
Optional additional mods to include (e.g. mako) can be supplied as a comma
delimited string. Permits forcing an overwrite of the output file as well.
CLI Example:
.. code-block:: bash
salt-run min.generate
salt-run min.generate mako
salt-run min.generate mako,wempy 1
salt-run min.generate overwrite=1
'''
mindir = os.path.join(cachedir, 'min')
if not os.path.isdir(mindir):
os.makedirs(mindir)
mintar = os.path.join(mindir, 'min.tgz')
minver = os.path.join(mindir, 'version')
pyminver = os.path.join(mindir, '.min-gen-py-version')
salt_call = os.path.join(mindir, 'salt-call')
with salt.utils.files.fopen(salt_call, 'wb') as fp_:
fp_.write(_get_salt_call())
if os.path.isfile(mintar):
if not overwrite:
if os.path.isfile(minver):
with salt.utils.files.fopen(minver) as fh_:
overwrite = fh_.read() != salt.version.__version__
if overwrite is False and os.path.isfile(pyminver):
with salt.utils.files.fopen(pyminver) as fh_:
overwrite = fh_.read() != str(sys.version_info[0]) # future lint: disable=blacklisted-function
else:
overwrite = True
if overwrite:
try:
os.remove(mintar)
except OSError:
pass
else:
return mintar
if _six.PY3:
# Let's check for the minimum python 2 version requirement, 2.6
py_shell_cmd = (
python2_bin + ' -c \'from __future__ import print_function; import sys; '
'print("{0}.{1}".format(*(sys.version_info[:2])));\''
)
cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True)
stdout, _ = cmd.communicate()
if cmd.returncode == 0:
py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.'))
if py2_version < (2, 6):
# Bail!
raise salt.exceptions.SaltSystemExit(
                    'The minimum required python version to run salt-ssh is "2.6". '
'The version reported by "{0}" is "{1}". Please try "salt-ssh '
'--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin,
stdout.strip())
)
elif sys.version_info < (2, 6):
        # Bail! Though, how did we reach this far in the first place.
raise salt.exceptions.SaltSystemExit(
'The minimum required python version to run salt-ssh is "2.6".'
)
tops_py_version_mapping = {}
tops = get_tops(extra_mods=extra_mods, so_mods=so_mods)
if _six.PY2:
tops_py_version_mapping['2'] = tops
else:
tops_py_version_mapping['3'] = tops
    # TODO: Consider putting known py2 and py3 compatible libs in its own shareable directory.
# This would reduce the min size.
if _six.PY2 and sys.version_info[0] == 2:
# Get python 3 tops
py_shell_cmd = (
python3_bin + ' -c \'import sys; import json; import salt.utils.thin; '
'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' '
'\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods}))
)
cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = cmd.communicate()
if cmd.returncode == 0:
try:
tops = salt.utils.json.loads(stdout)
tops_py_version_mapping['3'] = tops
except ValueError:
pass
if _six.PY3 and sys.version_info[0] == 3:
# Get python 2 tops
py_shell_cmd = (
python2_bin + ' -c \'from __future__ import print_function; '
'import sys; import json; import salt.utils.thin; '
'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))), ensure_ascii=False)); exit(0);\' '
'\'{0}\''.format(salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods}))
)
cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = cmd.communicate()
if cmd.returncode == 0:
try:
tops = salt.utils.json.loads(stdout.decode('utf-8'))
tops_py_version_mapping['2'] = tops
except ValueError:
pass
tfp = tarfile.open(mintar, 'w:gz', dereference=True)
try: # cwd may not exist if it was removed but salt was run from it
start_dir = os.getcwd()
except OSError:
start_dir = None
tempdir = None
# This is the absolute minimum set of files required to run salt-call
min_files = (
'salt/__init__.py',
'salt/utils',
'salt/utils/__init__.py',
'salt/utils/atomicfile.py',
'salt/utils/validate',
'salt/utils/validate/__init__.py',
'salt/utils/validate/path.py',
'salt/utils/decorators',
'salt/utils/decorators/__init__.py',
'salt/utils/cache.py',
'salt/utils/xdg.py',
'salt/utils/odict.py',
'salt/utils/minions.py',
'salt/utils/dicttrim.py',
'salt/utils/sdb.py',
'salt/utils/migrations.py',
'salt/utils/files.py',
'salt/utils/parsers.py',
'salt/utils/locales.py',
'salt/utils/lazy.py',
'salt/utils/s3.py',
'salt/utils/dictupdate.py',
'salt/utils/verify.py',
'salt/utils/args.py',
'salt/utils/kinds.py',
'salt/utils/xmlutil.py',
'salt/utils/debug.py',
'salt/utils/jid.py',
'salt/utils/openstack',
'salt/utils/openstack/__init__.py',
'salt/utils/openstack/swift.py',
'salt/utils/asynchronous.py',
'salt/utils/process.py',
'salt/utils/jinja.py',
'salt/utils/rsax931.py',
'salt/utils/context.py',
'salt/utils/minion.py',
'salt/utils/error.py',
'salt/utils/aws.py',
'salt/utils/timed_subprocess.py',
'salt/utils/zeromq.py',
'salt/utils/schedule.py',
'salt/utils/url.py',
'salt/utils/yamlencoding.py',
'salt/utils/network.py',
'salt/utils/http.py',
'salt/utils/gzip_util.py',
'salt/utils/vt.py',
'salt/utils/templates.py',
'salt/utils/aggregation.py',
'salt/utils/yaml.py',
'salt/utils/yamldumper.py',
'salt/utils/yamlloader.py',
'salt/utils/event.py',
'salt/utils/state.py',
'salt/serializers',
'salt/serializers/__init__.py',
'salt/serializers/yamlex.py',
'salt/template.py',
'salt/_compat.py',
'salt/loader.py',
'salt/client',
'salt/client/__init__.py',
'salt/ext',
'salt/ext/__init__.py',
'salt/ext/six.py',
'salt/ext/ipaddress.py',
'salt/version.py',
'salt/syspaths.py',
'salt/defaults',
'salt/defaults/__init__.py',
'salt/defaults/exitcodes.py',
'salt/renderers',
'salt/renderers/__init__.py',
'salt/renderers/jinja.py',
'salt/renderers/yaml.py',
'salt/modules',
'salt/modules/__init__.py',
'salt/modules/test.py',
'salt/modules/selinux.py',
'salt/modules/cmdmod.py',
'salt/modules/saltutil.py',
'salt/minion.py',
'salt/pillar',
'salt/pillar/__init__.py',
'salt/textformat.py',
'salt/log',
'salt/log/__init__.py',
'salt/log/handlers',
'salt/log/handlers/__init__.py',
'salt/log/mixins.py',
'salt/log/setup.py',
'salt/cli',
'salt/cli/__init__.py',
'salt/cli/caller.py',
'salt/cli/daemons.py',
'salt/cli/salt.py',
'salt/cli/call.py',
'salt/fileserver',
'salt/fileserver/__init__.py',
'salt/transport',
'salt/transport/__init__.py',
'salt/transport/client.py',
'salt/exceptions.py',
'salt/grains',
'salt/grains/__init__.py',
'salt/grains/extra.py',
'salt/scripts.py',
'salt/state.py',
'salt/fileclient.py',
'salt/crypt.py',
'salt/config.py',
'salt/beacons',
'salt/beacons/__init__.py',
'salt/payload.py',
'salt/output',
'salt/output/__init__.py',
'salt/output/nested.py',
)
for py_ver, tops in _six.iteritems(tops_py_version_mapping):
for top in tops:
base = os.path.basename(top)
top_dirname = os.path.dirname(top)
if os.path.isdir(top_dirname):
os.chdir(top_dirname)
else:
# This is likely a compressed python .egg
tempdir = tempfile.mkdtemp()
egg = zipfile.ZipFile(top_dirname)
egg.extractall(tempdir)
top = os.path.join(tempdir, base)
os.chdir(tempdir)
if not os.path.isdir(top):
# top is a single file module
tfp.add(base, arcname=os.path.join('py{0}'.format(py_ver), base))
continue
for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True):
for name in files:
if name.endswith(('.pyc', '.pyo')):
continue
if root.startswith('salt') and os.path.join(root, name) not in min_files:
continue
tfp.add(os.path.join(root, name),
arcname=os.path.join('py{0}'.format(py_ver), root, name))
if tempdir is not None:
shutil.rmtree(tempdir)
tempdir = None
os.chdir(mindir)
tfp.add('salt-call')
with salt.utils.files.fopen(minver, 'w+') as fp_:
fp_.write(salt.version.__version__)
with salt.utils.files.fopen(pyminver, 'w+') as fp_:
fp_.write(str(sys.version_info[0])) # future lint: disable=blacklisted-function
os.chdir(os.path.dirname(minver))
tfp.add('version')
tfp.add('.min-gen-py-version')
if start_dir:
os.chdir(start_dir)
tfp.close()
return mintar | 0.002147 |
def _calculate_conversion(hdr):
"""Calculate the conversion factor.
Returns
-------
conv_factor : numpy.ndarray
channel-long vector with the channel-specific conversion factor
Notes
-----
Final units are microvolts
It should include all the headbox versions apart from 5 because it depends
on subversion.
"""
discardbits = hdr['discardbits']
n_chan = hdr['num_channels']
if hdr['headbox_type'][0] in (1, 3):
# all channels
factor = ones((n_chan)) * (8711. / (2 ** 21 - 0.5)) * 2 ** discardbits
elif hdr['headbox_type'][0] == 4:
# 0 - 23
ch1 = ones((24)) * (8711. / (2 ** 21 - 0.5)) * 2 ** discardbits
# 24 - 27
ch2 = ones((4)) * ((5000000. / (2 ** 10 - 0.5)) / (2 ** 6)) * 2 ** discardbits
factor = concatenate((ch1, ch2))
elif hdr['headbox_type'][0] == 6:
# 0 - 31
ch1 = ones((32)) * (8711. / (2 ** 21 - 0.5)) * 2 ** discardbits
# 32 - 35
ch2 = ones((4)) * ((5000000. / (2 ** 10 - 0.5)) / (2 ** 6)) * 2 ** discardbits
factor = concatenate((ch1, ch2))
elif hdr['headbox_type'][0] == 8:
# 0 - 24
ch1 = ones((25)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits
# 25 - 26
ch2 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits
factor = concatenate((ch1, ch2))
elif hdr['headbox_type'][0] == 9:
# 0 - 32
ch1 = ones((33)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits
# 33 - 34
ch2 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits
factor = concatenate((ch1, ch2))
elif hdr['headbox_type'][0] == 14:
# 0 - 37
ch1 = ones((38)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits
# 38 - 47
ch2 = ones((10)) * ((10800000 / 65536) / (2 ** 6)) * 2 ** discardbits
# 48-49
ch3 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits
factor = concatenate((ch1, ch2, ch3))
elif hdr['headbox_type'][0] == 15:
# 0 - 23
ch1 = ones((24)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits
# 24 - 27 (as above)
ch2 = ones((4)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits
# 28 - 31 (note 10000000 instead of 10800000)
ch3 = ones((4)) * ((10000000 / 65536) / (2 ** 6)) * 2 ** discardbits
# 32-33
ch4 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits
factor = concatenate((ch1, ch2, ch3, ch4))
elif hdr['headbox_type'][0] == 17:
# 0 - 39
ch1 = ones((40)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits
# 40 - 43
ch2 = ones((4)) * ((10800000 / 65536) / (2 ** 6)) * 2 ** discardbits
# 44 - 45
ch3 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits
factor = concatenate((ch1, ch2, ch3))
elif hdr['headbox_type'][0] == 19:
# all channels
factor = ones((n_chan)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits
elif hdr['headbox_type'][0] == 21:
# 0 - 127
ch1 = ones((128)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits
# 128 - 129
ch2 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits
# 130 - 255
ch3 = ones((126)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits
factor = concatenate((ch1, ch2, ch3))
elif hdr['headbox_type'][0] == 22:
# 0 - 31
ch1 = ones((32)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits
# 32 - 39
ch2 = ones((8)) * ((10800000. / 65536.) / (2 ** 6)) * 2 ** discardbits
# 40 - 41
ch3 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits
# 42
ch4 = ones((1)) * ((10800000. / 65536.) / (2 ** 6)) * 2 ** discardbits
factor = concatenate((ch1, ch2, ch3, ch4))
elif hdr['headbox_type'][0] == 23:
# 0 - 31
ch1 = ones((32)) * (8711. / ((2 ** 21) - 0.5)) * 2 ** discardbits
# 32 - 35
ch2 = ones((4)) * ((10800000. / 65536.) / (2 ** 6)) * 2 ** discardbits
# 36 - 37
ch3 = ones((2)) * (1 / (2 ** 6)) * 2 ** discardbits
# 38
ch4 = ones((1)) * ((10800000. / 65536.) / (2 ** 6)) * 2 ** discardbits
factor = concatenate((ch1, ch2, ch3, ch4))
else:
raise NotImplementedError('Implement conversion factor for headbox ' +
str(hdr['headbox_type'][0]))
return factor[:n_chan] | 0.00091 |
def display_assignment_changes(plan_details, to_log=True):
"""Display current and proposed changes in
topic-partition to replica layout over brokers.
"""
curr_plan_list, new_plan_list, total_changes = plan_details
action_cnt = '\n[INFO] Total actions required {0}'.format(total_changes)
_log_or_display(to_log, action_cnt)
action_cnt = (
'[INFO] Total actions that will be executed {0}'
.format(len(new_plan_list))
)
_log_or_display(to_log, action_cnt)
changes = ('[INFO] Proposed Changes in current cluster-layout:\n')
_log_or_display(to_log, changes)
tp_str = 'Topic - Partition'
curr_repl_str = 'Previous-Assignment'
new_rep_str = 'Proposed-Assignment'
tp_list = [tp_repl[0] for tp_repl in curr_plan_list]
# Display heading
msg = '=' * 80
_log_or_display(to_log, msg)
row = (
'{tp:^30s}: {curr_rep_str:^20s} ==> {new_rep_str:^20s}' .format(
tp=tp_str,
curr_rep_str=curr_repl_str,
new_rep_str=new_rep_str,
)
)
_log_or_display(to_log, row)
msg = '=' * 80
_log_or_display(to_log, msg)
# Display each topic-partition list with changes
tp_list_sorted = sorted(tp_list, key=lambda tp: (tp[0], tp[1]))
for tp in tp_list_sorted:
curr_repl = [
tp_repl[1] for tp_repl in curr_plan_list if tp_repl[0] == tp
][0]
proposed_repl = [
tp_repl[1] for tp_repl in new_plan_list if tp_repl[0] == tp
][0]
tp_str = '{topic} - {partition:<2d}'.format(topic=tp[0], partition=tp[1])
row = (
'{tp:<30s}: {curr_repl:<20s} ==> {proposed_repl:<20s}'.format(
tp=tp_str,
curr_repl=curr_repl,
proposed_repl=proposed_repl,
)
)
_log_or_display(to_log, row) | 0.001075 |
def _read_color_image(self):
""" Reads a color image from the device """
# read raw buffer
im_arr = self._color_stream.read_frame()
raw_buf = im_arr.get_buffer_as_triplet()
r_array = np.array([raw_buf[i][0] for i in range(PrimesenseSensor.COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])
g_array = np.array([raw_buf[i][1] for i in range(PrimesenseSensor.COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])
b_array = np.array([raw_buf[i][2] for i in range(PrimesenseSensor.COLOR_IM_WIDTH * PrimesenseSensor.COLOR_IM_HEIGHT)])
# convert to uint8 image
color_image = np.zeros([PrimesenseSensor.COLOR_IM_HEIGHT, PrimesenseSensor.COLOR_IM_WIDTH, 3])
color_image[:,:,0] = r_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
PrimesenseSensor.COLOR_IM_WIDTH)
color_image[:,:,1] = g_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
PrimesenseSensor.COLOR_IM_WIDTH)
color_image[:,:,2] = b_array.reshape(PrimesenseSensor.COLOR_IM_HEIGHT,
PrimesenseSensor.COLOR_IM_WIDTH)
if self._flip_images:
color_image = np.flipud(color_image.astype(np.uint8))
else:
color_image = np.fliplr(color_image.astype(np.uint8))
return ColorImage(color_image, frame=self._frame) | 0.010338 |
def plotly(
data: typing.Union[dict, list] = None,
layout: dict = None,
scale: float = 0.5,
figure: dict = None,
static: bool = False
):
"""
Creates a Plotly plot in the display with the specified data and
layout.
:param data:
The Plotly trace data to be plotted.
:param layout:
The layout data used for the plot.
:param scale:
The display scale with units of fractional screen height. A value
of 0.5 constrains the output to a maximum height equal to half the
height of browser window when viewed. Values below 1.0 are usually
recommended so the entire output can be viewed without scrolling.
:param figure:
In cases where you need to create a figure instead of separate data
and layout information, you can pass the figure here and leave the
data and layout values as None.
:param static:
If true, the plot will be created without interactivity.
This is useful if you have a lot of plots in your notebook.
"""
r = _get_report()
if not figure and not isinstance(data, (list, tuple)):
data = [data]
if 'plotly' not in r.library_includes:
r.library_includes.append('plotly')
r.append_body(render.plotly(
data=data,
layout=layout,
scale=scale,
figure=figure,
static=static
))
r.stdout_interceptor.write_source('[ADDED] Plotly plot\n') | 0.000676 |
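# Hedged usage sketch (trace and layout contents are illustrative only): a typical
# call passes Plotly trace dicts plus an optional layout and display scale.
# plotly(
#     data={'type': 'scatter', 'x': [1, 2, 3], 'y': [2, 4, 8]},
#     layout={'title': 'Growth'},
#     scale=0.6,
# )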
def CreateMenuBar(self):
"""Create our menu-bar for triggering operations"""
menubar = wx.MenuBar()
menu = wx.Menu()
menu.Append(ID_OPEN, _('&Open Profile'), _('Open a cProfile file'))
menu.Append(ID_OPEN_MEMORY, _('Open &Memory'), _('Open a Meliae memory-dump file'))
menu.AppendSeparator()
menu.Append(ID_EXIT, _('&Close'), _('Close this RunSnakeRun window'))
menubar.Append(menu, _('&File'))
menu = wx.Menu()
# self.packageMenuItem = menu.AppendCheckItem(
# ID_PACKAGE_VIEW, _('&File View'),
# _('View time spent by package/module')
# )
self.percentageMenuItem = menu.AppendCheckItem(
ID_PERCENTAGE_VIEW, _('&Percentage View'),
_('View time spent as percent of overall time')
)
self.rootViewItem = menu.Append(
ID_ROOT_VIEW, _('&Root View (Home)'),
_('View the root of the tree')
)
self.backViewItem = menu.Append(
ID_BACK_VIEW, _('&Back'), _('Go back in your viewing history')
)
self.upViewItem = menu.Append(
ID_UP_VIEW, _('&Up'),
_('Go "up" to the parent of this node with the largest cumulative total')
)
self.moreSquareViewItem = menu.AppendCheckItem(
ID_MORE_SQUARE, _('&Hierarchic Squares'),
_('Toggle hierarchic squares in the square-map view')
)
# This stuff isn't really all that useful for profiling,
# it's more about how to generate graphics to describe profiling...
self.deeperViewItem = menu.Append(
ID_DEEPER_VIEW, _('&Deeper'), _('View deeper squaremap views')
)
self.shallowerViewItem = menu.Append(
ID_SHALLOWER_VIEW, _('&Shallower'), _('View shallower squaremap views')
)
# wx.ToolTip.Enable(True)
menubar.Append(menu, _('&View'))
        self.viewTypeMenu = wx.Menu()
menubar.Append(self.viewTypeMenu, _('View &Type'))
self.SetMenuBar(menubar)
wx.EVT_MENU(self, ID_EXIT, lambda evt: self.Close(True))
wx.EVT_MENU(self, ID_OPEN, self.OnOpenFile)
wx.EVT_MENU(self, ID_OPEN_MEMORY, self.OnOpenMemory)
wx.EVT_MENU(self, ID_PERCENTAGE_VIEW, self.OnPercentageView)
wx.EVT_MENU(self, ID_UP_VIEW, self.OnUpView)
wx.EVT_MENU(self, ID_DEEPER_VIEW, self.OnDeeperView)
wx.EVT_MENU(self, ID_SHALLOWER_VIEW, self.OnShallowerView)
wx.EVT_MENU(self, ID_ROOT_VIEW, self.OnRootView)
wx.EVT_MENU(self, ID_BACK_VIEW, self.OnBackView)
wx.EVT_MENU(self, ID_MORE_SQUARE, self.OnMoreSquareToggle) | 0.003698 |
def post_flag_list(self, creator_id=None, creator_name=None, post_id=None,
reason_matches=None, is_resolved=None, category=None):
"""Function to flag a post (Requires login).
Parameters:
creator_id (int): The user id of the flag's creator.
creator_name (str): The name of the flag's creator.
            post_id (int): The post id of the flag.
"""
params = {
'search[creator_id]': creator_id,
'search[creator_name]': creator_name,
'search[post_id]': post_id,
}
return self._get('post_flags.json', params, auth=True) | 0.004608 |
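# Hedged usage sketch (client construction is an assumption): list the flags a
# given user has created on a given post.
# flags = client.post_flag_list(creator_name='moderator', post_id=1234)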
def get_available_user_FIELD_transitions(instance, user, field):
"""
List of transitions available in current model state
with all conditions met and user have rights on it
"""
for transition in get_available_FIELD_transitions(instance, field):
if transition.has_perm(instance, user):
yield transition | 0.002933 |
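# Hedged usage sketch (django-fsm style; the model and field names are assumptions):
# field = Article._meta.get_field('state')
# for transition in get_available_user_FIELD_transitions(article, request.user, field):
#     print(transition.name)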
def save(self, filename_or_handle):
'''Save the state of this network to a pickle file on disk.
Parameters
----------
filename_or_handle : str or file handle
Save the state of this network to a pickle file. If this parameter
is a string, it names the file where the pickle will be saved. If it
is a file-like object, this object will be used for writing the
pickle. If the filename ends in ".gz" then the output will
automatically be gzipped.
'''
if isinstance(filename_or_handle, util.basestring):
opener = gzip.open if filename_or_handle.lower().endswith('.gz') else open
handle = opener(filename_or_handle, 'wb')
else:
handle = filename_or_handle
pickle.dump(self, handle, -1)
if isinstance(filename_or_handle, util.basestring):
handle.close()
util.log('saved model to {}', filename_or_handle) | 0.004061 |
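# Hedged usage sketch: both call forms rely only on the behaviour shown above.
# net.save('model.pkl.gz')              # gzip inferred from the ".gz" extension
# with open('model.pkl', 'wb') as handle:
#     net.save(handle)                  # caller keeps ownership of the open handle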
def netconf_config_change_edit_operation(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_config_change = ET.SubElement(config, "netconf-config-change", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
edit = ET.SubElement(netconf_config_change, "edit")
operation = ET.SubElement(edit, "operation")
operation.text = kwargs.pop('operation')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.005618 |
def interface(self):
"""
Public method for handling service command argument.
"""
# Possible control arguments
controls = {
'start': self.do_start,
'stop': self.do_stop,
'status': self.do_status,
'restart': self.do_restart,
'reload': self.do_restart
}
# Process the control argument
try:
controls[self.command]()
except KeyError:
self.write_stdout('Usage: {} {{start|stop|status|restart|reload}}'.format(self.name), 3)
exit(0) | 0.009804 |
def _handle_response(self, response):
"""Internal helper for handling API responses from the Coinbase server.
Raises the appropriate exceptions when necessary; otherwise, returns the
response.
"""
if not str(response.status_code).startswith('2'):
raise build_api_error(response)
return response | 0.008451 |
def _get_team_results(self, away_name, away_abbr, away_score, home_name,
home_abbr, home_score):
"""
Determine the winner and loser of the game.
If the game has been completed and sports-reference has been updated
with the score, determine the winner and loser and return their
respective names and abbreviations.
Parameters
----------
away_name : string
The name of the away team, such as 'Indiana'.
away_abbr : string
The abbreviation of the away team, such as 'indiana'.
away_score : int
The number of points the away team scored, or None if the game
hasn't completed yet.
        home_name : string
The name of the home team, such as 'Purdue'.
home_abbr : string
The abbreviation of the home team, such as 'purdue'.
home_score : int
The number of points the home team scored, or None if the game
hasn't completed yet.
Returns
-------
tuple, tuple
Returns two tuples, each containing the name followed by the
abbreviation of the winning and losing team, respectively. If the
game doesn't have a score associated with it yet, both tuples will
be None.
"""
if not away_score or not home_score:
return None, None
if away_score > home_score:
return (away_name, away_abbr), (home_name, home_abbr)
else:
return (home_name, home_abbr), (away_name, away_abbr) | 0.001856 |
def _process_bulk_chunk(client, bulk_actions, raise_on_exception=True, raise_on_error=True, **kwargs):
"""
Send a bulk request to elasticsearch and process the output.
"""
# if raise on error is set, we need to collect errors per chunk before raising them
errors = []
try:
# send the actual request
resp = client.bulk('\n'.join(bulk_actions) + '\n', **kwargs)
except TransportError as e:
# default behavior - just propagate exception
if raise_on_exception:
raise e
# if we are not propagating, mark all actions in current chunk as failed
err_message = str(e)
exc_errors = []
        # deserialize the data back, this is expensive but only run on
# errors if raise_on_exception is false, so shouldn't be a real
# issue
bulk_data = iter(map(client.transport.serializer.loads, bulk_actions))
while True:
try:
# collect all the information about failed actions
action = next(bulk_data)
op_type, action = action.popitem()
info = {"error": err_message, "status": e.status_code, "exception": e}
if op_type != 'delete':
info['data'] = next(bulk_data)
info.update(action)
exc_errors.append({op_type: info})
except StopIteration:
break
# emulate standard behavior for failed actions
if raise_on_error:
raise BulkIndexError('%i document(s) failed to index.' % len(exc_errors), exc_errors)
else:
for err in exc_errors:
yield False, err
return
    # go through request-response pairs and detect failures
for op_type, item in map(methodcaller('popitem'), resp['items']):
ok = 200 <= item.get('status', 500) < 300
if not ok and raise_on_error:
errors.append({op_type: item})
if ok or not errors:
# if we are not just recording all errors to be able to raise
# them all at once, yield items individually
yield ok, {op_type: item}
if errors:
raise BulkIndexError('%i document(s) failed to index.' % len(errors), errors) | 0.003082 |
def percentage(self):
"""Returns the progress as a percentage."""
if self.currval >= self.maxval:
return 100.0
return self.currval * 100.0 / self.maxval | 0.010638 |
def _get_source_snapshots(self, snapshot, fallback_self=False):
"""
Get list of source snapshot names of given snapshot
TODO: we have to decide by description at the moment
"""
if not snapshot:
return []
source_snapshots = re.findall(r"'([\w\d\.-]+)'", snapshot['Description'])
if not source_snapshots and fallback_self:
source_snapshots = [snapshot['Name']]
source_snapshots.sort()
return source_snapshots | 0.005941 |
def complete_tree(self, text, line, begidx, endidx):
"""completion for ls command"""
options = self.TREE_OPTS
if not text:
completions = options
else:
completions = [f
for f in options
if f.startswith(text)
]
return completions | 0.005348 |
def check_entry(self, entries, *args, **kwargs):
"""
With a list of entries, check each entry against every other
"""
verbosity = kwargs.get('verbosity', 1)
user_total_overlaps = 0
user = ''
for index_a, entry_a in enumerate(entries):
# Show the name the first time through
if index_a == 0:
if args and verbosity >= 1 or verbosity >= 2:
self.show_name(entry_a.user)
user = entry_a.user
for index_b in range(index_a, len(entries)):
entry_b = entries[index_b]
if entry_a.check_overlap(entry_b):
user_total_overlaps += 1
self.show_overlap(entry_a, entry_b, verbosity=verbosity)
if user_total_overlaps and user and verbosity >= 1:
overlap_data = {
'first': user.first_name,
'last': user.last_name,
'total': user_total_overlaps,
}
self.stdout.write('Total overlapping entries for user ' +
'%(first)s %(last)s: %(total)d' % overlap_data)
return user_total_overlaps | 0.001656 |
def validar(self, id_vlan):
"""Validates ACL - IPv4 of VLAN from its identifier.
Assigns 1 to 'acl_valida'.
:param id_vlan: Identifier of the Vlan. Integer value and greater than zero.
:return: None
:raise InvalidParameterError: Vlan identifier is null and invalid.
:raise VlanNaoExisteError: Vlan not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'The identifier of Vlan is invalid or was not informed.')
url = 'vlan/' + str(id_vlan) + '/validate/' + IP_VERSION.IPv4[0] + '/'
code, xml = self.submit(None, 'PUT', url)
return self.response(code, xml) | 0.003509 |
def _AddInitMethod(message_descriptor, cls):
"""Adds an __init__ method to cls."""
def _GetIntegerEnumValue(enum_type, value):
"""Convert a string or integer enum value to an integer.
If the value is a string, it is converted to the enum value in
enum_type with the same name. If the value is not a string, it's
returned as-is. (No conversion or bounds-checking is done.)
"""
if isinstance(value, six.string_types):
try:
return enum_type.values_by_name[value].number
except KeyError:
raise ValueError('Enum type %s: unknown label "%s"' % (
enum_type.full_name, value))
return value
def init(self, **kwargs):
self._cached_byte_size = 0
self._cached_byte_size_dirty = len(kwargs) > 0
self._fields = {}
# Contains a mapping from oneof field descriptors to the descriptor
# of the currently set field in that oneof field.
self._oneofs = {}
# _unknown_fields is () when empty for efficiency, and will be turned into
# a list if fields are added.
self._unknown_fields = ()
self._is_present_in_parent = False
self._listener = message_listener_mod.NullMessageListener()
self._listener_for_children = _Listener(self)
for field_name, field_value in kwargs.items():
field = _GetFieldByName(message_descriptor, field_name)
if field is None:
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(message_descriptor.name, field_name))
if field_value is None:
# field=None is the same as no field at all.
continue
if field.label == _FieldDescriptor.LABEL_REPEATED:
copy = field._default_constructor(self)
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # Composite
if _IsMapField(field):
if _IsMessageMapField(field):
for key in field_value:
copy[key].MergeFrom(field_value[key])
else:
copy.update(field_value)
else:
for val in field_value:
if isinstance(val, dict):
copy.add(**val)
else:
copy.add().MergeFrom(val)
else: # Scalar
if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
field_value = [_GetIntegerEnumValue(field.enum_type, val)
for val in field_value]
copy.extend(field_value)
self._fields[field] = copy
elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
copy = field._default_constructor(self)
new_val = field_value
if isinstance(field_value, dict):
new_val = field.message_type._concrete_class(**field_value)
try:
copy.MergeFrom(new_val)
except TypeError:
_ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name)
self._fields[field] = copy
else:
if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
field_value = _GetIntegerEnumValue(field.enum_type, field_value)
try:
setattr(self, field_name, field_value)
except TypeError:
_ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name)
init.__module__ = None
init.__doc__ = None
cls.__init__ = init | 0.008793 |
def create_submission(self, token, **kwargs):
"""
        Create a new submission.
:param token: A valid token for the user in question.
:type token: string
:param uuid (optional) The uuid of the submission (must be unique)
:type uuid: string
:param name (optional) The name of the submission
:type name: string
:returns: The submission object that was created.
:rtype: dict
"""
parameters = {}
parameters['token'] = token
optional_keys = ['uuid', 'name']
for key in optional_keys:
if key in kwargs:
parameters[key] = kwargs[key]
return self.request('midas.tracker.submission.create', parameters) | 0.002577 |
def privtohex(key):
'''
Used for getting unknown input type into a private key.
For example, if you ask a user to input a private key, and they
may input hex, WIF, integer, etc. Run it through this function to
get a standardized format.
Function either outputs private key hex string or raises an
exception. It's really going to try to make any input into
a private key, so make sure that whatever you import is indeed
supposed to be a private key. For example, if you put an int in,
it will turn that into a key. Make sure you want a key when you
use this function!!!
'''
if isitint(key):
key = dechex(key,32)
else:
try:
key, z, zz = wiftohex(key)
assert len(key) == 64
except:
try:
key = unhexlify(key)
except:
try:
key1 = hexstrlify(key)
assert len(key1) == 64 or len(key1) == 66 or len(key1) == 68
if len(key1) == 68:
assert key1[-2:] == '01'
key = key1
except:
raise Exception("Cannot interpret input key.")
else:
key = hexstrlify(key)
if len(key) == 68:
assert key[-2:] == '01'
key = key[:-2]
if len(key) == 66:
key = key[2:]
assert len(key) == 64
return key | 0.004181 |
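# Hedged usage sketch (values are illustrative, not real keys):
# privtohex(1)              -> 64-character hex string, the integer zero-padded to 32 bytes
# privtohex('<WIF string>') -> the same private key normalised to 64 hex characters
# Any input it cannot interpret raises Exception("Cannot interpret input key.").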
def custom_build_class_rule(self, opname, i, token, tokens, customize):
'''
# Should the first rule be somehow folded into the 2nd one?
build_class ::= LOAD_BUILD_CLASS mkfunc
LOAD_CLASSNAME {expr}^n-1 CALL_FUNCTION_n
LOAD_CONST CALL_FUNCTION_n
build_class ::= LOAD_BUILD_CLASS mkfunc
expr
call
CALL_FUNCTION_3
'''
# FIXME: I bet this can be simplified
# look for next MAKE_FUNCTION
for i in range(i+1, len(tokens)):
if tokens[i].kind.startswith('MAKE_FUNCTION'):
break
elif tokens[i].kind.startswith('MAKE_CLOSURE'):
break
pass
assert i < len(tokens), "build_class needs to find MAKE_FUNCTION or MAKE_CLOSURE"
assert tokens[i+1].kind == 'LOAD_CONST', \
"build_class expecting CONST after MAKE_FUNCTION/MAKE_CLOSURE"
call_fn_tok = None
for i in range(i, len(tokens)):
if tokens[i].kind.startswith('CALL_FUNCTION'):
call_fn_tok = tokens[i]
break
if not call_fn_tok:
raise RuntimeError("build_class custom rule for %s needs to find CALL_FUNCTION"
% opname)
# customize build_class rule
# FIXME: What's the deal with the two rules? Different Python versions?
# Different situations? Note that the above rule is based on the CALL_FUNCTION
# token found, while this one doesn't.
if self.version < 3.6:
call_function = self.call_fn_name(call_fn_tok)
args_pos, args_kw = self.get_pos_kw(call_fn_tok)
rule = ("build_class ::= LOAD_BUILD_CLASS mkfunc %s"
"%s" % (('expr ' * (args_pos - 1) + ('kwarg ' * args_kw)),
call_function))
else:
# 3.6+ handling
call_function = call_fn_tok.kind
if call_function.startswith("CALL_FUNCTION_KW"):
self.addRule("classdef ::= build_class_kw store", nop_func)
rule = ("build_class_kw ::= LOAD_BUILD_CLASS mkfunc %sLOAD_CONST %s"
% ('expr ' * (call_fn_tok.attr - 1), call_function))
else:
call_function = self.call_fn_name(call_fn_tok)
rule = ("build_class ::= LOAD_BUILD_CLASS mkfunc %s%s"
% ('expr ' * (call_fn_tok.attr - 1), call_function))
self.addRule(rule, nop_func)
return | 0.004587 |
def __view_remove_actions(self):
"""
Removes the View actions.
"""
trace_modules_action = "Actions|Umbra|Components|addons.trace_ui|Trace Module(s)"
untrace_modules_action = "Actions|Umbra|Components|addons.trace_ui|Untrace Module(s)"
for action in (trace_modules_action, untrace_modules_action):
self.__view.removeAction(self.__engine.actions_manager.get_action(action))
self.__engine.actions_manager.unregister_action(action) | 0.00998 |
def add_kb_mapping(kb_name, key, value=""):
"""Add a new mapping to given kb.
:param kb_name: the name of the kb where to insert the new value
:param key: the key of the mapping
:param value: the value of the mapping
"""
kb = get_kb_by_name(kb_name)
if key in kb.kbrvals:
# update
kb.kbrvals[key].m_value = value
else:
# insert
kb.kbrvals.set(models.KnwKBRVAL(m_key=key, m_value=value)) | 0.002217 |
def format_and_annualise(self, raw_cov_array):
"""
Helper method which annualises the output of shrinkage calculations,
and formats the result into a dataframe
:param raw_cov_array: raw covariance matrix of daily returns
:type raw_cov_array: np.ndarray
:return: annualised covariance matrix
:rtype: pd.DataFrame
"""
assets = self.X.columns
return (
pd.DataFrame(raw_cov_array, index=assets, columns=assets) * self.frequency
) | 0.005703 |
def get_user_log(self, language='da'):
"""Get the controller state"""
payload = """<getUserLog1 xmlns="utcs" />
<getUserLog2 xmlns="utcs">0</getUserLog2>
<getUserLog3 xmlns="utcs">{language}</getUserLog3>
""".format(language=language)
xdoc = self.connection.soap_action('/ws/ConfigurationService',
'getUserLog', payload)
if xdoc:
base64data = xdoc.find('./SOAP-ENV:Body/ns1:getUserLog4/ns1:data',
IHCSoapClient.ihcns).text
if not base64data:
return False
return base64.b64decode(base64data).decode('UTF-8')
return False | 0.00266 |
def requires_swimlane_version(min_version=None, max_version=None):
"""Decorator for SwimlaneResolver methods verifying Swimlane server build version is within a given inclusive range
Raises:
InvalidVersion: Raised before decorated method call if Swimlane server version is out of provided range
ValueError: If neither min_version or max_version were provided, or if those values conflict (2.15 < 2.14)
"""
if min_version is None and max_version is None:
raise ValueError('Must provide either min_version, max_version, or both')
if min_version and max_version and compare_versions(min_version, max_version) < 0:
raise ValueError('min_version must be <= max_version ({}, {})'.format(min_version, max_version))
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
swimlane = self._swimlane
if min_version and compare_versions(min_version, swimlane.build_version, True) < 0:
raise InvalidSwimlaneBuildVersion(swimlane, min_version, max_version)
if max_version and compare_versions(swimlane.build_version, max_version, True) < 0:
raise InvalidSwimlaneBuildVersion(swimlane, min_version, max_version)
return func(self, *args, **kwargs)
return wrapper
return decorator | 0.008076 |
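# Hedged usage sketch (class and method names are assumptions): guard a resolver
# method so it only runs against supported Swimlane server builds.
# class AppAdapter(SwimlaneResolver):
#     @requires_swimlane_version(min_version='2.15')
#     def bulk_delete(self, *filters):
#         ...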
def OnUpdate(self, event):
"""Updates the toolbar states"""
attributes = event.attr
self._update_buttoncell(attributes["button_cell"])
self.Refresh()
event.Skip() | 0.009709 |
def _accumulate(data_list, no_concat=()):
"""Concatenate a list of dicts `(name, array)`.
You can specify some names which arrays should not be concatenated.
This is necessary with lists of plots with different sizes.
"""
acc = Accumulator()
for data in data_list:
for name, val in data.items():
acc.add(name, val)
out = {name: acc[name] for name in acc.names if name not in no_concat}
# Some variables should not be concatenated but should be kept as lists.
# This is when there can be several arrays of variable length (NumPy
# doesn't support ragged arrays).
out.update({name: acc.get(name) for name in no_concat})
return out | 0.001431 |
def _load_type_counts(self):
"""Load the table of frequency counts of word forms."""
rel_path = os.path.join(CLTK_DATA_DIR,
'old_english',
'model',
'old_english_models_cltk',
'data',
'oe.counts')
path = os.path.expanduser(rel_path)
self.type_counts = {}
with open(path, 'r') as infile:
lines = infile.read().splitlines()
for line in lines:
count, word = line.split()
self.type_counts[word] = int(count) | 0.023102 |
def create(self, data):
"""
Add a new store to your MailChimp account.
Error checking on the currency code verifies that it is in the correct
three-letter, all-caps format as specified by ISO 4217 but does not
check that it is a valid code as the list of valid codes changes over
time.
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"id": string*,
"list_id": string*,
"name": string*,
"currency_code": string*
}
"""
if 'id' not in data:
raise KeyError('The store must have an id')
if 'list_id' not in data:
raise KeyError('The store must have a list_id')
if 'name' not in data:
raise KeyError('The store must have a name')
if 'currency_code' not in data:
raise KeyError('The store must have a currency_code')
if not re.match(r"^[A-Z]{3}$", data['currency_code']):
raise ValueError('The currency_code must be a valid 3-letter ISO 4217 currency code')
response = self._mc_client._post(url=self._build_path(), data=data)
if response is not None:
self.store_id = response['id']
else:
self.store_id = None
return response | 0.002235 |
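# Hedged usage sketch (field values are illustrative):
# stores.create({
#     'id': 'example_store',
#     'list_id': 'abc123def4',
#     'name': 'Example Store',
#     'currency_code': 'USD',
# })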
def nextClass(self, classuri):
"""Returns the next class in the list of classes. If it's the last one, returns the first one."""
if classuri == self.classes[-1].uri:
return self.classes[0]
flag = False
for x in self.classes:
if flag == True:
return x
if x.uri == classuri:
flag = True
return None | 0.009901 |
def longitude(self):
"""
        This method gets the longitude of the listing.
        :return: The longitude as a string, or None if not found.
"""
try:
scripts = self._ad_page_content.find_all('script')
for script in scripts:
if 'longitude' in script.text:
find_list = re.findall(
r'"longitude":"([\-]?[0-9.]*[0-9]+)"', script.text)
if len(find_list) >= 1:
return find_list[0]
return None
except Exception as e:
if self._debug:
logging.error(
"Error getting longitude. Error message: " + e.args[0])
return None | 0.002861 |
def str_rstrip(x, to_strip=None):
"""Remove trailing characters from a string sample.
:param str to_strip: The string to be removed
:returns: an expression containing the modified string column.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.rstrip(to_strip='ing')
Expression = str_rstrip(text, to_strip='ing')
Length: 5 dtype: str (expression)
---------------------------------
0 Someth
1 very pretty
2 is com
3 our
4 way.
"""
# in c++ we give empty string the same meaning as None
sl = _to_string_sequence(x).rstrip('' if to_strip is None else to_strip) if to_strip != '' else x
return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl) | 0.003 |
def remove(self, email):
"""Remove a Collaborator.
Args:
str : Collaborator email address.
"""
if email in self._collaborators:
if self._collaborators[email] == ShareRequestValue.Add:
del self._collaborators[email]
else:
self._collaborators[email] = ShareRequestValue.Remove
self._dirty = True | 0.004975 |
def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='ICM'):
"""
Builds a kernel for an Intrinsic Coregionalization Model
:input_dim: Input dimensionality (does not include dimension of indices)
:num_outputs: Number of outputs
:param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B).
:type kernel: a GPy kernel
    :param W_rank: rank of the coregionalization matrix 'W' (number of columns of W)
:type W_rank: integer
"""
if kernel.input_dim != input_dim:
kernel.input_dim = input_dim
warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")
K = kernel.prod(GPy.kern.Coregionalize(1, num_outputs, active_dims=[input_dim], rank=W_rank,W=W,kappa=kappa,name='B'),name=name)
return K | 0.013699 |
def verify_integrity(self, session=None):
"""
Verifies the DagRun by checking for removed tasks or tasks that are not in the
database yet. It will set state to removed or add the task if required.
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
dag = self.get_dag()
tis = self.get_task_instances(session=session)
# check for removed or restored tasks
task_ids = []
for ti in tis:
task_ids.append(ti.task_id)
task = None
try:
task = dag.get_task(ti.task_id)
except AirflowException:
if ti.state == State.REMOVED:
pass # ti has already been removed, just ignore it
elif self.state is not State.RUNNING and not dag.partial:
self.log.warning("Failed to get task '{}' for dag '{}'. "
"Marking it as removed.".format(ti, dag))
Stats.incr(
"task_removed_from_dag.{}".format(dag.dag_id), 1, 1)
ti.state = State.REMOVED
is_task_in_dag = task is not None
should_restore_task = is_task_in_dag and ti.state == State.REMOVED
if should_restore_task:
self.log.info("Restoring task '{}' which was previously "
"removed from DAG '{}'".format(ti, dag))
Stats.incr("task_restored_to_dag.{}".format(dag.dag_id), 1, 1)
ti.state = State.NONE
# check for missing tasks
for task in six.itervalues(dag.task_dict):
if task.start_date > self.execution_date and not self.is_backfill:
continue
if task.task_id not in task_ids:
Stats.incr(
"task_instance_created-{}".format(task.__class__.__name__),
1, 1)
ti = TaskInstance(task, self.execution_date)
session.add(ti)
session.commit() | 0.001926 |
def hardware_connector_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
connector = ET.SubElement(hardware, "connector")
name = ET.SubElement(connector, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.006522 |
def get_file_versions(self, secure_data_path, limit=None, offset=None):
"""
Get versions of a particular file
This is just a shim to get_secret_versions
secure_data_path -- full path to the file in the safety deposit box
limit -- Default(100), limits how many records to be returned from the api at once.
offset -- Default(0), used for pagination. Will request records from the given offset.
"""
return self.get_secret_versions(secure_data_path, limit, offset) | 0.00759 |
async def parse_form(self, req: Request, name: str, field: Field) -> typing.Any:
"""Pull a form value from the request."""
post_data = self._cache.get("post")
if post_data is None:
self._cache["post"] = await req.post()
return core.get_value(self._cache["post"], name, field) | 0.009404 |
def autocorr_coeff(x, t, tau1, tau2):
"""Calculate the autocorrelation coefficient."""
return corr_coeff(x, x, t, tau1, tau2) | 0.007407 |
def clone_to_path(https_authenticated_url, folder, branch_or_commit=None):
"""Clone the given URL to the folder.
:param str branch_or_commit: If specified, switch to this branch. Branch must exist.
"""
_LOGGER.info("Cloning repo")
repo = Repo.clone_from(https_authenticated_url, str(folder))
# Do NOT clone and set branch at the same time, since we allow branch to be a SHA1
# And you can't clone a SHA1
if branch_or_commit:
_LOGGER.info("Checkout branch_or_commit %s", branch_or_commit)
repo.git.checkout(branch_or_commit)
_LOGGER.info("Clone success") | 0.004942 |
def mouseMoveEvent(self, event):
"""
        Detects mouse over an indicator and highlights the current scope in the
        editor (up and down decorations around the foldable text when the mouse
        is over an indicator).
:param event: event
"""
super(FoldingPanel, self).mouseMoveEvent(event)
th = TextHelper(self.editor)
line = th.line_nbr_from_position(event.pos().y())
if line >= 0:
block = FoldScope.find_parent_scope(
self.editor.document().findBlockByNumber(line-1))
if TextBlockHelper.is_fold_trigger(block):
if self._mouse_over_line is None:
# mouse enter fold scope
QApplication.setOverrideCursor(
QCursor(Qt.PointingHandCursor))
if self._mouse_over_line != block.blockNumber() and \
self._mouse_over_line is not None:
                    # fold scope changed, a previous block was highlighted so
# we quickly update our highlighting
self._mouse_over_line = block.blockNumber()
self._highlight_block(block)
else:
# same fold scope, request highlight
self._mouse_over_line = block.blockNumber()
self._highlight_runner.request_job(
self._highlight_block, block)
self._highight_block = block
else:
# no fold scope to highlight, cancel any pending requests
self._highlight_runner.cancel_requests()
self._mouse_over_line = None
QApplication.restoreOverrideCursor()
self.repaint() | 0.001132 |
def options(self, url: StrOrURL, *, allow_redirects: bool=True,
**kwargs: Any) -> '_RequestContextManager':
"""Perform HTTP OPTIONS request."""
return _RequestContextManager(
self._request(hdrs.METH_OPTIONS, url,
allow_redirects=allow_redirects,
**kwargs)) | 0.014205 |
def atleast_1d(*arrs):
r"""Convert inputs to arrays with at least one dimension.
Scalars are converted to 1-dimensional arrays, whilst other
higher-dimensional inputs are preserved. This is a thin wrapper
around `numpy.atleast_1d` to preserve units.
Parameters
----------
arrs : arbitrary positional arguments
Input arrays to be converted if necessary
Returns
-------
`pint.Quantity`
A single quantity or a list of quantities, matching the number of inputs.
"""
mags = [a.magnitude if hasattr(a, 'magnitude') else a for a in arrs]
orig_units = [a.units if hasattr(a, 'units') else None for a in arrs]
ret = np.atleast_1d(*mags)
if len(mags) == 1:
if orig_units[0] is not None:
return units.Quantity(ret, orig_units[0])
else:
return ret
return [units.Quantity(m, u) if u is not None else m for m, u in zip(ret, orig_units)] | 0.003171 |
def get_common_course_modes(self, course_run_ids):
"""
Find common course modes for a set of course runs.
This function essentially returns an intersection of types of seats available
for each course run.
Arguments:
course_run_ids(Iterable[str]): Target Course run IDs.
Returns:
set: course modes found in all given course runs
Examples:
# run1 has prof and audit, run 2 has the same
get_common_course_modes(['course-v1:run1', 'course-v1:run2'])
{'prof', 'audit'}
# run1 has prof and audit, run 2 has only prof
get_common_course_modes(['course-v1:run1', 'course-v1:run2'])
{'prof'}
# run1 has prof and audit, run 2 honor
get_common_course_modes(['course-v1:run1', 'course-v1:run2'])
{}
# run1 has nothing, run2 has prof
get_common_course_modes(['course-v1:run1', 'course-v1:run2'])
{}
# run1 has prof and audit, run 2 prof, run3 has audit
get_common_course_modes(['course-v1:run1', 'course-v1:run2', 'course-v1:run3'])
{}
# run1 has nothing, run 2 prof, run3 has prof
get_common_course_modes(['course-v1:run1', 'course-v1:run2', 'course-v1:run3'])
{}
"""
available_course_modes = None
for course_run_id in course_run_ids:
course_run = self.get_course_run(course_run_id) or {}
course_run_modes = {seat.get('type') for seat in course_run.get('seats', [])}
if available_course_modes is None:
available_course_modes = course_run_modes
else:
available_course_modes &= course_run_modes
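            # Stop early: once the intersection is empty it can never grow back.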
if not available_course_modes:
return available_course_modes
return available_course_modes | 0.003127 |
def blastnprep(self):
"""Setup blastn analyses"""
# Populate threads for each gene, genome combination
for sample in self.metadata:
if sample.general.bestassemblyfile != 'NA':
#
# sample[self.analysistype].alleleresults = GenObject()
sample[self.analysistype].closealleles = dict()
sample[self.analysistype].mismatches = dict()
sample[self.analysistype].alignmentlength = dict()
sample[self.analysistype].subjectlength = dict()
sample[self.analysistype].queryid = dict()
sample[self.analysistype].start = dict()
sample[self.analysistype].end = dict()
sample[self.analysistype].queryseq = dict()
if type(sample[self.analysistype].allelenames) == list:
for allele in sample[self.analysistype].combinedalleles:
# Add each fasta/allele file combination to the threads
self.runblast(sample.general.bestassemblyfile, allele, sample) | 0.002712 |
def AppMoVCopeland(profile, alpha=0.5):
"""
Returns an integer that is equal to the margin of victory of the election profile, that is,
the smallest number k such that changing k votes can change the winners.
:ivar Profile profile: A Profile object that represents an election profile.
"""
# Currently, we expect the profile to contain complete ordering over candidates.
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc":
print("ERROR: unsupported profile type")
exit()
# Initialization
n = profile.numVoters
m = profile.numCands
# Compute the original winner d
# Initialize each Copeland score as 0.0.
copelandscores = {}
for cand in profile.candMap.keys():
copelandscores[cand] = 0.0
# For each pair of candidates, calculate the number of votes in which one beat the other.
wmgMap = profile.getWmg()
for cand1, cand2 in itertools.combinations(wmgMap.keys(), 2):
if cand2 in wmgMap[cand1].keys():
if wmgMap[cand1][cand2] > 0:
copelandscores[cand1] += 1.0
elif wmgMap[cand1][cand2] < 0:
copelandscores[cand2] += 1.0
# If a pair of candidates is tied, we add alpha to their score for each vote.
else:
copelandscores[cand1] += alpha
copelandscores[cand2] += alpha
d = max(copelandscores.items(), key=lambda x: x[1])[0]
#Compute c* = argmin_c RM(d,c)
relative_margin = {}
alter_without_d = delete(range(1, m + 1), d - 1)
for c in alter_without_d:
relative_margin[c] = RM(wmgMap, n, m, d, c, alpha)
c_star = min(relative_margin.items(), key=lambda x: x[1])[0]
return relative_margin[c_star]*(math.ceil(log(m)) + 1) | 0.003898 |
def pformat_xml(xml):
"""Return pretty formatted XML."""
try:
from lxml import etree # delayed import
if not isinstance(xml, bytes):
xml = xml.encode('utf-8')
xml = etree.parse(io.BytesIO(xml))
xml = etree.tostring(xml, pretty_print=True, xml_declaration=True,
encoding=xml.docinfo.encoding)
xml = bytes2str(xml)
except Exception:
if isinstance(xml, bytes):
xml = bytes2str(xml)
xml = xml.replace('><', '>\n<')
return xml.replace(' ', ' ').replace('\t', ' ') | 0.001709 |
def hset(self, hashroot, key, value):
""" hashed set """
hroot = self.root / hashroot
if not hroot.isdir():
hroot.makedirs()
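        # Keys are grouped into bucket files named by a hash of the key, so many keys can share one file.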
hfile = hroot / gethashfile(key)
d = self.get(hfile, {})
d.update( {key : value})
self[hfile] = d | 0.013793 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'tones') and self.tones is not None:
_dict['tones'] = [x._to_dict() for x in self.tones]
if hasattr(self, 'category_id') and self.category_id is not None:
_dict['category_id'] = self.category_id
if hasattr(self, 'category_name') and self.category_name is not None:
_dict['category_name'] = self.category_name
return _dict | 0.003929 |
def teardown(self):
'''
Clean up the target once all tests are completed
'''
if self.controller:
self.controller.teardown()
for monitor in self.monitors:
monitor.teardown() | 0.008475 |
def _set_relay(self, v, load=False):
"""
Setter method for relay, mapped from YANG variable /rbridge_id/maps/relay (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_relay is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_relay() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("hostip",relay.relay, yang_name="relay", rest_name="relay", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='hostip', extensions={u'tailf-common': {u'info': u'Configure relay ip mail settings', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_relay_callpoint'}}), is_container='list', yang_name="relay", rest_name="relay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure relay ip mail settings', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_relay_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """relay must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("hostip",relay.relay, yang_name="relay", rest_name="relay", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='hostip', extensions={u'tailf-common': {u'info': u'Configure relay ip mail settings', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_relay_callpoint'}}), is_container='list', yang_name="relay", rest_name="relay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure relay ip mail settings', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'maps_relay_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='list', is_config=True)""",
})
self.__relay = t
if hasattr(self, '_set'):
self._set() | 0.003827 |
def truncate(self, before=None, after=None, axis=None, copy=True):
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, string, int
Truncate all rows before this index value.
after : date, string, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : boolean, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError('Truncate: %s must be after %s' %
(after, before))
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis),
ax.truncate(before, after))
if copy:
result = result.copy()
return result | 0.00041 |
def stats(self, columns):
"""Compute the stats for each column provided in columns.
Parameters
----------
columns : list of str, contains all columns to compute stats on.
"""
assert (not isinstance(columns, basestring)), "columns should be a " \
"list of strs, " \
"not a str!"
assert isinstance(columns, list), "columns should be a list!"
from pyspark.sql import functions as F
functions = [F.min, F.max, F.avg, F.count]
aggs = list(
self._flatmap(lambda column: map(lambda f: f(column), functions),
columns))
return PStats(self.from_schema_rdd(self._schema_rdd.agg(*aggs))) | 0.002466 |
def _remove_hdxobject(self, objlist, obj, matchon='id', delete=False):
# type: (List[Union[HDXObjectUpperBound,Dict]], Union[HDXObjectUpperBound,Dict,str], str, bool) -> bool
"""Remove an HDX object from a list within the parent HDX object
Args:
objlist (List[Union[T <= HDXObject,Dict]]): list of HDX objects
obj (Union[T <= HDXObject,Dict,str]): Either an id or hdx object metadata either from an HDX object or a dictionary
matchon (str): Field to match on. Defaults to id.
delete (bool): Whether to delete HDX object. Defaults to False.
Returns:
bool: True if object removed, False if not
"""
if objlist is None:
return False
if isinstance(obj, six.string_types):
obj_id = obj
elif isinstance(obj, dict) or isinstance(obj, HDXObject):
obj_id = obj.get(matchon)
else:
raise HDXError('Type of object not a string, dict or T<=HDXObject')
if not obj_id:
return False
for i, objdata in enumerate(objlist):
objid = objdata.get(matchon)
if objid and objid == obj_id:
if delete:
objlist[i].delete_from_hdx()
del objlist[i]
return True
return False | 0.003698 |
def validate_count_api(rule_payload, endpoint):
"""
Ensures that the counts api is set correctly in a payload.
"""
rule = (rule_payload if isinstance(rule_payload, dict)
else json.loads(rule_payload))
bucket = rule.get('bucket')
counts = set(endpoint.split("/")) & {"counts.json"}
if len(counts) == 0:
if bucket is not None:
msg = ("""There is a count bucket present in your payload,
                   but you are not using the counts API.
Please check your endpoints and try again""")
logger.error(msg)
            raise ValueError(msg)
def property_cache_once_per_frame(f):
""" This decorator caches the return value for one game loop, then clears it if it is accessed in a different game loop
Only works on properties of the bot object because it requires access to self.state.game_loop """
f.frame = -1
f.cache = None
@wraps(f)
def inner(self):
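        # The cached value is only valid for the game loop in which it was computed; reset it otherwise.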
if f.frame != self.state.game_loop:
f.frame = self.state.game_loop
f.cache = None
if f.cache is None:
f.cache = f(self)
return f.cache
return property(inner) | 0.005376 |
def _check_bad_exception_context(self, node):
"""Verify that the exception context is properly set.
An exception context can be only `None` or an exception.
"""
cause = utils.safe_infer(node.cause)
if cause in (astroid.Uninferable, None):
return
if isinstance(cause, astroid.Const):
if cause.value is not None:
self.add_message("bad-exception-context", node=node)
elif not isinstance(cause, astroid.ClassDef) and not utils.inherit_from_std_ex(
cause
):
self.add_message("bad-exception-context", node=node) | 0.004724 |
def delete(self, item):
"""Deletes the specified item."""
uri = "/%s/%s" % (self.uri_base, utils.get_id(item))
return self._delete(uri) | 0.012579 |
def to_data(self, value):
'''
Coerce python data type to simple form for serialization.
        If a default value was defined, it is returned when ``None`` is passed.
        Throws an exception if the value is ``None`` and ``required`` is set to ``True``.
'''
try:
if value is None and self._default is not None:
return self._export(self.default)
self._check_required(value)
value = self._export(value)
return value
except ValueError as ex:
raise ValueError(ex, self._errors['to_data']) | 0.005025 |
def write_lammps_inputs(output_dir, script_template, settings=None,
data=None, script_filename="in.lammps",
make_dir_if_not_present=True, **kwargs):
"""
Writes input files for a LAMMPS run. Input script is constructed
from a str template with placeholders to be filled by custom
settings. Data file is either written from a LammpsData
instance or copied from an existing file if read_data cmd is
inspected in the input script. Other supporting files are not
handled at the moment.
Args:
output_dir (str): Directory to output the input files.
script_template (str): String template for input script with
placeholders. The format for placeholders has to be
'$variable_name', e.g., '$temperature'
settings (dict): Contains values to be written to the
placeholders, e.g., {'temperature': 1}. Default to None.
data (LammpsData or str): Data file as a LammpsData instance or
path to an existing data file. Default to None, i.e., no
data file supplied. Useful only when read_data cmd is in
the script.
script_filename (str): Filename for the input script.
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
**kwargs: kwargs supported by LammpsData.write_file.
Examples:
>>> eam_template = '''units metal
... atom_style atomic
...
... lattice fcc 3.615
... region box block 0 20 0 20 0 20
... create_box 1 box
... create_atoms 1 box
...
... pair_style eam
... pair_coeff 1 1 Cu_u3.eam
...
... velocity all create $temperature 376847 loop geom
...
... neighbor 1.0 bin
... neigh_modify delay 5 every 1
...
... fix 1 all nvt temp $temperature $temperature 0.1
...
... timestep 0.005
...
... run $nsteps'''
>>> write_lammps_inputs('.', eam_template, settings={'temperature': 1600.0, 'nsteps': 100})
>>> with open('in.lammps') as f:
... script = f.read()
...
>>> print(script)
units metal
atom_style atomic
lattice fcc 3.615
region box block 0 20 0 20 0 20
create_box 1 box
create_atoms 1 box
pair_style eam
pair_coeff 1 1 Cu_u3.eam
velocity all create 1600.0 376847 loop geom
neighbor 1.0 bin
neigh_modify delay 5 every 1
fix 1 all nvt temp 1600.0 1600.0 0.1
timestep 0.005
run 100
"""
variables = {} if settings is None else settings
template = Template(script_template)
input_script = template.safe_substitute(**variables)
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(os.path.join(output_dir, script_filename), "w") as f:
f.write(input_script)
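    # If the script contains a read_data command, write or copy the matching data file next to it.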
read_data = re.search(r"read_data\s+(.*)\n", input_script)
if read_data:
data_filename = read_data.group(1).split()[0]
if isinstance(data, LammpsData):
data.write_file(os.path.join(output_dir, data_filename), **kwargs)
elif isinstance(data, str) and os.path.exists(data):
shutil.copyfile(data, os.path.join(output_dir, data_filename))
else:
warnings.warn("No data file supplied. Skip writing %s."
% data_filename) | 0.000528 |
def __copyfile(source, destination):
"""Copy data and mode bits ("cp source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("copyfile: %s -> %s" % (source, destination))
try:
__create_destdir(destination)
shutil.copy(source, destination)
return True
except Exception as e:
logger.error(
"copyfile: %s -> %s failed! Error: %s", source, destination, e
)
return False | 0.00146 |
async def get_non_secret(
self,
typ: str,
filt: Union[dict, str] = None,
canon_wql: Callable[[dict], dict] = None,
limit: int = None) -> dict:
"""
Return dict mapping each non-secret storage record of interest by identifier or,
for no filter specified, mapping them all. If wallet has no such item, return empty dict.
:param typ: non-secret storage record type
:param filt: non-secret storage record identifier or WQL json (default all)
:param canon_wql: WQL canonicalization function (default von_anchor.canon.canon_non_secret_wql())
:param limit: maximum number of results to return (default no limit)
:return: dict mapping identifiers to non-secret storage records
"""
LOGGER.debug('Wallet.get_non_secret >>> typ: %s, filt: %s, canon_wql: %s', typ, filt, canon_wql)
if not self.handle:
LOGGER.debug('Wallet.get_non_secret <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
records = []
if isinstance(filt, str): # ordinary lookup by value
try:
records = [json.loads(await non_secrets.get_wallet_record(
self.handle,
typ,
filt,
json.dumps({
'retrieveType': False,
'retrieveValue': True,
'retrieveTags': True
})))]
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.WalletItemNotFound:
pass
else:
LOGGER.debug(
'Wallet.get_non_secret <!< Wallet %s lookup raised indy exception %s',
self.name,
x_indy.error_code)
raise
else:
canon = canon_wql or canon_non_secret_wql
s_handle = await non_secrets.open_wallet_search(
self.handle,
typ,
json.dumps(canon(filt or {})),
json.dumps({
'retrieveRecords': True,
'retrieveTotalCount': True,
'retrieveType': False,
'retrieveValue': True,
'retrieveTags': True
}))
records = []
cardinality = int(json.loads(
await non_secrets.fetch_wallet_search_next_records(self.handle, s_handle, 0))['totalCount'])
chunk = min(cardinality, limit or cardinality, Wallet.DEFAULT_CHUNK)
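            # Fetch the search results in chunks until all matching records (or the requested limit) are in hand.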
if limit:
cardinality = min(limit, cardinality)
try:
while len(records) != cardinality:
batch = json.loads(
await non_secrets.fetch_wallet_search_next_records(self.handle, s_handle, chunk))['records']
records.extend(batch)
if len(batch) < chunk:
break
if len(records) != cardinality:
LOGGER.warning(
'Non-secret search/limit indicated %s results but fetched %s',
cardinality,
len(records))
finally:
await non_secrets.close_wallet_search(s_handle)
rv = {record['id']: StorageRecord(typ, record['value'], record['tags'], record['id']) for record in records}
LOGGER.debug('Wallet.get_non_secret <<< %s', rv)
return rv | 0.00385 |
def _make_embedded_from(self, doc):
'''Creates embedded navigators from a HAL response doc'''
ld = utils.CurieDict(self._core.default_curie, {})
        # Use a separate loop variable so the `doc` parameter is not shadowed.
        for rel, embedded in doc.get('_embedded', {}).items():
            if isinstance(embedded, list):
                ld[rel] = [self._recursively_embed(d) for d in embedded]
            else:
                ld[rel] = self._recursively_embed(embedded)
return ld | 0.004819 |
def _format_command(name, envvars, base=None):
''' Creates a list-table directive
for a set of defined environment variables
Parameters:
name (str):
The name of the config section
envvars (dict):
A dictionary of the environment variable definitions from the config
base (str):
The SAS_BASE to remove from the filepaths
Yields:
A string rst-formated list-table directive
'''
yield '.. list-table:: {0}'.format(name)
yield _indent(':widths: 20 50')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Path')
for envvar, path in envvars.items():
tail = path.split(base)[1] if base and base in path else path
tail = envvar.upper() if envvar.upper() == 'SAS_BASE_DIR' else tail
yield _indent('* - {0}'.format(envvar.upper()))
yield _indent(' - {0}'.format(tail))
yield '' | 0.002086 |
def init_input_obj(self):
"""Section 4 - Create uwg objects from input parameters
self.simTime # simulation time parameter obj
self.weather # weather obj for simulation time period
self.forcIP # Forcing obj
self.forc # Empty forcing obj
self.geoParam # geographic parameters obj
self.RSM # Rural site & vertical diffusion model obj
self.USM # Urban site & vertical diffusion model obj
self.UCM # Urban canopy model obj
self.UBL # Urban boundary layer model
self.road # urban road element
self.rural # rural road element
        self.soilindex1       # soil index for urban road depth
self.soilindex2 # soil index for rural road depth
self.Sch # list of Schedule objects
"""
climate_file_path = os.path.join(self.epwDir, self.epwFileName)
self.simTime = SimParam(self.dtSim, self.dtWeather, self.Month,
                                self.Day, self.nDay)  # simulation time parameters
# weather file data for simulation time period
self.weather = Weather(climate_file_path, self.simTime.timeInitial, self.simTime.timeFinal)
self.forcIP = Forcing(self.weather.staTemp, self.weather) # initialized Forcing class
self.forc = Forcing() # empty forcing class
# Initialize geographic Param and Urban Boundary Layer Objects
nightStart = 18. # arbitrary values for begin/end hour for night setpoint
nightEnd = 8.
maxdx = 250. # max dx (m)
self.geoParam = Param(self.h_ubl1, self.h_ubl2, self.h_ref, self.h_temp, self.h_wind, self.c_circ,
self.maxDay, self.maxNight, self.latTree, self.latGrss, self.albVeg, self.vegStart, self.vegEnd,
nightStart, nightEnd, self.windMin, self.WGMAX, self.c_exch, maxdx, self.G, self.CP, self.VK, self.R,
self.RV, self.LV, math.pi, self.SIGMA, self.WATERDENS, self.LVTT, self.TT, self.ESTT, self.CL,
self.CPV, self.B, self.CM, self.COLBURN)
self.UBL = UBLDef(
'C', self.charLength, self.weather.staTemp[0], maxdx, self.geoParam.dayBLHeight, self.geoParam.nightBLHeight)
# Defining road
emis = 0.93
asphalt = Material(self.kRoad, self.cRoad, 'asphalt')
road_T_init = 293.
road_horizontal = 1
# fraction of surface vegetation coverage
road_veg_coverage = min(self.vegCover/(1-self.bldDensity), 1.)
# define road layers
road_layer_num = int(math.ceil(self.d_road/0.05))
# 0.5/0.05 ~ 10 x 1 matrix of 0.05 thickness
thickness_vector = [0.05 for r in range(road_layer_num)]
material_vector = [asphalt for r in range(road_layer_num)]
self.road = Element(self.alb_road, emis, thickness_vector, material_vector, road_veg_coverage,
road_T_init, road_horizontal, name="urban_road")
self.rural = copy.deepcopy(self.road)
self.rural.vegCoverage = self.rurVegCover
self.rural._name = "rural_road"
# Reference site class (also include VDM)
self.RSM = RSMDef(self.lat, self.lon, self.GMT, self.h_obs,
self.weather.staTemp[0], self.weather.staPres[0], self.geoParam, self.z_meso_dir_path)
self.USM = RSMDef(self.lat, self.lon, self.GMT, self.bldHeight/10.,
self.weather.staTemp[0], self.weather.staPres[0], self.geoParam, self.z_meso_dir_path)
T_init = self.weather.staTemp[0]
H_init = self.weather.staHum[0]
self.UCM = UCMDef(self.bldHeight, self.bldDensity, self.verToHor, self.treeCoverage, self.sensAnth, self.latAnth, T_init, H_init,
self.weather.staUmod[0], self.geoParam, self.r_glaze_total, self.SHGC_total, self.alb_wall_total, self.road)
self.UCM.h_mix = self.h_mix
# Define Road Element & buffer to match ground temperature depth
roadMat, newthickness = procMat(self.road, self.MAXTHICKNESS, self.MINTHICKNESS)
for i in range(self.nSoil):
# if soil depth is greater then the thickness of the road
# we add new slices of soil at max thickness until road is greater or equal
is_soildepth_equal = self.is_near_zero(self.depth_soil[i][0] - sum(newthickness), 1e-15)
if is_soildepth_equal or (self.depth_soil[i][0] > sum(newthickness)):
while self.depth_soil[i][0] > sum(newthickness):
newthickness.append(self.MAXTHICKNESS)
roadMat.append(self.SOIL)
self.soilindex1 = i
break
self.road = Element(self.road.albedo, self.road.emissivity, newthickness, roadMat,
self.road.vegCoverage, self.road.layerTemp[0], self.road.horizontal, self.road._name)
# Define Rural Element
ruralMat, newthickness = procMat(self.rural, self.MAXTHICKNESS, self.MINTHICKNESS)
for i in range(self.nSoil):
# if soil depth is greater then the thickness of the road
# we add new slices of soil at max thickness until road is greater or equal
is_soildepth_equal = self.is_near_zero(self.depth_soil[i][0] - sum(newthickness), 1e-15)
if is_soildepth_equal or (self.depth_soil[i][0] > sum(newthickness)):
while self.depth_soil[i][0] > sum(newthickness):
newthickness.append(self.MAXTHICKNESS)
ruralMat.append(self.SOIL)
self.soilindex2 = i
break
self.rural = Element(self.rural.albedo, self.rural.emissivity, newthickness,
ruralMat, self.rural.vegCoverage, self.rural.layerTemp[0], self.rural.horizontal, self.rural._name) | 0.0045 |
def label_field(self, f):
"""
Select one field as the label field.
        Note that this field will be excluded from feature fields.
:param f: Selected label field
:type f: str
:rtype: DataFrame
"""
if f is None:
raise ValueError("Label field name cannot be None.")
self._assert_ml_fields_valid(f)
return _change_singleton_roles(self, {_get_field_name(f): FieldRole.LABEL}, clear_feature=True) | 0.006263 |
def map_values2(self, func):
"""
:param func:
:type func: (K, T) -> U
:rtype: TDict[U]
Usage:
>>> TDict(k1=1, k2=2, k3=3).map_values2(lambda k, v: f'{k} -> {v*2}') == {
... "k1": "k1 -> 2",
... "k2": "k2 -> 4",
... "k3": "k3 -> 6"
... }
True
"""
return TDict({k: func(k, v) for k, v in self.items()}) | 0.006787 |
def getextensibleindex(bunchdt, data, commdct, key, objname):
"""get the index of the first extensible item"""
theobject = getobject(bunchdt, key, objname)
if theobject == None:
return None
theidd = iddofobject(data, commdct, key)
extensible_i = [
i for i in range(len(theidd)) if 'begin-extensible' in theidd[i]]
    try:
        extensible_i = extensible_i[0]
    except IndexError:
        return theobject
    return extensible_i | 0.004494
def send_simple(self, address, timestamp, value):
"""Queue a simple datapoint (ie. a 64-bit word), return True/False for success.
Arguments:
address -- uint64_t representing a unique metric.
timestamp -- uint64_t representing number of nanoseconds (10^-9) since epoch.
value -- uint64_t value being stored.
There are no formal restrictions on how `address` is chosen,
but it must be unique to the metric you are inserting. If you
don't have one, you may generate one by calling
`hash_identifier` with a string; the recommended input is the
source identifier.
If you don't have a `timestamp` you may pass in None to have
Pymarquise generate one for you.
"""
if self.marquise_ctx is None:
raise ValueError("Attempted to write to a closed Marquise handle.")
self.__debug("Supplied address: %s" % address)
if value is None:
raise TypeError("Can't store None as a value.")
if timestamp is None:
timestamp = self.current_timestamp()
# Wrap/convert our arguments to C datatypes before dispatching.
# FFI will take care of converting them to the right endianness. I think.
c_address = FFI.cast("uint64_t", address)
c_timestamp = FFI.cast("uint64_t", timestamp)
c_value = FFI.cast("uint64_t", value)
success = MARQUISE_SEND_SIMPLE(self.marquise_ctx, c_address, c_timestamp, c_value)
if success != 0:
self.__debug("send_simple returned %d, raising exception" % success)
raise RuntimeError("send_simple was unsuccessful, errno is %d" % FFI.errno)
self.__debug("send_simple returned %d" % success)
return True | 0.005618 |
def find_prepositions(chunked):
""" The input is a list of [token, tag, chunk]-items.
The output is a list of [token, tag, chunk, preposition]-items.
PP-chunks followed by NP-chunks make up a PNP-chunk.
"""
# Tokens that are not part of a preposition just get the O-tag.
for ch in chunked:
ch.append("O")
for i, chunk in enumerate(chunked):
if chunk[2].endswith("PP") and chunk[-1] == "O":
# Find PP followed by other PP, NP with nouns and pronouns, VP with a gerund.
if i < len(chunked)-1 and \
(chunked[i+1][2].endswith(("NP", "PP")) or \
chunked[i+1][1] in ("VBG", "VBN")):
chunk[-1] = "B-PNP"
pp = True
for ch in chunked[i+1:]:
if not (ch[2].endswith(("NP", "PP")) or ch[1] in ("VBG", "VBN")):
break
if ch[2].endswith("PP") and pp:
ch[-1] = "I-PNP"
if not ch[2].endswith("PP"):
ch[-1] = "I-PNP"
pp = False
return chunked | 0.003524 |
def issn(self, mask: str = '####-####') -> str:
"""Generate a random ISSN.
:param mask: Mask of ISSN.
:return: ISSN.
"""
return self.random.custom_code(mask=mask) | 0.009852 |
def _count_dollars_before_index(self, s, i):
"""Returns the number of '$' characters right in front of s[i]."""
dollar_count = 0
dollar_index = i - 1
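        # Walk backwards from s[i-1] as long as consecutive '$' characters are found.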
while dollar_index > 0 and s[dollar_index] == '$':
dollar_count += 1
dollar_index -= 1
return dollar_count | 0.00625 |
def fromMessage(klass, message, op_endpoint):
"""Construct me from an OpenID message.
@raises ProtocolError: When not all required parameters are present
in the message.
@raises MalformedReturnURL: When the C{return_to} URL is not a URL.
@raises UntrustedReturnURL: When the C{return_to} URL is outside
the C{trust_root}.
@param message: An OpenID checkid_* request Message
@type message: openid.message.Message
@param op_endpoint: The endpoint URL of the server that this
message was sent to.
@type op_endpoint: str
@returntype: L{CheckIDRequest}
"""
self = klass.__new__(klass)
self.message = message
self.op_endpoint = op_endpoint
mode = message.getArg(OPENID_NS, 'mode')
if mode == "checkid_immediate":
self.immediate = True
self.mode = "checkid_immediate"
else:
self.immediate = False
self.mode = "checkid_setup"
self.return_to = message.getArg(OPENID_NS, 'return_to')
if message.isOpenID1() and not self.return_to:
fmt = "Missing required field 'return_to' from %r"
raise ProtocolError(message, text=fmt % (message,))
self.identity = message.getArg(OPENID_NS, 'identity')
self.claimed_id = message.getArg(OPENID_NS, 'claimed_id')
if message.isOpenID1():
if self.identity is None:
s = "OpenID 1 message did not contain openid.identity"
raise ProtocolError(message, text=s)
else:
if self.identity and not self.claimed_id:
s = ("OpenID 2.0 message contained openid.identity but not "
"claimed_id")
raise ProtocolError(message, text=s)
elif self.claimed_id and not self.identity:
s = ("OpenID 2.0 message contained openid.claimed_id but not "
"identity")
raise ProtocolError(message, text=s)
# There's a case for making self.trust_root be a TrustRoot
# here. But if TrustRoot isn't currently part of the "public" API,
# I'm not sure it's worth doing.
if message.isOpenID1():
trust_root_param = 'trust_root'
else:
trust_root_param = 'realm'
# Using 'or' here is slightly different than sending a default
# argument to getArg, as it will treat no value and an empty
# string as equivalent.
self.trust_root = (message.getArg(OPENID_NS, trust_root_param)
or self.return_to)
if not message.isOpenID1():
if self.return_to is self.trust_root is None:
raise ProtocolError(message, "openid.realm required when " +
"openid.return_to absent")
self.assoc_handle = message.getArg(OPENID_NS, 'assoc_handle')
# Using TrustRoot.parse here is a bit misleading, as we're not
# parsing return_to as a trust root at all. However, valid URLs
# are valid trust roots, so we can use this to get an idea if it
# is a valid URL. Not all trust roots are valid return_to URLs,
# however (particularly ones with wildcards), so this is still a
# little sketchy.
if self.return_to is not None and \
not TrustRoot.parse(self.return_to):
raise MalformedReturnURL(message, self.return_to)
# I first thought that checking to see if the return_to is within
# the trust_root is premature here, a logic-not-decoding thing. But
# it was argued that this is really part of data validation. A
# request with an invalid trust_root/return_to is broken regardless of
# application, right?
if not self.trustRootValid():
raise UntrustedReturnURL(message, self.return_to, self.trust_root)
return self | 0.000752 |
def get_results(self, client_id, msg):
"""Get the result of 1 or more messages."""
content = msg['content']
msg_ids = sorted(set(content['msg_ids']))
statusonly = content.get('status_only', False)
pending = []
completed = []
content = dict(status='ok')
content['pending'] = pending
content['completed'] = completed
buffers = []
if not statusonly:
try:
matches = self.db.find_records(dict(msg_id={'$in':msg_ids}))
# turn match list into dict, for faster lookup
records = {}
for rec in matches:
records[rec['msg_id']] = rec
except Exception:
content = error.wrap_exception()
self.session.send(self.query, "result_reply", content=content,
parent=msg, ident=client_id)
return
else:
records = {}
for msg_id in msg_ids:
if msg_id in self.pending:
pending.append(msg_id)
elif msg_id in self.all_completed:
completed.append(msg_id)
if not statusonly:
c,bufs = self._extract_record(records[msg_id])
content[msg_id] = c
buffers.extend(bufs)
            elif msg_id in records:
                # Look up the record for this msg_id instead of reusing the loop variable from above.
                rec = records[msg_id]
                if rec['completed']:
completed.append(msg_id)
c,bufs = self._extract_record(records[msg_id])
content[msg_id] = c
buffers.extend(bufs)
else:
pending.append(msg_id)
else:
try:
raise KeyError('No such message: '+msg_id)
except:
content = error.wrap_exception()
break
self.session.send(self.query, "result_reply", content=content,
parent=msg, ident=client_id,
buffers=buffers) | 0.00471 |
def list_slack():
"""List channels & users in slack."""
try:
token = os.environ['SLACK_TOKEN']
slack = Slacker(token)
# Get channel list
response = slack.channels.list()
channels = response.body['channels']
for channel in channels:
print(channel['id'], channel['name'])
# if not channel['is_archived']:
# slack.channels.join(channel['name'])
print()
# Get users list
response = slack.users.list()
users = response.body['members']
for user in users:
if not user['deleted']:
print(user['id'], user['name'], user['is_admin'], user[
'is_owner'])
print()
except KeyError as ex:
print('Environment variable %s not set.' % str(ex)) | 0.001211 |
def check_spelling(spelling_lang, txt):
"""
Check the spelling in the text, and compute a score. The score is the
number of words correctly (or almost correctly) spelled, minus the number
    of misspelled words. Words that are "almost" correct remain neutral (-> are not
included in the score)
Returns:
A tuple : (fixed text, score)
"""
if os.name == "nt":
assert(not "check_spelling() not available on Windows")
return
with _ENCHANT_LOCK:
# Maximum distance from the first suggestion from python-enchant
words_dict = enchant.request_dict(spelling_lang)
try:
tknzr = enchant.tokenize.get_tokenizer(spelling_lang)
except enchant.tokenize.TokenizerNotFoundError:
# Fall back to default tokenization if no match for 'lang'
tknzr = enchant.tokenize.get_tokenizer()
score = 0
offset = 0
for (word, word_pos) in tknzr(txt):
if len(word) < _MIN_WORD_LEN:
continue
if words_dict.check(word):
# immediately correct words are a really good hint for
# orientation
score += 100
continue
suggestions = words_dict.suggest(word)
if (len(suggestions) <= 0):
# this word is useless. It may even indicates a bad orientation
score -= 10
continue
main_suggestion = suggestions[0]
lv_dist = Levenshtein.distance(word, main_suggestion)
if (lv_dist > _MAX_LEVENSHTEIN_DISTANCE):
# hm, this word looks like it's in a bad shape
continue
logger.debug("Spell checking: Replacing: %s -> %s"
% (word, main_suggestion))
# let's replace the word by its suggestion
pre_txt = txt[:word_pos + offset]
post_txt = txt[word_pos + len(word) + offset:]
txt = pre_txt + main_suggestion + post_txt
offset += (len(main_suggestion) - len(word))
# fixed words may be a good hint for orientation
score += 5
return (txt, score) | 0.000454 |
def shadows(self, data=None, t=None, dt=None, latitude=None,
init='empty', resolution='mid'):
'''
        Initializes a ShadowsManager object for this ``pyny.Space``
instance.
The 'empty' initialization accepts ``data`` and ``t`` and ``dt``
but the ShadowsManager will not start the calculations. It will
wait the user to manually insert the rest of the parameters.
Call ``ShadowsManager.run()`` to start the shadowing
computations.
The 'auto' initialization pre-sets all the required parameters
to run the computations\*. The available resolutions are:
* 'low'
* 'mid'
* 'high'
The 'auto' mode will use all the arguments different than
``None`` and the ``set_of_points`` of this ``pyny.Space`` if
any.
:param data: Data timeseries to project on the 3D model
(radiation, for example).
:type data: ndarray (shape=N), None
:param t: Time vector in absolute minutes or datetime objects
:type t: ndarray or list, None
:param dt: Interval time to generate t vector.
:type dt: int, None
:param latitude: Local latitude.
:type latitude: float (radians)
:param init: Initialization mode
:type init: str
        :param resolution: Resolution for the time vector generation (if
            ``None``), for setting the sensible points and for the
            Voronoi diagram.
        :type resolution: str
:returns: ``ShadowsManager`` object
'''
from pyny3d.shadows import ShadowsManager
if init == 'auto':
# Resolution
if resolution == 'low':
factor = 20
elif resolution == 'mid':
factor = 40
elif resolution == 'high':
factor = 70
if dt is None: dt = 6e4/factor
if latitude is None: latitude = 0.65
# Autofill ShadowsManager Object
sm = ShadowsManager(self, data=data, t=t, dt=dt,
latitude=latitude)
if self.get_sets_of_points().shape[0] == 0:
max_bound = np.diff(self.get_domain(), axis=0).max()
sm.space.mesh(mesh_size=max_bound/factor, edge=True)
## General parameters
sm.arg_vor_size = 3.5/factor
sm.run()
return sm
elif init == 'empty':
return ShadowsManager(self, data=data, t=t, dt=dt,
latitude=latitude) | 0.009104 |
def _rshift_arithmetic(self, shift_amount):
"""
Arithmetic shift right with a concrete shift amount
:param int shift_amount: Number of bits to shift right.
:return: The new StridedInterval after right shifting
:rtype: StridedInterval
"""
if self.is_empty:
return self
# If straddling the north pole, we'll have to split it into two, perform arithmetic right shift on them
# individually, then union the result back together for better precision. Note that it's an improvement from
# the original WrappedIntervals paper.
nsplit = self._nsplit()
if len(nsplit) == 1:
# preserve the highest bit :-)
highest_bit_set = self.lower_bound > StridedInterval.signed_max_int(nsplit[0].bits)
l = self.lower_bound >> shift_amount
u = self.upper_bound >> shift_amount
stride = max(self.stride >> shift_amount, 1)
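            # Mask covering the top shift_amount bits; OR-ing it back in sign-extends negative values.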
mask = ((2 ** shift_amount - 1) << (self.bits - shift_amount))
if highest_bit_set:
l = l | mask
u = u | mask
if l == u:
stride = 0
return StridedInterval(bits=self.bits,
lower_bound=l,
upper_bound=u,
stride=stride,
uninitialized=self.uninitialized
)
else:
a = nsplit[0]._rshift_arithmetic(shift_amount)
b = nsplit[1]._rshift_arithmetic(shift_amount)
return a.union(b) | 0.004831 |