text (string, length 78 to 104k) | score (float64, 0 to 0.18)
---|---
def make_hash_id():
"""
Compute a SHA-1 hash of the current `datetime.now()` timestamp string.
:return: The SHA-1 hash as a hexadecimal string.
:rtype: str
"""
today = datetime.datetime.now().strftime(DATETIME_FORMAT)
return hashlib.sha1(today.encode('utf-8')).hexdigest()
| 0.003546 |
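A minimal usage sketch of the same idea; the DATETIME_FORMAT value below is an assumption (the library's actual constant may differ), chosen with sub-second resolution so consecutive calls yield different ids.
import datetime, hashlib
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'   # assumed value, not the library's actual constant
hash_id = hashlib.sha1(
    datetime.datetime.now().strftime(DATETIME_FORMAT).encode('utf-8')).hexdigest()
print(len(hash_id))   # 40 hex characters; differs between calls made at different instants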
def register_instance(self, obj, prefix=''):
"""Create new subdispatcher and register all public object methods on
it.
To be used in conjunction with the :py:func:`public`
decorator (*not* :py:func:`RPCDispatcher.public`).
:param obj: The object whose public methods should be made available.
:type obj: object
:param str prefix: A prefix for the new subdispatcher.
"""
dispatch = self.__class__()
for name, f in inspect.getmembers(
obj, lambda f: callable(f) and hasattr(f, '_rpc_public_name')):
dispatch.add_method(f, f._rpc_public_name)
# add to dispatchers
self.add_subdispatch(dispatch, prefix)
| 0.002755 |
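A hedged usage sketch in the tinyrpc style this dispatcher appears to follow; the import path and class names are assumptions.
from tinyrpc.dispatch import RPCDispatcher, public   # assumed import path
class Calculator(object):
    @public                       # tags the method with _rpc_public_name
    def add(self, a, b):
        return a + b
dispatcher = RPCDispatcher()
dispatcher.register_instance(Calculator(), prefix='calc.')
# add() is now reachable through the subdispatcher as 'calc.add'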
def get_live_data_dir():
"""
pygeth needs a base directory to store its chain data. By default this is
the directory that `geth` uses as its `datadir`.
"""
if sys.platform == 'darwin':
data_dir = os.path.expanduser(os.path.join(
"~",
"Library",
"Ethereum",
))
elif sys.platform in {'linux', 'linux2', 'linux3'}:
data_dir = os.path.expanduser(os.path.join(
"~",
".ethereum",
))
elif sys.platform == 'win32':
data_dir = os.path.expanduser(os.path.join(
"\\",
"~",
"AppData",
"Roaming",
"Ethereum",
))
else:
raise ValueError((
"Unsupported platform: '{0}'. Only darwin/linux2/win32 are "
"supported. You must specify the geth datadir manually"
).format(sys.platform))
return data_dir
| 0.001075 |
def _badlink(info, base):
"""
Links are interpreted relative to the directory containing the link
"""
tip = _resolved(os.path.join(base, os.path.dirname(info.name)))
return _badpath(info.linkname, base=tip)
| 0.004425 |
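For context, a hedged sketch of the companion helpers this check relies on, following the widely used safe-tar-extraction recipe; the real implementations may differ in detail.
import os
def _resolved(path):
    # Canonical absolute path with symlinks resolved.
    return os.path.realpath(os.path.abspath(path))
def _badpath(path, base):
    # os.path.join ignores `base` when `path` is absolute, so a member name or
    # link target that escapes `base` fails the startswith check.
    return not _resolved(os.path.join(base, path)).startswith(base)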
def set_checkpoint(self, checkpoint_trigger,
checkpoint_path, isOverWrite=True):
"""
Configure checkpoint settings.
:param checkpoint_trigger: the interval to write snapshots
:param checkpoint_path: the path to write snapshots into
:param isOverWrite: whether to overwrite existing snapshots in path. Default is True.
"""
if not os.path.exists(checkpoint_path):
mkpath(checkpoint_path)
callBigDlFunc(self.bigdl_type, "setCheckPoint", self.value,
checkpoint_trigger, checkpoint_path, isOverWrite)
| 0.006494 |
def updateCorpInfo(self, CorpNum, CorpInfo, UserID=None):
""" ๋ด๋น์ ์ ๋ณด ์์
args
CorpNum : ํ์ ์ฌ์
์๋ฒํธ
CorpInfo : ํ์ฌ ์ ๋ณด, Reference CorpInfo class
UserID : ํ์ ์์ด๋
return
์ฒ๋ฆฌ๊ฒฐ๊ณผ. consist of code and message
raise
PopbillException
"""
postData = self._stringtify(CorpInfo)
return self._httppost('/CorpInfo', postData, CorpNum, UserID)
| 0.004246 |
def list_upcoming(cls):
"""
Returns a collection of upcoming tasks (tasks that have not yet been completed,
regardless of whether they're overdue) for the authenticated user
:return:
:rtype: list
"""
return fields.ListField(name=cls.ENDPOINT, init_class=cls).decode(
cls.element_from_string(
cls._get_request(endpoint=cls.ENDPOINT + '/upcoming').text
)
)
| 0.006536 |
def writeFailure(failure, logger=None):
"""
Write a L{twisted.python.failure.Failure} to the log.
This is for situations where you got an unexpected exception and want to
log a traceback. For example, if you have C{Deferred} that might error,
you'll want to wrap it with a L{eliot.twisted.DeferredContext} and then add
C{writeFailure} as the error handler to get the traceback logged:
d = DeferredContext(dostuff())
d.addCallback(process)
# Final error handler.
d.addErrback(writeFailure)
@param failure: L{Failure} to write to the log.
@type logger: L{eliot.ILogger}. Will be deprecated at some point, so just
ignore it.
@return: None
"""
# Failure.getBriefTraceback does not include source code, so does not do
# I/O.
_writeTracebackMessage(
logger, failure.value.__class__, failure.value,
failure.getBriefTraceback())
| 0.001072 |
def GetHasherNamesFromString(cls, hasher_names_string):
"""Retrieves a list of a hasher names from a comma separated string.
Takes a string of comma separated hasher names transforms it to a list of
hasher names.
Args:
hasher_names_string (str): comma separated names of hashers to enable,
the string 'all' to enable all hashers or 'none' to disable all
hashers.
Returns:
list[str]: names of valid hashers from the string, or an empty list if no
valid names are found.
"""
hasher_names = []
if not hasher_names_string or hasher_names_string.strip() == 'none':
return hasher_names
if hasher_names_string.strip() == 'all':
return cls.GetHasherNames()
for hasher_name in hasher_names_string.split(','):
hasher_name = hasher_name.strip()
if not hasher_name:
continue
hasher_name = hasher_name.lower()
if hasher_name in cls._hasher_classes:
hasher_names.append(hasher_name)
return hasher_names
| 0.00677 |
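Illustrative behaviour of the parser above; the enclosing manager class name (HashersManager) and the registered hasher set ('md5', 'sha256', ...) are assumptions here.
HashersManager.GetHasherNamesFromString('md5, bogus, SHA256')  # -> ['md5', 'sha256']
HashersManager.GetHasherNamesFromString('none')                # -> []
HashersManager.GetHasherNamesFromString('all')                 # -> all registered hasher names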
def _resolve_duplicates(self):
'''
Merge variables connected by identity operator to reduce the number of redundant variables
'''
self._initialize_graph_status_for_traversing()
# Traverse the graph from roots to leaves
for operator in self.topological_operator_iterator():
if operator.type != 'identity':
continue
if any(variable.is_root for variable in operator.inputs) and \
any(variable.is_leaf for variable in operator.outputs):
continue
# Replace the output variable with the input variable everywhere
original = operator.inputs[0]
duplicate = operator.outputs[0]
for another_scope in self.scopes:
for another_operator in another_scope.operators.values():
for i in range(len(another_operator.inputs)):
if another_operator.inputs[i].onnx_name != duplicate.onnx_name:
continue
another_operator.inputs[i] = original
# When original variable's documentation string or denotation is empty but duplicate's is not, we
# copy that field to the original variable to avoid information loss.
if not original.type.doc_string and duplicate.type.doc_string:
original.type.doc_string = duplicate.type.doc_string
if isinstance(original.type, TensorType) and isinstance(duplicate.type, TensorType):
if not original.type.denotation and duplicate.type.denotation:
original.type.denotation = duplicate.type.denotation
if not original.type.channel_denotations:
original.type.channel_denotations = duplicate.type.channel_denotations
elif duplicate.type.channel_denotations:
# Merge the channel denotations if available in both the original and the duplicate
for i in range(len(original.type.channel_denotations)):
if original.type.channel_denotations[i]:
continue
original.type.channel_denotations[i] = duplicate.type.channel_denotations[i]
# Sometimes the shapes of the original and the duplicate differ. We try to fill in as many of the
# original variable's unknown dimensions as possible, because the duplicate will be removed.
if len(original.type.shape) == len(duplicate.type.shape):
for i in range(len(original.type.shape)):
if original.type.shape[i] != 'None':
continue
original.type.shape[i] = duplicate.type.shape[i]
# Because we're iterating through the topology, we cannot delete any operator or variable. Otherwise,
# the traversing function may be broken. We will delete those abandoned ones later.
duplicate.is_abandoned = True
operator.is_abandoned = True
for scope in self.scopes:
# Find out who is going to be abandoned
abandoned_operator_names = set(onnx_name for onnx_name, operator in scope.operators.items()
if operator.is_abandoned)
abandoned_variable_names = set(onnx_name for onnx_name, variable in scope.variables.items()
if variable.is_abandoned)
# Remove abandoned operators
for name in abandoned_operator_names:
scope.delete_local_operator(name)
# Remove abandoned variables
for name in abandoned_variable_names:
scope.delete_local_variable(name)
| 0.003944 |
def create_contact(self, *args, **kwargs):
"""Creates a contact"""
url = 'contacts.json'
contact_data = {
'active': True,
'helpdesk_agent': False,
'description': 'Freshdesk Contact'
}
contact_data.update(kwargs)
payload = {
'user': contact_data
}
return Contact(**self._api._post(url, data=payload)['user'])
| 0.004762 |
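A hedged usage sketch; `api` stands for a hypothetical authenticated Freshdesk client object exposing this method directly, and the field names follow the payload shown above.
contact = api.create_contact(name='Jane Doe', email='jane.doe@example.com')
# description defaults to 'Freshdesk Contact' unless overridden via kwargs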
def put_metadata(self, key, value, namespace='default'):
"""
Add metadata to the current active trace entity.
Metadata is not indexed but can be later retrieved
by BatchGetTraces API.
:param str namespace: optional. Default namespace is `default`.
It must be a string and prefix `AWS.` is reserved.
:param str key: metadata key under specified namespace
:param object value: any object that can be serialized into JSON string
"""
entity = self.get_trace_entity()
if entity and entity.sampled:
entity.put_metadata(key, value, namespace)
| 0.003125 |
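A hedged usage sketch with the aws-xray-sdk recorder, which exposes a method of this shape; the segment name, payload, and namespace are arbitrary examples.
from aws_xray_sdk.core import xray_recorder
xray_recorder.begin_segment('checkout')                                       # open an active trace entity
xray_recorder.put_metadata('cart', {'items': 3, 'total': 42.5}, namespace='shop')
xray_recorder.end_segment()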
def load(
inputobj,
c="gold",
alpha=None,
wire=False,
bc=None,
texture=None,
smoothing=None,
threshold=None,
connectivity=False,
):
"""
Returns a ``vtkActor`` from reading a file, directory or ``vtkPolyData``.
:param c: color in RGB format, hex, symbol or name
:param alpha: transparency (0=invisible)
:param wire: show surface as wireframe
:param bc: backface color of internal surface
:param texture: any png/jpg file can be used as texture
For volumetric data (tiff, slc, vti files):
:param smoothing: gaussian filter to smooth vtkImageData
:param threshold: value to draw the isosurface
:param connectivity: if True only keeps the largest portion of the polydata
"""
if alpha is None:
alpha = 1
if isinstance(inputobj, vtk.vtkPolyData):
a = Actor(inputobj, c, alpha, wire, bc, texture)
if inputobj and inputobj.GetNumberOfPoints() == 0:
colors.printc("~lightning Warning: actor has zero points.", c=5)
return a
acts = []
if isinstance(inputobj, list):
flist = inputobj
else:
import glob
flist = sorted(glob.glob(inputobj))
for fod in flist:
if os.path.isfile(fod):
if fod.endswith(".vtm"):
acts += loadMultiBlockData(fod, unpack=True)
else:
a = _loadFile(fod, c, alpha, wire, bc, texture,
smoothing, threshold, connectivity)
acts.append(a)
elif os.path.isdir(fod):
acts = _loadDir(fod, c, alpha, wire, bc, texture,
smoothing, threshold, connectivity)
if not len(acts):
colors.printc("~times Error in load(): cannot find", inputobj, c=1)
return None
if len(acts) == 1:
return acts[0]
else:
return acts
| 0.000525 |
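A hedged usage sketch based on the signature above (vtkplotter-style API); the file names are placeholders.
actor = load('mesh.vtk', c='gold', alpha=0.5)            # single file -> one Actor
actors = load('meshes/*.vtk')                            # glob pattern -> list of Actors
iso = load('volume.tif', threshold=80, smoothing=0.9)    # volumetric file -> isosurface Actor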
def center_at(self, x, y):
"""Center the menu at x, y"""
self.x = x - (self.width / 2)
self.y = y - (self.height / 2)
| 0.014085 |
def setDataFrame(self, dataFrame, copyDataFrame=False, filePath=None):
"""
Setter function to _dataFrame. Holds all data.
Note:
It's not implemented with python properties to keep Qt conventions.
Raises:
TypeError: if dataFrame is not of type pandas.core.frame.DataFrame.
Args:
dataFrame (pandas.core.frame.DataFrame): assign dataFrame to _dataFrame. Holds all the data displayed.
copyDataFrame (bool, optional): create a copy of dataFrame or use it as is. defaults to False.
If you use it as is, you can change it from outside; otherwise you have to reset the dataFrame
after external changes.
"""
if not isinstance(dataFrame, pandas.core.frame.DataFrame):
raise TypeError("not of type pandas.core.frame.DataFrame")
self.layoutAboutToBeChanged.emit()
if copyDataFrame:
self._dataFrame = dataFrame.copy()
else:
self._dataFrame = dataFrame
self._columnDtypeModel = ColumnDtypeModel(dataFrame)
self._columnDtypeModel.dtypeChanged.connect(self.propagateDtypeChanges)
self._columnDtypeModel.changeFailed.connect(
lambda columnName, index, dtype: self.changingDtypeFailed.emit(columnName, index, dtype)
)
if filePath is not None:
self._filePath = filePath
self.layoutChanged.emit()
self.dataChanged.emit()
self.dataFrameChanged.emit()
| 0.003953 |
def markdown(text, renderer=None, **options):
"""
Parses the provided Markdown-formatted text into valid HTML, and returns
it as a :class:`flask.Markup` instance.
:param text: Markdown-formatted text to be rendered into HTML
:param renderer: A custom misaka renderer to be used instead of the default one
:param options: Additional options for customizing the default renderer
:return: A :class:`flask.Markup` instance representing the rendered text
"""
ext, rndr = make_flags(**options)
if renderer:
md = misaka.Markdown(renderer,ext)
result = md(text)
else:
result = misaka.html(text, extensions=ext, render_flags=rndr)
if options.get("smartypants"):
result = misaka.smartypants(result)
return Markup(result)
| 0.003774 |
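A hedged usage sketch inside a Flask view, assuming this helper is importable as in Flask-Misaka; smartypants is the option handled explicitly above.
from flask import Flask
app = Flask(__name__)
@app.route('/readme')
def readme():
    # Returns a Markup instance, which Flask renders as already-safe HTML.
    return markdown("**Hello** -- 'world'", smartypants=True)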
def call_moses_detokenizer(workspace_dir: str, input_fname: str, output_fname: str, lang_code: Optional[str] = None):
"""
Call Moses detokenizer.
:param workspace_dir: Workspace third-party directory where Moses
tokenizer is checked out.
:param input_fname: Path of tokenized input file, plain text or gzipped.
:param output_fname: Path of tokenized output file, plain text.
:param lang_code: Language code for rules and non-breaking prefixes. Can be
None if unknown (using pre-tokenized data), which will
cause the tokenizer to default to English.
"""
detokenizer_fname = os.path.join(workspace_dir,
DIR_THIRD_PARTY,
MOSES_DEST,
"scripts",
"tokenizer",
"detokenizer.perl")
with bin_open(input_fname) as inp, open(output_fname, "wb") as out, open(os.devnull, "wb") as devnull:
command = ["perl", detokenizer_fname]
if lang_code:
command.append("-l")
command.append(lang_code)
detokenizer = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=devnull)
detokenizer_thread = threading.Thread(target=copy_out, args=(detokenizer.stdout, out))
detokenizer_thread.start()
for line in inp:
detokenizer.stdin.write(line)
detokenizer.stdin.close()
detokenizer_thread.join()
detokenizer.wait()
| 0.002894 |
def add_signature(key, inputs, outputs):
"""Adds a signature to current graph.
Args:
key: Signature key as a string.
inputs: Signature inputs as a map from string to Tensor or SparseTensor.
outputs: Signature outputs as a map from string to Tensor or SparseTensor.
(Recall that a Variable is not a Tensor, but Variable.value() is.)
Raises:
TypeError: if the arguments have the wrong types.
"""
_check_dict_maps_to_tensors_or_sparse_tensors(inputs)
_check_dict_maps_to_tensors_or_sparse_tensors(outputs)
input_info = {
input_name: tf_v1.saved_model.utils.build_tensor_info(tensor)
for input_name, tensor in inputs.items()
}
output_info = {
output_name: tf_v1.saved_model.utils.build_tensor_info(tensor)
for output_name, tensor in outputs.items()
}
signature = tf_v1.saved_model.signature_def_utils.build_signature_def(
input_info, output_info)
tf_v1.add_to_collection(_SIGNATURE_COLLECTION, (key, signature))
| 0.008122 |
def version(*names, **kwargs):
'''
Common interface for obtaining the version of installed packages.
Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required.
Partial FMRI is returned if the package is not installed.
CLI Example:
.. code-block:: bash
salt '*' pkg.version vim
salt '*' pkg.version foo bar baz
salt '*' pkg_resource.version pkg://solaris/entire
'''
if not names:
return ''
cmd = ['/bin/pkg', 'list', '-Hv']
cmd.extend(names)
lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines()
ret = {}
for line in lines:
ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line)
# Append package names which are not installed/found
unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640
ret.update(zip(unmatched, itertools.cycle(('',))))
# Return a string if only one package name passed
if len(names) == 1:
try:
return next(six.itervalues(ret))
except StopIteration:
return ''
return ret
| 0.002577 |
def bid(self, trade_id, bid, fast=False):
"""Make a bid.
:params trade_id: Trade id.
:params bid: Amount of credits You want to spend.
:params fast: True for fastest bidding (skips trade status & credits check).
"""
method = 'PUT'
url = 'trade/%s/bid' % trade_id
if not fast:
rc = self.tradeStatus(trade_id)[0]
# don't bid if current bid is equal or greater than our max bid
if rc['currentBid'] >= bid or self.credits < bid:
return False # TODO: add exceptions
data = {'bid': bid}
try:
rc = self.__request__(method, url, data=json.dumps(data), params={'sku_b': self.sku_b}, fast=fast)[
'auctionInfo'][0]
except PermissionDenied: # too slow, somebody took it already :-(
return False
if rc['bidState'] == 'highest' or (
rc['tradeState'] == 'closed' and rc['bidState'] == 'buyNow'): # checking 'tradeState' is required?
return True
else:
return False
| 0.004591 |
def bestfit(self):
"""
Returns a series with the bestfit values.
Example:
Series.bestfit()
Returns: series
The returned series contains a parameter
called 'formula' which includes the string representation
of the bestfit line.
"""
# statsmodel cannot be included on requirements.txt
# see https://github.com/scikit-learn/scikit-learn/issues/4164
# which shares the same issue as statsmodel
try:
import statsmodels.api as sm
except ImportError:
raise Exception("statsmodels is required: " \
"please run " \
"pip install statsmodels" )
x=pd.Series(list(range(1,len(self)+1)),index=self.index)
x=sm.add_constant(x)
model=sm.OLS(self,x)
fit=model.fit()
vals=fit.params.values
best_fit=fit.fittedvalues
# the below methods have been deprecated in Pandas
# model=pd.ols(x=x,y=self,intercept=True)
# best_fit=model.y_fitted
best_fit.formula='%.2f*x+%.2f' % (vals[0],vals[1])
return best_fit
| 0.057359 |
def set_fan_direction(self, direction):
"""
:param direction: a string one of ["forward", "reverse"]
:return: nothing
"""
desired_state = {"direction": direction}
response = self.api_interface.set_device_state(self, {
"desired_state": desired_state
})
self._update_state_from_response(response)
| 0.005376 |
def get_arrays_from_file(params_file, params=None):
"""Reads the values of one or more parameters from an hdf file and
returns as a dictionary.
Parameters
----------
params_file : str
The hdf file that contains the values of the parameters.
params : {None, list}
If provided, will just retrieve the given parameter names.
Returns
-------
dict
A dictionary of the parameters mapping `param_name -> array`.
"""
try:
f = h5py.File(params_file, 'r')
except (IOError, OSError):
raise ValueError('File not found.')
if params is not None:
if not isinstance(params, list):
params = [params]
for p in params:
if p not in f.keys():
raise ValueError('Parameter {} is not in {}'
.format(p, params_file))
else:
params = [str(k) for k in f.keys()]
params_values = {p:f[p][:] for p in params}
try:
bandwidth = f.attrs["bandwidth"]
except KeyError:
bandwidth = "scott"
f.close()
return params_values, bandwidth
| 0.003228 |
def wait_for_property(self, name, cond=lambda val: val, level_sensitive=True):
"""Waits until ``cond`` evaluates to a truthy value on the named property. This can be used to wait for
properties such as ``idle_active`` indicating the player is done with regular playback and just idling around
"""
sema = threading.Semaphore(value=0)
def observer(name, val):
if cond(val):
sema.release()
self.observe_property(name, observer)
if not level_sensitive or not cond(getattr(self, name.replace('-', '_'))):
sema.acquire()
self.unobserve_property(name, observer)
| 0.009119 |
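A hedged usage sketch with a python-mpv style player object; the file name is a placeholder.
player.play('movie.mkv')                # start playback
player.wait_for_property('idle-active') # blocks until playback finishes and the player idles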
def all_departed_units(self):
"""
Collection of all units that were previously part of any relation on
this endpoint but which have since departed.
This collection is persistent and mutable. The departed units will
be kept until they are explicitly removed, to allow for reasonable
cleanup of units that have left.
Example: You need to run a command each time a unit departs the relation.
.. code-block:: python
@when('endpoint.{endpoint_name}.departed')
def handle_departed_unit(self):
for name, unit in self.all_departed_units.items():
# run the command to remove `unit` from the cluster
# ..
self.all_departed_units.clear()
clear_flag(self.expand_name('departed'))
Once a unit is departed, it will no longer show up in
:attr:`all_joined_units`. Note that units are considered departed as
soon as the departed hook is entered, which differs slightly from how
the Juju primitives behave (departing units are still returned from
``related-units`` until after the departed hook is complete).
This collection is a :class:`KeyList`, so can be used as a mapping to
look up units by their unit name, or iterated or accessed by index.
"""
if self._all_departed_units is None:
self._all_departed_units = CachedKeyList.load(
'reactive.endpoints.departed.{}'.format(self.endpoint_name),
RelatedUnit._deserialize,
'unit_name')
return self._all_departed_units
| 0.001793 |
def copy(self):
"""A shallow copy."""
# Could just ``return self.__class__(self)`` here instead, but the below is faster. It uses
# __new__ to create a copy instance while bypassing its __init__, which would result
# in copying this bidict's items into the copy instance one at a time. Instead, make whole
# copies of each of the backing mappings, and make them the backing mappings of the copy,
# avoiding copying items one at a time.
copy = self.__class__.__new__(self.__class__)
copy._fwdm = self._fwdm.copy() # pylint: disable=protected-access
copy._invm = self._invm.copy() # pylint: disable=protected-access
copy._init_inv() # pylint: disable=protected-access
return copy
| 0.007813 |
def parse_child(self, node):
"""
Parses <Child>
@param node: Node containing the <Child> element
@type node: xml.etree.Element
"""
if 'name' in node.lattrib:
name = node.lattrib['name']
else:
self.raise_error('<Child> must specify a name.')
if 'type' in node.lattrib:
type_ = node.lattrib['type']
else:
self.raise_error("Child '{0}' must specify a type.", name)
self.current_component_type.add_children(Children(name, type_, False))
| 0.003546 |
def bug_info(exc_type, exc_value, exc_trace):
"""Prints the traceback and invokes the ipython debugger on any exception
Only invokes ipydb if you are outside ipython or python interactive session.
So scripts must be called from OS shell in order for exceptions to ipy-shell-out.
Dependencies:
Needs `pip install ipdb`
Arguments:
exc_type (type): The exception type/class (e.g. RuntimeError)
exc_value (Exception): The exception instance (e.g. the error message passed to the Exception constructor)
exc_trace (Traceback): The traceback instance
References:
http://stackoverflow.com/a/242531/623735
Example Usage:
$ python -c 'from pug import debug;x=[];x[0]'
Traceback (most recent call last):
File "<string>", line 1, in <module>
IndexError: list index out of range
> <string>(1)<module>()
ipdb> x
[]
ipdb> locals()
{'__builtins__': <module '__builtin__' (built-in)>, '__package__': None, 'x': [], 'debug': <module 'pug.debug' from 'pug/debug.py'>, '__name__': '__main__', '__doc__': None}
ipdb>
"""
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
# We are in interactive mode or don't have a tty-like device, so we call the default hook
sys.__excepthook__(exc_type, exc_value, exc_trace)
else:
# Need to import non-built-ins here, so if dependencies haven't been installed, both tracebacks will print
# (e.g. the ImportError and the Exception that got you here)
import ipdb
# We are NOT in interactive mode, print the exception
traceback.print_exception(exc_type, exc_value, exc_trace)
print
# Start the debugger in post-mortem mode.
ipdb.post_mortem(exc_trace)
| 0.005028 |
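A minimal sketch of wiring this up as the process-wide exception hook, the usual way such a helper is installed.
import sys
sys.excepthook = bug_info   # uncaught exceptions now print a traceback and drop into ipdb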
def main(jlink_serial, device):
"""Main function.
Args:
jlink_serial (str): the J-Link serial number
device (str): the target CPU
Returns:
``None``
Raises:
JLinkException: on error
"""
buf = StringIO.StringIO()
jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)
jlink.open(serial_no=jlink_serial)
jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)
jlink.connect(device, verbose=True)
# Figure out our original endianess first.
big_endian = jlink.set_little_endian()
if big_endian:
jlink.set_big_endian()
print('Target Endian Mode: %s Endian' % ('Big' if big_endian else 'Little'))
| 0.002941 |
def reference_contexts_for_variants(
variants,
context_size,
transcript_id_whitelist=None):
"""
Extract a set of reference contexts for each variant in the collection.
Parameters
----------
variants : varcode.VariantCollection
context_size : int
Max of nucleotides to include to the left and right of the variant
in the context sequence.
transcript_id_whitelist : set, optional
If given, then only consider transcripts whose IDs are in this set.
Returns a dictionary from variants to lists of ReferenceContext objects,
sorted by max coding sequence length of any transcript.
"""
result = OrderedDict()
for variant in variants:
result[variant] = reference_contexts_for_variant(
variant=variant,
context_size=context_size,
transcript_id_whitelist=transcript_id_whitelist)
return result
| 0.001074 |
def smart_device_selection(preferred_device_type=None):
"""Get a list of device environments that is suitable for use in MOT.
Basically this gets the total list of devices using all_devices() and applies a filter on it.
This filter does the following:
1) if the 'AMD Accelerated Parallel Processing' is available remove all environments using the 'Clover'
platform.
More things may be implemented in the future.
Args:
preferred_device_type (str): the preferred device type, one of 'CPU', 'GPU' or 'APU'.
If no devices of this type can be found, we will use any other device available.
Returns:
list of CLEnvironment: List with the CL device environments.
"""
cl_environments = CLEnvironmentFactory.all_devices(cl_device_type=preferred_device_type)
platform_names = [env.platform.name for env in cl_environments]
has_amd_pro_platform = any('AMD Accelerated Parallel Processing' in name for name in platform_names)
if has_amd_pro_platform:
return list(filter(lambda env: 'Clover' not in env.platform.name, cl_environments))
if preferred_device_type is not None and not len(cl_environments):
return CLEnvironmentFactory.all_devices()
return cl_environments
| 0.007326 |
def encode(self, inputRow):
"""Encodes the given input row as a dict, with the
keys being the field names. This also adds in some meta fields:
'_category': The value from the category field (if any)
'_reset': True if the reset field was True (if any)
'_sequenceId': the value from the sequenceId field (if any)
:param inputRow: sequence of values corresponding to a single input metric
data row
:rtype: dict
"""
# Create the return dict
result = dict(zip(self._fieldNames, inputRow))
# Add in the special fields
if self._categoryFieldIndex is not None:
# category value can be an int or a list
if isinstance(inputRow[self._categoryFieldIndex], int):
result['_category'] = [inputRow[self._categoryFieldIndex]]
else:
result['_category'] = (inputRow[self._categoryFieldIndex]
if inputRow[self._categoryFieldIndex]
else [None])
else:
result['_category'] = [None]
if self._resetFieldIndex is not None:
result['_reset'] = int(bool(inputRow[self._resetFieldIndex]))
else:
result['_reset'] = 0
if self._learningFieldIndex is not None:
result['_learning'] = int(bool(inputRow[self._learningFieldIndex]))
result['_timestampRecordIdx'] = None
if self._timestampFieldIndex is not None:
result['_timestamp'] = inputRow[self._timestampFieldIndex]
# Compute the record index based on timestamp
result['_timestampRecordIdx'] = self._computeTimestampRecordIdx(
inputRow[self._timestampFieldIndex])
else:
result['_timestamp'] = None
# -----------------------------------------------------------------------
# Figure out the sequence ID
hasReset = self._resetFieldIndex is not None
hasSequenceId = self._sequenceFieldIndex is not None
if hasReset and not hasSequenceId:
# Reset only
if result['_reset']:
self._sequenceId += 1
sequenceId = self._sequenceId
elif not hasReset and hasSequenceId:
sequenceId = inputRow[self._sequenceFieldIndex]
result['_reset'] = int(sequenceId != self._sequenceId)
self._sequenceId = sequenceId
elif hasReset and hasSequenceId:
sequenceId = inputRow[self._sequenceFieldIndex]
else:
sequenceId = 0
if sequenceId is not None:
result['_sequenceId'] = hash(sequenceId)
else:
result['_sequenceId'] = None
return result
| 0.008839 |
def is_union(declaration):
"""
Returns True if declaration represents a C++ union
Args:
declaration (declaration_t): the declaration to be checked.
Returns:
bool: True if declaration represents a C++ union
"""
if not is_class(declaration):
return False
decl = class_traits.get_declaration(declaration)
return decl.class_type == class_declaration.CLASS_TYPES.UNION
| 0.002375 |
def cov(self, x, y=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
"""Calculate the covariance matrix for x and y or more expressions, possibly on a grid defined by binby.
Either x and y are expressions, e.g:
>>> df.cov("x", "y")
Or only the x argument is given with a list of expressions, e,g.:
>>> df.cov(["x, "y, "z"])
Example:
>>> df.cov("x", "y")
array([[ 53.54521742, -3.8123135 ],
[ -3.8123135 , 60.62257881]])
>>> df.cov(["x", "y", "z"])
array([[ 53.54521742, -3.8123135 , -0.98260511],
[ -3.8123135 , 60.62257881, 1.21381057],
[ -0.98260511, 1.21381057, 25.55517638]])
>>> df.cov("x", "y", binby="E", shape=2)
array([[[ 9.74852878e+00, -3.02004780e-02],
[ -3.02004780e-02, 9.99288215e+00]],
[[ 8.43996546e+01, -6.51984181e+00],
[ -6.51984181e+00, 9.68938284e+01]]])
:param x: {expression}
:param y: {expression_single}
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param selection: {selection}
:param delay: {delay}
:return: {return_stat_scalar}, the last dimensions are of shape (2,2)
"""
selection = _ensure_strings_from_expressions(selection)
if y is None:
if not _issequence(x):
raise ValueError("if y argument is not given, x is expected to be sequence, not %r", x)
expressions = x
else:
expressions = [x, y]
N = len(expressions)
binby = _ensure_list(binby)
shape = _expand_shape(shape, len(binby))
progressbar = vaex.utils.progressbars(progress)
limits = self.limits(binby, limits, selection=selection, delay=True)
@delayed
def calculate(expressions, limits):
# print('limits', limits)
task = tasks.TaskStatistic(self, binby, shape, limits, weights=expressions, op=tasks.OP_COV, selection=selection)
self.executor.schedule(task)
progressbar.add_task(task, "covariance values for %r" % expressions)
return task
@delayed
def finish(values):
N = len(expressions)
counts = values[..., :N]
sums = values[..., N:2 * N]
with np.errstate(divide='ignore', invalid='ignore'):
means = sums / counts
# matrix of means * means.T
meansxy = means[..., None] * means[..., None, :]
counts = values[..., 2 * N:2 * N + N**2]
sums = values[..., 2 * N + N**2:]
shape = counts.shape[:-1] + (N, N)
counts = counts.reshape(shape)
sums = sums.reshape(shape)
with np.errstate(divide='ignore', invalid='ignore'):
moments2 = sums / counts
cov_matrix = moments2 - meansxy
return cov_matrix
progressbar = vaex.utils.progressbars(progress)
values = calculate(expressions, limits)
cov_matrix = finish(values)
return self._delay(delay, cov_matrix)
| 0.002197 |
def make_single_template_plots(workflow, segs, data_read_name, analyzed_name,
params, out_dir, inj_file=None, exclude=None,
require=None, tags=None, params_str=None,
use_exact_inj_params=False):
"""Function for creating jobs to run the pycbc_single_template code and
to run the associated plotting code pycbc_single_template_plots and add
these jobs to the workflow.
Parameters
-----------
workflow : workflow.Workflow instance
The pycbc.workflow.Workflow instance to add these jobs to.
segs : workflow.File instance
The pycbc.workflow.File instance that points to the XML file containing
the segment lists of data read in and data analyzed.
data_read_name : str
The name of the segmentlist containing the data read in by each
inspiral job in the segs file.
analyzed_name : str
The name of the segmentlist containing the data analyzed by each
inspiral job in the segs file.
params : dictionary
A dictionary containing the parameters of the template to be used.
params[ifo+'end_time'] is required for all ifos in workflow.ifos.
If use_exact_inj_params is False then one also needs to supply values for
[mass1, mass2, spin1z, spin2z]. For precessing templates one also
needs to supply [spin1y, spin1x, spin2x, spin2y, inclination]
additionally for precession one must supply u_vals or
u_vals_+ifo for all ifos. u_vals is the ratio between h_+ and h_x to
use when constructing h(t). h(t) = (h_+ * u_vals) + h_x.
out_dir : str
Directory in which to store the output files.
inj_file : workflow.File (optional, default=None)
If given send this injection file to the job so that injections are
made into the data.
exclude : list (optional, default=None)
If given, then when considering which subsections in the ini file to
parse for options to add to single_template_plot, only use subsections
that *do not* match strings in this list.
require : list (optional, default=None)
If given, then when considering which subsections in the ini file to
parse for options to add to single_template_plot, only use subsections
matching strings in this list.
tags : list (optional, default=None)
Add this list of tags to all jobs.
params_str : str (optional, default=None)
If given add this string to plot title and caption to describe the
template that was used.
use_exact_inj_params : boolean (optional, default=False)
If True do not use masses and spins listed in the params dictionary
but instead use the injection closest to the filter time as a template.
Returns
--------
output_files : workflow.FileList
The list of workflow.Files created in this function.
"""
tags = [] if tags is None else tags
makedir(out_dir)
name = 'single_template_plot'
secs = requirestr(workflow.cp.get_subsections(name), require)
secs = excludestr(secs, exclude)
files = FileList([])
for tag in secs:
for ifo in workflow.ifos:
if params['%s_end_time' % ifo] == -1.0:
continue
# Reanalyze the time around the trigger in each detector
node = SingleTemplateExecutable(workflow.cp, 'single_template',
ifos=[ifo], out_dir=out_dir,
tags=[tag] + tags).create_node()
if use_exact_inj_params:
node.add_opt('--use-params-of-closest-injection')
else:
node.add_opt('--mass1', "%.6f" % params['mass1'])
node.add_opt('--mass2', "%.6f" % params['mass2'])
node.add_opt('--spin1z',"%.6f" % params['spin1z'])
node.add_opt('--spin2z',"%.6f" % params['spin2z'])
node.add_opt('--template-start-frequency',
"%.6f" % params['f_lower'])
# Is this precessing?
if 'u_vals' in params or 'u_vals_%s' % ifo in params:
node.add_opt('--spin1x',"%.6f" % params['spin1x'])
node.add_opt('--spin1y',"%.6f" % params['spin1y'])
node.add_opt('--spin2x',"%.6f" % params['spin2x'])
node.add_opt('--spin2y',"%.6f" % params['spin2y'])
node.add_opt('--inclination',"%.6f" % params['inclination'])
try:
node.add_opt('--u-val',"%.6f" % params['u_vals'])
except KeyError:
node.add_opt('--u-val',
"%.6f" % params['u_vals_%s' % ifo])
# str(numpy.float64) restricts to 2d.p. BE CAREFUL WITH THIS!!!
str_trig_time = '%.6f' %(params[ifo + '_end_time'])
node.add_opt('--trigger-time', str_trig_time)
node.add_input_opt('--inspiral-segments', segs)
if inj_file is not None:
node.add_input_opt('--injection-file', inj_file)
node.add_opt('--data-read-name', data_read_name)
node.add_opt('--data-analyzed-name', analyzed_name)
node.new_output_file_opt(workflow.analysis_time, '.hdf',
'--output-file', store_file=False)
data = node.output_files[0]
workflow += node
# Make the plot for this trigger and detector
node = PlotExecutable(workflow.cp, name, ifos=[ifo],
out_dir=out_dir, tags=[tag] + tags).create_node()
node.add_input_opt('--single-template-file', data)
node.new_output_file_opt(workflow.analysis_time, '.png',
'--output-file')
title="'%s SNR and chi^2 timeseries" %(ifo)
if params_str is not None:
title+= " using %s" %(params_str)
title+="'"
node.add_opt('--plot-title', title)
caption = "'The SNR and chi^2 timeseries around the injection"
if params_str is not None:
caption += " using %s" %(params_str)
if use_exact_inj_params:
caption += ". The injection itself was used as the template.'"
else:
caption += ". The template used has the following parameters: "
caption += "mass1=%s, mass2=%s, spin1z=%s, spin2z=%s'"\
%(params['mass1'], params['mass2'], params['spin1z'],
params['spin2z'])
node.add_opt('--plot-caption', caption)
workflow += node
files += node.output_files
return files
| 0.003501 |
def transform(self, X=None, y=None):
"""
Transform an image using an Affine transform with the given
zoom parameters. Return the transform if X=None.
Arguments
---------
X : ANTsImage
Image to transform
y : ANTsImage (optional)
Another image to transform
Returns
-------
ANTsImage if y is None, else a tuple of ANTsImage types
Examples
--------
>>> import ants
>>> img = ants.image_read(ants.get_data('r16'))
>>> tx = ants.contrib.Zoom2D(zoom=(0.8,0.8,0.8))
>>> img2 = tx.transform(img)
"""
# unpack zoom range
zoom_x, zoom_y= self.zoom
self.params = (zoom_x, zoom_y)
zoom_matrix = np.array([[zoom_x, 0, 0],
[0, zoom_y, 0]])
self.tx.set_parameters(zoom_matrix)
if self.lazy or X is None:
return self.tx
else:
return self.tx.apply_to_image(X, reference=self.reference)
| 0.002868 |
async def peek(self):
"""
Look at a task without changing its state.
Always returns `True`.
"""
the_tuple = await self.queue.peek(self.tube, self.task_id)
self.update_from_tuple(the_tuple)
return True
| 0.007722 |
def frombinary(self, s):
"""Decode the binary string into an in memory list.
S is a binary string."""
entrylen = struct.calcsize(self.ENTRYSTRUCT)
p = 0
while p<len(s):
(slen, dpos, dlen, ulen, flag, typcd) = struct.unpack(self.ENTRYSTRUCT,
s[p:p+entrylen])
nmlen = slen - entrylen
p = p + entrylen
(nm,) = struct.unpack(`nmlen`+'s', s[p:p+nmlen])
p = p + nmlen
# version 4
# self.data.append((dpos, dlen, ulen, flag, typcd, nm[:-1]))
# version 5
# nm may have up to 15 bytes of padding
pos = nm.find('\0')
if pos < 0:
self.data.append((dpos, dlen, ulen, flag, typcd, nm))
else:
self.data.append((dpos, dlen, ulen, flag, typcd, nm[:pos]))
| 0.00765 |
def register_file(name, member, path, digest='', conn=None):
'''
Register a file in the package database
'''
close = False
if conn is None:
close = True
conn = init()
conn.execute('INSERT INTO files VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (
name,
'{0}/{1}'.format(path, member.path),
member.size,
member.mode,
digest,
member.devmajor,
member.devminor,
member.linkname,
member.linkpath,
member.uname,
member.gname,
member.mtime
))
if close:
conn.close()
| 0.003273 |
def get_edges(self, indexed=None):
"""Edges of the mesh
Parameters
----------
indexed : str | None
If indexed is None, return (Nf, 3) array of vertex indices,
two per edge in the mesh.
If indexed is 'faces', then return (Nf, 3, 2) array of vertex
indices with 3 edges per face, and two vertices per edge.
Returns
-------
edges : ndarray
The edges.
"""
if indexed is None:
if self._edges is None:
self._compute_edges(indexed=None)
return self._edges
elif indexed == 'faces':
if self._edges_indexed_by_faces is None:
self._compute_edges(indexed='faces')
return self._edges_indexed_by_faces
else:
raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
| 0.004381 |
def connect(self, *, db=None):
"""
A connected device can be switched to 'database mode' where the device will
not use the BACnet network but instead obtain its contents from a previously
stored database.
"""
if db:
self.poll(command="stop")
self.properties.db_name = db.split(".")[0]
self.new_state(DeviceFromDB)
else:
self._log.warning(
"Already connected, provide db arg if you want to connect to db"
)
| 0.012681 |
def download_cutout(self, reading, focus=None, needs_apcor=False):
"""
Downloads a cutout of the FITS image for a given source reading.
Args:
reading: ossos.astrom.SourceReading
The reading which will be the focus of the downloaded image.
focus: tuple(int, int)
The x, y coordinates that should be the focus of the downloaded
image. These coordinates should be in terms of the
source_reading parameter's coordinate system.
Default value is None, in which case the source reading's x, y
position is used as the focus.
needs_apcor: bool
If True, the apcor file with data needed for photometry
calculations is downloaded in addition to the image.
Defaults to False.
Returns:
cutout: ossos.downloads.data.SourceCutout
"""
logger.debug("Doing download_cutout with inputs: reading:{} focus:{} needs_apcor:{}".format(reading,
focus,
needs_apcor))
assert isinstance(reading, SourceReading)
min_radius = config.read('CUTOUTS.SINGLETS.RADIUS')
if not isinstance(min_radius, Quantity):
min_radius = min_radius * units.arcsec
radius = max(reading.uncertainty_ellipse.a,
reading.uncertainty_ellipse.b) * 2.5 + min_radius
logger.debug("got radius for cutout: {}".format(radius))
image_uri = reading.get_image_uri()
logger.debug("Getting cutout at {} for {}".format(reading.reference_sky_coord, image_uri))
hdulist = storage._cutout_expnum(reading.obs,
reading.reference_sky_coord, radius)
# hdulist = storage.ra_dec_cutout(image_uri, reading.reference_sky_coord, radius)
logger.debug("Getting the aperture correction.")
source = SourceCutout(reading, hdulist, radius=radius)
# Accessing the attribute here to trigger the download.
try:
apcor = source.apcor
zmag = source.zmag
source.reading.get_observation_header()
except Exception as ex:
if needs_apcor:
import sys, traceback
sys.stderr.write("Failed to retrieve apcor but apcor required. Raising error, see logs for more details")
traceback.print_exc(file=sys.stderr)
pass
logger.debug("Sending back the source reading.")
return source
| 0.003346 |
def Lazarek_Black(m, D, mul, kl, Hvap, q=None, Te=None):
r'''Calculates heat transfer coefficient for film boiling of saturated
fluid in vertical tubes for either upward or downward flow. Correlation
is as shown in [1]_, and also reviewed in [2]_ and [3]_.
Either the heat flux or excess temperature is required for the calculation
of heat transfer coefficient.
Quality independent. Requires no properties of the gas.
Uses a Reynolds number assuming all the flow is liquid.
.. math::
h_{tp} = 30 Re_{lo}^{0.857} Bg^{0.714} \frac{k_l}{D}
Re_{lo} = \frac{G_{tp}D}{\mu_l}
Parameters
----------
m : float
Mass flow rate [kg/s]
D : float
Diameter of the channel [m]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Hvap : float
Heat of vaporization of liquid [J/kg]
q : float, optional
Heat flux to wall [W/m^2]
Te : float, optional
Excess temperature of wall, [K]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ has been reviewed.
[2]_ claims it was developed for a range of quality 0-0.6,
Relo 860-5500, mass flux 125-750 kg/m^2/s, q of 1.4-38 W/cm^2, and with a
pipe diameter of 3.1 mm. Developed with data for R113 only.
Examples
--------
>>> Lazarek_Black(m=10, D=0.3, mul=1E-3, kl=0.6, Hvap=2E6, Te=100)
9501.932636079293
References
----------
.. [1] Lazarek, G. M., and S. H. Black. "Evaporative Heat Transfer,
Pressure Drop and Critical Heat Flux in a Small Vertical Tube with
R-113." International Journal of Heat and Mass Transfer 25, no. 7 (July
1982): 945-60. doi:10.1016/0017-9310(82)90070-9.
.. [2] Fang, Xiande, Zhanru Zhou, and Dingkun Li. "Review of Correlations
of Flow Boiling Heat Transfer Coefficients for Carbon Dioxide."
International Journal of Refrigeration 36, no. 8 (December 2013):
2017-39. doi:10.1016/j.ijrefrig.2013.05.015.
.. [3] Bertsch, Stefan S., Eckhard A. Groll, and Suresh V. Garimella.
"Review and Comparative Analysis of Studies on Saturated Flow Boiling in
Small Channels." Nanoscale and Microscale Thermophysical Engineering 12,
no. 3 (September 4, 2008): 187-227. doi:10.1080/15567260802317357.
'''
G = m/(pi/4*D**2)
Relo = G*D/mul
if q:
Bg = Boiling(G=G, q=q, Hvap=Hvap)
return 30*Relo**0.857*Bg**0.714*kl/D
elif Te:
# Solved with sympy
return 27000*30**(71/143)*(1./(G*Hvap))**(357/143)*Relo**(857/286)*Te**(357/143)*kl**(500/143)/D**(500/143)
else:
raise Exception('Either q or Te is needed for this correlation')
| 0.004977 |
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
else:
# FIXME: Nothing changed, scenario doesn't make sense.
_observed_errors += 1
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error)
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
| 0.00143 |
def last(col, ignorenulls=False):
"""Aggregate function: returns the last value in a group.
The function by default returns the last values it sees. It will return the last non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. note:: The function is non-deterministic because its results depends on order of rows
which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls)
return Column(jc)
| 0.006908 |
def copy(cls, data):
"""Set the clipboard data ('Copy').
Parameters: data to set (string)
Optional: datatype if it's not a string
Returns: True / False on successful copy, Any exception raised (like
passes the NSPasteboardCommunicationError) should be caught
by the caller.
"""
pp = pprint.PrettyPrinter()
copy_data = 'Data to copy (put in pasteboard): %s'
logging.debug(copy_data % pp.pformat(data))
# Clear the pasteboard first:
cleared = cls.clearAll()
if not cleared:
logging.warning('Clipboard could not clear properly')
return False
# Prepare to write the data
# If we just use writeObjects the sequence to write to the clipboard is
# a) Call clearContents()
# b) Call writeObjects() with a list of objects to write to the
# clipboard
if not isinstance(data, types.ListType):
data = [data]
pb = AppKit.NSPasteboard.generalPasteboard()
pb_set_ok = pb.writeObjects_(data)
return bool(pb_set_ok)
| 0.001759 |
def get_service_name_resources(cls, service_name):
""" Get resource models by service name """
from django.apps import apps
resources = cls._registry[service_name]['resources'].keys()
return [apps.get_model(resource) for resource in resources]
| 0.007246 |
def chk_qualifiers(self):
"""Check format of qualifier"""
if self.name == 'id2gos':
return
for ntd in self.associations:
# print(ntd)
qual = ntd.Qualifier
assert isinstance(qual, set), '{NAME}: QUALIFIER MUST BE A LIST: {NT}'.format(
NAME=self.name, NT=ntd)
assert qual != set(['']), ntd
assert qual != set(['-']), ntd
assert 'always' not in qual, 'SPEC SAID IT WOULD BE THERE'
| 0.005988 |
def uuid(self):
""" Return UUID of logical volume
:return: str
"""
uuid_file = '/sys/block/%s/dm/uuid' % os.path.basename(os.path.realpath(self.volume_path()))
lv_uuid = open(uuid_file).read().strip()
if lv_uuid.startswith('LVM-') is True:
return lv_uuid[4:]
return lv_uuid
| 0.037801 |
def _parse_expr(self):
"""
Generate sentence token trees from the current position to
the next closing parentheses / end of the list and return it
['1', '(', '2', '|', '3, ')'] -> ['1', [['2'], ['3']]]
['2', '|', '3'] -> [['2'], ['3']]
"""
# List of all generated sentences
sentence_list = []
# Currently active sentence
cur_sentence = []
sentence_list.append(Sentence(cur_sentence))
# Determine which form the current expression has
while self._current_position < len(self.tokens):
cur = self.tokens[self._current_position]
self._current_position += 1
if cur == '(':
# Parse the subexpression
subexpr = self._parse_expr()
# Check if the subexpression only has one branch
# -> If so, append "(" and ")" and add it as is
normal_brackets = False
if len(subexpr.tree()) == 1:
normal_brackets = True
cur_sentence.append(Word('('))
# add it to the sentence
cur_sentence.append(subexpr)
if normal_brackets:
cur_sentence.append(Word(')'))
elif cur == '|':
# Begin parsing a new sentence
cur_sentence = []
sentence_list.append(Sentence(cur_sentence))
elif cur == ')':
# End parsing the current subexpression
break
# TODO anything special about {sth}?
else:
cur_sentence.append(Word(cur))
return Options(sentence_list)
| 0.001147 |
def _setAxesNames(self, axisNames):
""" Sets the axesnames, combobox lables and updates the headers. Removes old values first.
The comboLables is the axes name + '-axis'
"""
for col, _ in enumerate(self._fullAxisNames, self.COL_FIRST_COMBO):
self._setHeaderLabel(col, '')
self._axisNames = tuple(axisNames)
self._fullAxisNames = tuple([axName + self.AXIS_POST_FIX for axName in axisNames])
for col, label in enumerate(self._fullAxisNames, self.COL_FIRST_COMBO):
self._setHeaderLabel(col, label)
| 0.006897 |
def simple_round_factory(tol):
"""helper function for simple_round (a factory for simple_round functions)"""
def simple_round(*args, **kwds):
argstype = type(args)
_args = list(args)
_kwds = kwds.copy()
for i,j in enumerate(args): # args[0] is the class.
if isinstance(j, float): _args[i] = round(j, tol[i - 1] \
if isinstance(tol, (list, tuple)) else tol) # don't round int
for k, (i,j) in enumerate(kwds.items()):
if isinstance(j, float): _kwds[i] = round(j, tol[k] \
if isinstance(tol, (list, tuple)) else tol)
return argstype(_args), _kwds
return simple_round
| 0.017673 |
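A hedged usage sketch; the leading None stands in for the 'class' slot the comment above refers to, and such helpers are typically used to normalize cache keys.
round2 = simple_round_factory(2)
args, kwds = round2(None, 3.14159, 10, scale=2.71828)
print(args)   # (None, 3.14, 10) -- floats rounded to 2 decimals, ints left untouched
print(kwds)   # {'scale': 2.72}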
async def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=True):
"""
Send a bunch of cluster commands to the redis cluster.
`allow_redirections` If the pipeline should follow `ASK` & `MOVED` responses
automatically. If set to false it will raise RedisClusterException.
"""
# the first time sending the commands we send all of the commands that were queued up.
# if we have to run through it again, we only retry the commands that failed.
attempt = sorted(stack, key=lambda x: x.position)
# build a list of node objects based on node names we need to
nodes = {}
# as we move through each command that still needs to be processed,
# we figure out the slot number that command maps to, then from the slot determine the node.
for c in attempt:
# refer to our internal node -> slot table that tells us where a given
# command should route to.
slot = self._determine_slot(*c.args)
node = self.connection_pool.get_node_by_slot(slot)
# little hack to make sure the node name is populated. probably could clean this up.
self.connection_pool.nodes.set_node_name(node)
# now that we know the name of the node ( it's just a string in the form of host:port )
# we can build a list of commands for each node.
node_name = node['name']
if node_name not in nodes:
nodes[node_name] = NodeCommands(self.parse_response, self.connection_pool.get_connection_by_node(node))
nodes[node_name].append(c)
# send the commands in sequence.
# we write to all the open sockets for each node first, before reading anything
# this allows us to flush all the requests out across the network essentially in parallel
# so that we can read them all in parallel as they come back.
# we don't multiplex on the sockets as they come available, but that shouldn't make too much difference.
node_commands = nodes.values()
for n in node_commands:
await n.write()
for n in node_commands:
await n.read()
# release all of the redis connections we allocated earlier back into the connection pool.
# we used to do this step as part of a try/finally block, but it is really dangerous to
# release connections back into the pool if for some reason the socket has data still left in it
# from a previous operation. The write and read operations already have try/catch around them for
# all known types of errors including connection and socket level errors.
# So if we hit an exception, something really bad happened and putting any of
# these connections back into the pool is a very bad idea.
# the socket might have unread buffer still sitting in it, and then the
# next time we read from it we pass the buffered result back from a previous
# command and every single request after to that connection will always get
# a mismatched result. (not just theoretical, I saw this happen on production x.x).
for n in nodes.values():
self.connection_pool.release(n.connection)
# if the response isn't an exception it is a valid response from the node
# we're all done with that command, YAY!
# if we have more commands to attempt, we've run into problems.
# collect all the commands we are allowed to retry.
# (MOVED, ASK, or connection errors or timeout errors)
attempt = sorted([c for c in attempt if isinstance(c.result, ERRORS_ALLOW_RETRY)], key=lambda x: x.position)
if attempt and allow_redirections:
# RETRY MAGIC HAPPENS HERE!
# send these remaining commands one at a time using `execute_command`
# in the main client. This keeps our retry logic in one place mostly,
# and allows us to be more confident in correctness of behavior.
# at this point any speed gains from pipelining have been lost
# anyway, so we might as well make the best attempt to get the correct
# behavior.
#
# The client command will handle retries for each individual command
# sequentially as we pass each one into `execute_command`. Any exceptions
# that bubble out should only appear once all retries have been exhausted.
#
# If a lot of commands have failed, we'll be setting the
# flag to rebuild the slots table from scratch. So MOVED errors should
# correct themselves fairly quickly.
await self.connection_pool.nodes.increment_reinitialize_counter(len(attempt))
for c in attempt:
try:
# send each command individually like we do in the main client.
c.result = await super(StrictClusterPipeline, self).execute_command(*c.args, **c.options)
except RedisError as e:
c.result = e
# turn the response back into a simple flat array that corresponds
# to the sequence of commands issued in the stack in pipeline.execute()
response = [c.result for c in sorted(stack, key=lambda x: x.position)]
if raise_on_error:
self.raise_first_error(stack)
return response
| 0.006212 |
def mulmod(computation: BaseComputation) -> None:
"""
Modulo Multiplication
"""
left, right, mod = computation.stack_pop(num_items=3, type_hint=constants.UINT256)
if mod == 0:
result = 0
else:
result = (left * right) % mod
computation.stack_push(result)
| 0.006711 |
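A minimal arithmetic sketch of the semantics above (EVM-style MULMOD): the full product is reduced modulo `mod`, and a zero modulus yields 0 instead of raising.
def mulmod_value(left, right, mod):
    return 0 if mod == 0 else (left * right) % mod
assert mulmod_value(10, 10, 8) == 4
assert mulmod_value(2**255, 3, 2**256) == (2**255 * 3) % 2**256
assert mulmod_value(7, 5, 0) == 0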
def encode_async_options(async):
"""Encode Async options for JSON encoding."""
options = copy.deepcopy(async._options)
options['_type'] = reference_to_path(async.__class__)
# JSON don't like datetimes.
eta = options.get('task_args', {}).get('eta')
if eta:
options['task_args']['eta'] = time.mktime(eta.timetuple())
callbacks = async._options.get('callbacks')
if callbacks:
options['callbacks'] = encode_callbacks(callbacks)
if '_context_checker' in options:
_checker = options.pop('_context_checker')
options['__context_checker'] = reference_to_path(_checker)
if '_process_results' in options:
_processor = options.pop('_process_results')
options['__process_results'] = reference_to_path(_processor)
return options
| 0.006158 |
def modify(self, pk=None, create_on_missing=False, **kwargs):
"""Modify an already existing object.
Fields in the resource's `identity` tuple can be used in lieu of a primary key for a lookup; in such a case,
only other fields are written.
To modify unique fields, you must use the primary key for the lookup.
=====API DOCS=====
Modify an already existing object.
:param pk: Primary key of the resource to be modified.
:type pk: int
:param create_on_missing: Flag that if set, a new object is created if ``pk`` is not set and objects
matching the appropriate unique criteria is not found.
:type create_on_missing: bool
:param `**kwargs`: Keyword arguments which, all together, will be used as PATCH body to modify the
resource object. if ``pk`` is not set, key-value pairs of ``**kwargs`` which are
also in resource's identity will be used to lookup existing reosource.
:returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:
"changed", a flag indicating if the resource is successfully updated; "id", an integer which
is the primary key of the updated object.
:rtype: dict
=====API DOCS=====
"""
return self.write(pk, create_on_missing=create_on_missing, force_on_exists=True, **kwargs)
| 0.007373 |
def delegators_count(self, account):
"""
Get number of delegators for a specific representative **account**
.. version 8.0 required
:param account: Account to get number of delegators for
:type account: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.delegators_count(
... account="xrb_1111111111111111111111111111111111111111111111111117353trpda"
... )
2
"""
account = self._process_value(account, 'account')
payload = {"account": account}
resp = self.call('delegators_count', payload)
return int(resp['count'])
| 0.004615 |
def delete(self, bucket=None, ignore=False):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
elif bucket is None:
buckets = reversed(self.buckets)
# Iterate
tables = []
for bucket in buckets:
# Check existent
if bucket not in self.buckets:
if not ignore:
message = 'Bucket "%s" doesn\'t exist.' % bucket
raise tableschema.exceptions.StorageError(message)
return
# Remove from buckets
if bucket in self.__descriptors:
del self.__descriptors[bucket]
# Add table to tables
table = self.__get_table(bucket)
tables.append(table)
# Drop tables, update metadata
self.__metadata.drop_all(tables=tables)
self.__metadata.clear()
self.__reflect()
| 0.001899 |
async def tile(tile_number):
"""
Handles GET requests for a tile number.
:param int tile_number: Number of the tile between 0 and `max_tiles`^2.
:raises HTTPError: 404 if tile exceeds `max_tiles`^2.
"""
try:
tile = get_tile(tile_number)
except TileOutOfBoundsError:
abort(404)
    buf = BytesIO()
tile.save(buf, 'JPEG')
content = buf.getvalue()
response = await make_response(content)
response.headers['Content-Type'] = 'image/jpg'
response.headers['Accept-Ranges'] = 'bytes'
response.headers['Content-Length'] = str(len(content))
return response
| 0.001575 |
def _try_coerce_args(self, values, other):
"""
Coerce values and other to int64, with null values converted to
iNaT. values is always ndarray-like, other may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, base-type other
"""
values = values.view('i8')
if isinstance(other, bool):
raise TypeError
elif is_null_datetimelike(other):
other = tslibs.iNaT
elif isinstance(other, (timedelta, np.timedelta64)):
other = Timedelta(other).value
elif hasattr(other, 'dtype') and is_timedelta64_dtype(other):
other = other.astype('i8', copy=False).view('i8')
else:
# coercion issues
# let higher levels handle
raise TypeError(other)
return values, other
| 0.002125 |
def _extract_rpm_file(self, target_file, extract_path):
"""Extracts the rpm file.
:param target_file: the firmware file to be extracted from
:param extract_path: the path where extraction is supposed to happen
:raises: ImageExtractionFailed, if any issue with extraction
"""
if not os.path.exists(extract_path):
os.makedirs(extract_path)
os.chdir(extract_path)
if find_executable('rpm2cpio') is None:
raise exception.ImageExtractionFailed(
image_ref=target_file, reason='Command `rpm2cpio` not found.')
if find_executable('cpio') is None:
raise exception.ImageExtractionFailed(
image_ref=target_file, reason='Command `cpio` not found.')
try:
rpm2cpio = subprocess.Popen('rpm2cpio ' + target_file,
shell=True,
stdout=subprocess.PIPE)
cpio = subprocess.Popen('cpio -idm', shell=True,
stdin=rpm2cpio.stdout)
out, err = cpio.communicate()
except (OSError, ValueError) as e:
raise exception.ImageExtractionFailed(
image_ref=target_file,
reason='Unexpected error in extracting file. ' + str(e))
| 0.000805 |
def execute_command(self, *args, **options):
"Execute a command and return a parsed response"
pool = self.connection_pool
command_name = args[0]
connection = pool.get_connection(command_name, **options)
try:
connection.send_command(*args)
return self.parse_response(connection, command_name, **options)
except (ConnectionError, TimeoutError) as e:
connection.disconnect()
if not (connection.retry_on_timeout and
isinstance(e, TimeoutError)):
raise
connection.send_command(*args)
return self.parse_response(connection, command_name, **options)
finally:
pool.release(connection)
| 0.00266 |
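An illustrative call pattern for the method above, assuming `client` is an instance of the Redis client class that defines it (key and value are arbitrary):

client.execute_command('SET', 'example-key', 'example-value')
value = client.execute_command('GET', 'example-key')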
def build_layout(self, dset_id: str):
"""
        Build the interactive dashboard layout (filter, data and chart tabs)
        for the dataset identified by ``dset_id``.

        :param dset_id: identifier of the dataset to build the layout for
        :return: None; widgets are registered on the instance
"""
all_fields = list(self.get_data(dset_id=dset_id).keys())
try:
field_reference = self.skd[dset_id].attrs('target')
        except Exception:
field_reference = all_fields[0]
fields_comparison = [all_fields[1]]
# chart type widget
self.register_widget(
chart_type=widgets.RadioButtons(
options=['individual', 'grouped'],
value='individual',
description='Chart Type:'
)
)
# bins widget
self.register_widget(
bins=IntSlider(
description='Bins:',
min=2, max=10, value=2,
continuous_update=False
)
)
# fields comparison widget
self.register_widget(
xs=widgets.SelectMultiple(
description='Xs:',
options=[f for f in all_fields if not f == field_reference],
value=fields_comparison
)
)
# field reference widget
self.register_widget(
y=widgets.Dropdown(
description='Y:',
options=all_fields,
value=field_reference
)
)
        # used for internal flow control
y_changed = [False]
self.register_widget(
box_filter_panel=widgets.VBox([
self._('y'), self._('xs'), self._('bins')
])
)
# layout widgets
self.register_widget(
table=widgets.HTML(),
chart=widgets.HTML()
)
self.register_widget(vbox_chart=widgets.VBox([
self._('chart_type'), self._('chart')
]))
self.register_widget(
tab=widgets.Tab(
children=[
self._('box_filter_panel'),
self._('table'),
self._('vbox_chart')
]
)
)
self.register_widget(dashboard=widgets.HBox([self._('tab')]))
# observe hooks
def w_y_change(change: dict):
"""
When y field was changed xs field should be updated and data table
and chart should be displayed/updated.
:param change:
:return:
"""
# remove reference field from the comparison field list
_xs = [
f for f in all_fields
if not f == change['new']
]
y_changed[0] = True # flow control variable
_xs_value = list(self._('xs').value)
if change['new'] in self._('xs').value:
_xs_value.pop(_xs_value.index(change['new']))
if not _xs_value:
_xs_value = [_xs[0]]
self._('xs').options = _xs
self._('xs').value = _xs_value
self._display_result(y=change['new'], dset_id=dset_id)
y_changed[0] = False # flow control variable
# widgets registration
# change tab settings
self._('tab').set_title(0, 'Filter')
self._('tab').set_title(1, 'Data')
self._('tab').set_title(2, 'Chart')
# data panel
self._('table').value = '...'
# chart panel
self._('chart').value = '...'
# create observe callbacks
self._('bins').observe(
lambda change: (
self._display_result(bins=change['new'], dset_id=dset_id)
), 'value'
)
self._('y').observe(w_y_change, 'value')
        # refresh the displayed result only when the change did not come from a 'y' update
self._('xs').observe(
lambda change: (
self._display_result(xs=change['new'], dset_id=dset_id)
if not y_changed[0] else None
), 'value'
)
self._('chart_type').observe(
lambda change: (
self._display_result(chart_type=change['new'], dset_id=dset_id)
), 'value'
)
| 0.000728 |
def _clear_namespace():
""" Clear names that are not part of the strict ES API
"""
ok_names = set(default_backend.__dict__)
ok_names.update(['gl2', 'glplus']) # don't remove the module
NS = globals()
for name in list(NS.keys()):
if name.lower().startswith('gl'):
if name not in ok_names:
del NS[name]
| 0.00277 |
def deriv(self, x: str, ctype: ContentType) -> SchemaPattern:
"""Return derivative of the receiver."""
return Alternative.combine(self.left.deriv(x, ctype),
self.right.deriv(x, ctype))
| 0.008511 |
def show_tricky_tasks(self, verbose=0):
"""
        Print the list of tricky tasks, i.e. tasks that have been restarted or
        launched more than once, or that have corrections.
Args:
verbose: Verbosity level. If > 0, task history and corrections (if any) are printed.
"""
nids, tasks = [], []
for task in self.iflat_tasks():
if task.num_launches > 1 or any(n > 0 for n in (task.num_restarts, task.num_corrections)):
nids.append(task.node_id)
tasks.append(task)
if not nids:
cprint("Everything's fine, no tricky tasks found", color="green")
else:
self.show_status(nids=nids)
if not verbose:
print("Use --verbose to print task history.")
return
for nid, task in zip(nids, tasks):
cprint(repr(task), **task.status.color_opts)
self.show_history(nids=[nid], full_history=False, metadata=False)
#if task.num_restarts:
# self.show_restarts(nids=[nid])
if task.num_corrections:
self.show_corrections(nids=[nid])
| 0.004996 |
def ingest(self, token, endpoint=None, timeout=None, compress=None):
"""Obtain a datapoint and event ingest client."""
from . import ingest
if ingest.sf_pbuf:
client = ingest.ProtoBufSignalFxIngestClient
else:
_logger.warn('Protocol Buffers not installed properly; '
'falling back to JSON.')
client = ingest.JsonSignalFxIngestClient
compress = compress if compress is not None else self._compress
return client(
token=token,
endpoint=endpoint or self._ingest_endpoint,
timeout=timeout or self._timeout,
compress=compress)
| 0.00295 |
def update(self, dt=-1):
"""
        Advance the model by one time step ``dt`` and return the library's
        integer return code. When ``dt`` is -1 the model's default time step is used.
"""
self.library.update.argtypes = [c_double]
self.library.update.restype = c_int
if dt == -1:
# use default timestep
dt = self.get_time_step()
result = wrap(self.library.update)(dt)
return result
| 0.005618 |
def to_internal(self, attribute_profile, external_dict):
"""
        Converts the external data from the given external type (attribute profile) to the internal format
:type attribute_profile: str
:type external_dict: dict[str, str]
:rtype: dict[str, str]
:param attribute_profile: From which external type to convert (ex: oidc, saml, ...)
:param external_dict: Attributes in the external format
:return: Attributes in the internal format
"""
internal_dict = {}
for internal_attribute_name, mapping in self.from_internal_attributes.items():
if attribute_profile not in mapping:
logger.debug("no attribute mapping found for internal attribute '%s' the attribute profile '%s'" % (
internal_attribute_name, attribute_profile))
# skip this internal attribute if we have no mapping in the specified profile
continue
external_attribute_name = mapping[attribute_profile]
attribute_values = self._collate_attribute_values_by_priority_order(external_attribute_name,
external_dict)
if attribute_values: # Only insert key if it has some values
logger.debug("backend attribute '%s' mapped to %s" % (external_attribute_name,
internal_attribute_name))
internal_dict[internal_attribute_name] = attribute_values
else:
logger.debug("skipped backend attribute '%s': no value found", external_attribute_name)
internal_dict = self._handle_template_attributes(attribute_profile, internal_dict)
return internal_dict
| 0.007374 |
def _perform_custom_queries(self, conn, custom_queries, tags, instance):
"""
        Perform custom queries to collect additional metrics such as the number of results and the duration of each query
"""
for query in custom_queries:
name = query.get("name")
if name is None:
self.log.error("`name` field is required for custom query")
continue
search_base = query.get("search_base")
if search_base is None:
self.log.error("`search_base` field is required for custom query #%s", name)
continue
search_filter = query.get("search_filter")
if search_filter is None:
self.log.error("`search_filter` field is required for custom query #%s", name)
continue
attrs = query.get("attributes")
if "username" in query:
username = query.get("username")
password = query.get("password")
if not username:
# username is an empty string, we want anonymous bind
username = None
password = None
else:
# username not specified, we want to reuse the credentials for the monitor backend
username = instance.get("username")
password = instance.get("password")
try:
# Rebind with different credentials
auth_method = ldap3.SIMPLE if username else ldap3.ANONYMOUS
if username is None:
conn.user = None
res = conn.rebind(user=username, password=password, authentication=auth_method)
if not res:
raise ldap3.core.exceptions.LDAPBindError("Error binding to server: {}".format(conn.result))
except ldap3.core.exceptions.LDAPBindError:
self.log.exception("Could not rebind to server at %s to perform query %s", instance.get("url"), name)
continue
try:
# Perform the search query
conn.search(search_base, search_filter, attributes=attrs)
except ldap3.core.exceptions.LDAPException:
self.log.exception("Unable to perform search query for %s", name)
continue
query_tags = ['query:{}'.format(name)]
query_tags.extend(tags)
query_time = self._get_query_time(conn)
results = len(conn.entries)
self.gauge("{}.query.duration".format(self.METRIC_PREFIX), query_time, tags=query_tags)
self.gauge("{}.query.entries".format(self.METRIC_PREFIX), results, tags=query_tags)
| 0.004388 |
def parse_stdout(self, filelike):
"""Parse the formulae from the content written by the script to standard out.
:param filelike: filelike object of stdout
:returns: an exit code in case of an error, None otherwise
"""
from aiida.orm import Dict
formulae = {}
content = filelike.read().strip()
if not content:
return self.exit_codes.ERROR_EMPTY_OUTPUT_FILE
try:
for line in content.split('\n'):
datablock, formula = re.split(r'\s+', line.strip(), 1)
formulae[datablock] = formula
except Exception: # pylint: disable=broad-except
self.logger.exception('Failed to parse formulae from the stdout file\n%s', traceback.format_exc())
return self.exit_codes.ERROR_PARSING_OUTPUT_DATA
else:
self.out('formulae', Dict(dict=formulae))
return
| 0.004329 |
def _input_stmt(self, stmt: object) -> tuple:
"""
takes the input key from kwargs and processes it to aid in the generation of a model statement
:param stmt: str, list, or dict that contains the model information.
:return: tuple of strings one for the class statement one for the model statements
"""
code = ''
cls = ''
if isinstance(stmt, str):
code += "%s " % (stmt)
elif isinstance(stmt, dict):
try:
if 'interval' in stmt.keys():
if isinstance(stmt['interval'], str):
code += "%s " % stmt['interval']
if isinstance(stmt['interval'], list):
code += "%s " % " ".join(stmt['interval'])
if 'nominal' in stmt.keys():
if isinstance(stmt['nominal'], str):
code += "%s " % stmt['nominal']
cls += "%s " % stmt['nominal']
if isinstance(stmt['nominal'], list):
code += "%s " % " ".join(stmt['nominal'])
cls += "%s " % " ".join(stmt['nominal'])
            except Exception:
raise SyntaxError("Proper Keys not found for INPUT dictionary: %s" % stmt.keys())
elif isinstance(stmt, list):
if len(stmt) == 1:
code += "%s" % str(stmt[0])
elif len(stmt) > 1:
code += "%s" % " ".join(stmt)
else:
raise SyntaxError("The input list has no members")
else:
raise SyntaxError("INPUT is in an unknown format: %s" % str(stmt))
return (code, cls)
| 0.003529 |
def path_from_uri(self, uri):
"""Make a safe path name from uri.
In the case that uri is already a local path then the
same path is returned.
"""
(scheme, netloc, path, params, query, frag) = urlparse(uri)
if (netloc == ''):
return(uri)
path = '/'.join([netloc, path])
        path = re.sub(r'[^\w\-\.]', '_', path)
        path = re.sub(r'__+', '_', path)
        path = re.sub(r'[_\.]+$', '', path)
        path = re.sub(r'^[_\.]+', '', path)
return(path)
| 0.013208 |
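A rough illustration of the transformation above, assuming `mapper` is an instance of the class that defines the method (URIs are illustrative):

mapper.path_from_uri('/tmp/local/file.html')
# -> '/tmp/local/file.html' (no netloc, so the local path is returned unchanged)
mapper.path_from_uri('http://example.org/dir/file.html')
# -> 'example.org_dir_file.html' (netloc joined to the path, unsafe characters collapsed to '_')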
def get_nodes(self, request):
"""
Generates the nodelist
:param request:
:return: list of nodes
"""
nodes = []
language = get_language_from_request(request, check_path=True)
current_site = get_current_site(request)
page_site = self.instance.node.site
if self.instance and page_site != current_site:
return []
categories_menu = False
posts_menu = False
config = False
if self.instance:
if not self._config.get(self.instance.application_namespace, False):
self._config[self.instance.application_namespace] = BlogConfig.objects.get(
namespace=self.instance.application_namespace
)
config = self._config[self.instance.application_namespace]
if not getattr(request, 'toolbar', False) or not request.toolbar.edit_mode_active:
if self.instance == self.instance.get_draft_object():
return []
else:
if self.instance == self.instance.get_public_object():
return []
if config and config.menu_structure in (MENU_TYPE_COMPLETE, MENU_TYPE_CATEGORIES):
categories_menu = True
if config and config.menu_structure in (MENU_TYPE_COMPLETE, MENU_TYPE_POSTS):
posts_menu = True
if config and config.menu_structure in (MENU_TYPE_NONE, ):
return nodes
used_categories = []
if posts_menu:
posts = Post.objects
if hasattr(self, 'instance') and self.instance:
posts = posts.namespace(self.instance.application_namespace).on_site()
posts = posts.active_translations(language).distinct().\
select_related('app_config').prefetch_related('translations', 'categories')
for post in posts:
post_id = None
parent = None
used_categories.extend(post.categories.values_list('pk', flat=True))
if categories_menu:
category = post.categories.first()
if category:
parent = '{0}-{1}'.format(category.__class__.__name__, category.pk)
                    post_id = '{0}-{1}'.format(post.__class__.__name__, post.pk)
else:
                post_id = '{0}-{1}'.format(post.__class__.__name__, post.pk)
if post_id:
node = NavigationNode(
post.get_title(),
post.get_absolute_url(language),
post_id,
parent
)
nodes.append(node)
if categories_menu:
categories = BlogCategory.objects
if config:
categories = categories.namespace(self.instance.application_namespace)
if config and not config.menu_empty_categories:
categories = categories.active_translations(language).filter(
pk__in=used_categories
).distinct()
else:
categories = categories.active_translations(language).distinct()
categories = categories.order_by('parent__id', 'translations__name').\
select_related('app_config').prefetch_related('translations')
added_categories = []
for category in categories:
if category.pk not in added_categories:
node = NavigationNode(
category.name,
category.get_absolute_url(),
'{0}-{1}'.format(category.__class__.__name__, category.pk),
(
'{0}-{1}'.format(
category.__class__.__name__, category.parent.id
) if category.parent else None
)
)
nodes.append(node)
added_categories.append(category.pk)
return nodes
| 0.004105 |
def yield_amd_require_string_arguments(
node, pos,
reserved_module=reserved_module, wrapped=define_wrapped):
"""
This yields only strings within the lists provided in the argument
list at the specified position from a function call.
    Originally, this was implemented to yield a list of module names
be imported as represented by this given node, which must be of the
FunctionCall type.
"""
for i, child in enumerate(node.args.items[pos]):
if isinstance(child, asttypes.String):
result = to_str(child)
if ((result not in reserved_module) and (
                    result != wrapped.get(i))):
yield result
| 0.001406 |
def infer_dict(node, context=None):
"""Try to infer a dict call to a Dict node.
The function treats the following cases:
* dict()
* dict(mapping)
* dict(iterable)
* dict(iterable, **kwargs)
* dict(mapping, **kwargs)
* dict(**kwargs)
If a case can't be inferred, we'll fallback to default inference.
"""
call = arguments.CallSite.from_call(node)
if call.has_invalid_arguments() or call.has_invalid_keywords():
raise UseInferenceDefault
args = call.positional_arguments
kwargs = list(call.keyword_arguments.items())
if not args and not kwargs:
# dict()
return nodes.Dict()
elif kwargs and not args:
# dict(a=1, b=2, c=4)
items = [(nodes.Const(key), value) for key, value in kwargs]
elif len(args) == 1 and kwargs:
# dict(some_iterable, b=2, c=4)
elts = _get_elts(args[0], context)
keys = [(nodes.Const(key), value) for key, value in kwargs]
items = elts + keys
elif len(args) == 1:
items = _get_elts(args[0], context)
else:
raise UseInferenceDefault()
value = nodes.Dict(
col_offset=node.col_offset, lineno=node.lineno, parent=node.parent
)
value.postinit(items)
return value
| 0.000775 |
def formataddr(pair, charset='utf-8'):
"""The inverse of parseaddr(), this takes a 2-tuple of the form
(realname, email_address) and returns the string value suitable
for an RFC 2822 From, To or Cc header.
If the first element of pair is false, then the second element is
returned unmodified.
Optional charset if given is the character set that is used to encode
realname in case realname is not ASCII safe. Can be an instance of str or
a Charset-like object which has a header_encode method. Default is
'utf-8'.
"""
name, address = pair
    # The address MUST (per RFC) be ASCII, so raise a UnicodeError if it isn't.
address.encode('ascii')
if name:
try:
name.encode('ascii')
except UnicodeEncodeError:
if isinstance(charset, str):
charset = Charset(charset)
encoded_name = charset.header_encode(name)
return "%s <%s>" % (encoded_name, address)
else:
quotes = ''
if specialsre.search(name):
quotes = '"'
name = escapesre.sub(r'\\\g<0>', name)
return '%s%s%s <%s>' % (quotes, name, quotes, address)
return address
| 0.001631 |
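Typical behavior of the helper above (it mirrors the standard-library email.utils.formataddr; addresses are illustrative):

formataddr(('John Smith', '[email protected]'))   # 'John Smith <[email protected]>'
formataddr(('Smith, John', '[email protected]'))  # '"Smith, John" <[email protected]>' (quoted because of the comma)
formataddr(('', '[email protected]'))             # '[email protected]'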
def __profile_file(self):
"""Method used to profile the given file line by line."""
self.line_profiler = pprofile.Profile()
self.line_profiler.runfile(
open(self.pyfile.path, "r"), {}, self.pyfile.path
)
| 0.008097 |
def request(self, path, action, data=''):
"""To make a request to the API."""
        # Check whether the path already includes the base URL.
head = self.base_url
if path.startswith(head):
path = path[len(head):]
path = quote_plus(path, safe='/')
if not path.startswith(self.api):
path = self.api + path
log.debug('Using path %s' % path)
# If we have data, convert to JSON
if data:
data = json.dumps(data)
            log.debug('Data to send: %s' % data)
# In case of key authentication
if self.private_key and self.public_key:
timestamp = str(int(time.time()))
log.debug('Using timestamp: {}'.format(timestamp))
unhashed = path + timestamp + str(data)
log.debug('Using message: {}'.format(unhashed))
self.hash = hmac.new(str.encode(self.private_key),
msg=unhashed.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest()
log.debug('Authenticating with hash: %s' % self.hash)
self.headers['X-Public-Key'] = self.public_key
self.headers['X-Request-Hash'] = self.hash
self.headers['X-Request-Timestamp'] = timestamp
auth = False
# In case of user credentials authentication
elif self.username and self.password:
auth = requests.auth.HTTPBasicAuth(self.username, self.password)
# Set unlock reason
if self.unlock_reason:
self.headers['X-Unlock-Reason'] = self.unlock_reason
log.info('Unlock Reason: %s' % self.unlock_reason)
url = head + path
# Try API request and handle Exceptions
try:
if action == 'get':
log.debug('GET request %s' % url)
self.req = requests.get(url, headers=self.headers, auth=auth,
verify=False)
elif action == 'post':
log.debug('POST request %s' % url)
self.req = requests.post(url, headers=self.headers, auth=auth,
verify=False, data=data)
elif action == 'put':
log.debug('PUT request %s' % url)
self.req = requests.put(url, headers=self.headers,
auth=auth, verify=False,
data=data)
elif action == 'delete':
log.debug('DELETE request %s' % url)
self.req = requests.delete(url, headers=self.headers,
verify=False, auth=auth)
if self.req.content == b'':
result = None
log.debug('No result returned.')
else:
result = self.req.json()
if 'error' in result and result['error']:
raise TPMException(result['message'])
except requests.exceptions.RequestException as e:
log.critical("Connection error for " + str(e))
raise TPMException("Connection error for " + str(e))
except ValueError as e:
if self.req.status_code == 403:
log.warning(url + " forbidden")
raise TPMException(url + " forbidden")
elif self.req.status_code == 404:
                log.warning(url + " not found")
raise TPMException(url + " not found")
else:
message = ('%s: %s %s' % (e, self.req.url, self.req.text))
log.debug(message)
raise ValueError(message)
return result
| 0.00054 |
def notify_launch(self, log_level='ERROR'):
"""logs launcher message before startup
Args:
log_level (str): level to notify at
"""
if not self.debug:
self.logger.log(
logging.getLevelName(log_level),
'LAUNCHING %s -- %s', self.PROGNAME, platform.node()
)
flask_options = {
key: getattr(self, key) for key in OPTION_ARGS
}
flask_options['host'] = self.get_host()
self.logger.info('OPTIONS: %s', flask_options)
| 0.00361 |
def create_asyncio_eventloop(loop=None):
"""
Returns an asyncio :class:`~prompt_toolkit.eventloop.EventLoop` instance
for usage in a :class:`~prompt_toolkit.interface.CommandLineInterface`. It
is a wrapper around an asyncio loop.
:param loop: The asyncio eventloop (or `None` if the default asyncioloop
should be used.)
"""
# Inline import, to make sure the rest doesn't break on Python 2. (Where
# asyncio is not available.)
if is_windows():
from prompt_toolkit.eventloop.asyncio_win32 import Win32AsyncioEventLoop as AsyncioEventLoop
else:
from prompt_toolkit.eventloop.asyncio_posix import PosixAsyncioEventLoop as AsyncioEventLoop
return AsyncioEventLoop(loop)
| 0.004038 |
def to_json(self):
"""
Returns the JSON representation of the API key.
"""
result = super(ApiKey, self).to_json()
result.update({
'name': self.name,
'description': self.description,
'accessToken': self.access_token,
'environments': [e.to_json() for e in self.environments]
})
return result
| 0.005076 |
def key_event_to_name(event):
""" Converts a keystroke event into a corresponding key name.
"""
key_code = event.key()
modifiers = event.modifiers()
if modifiers & QtCore.Qt.KeypadModifier:
key = keypad_map.get(key_code)
else:
key = None
if key is None:
key = key_map.get(key_code)
name = ''
if modifiers & QtCore.Qt.ControlModifier:
name += 'Ctrl'
if modifiers & QtCore.Qt.AltModifier:
name += '-Alt' if name else 'Alt'
if modifiers & QtCore.Qt.MetaModifier:
name += '-Meta' if name else 'Meta'
if modifiers & QtCore.Qt.ShiftModifier and ((name != '') or (key is not None and len(key) > 1)):
name += '-Shift' if name else 'Shift'
if key:
if name:
name += '-'
name += key
return name
| 0.008403 |
def tcase_exit(trun, tsuite, tcase):
"""..."""
#pylint: disable=locally-disabled, unused-argument
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:tcase:exit { fname: %r }" % tcase["fname"])
rcode = 0
for hook in reversed(tcase["hooks"]["exit"]): # tcase EXIT-hooks
rcode = script_run(trun, hook)
if rcode:
break
if trun["conf"]["VERBOSE"]:
cij.emph("rnr:tcase:exit { rcode: %r }" % rcode, rcode)
return rcode
| 0.004158 |
def p_nonfluent_def(self, p):
'''nonfluent_def : IDENT LPAREN param_list RPAREN COLON LCURLY NON_FLUENT COMMA type_spec COMMA DEFAULT ASSIGN_EQUAL range_const RCURLY SEMI
| IDENT COLON LCURLY NON_FLUENT COMMA type_spec COMMA DEFAULT ASSIGN_EQUAL range_const RCURLY SEMI'''
if len(p) == 16:
p[0] = PVariable(name=p[1], fluent_type='non-fluent', range_type=p[9], param_types=p[3], default=p[13])
else:
p[0] = PVariable(name=p[1], fluent_type='non-fluent', range_type=p[6], default=p[10])
| 0.010753 |
def map_query_string(self):
"""Maps the GET query string params the the query_key_mapper dict and
updates the request's GET QueryDict with the mapped keys.
"""
if (not self.query_key_mapper or
self.request.method == 'POST'):
# Nothing to map, don't do anything.
# return self.request.POST
return {}
keys = list(self.query_key_mapper.keys())
return {self.query_key_mapper.get(k) if k in keys else k: v.strip()
for k, v in self.request.GET.items()}
| 0.005357 |
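A sketch of how the mapping above is typically configured on a Django view; the mixin and view names are hypothetical, only the query_key_mapper attribute comes from the method:

from django.views.generic import ListView

class ProductSearchView(QueryMapperMixin, ListView):  # QueryMapperMixin: hypothetical mixin providing map_query_string
    # an incoming GET of '?q=shoes&cat=3' is mapped to {'search': 'shoes', 'category': '3'}
    query_key_mapper = {'q': 'search', 'cat': 'category'}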
def disassociate_failure_node(self, parent, child):
"""Remove a failure node link.
        The two resulting nodes will both become root nodes.
=====API DOCS=====
Remove a failure node link.
:param parent: Primary key of parent node to disassociate failure node from.
:type parent: int
:param child: Primary key of child node to be disassociated.
:type child: int
:returns: Dictionary of only one key "changed", which indicates whether the disassociation succeeded.
:rtype: dict
=====API DOCS=====
"""
return self._disassoc(
self._forward_rel_name('failure'), parent, child)
| 0.005848 |
def log_repo_action(func):
"""
Log all repo actions to .dgit/log.json
"""
def _inner(*args, **kwargs):
result = func(*args, **kwargs)
log_action(func, result, *args, **kwargs)
return result
_inner.__name__ = func.__name__
_inner.__doc__ = func.__doc__
return _inner
| 0.020173 |
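A minimal usage sketch for the decorator above (assumes it is applied in the same module, so log_action is available):

@log_repo_action
def push(repo):
    """Push local changes of `repo` upstream (hypothetical repo action)."""
    return {'status': 'success'}

result = push('myrepo')  # the call and its result are logged to .dgit/log.json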
def _getitem(self, key):
"""Return specified page of series from cache or file."""
key = int(key)
if key < 0:
key %= self._len
if len(self._pages) == 1 and 0 < key < self._len:
index = self._pages[0].index
return self.parent.pages._getitem(index + key)
return self._pages[key]
| 0.005682 |
def FindDevice(self, address):
'''Find a specific device by bluetooth address.
'''
for obj in mockobject.objects.keys():
if obj.startswith('/org/bluez/') and 'dev_' in obj:
o = mockobject.objects[obj]
if o.props[DEVICE_IFACE]['Address'] \
== dbus.String(address, variant_level=1):
return obj
raise dbus.exceptions.DBusException('No such device.',
name='org.bluez.Error.NoSuchDevice')
| 0.001969 |
def find_max_and_min_frequencies(name, mass_range_params, freqs):
"""
    Given a named frequency cutoff and the template-bank mass/spin ranges,
    determine the smallest and largest cutoff frequencies the bank can reach
    and return the closest frequencies in ``freqs`` at which the metric was
    actually calculated.
"""
cutoff_fns = pnutils.named_frequency_cutoffs
if name not in cutoff_fns.keys():
err_msg = "%s not recognized as a valid cutoff frequency choice." %name
err_msg += "Recognized choices: " + " ".join(cutoff_fns.keys())
raise ValueError(err_msg)
# Can I do this quickly?
total_mass_approxs = {
"SchwarzISCO": pnutils.f_SchwarzISCO,
"LightRing" : pnutils.f_LightRing,
"ERD" : pnutils.f_ERD
}
if name in total_mass_approxs.keys():
# This can be done quickly if the cutoff only depends on total mass
# Assumes that lower total mass = higher cutoff frequency
upper_f_cutoff = total_mass_approxs[name](mass_range_params.minTotMass)
lower_f_cutoff = total_mass_approxs[name](mass_range_params.maxTotMass)
else:
# Do this numerically
# FIXME: Is 1000000 the right choice? I think so, but just highlighting
mass1, mass2, spin1z, spin2z = \
get_random_mass(1000000, mass_range_params)
mass_dict = {}
mass_dict['mass1'] = mass1
mass_dict['mass2'] = mass2
mass_dict['spin1z'] = spin1z
mass_dict['spin2z'] = spin2z
tmp_freqs = cutoff_fns[name](mass_dict)
upper_f_cutoff = tmp_freqs.max()
lower_f_cutoff = tmp_freqs.min()
cutoffs = numpy.array([lower_f_cutoff,upper_f_cutoff])
if lower_f_cutoff < freqs.min():
warn_msg = "WARNING: "
warn_msg += "Lowest frequency cutoff is %s Hz " %(lower_f_cutoff,)
warn_msg += "which is lower than the lowest frequency calculated "
warn_msg += "for the metric: %s Hz. " %(freqs.min())
warn_msg += "Distances for these waveforms will be calculated at "
warn_msg += "the lowest available metric frequency."
logging.warn(warn_msg)
if upper_f_cutoff > freqs.max():
warn_msg = "WARNING: "
warn_msg += "Highest frequency cutoff is %s Hz " %(upper_f_cutoff,)
warn_msg += "which is larger than the highest frequency calculated "
warn_msg += "for the metric: %s Hz. " %(freqs.max())
warn_msg += "Distances for these waveforms will be calculated at "
warn_msg += "the largest available metric frequency."
logging.warn(warn_msg)
return find_closest_calculated_frequencies(cutoffs, freqs)
| 0.004092 |
def write_hfb_template(m):
"""write a template file for an hfb (yuck!)
Parameters
----------
m : flopy.modflow.Modflow instance with an HFB file
Returns
-------
(tpl_filename, df) : (str, pandas.DataFrame)
the name of the template file and a dataframe with useful info.
"""
assert m.hfb6 is not None
hfb_file = os.path.join(m.model_ws,m.hfb6.file_name[0])
assert os.path.exists(hfb_file),"couldn't find hfb_file {0}".format(hfb_file)
f_in = open(hfb_file,'r')
tpl_file = hfb_file+".tpl"
f_tpl = open(tpl_file,'w')
f_tpl.write("ptf ~\n")
parnme,parval1,xs,ys = [],[],[],[]
iis,jjs,kks = [],[],[]
xc = m.sr.xcentergrid
yc = m.sr.ycentergrid
while True:
line = f_in.readline()
if line == "":
break
f_tpl.write(line)
if not line.startswith("#"):
raw = line.strip().split()
nphfb = int(raw[0])
mxfb = int(raw[1])
nhfbnp = int(raw[2])
if nphfb > 0 or mxfb > 0:
raise Exception("not supporting terrible HFB pars")
for i in range(nhfbnp):
line = f_in.readline()
if line == "":
raise Exception("EOF")
raw = line.strip().split()
k = int(raw[0]) - 1
i = int(raw[1]) - 1
j = int(raw[2]) - 1
pn = "hb{0:02}{1:04d}{2:04}".format(k,i,j)
pv = float(raw[5])
raw[5] = "~ {0} ~".format(pn)
line = ' '.join(raw)+'\n'
f_tpl.write(line)
parnme.append(pn)
parval1.append(pv)
xs.append(xc[i,j])
ys.append(yc[i,j])
iis.append(i)
jjs.append(j)
kks.append(k)
break
f_tpl.close()
f_in.close()
df = pd.DataFrame({"parnme":parnme,"parval1":parval1,"x":xs,"y":ys,
"i":iis,"j":jjs,"k":kks},index=parnme)
df.loc[:,"pargp"] = "hfb_hydfac"
df.loc[:,"parubnd"] = df.parval1.max() * 10.0
df.loc[:,"parlbnd"] = df.parval1.min() * 0.1
return tpl_file,df
| 0.016629 |
def system_monitor_LineCard_threshold_down_threshold(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
LineCard = ET.SubElement(system_monitor, "LineCard")
threshold = ET.SubElement(LineCard, "threshold")
down_threshold = ET.SubElement(threshold, "down-threshold")
down_threshold.text = kwargs.pop('down_threshold')
callback = kwargs.pop('callback', self._callback)
return callback(config)
| 0.004967 |
def get_children(self):
"""Gets the children of this composition.
return: (osid.repository.CompositionList) - the composition
children
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_assets_template
if not bool(self._my_map['childIds']):
raise errors.IllegalState('no childIds')
mgr = self._get_provider_manager('REPOSITORY')
if not mgr.supports_composition_lookup():
raise errors.OperationFailed('Repository does not support Composition lookup')
# What about the Proxy?
lookup_session = mgr.get_composition_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_repository_view()
return lookup_session.get_compositions_by_ids(self.get_child_ids())
| 0.005269 |
def ensure_overlap_ratio(self, required_ratio=0.5):
"""
Ensure that every adjacent pair of frequency bands meets the overlap
ratio criteria. This can be helpful in scenarios where a scale is
being used in an invertible transform, and something like the `constant
overlap add constraint
<https://ccrma.stanford.edu/~jos/sasp/Constant_Overlap_Add_COLA_Cases.html>`_
must be met in order to not introduce artifacts in the reconstruction.
Args:
required_ratio (float): The required overlap ratio between all
adjacent frequency band pairs
Raises:
AssertionError: when the overlap ratio for one or more adjacent
frequency band pairs is not met
"""
msg = \
'band {i}: ratio must be at least {required_ratio} but was {ratio}'
for i in range(0, len(self) - 1):
b1 = self[i]
b2 = self[i + 1]
try:
ratio = b1.intersection_ratio(b2)
except ValueError:
ratio = 0
if ratio < required_ratio:
raise AssertionError(msg.format(**locals()))
| 0.001667 |
def build_default_simulation(self, tax_benefit_system, count = 1):
"""
Build a simulation where:
- There are ``count`` persons
- There are ``count`` instances of each group entity, containing one person
- Every person has, in each entity, the first role
"""
simulation = Simulation(tax_benefit_system, tax_benefit_system.instantiate_entities())
for population in simulation.populations.values():
population.count = count
population.ids = np.array(range(count))
if not population.entity.is_person:
population.members_entity_id = population.ids # Each person is its own group entity
return simulation
| 0.009309 |
def load_mnist_data(args):
'''
Load MNIST dataset
'''
(x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = (np.expand_dims(x_train, -1).astype(np.float64) / 255.)[:args.num_train]
    x_test = (np.expand_dims(x_test, -1).astype(np.float64) / 255.)[:args.num_test]
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]
LOG.debug('x_train shape: %s', (x_train.shape,))
LOG.debug('x_test shape: %s', (x_test.shape,))
return x_train, y_train, x_test, y_test
| 0.004992 |
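An illustrative invocation, assuming the surrounding script builds `args` with argparse; the attribute names come from the function body above:

from argparse import Namespace

args = Namespace(num_train=60000, num_test=10000)
x_train, y_train, x_test, y_test = load_mnist_data(args)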
def apply_markup(value, arg=None):
"""
Applies text-to-HTML conversion.
Takes an optional argument to specify the name of a filter to use.
"""
if arg is not None:
return formatter(value, filter_name=arg)
return formatter(value)
| 0.011152 |
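An illustrative call of the filter above from Python code; in a Django template the equivalent would be something like {{ value|apply_markup:"markdown" }}, assuming the filter is registered under that name:

html = apply_markup('Some *emphasised* text', 'markdown')  # use the named filter
html_default = apply_markup('Some *emphasised* text')      # fall back to the default formatter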
def fq_merge(R1, R2):
"""
    merge separate R1/R2 fastq files into read pairs (one 4-line record per mate)
"""
c = itertools.cycle([1, 2, 3, 4])
for r1, r2 in zip(R1, R2):
n = next(c)
if n == 1:
pair = [[], []]
pair[0].append(r1.strip())
pair[1].append(r2.strip())
if n == 4:
yield pair
| 0.003165 |
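A minimal driver for the generator above (file names are illustrative):

with open('sample_R1.fastq') as r1, open('sample_R2.fastq') as r2:
    for fwd, rev in fq_merge(r1, r2):
        # fwd and rev are 4-line fastq records: [header, sequence, '+', qualities]
        print(fwd[0], rev[0])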
def upload_files(self, owner, id, file, **kwargs):
"""
Upload files
Upload multiple files at once to a dataset via multipart request. This endpoint expects requests of type `multipart/form-data` and you can include one or more parts named `file`, each containing a different file to be uploaded. For example, assuming that, you want to upload two local files named `file1.csv` and `file2.csv` to a hypothetical dataset `https://data.world/awesome-user/awesome-dataset`, this is what the cURL command would look like. ```bash curl \\ -H \"Authorization: Bearer <YOUR_API_TOKEN>\" \\ -F \"[email protected]\" \\ -F \"[email protected]\" \\ https://api.data.world/v0/uploads/awesome-user/awesome-dataset/files ``` Swagger clients will limit this method of upload to one file at a time. Other HTTP clients capable of making multipart/form-data requests can be used to upload multiple files in a single request.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.upload_files(owner, id, file, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:param file file: Multipart-encoded file contents (required)
:param bool expand_archives: Indicates whether compressed files should be expanded upon upload.
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.upload_files_with_http_info(owner, id, file, **kwargs)
else:
(data) = self.upload_files_with_http_info(owner, id, file, **kwargs)
return data
| 0.003079 |