def parse_numeric_code(self, force_hex=False):
"""
Parses and returns the numeric code as an integer.
The numeric code can be either base 10 or base 16, depending on
where the message came from.
:param force_hex: force the numeric code to be processed as base 16.
:type force_hex: boolean
:raises: ValueError
"""
code = None
got_error = False
if not force_hex:
try:
code = int(self.numeric_code)
except ValueError:
got_error = True
        if force_hex or got_error:
            # int() raises ValueError itself on invalid input
            code = int(self.numeric_code, 16)
        return code
def readint2dnorm(filename):
"""Read corrected intensity and error matrices (Matlab mat or numpy npz
format for Beamline B1 (HASYLAB/DORISIII))
Input
-----
filename: string
the name of the file
Outputs
-------
two ``np.ndarray``-s, the Intensity and the Error matrices
File formats supported:
-----------------------
``.mat``
Matlab MAT file, with (at least) two fields: Intensity and Error
``.npz``
Numpy zip file, with (at least) two fields: Intensity and Error
other
        the file is opened with ``np.loadtxt``. An attempt is made to load the
        error matrix from ``<name>_error<ext>``, where the intensity was
        loaded from ``<name><ext>``. I.e. if ``somedir/matrix.dat`` is given,
the existence of ``somedir/matrix_error.dat`` is checked. If not found,
None is returned for the error matrix.
Notes
-----
The non-existence of the Intensity matrix results in an exception. If the
Error matrix does not exist, None is returned for it.
"""
# the core of read2dintfile
if filename.upper().endswith('.MAT'): # Matlab
m = scipy.io.loadmat(filename)
elif filename.upper().endswith('.NPZ'): # Numpy
m = np.load(filename)
else: # loadtxt
m = {'Intensity': np.loadtxt(filename)}
name, ext = os.path.splitext(filename)
errorfilename = name + '_error' + ext
if os.path.exists(errorfilename):
m['Error'] = np.loadtxt(errorfilename)
Intensity = m['Intensity']
    try:
        Error = m['Error']
        return Intensity, Error
    except KeyError:
        return Intensity, None
def aggregate(self, pipeline, **kwargs):
"""Execute an aggregation pipeline on this collection.
The aggregation can be run on a secondary if the client is connected
to a replica set and its ``read_preference`` is not :attr:`PRIMARY`.
:Parameters:
- `pipeline`: a single command or list of aggregation commands
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`, created with
:meth:`~MotorClient.start_session`.
- `**kwargs`: send arbitrary parameters to the aggregate command
Returns a :class:`MotorCommandCursor` that can be iterated like a
cursor from :meth:`find`::
pipeline = [{'$project': {'name': {'$toUpper': '$name'}}}]
cursor = collection.aggregate(pipeline)
while (yield cursor.fetch_next):
doc = cursor.next_object()
print(doc)
In Python 3.5 and newer, aggregation cursors can be iterated elegantly
in native coroutines with `async for`::
async def f():
async for doc in collection.aggregate(pipeline):
print(doc)
:class:`MotorCommandCursor` does not allow the ``explain`` option. To
explain MongoDB's query plan for the aggregation, use
:meth:`MotorDatabase.command`::
async def f():
plan = await db.command(
'aggregate', 'COLLECTION-NAME',
pipeline=[{'$project': {'x': 1}}],
explain=True)
print(plan)
.. versionchanged:: 1.0
:meth:`aggregate` now **always** returns a cursor.
.. versionchanged:: 0.5
:meth:`aggregate` now returns a cursor by default,
and the cursor is returned immediately without a ``yield``.
See :ref:`aggregation changes in Motor 0.5 <aggregate_changes_0_5>`.
.. versionchanged:: 0.2
Added cursor support.
.. _aggregate command:
http://docs.mongodb.org/manual/applications/aggregation
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_aggregate, pipeline,
                            **unwrap_kwargs_session(kwargs))
def p_objectlist_1(self, p):
"objectlist : objectlist objectitem"
if DEBUG:
self.print_p(p)
        p[0] = p[1] + [p[2]]
def _analyse_mat_sections(sections):
"""
Cases:
- ICRU flag present, LOADDEDX flag missing -> data loaded from some data hardcoded in SH12A binary,
no need to load external files
- ICRU flag present, LOADDEDX flag present -> data loaded from external files. ICRU number read from ICRU flag,
any number following LOADDEDX flag is ignored.
- ICRU flag missing, LOADDEDX flag present -> data loaded from external files. ICRU number read from LOADDEDX
- ICRU flag missing, LOADDEDX flag missing -> nothing happens
"""
icru_numbers = []
for section in sections:
load_present = False
load_value = False
icru_value = False
for e in section:
split_line = e.split()
if "LOADDEDX" in e:
load_present = True
if len(split_line) > 1:
load_value = split_line[1] if "!" not in split_line[1] else False # ignore ! comments
elif "ICRU" in e and len(split_line) > 1:
icru_value = split_line[1] if "!" not in split_line[1] else False # ignore ! comments
if load_present: # LOADDEDX is present, so external file is required
if icru_value: # if ICRU value was given
icru_numbers.append(icru_value)
elif load_value: # if only LOADDEDX with values was present in section
icru_numbers.append(load_value)
    return icru_numbers
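
A quick illustration, assuming each section is a list of raw card lines (the fourth case, both flags missing, simply contributes nothing):

sections = [
    ["LOADDEDX 276", "RHO 1.0"],  # LOADDEDX only: ICRU number read from it
    ["ICRU 170", "LOADDEDX"],     # both flags: the ICRU value wins
    ["ICRU 276"],                 # ICRU only: hardcoded data, no external file
]
print(_analyse_mat_sections(sections))  # ['276', '170']
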
def update_namespace(namespace, path, name):
"""
A recursive function that takes a root element, list of namespaces,
and the value being stored, and assigns namespaces to the root object
via a chain of Namespace objects, connected through attributes
Parameters
----------
namespace : Namespace
The object onto which an attribute will be added
path : list
A list of strings representing namespaces
name : str
The value to be stored at the bottom level
"""
if len(path) == 1:
setattr(namespace, path[0], name)
else:
if hasattr(namespace, path[0]):
if isinstance(getattr(namespace, path[0]), six.string_types):
raise ValueError("Conflicting assignments at namespace"
" level '%s'" % path[0])
else:
a = Namespace()
setattr(namespace, path[0], a)
        update_namespace(getattr(namespace, path[0]), path[1:], name)
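
A short sketch of how the attribute chain is built, assuming a bare attribute-container ``Namespace`` class (the real one lives in the surrounding module, which also imports ``six``):

class Namespace(object):
    pass

root = Namespace()
update_namespace(root, ['futures', 'CL', 'contract'], 'CLF16')
print(root.futures.CL.contract)  # 'CLF16'
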
def buildType(columns=[], extra=[]):
"""Build a table
:param list columns: List of column names and types. eg [('colA', 'd')]
:param list extra: A list of tuples describing additional non-standard fields
:returns: A :py:class:`Type`
"""
return Type(id="epics:nt/NTTable:1.0",
spec=[
('labels', 'as'),
('value', ('S', None, columns)),
('descriptor', 's'),
('alarm', alarm),
('timeStamp', timeStamp),
        ] + extra)
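
A hedged usage sketch; ``Type``, ``alarm`` and ``timeStamp`` are assumed to come from the surrounding NTTable module:

table_type = buildType(columns=[('colA', 'd'), ('colB', 's')])
# table_type now describes an NTTable whose 'value' substructure holds
# a double column 'colA' and a string column 'colB'
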
def getInstIdFromIndices(self, *indices):
"""Return column instance identification from indices"""
try:
return self._idxToIdCache[indices]
except TypeError:
cacheable = False
except KeyError:
cacheable = True
idx = 0
instId = ()
parentIndices = []
for impliedFlag, modName, symName in self._indexNames:
if idx >= len(indices):
break
mibObj, = mibBuilder.importSymbols(modName, symName)
syntax = mibObj.syntax.clone(indices[idx])
instId += self.valueToOid(syntax, impliedFlag, parentIndices)
parentIndices.append(syntax)
idx += 1
if cacheable:
self._idxToIdCache[indices] = instId
        return instId
def _set_up_savefolder(self):
"""
Create catalogs for different file output to clean up savefolder.
Non-public method
Parameters
----------
None
Returns
-------
None
"""
        if self.savefolder is None:
return
self.cells_path = os.path.join(self.savefolder, 'cells')
if RANK == 0:
if not os.path.isdir(self.cells_path):
os.mkdir(self.cells_path)
self.figures_path = os.path.join(self.savefolder, 'figures')
if RANK == 0:
if not os.path.isdir(self.figures_path):
os.mkdir(self.figures_path)
self.populations_path = os.path.join(self.savefolder, 'populations')
if RANK == 0:
if not os.path.isdir(self.populations_path):
os.mkdir(self.populations_path)
        COMM.Barrier()
def get_register(self, motors, disable_sync_read=False):
""" Gets the value from the specified register and sets it to the :class:`~pypot.dynamixel.motor.DxlMotor`. """
if not motors:
return False
ids = [m.id for m in motors]
getter = getattr(self.io, 'get_{}'.format(self.regname))
values = (sum([list(getter([id])) for id in ids], [])
if disable_sync_read else
getter(ids))
if not values:
return False
for m, val in zip(motors, values):
m.__dict__[self.varname] = val
for m in motors:
m._read_synced[self.varname].done()
        return True
def getRanking(self, profile, sampleFileName = None):
"""
Returns a list of lists that orders all candidates in tiers from best to worst when we use
MCMC approximation to compute Bayesian utilities for an election profile.
:ivar Profile profile: A Profile object that represents an election profile.
:ivar str sampleFileName: An optional argument for the name of the input file containing
sample data. If a file name is given, this method will use the samples in the file
instead of generating samples itself.
"""
        if sampleFileName is not None:
candScoresMap = self.getCandScoresMapFromSamplesFile(profile, sampleFileName)
else:
candScoresMap = self.getCandScoresMap(profile)
        # We generate a map that associates each score with the candidates that have that score.
reverseCandScoresMap = dict()
for key, value in candScoresMap.items():
if value not in reverseCandScoresMap.keys():
reverseCandScoresMap[value] = [key]
else:
reverseCandScoresMap[value].append(key)
# We sort the scores by either decreasing order or increasing order.
        if self.maximizeCandScore:
sortedCandScores = sorted(reverseCandScoresMap.keys(), reverse=True)
else:
sortedCandScores = sorted(reverseCandScoresMap.keys())
# We put the candidates into our ranking based on the order in which their score appears
ranking = []
for candScore in sortedCandScores:
for cand in reverseCandScoresMap[candScore]:
ranking.append(cand)
        return ranking
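
The score-inversion step in isolation, with a toy score map (candidates that tie share a tier):

candScoresMap = {1: 0.7, 2: 0.2, 3: 0.7}
reverseCandScoresMap = {}
for cand, score in candScoresMap.items():
    reverseCandScoresMap.setdefault(score, []).append(cand)
for score in sorted(reverseCandScoresMap, reverse=True):
    print(score, reverseCandScoresMap[score])
# 0.7 [1, 3]
# 0.2 [2]
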
def task(self):
"""
Find the task for this build.
Wraps the getTaskInfo RPC.
:returns: deferred that when fired returns the Task object, or None if
we could not determine the task for this build.
"""
# If we have no .task_id, this is a no-op to return None.
if not self.task_id:
return defer.succeed(None)
        return self.connection.getTaskInfo(self.task_id)
def confd_state_daemon_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
daemon_status = ET.SubElement(confd_state, "daemon-status")
daemon_status.text = kwargs.pop('daemon_status')
callback = kwargs.pop('callback', self._callback)
        return callback(config)
def set_result(self, result):
"""Set future's result if needed (can be an exception).
Else raise if needed."""
result = result.result()[0]
if self.future is not None:
if isinstance(result, Exception):
self.future.set_exception(result)
else:
self.future.set_result(result)
self.future = None
elif isinstance(result, Exception):
            raise result
def _iter_vals(key):
"""! Iterate over values of a key
"""
for i in range(winreg.QueryInfoKey(key)[1]):
        yield winreg.EnumValue(key, i)
def calculate_subscription_lifecycle(subscription_id):
"""
Calculates the expected lifecycle position the subscription in
subscription_ids, and creates a BehindSubscription entry for them.
Args:
subscription_id (str): ID of subscription to calculate lifecycle for
"""
subscription = Subscription.objects.select_related("messageset", "schedule").get(
id=subscription_id
)
behind = subscription.messages_behind()
if behind == 0:
return
current_messageset = subscription.messageset
current_sequence_number = subscription.next_sequence_number
end_subscription = Subscription.fast_forward_lifecycle(subscription, save=False)[-1]
BehindSubscription.objects.create(
subscription=subscription,
messages_behind=behind,
current_messageset=current_messageset,
current_sequence_number=current_sequence_number,
expected_messageset=end_subscription.messageset,
expected_sequence_number=end_subscription.next_sequence_number,
    )
def generate(node, environment, name, filename, stream=None,
defer_init=False):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError('Can\'t compile non template nodes')
generator = CodeGenerator(environment, name, filename, stream, defer_init)
generator.visit(node)
if stream is None:
        return generator.stream.getvalue()
def _copy(self):
"""
Create a new L{TransitionTable} just like this one using a copy of the
underlying transition table.
@rtype: L{TransitionTable}
"""
table = {}
for existingState, existingOutputs in self.table.items():
table[existingState] = {}
for (existingInput, existingTransition) in existingOutputs.items():
table[existingState][existingInput] = existingTransition
        return TransitionTable(table)
def _process_raw_report(self, raw_report):
"Default raw input report data handler"
if not self.is_opened():
return
if not self.__evt_handlers and not self.__raw_handler:
return
if not raw_report[0] and \
(raw_report[0] not in self.__input_report_templates):
# windows sends an empty array when disconnecting
# but, this might have a collision with report_id = 0
if not hid_device_path_exists(self.device_path):
#windows XP sends empty report when disconnecting
self.__reading_thread.abort() #device disconnected
return
if self.__raw_handler:
#this might slow down data throughput, but at the expense of safety
self.__raw_handler(helpers.ReadOnlyList(raw_report))
return
# using pre-parsed report templates, by report id
report_template = self.__input_report_templates[raw_report[0]]
# old condition snapshot
old_values = report_template.get_usages()
# parse incoming data
report_template.set_raw_data(raw_report)
# and compare it
event_applies = self.evt_decision
evt_handlers = self.__evt_handlers
for key in report_template.keys():
if key not in evt_handlers:
continue
#check if event handler exist!
for event_kind, handlers in evt_handlers[key].items():
#key=event_kind, values=handler set
new_value = report_template[key].value
if not event_applies[event_kind](old_values[key], new_value):
continue
#decision applies, call handlers
for function_handler in handlers:
#check if the application wants some particular parameter
if handlers[function_handler]:
function_handler(new_value,
event_kind, handlers[function_handler])
else:
                        function_handler(new_value, event_kind)
def attribute(self, name):
"""Expression for an input attribute.
An input attribute is an attribute on the input
port of the operator invocation.
Args:
name(str): Name of the attribute.
Returns:
Expression: Expression representing the input attribute.
"""
        return super(Map, self).attribute(self._inputs[0], name)
def plot_circular(widths, colors, curviness=0.2, mask=True, topo=None, topomaps=None, axes=None, order=None):
"""Circluar connectivity plot.
Topos are arranged in a circle, with arrows indicating connectivity
Parameters
----------
widths : float or array, shape (n_channels, n_channels)
Width of each arrow. Can be a scalar to assign the same width to all arrows.
colors : array, shape (n_channels, n_channels, 3) or (3)
RGB color values for each arrow or one RGB color value for all arrows.
curviness : float, optional
Factor that determines how much arrows tend to deviate from a straight line.
mask : array, dtype = bool, shape (n_channels, n_channels)
Enable or disable individual arrows
topo : :class:`~eegtopo.topoplot.Topoplot`
This object draws the topo plot
topomaps : array, shape = [w_pixels, h_pixels]
Scalp-projected map
axes : axis, optional
Axis to draw into. A new figure is created by default.
order : list of int
Rearrange channels.
Returns
-------
axes : Axes object
The axes into which was plotted.
"""
colors = np.asarray(colors)
widths = np.asarray(widths)
mask = np.asarray(mask)
colors = np.maximum(colors, 0)
colors = np.minimum(colors, 1)
    # determine the number of channels from whichever argument is a full matrix
    if len(widths.shape) == 2:
        [n, m] = widths.shape
    elif len(colors.shape) == 3:
        [n, m, c] = colors.shape
    elif len(mask.shape) == 2:
        [n, m] = mask.shape
else:
n = len(topomaps)
m = n
if not order:
order = list(range(n))
#a = np.asarray(a)
#[n, m] = a.shape
assert(n == m)
if axes is None:
fig = new_figure()
axes = fig.add_subplot(111)
axes.set_yticks([])
axes.set_xticks([])
axes.set_frame_on(False)
if len(colors.shape) < 3:
colors = np.tile(colors, (n,n,1))
if len(widths.shape) < 2:
widths = np.tile(widths, (n,n))
if len(mask.shape) < 2:
mask = np.tile(mask, (n,n))
np.fill_diagonal(mask, False)
if topo:
alpha = 1.5 if n < 10 else 1.25
r = alpha * topo.head_radius / (np.sin(np.pi/n))
else:
r = 1
for i in range(n):
if topo:
o = (r*np.sin(i*2*np.pi/n), r*np.cos(i*2*np.pi/n))
plot_topo(axes, topo, topomaps[order[i]], offset=o)
for i in range(n):
for j in range(n):
if not mask[order[i], order[j]]:
continue
a0 = j*2*np.pi/n
a1 = i*2*np.pi/n
x0, y0 = r*np.sin(a0), r*np.cos(a0)
x1, y1 = r*np.sin(a1), r*np.cos(a1)
ex = (x0 + x1) / 2
ey = (y0 + y1) / 2
en = np.sqrt(ex**2 + ey**2)
if en < 1e-10:
en = 0
ex = y0 / r
ey = -x0 / r
w = -r
else:
ex /= en
ey /= en
w = np.sqrt((x1-x0)**2 + (y1-y0)**2) / 2
if x0*y1-y0*x1 < 0:
w = -w
d = en*(1-curviness)
h = en-d
t = np.linspace(-1, 1, 100)
dist = (t**2+2*t+1)*w**2 + (t**4-2*t**2+1)*h**2
tmask1 = dist >= (1.4*topo.head_radius)**2
tmask2 = dist >= (1.2*topo.head_radius)**2
tmask = np.logical_and(tmask1, tmask2[::-1])
t = t[tmask]
x = (h*t*t+d)*ex - w*t*ey
y = (h*t*t+d)*ey + w*t*ex
# Arrow Head
s = np.sqrt((x[-2] - x[-1])**2 + (y[-2] - y[-1])**2)
width = widths[order[i], order[j]]
x1 = 0.1*width*(x[-2] - x[-1] + y[-2] - y[-1])/s + x[-1]
y1 = 0.1*width*(y[-2] - y[-1] - x[-2] + x[-1])/s + y[-1]
x2 = 0.1*width*(x[-2] - x[-1] - y[-2] + y[-1])/s + x[-1]
y2 = 0.1*width*(y[-2] - y[-1] + x[-2] - x[-1])/s + y[-1]
x = np.concatenate([x, [x1, x[-1], x2]])
y = np.concatenate([y, [y1, y[-1], y2]])
axes.plot(x, y, lw=width, color=colors[order[i], order[j]], solid_capstyle='round', solid_joinstyle='round')
    return axes
def get_dict_hashid(dict_):
r"""
Args:
dict_ (dict):
Returns:
int: id hash
References:
http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary
CommandLine:
python -m utool.util_dict --test-get_dict_hashid
python3 -m utool.util_dict --test-get_dict_hashid
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict_ = {}
>>> dict_ = {'a': 'b'}
>>> dict_ = {'a': {'c': 'd'}}
>>> #dict_ = {'a': {'c': 'd'}, 1: 143, dict: set}
        >>> #dict_ = {'a': {'c': 'd'}, 1: 143 } non-determinism
>>> hashid = get_dict_hashid(dict_)
>>> result = str(hashid)
>>> print(result)
mxgkepoboqjerkhb
oegknoalkrkojumi
"""
import utool as ut
raw_text = ut.repr4(dict_, sorted_=True, strvals=True, nl=2)
#print('raw_text = %r' % (raw_text,))
hashid = ut.hashstr27(raw_text)
#from utool import util_hash
#hashid = hash(frozenset(dict_.items()))
#hashid = util_hash.make_hash(dict_)
    return hashid
def format_datetime(time):
"""
Formats a date, converting the time to the user timezone if one is specified
"""
user_time_zone = timezone.get_current_timezone()
if time.tzinfo is None:
time = time.replace(tzinfo=pytz.utc)
user_time_zone = pytz.timezone(getattr(settings, 'USER_TIME_ZONE', 'GMT'))
time = time.astimezone(user_time_zone)
    return time.strftime("%b %d, %Y %H:%M")
def _get_magnitude_vector_properties(catalogue, config):
'''If an input minimum magnitude is given then consider catalogue
only above the minimum magnitude - returns corresponding properties'''
mmin = config.get('input_mmin', np.min(catalogue['magnitude']))
    neq = float(np.sum(catalogue['magnitude'] >= mmin - 1.E-7))
    return neq, mmin
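
For example, with a toy catalogue dictionary:

import numpy as np

catalogue = {'magnitude': np.array([3.5, 4.0, 4.5, 5.1])}
print(_get_magnitude_vector_properties(catalogue, {'input_mmin': 4.0}))
# (3.0, 4.0): three events at or above the configured minimum magnitude
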
def preproc(self, which='sin', **kwargs):
"""
Create preprocessing data
Parameters
----------
which: str
The name of the numpy function to apply
``**kwargs``
Any other parameter for the
:meth:`model_organization.ModelOrganizer.app_main` method
"""
self.app_main(**kwargs)
config = self.exp_config
config['infile'] = infile = osp.join(config['expdir'], 'input.dat')
func = getattr(np, which) # np.sin, np.cos or np.tan
data = func(np.linspace(-np.pi, np.pi))
self.logger.info('Saving input data to %s', infile)
        np.savetxt(infile, data)
def discard(self, element):
"""
Return a new PSet with element removed. Returns itself if element is not present.
"""
if element in self._map:
return self.evolver().remove(element).persistent()
        return self
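
Assuming this method belongs to the ``pyrsistent`` persistent-set implementation, usage looks like:

from pyrsistent import s

s1 = s(1, 2, 3)
print(s1.discard(2))        # pset([1, 3]) -- element order is not significant
print(s1.discard(9) is s1)  # True: discarding an absent element returns the same set
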
def detect_version(basedir, compiler=None, **compiler_attrs):
"""Compile, link & execute a test program, in empty directory `basedir`.
The C compiler will be updated with any keywords given via setattr.
Parameters
----------
basedir : path
The location where the test program will be compiled and run
compiler : str
The distutils compiler key (e.g. 'unix', 'msvc', or 'mingw32')
**compiler_attrs : dict
Any extra compiler attributes, which will be set via ``setattr(cc)``.
Returns
-------
A dict of properties for zmq compilation, with the following two keys:
vers : tuple
The ZMQ version as a tuple of ints, e.g. (2,2,0)
settings : dict
The compiler options used to compile the test function, e.g. `include_dirs`,
`library_dirs`, `libs`, etc.
"""
if compiler is None:
compiler = get_default_compiler()
cfile = pjoin(basedir, 'vers.cpp')
shutil.copy(pjoin(os.path.dirname(__file__), 'vers.cpp'), cfile)
# check if we need to link against Realtime Extensions library
if sys.platform.startswith('linux'):
cc = ccompiler.new_compiler(compiler=compiler)
cc.output_dir = basedir
if not cc.has_function('timer_create'):
if 'libraries' not in compiler_attrs:
compiler_attrs['libraries'] = []
compiler_attrs['libraries'].append('rt')
cc = get_compiler(compiler=compiler, **compiler_attrs)
efile = test_compilation(cfile, compiler=cc)
patch_lib_paths(efile, cc.library_dirs)
rc, so, se = get_output_error([efile])
if rc:
msg = "Error running version detection script:\n%s\n%s" % (so,se)
logging.error(msg)
raise IOError(msg)
handlers = {'vers': lambda val: tuple(int(v) for v in val.split('.'))}
props = {}
for line in (x for x in so.split('\n') if x):
key, val = line.split(':')
props[key] = handlers[key](val)
    return props
def attribute_text_label(node, current_word):
"""
Tries to recover the label inside a string
of the form '(3 hello)' where 3 is the label,
and hello is the string. Label is not assigned
if the string does not follow the expected
format.
Arguments:
----------
node : LabeledTree, current node that should
possibly receive a label.
current_word : str, input string.
"""
node.text = normalize_string(current_word)
node.text = node.text.strip(" ")
node.udepth = 1
if len(node.text) > 0 and node.text[0].isdigit():
split_sent = node.text.split(" ", 1)
label = split_sent[0]
if len(split_sent) > 1:
text = split_sent[1]
node.text = text
if all(c.isdigit() for c in label):
node.label = int(label)
else:
text = label + " " + text
node.text = text
if len(node.text) == 0:
        node.text = None
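
A minimal sketch with stand-ins for the pieces not shown here (``normalize_string`` and the ``LabeledTree`` node class are assumptions):

normalize_string = lambda s: s  # stand-in; the real helper lives in the module

class _Node(object):
    text = None
    label = None
    udepth = 0

node = _Node()
attribute_text_label(node, "3 hello")
print(node.label, node.text)  # 3 hello
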
def convert_shape(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert shape operation.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting shape ...')
def target_layer(x):
import tensorflow as tf
return tf.shape(x)
lambda_layer = keras.layers.Lambda(target_layer)
    layers[scope_name] = lambda_layer(layers[inputs[0]])
def _get_build_env(env):
'''
Get build environment overrides dictionary to use in build process
'''
env_override = ''
if env is None:
return env_override
if not isinstance(env, dict):
raise SaltInvocationError(
'\'env\' must be a Python dictionary'
)
for key, value in env.items():
env_override += '{0}={1}\n'.format(key, value)
env_override += 'export {0}\n'.format(key)
    return env_override
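
For instance, a two-variable dictionary produces (hypothetical input):

print(_get_build_env({'CC': 'gcc', 'CFLAGS': '-O2'}))
# CC=gcc
# export CC
# CFLAGS=-O2
# export CFLAGS
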
def empty(shape, ctx=None, dtype=None, stype=None):
"""Returns a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
stype : str, optional
An optional storage type (default is `default`).
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
A created array.
Examples
--------
>>> mx.nd.empty(1)
<NDArray 1 @cpu(0)>
>>> mx.nd.empty((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.empty((1,2), mx.gpu(0), 'float16')
<NDArray 1x2 @gpu(0)>
>>> mx.nd.empty((1,2), stype='csr')
<CSRNDArray 1x2 @cpu(0)>
"""
if stype is None or stype == 'default':
return _empty_ndarray(shape, ctx, dtype)
else:
        return _empty_sparse_ndarray(stype, shape, ctx, dtype)
async def is_change_done(self, zone, change_id):
"""Check if a DNS change has completed.
Args:
zone (str): DNS zone of the change.
change_id (str): Identifier of the change.
Returns:
Boolean
"""
zone_id = self.get_managed_zone(zone)
url = f'{self._base_url}/managedZones/{zone_id}/changes/{change_id}'
resp = await self.get_json(url)
        return resp['status'] == self.DNS_CHANGES_DONE
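
A hedged polling sketch built on this coroutine; ``client`` is an instance of the surrounding class and the one-second interval is arbitrary:

import asyncio

async def wait_until_done(client, zone, change_id):
    while not await client.is_change_done(zone, change_id):
        await asyncio.sleep(1)
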
def enable_nvm():
'''add to ~/.bashrc: Export of $NVM env variable and load nvm command.'''
bash_snippet = '~/.bashrc_nvm'
install_file_legacy(path=bash_snippet)
prefix = flo('if [ -f {bash_snippet} ]; ')
enabler = flo('if [ -f {bash_snippet} ]; then source {bash_snippet}; fi')
if env.host == 'localhost':
uncomment_or_update_or_append_line(filename='~/.bashrc', prefix=prefix,
new_line=enabler)
else:
        print(cyan('\nappend to ~/.bashrc:\n\n ') + enabler)
def get_tile_url(self, x, y, z, layer_id=None, feature_id=None,
filter=None, extension="png"):
"""
Prepares a URL to get data (raster or vector) from a NamedMap or
AnonymousMap
:param x: The x tile
:param y: The y tile
:param z: The zoom level
:param layer_id: Can be a number (referring to the # layer of your \
map), all layers of your map, or a list of layers.
To show just the basemap layer, enter the value 0
To show the first layer, enter the value 1
To show all layers, enter the value 'all'
To show a list of layers, enter the comma separated \
layer value as '0,1,2'
:param feature_id: The id of the feature
:param filter: The filter to be applied to the layer
:param extension: The format of the data to be retrieved: png, mvt, ...
:type x: int
:type y: int
:type z: int
:type layer_id: str
:type feature_id: str
:type filter: str
:type extension: str
:return: A URL to download data
:rtype: str
:raise: CartoException
"""
base_url = self.client.base_url + self.Meta.collection_endpoint
template_id = self.template_id if hasattr(self, 'template_id') \
else self.layergroupid
if layer_id is not None and feature_id is not None:
url = urljoin(base_url,
"{template_id}/{layer}/attributes/{feature_id}"). \
format(template_id=template_id,
layer=layer_id,
feature_id=feature_id)
elif layer_id is not None and filter is not None:
url = urljoin(base_url,
"{template_id}/{filter}/{z}/{x}/{y}.{extension}"). \
format(template_id=template_id,
filter=filter,
z=z, x=x, y=y,
extension=extension)
elif layer_id is not None:
url = urljoin(base_url,
"{template_id}/{layer}/{z}/{x}/{y}.{extension}"). \
format(template_id=template_id,
layer=layer_id,
z=z, x=x, y=y,
extension=extension)
else:
url = urljoin(base_url, "{template_id}/{z}/{x}/{y}.{extension}"). \
format(
template_id=template_id,
z=z, x=x, y=y,
extension=extension)
if hasattr(self, 'auth') and self.auth is not None \
and len(self.auth['valid_tokens']) > 0:
url = urljoin(url, "?auth_token={auth_token}"). \
format(auth_token=self.auth['valid_tokens'][0])
        return url
def _init_mythril_dir() -> str:
"""
Initializes the mythril dir and config.ini file
:return: The mythril dir's path
"""
try:
mythril_dir = os.environ["MYTHRIL_DIR"]
except KeyError:
mythril_dir = os.path.join(os.path.expanduser("~"), ".mythril")
if not os.path.exists(mythril_dir):
# Initialize data directory
log.info("Creating mythril data directory")
os.mkdir(mythril_dir)
db_path = str(Path(mythril_dir) / "signatures.db")
if not os.path.exists(db_path):
# if the default mythril dir doesn't contain a signature DB
# initialize it with the default one from the project root
asset_dir = Path(__file__).parent.parent / "support" / "assets"
copyfile(str(asset_dir / "signatures.db"), db_path)
    return mythril_dir
def fillna(data, other, join="left", dataset_join="left"):
"""Fill missing values in this object with data from the other object.
Follows normal broadcasting and alignment rules.
Parameters
----------
join : {'outer', 'inner', 'left', 'right'}, optional
Method for joining the indexes of the passed objects along each
dimension
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {'outer', 'inner', 'left', 'right'}, optional
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
"""
from .computation import apply_ufunc
return apply_ufunc(duck_array_ops.fillna, data, other,
join=join,
dask="allowed",
dataset_join=dataset_join,
dataset_fill_value=np.nan,
                       keep_attrs=True)
def conditional_loss_ratio(loss_ratios, poes, probability):
"""
    Return the loss ratio corresponding to the given PoE (Probability
    of Exceedance). We can have four cases:
    1. If `probability` is in `poes` it takes the largest
    corresponding loss ratio.
2. If it is in `(poe1, poe2)` where both `poe1` and `poe2` are
in `poes`, then we perform a linear interpolation on the
corresponding losses
3. if the given probability is smaller than the
    lowest PoE defined, it returns the max loss ratio.
4. if the given probability is greater than the highest PoE
defined it returns zero.
:param loss_ratios: an iterable over non-decreasing loss ratio
values (float)
:param poes: an iterable over non-increasing probability of
exceedance values (float)
:param float probability: the probability value used to
interpolate the loss curve
"""
assert len(loss_ratios) >= 3, loss_ratios
rpoes = poes[::-1]
if probability > poes[0]: # max poes
return 0.0
elif probability < poes[-1]: # min PoE
return loss_ratios[-1]
if probability in poes:
return max([loss
for i, loss in enumerate(loss_ratios)
if probability == poes[i]])
else:
interval_index = bisect.bisect_right(rpoes, probability)
if interval_index == len(poes): # poes are all nan
return float('nan')
elif interval_index == 1: # boundary case
x1, x2 = poes[-2:]
y1, y2 = loss_ratios[-2:]
else:
x1, x2 = poes[-interval_index-1:-interval_index + 1]
y1, y2 = loss_ratios[-interval_index-1:-interval_index + 1]
        return (y2 - y1) / (x2 - x1) * (probability - x1) + y1
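
A worked example of the interpolation case (case 2), plus the two boundary cases:

loss_ratios = [0.1, 0.2, 0.4, 0.8]
poes = [0.9, 0.5, 0.2, 0.1]  # non-increasing
print(conditional_loss_ratio(loss_ratios, poes, 0.35))  # ~0.3, interpolated between 0.2 and 0.4
print(conditional_loss_ratio(loss_ratios, poes, 0.95))  # 0.0 (above the highest PoE)
print(conditional_loss_ratio(loss_ratios, poes, 0.05))  # 0.8 (below the lowest PoE)
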
def check_readable(self, timeout):
"""
Poll ``self.stdout`` and return True if it is readable.
:param float timeout: seconds to wait I/O
:return: True if readable, else False
:rtype: boolean
"""
rlist, wlist, xlist = select.select([self._stdout], [], [], timeout)
        return bool(len(rlist))
def update_plot_limits(ax, white_space):
"""Sets the limit options of a matplotlib plot.
Args:
ax: matplotlib axes
white_space(float): whitespace added to surround the tight limit of the data
Note: This relies on ax.dataLim (in 2d) and ax.[xy, zz]_dataLim being set in 3d
"""
if hasattr(ax, 'zz_dataLim'):
bounds = ax.xy_dataLim.bounds
ax.set_xlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)
ax.set_ylim(bounds[1] - white_space, bounds[1] + bounds[3] + white_space)
bounds = ax.zz_dataLim.bounds
ax.set_zlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)
else:
bounds = ax.dataLim.bounds
assert not any(map(np.isinf, bounds)), 'Cannot set bounds if dataLim has infinite elements'
ax.set_xlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)
        ax.set_ylim(bounds[1] - white_space, bounds[1] + bounds[3] + white_space)
def adaptStandardLogging(loggerName, logCategory, targetModule):
"""
Make a logger from the standard library log through the Flumotion logging
system.
@param loggerName: The standard logger to adapt, e.g. 'library.module'
@type loggerName: str
@param logCategory: The Flumotion log category to use when reporting output
from the standard logger, e.g. 'librarymodule'
@type logCategory: str
@param targetModule: The name of the module that the logging should look
like it's coming from. Use this if you don't want to
see the file names and line numbers of the library
who's logger you are adapting.
@type targetModule: str or None
"""
logger = logging.getLogger(loggerName)
    # if there is already a LogHandler attached, exit
    if any(isinstance(h, LogHandler) for h in logger.handlers):
        return
logger.setLevel(logLevelToStdLevel(getCategoryLevel(logCategory)))
    logger.addHandler(LogHandler(logCategory, targetModule))
def all(self):
" execute query, get all list of lists"
query,inputs = self._toedn()
return self.db.q(query,
inputs = inputs,
limit = self._limit,
offset = self._offset,
                        history = self._history)
def where_task(self, token_id, presented_pronunciation, confusion_probability):
"""Provide the prediction of the where task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param token_id: The token for which the prediction is being provided
        :param presented_pronunciation: The pronunciation that was presented for the token
        :param confusion_probability: The list or array of confusion probabilities at each index
"""
self['tokens'].setdefault(token_id, {}) \
.setdefault('where', self._where_default(presented_pronunciation))
if confusion_probability is not None:
            self['tokens'][token_id]['where'] = list(confusion_probability)
def missing_output_files(self):
"""Make and return a dictionary of the missing output files.
This returns a dictionary mapping
filepath to list of links that produce the file as output.
"""
missing = self.check_output_files(return_found=False)
ret_dict = {}
for miss_file in missing:
ret_dict[miss_file] = [self.linkname]
        return ret_dict
def _get_instance(self, iname, namespace, property_list, local_only,
include_class_origin, include_qualifiers):
"""
Local method implements getinstance. This is generally used by
other instance methods that need to get an instance from the
repository.
It attempts to get the instance, copies it, and filters it
for input parameters like localonly, includequalifiers, and
propertylist.
Returns:
CIMInstance copy from the repository with property_list filtered,
and qualifers removed if include_qualifiers=False and
class origin removed if include_class_origin False
"""
instance_repo = self._get_instance_repo(namespace)
rtn_tup = self._find_instance(iname, instance_repo)
inst = rtn_tup[1]
if inst is None:
raise CIMError(
CIM_ERR_NOT_FOUND,
_format("Instance not found in repository namespace {0!A}. "
"Path={1!A}", namespace, iname))
rtn_inst = deepcopy(inst)
# If local_only remove properties where class_origin
# differs from class of target instance
if local_only:
for p in rtn_inst:
class_origin = rtn_inst.properties[p].class_origin
if class_origin and class_origin != inst.classname:
del rtn_inst[p]
# if not repo_lite test against class properties
if not self._repo_lite and local_only:
# gets class propertylist which may be local only or all
# superclasses
try:
cl = self._get_class(iname.classname, namespace,
local_only=local_only)
except CIMError as ce:
if ce.status_code == CIM_ERR_NOT_FOUND:
raise CIMError(
CIM_ERR_INVALID_CLASS,
_format("Class {0!A} not found for instance {1!A} in "
"namespace {2!A}.",
iname.classname, iname, namespace))
class_pl = cl.properties.keys()
for p in list(rtn_inst):
if p not in class_pl:
del rtn_inst[p]
self._filter_properties(rtn_inst, property_list)
if not include_qualifiers:
self._remove_qualifiers(rtn_inst)
if not include_class_origin:
self._remove_classorigin(rtn_inst)
        return rtn_inst
def cumulative_value(self, slip_moment, mmax, mag_value, bbar, dbar):
'''
Returns the rate of events with M > mag_value
:param float slip_moment:
Product of slip (cm/yr) * Area (cm ^ 2) * shear_modulus (dyne-cm)
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
'''
delta_m = mmax - mag_value
a_2 = self._get_a2(bbar, dbar, slip_moment, mmax)
        return a_2 * (np.exp(bbar * delta_m) - 1.) * (delta_m > 0.0)
def num_samples(self):
"""
Return the total number of samples.
"""
with audioread.audio_open(self.path) as f:
            return int(f.duration * f.samplerate)
def make_seg_table(workflow, seg_files, seg_names, out_dir, tags=None,
title_text=None, description=None):
""" Creates a node in the workflow for writing the segment summary
    table. Returns a File instance for the output file.
"""
seg_files = list(seg_files)
seg_names = list(seg_names)
if tags is None: tags = []
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'page_segtable', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_list_opt('--segment-files', seg_files)
quoted_seg_names = []
for s in seg_names:
quoted_seg_names.append("'" + s + "'")
node.add_opt('--segment-names', ' '.join(quoted_seg_names))
if description:
node.add_opt('--description', "'" + description + "'")
if title_text:
node.add_opt('--title-text', "'" + title_text + "'")
node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
workflow += node
    return node.output_files[0]
def save_file(self, data, filename, size=None, thumbnail_size=None):
"""
Saves an image File
:param data: FileStorage from Flask form upload field
:param filename: Filename with full path
"""
max_size = size or self.max_size
thumbnail_size = thumbnail_size or self.thumbnail_size
if data and isinstance(data, FileStorage):
try:
self.image = Image.open(data)
except Exception as e:
raise ValidationError("Invalid image: %s" % e)
path = self.get_path(filename)
# If Path does not exist, create it
if not op.exists(op.dirname(path)):
os.makedirs(os.path.dirname(path), self.permission)
# Figure out format
filename, format = self.get_save_format(filename, self.image)
if self.image and (self.image.format != format or max_size):
if max_size:
image = self.resize(self.image, max_size)
else:
image = self.image
self.save_image(image, self.get_path(filename), format)
else:
data.seek(0)
data.save(path)
self.save_thumbnail(data, filename, format, thumbnail_size)
        return filename
def getElement(self, attri, fname, numtype='cycNum'):
'''
In this method instead of getting a particular column of data,
the program gets a particular row of data for a particular
element name.
attri : string
The name of the attribute we are looking for. A complete
list of them can be obtained by calling
>>> get('element_name')
fname : string
The name of the file we are getting the data from or the
cycle number found in the filename.
numtype : string, optional
Determines whether fname is the name of a file or, the
cycle number. If it is 'file' it will then interpret it as
a file, if it is 'cycNum' it will then interpret it as a
cycle number. The default is "cycNum".
Returns
-------
array
A numpy array of the four element attributes, number, Z, A
and abundance, in that order.
Notes
-----
Warning
'''
element=[] #Variable for holding the list of element names
number=[] #Variable for holding the array of numbers
z=[] #Variable for holding the array of z
a=[] #Variable for holding the array of a
abd=[] #Variable for holding the array of Abundance
data=[] #variable for the final list of data
fname=self.findFile(fname,numtype)
f=open(fname,'r')
for i in range(self.index+1):
f.readline()
lines=f.readlines()
for i in range(len(lines)):
lines[i]=lines[i].strip()
lines[i]=lines[i].split()
index=0
data=[]
while index < len (self.dcols):
if attri== self.dcols[index]:
break
index+=1
element=self.get(self.dcols[5],fname,numtype)
number=[]
z=[]
a=[]
isom=[]
abd=[]
        # Column layout assumed from the docstring: number, Z, A,
        # isomer flag, abundance.
        for i in range(len(lines)):
            number.append(int(lines[i][0]))
            z.append(float(lines[i][1]))
            a.append(float(lines[i][2]))
            isom.append(float(lines[i][3]))
            abd.append(float(lines[i][4]))
index=0 #Variable for determing the index in the data columns
while index < len(element):
if attri == element[index]:
break
index+=1
data.append(number[index])
data.append(z[index])
data.append(a[index])
data.append(isom[index])
data.append(abd[index])
        return array(data)
def _prepare_headers(self, request, filter=None, order_by=None, group_by=None, page=None, page_size=None):
""" Prepare headers for the given request
Args:
request: the NURESTRequest to send
filter: string
order_by: string
group_by: list of names
page: int
page_size: int
"""
if filter:
request.set_header('X-Nuage-Filter', filter)
if order_by:
request.set_header('X-Nuage-OrderBy', order_by)
if page is not None:
request.set_header('X-Nuage-Page', str(page))
if page_size:
request.set_header('X-Nuage-PageSize', str(page_size))
        if group_by:
            header = ", ".join(group_by)
            request.set_header('X-Nuage-GroupBy', 'true')
            request.set_header('X-Nuage-Attributes', header)
def get_form_kwargs(self):
"""
Pass template pack argument
"""
kwargs = super(FormContainersMixin, self).get_form_kwargs()
kwargs.update({
'pack': "foundation-{}".format(self.kwargs.get('foundation_version'))
})
return kwargs | Pass template pack argument |
def _get_observation(self):
"""
Returns an OrderedDict containing observations [(name_string, np.array), ...].
Important keys:
robot-state: contains robot-centric information.
object-state: requires @self.use_object_obs to be True.
contains object-centric information.
image: requires @self.use_camera_obs to be True.
contains a rendered frame from the simulation.
depth: requires @self.use_camera_obs and @self.camera_depth to be True.
contains a rendered depth map from the simulation
"""
di = super()._get_observation()
# camera observations
if self.use_camera_obs:
camera_obs = self.sim.render(
camera_name=self.camera_name,
width=self.camera_width,
height=self.camera_height,
depth=self.camera_depth,
)
if self.camera_depth:
di["image"], di["depth"] = camera_obs
else:
di["image"] = camera_obs
# low-level object information
if self.use_object_obs:
# position and rotation of cylinder and hole
hole_pos = self.sim.data.body_xpos[self.hole_body_id]
hole_quat = T.convert_quat(
self.sim.data.body_xquat[self.hole_body_id], to="xyzw"
)
di["hole_pos"] = hole_pos
di["hole_quat"] = hole_quat
cyl_pos = self.sim.data.body_xpos[self.cyl_body_id]
cyl_quat = T.convert_quat(
self.sim.data.body_xquat[self.cyl_body_id], to="xyzw"
)
di["cyl_to_hole"] = cyl_pos - hole_pos
di["cyl_quat"] = cyl_quat
# Relative orientation parameters
t, d, cos = self._compute_orientation()
di["angle"] = cos
di["t"] = t
di["d"] = d
di["object-state"] = np.concatenate(
[
di["hole_pos"],
di["hole_quat"],
di["cyl_to_hole"],
di["cyl_quat"],
[di["angle"]],
[di["t"]],
[di["d"]],
]
)
return di | Returns an OrderedDict containing observations [(name_string, np.array), ...].
Important keys:
robot-state: contains robot-centric information.
object-state: requires @self.use_object_obs to be True.
contains object-centric information.
image: requires @self.use_camera_obs to be True.
contains a rendered frame from the simulation.
depth: requires @self.use_camera_obs and @self.camera_depth to be True.
contains a rendered depth map from the simulation |
def bulk_copy(self, ids):
"""Bulk copy a set of devices.
:param ids: Int list of device IDs.
:return: :class:`devices.Device <devices.Device>` list
"""
schema = DeviceSchema()
return self.service.bulk_copy(self.base, self.RESOURCE, ids, schema) | Bulk copy a set of devices.
:param ids: Int list of device IDs.
:return: :class:`devices.Device <devices.Device>` list |
def load_file(filename, out=sys.stdout):
"""
load a Python source file and compile it to byte-code
load_file(filename: string): code_object
filename: name of file containing Python source code
(normally a .py)
code_object: code_object compiled from this source code
This function does NOT write any file!
"""
fp = open(filename, 'rb')
try:
source = fp.read()
try:
if PYTHON_VERSION < 2.6:
co = compile(source, filename, 'exec')
else:
co = compile(source, filename, 'exec', dont_inherit=True)
except SyntaxError:
out.write('>>Syntax error in %s\n' % filename)
raise
finally:
fp.close()
return co | load a Python source file and compile it to byte-code
load_file(filename: string): code_object
filename: name of file containing Python source code
(normally a .py)
code_object: code_object compiled from this source code
This function does NOT write any file! |
def remove(self, name=None, prefix=None, pkgs=None, all_=False):
"""
Remove a package (from an environment) by name.
Returns {
success: bool, (this is always true),
(other information)
}
"""
logger.debug(str((prefix, pkgs)))
cmd_list = ['remove', '--json', '--yes']
if not pkgs and not all_:
raise TypeError("Must specify at least one package to remove, or "
"all=True.")
if name:
cmd_list.extend(['--name', name])
elif prefix:
cmd_list.extend(['--prefix', prefix])
else:
raise TypeError('must specify either an environment name or a '
'path for package removal')
if all_:
cmd_list.extend(['--all'])
else:
cmd_list.extend(pkgs)
return self._call_and_parse(cmd_list) | Remove a package (from an environment) by name.
Returns {
success: bool, (this is always true),
(other information)
} |
def modify_binding(site, binding, hostheader=None, ipaddress=None, port=None,
sslflags=None):
'''
Modify an IIS Web Binding. Use ``site`` and ``binding`` to target the
binding.
.. versionadded:: 2017.7.0
Args:
site (str): The IIS site name.
binding (str): The binding to edit. This is a combination of the
IP address, port, and hostheader. It is in the following format:
ipaddress:port:hostheader. For example, ``*:80:`` or
``*:80:salt.com``
hostheader (str): The host header of the binding. Usually the hostname.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
sslflags (str): The flags representing certificate type and storage of
the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
The following will set the host header of binding ``*:80:`` for ``site0``
to ``example.com``
.. code-block:: bash
salt '*' win_iis.modify_binding site='site0' binding='*:80:' hostheader='example.com'
'''
if sslflags is not None and sslflags not in _VALID_SSL_FLAGS:
message = ("Invalid sslflags '{0}' specified. Valid sslflags range:"
' {1}..{2}').format(sslflags, _VALID_SSL_FLAGS[0], _VALID_SSL_FLAGS[-1])
raise SaltInvocationError(message)
current_sites = list_sites()
if site not in current_sites:
log.debug("Site '%s' not defined.", site)
return False
current_bindings = list_bindings(site)
if binding not in current_bindings:
log.debug("Binding '%s' not defined.", binding)
return False
# Split out the binding so we can insert new ones
# Use the existing value if not passed
i, p, h = binding.split(':')
new_binding = ':'.join([ipaddress if ipaddress is not None else i,
six.text_type(port) if port is not None else six.text_type(p),
hostheader if hostheader is not None else h])
if new_binding != binding:
ps_cmd = ['Set-WebBinding',
'-Name', "'{0}'".format(site),
'-BindingInformation', "'{0}'".format(binding),
'-PropertyName', 'BindingInformation',
'-Value', "'{0}'".format(new_binding)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to modify binding: {0}\nError: {1}' \
''.format(binding, cmd_ret['stderr'])
raise CommandExecutionError(msg)
if sslflags is not None and \
sslflags != current_sites[site]['bindings'][binding]['sslflags']:
ps_cmd = ['Set-WebBinding',
'-Name', "'{0}'".format(site),
'-BindingInformation', "'{0}'".format(new_binding),
'-PropertyName', 'sslflags',
'-Value', "'{0}'".format(sslflags)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to modify binding SSL Flags: {0}\nError: {1}' \
''.format(sslflags, cmd_ret['stderr'])
raise CommandExecutionError(msg)
log.debug('Binding modified successfully: %s', binding)
return True | Modify an IIS Web Binding. Use ``site`` and ``binding`` to target the
binding.
.. versionadded:: 2017.7.0
Args:
site (str): The IIS site name.
binding (str): The binding to edit. This is a combination of the
IP address, port, and hostheader. It is in the following format:
ipaddress:port:hostheader. For example, ``*:80:`` or
``*:80:salt.com``
hostheader (str): The host header of the binding. Usually the hostname.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
sslflags (str): The flags representing certificate type and storage of
the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
The following will set the host header of binding ``*:80:`` for ``site0``
to ``example.com``
.. code-block:: bash
salt '*' win_iis.modify_binding site='site0' binding='*:80:' hostheader='example.com' |
def country_code_by_name(self, hostname):
"""
Returns 2-letter country code (e.g. US) from hostname.
:arg hostname: Hostname (e.g. example.com)
"""
addr = self._gethostbyname(hostname)
return self.country_code_by_addr(addr) | Returns 2-letter country code (e.g. US) from hostname.
:arg hostname: Hostname (e.g. example.com) |
async def close(self):
"""|coro|
Closes the connection to discord.
"""
if self._closed:
return
await self.http.close()
self._closed = True
for voice in self.voice_clients:
try:
await voice.disconnect()
except Exception:
# if an error happens during disconnects, disregard it.
pass
if self.ws is not None and self.ws.open:
await self.ws.close()
self._ready.clear() | |coro|
Closes the connection to discord. |
def log(self, obj):
'''
Commit an arbitrary (picklable) object to the log
'''
entries = self.get()
entries.append(obj)
# Only log the last |n| entries if set
if self._size > 0:
entries = entries[-self._size:]
self._write_entries(entries) | Commit an arbitrary (picklable) object to the log |
def get_success_url(self):
"""Reverses the ``redis_metric_aggregate_detail`` URL using
``self.metric_slugs`` as an argument."""
slugs = '+'.join(self.metric_slugs)
url = reverse('redis_metric_aggregate_detail', args=[slugs])
# Django 1.6 quotes reversed URLs, which changes + into %2B. We want
# to keep the + in the url (it's ok according to RFC 1738)
# https://docs.djangoproject.com/en/1.6/releases/1.6/#quoting-in-reverse
return url.replace("%2B", "+") | Reverses the ``redis_metric_aggregate_detail`` URL using
``self.metric_slugs`` as an argument. |
def attachmethod(target):
'''
Reference: https://blog.tonyseek.com/post/open-class-in-python/
class Spam(object):
pass
@attachmethod(Spam)
def egg1(self, name):
print((self, name))
spam1 = Spam()
# The method egg1 added via the open class is now available
spam1.egg1("Test1")
# Prints Test1
'''
if isinstance(target, type):
def decorator(func):
setattr(target, func.__name__, func)
else:
def decorator(func):
setattr(target, func.__name__, partial(func, target))
return decorator | Reference: https://blog.tonyseek.com/post/open-class-in-python/
class Spam(object):
pass
@attachmethod(Spam)
def egg1(self, name):
print((self, name))
spam1 = Spam()
# The method egg1 added via the open class is now available
spam1.egg1("Test1")
# Prints Test1
def compile_geo(d):
"""
Compile top-level Geography dictionary.
:param d:
:return:
"""
logger_excel.info("enter compile_geo")
d2 = OrderedDict()
# get max number of sites, or number of coordinate points given.
num_loc = _get_num_locations(d)
# if there's more than one location, put it in a collection
if num_loc > 1:
d2["type"] = "FeatureCollection"
features = []
for idx in range(0, num_loc):
# Do process for one site
site = _parse_geo_locations(d, idx)
features.append(site)
d2["features"] = features
# if there's only one location
elif num_loc == 1:
d2 = _parse_geo_location(d)
logger_excel.info("exit compile_geo")
return d2 | Compile top-level Geography dictionary.
:param d:
:return: |
def set_cookie(name, value):
"""Sets a cookie and redirects to cookie list.
---
tags:
- Cookies
parameters:
- in: path
name: name
type: string
- in: path
name: value
type: string
produces:
- text/plain
responses:
200:
description: Set cookies and redirects to cookie list.
"""
r = app.make_response(redirect(url_for("view_cookies")))
r.set_cookie(key=name, value=value, secure=secure_cookie())
return r | Sets a cookie and redirects to cookie list.
---
tags:
- Cookies
parameters:
- in: path
name: name
type: string
- in: path
name: value
type: string
produces:
- text/plain
responses:
200:
description: Set cookies and redirects to cookie list. |
def _parse_prop(search, proplist):
"""Extract property value from record using the given urn search filter."""
props = [i for i in proplist if all(item in i['urn'].items() for item in search.items())]
if len(props) > 0:
return props[0]['value'][list(props[0]['value'].keys())[0]] | Extract property value from record using the given urn search filter. |
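A self-contained example with an illustrative proplist; the record shape (a `urn` dict plus a single-key `value` dict) mirrors what the filter above expects:

proplist = [
    {'urn': {'vendor': 'acme', 'name': 'model'}, 'value': {'str': 'X200'}},
    {'urn': {'vendor': 'acme', 'name': 'serial'}, 'value': {'str': 'A1B2'}},
]
# Matches the second record: every item of the search dict is in its urn.
print(_parse_prop({'name': 'serial'}, proplist))  # A1B2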
def headers(params={}):
"""This decorator adds the headers passed in to the response
http://flask.pocoo.org/snippets/100/
"""
def decorator(f):
if inspect.isclass(f):
h = headers(params)
apply_function_to_members(f, h)
return f
@functools.wraps(f)
def decorated_function(*args, **kwargs):
resp = make_response(f(*args, **kwargs))
h = resp.headers
for header, value in params.items():
h[header] = value
return resp
return decorated_function
return decorator | This decorator adds the headers passed in to the response
http://flask.pocoo.org/snippets/100/ |
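A hedged usage sketch with Flask; the route and header values are illustrative. `@app.route` goes outermost so the header-wrapped view is what gets registered:

from flask import Flask

app = Flask(__name__)

@app.route('/report')
@headers({'X-Frame-Options': 'DENY', 'Cache-Control': 'no-store'})
def report():
    return 'ok'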
def _set_font(self, family, size, bold, italic):
"""
Set the font properties of all the text in this text frame to
*family*, *size*, *bold*, and *italic*.
"""
def iter_rPrs(txBody):
for p in txBody.p_lst:
for elm in p.content_children:
yield elm.get_or_add_rPr()
# generate a:endParaRPr for each <a:p> element
yield p.get_or_add_endParaRPr()
def set_rPr_font(rPr, name, size, bold, italic):
f = Font(rPr)
f.name, f.size, f.bold, f.italic = name, Pt(size), bold, italic
txBody = self._element
for rPr in iter_rPrs(txBody):
set_rPr_font(rPr, family, size, bold, italic) | Set the font properties of all the text in this text frame to
*family*, *size*, *bold*, and *italic*. |
def get_wordpress(self, service_id, version_number, name):
"""Get information on a specific wordpress."""
content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, name))
return FastlyWordpress(self, content) | Get information on a specific wordpress. |
def parse_command_only(self, rawinput: str) -> Statement:
"""Partially parse input into a Statement object.
The command is identified, and shortcuts and aliases are expanded.
Multiline commands are identified, but terminators and output
redirection are not parsed.
This method is used by tab completion code and therefore must not
generate an exception if there are unclosed quotes.
The `Statement` object returned by this method can at most contain values
in the following attributes:
- args
- raw
- command
- multiline_command
`Statement.args` includes all output redirection clauses and command
terminators.
Different from parse(), this method does not remove redundant whitespace
within args. However, it does ensure args has no leading or trailing
whitespace.
"""
# expand shortcuts and aliases
line = self._expand(rawinput)
command = ''
args = ''
match = self._command_pattern.search(line)
if match:
# we got a match, extract the command
command = match.group(1)
# take everything from the end of the first match group to
# the end of the line as the arguments (stripping leading
# and trailing spaces)
args = line[match.end(1):].strip()
# if the command is empty that means the input was either empty
# or something weird like '>'. args should be empty if we couldn't
# parse a command
if not command or not args:
args = ''
# set multiline
if command in self.multiline_commands:
multiline_command = command
else:
multiline_command = ''
# build the statement
statement = Statement(args,
raw=rawinput,
command=command,
multiline_command=multiline_command,
)
return statement | Partially parse input into a Statement object.
The command is identified, and shortcuts and aliases are expanded.
Multiline commands are identified, but terminators and output
redirection are not parsed.
This method is used by tab completion code and therefore must not
generate an exception if there are unclosed quotes.
The `Statement` object returned by this method can at most contain values
in the following attributes:
- args
- raw
- command
- multiline_command
`Statement.args` includes all output redirection clauses and command
terminators.
Different from parse(), this method does not remove redundant whitespace
within args. However, it does ensure args has no leading or trailing
whitespace. |
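A hedged usage sketch, assuming this method lives on cmd2's StatementParser (which the surrounding code suggests); the input string is illustrative:

from cmd2.parsing import StatementParser  # assumption: cmd2's parser class

parser = StatementParser()
statement = parser.parse_command_only('speak hello there > out.txt')
print(statement.command)  # speak
print(statement.args)     # 'hello there > out.txt' -- redirection left unparsed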
def set_style(style, mpl=False, **kwargs):
"""
If mpl is False accept either style name or a TStyle instance.
If mpl is True accept either style name or a matplotlib.rcParams-like
dictionary
"""
if mpl:
import matplotlib as mpl
style_dictionary = {}
if isinstance(style, string_types):
style_dictionary = get_style(style, mpl=True, **kwargs)
log.info("using matplotlib style '{0}'".format(style))
elif isinstance(style, dict):
style_dictionary = style
log.info("using user-defined matplotlib style")
else:
raise TypeError("style must be a matplotlib style name or dict")
for k, v in style_dictionary.items():
mpl.rcParams[k] = v
else:
if isinstance(style, string_types):
style = get_style(style, **kwargs)
log.info("using ROOT style '{0}'".format(style.GetName()))
style.cd() | If mpl is False accept either style name or a TStyle instance.
If mpl is True accept either style name or a matplotlib.rcParams-like
dictionary |
def Open(self):
"""Connects to the database and creates the required tables.
Raises:
IOError: if the specified output file already exists.
OSError: if the specified output file already exists.
ValueError: if the filename is not set.
"""
if not self._filename:
raise ValueError('Missing filename.')
if not self._append and os.path.isfile(self._filename):
raise IOError((
'Unable to use an already existing file for output '
'[{0:s}]').format(self._filename))
self._connection = sqlite3.connect(self._filename)
self._cursor = self._connection.cursor()
# Create table in database.
if not self._append:
self._cursor.execute(self._CREATE_TABLE_QUERY)
for field in self._META_FIELDS:
query = 'CREATE TABLE l2t_{0:s}s ({0:s}s TEXT, frequency INT)'.format(
field)
self._cursor.execute(query)
if self._set_status:
self._set_status('Created table: l2t_{0:s}'.format(field))
self._cursor.execute('CREATE TABLE l2t_tags (tag TEXT)')
if self._set_status:
self._set_status('Created table: l2t_tags')
query = 'CREATE TABLE l2t_saved_query (name TEXT, query TEXT)'
self._cursor.execute(query)
if self._set_status:
self._set_status('Created table: l2t_saved_query')
query = (
'CREATE TABLE l2t_disk (disk_type INT, mount_path TEXT, '
'dd_path TEXT, dd_offset TEXT, storage_file TEXT, export_path TEXT)')
self._cursor.execute(query)
query = (
'INSERT INTO l2t_disk (disk_type, mount_path, dd_path, dd_offset, '
'storage_file, export_path) VALUES (0, "", "", "", "", "")')
self._cursor.execute(query)
if self._set_status:
self._set_status('Created table: l2t_disk')
self._count = 0 | Connects to the database and creates the required tables.
Raises:
IOError: if the specified output file already exists.
OSError: if the specified output file already exists.
ValueError: if the filename is not set. |
def _open_url(url):
"""Open a HTTP connection to the URL and return a file-like object."""
response = requests.get(url, stream=True)
if response.status_code != 200:
raise IOError("Unable to download {}, HTTP {}".format(url, response.status_code))
return response | Open an HTTP connection to the URL and return the streaming response (its ``raw`` attribute is file-like).
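A small usage sketch: with stream=True, the file-like object to read from is response.raw, which can be piped straight to disk. The URL and filename are placeholders:

import shutil

response = _open_url('https://example.com/archive.tar.gz')
with open('archive.tar.gz', 'wb') as fh:
    shutil.copyfileobj(response.raw, fh)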
def render_to_response(self, *args, **kwargs):
'''Canonicalize the URL if the slug changed'''
if self.request.path != self.object.get_absolute_url():
return HttpResponseRedirect(self.object.get_absolute_url())
return super(TalkView, self).render_to_response(*args, **kwargs) | Canonicalize the URL if the slug changed |
def _evaluate_barycentric(nodes, degree, lambda1, lambda2, lambda3):
r"""Compute a point on a surface.
Evaluates :math:`B\left(\lambda_1, \lambda_2, \lambda_3\right)` for a
B |eacute| zier surface / triangle defined by ``nodes``.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): Control point nodes that define the surface.
degree (int): The degree of the surface defined by ``nodes``.
lambda1 (float): Parameter along the reference triangle.
lambda2 (float): Parameter along the reference triangle.
lambda3 (float): Parameter along the reference triangle.
Returns:
numpy.ndarray: The evaluated point as a ``D x 1`` array (where ``D``
is the ambient dimension where ``nodes`` reside).
"""
dimension, num_nodes = nodes.shape
binom_val = 1.0
result = np.zeros((dimension, 1), order="F")
index = num_nodes - 1
result[:, 0] += nodes[:, index]
# curve evaluate_multi_barycentric() takes arrays.
lambda1 = np.asfortranarray([lambda1])
lambda2 = np.asfortranarray([lambda2])
for k in six.moves.xrange(degree - 1, -1, -1):
# We want to go from (d C (k + 1)) to (d C k).
binom_val = (binom_val * (k + 1)) / (degree - k)
index -= 1 # Step to last element in column.
# k = d - 1, d - 2, ...
# d - k = 1, 2, ...
# We know column k has (d - k + 1) elements.
new_index = index - degree + k # First element in column.
col_nodes = nodes[:, new_index : index + 1] # noqa: E203
col_nodes = np.asfortranarray(col_nodes)
col_result = _curve_helpers.evaluate_multi_barycentric(
col_nodes, lambda1, lambda2
)
result *= lambda3
result += binom_val * col_result
# Update index for next iteration.
index = new_index
return result | r"""Compute a point on a surface.
Evaluates :math:`B\left(\lambda_1, \lambda_2, \lambda_3\right)` for a
B |eacute| zier surface / triangle defined by ``nodes``.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): Control point nodes that define the surface.
degree (int): The degree of the surface defined by ``nodes``.
lambda1 (float): Parameter along the reference triangle.
lambda2 (float): Parameter along the reference triangle.
lambda3 (float): Parameter along the reference triangle.
Returns:
numpy.ndarray: The evaluated point as a ``D x 1`` array (where ``D``
is the ambient dimension where ``nodes`` reside). |
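For intuition, the degree-1 case collapses the de Casteljau recursion above to a single barycentric combination; a self-contained check with plain numpy (the control points are illustrative):

import numpy as np

# Three control points of a degree-1 triangle, one per column (2 x 3).
nodes = np.asfortranarray([[0.0, 2.0, 0.0],
                           [0.0, 0.0, 2.0]])
l1, l2, l3 = 0.25, 0.5, 0.25
point = l1 * nodes[:, 0] + l2 * nodes[:, 1] + l3 * nodes[:, 2]
print(point)  # [1.  0.5]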
def handle_button(self, event, event_type):
"""Convert the button information from quartz into evdev format."""
# 0 for left
# 1 for right
# 2 for middle/center
# 3 for side
mouse_button_number = self._get_mouse_button_number(event)
# Identify buttons 3,4,5
if event_type in (25, 26):
event_type = event_type + (mouse_button_number * 0.1)
# Add buttons to events
event_type_string, event_code, value, scan = self.codes[event_type]
if event_type_string == "Key":
scan_event, key_event = self.emulate_press(
event_code, scan, value, self.timeval)
self.events.append(scan_event)
self.events.append(key_event)
# doubleclick/n-click of button
click_state = self._get_click_state(event)
repeat = self.emulate_repeat(click_state, self.timeval)
self.events.append(repeat) | Convert the button information from quartz into evdev format. |
def get_reply_states(self, string, dataset):
"""Get initial states from input string.
Parameters
----------
string : `str`
Input string.
dataset : `str`
Dataset key.
Returns
-------
`list` of `list` of `str`
"""
words = get_words(string)
if not words:
return []
long_word = 4
long_words = [word for word in words if len(word) >= long_word]
short_words = [word for word in words if len(word) < long_word]
for words in (long_words, short_words):
ret = [
states
for states in (
self.storage.get_states(dataset, word)
for word in words
)
if states
]
if ret:
return ret
return [] | Get initial states from input string.
Parameters
----------
string : `str`
Input string.
dataset : `str`
Dataset key.
Returns
-------
`list` of `list` of `str` |
def increment(key, delta=1, host=DEFAULT_HOST, port=DEFAULT_PORT):
'''
Increment the value of a key
CLI Example:
.. code-block:: bash
salt '*' memcached.increment <key>
salt '*' memcached.increment <key> 2
'''
conn = _connect(host, port)
_check_stats(conn)
cur = get(key)
if cur is None:
raise CommandExecutionError('Key \'{0}\' does not exist'.format(key))
elif not isinstance(cur, six.integer_types):
raise CommandExecutionError(
'Value for key \'{0}\' must be an integer to be '
'incremented'.format(key)
)
try:
return conn.incr(key, delta)
except ValueError:
raise SaltInvocationError('Delta value must be an integer') | Increment the value of a key
CLI Example:
.. code-block:: bash
salt '*' memcached.increment <key>
salt '*' memcached.increment <key> 2 |
def create_cell_renderer_combo(self, tree_view, title="title", assign=0, editable=False, model=None, function=None):
"""'
Function creates a CellRendererCombo with title, model
"""
renderer_combo = Gtk.CellRendererCombo()
renderer_combo.set_property('editable', editable)
if model:
renderer_combo.set_property('model', model)
if function:
renderer_combo.connect("edited", function)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", False)
column = Gtk.TreeViewColumn(title, renderer_combo, text=assign)
tree_view.append_column(column) | Function creates a CellRendererCombo with title, model |
def session_context(fn):
"""
Handles session setup and teardown
"""
@functools.wraps(fn)
def wrap(*args, **kwargs):
session = args[0].Session() # obtain from self
try:
return fn(*args, session=session, **kwargs)
finally:
# close the session even if fn raises
session.close()
return wrap | Handles session setup and teardown
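A self-contained sketch with a stand-in session factory, showing the setup/teardown the decorator guarantees; FakeSession and Repo are illustrative, not part of the codebase:

class FakeSession:
    def close(self):
        print('session closed')

class Repo:
    Session = FakeSession  # stands in for sessionmaker(bind=engine)

    @session_context
    def fetch(self, session=None):
        return 'rows'

print(Repo().fetch())  # prints 'session closed', then 'rows'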
def unescape_all(string):
"""Resolve all html entities to their corresponding unicode character"""
def escape_single(matchobj):
return _unicode_for_entity_with_name(matchobj.group(1))
return entities.sub(escape_single, string) | Resolve all html entities to their corresponding unicode character |
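For reference, the standard library covers the same ground since Python 3.4; a self-contained equivalent that resolves both named and numeric entities:

from html import unescape

print(unescape('fish &amp; chips &#169; 2024'))  # fish & chips © 2024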
def run_async(self, time_limit):
''' Run this module asynchronously and return a poller. '''
self.background = time_limit
results = self.run()
return results, poller.AsyncPoller(results, self) | Run this module asynchronously and return a poller. |
def _maybe_validate_distributions(distributions, dtype_override, validate_args):
"""Checks that `distributions` satisfies all assumptions."""
assertions = []
if not _is_iterable(distributions) or not distributions:
raise ValueError('`distributions` must be a list of one or more '
'distributions.')
if dtype_override is None:
dts = [
dtype_util.base_dtype(d.dtype)
for d in distributions
if d.dtype is not None
]
if dts[1:] != dts[:-1]:
raise TypeError('Distributions must have same dtype; found: {}.'.format(
set(dtype_util.name(dt) for dt in dts)))
# Validate event_ndims.
for d in distributions:
if tensorshape_util.rank(d.event_shape) is not None:
if tensorshape_util.rank(d.event_shape) != 1:
raise ValueError('`Distribution` must be vector variate, '
'found event ndims: {}.'.format(
tensorshape_util.rank(d.event_shape)))
elif validate_args:
assertions.append(
assert_util.assert_equal(
1, tf.size(input=d.event_shape_tensor()),
message='`Distribution` must be vector variate.'))
batch_shapes = [d.batch_shape for d in distributions]
if all(tensorshape_util.is_fully_defined(b) for b in batch_shapes):
if batch_shapes[1:] != batch_shapes[:-1]:
raise ValueError('Distributions must have the same `batch_shape`; '
'found: {}.'.format(batch_shapes))
elif validate_args:
batch_shapes = [
tensorshape_util.as_list(d.batch_shape) # pylint: disable=g-complex-comprehension
if tensorshape_util.is_fully_defined(d.batch_shape) else
d.batch_shape_tensor() for d in distributions
]
assertions.extend(
assert_util.assert_equal( # pylint: disable=g-complex-comprehension
b1, b2,
message='Distribution `batch_shape`s must be identical.')
for b1, b2 in zip(batch_shapes[1:], batch_shapes[:-1]))
return assertions | Checks that `distributions` satisfies all assumptions. |
def remove_metadata_key(self, obj, key):
"""
Removes the specified key from the object's metadata. If the key does
not exist in the metadata, nothing is done.
"""
meta_dict = {key: ""}
return self.set_metadata(obj, meta_dict) | Removes the specified key from the object's metadata. If the key does
not exist in the metadata, nothing is done. |
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).') | Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. |
def access(i):
"""
Input: Can be dictionary or string (string will be converted to dictionary)
{
action
module_uoa or CID -> converted to cid
or
(cid1) - if doesn't have = and doesn't start from -- or - or @ -> appended to cids[]
(cid2) - if doesn't have = and doesn't start from -- or - or @ -> appended to cids[]
(cid3) - if doesn't have = and doesn't start from -- or - or @ -> appended to cids[]
or
(repo_uoa)
(module_uoa)
(data_uoa)
(out=type) Module output
== '' - none
== 'con' - console interaction (if from CMD, default)
== 'json' - return dict as json to console
== 'json_with_sep' - separation line and return dict as json to console
== 'json_file' - return dict as json to file
(out_file) Output file if out=='json_file'
(con_encoding) - force encoding for IO
(ck_profile) - if 'yes', profile CK
INPUT TO A GIVEN FUNCTION
NOTE: If INPUT is a string, it will be converted to an INPUT dictionary as follows (the same as the CK command line):
ck key1=value1 -> converted to {key1:value1}
-key10 -> converted to {key10:"yes"}
-key11=value11 -> converted to {key11:value11}
--key12 -> converted to {key12:"yes"}
--key13=value13 -> converted to {key13:value13}
@file_json -> JSON from this file will be merged with INPUT
@@ -> CK will ask the user to enter JSON manually from the console and merge with INPUT
@@key -> Enter JSON manually from console and merge with INPUT under this key
@@@cmd_json -> convert string to JSON (special format) and merge with INPUT
-- xyz -> add everything after -- to "unparsed_cmd" key in INPUT
When string is converted to INPUT dictionary, "cmd" variable is set to True
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
OUTPUT FROM A GIVEN FUNCTION
}
"""
global con_encoding
# # Set fresh configuration for each access - very costly
# if cfg.get('loading_config','') == '':
# cfg['loading_config'] = 'yes'
# r=access({'action':'load',
# 'repo_uoa':cfg['repo_name_default'],
# 'module_uoa':cfg['subdir_kernel'],
# 'data_uoa':cfg['subdir_kernel_default']})
# if r['return']==0:
# cfg.update(r['dict'])
#
# r=access({'action':'load',
# 'repo_uoa':cfg['repo_name_local'],
# 'module_uoa':cfg['subdir_kernel'],
# 'data_uoa':cfg['subdir_kernel_default']})
# if r['return']==0:
# cfg.update(r['dict'])
# cfg['loading_config'] = ''
rr={'return':0}
ii={}
cmd=False
o=''
### If input is string, split into list and process in the next condition
if type(i)==str:
cmd=True
x=i.split(' ')
i=x
### If input is a list
if type(i)==list:
if len(i)==1 and i[0].strip()=='test_install':
return rr # installation test
cmd=True
rr=convert_ck_list_to_dict(i)
if rr['return']==0:
i=rr.get('ck_dict',{})
if i.get('out','')=='': i['out']='con' # Default output is console
# if called from CMD or with string
o=''
if rr['return']==0:
# Check output mode
o=i.get('out','')
### If profile
cp=i.get('ck_profile','')
if cp=='yes':
import time
start_time = time.time()
### Process request ######################################
if i.get('con_encoding','')!='': con_encoding=i['con_encoding']
### Process action ###################################
rr=init({})
if rr['return']==0:
# Run module with a given action
rr=perform_action(i)
if rr.get('out','')!='': o=rr['out']
if cp=='yes':
elapsed_time=time.time()-start_time
rr['ck_profile_time']=elapsed_time
if o=='con':
out('CK profile time: '+str(elapsed_time)+' sec.')
# Finalize call (check output) ####################################
if o=='json' or o=='json_with_sep':
if o=='json_with_sep': out(cfg['json_sep'])
rr1=dumps_json({'dict':rr})
if rr1['return']==0:
s=rr1['string']
out(s)
elif o=='json_file':
fn=i.get('out_file','')
if fn=='':
rr['return']=1
rr['error']='out==json_file but out_file is not defined in kernel access function'
else:
rr1=save_json_to_file({'json_file':fn, 'dict':rr})
if rr1['return']>0:
rr['return']=1
rr['error']=rr1['error']
# If error and CMD, output error to console
if cmd:
if rr['return']>0:
x=''
if type(i)==dict:
x=i.get('module_uoa','')
if x!='':
x='['+x+'] '
out(cfg['error']+x+rr['error']+'!')
return rr | Input: Can be dictionary or string (string will be converted to dictionary)
{
action
module_uoa or CID -> converted to cid
or
(cid1) - if doesn't have = and doesn't start from -- or - or @ -> appended to cids[]
(cid2) - if doesn't have = and doesn't start from -- or - or @ -> appended to cids[]
(cid3) - if doesn't have = and doesn't start from -- or - or @ -> appended to cids[]
or
(repo_uoa)
(module_uoa)
(data_uoa)
(out=type) Module output
== '' - none
== 'con' - console interaction (if from CMD, default)
== 'json' - return dict as json to console
== 'json_with_sep' - separation line and return dict as json to console
== 'json_file' - return dict as json to file
(out_file) Output file if out=='json_file'
(con_encoding) - force encoding for IO
(ck_profile) - if 'yes', profile CK
INPUT TO A GIVEN FUNCTION
NOTE: If INPUT is a string, it will be converted to an INPUT dictionary as follows (the same as the CK command line):
ck key1=value1 -> converted to {key1:value1}
-key10 -> converted to {key10:"yes"}
-key11=value11 -> converted to {key11:value11}
--key12 -> converted to {key12:"yes"}
--key13=value13 -> converted to {key13:value13}
@file_json -> JSON from this file will be merged with INPUT
@@ -> CK will ask the user to enter JSON manually from the console and merge with INPUT
@@key -> Enter JSON manually from console and merge with INPUT under this key
@@@cmd_json -> convert string to JSON (special format) and merge with INPUT
-- xyz -> add everything after -- to "unparsed_cmd" key in INPUT
When string is converted to INPUT dictionary, "cmd" variable is set to True
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
OUTPUT FROM A GIVEN FUNCTION
} |
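A minimal usage sketch, assuming the CK kernel is importable as ck.kernel; the action and UOA values below are placeholders:

import ck.kernel as ck

r = ck.access({'action': 'load',
               'module_uoa': 'kernel',
               'data_uoa': 'default',
               'out': ''})
if r['return'] > 0:
    print('error: ' + r['error'])
else:
    print(sorted(r.keys()))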
def list(self, muted=values.unset, hold=values.unset, coaching=values.unset,
limit=None, page_size=None):
"""
Lists ParticipantInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param bool muted: Whether to return only participants that are muted
:param bool hold: Whether to return only participants that are on hold
:param bool coaching: Whether to return only participants who are coaching another call
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.conference.participant.ParticipantInstance]
"""
return list(self.stream(
muted=muted,
hold=hold,
coaching=coaching,
limit=limit,
page_size=page_size,
)) | Lists ParticipantInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param bool muted: Whether to return only participants that are muted
:param bool hold: Whether to return only participants that are on hold
:param bool coaching: Whether to return only participants who are coaching another call
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.conference.participant.ParticipantInstance] |
def run(self, command, **kwargs):
"""Run a command on the remote host.
This is just a wrapper around ``RemoteTask(self.hostname, ...)``
"""
return RemoteTask(self.hostname, command,
identity_file=self._identity_file, **kwargs) | Run a command on the remote host.
This is just a wrapper around ``RemoteTask(self.hostname, ...)`` |
def children(self):
"""
Returns the children in this group.
:return [<QtGui.QListWidgetItem>, ..]
"""
new_refs = set()
output = []
for ref in self._children:
item = ref()
if item is not None:
output.append(item)
new_refs.add(ref)
self._children = new_refs
return output | Returns the children in this group.
:return [<QtGui.QListWidgetItem>, ..] |
def euler(self):
"""TODO DEPRECATE THIS?"""
e_xyz = transformations.euler_from_matrix(self.rotation, 'sxyz')
return np.array([180.0 / np.pi * a for a in e_xyz]) | TODO DEPRECATE THIS? |
def login(self):
"""
User login
:return: True if the login succeeds
"""
self._cookies = self.__get_rndnum_cookies()
# print self._cookies
UserCode, UserPwd = self._userid, self._userpsw
Validate = self._cookies['LogonNumber']
Submit = '%CC%E1+%BD%BB'
headers = {'Referer': 'http://jwc.wyu.edu.cn/student/body.htm'}
# save header
self._headers = headers
data = {
'UserCode': UserCode,
'UserPwd': UserPwd,
'Validate': Validate,
'Submit': Submit,
}
r = requests.post('http://jwc.wyu.edu.cn/student/logon.asp', data=data, headers=headers, cookies=self._cookies)
# print r.content.decode(_.get_charset(r.content))
return r.status_code == 200 | User login
:return: True if the login succeeds
def update_insert_values(bel_resource: Mapping, mapping: Mapping[str, Tuple[str, str]], values: Dict[str, str]) -> None:
"""Update the value dictionary with a BEL resource dictionary."""
for database_column, (section, key) in mapping.items():
if section in bel_resource and key in bel_resource[section]:
values[database_column] = bel_resource[section][key] | Update the value dictionary with a BEL resource dictionary. |
def last_day(self):
"""Return the last day of Yom Tov or Shabbat.
This is useful for three-day holidays, for example: it will return the
last in a string of Yom Tov + Shabbat.
If this HDate is Shabbat followed by no Yom Tov, returns the Saturday.
If this HDate is neither Yom Tov, nor Shabbat, this just returns
itself.
"""
day_iter = self
while day_iter.next_day.is_yom_tov or day_iter.next_day.is_shabbat:
day_iter = day_iter.next_day
return day_iter | Return the last day of Yom Tov or Shabbat.
This is useful for three-day holidays, for example: it will return the
last in a string of Yom Tov + Shabbat.
If this HDate is Shabbat followed by no Yom Tov, returns the Saturday.
If this HDate is neither Yom Tov, nor Shabbat, this just returns
itself. |
def XYZ_to_galcencyl(X,Y,Z,Xsun=1.,Zsun=0.,_extra_rot=True):
"""
NAME:
XYZ_to_galcencyl
PURPOSE:
transform XYZ coordinates (wrt Sun) to cylindrical Galactocentric coordinates
INPUT:
X - X
Y - Y
Z - Z
Xsun - cylindrical distance to the GC
Zsun - Sun's height above the midplane
_extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition
OUTPUT:
R,phi,z
HISTORY:
2010-09-24 - Written - Bovy (NYU)
"""
XYZ= nu.atleast_2d(XYZ_to_galcenrect(X,Y,Z,Xsun=Xsun,Zsun=Zsun,
_extra_rot=_extra_rot))
return nu.array(rect_to_cyl(XYZ[:,0],XYZ[:,1],XYZ[:,2])).T | NAME:
XYZ_to_galcencyl
PURPOSE:
transform XYZ coordinates (wrt Sun) to cylindrical Galactocentric coordinates
INPUT:
X - X
Y - Y
Z - Z
Xsun - cylindrical distance to the GC
Zsun - Sun's height above the midplane
_extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition
OUTPUT:
R,phi,z
HISTORY:
2010-09-24 - Written - Bovy (NYU) |
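A self-contained sketch of the rectangular-to-cylindrical step it delegates to (rect_to_cyl), under the usual convention R = sqrt(x^2 + y^2), phi = arctan2(y, x); the coordinates are illustrative:

import numpy as np

x, y, z = 1.5, 1.5, 0.3
R = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
print(R, phi, z)  # 2.1213... 0.7853... (pi/4) 0.3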
async def load_variant(obj, elem, elem_type=None, params=None, field_archiver=None, wrapped=None):
"""
Loads variant from the obj representation
:param obj:
:param elem:
:param elem_type:
:param params:
:param field_archiver:
:param wrapped:
:return:
"""
field_archiver = field_archiver if field_archiver else load_field
is_wrapped = elem_type.WRAPS_VALUE if wrapped is None else wrapped
if is_wrapped:
elem = elem_type() if elem is None else elem
fname = list(obj.keys())[0]
for field in elem_type.f_specs():
if field[0] != fname:
continue
fvalue = await field_archiver(obj[fname], field[1], field[2:], elem if not is_wrapped else None)
if is_wrapped:
elem.set_variant(field[0], fvalue)
return elem if is_wrapped else fvalue
raise ValueError('Unknown tag: %s' % fname) | Loads variant from the obj representation
:param obj:
:param elem:
:param elem_type:
:param params:
:param field_archiver:
:param wrapped:
:return: |
def about():
"""
About box for aps. Gives version numbers for
aps, NumPy, SciPy, Cython, and Matplotlib.
"""
print("")
print("aps: APS Journals API in Python for Humans")
print("Copyright (c) 2017 and later.")
print("Xiao Shang")
print("")
print("aps Version: %s" % aps.__version__)
print("Numpy Version: %s" % numpy.__version__)
print("Scipy Version: %s" % scipy.__version__)
try:
import Cython
cython_ver = Cython.__version__
except ImportError:
cython_ver = 'None'
print("Cython Version: %s" % cython_ver)
try:
import matplotlib
matplotlib_ver = matplotlib.__version__
except ImportError:
matplotlib_ver = 'None'
print("Matplotlib Version: %s" % matplotlib_ver)
print("Python Version: %d.%d.%d" % sys.version_info[0:3])
print("Number of CPUs: %s" % hardware_info()['cpus'])
# print("BLAS Info: %s" % _blas_info())
print("Platform Info: %s (%s)" % (platform.system(),
platform.machine()))
aps_install_path = os.path.dirname(inspect.getsourcefile(aps))
print("Installation path: %s" % aps_install_path)
print("") | About box for aps. Gives version numbers for
aps, NumPy, SciPy, Cython, and Matplotlib.
def double(self, column, total=None, places=None):
"""
Create a new double column on the table.
:param column: The column
:type column: str
:type total: int
:type places: int
:rtype: Fluent
"""
return self._add_column("double", column, total=total, places=places) | Create a new double column on the table.
:param column: The column
:type column: str
:type total: int
:type places: int
:rtype: Fluent |
def append_num_column(self, text: str, index: int):
""" Add value to the output row, width based on index """
width = self.columns[index]["width"]
return f"{text:>{width}}" | Add value to the output row, width based on index |
def search_tor_node(self, data_type, data):
"""Lookup an artifact to check if it is a known tor exit node.
:param data_type: The artifact type. Must be one of 'ip', 'fqdn'
or 'domain'
:param data: The artifact to lookup
:type data_type: str
:type data: str
:return: Data relative to the tor node. If the looked-up artifact is
related to a tor exit node it will contain a `nodes` array.
That array will contain a list of nodes with the
following keys:
- name: name given to the router
- ip: their IP address
- hostname: Hostname of the router
- country_code: ISO2 code of the country hosting the router
- as_name: ASName registering the router
- as_number: ASNumber registering the router
Otherwise, `nodes` will be empty.
:rtype: list
"""
results = []
if data_type == 'ip':
results = self._get_node_from_ip(data)
elif data_type == 'fqdn':
results = self._get_node_from_fqdn(data)
elif data_type == 'domain':
results = self._get_node_from_domain(data)
else:
pass
return {"nodes": results} | Lookup an artifact to check if it is a known tor exit node.
:param data_type: The artifact type. Must be one of 'ip', 'fqdn'
or 'domain'
:param data: The artifact to lookup
:type data_type: str
:type data: str
:return: Data relative to the tor node. If the looked-up artifact is
related to a tor exit node it will contain a `nodes` array.
That array will contain a list of nodes with the
following keys:
- name: name given to the router
- ip: their IP address
- hostname: Hostname of the router
- country_code: ISO2 code of the country hosting the router
- as_name: ASName registering the router
- as_number: ASNumber registering the router
Otherwise, `nodes` will be empty.
:rtype: list |
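A hypothetical call, assuming `analyzer` is an instance of the class exposing this method; the IP address is illustrative:

report = analyzer.search_tor_node('ip', '171.25.193.77')
for node in report['nodes']:
    print(node['name'], node['country_code'], node['as_name'])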
def coarsegrain(F, sets):
r"""Coarse-grains the flux to the given sets.
Parameters
----------
F : (n, n) ndarray or scipy.sparse matrix
Matrix of flux values between pairs of states.
sets : list of array-like of ints
The sets of states onto which the flux is coarse-grained.
Notes
-----
The coarse grained flux is defined as
.. math:: fc_{I,J} = \sum_{i \in I,j \in J} f_{i,j}
Note that if you coarse-grain a net flux, it does not necessarily
have a net flux property anymore. If you want to make sure you get a
net flux, use to_netflux(coarsegrain(F,sets)).
References
----------
.. [1] F. Noe, Ch. Schuette, E. Vanden-Eijnden, L. Reich and
T. Weikl: Constructing the Full Ensemble of Folding Pathways
from Short Off-Equilibrium Simulations.
Proc. Natl. Acad. Sci. USA, 106, 19011-19016 (2009)
"""
if issparse(F):
return sparse.tpt.coarsegrain(F, sets)
elif isdense(F):
return dense.tpt.coarsegrain(F, sets)
else:
raise _type_not_supported | r"""Coarse-grains the flux to the given sets.
Parameters
----------
F : (n, n) ndarray or scipy.sparse matrix
Matrix of flux values between pairs of states.
sets : list of array-like of ints
The sets of states onto which the flux is coarse-grained.
Notes
-----
The coarse grained flux is defined as
.. math:: fc_{I,J} = \sum_{i \in I,j \in J} f_{i,j}
Note that if you coarse-grain a net flux, it does not necessarily
have a net flux property anymore. If you want to make sure you get a
net flux, use to_netflux(coarsegrain(F,sets)).
References
----------
.. [1] F. Noe, Ch. Schuette, E. Vanden-Eijnden, L. Reich and
T. Weikl: Constructing the Full Ensemble of Folding Pathways
from Short Off-Equilibrium Simulations.
Proc. Natl. Acad. Sci. USA, 106, 19011-19016 (2009) |
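A self-contained illustration of the definition fc_{I,J} = sum over i in I, j in J of f_{i,j}, on a toy 4-state flux matrix coarse-grained onto the sets {0,1} and {2,3}:

import numpy as np

F = np.array([[0.0, 0.2, 0.1, 0.0],
              [0.0, 0.0, 0.3, 0.1],
              [0.0, 0.0, 0.0, 0.4],
              [0.0, 0.0, 0.0, 0.0]])
sets = [[0, 1], [2, 3]]
Fc = np.array([[F[np.ix_(I, J)].sum() for J in sets] for I in sets])
print(Fc)  # [[0.2 0.5]
           #  [0.  0.4]]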
def _read_data(self):
"""
Reads data from the connection and adds it to _push_packet,
until the connection is closed or the task in cancelled.
"""
while True:
try:
data = yield from self._socket.recv()
except asyncio.CancelledError:
break
except ConnectionClosed:
break
self._push_packet(data)
self._loop.call_soon(self.close) | Reads data from the connection and adds it to _push_packet,
until the connection is closed or the task in cancelled. |
def spin_sz(self):
"""Returns the z-component of the spin of the secondary mass."""
return conversions.secondary_spin(self.mass1, self.mass2, self.spin1z,
self.spin2z) | Returns the z-component of the spin of the secondary mass. |