def fork(self, state, expression, policy='ALL', setstate=None):
"""
    Fork a state over the concretizations of an expression.

    Using the given policy, build a list of solutions for the expression,
    then derive one child state per solution, calling setstate() on each
    child. For example, if expression is a Bool it has two solutions:
    True and False.

                     Parent
                (expression = ??)
               /                 \
          Child1                  Child2
    (expression = True)    (expression = False)
      setstate(True)         setstate(False)

    The optional setstate() function is supposed to set the concrete value
    in the child state.
"""
assert isinstance(expression, Expression)
if setstate is None:
setstate = lambda x, y: None
# Find a set of solutions for expression
solutions = state.concretize(expression, policy)
if not solutions:
raise ExecutorError("Forking on unfeasible constraint set")
if len(solutions) == 1:
setstate(state, solutions[0])
return state
logger.info("Forking. Policy: %s. Values: %s",
policy,
', '.join(f'0x{sol:x}' for sol in solutions))
self._publish('will_fork_state', state, expression, solutions, policy)
# Build and enqueue a state for each solution
children = []
for new_value in solutions:
with state as new_state:
new_state.constrain(expression == new_value)
# and set the PC of the new state to the concrete pc-dest
#(or other register or memory address to concrete)
setstate(new_state, new_value)
self._publish('did_fork_state', new_state, expression, new_value, policy)
# enqueue new_state
state_id = self.enqueue(new_state)
# maintain a list of children for logging purpose
children.append(state_id)
logger.info("Forking current state into states %r", children)
    return None
def clear(self):
""" clear all tree data
"""
self._delete_child_storage(self.root_node)
self._delete_node_storage(self.root_node)
self.root_node = BLANK_NODE
    self._root_hash = BLANK_ROOT
def load_streams(chunks):
"""
Given a gzipped stream of data, yield streams of decompressed data.
"""
chunks = peekable(chunks)
while chunks:
if six.PY3:
dc = zlib.decompressobj(wbits=zlib.MAX_WBITS | 16)
else:
dc = zlib.decompressobj(zlib.MAX_WBITS | 16)
yield load_stream(dc, chunks)
if dc.unused_data:
            chunks = peekable(itertools.chain((dc.unused_data,), chunks))
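# A minimal, self-contained sketch of the technique load_streams builds on
# (an illustration, not part of the original module): a zlib decompressobj
# created with wbits=zlib.MAX_WBITS | 16 decodes exactly one gzip member, and
# any bytes left in .unused_data belong to the next concatenated member.
import gzip
import zlib

def iter_gzip_members(data):
    """Yield the decompressed payload of each gzip member in `data`."""
    while data:
        dc = zlib.decompressobj(wbits=zlib.MAX_WBITS | 16)
        yield dc.decompress(data) + dc.flush()
        data = dc.unused_data  # leftover bytes start the next member

blob = gzip.compress(b"first") + gzip.compress(b"second")
assert list(iter_gzip_members(blob)) == [b"first", b"second"]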
def manage(group_id):
"""Manage your group."""
group = Group.query.get_or_404(group_id)
form = GroupForm(request.form, obj=group)
if form.validate_on_submit():
if group.can_edit(current_user):
try:
group.update(**form.data)
flash(_('Group "%(name)s" was updated', name=group.name),
'success')
except Exception as e:
flash(str(e), 'error')
return render_template(
"invenio_groups/new.html",
form=form,
group=group,
)
else:
flash(
_(
'You cannot edit group %(group_name)s',
group_name=group.name
),
'error'
)
return render_template(
"invenio_groups/new.html",
form=form,
group=group,
    )
def main():
'''main routine'''
# process arguments
if len(sys.argv) < 4:
usage()
rgname = sys.argv[1]
vmss_name = sys.argv[2]
capacity = sys.argv[3]
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
print("Error: Expecting azurermconfig.json in current folder")
sys.exit()
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
scaleoutput = azurerm.scale_vmss(access_token, subscription_id, rgname, vmss_name, capacity)
    print(scaleoutput.text)
def get_device_state(self):
"""
returns the full device state
"""
log.debug("getting device state...")
cmd, url = DEVICE_URLS["get_device_state"]
    return self._exec(cmd, url)
def notify_rollover(self, stream):
"""Notify that a reading in the given stream was overwritten.
Args:
stream (DataStream): The stream that had overwritten data.
"""
self.offset -= 1
if not self.matches(stream):
return
if self._count == 0:
raise InternalError("BufferedStreamWalker out of sync with storage engine, count was wrong.")
    self._count -= 1
def get_interface_detail_output_interface_ifHCOutMulticastPkts(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
output = ET.SubElement(get_interface_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
ifHCOutMulticastPkts = ET.SubElement(interface, "ifHCOutMulticastPkts")
ifHCOutMulticastPkts.text = kwargs.pop('ifHCOutMulticastPkts')
callback = kwargs.pop('callback', self._callback)
    return callback(config)
def score(self, outcomes, modelparams, expparams, return_L=False):
r"""
Returns the numerically computed score of the likelihood
function, defined as:
.. math::
q(d, \vec{x}; \vec{e}) = \vec{\nabla}_{\vec{x}} \log \Pr(d | \vec{x}; \vec{e}).
    The score is represented as a four-index tensor
``score[idx_modelparam, idx_outcome, idx_model, idx_experiment]``.
The left-most index may be suppressed for single-parameter models.
The numerical gradient is computed using the central difference method,
with step size given by the property `~ScoreMixin.h`.
If return_L is True, both `q` and the likelihood `L` are returned as `q, L`.
"""
if len(modelparams.shape) == 1:
modelparams = modelparams[:, np.newaxis]
# compute likelihood at central point
L0 = self.likelihood(outcomes, modelparams, expparams)
# allocate space for the score
q = np.empty([self.n_modelparams,
outcomes.shape[0],
modelparams.shape[0],
expparams.shape[0]])
h_perturb = np.empty(modelparams.shape)
# just loop over the model parameter as there usually won't be so many
# of them that vectorizing would be worth the effort.
for mp_idx in range(self.n_modelparams):
h_perturb[:] = np.zeros(modelparams.shape)
h_perturb[:, mp_idx] = self.h[mp_idx]
# use the chain rule since taking the numerical derivative of a
# logarithm is unstable
q[mp_idx, :] = (
self.likelihood(outcomes, modelparams + h_perturb, expparams) -
self.likelihood(outcomes, modelparams - h_perturb, expparams)
) / (2 * self.h[mp_idx] * L0)
if return_L:
return q, L0
else:
        return q
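# A hedged numeric sanity check of the central-difference estimator above,
# kept standalone on a toy likelihood (not part of the original class): for
# L(x) = exp(-(x - d)**2) the exact score is d/dx log L = -2 * (x - d), and
# (L(x+h) - L(x-h)) / (2 * h * L(x)) should match it to O(h**2).
import numpy as np

def toy_likelihood(x, d=0.3):
    return np.exp(-(x - d) ** 2)

x, h = 1.0, 1e-4
approx = (toy_likelihood(x + h) - toy_likelihood(x - h)) / (2 * h * toy_likelihood(x))
exact = -2.0 * (x - 0.3)
assert abs(approx - exact) < 1e-6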
def getAccountNames(store, protocol=None):
"""
Retrieve account name information about the given database.
@param store: An Axiom Store representing a user account. It must
have been opened through the store which contains its account
information.
@return: A generator of two-tuples of (username, domain) which
refer to the given store.
"""
return ((meth.localpart, meth.domain) for meth
            in getLoginMethods(store, protocol))
def set_block(fd):
"""Inverse of :func:`set_nonblock`, i.e. cause `fd` to block the thread
when the underlying kernel buffer is exhausted."""
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
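# For symmetry, a sketch of the set_nonblock counterpart mentioned in the
# docstring (an assumption about its shape, mirroring set_block; POSIX-only).
import fcntl
import os

def set_nonblock(fd):
    """Cause reads/writes on `fd` to raise instead of blocking."""
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)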
def load_label(self, idx, label_type=None):
"""
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
"""
if label_type == 'semantic':
label = scipy.io.loadmat('{}/SemanticLabels/spatial_envelope_256x256_static_8outdoorcategories/{}.mat'.format(self.siftflow_dir, idx))['S']
elif label_type == 'geometric':
label = scipy.io.loadmat('{}/GeoLabels/spatial_envelope_256x256_static_8outdoorcategories/{}.mat'.format(self.siftflow_dir, idx))['S']
label[label == -1] = 0
else:
raise Exception("Unknown label type: {}. Pick semantic or geometric.".format(label_type))
label = label.astype(np.uint8)
label -= 1 # rotate labels so classes start at 0, void is 255
label = label[np.newaxis, ...]
    return label.copy()
def parse_cmd(self, tree, inp_cmd = None):
""" Extract command and options from string.
The tree argument should contain a specifically formatted dict
which describes the available commands, options, arguments and
callbacks to methods for completion of arguments.
TODO: document dict format
The inp_cmd argument should contain a list of strings containing
the complete command to parse, such as sys.argv (without the first
element which specified the command itself).
"""
# reset state from previous execution
self.exe = None
self.arg = None
self.exe_options = {}
self.children = tree['children']
self.key = tree['children']
option_parsing = False
self._scoop_rest_arguments = False
if inp_cmd is not None:
self.inp_cmd = inp_cmd
# iterate the list of inputted commands
i = 0
while i < len(self.inp_cmd):
p = self.inp_cmd[i]
self.key = {}
# Find which of the valid commands matches the current element of inp_cmd
if self.children is not None:
self.key_complete = False
match = False
for param, content in self.children.items():
# match string to command
if param.find(p) == 0:
self.key[param] = content
match = True
# If we have an exact match, make sure that
# is the only element in self.key
if p == param and len(self.inp_cmd) > i+1:
self.key_complete = True
self.key = { param: content }
break
# if we are in scoop-rest-mode, place elements not matching
# anything in argument-array
if not match:
if self._scoop_rest_arguments:
self.arg.append(p)
else:
raise InvalidCommand("Invalid argument: " + p)
else:
raise InvalidCommand('ran out of parameters; command too long')
# Note that there are two reasons self.key can contain entries:
# 1) The current string (p) contained something and matched a param
# 2) The current string (p) is empty and matches all children
# If p is empty we don't really have a match but still need to
# have data in self.key to show all possible completions at this
# level. Therefore, we skip the command matching stuff when
# len(p) == 0
if len(p) != 0 and len(self.key) == 1:
key, val = list(self.key.items())[0]
i, option_parsing = self._examine_key(key, val, p, i, option_parsing)
        i += 1
def insert_users_for_optional_roles_trigger(plpy, td):
"""Trigger to update users from optional roles entries.
A compatibility trigger to insert users from moduleoptionalroles
records. This is primarily for legacy compatibility, but it is not
possible to tell whether the entry came from legacy or cnx-publishing.
Therefore, we only insert into users.
"""
modified_state = "OK"
    users = td['new']['personids'] or []
plan = plpy.prepare("""\
SELECT username FROM users WHERE username = any($1)""",
['text[]'])
existing_users = set([r['username'] for r in plpy.execute(plan, (users,))])
new_users = set(users).difference(existing_users)
for username in new_users:
plan = plpy.prepare("""\
INSERT INTO users (username, first_name, last_name, full_name, title)
SELECT personid, firstname, surname, fullname, honorific
FROM persons where personid = $1""", ['text'])
plpy.execute(plan, (username,))
    return modified_state
def _convert_duration_to_hhmmss(self, duration):
"""stub"""
time_secs = duration.seconds
min_, sec = divmod(time_secs, 60)
hour, min_ = divmod(min_, 60)
results = {
'hours': hour,
'minutes': min_,
'seconds': sec
}
    return results
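# The divmod arithmetic above, illustrated with a plain datetime.timedelta in
# place of the duration object (an assumption; note that .seconds ignores any
# whole days, just as in the method).
from datetime import timedelta

d = timedelta(hours=1, minutes=23, seconds=45)
min_, sec = divmod(d.seconds, 60)
hour, min_ = divmod(min_, 60)
assert (hour, min_, sec) == (1, 23, 45)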
def process_file(path):
""" Open a single labeled image at path and get needed information, return as a dictionary"""
info = dict()
with fits.open(path) as hdu:
head = hdu[0].header
data = hdu[0].data
labels = {theme: value for value, theme in list(hdu[1].data)}
info['filename'] = os.path.basename(path)
info['trainer'] = head['expert']
info['date-label'] = dateparser.parse(head['date-lab'])
info['date-observation'] = dateparser.parse(head['date-end'])
for theme in themes:
info[theme + "_count"] = np.sum(data == labels[theme])
    return info
def add_items(self, dataframe, hide_cols=()):
"""
Add items and/or update existing items in grid
"""
# replace "None" values with ""
dataframe = dataframe.fillna("")
# remove any columns that shouldn't be shown
for col in hide_cols:
if col in dataframe.columns:
del dataframe[col]
# add more rows
self.AppendRows(len(dataframe))
columns = dataframe.columns
row_num = -1
# fill in all rows with appropriate values
for ind, row in dataframe.iterrows():
row_num += 1
for col_num, col in enumerate(columns):
value = row[col]
self.SetCellValue(row_num, col_num, str(value))
# set citation default value
if col == 'citations':
citation = row['citations']
if (citation is None) or (citation is np.nan):
self.SetCellValue(row_num, col_num, 'This study')
else:
if 'This study' not in citation:
if len(citation):
citation += ':'
citation += 'This study'
self.SetCellValue(row_num, col_num, citation)
    self.row_labels.extend(dataframe.index)
def probe_services(self, handle, conn_id, callback):
"""Given a connected device, probe for its GATT services and characteristics
Args:
handle (int): a handle to the connection on the BLED112 dongle
conn_id (int): a unique identifier for this connection on the DeviceManager
that owns this adapter.
callback (callable): Callback to be called when this procedure finishes
"""
self._command_task.async_command(['_probe_services', handle], callback,
                                     {'connection_id': conn_id, 'handle': handle})
def create_multiple_expectations(df, columns, expectation_type, *args, **kwargs):
"""Creates an identical expectation for each of the given columns with the specified arguments, if any.
Args:
df (great_expectations.dataset): A great expectations dataset object.
columns (list): A list of column names represented as strings.
expectation_type (string): The expectation type.
Raises:
KeyError if the provided column does not exist.
AttributeError if the provided expectation type does not exist or df is not a valid great expectations dataset.
Returns:
A list of expectation results.
"""
expectation = getattr(df, expectation_type)
results = list()
for column in columns:
results.append(expectation(column, *args, **kwargs))
    return results
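# Hypothetical usage sketch, assuming the classic great_expectations dataset
# API (ge.from_pandas) and the standard expectation name below; it applies the
# same not-null expectation to two columns in one call.
import great_expectations as ge
import pandas as pd

df = ge.from_pandas(pd.DataFrame({"user_id": [1, 2], "email": ["a@x.org", None]}))
results = create_multiple_expectations(
    df, ["user_id", "email"], "expect_column_values_to_not_be_null")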
def ParseApplicationUsageRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses an application usage row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
# TODO: replace usage by definition(s) in eventdata. Not sure which values
# it will hold here.
application_name = self._GetRowValue(query_hash, row, 'event')
usage = 'Application {0:s}'.format(application_name)
event_data = MacOSApplicationUsageEventData()
event_data.application = self._GetRowValue(query_hash, row, 'app_path')
event_data.app_version = self._GetRowValue(query_hash, row, 'app_version')
event_data.bundle_id = self._GetRowValue(query_hash, row, 'bundle_id')
event_data.count = self._GetRowValue(query_hash, row, 'number_times')
event_data.query = query
timestamp = self._GetRowValue(query_hash, row, 'last_time')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, usage)
    parser_mediator.ProduceEventWithEventData(event, event_data)
def endaccess(self):
"""Terminates access to the SDS.
Args::
no argument
Returns::
None.
The SDS instance should not be used afterwards.
The 'endaccess()' method is implicitly called when
the SDS instance is deleted.
C library equivalent : SDendaccess
"""
status = _C.SDendaccess(self._id)
_checkErr('endaccess', status, "cannot execute")
    self._id = None
def fetch_import_ref_restriction(self,):
"""Fetch whether importing the reference is restricted
:returns: True, if importing the reference is restricted
:rtype: :class:`bool`
:raises: None
"""
inter = self.get_refobjinter()
restricted = self.status() not in (self.LOADED, self.UNLOADED)
    return restricted or inter.fetch_action_restriction(self, 'import_reference')
def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList'
) -> 'QubitOrder':
"""Converts a value into a basis.
Args:
val: An iterable or a basis.
Returns:
The basis implied by the value.
"""
    if isinstance(val, collections.abc.Iterable):
        return QubitOrder.explicit(val)
    if isinstance(val, QubitOrder):
        return val
    raise ValueError(
        "Don't know how to interpret <{}> as a Basis.".format(val))
def pattern(self):
"""Extract query pattern from operations."""
if not self._pattern:
# trigger evaluation of operation
if (self.operation in ['query', 'getmore', 'update', 'remove'] or
self.command in ['count', 'findandmodify']):
self._pattern = self._find_pattern('query: ')
elif self.command == 'find':
self._pattern = self._find_pattern('filter: ')
    return self._pattern
def ParseOptions(cls, options, analysis_plugin):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
analysis_plugin (AnalysisPlugin): analysis plugin to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
if not isinstance(analysis_plugin, tagging.TaggingAnalysisPlugin):
raise errors.BadConfigObject(
'Analysis plugin is not an instance of TaggingAnalysisPlugin')
tagging_file = cls._ParseStringOption(options, 'tagging_file')
if not tagging_file:
raise errors.BadConfigOption(
'Tagging analysis plugin requires a tagging file.')
tagging_file_path = tagging_file
if not os.path.isfile(tagging_file_path):
# Check if the file exists in the data location path.
data_location = getattr(options, 'data_location', None)
if data_location:
tagging_file_path = os.path.join(data_location, tagging_file)
if not os.path.isfile(tagging_file_path):
raise errors.BadConfigOption(
'No such tagging file: {0:s}.'.format(tagging_file))
try:
analysis_plugin.SetAndLoadTagFile(tagging_file_path)
except UnicodeDecodeError:
raise errors.BadConfigOption(
'Invalid tagging file: {0:s} encoding must be UTF-8.'.format(
tagging_file))
except errors.TaggingFileError as exception:
raise errors.BadConfigOption(
'Unable to read tagging file: {0:s} with error: {1!s}'.format(
            tagging_file, exception))
def ipoTodayDF(token='', version=''):
'''This returns a list of upcoming or today IPOs scheduled for the current and next month. The response is split into two structures:
rawData and viewData. rawData represents all available data for an IPO. viewData represents data structured for display to a user.
https://iexcloud.io/docs/api/#ipo-calendar
10am, 10:30am UTC daily
Args:
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
val = ipoToday(token, version)
if val:
df = pd.io.json.json_normalize(val, 'rawData')
_toDatetime(df)
_reindex(df, 'symbol')
else:
df = pd.DataFrame()
    return df
def encodeAA(seq_vec, maxlen=None, seq_align="start", encode_type="one_hot"):
"""Convert the Amino-acid sequence into 1-hot-encoding numpy array
# Arguments
seq_vec: List of strings/amino-acid sequences
maxlen: Maximum sequence length. See `pad_sequences` for more detail
seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail
encode_type: can be `"one_hot"` or `token` for token encoding of codons (incremental integer ).
# Returns
numpy.ndarray of shape `(len(seq_vec), maxlen, 22)`
"""
return encodeSequence(seq_vec,
vocab=AMINO_ACIDS,
neutral_vocab="_",
maxlen=maxlen,
seq_align=seq_align,
pad_value="_",
                          encode_type=encode_type)
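# Hypothetical usage, assuming encodeSequence and AMINO_ACIDS from the same
# module are in scope: two short peptides are padded/truncated to length 5,
# giving the (n_seq, maxlen, 22) shape promised by the docstring.
arr = encodeAA(["SEQVE", "MK"], maxlen=5, seq_align="start")
print(arr.shape)  # expected: (2, 5, 22)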
def spawn(self, fn, *args, **kwargs):
"""Spawn a new process, attached to this Namespace.
It will be monitored by the "watcher" process in the Socket. If the
socket disconnects, all these greenlets are going to be killed, after
calling BaseNamespace.disconnect()
This method uses the ``exception_handler_decorator``. See
Namespace documentation for more information.
"""
# self.log.debug("Spawning sub-Namespace Greenlet: %s" % fn.__name__)
if hasattr(self, 'exception_handler_decorator'):
fn = self.exception_handler_decorator(fn)
new = gevent.spawn(fn, *args, **kwargs)
self.jobs.append(new)
    return new
def main():
"""
Project's main method which will parse the command line arguments, run a
scan using the TagCubeClient and exit.
"""
cmd_args = TagCubeCLI.parse_args()
    try:
        tagcube_cli = TagCubeCLI.from_cmd_args(cmd_args)
    except ValueError as ve:
        # We get here when there are no credentials configured
        print('%s' % ve)
        sys.exit(1)
    try:
        sys.exit(tagcube_cli.run())
    except ValueError as ve:
        # We get here when the configured credentials had some issue (invalid)
        # or there was some error (such as invalid profile name) with the params
        print('%s' % ve)
        sys.exit(2)
def calc_circuit_breaker_position(self, debug=False):
""" Calculates the optimal position of a circuit breaker on route.
Parameters
----------
debug: bool, defaults to False
If True, prints process information.
Returns
-------
int
position of circuit breaker on route (index of last node on 1st half-ring preceding the circuit breaker)
Notes
-----
According to planning principles of MV grids, a MV ring is run as two strings (half-rings) separated by a
circuit breaker which is open at normal operation.
Assuming a ring (route which is connected to the root node at either sides), the optimal position of a circuit
breaker is defined as the position (virtual cable) between two nodes where the conveyed current is minimal on
the route. Instead of the peak current, the peak load is used here (assuming a constant voltage).
The circuit breakers are used here for checking tech. constraints only and will be re-located after connection
of satellites and stations in ding0.grid.mv_grid.tools.set_circuit_breakers
References
----------
See Also
--------
ding0.grid.mv_grid.tools.set_circuit_breakers
"""
# TODO: add references (Tao)
# set init value
demand_diff_min = 10e6
# check possible positions in route
for ctr in range(len(self._nodes)):
# split route and calc demand difference
route_demand_part1 = sum([node.demand() for node in self._nodes[0:ctr]])
route_demand_part2 = sum([node.demand() for node in self._nodes[ctr:len(self._nodes)]])
demand_diff = abs(route_demand_part1 - route_demand_part2)
if demand_diff < demand_diff_min:
demand_diff_min = demand_diff
position = ctr
if debug:
logger.debug('sum 1={}'.format(
sum([node.demand() for node in self._nodes[0:position]])))
logger.debug('sum 2={}'.format(sum([node.demand() for node in
self._nodes[
position:len(self._nodes)]])))
logger.debug(
'Position of circuit breaker: {0}-{1} (sumdiff={2})'.format(
self._nodes[position - 1], self._nodes[position],
demand_diff_min))
    return position
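# A standalone sketch of the placement rule described in the docstring, using
# plain per-node peak loads instead of Node objects: choose the split index
# that minimizes |sum(first half-ring) - sum(second half-ring)|.
def split_position(demands):
    best_pos, best_diff = 0, float("inf")
    for ctr in range(len(demands)):
        diff = abs(sum(demands[:ctr]) - sum(demands[ctr:]))
        if diff < best_diff:
            best_pos, best_diff = ctr, diff
    return best_pos

assert split_position([4, 1, 1, 4]) == 2  # 4+1 vs 1+4 balances the halves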
def cluster_kmeans(data=None, k=None, max_iter=10, tolerance=1e-5, stride=1,
metric='euclidean', init_strategy='kmeans++', fixed_seed=False,
n_jobs=None, chunksize=None, skip=0, keep_data=False, clustercenters=None, **kwargs):
r"""k-means clustering
If data is given, it performs a k-means clustering and then assigns the
data using a Voronoi discretization. It returns a :class:`KmeansClustering <pyemma.coordinates.clustering.KmeansClustering>`
object that can be used to extract the discretized data sequences, or to
assign other data points to the same partition. If data is not given, an
empty :class:`KmeansClustering <pyemma.coordinates.clustering.KmeansClustering>`
will be created that still needs to be parametrized, e.g. in a :func:`pipeline`.
Parameters
----------
data: ndarray (T, d) or list of ndarray (T_i, d) or a reader created by :func:`source`
input data, if available in memory
k: int
the number of cluster centers. When not specified (None), min(sqrt(N), 5000) is chosen as default value,
where N denotes the number of data points
    max_iter : int
        maximum number of iterations before stopping; the signature defaults to 10.
tolerance : float
stop iteration when the relative change in the cost function
:math:`C(S) = \sum_{i=1}^{k} \sum_{\mathbf x \in S_i} \left\| \mathbf x - \boldsymbol\mu_i \right\|^2`
is smaller than tolerance.
stride : int, optional, default = 1
If set to 1, all input data will be used for estimation. Note that this
could cause this calculation to be very slow for large data sets. Since
molecular dynamics data is usually correlated at short timescales, it
is often sufficient to estimate transformations at a longer stride.
Note that the stride option in the get_output() function of the returned
object is independent, so you can parametrize at a long stride, and
still map all frames through the transformer.
metric : str
metric to use during clustering ('euclidean', 'minRMSD')
init_strategy : str
determines if the initial cluster centers are chosen according to the kmeans++-algorithm
or drawn uniformly distributed from the provided data set
fixed_seed : bool or (positive) integer
if set to true, the random seed gets fixed resulting in deterministic behavior; default is false.
If an integer >= 0 is given, use this to initialize the random generator.
n_jobs : int or None, default None
Number of threads to use during assignment of the data.
If None, all available CPUs will be used.
chunksize: int, default=None
Number of data frames to process at once. Choose a higher value here,
to optimize thread usage and gain processing speed. If None is passed,
use the default value of the underlying reader/data source. Choose zero to
disable chunking at all.
skip : int, default=0
skip the first initial n frames per trajectory.
keep_data: boolean, default=False
if you intend to quickly resume a non-converged kmeans iteration, set this to True.
Otherwise the linear memory array will have to be re-created. Note that the data will also be deleted,
if and only if the estimation converged within the given tolerance parameter.
clustercenters: ndarray (k, dim), default=None
if passed, the init_strategy is ignored and these centers will be iterated.
Returns
-------
kmeans : a :class:`KmeansClustering <pyemma.coordinates.clustering.KmeansClustering>` clustering object
Object for kmeans clustering.
It holds discrete trajectories and cluster center information.
Examples
--------
>>> import numpy as np
>>> from pyemma.util.contexts import settings
>>> import pyemma.coordinates as coor
>>> traj_data = [np.random.random((100, 3)), np.random.random((100,3))]
>>> with settings(show_progress_bars=False):
... cluster_obj = coor.cluster_kmeans(traj_data, k=20, stride=1)
... cluster_obj.get_output() # doctest: +ELLIPSIS
[array([...
.. seealso:: **Theoretical background**: `Wiki page <http://en.wikipedia.org/wiki/K-means_clustering>`_
.. autoclass:: pyemma.coordinates.clustering.kmeans.KmeansClustering
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.clustering.kmeans.KmeansClustering
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.clustering.kmeans.KmeansClustering
:attributes:
References
----------
The k-means algorithms was invented in [1]_. The term k-means was
first used in [2]_.
.. [1] Steinhaus, H. (1957).
Sur la division des corps materiels en parties.
Bull. Acad. Polon. Sci. (in French) 4, 801-804.
.. [2] MacQueen, J. B. (1967).
Some Methods for classification and Analysis of Multivariate Observations.
Proceedings of 5th Berkeley Symposium on Mathematical Statistics and
Probability 1. University of California Press. pp. 281-297
"""
from pyemma.coordinates.clustering.kmeans import KmeansClustering
res = KmeansClustering(n_clusters=k, max_iter=max_iter, metric=metric, tolerance=tolerance,
init_strategy=init_strategy, fixed_seed=fixed_seed, n_jobs=n_jobs, skip=skip,
keep_data=keep_data, clustercenters=clustercenters, stride=stride)
from pyemma.util.reflection import get_default_args
cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_kmeans)['chunksize'], **kwargs)
if data is not None:
res.estimate(data, chunksize=cs)
else:
res.chunksize = cs
    return res
def screenshot(self, filename=None):
""" Take a screenshot """
# screen size: 1280x720
screen = self._ymc.screenshot()
if filename:
screen.save(filename)
    return screen
def _vmomentsurfacemassGrid(self,n,m,grid):
"""Internal function to evaluate vmomentsurfacemass using a grid
rather than direct integration"""
if len(grid.df.shape) == 3: tlist= True
else: tlist= False
if tlist:
nt= grid.df.shape[2]
out= []
for ii in range(nt):
out.append(nu.dot(grid.vRgrid**n,nu.dot(grid.df[:,:,ii],grid.vTgrid**m))*\
(grid.vRgrid[1]-grid.vRgrid[0])*(grid.vTgrid[1]-grid.vTgrid[0]))
return nu.array(out)
else:
return nu.dot(grid.vRgrid**n,nu.dot(grid.df,grid.vTgrid**m))*\
            (grid.vRgrid[1]-grid.vRgrid[0])*(grid.vTgrid[1]-grid.vTgrid[0])
def run(self, dag):
"""Run one pass of the lookahead mapper on the provided DAG.
Args:
dag (DAGCircuit): the directed acyclic graph to be mapped
Returns:
DAGCircuit: A dag mapped to be compatible with the coupling_map in
the property_set.
Raises:
TranspilerError: if the coupling map or the layout are not
compatible with the DAG
"""
coupling_map = self._coupling_map
ordered_virtual_gates = list(dag.serial_layers())
if self.initial_layout is None:
if self.property_set["layout"]:
self.initial_layout = self.property_set["layout"]
else:
self.initial_layout = Layout.generate_trivial_layout(*dag.qregs.values())
if len(dag.qubits()) != len(self.initial_layout):
raise TranspilerError('The layout does not match the amount of qubits in the DAG')
if len(self._coupling_map.physical_qubits) != len(self.initial_layout):
raise TranspilerError(
"Mappers require to have the layout to be the same size as the coupling map")
mapped_gates = []
layout = self.initial_layout.copy()
gates_remaining = ordered_virtual_gates.copy()
while gates_remaining:
best_step = _search_forward_n_swaps(layout, gates_remaining,
coupling_map)
layout = best_step['layout']
gates_mapped = best_step['gates_mapped']
gates_remaining = best_step['gates_remaining']
mapped_gates.extend(gates_mapped)
# Preserve input DAG's name, regs, wire_map, etc. but replace the graph.
mapped_dag = _copy_circuit_metadata(dag, coupling_map)
for node in mapped_gates:
mapped_dag.apply_operation_back(op=node.op, qargs=node.qargs, cargs=node.cargs)
    return mapped_dag
def render_template(cmd_derived_from_alias, pos_args_table):
"""
Render cmd_derived_from_alias as a Jinja template with pos_args_table as the arguments.
Args:
        cmd_derived_from_alias: The string to be injected with positional arguments.
pos_args_table: The dictionary used to rendered.
Returns:
A processed string with positional arguments injected.
"""
try:
cmd_derived_from_alias = normalize_placeholders(cmd_derived_from_alias, inject_quotes=True)
template = jinja.Template(cmd_derived_from_alias)
# Shlex.split allows us to split a string by spaces while preserving quoted substrings
# (positional arguments in this case)
rendered = shlex.split(template.render(pos_args_table))
# Manually check if there is any runtime error (such as index out of range)
# since Jinja template engine only checks for compile time error.
# Only check for runtime errors if there is an empty string in rendered.
if '' in rendered:
check_runtime_errors(cmd_derived_from_alias, pos_args_table)
return rendered
except Exception as exception:
# Exception raised from runtime error
if isinstance(exception, CLIError):
raise
# The template has some sort of compile time errors
split_exception_message = str(exception).split()
# Check if the error message provides the index of the erroneous character
error_index = split_exception_message[-1]
if error_index.isdigit():
split_exception_message.insert(-1, 'index')
error_msg = RENDER_TEMPLATE_ERROR.format(' '.join(split_exception_message), cmd_derived_from_alias)
# Calculate where to put an arrow (^) char so that it is exactly below the erroneous character
# e.g. ... "{{a.split('|)}}"
# ^
error_msg += '\n{}^'.format(' ' * (len(error_msg) - len(cmd_derived_from_alias) + int(error_index) - 1))
else:
exception_str = str(exception).replace('"{{', '}}').replace('}}"', '}}')
error_msg = RENDER_TEMPLATE_ERROR.format(cmd_derived_from_alias, exception_str)
        raise CLIError(error_msg)
def download(self, url,
file_name,
headers=None,
show_progress=True):
'''stream to a temporary file, rename on successful completion
Parameters
==========
file_name: the file name to stream to
url: the url to stream from
headers: additional headers to add
force: If the final image exists, don't overwrite
'''
fd, tmp_file = tempfile.mkstemp(prefix=("%s.tmp." % file_name))
os.close(fd)
# Should we verify the request?
verify = self._verify()
# Check here if exists
if requests.head(url, verify=verify).status_code in [200, 401]:
response = self.stream(url, headers=headers, stream_to=tmp_file)
if isinstance(response, HTTPError):
bot.error("Error downloading %s, exiting." %url)
sys.exit(1)
shutil.move(tmp_file, file_name)
else:
bot.error("Invalid url or permissions %s" %url)
    return file_name
def takes_arg(obj, arg: str) -> bool:
"""
Checks whether the provided obj takes a certain arg.
If it's a class, we're really checking whether its constructor does.
If it's a function or method, we're checking the object itself.
Otherwise, we raise an error.
"""
if inspect.isclass(obj):
signature = inspect.signature(obj.__init__)
elif inspect.ismethod(obj) or inspect.isfunction(obj):
signature = inspect.signature(obj)
else:
raise ConfigurationError(f"object {obj} is not callable")
    return arg in signature.parameters
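# A quick illustration of takes_arg on a plain function (the helper above is
# assumed to be in scope):
def greet(name, punctuation="!"):
    return name + punctuation

assert takes_arg(greet, "name")
assert not takes_arg(greet, "volume")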
def get_userpass_value(cli_value, config, key, prompt_strategy):
"""Gets the username / password from config.
Uses the following rules:
1. If it is specified on the cli (`cli_value`), use that.
2. If `config[key]` is specified, use that.
3. Otherwise prompt using `prompt_strategy`.
:param cli_value: The value supplied from the command line or `None`.
:type cli_value: unicode or `None`
:param config: Config dictionary
:type config: dict
:param key: Key to find the config value.
:type key: unicode
    :param prompt_strategy: Argumentless function to return fallback value.
:type prompt_strategy: function
:returns: The value for the username / password
:rtype: unicode
"""
if cli_value is not None:
return cli_value
elif config.get(key):
return config[key]
else:
        return prompt_strategy()
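# Hypothetical usage: fall back to an interactive prompt only when neither the
# CLI value nor the config file provides the setting.
import functools
import getpass

username = get_userpass_value(
    cli_value=None,
    config={"username": "alice"},
    key="username",
    prompt_strategy=functools.partial(getpass.getpass, "Username: "),
)
assert username == "alice"  # the config wins; the prompt is never invoked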
def make_configure_tab(self):
""" initial set up of configure tab"""
# Setup the choice between single and multicolor
modeframe = tk.Frame(self.tab_configure)
self.mode = tk.IntVar()
singlecolor = tk.Radiobutton(modeframe, text="Single color", variable=self.mode,
value=1, command=lambda: self.disable_multicolor())
multicolor = tk.Radiobutton(modeframe, text="Three color", variable=self.mode,
value=3, command=lambda: self.disable_singlecolor())
self.mode.set(3)
singlecolor.pack(side=tk.LEFT)
multicolor.pack(side=tk.LEFT)
updatebutton = tk.Button(master=modeframe, text="Update",
command=self.update_button_action)
updatebutton.pack(side=tk.RIGHT)
modeframe.grid(row=0, column=0)
self.setup_multicolor()
    self.setup_singlecolor()
def get(self, **kwargs):
""" Same as search, but will throw an error if there are multiple or no
results. If there are multiple results and only one is an exact match
on api_version, that resource will be returned.
"""
results = self.search(**kwargs)
# If there are multiple matches, prefer exact matches on api_version
if len(results) > 1 and kwargs.get('api_version'):
results = [
result for result in results if result.group_version == kwargs['api_version']
]
# If there are multiple matches, prefer non-List kinds
if len(results) > 1 and not all([isinstance(x, ResourceList) for x in results]):
results = [result for result in results if not isinstance(result, ResourceList)]
if len(results) == 1:
return results[0]
elif not results:
raise ResourceNotFoundError('No matches found for {}'.format(kwargs))
else:
        raise ResourceNotUniqueError('Multiple matches found for {}: {}'.format(kwargs, results))
def CallApiHandler(handler, args, token=None):
"""Handles API call to a given handler with given args and token."""
result = handler.Handle(args, token=token)
expected_type = handler.result_type
if expected_type is None:
expected_type = None.__class__
if result.__class__ != expected_type:
raise UnexpectedResultTypeError(
"Expected %s, but got %s." %
(expected_type.__name__, result.__class__.__name__))
  return result
def resolve_dynamic_values(env):
"""
Resolve dynamic values inside need data.
Rough workflow:
#. Parse all needs and their data for a string like [[ my_func(a,b,c) ]]
#. Extract function name and call parameters
#. Execute registered function name with extracted call parameters
#. Replace original string with return value
:param env: Sphinx environment
:return: return value of given function
"""
# Only perform calculation if not already done yet
if env.needs_workflow['dynamic_values_resolved']:
return
needs = env.needs_all_needs
for key, need in needs.items():
for need_option in need:
if need_option in ['docname', 'lineno', 'target_node', 'content']:
# dynamic values in this data are not allowed.
continue
if not isinstance(need[need_option], (list, set)):
func_call = True
while func_call:
try:
func_call, func_return = _detect_and_execute(need[need_option], need, env)
except FunctionParsingException:
raise SphinxError("Function definition of {option} in file {file}:{line} has "
"unsupported parameters. "
"supported are str, int/float, list".format(option=need_option,
file=need['docname'],
line=need['lineno']))
if func_call is None:
continue
# Replace original function string with return value of function call
if func_return is None:
need[need_option] = need[need_option].replace('[[{}]]'.format(func_call), '')
else:
need[need_option] = need[need_option].replace('[[{}]]'.format(func_call), str(func_return))
if need[need_option] == '':
need[need_option] = None
else:
new_values = []
for element in need[need_option]:
try:
func_call, func_return = _detect_and_execute(element, need, env)
except FunctionParsingException:
raise SphinxError("Function definition of {option} in file {file}:{line} has "
"unsupported parameters. "
"supported are str, int/float, list".format(option=need_option,
file=need['docname'],
line=need['lineno']))
if func_call is None:
new_values.append(element)
else:
# Replace original function string with return value of function call
if isinstance(need[need_option], (str, int, float)):
new_values.append(element.replace('[[{}]]'.format(func_call), str(func_return)))
else:
if isinstance(need[need_option], (list, set)):
new_values += func_return
need[need_option] = new_values
# Finally set a flag so that this function gets not executed several times
    env.needs_workflow['dynamic_values_resolved'] = True
def restore_scrollbar_position(self):
"""Restoring scrollbar position after main window is visible"""
scrollbar_pos = self.get_option('scrollbar_position', None)
if scrollbar_pos is not None:
        self.explorer.treewidget.set_scrollbar_position(scrollbar_pos)
def frequency2fractional(frequency, mean_frequency=-1):
""" Convert frequency in Hz to fractional frequency
Parameters
----------
frequency: np.array
Data array of frequency in Hz
mean_frequency: float
(optional) The nominal mean frequency, in Hz
if omitted, defaults to mean frequency=np.mean(frequency)
Returns
-------
y:
Data array of fractional frequency
"""
if mean_frequency == -1:
mu = np.mean(frequency)
else:
mu = mean_frequency
y = [(x-mu)/mu for x in frequency]
    return y
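# Worked example: a 10 MHz nominal source measured 1 Hz high/low corresponds
# to a fractional frequency of +/- 1e-7.
import numpy as np

f = np.array([10e6 + 1.0, 10e6 - 1.0])
y = frequency2fractional(f, mean_frequency=10e6)
assert np.allclose(y, [1e-7, -1e-7])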
def set_id(self, dxid):
'''
:param dxid: New job ID to be associated with the handler (localjob IDs also accepted for local runs)
:type dxid: string
Discards the currently stored ID and associates the handler with *dxid*
'''
if dxid is not None:
if not (isinstance(dxid, basestring) and dxid.startswith('localjob-')):
# localjob IDs (which do not follow the usual ID
# syntax) should be allowed; otherwise, follow the
# usual syntax checking
verify_string_dxid(dxid, self._class)
    self._dxid = dxid
def cache_data(self):
"""
Cache some basic data such as financial statement metrics
"""
# Set Slug if not set
if not self.slug_name:
self.slug_name = slugify(self.name).strip()
if len(self.slug_name) > 255:
        self.slug_name = self.slug_name[0:254]
def unset(self, host, *args):
"""
Removes settings for a host.
Parameters
----------
host : the host to remove settings from.
*args : list of settings to removes.
"""
self.__check_host_args(host, args)
remove_idx = [idx for idx, x in enumerate(self.lines_)
if x.host == host and x.key.lower() in args]
for idx in reversed(sorted(remove_idx)):
        del self.lines_[idx]
def filter_options(keys, options):
"""
Filter 'options' with given 'keys'.
:param keys: key names of optional keyword arguments
:param options: optional keyword arguments to filter with 'keys'
>>> filter_options(("aaa", ), dict(aaa=1, bbb=2))
{'aaa': 1}
>>> filter_options(("aaa", ), dict(bbb=2))
{}
"""
    return dict((k, options[k]) for k in keys if k in options)
def _retry(event, attempts, delay):
"""
An iterator of pairs of (attempt number, event set), checking whether
`event` is set up to `attempts` number of times, and delaying `delay`
seconds in between.
Terminates as soon as `event` is set, or until `attempts` have been made.
Intended to be used in a loop, as in:
for num, ok in _retry(event_to_wait_for, 10, 1.0):
do_async_thing_that_sets_event()
_log('tried %d time(s) to set event', num)
if not ok:
raise Exception('failed to set event')
"""
event.clear()
    attempted = 0
    while attempted < attempts and not event.is_set():
        attempted += 1
        yield attempted, event.is_set()
        if event.wait(delay):
            break
    yield attempted, event.is_set()
def prepare_child_message(self,
gas: int,
to: Address,
value: int,
data: BytesOrView,
code: bytes,
**kwargs: Any) -> Message:
"""
Helper method for creating a child computation.
"""
kwargs.setdefault('sender', self.msg.storage_address)
child_message = Message(
gas=gas,
to=to,
value=value,
data=data,
code=code,
depth=self.msg.depth + 1,
**kwargs
)
    return child_message
def listen(queue):
""" Appends events to the queue (ButtonEvent, WheelEvent, and MoveEvent). """
if not os.geteuid() == 0:
raise OSError("Error 13 - Must be run as administrator")
listener = MouseEventListener(lambda e: queue.put(e) or is_allowed(e.name, e.event_type == KEY_UP))
t = threading.Thread(target=listener.run, args=())
t.daemon = True
    t.start()
def get_policy_for_vhost(self, vhost, name):
"""
Get a specific policy for a vhost.
:param vhost: The virtual host the policy is for
:type vhost: str
:param name: The name of the policy
:type name: str
"""
return self._api_get('/api/policies/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(name),
    ))
def apply_changes(self, event):
"""
    Applies the changes in the various attribute boxes of this object to all highlighted fit objects in the logger; these changes are reflected both in this object and in the Zeq_GUI parent object.
@param: event -> the wx.ButtonEvent that triggered this function
"""
new_name = self.name_box.GetLineText(0)
new_color = self.color_box.GetValue()
new_tmin = self.tmin_box.GetValue()
new_tmax = self.tmax_box.GetValue()
next_i = -1
changed_i = []
while True:
next_i = self.logger.GetNextSelected(next_i)
if next_i == -1:
break
specimen = self.fit_list[next_i][1]
fit = self.fit_list[next_i][0]
if new_name:
if new_name not in [x.name for x in self.parent.pmag_results_data['specimens'][specimen]]: fit.name = new_name
if new_color:
fit.color = self.color_dict[new_color]
#testing
not_both = True
if new_tmin and new_tmax:
if fit == self.parent.current_fit:
self.parent.tmin_box.SetStringSelection(new_tmin)
self.parent.tmax_box.SetStringSelection(new_tmax)
fit.put(specimen,self.parent.COORDINATE_SYSTEM, self.parent.get_PCA_parameters(specimen,fit,new_tmin,new_tmax,self.parent.COORDINATE_SYSTEM,fit.PCA_type))
not_both = False
if new_tmin and not_both:
if fit == self.parent.current_fit:
self.parent.tmin_box.SetStringSelection(new_tmin)
fit.put(specimen,self.parent.COORDINATE_SYSTEM, self.parent.get_PCA_parameters(specimen,fit,new_tmin,fit.tmax,self.parent.COORDINATE_SYSTEM,fit.PCA_type))
if new_tmax and not_both:
if fit == self.parent.current_fit:
self.parent.tmax_box.SetStringSelection(new_tmax)
fit.put(specimen,self.parent.COORDINATE_SYSTEM, self.parent.get_PCA_parameters(specimen,fit,fit.tmin,new_tmax,self.parent.COORDINATE_SYSTEM,fit.PCA_type))
changed_i.append(next_i)
offset = 0
for i in changed_i:
i -= offset
v = self.update_logger_entry(i)
if v == "s":
offset += 1
    self.parent.update_selection()
def close_boundaries_sensibly(model):
"""
Return a cobra model with all boundaries closed and changed constraints.
In the returned model previously fixed reactions are no longer constrained
as such. Instead reactions are constrained according to their
reversibility. This is to prevent the FBA from becoming infeasible when
trying to solve a model with closed exchanges and one fixed reaction.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
cobra.Model
A cobra model with all boundary reactions closed and the constraints
of each reaction set according to their reversibility.
"""
for rxn in model.reactions:
if rxn.reversibility:
rxn.bounds = -1, 1
else:
rxn.bounds = 0, 1
for boundary in model.boundary:
        boundary.bounds = (0, 0)
def add_settings_parser(subparsers, parent_parser):
"""Creates the args parser needed for the settings command and its
subcommands.
"""
# The following parser is for the settings subsection of commands. These
# commands display information about the currently applied on-chain
# settings.
settings_parser = subparsers.add_parser(
'settings',
help='Displays on-chain settings',
description='Displays the values of currently active on-chain '
'settings.')
settings_parsers = settings_parser.add_subparsers(
title='settings',
dest='settings_cmd')
settings_parsers.required = True
list_parser = settings_parsers.add_parser(
'list',
help='Lists the current keys and values of on-chain settings',
description='List the current keys and values of on-chain '
'settings. The content can be exported to various '
'formats for external consumption.'
)
list_parser.add_argument(
'--url',
type=str,
help="identify the URL of a validator's REST API",
default='http://localhost:8008')
list_parser.add_argument(
'--filter',
type=str,
default='',
help='filters keys that begin with this value')
list_parser.add_argument(
'--format',
default='default',
choices=['default', 'csv', 'json', 'yaml'],
        help='choose the output format')
def _read_from_definition_body(self):
"""
Read the Swagger document from DefinitionBody. It could either be an inline Swagger dictionary or an
AWS::Include macro that contains location of the included Swagger. In the later case, we will download and
parse the Swagger document.
Returns
-------
dict
Swagger document, if we were able to parse. None, otherwise
"""
        # Let's try to parse it as an AWS::Include Transform first. If not, then fall back to assuming the Swagger
        # document was inlined directly into the body
location = parse_aws_include_transform(self.definition_body)
if location:
LOG.debug("Trying to download Swagger from %s", location)
return self._download_swagger(location)
# Inline Swagger, just return the contents which should already be a dictionary
LOG.debug("Detected Inline Swagger definition")
        return self.definition_body
def fit(
self, frequency, recency, T, weights=None, initial_params=None, verbose=False, tol=1e-7, index=None, **kwargs
):
"""
Fit the data to the MBG/NBD model.
Parameters
----------
frequency: array_like
the frequency vector of customers' purchases
(denoted x in literature).
recency: array_like
the recency vector of customers' purchases
(denoted t_x in literature).
T: array_like
customers' age (time units since first purchase)
weights: None or array_like
Number of customers with given frequency/recency/T,
defaults to 1 if not specified. Fader and
Hardie condense the individual RFM matrix into all
observed combinations of frequency/recency/T. This
parameter represents the count of customers with a given
purchase pattern. Instead of calculating individual
log-likelihood, the log-likelihood is calculated for each
pattern and multiplied by the number of customers with
that pattern.
verbose : bool, optional
set to true to print out convergence diagnostics.
tol : float, optional
tolerance for termination of the function minimization process.
index: array_like, optional
index for resulted DataFrame which is accessible via self.data
kwargs:
key word arguments to pass to the scipy.optimize.minimize
function as options dict
Returns
-------
ModifiedBetaGeoFitter:
With additional properties and methods like ``params_`` and ``predict``
"""
# although the parent method is called, this class's
# _negative_log_likelihood is referenced
super(ModifiedBetaGeoFitter, self).fit(
frequency, recency, T, weights, initial_params, verbose, tol, index=index, **kwargs
)
# this needs to be reassigned from the parent method
self.generate_new_data = lambda size=1: modified_beta_geometric_nbd_model(
T, *self._unload_params("r", "alpha", "a", "b"), size=size
)
self.variance_matrix_ = self._compute_variance_matrix()
self.standard_errors_ = self._compute_standard_errors()
self.confidence_intervals_ = self._compute_confidence_intervals()
        return self
def prsdp(string):
"""
Parse a string as a double precision number, encapsulating error handling.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/prsdp_c.html
:param string: String representing a d.p. number.
:type string: str
:return: D.p. value obtained by parsing string.
:rtype: float
"""
string = stypes.stringToCharP(string)
dpval = ctypes.c_double()
libspice.prsdp_c(string, ctypes.byref(dpval))
    return dpval.value
def project(self, from_shape, to_shape):
"""
Project the polygon onto an image with different shape.
The relative coordinates of all points remain the same.
E.g. a point at (x=20, y=20) on an image (width=100, height=200) will be
projected on a new image (width=200, height=100) to (x=40, y=10).
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int
Shape of the original image. (Before resize.)
to_shape : tuple of int
Shape of the new image. (After resize.)
Returns
-------
imgaug.Polygon
Polygon object with new coordinates.
"""
if from_shape[0:2] == to_shape[0:2]:
return self.copy()
ls_proj = self.to_line_string(closed=False).project(
from_shape, to_shape)
    return self.copy(exterior=ls_proj.coords)
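# The coordinate arithmetic behind project(), shown standalone: scale each
# point by the ratio of new to old image size. Shapes are (height, width), so
# this reproduces the docstring's example of (20, 20) on a 100x200 image
# mapping to (40, 10) on a 200x100 one.
def project_point(x, y, from_shape, to_shape):
    h0, w0 = from_shape[0:2]
    h1, w1 = to_shape[0:2]
    return x * (w1 / w0), y * (h1 / h0)

assert project_point(20, 20, (200, 100), (100, 200)) == (40.0, 10.0)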
def new(self, page_name, **dict):
'''
Create a new item with the provided dict information
at the given page_name. Returns the new item.
As of version 2.2 of Redmine, this doesn't seem to function.
'''
self._item_new_path = '/projects/%s/wiki/%s.json' % \
(self._project.identifier, page_name)
# Call the base class new method
    return super(Redmine_Wiki_Pages_Manager, self).new(**dict)
def avatar_url_as(self, *, format=None, size=1024):
"""Returns a friendly URL version of the avatar the webhook has.
If the webhook does not have a traditional avatar, their default
avatar URL is returned instead.
The format must be one of 'jpeg', 'jpg', or 'png'.
The size must be a power of 2 between 16 and 1024.
Parameters
-----------
format: Optional[:class:`str`]
The format to attempt to convert the avatar to.
If the format is ``None``, then it is equivalent to png.
size: :class:`int`
The size of the image to display.
Raises
------
InvalidArgument
Bad image format passed to ``format`` or invalid ``size``.
Returns
--------
:class:`Asset`
The resulting CDN asset.
"""
if self.avatar is None:
# Default is always blurple apparently
return Asset(self._state, 'https://cdn.discordapp.com/embed/avatars/0.png')
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 1024")
format = format or 'png'
if format not in ('png', 'jpg', 'jpeg'):
raise InvalidArgument("format must be one of 'png', 'jpg', or 'jpeg'.")
url = 'https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.{1}?size={2}'.format(self, format, size)
    return Asset(self._state, url)
def report_parse(self):
"""
If the pipeline has previously been run on these data, instead of reading through the results, parse the
report instead
"""
# Initialise lists
report_strains = list()
genus_list = list()
if self.analysistype == 'mlst':
for sample in self.runmetadata.samples:
try:
genus_list.append(sample.general.referencegenus)
except AttributeError:
sample.general.referencegenus = 'ND'
genus_list.append(sample.general.referencegenus)
# Read in the report
if self.analysistype == 'mlst':
for genus in genus_list:
try:
report_name = os.path.join(self.reportpath, '{at}_{genus}.csv'.format(at=self.analysistype,
genus=genus))
report_strains = self.report_read(report_strains=report_strains,
report_name=report_name)
except FileNotFoundError:
report_name = self.report
report_strains = self.report_read(report_strains=report_strains,
report_name=report_name)
else:
report_name = self.report
report_strains = self.report_read(report_strains=report_strains,
report_name=report_name)
# Populate strains not in the report with 'empty' GenObject with appropriate attributes
for sample in self.runmetadata.samples:
if sample.name not in report_strains:
setattr(sample, self.analysistype, GenObject())
sample[self.analysistype].sequencetype = 'ND'
sample[self.analysistype].matches = 0
            sample[self.analysistype].results = dict()
def __generate_actors(self, operator, upstream_channels,
downstream_channels):
"""Generates one actor for each instance of the given logical
operator.
Attributes:
operator (Operator): The logical operator metadata.
upstream_channels (list): A list of all upstream channels for
all instances of the operator.
downstream_channels (list): A list of all downstream channels
for all instances of the operator.
"""
num_instances = operator.num_instances
logger.info("Generating {} actors of type {}...".format(
num_instances, operator.type))
in_channels = upstream_channels.pop(
operator.id) if upstream_channels else []
handles = []
for i in range(num_instances):
# Collect input and output channels for the particular instance
ip = [
channel for channel in in_channels
if channel.dst_instance_id == i
] if in_channels else []
op = [
channel for channels_list in downstream_channels.values()
for channel in channels_list if channel.src_instance_id == i
]
log = "Constructed {} input and {} output channels "
log += "for the {}-th instance of the {} operator."
logger.debug(log.format(len(ip), len(op), i, operator.type))
input_gate = DataInput(ip)
output_gate = DataOutput(op, operator.partitioning_strategies)
handle = self.__generate_actor(i, operator, input_gate,
output_gate)
if handle:
handles.append(handle)
    return handles
def select_cb(self, viewer, event, data_x, data_y):
"""Called when the user clicks on the color bar viewer.
Calculate the index of the color bar they clicked on and
set that color map in the current channel viewer.
"""
if not (self._cmxoff <= data_x < self._cmwd):
# need to click within the width of the bar
return
i = int(data_y / (self._cmht + self._cmsep))
if 0 <= i < len(self.cm_names):
name = self.cm_names[i]
msg = "cmap => '%s'" % (name)
self.logger.info(msg)
channel = self.fv.get_channel_info()
if channel is not None:
viewer = channel.fitsimage
#viewer.onscreen_message(msg, delay=0.5)
            viewer.set_color_map(name)
def mangle_coverage(local_path, log):
"""Edit .coverage file substituting Windows file paths to Linux paths.
:param str local_path: Destination path to save file to.
:param logging.Logger log: Logger for this function. Populated by with_log() decorator.
"""
# Read the file, or return if not a .coverage file.
with open(local_path, mode='rb') as handle:
if handle.read(13) != b'!coverage.py:':
log.debug('File %s not a coverage file.', local_path)
return
handle.seek(0)
# I'm lazy, reading all of this into memory. What could possibly go wrong?
file_contents = handle.read(52428800).decode('utf-8') # 50 MiB limit, surely this is enough?
# Substitute paths.
for windows_path in set(REGEX_MANGLE.findall(file_contents)):
unix_relative_path = windows_path.replace(r'\\', '/').split('/', 3)[-1]
unix_absolute_path = os.path.abspath(unix_relative_path)
if not os.path.isfile(unix_absolute_path):
log.debug('Windows path: %s', windows_path)
log.debug('Unix relative path: %s', unix_relative_path)
log.error('No such file: %s', unix_absolute_path)
raise HandledError
file_contents = file_contents.replace(windows_path, unix_absolute_path)
# Write.
with open(local_path, 'w') as handle:
        handle.write(file_contents)
def _guess_name(desc, taken=None):
"""Attempts to guess the menu entry name from the function name."""
taken = taken or []
name = ""
# Try to find the shortest name based on the given description.
for word in desc.split():
c = word[0].lower()
if not c.isalnum():
continue
name += c
if name not in taken:
break
# If name is still taken, add a number postfix.
count = 2
while name in taken:
name = name + str(count)
count += 1
return name | 0.001838 |
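
A quick trace of the loop above, assuming _guess_name is importable as-is:

taken = []
for desc in ('Open File', 'Open Folder', 'Quit'):
    name = _guess_name(desc, taken)
    taken.append(name)
print(taken)  # ['o', 'of', 'q'] -- the second name grows one initial at a time until unique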
def require_resource(self, type: Type[T_Resource], name: str = 'default') -> T_Resource:
"""
Look up a resource in the chain of contexts and raise an exception if it is not found.
This is like :meth:`get_resource` except that instead of returning ``None`` when a resource
is not found, it will raise :exc:`~asphalt.core.context.ResourceNotFound`.
:param type: type of the requested resource
:param name: name of the requested resource
:return: the requested resource
:raises asphalt.core.context.ResourceNotFound: if a resource of the given type and name was
not found
"""
resource = self.get_resource(type, name)
if resource is None:
raise ResourceNotFound(type, name)
return resource | 0.008653 |
def execute_phase(self, phase):
"""Executes a phase or skips it, yielding PhaseExecutionOutcome instances.
Args:
phase: Phase to execute.
Returns:
The final PhaseExecutionOutcome that wraps the phase return value
(or exception) of the final phase run. All intermediary results, if any,
are REPEAT and handled internally. Returning REPEAT here means the phase
hit its limit for repetitions.
"""
repeat_count = 1
repeat_limit = phase.options.repeat_limit or sys.maxsize
while not self._stopping.is_set():
is_last_repeat = repeat_count >= repeat_limit
phase_execution_outcome = self._execute_phase_once(phase, is_last_repeat)
if phase_execution_outcome.is_repeat and not is_last_repeat:
repeat_count += 1
continue
return phase_execution_outcome
# We've been cancelled, so just 'timeout' the phase.
return PhaseExecutionOutcome(None) | 0.005325 |
def frontend_routing(self, context):
"""
Returns the targeted frontend and original state
:type context: satosa.context.Context
:rtype satosa.frontends.base.FrontendModule
:param context: The response context
:return: frontend
"""
target_frontend = context.state[STATE_KEY]
satosa_logging(logger, logging.DEBUG, "Routing to frontend: %s " % target_frontend, context.state)
context.target_frontend = target_frontend
frontend = self.frontends[context.target_frontend]["instance"]
return frontend | 0.005068 |
def create_tags_with_concatenated_css_classes(tags):
"""Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags.
"""
current_classes = set()
result = []
for pos, group in group_tags_at_same_position(tags):
opening, closing = get_opening_closing_tags(group)
# handle closing tags at current position
closing_added = False
if len(closing) > 0:
closing_tag = Tag(pos, False, '')
for tag in closing:
current_classes.remove(tag.css_class)
result.append(closing_tag)
closing_added = True
# handle opening tags at current position
opening_added = False
if len(opening) > 0:
# handle the begin of an overlap
if not closing_added and len(current_classes) > 0:
result.append(Tag(pos, False, ''))
for tag in opening:
current_classes.add(tag.css_class)
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
opening_added = True
# handle the end of an overlap
if closing_added and not opening_added and len(current_classes) > 0:
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
return result | 0.001363 |
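
The effect is easiest to see on two overlapping spans. A self-contained illustration of the idea (it does not use the snippet's Tag helpers, just the same concatenation rule):

spans = [(0, 5, 'a'), (3, 8, 'b')]  # (start, end, css_class); 'a' and 'b' overlap on [3, 5)
points = sorted({p for start, end, _ in spans for p in (start, end)})
for lo, hi in zip(points, points[1:]):
    active = sorted(c for start, end, c in spans if start <= lo and hi <= end)
    if active:
        print(lo, hi, ' '.join(active))
# 0 3 a
# 3 5 a b   <- the overlap gets the concatenated class string
# 5 8 b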
def match(self, request, *, root=None, path=None, methods=None):
"""Return match or None for the request/constraints pair.
.. seealso:: Proxy:
:meth:`.Router.match`
"""
return self.__router.match(
request, root=root, path=path, methods=methods) | 0.006645 |
def display(self):
""" dump operation
"""
print("{}".format(self))
for task in self.tasks:
print(" - {}".format(task)) | 0.012195 |
def _create_subepochs(x, nperseg, step):
"""Transform the data into a matrix for easy manipulation
Parameters
----------
x : 1d ndarray
actual data values
nperseg : int
number of samples in each row to create
step : int
distance in samples between rows
Returns
-------
2d ndarray
a view (i.e. doesn't copy data) of the original x, with shape
determined by nperseg and step. You should use the last dimension
"""
axis = x.ndim - 1 # last dim
nsmp = x.shape[axis]
stride = x.strides[axis]
noverlap = nperseg - step
v_shape = *x.shape[:axis], (nsmp - noverlap) // step, nperseg
v_strides = *x.strides[:axis], stride * step, stride
v = as_strided(x, shape=v_shape, strides=v_strides,
writeable=False) # much safer
return v | 0.001174 |
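
Usage sketch, assuming numpy and as_strided are imported as in the host module:

import numpy as np
x = np.arange(10)
v = _create_subepochs(x, nperseg=4, step=2)
print(v)
# [[0 1 2 3]
#  [2 3 4 5]
#  [4 5 6 7]
#  [6 7 8 9]]   <- 4 windows of 4 samples, hopping by 2, no data copied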
def compare_cells(self, cell1, cell2):
'''
return true if exactly equal or if equal but modified,
otherwise return false
return type: BooleanPlus
'''
eqlanguage = cell1["language"] == cell2["language"]
eqinput = cell1["input"] == cell2["input"]
eqoutputs = self.equaloutputs(cell1["outputs"], cell2["outputs"])
if eqlanguage and eqinput and eqoutputs:
return BooleanPlus(True, False)
elif not self.check_modified:
return BooleanPlus(False, False)
input1 = u"".join(cell1['input'])
input2 = u"".join(cell2['input'])
similarity_percent = Levenshtein.ratio(input1, input2)
if similarity_percent >= 0.65:
return BooleanPlus(True, True)
return BooleanPlus(False, False) | 0.002433 |
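
The 0.65 cutoff applies to Levenshtein.ratio, which is normalized by total length. A small sanity check (requires the python-Levenshtein package):

import Levenshtein
a = "print('hello')"
b = "print('hello world')"
print(Levenshtein.ratio(a, b))       # ~0.82 -> above 0.65, counted as modified
print(Levenshtein.ratio(a, 'x = 1')) # 0.0 -> well below 0.65, different cells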
def set_display_label(self, display_label):
"""Seta a display label.
arg: display_label (string): the new display label
raise: InvalidArgument - ``display_label`` is invalid
raise: NoAccess - ``display_label`` cannot be modified
raise: NullArgument - ``display_label`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if self.get_display_label_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_string(display_label,
self.get_display_label_metadata()):
raise errors.InvalidArgument()
self._my_map['displayLabel']['text'] = display_label | 0.002725 |
def _url_builder(url_root,api_key,path,params):
"""
    Helper function to build a parameterized URL.
"""
params['api_key'] = api_key
url_end = urlencode(params)
url = "%s%s%s" % (url_root,path,url_end)
return url | 0.02521 |
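
Note the helper only concatenates, so the '?' separator has to be part of path. A usage sketch with made-up endpoint values:

from urllib.parse import urlencode  # the urlencode the helper relies on in Python 3
url = _url_builder('https://api.example.com', 'SECRET', '/v1/items?', {'q': 'books'})
print(url)  # https://api.example.com/v1/items?q=books&api_key=SECRET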
def _dbus_get_object(bus_name, object_name):
""" Fetches DBUS proxy object given the specified parameters.
`bus_name`
Name of the bus interface.
`object_name`
Object path related to the interface.
Returns object or ``None``.
"""
try:
bus = dbus.SessionBus()
obj = bus.get_object(bus_name, object_name)
return obj
except (NameError, dbus.exceptions.DBusException):
return None | 0.002088 |
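
For example, probing the standard freedesktop notification service (well-known names; whether it answers depends on the running session):

obj = _dbus_get_object('org.freedesktop.Notifications',
                       '/org/freedesktop/Notifications')
print('notification service found' if obj is not None else 'no session bus or service')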
def read_minutes_months(self, s):
"""Return a (minutes, months) tuple after parsing a "M,N" string.
"""
try:
(minutes, months) = [int(x.strip()) for x in s.split(',')]
return minutes, months
except Exception:
            raise ParsingError('Value should be "minutes, months"') | 0.005988 |
def set_callbacks(self, **dic_functions):
"""Register callbacks needed by the interface object"""
for action in self.interface.CALLBACKS:
try:
f = dic_functions[action]
except KeyError:
pass
else:
setattr(self.interface.callbacks, action, f)
        manquantes = [
            a for a in self.interface.CALLBACKS if a not in dic_functions]
        if not manquantes:
            logging.debug(
                f"{self.__class__.__name__}: all requested callbacks are provided.")
        else:
            logging.warning(
                f"{self.__class__.__name__} didn't set requested callbacks {manquantes}") | 0.007022 |
def _emiss_ep(self, Eph):
"""
Electron-proton bremsstrahlung emissivity per unit photon energy
"""
if self.weight_ep == 0.0:
return np.zeros_like(Eph)
gam = np.vstack(self._gam)
eps = (Eph / mec2).decompose().value
# compute integral with electron distribution
emiss = c.cgs * trapz_loglog(
np.vstack(self._nelec) * self._sigma_ep(gam, eps),
self._gam,
axis=0,
).to(u.cm ** 2 / Eph.unit)
return emiss | 0.003781 |
def get_block(self, x,y,z, coord=False):
"""Return the id of the block at x, y, z."""
"""
Laid out like:
(0,0,0), (0,1,0), (0,2,0) ... (0,127,0), (0,0,1), (0,1,1), (0,2,1) ... (0,127,1), (0,0,2) ... (0,127,15), (1,0,0), (1,1,0) ... (15,127,15)
::
blocks = []
for x in range(15):
for z in range(15):
for y in range(127):
blocks.append(Block(x,y,z))
"""
        offset = y + z*128 + x*128*16 if (coord is False) else coord[1] + coord[2]*128 + coord[0]*128*16
return self.blocksList[offset] | 0.014423 |
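
A worked index for the YZX layout described in the comment (16x128x16 chunk):

x, y, z = 1, 5, 2
offset = y + z*128 + x*128*16
print(offset)  # 2309 = one full x-slab (2048) + two y-columns (256) + 5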
def log_time(logger):
"""
Decorator to log the execution time of a function
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
_log_time(logger, func.__name__, start, end)
return result
return wrapper
return decorator | 0.002457 |
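
Typical use, assuming a module-level logger and the private _log_time helper the wrapper calls:

import logging
import time

logger = logging.getLogger(__name__)

@log_time(logger)
def crunch():
    time.sleep(0.2)

crunch()  # _log_time(logger, 'crunch', start, end) fires after the ~0.2 s run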
def flush_mod(self):
"""Flush all pending LDAP modifications."""
for dn in self.__pending_mod_dn__:
try:
if self.__ro__:
for mod in self.__mod_queue__[dn]:
if mod[0] == ldap.MOD_DELETE:
mod_str = "DELETE"
elif mod[0] == ldap.MOD_ADD:
mod_str = "ADD"
else:
mod_str = "REPLACE"
print("{} VALUE {} = {} FOR {}".format(mod_str,
mod[1],
mod[2],
dn))
else:
self.__con__.modify_s(dn, self.__mod_queue__[dn])
except ldap.TYPE_OR_VALUE_EXISTS:
print("Error! Conflicting Batch Modification: %s"
% str(self.__mod_queue__[dn]))
continue
except ldap.NO_SUCH_ATTRIBUTE:
print("Error! Conflicting Batch Modification: %s"
% str(self.__mod_queue__[dn]))
continue
self.__mod_queue__[dn] = None
self.__pending_mod_dn__ = [] | 0.001509 |
def format_help(self, formatter):
"""
Format an option group's help text, outdenting the title so it's
flush with the "SCons Options" title we print at the top.
"""
formatter.dedent()
result = formatter.format_heading(self.title)
formatter.indent()
result = result + optparse.OptionContainer.format_help(self, formatter)
return result | 0.004926 |
def hash(filename):
'''returns string of MD5 hash of given filename'''
buffer_size = 10*1024*1024
m = hashlib.md5()
    with open(filename, 'rb') as f:  # binary mode: md5 update() needs bytes
        buff = f.read(buffer_size)
        while len(buff) > 0:
            m.update(buff)
            buff = f.read(buffer_size)
    # hexdigest() zero-pads each byte; '%x' % ord(x) silently dropped leading zeros
    return m.hexdigest() | 0.005634 |
def is_datetime(value,
minimum = None,
maximum = None,
coerce_value = False,
**kwargs):
"""Indicate whether ``value`` is a :class:`datetime <python:datetime.datetime>`.
:param value: The value to evaluate.
:param minimum: If supplied, will make sure that ``value`` is on or after
this value.
:type minimum: :class:`datetime <python:datetime.datetime>` /
:class:`date <python:datetime.date>` / compliant :class:`str <python:str>`
/ :obj:`None <python:None>`
:param maximum: If supplied, will make sure that ``value`` is on or before this
value.
:type maximum: :class:`datetime <python:datetime.datetime>` /
:class:`date <python:datetime.date>` / compliant :class:`str <python:str>`
/ :obj:`None <python:None>`
:param coerce_value: If ``True``, will return ``True`` if ``value`` can be
coerced to a :class:`datetime <python:datetime.datetime>`. If ``False``,
will only return ``True`` if ``value`` is a complete timestamp. Defaults to
``False``.
:type coerce_value: :class:`bool <python:bool>`
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
value = validators.datetime(value,
minimum = minimum,
maximum = maximum,
coerce_value = coerce_value,
**kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True | 0.010538 |
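
Hedged usage sketch -- the exact coercion behavior lives in the underlying validators.datetime, so the third line's outcome is an assumption about that validator:

print(is_datetime('2018-01-01T12:00:00'))            # True: complete timestamp
print(is_datetime('2018-01-01'))                     # False: date only, coercion off
print(is_datetime('2018-01-01', coerce_value=True))  # True, assuming the validator
                                                     # upcasts a bare date to midnight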
def suitify(parent):
"""
Turn the stuff after the first colon in parent's children
into a suite, if it wasn't already
"""
for node in parent.children:
if node.type == syms.suite:
            # already in the preferred format, do nothing
return
# One-liners have no suite node, we have to fake one up
for i, node in enumerate(parent.children):
if node.type == token.COLON:
break
else:
raise ValueError(u"No class suite and no ':'!")
# Move everything into a suite node
suite = Node(syms.suite, [Newline(), Leaf(token.INDENT, indentation(node) + indentation_step(node))])
one_node = parent.children[i+1]
one_node.remove()
one_node.prefix = u''
suite.append_child(one_node)
parent.append_child(suite) | 0.002484 |
def get_rating(postid, userid):
'''
    Get the rating of a certain post and user.
'''
try:
recs = TabRating.select().where(
(TabRating.post_id == postid) & (TabRating.user_id == userid)
)
    except Exception:
return False
if recs.count() > 0:
return recs.get().rating
else:
return False | 0.0075 |
def tradingpairs(self, pairs):
'''
Use this function to query price and volume data for MANY trading pairs.
Usage: http://api.cryptocoincharts.info/tradingPairs/[currency1_currency2,currency2_currency3,...]
A example pair: currency1_currency2 = "doge_btc"
currency2_currency3 = "btc_eur"
        http://api.cryptocoincharts.info/tradingPairs/doge_btc,btc_eur
'''
url = self.API_PATH + 'tradingPairs/'
data = { 'pairs':pairs }
json_data = json.loads(self._getdata(url, data))
tradingpairs = []
for entry in json_data:
tradingpair = TradingPair()
tradingpair.id = entry['id']
tradingpair.price = entry['price']
tradingpair.price_before_24h = entry['price_before_24h']
tradingpair.volume_first = entry['volume_first']
tradingpair.volume_second = entry['volume_second']
tradingpair.volume_btc = entry['volume_btc']
tradingpair.best_market = entry['best_market']
tradingpair.latest_trade = entry['latest_trade']
tradingpairs.append(tradingpair)
return tradingpairs | 0.008779 |
def get_proxy_config(self, headers, path):
"""
stub. this really needs to be a call to the remote
restful interface to get the appropriate host and
headers to use for this upload
"""
self.ofs.conn.add_aws_auth_header(headers, 'PUT', path)
from pprint import pprint
pprint(headers)
host = self.ofs.conn.server_name()
return host, headers | 0.004808 |
def get_order_specification_visitor(name, registry=None):
"""
Returns the class registered as the order specification
visitor utility under the given name (one of the
:const:`everest.querying.base.EXPRESSION_KINDS` constants).
:returns: class implementing
:class:`everest.interfaces.IOrderSpecificationVisitor`
"""
if registry is None:
registry = get_current_registry()
return registry.getUtility(IOrderSpecificationVisitor, name=name) | 0.002066 |
@contextlib.contextmanager
def take_at_least_n_seconds(time_s):
"""A context manager which ensures it takes at least time_s to execute.
Example:
with take_at_least_n_seconds(5):
do.Something()
do.SomethingElse()
    # if Something and SomethingElse took 3 seconds, the with block will sleep
    # for 2 seconds before exiting.
Args:
time_s: The number of seconds this block should take. If it doesn't take at
least this time, then this method blocks during __exit__.
Yields:
To do some actions then on completion waits the remaining time.
"""
timeout = PolledTimeout(time_s)
yield
while not timeout.has_expired():
time.sleep(timeout.remaining) | 0.008982 |
def _is_running(self, tries=10):
"""
Return if the server is running according to pg_ctl.
"""
# We can't possibly be running if our base_pathname isn't defined.
if not self.base_pathname:
return False
if tries < 1:
raise ValueError('tries must be > 0')
cmd = [
PostgresFinder.find_root() / 'pg_ctl',
'status',
'-D',
self.base_pathname,
]
votes = 0
while abs(votes) < tries:
time.sleep(0.1)
running = (subprocess.call(cmd, stdout=DEV_NULL) == 0)
if running and votes >= 0:
votes += 1
elif not running and votes <= 0:
votes -= 1
else:
votes = 0
return votes > 0 | 0.00241 |
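
The votes counter is a debounce: a state only wins after `tries` consistent samples in a row, and any flicker resets it. Self-contained illustration of the same logic:

def debounce(samples, tries=3):
    votes = 0
    for running in samples:
        if running and votes >= 0:
            votes += 1
        elif not running and votes <= 0:
            votes -= 1
        else:
            votes = 0  # disagreement with the streak: start over
        if abs(votes) >= tries:
            break
    return votes > 0

print(debounce([True, False, True, True, True]))  # True: flicker resets, then 3 in a row
print(debounce([False, False, False]))            # False: 3 negative samples in a row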
def _init_step(self):
"""Initialize next step of simulation to be run.
"""
self._age += 1
self.env.age = self._age
self._log(logging.INFO, "")
self._log(logging.INFO, "\t***** Step {:0>4} *****". format(self.age))
self._log(logging.INFO, "")
self._agents_to_act = self._get_order_agents()
self._step_processing_time = 0.0
self._step_start_time = time.time() | 0.004577 |
def rotation(cls, angle, pivot=None):
"""Create a rotation transform at the specified angle,
optionally about the specified pivot point.
:param angle: Rotation angle in degrees
:type angle: float
:param pivot: Point to rotate about, if omitted the
rotation is about the origin.
:type pivot: sequence
:rtype: Affine
"""
ca, sa = cos_sin_deg(angle)
if pivot is None:
return tuple.__new__(cls, (ca, sa, 0.0, -sa, ca, 0.0, 0.0, 0.0, 1.0))
else:
px, py = pivot
return tuple.__new__(
cls,
(
ca,
sa,
px - px * ca + py * sa,
-sa,
ca,
py - px * sa - py * ca,
0.0,
0.0,
1.0,
),
) | 0.003171 |
def make_heading_abstracts(self, heading_div):
"""
An article may contain data for various kinds of abstracts. This method
works on those that are included in the Heading. This is displayed
after the Authors and Affiliations.
Metadata element, content derived from FrontMatter
"""
for abstract in self.article.root.xpath('./front/article-meta/abstract'):
#Make a copy of the abstract
abstract_copy = deepcopy(abstract)
abstract_copy.tag = 'div'
#Abstracts are a rather diverse bunch, keep an eye on them!
title_text = abstract_copy.xpath('./title[1]/text()')
for title in abstract_copy.findall('.//title'):
remove(title)
#Create a header for the abstract
abstract_header = etree.Element('h2')
remove_all_attributes(abstract_copy)
#Set the header text and abstract id according to abstract type
abstract_type = abstract.attrib.get('abstract-type')
            log.debug('Handling abstract with abstract-type="{0}"'.format(abstract_type))
if abstract_type == 'summary':
abstract_header.text = 'Author Summary'
abstract_copy.attrib['id'] = 'author-summary'
elif abstract_type == 'editors-summary':
abstract_header.text = 'Editors\' Summary'
abstract_copy.attrib['id'] = 'editor-summary'
elif abstract_type == 'synopsis':
abstract_header.text = 'Synopsis'
abstract_copy.attrib['id'] = 'synopsis'
elif abstract_type == 'alternate':
#Right now, these will only be included if there is a title to
#give it
if title_text:
abstract_header.text= title_text[0]
abstract_copy.attrib['id'] = 'alternate'
else:
continue
elif abstract_type is None:
abstract_header.text = 'Abstract'
abstract_copy.attrib['id'] = 'abstract'
elif abstract_type == 'toc': # We don't include these
continue
else: # Warn about these, then skip
log.warning('No handling for abstract-type="{0}"'.format(abstract_type))
continue
#abstract_header.text = abstract_type
#abstract_copy.attrib['id'] = abstract_type
heading_div.append(abstract_header)
heading_div.append(abstract_copy) | 0.005405 |
def is_closest_date_parameter(task, param_name):
""" Return the parameter class of param_name on task. """
for name, obj in task.get_params():
if name == param_name:
return hasattr(obj, 'use_closest_date')
return False | 0.004 |
def remove(mod, persist=False, comment=True):
'''
Remove the specified kernel module
mod
Name of module to remove
persist
Also remove module from /boot/loader.conf
comment
If persist is set don't remove line from /boot/loader.conf but only
comment it
CLI Example:
.. code-block:: bash
salt '*' kmod.remove vmm
'''
pre_mods = lsmod()
res = __salt__['cmd.run_all']('kldunload {0}'.format(mod),
python_shell=False)
if res['retcode'] == 0:
post_mods = lsmod()
mods = _rm_mods(pre_mods, post_mods)
persist_mods = set()
if persist:
persist_mods = _remove_persistent_module(mod, comment)
return sorted(list(mods | persist_mods))
else:
return 'Error removing module {0}: {1}'.format(mod, res['stderr']) | 0.002283 |
def flatten(iterable, maps=None, unique=False) -> list:
"""
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
"""
if iterable is None: return []
if maps is None: maps = dict()
if isinstance(iterable, (str, int, float)):
return [maps.get(iterable, iterable)]
else:
x = [maps.get(item, item) for item in _to_gen_(iterable)]
return list(set(x)) if unique else x | 0.003247 |
def changeLocalUserPassword(self, login, user, password):
"""
Parameters:
- login
- user
- password
"""
self.send_changeLocalUserPassword(login, user, password)
self.recv_changeLocalUserPassword() | 0.004329 |