text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def randomize_molecule_low(molecule, manipulations):
"""Return a randomized copy of the molecule, without the nonbond check."""
manipulations = copy.copy(manipulations)
shuffle(manipulations)
coordinates = molecule.coordinates.copy()
for manipulation in manipulations:
manipulation.apply(coordinates)
return molecule.copy_with(coordinates=coordinates) | 0.002604 |
def sheet_to_table(worksheet):
"""Transforma una hoja de libro de Excel en una lista de diccionarios.
Args:
worksheet (Workbook.worksheet): Hoja de cálculo de un archivo XLSX
según los lee `openpyxl`
Returns:
list_of_dicts: Lista de diccionarios, con tantos elementos como
registros incluya la hoja, y con tantas claves por diccionario como
campos tenga la hoja.
"""
headers = []
value_rows = []
for row_i, row in enumerate(worksheet.iter_rows()):
# read the headers and the sheet's maximum width in columns from row 1
if row_i == 0:
for header_cell in row:
if header_cell.value:
headers.append(parse_value(header_cell))
else:
break
continue
# limit the number of cells considered to the number of headers
row_cells = [parse_value(cell) for index, cell in enumerate(row)
if index < len(headers)]
# append the following rows that have at least one non-null field
if any(row_cells):
value_rows.append(row_cells)
# empty rows are not allowed; one marks the end of the sheet
else:
break
# convert the rows into dictionaries with the headers as keys
table = [
# Skip fields with null values (None)
{k: v for (k, v) in zip(headers, row) if v is not None}
for row in value_rows
]
return table | 0.000651 |
def uppercase_chars(string: any) -> str:
"""Return all (and only) the uppercase chars in the given string."""
return ''.join([c if c.isupper() else '' for c in str(string)]) | 0.010582 |
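A quick sanity check for the helper above; this is a self-contained sketch that restates the one-liner so the asserts can run on their own (the coercion of non-string input via `str()` is the part worth noting).

```python
def uppercase_chars(string) -> str:
    """Return all (and only) the uppercase chars in the given string."""
    return ''.join([c if c.isupper() else '' for c in str(string)])

# Non-string inputs are coerced with str() before filtering.
assert uppercase_chars("HelloWorld") == "HW"
assert uppercase_chars(1234) == ""
```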
def get_arp_output_arp_entry_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
interface_type = ET.SubElement(arp_entry, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003086 |
def create_extras(cls: Type[T],
extras: Dict[str, Any]) -> Dict[str, Any]:
"""
Given a dictionary of extra arguments, returns a dictionary of
kwargs that actually are a part of the signature of the cls.from_params
(or cls) method.
"""
subextras: Dict[str, Any] = {}
if hasattr(cls, "from_params"):
from_params_method = cls.from_params # type: ignore
else:
# In some rare cases, we get a registered subclass that does _not_ have a
# from_params method (this happens with Activations, for instance, where we
# register pytorch modules directly). This is a bit of a hack to make those work,
# instead of adding a `from_params` method for them somehow. Then the extras
# in the class constructor are what we are looking for, to pass on.
from_params_method = cls
if takes_kwargs(from_params_method):
# If annotation.params accepts **kwargs, we need to pass them all along.
# For example, `BasicTextFieldEmbedder.from_params` requires a Vocabulary
# object, but `TextFieldEmbedder.from_params` does not.
subextras = extras
else:
# Otherwise, only supply the ones that are actual args; any additional ones
# will cause a TypeError.
subextras = {k: v for k, v in extras.items()
if takes_arg(from_params_method, k)}
return subextras | 0.005634 |
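The helpers `takes_kwargs` and `takes_arg` are not shown in this snippet; below is a minimal sketch of how such signature checks could be written with `inspect`, for illustration only.

```python
import inspect
from typing import Callable


def takes_kwargs(obj: Callable) -> bool:
    # True if the callable declares a **kwargs parameter.
    return any(p.kind == inspect.Parameter.VAR_KEYWORD
               for p in inspect.signature(obj).parameters.values())


def takes_arg(obj: Callable, arg: str) -> bool:
    # True if the callable has a parameter with the given name.
    return arg in inspect.signature(obj).parameters
```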
def create_ppo_optimizer(self, probs, old_probs, value, entropy, beta, epsilon, lr, max_step):
"""
Creates training-specific Tensorflow ops for PPO models.
:param probs: Current policy probabilities
:param old_probs: Past policy probabilities
:param value: Current value estimate
:param beta: Entropy regularization strength
:param entropy: Current policy entropy
:param epsilon: Value for policy-divergence threshold
:param lr: Learning rate
:param max_step: Total number of training steps.
"""
self.returns_holder = tf.placeholder(shape=[None], dtype=tf.float32, name='discounted_rewards')
self.advantage = tf.placeholder(shape=[None, 1], dtype=tf.float32, name='advantages')
self.learning_rate = tf.train.polynomial_decay(lr, self.global_step, max_step, 1e-10, power=1.0)
self.old_value = tf.placeholder(shape=[None], dtype=tf.float32, name='old_value_estimates')
decay_epsilon = tf.train.polynomial_decay(epsilon, self.global_step, max_step, 0.1, power=1.0)
decay_beta = tf.train.polynomial_decay(beta, self.global_step, max_step, 1e-5, power=1.0)
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
clipped_value_estimate = self.old_value + tf.clip_by_value(tf.reduce_sum(value, axis=1) - self.old_value,
- decay_epsilon, decay_epsilon)
v_opt_a = tf.squared_difference(self.returns_holder, tf.reduce_sum(value, axis=1))
v_opt_b = tf.squared_difference(self.returns_holder, clipped_value_estimate)
self.value_loss = tf.reduce_mean(tf.dynamic_partition(tf.maximum(v_opt_a, v_opt_b), self.mask, 2)[1])
# Here we calculate PPO policy loss. In continuous control this is done independently for each action gaussian
# and then averaged together. This provides significantly better performance than treating the probability
# as an average of probabilities, or as a joint probability.
r_theta = tf.exp(probs - old_probs)
p_opt_a = r_theta * self.advantage
p_opt_b = tf.clip_by_value(r_theta, 1.0 - decay_epsilon, 1.0 + decay_epsilon) * self.advantage
self.policy_loss = -tf.reduce_mean(tf.dynamic_partition(tf.minimum(p_opt_a, p_opt_b), self.mask, 2)[1])
self.loss = self.policy_loss + 0.5 * self.value_loss - decay_beta * tf.reduce_mean(
tf.dynamic_partition(entropy, self.mask, 2)[1])
if self.use_curiosity:
self.loss += 10 * (0.2 * self.forward_loss + 0.8 * self.inverse_loss)
self.update_batch = optimizer.minimize(self.loss) | 0.007413 |
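A minimal NumPy sketch of the clipped surrogate objective assembled above, with illustrative numbers (not taken from the snippet), to make the `r_theta` clipping concrete.

```python
import numpy as np

log_probs = np.array([-0.9, -1.2, -0.4])      # current policy log-probabilities
old_log_probs = np.array([-1.0, -1.0, -1.0])  # old policy log-probabilities
advantages = np.array([1.5, -0.5, 2.0])
epsilon = 0.2                                  # clipping threshold

r_theta = np.exp(log_probs - old_log_probs)    # probability ratios
unclipped = r_theta * advantages
clipped = np.clip(r_theta, 1.0 - epsilon, 1.0 + epsilon) * advantages
policy_loss = -np.mean(np.minimum(unclipped, clipped))
```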
def _draw_arrow(
self, x1, y1, x2, y2, Dx, Dy, label="", width=1.0, arrow_curvature=1.0, color="grey",
patchA=None, patchB=None, shrinkA=0, shrinkB=0, arrow_label_size=None):
"""
Draws a slightly curved arrow from (x1,y1) to (x2,y2).
Will allow the given patches at start and end.
"""
# set arrow properties
dist = _sqrt(
((x2 - x1) / float(Dx))**2 + ((y2 - y1) / float(Dy))**2)
arrow_curvature *= 0.075 # standard scale
rad = arrow_curvature / (dist)
tail_width = width
head_width = max(0.5, 2 * width)
head_length = head_width
self.ax.annotate(
"", xy=(x2, y2), xycoords='data', xytext=(x1, y1), textcoords='data',
arrowprops=dict(
arrowstyle='simple,head_length=%f,head_width=%f,tail_width=%f' % (
head_length, head_width, tail_width),
color=color, shrinkA=shrinkA, shrinkB=shrinkB, patchA=patchA, patchB=patchB,
connectionstyle="arc3,rad=%f" % -rad),
zorder=0)
# weighted center position
center = _np.array([0.55 * x1 + 0.45 * x2, 0.55 * y1 + 0.45 * y2])
v = _np.array([x2 - x1, y2 - y1]) # 1->2 vector
vabs = _np.abs(v)
vnorm = _np.array([v[1], -v[0]]) # orthogonal vector
vnorm = _np.divide(vnorm, _np.linalg.norm(vnorm)) # normalize
# cross product to determine the direction into which vnorm points
z = _np.cross(v, vnorm)
if z < 0:
vnorm *= -1
offset = 0.5 * arrow_curvature * \
((vabs[0] / (vabs[0] + vabs[1]))
* Dx + (vabs[1] / (vabs[0] + vabs[1])) * Dy)
ptext = center + offset * vnorm
self.ax.text(
ptext[0], ptext[1], label, size=arrow_label_size,
horizontalalignment='center', verticalalignment='center', zorder=1) | 0.003127 |
def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,
double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None):
"""Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select an action given an observation.
See the top of the file for details.
train: (object, np.array, np.array, object, np.array, np.array) -> np.array
optimize the error in Bellman's equation.
See the top of the file for details.
update_target: () -> ()
copy the parameters from optimized Q function to the target Q function.
See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
"""
if param_noise:
act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,
param_noise_filter_func=param_noise_filter_func)
else:
act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)
with tf.variable_scope(scope, reuse=reuse):
# set up placeholders
obs_t_input = make_obs_ph("obs_t")
act_t_ph = tf.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
obs_tp1_input = make_obs_ph("obs_tp1")
done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")
# q network evaluation
q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/q_func")
# target q network evaluation
q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func")
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/target_q_func")
# q scores for actions which we know were selected in the given state.
q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)
# compute estimate of best possible value starting from state at t + 1
if double_q:
q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True)
q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
else:
q_tp1_best = tf.reduce_max(q_tp1, 1)
q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = U.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
# compute optimization op (potentially with gradient clipping)
if grad_norm_clipping is not None:
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
optimize_expr = optimizer.apply_gradients(gradients)
else:
optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
update_target_expr = tf.group(*update_target_expr)
# Create callable functions
train = U.function(
inputs=[
obs_t_input,
act_t_ph,
rew_t_ph,
obs_tp1_input,
done_mask_ph,
importance_weights_ph
],
outputs=td_error,
updates=[optimize_expr]
)
update_target = U.function([], [], updates=[update_target_expr])
q_values = U.function([obs_t_input], q_t)
return act_f, train, update_target, {'q_values': q_values} | 0.004132 |
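A small NumPy sketch (illustrative values only) of the Double Q-learning target used above: the online network picks the next action, the target network evaluates it, and terminal transitions are masked out.

```python
import numpy as np

q_tp1_online = np.array([[1.0, 2.0, 0.5], [0.3, 0.1, 0.4]])  # online net, next state
q_tp1_target = np.array([[0.9, 1.5, 0.7], [0.2, 0.6, 0.3]])  # target net, next state
rewards = np.array([1.0, 0.0])
done = np.array([0.0, 1.0])  # 1.0 marks a terminal transition
gamma = 0.99

best_actions = np.argmax(q_tp1_online, axis=1)                          # select with online net
q_tp1_best = q_tp1_target[np.arange(len(best_actions)), best_actions]   # evaluate with target net
targets = rewards + gamma * (1.0 - done) * q_tp1_best
```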
def datetime(anon, obj, field, val):
"""
Returns a random datetime
"""
return anon.faker.datetime(field=field) | 0.007937 |
def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:
"""Modifies the ESI by looking up an operation id.
Args:
id: ESI operation id used to look up the raw URL path
path_data: data to format the path with (can be None)
post_data: data to send to ESI
Returns:
ESI data
"""
path = self._get_path_for_op_id(id)
return self.post_path(path, path_data, post_data) | 0.006637 |
def generate_source_catalog(image, **kwargs):
""" Build source catalogs for each chip using photutils.
The catalog returned by this function includes sources found in all chips
of the input image with the positions translated to the coordinate frame
defined by the reference WCS `refwcs`. The sources will be
- identified using photutils segmentation-based source finding code
- ignore any input pixel which has been flagged as 'bad' in the DQ
array, should a DQ array be found in the input HDUList.
- classified as probable cosmic-rays (if enabled) using central_moments
properties of each source, with these sources being removed from the
catalog.
Parameters
----------
image : `~astropy.io.fits.HDUList`
Input image as an astropy.io.fits HDUList.
dqname : str
EXTNAME for the DQ array, if present, in the input image HDUList.
output : bool
Specify whether or not to write out a separate catalog file for all the
sources found in each chip. Default: None (False)
threshold : float, optional
This parameter controls the threshold used for identifying sources in
the image relative to the background RMS.
If None, compute a default value of (background+3*rms(background)).
If threshold < 0.0, use absolute value as scaling factor for default value.
fwhm : float, optional
FWHM (in pixels) of the expected sources from the image, comparable to the
'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to
this value will be identified as sources in the catalog.
Returns
-------
source_cats : dict
Dict of astropy Tables identified by chip number with
each table containing sources from image extension ``('sci', chip)``.
"""
if not isinstance(image, pf.HDUList):
raise ValueError("Input {} not fits.HDUList object".format(image))
dqname = kwargs.get('dqname', 'DQ')
output = kwargs.get('output', None)
# Build source catalog for entire image
source_cats = {}
numSci = countExtn(image, extname='SCI')
for chip in range(numSci):
chip += 1
# find sources in image
if output:
rootname = image[0].header['rootname']
outroot = '{}_sci{}_src'.format(rootname, chip)
kwargs['output'] = outroot
imgarr = image['sci', chip].data
# apply any DQ array, if available
dqmask = None
if image.index_of(dqname):
dqarr = image[dqname, chip].data
# "grow out" regions in DQ mask flagged as saturated by several
# pixels in every direction to prevent the
# source match algorithm from trying to match multiple sources
# from one image to a single source in the
# other or vice-versa.
# Create temp DQ mask containing all pixels flagged with any value EXCEPT 256
non_sat_mask = bitfield_to_boolean_mask(dqarr, ignore_flags=256)
# Create temp DQ mask containing saturated pixels ONLY
sat_mask = bitfield_to_boolean_mask(dqarr, ignore_flags=~256)
# Grow out saturated pixels by a few pixels in every direction
grown_sat_mask = ndimage.binary_dilation(sat_mask, iterations=5)
# combine the two temporary DQ masks into a single composite DQ mask.
dqmask = np.bitwise_or(non_sat_mask, grown_sat_mask)
# dqmask = bitfield_to_boolean_mask(dqarr, good_mask_value=False)
# TODO: <---Remove this old no-sat bit grow line once this
# thing works
seg_tab, segmap = extract_sources(imgarr, dqmask=dqmask, **kwargs)
seg_tab_phot = seg_tab
source_cats[chip] = seg_tab_phot
return source_cats | 0.001304 |
def consume(self, state):
"""
consume new producer state
"""
self.state.append(self.func(state))
return self.state | 0.012987 |
def result(self, timeout=None):
"""Gets the result of the task.
Arguments:
timeout: Maximum seconds to wait for a result before raising a
TimeoutError. If set to None, this will wait forever. If the
queue doesn't store results and timeout is None, this call will
never return.
"""
start = time.time()
while True:
task = self.get_task()
if not task or task.status not in (FINISHED, FAILED):
if not timeout:
continue
elif time.time() - start < timeout:
continue
else:
raise TimeoutError()
if task.status == FAILED:
raise task.result
return task.result | 0.002427 |
def authorize_redirect(self, callback_uri=None, extra_params=None):
"""Redirects the user to obtain OAuth authorization for this service.
Twitter and FriendFeed both require that you register a Callback
URL with your application. You should call this method to log the
user in, and then call get_authenticated_user() in the handler
you registered as your Callback URL to complete the authorization
process.
This method sets a cookie called _oauth_request_token which is
subsequently used (and cleared) in get_authenticated_user for
security purposes.
"""
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback")
http = httpclient.AsyncHTTPClient()
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
http.fetch(self._oauth_request_token_url(callback_uri=callback_uri,
extra_params=extra_params),
self.async_callback(
self._on_request_token,
self._OAUTH_AUTHORIZE_URL,
callback_uri))
else:
http.fetch(self._oauth_request_token_url(), self.async_callback(
self._on_request_token, self._OAUTH_AUTHORIZE_URL, callback_uri)) | 0.003712 |
def requisite_in(self, high):
'''
Extend the data reference with requisite_in arguments
'''
req_in = {'require_in', 'watch_in', 'onfail_in', 'onchanges_in', 'use', 'use_in', 'prereq', 'prereq_in'}
req_in_all = req_in.union({'require', 'watch', 'onfail', 'onfail_stop', 'onchanges'})
extend = {}
errors = []
disabled_reqs = self.opts.get('disabled_requisites', [])
if not isinstance(disabled_reqs, list):
disabled_reqs = [disabled_reqs]
for id_, body in six.iteritems(high):
if not isinstance(body, dict):
continue
for state, run in six.iteritems(body):
if state.startswith('__'):
continue
for arg in run:
if isinstance(arg, dict):
# It is not a function, verify that the arg is a
# requisite in statement
if not arg:
# Empty arg dict
# How did we get this far?
continue
# Split out the components
key = next(iter(arg))
if key not in req_in:
continue
if key in disabled_reqs:
log.warning('The %s requisite has been disabled, Ignoring.', key)
continue
rkey = key.split('_')[0]
items = arg[key]
if isinstance(items, dict):
# Formatted as a single req_in
for _state, name in six.iteritems(items):
# Not a use requisite_in
found = False
if name not in extend:
extend[name] = OrderedDict()
if '.' in _state:
errors.append(
'Invalid requisite in {0}: {1} for '
'{2}, in SLS \'{3}\'. Requisites must '
'not contain dots, did you mean \'{4}\'?'
.format(
rkey,
_state,
name,
body['__sls__'],
_state[:_state.find('.')]
)
)
_state = _state.split('.')[0]
if _state not in extend[name]:
extend[name][_state] = []
extend[name]['__env__'] = body['__env__']
extend[name]['__sls__'] = body['__sls__']
for ind in range(len(extend[name][_state])):
if next(iter(
extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append(
{rkey: [{state: id_}]}
)
if isinstance(items, list):
# Formed as a list of requisite additions
hinges = []
for ind in items:
if not isinstance(ind, dict):
# Malformed req_in
if ind in high:
_ind_high = [x for x
in high[ind]
if not x.startswith('__')]
ind = {_ind_high[0]: ind}
else:
found = False
for _id in iter(high):
for state in [state for state
in iter(high[_id])
if not state.startswith('__')]:
for j in iter(high[_id][state]):
if isinstance(j, dict) and 'name' in j:
if j['name'] == ind:
ind = {state: _id}
found = True
if not found:
continue
if not ind:
continue
pstate = next(iter(ind))
pname = ind[pstate]
if pstate == 'sls':
# Expand hinges here
hinges = find_sls_ids(pname, high)
else:
hinges.append((pname, pstate))
if '.' in pstate:
errors.append(
'Invalid requisite in {0}: {1} for '
'{2}, in SLS \'{3}\'. Requisites must '
'not contain dots, did you mean \'{4}\'?'
.format(
rkey,
pstate,
pname,
body['__sls__'],
pstate[:pstate.find('.')]
)
)
pstate = pstate.split(".")[0]
for tup in hinges:
name, _state = tup
if key == 'prereq_in':
# Add prerequired to origin
if id_ not in extend:
extend[id_] = OrderedDict()
if state not in extend[id_]:
extend[id_][state] = []
extend[id_][state].append(
{'prerequired': [{_state: name}]}
)
if key == 'prereq':
# Add prerequired to prereqs
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if ext_id not in extend:
extend[ext_id] = OrderedDict()
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
extend[ext_id][_req_state].append(
{'prerequired': [{state: id_}]}
)
continue
if key == 'use_in':
# Add the running states args to the
# use_in states
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
ext_args = state_args(ext_id, _state, high)
if ext_id not in extend:
extend[ext_id] = OrderedDict()
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
ignore_args = req_in_all.union(ext_args)
for arg in high[id_][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(six.iterkeys(arg)) == 'name':
continue
if next(six.iterkeys(arg)) == 'names':
continue
extend[ext_id][_req_state].append(arg)
continue
if key == 'use':
# Add the use state's args to the
# running state
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
loc_args = state_args(id_, state, high)
if id_ not in extend:
extend[id_] = OrderedDict()
if state not in extend[id_]:
extend[id_][state] = []
ignore_args = req_in_all.union(loc_args)
for arg in high[ext_id][_req_state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(six.iterkeys(arg)) == 'name':
continue
if next(six.iterkeys(arg)) == 'names':
continue
extend[id_][state].append(arg)
continue
found = False
if name not in extend:
extend[name] = OrderedDict()
if _state not in extend[name]:
extend[name][_state] = []
extend[name]['__env__'] = body['__env__']
extend[name]['__sls__'] = body['__sls__']
for ind in range(len(extend[name][_state])):
if next(iter(
extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append(
{rkey: [{state: id_}]}
)
high['__extend__'] = []
for key, val in six.iteritems(extend):
high['__extend__'].append({key: val})
req_in_high, req_in_errors = self.reconcile_extend(high)
errors.extend(req_in_errors)
return req_in_high, errors | 0.00225 |
def slackpkg_update(self):
"""This replace slackpkg ChangeLog.txt file with new
from Slackware official mirrors after update distribution.
"""
NEW_ChangeLog_txt = URL(mirrors("ChangeLog.txt", "")).reading()
if os.path.isfile(self.meta.slackpkg_lib_path + "ChangeLog.txt.old"):
os.remove(self.meta.slackpkg_lib_path + "ChangeLog.txt.old")
if os.path.isfile(self.meta.slackpkg_lib_path + "ChangeLog.txt"):
shutil.copy2(self.meta.slackpkg_lib_path + "ChangeLog.txt",
self.meta.slackpkg_lib_path + "ChangeLog.txt.old")
os.remove(self.meta.slackpkg_lib_path + "ChangeLog.txt")
with open(self.meta.slackpkg_lib_path + "ChangeLog.txt", "w") as log:
log.write(NEW_ChangeLog_txt)
log.close() | 0.00243 |
def _sample_item(self, **kwargs):
"""Sample an item from the pool according to the instrumental
distribution
"""
t = self.t_
# Update instrumental distribution
self._calc_inst_pmf()
if self.record_inst_hist:
inst_pmf = self._inst_pmf[:,t]
else:
inst_pmf = self._inst_pmf
# Sample label and record weight
loc, stratum_idx = self.strata.sample(pmf = inst_pmf)
weight = self.strata.weights_[stratum_idx]/inst_pmf[stratum_idx]
return loc, weight, {'stratum': stratum_idx} | 0.00846 |
def upload(self, local_path, remote_url):
"""Copy a local file to an S3 location."""
bucket, key = _parse_url(remote_url)
with open(local_path, 'rb') as fp:
return self.call("PutObject", bucket=bucket, key=key, body=fp) | 0.007813 |
def GetParentFileEntry(self):
"""Retrieves the parent file entry.
Returns:
OSFileEntry: parent file entry or None if not available.
"""
location = getattr(self.path_spec, 'location', None)
if location is None:
return None
parent_location = self._file_system.DirnamePath(location)
if parent_location is None:
return None
if parent_location == '':
parent_location = self._file_system.PATH_SEPARATOR
path_spec = os_path_spec.OSPathSpec(location=parent_location)
return OSFileEntry(self._resolver_context, self._file_system, path_spec) | 0.006678 |
def user_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/users#create-user"
api_path = "/api/v2/users.json"
return self.call(api_path, method="POST", data=data, **kwargs) | 0.00885 |
def TNE_metric(bpmn_graph):
"""
Returns the value of the TNE metric (Total Number of Events of the Model)
for the BPMNDiagramGraph instance.
:param bpmn_graph: an instance of BpmnDiagramGraph representing BPMN model.
"""
events_counts = get_events_counts(bpmn_graph)
return sum(
[count for _, count in events_counts.items()]
) | 0.00271 |
def get_clients_per_page(self, per_page=1000, page=1, params=None):
"""
Get clients per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
"""
return self._get_resource_per_page(resource=CLIENTS, per_page=per_page, page=page, params=params) | 0.007264 |
def val_where(cond, tval, fval):
"""Like tf.where but works on namedtuples."""
if isinstance(tval, tf.Tensor):
return tf.where(cond, tval, fval)
elif isinstance(tval, tuple):
cls = type(tval)
return cls(*(val_where(cond, t, f) for t, f in zip(tval, fval)))
else:
raise TypeError('val_where does not support type {}'.format(type(tval))) | 0.015974 |
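The same recursion over namedtuple fields, sketched with NumPy instead of TensorFlow so it can run standalone; this is an analogue (`np_val_where` is a made-up name), not the snippet's API.

```python
import numpy as np
from collections import namedtuple


def np_val_where(cond, tval, fval):
    # Recurse into (named)tuples field by field; apply np.where at the leaves.
    if isinstance(tval, np.ndarray):
        return np.where(cond, tval, fval)
    elif isinstance(tval, tuple):
        cls = type(tval)
        return cls(*(np_val_where(cond, t, f) for t, f in zip(tval, fval)))
    else:
        raise TypeError('unsupported type: {}'.format(type(tval)))


State = namedtuple('State', ['x', 'y'])
a = State(x=np.array([1, 2]), y=np.array([3, 4]))
b = State(x=np.array([9, 9]), y=np.array([9, 9]))
picked = np_val_where(np.array([True, False]), a, b)  # State(x=[1, 9], y=[3, 9])
```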
def fit(self, X, design, nuisance=None, scan_onsets=None, coords=None,
inten=None):
"""Compute the Bayesian RSA
Parameters
----------
X: numpy array, shape=[time_points, voxels]
If you have multiple scans of the same participants that you
want to analyze together, you should concatenate them along
the time dimension after proper preprocessing (e.g. spatial
alignment), and specify the onsets of each scan in scan_onsets.
design: numpy array, shape=[time_points, conditions]
This is the design matrix. It should only include the hypothetic
response for task conditions. You should not include
regressors for a DC component or motion parameters, unless you
want to estimate their pattern similarity with response patterns
to your task conditions. If you want to model head motion,
you should include them in nuisance regressors.
If you have multiple run, the design matrix
of all runs should be concatenated along the time dimension,
with every column for one condition across runs.
For example, if you have 3 runs of experiment of one participant,
with each run lasting 200 TR. And you have 4 conditions,
then design should be a 600 x 4 numpy array.
nuisance: optional, numpy array, shape=[time_points, nuisance_factors]
The responses to these regressors will be marginalized out from
each voxel, which means they are considered, but won't be assumed
to share the same pseudo-SNR map with the design matrix.
Therefore, the pseudo-SNR map will only reflect the
relative contribution of design matrix to each voxel.
You can provide time courses such as those for head motion
to this parameter.
Note that if auto_nuisance is set to True, the first
n_nureg principal components of residual (excluding the response
to the design matrix and the user-provided nuisance regressors
and a constant baseline)
will be included as additional nuisance regressor after the
first round of fitting.
If auto_nuisance is set to False, the nuisance regressors supplied
by the users together with DC components will be used as
nuisance time series.
Please do not include time course of constant baseline in nuisance.
scan_onsets: optional, numpy array, shape=[runs,]
This specifies the indices of X which correspond to the onset
of each scanning run. For example, if you have two experimental
runs of the same subject, each with 100 TRs, then scan_onsets
should be [0,100].
If you do not provide the argument, the program will
assume all data are from the same run.
The effect of them is to make the inverse matrix
of the temporal covariance matrix of noise block-diagonal.
coords: optional, numpy array, shape=[voxels,3]
This is the coordinate of each voxel,
used for implementing Gaussian Process prior.
inten: optional, numpy array, shape=[voxel,]
This is the average fMRI intensity in each voxel.
It should be calculated from your data without any preprocessing
such as z-scoring. Because it should reflect
whether a voxel is bright (grey matter) or dark (white matter).
A Gaussian Process kernel defined on both coordinate and intensity
imposes a smoothness prior on adjacent voxels
but with the same tissue type. The Gaussian Process
is experimental and has shown good performance on
some visual datasets.
"""
logger.info('Running Bayesian RSA')
self.random_state_ = check_random_state(self.random_state)
# setting random seed
logger.debug('RandState set to {}'.format(self.random_state_))
assert not self.GP_inten or (self.GP_inten and self.GP_space),\
'You must specify GP_space as True '\
'if you want to use GP_inten'
# Check input data
assert_all_finite(X)
assert X.ndim == 2, 'The data should be 2-dimensional ndarray'
assert np.all(np.std(X, axis=0) > 0),\
'The time courses of some voxels do not change at all.'\
' Please make sure all voxels are within the brain'
# check design matrix
assert_all_finite(design)
assert design.ndim == 2,\
'The design matrix should be 2-dimensional ndarray'
assert np.linalg.matrix_rank(design) == design.shape[1], \
'Your design matrix has rank smaller than the number of'\
' columns. Some columns can be explained by linear '\
'combination of other columns. Please check your design matrix.'
assert np.size(design, axis=0) == np.size(X, axis=0),\
'Design matrix and data do not '\
'have the same number of time points.'
assert self.rank is None or self.rank <= design.shape[1],\
'Your design matrix has fewer columns than the rank you set'
# Check the nuisance regressors.
if nuisance is not None:
assert_all_finite(nuisance)
assert nuisance.ndim == 2,\
'The nuisance regressor should be 2-dimensional ndarray'
assert np.linalg.matrix_rank(nuisance) == nuisance.shape[1], \
'The nuisance regressor has rank smaller than the number of '\
'columns. Some columns can be explained by linear '\
'combination of other columns. Please check your nuisance '\
'regressors.'
assert np.size(nuisance, axis=0) == np.size(X, axis=0), \
'Nuisance regressor and data do not have the same '\
'number of time points.'
# check scan_onsets validity
assert scan_onsets is None or\
(np.max(scan_onsets) <= X.shape[0] and np.min(scan_onsets) >= 0),\
'Some scan onsets provided are out of the range of time points.'
# check the size of coords and inten
if self.GP_space:
logger.info('Fitting with Gaussian Process prior on log(SNR)')
assert coords is not None and coords.shape[0] == X.shape[1],\
'Spatial smoothness was requested by setting GP_space. '\
'But the voxel number of coords does not match that of '\
'data X, or voxel coordinates are not provided. '\
'Please make sure that coords is in the shape of '\
'[n_voxel x 3].'
assert coords.ndim == 2,\
'The coordinate matrix should be a 2-d array'
if self.GP_inten:
assert inten is not None and inten.shape[0] == X.shape[1],\
'The voxel number of intensity does not '\
'match that of data X, or intensity not provided.'
assert np.var(inten) > 0,\
'All voxels have the same intensity.'
if (not self.GP_space and coords is not None) or\
(not self.GP_inten and inten is not None):
logger.warning('Coordinates or image intensity provided'
' but GP_space or GP_inten is not set '
'to True. The coordinates or intensity are'
' ignored.')
# Estimate the number of necessary nuisance regressors
if self.auto_nuisance:
if self.n_nureg is None:
logger.info('number of nuisance regressors is determined '
'automatically.')
run_TRs, n_runs = self._run_TR_from_scan_onsets(
X.shape[0], scan_onsets)
ts_dc = self._gen_legendre(run_TRs, [0])
_, ts_base, _ = self._merge_DC_to_base(
ts_dc, nuisance, False)
ts_reg = np.concatenate((ts_base, design), axis=1)
beta_hat = np.linalg.lstsq(ts_reg, X)[0]
residuals = X - np.dot(ts_reg, beta_hat)
self.n_nureg_ = np.max(
[1, Ncomp_SVHT_MG_DLD_approx(residuals,
self.nureg_zscore)])
logger.info('Use {} nuisance regressors to model the spatial '
'correlation in noise.'.format(self.n_nureg_))
self.n_nureg_ = np.int32(self.n_nureg_)
else:
self.n_nureg_ = self.n_nureg
self.n_nureg_ = np.int32(self.n_nureg_)
# Run Bayesian RSA
# Note that we have a change of notation here. Within _fit_RSA_UV,
# design matrix is named X and data is named Y, to reflect the
# generative model that data Y is generated by mixing the response
# X to experiment conditions and other neural activity.
# However, in fit(), we keep the tradition of scikit-learn that
# X is the input data to fit and y, a reserved name not used, is
# the label to map to from X.
if not self.GP_space:
# If GP_space is not requested, then the model is fitted
# without imposing any Gaussian Process prior on log(SNR^2)
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self._beta_latent_, self.sigma_, self.rho_, _, _, _,\
self.X0_ = self._fit_RSA_UV(X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets)
elif not self.GP_inten:
# If GP_space is requested, but GP_inten is not, a GP prior
# based on spatial locations of voxels will be imposed.
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self._beta_latent_, self.sigma_, self.rho_, \
self.lGPspace_, self.bGP_, _, \
self.X0_ = self._fit_RSA_UV(
X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets, coords=coords)
else:
# If both self.GP_space and self.GP_inten are True,
# a GP prior based on both location and intensity is imposed.
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self._beta_latent_, self.sigma_, self.rho_, \
self.lGPspace_, self.bGP_, self.lGPinten_, self.X0_ = \
self._fit_RSA_UV(X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets,
coords=coords, inten=inten)
self.C_ = utils.cov2corr(self.U_)
self.design_ = design.copy()
self._rho_design_, self._sigma2_design_ = \
self._est_AR1(self.design_, same_para=True)
self._rho_X0_, self._sigma2_X0_ = self._est_AR1(self.X0_)
# AR(1) parameters of the design matrix and nuisance regressors,
# which will be used in transform or score.
# Finally, we fit a null model with the same setting except
# that there is no response to X
self.beta0_null_, self.sigma_null_, self.rho_null_, \
self.X0_null_ = self._fit_null(Y=X, X_base=nuisance,
scan_onsets=scan_onsets)
self._rho_X0_null_, self._sigma2_X0_null_ =\
self._est_AR1(self.X0_null_)
return self | 0.000258 |
def _get_traceback_no_io():
"""
Return a version of L{traceback} that doesn't do I/O.
"""
try:
module = load_module(str("_traceback_no_io"), traceback)
except NotImplementedError:
# Can't fix the I/O problem, oh well:
return traceback
class FakeLineCache(object):
def checkcache(self, *args, **kwargs):
None
def getline(self, *args, **kwargs):
return ""
def lazycache(self, *args, **kwargs):
return None
module.linecache = FakeLineCache()
return module | 0.001748 |
def byPromissor(self, ID):
""" Returns all directions to a promissor. """
res = []
for direction in self.table:
if ID in direction[1]:
res.append(direction)
return res | 0.008811 |
def run(self):
'''Run until there are no events to be processed.'''
# We left-append rather than emit (right-append) because some message
# may have been already queued for execution before the director runs.
global_event_queue.appendleft((INITIATE, self, (), {}))
while global_event_queue:
self.process_event(global_event_queue.popleft()) | 0.005115 |
def delete(self, key):
'''Removes the object named by `key` in `service`.
Args:
key: Key naming the object to remove.
'''
key = self._service_key(key)
self._service_ops['delete'](key) | 0.004762 |
def off_command_control(self, val_id):
"""
Parameters
----------
val_id : str
Returns
-------
requests.Response
"""
data = "control,controlId=0|" + val_id
return self._basic_post(url='commandControlPublic', data=data) | 0.006711 |
def _get_rs_id(variant, rs_map, variant_type):
"""
Given a variant dict, return unambiguous RS ID
TODO
Some sequence alterations appear to have mappings to dbsnp's notation
for example,
reference allele: TTTTTTTTTTTTTT
variant allele: TTTTTTTTTTTTTTT
Is theoretically the same as -/T, we should clarify with UDP and then add
functionality to map this notation to the more common -/T
:param variant:
:param rs_map:
:param type: snp or indel
:return:
"""
rs_id = None
if variant_type == 'snp':
variant_key = "{0}-{1}".format(variant['chromosome'], variant['position'])
if variant_key in rs_map:
snp_candidates = [
rs_dict for rs_dict in rs_map[variant_key]
if rs_dict['type'] == 'snp']
if len(snp_candidates) == 1:
rs_id = snp_candidates[0]["rs_id"]
elif variant_type == 'indel':
rs_candidates = []
variant_key = "{0}-{1}".format(variant['chromosome'], variant['position'])
if variant_key in rs_map:
snp_candidates = [
rs_dict for rs_dict in rs_map[variant_key]
if rs_dict['type'] == 'in-del']
for candidate in snp_candidates:
alleles = candidate['alleles'].split('/')
if variant['reference_allele'] in alleles \
and variant['variant_allele'] in alleles:
rs_candidates.append(candidate['rs_id'])
if len(rs_candidates) == 1:
rs_id = rs_candidates[0]
elif len(rs_candidates) > 1:
LOG.info(
"ambiguous rs mapping for: %s\ncandidate ids: %s",
variant, rs_candidates)
else:
LOG.info(
"rs at coordinate but no match found"
" for variant %s\n candidate ids: %s",
variant, rs_map[variant_key])
else:
LOG.warning("type: %s unsupported", variant_type)
return rs_id | 0.002208 |
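The structure of `rs_map` is not included in the snippet; the following literal is an assumed, illustrative shape inferred from the lookups above (keys are "chromosome-position" strings, values are lists of candidate dicts), and the real data may differ.

```python
# Hypothetical rs_map entry, inferred from the code above; not real data.
rs_map = {
    "1-12345": [
        {"type": "snp", "rs_id": "rs100", "alleles": "A/G"},
        {"type": "in-del", "rs_id": "rs200", "alleles": "-/T"},
    ],
}
variant = {"chromosome": "1", "position": 12345,
           "reference_allele": "-", "variant_allele": "T"}
# With variant_type='indel', the lookup above would match the single
# 'in-del' candidate whose alleles contain both '-' and 'T', i.e. "rs200".
```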
def attributes(self):
"""Return sync attributes."""
attr = {
'name': self.name,
'id': self.sync_id,
'network_id': self.network_id,
'serial': self.serial,
'status': self.status,
'region': self.region,
'region_id': self.region_id,
}
return attr | 0.005587 |
def randomMails(self, count=1):
"""
Return random e-mails.
:rtype: list
:returns: list of random e-mails
"""
self.check_count(count)
random_nicks = self.rn.random_nicks(count=count)
random_domains = sample(self.dmails, count)
return [
nick.lower() + "@" + domain for nick, domain in zip(random_nicks,
random_domains)
] | 0.004193 |
def do_keyframes_overlap(self):
"""Checks for keyframs timing overlap.
Returns the name of the first keyframs that overlapped."""
skl = self.sorted_key_list()
for i in range(len(skl)-1):
this_time = self.dct[skl[i]]['__abs_time__']
next_time = self.dct[skl[i+1]]['__abs_time__']
if abs(next_time-this_time) < 1e-6:
# key frame times overlap
return skl[i]
# Return None if all passed
return None | 0.003914 |
def includeme(config):
"""
:type config: :class:`pyramid.config.Configurator`
"""
settings = config.registry.settings
swagger_versions = get_swagger_versions(settings)
# for rendering /swagger.yaml
config.add_renderer(
'yaml', 'pyramid_swagger.api.YamlRendererFactory',
)
# Add the SwaggerSchema to settings to make it available to the validation
# tween and `register_api_doc_endpoints`
settings['pyramid_swagger.schema12'] = None
settings['pyramid_swagger.schema20'] = None
# Store under two keys so that 1.2 and 2.0 can co-exist.
if SWAGGER_12 in swagger_versions:
settings['pyramid_swagger.schema12'] = get_swagger_schema(settings)
if SWAGGER_20 in swagger_versions:
settings['pyramid_swagger.schema20'] = get_swagger_spec(settings)
config.add_tween(
"pyramid_swagger.tween.validation_tween_factory",
under=pyramid.tweens.EXCVIEW
)
config.add_renderer('pyramid_swagger', PyramidSwaggerRendererFactory())
if settings.get('pyramid_swagger.enable_api_doc_views', True):
if SWAGGER_12 in swagger_versions:
register_api_doc_endpoints(
config,
settings['pyramid_swagger.schema12'].get_api_doc_endpoints())
if SWAGGER_20 in swagger_versions:
register_api_doc_endpoints(
config,
build_swagger_20_swagger_schema_views(config),
base_path=settings.get('pyramid_swagger.base_path_api_docs', '')) | 0.001306 |
def _get_cygwin_path(self, windows_path):
"""
Convert a Windows path to a Cygwin path
"""
conv_cmd = [os.path.join(self._cygwin_bin_location, "cygpath.exe"),
"-u", windows_path]
process = Popen(conv_cmd,
stdout=PIPE, stderr=PIPE, shell=False)
out, err = process.communicate()
if err:
print(err)
raise Exception(err)
return out.strip() | 0.004357 |
def get_user(self, name):
"""Get the user for the given name
:param name: The username
:type name: :class:`str`
:returns: the user instance
:rtype: :class:`models.User`
:raises: None
"""
r = self.kraken_request('GET', 'user/' + name)
return models.User.wrap_get_user(r) | 0.005848 |
def del_downtime(self, downtime_id):
"""
Delete a downtime in this object
:param downtime_id: id of the downtime to delete
:type downtime_id: int
:return: None
"""
if downtime_id in self.downtimes:
self.downtimes[downtime_id].can_be_deleted = True
del self.downtimes[downtime_id] | 0.005556 |
def update_keys(self):
"""Updates the Google API key with the text value"""
from ...main import add_api_key
add_api_key("reddit_api_user_agent", self.reddit_api_user_agent.get())
add_api_key("reddit_api_client_id", self.reddit_api_client_id.get())
add_api_key("reddit_api_client_secret", self.reddit_api_client_secret.get()) | 0.008242 |
def has_comment(src):
"""Indicate whether an input line has (i.e. ends in, or is) a comment.
This uses tokenize, so it can distinguish comments from # inside strings.
Parameters
----------
src : string
A single line input string.
Returns
-------
Boolean: True if source has a comment.
"""
readline = StringIO(src).readline
toktypes = set()
try:
for t in tokenize.generate_tokens(readline):
toktypes.add(t[0])
except tokenize.TokenError:
pass
return(tokenize.COMMENT in toktypes) | 0.006861 |
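A self-contained usage check; this restates the function (using the standard `io.StringIO`) so the asserts can run standalone, and shows that a `#` inside a string is not counted as a comment.

```python
import tokenize
from io import StringIO


def has_comment(src):
    # Tokenize a single source line and report whether a COMMENT token appears.
    toktypes = set()
    try:
        for tok in tokenize.generate_tokens(StringIO(src).readline):
            toktypes.add(tok[0])
    except tokenize.TokenError:
        pass
    return tokenize.COMMENT in toktypes


assert has_comment("x = 1  # set x")
assert not has_comment("s = 'not a # comment'")
```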
def derive(self, srcfile=None, request=None, outfile=None):
"""Do sequence of manipulations for IIIF to derive output image.
Named argments:
srcfile -- source image file
request -- IIIFRequest object with parsed parameters
outfile -- output image file. If set the the output file will be
written to that file, otherwise a new temporary file
will be created and outfile set to its location.
See order in spec: http://www-sul.stanford.edu/iiif/image-api/#order
Region THEN Size THEN Rotation THEN Quality THEN Format
Typical use:
r = IIIFRequest(region=...)
m = IIIFManipulator()
try:
m.derive(srcfile='a.jpg',request=r)
# .. serve m.outfile
except IIIFError as e:
# ..
finally:
m.cleanup() #removes temp m.outfile
"""
# set if specified
if (srcfile is not None):
self.srcfile = srcfile
if (request is not None):
self.request = request
if (outfile is not None):
self.outfile = outfile
if (self.outfile is not None):
# create path to output dir if necessary
dir = os.path.dirname(self.outfile)
if (not os.path.exists(dir)):
os.makedirs(dir)
#
self.do_first()
(x, y, w, h) = self.region_to_apply()
self.do_region(x, y, w, h)
(w, h) = self.size_to_apply()
self.do_size(w, h)
(mirror, rot) = self.rotation_to_apply(no_mirror=True)
self.do_rotation(mirror, rot)
(quality) = self.quality_to_apply()
self.do_quality(quality)
self.do_format(self.request.format)
self.do_last()
return(self.outfile, self.mime_type) | 0.001066 |
async def start(self, *args, **kwargs):
"""|coro|
A shorthand coroutine for :meth:`login` + :meth:`connect`.
"""
bot = kwargs.pop('bot', True)
reconnect = kwargs.pop('reconnect', True)
await self.login(*args, bot=bot)
await self.connect(reconnect=reconnect) | 0.006349 |
def map_keys_deep(f, dct):
"""
Implementation of map that recurses. This tests the same keys at every level of dict and in lists
:param f: 2-ary function expecting a key and value and returns a modified key
:param dct: Dict for deep processing
:return: Modified dct with matching props mapped
"""
return _map_deep(lambda k, v: [f(k, v), v], dct) | 0.008043 |
def repo_groups(self, project_key, repo_key, limit=99999, filter_str=None):
"""
Get repository Groups
:param project_key:
:param repo_key:
:param limit: OPTIONAL: The limit of the number of groups to return, this may be restricted by
fixed system limits. Default by built-in method: 99999
:param filter_str: OPTIONAL: group filter string
:return:
"""
url = 'rest/api/1.0/projects/{project_key}/repos/{repo_key}/permissions/groups'.format(
project_key=project_key,
repo_key=repo_key)
params = {}
if limit:
params['limit'] = limit
if filter_str:
params['filter'] = filter_str
return (self.get(url, params=params) or {}).get('values') | 0.006165 |
def rest_put(url, data, timeout):
'''Call rest put method'''
try:
response = requests.put(url, headers={'Accept': 'application/json', 'Content-Type': 'application/json'},\
data=data, timeout=timeout)
return response
except Exception as e:
print('Get exception {0} when sending http put to url {1}'.format(str(e), url))
return None | 0.009852 |
def to_sections(idl_parsed):
"""
Iterates through elements in idl_parsed list and returns a list of section dicts.
Currently elements of type "comment", "enum", "struct", and "interface" are processed.
:Parameters:
idl_parsed
Barrister parsed IDL
"""
sections = []
for entity in idl_parsed:
if entity["type"] == "comment":
sections.append(to_section(entity["value"], ""))
elif entity["type"] == "enum":
sections.append(parse_enum(entity))
elif entity["type"] == "struct":
sections.append(parse_struct(entity))
elif entity["type"] == "interface":
sections.extend(parse_interface(entity))
return sections | 0.004115 |
def rlmb_ppo_quick():
"""Base setting but quicker with only 2 epochs."""
hparams = rlmb_ppo_base()
hparams.epochs = 2
hparams.model_train_steps = 25000
hparams.ppo_epochs_num = 700
hparams.ppo_epoch_length = 50
return hparams | 0.033473 |
def histogram(data):
"""Returns a histogram of your data.
:param data: The data to histogram
:type data: list[object]
:return: The histogram
:rtype: dict[object, int]
"""
ret = {}
for datum in data:
if datum in ret:
ret[datum] += 1
else:
ret[datum] = 1
return ret | 0.002941 |
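A short usage check for the helper above (roughly equivalent to `collections.Counter` for hashable items); restated here so the assert runs standalone.

```python
def histogram(data):
    # Count occurrences of each distinct item.
    ret = {}
    for datum in data:
        ret[datum] = ret.get(datum, 0) + 1
    return ret


assert histogram(["a", "b", "a", "c", "a"]) == {"a": 3, "b": 1, "c": 1}
```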
def get_kvlayer_stream_ids_by_doc_id(client, doc_id):
'''Retrieve stream ids from :mod:`kvlayer`.
Namely, it returns an iterator over all stream ids with the given
docid. The docid should be an md5 hash of the document's abs_url.
:param client: kvlayer client object
:type client: :class:`kvlayer.AbstractStorage`
:param str doc_id: doc id of documents to retrieve
:return: generator of str
'''
if client is None:
client = kvlayer.client()
client.setup_namespace(STREAM_ITEM_TABLE_DEFS,
STREAM_ITEM_VALUE_DEFS)
doc_id_range = make_doc_id_range(doc_id)
for k in client.scan_keys(STREAM_ITEMS_TABLE, doc_id_range):
yield kvlayer_key_to_stream_id(k) | 0.001339 |
def internal_get_next_statement_targets(dbg, seq, thread_id, frame_id):
''' gets the valid line numbers for use with set next statement '''
try:
frame = dbg.find_frame(thread_id, frame_id)
if frame is not None:
code = frame.f_code
xml = "<xml>"
if hasattr(code, 'co_lnotab'):
lineno = code.co_firstlineno
lnotab = code.co_lnotab
for i in itertools.islice(lnotab, 1, len(lnotab), 2):
if isinstance(i, int):
lineno = lineno + i
else:
# in python 2 elements in co_lnotab are of type str
lineno = lineno + ord(i)
xml += "<line>%d</line>" % (lineno,)
else:
xml += "<line>%d</line>" % (frame.f_lineno,)
del frame
xml += "</xml>"
cmd = dbg.cmd_factory.make_get_next_statement_targets_message(seq, xml)
dbg.writer.add_command(cmd)
else:
cmd = dbg.cmd_factory.make_error_message(seq, "Frame not found: %s from thread: %s" % (frame_id, thread_id))
dbg.writer.add_command(cmd)
except:
cmd = dbg.cmd_factory.make_error_message(seq, "Error resolving frame: %s from thread: %s" % (frame_id, thread_id))
dbg.writer.add_command(cmd) | 0.003613 |
def subscribe(self, code_list, subtype_list, is_first_push=True):
"""
Subscribe to the real-time data you need by specifying the stock codes and the data types to subscribe to.
Note: len(code_list) * number of subscribed K-line types <= 100
:param code_list: list of stock codes to subscribe to
:param subtype_list: list of data types to subscribe to, see SubType
:param is_first_push: whether to push data immediately after the subscription succeeds
:return: (ret, err_message)
ret == RET_OK err_message is None
ret != RET_OK err_message is an error description string
:example:
.. code:: python
from futuquant import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
print(quote_ctx.subscribe(['HK.00700'], [SubType.QUOTE]))
quote_ctx.close()
"""
return self._subscribe_impl(code_list, subtype_list, is_first_push) | 0.002653 |
def _insert_base_path(self):
"""If the "base" path is set in the paths section of the config, insert
it into the python path.
"""
if config.BASE in self.paths:
sys.path.insert(0, self.paths[config.BASE]) | 0.008065 |
def copyFile(src, dest):
"""Copies a source file to a destination whose path may not yet exist.
Keyword arguments:
src -- Source path to a file (string)
dest -- Path for destination file (also a string)
"""
#Src Exists?
try:
if os.path.isfile(src):
dpath, dfile = os.path.split(dest)
if not os.path.isdir(dpath):
os.makedirs(dpath)
if not os.path.exists(dest):
touch(dest)
try:
shutil.copy2(src, dest)
# eg. src and dest are the same file
except shutil.Error as e:
logging.exception('Error: %s' % e)
# eg. source or destination doesn't exist
except IOError as e:
logging.exception('Error: %s' % e.strerror)
except:
logging.exception('Error: src to copy does not exist.') | 0.003344 |
def _read(self, fp, fpname):
"""A direct copy of the py2.4 version of the super class's _read method
to assure it uses ordered dicts. Had to change one line to make it work.
Future versions have this fixed, but in fact it's quite embarrassing for the
guys not to have done it right in the first place!
Removed big comments to make it more compact.
Made sure it ignores initial whitespace as git uses tabs"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
is_multi_line = False
e = None # None, or an exception
def string_decode(v):
if v[-1] == '\\':
v = v[:-1]
# end cut trailing escapes to prevent decode error
if PY3:
return v.encode(defenc).decode('unicode_escape')
else:
return v.decode('string_escape')
# end
# end
while True:
# we assume to read binary !
line = fp.readline().decode(defenc)
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or self.re_comment.match(line):
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# is it a section header?
mo = self.SECTCRE.match(line.strip())
if not is_multi_line and mo:
sectname = mo.group('header').strip()
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == cp.DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict((('__name__', sectname),))
self._sections[sectname] = cursect
self._proxies[sectname] = None
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise cp.MissingSectionHeaderError(fpname, lineno, line)
# an option line?
elif not is_multi_line:
mo = self.OPTCRE.match(line)
if mo:
# We might just have handled the last line, which could contain a quotation we want to remove
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval and not optval.strip().startswith('"'):
pos = optval.find(';')
if pos != -1 and optval[pos - 1].isspace():
optval = optval[:pos]
optval = optval.strip()
if optval == '""':
optval = ''
# end handle empty string
optname = self.optionxform(optname.rstrip())
if len(optval) > 1 and optval[0] == '"' and optval[-1] != '"':
is_multi_line = True
optval = string_decode(optval[1:])
# end handle multi-line
cursect[optname] = optval
else:
# check if it's an option with no value - it's just ignored by git
if not self.OPTVALUEONLY.match(line):
if not e:
e = cp.ParsingError(fpname)
e.append(lineno, repr(line))
continue
else:
line = line.rstrip()
if line.endswith('"'):
is_multi_line = False
line = line[:-1]
# end handle quotations
cursect[optname] += string_decode(line)
# END parse section or option
# END while reading
# if any parsing errors occurred, raise an exception
if e:
raise e | 0.00192 |
def GetUsers(alias=None):
"""Gets all of users assigned to a given account.
https://t3n.zendesk.com/entries/22427662-GetUsers
:param alias: short code for a particular account. If none will use account's default alias
"""
if alias is None: alias = clc.v1.Account.GetAlias()
r = clc.v1.API.Call('post','User/GetUsers',{'AccountAlias': alias})
if int(r['StatusCode']) == 0:
return(r['Users']) | 0.040964 |
def link_to_storage(self, sensor_log):
"""Attach this DataStreamer to an underlying SensorLog.
Calling this method is required if you want to use this DataStreamer
to generate reports from the underlying data in the SensorLog.
You can call it multiple times and it will unlink itself from any
previous SensorLog each time.
Args:
sensor_log (SensorLog): Actually create a StreamWalker to go along with this
streamer so that we can check if it's triggered.
"""
if self.walker is not None:
self._sensor_log.destroy_walker(self.walker)
self.walker = None
self.walker = sensor_log.create_walker(self.selector)
self._sensor_log = sensor_log | 0.003891 |
def establish_connection(self):
"""Establish connection to the AMQP broker."""
conninfo = self.connection
if not conninfo.hostname:
raise KeyError("Missing hostname for AMQP connection.")
if conninfo.userid is None:
raise KeyError("Missing user id for AMQP connection.")
if conninfo.password is None:
raise KeyError("Missing password for AMQP connection.")
if not conninfo.port:
conninfo.port = self.default_port
conn = amqp.Connection(host=conninfo.hostname,
port=conninfo.port,
userid=conninfo.userid,
password=conninfo.password,
virtual_host=conninfo.virtual_host)
return conn | 0.002454 |
def _cleanup(self) -> None:
"""Cleanup unused transports."""
if self._cleanup_handle:
self._cleanup_handle.cancel()
now = self._loop.time()
timeout = self._keepalive_timeout
if self._conns:
connections = {}
deadline = now - timeout
for key, conns in self._conns.items():
alive = []
for proto, use_time in conns:
if proto.is_connected():
if use_time - deadline < 0:
transport = proto.transport
proto.close()
if (key.is_ssl and
not self._cleanup_closed_disabled):
self._cleanup_closed_transports.append(
transport)
else:
alive.append((proto, use_time))
if alive:
connections[key] = alive
self._conns = connections
if self._conns:
self._cleanup_handle = helpers.weakref_handle(
self, '_cleanup', timeout, self._loop) | 0.001658 |
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
timeout=180, memory=None,
max_query_area_size=50*1000*50*1000,
custom_osm_filter=None):
"""
Get DataFrames of OSM data in a bounding box.
Parameters
----------
lat_min : float
southern latitude of bounding box
lng_min : float
eastern longitude of bounding box
lat_max : float
northern latitude of bounding box
lng_max : float
western longitude of bounding box
network_type : {'walk', 'drive'}, optional
Specify the network type where value of 'walk' includes roadways
where pedestrians are allowed and pedestrian pathways and 'drive'
includes driveable roadways.
timeout : int
the timeout interval for requests and to pass to Overpass API
memory : int
server memory allocation size for the query, in bytes. If none,
server will use its default allocation size
max_query_area_size : float
max area for any part of the geometry, in the units the geometry is
in: any polygon bigger will get divided up for multiple queries to
Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
area, if units are meters))
custom_osm_filter : string, optional
specify custom arguments for the way["highway"] query to OSM. Must
follow Overpass API schema. For
example to request highway ways that are service roads use:
'["highway"="service"]'
Returns
-------
nodes, ways, waynodes : pandas.DataFrame
"""
return parse_network_osm_query(
osm_net_download(lat_max=lat_max, lat_min=lat_min, lng_min=lng_min,
lng_max=lng_max, network_type=network_type,
timeout=timeout, memory=memory,
max_query_area_size=max_query_area_size,
custom_osm_filter=custom_osm_filter)) | 0.000501 |
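A minimal usage sketch for ways_in_bbox, assuming this module is importable and the Overpass API is reachable; the San Francisco bounding box below is illustrative only.
# Hedged usage sketch: fetch the walkable street network for a small bbox.
# A live Overpass query is issued, so network access is required.
nodes, ways, waynodes = ways_in_bbox(
    lat_min=37.76, lng_min=-122.44, lat_max=37.80, lng_max=-122.40,
    network_type='walk')
print(len(nodes), 'nodes,', len(ways), 'ways')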
def _orbList(obj1, obj2, aspList):
""" Returns a list with the orb and angular
distances from obj1 to obj2, considering a
list of possible aspects.
"""
sep = angle.closestdistance(obj1.lon, obj2.lon)
absSep = abs(sep)
return [
{
'type': asp,
'orb': abs(absSep - asp),
'separation': sep,
} for asp in aspList
] | 0.007519 |
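A hedged worked example of _orbList with minimal stand-in objects; only a `lon` attribute is needed, and `angle.closestdistance` is assumed to return the signed smallest separation in degrees.
# Hypothetical stand-in for a chart object exposing only `lon`.
class _Point(object):
    def __init__(self, lon):
        self.lon = lon
aspects = _orbList(_Point(10.0), _Point(131.0), [0, 60, 90, 120, 180])
closest = min(aspects, key=lambda a: a['orb'])
# -> the 120-degree (trine) entry, with an orb of 1.0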
def identifier(self):
"""Get the identifier for this node.
Extended keys can be identified by the Hash160 (RIPEMD160 after SHA256)
of the public key's `key`. This corresponds exactly to the data used in
traditional Bitcoin addresses. It is not advised to represent this data
in base58 format though, as it may be interpreted as an address that
way (and wallet software is not required to accept payment to the chain
key itself).
"""
key = self.get_public_key_hex()
return ensure_bytes(hexlify(hash160(unhexlify(ensure_bytes(key))))) | 0.003263 |
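For reference, a standard-library-only sketch of the Hash160 computation mentioned above (SHA256 followed by RIPEMD160); `pubkey_hex` is an assumed hex-encoded public key, and 'ripemd160' availability in hashlib depends on the local OpenSSL build.
import hashlib
from binascii import hexlify, unhexlify
def hash160_hex(pubkey_hex):
    # SHA256 first, then RIPEMD160, as used for traditional Bitcoin addresses.
    sha = hashlib.sha256(unhexlify(pubkey_hex)).digest()
    return hexlify(hashlib.new('ripemd160', sha).digest())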
def create_session(self, session_request, protocol):
"""CreateSession.
[Preview API] Creates a session, a wrapper around a feed that can store additional metadata on the packages published to it.
:param :class:`<SessionRequest> <azure.devops.v5_0.provenance.models.SessionRequest>` session_request: The feed and metadata for the session
:param str protocol: The protocol that the session will target
:rtype: :class:`<SessionResponse> <azure.devops.v5_0.provenance.models.SessionResponse>`
"""
route_values = {}
if protocol is not None:
route_values['protocol'] = self._serialize.url('protocol', protocol, 'str')
content = self._serialize.body(session_request, 'SessionRequest')
response = self._send(http_method='POST',
location_id='503b4e54-ebf4-4d04-8eee-21c00823c2ac',
version='5.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('SessionResponse', response) | 0.006284 |
def neg_loglikelihood(y, mean, scale, shape, skewness):
""" Negative loglikelihood function for this distribution
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Cauchy distribution
scale : float
scale parameter for the Cauchy distribution
shape : float
tail thickness parameter for the Cauchy distribution
skewness : float
skewness parameter for the Cauchy distribution
Returns
----------
- Negative loglikelihood of the Cauchy family
"""
return -np.sum(ss.cauchy.logpdf(y, loc=mean, scale=scale)) | 0.002721 |
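A hedged usage sketch with synthetic data; numpy and scipy.stats are imported under the same names the function body assumes (`np` and `ss`), and `shape`/`skewness` are accepted but unused by the Cauchy case.
import numpy as np
import scipy.stats as ss
y = ss.cauchy.rvs(loc=0.0, scale=1.0, size=200, random_state=0)
nll = neg_loglikelihood(y, mean=np.zeros_like(y), scale=1.0,
                        shape=None, skewness=None)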
def train(sess, loss, x_train, y_train,
init_all=False, evaluate=None, feed=None, args=None,
rng=None, var_list=None, fprop_args=None, optimizer=None,
devices=None, x_batch_preprocessor=None, use_ema=False,
ema_decay=.998, run_canary=None,
loss_threshold=1e5, dataset_train=None, dataset_size=None):
"""
Run (optionally multi-replica, synchronous) training to minimize `loss`
:param sess: TF session to use when training the graph
:param loss: tensor, the loss to minimize
:param x_train: numpy array with training inputs or tf Dataset
:param y_train: numpy array with training outputs or tf Dataset
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
:param rng: Instance of numpy.random.RandomState
:param var_list: Optional list of parameters to train.
:param fprop_args: dict, extra arguments to pass to fprop (loss and model).
:param optimizer: Optimizer to be used for training
:param devices: list of device names to use for training
If None, defaults to: all GPUs, if GPUs are available
all devices, if no GPUs are available
:param x_batch_preprocessor: callable
Takes a single tensor containing an x_train batch as input
Returns a single tensor containing an x_train batch as output
Called to preprocess the data before passing the data to the Loss
:param use_ema: bool
If true, uses an exponential moving average of the model parameters
:param ema_decay: float or callable
The decay parameter for EMA, if EMA is used
If a callable rather than a float, this is a callable that takes
the epoch and batch as arguments and returns the ema_decay for
the current batch.
:param loss_threshold: float
Raise an exception if the loss exceeds this value.
This is intended to rapidly detect numerical problems.
Sometimes the loss may legitimately be higher than this value. In
such cases, raise the value. If needed it can be np.inf.
:param dataset_train: tf Dataset instance.
Used as a replacement for x_train, y_train for faster performance.
:param dataset_size: integer, the size of the dataset_train.
:return: True if model trained
"""
# Check whether the hardware is working correctly
canary.run_canary()
if run_canary is not None:
warnings.warn("The `run_canary` argument is deprecated. The canary "
"is now much cheaper and thus runs all the time. The "
"canary now uses its own loss function so it is not "
"necessary to turn off the canary when training with "
" a stochastic loss. Simply quit passing `run_canary`."
"Passing `run_canary` may become an error on or after "
"2019-10-16.")
args = _ArgsWrapper(args or {})
fprop_args = fprop_args or {}
# Check that necessary arguments were given (see doc above)
# Be sure to support 0 epochs for debugging purposes
if args.nb_epochs is None:
raise ValueError("`args` must specify number of epochs")
if optimizer is None:
if args.learning_rate is None:
raise ValueError("Learning rate was not given in args dict")
assert args.batch_size, "Batch size was not given in args dict"
if rng is None:
rng = np.random.RandomState()
if optimizer is None:
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
else:
if not isinstance(optimizer, tf.train.Optimizer):
raise ValueError("optimizer object must be from a child class of "
"tf.train.Optimizer")
grads = []
xs = []
preprocessed_xs = []
ys = []
if dataset_train is not None:
assert x_train is None and y_train is None and x_batch_preprocessor is None
if dataset_size is None:
raise ValueError("You must provide a dataset size")
data_iterator = dataset_train.make_one_shot_iterator().get_next()
x_train, y_train = sess.run(data_iterator)
devices = infer_devices(devices)
for device in devices:
with tf.device(device):
x = tf.placeholder(x_train.dtype, (None,) + x_train.shape[1:])
y = tf.placeholder(y_train.dtype, (None,) + y_train.shape[1:])
xs.append(x)
ys.append(y)
if x_batch_preprocessor is not None:
x = x_batch_preprocessor(x)
# We need to keep track of these so that the canary can feed
# preprocessed values. If the canary had to feed raw values,
# stochastic preprocessing could make the canary fail.
preprocessed_xs.append(x)
loss_value = loss.fprop(x, y, **fprop_args)
grads.append(optimizer.compute_gradients(
loss_value, var_list=var_list))
num_devices = len(devices)
print("num_devices: ", num_devices)
grad = avg_grads(grads)
# Trigger update operations within the default graph (such as batch_norm).
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = optimizer.apply_gradients(grad)
epoch_tf = tf.placeholder(tf.int32, [])
batch_tf = tf.placeholder(tf.int32, [])
if use_ema:
if callable(ema_decay):
ema_decay = ema_decay(epoch_tf, batch_tf)
ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
with tf.control_dependencies([train_step]):
train_step = ema.apply(var_list)
# Get pointers to the EMA's running average variables
avg_params = [ema.average(param) for param in var_list]
# Make temporary buffers used for swapping the live and running average
# parameters
tmp_params = [tf.Variable(param, trainable=False)
for param in var_list]
# Define the swapping operation
param_to_tmp = [tf.assign(tmp, param)
for tmp, param in safe_zip(tmp_params, var_list)]
with tf.control_dependencies(param_to_tmp):
avg_to_param = [tf.assign(param, avg)
for param, avg in safe_zip(var_list, avg_params)]
with tf.control_dependencies(avg_to_param):
tmp_to_avg = [tf.assign(avg, tmp)
for avg, tmp in safe_zip(avg_params, tmp_params)]
swap = tmp_to_avg
batch_size = args.batch_size
assert batch_size % num_devices == 0
device_batch_size = batch_size // num_devices
if init_all:
sess.run(tf.global_variables_initializer())
else:
initialize_uninitialized_global_variables(sess)
for epoch in xrange(args.nb_epochs):
if dataset_train is not None:
nb_batches = int(math.ceil(float(dataset_size) / batch_size))
else:
# Indices to shuffle training set
index_shuf = list(range(len(x_train)))
# Randomly repeat a few training examples each epoch to avoid
# having a too-small batch
while len(index_shuf) % batch_size != 0:
index_shuf.append(rng.randint(len(x_train)))
nb_batches = len(index_shuf) // batch_size
rng.shuffle(index_shuf)
# Shuffling here versus inside the loop doesn't seem to affect
# timing very much, but shuffling here makes the code slightly
# easier to read
x_train_shuffled = x_train[index_shuf]
y_train_shuffled = y_train[index_shuf]
prev = time.time()
for batch in range(nb_batches):
if dataset_train is not None:
x_train_shuffled, y_train_shuffled = sess.run(data_iterator)
start, end = 0, batch_size
else:
# Compute batch start and end indices
start = batch * batch_size
end = (batch + 1) * batch_size
# Perform one training step
diff = end - start
assert diff == batch_size
feed_dict = {epoch_tf: epoch, batch_tf: batch}
for dev_idx in xrange(num_devices):
cur_start = start + dev_idx * device_batch_size
cur_end = start + (dev_idx + 1) * device_batch_size
feed_dict[xs[dev_idx]] = x_train_shuffled[cur_start:cur_end]
feed_dict[ys[dev_idx]] = y_train_shuffled[cur_start:cur_end]
if cur_end != end and dataset_train is None:
msg = ("batch_size (%d) must be a multiple of num_devices "
"(%d).\nCUDA_VISIBLE_DEVICES: %s"
"\ndevices: %s")
args = (batch_size, num_devices,
os.environ['CUDA_VISIBLE_DEVICES'],
str(devices))
raise ValueError(msg % args)
if feed is not None:
feed_dict.update(feed)
_, loss_numpy = sess.run(
[train_step, loss_value], feed_dict=feed_dict)
if np.abs(loss_numpy) > loss_threshold:
raise ValueError("Extreme loss during training: ", loss_numpy)
if np.isnan(loss_numpy) or np.isinf(loss_numpy):
raise ValueError("NaN/Inf loss during training")
assert (dataset_train is not None or
end == len(index_shuf)) # Check that all examples were used
cur = time.time()
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
if use_ema:
# Before running evaluation, load the running average
# parameters into the live slot, so we can see how well
# the EMA parameters are performing
sess.run(swap)
evaluate()
if use_ema:
# Swap the parameters back, so that we continue training
# on the live parameters
sess.run(swap)
if use_ema:
# When training is done, swap the running average parameters into
# the live slot, so that we use them when we deploy the model
sess.run(swap)
return True | 0.008006 |
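A hedged sketch of the minimal `args` dictionary this trainer requires; the key names follow the docstring and the values are illustrative only.
train_params = {
    'nb_epochs': 6,         # required; 0 is allowed for debugging
    'batch_size': 128,      # must be a multiple of the number of devices
    'learning_rate': 1e-3,  # required unless an optimizer instance is passed
}
# train(sess, loss, x_train, y_train, args=train_params,
#       rng=np.random.RandomState(17))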
def get_tx_fee(tx_hex, config_path=None, bitcoind_opts=None, bitcoind_client=None):
"""
Get the tx fee for a tx
Return the fee on success
Return None on error
"""
tx_fee_per_byte = get_tx_fee_per_byte(config_path=config_path, bitcoind_opts=bitcoind_opts, bitcoind_client=bitcoind_client)
if tx_fee_per_byte is None:
return None
return calculate_tx_fee(tx_hex, tx_fee_per_byte) | 0.007194 |
def select_page(self, limit, offset=0, **kwargs):
"""
:type limit: int
:param limit: The max row number for each page
:type offset: int
:param offset: The starting position of the page
:return:
"""
start = offset
while True:
result = self.select(limit=[start, limit], **kwargs)
start += limit
if result:
yield result
else:
break
if self.debug:
break | 0.003781 |
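A hedged usage sketch: page through matching rows 500 at a time, where `db` is assumed to be an instance of the class defining select_page and any filter kwargs are passed straight through to its select().
for page in db.select_page(limit=500):
    for row in page:
        print(row)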
def invoke(self):
"""
Call the external handler to be invoked.
"""
# flush to ensure external process can see flags as they currently
# are, and write flags (flush releases lock)
unitdata.kv().flush()
subprocess.check_call([self._filepath, '--invoke', self._test_output], env=os.environ) | 0.008746 |
def partial(self, fn, *user_args, **user_kwargs):
"""Return function with closure to lazily inject annotated callable.
Repeat calls to the resulting function will reuse injections from the
first call.
Positional arguments are provided in this order:
1. positional arguments provided by injector
2. positional arguments provided in `partial_fn = partial(fn, *args)`
3. positional arguments provided in `partial_fn(*args)`
Keyword arguments are resolved in this order (later override earlier):
1. keyword arguments provided by injector
2. keyword arguments provided in `partial_fn = partial(fn, **kwargs)`
3. keyword arguments provided in `partial_fn(**kargs)`
Note that Python function annotations (in Python 3) are injected as
keyword arguments, as documented in `annotate`, which affects the
argument order here.
`annotate.partial` accepts arguments in same manner as this `partial`.
"""
self.get_annotations(fn) # Assert has annotations.
def lazy_injection_fn(*run_args, **run_kwargs):
arg_pack = getattr(lazy_injection_fn, 'arg_pack', None)
if arg_pack is not None:
pack_args, pack_kwargs = arg_pack
else:
jeni_args, jeni_kwargs = self.prepare_callable(fn, partial=True)
pack_args = jeni_args + user_args
pack_kwargs = {}
pack_kwargs.update(jeni_kwargs)
pack_kwargs.update(user_kwargs)
lazy_injection_fn.arg_pack = (pack_args, pack_kwargs)
final_args = pack_args + run_args
final_kwargs = {}
final_kwargs.update(pack_kwargs)
final_kwargs.update(run_kwargs)
return fn(*final_args, **final_kwargs)
return lazy_injection_fn | 0.002646 |
def chip_as_adjacency_list(device: 'cirq.google.XmonDevice',
) -> Dict[GridQubit, List[GridQubit]]:
"""Gives adjacency list representation of a chip.
The adjacency list is constructed in order of above, left_of, below and
right_of consecutively.
Args:
device: Chip to be converted.
Returns:
Map from nodes to list of qubits which represent all the neighbours of
given qubit.
"""
c_set = set(device.qubits)
c_adj = {} # type: Dict[GridQubit, List[GridQubit]]
for n in device.qubits:
c_adj[n] = []
for m in [above(n), left_of(n), below(n), right_of(n)]:
if m in c_set:
c_adj[n].append(m)
return c_adj | 0.002717 |
def range(self, low, high, with_scores=False, desc=False, reverse=False):
"""
Return a range of items between ``low`` and ``high``. By
default scores will not be included, but this can be controlled
via the ``with_scores`` parameter.
:param low: Lower bound.
:param high: Upper bound.
:param bool with_scores: Whether the range should include the
scores along with the items.
:param bool desc: Whether to sort the results descendingly.
:param bool reverse: Whether to select the range in reverse.
"""
if reverse:
return self.database.zrevrange(self.key, low, high, with_scores)
else:
return self.database.zrange(self.key, low, high, desc, with_scores) | 0.002551 |
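A hedged usage sketch, where `zs` is assumed to be an instance of the sorted-set wrapper defining range(): fetch the ten highest-scored members together with their scores.
top_ten = zs.range(0, 9, with_scores=True, reverse=True)
for member, score in top_ten:
    print(member, score)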
def get_all_conversion_chains(self, from_type: Type[Any] = JOKER, to_type: Type[Any] = JOKER) \
-> Tuple[List[Converter], List[Converter], List[Converter]]:
"""
Utility method to find matching converters or conversion chains.
:param from_type: a required type of input object, or JOKER for 'wildcard'(*) .
WARNING: "from_type=AnyObject/object/Any" means
"all converters able to source from anything", which is different from "from_type=JOKER" which means "all
converters whatever their source type".
:param to_type: a required type of output object, or JOKER for 'wildcard'(*) .
WARNING: "to_type=AnyObject/object/Any" means "all
converters able to produce any type of object", which is different from "to_type=JOKER" which means "all
converters whatever type they are able to produce".
:return: a tuple of lists of matching converters, by type of *dest_type* match : generic, approximate, exact.
The order of each list is from *less relevant* to *most relevant*
"""
if from_type is JOKER and to_type is JOKER:
matching_dest_generic = self._generic_nonstrict_conversion_chains.copy() + \
self._generic_conversion_chains.copy()
matching_dest_approx = []
matching_dest_exact = self._specific_non_strict_conversion_chains.copy() + \
self._specific_conversion_chains.copy()
else:
matching_dest_generic, matching_dest_approx, matching_dest_exact = [], [], []
# first transform any 'Any' type requirement into the official class for that
to_type = get_validated_type(to_type, 'to_type', enforce_not_joker=False)
# handle generic converters first
for c in (self._generic_nonstrict_conversion_chains + self._generic_conversion_chains):
match, source_exact, dest_exact = c.is_able_to_convert_detailed(strict=self.strict,
from_type=from_type,
to_type=to_type)
if match:
# match
if is_any_type(to_type):
# special case where desired to_type is already Any : in that case a generic converter will
# appear in 'exact match'
matching_dest_exact.append(c)
else:
# this is a match from a generic parser to a specific type : add in 'generic' cataegory
matching_dest_generic.append(c)
# then the specific
for c in (self._specific_non_strict_conversion_chains + self._specific_conversion_chains):
match, source_exact, dest_exact = c.is_able_to_convert_detailed(strict=self.strict,
from_type=from_type,
to_type=to_type)
if match:
if not is_any_type(to_type):
if dest_exact:
# we dont care if source is exact or approximate as long as dest is exact
matching_dest_exact.append(c)
else:
# this means that dest is approximate.
matching_dest_approx.append(c)
else:
# we only want to keep the generic ones, and they have already been added
pass
return matching_dest_generic, matching_dest_approx, matching_dest_exact | 0.006773 |
def _init_command(self, action, flags=None):
''' a wrapper to the base init_command, ensuring that "oci" is added
to each command
Parameters
==========
action: the main action to perform (e.g., build)
flags: one or more additional flags (e.g, volumes)
not implemented yet.
'''
from spython.main.base.command import init_command
if not isinstance(action, list):
action = [action]
cmd = ['oci'] + action
return init_command(self, cmd, flags) | 0.005164 |
def _clear_temp_dir():
""" Clear the temporary directory.
"""
tempdir = get_tempdir()
for fname in os.listdir(tempdir):
try:
os.remove( os.path.join(tempdir, fname) )
except Exception:
pass | 0.012245 |
def get_proficiencies_for_objective_and_resource(self, objective_id, resource_id):
"""Gets a ``ProficiencyList`` relating to the given objective and resource ````.
arg: objective_id (osid.id.Id): an objective ``Id``
arg: resource_id (osid.id.Id): a resource ``Id``
return: (osid.learning.ProficiencyList) - the returned
``Proficiency`` list
raise: NullArgument - ``objective_id`` or ``resource_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.get_relationships_for_peers
# NOTE: This implementation currently ignores plenary and effective views
collection = JSONClientValidated('learning',
collection='Proficiency',
runtime=self._runtime)
result = collection.find(
            dict({'objectiveId': str(objective_id),
                  'resourceId': str(resource_id)},
**self._view_filter())).sort('_id', ASCENDING)
return objects.ProficiencyList(result, runtime=self._runtime) | 0.003709 |
def __callbackWrapper(self, transfer_p):
"""
Makes it possible for user-provided callback to alter transfer when
fired (ie, mark transfer as not submitted upon call).
"""
self.__submitted = False
self.__after_completion(self)
callback = self.__callback
if callback is not None:
callback(self)
if self.__doomed:
self.close() | 0.004773 |
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
:rtype: list
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result | 0.000992 |
def seek(self, position):
"""Seek to the specified position (byte offset) in the S3 key.
:param int position: The byte offset from the beginning of the key.
"""
self._position = position
range_string = make_range_string(self._position)
logger.debug('content_length: %r range_string: %r', self._content_length, range_string)
#
# Close old body explicitly.
# When first seek(), self._body is not exist. Catch the exception and do nothing.
#
try:
self._body.close()
except AttributeError:
pass
if position == self._content_length == 0 or position == self._content_length:
#
# When reading, we can't seek to the first byte of an empty file.
# Similarly, we can't seek past the last byte. Do nothing here.
#
self._body = io.BytesIO()
else:
self._body = self._object.get(Range=range_string)['Body'] | 0.00498 |
def nvmlDeviceGetSupportedEventTypes(handle):
r"""
/**
* Returns information about events supported on device
*
* For Fermi &tm; or newer fully supported devices.
*
* Events are not supported on Windows. So this function returns an empty mask in \a eventTypes on Windows.
*
* @param device The identifier of the target device
* @param eventTypes Reference in which to return bitmask of supported events
*
* @return
* - \ref NVML_SUCCESS if the eventTypes has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventType is NULL
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*
* @see nvmlEventType
* @see nvmlDeviceRegisterEvents
*/
nvmlReturn_t DECLDIR nvmlDeviceGetSupportedEventTypes
"""
c_eventTypes = c_ulonglong()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedEventTypes")
ret = fn(handle, byref(c_eventTypes))
_nvmlCheckReturn(ret)
return bytes_to_str(c_eventTypes.value) | 0.005212 |
def add_connection_score(self, node):
"""
Return a numeric value that determines this node's score for adding
a new connection. A negative value indicates that no connections
should be made to this node for at least that number of seconds.
A value of -inf indicates no connections should be made to this
node for the foreseeable future.
This score should ideally take into account the connectedness of
available nodes, so that those with less current connections will
get more.
"""
# TODO: this should ideally take node history into account
conntime = node.seconds_until_connect_ok()
if conntime > 0:
self.log("not considering %r for new connection; has %r left on "
"connect blackout" % (node, conntime))
return -conntime
numconns = self.num_connectors_to(node)
if numconns >= self.max_connections_per_node:
return float('-Inf')
return sys.maxint - numconns | 0.001912 |
def on_exchange_declareok(self, unused_frame):
"""
Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
"""
self._logger.debug('Exchange declared')
self.setup_queue(self._queue) | 0.009063 |
def post_message(self, message, duration=None, pause=True, style="info"):
""" Post a message on the screen with Messenger.
Arguments:
message: The message to display.
duration: The time until the message vanishes. (Default: 2.55s)
pause: If True, the program waits until the message completes.
style: "info", "success", or "error".
You can also post messages by using =>
self.execute_script('Messenger().post("My Message")')
"""
if not duration:
if not self.message_duration:
duration = settings.DEFAULT_MESSAGE_DURATION
else:
duration = self.message_duration
js_utils.post_message(
self.driver, message, duration, style=style)
if pause:
duration = float(duration) + 0.15
time.sleep(float(duration)) | 0.002137 |
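A hedged usage sketch inside a seleniumbase test case, assuming the usual BaseCase entry point; the URL and message text are placeholders.
from seleniumbase import BaseCase
class MessageDemo(BaseCase):
    def test_post_message(self):
        self.open("https://example.com")
        self.post_message("Settings saved", duration=3, style="success")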
def piecewise(target, throat_endpoints='throat.endpoints',
throat_centroid='throat.centroid'):
r"""
Calculate throat length from end points and optionally a centroid
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
throat_endpoints : string
Dictionary key of the throat endpoint values.
throat_centroid : string
Dictionary key of the throat centroid values, optional.
Returns
-------
Lt : ndarray
Array containing throat lengths for the given geometry.
Notes
-----
(1) By default, the model assumes that the centroids of pores and the
connecting throat in each conduit are colinear.
(2) If `throat_centroid` is passed, the model accounts for the extra
length. This could be useful for Voronoi or extracted networks.
"""
_np.warnings.filterwarnings('ignore', category=RuntimeWarning)
network = target.project.network
throats = network.map_throats(throats=target.Ts, origin=target)
# Get throat endpoints
EP1 = network[throat_endpoints + '.head'][throats]
EP2 = network[throat_endpoints + '.tail'][throats]
# Calculate throat length
Lt = _sqrt(((EP1 - EP2)**2).sum(axis=1))
# Handle the case where pores & throat centroids are not colinear
try:
Ct = network[throat_centroid][throats]
Lt = _sqrt(((Ct - EP1)**2).sum(axis=1)) + \
_sqrt(((Ct - EP2)**2).sum(axis=1))
except KeyError:
pass
_np.warnings.filterwarnings('default', category=RuntimeWarning)
return Lt | 0.000577 |
def _insert_update(self, index: int, length: int) -> None:
"""Update self._type_to_spans according to the added length."""
ss, se = self._span
for spans in self._type_to_spans.values():
for span in spans:
if index < span[1] or span[1] == index == se:
span[1] += length
# index is before s, or at s but not on self_span
if index < span[0] or span[0] == index != ss:
span[0] += length | 0.003861 |
def _parse_line(line=''):
'''
Used by conf() to break config lines into
name/value pairs
'''
parts = line.split()
key = parts.pop(0)
value = ' '.join(parts)
return key, value | 0.004854 |
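A short worked example of the helper above:
key, value = _parse_line('ServerName example.com www.example.com')
# key == 'ServerName'
# value == 'example.com www.example.com'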
def build(tagname_or_element, ns_uri=None, adapter=None):
"""
Return a :class:`~xml4h.builder.Builder` that represents an element in
a new or existing XML DOM and provides "chainable" methods focussed
specifically on adding XML content.
:param tagname_or_element: a string name for the root node of a
new XML document, or an :class:`~xml4h.nodes.Element` node in an
existing document.
:type tagname_or_element: string or :class:`~xml4h.nodes.Element` node
:param ns_uri: a namespace URI to apply to the new root node. This
argument has no effect this method is acting on an element.
:type ns_uri: string or None
:param adapter: the *xml4h* implementation adapter class used to
interact with the document DOM nodes.
If None, :attr:`best_adapter` will be used.
:type adapter: adapter class or None
:return: a :class:`~xml4h.builder.Builder` instance that represents an
:class:`~xml4h.nodes.Element` node in an XML DOM.
"""
if adapter is None:
adapter = best_adapter
if isinstance(tagname_or_element, basestring):
doc = adapter.create_document(
tagname_or_element, ns_uri=ns_uri)
element = doc.root
elif isinstance(tagname_or_element, xml4h.nodes.Element):
element = tagname_or_element
else:
raise xml4h.exceptions.IncorrectArgumentTypeException(
tagname_or_element, [basestring, xml4h.nodes.Element])
return Builder(element) | 0.000665 |
def report(self, start=0, end=None):
"""
        This will return a list of call reports, each containing the endpoint
        with its arguments and a representation of the returned data.
:param start: int of the index to start at
:param end: int of the index to end at
:return: list of str
"""
return [api_call.report for api_call in self[start:end]] | 0.005263 |
def setupcfg_requirements(self):
"""Generate requirements from setup.cfg as
('Requires-Dist', 'requirement; qualifier') tuples. From a metadata
section in setup.cfg:
[metadata]
provides-extra = extra1
extra2
requires-dist = requirement; qualifier
another; qualifier2
unqualified
Yields
('Provides-Extra', 'extra1'),
('Provides-Extra', 'extra2'),
('Requires-Dist', 'requirement; qualifier'),
('Requires-Dist', 'another; qualifier2'),
('Requires-Dist', 'unqualified')
"""
metadata = self.distribution.get_option_dict('metadata')
# our .ini parser folds - to _ in key names:
for key, title in (('provides_extra', 'Provides-Extra'),
('requires_dist', 'Requires-Dist')):
if not key in metadata:
continue
field = metadata[key]
for line in field[1].splitlines():
line = line.strip()
if not line:
continue
yield (title, line) | 0.00265 |
def get_audits():
"""Get OS hardening access audits.
:returns: dictionary of audits
"""
audits = []
settings = utils.get_settings('os')
# Remove write permissions from $PATH folders for all regular users.
# This prevents changing system-wide commands from normal users.
path_folders = {'/usr/local/sbin',
'/usr/local/bin',
'/usr/sbin',
'/usr/bin',
'/bin'}
extra_user_paths = settings['environment']['extra_user_paths']
path_folders.update(extra_user_paths)
audits.append(ReadOnly(path_folders))
# Only allow the root user to have access to the shadow file.
audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600))
if 'change_user' not in settings['security']['users_allow']:
# su should only be accessible to user and group root, unless it is
# expressly defined to allow users to change to root via the
# security_users_allow config option.
audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750))
return audits | 0.000894 |
def setup(path_config="~/.config/scalar/config.yaml", configuration_name=None):
"""
Load a configuration from a default or specified configuration file, accessing a default or
specified configuration name.
"""
global config
global client
global token
global room
# config file
path_config = Path(path_config).expanduser()
log.debug("load config {path}".format(path = path_config))
if not path_config.exists():
log.error("no config {path} found".format(path = path_config))
sys.exit()
else:
with open(str(path_config), "r") as _file:
config = yaml.load(_file)
if not configuration_name:
for configuration in list(config["configurations"].items()):
if configuration[1]["default"]:
config = configuration[1]
else:
config["configurations"][configuration_name]
# connect to homeserver and room
log.debug("Matrix username: " + config["username"])
log.debug("connect to homeserver " + config["homeserver"])
client = MatrixClient(config["homeserver"])
token = client.login_with_password(username = config["username"], password = config["passcode"])
log.debug("connect to room " + config["room_alias"])
room = client.join_room(config["room_alias"]) | 0.008416 |
def deploy(self, id_networkv4):
"""Deploy network in equipments and set column 'active = 1' in tables redeipv4
:param id_networkv4: ID for NetworkIPv4
:return: Equipments configuration output
"""
data = dict()
uri = 'api/networkv4/%s/equipments/' % id_networkv4
return super(ApiNetworkIPv4, self).post(uri, data=data) | 0.007958 |
def implementation(self, for_type=None, for_types=None):
"""Return a decorator that will register the implementation.
Example:
@multimethod
def add(x, y):
pass
@add.implementation(for_type=int)
def add(x, y):
return x + y
@add.implementation(for_type=SomeType)
def add(x, y):
return int(x) + int(y)
"""
for_types = self.__get_types(for_type, for_types)
def _decorator(implementation):
self.implement(implementation, for_types=for_types)
return self
return _decorator | 0.003017 |
def __get_query_agg_terms(cls, field, agg_id=None):
"""
Create a es_dsl aggregation object based on a term.
:param field: field to be used to aggregate
:return: a tuple with the aggregation id and es_dsl aggregation object. Ex:
{
"terms": {
"field": <field>,
"size:": <size>,
"order":{
"_count":"desc"
}
}
Which will then be used as Search.aggs.bucket(agg_id, query_agg) method
to add aggregations to the es_dsl Search object
"""
if not agg_id:
agg_id = cls.AGGREGATION_ID
query_agg = A("terms", field=field, size=cls.AGG_SIZE, order={"_count": "desc"})
return (agg_id, query_agg) | 0.004734 |
def hist_calls_with_dims(**dims):
"""Decorator to check the distribution of return values of a
function with dimensions.
"""
def hist_wrapper(fn):
@functools.wraps(fn)
def fn_wrapper(*args, **kwargs):
_histogram = histogram(
"%s_calls" % pyformance.registry.get_qualname(fn), **dims)
rtn = fn(*args, **kwargs)
if type(rtn) in (int, float):
_histogram.add(rtn)
return rtn
return fn_wrapper
return hist_wrapper | 0.00188 |
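A hedged usage sketch of the decorator above; the dimension name and value are illustrative only, and every numeric return value feeds the histogram.
@hist_calls_with_dims(region="us-east-1")
def bytes_sent():
    return 512
bytes_sent()  # records 512 in the "..._calls" histogram tagged with region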
def get_arr(self):
"""
Get the heatmap's array within the value range originally provided in ``__init__()``.
The HeatmapsOnImage object saves heatmaps internally in the value range ``(min=0.0, max=1.0)``.
This function converts the internal representation to ``(min=min_value, max=max_value)``,
where ``min_value`` and ``max_value`` are provided upon instantiation of the object.
Returns
-------
result : (H,W) ndarray or (H,W,C) ndarray
Heatmap array. Dtype is float32.
"""
if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
arr = self.arr_0to1[:, :, 0]
else:
arr = self.arr_0to1
eps = np.finfo(np.float32).eps
min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
if min_is_zero and max_is_one:
return np.copy(arr)
else:
diff = self.max_value - self.min_value
return self.min_value + diff * arr | 0.005703 |
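A hedged usage sketch; the HeatmapsOnImage import path below matches newer imgaug releases and may differ in older ones.
import numpy as np
from imgaug.augmentables.heatmaps import HeatmapsOnImage
arr = np.linspace(0.0, 2.0, 16, dtype=np.float32).reshape(4, 4)
hm = HeatmapsOnImage(arr, shape=(4, 4, 3), min_value=0.0, max_value=2.0)
print(hm.get_arr().max())  # ~2.0: values are mapped back to [0.0, 2.0]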
def is_out_of_range(brain_or_object, result=_marker):
"""Checks if the result for the analysis passed in is out of range and/or
out of shoulders range.
min max
warn min max warn
·········|---------------|=====================|---------------|·········
----- out-of-range -----><----- in-range ------><----- out-of-range -----
<-- shoulder --><----- in-range ------><-- shoulder -->
:param brain_or_object: A single catalog brain or content object
:param result: Tentative result. If None, use the analysis result
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Tuple of two elements. The first value is `True` if the result is
out of range and `False` if it is in range. The second value is `True` if
the result is out of shoulder range and `False` if it is in shoulder range
:rtype: (bool, bool)
"""
analysis = api.get_object(brain_or_object)
if not IAnalysis.providedBy(analysis) and \
not IReferenceAnalysis.providedBy(analysis):
api.fail("{} is not supported. Needs to be IAnalysis or "
"IReferenceAnalysis".format(repr(analysis)))
if result is _marker:
result = api.safe_getattr(analysis, "getResult", None)
if not api.is_floatable(result):
# Result is empty/None or not a valid number
return False, False
result = api.to_float(result)
# Note that routine analyses, duplicates and reference analyses all them
# implement the function getResultRange:
# - For routine analyses, the function returns the valid range based on the
# specs assigned during the creation process.
# - For duplicates, the valid range is the result of the analysis the
# the duplicate was generated from +/- the duplicate variation.
# - For reference analyses, getResultRange returns the valid range as
# indicated in the Reference Sample from which the analysis was created.
result_range = api.safe_getattr(analysis, "getResultsRange", None)
if not result_range:
# No result range defined or the passed in object does not suit
return False, False
# Maybe there is a custom adapter
adapters = getAdapters((analysis,), IResultOutOfRange)
for name, adapter in adapters:
ret = adapter(result=result, specification=result_range)
if not ret or not ret.get('out_of_range', False):
continue
if not ret.get('acceptable', True):
# Out of range + out of shoulders
return True, True
# Out of range, but in shoulders
return True, False
result_range = ResultsRangeDict(result_range)
# The assignment of result as default fallback for min and max guarantees
# the result will be in range also if no min/max values are defined
specs_min = api.to_float(result_range.min, result)
specs_max = api.to_float(result_range.max, result)
in_range = False
min_operator = result_range.min_operator
if min_operator == "geq":
in_range = result >= specs_min
else:
in_range = result > specs_min
max_operator = result_range.max_operator
if in_range:
if max_operator == "leq":
in_range = result <= specs_max
else:
in_range = result < specs_max
# If in range, no need to check shoulders
if in_range:
return False, False
# Out of range, check shoulders. If no explicit warn_min or warn_max have
# been defined, no shoulders must be considered for this analysis. Thus, use
# specs' min and max as default fallback values
warn_min = api.to_float(result_range.warn_min, specs_min)
warn_max = api.to_float(result_range.warn_max, specs_max)
in_shoulder = warn_min <= result <= warn_max
return True, not in_shoulder | 0.000506 |
def _to_graph(self, contexts):
"""This is an iterator that returns each edge of our graph
with its two nodes"""
prev = None
for context in contexts:
if prev is None:
prev = context
continue
yield prev[0], context[1], context[0]
prev = context | 0.005952 |
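A hedged worked example, assuming `obj` is an instance of the class defining _to_graph; consecutive (node, edge) contexts become (previous node, edge, node) triples.
edges = list(obj._to_graph([('a', 1), ('b', 2), ('c', 3)]))
# edges == [('a', 2, 'b'), ('b', 3, 'c')]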
def loop_iteration(self, timeout = 60):
"""A loop iteration - check any scheduled events
and I/O available and run the handlers.
"""
if self.check_events():
return 0
next_timeout, sources_handled = self._call_timeout_handlers()
if self._quit:
return sources_handled
if next_timeout is not None:
timeout = min(next_timeout, timeout)
readable, writable, next_timeout = self._prepare_handlers()
if next_timeout is not None:
timeout = min(next_timeout, timeout)
if not readable and not writable:
readable, writable, _unused = [], [], None
time.sleep(timeout)
else:
logger.debug("select({0!r}, {1!r}, [], {2!r})"
.format( readable, writable,timeout))
readable, writable, _unused = select.select(
readable, writable, [], timeout)
for handler in readable:
handler.handle_read()
sources_handled += 1
for handler in writable:
handler.handle_write()
sources_handled += 1
return sources_handled | 0.005742 |
def scale_image(self, in_fname, out_fname, max_width, max_height):
"""Scales an image with the same aspect ratio centered in an
image with a given max_width and max_height
if in_fname == out_fname the image can only be scaled down
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = max_width / float(width_in)
scale_h = max_height / float(height_in)
if height_in * scale_w <= max_height:
scale = scale_w
else:
scale = scale_h
if scale >= 1.0 and in_fname == out_fname:
return
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (max_width, max_height), (255, 255, 255))
pos_insert = (
(max_width - width_sc) // 2, (max_height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname) | 0.001612 |
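A hedged usage sketch, assuming `gen` is an instance of the class defining scale_image, Pillow is installed, and the output directory exists; the paths are placeholders.
gen.scale_image('plot_example.png', 'thumbs/plot_example.png',
                max_width=400, max_height=280)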
def validate_account_user_email(self, account_id, user_id, **kwargs): # noqa: E501
"""Validate the user email. # noqa: E501
An endpoint for validating the user email. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/validate-email -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.validate_account_user_email(account_id, user_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str user_id: The ID of the user whose email is validated. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.validate_account_user_email_with_http_info(account_id, user_id, **kwargs) # noqa: E501
else:
(data) = self.validate_account_user_email_with_http_info(account_id, user_id, **kwargs) # noqa: E501
return data | 0.001553 |
def update_agent(self, agent_id, **kwargs):
"""Updates an agent"""
url = 'agents/%s' % agent_id
agent = self._api._put(url, data=json.dumps(kwargs))
return Agent(**agent) | 0.009901 |
def parse_header(filename):
'''Returns a list of :attr:`VariableSpec`, :attr:`FunctionSpec`,
:attr:`StructSpec`, :attr:`EnumSpec`, :attr:`EnumMemberSpec`, and
:attr:`TypeDef` instances representing the c header file.
'''
with open(filename, 'rb') as fh:
content = '\n'.join(fh.read().splitlines())
content = sub('\t', ' ', content)
content = strip_comments(content)
# first get the functions
content = split(func_pat_short, content)
for i, s in enumerate(content):
if i % 2 and content[i].strip(): # matched a prototype
try:
content[i] = parse_prototype(content[i])
except Exception as e:
traceback.print_exc()
# now process structs
res = []
for i, item in enumerate(content):
if not isinstance(item, str): # if it's already a func etc. skip it
res.append(item)
continue
items = split(struct_pat, item)
j = 0
while j < len(items):
if not j % 5:
res.append(items[j])
j += 1
else:
if items[j].strip() == 'enum':
res.append(parse_enum(*items[j + 1: j + 4]))
else:
res.append(parse_struct(*items[j + 1: j + 4]))
j += 4
# now do remaining simple typedefs
content = res
res = []
for i, item in enumerate(content):
if not isinstance(item, str): # if it's already processed skip it
res.append(item)
continue
items = split(typedef_pat, item)
for j, item in enumerate(items):
res.append(TypeDef(item.strip()) if j % 2 else item)
content = [c for c in res if not isinstance(c, str) or c.strip()]
return content | 0.000534 |
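A hedged usage sketch: parse a small C header and list what was recognised; 'example.h' is a placeholder path and the `name` attribute is read defensively since the spec classes are not shown here.
for spec in parse_header('example.h'):
    print(type(spec).__name__, getattr(spec, 'name', ''))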