def _read_packet(self, packet_type=MysqlPacket):
"""Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results.
:raise OperationalError: If the connection to the MySQL server is lost.
:raise InternalError: If the packet sequence number is wrong.
"""
buff = bytearray()
while True:
packet_header = self._read_bytes(4)
#if DEBUG: dump_packet(packet_header)
btrl, btrh, packet_number = struct.unpack('<HBB', packet_header)
bytes_to_read = btrl + (btrh << 16)
if packet_number != self._next_seq_id:
self._force_close()
if packet_number == 0:
# MariaDB sends error packet with seqno==0 when shutdown
raise err.OperationalError(
CR.CR_SERVER_LOST,
"Lost connection to MySQL server during query")
raise err.InternalError(
"Packet sequence number wrong - got %d expected %d"
% (packet_number, self._next_seq_id))
self._next_seq_id = (self._next_seq_id + 1) % 256
recv_data = self._read_bytes(bytes_to_read)
if DEBUG: dump_packet(recv_data)
buff += recv_data
# https://dev.mysql.com/doc/internals/en/sending-more-than-16mbyte.html
if bytes_to_read == 0xffffff:
continue
if bytes_to_read < MAX_PACKET_LEN:
break
packet = packet_type(bytes(buff), self.encoding)
packet.check_error()
return packet
def map_equal_contributions(contributors):
"""assign numeric values to each unique equal-contrib id"""
equal_contribution_map = {}
equal_contribution_keys = []
for contributor in contributors:
if contributor.get("references") and "equal-contrib" in contributor.get("references"):
for key in contributor["references"]["equal-contrib"]:
if key not in equal_contribution_keys:
equal_contribution_keys.append(key)
# Do a basic sort
equal_contribution_keys = sorted(equal_contribution_keys)
# Assign keys based on sorted values
for i, equal_contribution_key in enumerate(equal_contribution_keys):
equal_contribution_map[equal_contribution_key] = i+1
return equal_contribution_map
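# A minimal usage sketch, assuming map_equal_contributions above is in scope;
# the contributor dicts below are hypothetical.
contributors = [
    {"name": "A", "references": {"equal-contrib": ["equal-contrib2"]}},
    {"name": "B", "references": {"equal-contrib": ["equal-contrib1"]}},
    {"name": "C"},  # no "references" key, so it is skipped
]
print(map_equal_contributions(contributors))
# {'equal-contrib1': 1, 'equal-contrib2': 2} after the basic sort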
def __match_intervals(intervals_from, intervals_to, strict=True): # pragma: no cover
'''Numba-accelerated interval matching algorithm.
'''
# sort index of the interval starts
start_index = np.argsort(intervals_to[:, 0])
# sort index of the interval ends
end_index = np.argsort(intervals_to[:, 1])
# and sorted values of starts
start_sorted = intervals_to[start_index, 0]
# and ends
end_sorted = intervals_to[end_index, 1]
search_ends = np.searchsorted(start_sorted, intervals_from[:, 1], side='right')
search_starts = np.searchsorted(end_sorted, intervals_from[:, 0], side='left')
output = np.empty(len(intervals_from), dtype=numba.uint32)
for i in range(len(intervals_from)):
query = intervals_from[i]
# Find the intervals that start after our query ends
after_query = search_ends[i]
# And the intervals that end after our query begins
before_query = search_starts[i]
# Candidates for overlapping have to (end after we start) and (begin before we end)
candidates = set(start_index[:after_query]) & set(end_index[before_query:])
# Proceed as before
if len(candidates) > 0:
output[i] = __match_interval_overlaps(query, intervals_to, candidates)
elif strict:
# Numba only lets us use compile-time constants in exception messages
raise ParameterError
else:
# Find the closest interval
# (start_index[after_query] - query[1]) is the distance to the next interval
# (query[0] - end_index[before_query])
dist_before = np.inf
dist_after = np.inf
if search_starts[i] > 0:
dist_before = query[0] - end_sorted[search_starts[i]-1]
if search_ends[i] + 1 < len(intervals_to):
dist_after = start_sorted[search_ends[i]+1] - query[1]
if dist_before < dist_after:
output[i] = end_index[search_starts[i]-1]
else:
output[i] = start_index[search_ends[i]+1]
return output
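# A standalone sketch of the candidate-selection step above (plain NumPy, no
# numba): the two searchsorted calls bound which reference intervals can
# possibly overlap a query. Intervals and query are hypothetical.
import numpy as np

intervals_to = np.array([[0.0, 2.0], [1.5, 3.0], [4.0, 5.0]])
query = np.array([1.0, 1.8])

start_index = np.argsort(intervals_to[:, 0])
end_index = np.argsort(intervals_to[:, 1])
start_sorted = intervals_to[start_index, 0]
end_sorted = intervals_to[end_index, 1]

# intervals whose start lies at or before the query end ...
after_query = np.searchsorted(start_sorted, query[1], side='right')
# ... and intervals whose end lies at or after the query start
before_query = np.searchsorted(end_sorted, query[0], side='left')

candidates = set(start_index[:after_query]) & set(end_index[before_query:])
print(candidates)  # {0, 1}: both [0, 2] and [1.5, 3] overlap [1.0, 1.8]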
def analyze(problem, X, Y, num_resamples=1000,
conf_level=0.95, print_to_console=False, seed=None):
"""Calculates Derivative-based Global Sensitivity Measure on model outputs.
Returns a dictionary with keys 'vi', 'vi_std', 'dgsm', and 'dgsm_conf',
where each entry is a list of size D (the number of parameters) containing
the indices in the same order as the parameter file.
Parameters
----------
problem : dict
The problem definition
X : numpy.matrix
The NumPy matrix containing the model inputs
Y : numpy.array
The NumPy array containing the model outputs
num_resamples : int
The number of resamples used to compute the confidence
intervals (default 1000)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
References
----------
.. [1] Sobol, I. M. and S. Kucherenko (2009). "Derivative based global
sensitivity measures and their link with global sensitivity
indices." Mathematics and Computers in Simulation, 79(10):3009-3017,
doi:10.1016/j.matcom.2009.01.023.
"""
if seed:
np.random.seed(seed)
D = problem['num_vars']
if Y.size % (D + 1) == 0:
N = int(Y.size / (D + 1))
else:
raise RuntimeError("Incorrect number of samples in model output file.")
if not 0 < conf_level < 1:
raise RuntimeError("Confidence level must be between 0-1.")
base = np.zeros(N)
X_base = np.zeros((N, D))
perturbed = np.zeros((N, D))
X_perturbed = np.zeros((N, D))
step = D + 1
base = Y[0:Y.size:step]
X_base = X[0:Y.size:step, :]
for j in range(D):
perturbed[:, j] = Y[(j + 1):Y.size:step]
X_perturbed[:, j] = X[(j + 1):Y.size:step, j]
# First order (+conf.) and Total order (+conf.)
keys = ('vi', 'vi_std', 'dgsm', 'dgsm_conf')
S = ResultDict((k, np.zeros(D)) for k in keys)
S['names'] = problem['names']
if print_to_console:
print("Parameter %s %s %s %s" % keys)
for j in range(D):
S['vi'][j], S['vi_std'][j] = calc_vi(
base, perturbed[:, j], X_perturbed[:, j] - X_base[:, j])
S['dgsm'][j], S['dgsm_conf'][j] = calc_dgsm(base, perturbed[:, j], X_perturbed[
:, j] - X_base[:, j], problem['bounds'][j], num_resamples, conf_level)
if print_to_console:
print("%s %f %f %f %f" % (
problem['names'][j], S['vi'][j], S['vi_std'][j], S['dgsm'][j], S['dgsm_conf'][j]))
return S
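# A small NumPy sketch (hypothetical D and Y) of the strided slicing used above
# to split DGSM-style radial samples into base and perturbed outputs.
import numpy as np

D = 2                      # number of parameters (hypothetical)
Y = np.arange(9.0)         # three radial blocks of size D + 1
step = D + 1

base = Y[0::step]          # array([0., 3., 6.]) -> unperturbed output per block
perturbed = np.column_stack([Y[(j + 1)::step] for j in range(D)])
print(perturbed)           # [[1. 2.] [4. 5.] [7. 8.]]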
def path_to_geom_dicts(path, skip_invalid=True):
"""
Converts a Path element into a list of geometry dictionaries,
preserving all value dimensions.
"""
interface = path.interface.datatype
if interface == 'geodataframe':
return [row.to_dict() for _, row in path.data.iterrows()]
elif interface == 'geom_dictionary':
return path.data
geoms = []
invalid = False
xdim, ydim = path.kdims
for i, path in enumerate(path.split(datatype='columns')):
array = np.column_stack([path.pop(xdim.name), path.pop(ydim.name)])
splits = np.where(np.isnan(array[:, :2].astype('float')).sum(axis=1))[0]
arrays = np.split(array, splits+1) if len(splits) else [array]
subpaths = []
for j, arr in enumerate(arrays):
if j != (len(arrays)-1):
arr = arr[:-1] # Drop nan
if len(arr) == 0:
continue
elif len(arr) == 1:
if skip_invalid:
continue
g = Point(arr[0])
invalid = True
else:
g = LineString(arr)
subpaths.append(g)
if invalid:
geoms += [dict(path, geometry=sp) for sp in subpaths]
continue
elif len(subpaths) == 1:
geom = subpaths[0]
elif subpaths:
geom = MultiLineString(subpaths)
path['geometry'] = geom
geoms.append(path)
return geoms
def merge_code(left_code, right_code):
"""
{ relative_line:
((left_abs_line, ((offset, op, args), ...)),
(right_abs_line, ((offset, op, args), ...))),
... }
"""
data = dict()
code_lines = (left_code and left_code.iter_code_by_lines()) or tuple()
for abs_line, rel_line, dis in code_lines:
data[rel_line] = [(abs_line, dis), None]
code_lines = (right_code and right_code.iter_code_by_lines()) or tuple()
for abs_line, rel_line, dis in code_lines:
found = data.get(rel_line, None)
if found is None:
found = [None, (abs_line, dis)]
data[rel_line] = found
else:
found[1] = (abs_line, dis)
return data
def _collate(self, batch):
"""
Puts each data field into a tensor.
:param batch: The input data batch.
:type batch: list of features
:return: Preprocessed data.
:rtype: torch.Tensor or pair of torch.Tensor
"""
if isinstance(batch[0], tuple):
return [self._cuda(torch.Tensor(samples)) for samples in list(zip(*batch))]
else:
return self._cuda(torch.Tensor(batch))
def functional(ifunctional):
"""
fun(fn) -> function or
fun(fn, args...) -> call of fn(args...)
:param ifunctional: f
:return: decorated function
"""
@wraps(ifunctional)
def wrapper(fn, *args, **kw):
fn = ifunctional(fn)
if args or kw:
return fn(*args, **kw)
else:
return fn
return wrapper
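# Usage sketch, assuming the `functional` decorator above (with functools.wraps
# imported) is in scope; `twice` is a hypothetical helper.
@functional
def twice(fn):
    """Wrap fn so it is applied two times."""
    return lambda *a, **kw: fn(fn(*a, **kw))

add6 = twice(lambda x: x + 3)      # fun(fn) -> returns the decorated function
print(add6(1))                     # 7
print(twice(lambda x: x + 3, 1))   # fun(fn, args...) -> calls it right away: 7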
def mean(data):
"""Return the sample arithmetic mean of data.
>>> mean([1, 2, 3, 4, 4])
2.8
>>> from fractions import Fraction as F
>>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)])
Fraction(13, 21)
>>> from decimal import Decimal as D
>>> mean([D("0.5"), D("0.75"), D("0.625"), D("0.375")])
Decimal('0.5625')
If ``data`` is empty, StatisticsError will be raised.
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('mean requires at least one data point')
return _sum(data)/n
def renew_secret(client, creds, opt):
"""Renews a secret. This will occur unless the user has
specified on the command line that it is not necessary"""
if opt.reuse_token:
return
seconds = grok_seconds(opt.lease)
if not seconds:
raise aomi.exceptions.AomiCommand("invalid lease %s" % opt.lease)
renew = None
if client.version:
v_bits = client.version.split('.')
if int(v_bits[0]) == 0 and \
int(v_bits[1]) <= 8 and \
int(v_bits[2]) <= 0:
r_obj = {
'increment': seconds
}
r_path = "v1/sys/renew/{0}".format(creds['lease_id'])
# Pending discussion on https://github.com/ianunruh/hvac/issues/148
# pylint: disable=protected-access
renew = client._post(r_path, json=r_obj).json()
if not renew:
renew = client.renew_secret(creds['lease_id'], seconds)
# sometimes it takes a bit for vault to respond
# if we are within 5s then we are fine
if not renew or (seconds - renew['lease_duration'] >= 5):
client.revoke_self_token()
e_msg = 'Unable to renew with desired lease'
raise aomi.exceptions.VaultConstraint(e_msg)
def set_trunk_groups(self, intf, value=None, default=False, disable=False):
"""Configures the switchport trunk group value
Args:
intf (str): The interface identifier to configure.
value (str): The set of values to configure the trunk group
default (bool): Configures the trunk group default value
disable (bool): Negates all trunk group settings
Returns:
True if the config operation succeeds otherwise False
"""
if default:
cmd = 'default switchport trunk group'
return self.configure_interface(intf, cmd)
if disable:
cmd = 'no switchport trunk group'
return self.configure_interface(intf, cmd)
current_value = self.get(intf)['trunk_groups']
failure = False
value = make_iterable(value)
for name in set(value).difference(current_value):
if not self.add_trunk_group(intf, name):
failure = True
for name in set(current_value).difference(value):
if not self.remove_trunk_group(intf, name):
failure = True
return not failure
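# A tiny standalone sketch of the reconciliation step above: the two set
# differences decide which trunk groups to add and which to remove
# (values are hypothetical).
current_value = {'red', 'blue'}              # groups currently configured
value = ['blue', 'green']                    # desired groups

to_add = set(value).difference(current_value)      # {'green'}
to_remove = set(current_value).difference(value)   # {'red'}
print(to_add, to_remove)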
def noam_norm(x, epsilon=1.0, name=None):
"""One version of layer normalization."""
with tf.name_scope(name, default_name="noam_norm", values=[x]):
shape = x.get_shape()
ndims = len(shape)
return (tf.nn.l2_normalize(x, ndims - 1, epsilon=epsilon) * tf.sqrt(
to_float(shape[-1])))
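# A NumPy sketch of the same normalization (an assumption-level re-statement,
# not the TensorFlow code itself): L2-normalize along the last axis, then
# rescale by sqrt(depth); epsilon guards against division by zero.
import numpy as np

def noam_norm_np(x, epsilon=1.0):
    depth = x.shape[-1]
    norm = np.sqrt(np.maximum(np.sum(x * x, axis=-1, keepdims=True), epsilon))
    return x / norm * np.sqrt(depth)

x = np.array([[3.0, 4.0]])
print(noam_norm_np(x))   # approx [[0.849, 1.131]]: the unit vector scaled by sqrt(2)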
def _find_child(self, tag):
"""Find the child C{etree.Element} with the matching C{tag}.
@raises L{WSDLParseError}: If more than one such elements are found.
"""
tag = self._get_namespace_tag(tag)
children = self._root.findall(tag)
if len(children) > 1:
raise WSDLParseError("Duplicate tag '%s'" % tag)
if len(children) == 0:
return None
return children[0]
def ratio(self, operand):
"""Calculate the ratio of this `Spectrogram` against a reference
Parameters
----------
operand : `str`, `FrequencySeries`, `Quantity`
a `~gwpy.frequencyseries.FrequencySeries` or
`~astropy.units.Quantity` to weight against, or one of
- ``'mean'`` : weight against the mean of each spectrum
in this Spectrogram
- ``'median'`` : weight against the median of each spectrum
in this Spectrogram
Returns
-------
spectrogram : `Spectrogram`
a new `Spectrogram`
Raises
------
ValueError
if ``operand`` is given as a `str` that isn't supported
"""
if isinstance(operand, string_types):
if operand == 'mean':
operand = self.mean(axis=0)
elif operand == 'median':
operand = self.median(axis=0)
else:
raise ValueError("operand %r unrecognised, please give a "
"Quantity or one of: 'mean', 'median'"
% operand)
out = self / operand
return out
def graphql_to_gremlin(schema, graphql_query, parameters, type_equivalence_hints=None):
"""Compile the GraphQL input using the schema into a Gremlin query and associated metadata.
Args:
schema: GraphQL schema object describing the schema of the graph to be queried
graphql_query: the GraphQL query to compile to Gremlin, as a string
parameters: dict, mapping argument name to its value, for every parameter the query expects.
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
a CompilationResult object, containing:
- query: string, the resulting compiled and parameterized query string
- language: string, specifying the language to which the query was compiled
- output_metadata: dict, output name -> OutputMetadata namedtuple object
- input_metadata: dict, name of input variables -> inferred GraphQL type, based on use
"""
compilation_result = compile_graphql_to_gremlin(
schema, graphql_query, type_equivalence_hints=type_equivalence_hints)
return compilation_result._replace(
query=insert_arguments_into_query(compilation_result, parameters))
def add_paginated_grid_widget(self, part_model, delete=False, edit=True, export=True, clone=True,
new_instance=False, parent_part_instance=None, max_height=None, custom_title=False,
emphasize_edit=False, emphasize_clone=False, emphasize_new_instance=True,
sort_property=None, sort_direction=SortTable.ASCENDING, page_size=25,
collapse_filters=False):
"""
Add a KE-chain paginatedGrid (e.g. paginated table widget) to the customization.
The widget will be saved to KE-chain.
:param emphasize_new_instance: Emphasize the New instance button (default True)
:type emphasize_new_instance: bool
:param emphasize_edit: Emphasize the Edit button (default False)
:type emphasize_edit: bool
:param emphasize_clone: Emphasize the Clone button (default False)
:type emphasize_clone: bool
:param new_instance: Show or hide the New instance button (default False). You need to provide a
`parent_part_instance` in order for this to work.
:type new_instance: bool
:param export: Show or hide the Export Grid button (default True)
:type export: bool
:param edit: Show or hide the Edit button (default True)
:type edit: bool
:param clone: Show or hide the Clone button (default True)
:type clone: bool
:param delete: Show or hide the Delete button (default False)
:type delete: bool
:param page_size: Number of parts that will be shown per page in the grid.
:type page_size: int
:param collapse_filters: Hide or show the filters pane (default False)
:type collapse_filters: bool
:param part_model: The part model based on which all instances will be shown.
:type parent_part_instance: :class:`Part` or UUID
:param parent_part_instance: The parent part instance for which the instances will be shown or to which new
instances will be added.
:type parent_part_instance: :class:`Part` or UUID
:param max_height: The max height of the paginated grid in pixels
:type max_height: int or None
:param custom_title: A custom title for the paginated grid::
* False (default): Part instance name
* String value: Custom title
* None: No title
:type custom_title: bool or basestring or None
:param sort_property: The property model on which the part instances are being sorted on
:type sort_property: :class:`Property` or UUID
:param sort_direction: The direction on which the values of property instances are being sorted on::
* ASC (default): Sort in ascending order
* DESC: Sort in descending order
:type sort_direction: basestring (see :class:`enums.SortTable`)
:raises IllegalArgumentError: When unknown or illegal arguments are passed.
"""
height = max_height
# Check whether the part_model is uuid type or class `Part`
if isinstance(part_model, Part):
part_model_id = part_model.id
elif isinstance(part_model, text_type) and is_uuid(part_model):
part_model_id = part_model
part_model = self._client.model(id=part_model_id)
else:
raise IllegalArgumentError("When using the add_paginated_grid_widget, part_model must be a Part or Part id."
" Type is: {}".format(type(part_model)))
# Check whether the parent_part_instance is uuid type or class `Part`
if isinstance(parent_part_instance, Part):
parent_part_instance_id = parent_part_instance.id
elif isinstance(parent_part_instance, text_type) and is_uuid(parent_part_instance):
parent_part_instance_id = parent_part_instance
parent_part_instance = self._client.part(id=parent_part_instance_id)
elif isinstance(parent_part_instance, type(None)):
parent_part_instance_id = None
else:
raise IllegalArgumentError("When using the add_paginated_grid_widget, parent_part_instance must be a "
"Part, Part id or None. Type is: {}".format(type(parent_part_instance)))
# Check whether the sort_property is uuid type or class `Property`
if isinstance(sort_property, Property):
sort_property_id = sort_property.id
elif isinstance(sort_property, text_type) and is_uuid(sort_property):
sort_property_id = sort_property
sort_property = self._client.property(id=sort_property_id, category=Category.MODEL)
elif isinstance(sort_property, type(None)):
sort_property_id = None
else:
raise IllegalArgumentError("When using the add_paginated_grid_widget, sort_property must be a "
"Property, Property id or None. Type is: {}".format(type(sort_property)))
# Assertions
if not parent_part_instance and new_instance:
raise IllegalArgumentError("If you want to allow the creation of new part instances, you must specify a "
"parent_part_instance")
if sort_property and sort_property.part.id != part_model.id:
raise IllegalArgumentError("If you want to sort on a property, then sort_property must be located under "
"part_model")
# Add custom title
if custom_title is False:
show_title_value = "Default"
title = part_model.name
elif custom_title is None:
show_title_value = "No title"
title = ' '
else:
show_title_value = "Custom title"
title = str(custom_title)
# Set the collapse filters value
if collapse_filters:
collapse_filters_value = "Collapsed"
else:
collapse_filters_value = "Expanded"
# Declare paginatedGrid config
config = {
"xtype": ComponentXType.FILTEREDGRID,
"filter": {
"activity_id": str(self.activity.id),
},
"grid": {
"viewModel": {
"data": {
"actions": {
"delete": delete,
"edit": edit,
"export": export,
"newInstance": new_instance,
"cloneInstance": clone
},
"sorters": [{
"direction": sort_direction,
"property": sort_property_id
}] if sort_property_id else [],
"ui": {
"newInstance": "primary-action" if emphasize_new_instance else "default-toolbar",
"edit": "primary-action" if emphasize_edit else "default-toolbar",
"cloneInstance": "primary-action" if emphasize_clone else "default-toolbar"
},
"pageSize": page_size
}
},
"xtype": ComponentXType.PAGINATEDSUPERGRID,
"title": title,
"showTitleValue": show_title_value,
},
"maxHeight": height if height else None,
"parentInstanceId": parent_part_instance_id,
"partModelId": part_model_id,
"collapseFilters": collapse_filters
}
# Declare the meta info for the paginatedGrid
meta = {
"cloneButtonUi": "primary-action" if emphasize_clone else "defualt-toolbar",
"cloneButtonVisible": clone,
"primaryCloneUiValue": emphasize_clone,
"parentInstanceId": parent_part_instance_id,
"editButtonUi": "primary-action" if emphasize_edit else "default-toolbar",
"editButtonVisible": edit,
"customHeight": height if height else None,
"primaryAddUiValue": emphasize_new_instance,
"activityId": str(self.activity.id),
"customTitle": title,
"primaryEditUiValue": emphasize_edit,
"downloadButtonVisible": export,
"addButtonUi": "primary-action" if emphasize_new_instance else "default-toolbar",
"deleteButtonVisible": delete,
"addButtonVisible": new_instance,
"showTitleValue": show_title_value,
"partModelId": str(part_model_id),
"showHeightValue": "Set height" if height else "Automatic height",
"sortDirection": sort_direction,
"sortedColumn": sort_property_id if sort_property_id else None,
"collapseFilters": collapse_filters,
"showCollapseFiltersValue": collapse_filters_value,
"customPageSize": page_size
}
self._add_widget(dict(config=config, meta=meta, name=WidgetNames.FILTEREDGRIDWIDGET))
def get(self):
""" Dequeue a state with the max priority """
# A shutdown has been requested
if self.is_shutdown():
return None
# if no more states in the queue, let's wait for some forks
while len(self._states) == 0:
# if no worker is running, bail out
if self.running == 0:
return None
# if a shutdown has been requested, bail out
if self.is_shutdown():
return None
# if there are actually some workers running, wait for state forks
logger.debug("Waiting for available states")
self._lock.wait()
state_id = self._policy.choice(list(self._states))
if state_id is None:
return None
del self._states[self._states.index(state_id)]
return state_id
def downgrade():
"""Downgrade database."""
# Remove 'created' and 'updated' columns
op.drop_column('oauthclient_remoteaccount', 'created')
op.drop_column('oauthclient_remoteaccount', 'updated')
op.drop_column('oauthclient_remotetoken', 'created')
op.drop_column('oauthclient_remotetoken', 'updated')
op.drop_column('oauthclient_useridentity', 'created')
op.drop_column('oauthclient_useridentity', 'updated')
def import_plugin(self, plugin):
'''
Import plugin by given name, looking at :attr:`namespaces`.
:param plugin: plugin module name
:type plugin: str
:raises PluginNotFoundError: if not found on any namespace
'''
names = [
'%s%s%s' % (namespace, '' if namespace[-1] == '_' else '.', plugin)
if namespace else
plugin
for namespace in self.namespaces
]
for name in names:
if name in sys.modules:
return sys.modules[name]
for name in names:
try:
__import__(name)
return sys.modules[name]
except (ImportError, KeyError):
pass
raise PluginNotFoundError(
'No plugin module %r found, tried %r' % (plugin, names),
plugin, names)
def _get_center(self):
'''Returns the center point of the path, disregarding transforms.
'''
w, h = self.layout.get_pixel_size()
x = (self.x + w / 2)
y = (self.y + h / 2)
return x, y
def a_torispherical(D, f, k):
r'''Calculates depth of a torispherical head according to [1]_.
.. math::
a = a_1 + a_2
.. math::
\alpha = \sin^{-1}\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\cos\alpha)
.. math::
a_2 = kD\cos\alpha
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
Returns
-------
a : float
Depth of head [m]
Examples
--------
Example from [1]_.
>>> a_torispherical(D=96., f=0.9, k=0.2)
25.684268924767125
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF'''
alpha = asin((1-2*k)/(2*(f-k)))
a1 = f*D*(1 - cos(alpha))
a2 = k*D*cos(alpha)
return a1 + a2
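# A quick worked check of the docstring example (D=96, f=0.9, k=0.2) using the
# same formulas as above.
from math import asin, cos

D, f, k = 96.0, 0.9, 0.2
alpha = asin((1 - 2*k) / (2*(f - k)))   # ~0.4429 rad
a1 = f*D*(1 - cos(alpha))               # ~8.34
a2 = k*D*cos(alpha)                     # ~17.35
print(a1 + a2)                          # ~25.684, matching the docstring value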
def linearize(self, index=0):
'''Linearize circular DNA at an index.
:param index: index at which to linearize.
:type index: int
:returns: A linearized version of the current sequence.
:rtype: coral.DNA
:raises: ValueError if the input is linear DNA.
'''
if not self.circular:
raise ValueError('Cannot relinearize linear DNA.')
copy = self.copy()
# Snip at the index
if index:
return copy[index:] + copy[:index]
copy.circular = False
copy.top.circular = False
copy.bottom.circular = False
return copy
def compare_files(path1, path2):
# type: (str, str) -> List[str]
"""Returns the delta between two files using -, ?, + format excluding
lines that are the same
Args:
path1 (str): Path to first file
path2 (str): Path to second file
Returns:
List[str]: Delta between the two files
"""
diff = difflib.ndiff(open(path1).readlines(), open(path2).readlines())
return [x for x in diff if x[0] in ['-', '+', '?']]
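# A self-contained sketch of the same difflib filtering on in-memory lines
# (the function above first reads the lines from the two files).
import difflib

left = ["alpha\n", "beta\n", "gamma\n"]
right = ["alpha\n", "beta!\n", "gamma\n"]

delta = [x for x in difflib.ndiff(left, right) if x[0] in ['-', '+', '?']]
print(delta)   # keeps only the '-', '+' and '?' lines for the changed "beta" row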
def change_username(self, username):
"""
Change username
:param username: email or str
:return:
"""
username = username.lower()
if self.username != username:
if self.get_by_username(username):
raise exceptions.AuthError("Username exists already")
self.update(username=username)
def create_native(self):
""" Create the native widget if not already done so. If the widget
is already created, this function does nothing.
"""
if self._backend is not None:
return
# Make sure that the app is active
assert self._app.native
# Instantiate the backend with the right class
self._app.backend_module.CanvasBackend(self, **self._backend_kwargs)
# self._backend = set by BaseCanvasBackend
self._backend_kwargs = None # Clean up
# Connect to draw event (append to the end)
# Process GLIR commands at each paint event
self.events.draw.connect(self.context.flush_commands, position='last')
if self._autoswap:
self.events.draw.connect((self, 'swap_buffers'),
ref=True, position='last')
def libvlc_vlm_set_output(p_instance, psz_name, psz_output):
'''Set the output for a media.
@param p_instance: the instance.
@param psz_name: the media to work on.
@param psz_output: the output MRL (the parameter to the "sout" variable).
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_set_output', None) or \
_Cfunction('libvlc_vlm_set_output', ((1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
return f(p_instance, psz_name, psz_output)
def extendMarkdown(self, md, md_globals):
""" Add FencedBlockPreprocessor to the Markdown instance. """
md.registerExtension(self)
md.preprocessors.add('fenced_code_block',
SpecialFencePreprocessor(md),
">normalize_whitespace") | 0.006452 |
def ctrl_request_update(_, nl_sock_h):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L37.
Positional arguments:
nl_sock_h -- nl_sock class instance.
Returns:
Integer, genl_send_simple() output.
"""
return int(genl_send_simple(nl_sock_h, GENL_ID_CTRL, CTRL_CMD_GETFAMILY, CTRL_VERSION, NLM_F_DUMP))
def set_custom_colorset(self):
"""Defines a colorset with matching colors. Provided by Joachim."""
cmd.set_color('myorange', '[253, 174, 97]')
cmd.set_color('mygreen', '[171, 221, 164]')
cmd.set_color('myred', '[215, 25, 28]')
cmd.set_color('myblue', '[43, 131, 186]')
cmd.set_color('mylightblue', '[158, 202, 225]')
cmd.set_color('mylightgreen', '[229, 245, 224]')
def parse_last_period(last):
"""
Parse the --last value and return the time difference in seconds.
"""
wordmap = {
'hour': '1h',
'day': '1d',
'week': '1w',
'month': '1m'
}
# seconds
multmap = {
'h': 3600,
'd': 86400,
'w': 604800,
'm': 2592000
}
if last in wordmap:
last = wordmap[last]
cat = last[-1:].lower()
if cat not in multmap:
raise TypeError
try:
num = int(last[:-1])
if num <= 0:
raise TypeError
except ValueError:
raise TypeError
diff = num * multmap[cat]
return diff
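# Usage sketch, assuming parse_last_period above is in scope.
print(parse_last_period('day'))   # 86400   ('day' -> '1d' -> 1 * 86400)
print(parse_last_period('2w'))    # 1209600 (2 * 604800)
print(parse_last_period('12h'))   # 43200   (12 * 3600)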
def update_params(self, params):
"""
update connection params to maximize performance
"""
if not params.get('BINARY', True):
raise Warning('To increase performance please use ElastiCache'
' in binary mode')
else:
params['BINARY'] = True # patch params, set binary mode
if 'OPTIONS' not in params:
# set special 'behaviors' pylibmc attributes
params['OPTIONS'] = {
'tcp_nodelay': True,
'ketama': True
}
def most_populated(adf):
"""
Looks at each column, using the one with the most values
Honours the Trump override/failsafe logic. """
# just look at the feeds, ignore overrides and failsafes:
feeds_only = adf[adf.columns[1:-1]]
# find the most populated feed
cnt_df = feeds_only.count()
cnt = cnt_df.max()
selected_feeds = cnt_df[cnt_df == cnt]
# if there aren't any feeds, the first feed will work...
if len(selected_feeds) == 0:
pre_final = adf['feed001'] # if they are all empty
# they should all be
# equally empty
else:
#if there's one or more, take the highest priority one
pre_final = adf[selected_feeds.index[0]]
# create the final, applying the override and failsafe logic...
final_df = pd.concat([adf.override_feed000,
pre_final,
adf.failsafe_feed999], axis=1)
final_df = final_df.apply(_row_wise_priority, axis=1)
return final_df
def from_collection_xml(cls, xml_content):
"""Build a :class:`~zenodio.harvest.Datacite3Collection` from
Datecite3-formatted XML.
Users should use :func:`zenodio.harvest.harvest_collection` to build a
:class:`~zenodio.harvest.Datacite3Collection` for a Community.
Parameters
----------
xml_content : str
Datacite3-formatted XML content.
Returns
-------
collection : :class:`Datacite3Collection`
The collection parsed from Zenodo OAI-PMH XML content.
"""
xml_dataset = xmltodict.parse(xml_content, process_namespaces=False)
# Unwrap the record list when harvesting a collection's datacite 3
xml_records = xml_dataset['OAI-PMH']['ListRecords']['record'] # NOQA
return cls(xml_records)
def setup_hfb_pars(self):
"""setup non-mult parameters for hfb (yuck!)
"""
if self.m.hfb6 is None:
self.logger.lraise("couldn't find hfb pak")
tpl_file,df = pyemu.gw_utils.write_hfb_template(self.m)
self.in_files.append(os.path.split(tpl_file.replace(".tpl",""))[-1])
self.tpl_files.append(os.path.split(tpl_file)[-1])
self.par_dfs["hfb"] = df
def extend(self, *iterables):
"""Add all values of all iterables at the end of the list
Args:
iterables: iterable which content to add at the end
Example:
>>> from ww import l
>>> lst = l([])
>>> lst.extend([1, 2])
[1, 2]
>>> lst
[1, 2]
>>> lst.extend([3, 4]).extend([5, 6])
[1, 2, 3, 4, 5, 6]
>>> lst
[1, 2, 3, 4, 5, 6]
"""
for value in iterables:
list.extend(self, value)
return self
def abort(bot, config, settings):
"""Run the abort command of a specified BOT by label e.g. 'MyBot'"""
print_options(bot, config, settings)
click.echo()
bot_task = BotTask(bot, config)
bot_task.abort()
def round_point_coords(pt, precision):
"""
Round the coordinates of a shapely Point to some decimal precision.
Parameters
----------
pt : shapely Point
the Point to round the coordinates of
precision : int
decimal precision to round coordinates to
Returns
-------
Point
"""
return Point([round(x, precision) for x in pt.coords[0]])
def _resetFTDI(self):
""" reset the FTDI device
"""
if not self._isFTDI:
return
txdir = 0 # 0:OUT, 1:IN
req_type = 2 # 0:std, 1:class, 2:vendor
recipient = 0 # 0:device, 1:interface, 2:endpoint, 3:other
req_type = (txdir << 7) + (req_type << 5) + recipient
self.device.ctrl_transfer(
bmRequestType=req_type,
bRequest=0, # RESET
wValue=0, # RESET
wIndex=1,
data_or_wLength=0)
def torque_on(self):
""" Enable the torques of Herkulex
In this mode, position control and velocity control
will work.
Args:
none
"""
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(0x01)
data.append(0x60)
send_data(data)
def run(self):
"""Launch the broker(s) and worker(s) assigned on every hosts."""
# Launch the broker(s)
for hostname, nb_brokers in self.broker_hosts:
for ind in range(nb_brokers):
if self.externalHostname in utils.localHostnames:
self.brokers.append(localBroker(
debug=self.debug,
nice=self.nice,
backend=self.backend,
))
else:
self.brokers.append(remoteBroker(
hostname=hostname,
pythonExecutable=self.python_executable,
debug=self.debug,
nice=self.nice,
backend=self.backend,
rsh=self.rsh,
ssh_executable=self.ssh_executable,
))
# Share connection information between brokers
if self.b > 1:
for broker in self.brokers:
# Only send data of other brokers to a given broker
connect_data = [
BrokerInfo(
x.getHost(),
*x.getPorts(),
externalHostname=x.getHost()
)
for x in self.brokers
if x is not broker
]
broker.sendConnect(connect_data)
# Launch the workers
shells = []
origin_launched = False
for hostname, nb_workers in self.worker_hosts:
self.workers.append(self.LAUNCH_HOST_CLASS(hostname, self.rsh,
self.ssh_executable))
total_workers_host = min(nb_workers, self.workersLeft)
self.setWorkerInfo(hostname, total_workers_host, not origin_launched)
origin_launched = True
self.workersLeft -= total_workers_host
# Launch every workers at the same time
scoop.logger.debug(
"{0}: Launching '{1}'".format(
hostname,
self.workers[-1].getCommand(),
)
)
shells.append(self.workers[-1].launch(
(self.brokers[0].brokerPort,
self.brokers[0].infoPort)
if self.tunnel else None,
))
if self.workersLeft <= 0:
# We've launched every worker we needed, so let's exit the loop
break
rootProcess = shells[0][0]
# Wait for the root program
try:
if self.workers[0].isLocal():
self.errors = self.workers[0].subprocesses[0].wait()
else:
self.errors = rootProcess.wait()
except KeyboardInterrupt:
pass
scoop.logger.info('Root process is done.')
return self.errors
def codemirror_field_js_bundle(field):
"""
Filter to get CodeMirror Javascript bundle name needed for a single field.
Example:
::
{% load djangocodemirror_tags %}
{{ form.myfield|codemirror_field_js_bundle }}
Arguments:
field (django.forms.fields.Field): A form field that contains a widget
:class:`djangocodemirror.widget.CodeMirrorWidget`.
Raises:
CodeMirrorFieldBundleError: If Codemirror configuration form field
does not have a bundle name.
Returns:
string: Bundle name to load with webassets.
"""
manifesto = CodemirrorAssetTagRender()
manifesto.register_from_fields(field)
try:
bundle_name = manifesto.js_bundle_names()[0]
except IndexError:
msg = ("Given field with configuration name '{}' does not have a "
"Javascript bundle name")
raise CodeMirrorFieldBundleError(msg.format(field.config_name))
return bundle_name
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(set(iterable))
combs = chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
res = set(frozenset(x) for x in combs)
# res = map(frozenset, combs)
return res
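# Usage sketch, assuming powerset above (with itertools.chain/combinations
# imported) is in scope; the result is a set of frozensets, shown sorted by size.
for subset in sorted(powerset([1, 2, 3]), key=lambda s: (len(s), sorted(s))):
    print(set(subset))
# set(), {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}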
def print_terminal_table(headers, data_list, parse_row_fn):
"""Uses a set of headers, raw data, and a row parsing function, to print
data to the terminal in a table of rows and columns.
Args:
headers (tuple of strings): The headers for each column of data
data_list (list of dicts): Raw response data from the validator
parse_row_fn (function): Parses a dict of data into a tuple of columns
Expected args:
data (dict): A single response object from the validator
Expected return:
cols (tuple): The properties to display in each column
"""
data_iter = iter(data_list)
try:
example = next(data_iter)
example_row = parse_row_fn(example)
data_iter = itertools.chain([example], data_iter)
except StopIteration:
example_row = [''] * len(headers)
format_string = format_terminal_row(headers, example_row)
top_row = format_string.format(*headers)
print(top_row[0:-3] if top_row.endswith('...') else top_row)
for data in data_iter:
print(format_string.format(*parse_row_fn(data)))
def require_server(fn):
"""
Checks if the user has called the task with a server name.
Fabric tasks decorated with this decorator must be called like so::
fab <server name> <task name>
If no server name is given, the task will not be executed.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
if env.machine is None:
abort(red('ERROR: You must provide a server name to call this'
' task!'))
return fn(*args, **kwargs)
return wrapper
def parse(filename):
"""Parses file content into events stream"""
for event, elt in et.iterparse(filename, events= ('start', 'end', 'comment', 'pi'), huge_tree=True):
if event == 'start':
obj = _elt2obj(elt)
obj['type'] = ENTER
yield obj
if elt.text:
yield {'type': TEXT, 'text': elt.text}
elif event == 'end':
yield {'type': EXIT}
if elt.tail:
yield {'type': TEXT, 'text': elt.tail}
elt.clear()
elif event == 'comment':
yield {'type': COMMENT, 'text': elt.text}
elif event == 'pi':
yield {'type': PI, 'text': elt.text}
else:
assert False, (event, elt)
def delete_network(context, id):
"""Delete a network.
: param context: neutron api request context
: param id: UUID representing the network to delete.
"""
LOG.info("delete_network %s for tenant %s" % (id, context.tenant_id))
with context.session.begin():
net = db_api.network_find(context=context, limit=None, sorts=['id'],
marker=None, page_reverse=False, id=id,
scope=db_api.ONE)
if not net:
raise n_exc.NetworkNotFound(net_id=id)
if not context.is_admin:
if STRATEGY.is_provider_network(net.id):
raise n_exc.NotAuthorized(net_id=id)
if net.ports:
raise n_exc.NetworkInUse(net_id=id)
net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"])
net_driver.delete_network(context, id)
for subnet in net["subnets"]:
subnets._delete_subnet(context, subnet)
db_api.network_delete(context, net)
def get(self, sid):
"""
Constructs a AddressContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.address.AddressContext
:rtype: twilio.rest.api.v2010.account.address.AddressContext
"""
return AddressContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )
def unique_row(array, use_columns=None, selected_columns_only=False):
'''Takes a numpy array and returns the array reduced to unique rows. If columns are defined only these columns are taken to define a unique row.
The returned array can have all columns of the original array or only the columns defined in use_columns.
Parameters
----------
array : numpy.ndarray
use_columns : list
Index of columns to be used to define a unique row
selected_columns_only : bool
If true only the columns defined in use_columns are returned
Returns
-------
numpy.ndarray
'''
if array.dtype.names is None: # normal array has no named dtype
if use_columns is not None:
a_cut = array[:, use_columns]
else:
a_cut = array
if use_columns is None or len(use_columns) > 1:
b = np.ascontiguousarray(a_cut).view(np.dtype((np.void, a_cut.dtype.itemsize * a_cut.shape[1])))
else:
b = np.ascontiguousarray(a_cut)
_, index = np.unique(b, return_index=True)
if not selected_columns_only:
return array[np.sort(index)] # sort to preserve order
else:
return a_cut[np.sort(index)] # sort to preserve order
else: # names for dtype found --> array is recarray
names = list(array.dtype.names)
if use_columns is not None:
new_names = [names[i] for i in use_columns]
else:
new_names = names
a_cut, index = np.unique(array[new_names], return_index=True)
if not selected_columns_only:
return array[np.sort(index)] # sort to preserve order
else:
return array[np.sort(index)][new_names]
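# A small sketch on a structured (record) array, assuming unique_row above is
# in scope; use_columns picks which named fields define row uniqueness
# (data hypothetical).
import numpy as np

hits = np.array([(0, 1, 0.5), (0, 1, 0.7), (1, 2, 0.1)],
                dtype=[('event', 'i4'), ('channel', 'i4'), ('charge', 'f4')])

print(unique_row(hits, use_columns=[0, 1]))
# [(0, 1, 0.5) (1, 2, 0.1)] -- the second (0, 1, ...) row is dropped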
def upload_files(self, source_paths, dir_name=None):
'''Batch-create upload tasks; sub-directories are scanned and uploaded in turn.
source_paths - absolute paths of the local files
dir_name - parent directory of the files on the server; if None, a
dialog is shown so the user can pick a directory.
'''
def scan_folders(folder_path):
file_list = os.listdir(folder_path)
source_paths = [os.path.join(folder_path, f) for f in file_list]
self.upload_files(source_paths,
os.path.join(dir_name, os.path.split(folder_path)[1]))
self.check_first()
if not dir_name:
folder_dialog = FolderBrowserDialog(self, self.app)
response = folder_dialog.run()
if response != Gtk.ResponseType.OK:
folder_dialog.destroy()
return
dir_name = folder_dialog.get_path()
folder_dialog.destroy()
invalid_paths = []
for source_path in source_paths:
if util.validate_pathname(source_path) != ValidatePathState.OK:
invalid_paths.append(source_path)
continue
if (os.path.split(source_path)[1].startswith('.') and
not self.app.profile['upload-hidden-files']):
continue
if os.path.isfile(source_path):
self.upload_file(source_path, dir_name)
elif os.path.isdir(source_path):
scan_folders(source_path)
self.app.blink_page(self)
self.scan_tasks()
if not invalid_paths:
return
dialog = Gtk.Dialog(_('Invalid Filepath'), self.app.window,
Gtk.DialogFlags.MODAL,
(Gtk.STOCK_CLOSE, Gtk.ResponseType.OK))
dialog.set_default_size(640, 480)
dialog.set_border_width(10)
box = dialog.get_content_area()
scrolled_window = Gtk.ScrolledWindow()
box.pack_start(scrolled_window, True, True, 0)
text_buffer = Gtk.TextBuffer()
textview = Gtk.TextView.new_with_buffer(text_buffer)
scrolled_window.add(textview)
for invalid_path in invalid_paths:
text_buffer.insert_at_cursor(invalid_path)
text_buffer.insert_at_cursor('\n')
infobar = Gtk.InfoBar()
infobar.set_message_type(Gtk.MessageType.ERROR)
box.pack_end(infobar, False, False, 0)
info_label= Gtk.Label()
infobar.get_content_area().pack_start(info_label, False, False, 0)
info_label.set_label(''.join([
'* ', ValidatePathStateText[1], '\n',
'* ', ValidatePathStateText[2], '\n',
'* ', ValidatePathStateText[3], '\n',
]))
box.show_all()
dialog.run()
dialog.destroy()
def set_column_si_format(tree_column, model_column_index, cell_renderer=None,
digits=2):
'''
Set the text of a numeric cell according to [SI prefixes][1]
For example, `1000 -> '1.00k'`.
[1]: https://en.wikipedia.org/wiki/Metric_prefix#List_of_SI_prefixes
Args:
tree_column (gtk.TreeViewColumn) : Tree view to append columns to.
model_column_index (int) : Index in list store model corresponding to
tree view column.
cell_renderer (gtk.CellRenderer) : Cell renderer for column. If
`None`, defaults to all cell renderers for column.
digits (int) : Number of digits after decimal (default=2).
Returns:
None
'''
def set_property(column, cell_renderer, list_store, iter, store_i):
cell_renderer.set_property('text', si_format(list_store[iter][store_i],
digits))
if cell_renderer is None:
cells = tree_column.get_cells()
else:
cells = [cell_renderer]
for cell_renderer_i in cells:
tree_column.set_cell_data_func(cell_renderer_i, set_property,
model_column_index)
def add_channel(self, chname, workspace=None,
num_images=None, settings=None,
settings_template=None,
settings_share=None, share_keylist=None):
"""Create a new Ginga channel.
Parameters
----------
chname : str
The name of the channel to create.
workspace : str or None
The name of the workspace in which to create the channel
num_images : int or None
The cache size for the number of images to keep in memory
settings : `~ginga.misc.Settings.SettingGroup` or `None`
Viewer preferences. If not given, one will be created.
settings_template : `~ginga.misc.Settings.SettingGroup` or `None`
Viewer preferences template
settings_share : `~ginga.misc.Settings.SettingGroup` or `None`
Viewer preferences instance to share with newly created settings
share_keylist : list of str
List of names of settings that should be shared
Returns
-------
channel : `~ginga.misc.Bunch.Bunch`
The channel info bunch.
"""
with self.lock:
if self.has_channel(chname):
return self.get_channel(chname)
if chname in self.ds.get_tabnames(None):
raise ValueError("Tab name already in use: '%s'" % (chname))
name = chname
if settings is None:
settings = self.prefs.create_category('channel_' + name)
try:
settings.load(onError='raise')
except Exception as e:
self.logger.warning("no saved preferences found for channel "
"'%s': %s" % (name, str(e)))
# copy template settings to new channel
if settings_template is not None:
osettings = settings_template
osettings.copy_settings(settings)
else:
try:
# use channel_Image as a template if one was not
# provided
osettings = self.prefs.get_settings('channel_Image')
self.logger.debug("Copying settings from 'Image' to "
"'%s'" % (name))
osettings.copy_settings(settings)
except KeyError:
pass
if (share_keylist is not None) and (settings_share is not None):
# caller wants us to share settings with another viewer
settings_share.share_settings(settings, keylist=share_keylist)
# Make sure these preferences are at least defined
if num_images is None:
num_images = settings.get('numImages',
self.settings.get('numImages', 1))
settings.set_defaults(switchnew=True, numImages=num_images,
raisenew=True, genthumb=True,
focus_indicator=False,
preload_images=False, sort_order='loadtime')
self.logger.debug("Adding channel '%s'" % (chname))
channel = Channel(chname, self, datasrc=None,
settings=settings)
bnch = self.add_viewer(chname, settings,
workspace=workspace)
# for debugging
bnch.image_viewer.set_name('channel:%s' % (chname))
opmon = self.get_plugin_manager(self.logger, self,
self.ds, self.mm)
channel.widget = bnch.widget
channel.container = bnch.container
channel.workspace = bnch.workspace
channel.connect_viewer(bnch.image_viewer)
channel.viewer = bnch.image_viewer
# older name, should eventually be deprecated
channel.fitsimage = bnch.image_viewer
channel.opmon = opmon
name = chname.lower()
self.channel[name] = channel
# Update the channels control
self.channel_names.append(chname)
self.channel_names.sort()
if len(self.channel_names) == 1:
self.cur_channel = channel
# Prepare local plugins for this channel
for spec in self.get_plugins():
opname = spec.get('klass', spec.get('module'))
if spec.get('ptype', 'global') == 'local':
opmon.load_plugin(opname, spec, chinfo=channel)
self.make_gui_callback('add-channel', channel)
return channel
def parse_netloc(scheme, netloc):
"""Parse netloc string."""
auth, _netloc = netloc.split('@')
sender, token = auth.split(':')
if ':' in _netloc:
domain, port = _netloc.split(':')
port = int(port)
else:
domain = _netloc
if scheme == 'https':
port = 443
else:
port = 80
return dict(sender=sender, token=token, domain=domain, port=port)
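# Usage sketch, assuming parse_netloc above is in scope.
print(parse_netloc('https', 'alice:s3cr3t@example.com'))
# {'sender': 'alice', 'token': 's3cr3t', 'domain': 'example.com', 'port': 443}
print(parse_netloc('http', 'alice:s3cr3t@example.com:8080'))
# {'sender': 'alice', 'token': 's3cr3t', 'domain': 'example.com', 'port': 8080}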
def gradient(self):
r"""Gradient operator of the functional.
Notes
-----
The derivative is computed using the quotient rule:
.. math::
[\nabla (f / g)](p) = (g(p) [\nabla f](p) -
f(p) [\nabla g](p)) / g(p)^2
"""
func = self
class FunctionalQuotientGradient(Operator):
"""Functional representing the gradient of ``f(.) / g(.)``."""
def _call(self, x):
"""Apply the functional to the given point."""
dividendx = func.dividend(x)
divisorx = func.divisor(x)
return ((1 / divisorx) * func.dividend.gradient(x) +
(- dividendx / divisorx**2) * func.divisor.gradient(x))
return FunctionalQuotientGradient(self.domain, self.domain,
linear=False)
def IIR_bsf(f_pass1, f_stop1, f_stop2, f_pass2, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter'):
"""
Design an IIR bandstop filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Mark Wickert October 2016
"""
b,a = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
print('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def UpdateOsLogin(self, oslogin_desired, two_factor_desired=False):
"""Update whether OS Login is enabled and update NSS cache if necessary.
Args:
oslogin_desired: bool, enable OS Login if True, disable if False.
two_factor_desired: bool, enable two factor if True, disable if False.
Returns:
int, the return code from updating OS Login, or None if not present.
"""
oslogin_configured = self._GetStatus(two_factor=False)
if oslogin_configured is None:
return None
two_factor_configured = self._GetStatus(two_factor=True)
# Two factor can only be enabled when OS Login is enabled.
two_factor_desired = two_factor_desired and oslogin_desired
if oslogin_desired:
params = ['activate']
if two_factor_desired:
params += ['--twofactor']
# OS Login is desired and not enabled.
if not oslogin_configured:
self.logger.info('Activating OS Login.')
return self._RunOsLoginControl(params) or self._RunOsLoginNssCache()
# Enable two factor authentication.
if two_factor_desired and not two_factor_configured:
self.logger.info('Activating OS Login two factor authentication.')
return self._RunOsLoginControl(params) or self._RunOsLoginNssCache()
# Deactivate two factor authentication.
if two_factor_configured and not two_factor_desired:
self.logger.info('Reactivating OS Login with two factor disabled.')
return (self._RunOsLoginControl(['deactivate'])
or self._RunOsLoginControl(params))
# OS Login features are already enabled. Update the cache if appropriate.
current_time = time.time()
if current_time - self.update_time > NSS_CACHE_DURATION_SEC:
self.update_time = current_time
return self._RunOsLoginNssCache()
elif oslogin_configured:
self.logger.info('Deactivating OS Login.')
return (self._RunOsLoginControl(['deactivate'])
or self._RemoveOsLoginNssCache())
# No action was needed.
return 0
def local_temp_dir():
"""
Creates a local temporary directory. The directory is removed when no longer needed. Failure to do
so will be ignored.
:return: Path to the temporary directory.
:rtype: unicode
"""
path = tempfile.mkdtemp()
yield path
shutil.rmtree(path, ignore_errors=True)
def remove_prefix(self, prefix):
"""Removes prefix from this set. This is a no-op if the prefix
doesn't exist in it.
"""
if prefix not in self.__prefix_map:
return
ni = self.__lookup_prefix(prefix)
ni.prefixes.discard(prefix)
del self.__prefix_map[prefix]
# If we removed the preferred prefix, find a new one.
if ni.preferred_prefix == prefix:
ni.preferred_prefix = next(iter(ni.prefixes), None)
def update(self, data):
'''Updates object information with live data (if live data has
different values to stored object information). Changes will be
automatically applied, but not persisted in the database. Call
`db.session.add(elb)` manually to commit the changes to the DB.
Args:
# data (:obj:) AWS API Resource object fetched from AWS API
data (:dict:) Dict representing ELB data retrieved from ELB client
Returns:
True if there were any changes to the object, False otherwise
'''
updated = self.set_property('lb_name', data['LoadBalancerName'])
updated |= self.set_property('dns_name', data['DNSName'])
if 'CanonicalHostedZoneName' not in data:
data['CanonicalHostedZoneName'] = None
updated |= self.set_property(
'canonical_hosted_zone_name',
data['CanonicalHostedZoneName']
)
# Apparently you can get an ELB that doesn't have a parent VPC
if 'VPCId' in data:
updated |= self.set_property('vpc_id', data['VPCId'])
else:
updated |= self.set_property('vpc_id', 'None')
# Instances
# ELBs list instances as [{'InstanceId': <instance_id>}, ...] Sigh.
instances = [instance['InstanceId'] for instance in data['Instances']]
if sorted(instances) != sorted(self.get_property('instances')):
self.set_property('instances', instances)
updated = True
# Tags (not currently in use, but for future reference)
if 'Tags' not in data:
data['Tags'] = {}
tags = {x['Key']: x['Value'] for x in data['Tags'] or {}}
existing_tags = {x.key: x for x in self.tags}
# Check for updated or removed tags
for key in list(existing_tags.keys()):
if key not in tags:
updated |= self.delete_tag(key)
# Metrics
if 'Metrics' not in data:
data['Metrics'] = {}
updated |= self.set_property('metrics', data['Metrics'])
return updated
def send_status_message(self, object_id, status):
"""Send a message to the `status_queue` to update a job's status.
Returns `True` if the message was sent, else `False`
Args:
object_id (`str`): ID of the job that was executed
status (:obj:`SchedulerStatus`): Status of the job
Returns:
`bool`
"""
try:
body = json.dumps({
'id': object_id,
'status': status
})
self.status_queue.send_message(
MessageBody=body,
MessageGroupId='job_status',
MessageDeduplicationId=get_hash((object_id, status))
)
return True
except Exception as ex:
print(ex)
return False
def distances(self):
"""The matrix with the all-pairs shortest path lenghts"""
from molmod.ext import graphs_floyd_warshall
distances = np.zeros((self.num_vertices,)*2, dtype=int)
#distances[:] = -1 # set all -1, which is just a very big integer
#distances.ravel()[::len(distances)+1] = 0 # set diagonal to zero
for i, j in self.edges: # set edges to one
distances[i, j] = 1
distances[j, i] = 1
graphs_floyd_warshall(distances)
return distances
def _read_mode_tr(self, size, kind):
"""Read Traceroute option.
Positional arguments:
size - int, length of option
kind - int, 82 (TR)
Returns:
* dict -- extracted Traceroute (TR) option
Structure of Traceroute (TR) option [RFC 1393][RFC 6814]:
0 8 16 24
+-+-+-+-+-+-+-+-+---------------+---------------+---------------+
|F| C | Number | Length | ID Number |
+-+-+-+-+-+-+-+-+---------------+---------------+---------------+
| Outbound Hop Count | Return Hop Count |
+---------------+---------------+---------------+---------------+
| Originator IP Address |
+---------------+---------------+---------------+---------------+
Octets Bits Name Description
0 0 ip.tr.kind Kind (82)
0 0 ip.tr.type.copy Copied Flag (0)
0 1 ip.tr.type.class Option Class (0)
0 3 ip.tr.type.number Option Number (18)
1 8 ip.tr.length Length (12)
2 16 ip.tr.id ID Number
4 32 ip.tr.ohc Outbound Hop Count
6 48 ip.tr.rhc Return Hop Count
8 64 ip.tr.ip Originator IP Address
"""
if size != 12:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
_idnm = self._read_unpack(2)
_ohcn = self._read_unpack(2)
_rhcn = self._read_unpack(2)
_ipad = self._read_ipv4_addr()
data = dict(
kind=kind,
type=self._read_opt_type(kind),
length=size,
id=_idnm,
ohc=_ohcn,
rhc=_rhcn,
ip=_ipad,
)
return data
def sort(self, *keys):
"""
Add sorting information to the search request. If called without
arguments it will remove all sort requirements. Otherwise it will
replace them. Acceptable arguments are::
'some.field'
'-some.other.field'
{'different.field': {'any': 'dict'}}
so for example::
s = Search().sort(
'category',
'-title',
{"price" : {"order" : "asc", "mode" : "avg"}}
)
will sort by ``category``, ``title`` (in descending order) and
``price`` in ascending order using the ``avg`` mode.
The API returns a copy of the Search object and can thus be chained.
"""
s = self._clone()
s._sort = []
for k in keys:
if isinstance(k, string_types) and k.startswith('-'):
if k[1:] == '_score':
raise IllegalOperation('Sorting by `-_score` is not allowed.')
k = {k[1:]: {"order": "desc"}}
s._sort.append(k)
return s
def reverse_lazy_with_query(named_url,**kwargs):
"Reverse named URL with GET query (lazy version)"
q = QueryDict('',mutable=True)
q.update(kwargs)
return '{}?{}'.format(reverse_lazy(named_url),q.urlencode())
def qqplot(x, dist='norm', sparams=(), confidence=.95, figsize=(5, 4),
ax=None):
"""Quantile-Quantile plot.
Parameters
----------
x : array_like
Sample data.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
`scipy.stats.distributions` instance (i.e. they have a ``ppf`` method)
are also accepted.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters, location,
and scale). See :py:func:`scipy.stats.probplot` for more details.
confidence : float
Confidence level (.95 = 95%) for point-wise confidence envelope.
Pass False for no envelope.
figsize : tuple
Figsize in inches
ax : matplotlib axes
Axis on which to draw the plot
Returns
-------
ax : Matplotlib Axes instance
Returns the Axes object with the plot for further tweaking.
Notes
-----
This function returns a scatter plot of the quantile of the sample data `x`
against the theoretical quantiles of the distribution given in `dist`
(default = 'norm').
The points plotted in a Q–Q plot are always non-decreasing when viewed
from left to right. If the two distributions being compared are identical,
the Q–Q plot follows the 45° line y = x. If the two distributions agree
after linearly transforming the values in one of the distributions,
then the Q–Q plot follows some line, but not necessarily the line y = x.
If the general trend of the Q–Q plot is flatter than the line y = x,
the distribution plotted on the horizontal axis is more dispersed than
the distribution plotted on the vertical axis. Conversely, if the general
trend of the Q–Q plot is steeper than the line y = x, the distribution
plotted on the vertical axis is more dispersed than the distribution
plotted on the horizontal axis. Q–Q plots are often arced, or "S" shaped,
indicating that one of the distributions is more skewed than the other,
or that one of the distributions has heavier tails than the other.
In addition, the function also plots a best-fit line (linear regression)
for the data and annotates the plot with the coefficient of
determination :math:`R^2`. Note that the intercept and slope of the
linear regression between the quantiles gives a measure of the relative
location and relative scale of the samples.
References
----------
.. [1] https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot
.. [2] https://github.com/cran/car/blob/master/R/qqPlot.R
.. [3] Fox, J. (2008), Applied Regression Analysis and Generalized Linear
Models, 2nd Ed., Sage Publications, Inc.
Examples
--------
Q-Q plot using a normal theoretical distribution:
.. plot::
>>> import numpy as np
>>> import pingouin as pg
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> ax = pg.qqplot(x, dist='norm')
Two Q-Q plots using two separate axes:
.. plot::
>>> import numpy as np
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> x_exp = np.random.exponential(size=50)
>>> fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4))
>>> ax1 = pg.qqplot(x, dist='norm', ax=ax1, confidence=False)
>>> ax2 = pg.qqplot(x_exp, dist='expon', ax=ax2)
Using custom location / scale parameters as well as another Seaborn style
.. plot::
>>> import numpy as np
>>> import seaborn as sns
>>> import pingouin as pg
>>> import matplotlib.pyplot as plt
>>> np.random.seed(123)
>>> x = np.random.normal(size=50)
>>> mean, std = 0, 0.8
>>> sns.set_style('darkgrid')
>>> ax = pg.qqplot(x, dist='norm', sparams=(mean, std))
"""
if isinstance(dist, str):
dist = getattr(stats, dist)
x = np.asarray(x)
x = x[~np.isnan(x)] # NaN are automatically removed
# Extract quantiles and regression
quantiles = stats.probplot(x, sparams=sparams, dist=dist, fit=False)
theor, observed = quantiles[0], quantiles[1]
fit_params = dist.fit(x)
loc = fit_params[-2]
scale = fit_params[-1]
shape = fit_params[0] if len(fit_params) == 3 else None
# Observed values to observed quantiles
if loc != 0 and scale != 1:
observed = (np.sort(observed) - fit_params[-2]) / fit_params[-1]
# Linear regression
slope, intercept, r, _, _ = stats.linregress(theor, observed)
# Start the plot
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.plot(theor, observed, 'bo')
stats.morestats._add_axis_labels_title(ax,
xlabel='Theoretical quantiles',
ylabel='Ordered quantiles',
title='Q-Q Plot')
# Add diagonal line
end_pts = [ax.get_xlim(), ax.get_ylim()]
end_pts[0] = min(end_pts[0])
end_pts[1] = max(end_pts[1])
ax.plot(end_pts, end_pts, color='slategrey', lw=1.5)
ax.set_xlim(end_pts)
ax.set_ylim(end_pts)
# Add regression line and annotate R2
fit_val = slope * theor + intercept
ax.plot(theor, fit_val, 'r-', lw=2)
posx = end_pts[0] + 0.60 * (end_pts[1] - end_pts[0])
posy = end_pts[0] + 0.10 * (end_pts[1] - end_pts[0])
ax.text(posx, posy, "$R^2=%.3f$" % r**2)
if confidence is not False:
# Confidence envelope
n = x.size
P = _ppoints(n)
crit = stats.norm.ppf(1 - (1 - confidence) / 2)
pdf = dist.pdf(theor) if shape is None else dist.pdf(theor, shape)
se = (slope / pdf) * np.sqrt(P * (1 - P) / n)
upper = fit_val + crit * se
lower = fit_val - crit * se
ax.plot(theor, upper, 'r--', lw=1.25)
ax.plot(theor, lower, 'r--', lw=1.25)
return ax | 0.000163 |
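The confidence envelope above depends on a helper _ppoints(n) that is not included in this snippet. The sketch below is an assumed minimal implementation of such a plotting-position function (Blom-style offset of 3/8 for small samples, 0.5 otherwise), not the package's actual code.
import numpy as np

def _ppoints(n, a=None):
    """Ordinates for probability plotting: (k - a) / (n + 1 - 2a) for k = 1..n."""
    if a is None:
        a = 0.375 if n <= 10 else 0.5
    return (np.arange(n) + 1 - a) / (n + 1 - 2 * a)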
def make_spondaic(self, scansion: str) -> str:
"""
If a pentameter line has 12 syllables, then it must start with double spondees.
:param scansion: a string of scansion patterns
:return: a scansion pattern string starting with two spondees
>>> print(PentameterScanner().make_spondaic("U U U U U U U U U U U U"))
- - - - - - U U - U U U
"""
mark_list = string_utils.mark_list(scansion)
vals = list(scansion.replace(" ", ""))
new_vals = self.SPONDAIC_PENTAMETER[:-1] + vals[-1]
corrected = "".join(new_vals)
new_line = list(" " * len(scansion))
for idx, car in enumerate(corrected):
new_line[mark_list[idx]] = car
return "".join(new_line) | 0.005102 |
def _create_tree(self, endpoint=None, index=0):
"""
This will return a string of the endpoint tree structure
:param endpoint: Endpoint's Current path of the source
:param index: int number of tabs to space over
:return: str
"""
tab = '' # '\t' * index
ret = ''
if endpoint:
name = endpoint.path.split('.', 1)[1].replace('.', '/') + '/'
ret += tab + name + '\n'
ret += endpoint.method_calls(' ' * len(tab + name))
else:
endpoint = self
for child_name in endpoint._endpoints:
child = getattr(endpoint, child_name, None)
if child:
ret += self._create_tree(child, index + 1)
return ret | 0.002608 |
def pad_sentences(sentences, padding_word="</s>"):
"""Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i, sentence in enumerate(sentences):
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences | 0.004032 |
def _get_disksize_MiB(iLOIP, cred):
"""Reads the dictionary of parsed MIBs and gets the disk size.
:param iLOIP: IP address of the server on which SNMP discovery
has to be executed.
    :param cred: SNMP credentials, a dictionary with the following
        mandatory keys:
        auth_user: SNMP user
        auth_protocol: Auth Protocol
        auth_prot_pp: Pass phrase value for AuthProtocol.
        priv_protocol: Privacy Protocol.
        auth_priv_pp: Pass phrase value for Privacy Protocol.
    :returns: the dictionary of disk sizes of all physical drives.
"""
# '1.3.6.1.4.1.232.5.5.1.1', # cpqscsi SAS HBA Table
# '1.3.6.1.4.1.232.3.2.3.1', # cpqida Drive Array Logical Drive Table
result = _parse_mibs(iLOIP, cred)
disksize = {}
for uuid in sorted(result):
for key in result[uuid]:
# We only track the Physical Disk Size
if key.find('PhyDrvSize') >= 0:
disksize[uuid] = dict()
for suffix in sorted(result[uuid][key]):
size = result[uuid][key][suffix]
disksize[uuid][key] = str(size)
return disksize | 0.000845 |
def make_fileitem_streamlist_stream_name(stream_name, condition='is', negate=False, preserve_case=False):
"""
Create a node for FileItem/StreamList/Stream/Name
    :return: An IndicatorItem represented as an Element node
"""
document = 'FileItem'
search = 'FileItem/StreamList/Stream/Name'
content_type = 'string'
content = stream_name
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node | 0.008711 |
def add_range(self, start, part_len, total_len):
"""
        Add range headers indicating that this is a partial response
"""
content_range = 'bytes {0}-{1}/{2}'.format(start,
start + part_len - 1,
total_len)
self.statusline = '206 Partial Content'
self.replace_header('Content-Range', content_range)
self.replace_header('Content-Length', str(part_len))
self.replace_header('Accept-Ranges', 'bytes')
return self | 0.003478 |
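For a quick check of the header arithmetic used above, the standalone sketch below returns the same values as a plain dict; it is a stand-in for the response object's replace_header calls, not the class's API.
def range_headers(start, part_len, total_len):
    # e.g. range_headers(0, 500, 1234) gives Content-Range 'bytes 0-499/1234'
    return {
        'Content-Range': 'bytes {0}-{1}/{2}'.format(start, start + part_len - 1, total_len),
        'Content-Length': str(part_len),
        'Accept-Ranges': 'bytes',
    }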
def ser2ber(q,n,d,t,ps):
"""
Converts symbol error rate to bit error rate. Taken from Ziemer and
Tranter page 650. Necessary when comparing different types of block codes.
parameters
----------
q: size of the code alphabet for given modulation type (BPSK=2)
n: number of channel bits
d: distance (2e+1) where e is the number of correctable errors per code word.
For hamming codes, e=1, so d=3.
t: number of correctable errors per code word
ps: symbol error probability vector
returns
-------
ber: bit error rate
"""
lnps = len(ps) # len of error vector
    ber = np.zeros(lnps)     # initialize output vector
for k in range(0,lnps): # iterate error vector
ser = ps[k] # channel symbol error rate
sum1 = 0 # initialize sums
sum2 = 0
for i in range(t+1,d+1):
term = special.comb(n,i)*(ser**i)*((1-ser))**(n-i)
sum1 = sum1 + term
for i in range(d+1,n+1):
term = (i)*special.comb(n,i)*(ser**i)*((1-ser)**(n-i))
sum2 = sum2+term
ber[k] = (q/(2*(q-1)))*((d/n)*sum1+(1/n)*sum2)
return ber | 0.017979 |
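A minimal usage sketch, assuming the ser2ber function above and its numpy/scipy.special imports are in scope; the parameter values model BPSK (q=2) with a Hamming(7,4) code (n=7, d=3, t=1), following the docstring's conventions.
import numpy as np

ps = np.array([1e-2, 1e-3, 1e-4])          # channel symbol error probabilities
ber = ser2ber(q=2, n=7, d=3, t=1, ps=ps)   # bit error rate after decoding
print(ber)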
def delete_service_group(self, group_id):
"""Deletes a service group from the loadbal_id.
:param int group_id: The id of the service group to delete
"""
svc = self.client['Network_Application_Delivery_Controller_'
'LoadBalancer_VirtualServer']
return svc.deleteObject(id=group_id) | 0.005731 |
def ConfigureUrls(config, external_hostname = None):
"""Guides the user through configuration of various URLs used by GRR."""
print("\n\n-=GRR URLs=-\n"
"For GRR to work each client has to be able to communicate with the\n"
"server. To do this we normally need a public dns name or IP address\n"
"to communicate with. In the standard configuration this will be used\n"
"to host both the client facing server and the admin user interface.\n")
existing_ui_urn = grr_config.CONFIG.Get("AdminUI.url", default=None)
existing_frontend_urns = grr_config.CONFIG.Get("Client.server_urls")
if not existing_frontend_urns:
# Port from older deprecated setting Client.control_urls.
existing_control_urns = grr_config.CONFIG.Get(
"Client.control_urls", default=None)
if existing_control_urns is not None:
existing_frontend_urns = []
for existing_control_urn in existing_control_urns:
if not existing_control_urn.endswith("control"):
raise RuntimeError("Invalid existing control URL: %s" %
existing_control_urn)
existing_frontend_urns.append(
existing_control_urn.rsplit("/", 1)[0] + "/")
config.Set("Client.server_urls", existing_frontend_urns)
config.Set("Client.control_urls", ["deprecated use Client.server_urls"])
if not existing_frontend_urns or not existing_ui_urn:
ConfigureHostnames(config, external_hostname=external_hostname)
else:
print("Found existing settings:\n AdminUI URL: %s\n "
"Frontend URL(s): %s\n" % (existing_ui_urn, existing_frontend_urns))
if not RetryBoolQuestion("Do you want to keep this configuration?", True):
ConfigureHostnames(config, external_hostname=external_hostname) | 0.010112 |
def fetch_track(self, track_id, terr=KKBOXTerritory.TAIWAN):
'''
Fetches a song track by given ID.
:param track_id: the track ID.
:type track_id: str
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#tracks-track_id`.
'''
url = 'https://api.kkbox.com/v1.1/tracks/%s' % track_id
url += '?' + url_parse.urlencode({'territory': terr})
return self.http._post_data(url, None, self.http._headers_with_access_token()) | 0.005618 |
def rollback(self):
"""Cancels any database changes done during the current transaction."""
if self._transaction_nesting_level == 0:
raise DBALConnectionError.no_active_transaction()
self.ensure_connected()
if self._transaction_nesting_level == 1:
self._transaction_nesting_level = 0
self._driver.rollback()
self._is_rollback_only = False
if not self._auto_commit:
self.begin_transaction()
elif self._nest_transactions_with_savepoints:
self.rollback_savepoint(self._get_nested_transaction_savepoint_name())
self._transaction_nesting_level -= 1
else:
self._is_rollback_only = True
self._transaction_nesting_level -= 1 | 0.003802 |
def name(self) -> str:
"""Return template's name (includes whitespace)."""
h = self._atomic_partition(self._first_arg_sep)[0]
if len(h) == len(self.string):
return h[2:-2]
return h[2:] | 0.008772 |
def get_categories_tree(context, template='zinnia/tags/categories_tree.html'):
"""
Return the categories as a tree.
"""
return {'template': template,
'categories': Category.objects.all().annotate(
count_entries=Count('entries')),
'context_category': context.get('category')} | 0.00303 |
def handle_data(self, data):
"""Function called for text nodes"""
if not self.silent:
possible_urls = re.findall(
r'(https?://[\w\d:#%/;$()~_?\-=\\\.&]*)', data)
# validate possible urls
# we'll transform them just in case
# they are valid.
if possible_urls and self.automatic_link_transformation:
for url in possible_urls:
if regex_url.search(url):
transformed_url = '<a href="%s">%s</a>' % (url, url)
data = data.replace(url, transformed_url)
self.result += data
else:
self.result += cgi.escape(data, True) | 0.002747 |
def set_profiling_level(self, level, slow_ms=None, session=None):
"""Set the database's profiling level.
:Parameters:
- `level`: Specifies a profiling level, see list of possible values
below.
- `slow_ms`: Optionally modify the threshold for the profile to
consider a query or operation. Even if the profiler is off queries
slower than the `slow_ms` level will get written to the logs.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
Possible `level` values:
+----------------------------+------------------------------------+
| Level | Setting |
+============================+====================================+
| :data:`~pymongo.OFF` | Off. No profiling. |
+----------------------------+------------------------------------+
| :data:`~pymongo.SLOW_ONLY` | On. Only includes slow operations. |
+----------------------------+------------------------------------+
| :data:`~pymongo.ALL` | On. Includes all operations. |
+----------------------------+------------------------------------+
Raises :class:`ValueError` if level is not one of
(:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`,
:data:`~pymongo.ALL`).
.. versionchanged:: 3.6
Added ``session`` parameter.
.. mongodoc:: profiling
"""
if not isinstance(level, int) or level < 0 or level > 2:
raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)")
if slow_ms is not None and not isinstance(slow_ms, int):
raise TypeError("slow_ms must be an integer")
if slow_ms is not None:
self.command("profile", level, slowms=slow_ms, session=session)
else:
self.command("profile", level, session=session) | 0.001006 |
def FDMT_iteration(datain, maxDT, nchan0, f_min, f_max, iteration_num, dataType):
"""
Input:
    Input - 5d array, with dimensions [nint, N_d0, nbl, nchan, npol]
f_min,f_max - are the base-band begin and end frequencies.
The frequencies can be entered in both MHz and GHz, units are factored out in all uses.
maxDT - the maximal delay (in time bins) of the maximal dispersion.
Appears in the paper as N_{\Delta}
A typical input is maxDT = N_f
dataType - To naively use FFT, one must use floating point types.
Due to casting, use either complex64 or complex128.
iteration num - Algorithm works in log2(Nf) iterations, each iteration changes all the sizes (like in FFT)
Output:
5d array, with dimensions [nint, N_d1, nbl, nchan/2, npol]
where N_d1 is the maximal number of bins the dispersion curve travels at one output frequency band
For details, see algorithm 1 in Zackay & Ofek (2014)
"""
nint, dT, nbl, nchan, npol = datain.shape
# output_dims = list(input_dims)
deltaF = 2**(iteration_num) * (f_max - f_min)/float(nchan0)
dF = (f_max - f_min)/float(nchan0)
# the maximum deltaT needed to calculate at the i'th iteration
deltaT = int(np.ceil((maxDT-1) *(1./f_min**2 - 1./(f_min + deltaF)**2) / (1./f_min**2 - 1./f_max**2)))
logger.debug("deltaT = {0}".format(deltaT))
logger.debug("N_f = {0}".format(nchan0/2**(iteration_num)))
    dataout = np.zeros((nint, deltaT+1, nbl, nchan//2, npol), dataType)
logger.debug('input_dims = {0}'.format(datain.shape))
logger.debug('output_dims = {0}'.format(dataout.shape))
# No negative D's are calculated => no shift is needed
# If you want negative dispersions, this will have to change to 1+deltaT,1+deltaTOld
# Might want to calculate negative dispersions when using coherent dedispersion, to reduce the number of trial dispersions by a factor of 2 (reducing the complexity of the coherent part of the hybrid)
ShiftOutput = 0
ShiftInput = 0
    F_jumps = nchan//2
# For some situations, it is beneficial to play with this correction.
# When applied to real data, one should carefully analyze and understand the effect of
# this correction on the pulse he is looking for (especially if convolving with a specific pulse profile)
if iteration_num>0:
correction = dF/2.
else:
correction = 0
for i_F in range(F_jumps):
f_start = (f_max - f_min)/float(F_jumps) * (i_F) + f_min
f_end = (f_max - f_min)/float(F_jumps) *(i_F+1) + f_min
f_middle = (f_end - f_start)/2. + f_start - correction
# it turned out in the end, that putting the correction +dF to f_middle_larger (or -dF/2 to f_middle, and +dF/2 to f_middle larger)
# is less sensitive than doing nothing when dedispersing a coherently dispersed pulse.
# The confusing part is that the hitting efficiency is better with the corrections (!?!).
f_middle_larger = (f_end - f_start)/2 + f_start + correction
deltaTLocal = int(np.ceil((maxDT-1) *(1./f_start**2 - 1./(f_end)**2) / (1./f_min**2 - 1./f_max**2)))
logger.debug('deltaT {0} deltaTLocal {1}'.format(deltaT, deltaTLocal))
for i_dT in range(deltaTLocal+1):
dT_middle = int(round(i_dT * (1./f_middle**2 - 1./f_start**2)/(1./f_end**2 - 1./f_start**2)))
dT_middle_index = dT_middle + ShiftInput
dT_middle_larger = int(round(i_dT * (1./f_middle_larger**2 - 1./f_start**2)/(1./f_end**2 - 1./f_start**2)))
dT_rest = i_dT - dT_middle_larger
dT_rest_index = dT_rest + ShiftInput
            # out of bounds data?
            i_T_min = 0
            i_T_max = dT_middle_larger
            logger.debug('{0}:{1}, {2}+{3}, {4} <= {5}, {6}'.format(i_T_min, i_T_max, i_dT, ShiftOutput, i_F, dT_middle_index, 2*i_F))
dataout[i_T_min:i_T_max, i_dT + ShiftOutput, :, i_F, :] = datain[i_T_min:i_T_max, dT_middle_index, :, 2*i_F, :]
# fully dedispersed data
i_T_min = dT_middle_larger
i_T_max = nint
dataout[i_T_min:i_T_max, i_dT + ShiftOutput, :, i_F, :] = datain[i_T_min:i_T_max, dT_middle_index, :, 2*i_F, :] + datain[i_T_min - dT_middle_larger:i_T_max-dT_middle_larger, dT_rest_index, :, 2*i_F+1, :]
return dataout | 0.00778 |
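The per-iteration routine above is normally driven for log2(nchan0) passes. The sketch below shows one such driver loop; it assumes an initialization step (not shown in this snippet) has already produced the first 5-d state array with per-channel delays along axis 1, and that FDMT_iteration is in scope.
import numpy as np

def fdmt_transform(state, maxDT, nchan0, f_min, f_max, dataType=np.float32):
    # state: [nint, N_d0, nbl, nchan0, npol] from the (unshown) initialization step
    n_iters = int(np.log2(nchan0))
    for iteration_num in range(1, n_iters + 1):
        state = FDMT_iteration(state, maxDT, nchan0, f_min, f_max,
                               iteration_num, dataType)
    return state  # roughly [nint, maxDT, nbl, 1, npol] after the final merge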
async def fetch_logical_load(self, llid):
"""Lookup details for a given logical load"""
url = "https://production.plum.technology/v2/getLogicalLoad"
data = {"llid": llid}
return await self.__post(url, data) | 0.008403 |
def _check_disabled(self):
"""Check if health check is disabled.
It logs a message if health check is disabled and it also adds an item
to the action queue based on 'on_disabled' setting.
Returns:
True if check is disabled otherwise False.
"""
if self.config['check_disabled']:
if self.config['on_disabled'] == 'withdraw':
self.log.info("Check is disabled and ip_prefix will be "
"withdrawn")
self.log.info("adding %s in the queue", self.ip_with_prefixlen)
self.action.put(self.del_operation)
self.log.info("Check is now permanently disabled")
elif self.config['on_disabled'] == 'advertise':
self.log.info("check is disabled, ip_prefix wont be withdrawn")
self.log.info("adding %s in the queue", self.ip_with_prefixlen)
self.action.put(self.add_operation)
self.log.info('check is now permanently disabled')
return True
return False | 0.001821 |
def read_creds_from_environment_variables():
"""
Read credentials from environment variables
:return:
"""
creds = init_creds()
# Check environment variables
if 'AWS_ACCESS_KEY_ID' in os.environ and 'AWS_SECRET_ACCESS_KEY' in os.environ:
creds['AccessKeyId'] = os.environ['AWS_ACCESS_KEY_ID']
creds['SecretAccessKey'] = os.environ['AWS_SECRET_ACCESS_KEY']
if 'AWS_SESSION_TOKEN' in os.environ:
creds['SessionToken'] = os.environ['AWS_SESSION_TOKEN']
return creds | 0.003774 |
def concatenate_matrices(*matrices):
"""Return concatenation of series of transformation matrices.
>>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
>>> numpy.allclose(M, concatenate_matrices(M))
True
>>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
True
"""
M = numpy.identity(4)
for i in matrices:
M = numpy.dot(M, i)
return M | 0.002506 |
def string(
element_name, # type: Text
attribute=None, # type: Optional[Text]
required=True, # type: bool
alias=None, # type: Optional[Text]
default='', # type: Optional[Text]
omit_empty=False, # type: bool
strip_whitespace=True, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> Processor
"""
Create a processor for string values.
:param strip_whitespace: Indicates whether leading and trailing whitespace should be stripped
from parsed string values.
See also :func:`declxml.boolean`
"""
value_parser = _string_parser(strip_whitespace)
return _PrimitiveValue(
element_name,
value_parser,
attribute,
required,
alias,
default,
omit_empty,
hooks
) | 0.002364 |
def render(self, context, instance, placeholder):
''' Add the cart-specific context to this form '''
context = super(SquareCheckoutFormPlugin, self).render(context, instance, placeholder)
context.update({
'squareApplicationId': getattr(settings,'SQUARE_APPLICATION_ID',''),
})
return context | 0.016997 |
def depth_august_average_ground_temperature(self, value=None):
"""Corresponds to IDD Field `depth_august_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_august_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `depth_august_average_ground_temperature`'.format(value))
self._depth_august_average_ground_temperature = value | 0.004711 |
def extends_(cls, kls):
"""
A view decorator to extend another view class or function to itself
        It will inherit all its methods and properties and use them on itself
-- EXAMPLES --
class Index(Pylot):
pass
index = Index()
::-> As decorator on classes ::
@index.extends_
class A(object):
def hello(self):
pass
@index.extends_
        class C(object):
def world(self):
pass
::-> Decorator With function call ::
@index.extends_
def hello(self):
pass
"""
if inspect.isclass(kls):
for _name, _val in kls.__dict__.items():
if not _name.startswith("__"):
setattr(cls, _name, _val)
elif inspect.isfunction(kls):
setattr(cls, kls.__name__, kls)
return cls | 0.002172 |
def find_xml_generator(name="castxml"):
"""
Try to find a c++ parser (xml generator)
Args:
name (str): name of the c++ parser (e.g. castxml)
Returns:
        path (str), name (str): path to the xml generator and its name
If no c++ parser is found the function raises an exception.
pygccxml does currently only support castxml as c++ parser.
"""
if sys.version_info[:2] >= (3, 3):
path = _find_xml_generator_for_python_greater_equals_33(name)
else:
path = _find_xml_generator_for_legacy_python(name)
if path == "" or path is None:
raise Exception("No c++ parser found. Please install castxml.")
return path.rstrip(), name | 0.00142 |
def get_year_and_month(self, net, qs, **kwargs):
"""
Get the year and month. First tries from kwargs, then from
querystrings. If none, or if cal_ignore qs is specified,
sets year and month to this year and this month.
"""
now = c.get_now()
year = now.year
month = now.month + net
month_orig = None
if 'cal_ignore=true' not in qs:
            if 'year' in self.kwargs and 'month' in self.kwargs:  # try kwargs
year, month_orig = map(
int, (self.kwargs['year'], self.kwargs['month'])
)
month = month_orig + net
else:
try: # try querystring
year = int(self.request.GET['cal_year'])
month_orig = int(self.request.GET['cal_month'])
month = month_orig + net
except Exception:
pass
        # return the year and month, and any errors that may have occurred due
        # to an invalid month/year being given.
return c.clean_year_month(year, month, month_orig) | 0.001789 |
def CreateHunt(hunt_obj):
"""Creates a hunt using a given hunt object."""
data_store.REL_DB.WriteHuntObject(hunt_obj)
if hunt_obj.HasField("output_plugins"):
output_plugins_states = flow.GetOutputPluginStates(
hunt_obj.output_plugins,
source="hunts/%s" % hunt_obj.hunt_id,
token=access_control.ACLToken(username=hunt_obj.creator))
data_store.REL_DB.WriteHuntOutputPluginsStates(hunt_obj.hunt_id,
output_plugins_states) | 0.007874 |
def explore_genres(self, parent_genre_id=None):
"""Get a listing of song genres.
Parameters:
parent_genre_id (str, Optional): A genre ID.
If given, a listing of this genre's sub-genres is returned.
Returns:
list: Genre dicts.
"""
response = self._call(
mc_calls.ExploreGenres,
parent_genre_id
)
genre_list = response.body.get('genres', [])
return genre_list | 0.038168 |
def expand_value(self, **kwargs):
"""
expand the selection to account for wildcards
"""
selection = []
for v in self.get_value(**kwargs):
for choice in self.choices:
if v==choice and choice not in selection:
selection.append(choice)
elif fnmatch(choice, v) and choice not in selection:
selection.append(choice)
return selection | 0.006522 |
def _search(self, model, condition=None, search_field='name',
value_field='id', label_field=None, pagination=True):
"""
Default search function
:param search_field: Used for search field, default is 'name'
:param value_field: Used for id field, default is id
:param label_field: Used for label field, default is None, then it'll use unicode() function
"""
from uliweb import json, request
name = request.GET.get('term', '')
M = functions.get_model(model)
def _v(label_field):
if label_field:
return lambda x: getattr(x, label_field)
else:
return lambda x: unicode(x)
v_field = request.values.get('label', 'title')
page = int(request.values.get('page') or 1)
limit = int(request.values.get('limit') or 10)
v_func = _v(label_field)
if name:
if condition is None:
condition = M.c[search_field].like('%' + name + '%')
if pagination:
query = M.filter(condition)
total = query.count()
rows = [{'id': getattr(obj, value_field), v_field: v_func(obj)}
for obj in query.limit(limit).offset((page-1)*limit)]
result = {'total':total, 'rows':rows}
else:
result = [{'id': getattr(obj, value_field), v_field: v_func(obj)}
for obj in M.filter(condition)]
else:
result = []
return json(result) | 0.006325 |
def get_user(self, user_id):
""" Get user details.
:param user_id: Identification of user by username (str) or user ID
(int)
:returns: User details as strings in dictionary with these keys for RT
users:
* Lang
* RealName
* Privileged
* Disabled
* Gecos
* EmailAddress
* Password
* id
* Name
Or these keys for external users (e.g. Requestors replying
            to email from RT):
* RealName
* Disabled
* EmailAddress
* Password
* id
* Name
None is returned if user does not exist.
:raises UnexpectedMessageFormat: In case that returned status code is not 200
"""
msg = self.__request('user/{}'.format(str(user_id), ))
status_code = self.__get_status_code(msg)
if (status_code == 200):
pairs = {}
lines = msg.split('\n')
if (len(lines) > 2) and self.RE_PATTERNS['does_not_exist_pattern'].match(lines[2]):
return None
for line in lines[2:]:
if ': ' in line:
header, content = line.split(': ', 1)
pairs[header.strip()] = content.strip()
return pairs
else:
raise UnexpectedMessageFormat('Received status code is {:d} instead of 200.'.format(status_code)) | 0.002967 |
def from_json(cls, key):
"""Creates a RFC 7517 JWK from the standard JSON format.
:param key: The RFC 7517 representation of a JWK.
"""
obj = cls()
try:
jkey = json_decode(key)
except Exception as e: # pylint: disable=broad-except
raise InvalidJWKValue(e)
obj.import_key(**jkey)
return obj | 0.005277 |
def _get_key_redis_key(bank, key):
'''
Return the Redis key given the bank name and the key name.
'''
opts = _get_redis_keys_opts()
return '{prefix}{separator}{bank}/{key}'.format(
prefix=opts['key_prefix'],
separator=opts['separator'],
bank=bank,
key=key
) | 0.003195 |
def _parse_input_parameters(self):
"""
Set the configuration for the Logger
"""
Global.LOGGER.debug("define and parsing command line arguments")
parser = argparse.ArgumentParser(
description='A workflow engine for Pythonistas', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('FILENAME', nargs='+',help='name of the recipe file(s)')
parser.add_argument('-i', '--INTERVAL', type=int, default=500,
metavar=('MS'),
help='perform a cycle each [MS] milliseconds. (default = 500)')
parser.add_argument('-m', '--MESSAGEINTERVAL', type=int,
metavar=('X'),
help='dequeue a message each [X] tenth of milliseconds. (default = auto)')
parser.add_argument('-s', '--STATS', type=int, default=0,
metavar=('SEC'),
help='show stats each [SEC] seconds. (default = NO STATS)')
parser.add_argument('-t', '--TRACE', action='store_true',help='enable super verbose output, only useful for tracing')
parser.add_argument('-v', '--VERBOSE', action='store_true',help='enable verbose output')
parser.add_argument('-V', '--VERSION',
action="version", version=__version__)
args = parser.parse_args()
return args | 0.009649 |
def num_batches(n, batch_size):
"""Compute the number of mini-batches required to cover a data set of
size `n` using batches of size `batch_size`.
Parameters
----------
n: int
the number of samples in the data set
batch_size: int
the mini-batch size
Returns
-------
int: the number of batches required
"""
b = n // batch_size
if n % batch_size > 0:
b += 1
return b | 0.002262 |
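The same count is a single ceiling division; the quick sanity check below assumes the num_batches function above is in scope.
for n, batch_size in [(10, 3), (12, 4), (1, 8)]:
    assert num_batches(n, batch_size) == (n + batch_size - 1) // batch_size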
def autobuild_doxygen(tile):
"""Generate documentation for firmware in this module using doxygen"""
iotile = IOTile('.')
doxydir = os.path.join('build', 'doc')
doxyfile = os.path.join(doxydir, 'doxygen.txt')
outfile = os.path.join(doxydir, '%s.timestamp' % tile.unique_id)
env = Environment(ENV=os.environ, tools=[])
env['IOTILE'] = iotile
# There is no /dev/null on Windows
if platform.system() == 'Windows':
action = 'doxygen %s > NUL' % doxyfile
else:
action = 'doxygen %s > /dev/null' % doxyfile
Alias('doxygen', doxydir)
env.Clean(outfile, doxydir)
inputfile = doxygen_source_path()
env.Command(doxyfile, inputfile, action=env.Action(lambda target, source, env: generate_doxygen_file(str(target[0]), iotile), "Creating Doxygen Config File"))
env.Command(outfile, doxyfile, action=env.Action(action, "Building Firmware Documentation")) | 0.003254 |