content | sha1 | id
---|---|---|
def async_get_url(
hass: HomeAssistant,
*,
require_ssl: bool = False,
require_standard_port: bool = False,
allow_internal: bool = True,
allow_external: bool = True,
allow_cloud: bool = True,
allow_ip: bool = True,
prefer_external: bool = False,
prefer_cloud: bool = False,
) -> str:
"""Get a URL to this instance."""
order = [TYPE_URL_INTERNAL, TYPE_URL_EXTERNAL]
if prefer_external:
order.reverse()
# Try finding an URL in the order specified
for url_type in order:
if allow_internal and url_type == TYPE_URL_INTERNAL:
try:
return _async_get_internal_url(
hass,
allow_ip=allow_ip,
require_ssl=require_ssl,
require_standard_port=require_standard_port,
)
except NoURLAvailableError:
pass
if allow_external and url_type == TYPE_URL_EXTERNAL:
try:
return _async_get_external_url(
hass,
allow_cloud=allow_cloud,
allow_ip=allow_ip,
prefer_cloud=prefer_cloud,
require_ssl=require_ssl,
require_standard_port=require_standard_port,
)
except NoURLAvailableError:
pass
# We have to be honest now, we have no viable option available
raise NoURLAvailableError | 1d4e13a8fa5d26bbc9132e85937a24d320136112 | 12,236 |
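A minimal usage sketch for the helper above; `hass` is assumed to be an already-initialised HomeAssistant instance with internal/external URLs configured, and the fallback mirrors the NoURLAvailableError raised at the end of the function:
# Hedged sketch: `hass` is assumed to come from a running Home Assistant setup.
try:
    url = async_get_url(hass, prefer_external=True, require_ssl=True)
except NoURLAvailableError:
    url = None  # no internal, external, or cloud URL satisfied the constraints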
def strip(prefix: Seq, seq: Seq, partial=False, cmp=NOT_GIVEN) -> Iter:
"""
If seq starts with the same elements as in prefix, remove them from
result.
Args:
prefix:
Prefix sequence to possibly be removed from seq.
seq:
Sequence of input elements.
partial:
If True, remove partial matches with prefix.
cmp:
If given, used as a comparison function between elements of prefix
and the sequence. Elements for which cmp(x, y) returns True are removed.
Examples:
>>> ''.join(strip("ab", "abcd"))
'cd'
>>> strip(sk.repeat(3), range(6), partial=True, cmp=(X > Y))
sk.iter([3, 4, 5])
"""
if partial:
cmp = NOT_GIVEN.resolve(cmp, op.eq)
return Iter(_strip_partial(iter(prefix), iter(seq), cmp=cmp))
elif cmp is NOT_GIVEN:
return Iter(_strip_full(tuple(prefix), iter(seq)))
else:
return Iter(_strip_full_cmp(tuple(prefix), iter(seq), cmp)) | 8d2a9a62157e3b55adcc976d5c7693cb67513c92 | 12,238 |
def _read_unicode_table(instream, separator, startseq, encoding):
"""Read the Unicode table in a PSF2 file."""
raw_table = instream.read()
entries = raw_table.split(separator)[:-1]
table = []
for point, entry in enumerate(entries):
split = entry.split(startseq)
code_points = [_seq.decode(encoding) for _seq in split]
# first entry is separate code points, following entries (if any) are sequences
table.append([_c for _c in code_points[0]] + code_points[1:])
return table | e27e59b57d10cb20dd4ddc832c65cb8802984d44 | 12,239 |
from click.testing import CliRunner
def runner():
"""Provides a command-line test runner."""
return CliRunner() | 82b75c8dcaa0105c623a1caea5b459c97e3e18fd | 12,240 |
def matsubara_exponents(coup_strength, bath_broad, bath_freq, beta, N_exp):
"""
Calculates the exponentials for the correlation function for matsubara
terms. (t>=0)
Parameters
----------
coup_strength: float
The coupling strength parameter.
bath_broad: float
A parameter characterizing the FWHM of the spectral density, i.e.,
the cavity broadening.
bath_freq: float
The cavity frequency.
beta: float
The inverse temperature.
N_exp: int
The number of exponents to consider in the sum.
Returns
-------
ck: ndarray
A 1D array with the prefactors for the exponentials
vk: ndarray
A 1D array with the frequencies
"""
lam = coup_strength
gamma = bath_broad
w0 = bath_freq
N_exp = N_exp
omega = np.sqrt(w0 ** 2 - (gamma / 2) ** 2)
a = omega + 1j * gamma / 2.0
aa = np.conjugate(a)
coeff = (-4 * gamma * lam ** 2 / np.pi) * ((np.pi / beta) ** 2)
vk = np.array([-2 * np.pi * n / (beta) for n in range(1, N_exp)])
ck = np.array(
[
n
/ (
(a ** 2 + (2 * np.pi * n / beta) ** 2)
* (aa ** 2 + (2 * np.pi * n / beta) ** 2)
)
for n in range(1, N_exp)
]
)
return coeff * ck, vk | 4d6a1691234f12a5cbdec13b2520891c0f30eb77 | 12,241 |
def reverse(array):
"""Return `array` in reverse order.
Args:
array (list|string): Object to process.
Returns:
list|string: Reverse of object.
Example:
>>> reverse([1, 2, 3, 4])
[4, 3, 2, 1]
.. versionadded:: 2.2.0
"""
# NOTE: Using this method to reverse object since it works for both lists
# and strings.
return array[::-1] | 5eb096d043d051d4456e08fae91fb52048686992 | 12,242 |
def compute_segregation_profile(gdf,
groups=None,
distances=None,
network=None,
decay='linear',
function='triangular',
precompute=True):
"""Compute multiscalar segregation profile.
This function calculates several Spatial Information Theory indices with
increasing distance parameters.
Parameters
----------
gdf : geopandas.GeoDataFrame
geodataframe with rows as observations and columns as population
variables. Note that if using a network distance, the coordinate
system for this gdf should be 4326. If using euclidian distance,
this must be projected into planar coordinates like state plane or UTM.
groups : list
list of variables .
distances : list
list of floats representing bandwidth distances that define a local
environment.
network : pandana.Network (optional)
A pandana.Network likely created with
`segregation.network.get_osm_network`.
decay : str (optional)
decay type to be used in pandana accessibility calculation (the
default is 'linear').
function: 'str' (optional)
which weighting function should be passed to pysal.lib.weights.Kernel
must be one of: 'triangular','uniform','quadratic','quartic','gaussian'
precompute: bool
Whether the pandana.Network instance should precompute the range
queries.This is true by default, but if you plan to calculate several
segregation profiles using the same network, then you can set this
parameter to `False` to avoid precomputing repeatedly inside the
function
Returns
-------
dict
dictionary with distances as keys and SIT statistics as values
Notes
-----
Based on Sean F. Reardon, Stephen A. Matthews, David O’Sullivan, Barrett A. Lee, Glenn Firebaugh, Chad R. Farrell, & Kendra Bischoff. (2008). The Geographic Scale of Metropolitan Racial Segregation. Demography, 45(3), 489–514. https://doi.org/10.1353/dem.0.0019.
Reference: :cite:`Reardon2008`.
"""
gdf = gdf.copy()
gdf[groups] = gdf[groups].astype(float)
indices = {}
indices[0] = MultiInformationTheory(gdf, groups).statistic
if network:
if not gdf.crs['init'] == 'epsg:4326':
gdf = gdf.to_crs(epsg=4326)
groups2 = ['acc_' + group for group in groups]
if precompute:
maxdist = max(distances)
network.precompute(maxdist)
for distance in distances:
distance = float(distance)
access = calc_access(gdf,
network,
decay=decay,
variables=groups,
distance=distance,
precompute=False)
sit = MultiInformationTheory(access, groups2)
indices[distance] = sit.statistic
else:
for distance in distances:
w = Kernel.from_dataframe(gdf,
bandwidth=distance,
function=function)
sit = SpatialInformationTheory(gdf, groups, w=w)
indices[distance] = sit.statistic
return indices | 3598d7c72660860330847758fc744fcd1b1f40ce | 12,243 |
def calculate_lbp_pixel(image, x, y):
"""Perform the LBP operator on a given pixel.
Order and format:
32 | 64 | 128
----+-----+-----
16 | 0 | 1
----+-----+-----
8 | 4 | 2
:param image: Input image
:type: numpy.ndarray
:param x: Column pixel of interest
:type: int
:param y: Row pixel of interest
:type: int
:return: LBP value
:rtype: numpy.ndarray
"""
center = image[x][y]
binary_code = np.empty(8)
binary_code[0] = threshold_pixel(image, center, x, y + 1) # Right
binary_code[1] = threshold_pixel(image, center, x + 1, y + 1) # Bottom Right
binary_code[2] = threshold_pixel(image, center, x + 1, y) # Bottom
binary_code[3] = threshold_pixel(image, center, x + 1, y - 1) # Bottom Left
binary_code[4] = threshold_pixel(image, center, x, y - 1) # Left
binary_code[5] = threshold_pixel(image, center, x - 1, y - 1) # Top Left
binary_code[6] = threshold_pixel(image, center, x - 1, y) # Top
binary_code[7] = threshold_pixel(image, center, x - 1, y + 1) # Top Right
weights = np.array([1, 2, 4, 8, 16, 32, 64, 128])
lbp_value = np.dot(binary_code, weights).astype(np.uint8)
return lbp_value | 14f6bd557355a71379b638e52f21d72ccd30a7cb | 12,244 |
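A self-contained sketch of how `calculate_lbp_pixel` might be exercised; `threshold_pixel` is not defined in the snippet above, so the stand-in below is an assumption (neighbour >= centre yields 1, out-of-bounds yields 0):
import numpy as np

def threshold_pixel(image, center, x, y):
    # Assumed helper: compare a neighbour against the centre value.
    if 0 <= x < image.shape[0] and 0 <= y < image.shape[1]:
        return 1 if image[x][y] >= center else 0
    return 0

img = np.array([[10, 20, 30],
                [40, 50, 60],
                [70, 80, 90]], dtype=np.uint8)
print(calculate_lbp_pixel(img, 1, 1))  # prints 15 for this input (weights 1+2+4+8)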
def test_gradient_sparse_var():
"""
https://www.tensorflow.org/beta/guide/effective_tf2
"""
target = tf.constant([[1., 0., 0.], [1., 0., 0.]])
v = tf.Variable([0.5, 0.5])
x = tx.Lambda([],
fn=lambda _: tf.SparseTensor([[0, 0], [1, 1]], v, [2, 3]),
n_units=3,
var_list=v)
assert isinstance(x(), tf.SparseTensor)
assert len(x.trainable_variables) == 1
y = tx.Linear(x, n_units=3)
# a graph without inputs needs to have missing inputs declared
# otherwise it will try to add the inputs detected to inputs
graph = tx.Graph.build(inputs=None,
outputs=y)
fn = graph.as_function()
@tf.function
def loss(labels):
return tf.reduce_mean(tf.pow(labels - fn(), 2))
with tf.GradientTape() as tape:
loss_val = loss(target)
assert tx.same_shape(tape.gradient(loss_val, v), v.value()) | e43a84a052313fecd11eca60a027f00385cd252f | 12,245 |
def get_setup_and_moves(sgf_game, board=None):
"""Return the initial setup and the following moves from an Sgf_game.
Returns a pair (board, plays)
board -- boards.Board
plays -- list of pairs (colour, move)
moves are (row, col), or None for a pass.
The board represents the position described by AB and/or AW properties
in the root node.
The moves are from the game's 'leftmost' variation.
Raises ValueError if this position isn't legal.
Raises ValueError if there are any AB/AW/AE properties after the root
node.
Doesn't check whether the moves are legal.
If the optional 'board' parameter is provided, it must be an empty board of
the right size; the same object will be returned.
"""
size = sgf_game.get_size()
if board is None:
board = boards.Board(size)
else:
if board.side != size:
raise ValueError("wrong board size, must be %d" % size)
if not board.is_empty():
raise ValueError("board not empty")
root = sgf_game.get_root()
nodes = sgf_game.main_sequence_iter()
ab, aw, ae = root.get_setup_stones()
if ab or aw:
is_legal = board.apply_setup(ab, aw, ae)
if not is_legal:
raise ValueError("setup position not legal")
colour, raw = root.get_raw_move()
if colour is not None:
raise ValueError("mixed setup and moves in root node")
nodes.next()
moves = []
for node in nodes:
if node.has_setup_stones():
raise ValueError("setup properties after the root node")
colour, raw = node.get_raw_move()
if colour is not None:
moves.append((colour, sgf_properties.interpret_go_point(raw, size)))
return board, moves | a933c067baa49d8e6c7309f7762298244a192d2e | 12,246 |
def help_text_metadata(label=None, description=None, example=None):
"""
Standard interface to help specify the required metadata fields for helptext to
work correctly for a model.
:param str label: Alternative name for the model.
:param str description: Long description of the model.
:param example: A concrete example usage of the model.
:return dict: Dictionary of the help text metadata
"""
return {
'label': label,
'description': description,
'example': example
} | a1fb9c9a9419fe7ce60ed77bc6fadc97ed4523f8 | 12,247 |
def conv1d_stack(sequences, filters, activations, name=None):
"""Convolve a jagged batch of sequences with a stack of filters.
This is equivalent to running several `conv1d`s on each `sequences[i]` and
reassembling the results as a `Jagged`. The padding is always 'SAME'.
Args:
sequences: 4-D `Jagged` tensor.
filters: List of 3-D filters (one filter per layer). Must have odd width.
activations: List of activation functions to apply after each layer, or
None to indicate no activation.
name: Optional name for this operation.
Returns:
`Jagged` convolution results.
Raises:
TypeError: If sequences is not Jagged.
ValueError: If the filters or activations are invalid.
"""
if not isinstance(sequences, Jagged):
raise TypeError('Expected Jagged sequences, got %s' % type(sequences))
if len(filters) != len(activations):
raise ValueError('Got %d filters != %d activations' %
(len(filters), len(activations)))
if not filters:
return sequences
with tf.name_scope(name, 'jagged_conv1d_stack') as name:
# Compute maximum filter width
filters = [tf.convert_to_tensor(f, name='filter') for f in filters]
width = 0
for filt in filters:
shape = filt.get_shape()
if shape.ndims != 3 or shape[0] is None or shape[0].value % 2 == 0:
raise ValueError('Expected known odd filter width, got shape %s' %
shape)
width = max(width, shape[0].value)
between = width // 2 # Rounds down since width is odd
# Add 'between' zeros between each sequence
flat = sequences.flat
sizes = flatten(sequences.sizes)
size = tf.size(sizes)
flat_shape = tf.shape(flat)
flat_len = flat_shape[0]
indices = (tf.range(flat_len) + repeats(between * tf.range(size), sizes))
padded_len = between * tf.nn.relu(size - 1) + flat_len
flat = tf.unsorted_segment_sum(flat, indices, padded_len)[None]
# Make a mask to reset between portions to zero
if len(filters) > 1:
mask = tf.unsorted_segment_sum(
tf.ones(flat_shape[:1], dtype=flat.dtype), indices, padded_len)
mask = mask[:, None]
# Do each convolution
for i, (filt, activation) in enumerate(zip(filters, activations)):
if i:
flat *= mask
flat = tf.nn.conv1d(flat, filt, stride=1, padding='SAME')
if activation is not None:
flat = activation(flat)
# Extract results and repackage as a Jagged
flat = tf.squeeze(flat, [0])
flat = tf.gather(flat, indices, name=name)
return Jagged(sequences.sizes, flat) | 33248627280ea0c127d710128790e58e0ca5bb9a | 12,248 |
def dh_mnthOfYear(value, pattern):
"""
Helper for decoding a single integer value.
The value should be >=1000, no conversion,
no rounding (used in month of the year)
"""
return dh_noConv(value, pattern, _formatLimit_MonthOfYear[0]) | 5dd1027a0713cb93ea6e3504f1a07517f228a037 | 12,249 |
import traceback
import six
def serialize_remote_exception(failure_info):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
# NOTE(matiu): With cells, it's possible to re-raise remote, remote
# exceptions. Lets turn it back into the original exception type.
cls_name = six.text_type(failure.__class__.__name__)
mod_name = six.text_type(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data | 549b996afc2b07b9e72f69cd2c6ddaee4010f5af | 12,251 |
def process_polygon(coordinates):
"""Pass list of co-ordinates to Shapely Polygon function and get polygon object"""
return Polygon(coordinates) | fe644dc41e7951a030df511bb7e76b2e74883cae | 12,254 |
def split_function(vector, column, value):
"""
Split function
"""
return vector[column] >= value | c6129422fd5bf0b16229e6346adde5f50b203e7b | 12,255 |
def _excitation_operator( # pylint: disable=invalid-name
edge_list: np.ndarray, p: int, q: int, h1_pq: float
) -> SparsePauliOp:
"""Map an excitation operator to a Pauli operator.
Args:
edge_list: representation of graph specifying neighboring qubits.
p: First Fermionic-mode index.
q: Second Fermionic-mode index. You must ensure that p < q.
h1_pq: Numerical coefficient of the term.
Returns:
The result of the Fermionic to Pauli operator mapping.
""" # pylint: disable=missing-raises-doc
if p >= q:
raise ValueError("Expected p < q, got p = ", p, ", q = ", q)
b_a = _edge_operator_bi(edge_list, p)
b_b = _edge_operator_bi(edge_list, q)
a_ab = _edge_operator_aij(edge_list, p, q)
return (-1j * 0.5 * h1_pq) * ((b_b & a_ab) + (a_ab & b_a)) | 0ae0ace12884c507977cf2e555a354c80f83e7ad | 12,257 |
def send_message( message, node, username, password, resource, max_attempts=1 ):
""" broadcast this message thru lvalert """
tmpfilename = "tmpfile.json"
tmpfile = open(tmpfilename, "w")
tmpfile.write( message )
tmpfile.close()
cmd = "lvalert_send -a %s -b %s -r %s -n %s -m %d --file %s"%(username, password, resource, node, max_attempts, tmpfilename)
return sp.Popen(cmd.split()).wait() | 5ebec2f3487b431a6d1131b11c8ab2d672308b48 | 12,258 |
def create_bcs(field_to_subspace, Lx, Ly, solutes,
V_boundary,
enable_NS, enable_PF, enable_EC,
**namespace):
""" The boundary conditions are defined in terms of field. """
boundaries = dict(wall=[Wall()])
bcs = dict(
wall=dict()
)
bcs_pointwise = dict()
noslip = Fixed((0., 0.))
# Navier-Stokes
if enable_NS:
bcs["wall"]["u"] = noslip
bcs_pointwise["p"] = (0., "x[0] < DOLFIN_EPS && x[1] < DOLFIN_EPS")
# Electrochemistry
if enable_EC:
bcs["wall"]["V"] = Fixed(V_boundary)
return boundaries, bcs, bcs_pointwise | d1e72d30404ee68b877c6761a6309884e3054b6c | 12,259 |
def get_response_rows(response, template):
"""
Take in a list of responses and convert them to SSE.Rows based on the column type specified in template
The template should be a list of the form: ["str", "num", "dual", ...]
For string values use: "str"
For numeric values use: "num"
For dual values: "dual"
"""
response_rows = []
# For each row in the response list
for row in response:
i = 0
this_row = []
if len(template) > 1:
# For each column in the row
for col in row:
# Convert values to type SSE.Dual according to the template list
if template[i] == "str":
if col is None:
col = "\x00"
elif type(col) is not str:
col = "{0:.5f}".format(col)
this_row.append(SSE.Dual(strData=col))
elif template[i] == "num":
this_row.append(SSE.Dual(numData=col))
elif template[i] == "dual":
this_row.append(SSE.Dual(strData=col, numData=col))
i = i + 1
else:
# Convert values to type SSE.Dual according to the template list
if template[0] == "str":
if row is None:
row = "\x00"
elif type(row) is not str:
row = "{0:.5f}".format(row)
this_row.append(SSE.Dual(strData=row))
elif template[0] == "num":
this_row.append(SSE.Dual(numData=row))
elif template[0] == "dual":
this_row.append(SSE.Dual(strData=row, numData=row))
# Group columns into an iterable and add to the response_rows
response_rows.append(iter(this_row))
# Values are then structured as SSE.Rows
response_rows = [SSE.Row(duals=duals) for duals in response_rows]
return response_rows | c3c3e4bf53929895959948836a77893b2f961221 | 12,260 |
def stations_within_radius(stations, centre, r):
"""function that returns a list of all stations (type MonitoringStation)
within radius r of the geographic coordinate centre."""
close_stations = []
for station in stations:
if haversine(station.coord, centre) < float(r):
close_stations.append(station)
return close_stations | 9877020f56f25435d1dad6fae32ebbae0e7cfdaf | 12,261 |
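A hedged, self-contained sketch; the real `MonitoringStation` class and `haversine` helper belong to the surrounding project, so minimal stand-ins (distances in km) are used here:
from collections import namedtuple
from math import asin, cos, radians, sin, sqrt

MonitoringStation = namedtuple("MonitoringStation", ["name", "coord"])  # stand-in

def haversine(p1, p2):
    # Stand-in great-circle distance in kilometres.
    lat1, lon1, lat2, lon2 = map(radians, (*p1, *p2))
    a = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371 * asin(sqrt(a))

stations = [MonitoringStation("Cam", (52.21, 0.12)), MonitoringStation("Mersey", (53.41, -2.98))]
print(stations_within_radius(stations, (52.2053, 0.1218), 25))  # only the nearby "Cam" station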
import torch
def jacobian(model, x, output_class):
"""
Compute the output_class'th row of a Jacobian matrix. In other words,
compute the gradient with respect to the output_class.
:param model: forward pass function.
:param x: input tensor.
:param output_class: the output class we want to compute the gradients for.
:return: output_class'th row of the Jacobian matrix wrt x.
"""
xvar = replicate_input_withgrad(x)
scores = model(xvar)
# compute gradients for the class output_class wrt the input x
# using backpropagation
torch.sum(scores[:, output_class]).backward()
return xvar.grad.detach().clone() | 07364b8cc58d3ba51431d07e6bf5d164da7ab380 | 12,264 |
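A runnable sketch under the assumption that `replicate_input_withgrad` (not defined in the snippet above) simply clones the input with gradient tracking enabled:
import torch

def replicate_input_withgrad(x):
    # Assumed helper: detached copy of the input with requires_grad set.
    return x.detach().clone().requires_grad_()

model = torch.nn.Linear(4, 3)
x = torch.randn(2, 4)
row = jacobian(model, x, output_class=1)  # gradients of the class-1 scores wrt x
print(row.shape)                          # torch.Size([2, 4])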
def render_face_orthographic(mesh, background=None):
"""
mesh location should be normalized
:param mesh:
:param background:
:return:
"""
mesh.visual.face_colors = np.array([0.05, 0.1, 0.2, 1])
mesh = pyrender.Mesh.from_trimesh(mesh, smooth=False)
# mesh = pyrender.Mesh.from_trimesh(mesh)
scene.add(mesh, pose=np.eye(4))
camera_pose = np.eye(4)
# camera_pose[0, 3] = 1
# camera_pose[1, 3] = 1
# camera_pose[2, 3] = -10
# camera_pose[0, 0] = 1
# camera_pose[1, 1] = -1
# camera_pose[2, 2] = -1
#
# camera = pyrender.OrthographicCamera(xmag=1, ymag=1, zfar=100)
camera_pose[0, 3] = 1
camera_pose[1, 3] = 1
camera_pose[2, 3] = 10
camera_pose[0, 0] = 1
camera_pose[1, 1] = 1
camera_pose[2, 2] = 1
camera = pyrender.OrthographicCamera(xmag=1, ymag=1, zfar=100)
scene.add(camera, pose=camera_pose)
light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=5.0)
scene.add(light, pose=camera_pose)
color, depth = r.render(scene)
scene.clear()
# print(color.shape)
color = np.array(color)
color = color[::-1]
if background is not None:
new_color = np.array(background)
new_color[color != 255] = color[color != 255]
color = new_color
return color | 21cc7adaaebec7d8114f8192ccdc133075a19bc3 | 12,265 |
def check_and_join(phrase, symbols=None, filter=None):
"""
Joins characters of ``phrase`` and if ``symbols`` is given, raises
an error if any character in ``phrase`` is not in ``symbols``.
Parameters
==========
phrase
String or list of strings to be returned as a string.
symbols
Iterable of characters allowed in ``phrase``.
If ``symbols`` is ``None``, no checking is performed.
Examples
========
>>> from sympy.crypto.crypto import check_and_join
>>> check_and_join('a phrase')
'a phrase'
>>> check_and_join('a phrase'.upper().split())
'APHRASE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE', filter=True)
'ARAE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE')
Traceback (most recent call last):
...
ValueError: characters in phrase but not symbols: "!HPS"
"""
rv = ''.join(''.join(phrase))
if symbols is not None:
symbols = check_and_join(symbols)
missing = ''.join(list(sorted(set(rv) - set(symbols))))
if missing:
if not filter:
raise ValueError(
'characters in phrase but not symbols: "%s"' % missing)
rv = translate(rv, None, missing)
return rv | 64ddcedf19ecba17b169e4bc3a3c205fe3192eb7 | 12,266 |
def drawLaneOnImage(img):
"""
Find and draw the lane lines on the image `img`.
"""
left_fit, right_fit, left_fit_m, right_fit_m, _, _, _, _, _ = findLines(img)
output = drawLine(img, left_fit, right_fit)
return cv2.cvtColor( output, cv2.COLOR_BGR2RGB ) | aa42679c7ff8f90b906d8cf74af9207edeadc430 | 12,268 |
import numpy
def ss(a, axis=0):
### taken from SciPy
"""Squares each value in the passed array, adds these squares, and
returns the result.
Parameters
----------
a : array
axis : int or None
Returns
-------
The sum along the given axis for (a*a).
"""
a, axis = _chk_asarray(a, axis)
return numpy.sum(a*a, axis) | 75569fa96fd866f20b57c5f5c52a38e1b9663968 | 12,270 |
def Journaling_TypeInfo():
"""Journaling_TypeInfo() -> RTTI"""
return _DataModel.Journaling_TypeInfo() | 24cdb273b6463874e71668a304af3a13305fff24 | 12,271 |
def _maven_artifact(
group,
artifact,
version,
ownership_tag = None,
packaging = None,
classifier = None,
exclusions = None,
neverlink = None,
testonly = None,
tags = None,
flatten_transitive_deps = None,
aliases = None):
"""Defines maven artifact by coordinates.
Args:
group: The Maven artifact coordinate group name (ex: "com.google.guava").
artifact: The Maven artifact coordinate artifact name (ex: "guava").
version: The Maven artifact coordinate version name (ex: "1.20.1").
ownership_tag: 3rd party dependency owner responsible for its maintenance.
packaging: The Maven artifact coordinate packaging name (ex: "jar").
classifier: The Maven artifact coordinate classifier name (ex: "jdk11").
exclusions: Artifact dependencies to be excluded from resolution closure.
neverlink: neverlink value to set,
testonly: testonly value to set.
tags: Target tags.
flatten_transitive_deps: Define all transitive deps as direct deps.
aliases: aliases that will point to this dep.
"""
maven_artifact = {}
maven_artifact["group"] = group
maven_artifact["artifact"] = artifact
maven_artifact["version"] = version
maven_artifact["aliases"] = aliases
maven_artifact["tags"] = tags
maven_artifact["flatten_transitive_deps"] = flatten_transitive_deps
if packaging != None:
maven_artifact["packaging"] = packaging
if classifier != None:
maven_artifact["classifier"] = classifier
if exclusions != None:
maven_artifact["exclusions"] = exclusions
if neverlink != None:
maven_artifact["neverlink"] = neverlink
if testonly != None:
maven_artifact["testonly"] = testonly
if ownership_tag != None:
maven_artifact["ownership_tag"] = ownership_tag
return maven_artifact | 9f97cd8cadfc3ad1365cb6d291634a9362fea4e8 | 12,272 |
def get_anchors(n):
"""Get a list of NumPy arrays, each of them is an anchor node set"""
m = int(np.log2(n))
anchor_set_id = []
for i in range(m):
anchor_size = int(n / np.exp2(i + 1))
for _ in range(m):
anchor_set_id.append(np.random.choice(n, size=anchor_size, replace=False))
return anchor_set_id | 4adbaa291740ab3d9cb0a3d6b48c39665d8d5b06 | 12,274 |
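A short usage sketch (NumPy only); the anchor-set sizes follow the n / 2**(i+1) rule in the loop above:
import numpy as np

np.random.seed(0)
anchor_sets = get_anchors(64)                 # m = log2(64) = 6
print(len(anchor_sets))                       # 36 sets in total (m * m)
print(sorted({len(s) for s in anchor_sets}))  # sizes 1, 2, 4, 8, 16, 32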
def diag_gaussian_log_likelihood(z, mu=0.0, logvar=0.0):
"""Log-likelihood under a Gaussian distribution with diagonal covariance.
Returns the log-likelihood for each dimension. One should sum the
results for the log-likelihood under the full multidimensional model.
Args:
z: The value to compute the log-likelihood.
mu: The mean of the Gaussian
logvar: The log variance of the Gaussian.
Returns:
The log-likelihood under the Gaussian model.
"""
return -0.5 * (logvar + np.log(2*np.pi) + \
tf.square((z-mu)/tf.exp(0.5*logvar))) | 7265103ddfc5c521fd9612524413cd15e237be9b | 12,275 |
def _filter_option_to_config_setting(flt, setting):
"""
Encapsulates the logic for associating a filter database option with the filter setting from relay_config
:param flt: the filter
:param setting: the option deserialized from the database
:return: the option as viewed from relay_config
"""
if setting is None:
raise ValueError("Could not find filter state for filter {0}."
" You need to register default filter state in projectoptions.defaults.".format(flt.spec.id))
is_enabled = setting != '0'
ret_val = {
'is_enabled': is_enabled
}
# special case for legacy browser.
# If the number of special cases increases we'll have to factor this functionality somewhere
if flt.spec.id == FilterStatKeys.LEGACY_BROWSER:
if is_enabled:
if setting == '1':
# old style filter
ret_val['default_filter'] = True
else:
# new style filter, per legacy browser type handling
# ret_val['options'] = setting.split(' ')
ret_val['options'] = list(setting)
return ret_val | 41694d340285b722daf91fb0badeb1ec33eb0587 | 12,276 |
def get_svg(accession, **kwargs):
"""
Returns a HMM sequence logo in SVG format.
Parameters
----------
accession : str
Pfam accession for desired HMM.
**kwargs :
Additional arguments are passed to :class:`LogoPlot`.
"""
logoplot = plot.LogoPlot(accession, **kwargs)
svg = logoplot.get_svg()
return svg | 952c6afa4d63f46be579cd70a4e2756b061b9f9b | 12,277 |
def wl_to_en( l ):
"""
Converts a wavelength, given in nm, to an energy in eV.
:param l: The wavelength to convert, in nm.
:returns: The corresponding energy in eV.
"""
a = phys.physical_constants[ 'electron volt-joule relationship' ][ 0 ] # J
return phys.Planck* phys.c/( a* l* 1e-9 ) | a9d428d7aed3a6c88a1906e026649ee74f700c81 | 12,278 |
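A worked example, assuming `phys` in the snippet above is bound to `scipy.constants` (which provides `Planck`, `c`, and the `physical_constants` table):
import scipy.constants as phys  # assumed binding for `phys` used by wl_to_en

print(wl_to_en(620.0))  # a 620 nm photon carries roughly 2.0 eV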
from typing import Optional
def get_local_address_reaching(dest_ip: IPv4Address) -> Optional[IPv4Address]:
"""Get address of a local interface within same subnet as provided address."""
for iface in netifaces.interfaces():
for addr in netifaces.ifaddresses(iface).get(netifaces.AF_INET, []):
iface = IPv4Interface(addr["addr"] + "/" + addr["netmask"])
if dest_ip in iface.network:
return iface.ip
return None | ee7061633d72c3b0ac578baf6119e5437395ce17 | 12,279 |
def atSendCmdTest(cmd_name: 'str', params: 'list'):
""" 发送测试命令,方便调试 ATCore
"""
func_name = 'atSendCmdTest'
atserial.ATraderCmdTest_send(cmd_name, params)
res = recv_serial(func_name)
atReturnChecker(func_name, res.result)
return res.listResult | 4cbe127ea7291893d64d8554c5a405c24085320a | 12,280 |
def unlabeled_balls_in_unlabeled_boxes(balls, box_sizes):
"""
OVERVIEW
This function returns a generator that produces all distinct distributions of
indistinguishable balls among indistinguishable boxes, with specified box
sizes (capacities). This is a generalization of the most common formulation
of the problem, where each box is sufficiently large to accommodate all of
the balls. It might be asked, 'In what sense are the boxes indistinguishable
if they have different capacities?' The answer is that the box capacities
must be considered when distributing the balls, but once the balls have been
distributed, the identities of the boxes no longer matter.
CONSTRUCTOR INPUTS
balls: the number of balls
box_sizes: This argument is a list of length 1 or greater. The length of
the list corresponds to the number of boxes. `box_sizes[i]` is a positive
integer that specifies the maximum capacity of the ith box. If
`box_sizes[i]` equals `balls` (or greater), the ith box can accommodate all the
balls and thus effectively has unlimited capacity.
NOTE
For `unlabeled_balls_in_unlabeled_boxes`, the order of the elements of the
`box_sizes` list is unimportant because the code will sort it into non-
increasing order before any other processing is done.
"""
if not isinstance(balls, int):
raise TypeError("balls must be a non-negative integer.")
if balls < 0:
raise ValueError("balls must be a non-negative integer.")
if not isinstance(box_sizes, (list, tuple)):
raise ValueError("box_sizes must be a non-empty list or tuple.")
capacity= 0
for size in box_sizes:
if not isinstance(size, int):
raise TypeError("box_sizes must contain only positive integers.")
if size < 1:
raise ValueError("box_sizes must contain only positive integers.")
capacity+= size
if capacity < balls:
raise ValueError("The total capacity of the boxes is less than the "
"number of balls to be distributed.")
# Sort the box sizes so that the values decrease:
box_sizes= sorted(box_sizes, reverse=True)
return _unlabeled_balls_in_unlabeled_boxes(balls, box_sizes) | 13743a7207f1d4fd2635f35c9eaef0a9acf53fa0 | 12,281 |
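A brief usage sketch; the recursive generator `_unlabeled_balls_in_unlabeled_boxes` is not shown above, so the exact output type is assumed, but the two distinct distributions of 3 balls over three boxes of capacity 2 are (2, 1, 0) and (1, 1, 1):
for distribution in unlabeled_balls_in_unlabeled_boxes(3, [2, 2, 2]):
    print(distribution)  # expected: (2, 1, 0) and (1, 1, 1), in some order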
def get_version():
"""Extract current version from __init__.py."""
with open("morphocell/__init__.py", encoding="utf-8") as fid:
for line in fid:
if line.startswith("__version__"):
VERSION = line.strip().split()[-1][1:-1]
break
return VERSION | 69a00c2e5544dfd8d86cdab8be53c17b73764aca | 12,282 |
def get_neighbor_v4_by_id(obj_id):
"""Return an NeighborV4 by id.
Args:
obj_id: Id of NeighborV4
"""
try:
obj = NeighborV4.get_by_pk(id=obj_id)
except NeighborV4NotFoundError as e:
raise NeighborV4DoesNotExistException(str(e))
return obj | 30b00e2fb1f954299a331ce6b198f1e5465122e5 | 12,283 |
from typing import Dict
def get_resources_json_obj(resource_name: str) -> Dict:
"""
Get a JSON object of a specified resource.
:param resource_name: The name of the resource.
:returns: The JSON object (in the form of a dictionary).
:raises Exception: An exception is raised if the specified resource does
not exist.
"""
resource_map = _get_resources(_get_resources_json()["resources"])
if resource_name not in resource_map:
raise Exception(
"Error: Resource with name '{}' does not exist".format(
resource_name
)
)
return resource_map[resource_name] | ce625ecbaad0ec4cea93da78c9c213e37ffef3ed | 12,284 |
def skip_if(predicate, reason=None):
"""Skip a test if predicate is true."""
reason = reason or predicate.__name__
def decorate(fn):
fn_name = fn.__name__
def maybe(*args, **kw):
if predicate():
msg = "'%s' skipped: %s" % (fn_name, reason)
raise SkipTest(msg)
else:
return fn(*args, **kw)
return update_wrapper(maybe, fn)
return decorate | 56089515f8cae4f977b1eac96a8de6c6ee59e711 | 12,285 |
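A usage sketch; `SkipTest` and `update_wrapper` are looked up from the snippet's own module, and the bindings below are assumptions about where they come from:
import sys
from functools import update_wrapper  # assumed binding used by skip_if
from unittest import SkipTest         # assumed binding used by skip_if

@skip_if(lambda: sys.platform == "win32", "POSIX-only behaviour")
def test_unix_sockets():
    assert True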
from pyrap.measures import measures
from pyrap.quanta import quantity as q
def synthesized_uvw(ants, time, phase_dir, auto_correlations):
"""
Synthesizes new UVW coordinates based on time according to
NRAO CASA convention (same as in fixvis)
User should check these UVW coordinates carefully:
if time centroid was used to compute
original uvw coordinates the centroids
of these new coordinates may be wrong, depending on whether
data timesteps were heavily flagged.
"""
pytest.importorskip('pyrap')
dm = measures()
epoch = dm.epoch("UT1", q(time[0], "s"))
ref_dir = dm.direction("j2000",
q(phase_dir[0], "rad"),
q(phase_dir[1], "rad"))
ox, oy, oz = ants[0]
obs = dm.position("ITRF", q(ox, "m"), q(oy, "m"), q(oz, "m"))
# Setup local horizon coordinate frame with antenna 0 as reference position
dm.do_frame(obs)
dm.do_frame(ref_dir)
dm.do_frame(epoch)
ant1, ant2 = np.triu_indices(ants.shape[0],
0 if auto_correlations else 1)
ant1 = ant1.astype(np.int32)
ant2 = ant2.astype(np.int32)
ntime = time.shape[0]
nbl = ant1.shape[0]
rows = ntime * nbl
uvw = np.empty((rows, 3), dtype=np.float64)
# For each timestep
for ti, t in enumerate(time):
epoch = dm.epoch("UT1", q(t, "s"))
dm.do_frame(epoch)
ant_uvw = np.zeros_like(ants)
# Calculate antenna UVW positions
for ai, (x, y, z) in enumerate(ants):
bl = dm.baseline("ITRF",
q([x, ox], "m"),
q([y, oy], "m"),
q([z, oz], "m"))
ant_uvw[ai] = dm.to_uvw(bl)["xyz"].get_value()[0:3]
# Now calculate baseline UVW positions
# noting that ant1 - ant2 is the CASA convention
base = ti*nbl
uvw[base:base + nbl, :] = ant_uvw[ant1] - ant_uvw[ant2]
return ant1, ant2, uvw | f3261545f85981d353a05acf3176bf0317ea4c86 | 12,286 |
from datetime import datetime, timedelta
def execute_pso_strategy(df, options, topology, retrain_params, commission, data_name, s_test, e_test, iters=100, normalization='exponential'):
"""
Execute particle swarm optimization strategy on data history contained in df
:param df: dataframe with historical data
:param options: dict with the following parameters
- c1 - cognitive parameter with which the particle follows its personal best
- c2 - social parameter with which the particle follows the swarm's global best position
- w - parameter that controls the inertia of the swarm's movement
:param commission: commission to be paid on each operation
:param data_name: quote data name
:param start_date: start date of simulation
:param end_date: end date of simulation
:return:
- PSO_Cerebro - execution engine
- PSO_Strategy - pso strategy instance
"""
print_execution_name("Estrategia: particle swar optimization")
strategy_name = 'particle_swarm_optimization'
info = {
'Mercado': data_name,
'Estrategia': strategy_name,
'Fecha inicial': s_test,
'Fecha final': e_test
}
# ------------ Get the train and test sets ------------ #
s_test_date = datetime.strptime(s_test, '%Y-%m-%d')
s_train = s_test_date.replace(year = s_test_date.year - 2)
#s_train = s_test_date - timedelta(days=180)
e_train = s_test_date - timedelta(days=1)
gen_representation = GeneticRepresentation(df, s_train, e_train, s_test, e_test)
# ------------ Set the hyperparameters ------------ #
n_particles = topology['particles']
num_neighbours = topology['neighbours']
minkowski_p_norm = 2
options['k'] = num_neighbours
options['p'] = minkowski_p_norm
dimensions=len(gen_representation.moving_average_rules)+2
if normalization == 'exponential':
max_bound = 2.0 * np.ones(dimensions-2)
min_bound = -max_bound
elif normalization == 'l1':
max_bound = 2.0 * np.ones(dimensions-2)
min_bound = np.zeros(dimensions-2)
max_bound = np.append(max_bound, [0.9, 0.0])
min_bound = np.append(min_bound, [0.0, -0.9])
bounds = (min_bound, max_bound)
# Call instance of PSO
optimizer = ps.single.LocalBestPSO(n_particles=n_particles,
dimensions=dimensions,
options=options,
bounds=bounds,
static=True)
# Perform optimization
kwargs={'from_date': s_train, 'to_date': e_train, 'normalization': normalization}
best_cost, best_pos = optimizer.optimize(gen_representation.cost_function,
iters=iters,
n_processes=2,
**kwargs)
# Create an instance from CombinedSignalStrategy class and assign parameters
PSO_Strategy = CombinedSignalStrategy
w, buy_threshold, sell_threshold = get_split_w_threshold(best_pos)
"""
print("Umbral de compra: ", buy_threshold)
print("Umbral de venta: ", sell_threshold)
crosses = ["(" + str(cross[0]) + ", " + str(cross[1]) + ")" for cross in gen_representation.moving_average_rules]
y_pos = np.arange(len(crosses))
plt.bar(y_pos, w)
plt.xticks(y_pos, crosses)
plt.xticks(rotation='vertical')
plt.subplots_adjust(top=0.98, bottom=0.2, left=0.08, right=0.98, hspace=0.0, wspace=0.0)
plt.show()
"""
PSO_Strategy.w = w
PSO_Strategy.buy_threshold = buy_threshold
PSO_Strategy.sell_threshold = sell_threshold
PSO_Strategy.moving_average_rules = gen_representation.moving_average_rules
PSO_Strategy.moving_averages = gen_representation.moving_averages_test
PSO_Strategy.optimizer = optimizer
PSO_Strategy.gen_representation = gen_representation
PSO_Strategy.normalization = normalization
PSO_Strategy.retrain_params = retrain_params
df_test = gen_representation.df_test
df_train = gen_representation.df_train
PSO_Cerebro = execute_strategy(PSO_Strategy, df_test, commission, info, retrain_params)
return PSO_Cerebro, PSO_Strategy | a9a4fffe335ab34ca584a4fe1d3b6116a2a7866c | 12,287 |
def env_get(d, key, default, decoders=decoders, required=None):
"""
Look up ``key`` in ``d`` and decode it, or return ``default``.
"""
if required is None:
required = isinstance(default, type)
try:
value = d[key]
except KeyError:
if required:
raise
return default
dt = (default if default is None or isinstance(default, type)
else type(default))
for decoder in decoders:
if (decoder.decodes_to_type(dt) and
decoder.decodes_from_value(value)
):
try:
return decoder.decode(value)
except Exception as e:
logger.error("%s couldn't convert %s: %s: %s",
decoder.__class__.__name__, key,
e.__class__.__name__, e)
raise
raise ValueError("no suitable env decoder for {}".format(key)) | 844c6ac9931af97bdc92da6d5014659c3600d50e | 12,288 |
from sympy.functions.elementary.complexes import re, im
from .add import Add
from .singleton import S
def get_integer_part(expr: 'Expr', no: int, options: OPT_DICT, return_ints=False) -> \
tUnion[TMP_RES, tTuple[int, int]]:
"""
With no = 1, computes ceiling(expr)
With no = -1, computes floor(expr)
Note: this function either gives the exact result or signals failure.
"""
# The expression is likely less than 2^30 or so
assumed_size = 30
result = evalf(expr, assumed_size, options)
if result is S.ComplexInfinity:
raise ValueError("Cannot get integer part of Complex Infinity")
ire, iim, ire_acc, iim_acc = result
# We now know the size, so we can calculate how much extra precision
# (if any) is needed to get within the nearest integer
if ire and iim:
gap = max(fastlog(ire) - ire_acc, fastlog(iim) - iim_acc)
elif ire:
gap = fastlog(ire) - ire_acc
elif iim:
gap = fastlog(iim) - iim_acc
else:
# ... or maybe the expression was exactly zero
if return_ints:
return 0, 0
else:
return None, None, None, None
margin = 10
if gap >= -margin:
prec = margin + assumed_size + gap
ire, iim, ire_acc, iim_acc = evalf(
expr, prec, options)
else:
prec = assumed_size
# We can now easily find the nearest integer, but to find floor/ceil, we
# must also calculate whether the difference to the nearest integer is
# positive or negative (which may fail if very close).
def calc_part(re_im: 'Expr', nexpr: MPF_TUP):
_, _, exponent, _ = nexpr
is_int = exponent == 0
nint = int(to_int(nexpr, rnd))
if is_int:
# make sure that we had enough precision to distinguish
# between nint and the re or im part (re_im) of expr that
# was passed to calc_part
ire, iim, ire_acc, iim_acc = evalf(
re_im - nint, 10, options) # don't need much precision
assert not iim
size = -fastlog(ire) + 2 # -ve b/c ire is less than 1
if size > prec:
ire, iim, ire_acc, iim_acc = evalf(
re_im, size, options)
assert not iim
nexpr = ire
nint = int(to_int(nexpr, rnd))
_, _, new_exp, _ = ire
is_int = new_exp == 0
if not is_int:
# if there are subs and they all contain integer re/im parts
# then we can (hopefully) safely substitute them into the
# expression
s = options.get('subs', False)
if s:
doit = True
# use strict=False with as_int because we take
# 2.0 == 2
for v in s.values():
try:
as_int(v, strict=False)
except ValueError:
try:
[as_int(i, strict=False) for i in v.as_real_imag()]
continue
except (ValueError, AttributeError):
doit = False
break
if doit:
re_im = re_im.subs(s)
re_im = Add(re_im, -nint, evaluate=False)
x, _, x_acc, _ = evalf(re_im, 10, options)
try:
check_target(re_im, (x, None, x_acc, None), 3)
except PrecisionExhausted:
if not re_im.equals(0):
raise PrecisionExhausted
x = fzero
nint += int(no*(mpf_cmp(x or fzero, fzero) == no))
nint = from_int(nint)
return nint, INF
re_, im_, re_acc, im_acc = None, None, None, None
if ire:
re_, re_acc = calc_part(re(expr, evaluate=False), ire)
if iim:
im_, im_acc = calc_part(im(expr, evaluate=False), iim)
if return_ints:
return int(to_int(re_ or fzero)), int(to_int(im_ or fzero))
return re_, im_, re_acc, im_acc | 6e00897786581134480a6d7fd16b559760a1a4e7 | 12,289 |
def inv_last_roundf(ns):
"""
ns -> States of nibbles
Predict the states of nibbles after passing through the inverse last round
of SomeCipher. Refer to `last_roundf()` for more details.
"""
return inv_shift_row(ns) | 12561f9815ecad4cd08909be1e0c77dd61500cce | 12,290 |
def get_screen(name, layer=None):
"""
:doc: screens
Returns the ScreenDisplayable with the given `name` on layer. `name`
is first interpreted as a tag name, and then a screen name. If the
screen is not showing, returns None.
This can also take a list of names, in which case the first screen
that is showing is returned.
This function can be used to check if a screen is showing::
if renpy.get_screen("say"):
text "The say screen is showing."
else:
text "The say screen is hidden."
"""
if layer is None:
layer = get_screen_layer(name)
if isinstance(name, basestring):
name = (name,)
sl = renpy.exports.scene_lists()
for tag in name:
sd = sl.get_displayable_by_tag(layer, tag)
if sd is not None:
return sd
for tag in name:
sd = sl.get_displayable_by_name(layer, (tag,))
if sd is not None:
return sd
return None | a6496e453a80f1ad286bbe5201aba92fa922794b | 12,291 |
import random
import csv
def generate_address_full(chance=None, variation=False, format=1):
"""
Function to generate the full address of the profile.
Args:
chance: Integer between 1-100 used for realistic variation. (not required)
variation: Boolean value indicating whether variation is requested. (optional)
format: String value used to indicate required format. (optional)
Options include:
-1 (Str value)
-2 (List value)
Returns:
The return value. String/List value containing the full address.
"""
if not chance:
chance = random.randint(1,100)
csv_file = open(canadian_data_file_name, 'r')
csv_reader = csv.reader(csv_file, delimiter=',')
random_row = random.choice(list(csv_reader))
csv_file.close()
if format == 1 or format == "1":
return "%s %s, %s, %s, %s" % (generate_street_number(row=random_row),generate_street_name(chance=chance, variation=variation,row=random_row),generate_city(chance=chance, variation=variation,row=random_row),generate_province(chance=chance, variation=variation,row=random_row),generate_postal_code(chance=chance, variation=variation,row=random_row))
elif format == 2 or format == "2":
address_list=[]
address_list.append(generate_street_number(row=random_row))
address_list.append(generate_street_name(variation,row=random_row))
address_list.append(generate_city(variation,row=random_row))
address_list.append(generate_province(variation,row=random_row))
address_list.append(generate_postal_code(variation,row=random_row))
return address_list | 364cbe0f033d0500014583c550beb36a6cb6db55 | 12,292 |
import urllib
def binder_url(repo, branch="master", filepath=None):
"""
Build a binder url. If filepath is provided, the url will be for
the specific file.
Parameters
----------
repo: str
The repository in the form "username/reponame"
branch: str, optional
The branch, default "master"
filepath: str, optional
The path to a file in the repo, e.g. dir1/dir2/notebook.ipynb
Returns
-------
str
A binder url that will launch a notebook server
"""
if filepath is not None:
fpath = urllib.parse.quote(filepath, safe="%")
return resources.BINDER_URL_TEMPLATE_WITH_FILEPATH.format(
repo, branch, fpath
)
else:
return resources.BINDER_URL_TEMPLATE_NO_FILEPATH.format(repo, branch) | f9ac9e28a1cc6b88bce788e63668f1e4a4b45f61 | 12,293 |
def _create_group_hub_without_avatar(_khoros_object, _api_url, _payload):
"""This function creates a group hub with only a JSON payload and no avatar image.
.. versionadded:: 2.6.0
:param _khoros_object: The core :py:class:`khoros.Khoros` object
:type _khoros_object: class[khoros.Khoros]
:param _api_url: The API URL to utilize in the API request
:type _api_url: str
:param _payload: The JSON payload to be used in the API request
:type _payload: dict
:returns: The API response from the POST request
:raises: :py:exc:`khoros.errors.exceptions.APIConnectionError`,
:py:exc:`khoros.errors.exceptions.POSTRequestError`
"""
_headers = {'content-type': 'application/json'}
_response = api.post_request_with_retries(_api_url, _payload, khoros_object=_khoros_object, headers=_headers)
return _response | 43222666b5a5f5dcec91a4fa1025278f84275e9c | 12,294 |
def ulstrip(text):
"""
Strip Unicode extended whitespace from the left side of a string
"""
return text.lstrip(unicode_extended_whitespace) | 191e0654cdab79778c64ec874bbc9b945b0ed4a3 | 12,295 |
def clone_dcm_meta(dcm):
"""
Copy an existing pydicom Dataset as a basis for saving
another image
:param dcm: the pydicom dataset to be copied
:return:
"""
newdcm = pydi.Dataset()
for k, v in dcm.items():
newdcm[k] = v
newdcm.file_meta = mk_file_meta()
newdcm.is_little_endian = True
newdcm.is_implicit_VR = False
newdcm.SOPInstanceUID = newdcm.file_meta.MediaStorageSOPInstanceUID
newdcm.SOPClassUID = newdcm.file_meta.MediaStorageSOPClassUID
return newdcm | e208ee11241b4194bb0d02357e625d0ee4f52d2e | 12,296 |
import toml
import itertools
from pathlib import Path
def load_plate(toml_path):
"""\
Parse a TOML-formatted configuration file defining how each well in a
particular plate should be interpreted.
Below is a list of the keys that are understood in the configuration file:
'xlsx_path' [string]
The path to the XLSX file containing the plate reader data, relative to
the configuration file itself. If not specified, this script will look
for a file with the same name as the configuration file, but the
'.xlsx' extension, e.g. 'abc.xlsx' if the config file is 'abc.toml'.
'template' [string]
The path to another TOML file that should be interpreted as containing
default values for all possible settings.
'notes' [string]
A string that will be printed every time the file is visualized. This
is meant to remind the user of any details relating to this
particular experiment (e.g. mistakes) that might affect interpretation
of the data.
The following keys relate to particular wells. Each of these keys can be
specified in any of four kinds of block: [well.A1], [row.A], [col.1], and
[plate]. The [well] block allows values to be set for individual wells ('A1'
in this example). The [row] and [col] blocks allow values to be set for
whole rows and columns ('A' and '1' in these examples). The [plate] block
allows values to be set for the whole plate. The same value can be set
multiple times, in which case the value from the most specific block will
take precedence.
"""
def recursive_merge(layout, defaults, overwrite=False):
for key, default in defaults.items():
if isinstance(default, dict):
layout.setdefault(key, {})
recursive_merge(layout[key], default)
else:
if overwrite or key not in layout:
layout[key] = default
def do_load_paths(toml_path, expected_ext='.xlsx'):
toml_path = Path(toml_path).resolve()
layout = toml.load(str(toml_path))
# Resolve the path(s) to actual data.
if 'path' in layout and 'paths' in layout:
raise ValueError(f"{toml_path} specifies both 'path' and 'paths'")
elif 'path' in layout:
path = toml_path.parent / layout['path']
layout['paths'] = {'default': path}
elif 'paths' in layout:
layout['paths'] = {
toml_path.parent / x
for x in layout['paths']
}
else:
default_path = toml_path.with_suffix(expected_ext)
if default_path.exists():
layout['paths'] = {'default': default_path}
# Include a remote file if one is specified.
if 'template' in layout:
layout['template'] = toml_path.parent / layout['template']
template = do_load_paths(layout['template'])
recursive_merge(layout, template)
return layout
layout = do_load_paths(toml_path)
# Apply any row or column defaults.
if 'well' not in layout:
layout['well'] = {}
rows = layout.get('row', {})
cols = layout.get('col', {})
# Create new wells implied by the 'row' and 'col' blocks.
for row, col in itertools.product(rows, cols):
layout['well'].setdefault(f'{row}{col}', {})
# Update any existing wells.
for well in layout.get('well', {}):
row, col = well[:1], well[1:]
recursive_merge(layout['well'][well], rows.get(row, {}))
recursive_merge(layout['well'][well], cols.get(col, {}))
# Apply any plate-wide defaults.
layout.setdefault('plate', {}),
for well in layout.get('well', {}):
recursive_merge(layout['well'][well], layout['plate'])
# If the experiment has any notes, print them out.
if 'notes' in layout:
print(toml_path)
print(layout['notes'].strip())
print()
return layout | cc92a9dae783de915628984979119ca9d2b591a2 | 12,297 |
def reduce_entropy(X, axis=-1):
"""
calculate the entropy over axis and reduce that axis
:param X:
:param axis:
:return:
"""
return -1 * np.sum(X * np.log(X+1E-12), axis=axis) | 68a7d86bf0ad204d989fddceee9e4f75c77a4cb5 | 12,298 |
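A quick check of the behaviour: rows closer to uniform give higher entropy, and the last axis is reduced away:
import numpy as np

p = np.array([[0.5, 0.5],
              [0.9, 0.1]])
print(reduce_entropy(p))  # approx [0.693, 0.325]; shape (2,) after reducing axis -1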
def compile_pbt(lr: float = 5e-3, value_weight: float = 0.5):
"""
my default: 5e-3
# SAI: 1e-4
# KataGo: per-sample learning rate of 6e-5, except 2e-5 for the first 5mm samples
"""
input_shape = (N, N, dual_net.get_features_planes())
model = dual_net.build_model(input_shape)
opt = keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=opt,
loss={
'policy': 'categorical_crossentropy',
'value': custom_BCE_loss},
loss_weights={
'policy': 0.50,
'value': value_weight},
metrics={
'policy': keras.metrics.CategoricalAccuracy(name="move_acc"),
})
return model | e625915c55a44a8c128431ae220401c451ee69a5 | 12,299 |
from typing import List
def _hostnames() -> List[str]:
"""Returns all host names from the ansible inventory."""
return sorted(_ANSIBLE_RUNNER.get_hosts()) | 8c1ed3f61887ff637d9a9091a20cc3f9e4144dde | 12,300 |
def seabass_to_pandas(path):
"""SeaBASS to Pandas DataFrame converter
Parameters
----------
path : str
path to an FCHECKed SeaBASS file
Returns
-------
pandas.DataFrame
"""
sb = readSB(path)
dataframe = pd.DataFrame.from_dict(sb.data)
return dataframe | 7988da0adb19e59d7c898658d2fe659b2d145606 | 12,301 |
def countVisits(item, value=None):
"""This function takes a pandas.Series of item tags, and an optional string for a specific tag
and returns a numpy.ndarray of the same size as the input, which contains either
1) a running count of unique transitions of item, if no target tag is given, or
2) a running count of the number of entries into a run of the target tag
:param item: a pandas Series of labels of events
:param value: optional value of the item to keep track of
:return: a running count of the unique values of items if value==None, or a running count of the specific value
"""
# make sure item is a 1-D np array or a Pandas Series
# if not isinstance(item, (pd.core.series.Series, np.ndarray) ):
assert (isinstance(item, pd.core.series.Series))
# create counter; this saves time, apparently
count = np.zeros(item.size, dtype=int)
if value is None:
# not specified, then we track any time item changes value
count[np.where(item != item.shift())] = 1
else:
# only when item==value
count[np.where(np.logical_and(item != item.shift(), item == value))] = 1
return count.cumsum() | dbb677cc356d867d7f861fe18e2d5c653598d20c | 12,302 |
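A worked example, assuming pandas and NumPy are imported as `pd` and `np` alongside the function:
import pandas as pd

items = pd.Series(["a", "a", "b", "b", "a", "b"])
print(countVisits(items).tolist())       # [1, 1, 2, 2, 3, 4]  -- one count per run of any value
print(countVisits(items, "b").tolist())  # [0, 0, 1, 1, 1, 2]  -- counts entries into runs of "b"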
import numpy as np
import torch
from pathlib import Path
def test_run_inference(ml_runner_with_container: MLRunner, tmp_path: Path) -> None:
"""
Test that run_inference gets called as expected.
"""
def _expected_files_exist() -> bool:
output_dir = ml_runner_with_container.container.outputs_folder
if not output_dir.is_dir():
return False
expected_files = ["test_mse.txt", "test_mae.txt"]
return all([(output_dir / p).exists() for p in expected_files])
# create the test data
N = 100
x = torch.rand((N, 1)) * 10
y = 0.2 * x + 0.1 * torch.randn(x.size())
xy = torch.cat((x, y), dim=1)
data_path = tmp_path / "hellocontainer.csv"
np.savetxt(data_path, xy.numpy(), delimiter=",")
expected_ckpt_path = ml_runner_with_container.container.outputs_folder / "checkpoints" / "last.ckpt"
assert not expected_ckpt_path.exists()
# update the container to look for test data at this location
ml_runner_with_container.container.local_dataset_dir = tmp_path
assert not _expected_files_exist()
actual_train_ckpt_path = ml_runner_with_container.checkpoint_handler.get_recovery_or_checkpoint_path_train()
assert actual_train_ckpt_path is None
ml_runner_with_container.run()
actual_train_ckpt_path = ml_runner_with_container.checkpoint_handler.get_recovery_or_checkpoint_path_train()
assert actual_train_ckpt_path == expected_ckpt_path
actual_test_ckpt_path = ml_runner_with_container.checkpoint_handler.get_checkpoints_to_test()
assert actual_test_ckpt_path == [expected_ckpt_path]
assert actual_test_ckpt_path[0].exists()
# After training, the outputs directory should now exist and contain the 2 error files
assert _expected_files_exist()
# if no checkpoint handler, no checkpoint paths will be saved and these are required for
# inference so ValueError will be raised
with pytest.raises(ValueError) as e:
ml_runner_with_container.checkpoint_handler = None # type: ignore
ml_runner_with_container.run()
assert "expects exactly 1 checkpoint for inference, but got 0" in str(e) | dc38c5582f8d69ff53f24c34d403ed3f14f964f9 | 12,303 |
def gen_accel_table(table_def):
"""generate an acceleration table"""
table = []
for i in range(1001):
table.append(0)
for limit_def in table_def:
range_start, range_end, limit = limit_def
for i in range(range_start, range_end + 1):
table[i] = limit
return table | 53d96db86068d893dfbb216e9e1283535cad9412 | 12,304 |
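A usage sketch showing the expected `table_def` shape: a list of (range_start, range_end, limit) triples over the inclusive 0..1000 index range:
table_def = [(0, 200, 50), (201, 1000, 120)]
table = gen_accel_table(table_def)
print(table[0], table[200], table[201], table[1000])  # 50 50 120 120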
from typing import Tuple
import torch
def dataset_constructor(
config: ml_collections.ConfigDict,
) -> Tuple[
torch.utils.data.Dataset, torch.utils.data.Dataset, torch.utils.data.Dataset
]:
"""
Create datasets loaders for the chosen datasets
:return: Tuple (training_set, validation_set, test_set)
"""
dataset = {
"AddProblem": AdditionProblem,
"CopyMemory": CopyMemory,
"MNIST": MNIST,
"CIFAR10": CIFAR10,
"SpeechCommands": SpeechCommands,
"CharTrajectories": CharTrajectories,
}[config.dataset]
training_set = dataset(
partition="train",
seq_length=config.seq_length,
memory_size=config.memory_size,
mfcc=config.mfcc,
sr=config.sr_train,
dropped_rate=config.drop_rate,
)
test_set = dataset(
partition="test",
seq_length=config.seq_length,
memory_size=config.memory_size,
mfcc=config.mfcc,
sr=config.sr_train
if config.sr_test == 0
else config.sr_test, # Test set can be sample differently.
dropped_rate=config.drop_rate,
)
if config.dataset in ["SpeechCommands", "CharTrajectories"]:
validation_set = dataset(
partition="val",
seq_length=config.seq_length,
memory_size=config.memory_size,
mfcc=config.mfcc,
sr=config.sr_train,
dropped_rate=config.drop_rate,
)
else:
validation_set = None
return training_set, validation_set, test_set | 175e45640e85df7f76331dc99b60d73fccbbdc43 | 12,305 |
def get_metrics(actual_classes, pred_classes):
"""
Function to calculate performance metrics for the classifier
For each class, the following is calculated
TP: True positives = samples that were correctly put into the class
TN: True negatives = samples that were correctly not put into the class
FP: False positives = samples that were incorrectly put into the class
FN: False negatives = samples that should be in the class but were put into
another class
Parameters
----------
pred_classes : neuron types predicted by the classifier
actual_classes : known neuron types
Returns
-------
conf_mat: Confusion matrix = a visual representation of the algorithm's performance
acc: Accuracy = the fraction of correctly classified samples
MK: Markedness = a measure of how trustworthy a classification is,
accounting for both positive and negative classifications.
Value close to 1 means the classifier makes mostly correct predictions, value
close to -1 means the classifier makes mostly wrong predictions.
"""
conf_mat = metrics.confusion_matrix(actual_classes, pred_classes)
acc = metrics.balanced_accuracy_score(actual_classes, pred_classes)
"""
the next portion of code is copied from:
https://towardsdatascience.com/multi-class-classification-extracting-performance-metrics-from-the-confusion-matrix-b379b427a872
"""
FP = conf_mat.sum(axis=0) - np.diag(conf_mat)
FN = conf_mat.sum(axis=1) - np.diag(conf_mat)
TP = np.diag(conf_mat)
TN = conf_mat.sum() - (FP + FN + TP)
FP = np.sum(FP)
FN = np.sum(FN)
TP = np.sum(TP)
TN = np.sum(TN)
"""
end of copied code
"""
MK = (TP/(TP+FP)) + (TN/(TN+FN)) - 1
return conf_mat, acc, MK | 925d80d146ca29984886324338b9f99688c721b8 | 12,309 |
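A hedged usage sketch, assuming `metrics` is `sklearn.metrics` and `np` is NumPy in the snippet's module:
import numpy as np
from sklearn import metrics  # assumed binding used by get_metrics

actual = ["pyr", "pyr", "int", "int", "int"]
pred   = ["pyr", "int", "int", "int", "pyr"]
conf_mat, acc, MK = get_metrics(actual, pred)
print(conf_mat)   # 2x2 confusion matrix over the two neuron types
print(acc, MK)    # balanced accuracy and markedness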
import dateutil
def extract_tika_meta(meta):
"""Extracts and normalizes metadata from Apache Tika.
Returns a dict with the following keys set:
- content-type
- author
- date-created
- date-modified
- original-tika-meta
The dates are encoded in the ISO format."""
def _get_flat(dict, *keys):
item = None
for key in keys:
item = dict.get(key)
if item is not None:
break
if type(item) is list:
return item[0]
return item
def _get_bool(dict, *keys):
item = _get_flat(dict, *keys)
if not item:
return False
if type(item) is bool:
return item
return item.lower() == "true"
data = {
'content-type': _get_flat(meta,
'Content-Type',
'content-type'),
'author': _get_flat(meta,
'Author',
'meta:author',
'creator'),
'date-created': _get_flat(meta,
'Creation-Date',
'dcterms:created',
'meta:created',
'created'),
'date-modified': _get_flat(meta,
'Last-Modified',
'Last-Saved-Date',
'dcterms:modified',
'meta:modified',
'created'),
'original-tika-meta': meta
}
for key in ['date-modified', 'date-created']:
if data.get(key):
data[key] = dateutil.parser.parse(data[key]).isoformat()
return data | d5e73afa3b7747d31f295acb840c3730a3e60ed1 | 12,310 |
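# Usage sketch for extract_tika_meta with a hypothetical Tika metadata dict:
tika_meta = {
    "Content-Type": "application/pdf",
    "Author": "Jane Doe",
    "Creation-Date": "2020-01-02T03:04:05Z",
}
meta = extract_tika_meta(tika_meta)
# meta["content-type"] == "application/pdf"; meta["date-created"] is normalized to ISO format.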
def __gen_pause_flow(testbed_config,
src_port_id,
flow_name,
pause_prio_list,
flow_dur_sec):
"""
Generate the configuration for a PFC pause storm
Args:
testbed_config (obj): L2/L3 config of a T0 testbed
src_port_id (int): ID of the source port
        flow_name (str): flow's name
pause_prio_list (list): priorities to pause for PFC frames
flow_dur_sec (float): duration of the flow in second
Returns:
flow configuration (obj): including name, packet format, rate, ...
"""
pause_time = []
for x in range(8):
if x in pause_prio_list:
pause_time.append('ffff')
else:
pause_time.append('0000')
vector = pfc_class_enable_vector(pause_prio_list)
pause_pkt = Header(PfcPause(
dst=FieldPattern(choice='01:80:C2:00:00:01'),
src=FieldPattern(choice='00:00:fa:ce:fa:ce'),
class_enable_vector=FieldPattern(choice=vector),
pause_class_0=FieldPattern(choice=pause_time[0]),
pause_class_1=FieldPattern(choice=pause_time[1]),
pause_class_2=FieldPattern(choice=pause_time[2]),
pause_class_3=FieldPattern(choice=pause_time[3]),
pause_class_4=FieldPattern(choice=pause_time[4]),
pause_class_5=FieldPattern(choice=pause_time[5]),
pause_class_6=FieldPattern(choice=pause_time[6]),
pause_class_7=FieldPattern(choice=pause_time[7]),
))
dst_port_id = (src_port_id + 1) % len(testbed_config.devices)
pause_src_point = PortTxRx(tx_port_name=testbed_config.ports[src_port_id].name,
rx_port_name=testbed_config.ports[dst_port_id].name)
"""
The minimal fixed time duration in IXIA is 1 second.
To support smaller durations, we need to use # of packets
"""
speed_str = testbed_config.layer1[0].speed
speed_gbps = int(speed_str.split('_')[1])
pause_dur = 65535 * 64 * 8.0 / (speed_gbps * 1e9)
pps = int(2 / pause_dur)
pkt_cnt = pps * flow_dur_sec
pause_flow = Flow(
name=flow_name,
tx_rx=TxRx(pause_src_point),
packet=[pause_pkt],
size=Size(64),
rate=Rate('pps', value=pps),
duration=Duration(FixedPackets(packets=pkt_cnt, delay=0))
)
return pause_flow | 953a6d3a3741b6af0b06bd8165abd7350b838b41 | 12,311 |
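# Worked example of the pause-rate math in __gen_pause_flow above, for a hypothetical 100 Gbps link:
speed_gbps = 100
pause_dur = 65535 * 64 * 8.0 / (speed_gbps * 1e9)  # ~3.36e-4 s covered by one PFC frame (65535 quanta)
pps = int(2 / pause_dur)                           # ~5960 pause frames per second keep the storm continuous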
def parse_str_to_bio(str, dia_act):
""" parse str to BIO format """
intent = parse_intent(dia_act)
w_arr, bio_arr = parse_slots(str, dia_act)
bio_arr[-1] = intent
return ' '.join(w_arr), ' '.join(bio_arr), intent | 951cd110acd5fa53def9e781c0ab9b545d2931b8 | 12,312 |
import jax.numpy as jnp
def train_early_stop(
update_fn, validation_fn, optimizer, state, max_epochs=1e4, **early_stop_args
):
"""Run update_fn until given validation metric validation_fn increases.
"""
logger = Logger()
check_early_stop = mask_scheduler(**early_stop_args)
for epoch in jnp.arange(max_epochs):
(optimizer, state), metrics, output = update_fn(optimizer, state)
if epoch % 1000 == 0:
print(f"Loss step {epoch}: {metrics['loss']}")
if epoch % 25 == 0:
val_metric = validation_fn(optimizer, state)
stop_training, optimizer = check_early_stop(val_metric, epoch, optimizer)
metrics = {**metrics, "validation_metric": val_metric}
logger.write(metrics, epoch)
if stop_training:
print("Converged.")
break
logger.close()
return optimizer, state | a9dc6e76d2796edacc0f55b06e9cf258a90dffea | 12,313 |
from typing import List
def get_povm_object_names() -> List[str]:
"""Return the list of valid povm-related object names.
Returns
-------
List[str]
the list of valid povm-related object names.
"""
names = ["pure_state_vectors", "matrices", "vectors", "povm"]
return names | cb80899b9b3a4aca4bfa1388c6ec9c61c59978a4 | 12,314 |
import operator as op
from math import factorial
def choose(a,b):
""" n Choose r function """
a = op.abs(round(a))
b = op.abs(round(b))
if(b > a):
a, b = b, a
return factorial(a) / (factorial(b) * factorial(a-b)) | 30b70dc950e9f6d501cf5ef07bfed682dce41c43 | 12,315 |
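# Quick sanity checks for choose (imports for operator/factorial added above):
choose(5, 2)   # -> 10.0
choose(2, 5)   # -> 10.0 (arguments are swapped so the larger value is treated as n)
choose(-5, 2)  # -> 10.0 (absolute values are taken first)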
from typing import List
import torch
from typing import Optional
def pad_and_stack_list_of_tensors(lst_embeddings: List[torch.Tensor], max_sequence_length: Optional[int] = None,
return_sequence_length: bool = False):
"""
    it takes the list of embeddings as the input, then applies zero-padding and stacking to transform it
    into a single tensor of shape (batch, max_sequence_length, dim).
    @param lst_embeddings: list of tensors whose second-to-last axis is the sequence dimension.
    @param max_sequence_length: pad/stack length; defaults to the longest sequence in the list.
"""
dim = -2 # second last axis. it must be the sequence dimension.
lst_seq_len = [embeddings.shape[dim] for embeddings in lst_embeddings]
if max_sequence_length is None:
max_sequence_length = max(lst_seq_len)
else:
n_max = max(lst_seq_len)
assert max_sequence_length >= n_max, \
f"`max_sequence_length` must be greater or equal to max. embeddings size: {n_max} > {max_sequence_length}"
lst_padded_embeddings = [pad_trailing_tensors(e_t, max_sequence_length) for e_t in lst_embeddings]
stacked_embeddings = torch.stack(lst_padded_embeddings)
if return_sequence_length:
return stacked_embeddings, lst_seq_len
else:
return stacked_embeddings | 78c3a11f7ff79798d9b86703318eabb8da32695a | 12,316 |
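# Usage sketch for pad_and_stack_list_of_tensors; pad_trailing_tensors is assumed to be the helper
# defined elsewhere in this module that zero-pads a tensor along its second-to-last (sequence) axis.
a = torch.randn(3, 8)  # sequence length 3, embedding dim 8
b = torch.randn(5, 8)  # sequence length 5, embedding dim 8
stacked, lengths = pad_and_stack_list_of_tensors([a, b], return_sequence_length=True)
# stacked.shape == (2, 5, 8); lengths == [3, 5]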
from typing import List
def bq_solid_for_queries(sql_queries):
"""
Executes BigQuery SQL queries.
Expects a BQ client to be provisioned in resources as context.resources.bigquery.
"""
sql_queries = check.list_param(sql_queries, 'sql queries', of_type=str)
@solid(
input_defs=[InputDefinition(_START, Nothing)],
output_defs=[OutputDefinition(List[DataFrame])],
config_field=define_bigquery_query_config(),
required_resource_keys={'bigquery'},
metadata={'kind': 'sql', 'sql': '\n'.join(sql_queries)},
)
def bq_solid(context): # pylint: disable=unused-argument
query_job_config = _preprocess_config(context.solid_config.get('query_job_config', {}))
# Retrieve results as pandas DataFrames
results = []
for sql_query in sql_queries:
# We need to construct a new QueryJobConfig for each query.
# See: https://bit.ly/2VjD6sl
cfg = QueryJobConfig(**query_job_config) if query_job_config else None
context.log.info(
'executing query %s with config: %s'
% (sql_query, cfg.to_api_repr() if cfg else '(no config provided)')
)
results.append(
context.resources.bigquery.query(sql_query, job_config=cfg).to_dataframe()
)
return results
return bq_solid | 0b7d71d6ec6aca87a581c8e0876bc1d88e7242c8 | 12,317 |
def mock_accession_unreplicated(
mocker: MockerFixture,
mock_accession_gc_backend,
mock_metadata,
lab: str,
award: str,
) -> Accession:
"""
Mocked accession instance with dummy __init__ that doesn't do anything and pre-baked
assembly property. @properties must be patched before instantiation
"""
mocker.patch.object(
Accession,
"experiment",
new_callable=PropertyMock(
return_value=EncodeExperiment(
{
"@id": "foo",
"assay_term_name": "microRNA",
"replicates": [
{"biological_replicate_number": 1, "status": "released"}
],
}
)
),
)
mocked_accession = AccessionMicroRna(
"imaginary_steps.json",
Analysis(mock_metadata, backend=mock_accession_gc_backend),
"mock_server.biz",
EncodeCommonMetadata(lab, award),
Recorder(use_in_memory_db=True),
no_log_file=True,
)
return mocked_accession | c221c34a72809737d22b76beff89a18eece128ff | 12,318 |
from typing import Optional
def get_prepared_statement(statement_name: Optional[str] = None,
work_group: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPreparedStatementResult:
"""
Resource schema for AWS::Athena::PreparedStatement
:param str statement_name: The name of the prepared statement.
:param str work_group: The name of the workgroup to which the prepared statement belongs.
"""
__args__ = dict()
__args__['statementName'] = statement_name
__args__['workGroup'] = work_group
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:athena:getPreparedStatement', __args__, opts=opts, typ=GetPreparedStatementResult).value
return AwaitableGetPreparedStatementResult(
description=__ret__.description,
query_statement=__ret__.query_statement) | e3bcd74b2bc9093a0fff822a4f35b0de4dab3e03 | 12,319 |
def handle_col(element, box, _get_image_from_uri, _base_url):
"""Handle the ``span`` attribute."""
if isinstance(box, boxes.TableColumnBox):
integer_attribute(element, box, 'span')
if box.span > 1:
# Generate multiple boxes
# http://lists.w3.org/Archives/Public/www-style/2011Nov/0293.html
return [box.copy() for _i in range(box.span)]
return [box] | ef9fe04982bbb278df1453104823235ecf23113f | 12,320 |
def get_dotted_field(input_dict: dict, accessor_string: str) -> dict:
"""Gets data from a dictionary using a dotted accessor-string.
Parameters
----------
input_dict : dict
A nested dictionary.
accessor_string : str
The value in the nested dict.
Returns
-------
dict
Data from the dictionary.
"""
current_data = input_dict
for chunk in accessor_string.split("."):
current_data = current_data.get(chunk, {})
return current_data | 2c82c0512384810e77a5fb53c73f67d2055dc98e | 12,321 |
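# Example for get_dotted_field: missing keys fall back to an empty dict.
config = {"db": {"credentials": {"user": "admin"}}}
get_dotted_field(config, "db.credentials.user")  # -> "admin"
get_dotted_field(config, "db.missing.user")      # -> {}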
import re
def separa_frases(sentenca):
"""[A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca]
Arguments:
sentenca {[str]} -- [recebe uma frase]
Returns:
[lista] -- [lista das frases contidas na sentença]
"""
return re.split(r'[,:;]+', sentenca) | d3ac427172e34054119659adc55295ac27965e6c | 12,322 |
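# Example: the regex splits the sentence on commas, colons and semicolons.
separa_frases("ola, tudo bem; ate logo")  # -> ['ola', ' tudo bem', ' ate logo']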
import pathlib
import json
import importlib
from functools import partial
def read_datasets(path=None, filename="datasets.json"):
"""Read the serialized (JSON) dataset list
"""
if path is None:
path = _MODULE_DIR
else:
path = pathlib.Path(path)
with open(path / filename, 'r') as fr:
ds = json.load(fr)
# make the functions callable
for _, dset_opts in ds.items():
args = dset_opts.get('load_function_args', {})
kwargs = dset_opts.get('load_function_kwargs', {})
fail_func = partial(unknown_function, dset_opts['load_function_name'])
func_mod_name = dset_opts.get('load_function_module', None)
if func_mod_name:
func_mod = importlib.import_module(func_mod_name)
else:
func_mod = _MODULE
func_name = getattr(func_mod, dset_opts['load_function_name'], fail_func)
func = partial(func_name, *args, **kwargs)
dset_opts['load_function'] = func
return ds | ade3b9169d0f1db45d3358f27a54ea634f6d883e | 12,323 |
def as_actor(input, actor) :
"""Takes input and actor, and returns [as
<$actor>]$input[endas]."""
if " " in actor :
repla = "<%s>"%actor
else :
repla = actor
return "[as %s]%s[endas]" % (repla, input) | dc9bd33bd6b2156f4fa353db2a0b01bfa6dd1357 | 12,324 |
def error_403(request):
"""View rendered when encountering a 403 error."""
return error_view(request, 403, _("Forbidden"),
_("You are not allowed to acces to the resource %(res)s.")
% {"res": request.path}) | 1e104b006100f296ab8f816abae8272b35c9399b | 12,325 |
def _format_param(name, optimizer, param):
"""Return correctly formatted lr/momentum for each param group."""
if isinstance(param, (list, tuple)):
if len(param) != len(optimizer.param_groups):
raise ValueError("expected {} values for {}, got {}".format(
len(optimizer.param_groups), name, len(param)))
return param
else:
return [param] * len(optimizer.param_groups) | 52904bdfb1cba7fe3175606bf77f5e46b3c7df80 | 12,326 |
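# Usage sketch for _format_param (assumes PyTorch): scalars are broadcast to every param group,
# per-group lists are validated against the number of groups.
import torch
model = torch.nn.Linear(2, 2)
opt = torch.optim.SGD([{"params": [model.weight]}, {"params": [model.bias]}], lr=0.1)
_format_param("lr", opt, 0.01)           # -> [0.01, 0.01]
_format_param("lr", opt, [0.01, 0.001])  # -> [0.01, 0.001]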
import numpy as np
def as_binary_vector(labels, num_classes):
"""
Construct binary label vector given a list of label indices.
Args:
labels (list): The input label list.
num_classes (int): Number of classes of the label vector.
Returns:
labels (numpy array): the resulting binary vector.
"""
label_arr = np.zeros((num_classes,))
for lbl in set(labels):
label_arr[lbl] = 1.0
return label_arr | 176a1148d90dcd336ea29ac13b73cc7a6c0cdc60 | 12,327 |
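# Example: labels 1 and 3 in a 5-class binary vector (duplicates are collapsed by set()).
as_binary_vector([1, 3, 3], 5)  # -> array([0., 1., 0., 1., 0.])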
from gensim.models import CoherenceModel
def evaluation_lda(model, data, dictionary, corpus):
""" Compute coherence score and perplexity.
params:
model: lda model
data: list of lists (tokenized)
dictionary
corpus
returns: coherence score, perplexity score
"""
coherence_model_lda = CoherenceModel(model=model, texts=data, dictionary=dictionary, coherence='c_v')
coherence = coherence_model_lda.get_coherence()
perplexity = model.log_perplexity(corpus)
return coherence, perplexity | c38e3ed3728b9a598ec0cf36c07d606daeb8f388 | 12,328 |
def get_map_with_square(map_info, square):
"""
    build a string of the map with its top-left largest
    obstacle-free square filled in
"""
map_string = ""
x_indices = list(range(square["x"], square["x"] + square["size"]))
y_indices = list(range(square["y"], square["y"] + square["size"]))
M = map_info["matrix"]
for y in range(map_info["line_num"]):
if map_string:
map_string += '\n'
for x in range(map_info["line_len"]):
if M[y][x]:
map_string += map_info["obstacle_char"]
elif x in x_indices and y in y_indices:
map_string += map_info["full_char"]
else:
map_string += map_info["empty_char"]
return map_string | 20d405edd8e5e86e943c297455ebfbeb54b669f8 | 12,329 |
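# Example with a hypothetical 3x3 map_info dict; 1 in "matrix" marks an obstacle.
map_info = {
    "matrix": [[0, 0, 1], [0, 0, 0], [1, 0, 0]],
    "line_num": 3,
    "line_len": 3,
    "obstacle_char": "o",
    "full_char": "x",
    "empty_char": ".",
}
print(get_map_with_square(map_info, {"x": 0, "y": 0, "size": 2}))
# xxo
# xx.
# o..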
from matplotlib.colors import LinearSegmentedColormap
def bgr_colormap():
    """
    In cdict, the first column is interpolated between 0.0 & 1.0 - this indicates the value to be plotted.
    The second column specifies how interpolation should be done from below,
    the third column specifies how interpolation should be done from above.
    If the second column does not equal the third, then there will be a break in the colors.
"""
darkness = 0.85 #0 is black, 1 is white
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, darkness, darkness),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, darkness, darkness),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 1.0, 1.0),
(0.5, darkness, darkness),
(1.0, 0.0, 0.0))
}
return LinearSegmentedColormap("bgr", cdict) | ffeb0d415c237a5f8cc180e86bb08d73e443b133 | 12,330 |
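# Usage sketch (assumes matplotlib and numpy are installed): render a signed field with the blue-grey-red map.
import numpy as np
import matplotlib.pyplot as plt
data = np.linspace(-1, 1, 256).reshape(16, 16)
plt.imshow(data, cmap=bgr_colormap())
plt.colorbar()
plt.show()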
from flask import jsonify, request
def autovalidation_from_docstring():
"""
Test validation using JsonSchema
The default payload is invalid, try it, then change the age to a
valid integer and try again
---
tags:
- officer
parameters:
- name: body
in: body
required: true
schema:
id: Officer
required:
- name
- age
properties:
name:
type: string
description: The officer's name.
default: "James T. Kirk"
age:
type: integer
description: The officer's age (should be integer)
default: "138"
tags:
type: array
description: optional list of tags
default: ["starfleet", "captain", "enterprise", "dead"]
items:
type: string
responses:
200:
description: A single officer item
schema:
$ref: '#/definitions/Officer'
"""
data = request.json
return jsonify(data) | 82cb9d043666b465226712e6b12be94291ac5792 | 12,331 |
import requests
def get_vlan_groups(url, headers):
"""
Get dictionary of existing vlan groups
"""
vlan_groups = []
api_url = f"{url}/api/ipam/vlan-groups/"
response = requests.request("GET", api_url, headers=headers)
all_vlan_groups = response.json()["results"]
for vlan_group in all_vlan_groups:
vlan_group_info = dict()
vlan_group_info["name"] = vlan_group["name"]
vlan_group_info["state"] = "present"
if vlan_group["site"] is not None:
vlan_group_info["site"] = vlan_group["site"]["name"]
else:
vlan_group_info["site"] = None
vlan_groups.append(vlan_group_info)
return vlan_groups | c0494708e4d2cb5b61a8e4c7ac4136051b1903c7 | 12,332 |
def getLastReading(session: Session) -> Reading:
"""
Finds the last reading associated with the session
NB: Always returns a Reading, because every Session has at least 1 Reading
Args:
session (Session): A Session object representing the session record in the database
Returns:
datetime: Time object of last reading
"""
return Reading.objects.filter(session_id=session.pk).order_by("t").reverse()[:1].get() | 87f9e86316bf3975077797832225bbe9b027e648 | 12,333 |
from copy import deepcopy
def process_outlier(data, population_set):
    """
    Parameters
    ----------
    data
        List of records, each a dict with at least "date", "age" and "value" keys.
    population_set
        Mapping from age band to population size; when truthy, a per-population rate is added.
    Returns
    -------
    list
        Records grouped by date, each holding the values (and rates) for that date.
"""
content = list()
for date in set(map(lambda x: x['date'], data)):
tmp_item = {
"date": date,
"value": list()
}
for value in filter(lambda d: d["date"] == date, data):
tmp_value = deepcopy(value)
del tmp_value["date"]
if population_set:
tmp_value["rate"] = round(
tmp_value["value"] / population_set[tmp_value["age"]] *
RATE_PER_POPULATION_FACTOR,
1
)
tmp_item["value"].append(tmp_value)
content.append(tmp_item)
return deepcopy(content) | e793aa85bf6b14406d495775a89d37a68ae6bf8b | 12,334 |
import six
def valid_http(http_success=HTTPOk, # type: Union[Type[HTTPSuccessful], Type[HTTPRedirection]]
http_kwargs=None, # type: Optional[ParamsType]
detail="", # type: Optional[Str]
content=None, # type: Optional[JSON]
content_type=CONTENT_TYPE_JSON, # type: Optional[Str]
): # type: (...) -> Union[HTTPSuccessful, HTTPRedirection]
"""
Returns successful HTTP with standardized information formatted with content type. (see :func:`raise_http` for HTTP
error calls)
:param http_success: any derived class from *valid* HTTP codes (<400) (default: `HTTPOk`)
:param http_kwargs: additional keyword arguments to pass to `http_success` when called
:param detail: additional message information (default: empty)
:param content: json formatted content to include
:param content_type: format in which to return the exception (one of `magpie.common.SUPPORTED_ACCEPT_TYPES`)
:returns: formatted successful response with additional details and HTTP code
"""
global RAISE_RECURSIVE_SAFEGUARD_COUNT # pylint: disable=W0603
content = dict() if content is None else content
detail = repr(detail) if not isinstance(detail, six.string_types) else detail
content_type = CONTENT_TYPE_JSON if content_type == CONTENT_TYPE_ANY else content_type
http_code, detail, content = validate_params(http_success, [HTTPSuccessful, HTTPRedirection],
detail, content, content_type)
json_body = format_content_json_str(http_code, detail, content, content_type)
resp = generate_response_http_format(http_success, http_kwargs, json_body, content_type=content_type)
RAISE_RECURSIVE_SAFEGUARD_COUNT = 0 # reset counter for future calls (don't accumulate for different requests)
return resp | 6c88712cd501291fe126b87086ee29700f44832b | 12,335 |
def operating_cf(cf_df):
"""Checks if the latest reported OCF (Cashflow) is positive.
Explanation of OCF: https://www.investopedia.com/terms/o/operatingcashflow.asp
cf_df = Cashflow Statement of the specified company
"""
cf = cf_df.iloc[cf_df.index.get_loc("Total Cash From Operating Activities"),0]
if (cf > 0):
return True
else:
return False | ed6a849fa504b79cd65c656d9a1318aaaeed52bf | 12,336 |
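# Usage sketch with a hypothetical cashflow DataFrame (assumes pandas):
import pandas as pd
cf_df = pd.DataFrame({"2022": [1.5e9]}, index=["Total Cash From Operating Activities"])
operating_cf(cf_df)  # -> True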
from io import StringIO
import pandas as pd
from hdfs import InsecureClient
def generate_performance_scores(query_dataset, target_variable, candidate_datasets, params):
"""Generates all the performance scores.
"""
performance_scores = list()
# params
algorithm = params['regression_algorithm']
cluster_execution = params['cluster']
hdfs_address = params['hdfs_address']
hdfs_user = params['hdfs_user']
inner_join = params['inner_join']
# HDFS Client
hdfs_client = None
if cluster_execution:
# time.sleep(np.random.randint(1, 120)) # avoid opening multiple sockets at the same time
hdfs_client = InsecureClient(hdfs_address, user=hdfs_user)
# reading query dataset
query_data_str = read_file(query_dataset, hdfs_client, cluster_execution)
query_data = pd.read_csv(StringIO(query_data_str))
query_data.set_index(
'key-for-ranking',
drop=True,
inplace=True
)
# build model on query data only
_, scores_before = get_performance_scores(
query_data,
target_variable,
algorithm,
False
)
for candidate_dataset in candidate_datasets:
# reading candidate dataset
candidate_data_str = read_file(candidate_dataset, hdfs_client, cluster_execution)
candidate_data = pd.read_csv(StringIO(candidate_data_str))
candidate_data.set_index(
'key-for-ranking',
drop=True,
inplace=True
)
# join dataset
join_ = query_data.join(
candidate_data,
how='left',
rsuffix='_r'
)
if inner_join:
join_.dropna(inplace=True)
# build model on joined data
# print('[INFO] Generating performance scores for query dataset %s and candidate dataset %s ...' % (query_dataset, candidate_dataset))
imputation_strategy, scores_after = get_performance_scores(
join_,
target_variable,
algorithm,
not(inner_join)
)
# print('[INFO] Performance scores for query dataset %s and candidate dataset %s done!' % (query_dataset, candidate_dataset))
performance_scores.append(
generate_output_performance_data(
query_dataset=query_dataset,
target=target_variable,
candidate_dataset=candidate_dataset,
scores_before=scores_before,
scores_after=scores_after,
imputation_strategy=imputation_strategy
)
)
return performance_scores | b8cb09973f17aab2c16515a026747c3e006bfd35 | 12,337 |
import cmath
import math
import numpy as np
def correct_sparameters_twelve_term(sparameters_complex,twelve_term_correction,reciprocal=True):
"""Applies the twelve term correction to sparameters and returns a new sparameter list.
The sparameters should be a list of [frequency, S11, S21, S12, S22] where S terms are complex numbers.
The twelve term correction should be a list of
[frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr] where Edf, etc are complex numbers"""
if len(sparameters_complex) != len(twelve_term_correction):
raise TypeError("s parameter and twelve term correction must be the same length")
sparameter_out=[]
phase_last=0.
for index,row in enumerate(sparameters_complex):
frequency=row[0]
Sm=np.matrix(row[1:]).reshape((2,2))
[frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr]=twelve_term_correction[index]
# frequency Edf Esf Erf Exf Elf Etf Edr Esr Err Exr Elr Etr.
# print [frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr]
# print Sm[0,0]
D =(1+(Sm[0,0]-Edf)*(Esf/Erf))*(1+(Sm[1,1]-Edr)*(Esr/Err))-(Sm[0,1]*Sm[1,0]*Elf*Elr)/(Etf*Etr)
# print D
S11 =(Sm[0,0]-Edf)/(D*Erf)*(1+(Sm[1,1]-Edr)*(Esr/Err))-(Sm[0,1]*Sm[1,0]*Elf)/(D*Etf*Etr)
S21 =((Sm[1,0]-Exr)/(D*Etf))*(1+(Sm[1,1]-Edr)*(Esr-Elf)/Err)
S12 = ((Sm[0,1]-Exf)/(D*Etr))*(1+(Sm[0,0]-Edf)*(Esf-Elr)/Erf)
S22 = (Sm[1,1]-Edr)/(D*Err)*(1+(Sm[0,0]-Edf)*(Esf/Erf))-(Sm[0,1]*Sm[1,0]*Elr)/(D*Etf*Etr)
# S12 and S21 are averaged together in a weird way that makes phase continuous
geometric_mean=cmath.sqrt(S21*S12)
root_select=1
phase_new=cmath.phase(geometric_mean)
# if the phase jumps by >180 but less than 270, then pick the other root
if abs(phase_new-phase_last)>math.pi/2 and abs(phase_new-phase_last)<3*math.pi/2:
root_select=-1
mean_S12_S21=root_select*cmath.sqrt(S21*S12)
if reciprocal:
sparameter_out.append([frequency,S11,mean_S12_S21,mean_S12_S21,S22])
else:
sparameter_out.append([frequency,S11,S21,S12,S22])
phase_last=cmath.phase(mean_S12_S21)
return sparameter_out | e957c8eebd905b93b45e79a7349c1fca895c5430 | 12,338 |
def api_activity_logs(request):
"""Test utility."""
auth = get_auth(request)
obj = ActivityLogs(auth=auth)
check_apiobj(authobj=auth, apiobj=obj)
return obj | 7b13f382e71971b6ed93154a591a27f95fd81a2c | 12,339 |
def RNAshapes_parser(lines=None,order=True):
"""
Returns a list containing tuples of (sequence,pairs object,energy) for
every sequence
[[Seq,Pairs,Ene],[Seq,Pairs,Ene],...]
    Structures will be ordered by structure energy by default; if ordering
    isn't desired, set order to False
"""
result = lineParser(lines)
if order:
result = order_structs(result)
return result | 3c45a4f6efb190cb26512dea4a55c44292191e0f | 12,340 |
from typing import Callable
from typing import Optional
from typing import Union
from typing import Dict
from typing import Any
def get_case_strategy( # pylint: disable=too-many-locals
draw: Callable,
operation: APIOperation,
hooks: Optional[HookDispatcher] = None,
data_generation_method: DataGenerationMethod = DataGenerationMethod.default(),
path_parameters: Union[NotSet, Dict[str, Any]] = NOT_SET,
headers: Union[NotSet, Dict[str, Any]] = NOT_SET,
cookies: Union[NotSet, Dict[str, Any]] = NOT_SET,
query: Union[NotSet, Dict[str, Any]] = NOT_SET,
body: Any = NOT_SET,
) -> Any:
"""A strategy that creates `Case` instances.
Explicit `path_parameters`, `headers`, `cookies`, `query`, `body` arguments will be used in the resulting `Case`
object.
If such explicit parameters are composite (not `body`) and don't provide the whole set of parameters for that
location, then we generate what is missing and merge these two parts. Note that if parameters are optional, then
they may remain absent.
The primary purpose of this behavior is to prevent sending incomplete explicit examples by generating missing parts
as it works with `body`.
"""
to_strategy = DATA_GENERATION_METHOD_TO_STRATEGY_FACTORY[data_generation_method]
context = HookContext(operation)
with detect_invalid_schema(operation):
path_parameters_value = get_parameters_value(
path_parameters, "path", draw, operation, context, hooks, to_strategy
)
headers_value = get_parameters_value(headers, "header", draw, operation, context, hooks, to_strategy)
cookies_value = get_parameters_value(cookies, "cookie", draw, operation, context, hooks, to_strategy)
query_value = get_parameters_value(query, "query", draw, operation, context, hooks, to_strategy)
media_type = None
if body is NOT_SET:
if operation.body:
parameter = draw(st.sampled_from(operation.body.items))
strategy = _get_body_strategy(parameter, to_strategy, operation)
strategy = apply_hooks(operation, context, hooks, strategy, "body")
media_type = parameter.media_type
body = draw(strategy)
else:
media_types = operation.get_request_payload_content_types() or ["application/json"]
# Take the first available media type.
# POSSIBLE IMPROVEMENT:
# - Test examples for each available media type on Open API 2.0;
# - On Open API 3.0, media types are explicit, and each example has it.
# We can pass `OpenAPIBody.media_type` here from the examples handling code.
media_type = media_types[0]
if operation.schema.validate_schema and operation.method.upper() == "GET" and operation.body:
raise InvalidSchema("Body parameters are defined for GET request.")
return Case(
operation=operation,
media_type=media_type,
path_parameters=path_parameters_value,
headers=CaseInsensitiveDict(headers_value) if headers_value is not None else headers_value,
cookies=cookies_value,
query=query_value,
body=body,
data_generation_method=data_generation_method,
) | d46fde928b0ceaa3886904e35876c245e7fcb245 | 12,341 |
def type_from_value(value, visitor=None, node=None):
"""Given a Value from resolving an annotation, return the type."""
ctx = _Context(visitor, node)
return _type_from_value(value, ctx) | 92568581d8f7b47ac469d0575f549acb1b67c857 | 12,342 |
def _accesslen(data) -> int:
"""This was inspired by the `default_collate` function.
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/
"""
if isinstance(data, (tuple, list)):
item = data[0]
if not isinstance(item, (float, int, str)):
return len(item)
return len(data) | df709ee8a97c920a1413c9d7240f83d0406577a6 | 12,343 |
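# Examples: nested containers report the length of their first item,
# flat numeric data falls back to the length of the container itself.
_accesslen(([1, 2, 3], [4, 5, 6]))  # -> 3
_accesslen((1.0, 2.0))              # -> 2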
def createSkill(request, volunteer_id):
"""
Method to create skills and interests
:param request:
:param volunteer_id:
:return:
"""
if request.method == 'POST':
volunteer = Volunteer_User_Add_Ons.objects.get(pk=volunteer_id)
skills = request.POST.getlist('skills')
interests = request.POST.getlist('interests')
# call to create the skills
createInputToken(request, skills, 'Skill', volunteer_id)
# call to create the interests
createInputToken(request, interests, 'Interest', volunteer_id)
return HttpResponse('ok') | f612ef94b02664526018fd2ea948a36587cb15bf | 12,344 |
import numpy as np
from sympy import nsimplify, trigsimp
def analyticJacobian(robot : object, dq = 0.001, symbolic = False):
"""Using Homogeneous Transformation Matrices, this function computes Analytic Jacobian Matrix of a serial robot given joints positions in radians. Serial robot's kinematic parameters have to be set before using this function
Args:
robot (Serial): serial robot (this won't work with other type of robots)
dq (float, optional): step size for numerical derivative. Defaults to 0.001.
symbolic (bool, optional): used to calculate symbolic equations. Defaults to False.
Returns:
J (np.array): Inertial Analytic Jacobian Matrix (numerical)
J (SymPy Matrix): Inertial Analytic Jacobian Matrix (symbolical)
"""
# Calculate forward kinematics: f(q)
fkHTM = forwardHTM(robot, symbolic)
# Convert result into an Axis - Angle vector: x(q)
x = axisAngle(fkHTM[-1], symbolic)
if symbolic:
# Calculate Analytic Jacobian Matrix by differentiating Axis - Angle vector with SymPy functions
return nsimplify(trigsimp(x.jacobian(robot.qSymbolic)).evalf(), tolerance = 1e-10)
else:
# Get number of joints (generalized coordinates)
n = robot.jointsPositions.shape[0]
# Initializes jacobian matrix with zeros
J = np.zeros((6, n))
# Auxiliar variable to keep original joints positions
q = robot.jointsPositions.copy()
# Iterates through all colums (generalized coordinates)
for j in range(n):
# Set increment to current generalized coordinate: z[j] = q[j] + dq
robot.jointsPositions[j] += dq
# Calculate forward kinematics with step size: f(z) = f(q + dq)
f = forwardHTM(robot)
# Convert result into an Axis - Angle vector: X(q + dq)
X = axisAngle(f[-1])
# Calculate analytic jacobian matrix: [X(q + dq) - x(q)] / dq
J[: , j] = ((X - x) / dq).flatten()
# Eliminates step size by copying original values from auxiliar variable
robot.jointsPositions[:, :] = q
return J | a906148f26fea9bb9d833ac95dffde87a704e372 | 12,345 |
def test_sharedmethod_reuse_on_subclasses():
"""
Regression test for an issue where sharedmethod would bind to one class
for all time, causing the same method not to work properly on other
subclasses of that class.
It has the same problem when the same sharedmethod is called on different
instances of some class as well.
"""
class AMeta(type):
def foo(cls):
return cls.x
class A:
x = 3
def __init__(self, x):
self.x = x
@sharedmethod
def foo(self):
return self.x
a1 = A(1)
a2 = A(2)
assert a1.foo() == 1
assert a2.foo() == 2
# Similar test now, but for multiple subclasses using the same sharedmethod
# as a classmethod
assert A.foo() == 3
class B(A):
x = 5
assert B.foo() == 5 | 829ad4fafb32cb18d8da7b8144be25746f892ce5 | 12,346 |
def triu_indices_from(arr, k=0):
"""
Returns the indices for the upper-triangle of `arr`.
Args:
arr (Union[Tensor, list, tuple]): 2-dimensional array.
k (int, optional): Diagonal offset, default is 0.
Returns:
triu_indices_from, tuple of 2 tensor, shape(N)
Indices for the upper-triangle of `arr`.
Raises:
TypeError: If `arr` cannot be converted to tensor, or `k` is not a number.
ValueError: If `arr` cannot be converted to a 2-dimensional tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> tensor = np.ones((3,3))
>>> print(np.triu_indices_from(tensor))
(Tensor(shape=[6], dtype=Int32, value= [0, 0, 0, 1, 1, 2]),
Tensor(shape=[6], dtype=Int32, value= [0, 1, 2, 1, 2, 2]))
"""
arr = asarray(arr)
if arr.ndim != 2:
_raise_value_error("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) | b95a7ed3fac1810bdfe9659471cbcd2d14fc8c99 | 12,348 |
def func(var):
"""Function"""
return var + 1 | a6ca4247f7f7307c384708ed9535046e4ec7d4e3 | 12,350 |