content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def create_all_snapshots(volume_ids):
"""
Creates the snapshots of all volumes in the provided list.
Params:
volume_ids (list): List of volumes attached to the instance
Returns:
bool: True once a snapshot has been requested for every volume
"""
for i in volume_ids:
snapshot(i)
return True
|
c985bdce6b11e85cedb3d8951447fdf234e3aeb4
| 27,964 |
def stack_subsample_frames(x, stacking=1, subsampling=1):
""" Stacks frames together across feature dim, and then subsamples
x.shape: FEAT, TIME
output FEAT * stacking, TIME / subsampling
"""
# x.shape: FEAT, TIME
seq = []
x_len = tf.shape(x)[1]
for n in range(0, stacking):
tmp = x[:, n:x_len - stacking + 1 + n:subsampling]
seq.append(tmp)
x = tf.concat(seq, axis=0)
return x
|
6cab964588d01cdecec1862cd3c1db681923d20d
| 27,965 |
def compute_TVL1(prev, curr, TVL1, bound=20):
"""
Args:
prev (numpy.ndarray): a previous video frame, dimension is
`height` x `width`.
curr (numpy.ndarray): a current video frame, dimension is
`height` x `width`.
TVL1: an OpenCV DualTVL1 optical-flow object used to compute the flow.
bound (int): specifies the maximum and minimum of the optical flow.
Return:
flow (numpy.ndarray): optical flow.
"""
# TVL1=cv2.optflow.DualTVL1OpticalFlow_create()
# TVL1 = cv2.DualTVL1OpticalFlow_create()
# TVL1=cv2.createOptFlow_DualTVL1()
flow = TVL1.calc(prev, curr, None)
assert flow.dtype == np.float32
flow = (flow + bound) * (255.0 / (2 * bound))
flow = np.round(flow).astype(int)
flow[flow >= 255] = 255
flow[flow <= 0] = 0
return flow
|
94d83e0cbfc8e20ed78ba0ab3763d5ddd4e2859a
| 27,966 |
def _fetch_all_namespace_permissions(cursor):
"""
Fetches all user-namespace-permissions mapping registered with Herd
:param cursor: cursor used to run Hive queries
:return: list of all users that have READ on each respective namespace
"""
namespaces = _fetch_all_namespaces()
user_namespace_permissions = []
all_users = set()
for namespace in namespaces:
_print_info('Fetching namespace permissions for namespace: {}'.format(
namespace))
response = _fetch_herd_session() \
.get('{}://{}/{}/{}'.format(HERD_REST_PROTOCOL, HERD_BASE_URL,
HERD_REST_BASE_PATH,
'/userNamespaceAuthorizations/namespaces/{}').format(
namespace)) \
.json()
public_read = False
namespace_users = []
for authorization in response['userNamespaceAuthorizations']:
if 'READ' in authorization['namespacePermissions']:
namespace_users.append(
authorization['userNamespaceAuthorizationKey']['userId'])
# add each user to the global users set
all_users.add(
authorization['userNamespaceAuthorizationKey']['userId'])
# check if read-all is enabled on namespace
if authorization['userNamespaceAuthorizationKey']['userId'] == PUBLIC_READ_USER:
public_read = True
_print_info(
'Found {} users with READ permissions on namespace: {}'.format(
len(namespace_users), namespace))
user_namespace_permissions.append({
'namespace': namespace,
'users': namespace_users
})
# grant read to all users if read-all is enabled, otherwise - revoke
_print_info(
'Public read option enabled on namespace: \'{}\'? {}'.format(
namespace, public_read))
_manage_public_read(cursor, namespace, public_read)
# manage user-schemas for all users
for user in all_users:
_create_user_schema(cursor, user)
return user_namespace_permissions
|
0802b81a8d731cabbd51b1d3699c0ce64ac6c64a
| 27,967 |
def for_in_right(obj, callback=None):
"""This function is like :func:`for_in` except it iterates over the
properties in reverse order.
Args:
obj (list|dict): Object to process.
callback (mixed): Callback applied per iteration.
Returns:
list|dict: `obj`.
Example:
>>> data = {'product': 1}
>>> def cb(v): data['product'] *= v
>>> for_in_right([1, 2, 3, 4], cb)
[1, 2, 3, 4]
>>> data['product'] == 24
True
See Also:
- :func:`for_in_right` (main definition)
- :func:`for_own_right` (alias)
.. versionadded:: 1.0.0
"""
walk = (None for ret, _, _, _ in itercallback(obj, callback, reverse=True)
if ret is False)
next(walk, None)
return obj
|
6d85f7245cd454be61015ca69e1844d1dc830c86
| 27,968 |
def metadata_fake(batch_size):
"""Make a xr dataset"""
# get random OSGB center in the UK
lat = np.random.uniform(51, 55, batch_size)
lon = np.random.uniform(-2.5, 1, batch_size)
x_centers_osgb, y_centers_osgb = lat_lon_to_osgb(lat=lat, lon=lon)
# get random times
t0_datetimes_utc = make_t0_datetimes_utc(batch_size)
metadata_dict = {}
metadata_dict["batch_size"] = batch_size
metadata_dict["x_center_osgb"] = list(x_centers_osgb)
metadata_dict["y_center_osgb"] = list(y_centers_osgb)
metadata_dict["t0_datetime_utc"] = list(t0_datetimes_utc)
return Metadata(**metadata_dict)
|
fa55cb231e013f3b5c4193af9c5cfff6f79fce82
| 27,969 |
def delete_document(ix: str, docid: str):
"""
Delete a document from the given index by its document id.
Requires the WRITER role on the index; responds with 404 if the document does not exist.
"""
check_role(Role.WRITER, _index(ix))
try:
elastic.delete_document(ix, docid)
except elasticsearch.exceptions.NotFoundError:
abort(404)
return '', HTTPStatus.OK
|
a87c7c23b31ce24da83b81e8d241d4173542a930
| 27,970 |
def sentence_segment(doc, candidate_pos):
"""Store those words only in cadidate_pos"""
sentences = []
for sent in doc.sents:
selected_words = []
for token in sent:
# Store words only with candidate POS tag
if token.pos_ in candidate_pos and token.is_stop is False and len(token.text) > 1:
selected_words.append(token.text.lower())
sentences.append(selected_words)
return sentences
|
6c56d47470e60edddfedfeb476aa7833be765218
| 27,971 |
def get_speckle_spatial_freq(image, pos, cx, cy, lambdaoverd, angle=None):
""" returns the spatial frequency of the speckle defined in the area of aperture mask """
""" lambdaoverd = nb of pixels per lambda/D """
nx, ny =image.shape[0], image.shape[1]
k_xy = (np.roll(pos,1,axis=0)-[cx,cy])/lambdaoverd # fwhm is in pixels per lbd over D
#k_mod = np.sqrt(k_sp[0]**2. + k_sp[1]**2.)
k_xy = snm.rotateXY(k_xy[0], k_xy[1], thetadeg = -1.0*angle)
return k_xy
|
beace113642545f74ba3a49a4a15b0308d0f4535
| 27,972 |
def verify_ping(
device,
address,
loss_rate=0,
count=None,
max_time=30,
check_interval=10):
""" Verify ping loss rate on ip address provided
Args:
device ('obj'): Device object
address ('str'): Address value
loss_rate ('int'): Expected loss rate value
count ('int'): Count value for ping command
max_time (`int`): Max time, default: 30
check_interval (`int`): Check interval, default: 10
Returns:
Boolean
Raises:
None
"""
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
if count:
cmd = 'ping {address} count {count}'.format(
address=address,
count=count
)
else:
cmd = 'ping {address}'.format(
address=address
)
try:
out = device.parse(cmd)
except SchemaEmptyParserError as e:
timeout.sleep()
continue
# Example dictionary structure:
# {
# "ping": {
# "address": "10.189.5.94",
# "data-bytes": 56,
# "result": [
# {
# "bytes": 64,
# "from": "10.189.5.94",
# "icmp-seq": 0,
# "time": "2.261",
# "ttl": 62
# },
# ],
# "source": "10.189.5.94",
# "statistics": {
# "loss-rate": 0,
# "received": 1,
# "round-trip": {
# "avg": "2.175",
# "max": "2.399",
# "min": "1.823",
# "stddev": "0.191"
# },
# "send": 1
# }
# }
# }
loss_rate_found = Dq(out).get_values("loss-rate", 0)
if loss_rate_found == loss_rate:
return True
return False
|
df19e0815a49388189bf480623c156b9992a2fe2
| 27,973 |
def get_default(key):
""" get the default value for the specified key """
func = registry.defaults.get(key)
return func()
|
081588445955da66d9988e962d2a360ed1193240
| 27,975 |
from typing import List
def antisymmetric(r: Relation) -> (bool, List):
"""Kiểm tra tính phản xứng của r"""
antisymmetric_tuple = []
for x, y in r:
if x == y:
continue
if (y, x) in r:
return False, [((x, y), (y, x))]
antisymmetric_tuple.append(((x, y), (y, x)))
return True, antisymmetric_tuple
|
d7a7900192850a9b86a56263fec5daea551a034f
| 27,976 |
def calc_negative_predictive_value(cause, actual, predicted):
"""Calculate negative predictive value (NPV) for a single cause
Negative predictive value is the number of predictions correctly determined
to not belong to the given cause over the total number of predictions that
are not the cause:
.. math::
NPV = \\frac{TN}{NP} = \\frac{TN}{TN + FN}
where TN is the number of true negative predictions, NP is the number of
negative predictions, and FN is the number of false negative predictions.
Args:
cause: a label in the actual and predicted series
actual (pd.Series): true individual level classification
predicted (pd.Series): individual level prediction
Returns:
float
"""
true_negative = ((actual != cause) & (predicted != cause)).sum()
n_not_predicted = (predicted != cause).sum()
return true_negative / n_not_predicted if n_not_predicted else np.nan
|
f89976fc5ec9c03e5d8d42a8265ba92e87d91ec8
| 27,977 |
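For illustration, a minimal worked example of the NPV computation above, assuming pandas is imported as pd and the function is in scope (the labels are made up):
import pandas as pd
actual = pd.Series(['a', 'a', 'b', 'c'])
predicted = pd.Series(['a', 'b', 'b', 'c'])
# rows 2 and 3 are neither actually nor predicted 'a' -> TN = 2; negative predictions = 3
calc_negative_predictive_value('a', actual, predicted)  # 2 / 3, about 0.667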
def print_atom_swap(swap):
"""Return atom swap string for DL CONTROL"""
return "{} {}".format(swap["id1"], swap["id2"])
|
4c2fa18434e7a66b98b9716b89a26b622b588cd6
| 27,978 |
def dot_product_area_attention(q,
k,
v,
bias,
dropout_rate=0.0,
image_shapes=None,
name=None,
attention_image_summary=None,
save_weights_to=None,
dropout_broadcast_dims=None,
max_area_width=1,
max_area_height=1,
memory_height=1,
area_key_mode="mean",
area_value_mode="sum",
top_k_areas=0,
area_temperature=1.0,
training=True):
"""Dot-product area attention.
Args:
q: Tensor with shape [..., length_q, depth_k].
k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
match with q.
v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
match with q.
bias: bias Tensor (see attention_bias())
dropout_rate: a float.
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
name: an optional string
attention_image_summary: the callback for making image summary of attention.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
dropout_broadcast_dims: an optional list of integers less than rank of q.
Specifies in which dimensions to broadcast the dropout decisions.
max_area_width: the max width allowed for an area.
max_area_height: the max height allowed for an area.
memory_height: the height of the memory.
area_key_mode: the mode for computing area keys, which can be "mean",
"concat", "sum", "sample_concat", and "sample_sum".
area_value_mode: the mode for computing area values, which can be either
"mean", or "sum".
top_k_areas: if positive, keep only the top-k areas for attention.
area_temperature: the temperature for attention softmax.
training: indicating if it is in the training mode.
Returns:
Tensor with shape [..., length_q, depth_v].
"""
tf.logging.info("dot_product_area_attention: "
"area_h=%d, area_w=%d, mem_h=%d, "
"area_key_mode=%s, area_value_mode=%s, "
"area_temperature=%f",
max_area_height, max_area_width, memory_height,
area_key_mode, area_value_mode,
area_temperature)
with tf.variable_scope(
name, default_name="dot_product_area_attention",
values=[q, k, v]) as scope:
mem_shape = common_layers.shape_list(k)
batch_size = mem_shape[0]
head_size = mem_shape[1]
length = mem_shape[2]
depth = mem_shape[3]
k_area = compute_area_key(
tf.reshape(k, [-1, length, depth]),
max_area_width=max_area_width,
max_area_height=max_area_height,
height=memory_height,
mode=area_key_mode,
training=training)
if area_value_mode == "mean":
v_area, _, _, _, _ = compute_area_features(
tf.reshape(v, [-1, length, depth]), max_area_width=max_area_width,
max_area_height=max_area_height, height=memory_height)
elif area_value_mode == "max":
v_area, _, _ = basic_pool(tf.reshape(v, [-1, length, depth]),
max_area_width=max_area_width,
max_area_height=max_area_height,
height=memory_height,
fn=tf.reduce_max)
elif area_value_mode == "sum":
_, _, v_area, _, _ = compute_area_features(
tf.reshape(v, [-1, length, depth]), max_area_width=max_area_width,
max_area_height=max_area_height, height=memory_height)
else:
raise ValueError("Unsupported area value mode=%s" % area_value_mode)
k = tf.reshape(k_area, [batch_size, head_size, -1, depth])
v = tf.reshape(v_area, [batch_size, head_size, -1, depth])
logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv]
if bias is not None:
bias = common_layers.cast_like(bias, logits)
with tf.name_scope("compute_area_att_bias", values=[bias]):
bias_shape = common_layers.shape_list(bias)
mem_length = bias_shape[-1]
bias_values = tf.reshape(
tf.to_float(tf.less(bias, -1)), [-1, mem_length, 1])
_, _, padding_sum, _, _ = compute_area_features(
bias_values, max_area_width=max_area_width,
max_area_height=max_area_height, height=memory_height)
bias = tf.where(
tf.cast(tf.to_int32(padding_sum), tf.bool),
tf.fill(tf.shape(padding_sum), -np.inf),
tf.zeros_like(padding_sum, dtype=tf.float32))
bias = tf.reshape(bias,
[bias_shape[0], bias_shape[1],
bias_shape[2], -1])
logits += bias
logits = logits / area_temperature
weights = tf.nn.softmax(logits, name="attention_weights")
if top_k_areas > 0:
tf.logging.info("area_attention top_k_areas=%d", top_k_areas)
top_k = tf.minimum(common_layers.shape_list(weights)[-1], top_k_areas)
top_weights, _ = tf.nn.top_k(weights, k=top_k)
min_values = tf.reduce_min(top_weights, -1, keepdims=True)
weights = tf.where(tf.greater_equal(weights, min_values),
weights, tf.zeros_like(weights))
weights = tf.div(weights, tf.reduce_sum(weights, -1, keepdims=True))
if save_weights_to is not None:
save_weights_to[scope.name] = weights
save_weights_to[scope.name + "/logits"] = logits
# Drop out attention links for each head.
weights = common_layers.dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
if common_layers.should_generate_summaries() and attention_image_summary:
attention_image_summary(weights, image_shapes)
return tf.matmul(weights, v)
|
947864002406597931c663340e1a258ac2ae5bed
| 27,980 |
def random_transform(x, seed=None):
"""Randomly augment a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
np.random.seed(seed)
img_row_axis = 0
img_col_axis = 1
img_channel_axis = 2
rotation_range = 10
theta = np.deg2rad(np.random.uniform(-rotation_range, rotation_range))
height_shift_range = width_shift_range = 0.2
if height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-height_shift_range,
height_shift_range)
if np.max(height_shift_range) < 1:
tx *= x.shape[img_row_axis]
else:
tx = 0
if width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-width_shift_range,
width_shift_range)
if np.max(width_shift_range) < 1:
ty *= x.shape[img_col_axis]
else:
ty = 0
zoom_range = (0.9, 1.1)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis, fill_mode='nearest')
return x
|
9f0b09dd4c5b0a0f9f00ae15682a27645894b064
| 27,983 |
def global_avg_pooling_forward(z):
"""
Global average pooling, forward pass.
:param z: convolution feature map of shape (N, C, H, W); N is the batch size, C the number of channels
:return: array of shape (N, C), each feature map averaged over H and W
"""
return np.mean(np.mean(z, axis=-1), axis=-1)
|
f12efc7bd368af81164246fcb39a27f9de7e122d
| 27,985 |
def label_encoder(adata):
"""
Encode labels of Annotated `adata` matrix using sklearn.preprocessing.LabelEncoder class.
Parameters
----------
adata: `~anndata.AnnData`
Annotated data matrix.
Returns
-------
labels: numpy nd-array of shape (n, 1)
Array of encoded labels
le: sklearn.preprocessing.LabelEncoder
The fitted label encoder
"""
le = preprocessing.LabelEncoder()
labels = le.fit_transform(adata.obs["condition"].tolist())
return labels.reshape(-1, 1), le
|
421aa578a965b2e8e66204a368e1c42348148ef6
| 27,987 |
def is_plus_or_minus(token_type: TokenType) -> bool:
"""Check if token is a plus or minus."""
return is_plus(token_type) or is_minus(token_type)
|
1f0210505e8e882f07380ffd0d412a62f1d4d44f
| 27,988 |
def gen_data(test_size=TEST_SIZE, channels=CHANNELS,
width=WIDTH, height=HEIGHT,
mmean=0, vmean=1, channel_last=False, fc_output=False):
"""
Generate random data to pass through the layer
NOTE:
- The generated data should not be normal, so that the layer can try to
normalize the data. Therefore the mean of the data is drawn from
another normal distribution which is centered around 1. This occurs for
the synthetic activations and synthetic deltas. The variances are
drawn from a folded normal distribution.
Return:
inputs: synthetic input activations
deltas_in: synthetic deltas for bprop
"""
if fc_output:
shape = (test_size, channels)
else:
shape = (test_size, channels, width, height)
if channel_last:
shape = (test_size, width, height, channels)
inputs = np.random.normal(loc=0, scale=1, size=shape)
# Generate random deltas for testing bprop
deltas_in = np.random.normal(loc=0,
scale=np.abs(vmean * np.random.randn(*shape)),
size=shape)
return inputs.astype(np.float32), deltas_in.astype(np.float32)
|
0030330bf93d6abb34f41575aaf1f45a52199393
| 27,989 |
from typing import Union
from typing import List
def plot_r2_pvalues(
model: mofa_model,
factors: Union[int, List[int], str, List[str]] = None,
n_iter: int = 100,
groups_df: pd.DataFrame = None,
group_label: str = None,
view=0,
fdr: bool = True,
cmap="binary_r",
**kwargs,
):
"""
Plot R2 values for the model
Parameters
----------
model : mofa_model
Factor model
factors : optional
Index of a factor (or indices of factors) to use (all factors by default)
n_iter : optional int
Number of iterations used to compute the null distribution of R2 (default: 100)
view : optional
Make a plot for a certain view (first view by default)
groups_df : optional pd.DataFrame
Data frame with samples (cells) as index and first column as group assignment
group_label : optional
Sample (cell) metadata column to be used as group assignment
fdr : optional bool
Whether to plot corrected p-values (FDR)
cmap : optional
The colourmap for the heatmap (default is 'binary_r' with darker colour for smaller p-values)
"""
r2 = model.get_r2_null(
factors=factors,
groups_df=groups_df,
group_label=group_label,
n_iter=n_iter,
return_pvalues=True,
fdr=fdr,
)
pvalue_column = "FDR" if fdr else "PValue"
# Select a certain view if necessary
if view is not None:
view = model.views[view] if isinstance(view, int) else view
r2 = r2[r2["View"] == view]
r2_df = r2.sort_values("PValue").pivot(
index="Factor", columns="Group", values=pvalue_column
)
# Sort by factor index
r2_df.index = r2_df.index.astype("category")
r2_df.index = r2_df.index.reorder_categories(
sorted(r2_df.index.categories, key=lambda x: int(x.split("Factor")[1]))
)
r2_df = r2_df.sort_values("Factor")
g = sns.heatmap(r2_df.sort_index(level=0, ascending=False), cmap=cmap, **kwargs)
g.set_yticklabels(g.yaxis.get_ticklabels(), rotation=0)
return g
|
784a5333514a270bdf69960fa1a857668b414e5a
| 27,990 |
import itertools
def plot_confusion_matrix(y_true, y_pred, labels=None, true_labels=None,
pred_labels=None, title=None, normalize=False,
hide_zeros=False, x_tick_rotation=0, ax=None,
figsize=None, cmap='Blues', title_fontsize="large",
text_fontsize="medium"):
"""Generates confusion matrix plot from predictions and true labels
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_pred (array-like, shape (n_samples)):
Estimated targets as returned by a classifier.
labels (array-like, shape (n_classes), optional): List of labels to
index the matrix. This may be used to reorder or select a subset
of labels. If none is given, those that appear at least once in
``y_true`` or ``y_pred`` are used in sorted order. (new in v0.2.5)
true_labels (array-like, optional): The true labels to display.
If none is given, then all of the labels are used.
pred_labels (array-like, optional): The predicted labels to display.
If none is given, then all of the labels are used.
title (string, optional): Title of the generated plot. Defaults to
"Normalized Confusion Matrix" if `normalize` is True. Else, defaults to
"Confusion Matrix".
normalize (bool, optional): If True, normalizes the confusion matrix
before plotting. Defaults to False.
hide_zeros (bool, optional): If True, does not plot cells containing a
value of zero. Defaults to False.
x_tick_rotation (int, optional): Rotates x-axis tick labels by the
specified angle. This is useful in cases where there are numerous
categories and the labels overlap each other.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> rf = RandomForestClassifier()
>>> rf = rf.fit(X_train, y_train)
>>> y_pred = rf.predict(X_test)
>>> skplt.plot_confusion_matrix(y_test, y_pred, normalize=True)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_confusion_matrix.png
:align: center
:alt: Confusion matrix
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
cm = confusion_matrix(y_true, y_pred, labels=labels)
if labels is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(labels)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm = np.around(cm, decimals=2)
cm[np.isnan(cm)] = 0.0
if true_labels is None:
true_classes = classes
else:
validate_labels(classes, true_labels, "true_labels")
true_label_indexes = np.in1d(classes, true_labels)
true_classes = classes[true_label_indexes]
cm = cm[true_label_indexes]
if pred_labels is None:
pred_classes = classes
else:
validate_labels(classes, pred_labels, "pred_labels")
pred_label_indexes = np.in1d(classes, pred_labels)
pred_classes = classes[pred_label_indexes]
cm = cm[:, pred_label_indexes]
if title:
ax.set_title(title, fontsize=title_fontsize)
elif normalize:
ax.set_title('Normalized Confusion Matrix', fontsize=title_fontsize)
else:
ax.set_title('Confusion Matrix', fontsize=title_fontsize)
image = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.get_cmap(cmap))
plt.colorbar(mappable=image)
x_tick_marks = np.arange(len(pred_classes))
y_tick_marks = np.arange(len(true_classes))
ax.set_xticks(x_tick_marks)
ax.set_xticklabels(pred_classes, fontsize=text_fontsize,
rotation=x_tick_rotation)
ax.set_yticks(y_tick_marks)
ax.set_yticklabels(true_classes, fontsize=text_fontsize)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if not (hide_zeros and cm[i, j] == 0):
ax.text(j, i, cm[i, j],
horizontalalignment="center",
verticalalignment="center",
fontsize=text_fontsize,
color="white" if cm[i, j] > thresh else "black")
ax.set_ylabel('True label', fontsize=text_fontsize)
ax.set_xlabel('Predicted label', fontsize=text_fontsize)
ax.grid(False)
return ax
|
f2f690a410d933ecdffee1898b9d991482a5eb67
| 27,991 |
def _check_lfs_hook(client, paths):
"""Pull the specified paths from external storage."""
return client.check_requires_tracking(*paths)
|
403b3db59f6eeec72c8f4a3b18808997b0f34724
| 27,992 |
def name_to_zamid(name):
"""Converts a nuclide's name into the nuclide's z-a-m id.
Parameters
----------
name: str
Name of a nuclide
"""
dic = d.nuc_name_dic
elt_name = name.split('-')[0]
na = int(name.split('-')[1].replace('*',''))
if '*' in name:
state = 1
else:
state = 0
zzaaam = 10000*d.nuc_name_dic[elt_name] + na*10 + state
zamid = str(zzaaam)
return zamid
|
89129a288a93c96f3e24003b6dee2adba81dc935
| 27,994 |
def sample_category(name):
"""Create and return a sample category"""
return models.Category.objects.create(name=name)
|
b9b38954520611ca7808592200ebf871da90bab6
| 27,995 |
def plan_add(request):
"""
Add a test plan.
:param request:
:return:
"""
user_id = request.session.get('user_id', '')
if not get_user(user_id):
request.session['login_from'] = '/base/plan/'
return HttpResponseRedirect('/login/')
else:
if request.method == 'POST':
prj_list = is_superuser(user_id)
plan_name = request.POST['plan_name'].strip()
content = request.POST.getlist("case_id")
msg = plan_info_logic(plan_name, content)
if msg != 'ok':
log.error('plan add error:{}'.format(msg))
return render(request, 'base/plan/add.html', {'error': msg, "prj_list": prj_list})
else:
prj_id = request.POST['prj_id']
project = Project.objects.get(prj_id=prj_id)
is_locust = request.POST['is_locust']
is_task = request.POST['is_task']
env_id = request.POST['env_id']
environment = Environment.objects.get(env_id=env_id)
description = request.POST['description']
username = request.session.get('user', '')
if is_locust == '1':
Plan.objects.filter(is_locust=1).update(is_locust=0)
if is_task == '1':
Plan.objects.filter(is_task=1).update(is_task=0)
plan = Plan(plan_name=plan_name, project=project, environment=environment, description=description,
content=content, is_locust=is_locust, is_task=is_task, update_user=username)
plan.save()
log.info('add plan {} success. plan info: {} // {} // {} // {} //{} //{}'.
format(plan_name, project, environment, description, content, is_locust, is_task))
return HttpResponseRedirect("/base/plan/")
elif request.method == 'GET':
prj_list = is_superuser(user_id)
info = {"prj_list": prj_list}
return render(request, "base/plan/add.html", info)
|
4da776fd83e30019fbd6cdb1b659d8626e0620cc
| 27,999 |
import itertools
def get_state_vect_cols(prefix=''):
"""Get the column names of the state vector components with the
provided `prefix`.
:param prefix: The prefix that is used in front of the state vector
components in the column names, examples are `physics_pred` and
`physics_err` or none
:type prefix: str
:return: A list of the 6 names of the prefixed state vector components
:rtype: [str]
"""
if prefix:
prefix += '_'
vectors = ['r', 'v']
components = ['x', 'y', 'z']
col_names = [f'{prefix}{v}_{c}'
for v, c
in itertools.product(vectors, components)]
return col_names
|
d61c5ebd2aad8c679dda50fa1e310ebf11480e01
| 28,001 |
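As a quick illustration of the helper above, using the `physics_pred` prefix mentioned in its docstring:
get_state_vect_cols('physics_pred')
# ['physics_pred_r_x', 'physics_pred_r_y', 'physics_pred_r_z',
#  'physics_pred_v_x', 'physics_pred_v_y', 'physics_pred_v_z']
get_state_vect_cols()  # with no prefix: ['r_x', 'r_y', 'r_z', 'v_x', 'v_y', 'v_z']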
import six
import shlex
def parse_options(options=None, api=False):
"""
Parse given option string
:param options:
:type options:
:param api
:type api: boolean
:return:
:rtype:
"""
if isinstance(options, six.string_types):
args = shlex.split(options)
options = vars(argument_parser.parse_args(args))
elif options is None:
if api:
options = {}
else:
options = vars(argument_parser.parse_args())
elif not isinstance(options, dict):
options = vars(argument_parser.parse_args(options))
return options
|
f8a2b3671dab3ffc5f23bd937181324bc1c0d9c7
| 28,003 |
import random
import datetime
def data_for_column(column: dict, kwargs: dict, size: int) -> list:
"""Generates data for schema column
:param dict column: Column definition
:param dict kwargs: Faker keyword arguments
:param int size: Number of rows
:return: List of random data for a column
:rtype: list
"""
data = []
data_type = column.get('type', 'empty')
try:
method = getattr(fake, data_type)
except AttributeError:
raise AttributeError(f"Exception at column {column.get('name', '')}, '{data_type}' is not a valid data type")
percent_empty = column.get('percent_empty', 0)
for _ in range(size):
if random.random() <= percent_empty:
data.append(None)
else:
datum = method(**kwargs)
if isinstance(datum, (datetime.date, datetime.datetime)):
datum = datum.strftime(column['format']) if 'format' in column else datum.isoformat()
data.append(datum)
return data
|
d2ba76d48d80cc256f1959d8fa617b81301119d0
| 28,004 |
def peak_bin(peaks, i):
"""Return the (bin) index of the ith largest peak. Peaks is a list of tuples (i, x[i])
of peak indices i and values x[i], sorted in decreasing order by peak value."""
if len(peaks) > i:
return peaks[i][0]
else:
return np.nan
|
fc667fe04c856e3090ded9ca8eb0a45d51cda74a
| 28,005 |
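A short usage sketch for peak_bin (numbers are made up; the peaks list must already be sorted by decreasing value):
peaks = [(12, 0.9), (4, 0.5), (30, 0.1)]  # (bin index, value), largest peak first
peak_bin(peaks, 0)  # -> 12, the bin of the largest peak
peak_bin(peaks, 2)  # -> 30
peak_bin(peaks, 5)  # -> np.nan, since there are fewer than 6 peaks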
def fetch_all(path, params=None, client=default_client):
"""
Args:
path (str): The path for which we want to retrieve all entries.
Returns:
list: All entries stored in database for a given model. You can add a
filter to the model name like this: "tasks?project_id=project-id"
"""
return get(url_path_join("data", path), params=params, client=client)
|
d663414388b9b6e105fab42d8e4d9cde558322cf
| 28,006 |
import datetime
import calendar
def plotter(fdict):
""" Go """
pgconn = get_dbconn('coop')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
ctx = get_autoplot_context(fdict, get_description())
station = ctx['station']
table = "alldata_%s" % (station[:2],)
nt = NetworkTable("%sCLIMATE" % (station[:2],))
cursor.execute("""
SELECT year, month, avg((high+low)/2.) from """+table+"""
WHERE station = %s and day < %s and year > 1892
GROUP by year, month ORDER by year ASC
""", (station, datetime.date.today().replace(day=1)))
if cursor.rowcount == 0:
raise ValueError("No results found for query")
for rownum, row in enumerate(cursor):
if rownum == 0:
baseyear = row[0]
avgs = np.ones((datetime.datetime.now().year - baseyear + 1,
12)) * -99.
avgs[row[0]-baseyear, row[1]-1] = row[2]
matrix = np.zeros((12, 12))
lastyear = np.zeros((12, 12))
rows = []
for i in range(12):
for j in range(12):
# How many years was i warmer than j
t = np.where(np.logical_and(avgs[:, j] > -99,
np.logical_and(avgs[:, i] > avgs[:, j],
avgs[:, i] > -99)),
1, 0)
matrix[i, j] = np.sum(t)
lastyear[i, j] = datetime.datetime.now().year - np.argmax(t[::-1])
lyear = lastyear[i, j] if matrix[i, j] > 0 else None
rows.append(dict(month1=(i+1), month2=(j+1), years=matrix[i, j],
lastyear=lyear))
df = pd.DataFrame(rows)
(fig, ax) = plt.subplots(1, 1, sharex=True, figsize=(8, 6))
x, y = np.meshgrid(np.arange(-0.5, 12.5, 1), np.arange(-0.5, 12.5, 1))
res = ax.pcolormesh(x, y, np.transpose(matrix))
for i in range(12):
for j in range(12):
txt = ax.text(i, j, "%s" % (
"%.0f" % (matrix[i, j],) if i != j else '-'),
va='center', ha='center', color='white')
txt.set_path_effects([PathEffects.withStroke(linewidth=2,
foreground="k")])
if matrix[i, j] > 0 and matrix[i, j] < 10:
txt = ax.text(i, j-0.5, "%.0f" % (lastyear[i, j],),
fontsize=9,
va='bottom', ha='center', color='white')
txt.set_path_effects([PathEffects.withStroke(linewidth=2,
foreground="k")])
ax.set_xticks(range(0, 12))
ax.set_xticklabels(calendar.month_abbr[1:])
ax.set_yticks(range(0, 12))
ax.set_yticklabels(calendar.month_abbr[1:])
ax.set_xlim(-0.5, 11.5)
ax.set_ylim(-0.5, 11.5)
ax.set_title(("[%s] %s\nYears that Month was Warmer than other Month"
) % (station, nt.sts[station]['name']))
fig.colorbar(res)
ax.set_xlabel("This Month was Warmer than...")
ax.set_ylabel("...this month for same year")
return fig, df
|
4b11cee286494963afb43cfc5b6ab7e56c281476
| 28,007 |
def link_library_dynamic(hs, dep_info, object_files, my_pkg_id):
"""Link a dynamic library for the package using given object files.
Returns:
File: Produced dynamic library.
"""
dynamic_library = hs.actions.declare_file(
"lib{0}-ghc{1}.{2}".format(
pkg_id.library_name(hs, my_pkg_id),
hs.toolchain.version,
_so_extension(hs),
)
)
args = hs.actions.args()
args.add(["-shared", "-dynamic"])
# Work around macOS linker limits. This fix has landed in GHC HEAD, but is
# not yet in a release; plus, we still want to support older versions of
# GHC. For details, see: https://phabricator.haskell.org/D4714
if hs.toolchain.is_darwin:
args.add(["-optl-Wl,-dead_strip_dylibs"])
for package in set.to_list(dep_info.package_ids):
args.add(["-package-id", package])
# XXX This should be really dep_info.direct_prebuilt_deps, but since we
# cannot add prebuilt_dependencies to the "depends" field on package
# registration (see a comment there), we have to pass all transitive
# prebuilt_dependencies on linking like this.
for package in set.to_list(dep_info.prebuilt_dependencies):
args.add(["-package", package])
for cache in set.to_list(dep_info.package_caches):
args.add(["-package-db", cache.dirname])
_add_external_libraries(args, dep_info.external_libraries.values())
args.add([ f.path for f in object_files ])
solibs = set.union(
set.from_list(dep_info.external_libraries),
dep_info.dynamic_libraries,
)
if hs.toolchain.is_darwin:
dynamic_library_tmp = hs.actions.declare_file(dynamic_library.basename + ".temp")
_fix_linker_paths(
hs,
dynamic_library_tmp,
dynamic_library,
dep_info.external_libraries
)
args.add(["-optl-Wl,-headerpad_max_install_names"])
else:
dynamic_library_tmp = dynamic_library
for rpath in set.to_list(_infer_rpaths(dynamic_library, solibs)):
args.add(["-optl-Wl,-rpath," + rpath])
args.add(["-o", dynamic_library_tmp.path])
hs.actions.run(
inputs = depset(transitive = [
depset(object_files),
set.to_depset(dep_info.package_caches),
set.to_depset(dep_info.dynamic_libraries),
depset(dep_info.external_libraries.values()),
]),
outputs = [dynamic_library_tmp],
mnemonic = "HaskellLinkDynamicLibrary",
executable = hs.tools.ghc,
arguments = [args]
)
return dynamic_library
|
5171d75c71b52e2487ff1d349add86c042a84062
| 28,008 |
def save_mvgcca_latents_space(X, W, model, path, prefix, epochs):
"""Saves the list containing the common latent space Z and all the views latent space Z_m.
- X : [np.array(n x d1),...,np.array(n x dM)] multivews features ; n number of instances; dm dimension of views m ; M number of views
- W : np.array(n x n) weighted adjacency matrix
- model : trained model MVGCCA
- path : str, directory where the .mat file is written
- prefix : str, optional suffix appended to the output file name
- epochs : int, the epoch being saved (used in the output file name)
"""
if prefix != '' :
prefix = "_"+prefix
Z_list = get_mvgcca_latents_space(X, W, model)
key = ['t'+str(s) for s in range(len(Z_list))]
dictionary = dict(zip(key, Z_list))
sio.savemat(path+"latent_space_"+str(epochs+1)+'epochs'+prefix+'.mat',dictionary)
return Z_list[0]
|
dc0fbb15dd73e44bf1b1b2c74b173cfb6b8cf1d8
| 28,009 |
def TDC_sampling(in_channels, mode='downsampling'):
"""
wrapper_function: -> TIC_sampling
[B, in_channels, T, F] => [B, in_channels, T, F//2 or F*2]
in_channels: number of input channels
"""
return TIC_sampling(in_channels, mode)
|
8458e9fe9bfd6bc92af2940b4c3ea5d2f09eb40a
| 28,010 |
def bmxbm(s, t, batch_first=True):
"""
Batched matrix and batched matrix multiplication.
"""
if batch_first:
equation = "aij,ajk->aik"
else:
equation = "ija,jka->ika"
return tf.einsum(equation, s, t)
|
6ac60eb1ffeed2caad312fd4691d689e705986c0
| 28,011 |
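A shape-level sketch of the batched multiplication above, assuming TensorFlow 2 is imported as tf:
import tensorflow as tf
s = tf.random.normal([4, 2, 3])  # batch of 4 matrices, each 2 x 3
t = tf.random.normal([4, 3, 5])  # batch of 4 matrices, each 3 x 5
out = bmxbm(s, t, batch_first=True)
print(out.shape)  # (4, 2, 5)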
import re
def get_all_semantic_case_ids():
"""Get iterator over test sorted IDs of all cases in the SBML semantic
suite"""
pattern = re.compile(r'\d{5}')
return sorted(str(x.name) for x in SBML_SEMANTIC_CASES_DIR.iterdir()
if pattern.match(x.name))
|
d4a5cba008010f02398bb61c32f06450610de350
| 28,012 |
def generate_points(n=500, min_=0, max_=1):
"""
Generate a list of n points.
Parameters
----------
n : int
min_ : float
max_ : float
Returns
-------
list
List of length n with tuples (x, y) where x is in [min_, max_] and
y = 2*x + 100 plus uniform noise in [0, 1).
"""
assert max_ > min_
ret = []
np.random.seed(seed=42)
for x in np.linspace(min_, max_, n):
noise = np.random.random()
def f(x):
"""Some function."""
return 2.0*x+100.0
ret.append((x, f(x)+noise))
return ret
|
fe2dbe0ed281716a465804d67014badab96fb414
| 28,013 |
def gce(nvf):
"""
Write the necessary code to launch the VNF using GCE
:param nvf:
:return: vagrantfile code
"""
element = Template(u'''\
config.vm.box = "{{image}}"
config.vm.provider :google do |google, override|
google.google_project_id = {{google_project_id}}
google.google_client_email = {{google_client_email}}
google.google_json_key_location = {{google_json_key_location}}
override.ssh.username = {{username}}
''')
vim = Gce.objects.get(name=nvf.vim.name)
element = element.render(
name=nvf.name,
username=nvf.operator.name,
image=nvf.vnf.image,
google_project_id=vim.google_project_id,
google_client_email=vim.google_client_email,
google_json_key_location=vim.google_json_key_location,
)
return element
|
588c2472b2a957a1eda64bef526b6410103b72b2
| 28,014 |
from datetime import datetime
def get_ethpm_birth_block(
w3: Web3, from_block: int, to_block: int, target_timestamp: int
) -> int:
"""
Returns the closest block found before the target_timestamp
"""
version_release_date = datetime.fromtimestamp(target_timestamp)
while from_block < to_block:
mid = BlockNumber((from_block + to_block) // 2)
target = datetime.fromtimestamp(w3.eth.getBlock(mid)["timestamp"])
if target > version_release_date:
to_block = mid
elif target < version_release_date:
from_block = mid + 1
else:
return mid - 1
raise BlockNotFoundError(
f"Cannot find closest block to timestamp: {target_timestamp} "
f"in range given {from_block} - {to_block}."
)
|
c2448152cea2a3c9a9dd227a5126e2dd0767b773
| 28,016 |
def Line(p0, p1=None, c="r", alpha=1, lw=1, dotted=False, res=None):
"""
Build the line segment between points `p0` and `p1`.
If `p0` is a list of points returns the line connecting them.
A 2D set of coords can also be passed as p0=[x..], p1=[y..].
:param c: color name, number, or list of [R,G,B] colors.
:type c: int, str, list
:param float alpha: transparency in range [0,1].
:param lw: line width.
:param bool dotted: draw a dotted line
:param int res: number of intermediate points in the segment
"""
# detect if user is passing a 2D list of points as p0=xlist, p1=ylist:
if len(p0) > 3:
if not utils.isSequence(p0[0]) and not utils.isSequence(p1[0]) and len(p0)==len(p1):
# assume input is 2D xlist, ylist
p0 = list(zip(p0, p1))
p1 = None
# detect if user is passing a list of points:
if utils.isSequence(p0[0]):
ppoints = vtk.vtkPoints() # Generate the polyline
dim = len((p0[0]))
if dim == 2:
for i, p in enumerate(p0):
ppoints.InsertPoint(i, p[0], p[1], 0)
else:
ppoints.SetData(numpy_to_vtk(p0, deep=True))
lines = vtk.vtkCellArray() # Create the polyline.
lines.InsertNextCell(len(p0))
for i in range(len(p0)):
lines.InsertCellPoint(i)
poly = vtk.vtkPolyData()
poly.SetPoints(ppoints)
poly.SetLines(lines)
else: # or just 2 points to link
lineSource = vtk.vtkLineSource()
lineSource.SetPoint1(p0)
lineSource.SetPoint2(p1)
if res:
lineSource.SetResolution(res)
lineSource.Update()
poly = lineSource.GetOutput()
actor = Actor(poly, c, alpha)
actor.GetProperty().SetLineWidth(lw)
if dotted:
actor.GetProperty().SetLineStipplePattern(0xF0F0)
actor.GetProperty().SetLineStippleRepeatFactor(1)
actor.base = np.array(p0)
actor.top = np.array(p1)
settings.collectable_actors.append(actor)
return actor
|
1a56c260ad0d3478b51db03fa267898c637bf819
| 28,017 |
def date_dd(dataset, source):
"""Display 3 blocks: 1. image of the patent, 2. choice block, 3. text block for date. 2 is
artifical and should be ignored"""
def get_stream():
# Load the directory of images and add options to each task
stream = Images(source)
for eg in stream:
eg["options"] = OPTIONS
yield eg
return {
"dataset": dataset,
"view_id": "blocks",
"config": {
"choice_style": "single",
"blocks": [
{"view_id": "choice", "text": None},
{
"view_id": "text_input",
"field_rows": 1,
"field_label": "Publication year (yyyy)",
},
],
},
"stream": get_stream(),
}
|
fae232b97ab4d758aceea806ebc95816db3cb044
| 28,018 |
def sort(array=[12,4,5,6,7,3,1,15]):
"""Sort the array by using quicksort."""
less = []
equal = []
greater = []
if len(array) > 1:
pivot = array[0]
for x in array:
if x < pivot:
less.append(x)
elif x == pivot:
equal.append(x)
elif x > pivot:
greater.append(x)
# Don't forget to return something!
return sort(less)+equal+sort(greater) # Just use the + operator to join lists
# Note that you want equal ^^^^^ not pivot
else: # You need to handle the part at the end of the recursion - when you only have one element in your array, just return the array.
return array
|
bc31df069f8e985d620032b9053bd8f13880780f
| 28,019 |
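Two quick calls to the quicksort above (the default argument is just a demo list):
sort([3, 1, 4, 1, 5, 9, 2, 6])  # [1, 1, 2, 3, 4, 5, 6, 9]
sort()                          # default list -> [1, 3, 4, 5, 6, 7, 12, 15]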
from typing import Optional
async def remove_completed_game(player_id: str, game_id: str) -> Optional[dict]:
"""
Updates the player's current games by removing a game from it.
:param player_id: the object id of the player
:param game_id: the object id of the game
:return: an awaitable resolving to the updated document, or None if one is not found
"""
collection = await db.get_player_collection()
updated_player = await collection.find_one_and_update(
{"_id": PydanticObjectID(player_id)},
{"$pull": {"completed_games": PydanticObjectID(game_id)}},
return_document=ReturnDocument.AFTER,
)
return updated_player
|
2e5f4ec3af053d1f1685e6a576d8027db585bc87
| 28,021 |
def is_multioutput(y):
"""Whether the target y is multi-output (or multi-index)"""
return hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1
|
bcdaa46c304fec50c173dffca5f1f1d5d8871a58
| 28,023 |
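A few illustrative checks of the predicate above (values are arbitrary):
import numpy as np
is_multioutput(np.zeros((10, 3)))  # True: 2-D with more than one column
is_multioutput(np.zeros(10))       # False: 1-D target
is_multioutput([0, 1, 0])          # False: a plain list has no .shape attribute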
def read_all(db: Session):
""" Get all dimensions.
:param db:
:return: List[QuestionModel]
"""
question = db.query(QuestionModel).all()
return question
|
a854c4667dc30918cd1e9ec767d65fa8ad1fb5ca
| 28,024 |
def get_all_tenants(context):
"""Returns a list of all tenants stored in repository.
:param context: context of the transaction
"""
return context.session.query(db_models.AristaProvisionedProjects)
|
62d8fed653f5b8e380caa47f5f408ecab860a58b
| 28,025 |
def total_sub_pixels_2d_from(mask_2d: np.ndarray, sub_size: int) -> int:
"""
Returns the total number of sub-pixels in unmasked pixels in a mask.
Parameters
----------
mask_2d : np.ndarray
A 2D array of bools, where `False` values are unmasked and included when counting sub pixels.
sub_size : int
The size of the sub-grid that each pixel of the 2D mask array is divided into.
Returns
-------
int
The total number of sub pixels that are unmasked.
Examples
--------
mask = np.array([[True, False, True],
[False, False, False]
[True, False, True]])
total_sub_pixels = total_sub_pixels_from_mask(mask=mask, sub_size=2)
"""
return total_pixels_2d_from(mask_2d) * sub_size ** 2
|
98461ffe073172db596570630ccfbd27384c7e3a
| 28,027 |
from simtk import unit as simtk_unit
import torch
def formaldehyde_conformer(formaldehyde) -> torch.Tensor:
"""Returns a conformer [A] of formaldehyde with an ordering which matches the
``formaldehyde`` fixture."""
formaldehyde.generate_conformers(n_conformers=1)
conformer = formaldehyde.conformers[0].value_in_unit(simtk_unit.angstrom)
return torch.from_numpy(conformer).type(torch.float)
|
f5a9a19f6dd8e26a121e496161fa6da7b8f63047
| 28,029 |
import warnings
def reduce_function(op_func, input_tensor, axis=None, keepdims=None,
name=None, reduction_indices=None):
"""
Handler function for Tensorflow deprecation of keep_dims for tf 1.8
and above, but tf 1.4 requires keep_dims
:param op_func: expects the function to handle eg: tf.reduce_sum.
:param input_tensor: The tensor to reduce. Should have numeric type.
:param axis: The dimensions to reduce. If None (the default),
reduces all dimensions. Must be in the range
[-rank(input_tensor), rank(input_tensor)).
:param keepdims: If true, retains reduced dimensions with length 1.
:param name: A name for the operation (optional).
:param reduction_indices: The old (deprecated) name for axis.
:param keep_dims: Deprecated alias for keepdims.
:return: outputs same value as op_func.
"""
if LooseVersion(tf.__version__) < LooseVersion('1.8.0'):
warning = "Running on tensorflow version " + \
LooseVersion(tf.__version__).vstring + \
". Support for this version in CleverHans is deprecated " + \
"and may be removed on or after 2019-01-26"
warnings.warn(warning)
out = op_func(input_tensor, axis=axis,
keep_dims=keepdims, name=name,
reduction_indices=reduction_indices)
else:
out = op_func(input_tensor, axis=axis,
keepdims=keepdims, name=name,
reduction_indices=reduction_indices)
return out
|
f6433479bcb01a8fc5dfc2c08dd70bf2fe500e94
| 28,030 |
from typing import Mapping
def filter_dict(function_or_value, dict_to_filter):
"""
Filter by value
>>> filter_dict(123, {'a': 123, 'b': 1234})
{'b': 1234}
Filter by value not applicable
>>> filter_dict(123, {'a': 1234, 'b': 5123})
{'a': 1234, 'b': 5123}
Embedded filter by value
>>> filter_dict(123, {'a': {'c': 123}, 'b': 1234})
{'b': 1234}
Embedded with extra by value
>>> filter_dict(123, {'a': {'c': 123, 'd': 432}, 'b': 1234})
{'a': {'d': 432}, 'b': 1234}
Embedded mixed filter
>>> filter_dict(123, {'a': {'c': 123, 'd': 432}, 'b': 123, 'e': 'test'})
{'a': {'d': 432}, 'e': 'test'}
Filter by callable
>>> filter_dict(lambda x: x % 2 == 0, {'a': 532, 'b': 891})
{'a': 532}
Filter by callable not applicable
>>> filter_dict(lambda x: x % 2 == 0, {'a': 538, 'b': 8})
{'a': 538, 'b': 8}
Embedded filter by callable
>>> filter_dict(lambda x: bool(x), {'a': {'c': False}, 'b': 'test'})
{'b': 'test'}
Embedded with extra by callable
>>> filter_dict(
... lambda x: 'a' in x, {'a': {'c': 'ba', 'd': 'tt'}, 'b': 'd'})
{'a': {'c': 'ba'}}
Embedded mixed filter
>>> filter_dict(
... lambda x: bool(x), {'a': {'c': True, 'd': 0}, 'b': 'test', 'e': []}
... )
{'a': {'c': True}, 'b': 'test'}
"""
func = function_or_value
if not callable(function_or_value):
def new_func(value):
return value != function_or_value
func = new_func
result = {}
for key, value in dict_to_filter.items():
if isinstance(value, Mapping):
value = filter_dict(func, value)
if value:
result[key] = value
elif func(value):
result[key] = value
return result
|
6403f716c21a1cfef046174899183858837bb92e
| 28,031 |
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
|
ba0f11d8645f3dcc5ccc48ec718de0c6ff624930
| 28,033 |
import torch
import math
def irfft(x, res):
"""
:param x: tensor of shape [..., m]
:return: tensor of shape [..., res]
"""
assert res % 2 == 1
*size, sm = x.shape
x = x.reshape(-1, sm)
x = torch.cat([
x.new_zeros(x.shape[0], (res - sm) // 2),
x,
x.new_zeros(x.shape[0], (res - sm) // 2),
], dim=-1)
assert x.shape[1] == res
l = res // 2
x = torch.stack([
torch.cat([
x[:, l:l + 1],
x[:, l + 1:].div(math.sqrt(2))
], dim=1),
torch.cat([
torch.zeros_like(x[:, :1]),
x[:, :l].flip(-1).div(-math.sqrt(2)),
], dim=1),
], dim=-1)
x = torch.irfft(x, 1) * res
return x.reshape(*size, res)
|
8f383523bc0c4ed6895d8aad0aca2758401d2fe5
| 28,034 |
import torch
def calc_ranks(idx, label, pred_score):
"""Calculating triples score ranks.
Args:
idx ([type]): The id of the entity to be predicted.
label ([type]): The id of existing triples, to calc filtered results.
pred_score ([type]): The score of the triple predicted by the model.
Returns:
ranks: The rank of the triple to be predicted, dim [batch_size].
"""
b_range = torch.arange(pred_score.size()[0])
target_pred = pred_score[b_range, idx]
pred_score = torch.where(label.bool(), -torch.ones_like(pred_score) * 10000000, pred_score)
pred_score[b_range, idx] = target_pred
ranks = (
1
+ torch.argsort(
torch.argsort(pred_score, dim=1, descending=True), dim=1, descending=False
)[b_range, idx]
)
return ranks
|
1f3d56c9a93afdd314c9a244319ef78668426481
| 28,035 |
def GBT(trainingData, testData):
"""
Gradient Boosted Tree Regression Model
:param trainingData:
:param testData:
:return: Trained model, predictions
"""
gbt = GBTRegressor( maxIter=100, maxDepth=6, seed=42)
model = gbt.fit(trainingData)
predictions = model.transform(testData)
return model, predictions
|
4e17c7188ccdd2676463a705a4e3ab4ccbc5adeb
| 28,036 |
def fromcolumns(cols, header=None, missing=None):
"""View a sequence of columns as a table, e.g.::
>>> import petl as etl
>>> cols = [[0, 1, 2], ['a', 'b', 'c']]
>>> tbl = etl.fromcolumns(cols)
>>> tbl
+----+-----+
| f0 | f1 |
+====+=====+
| 0 | 'a' |
+----+-----+
| 1 | 'b' |
+----+-----+
| 2 | 'c' |
+----+-----+
If columns are not the same length, values will be padded to the length
of the longest column with `missing`, which is None by default, e.g.::
>>> cols = [[0, 1, 2], ['a', 'b']]
>>> tbl = etl.fromcolumns(cols, missing='NA')
>>> tbl
+----+------+
| f0 | f1 |
+====+======+
| 0 | 'a' |
+----+------+
| 1 | 'b' |
+----+------+
| 2 | 'NA' |
+----+------+
See also :func:`petl.io.json.fromdicts`.
.. versionadded:: 1.1.0
"""
return ColumnsView(cols, header=header, missing=missing)
|
c033e0fbc11e18a73eb8216e4a3a2c79a0756bb8
| 28,037 |
import re
def function_sql(field, mysql_result_list):
"""
Substitute MySQL query results into a field template.
:param field: the field defined in the yaml file
:param mysql_result_list: list of MySQL query results
:return: the field with {__SQLi} placeholders replaced by the query results
"""
if "{__SQL" in field:
mysql_index_list = re.findall("{__SQL(.+?)}", field)
# 获取索引列表
for i in mysql_index_list:
mysql_value = mysql_result_list[int(i)]
if type(mysql_value) != str:
mysql_value = str(mysql_value)
field = field.replace("{__SQL" + i + "}", mysql_value)
else:
pass
return field  # return the field after substitution
|
769881ae5e3a7caa036c977785827e219e5ab92b
| 28,038 |
def enable_dropout(model, rate=None, custom_objects={}):
"""
Enables the dropout layers - used for Monte Carlo dropout based uncertainty computation
Note: the weights need to be reloaded after calling this function
>>> model = enable_dropout(model)
>>> model.load_weights('path to model weight')
:param model:
:param rate:
:param custom_objects:
:return:
"""
if(rate is not None): assert rate >= 0 and rate < 1, 'dropout rate is out of range'
model_config = model.get_config()
for i in range(len(model_config['layers'])):
class_name = model_config['layers'][i]['class_name']
if (class_name == 'SpatialDropout2D' or class_name =='Dropout' ):
model_config['layers'][i]['inbound_nodes'][0][0][-1]['training'] = True
if (rate is not None): model_config['layers'][i]['config']['rate'] = rate
#print('dropout enabled')
model = tf.keras.models.Model.from_config(model_config, custom_objects=custom_objects)
return model
|
2268c23bc5598fcf0befe76a15f0dbc444e28828
| 28,040 |
import ctypes
def get_max_torque_norm(p_state, idx_image=-1, idx_chain=-1):
"""Returns the current maximum norm of the torque acting on any spin."""
return float(_Get_MaxTorqueNorm(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), ctypes.c_int(idx_chain)))
|
b6ae73ef269a192b5aafc96e939a0ab1f9a937be
| 28,041 |
def filter_by_zscore(data, features, remove_z):
"""Remove rows with |z scores| > remove_z"""
return data[(np.abs(np.nan_to_num(zscore(data[features]), posinf=0.0, neginf=0.0)) < remove_z).all(axis=1)]
|
bbaad3ee7879d64dafb2e45062c6cbe97ff457bc
| 28,042 |
def _GetProperty(obj, components):
"""Grabs a property from obj."""
if obj is None:
return None
elif not components:
return obj
elif (isinstance(components[0], _Key) and
isinstance(obj, dict)):
return _GetProperty(obj.get(components[0]), components[1:])
elif (isinstance(components[0], _Index) and isinstance(obj, list) and
components[0] < len(obj)):
return _GetProperty(obj[components[0]], components[1:])
elif (isinstance(components[0], _Slice) and
isinstance(obj, list)):
return [_GetProperty(item, components[1:]) for item in obj]
else:
return None
|
d887613e06078fcde887d51c8f83cc9ddc8f16f8
| 28,043 |
def make_similarity_function(similarity=None, distance=None, radius=None):
"""
Function creating a similarity function returning True if the compared
items are similar from a variety of functions & parameters.
Basically, if a distance function is given, it will be inverted and if
a radius is given, the returned function will return whether the
distance/similarity between two items is under/over it.
Args:
similarity (callable, optional): Similarity function.
distance (callable, optional): Distance function.
radius (number, optional): Radius.
Returns:
function: A similarity function with signature (A, B) -> bool
"""
if similarity is None and distance is None:
raise TypeError('fog.clustering: need at least a similarity or distance function.')
if radius is not None:
if similarity:
return lambda A, B: similarity(A, B) >= radius
else:
return lambda A, B: distance(A, B) <= radius
else:
if similarity:
return similarity
else:
return lambda A, B: not distance(A, B)
|
b8eeeeb466f21f2b3605941253f56392c3e41e88
| 28,044 |
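A small usage sketch for the factory above; the distance and similarity functions here are made up for illustration:
# items within distance 2 of each other count as similar
similar = make_similarity_function(distance=lambda a, b: abs(a - b), radius=2)
similar(3, 4)   # True, distance 1 <= 2
similar(3, 10)  # False, distance 7 > 2
# with a similarity function and no radius, the function is returned as-is
same_parity = make_similarity_function(similarity=lambda a, b: a % 2 == b % 2)
same_parity(4, 8)  # True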
from typing import Optional
def prepare_error_message(message: str, error_context: Optional[str] = None) -> str:
"""
If `error_context` is not None prepend that to error message.
"""
if error_context is not None:
return error_context + ": " + message
else:
return message
|
ea95d40797fcc431412990706d5c098a07986156
| 28,045 |
def _options_from_args(args):
"""Returns a QRCodeOptions instance from the provided arguments.
"""
options = args.get('options')
if options:
if not isinstance(options, QRCodeOptions):
raise TypeError('The options argument must be of type QRCodeOptions.')
else:
# Convert the string "None" into None
kw = {k: v if v != 'None' else None for k, v in args.items()}
options = QRCodeOptions(**kw)
return options
|
ff895e537a0d2c00f42e10f827b8176865902774
| 28,046 |
def calc_q_rq_H(region, R_type):
"""単位面積当たりの必要暖房能力
Args:
region(int): 省エネルギー地域区分
R_type(string): 暖冷房区画の種類
Returns:
float: 単位面積当たりの必要暖房能力
Raises:
ValueError: R_type が '主たる居室' または 'その他の居室' 以外の場合に発生する
"""
table_3 = get_table_3()
if R_type == '主たる居室':
return table_3[region - 1][0]
elif R_type == 'その他の居室':
return table_3[region - 1][1]
else:
raise ValueError(R_type)
|
1b413f0d83d723e1ef01c558cbc54e8afddc65ac
| 28,047 |
def tuple_compare_lt(left, right):
"""Compare two 'TupleOf' instances by comparing their individual elements."""
for i in range(min(len(left), len(right))):
if left[i] > right[i]:
return False
if left[i] < right[i]:
return True
return len(left) < len(right)
|
8f93d0c1336fd63d7c7f04cf54680de25acfdafb
| 28,048 |
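A few illustrative comparisons for the lexicographic helper above:
tuple_compare_lt((1, 2, 3), (1, 3, 0))  # True: first differing element 2 < 3
tuple_compare_lt((1, 2, 3), (1, 2))     # False: the shorter tuple is the smaller one
tuple_compare_lt((1, 2), (1, 2, 0))     # True: equal prefix, left is shorter
tuple_compare_lt((5,), (5,))            # False: equal tuples are not strictly less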
def multilevel_roi_align(inputs, boxes, image_shape, crop_size: int = 7):
"""Perform a batch multilevel roi_align on the inputs
Arguments:
- *inputs*: A list of tensors of shape [batch_size, width, height, channel]
representing the pyramid.
- *boxes*: A tensor of shape [batch_size, num_boxes, (y1, x1, y2, x2)]
- *image_shape*: A tuple with the height and the width of the original image input image
Returns:
A tensor of shape [batch_size * num_boxes, 7, 7, channel]
"""
boxes_per_level, box_indices_per_level, pos_per_level = match_boxes_to_their_pyramid_level(
boxes, len(inputs))
tensors_per_level = []
for tensor, target_boxes, box_indices in zip(inputs, boxes_per_level, box_indices_per_level):
tensors_per_level.append(
roi_align(tensor, target_boxes, box_indices, image_shape, crop_size))
tensors = tf.concat(values=tensors_per_level, axis=0)
original_pos = tf.concat(values=pos_per_level, axis=0)
# Reorder the tensor per batch
indices_to_reorder_boxes = tf.math.invert_permutation(original_pos)
tensors = tf.gather(tensors, indices_to_reorder_boxes)
return tensors
|
3b150e6b6bcada3d3633f1edf61a99a566792849
| 28,049 |
def login():
"""Login user"""
# Instantiate login form
form = LoginForm()
username = form.username.data
if form.validate_on_submit():
# Query database for username and validate form submission
user = User.query.filter_by(username=username).first()
# if user exists
if user:
# Compare hashed password from submission to database password
if check_password_hash(user.password, form.password.data):
login_user(user, remember=True)
flash(f"Logged in successfully as {form.username.data}.", category="success")
# Return user to the home page
return redirect(url_for("index"))
else:
flash("Incorrect password..please try again", category="error")
else:
flash("Username does not exist", category="error")
return render_template("login.html", form=form)
|
fa1e1814d71bcbf04fda08b282f3f1a58965dcfb
| 28,050 |
from datetime import datetime
import uuid
def serialize(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime.datetime):
serial = obj.isoformat(sep='T')
return serial
if isinstance(obj, uuid.UUID):
serial = str(obj)
return serial
try:
return obj.__dict__
except AttributeError:
return str(obj)
except Exception as e:
strval = 'unknown obj'
exceptval = 'unknown err'
try:
strval = str(obj)
exceptval = repr(e)
except Exception:
pass
return 'json fail {} {}'.format(exceptval, strval)
|
c20abaac68e8f8c8314a6dbbaee128b54110705c
| 28,051 |
def clean_names_AZ(col):
"""
Removes any non-alpha characters (excluding spaces) from a string.
Replaces these characters with an empty string. Trims outer whitespace.
Example
--------
>>> Input: "JOHN SMITH 2000"
>>> Output: "JOHN SMITH"
"""
return trim(regexp_replace(col, "[^A-Z ]+", ""))
|
4db710ec573087df59109046ea2a965c7545f1a2
| 28,052 |
def check_continent_node_membership(continents, continent_node_id):
"""The function checks that a node continent is bound
to the corresponding relation through 'label' membership.
"""
assert continent_node_id[0] == 'n', ("A node expected in "
"check_continent_node_membership()")
errors = []
for cont_id, cont_data in continents.items():
continent_rel_id, continent_rel_data = cont_id, cont_data
if (cont_id[0] == 'r' and
is_the_same_continent(continents[continent_node_id],
continent_rel_data) and
continent_node_id not in continent_rel_data.get('labels', [])
):
errors.append(f"Node {continent_node_id} represents the same "
f"continent as relation {continent_rel_id} "
"but is not its label")
return errors
|
7ef0895e26fdd495f54ac58ea35513178f00eb19
| 28,053 |
import string
def remove_punctuation(input_string):
"""
remove the punctuation of input
Parameters
----------
input_string : string
Returns
-------
output_string : string
string without punctuation
###from assignment encoder
"""
    out_string = ''
    for item in input_string:
        if item not in string.punctuation:
            out_string = out_string + item
return out_string
|
2bbd1dc90d37c1ad16698092b6269c0fe601d902
| 28,054 |
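A quick usage sketch for remove_punctuation, assuming the function above is in scope.
# Every character listed in string.punctuation is dropped; letters, digits and spaces stay.
print(remove_punctuation("Hello, world! It's 2021."))  # Hello world Its 2021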
from typing import Any
def field_value_between(value: Any = None, field: str = None,
lower: float = None, upper: float = None) -> bool:
"""
Validate value at the given field to be between the lower/upper boundaries.
"""
if not value:
return False
if not isinstance(value, list):
value = [value]
for v in value:
current = v.get(field)
if current is None:
return False
if (lower > float(current)) or (float(current) > upper):
return False
return True
|
4ff2dfa814f0ddda7efca3ce19f137a0d86b9f40
| 28,055 |
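A minimal sketch of field_value_between on a list of dicts; the 'price' field and the bounds are arbitrary example values.
items = [{"price": "10.5"}, {"price": "12.0"}]
# True: every 'price' lies within [10, 15].
print(field_value_between(value=items, field="price", lower=10.0, upper=15.0))
# False: 12.0 exceeds the upper bound of 11.
print(field_value_between(value=items, field="price", lower=10.0, upper=11.0))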
import yaml
def j2_to_json(path_in, path_out, **kwargs):
"""Render a yaml.j2 chart to JSON.
Args:
path_in: the j2 template path
path_out: the JSON path to write to
kwargs: data to pass to the j2 template
Returns:
the file path and JSON string
"""
return pipe(
render_yaml(path_in, **kwargs),
        yaml.safe_load,
write_json(filepath=path_out) # pylint: disable=no-value-for-parameter
)
|
2cd41eb29e293e44772855f7d66e7425eedaec8d
| 28,056 |
def user_logged_out(connection,user):
"""
update login status to false when user has logged out
:param connection:
:param user:
:return:
"""
with connection:
return connection.execute(UPDATE_USER_LOGIN_STATUS_TO_FALSE,(user,))
|
b355fa6e74180adb7504e60602cb164095e1898d
| 28,057 |
import numpy as np
def findGrayscaleTilesInImage(img):
""" Find chessboard and convert into input tiles for CNN """
if img is None:
return None, None
# Convert to grayscale numpy array
img_arr = np.asarray(img.convert("L"), dtype=np.float32)
# Use computer vision to find orthorectified chessboard corners in image
corners = findChessboardCorners(img_arr)
if corners is None:
return None, None
# Pull grayscale tiles out given image and chessboard corners
tiles = getChessTilesGray(img_arr, corners)
# Return both the tiles as well as chessboard corner locations in the image
return tiles, corners
|
d3431c519f53c0a56b144dde8196d58000f2f788
| 28,058 |
def run(df, docs, columns):
"""
converts each column to type int
:param df:
:param columns:
:return:
"""
for doc in docs:
doc.start("t07 - Change type of {} to int".format(str(columns).replace("'", "")), df)
for column in columns:
df[column] = df[column].astype(int)
for doc in docs:
doc.end(df)
return df
|
5d360a764ad30a80c39d58f9aeb520d7c57f7903
| 28,059 |
import requests
def get_articles():
"""
    Retrieves the articles list (via an API request)
"""
endpoint = "%s%s" % (
settings.API_BASE_URL,
reverse("api:articles-list")
)
headers = DEFAULT_REQUESTS_HEADERS
r = requests.get(
endpoint,
headers=DEFAULT_REQUESTS_HEADERS
)
if r.status_code != 200:
raise UnexpectedApiResponse(
endpoint=endpoint,
method="GET",
payload=None,
response_status_code=r.status_code,
response_content=r.content
)
raw_data = decode_requests_response(r.content)
serializer = ArticleSerializer(data=raw_data, many=True)
serializer.is_valid(raise_exception=True)
articles = serializer.validated_data
return articles
|
fb2b59cc301890b8c6f4c6c115b6f08f4a4cbe72
| 28,060 |
import numpy
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
callback=None, preconditioner = None):
"""
Unconstrained minimization of a function using the Newton-CG method.
Parameters
----------
f : callable ``f(x, *args)``
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable ``f'(x, *args)``
Gradient of f.
fhess_p : callable ``fhess_p(x, p, *args)``, optional
Function which computes the Hessian of f times an
arbitrary vector, p.
fhess : callable ``fhess(x, *args)``, optional
Function to compute the Hessian matrix of f.
args : tuple, optional
Extra arguments passed to f, fprime, fhess_p, and fhess
(the same set of extra arguments is supplied to all of
these functions).
epsilon : float or ndarray, optional
If fhess is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function which is called after
each iteration. Called as callback(xk), where xk is the
current parameter vector.
avextol : float, optional
Convergence is assumed when the average relative error in
the minimizer falls below this amount.
maxiter : int, optional
Maximum number of iterations to perform.
full_output : bool, optional
If True, return the optional outputs.
disp : bool, optional
If True, print convergence message.
retall : bool, optional
If True, return a list of results at each iteration.
    preconditioner : numpy.ndarray, optional
        One-dimensional array holding the diagonal of the preconditioning
        matrix M, used for preconditioned CG (PCG).
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. ``f(xopt) == fopt``.
fopt : float
Value of the function at xopt, i.e. ``fopt = f(xopt)``.
fcalls : int
Number of function calls made.
gcalls : int
Number of gradient calls made.
hcalls : int
Number of hessian calls made.
warnflag : int
Warnings generated by the algorithm.
1 : Maximum number of iterations exceeded.
allvecs : list
The result at each iteration, if retall is True (see below).
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Newton-CG' `method` in particular.
Notes
-----
    Only one of `fhess_p` or `fhess` needs to be given. If `fhess`
is provided, then `fhess_p` will be ignored. If neither `fhess`
nor `fhess_p` is provided, then the hessian product will be
approximated using finite differences on `fprime`. `fhess_p`
must compute the hessian times an arbitrary vector. If it is not
given, finite-differences on `fprime` are used to compute
it.
Newton-CG methods are also called truncated Newton methods. This
function differs from scipy.optimize.fmin_tnc because
1. scipy.optimize.fmin_ncg is written purely in python using numpy
and scipy while scipy.optimize.fmin_tnc calls a C function.
2. scipy.optimize.fmin_ncg is only for unconstrained minimization
while scipy.optimize.fmin_tnc is for unconstrained minimization
or box constrained minimization. (Box constraints give
lower and upper bounds for each variable separately.)
References
----------
Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140.
"""
if preconditioner is None:
preconditioner = numpy.ones(x0.shape[0])
opts = {'xtol': avextol,
'eps': epsilon,
'maxiter': maxiter,
'disp': disp,
'return_all': retall,
'preconditioner': preconditioner,}
res = _minimize_newton_pcg(f, x0, args, fprime, fhess, fhess_p,callback=callback, **opts)
# res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['nfev'], res['njev'],
res['nhev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
|
bb2d4c3d1303adebe856f6c3ac13cd92beeee0ab
| 28,061 |
def add_missing_flow_by_fields(flowby_partial_df, flowbyfields):
"""
Add in missing fields to have a complete and ordered
:param flowby_partial_df: Either flowbyactivity or flowbysector df
:param flowbyfields: Either flow_by_activity_fields, flow_by_sector_fields, or flow_by_sector_collapsed_fields
:return:
"""
for k in flowbyfields.keys():
if k not in flowby_partial_df.columns:
flowby_partial_df[k] = None
# convert data types to match those defined in flow_by_activity_fields
for k, v in flowbyfields.items():
flowby_partial_df[k] = flowby_partial_df[k].astype(v[0]['dtype'])
# Resort it so order is correct
flowby_partial_df = flowby_partial_df[flowbyfields.keys()]
return flowby_partial_df
|
49eb8810c7c2c4e852a40aa86e2d2d2a8506f253
| 28,062 |
from datetime import datetime
def calcular_diferencia_dias(fin_dia):
"""
    Returns the absolute difference in days between the given date and today.
"""
hoy = datetime.now()
end = datetime.strptime(str(fin_dia), '%Y-%m-%d')
return abs(end - hoy).days
|
41b732f3bb09d2deca4be034273a5fed74971386
| 28,063 |
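A usage sketch for calcular_diferencia_dias; the date below is an arbitrary example.
# Whole days between today and 2030-01-01 (always non-negative thanks to abs()).
print(calcular_diferencia_dias("2030-01-01"))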
def matrix_base_mpl(matrix, positions, substitutions, conservation=None,
secondary_structure=None, wildtype_sequence=None,
min_value=None, max_value=None,
ax=None, colormap=plt.cm.RdBu_r,
colormap_conservation=plt.cm.Oranges, na_color="#bbbbbb",
title=None, position_label_size=8, substitution_label_size=8,
show_colorbar=True, colorbar_indicate_bounds=False,
show_wt_char=True, label_filter=None, secondary_structure_style=None):
"""
Matplotlib-based mutation matrix plotting. This is the base plotting function,
see plot_mutation_matrix() for more convenient access.
Parameters
----------
matrix : np.array(float)
2D numpy array with values for individual single mutations
(first axis: position, second axis: substitution)
positions : list(int) or list(str)
List of positions along x-axis of matrix
(length has to agree with first dimension of matrix)
substitutions : list(str)
List of substitutions along y-axis of matrix
(length has to agree with second dimension of matrix)
conservation : list(float) or np.array(float), optional (default: None)
Positional conservation along sequence. Values must range
between 0 (not conserved) and 1 (fully conserved). If given,
will plot conservation along bottom of mutation matrix.
secondary_structure : str or list(str), optional (default: None)
Secondary structure for each position along sequence. If given,
will draw secondary structure cartoon on top of matrix.
wildtype_sequence : str or list(str), optional (default: None)
Sequence of wild-type symbols. If given, will indicate wild-type
entries in matrix with a dot.
min_value : float, optional (default: None)
Threshold colormap at this minimum value. If None, defaults to
minimum value in matrix; if max_value is also None, defaults to
-max(abs(matrix))
max_value : float, optional (default: None)
Threshold colormap at this maximum value. If None, defaults to
maximum value in matrix; if min_value is also None, defaults to
max(abs(matrix))
ax : Matplotlib axes object, optional (default: None)
Draw mutation matrix on this axis. If None, new figure and axis
will be created.
colormap : matplotlib colormap object, optional (default: plt.cm.RdBu_r)
Maps mutation effects to colors of matrix cells.
colormap_conservation: matplotlib colormap object, optional (default: plt.cm.Oranges)
Maps sequence conservation to colors of conservation vector plot.
na_color : str, optional (default: "#bbbbbb")
Color for missing values in matrix
title : str, optional (default: None)
If given, set title of plot to this value.
position_label_size : int, optional (default: 8)
Font size of x-axis labels.
substitution_label_size : int, optional (default: 8)
Font size of y-axis labels.
show_colorbar : bool, optional (default: True)
If True, show colorbar next to matrix.
colorbar_indicate_bounds : bool, optional (default: False)
If True, add greater-than/less-than signs to limits of colorbar
to indicate that colors were thresholded at min_value/max_value
show_wt_char : bool, optional (default: True)
Display wild-type symbol in axis labels
label_filter : function, optional (default: None)
Function with one argument (integer) that determines if a certain position
label will be printed (if label_filter(pos)==True) or not.
secondary_structure_style : dict, optional (default: None)
Pass on as **kwargs to evcouplings.visualize.pairs.secondary_structure_cartoon
to determine appearance of secondary structure cartoon.
Returns
-------
ax : Matplotlib axes object
Axes on which mutation matrix was drawn
"""
LINEWIDTH = 0.0
LABEL_X_OFFSET = 0.55
LABEL_Y_OFFSET = 0.45
def _draw_rect(x_range, y_range, linewidth):
r = plt.Rectangle(
(min(x_range), min(y_range)),
max(x_range) - min(x_range), max(y_range) - min(y_range),
fc='None', linewidth=linewidth
)
ax.add_patch(r)
matrix_width = matrix.shape[0]
matrix_height = len(substitutions)
# mask NaN entries in mutation matrix
matrix_masked = np.ma.masked_where(np.isnan(matrix), matrix)
# figure out maximum and minimum values for color map
if max_value is None and min_value is None:
max_value = np.abs(matrix_masked).max()
min_value = -max_value
elif min_value is None:
min_value = matrix_masked.min()
elif max_value is None:
max_value = matrix_masked.max()
# set NaN color value in colormaps
colormap = deepcopy(colormap)
colormap.set_bad(na_color)
colormap_conservation = deepcopy(colormap_conservation)
colormap_conservation.set_bad(na_color)
# determine size of plot (depends on how much tracks
# with information we will add)
num_rows = (
len(substitutions) +
(conservation is not None) +
(secondary_structure is not None)
)
ratio = matrix_width / float(num_rows)
# create axis, if not given
if ax is None:
fig = plt.figure(figsize=(ratio * 5, 5))
ax = fig.gca()
# make square-shaped matrix cells
ax.set_aspect("equal", "box")
# define matrix coordinates
# always add +1 because coordinates are used by
# pcolor(mesh) as beginning and start of rectangles
x_range = np.array(range(matrix_width + 1))
y_range = np.array(range(matrix_height + 1))
y_range_avg = range(-2, 0)
x_range_avg = range(matrix_width + 1, matrix_width + 3)
y_range_cons = np.array(y_range_avg) - 1.5
# coordinates for text labels (fixed axis)
x_left_subs = min(x_range) - 1
x_right_subs = max(x_range_avg) + 1
if conservation is None:
y_bottom_res = min(y_range_avg) - 0.5
else:
y_bottom_res = min(y_range_cons) - 0.5
# coordinates for additional annotation
y_ss = max(y_range) + 2
# 1) main mutation matrix
X, Y = np.meshgrid(x_range, y_range)
cm = ax.pcolormesh(
X, Y, matrix_masked.T, cmap=colormap, vmax=max_value, vmin=min_value
)
_draw_rect(x_range, y_range, LINEWIDTH)
# 2) mean column effect (bottom "subplot")
mean_pos = np.mean(matrix_masked, axis=1)[:, np.newaxis]
X_pos, Y_pos = np.meshgrid(x_range, y_range_avg)
ax.pcolormesh(
X_pos, Y_pos, mean_pos.T, cmap=colormap, vmax=max_value, vmin=min_value
)
_draw_rect(x_range, y_range_avg, LINEWIDTH)
# 3) amino acid average (right "subplot")
mean_aa = np.mean(matrix_masked, axis=0)[:, np.newaxis]
X_aa, Y_aa = np.meshgrid(x_range_avg, y_range)
ax.pcolormesh(X_aa, Y_aa, mean_aa, cmap=colormap, vmax=max_value, vmin=min_value)
_draw_rect(x_range_avg, y_range, LINEWIDTH)
# mark wildtype residues
if wildtype_sequence is not None:
subs_list = list(substitutions)
for i, wt in enumerate(wildtype_sequence):
# skip unspecified entries
if wt is not None and wt != "":
marker = plt.Circle(
(x_range[i] + 0.5, y_range[subs_list.index(wt)] + 0.5),
0.1, fc='k', axes=ax
)
ax.add_patch(marker)
# put labels along both axes of matrix
# x-axis (positions)
for i, pos in zip(x_range, positions):
# filter labels, if selected
if label_filter is not None and not label_filter(pos):
continue
# determine what position label should be
if show_wt_char and wildtype_sequence is not None:
wt_symbol = wildtype_sequence[i]
if type(pos) is tuple and len(pos) == 2:
# label will be in format segment AA pos, eg B_1 A 151
label = "{} {} {}".format(pos[0], wt_symbol, pos[1])
else:
label = "{} {}".format(wt_symbol, pos)
else:
if type(pos) is tuple:
label = " ".join(map(str, pos))
else:
label = str(pos)
ax.text(
i + LABEL_X_OFFSET, y_bottom_res, label,
size=position_label_size,
horizontalalignment='center',
verticalalignment='top',
rotation=90
)
# y-axis (substitutions)
for j, subs in zip(y_range, substitutions):
# put on lefthand side of matrix...
ax.text(
x_left_subs, j + LABEL_Y_OFFSET, subs,
size=substitution_label_size,
horizontalalignment='center',
verticalalignment='center'
)
# ...and on right-hand side of matrix
ax.text(
x_right_subs, j + LABEL_Y_OFFSET, subs,
size=substitution_label_size,
horizontalalignment='center', verticalalignment='center'
)
# draw colorbar
if show_colorbar:
cb = plt.colorbar(
cm, ticks=[min_value, max_value],
shrink=0.3, pad=0.15 / ratio, aspect=8
)
if colorbar_indicate_bounds:
symbol_min, symbol_max = u"\u2264", u"\u2265"
else:
symbol_min, symbol_max = "", ""
cb.ax.set_yticklabels(
[
"{symbol} {value:>+{width}.1f}".format(
symbol=s, value=v, width=0
) for (v, s) in [(min_value, symbol_min), (max_value, symbol_max)]
]
)
cb.ax.xaxis.set_ticks_position("none")
cb.ax.yaxis.set_ticks_position("none")
cb.outline.set_linewidth(0)
# plot secondary structure cartoon
if secondary_structure is not None:
# if no style given for secondary structure, set default
if secondary_structure_style is None:
secondary_structure_style = {
"width": 0.8,
"line_width": 2,
"strand_width_factor": 0.5,
"helix_turn_length": 2,
"min_sse_length": 2,
}
start, end, sse = find_secondary_structure_segments(secondary_structure)
secondary_structure_cartoon(
sse, sequence_start=start, sequence_end=end, center=y_ss, ax=ax,
**secondary_structure_style
)
# plot conservation
if conservation is not None:
conservation = np.array(conservation)[:, np.newaxis]
cons_masked = np.ma.masked_where(np.isnan(conservation), conservation)
X_cons, Y_cons = np.meshgrid(x_range, y_range_cons)
ax.pcolormesh(
X_cons, Y_cons, cons_masked.T, cmap=colormap_conservation, vmax=1, vmin=0
)
_draw_rect(x_range, y_range_cons, LINEWIDTH)
# remove chart junk
for line in ['top', 'bottom', 'right', 'left']:
ax.spines[line].set_visible(False)
ax.xaxis.set_ticks_position("none")
ax.yaxis.set_ticks_position("none")
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
if title is not None:
ax.set_title(title)
return ax
|
ef661fd556b3ba2e4c313e032e8ef3be532bb73d
| 28,064 |
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0, **kwargs):
"""Multi-dimensional Laplace filter using Gaussian second derivatives.
Args:
input (cupy.ndarray): The input array.
sigma (scalar or sequence of scalar): Standard deviations for each axis
of Gaussian kernel. A single value applies to all axes.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is the same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
kwargs (dict, optional):
dict of extra keyword arguments to pass ``gaussian_filter()``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.gaussian_laplace`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
def derivative2(input, axis, output, mode, cval):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval,
**kwargs)
return generic_laplace(input, derivative2, output, mode, cval)
|
6b5f184b658dd446a4f3ec7de0ee126f33663b0c
| 28,065 |
import numpy as np
def perimeter_mask(image, corner_fraction=0.035):
"""
Create boolean mask for image with a perimeter marked as True.
The perimeter is the same width as the corners created by corner_mask.
Args:
image : the image to work with
corner_fraction: determines the width of the perimeter
Returns:
        boolean 2D array with the perimeter marked True
"""
v, h = image.shape
n = int(v * corner_fraction)
m = int(h * corner_fraction)
    the_mask = np.full_like(image, False, dtype=bool)
the_mask[:, :m] = True
the_mask[:, -m:] = True
the_mask[:n, :] = True
the_mask[-n:, :] = True
return the_mask
|
afc755dccfffa9ff68e060a6af3da0d38d323178
| 28,067 |
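A small sketch of perimeter_mask on a synthetic image; the 100x200 shape and 5% fraction are arbitrary.
import numpy as np

img = np.zeros((100, 200))
mask = perimeter_mask(img, corner_fraction=0.05)
# Only the outer 5 rows and 10 columns are True; the interior stays False.
print(mask.shape, mask[0, 0], mask[50, 100])  # (100, 200) True False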
def vgg13_bn(**kwargs):
"""VGG 13-layer model (configuration "B") with batch normalization"""
model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
return model
|
1fa3ffdbb301b55a48fc1912baab84006705e15f
| 28,068 |
import regex
from typing import Tuple
VersionTuple = Tuple[int, int]
def convert_version_to_tuple(version: str) -> VersionTuple:
"""
Convert version info from string representation to tuple representation.
The tuple representation is convenient for direct comparison.
"""
m = regex.fullmatch(r"(?P<major>\d+)\.(?P<minor>\d+)", version)
if not m:
raise ValueError(f"{version} is not a valid version")
major, minor = m.group("major", "minor")
version_tuple = (int(major), int(minor))
return version_tuple
|
6c197988ae2c98481f9b16f90f9ae3f7072ac7c8
| 28,069 |
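A short sketch of convert_version_to_tuple; comparing the resulting tuples gives the natural version ordering that plain string comparison gets wrong.
print(convert_version_to_tuple("3.9"))   # (3, 9)
print(convert_version_to_tuple("3.10"))  # (3, 10)
# "3.10" < "3.9" as strings, but not as version tuples.
assert convert_version_to_tuple("3.10") > convert_version_to_tuple("3.9")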
from typing import Callable
def SU3GradientTF(
f: Callable[[Tensor], Tensor],
x: Tensor,
) -> tuple[Tensor, Tensor]:
"""Compute gradient using TensorFlow GradientTape.
y = f(x) must be a real scalar value.
Returns:
- (f(x), D), where D = T^a D^a = T^a ∂_a f(x)
NOTE: Use real vector derivatives, e.g.
D^a = ∂_a f(x)
= ∂_t f(exp(T^a) x) |_{t=0}
"""
zeros = tf.zeros(8)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(zeros)
y = f(tf.linalg.matmul(exp(su3fromvec(zeros)), x))
d = tape.gradient(y, zeros)
return y, d
|
93b029e0a2854e651d4c6ea5995f8d952f9a64e6
| 28,070 |
def create_app(config):
"""Flask application factory.
Returns:
Flask Application with BrazilDataCubeDB extension prepared.
"""
    app = Flask(__name__)
    # Apply the supplied configuration object.
    app.config.from_object(config)
    BrazilDataCubeDB(app)
return app
|
d8ba6d7306508e4a55f9f3dbee5d17df16c56820
| 28,071 |
import string
from random import choice
def genpass_comprehension(length=8, chars=string.ascii_letters+string.digits):
    """Generate password using a list comprehension.
    """
    return ''.join([choice(chars) for _ in range(length)])
|
d77b89e2872eef92390d08f555adbb52f9da1c34
| 28,072 |
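A usage sketch for genpass_comprehension, assuming the function above is in scope.
import string
# 8-character alphanumeric password by default.
print(genpass_comprehension())
# 12-character password drawn only from digits.
print(genpass_comprehension(length=12, chars=string.digits))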
import functools
def typed(*types):
"""Type annotation.
The final type is the output type.
"""
if len(types) < 1:
raise SyntaxError('Too few arguments: typed{}'.format(types))
if len(types) > 3:
raise NotImplementedError('Too many arguments: typed{}'.format(types))
result_type = types[-1]
arg_types = types[:-1]
def decorator_0(fun):
@functools.wraps(fun)
def typed_fun():
return result_type(fun())
return typed_fun
def decorator_1(fun):
@functools.wraps(fun)
def typed_fun(arg):
arg = arg_types[0](arg)
return result_type(fun(arg))
return typed_fun
def decorator_2(fun):
@functools.wraps(fun)
def typed_fun(arg0, arg1):
arg0 = arg_types[0](arg0)
arg1 = arg_types[1](arg1)
return result_type(fun(arg0, arg1))
return typed_fun
return [decorator_0, decorator_1, decorator_2][len(arg_types)]
|
90f100bebd5778d36eee1ad04b7c831b003ce604
| 28,073 |
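A small sketch of the typed decorator above; divide is a hypothetical example function.
@typed(int, int, float)
def divide(a, b):
    """Arguments are coerced to int, the result to float."""
    return a / b

print(divide("10", "4"))  # int("10") / int("4") -> 2.5, returned as float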
from typing import Tuple
def insert_linebreaks(
input_fragments: StyleAndTextTuples,
max_line_width: int,
truncate_long_lines: bool = True) -> Tuple[StyleAndTextTuples, int]:
"""Add line breaks at max_line_width if truncate_long_lines is True.
Returns input_fragments with each character as it's own formatted text
tuple."""
fragments: StyleAndTextTuples = []
total_width = 0
line_width = 0
line_height = 0
new_break_inserted = False
for item in input_fragments:
# Check for non-printable fragment; doesn't affect the width.
if '[ZeroWidthEscape]' in item[0]:
fragments.append(item)
continue
new_item_style = item[0]
# For each character in the fragment
for character in item[1]:
# Get the width respecting double width characters
width = get_cwidth(character)
# Increment counters
total_width += width
line_width += width
            # Save this character as its own fragment
if line_width <= max_line_width:
if not new_break_inserted or character != '\n':
fragments.append((new_item_style, character))
# Was a line break just inserted?
if character == '\n':
# Increase height
line_height += 1
new_break_inserted = False
# Reset width to zero even if we are beyond the max line width.
if character == '\n':
line_width = 0
# Are we at the limit for this line?
elif line_width == max_line_width:
# Insert a new linebreak fragment
fragments.append((new_item_style, '\n'))
# Increase height
line_height += 1
# Set a flag for skipping the next character if it is also a
# line break.
new_break_inserted = True
if not truncate_long_lines:
# Reset line width to zero
line_width = 0
# Check if the string ends in a final line break
last_fragment_style = fragments[-1][0]
last_fragment_text = fragments[-1][1]
if not last_fragment_text.endswith('\n'):
# Add a line break if none exists
fragments.append((last_fragment_style, '\n'))
line_height += 1
return fragments, line_height
|
ec9faf8ff80e3500487634b759a136dc2deca684
| 28,074 |
def score_reactant_combination(candidate_combination, scoring_fcn):
""" Generates a score for a combination of reactant candidates according to the criteria. """
# Extract only the reactant candidate compound ID's.
reactant_ids = [combo[0] for combo in candidate_combination]
# Score the reactant candidate combinations according to the specified criteria.
if scoring_fcn == "similarity":
combination_score = np.mean([combo[1] for combo in candidate_combination])
else:
combination_score = 0.0
return reactant_ids, combination_score
|
715a21bf24af0a60ba3ea421b7bf8dcebcca17fc
| 28,075 |
def get_named_entities(df):
"""
Count the named entities that are neither A nor B.
Hopefully this correlates with class "Neither".
:param df: competition data with one extra field spacy_nlp_doc: precomputed nlp(text)
:return:
"""
named_df = pd.DataFrame(0, index=df.index, columns=["named_ent"])
with timer('Extracting named entities'):
for i in range(len(df)):
doc = df.loc[i, "spacy_nlp_doc"]
A = df.loc[i, "A"]
B = df.loc[i, "B"]
A_offset = df.loc[i, "A-offset"]
B_offset = df.loc[i, "B-offset"]
P_offset = df.loc[i, "Pronoun-offset"]
# count persons that are not A or B
# spacy's entities are spans, not tokens
# e.g. "Cheryl Cassidy" is one entity
ent_list = [ent for ent in doc.ents if (ent.label_ == "PERSON" and ent.text != A and ent.text != B)]
named_df.loc[i, "named_ent"] = len(ent_list)
return named_df
|
65469fe65c8808943343d952fd82ebe62bb9df97
| 28,078 |
import numpy as np
def normalize(vectors):
"""
Normalize a set of vectors.
The length of the returned vectors will be unity.
Parameters
----------
vectors : np.ndarray
Set of vectors of any length, except zero.
"""
if len(vectors.shape) == 1:
return vectors / np.linalg.norm(vectors)
norm = np.linalg.norm(vectors, axis=1)
return vectors / norm[:, np.newaxis]
|
839104d17a3ccbfd1191474bf95076445b4b0464
| 28,079 |
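A quick sketch of normalize on a batch of 2D vectors and on a single vector.
import numpy as np

vecs = np.array([[3.0, 4.0], [0.0, 2.0]])
unit = normalize(vecs)
print(np.linalg.norm(unit, axis=1))      # [1. 1.] -- every row has unit length
print(normalize(np.array([3.0, 4.0])))   # [0.6 0.8] -- single-vector path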
def get_all_requests(current_user):
"""Gets all requests"""
all_requests = []
for request in request_model.requests.values():
all_requests.append(request)
return jsonify(all_requests)
|
bcadfb936826b3a33f809cc95af1a991c5bf741e
| 28,080 |
def RunManifestExe(target, source, env):
"""Calls RunManifest for updating an executable (resource_num=1)."""
return RunManifest(target, source, env, resource_num=1)
|
629ffccb7b163514bd91c790894bdfec3683110e
| 28,081 |
import torch
def dist_reduce_tensor(tensor, dst=0):
"""Reduce to specific rank"""
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
dist.reduce(tensor, dst=dst)
if get_rank() == dst:
tensor.div_(world_size)
return tensor
|
d64d153145bffaf454dd3f46154db156b600bac3
| 28,082 |
def upload_blob(bucket_name, source_file_name, destination_blob_name):
"""Uploads a file to the bucket."""
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
    blob.upload_from_filename(source_file_name)
print('File {} uploaded to {}.'.format(
source_file_name,
destination_blob_name))
return destination_blob_name
|
b63d6bb0ede33d68d684b98968e3e94efbd0c5df
| 28,083 |
def get_lines(matrix, loc):
"""Returns lines that pass though `loc`. Matrix can be indices.
Args:
matrix: a N by N matrix representing the board
loc: a tuple of loc coordinates
Returns:
Numerical values on the horizontal, vertical, and diagonal lines that
pass through loc.
    Example 1:
>>> m = np.array([[0, 0, 1],
>>> [1, 2, 4],
>>> [6, 3, 2]])
>>> get_lines(m, (0, 1))
(array([0, 2, 3]), array([0, 0, 1]), array([0, 4]), array([0, 1]))
Example 2:
>>> m.shape
(3, 3)
>>> ind = np.indices(m.shape)
>>> ind # ind.shape = (2,3,3)
array([[[0, 0, 0],
[1, 1, 1],
[2, 2, 2]],
[[0, 1, 2],
[0, 1, 2],
[0, 1, 2]]])
>>> ind2 = np.moveaxis(ind, 0, -1)
>>> ind2.shape
(3, 3, 2)
>>> ind2
array([[[0, 0],
[0, 1],
[0, 2]],
[[1, 0],
[1, 1],
[1, 2]],
[[2, 0],
[2, 1],
[2, 2]]])
>>> get_lines(ind2, (0,1))
(array([[0, 1], [1, 1], [2, 1]]),
array([[0, 0], [0, 1], [0, 2]]),
array([[0, 1], [1, 2]]),
array([[0, 1], [1, 0]]))
"""
i, j = loc
flat = matrix.reshape(-1, *matrix.shape[2:])
w = matrix.shape[0]
h = matrix.shape[1]
def flat_pos(pos):
"""Returns the flattened index of element (i,j)."""
return pos[0] * h + pos[1]
pos = flat_pos((i, j))
# index for flipping matrix across different axis
ic = w - 1 - i
jc = h - 1 - j
# top left
tl = (i - j, 0) if i > j else (0, j - i)
tl = flat_pos(tl)
# bottom left
bl = (w - 1 - (ic - j), 0) if ic > j else (w - 1, j - ic)
bl = flat_pos(bl)
# top right
tr = (i - jc, h - 1) if i > jc else (0, h - 1 - (jc - i))
tr = flat_pos(tr)
# bottom right
br = (w - 1 - (ic - jc), h - 1) if ic > jc else (w - 1, h - 1 - (jc - ic))
br = flat_pos(br)
hor = matrix[:, j]
ver = matrix[i, :]
diag_right = np.concatenate([flat[tl:pos:h + 1], flat[pos:br + 1:h + 1]])
diag_left = np.concatenate([flat[tr:pos:h - 1], flat[pos:bl + 1:h - 1]])
return hor, ver, diag_right, diag_left
|
43909460e847d5dde88216cc37b902a56ba2d261
| 28,084 |
from bs4 import BeautifulSoup, NavigableString
from typing import Dict
def process_citations_in_paragraph(para_el: BeautifulSoup, sp: BeautifulSoup, bibs: Dict, bracket: bool) -> Dict:
"""
Process all citations in paragraph and generate a dict for surface forms
:param para_el:
:param sp:
:param bibs:
:param bracket:
:return:
"""
# CHECK if range between two surface forms is appropriate for bracket style expansion
def _get_surface_range(start_surface, end_surface):
span1_match = SINGLE_BRACKET_REGEX.match(start_surface)
span2_match = SINGLE_BRACKET_REGEX.match(end_surface)
if span1_match and span2_match:
# get numbers corresponding to citations
span1_num = int(span1_match.group(1))
span2_num = int(span2_match.group(1))
# expand if range is between 1 and 20
if 1 < span2_num - span1_num < 20:
return span1_num, span2_num
return None
# CREATE BIBREF range between two reference ids, e.g. BIBREF1-BIBREF4 -> BIBREF1 BIBREF2 BIBREF3 BIBREF4
def _create_ref_id_range(start_ref_id, end_ref_id):
start_ref_num = int(start_ref_id[6:])
end_ref_num = int(end_ref_id[6:])
return [f'BIBREF{curr_ref_num}' for curr_ref_num in range(start_ref_num, end_ref_num + 1)]
# CREATE surface form range between two bracket strings, e.g. [1]-[4] -> [1] [2] [3] [4]
def _create_surface_range(start_number, end_number):
return [f'[{n}]' for n in range(start_number, end_number + 1)]
# create citation dict with keywords
cite_map = dict()
tokgen = UniqTokenGenerator('CITETOKEN')
for rtag in para_el.find_all('ref'):
try:
raw_coord = rtag.attrs.get("coords", None)
coord = ''
            if raw_coord is not None:
raw_coord = raw_coord.split(';')[0]
coord = list(map(float, raw_coord.split(',')))
coord = {
'page': coord[0],
'left': coord[1],
'top': coord[2],
'width': coord[3],
'height': coord[4]
}
# get surface span, e.g. [3]
surface_span = rtag.text.strip()
# check if target is available (#b2 -> BID2)
if rtag.get('target'):
# normalize reference string
rtag_ref_id = normalize_grobid_id(rtag.get('target'))
# skip if rtag ref_id not in bibliography
if rtag_ref_id not in bibs:
cite_key = tokgen.next()
rtag.replace_with(sp.new_string(f" {cite_key} "))
cite_map[cite_key] = (None, surface_span, coord)
continue
# if bracket style, only keep if surface form is bracket
if bracket:
# valid bracket span
if surface_span and (surface_span[0] == '[' or surface_span[-1] == ']' or surface_span[-1] == ','):
pass
# invalid, replace tag with surface form and continue to next ref tag
else:
rtag.replace_with(sp.new_string(f" {surface_span} "))
continue
# not bracket, add cite span and move on
else:
cite_key = tokgen.next()
rtag.replace_with(sp.new_string(f" {cite_key} "))
cite_map[cite_key] = (rtag_ref_id, surface_span, coord)
continue
### EXTRA PROCESSING FOR BRACKET STYLE CITATIONS; EXPAND RANGES ###
# look backward for range marker, e.g. [1]-*[3]*
backward_between_span = ""
for sib in rtag.previous_siblings:
if sib.name == 'ref':
break
elif type(sib) == NavigableString:
backward_between_span += sib
else:
break
# check if there's a backwards expansion, e.g. need to expand [1]-[3] -> [1] [2] [3]
if is_expansion_string(backward_between_span):
# get surface number range
surface_num_range = _get_surface_range(
rtag.find_previous_sibling('ref').text.strip(),
surface_span
)
# if the surface number range is reasonable (range < 20, in order), EXPAND
if surface_num_range:
# delete previous ref tag and anything in between (i.e. delete "-" and extra spaces)
for sib in rtag.previous_siblings:
if sib.name == 'ref':
break
elif type(sib) == NavigableString:
sib.replace_with(sp.new_string(""))
else:
break
# get ref id of previous ref, e.g. [1] (#b0 -> BID0)
previous_rtag = rtag.find_previous_sibling('ref')
previous_rtag_ref_id = normalize_grobid_id(previous_rtag.get('target'))
previous_rtag.decompose()
# replace this ref tag with the full range expansion, e.g. [3] (#b2 -> BID1 BID2)
id_range = _create_ref_id_range(previous_rtag_ref_id, rtag_ref_id)
surface_range = _create_surface_range(surface_num_range[0], surface_num_range[1])
replace_string = ''
for range_ref_id, range_surface_form in zip(id_range, surface_range):
# only replace if ref id is in bibliography, else add none
if range_ref_id in bibs:
cite_key = tokgen.next()
cite_map[cite_key] = (range_ref_id, range_surface_form, coord)
else:
cite_key = tokgen.next()
cite_map[cite_key] = (None, range_surface_form, coord)
replace_string += cite_key + ' '
rtag.replace_with(sp.new_string(f" {replace_string} "))
# ELSE do not expand backwards and replace previous and current rtag with appropriate ref id
else:
# add mapping between ref id and surface form for previous ref tag
previous_rtag = rtag.find_previous_sibling('ref')
previous_rtag_ref_id = normalize_grobid_id(previous_rtag.get('target'))
previous_rtag_surface = previous_rtag.text.strip()
cite_key = tokgen.next()
previous_rtag.replace_with(sp.new_string(f" {cite_key} "))
cite_map[cite_key] = (previous_rtag_ref_id, previous_rtag_surface, coord)
# add mapping between ref id and surface form for current reftag
cite_key = tokgen.next()
rtag.replace_with(sp.new_string(f" {cite_key} "))
cite_map[cite_key] = (rtag_ref_id, surface_span, coord)
else:
# look forward and see if expansion string, e.g. *[1]*-[3]
forward_between_span = ""
for sib in rtag.next_siblings:
if sib.name == 'ref':
break
elif type(sib) == NavigableString:
forward_between_span += sib
else:
break
# look forward for range marker (if is a range, continue -- range will be expanded
# when we get to the second value)
if is_expansion_string(forward_between_span):
continue
# else treat like normal reference
else:
cite_key = tokgen.next()
rtag.replace_with(sp.new_string(f" {cite_key} "))
cite_map[cite_key] = (rtag_ref_id, surface_span, coord)
else:
cite_key = tokgen.next()
rtag.replace_with(sp.new_string(f" {cite_key} "))
cite_map[cite_key] = (None, surface_span, coord)
except Exception as exception:
print(exception)
continue
return cite_map
|
74418fafc2a2d828b702555b79b515d9b16d9f10
| 28,085 |