content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import inspect
from functools import wraps
def deprecated(removal_version, hint_message=None, subject=None, ensure_stderr=False):
"""Marks a function or method as deprecated.
A removal version must be supplied and it must be greater than the current 'pantsbuild.pants'
version.
When choosing a removal version there is a natural tension between the code-base, which benefits
from short deprecation cycles, and the user-base which may prefer to deal with deprecations less
frequently. As a rule of thumb, if the hint message can fully convey corrective action
succinctly and you judge the impact to be on the small side (affects custom tasks as opposed to
affecting BUILD files), lean towards the next release version as the removal version; otherwise,
consider initiating a discussion to win consensus on a reasonable removal version.
:param str removal_version: The pantsbuild.pants version which will remove the deprecated
function.
:param str hint_message: An optional hint pointing to alternatives to the deprecation.
:param str subject: The name of the subject that has been deprecated for logging clarity. Defaults
to the name of the decorated function/method.
:param bool ensure_stderr: Forwarded to `ensure_stderr` in warn_or_error().
:raises DeprecationApplicationError: if the @deprecated decorator is applied improperly.
"""
validate_deprecation_semver(removal_version, 'removal version')
def decorator(func):
if not inspect.isfunction(func):
raise BadDecoratorNestingError('The @deprecated decorator must be applied innermost of all '
'decorators.')
func_full_name = '{}.{}'.format(func.__module__, func.__name__)
@wraps(func)
def wrapper(*args, **kwargs):
warn_or_error(removal_version, subject or func_full_name, hint_message,
ensure_stderr=ensure_stderr)
return func(*args, **kwargs)
return wrapper
return decorator | 84b2ef33a40d8f28eba27e29679338093875eb25 | 11,199 |
def prepare_comparator(comparator_path):
""" Processes the comparator path from the benchmark specification. Imports the object
dynamically.
Parameters
----------
comparator_path : str
Path to the python script file containing the comparator definition.
Returns
-------
ccobra.CCobraComparator
Comparator object.
"""
comp = None
with contextmanager.dir_context(comparator_path):
imp = modelimporter.ModelImporter(comparator_path, superclass=CCobraComparator)
comp = imp.instantiate()
if not comp:
raise ValueError('Failed to instantiate comparator class.')
return comp | 63b9f863a3d68a6fc7bb36c9da909004859e469b | 11,200 |
import csv
def get_genes(path):
"""Returns a list of genes from a DE results table"""
with open(path) as gene_list:
gene_list = csv.reader(gene_list)
gene_list = [row[0] for row in gene_list if row[0].startswith('P')]
return gene_list | 9deed781edc0514348b7f6c2f6ac2d302f30295d | 11,201 |
import types
def getNoncaptureMovesForRegularPiece(theGame, pieceLocation):
""" This returns a GameNode for every legal move of a regular piece """
moveList = []
xBoard = pieceLocation.get_x_board()
yBoard = pieceLocation.get_y_board()
pieceDestinationLeft = None
pieceDestinationRight = None
if theGame.getState(pieceLocation) is types.PLAYER_A_REGULAR:
# Player A moves in positive Y increments
moveDelta = 1
elif theGame.getState(pieceLocation) is types.PLAYER_B_REGULAR:
# Player B moves in negative Y increments
moveDelta = -1
pieceDestinationLeft = getCoordinateHelper(xBoard - 1, yBoard + moveDelta)
pieceDestinationRight = getCoordinateHelper(xBoard + 1, yBoard + moveDelta)
if (pieceDestinationLeft and
destinationIsEmpty(theGame, pieceDestinationLeft)):
moveList.append(makePieceMove(theGame,
pieceDestinationLeft,
pieceLocation))
if (pieceDestinationRight and
destinationIsEmpty(theGame, pieceDestinationRight)):
moveList.append(makePieceMove(theGame,
pieceDestinationRight,
pieceLocation))
return moveList | 13ad1e5cc4fcbd17b55b66703b41a49e5283efb8 | 11,203 |
import pathlib
def _top_level_package_filenames(tarball_paths):
"""Transform the iterable of npm tarball paths to the top-level files contained within the package."""
paths = []
for path in tarball_paths:
parts = pathlib.PurePath(path).parts
if parts[0] == "package" and len(parts) == 2:
paths.append(parts[1])
return frozenset(paths) | 6b9b825eff14fe2e40f33c2caac104cf9869b277 | 11,204 |
from sklearn.preprocessing import MinMaxScaler, StandardScaler
def scale(X_train, X_test, type='MinMaxScaler', tuning_mode=True):
"""
This function applies Min-Max or Standard scaling to a set of features split into train and test data
Args:
The two dataframes:
X_train: a pandas dataframe with features of the training window
X_test: a pandas dataframe with features of the test window
type: 'MinMaxScaler' (default) or 'StandardScaler', selecting which scaler to use
tuning_mode: a boolean flag for tuning runs; defaults to True, set to False to silence the shape printout.
Return:
Two arrays derived from the original dataframes after applying StandardScaler() or MinMaxScaler(), where the standardization is fitted on the X_train features
"""
# Create an Scaler instance
scaler = MinMaxScaler()
if type=='StandardScaler':
scaler = StandardScaler()
# Apply the scaler model to fit the X_train data
X_scaler = scaler.fit(X_train)
# Transform the X_train and X_test DataFrames using the X_scaler
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
if tuning_mode == True:
print(f"X_train_scaled shape: {X_train_scaled.shape}")
print(f"X_test_scaled shape: {X_test_scaled.shape}")
return X_train_scaled, X_test_scaled | 58d283856d789158847138c002e9f1f19d4beb9f | 11,205 |
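A minimal usage sketch for scale(), assuming scikit-learn and pandas are installed and the scaler imports added above are in scope; the data is invented for the example.

import pandas as pd
X_train = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]})
X_test = pd.DataFrame({"a": [1.5, 2.5], "b": [15.0, 25.0]})
# The scaler is fitted on X_train only, then used to transform both windows.
X_train_scaled, X_test_scaled = scale(X_train, X_test, type="StandardScaler", tuning_mode=False)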
def calc_positions(pl, pr, region1, region3, w, xi, t, gamma, dustFrac=0.):
"""
:return: tuple of positions in the following order ->
Head of Rarefaction: xhd, Foot of Rarefaction: xft,
Contact Discontinuity: xcd, Shock: xsh
"""
p1, rho1 = region1[:2] # don't need velocity
p3, rho3, u3 = region3
c1 = sound_speed(gamma, p1, rho1, dustFrac)
c3 = sound_speed(gamma, p3, rho3, dustFrac)
if pl > pr:
xsh = xi + w * t
xcd = xi + u3 * t
xft = xi + (u3 - c3) * t
xhd = xi - c1 * t
else:
# pr > pl
xsh = xi - w * t
xcd = xi - u3 * t
xft = xi - (u3 - c3) * t
xhd = xi + c1 * t
return xhd, xft, xcd, xsh | 8034d327c4d9c9c771137e3eff49f8627315b10a | 11,206 |
import tensorflow as tf
def accuracy(targets, predictions, weights=None):
"""Computes the categorical accuracy.
Given a set of ground truth values and a set of predicted labels as tensors of
the same shape, it returns a tensor of the same shape with 1.0 in those position
where the ground truth value and the predicted one are equal, 0.0 otherwise.
So, if the ground truth is [[1, 2, 3], [0, 9, 23]] and the predicted labels
are [[1, 2, 4], [9, 0, 23]] the result will be: [[1, 1, 0], [0, 0, 1]].
Arguments:
targets: the ground truth values `Tensor`, with `tf.int32` as `dtype`. It has rank
`[d_0, d_1, ..., d_{r-1}]` and the last value is supposed to range between
`0` and `num_classes - 1`, where `num_classes` is the number of possible classes.
predictions: the predicted values `Tensor` with `tf.float32` as `dtype`. It can
have shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float32` and
represents the probability distribution across the output classes generated by
the model -- so that the predicted label is the one coming from argmax over the
last dimension. Alternatively it can be of the same shape, `dtype` and format as
`targets`, and it will be considered as the predicted labels.
weights: coefficients for the metric. This must be scalar or of same rank as `target`.
Returns:
values: a `Tensor` of `dtype=tf.float32` and of the same shape as `targets`
representing the accuracy, weighted according to the input argument `weights`.
weights: a `Tensor` of `dtype=tf.float32` and of the same shape of `values`
representing the weighted scheme for the streaming average on `values`, which
is the same tensor of the input `weights` argument.
"""
trank = targets.get_shape().ndims
prank = predictions.get_shape().ndims
if prank > trank:
diff = prank - trank
if diff > 1:
raise ValueError(
"""Rank of `predictions` must be equal to rank of `label` """
"""or greater of 1, found %d and %d instead.""" % (prank, trank))
predictions = tf.argmax(predictions, axis=-1) # tf.int64!!!
predictions = tf.cast(predictions, tf.int32)
is_equal = tf.equal(targets, predictions)
is_equal = tf.cast(is_equal, tf.float32)
if weights is not None:
is_equal = tf.multiply(is_equal, weights)
return is_equal, weights | 08dba6fab0f09c8d1507d00a8501192b2f008499 | 11,208 |
import re
def jp_author_name_normalized(name):
"""Construct the author name as P. Szekely."""
clean = name.replace('.',' ').replace(',',' ').replace(';', ' ')
clean = asciiChars(clean, '')
names = re.sub(r'\s+', ' ', clean.strip()).split(' ')
last_word = names[-1]
if len(last_word) == 1:
# The last word is an initial, so we accumulate all words before it that are not initials
# that will be our last name
i = 0
index = -1 # index of last word that is not an initial
for n in names:
if len(n)>1:
index = i
else:
names[i] = n + '.'
i = i + 1
if index == -1 or index == len(names) - 1:
return ' '.join(names).title()
last = names[index]
first = ' '.join(names[0:index]) + ' '.join(names[index + 1:])
return (first + ' ' + last).title()
else:
i = 0
for n in names:
if len(n) == 1:
names[i] = n + '.'
elif i < len(names) - 1:
names[i] = n[0] + '.'
i = i + 1
return ' '.join(names).title() | 61e5aa4290611266d4c96c67a870ce4c375a291c | 11,209 |
import math
def palette(tensor, shape, name=None, time=0.0, speed=1.0):
"""
Another approach to image coloration
https://iquilezles.org/www/articles/palettes/palettes.htm
"""
if not name:
return tensor
channel_shape = [shape[0], shape[1], 3]
p = palettes[name]
offset = p["offset"] * tf.ones(channel_shape)
amp = p["amp"] * tf.ones(channel_shape)
freq = p["freq"] * tf.ones(channel_shape)
phase = p["phase"] * tf.ones(channel_shape) + time
# Multiply value_map's result x .875, in case the image is just black and white (0 == 1, we don't want a solid color image)
return offset + amp * tf.math.cos(math.tau * (freq * value.value_map(tensor, shape, keepdims=True, with_normalize=False) * .875 + phase)) | 5717a405a68f54257082f15ce54ccb1fe3d52bd1 | 11,210 |
import pandas as pd
def binarize_categorical_columns(
input_train_df, input_test_df, categorical_columns):
"""Function to converting categorical features to one-hot encodings."""
# Binarize categorical columns.
binarized_train_df = pd.get_dummies(
input_train_df, columns=categorical_columns)
binarized_test_df = pd.get_dummies(
input_test_df, columns=categorical_columns)
# Make sure the train and test dataframes have the same binarized columns.
# Identify columns in train set not in test set and fill them in test set.
test_df_missing_cols = set(binarized_train_df.columns) - set(
binarized_test_df.columns)
for c in test_df_missing_cols:
binarized_test_df[c] = 0
# Identify columns in test set not in train set and fill them in train set.
train_df_missing_cols = set(binarized_test_df.columns) - set(
binarized_train_df.columns)
for c in train_df_missing_cols:
binarized_train_df[c] = 0
# Just to be sure that both train and test dfs have the same columns.
binarized_train_df = binarized_train_df[binarized_test_df.columns]
return binarized_train_df, binarized_test_df | 537b5271298ee00b60fd16039ba63d02c61189b9 | 11,211 |
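An illustrative call for binarize_categorical_columns, assuming pandas is imported as pd; the column names and values are hypothetical.

train = pd.DataFrame({"colour": ["red", "blue"], "size": [1, 2]})
test = pd.DataFrame({"colour": ["green", "red"], "size": [3, 4]})
train_bin, test_bin = binarize_categorical_columns(train, test, categorical_columns=["colour"])
# Both frames now share the columns: size, colour_blue, colour_green, colour_red.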
import numpy as np
from scipy.sparse.linalg import svds
def smallest_sval(X, solver='lobpcg', **kws):
"""
Computes the smallest singular value of a matrix using
scipy.sparse.linalg.svds
Parameters
----------
X: array-like
solver: str
Which solver to use. Must be one of ['lobpcg', 'arpack']
**kws
Kws for svds
Output
------
smallest_sval: float
The smallest singular value of X
"""
# for 1d arrays return the frobenius norm
if min(X.shape) == 1:
return np.sqrt((X.reshape(-1) ** 2).sum())
return svds(X, k=1, which='SM', solver=solver, **kws)[1].item() | ba5436fbc347eb050cc69c3b7dec7df414d31403 | 11,212 |
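A small sketch for smallest_sval, assuming numpy/scipy are available via the imports added above; the matrix is random test data.

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 5))
print(smallest_sval(X, solver='lobpcg'))  # smallest of the 5 singular values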
import numpy as np
def mean_ndcg_score(u_scores, u_labels, wtype="max"):
"""Mean Normalize Discounted cumulative gain (NDCG) for all users.
Parameters
----------
u_scores : array of arrays, shape = [num_users]
Each array is the predicted scores, shape = [n_samples[u]]
u_labels : array of arrays, shape = [num_users]
Each array is the ground truth label, shape = [n_samples[u]]
wtype : 'log' or 'max'
type for discounts
Returns
-------
mean_ndcg : array, shape = [num_users]
mean ndcg for each user (averaged among all rank)
avg_ndcg : array, shape = [max(n_samples)], averaged ndcg at each
position (averaged among all users for given rank)
"""
num_users = len(u_scores)
n_samples = [len(scores) for scores in u_scores]
max_sample = max(n_samples)
count = np.zeros(max_sample)
mean_ndcg = np.zeros(num_users)
avg_ndcg = np.zeros(max_sample)
for u in range(num_users):
ndcg = ndcg_score(u_scores[u], u_labels[u], wtype)
avg_ndcg[: n_samples[u]] += ndcg
count[: n_samples[u]] += 1
mean_ndcg[u] = ndcg.mean()
return mean_ndcg, avg_ndcg / count | 6a6e60182ec8bf2e3677779a5397d76fe492740c | 11,213 |
from typing import List
import torch
def average_precision(predictions: List, targets: List,
iou_threshold: float = 0.5) -> torch.Tensor:
"""Calculates average precision for given inputs
Args:
predictions (List): [Ni,5 dimensional as xmin,ymin,xmax,ymax,conf]
targets (List): [Mi,4 dimensional as xmin,ymin,xmax,ymax]
iou_threshold (float, optional): iou threshold for ap score. Defaults to 0.5.
Raises:
AssertionError: [description]
Returns:
torch.Tensor: average precision score
"""
assert len(predictions) == len(targets), "predictions and ground truths must be equal in length"
assert len(predictions) > 0, "given input list length must be greater than 0"
device = predictions[0].device
sorted_table, M = _generate_prediction_table(predictions, targets, device=device)
N = sorted_table.size(0)
if N == 0:
# pylint: disable=not-callable
return torch.tensor([0], dtype=torch.float32, device=device)
accumulated_tp = torch.zeros(sorted_table.size(0), dtype=torch.float32, device=device)
accumulated_fp = torch.zeros(sorted_table.size(0), dtype=torch.float32, device=device)
sorted_table[sorted_table[:, 0] < iou_threshold, 1] = 0.
tp = 0
fp = 0
for i, row in enumerate(sorted_table):
# row : 3 as iou,tp,confidence
if row[1] == 1.:
tp += 1
else:
fp += 1
accumulated_tp[i] = tp
accumulated_fp[i] = fp
precision = accumulated_tp / torch.arange(1, N+1, dtype=torch.float32, device=device)
recall = accumulated_tp / (M + 1e-16)
unique_recalls = recall.unique_consecutive()
auc = torch.empty(unique_recalls.size(0), dtype=torch.float32, device=device)
# pylint: disable=not-callable
last_value = torch.tensor(0, dtype=torch.float32, device=device)
for i, recall_value in enumerate(unique_recalls):
mask = recall == recall_value # N,
p_mul = precision[mask].max() # get max p
auc[i] = p_mul * (recall_value-last_value)
last_value = recall_value
return auc.sum() | e32efe5501fee9140f9b10d2a29fa779af4be18b | 11,215 |
def new_post(update: Update, context: CallbackContext) -> int:
"""Start the conversation, display any stored data and ask user for input."""
# init empty list to store dicts w/ info about each uploaded photo
context.user_data['photos'] = []
reply_text = "Initiate conversation: new post "
# if context.user_data:
# reply_text += (
# f"Current data: {', '.join(context.user_data.keys())}."
# )
# else:
reply_text += (
"Enter title"
)
update.message.reply_text(reply_text, reply_markup=markup)
return ENTER_TITLE | 1b49d20628aaafdf6d61f1cb92cc2f123e742fac | 11,216 |
from typing import List
from typing import Union
from typing import Tuple
from typing import Sequence
def _get_geometry_type_from_list(
features: List, allowed_features: List[Union[Tuple, Sequence]]
) -> Tuple[str]:
"""
Gets the Geometry type from a List, otherwise it raises an exception.
:param features: input feature as a list
:param allowed_features: allowed feature classes and geometry type names used for validation
:return: tuple with extracted geometry types
"""
geometry_type = tuple()
n_dim = get_input_dimensions(features)
if n_dim == 1 and all(
isinstance(el, (dict, *allowed_features[0])) for el in features
):
return tuple(
map(
lambda geom: _get_geometry_type_from_feature(geom, allowed_features),
features,
)
)
elif all(isinstance(el, (list, tuple, int, float)) for el in features):
feature_type = [
k for k, v in dimensions.items() if v == n_dim and k in allowed_features[1]
]
if len(feature_type) == 1:
geometry_type += (feature_type[0],)
else:
raise InvalidInput(
error_code_messages["InvalidGeometry"](allowed_features[1])
)
else:
raise InvalidInput(error_code_messages["InvalidGeometry"](allowed_features[1]))
return geometry_type | c6ec2e68e3f5667a9fa31ab72b76b0257e4a7221 | 11,217 |
def save_network_to_path(interactions, path):
"""Save dataframe to a tab-separated file at path."""
return interactions.to_csv(path, sep='\t', index=False, na_rep=str(None)) | f189c6e8f7791f1f97c32847f03e0cc2e167ae90 | 11,218 |
from typing import Optional
from typing import Dict
from typing import Tuple
from typing import Union
def apply_sql(
query: str,
output_name: Optional[str],
found: Dict[str, beam.PCollection],
run: bool = True) -> Tuple[str, Union[PValue, SqlNode], SqlChain]:
"""Applies a SqlTransform with the given sql and queried PCollections.
Args:
query: The SQL query executed in the magic.
output_name: (optional) The output variable name in __main__ module.
found: The PCollections with variable names found to be used in the query.
run: Whether to prepare the SQL pipeline for a local run or not.
Returns:
A tuple of values. First str value is the output variable name in
__main__ module, auto-generated if not provided. Second value: if run,
it's a PValue; otherwise, a SqlNode tracks the SQL without applying it or
executing it. Third value: SqlChain is a chain of SqlNodes that have been
applied.
"""
output_name = _generate_output_name(output_name, query, found)
query, sql_source, chain = _build_query_components(
query, found, output_name, run)
if run:
try:
output = sql_source | SqlTransform(query)
# Declare a variable with the output_name and output value in the
# __main__ module so that the user can use the output smoothly.
output_name, output = create_var_in_main(output_name, output)
_LOGGER.info(
"The output PCollection variable is %s with element_type %s",
output_name,
pformat_namedtuple(output.element_type))
return output_name, output, chain
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
on_error('Error when applying the Beam SQL: %s', e)
else:
return output_name, chain.current, chain | 26541c4e946a3d7b8dd1783d8b429bceb0b7b6bf | 11,219 |
from typing import Tuple
from datetime import datetime
def check_course_time_conflict(current_course: Course,
user: NaturalPerson) -> Tuple[bool, str]:
"""
Check whether the meeting times of the course being selected conflict with courses already selected.
"""
selected_courses = Course.objects.activated().filter(
participant_set__person=user,
participant_set__status__in=[
CourseParticipant.Status.SELECT,
CourseParticipant.Status.SUCCESS,
]).prefetch_related("time_set")
def time_hash(time: datetime):
return time.weekday() * 1440 + time.hour * 60 + time.minute
# Since at most 6 courses can be selected, a brute-force check is used for now
for current_course_time in current_course.time_set.all():
# Meeting time of the course being selected
current_start_time = current_course_time.start
current_end_time = current_course_time.end
for course in selected_courses:
for course_time in course.time_set.all():
start_time = course_time.start
end_time = course_time.end
# Not very efficient; could be improved
if not (time_hash(current_start_time) >= time_hash(end_time) or
time_hash(current_end_time) <= time_hash(start_time)):
# Conflict found
return True, \
f"《{current_course.name}》和《{course.name}》的上课时间发生冲突!"
# No conflict
return False, ""
'''
# Alternative version with fewer loops
from django.db.models import Q
conflict_course_names = set()
for current_course_time in current_course.time_set.all():
# Conflicting times
conflict_times = CourseTime.objects.filter(
# Courses already selected
Q(course__in=selected_courses),
# Starts earlier than the current end time
(Q(start__week_day=current_course_time.end.weekday() + 1,
start__time__lte=current_course_time.end.time())
| Q(start__week_day__lt=current_course_time.end.weekday() + 1))
# Ends later than the current start time
& (Q(end__week_day=current_course_time.start.weekday() + 1,
end__time__gte=current_course_time.start.time())
| Q(end__week_day__gt=current_course_time.start.weekday() + 1))
)
if conflict_times.exists():
# return True, f'《{conflict_times.first().course.name}》'
conflict_course_names.union(
conflict_times.values_list('course__name', flat=True))
conflict_count = len(conflict_course_names)
# Conflicts exist
if conflict_count:
return conflict_count, f'《{"》《".join(conflict_course_names)}》'
# No conflict
return conflict_count, ""
''' | 2b5a6d621463018b64d0d2d44ac8827b687cee67 | 11,220 |
def or_func(a, b):
"""Creates a new list out of the two supplied by applying the function to each
equally-positioned pair in the lists. The returned list is truncated to the
length of the shorter of the two input lists"""
return a or b | 0f90173e05910ebc7e81079d99bfdbbb1c0ee66b | 11,221 |
import typing
def json_loads(
value: typing.Union[bytes, bytearray, str]
) -> typing.Union[
typing.List[typing.Dict[str, typing.Any]], typing.Dict[str, typing.Any]
]:
"""Practical json dumps helper function, bytes, bytearray, and
str input are accepted. supports for ``orjson``, ``simplejson`.
In case of orjson, if the input exists as bytes (was read directly from a source),
it is recommended to pass bytes. This has lower memory usage and lower latency.
The input must be valid UTF-8."""
if json_mod.__name__ != "orjson" and isinstance(value, (bytes, bytearray)):
value = value.decode("utf8", "strict")
return json_mod.loads(value) | 9fe8664df512b52b565d68f3433abc346112c974 | 11,222 |
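A usage sketch for json_loads; json_mod is whichever backend the surrounding module resolved (orjson, simplejson, or the stdlib json).

data = json_loads(b'{"name": "example", "values": [1, 2, 3]}')
print(data["values"])  # [1, 2, 3]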
import json
def execute_create_payment(client, create_payment_request):
"""
Create a payment. Automatically creates an NR for use.
:param client:
:param create_payment_request:
:return:
"""
headers = get_test_headers()
draft_nr = setup_draft_nr(client)
nr_id = draft_nr.get('id')
payment_action = 'COMPLETE'
# POST /api/v1/payments/<int:nr_id>/<string:payment_action>
request_uri = API_BASE_URI + str(nr_id) + '/' + payment_action
path = request_uri
body = json.dumps(create_payment_request)
log_request_path(path)
response = client.post(path, data=body, headers=headers)
assert response.status_code == 201
payload = json.loads(response.data)
verify_payment_payload(payload)
assert payload.get('statusCode') == 'CREATED'
return payload | 56ff5e99f577d64cd71c451f9502220585b1d920 | 11,223 |
from typing import List
def collect_contrib_features(
project: 'ballet.project.Project'
) -> List[Feature]:
"""Collect contributed features for a project at project_root
For a project ``foo``, walks modules within the ``foo.features.contrib``
subpackage. A single object that is an instance of ``ballet.Feature`` is
imported if present in each module. The resulting ``Feature`` objects are
collected.
Args:
project: project object
Returns:
collected features
"""
contrib = project.resolve('features.contrib')
return _collect_contrib_features(contrib) | 8c76f968d7fc75bba2eb6fee9447e49de2d53694 | 11,224 |
def verify_token(token):
""" Basic auth method """
curr_user = User.check_token(token) if token else None
return curr_user is not None | 95027a9d0235521819b5ca262082b29fe252dc1b | 11,227 |
from typing import Sequence
import random
def generate_test_cases(n_tests: int, min_len: int, max_len: int, min_dim: int, max_dim: int) \
-> Sequence[Sequence[int]]:
"""
:param n_tests: number of tests to generate
:param min_len: minimum number of matrices for each test case
:param max_len: maximum number of matrices for each test case
:param min_dim: minimum dimension for each matrix (applies both for rows and columns)
:param max_dim: maximum dimension for each matrix (applies both for rows and columns)
:return: a list of [dims, solution] pairs, one per generated test case
"""
solutions = []
for n in range(n_tests):
test_len = random.randint(min_len, max_len)
dims = tuple([random.randint(min_dim, max_dim) for _ in range(test_len)])
solution = memoized_mcm(dims=dims)[0]
solutions.append([dims, solution])
return solutions | 63a20aa5d94456597c29523019e2699aedf4ab5f | 11,228 |
import numpy as np
from scipy import spatial
from numba.typed import List
from anndata import AnnData
def calc_neighbours(
adata: AnnData,
distance: float = None,
index: bool = True,
verbose: bool = True,
) -> List:
"""Calculate the proportion of known ligand-receptor co-expression among the neighbouring spots or within spots
Parameters
----------
adata: AnnData The data object to scan
distance: float Distance to determine the neighbours (default: closest), distance=0 means within spot
index: bool Indicates whether to return neighbours as indices to other spots or names of other spots.
Returns
-------
neighbours: numba.typed.List List of np.array's indicating neighbours by indices for each spot.
"""
if verbose:
print("Calculating neighbours...")
# get neighbour spots for each spot according to the specified distance
coor = adata.obs[["imagerow", "imagecol"]]
point_tree = spatial.cKDTree(coor)
neighbours = []
for i, spot in enumerate(adata.obs_names):
if distance == 0:
neighbours.append(np.array([i if index else spot]))
else:
n_index = point_tree.query_ball_point(
np.array(
[adata.obs["imagerow"].loc[spot], adata.obs["imagecol"].loc[spot]]
),
distance,
)
if index:
n_index = np.array(n_index, dtype=np.int_)
neighbours.append(n_index[n_index != i])
else:
n_spots = adata.obs_names[n_index]
neighbours.append(n_spots[n_spots != spot])
typed_neighs = List()
[typed_neighs.append(neigh) for neigh in neighbours]
n_neighs = np.array([len(neigh) for neigh in neighbours])
if verbose:
print(
f"{len(np.where(n_neighs==0)[0])} spots with no neighbours, "
f"{int(np.median(n_neighs))} median spot neighbours."
)
if np.all(n_neighs == 0):
raise Exception(
"All spots have no neighbours at current distance,"
" set distance to higher value, or distance=0 for "
"within-spot mode."
)
return typed_neighs | 1fdbc372f2249115b0ace55bf5d89c54a1143523 | 11,229 |
from typing import Optional
from typing import Tuple
from typing import List
def _build_tree_string(
root: Optional[Node],
curr_index: int,
include_index: bool = False,
delimiter: str = "-",
) -> Tuple[List[str], int, int, int]:
"""Recursively walk down the binary tree and build a pretty-print string.
In each recursive call, a "box" of characters visually representing the
current (sub)tree is constructed line by line. Each line is padded with
whitespaces to ensure all lines in the box have the same length. Then the
box, its width, and start-end positions of its root node value repr string
(required for drawing branches) are sent up to the parent call. The parent
call then combines its left and right sub-boxes to build a larger box etc.
:param root: Root node of the binary tree.
:type root: binarytree.Node | None
:param curr_index: Level-order_ index of the current node (root node is 0).
:type curr_index: int
:param include_index: If set to True, include the level-order_ node indexes using
the following format: ``{index}{delimiter}{value}`` (default: False).
:type include_index: bool
:param delimiter: Delimiter character between the node index and the node
value (default: '-').
:type delimiter:
:return: Box of characters visually representing the current subtree, width
of the box, and start-end positions of the repr string of the new root
node value.
:rtype: ([str], int, int, int)
.. _Level-order:
https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search
"""
if root is None:
return [], 0, 0, 0
line1 = []
line2 = []
if include_index:
node_repr = "{}{}{}".format(curr_index, delimiter, root.val)
else:
node_repr = str(root.val)
new_root_width = gap_size = len(node_repr)
# Get the left and right sub-boxes, their widths, and root repr positions
l_box, l_box_width, l_root_start, l_root_end = _build_tree_string(
root.left, 2 * curr_index + 1, include_index, delimiter
)
r_box, r_box_width, r_root_start, r_root_end = _build_tree_string(
root.right, 2 * curr_index + 2, include_index, delimiter
)
# Draw the branch connecting the current root node to the left sub-box
# Pad the line with whitespaces where necessary
if l_box_width > 0:
l_root = (l_root_start + l_root_end) // 2 + 1
line1.append(" " * (l_root + 1))
line1.append("_" * (l_box_width - l_root))
line2.append(" " * l_root + "/")
line2.append(" " * (l_box_width - l_root))
new_root_start = l_box_width + 1
gap_size += 1
else:
new_root_start = 0
# Draw the representation of the current root node
line1.append(node_repr)
line2.append(" " * new_root_width)
# Draw the branch connecting the current root node to the right sub-box
# Pad the line with whitespaces where necessary
if r_box_width > 0:
r_root = (r_root_start + r_root_end) // 2
line1.append("_" * r_root)
line1.append(" " * (r_box_width - r_root + 1))
line2.append(" " * r_root + "\\")
line2.append(" " * (r_box_width - r_root))
gap_size += 1
new_root_end = new_root_start + new_root_width - 1
# Combine the left and right sub-boxes with the branches drawn above
gap = " " * gap_size
new_box = ["".join(line1), "".join(line2)]
for i in range(max(len(l_box), len(r_box))):
l_line = l_box[i] if i < len(l_box) else " " * l_box_width
r_line = r_box[i] if i < len(r_box) else " " * r_box_width
new_box.append(l_line + gap + r_line)
# Return the new box, its width and its root repr positions
return new_box, len(new_box[0]), new_root_start, new_root_end | 988a5816647ca31b19c25a3017061ec71cdabc85 | 11,230 |
import numpy as np
def growth(xs, ys, x):
"""
growth function
pre:
xs,ys are arrays of known x and y values. x is a scalar or np.array
of values to calculate new y values for
post:
return new y values
"""
xs = np.array(xs)
ys = np.log(np.array(ys))
xy_bar = np.average(xs*ys)
x_bar = np.average(xs)
y_bar = np.average(ys)
x_sq_bar = np.average(xs**2)
beta = (xy_bar - x_bar*y_bar)/(x_sq_bar- x_bar**2)
alpha = y_bar - beta* x_bar
return np.exp(alpha + beta * x) | 5a47077ebfcca5284e29a02c467ba059d6350182 | 11,231 |
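A worked example for growth, fitting y = 2*exp(0.5*x) exactly and extrapolating; it only needs numpy (imported as np above).

xs = np.array([0.0, 1.0, 2.0, 3.0])
ys = 2.0 * np.exp(0.5 * xs)
print(growth(xs, ys, np.array([4.0, 5.0])))  # approx. [14.78, 24.36]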
def remove_short_transition(transition_sites,thresh=11):
"""
Removes transitions that are too close to others.
"""
if len(transition_sites) < 4:
return transition_sites
for i in range(len(transition_sites) - 1):
forward_difference = transition_sites[i+1] - transition_sites[i]
if forward_difference <= thresh:
transition_sites[i] = transition_sites[-1]
transition_sites.append(0)
transition_sites = list(set(transition_sites))
transition_sites = sorted(transition_sites)
return transition_sites | 5ae188ef1314f4416b3baba07f45cceb41dc4c7a | 11,232 |
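An illustrative call for remove_short_transition with made-up transition indices; sites closer than `thresh` are collapsed, and a 0 sentinel is introduced by the merge step.

print(remove_short_transition([10, 15, 40, 80], thresh=11))  # [0, 15, 40, 80]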
import numpy as np
from PIL import Image
def load_image(file_path):
"""
Load data from an image.
Parameters
----------
file_path : str
Path to the file.
Returns
-------
float
2D array.
"""
if "\\" in file_path:
raise ValueError(
"Please use a file path following the Unix convention")
mat = None
try:
mat = np.asarray(Image.open(file_path), dtype=np.float32)
except IOError:
print(("No such file or directory: {}").format(file_path))
raise
if len(mat.shape) > 2:
axis_m = np.argmin(mat.shape)
mat = np.mean(mat, axis=axis_m)
return mat | 1399e3299e8697097a0c92f39c122735ab63c633 | 11,233 |
import numpy as np
from glad.util import argmin_datetime,haversine
def absolute_dispersion(drifters,starttime,time):
"""
Calculates absolute dispersion A^2, given desired current and
initial time.
Parameters
----------
drifters : GladDrifter instance, list, ndarray
A list or numpy array of GladDrifter instances.
starttime : datetime instance
Start time.
time : datetime instance
Time at which to compute absolute dispersion.
Returns
-------
A2 : float
Absolute dispersion in km^2.
"""
if not isinstance(drifters,list):
drifters = [drifters]
dist_squared = []
for d in drifters:
if not (d.has_time(starttime) and d.has_time(time)):
continue
n1 = argmin_datetime(time,d.time)
n0 = argmin_datetime(starttime,d.time)
dist_squared.append(haversine(d.lon[n1],d.lat[n1],\
d.lon[n0],d.lat[n0])**2)
A2 = np.mean(dist_squared)
return A2 | 1cf22d344994c913f294d005558afd73eb21cd2a | 11,234 |
import logging
import geopandas as gpd
def add_xgis_url(df: gpd.geodataframe.GeoDataFrame) -> gpd.geodataframe.GeoDataFrame:
""" Adding x-gis URL which will let the user check the result
:param df: gdf to use
"""
# Generaring url from string
df.reset_index(inplace=True) # resetting index
xy_tog = df[c.X].astype(str) + "," + df[c.Y].astype(str)
kinnistu_str = df[c.kinnistu_nr].astype(str)
# Final URL
x_gis_url = c.X_GIS_URL_yua + "?" + "punkt=" + xy_tog + "&moot=500" + "&tooltip=Kinnistu nr: " + kinnistu_str
# Adding new column
df[c.URL] = x_gis_url
logging.info("\tAdded URL to dataframe")
return df | a220169ba79b452b8cbb0d0b2aee11229c09fdae | 11,235 |
from typing import Optional
from typing import Sequence
def get_compute_capacity_reservation_instance_shapes(availability_domain: Optional[str] = None,
compartment_id: Optional[str] = None,
display_name: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetComputeCapacityReservationInstanceShapesFilterArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetComputeCapacityReservationInstanceShapesResult:
"""
This data source provides the list of Compute Capacity Reservation Instance Shapes in Oracle Cloud Infrastructure Core service.
Lists the shapes that can be reserved within the specified compartment.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_compute_capacity_reservation_instance_shapes = oci.core.get_compute_capacity_reservation_instance_shapes(compartment_id=var["compartment_id"],
availability_domain=var["compute_capacity_reservation_instance_shape_availability_domain"],
display_name=var["compute_capacity_reservation_instance_shape_display_name"])
```
:param str availability_domain: The name of the availability domain. Example: `Uocm:PHX-AD-1`
:param str compartment_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
:param str display_name: A filter to return only resources that match the given display name exactly.
"""
__args__ = dict()
__args__['availabilityDomain'] = availability_domain
__args__['compartmentId'] = compartment_id
__args__['displayName'] = display_name
__args__['filters'] = filters
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:core/getComputeCapacityReservationInstanceShapes:getComputeCapacityReservationInstanceShapes', __args__, opts=opts, typ=GetComputeCapacityReservationInstanceShapesResult).value
return AwaitableGetComputeCapacityReservationInstanceShapesResult(
availability_domain=__ret__.availability_domain,
compartment_id=__ret__.compartment_id,
compute_capacity_reservation_instance_shapes=__ret__.compute_capacity_reservation_instance_shapes,
display_name=__ret__.display_name,
filters=__ret__.filters,
id=__ret__.id) | d294586d5aac79bb452d93bf3424daa403923188 | 11,236 |
import re
def is_branch_or_version(string):
"""Tries to figure out if passed argument is branch or version.
Returns 'branch', 'version', or None if deduction failed.
Branch is either 'master' or something like 3.12.x;
version is something like 3.12.5,
optionally followed by letter (3.12.5b) for alpha/beta/gamma...zeta,
optionally followed by release (3.12.5-2).
"""
if string == "master" or re.match("3\.\\d+\.x$", string):
return "branch"
if re.match("3\\.\\d+\\.\\d+[a-z]?(-\\d+)?$", string):
return "version"
return None | 6a5ad7cb7af29b6ce0e39ff86171f0f230929fb3 | 11,237 |
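Example invocations of is_branch_or_version; the outputs follow directly from the regexes above.

print(is_branch_or_version("master"))     # 'branch'
print(is_branch_or_version("3.12.x"))     # 'branch'
print(is_branch_or_version("3.12.5b-2"))  # 'version'
print(is_branch_or_version("foo"))        # None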
def eval_regressors(regressor_factories, gen_one_data, batch_size=1, names=None):
"""Evaluates an iterable of regressors on some test data of size
:batch_size: generated from :gen_one_data:.
"""
X, y = dg.BatchData.batch(gen_one_data, batch_size)
return _eval_regressors(regressor_factories, X, y, names=names) | 89c63d5c8c4697370ff2e63baf96ec456565fe42 | 11,238 |
import matplotlib as mpl
def beautify():
"""Set reasonable defaults matplotlib.
This method replaces matplotlib's default rgb/cmyk colors with the
colarized colors. It also does:
* re-orders the default color cycle
* sets the default linewidth
* replaces the defaault 'RdBu' cmap
* sets the default cmap to 'RdBu'
Examples
--------
You can safely call ``beautify`` right after you've imported the
``plot`` module.
>>> from wyrm import plot
>>> plot.beautify()
"""
def to_mpl_format(r, g, b):
"""Convert 0..255 t0 0..1."""
return r / 256, g / 256, b / 256
# The solarized color palette
base03 = to_mpl_format( 0, 43, 54)
base02 = to_mpl_format( 7, 54, 66)
base01 = to_mpl_format( 88, 110, 117)
base00 = to_mpl_format(101, 123, 131)
base0 = to_mpl_format(131, 148, 150)
base1 = to_mpl_format(147, 161, 161)
base2 = to_mpl_format(238, 232, 213)
base3 = to_mpl_format(253, 246, 227)
yellow = to_mpl_format(181, 137, 0)
orange = to_mpl_format(203, 75, 22)
red = to_mpl_format(220, 50, 47)
magenta = to_mpl_format(211, 54, 130)
violet = to_mpl_format(108, 113, 196)
blue = to_mpl_format( 38, 139, 210)
cyan = to_mpl_format( 42, 161, 152)
green = to_mpl_format(133, 153, 0)
white = (1, 1, 1)#base3
black = base03
# Overwrite the default color values with our new ones. Those
# single-letter colors are used all over the place in matplotlib, so
# this setting has a huge effect.
mpl.colors.ColorConverter.colors = {
'b': blue,
'c': cyan,
'g': green,
'k': black,
'm': magenta,
'r': red,
'w': white,
'y': yellow
}
# Redefine the existing 'RdBu' (Red-Blue) colormap, with our new
# colors for red and blue
cdict = {
'red' : ((0., blue[0], blue[0]), (0.5, white[0], white[0]), (1., magenta[0], magenta[0])),
'green': ((0., blue[1], blue[1]), (0.5, white[1], white[1]), (1., magenta[1], magenta[1])),
'blue' : ((0., blue[2], blue[2]), (0.5, white[2], white[2]), (1., magenta[2], magenta[2]))
}
mpl.cm.register_cmap('RdBu', data=cdict)
# Reorder the default color cycle
mpl.rcParams['axes.color_cycle'] = ['b', 'm', 'g', 'r', 'c', 'y', 'k']
# Set linewidth in plots to 2
mpl.rcParams['lines.linewidth'] = 2
# Set default cmap
mpl.rcParams['image.cmap'] = 'RdBu' | da08523ed69b2bb97af06cc1e51f5bdf2e412faa | 11,239 |
import yaml
from typing import OrderedDict
def ordered_load(stream, loader=yaml.SafeLoader, object_pairs_hook=OrderedDict):
"""Load YAML, preserving the ordering of all data."""
class OrderedLoader(loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader) | d3b6a9c84e895a63a85de39dbb5a849d6c224ecd | 11,241 |
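A minimal usage sketch for ordered_load: keys come back in file order as an OrderedDict.

doc = "first: 1\nsecond: 2\nthird: 3\n"
print(ordered_load(doc))
# OrderedDict([('first', 1), ('second', 2), ('third', 3)])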
def user_unions_clear(*args):
"""
user_unions_clear(map)
Clear user_unions_t.
@param map (C++: user_unions_t *)
"""
return _ida_hexrays.user_unions_clear(*args) | f1e7b1a8cf966f3d28966919cf2e4c5e82cfb45c | 11,242 |
def heur_best_from_now(state):
"""
This heuristic computes the cost by putting all of the remaining weight on the launch with the lowest variable cost.
@param state: state to compute the cost.
@return: cost
"""
try:
return min([launch.compute_variable_cost(state.left_weight()) for launch in state.launches[state.launch_nr:]])
except ValueError:
return 0 | 050c7c718ad849e8e7fc6892de7097c3bd0f83dd | 11,243 |
def get_ngram(text, ns=[1]):
"""
Extract n-gram features from the text.
:param text: str
:return: list
"""
if type(ns) != list:
raise RuntimeError("ns of function get_ngram() must be list!")
for n in ns:
if n < 1:
raise RuntimeError("enum of ns must '>1'!")
len_text = len(text)
ngrams = []
for n in ns:
ngram_n = []
for i in range(len_text):
if i + n <= len_text:
ngram_n.append(text[i:i + n])
else:
break
if not ngram_n:
ngram_n.append(text)
ngrams += ngram_n
return ngrams | 3826fcdce46b455762417528ac9f31a0552b5a04 | 11,244 |
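A short example for get_ngram, extracting character unigrams and bigrams.

print(get_ngram("abcd", ns=[1, 2]))
# ['a', 'b', 'c', 'd', 'ab', 'bc', 'cd']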
# alias the import so the module-level `entropy` function below does not shadow it
from scipy.stats import entropy as scipy_entropy
def entropy(df):
"""Return Shannon Entropy for purchases of each user."""
mask = df.credit_debit.eq('debit')
df = df[mask]
num_cats = df.auto_tag.nunique()
def calc_entropy(user, num_cats):
total_purchases = len(user)
cat_purchases = user.groupby('auto_tag').size()
probs = (cat_purchases + 1) / (total_purchases + num_cats)
return scipy_entropy(probs, base=2)
g = df.groupby('user_id')
return g.apply(calc_entropy, num_cats).rename('entropy') | dd3f0e1d3865de151ce4a451c2def148d58da9da | 11,245 |
def splitter(h):
""" Splits dictionary numbers by the decimal point."""
if type(h) is dict:
for k, i in h.items():
h[k] = str(i).split('.')
if type(h) is list:
for n in range(0, len(h)):
h[n] = splitter(h[n])
return h | 1eb5e38a02ce310a068d8c1c9df2790658722662 | 11,246 |
def listall_comments():
"""Lists rule-based labels
Returns:
list: A list of FileTypeComments
"""
return listall('comment') | 5f26a0632497309e13d624437767467811d8faa3 | 11,247 |
import random
import hashlib
import time
def get_random_string(length=12,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Returns a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
"%s%s%s" % (
random.getstate(),
time.time(),
UNSECURE_RANDOM_STRING)
).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)]) | 9a8df9402b8ccde30ae289eb759708f506ef6087 | 11,248 |
import html
def make_html_doc(body, root, resource_dir=None, title=None, meta=None):
"""Generate HTML document
Parameters
----------
body : fmtxt-object
FMTXT object which should be formatted into an HTML document.
root : str
Path to the directory in which the HTML file is going to be located.
resource_dir : None | str
Name for the directory containing resources like images, relative to
root. If None, images are embedded.
title : None | FMText
Document title. The default is to try to infer the title from the body
or use "Untitled".
meta : dict
Meta-information for document head.
Returns
-------
html : str
HTML document.
"""
if title is None:
if hasattr(body, '_site_title') and body._site_title is not None:
title = html(body._site_title)
elif hasattr(body, '_heading'):
title = html(body._heading)
else:
title = "Untitled"
if meta:
meta = '<meta %s>\n' % ' '.join('%s=%r' % x for x in meta.items())
else:
meta = ''
style = '\n'.join(('', '<style>', STYLE, '</style>'))
env = {'root': root, 'resource_dir': resource_dir}
txt_body = html(body, env)
return _html_doc_template.format(meta=meta, title=title, style=style,
body=txt_body) | 6794050593440cafafa080e46b435675fbdad648 | 11,249 |
def blur(img):
"""
:param img: the SimpleImage to blur
:return: a new SimpleImage containing the blurred result
"""
blank_img = SimpleImage.blank(img.width, img.height)
for x in range(1, img.width-1):
for y in range(1, img.height-1):
left1_pixel = img.get_pixel(x-1, y-1)
left2_pixel = img.get_pixel(x-1, y)
left3_pixel = img.get_pixel(x-1, y+1)
center1_pixel = img.get_pixel(x, y-1)
center2_pixel = img.get_pixel(x, y)
center3_pixel = img.get_pixel(x, y+1)
right1_pixel = img.get_pixel(x+1, y-1)
right2_pixel = img.get_pixel(x+1, y)
right3_pixel = img.get_pixel(x+1, y+1)
new_pixel = blank_img.get_pixel(x, y)
new_pixel.red = (left1_pixel.red + left2_pixel.red + left3_pixel.red + center1_pixel.red + center2_pixel.red
+ center3_pixel.red + right1_pixel.red + right2_pixel.red + right3_pixel.red) // 9
new_pixel.green = (left1_pixel.green + left2_pixel.green + left3_pixel.green + center1_pixel.green +
center2_pixel.green + center3_pixel.green + right1_pixel.green + right2_pixel.green +
right3_pixel.green) // 9
new_pixel.blue = (left1_pixel.blue + left2_pixel.blue + left3_pixel.blue + center1_pixel.blue +
center2_pixel.blue + center3_pixel.blue + right1_pixel.blue + right2_pixel.blue +
right3_pixel.blue) // 9
for x in range(1):
for y in range(1, img.height-1):
"""edge x=0"""
edge1_pixel = img.get_pixel(x, y-1)
edge2_pixel = img.get_pixel(x, y)
edge3_pixel = img.get_pixel(x, y+1)
edge4_pixel = img.get_pixel(x+1, y-1)
edge5_pixel = img.get_pixel(x+1, y)
edge6_pixel = img.get_pixel(x+1, y+1)
new_pixel = blank_img.get_pixel(x, y)
new_pixel.red = (edge1_pixel.red + edge2_pixel.red + edge3_pixel.red + edge4_pixel.red + edge5_pixel.red +
edge6_pixel.red) // 6
new_pixel.green = (edge1_pixel.green + edge2_pixel.green + edge3_pixel.green + edge4_pixel.green +
edge5_pixel.green + edge6_pixel.green) // 6
new_pixel.blue = (edge1_pixel.blue + edge2_pixel.blue + edge3_pixel.blue + edge4_pixel.blue +
edge5_pixel.blue + edge6_pixel.blue) // 6
for x in range(img.width-1, img.width):
for y in range(1, img.height-1):
"""edge x=width-1"""
edge1_pixel = img.get_pixel(x-1, y-1)
edge2_pixel = img.get_pixel(x-1, y)
edge3_pixel = img.get_pixel(x-1, y+1)
edge4_pixel = img.get_pixel(x, y-1)
edge5_pixel = img.get_pixel(x, y)
edge6_pixel = img.get_pixel(x, y+1)
new_pixel = blank_img.get_pixel(x, y)  # write into the output image, not the source
new_pixel.red = (edge1_pixel.red + edge2_pixel.red + edge3_pixel.red + edge4_pixel.red + edge5_pixel.red +
edge6_pixel.red) // 6
new_pixel.green = (edge1_pixel.green + edge2_pixel.green + edge3_pixel.green + edge4_pixel.green +
edge5_pixel.green + edge6_pixel.green) // 6
new_pixel.blue = (edge1_pixel.blue + edge2_pixel.blue + edge3_pixel.blue + edge4_pixel.blue +
edge5_pixel.blue + edge6_pixel.blue) // 6
for x in range(1, img.width-1):
for y in range(1):
"""edge y=0"""
edge1_pixel = img.get_pixel(x-1, y)
edge2_pixel = img.get_pixel(x, y)
edge3_pixel = img.get_pixel(x+1, y)
edge4_pixel = img.get_pixel(x-1, y+1)
edge5_pixel = img.get_pixel(x, y+1)
edge6_pixel = img.get_pixel(x+1, y+1)
new_pixel = blank_img.get_pixel(x, y)  # write into the output image, not the source
new_pixel.red = (edge1_pixel.red + edge2_pixel.red + edge3_pixel.red + edge4_pixel.red + edge5_pixel.red +
edge6_pixel.red) // 6
new_pixel.green = (edge1_pixel.green + edge2_pixel.green + edge3_pixel.green + edge4_pixel.green +
edge5_pixel.green + edge6_pixel.green) // 6
new_pixel.blue = (edge1_pixel.blue + edge2_pixel.blue + edge3_pixel.blue + edge4_pixel.blue +
edge5_pixel.blue + edge6_pixel.blue) // 6
for x in range(1, img.width-1):
for y in range(img.height-1, img.height):
"""edge y=height-1"""
edge1_pixel = img.get_pixel(x-1, y-1)
edge2_pixel = img.get_pixel(x, y-1)
edge3_pixel = img.get_pixel(x+1, y-1)
edge4_pixel = img.get_pixel(x-1, y)
edge5_pixel = img.get_pixel(x, y)
edge6_pixel = img.get_pixel(x+1, y)
new_pixel = blank_img.get_pixel(x, y)  # write into the output image, not the source
new_pixel.red = (edge1_pixel.red + edge2_pixel.red + edge3_pixel.red + edge4_pixel.red + edge5_pixel.red +
edge6_pixel.red) // 6
new_pixel.green = (edge1_pixel.green + edge2_pixel.green + edge3_pixel.green + edge4_pixel.green +
edge5_pixel.green + edge6_pixel.green) // 6
new_pixel.blue = (edge1_pixel.blue + edge2_pixel.blue + edge3_pixel.blue + edge4_pixel.blue +
edge5_pixel.blue + edge6_pixel.blue) // 6
return blank_img | b2e04d8134b1d295e497b28a43f93223bcaf881f | 11,250 |
import numpy as np
def generalized_euler_solver(descr, coefs, rho0, v0, t, x, bc="periodic", num_integrator_steps=1, fix_vvx_term=True):
"""Solver for Euler hydro system.
Builds RHS of the Euler equation Dv_t = f(...) from symbolic description.
"""
t_pde = np.linspace(t[0], t[-1], len(t)*num_integrator_steps) # Create a refined t-grid
nt, nx = len(t), len(x)
rho_ev, v_ev = np.zeros((len(t), nx)), np.zeros((len(t), nx))
rho, v = rho0, v0
dt = t_pde[1] - t_pde[0]
dx = x[1] - x[0]
for it, t in enumerate(t_pde):
rhox = FiniteDiff(rho, dx, 1, bc)
vx = FiniteDiff(v, dx, 1, bc)
rho_t = -(rhox*v + vx*rho)
rho_next = rho + dt*rho_t
# Add RHS terms to Dt(v) = v_t+v*v_x = f (...)
f = np.sum([coefs[i]*get_euler_term_from_descr(descr_i, rho, v, x) \
for i, descr_i in enumerate(descr)], axis=0)
v_t = f
if fix_vvx_term:
v_t -= v*vx
v_next = v + dt*v_t # D_t(v) = f(rho, v, ...)
step = it // num_integrator_steps
if it % num_integrator_steps == 0:
rho_ev[step, :] = rho.copy()
v_ev[step, :] = v.copy()
rho = rho_next.copy()
v = v_next.copy()
if np.isnan(np.sum(rho)):
# Solution exploded, interrupt
return np.array([np.nan]), np.array([np.nan])
return rho_ev, v_ev | 5bc1fefbb3c1e13da37d2e37fdefa1be44812a36 | 11,251 |
def pick_unassigned_variable(board, strategy, unassigned_heap):
"""
:returns: (row_index, col_index)
"""
if strategy == Strategies.FIRST_FOUND:
return __pick_unassigned_variable_first_found(board)
elif strategy == Strategies.MIN_ROW:
return __pick_unassigned_variable_min_row(board)
else:
(rowi, coli) = (-1, -1)
if strategy == Strategies.MIN_HEAP:
(rowi, coli) = __pick_unassigned_variable_heap(board, unassigned_heap)
else:
(rowi, coli) = __pick_unassigned_variable_heap_2(board, unassigned_heap)
# update the heap
unassigned_heap["row"][rowi] -= 1
unassigned_heap["col"][coli] -= 1
ssi = get_subsquare_index((rowi, coli))
unassigned_heap["subsquare"][ssi] -= 1
return (rowi, coli) | a96bb4d5e047f44fb79dd2eae68532c8e54296d4 | 11,254 |
def CONTAINS_INTS_FILTER(arg_value):
"""Only keeps int sequences or int tensors."""
return arg_value.elem_type is int or arg_value.has_int_dtypes() | c4452c5e6bbd9ead32359d8638a6bf1e49b600ba | 11,255 |
def pad_renderable(renderable, offset):
"""
Pad a renderable, subject to a particular truncation offset.
"""
if offset < 0:
raise Exception("invalid offset!")
if offset == 0:
return RenderGroup(_RULE, Padding(renderable, 1))
if offset == 1:
return Padding(renderable, 1)
else:
return Padding(renderable, (0, 1, 1, 1)) | eed05f632e00f8cb8a2539f59402c4c200159f4c | 11,256 |
import frappe.modules
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
"""Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.
:param module: Module name.
:param dt: DocType name.
:param dn: Document name.
:param force: Reload even if `modified` timestamp matches.
"""
return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions) | c6f68a63433010f2a0270fdc0f373e96e63001c5 | 11,257 |
def create_api_app(global_conf, **local_conf):
"""Creates MainAPI application"""
controllers = {}
api_version = global_conf.get('api_version')
if api_version == 'v2.0':
controllers.update({
'/log/single': v2_logs.Logs()
})
elif api_version == 'v3.0':
controllers.update({
'/logs': v3_logs.Logs()
})
wsgi_app = falcon.API(
request_type=request.Request
)
for route, ctrl in controllers.items():
wsgi_app.add_route(route, ctrl)
error_handlers.register_error_handlers(wsgi_app)
return wsgi_app | 1ec0f155dfdc482fa6b1b41afb18d037636fc9ba | 11,259 |
import numpy as np
def gauss_4deg(x, b, ampl, cent, sigm):
""" Simple 4 parameter Gaussian
Args:
x
b (float): Floor
ampl (float): Amplitude
cent (float): Centroid
sigm (float): sigma
Returns:
float or ndarray: Evaluated Gaussian
"""
return b + ampl*np.exp(-1.*(cent-x)**2/2/sigm**2) | 42f58844c91423220176b0f93ead4a4fd6dbd608 | 11,260 |
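A quick evaluation sketch for gauss_4deg on a grid, using the numpy import added above; the parameter values are arbitrary.

x = np.linspace(-5, 5, 101)
y = gauss_4deg(x, b=0.1, ampl=2.0, cent=0.0, sigm=1.5)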
import requests
def get_spreads(pair, since):
"""Returns last recent spreads"""
api_command = API_LINK + f'Spreads?pair={pair}&since={since}'
resp = requests.get(api_command).json()
if not resp['error']: # empty
return resp
return resp['error'] | 7e32b1abf2988bd5eeb439d9308a1a6320ba8b87 | 11,261 |
import numpy as np
def quat2expmap(q):
"""
Converts a quaternion to an exponential map
Matlab port to python for evaluation purposes
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/quat2expmap.m#L1
Args
q: 1x4 quaternion, w, x, y, z
Returns
r: 1x3 exponential map
Raises
ValueError if the l2 norm of the quaternion is not close to 1
"""
if (np.abs(np.linalg.norm(q) - 1) > 1e-3):
raise (ValueError, "quat2expmap: input quaternion is not norm 1")
sinhalftheta = np.linalg.norm(q[1:])
coshalftheta = q[0]
r0 = np.divide(q[1:], (np.linalg.norm(q[1:]) + np.finfo(np.float32).eps))
theta = 2 * np.arctan2(sinhalftheta, coshalftheta)
theta = np.mod(theta + 2 * np.pi, 2 * np.pi)
if theta > np.pi:
theta = 2 * np.pi - theta
r0 = -r0
r = r0 * theta
return r | 1b68e2ad62f46402f06c9ba2c459c33d464d84e4 | 11,263 |
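A hedged example for quat2expmap: a 90-degree rotation about the x axis, given as a unit quaternion (w, x, y, z), should map to roughly [pi/2, 0, 0].

q = np.array([np.cos(np.pi / 4), np.sin(np.pi / 4), 0.0, 0.0])
print(quat2expmap(q))  # approx. [1.5708, 0., 0.]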
def candidate_results_for_race_type(result_form, race_type, num_results=None):
"""Return the candidates and results for a result form and race type.
:param result_form: The result form to return data for.
:param race_type: The race type to get results for, get component results
if this is None.
:param num_results: Enforce a particular number of results, default None.
:returns: A list of tuples containing the candidate and all results for
that candidate.
"""
return get_candidates(get_results_for_race_type(result_form, race_type),
num_results) | 22bff04bd0b59b6c566f792794119929913efd42 | 11,265 |
def sort_2metals(metals):
"""
Handles iterable or string of 2 metals and returns them
in alphabetical order
Args:
metals (str || iterable): two metal element names
Returns:
(tuple): element names in alphabetical order
"""
# return None's if metals is None
if metals is None:
return None, None
if isinstance(metals, str):
if len(metals) != 4:
raise ValueError('str can only have two elements.')
metal1, metal2 = sorted([metals[:2], metals[2:]])
else:
metal1, metal2 = sorted(metals)
return metal1.title(), metal2.title() | dab922797a6c7b94d6489d8fc4d9c1d99f3ee35c | 11,266 |
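Examples for sort_2metals covering both accepted input forms.

print(sort_2metals("AgCu"))        # ('Ag', 'Cu')
print(sort_2metals(("cu", "ag")))  # ('Ag', 'Cu')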
def parse_join(tables, operation, left, right):
"""
Parses a join from the where clause
"""
# Verify Left
table_name = left['column']['table']
column_name = left['column']['name']
# If table and column, check that table for presense
if table_name is not None and not tables[table_name].has_column(column_name):
write_error("ERROR: Column reference \"{}\" does not exist in table \"{}\"", column_name, table_name)
# If no table, check for ambiguous column
present_in_tables = { key for (key, value) in tables.iteritems() if value.has_column(column_name)}
if table_name is None and len(present_in_tables) > 1:
write_error("ERROR: Column reference \"{}\" is ambiguous; present in multiple tables: {}.", column_name, ", ".join(present_in_tables))
if len(present_in_tables) == 1:
table_name = present_in_tables.pop()
column_location = tables[table_name].column_location(column_name)
left = (table_name, column_name, column_location)
# Verify Right
table_name = right['column']['table']
column_name = right['column']['name']
# If table and column, check that table for presense
if table_name is not None and not tables[table_name].has_column(column_name):
write_error("ERROR: Column reference \"{}\" does not exist in table \"{}\"", column_name, table_name)
# If no table, check for ambiguous column
present_in_tables = { key for (key, value) in tables.iteritems() if value.has_column(column_name)}
if table_name is None and len(present_in_tables) > 1:
write_error("ERROR: Column reference \"{}\" is ambiguous; present in multiple tables: {}.", column_name, ", ".join(present_in_tables))
if len(present_in_tables) == 1:
table_name = present_in_tables.pop()
column_location = tables[table_name].column_location(column_name)
right = (table_name, column_name, column_location)
# Are join types compatible
if tables[left[0]].column_type(left[1]) != tables[right[0]].column_type(right[1]):
write_error("ERROR: Column join types are incompatible.")
return Join(operation, left, right) | 1d5f912dfe634c21dea7640ad12271e4ac34b277 | 11,267 |
def address_id_handler(id):
"""
GET - called as /addresses/25
PUT - called to update as /addresses/25?address='abc'&lat=25&lon=89
DELETE - called as /addresses/25
:param id:
:return:
"""
if request.method == 'GET':
return jsonify(read_address(session, address_id=id))
elif request.method == 'PUT':
address = request.form.get('address','dummy')
lat = request.form.get('lat',0.1)
lon = request.form.get('lon',0.1)
update_address(session, address_id=id, search_string=address, lat=lat, lon=lon)
return jsonify({'success': True})
elif request.method == 'DELETE':
delete_address(session, id) | 9bb3fd813842aac0262417dcb59e4b0cffadc1f0 | 11,268 |
from neptune_mlflow.sync import sync as run_sync
def sync(path, project):
"""Upload mlflow runs data to Neptune.
PATH is a directory where Neptune will look for `mlruns` directory with mlflow data.
Examples:
neptune mlflow .
neptune mlflow /path
neptune mlflow /path --project username/sandbox
"""
# We do not want to import anything if process was executed for autocompletion purposes.
return run_sync(path=path, project=project) | 5fb258969ea02a93774b2fb350c3d3fc3d436752 | 11,269 |
import numpy as np
def prefix_search_heuristic_split(mat: np.ndarray, chars: str) -> str:
"""Prefix search decoding with heuristic to speed up the algorithm.
Speed up prefix computation by splitting sequence into subsequences as described by Graves (p66).
Args:
mat: Output of neural network of shape TxC.
chars: The set of characters the neural network can recognize, excluding the CTC-blank.
Returns:
The decoded text.
"""
blank_idx = len(chars)
max_T, _ = mat.shape
# split sequence into 3 subsequences, splitting points should be roughly placed at 1/3 and 2/3
split_targets = [int(max_T * 1 / 3), int(max_T * 2 / 3)]
best = [{'target': s, 'bestDist': max_T, 'bestIdx': s} for s in split_targets]
# find good splitting points (blanks above threshold)
thres = 0.9
for t in range(max_T):
for b in best:
if mat[t, blank_idx] > thres and abs(t - b['target']) < b['bestDist']:
b['bestDist'] = abs(t - b['target'])
b['bestIdx'] = t
break
# splitting points plus begin and end of sequence
ranges = [0] + [b['bestIdx'] for b in best] + [max_T]
# do prefix search for each subsequence and concatenate results
res = ''
for i in range(len(ranges) - 1):
beg = ranges[i]
end = ranges[i + 1]
res += prefix_search(mat[beg: end, :], chars)
return res | c7894012ff1fe4d2ef0baab6bb0375dc2167ea58 | 11,270 |
def _make_set_permissions_url(calendar_id, userid, level):
"""
:return: the URL string for GET request call
to Trumba SetPermissions method
"""
return "{0}?CalendarID={1}&Email={2}@uw.edu&Level={3}".format(
set_permission_url_prefix, calendar_id, userid, level) | 669a64f8a17d87777dea3d007d690b132d8cb266 | 11,271 |
import tensorflow as tf
def pack_inputs(inputs):
"""Pack a list of `inputs` tensors to a tuple.
Args:
inputs: a list of tensors.
Returns:
a tuple of tensors. if any input is None, replace it with a special constant
tensor.
"""
inputs = tf.nest.flatten(inputs)
outputs = []
for x in inputs:
if x is None:
outputs.append(tf.constant(0, shape=[], dtype=tf.int32))
else:
outputs.append(x)
return tuple(outputs) | dcb26dd286a88288cf9afb824a69d468188436be | 11,272 |
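# Usage sketch (illustrative addition, not from the original source; assumes
# TensorFlow 2.x eager execution): None entries are replaced by a scalar int32
# placeholder so callers always receive concrete tensors.
_packed = pack_inputs([tf.constant([1, 2]), None])
assert len(_packed) == 2 and int(_packed[1]) == 0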
from datetime import datetime
def datetime_without_seconds(date: datetime) -> datetime:
"""
Returns given datetime with seconds and microseconds set to 0
"""
return date.replace(second=0, microsecond=0) | de30c7770d84751b555c78e045f37783030d8970 | 11,273 |
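# Usage sketch (illustrative addition, not from the original source):
assert datetime_without_seconds(datetime(2021, 5, 1, 12, 30, 45)) == datetime(2021, 5, 1, 12, 30)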
import copy
def update_br(request):
"""
    Update a boardroom (meeting room).
:param request:
:return:
"""
if request.method == 'POST':
dbs = request.dbsession
app_path = request.registry.settings['app_path']
br = dbs.query(HasBoardroom).filter(HasBoardroom.id == request.POST.get('br_id', 0)).first()
old_br = copy.deepcopy(br)
new_name = request.POST.get('br_name', '')
if old_br.name != new_name:
msg = check_brm_name(dbs, room_name=request.POST.get('br_name', ''), org_id=request.POST.get('org_id', 0))
if not msg:
br.name = new_name
else:
return {
'resultFlag': 'failed',
'error_msg': msg
}
br.org_id = request.POST.get('org_id', 0)
br.config = request.POST.get('br_config', '')
br.description = request.POST.get('br_desc', '')
room_pic = request.POST.get('room_pic', '')
if room_pic:
room_pic = request.session['#room_pic']
br.picture = IMG_RPATH + str(br.org_id) + '/' + room_pic
room_logo1 = request.POST.get('room_logo1', '')
if room_logo1:
room_logo1 = request.session['#room_logo1']
br.logo1 = IMG_RPATH + str(br.org_id) + '/' + room_logo1
room_logo2 = request.POST.get('room_logo2', '')
if room_logo2:
room_logo2 = request.session['#room_logo2']
br.logo2 = IMG_RPATH + str(br.org_id) + '/' + room_logo2
room_btn = request.POST.get('room_btn', '')
if room_btn:
room_btn = request.session['#room_btn']
br.button_img = IMG_RPATH + str(br.org_id) + '/' + room_btn
room_bgd = request.POST.get('room_bgd', '')
if room_bgd:
room_bgd = request.session['#room_bgd']
br.background = IMG_RPATH + str(br.org_id) + '/' + room_bgd
br.state = request.POST.get('state', 1)
org_id = br.org_id
if old_br.org_id != int(org_id):
update_pic(old_br, br)
new_br = copy.deepcopy(br)
msg = update(dbs, br)
if not msg:
if room_pic:
delete_pic(old_br.picture, app_path)
move_pic(room_pic, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.picture, new_br.picture, app_path)
if room_logo1:
delete_pic(old_br.logo1, app_path)
move_pic(room_logo1, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.logo1, new_br.logo1, app_path)
if room_logo2:
delete_pic(old_br.logo2, app_path)
move_pic(room_logo2, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.logo2, new_br.logo2, app_path)
if room_btn:
delete_pic(old_br.button_img, app_path)
move_pic(room_btn, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.button_img, new_br.button_img, app_path)
if room_bgd:
delete_pic(old_br.background, app_path)
move_pic(room_bgd, org_id, app_path)
elif old_br.org_id != int(org_id):
move_piv_org(old_br.background, new_br.background, app_path)
json_str = {
'resultFlag': 'failed' if msg else 'success',
'error_msg': msg
}
HyLog.log_update(request.client_addr, request.session['userAccount'],
request.POST.get('br_id') + ' failed' if msg else 'success', 'boardroom')
return json_str
return {} | 46fa2c9cb20f7335b539f2a4e77f566e93e6bf7c | 11,274 |
def find_north_pole(valid_rooms):
"""
Decode the room names and find the north pole.
Args:
valid_rooms (list): A list of valid rooms to decode/search.
Returns:
tuple
"""
global NORTH_POLE_NAME
for room in valid_rooms:
room_name, sector_id, checksum = room
decoded_name = decode_room_name(room_name, sector_id)
if decoded_name == NORTH_POLE_NAME:
return decoded_name, sector_id | b3da9a66838c024bb6ae68be8104b3e80f79d0d7 | 11,275 |
def cli(gene1, gene2, gene3, frameness, keep_exon, fusion_fraction,
add_insertion, total_coverage, output, common_filename):
"""[Simulator] Fusion generator."""
normal_coverage = total_coverage * (1. - fusion_fraction)
fusion_coverage = total_coverage * fusion_fraction
normal_ref = generate_normal_reference([gene1, gene2, gene3], output, common_filename)
fusion_ref = generate_fusion_reference([gene1, gene2, gene3], output,
keep_exon, frameness, add_insertion, common_filename)
normal_fastq = generate_fastq(normal_ref, output, 'normal', normal_coverage)
fusion_fastq = generate_fastq(fusion_ref, output, 'fusion', fusion_coverage)
merged1, merged2 = merge_fastq(normal_fastq, fusion_fastq, output, common_filename)
# chimerascan_bedpe = run_chimerascan(merged1, merged2, output)
# print chimerascan_bedpe
# generate_manifest(merged1, merged2)
# run_detango(merged1, merged2, output)
return merged1, merged2 | ed52c518bf3c88623510fa07996fdec205936f4a | 11,276 |
import torch.nn as nn
def build_upsample_layer(cfg, *args, **kwargs):
"""Build upsample layer.
Args:
cfg (dict): The upsample layer config, which should contain:
- type (str): Layer type.
- scale_factor (int): Upsample ratio, which is not applicable to
deconv.
- layer args: Args needed to instantiate a upsample layer.
args (argument list): Arguments passed to the `__init__`
method of the corresponding conv layer.
kwargs (keyword arguments): Keyword arguments passed to the `__init__`
method of the corresponding conv layer.
Returns:
nn.Module: Created upsample layer.
"""
if not isinstance(cfg, dict):
raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
if 'typename' not in cfg:
raise KeyError(
f'the cfg dict must contain the key "typename", but got {cfg}')
cfg_ = cfg.copy()
layer_type = cfg_.pop('typename')
upsample = registry.get(layer_type, 'upsample')
if upsample is nn.Upsample:
cfg_['mode'] = layer_type
layer = upsample(*args, **kwargs, **cfg_)
return layer | 30121657746d3b5ec1f374d339a9d33b48a24724 | 11,277 |
def ctime_ticks(t):
"""This is for backwards compatibility and should not be used."""
return tsc_time.TSC_from_ticks(t).ctime() | 841f879db7e8fa7aa436ceddeb69c76c5d707f17 | 11,278 |
def vitruvian_loss(input, mask, dataset):
"""Vitruvian loss implementation"""
if dataset == "itop":
        # 1 - 2 and 1 - 3 -> neck to shoulders
        # 2 - 4 and 3 - 5 -> shoulder to elbow
        # 4 - 6 and 5 - 7 -> elbow to hand
        # 9 - 11 and 10 - 12 -> hip to knee
        # 11 - 13 and 12 - 14 -> knee to foot
loss = _vitruvian_calculate(input, [1, 2, 1, 3], mask)
loss += _vitruvian_calculate(input, [2, 4, 3, 5], mask)
loss += _vitruvian_calculate(input, [4, 6, 5, 7], mask)
loss += _vitruvian_calculate(input, [9, 11, 10, 12], mask)
loss += _vitruvian_calculate(input, [11, 13, 12, 14], mask)
elif dataset in ("watch_n_patch", "wnp", "watch-n-patch"):
        # 20 - 4 and 20 - 8 -> spine shoulder to shoulders
        # 4 - 5 and 8 - 9 -> shoulder to elbow
        # 5 - 6 and 9 - 10 -> elbow to wrist
        # 6 - 7 and 10 - 11 -> wrist to hand
        # 12 - 0 and 0 - 16 -> hips to spine base
        # 12 - 13 and 16 - 17 -> hip to knee
        # 13 - 14 and 17 - 18 -> knee to ankle
        # 14 - 15 and 18 - 19 -> ankle to foot
limbs = [
[20, 4, 20, 8],
[4, 5, 8, 9],
[5, 6, 9, 10],
[6, 7, 10, 11],
[0, 12, 0, 16],
[12, 13, 16, 17],
[13, 14, 17, 18],
[14, 15, 18, 19],
]
loss = 0.0
for limb in limbs:
loss += _vitruvian_calculate(input, limb, mask)
return loss | b609b28e15a1b53bccfd69d3ac0938be1f6ccee6 | 11,279 |
def LoadElement(href, only_etag=False):
"""
Return an instance of a element as a ElementCache dict
used as a cache.
:rtype ElementCache
"""
request = SMCRequest(href=href)
request.exception = FetchElementFailed
result = request.read()
if only_etag:
return result.etag
return ElementCache(
result.json, etag=result.etag) | 8e47fbc2aa745c97c9b93401302ff13435e8e9e7 | 11,280 |
from typing import Iterable
def xreplace_indices(exprs, mapper, candidates=None, only_rhs=False):
"""
Create new expressions from ``exprs``, by replacing all index variables
specified in mapper appearing as a tensor index. Only tensors whose symbolic
name appears in ``candidates`` are considered if ``candidates`` is not None.
"""
get = lambda i: i.rhs if only_rhs is True else i
handle = flatten(retrieve_indexed(get(i)) for i in as_tuple(exprs))
if candidates is not None:
handle = [i for i in handle if i.base.label in candidates]
mapper = dict(zip(handle, [i.xreplace(mapper) for i in handle]))
replaced = [i.xreplace(mapper) for i in as_tuple(exprs)]
return replaced if isinstance(exprs, Iterable) else replaced[0] | 808918b8852ff23e40f34aa26cbc6433a9bdf102 | 11,282 |
import six
def format_ratio(in_str, separator='/'):
""" Convert a string representing a rational value to a decimal value.
Args:
in_str (str): Input string.
separator (str): Separator character used to extract numerator and
denominator, if not found in ``in_str`` whitespace is used.
Returns:
An integer or float value with 2 digits precision or ``in_str`` if
formating has failed.
>>> format_ratio('48000/1')
48000
>>> format_ratio('24000 1000')
24
>>> format_ratio('24000 1001')
23.98
>>> format_ratio('1,77')
'1,77'
>>> format_ratio(1.77)
1.77
"""
if not isinstance(in_str, six.string_types):
return in_str
try:
sep = separator if separator in in_str else ' '
ratio = in_str.split(sep)
if len(ratio) == 2:
ratio = round(float(ratio[0]) / float(ratio[1]), 2)
else:
ratio = float(ratio[0])
if ratio.is_integer():
ratio = int(ratio)
return ratio
except ValueError:
return in_str | 308ec972df6e57e87e24c26e769311d652118aee | 11,283 |
import numpy as np
import xarray as xr
def fill_tidal_data(da,fill_time=True):
"""
Extract tidal harmonics from an incomplete xarray DataArray, use
those to fill in the gaps and return a complete DataArray.
Uses all 37 of the standard NOAA harmonics, may not be stable
with short time series.
A 5-day lowpass is removed from the harmonic decomposition, and added
back in afterwards.
Assumes that the DataArray has a 'time' coordinate with datetime64 values.
The time dimension must be dense enough to extract an exact time step
If fill_time is True, holes in the time coordinate will be filled, too.
"""
diffs=np.diff(da.time)
dt=np.median(diffs)
if fill_time:
gaps=np.nonzero(diffs>1.5*dt)[0]
pieces=[]
last=0
for gap_i in gaps:
# gap_i=10 means that the 10th diff was too big
# that means the jump from 10 to 11 was too big
# the preceding piece should go through 9, so
# exclusive of gap_i
pieces.append(da.time.values[last:gap_i])
pieces.append(np.arange( da.time.values[gap_i],
da.time.values[gap_i+1],
dt))
last=gap_i+1
pieces.append(da.time.values[last:])
dense_times=np.concatenate(pieces)
dense_values=np.nan*np.zeros(len(dense_times),np.float64)
dense_values[ np.searchsorted(dense_times,da.time.values) ] = da.values
da=xr.DataArray(dense_values,
dims=['time'],coords=[dense_times])
else:
pass
dnums=utils.to_dnum(da.time)
data=da.values
# lowpass at about 5 days, splitting out low/high components
winsize=int( np.timedelta64(5,'D') / dt )
data_lp=filters.lowpass_fir(data,winsize)
data_hp=data - data_lp
valid=np.isfinite(data_hp)
omegas=harm_decomp.noaa_37_omegas() # as rad/sec
harmonics=harm_decomp.decompose(dnums[valid]*86400,data_hp[valid],omegas)
dense=harm_decomp.recompose(dnums*86400,harmonics,omegas)
data_recon=utils.fill_invalid(data_lp) + dense
data_filled=data.copy()
missing=np.isnan(data_filled)
data_filled[missing] = data_recon[missing]
fda=xr.DataArray(data_filled,coords=[da.time],dims=['time'])
return fda | fbb881ca3a47778c8c6ee8b3804953fb5b806b4e | 11,284 |
import numpy as np
def wrapper_unit_scaling(x, T, s_ref, n_gt, *args, **kwargs):
"""Normalize segments to unit-length and use center-duration format
"""
xc = segment_format(x, 'b2c')
init_ref = np.repeat(s_ref[:, 0], n_gt)
return segment_unit_scaling(xc, T, init_ref) | 28f29fd81143c1bb12d4e062e680de2643662bd1 | 11,285 |
import copy
import logging
import time
import torch
from fairseq import utils
from omegaconf import open_dict
from tqdm import tqdm
logger = logging.getLogger(__name__)  # assumed module-level logger for the logger.info call below
def fairseq_generate(data_lines, args, models, task, batch_size, beam_size, device):
"""beam search | greedy decoding implemented by fairseq"""
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
gen_args = copy.copy(args)
with open_dict(gen_args):
gen_args.beam = beam_size
generator = task.build_generator(models, gen_args)
data_size = len(data_lines)
all_results = []
logger.info(f'Fairseq generate batch {batch_size}, beam {beam_size}')
start = time.perf_counter()
for start_idx in tqdm(range(0, data_size, batch_size)):
batch_lines = [line for line in data_lines[start_idx: min(start_idx + batch_size, data_size)]]
batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines]
lengths = torch.LongTensor([t.numel() for t in batch_ids])
batch_dataset = task.build_dataset_for_inference(batch_ids, lengths)
batch_dataset.left_pad_source = True
batch = batch_dataset.collater(batch_dataset)
batch = utils.apply_to_sample(lambda t: t.to(device), batch)
translations = generator.generate(models, batch, prefix_tokens=None)
results = []
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
batched_hypos = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
all_results.extend([tgt_dict.string(hypos[0]['tokens']) for hypos in batched_hypos])
delta = time.perf_counter() - start
remove_bpe_results = [line.replace('@@ ', '') for line in all_results]
return remove_bpe_results, delta | a412f619f79f39f77d6df83cefbbe38170d77109 | 11,286 |
def retrieve_features(dataframe):
"""
Retrieves features (X) from dataframe
:param dataframe:
:return:
"""
return list(dataframe["tweet"]) | 69118d6d0b9503500f6fa4b24fb844af4ff25644 | 11,287 |
def atarzia_short_MD_settings():
"""My default settings for short, crude cage optimizations in stk.
Modified on 26/04/19.
"""
Settings = {
'output_dir': None,
'timeout': None,
'force_field': 16,
'temperature': 700, # K
'conformers': 50,
'time_step': 1, # fs
'eq_time': 50, # ps
'simulation_time': 1000, # ps -- 1 ns
'maximum_iterations': 2500,
'minimum_gradient': 0.05,
'use_cache': False
}
return Settings | 490a59e9551a324b571d4f67c4e7885144f2fe77 | 11,288 |
import pandas as pd
def result_by_score_from_csv(f, score, ascending=True):
"""Return result with the best defined score"""
df = pd.read_csv(f)
    df.sort_values(score, ascending=ascending, inplace=True)
    df.reset_index(drop=True, inplace=True)  # row 0 must be the best-ranked entry for .loc[0] below
    return df.loc[0, ["pdb_code", score]].tolist() | ba41c6cfc26d830685c43265eaa21c496173311e | 11,289
from typing import Mapping
from typing import Tuple
def get_default_hand_connection_style(
) -> Mapping[Tuple[int, int], DrawingSpec]:
"""Returns the default hand connection drawing style.
Returns:
A mapping from each hand connection to the default drawing spec.
"""
hand_connection_style = {}
for k, v in _HAND_CONNECTION_STYLE.items():
for connection in k:
hand_connection_style[connection] = v
return hand_connection_style | 7cbc020f746e2dacd31664f9ddbe71fb98fc1942 | 11,291 |
def lists_to_html_table(a_list):
"""
Converts a list of lists to a HTML table. First list becomes the header of the table.
Useful while sending email from the code
:param list(list) a_list: values in the form of list of lists
:return: HTML table representation corresponding to the values in the lists
:rtype: str
"""
header = "<tr><th>%s</th></tr>" % ("</th><th>".join(a_list[0]))
body = ""
if len(a_list) > 1:
for sub_list in a_list[1:]:
body += "<tr><td>%s</td></tr>\n" % ("</td><td>".join(sub_list))
return "<table>%s\n%s</table>" % (header, body) | cc244ec7f0bccedba2bb3bd66e29b3f43160f8c1 | 11,292 |
import numpy as np
def from_matrix_vector(matrix, vector):
"""Combine a matrix and vector into a homogeneous transform.
Combine a rotation matrix and translation vector into a transform
in homogeneous coordinates.
Parameters
----------
matrix : ndarray
An NxM array representing the the linear part of the transform
a transform from an M-dimensional space to an N-dimensional space.
vector : ndarray
A 1xN array representing the translation.
Returns
-------
xform : ndarray
An N+1xM+1 transform matrix.
See Also
--------
to_matrix_vector
"""
nin, nout = matrix.shape
t = np.zeros((nin+1,nout+1), matrix.dtype)
t[0:nin, 0:nout] = matrix
t[nin, nout] = 1.
t[0:nin, nout] = vector
return t | d4d49a4217d82c93b77aa1b50fc7a8d70875d11a | 11,293 |
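# Usage sketch (illustrative addition, not from the original source): combine a
# 3x3 rotation with a translation into a 4x4 homogeneous transform.
_rotation = np.eye(3)
_translation = np.array([1.0, 2.0, 3.0])
_xform = from_matrix_vector(_rotation, _translation)
assert _xform.shape == (4, 4) and np.allclose(_xform[0:3, 3], _translation)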
from bisect import bisect_left
def take_closest(myList, myNumber):
"""
Assumes myList is sorted. Returns closest value to myNumber.
If two numbers are equally close, return the smallest number.
"""
pos = bisect_left(myList, myNumber)
if pos == 0:
return myList[0]
if pos == len(myList):
return myList[-1]
before = myList[pos - 1]
after = myList[pos]
if after - myNumber < myNumber - before:
return after
else:
return before | 6dea2edfb83bb8d78e7140d9bcc9b8f30441a3bf | 11,294 |
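# Usage sketch (illustrative addition, not from the original source): the input
# list must already be sorted in ascending order.
_sorted_values = [1, 4, 9, 16, 25]
assert take_closest(_sorted_values, 5) == 4      # 4 is closer to 5 than 9
assert take_closest(_sorted_values, 100) == 25   # clamped to the last element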
def check_host(host):
""" Helper function to get the hostname in desired format """
if not ('http' in host and '//' in host) and host[len(host) - 1] == '/':
return ''.join(['http://', host[:len(host) - 1]])
elif not ('http' in host and '//' in host):
return ''.join(['http://', host])
elif host[len(host) - 1] == '/':
return host[:len(host) - 1]
else:
return host | 0d035f616ce539f0a822aa1426cf3cd0eb766d04 | 11,295 |
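# Usage sketch (illustrative addition, not from the original source): hosts are
# normalised to an "http://" prefix with no trailing slash.
assert check_host("example.com/") == "http://example.com"
assert check_host("example.com") == "http://example.com"
assert check_host("http://example.com/") == "http://example.com"
assert check_host("http://example.com") == "http://example.com"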
from datetime import datetime
def task_time_slot_add(request, task_id, response_format='html'):
"""Time slot add to preselected task"""
task = get_object_or_404(Task, pk=task_id)
if not request.user.profile.has_permission(task, mode='x'):
return user_denied(request, message="You don't have access to this Task")
if request.POST:
task_time_slot = TaskTimeSlot(
task=task, time_to=datetime.now(), user=request.user.profile)
form = TaskTimeSlotForm(
request.user.profile, task_id, request.POST, instance=task_time_slot)
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
elif form.is_valid():
task_time_slot = form.save()
task_time_slot.set_user_from_request(request)
return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
else:
form = TaskTimeSlotForm(request.user.profile, task_id)
subtasks = Object.filter_by_request(
request, Task.objects.filter(parent=task))
time_slots = Object.filter_by_request(
request, TaskTimeSlot.objects.filter(task=task))
context = _get_default_context(request)
context.update({'form': form,
'task': task,
'subtasks': subtasks,
'time_slots': time_slots})
return render_to_response('projects/task_time_add', context,
context_instance=RequestContext(request), response_format=response_format) | 250fbb87a80f2c2431b5e95b7c437f0b9562a1bc | 11,296 |
from typing import Any
def get_artist_names(res: dict[str, Any]) -> str:
"""
Retrieves all artist names for a given input to the "album" key of a response.
"""
artists = []
for artist in res["artists"]:
artists.append(artist["name"])
artists_str = ", ".join(artists)
return artists_str | 2913c813e7e6097cb2cb3d3dfb84f831bbc0a6e7 | 11,297 |
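# Usage sketch (illustrative addition, not from the original source): expects a
# Spotify-style "album" payload containing an "artists" list of {"name": ...}.
_album = {"artists": [{"name": "Daft Punk"}, {"name": "Pharrell Williams"}]}
assert get_artist_names(_album) == "Daft Punk, Pharrell Williams"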
from functools import wraps
from django.http import JsonResponse
def json_response(func):
"""
View decorator function that converts the dictionary response
returned by a view function to django JsonResponse.
"""
@wraps(func)
def func_wrapper(request, *args, **kwargs):
func_response = func(request, *args, **kwargs)
status_code = func_response.get('status_code', 200)
return JsonResponse(func_response, status=status_code)
return func_wrapper | ad4a304b9e1434d7d0832fc9b535e2fd37228ad8 | 11,298 |
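# Usage sketch (illustrative addition, not from the original source; assumes a
# Django view): the decorated view returns a plain dict, and an optional
# "status_code" key controls the HTTP status of the JsonResponse.
@json_response
def health_view(request):
    return {"status": "ok", "status_code": 200}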
import numpy as np
import pandas as pd
def clean_df(df):
    """return : pandas.core.frame.DataFrame"""
    df.index = pd.DatetimeIndex(df.comm_time)
    df = df.sort_index()
    df = df[~(np.abs(df.com_per - df.com_per.mean()) > (3 * df.com_per.std()))]  # drop rows more than three standard deviations from the mean; per-capita calculations use df2
    df = df.drop('_id', axis=1)
df = df.drop_duplicates()
return df | 055f0efcc79e0e551620a602cb8d9e8244d58a7e | 11,299 |
from typing import Iterator
from typing import Tuple
from typing import Any
import itertools
def _nonnull_powerset(iterable) -> Iterator[Tuple[Any]]:
"""Returns powerset of iterable, minus the empty set."""
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(1, len(s) + 1)) | ad02ab8ac02004adb54310bc639c6e2d84f19b02 | 11,300 |
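# Usage sketch (illustrative addition, not from the original source): every
# non-empty subset, shortest subsets first.
assert list(_nonnull_powerset(["a", "b"])) == [("a",), ("b",), ("a", "b")]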
import yaml
def _parse_obs_status_file(filename):
"""
Parse a yaml file and return a dictionary.
The dictionary will be of the form: {'obs': [], 'bad': [], 'mags: []}
:param filename:
:return:
"""
with open(filename) as fh:
status = yaml.load(fh, Loader=yaml.SafeLoader)
if 'obs' not in status:
status['obs'] = []
if 'bad' not in status:
status['bad'] = []
if 'mags' not in status:
status['mags'] = []
if hasattr(status['bad'], 'items'):
status['bad'] = list(status['bad'].items())
return status | 389fc921867367964001e5fc2f56a7fa7defd7c8 | 11,301 |
def extract_optional_suffix(r):
"""
a | a b -> a b?
"""
modified = False
    def match_replace_fn(o):
        nonlocal modified  # propagate the flag assigned below back to extract_optional_suffix's scope
if isinstance(o, Antlr4Selection):
potential_prefix = None
potential_prefix_i = None
to_remove = []
for i, c in enumerate(o):
if potential_prefix is None:
potential_prefix = c
potential_prefix_i = i
else:
# check if the potential_prefix is really a prefix of this rule
is_prefix, suffix = is_prefix_of_elem(potential_prefix, c)
if is_prefix:
# put suffix as a optional to a prefix
if list(iter_non_visuals(suffix)):
if not isinstance(potential_prefix, Antlr4Sequence):
assert o[potential_prefix_i] is potential_prefix
potential_prefix = Antlr4Sequence([potential_prefix, ])
o[potential_prefix_i] = potential_prefix
if len(suffix) == 1:
suffix = suffix[0]
else:
suffix = Antlr4Sequence(suffix)
potential_prefix.append(Antlr4Option(suffix))
to_remove.append(c)
potential_prefix = None
potential_prefix_i = None
modified = True
else:
potential_prefix = c
potential_prefix_i = i
for c in to_remove:
o.remove(c)
if len(o) == 1:
return Antlr4Sequence([o[0], ])
replace_item_by_sequence(r, match_replace_fn)
return modified | a1d4e6702ed1b23e0f94a44e5bea6fae16b47e17 | 11,302 |
def _heading_index(config, info, token, stack, level, blockquote_depth):
"""Get the next heading level, adjusting `stack` as a side effect."""
# Treat chapter titles specially.
if level == 1:
return tuple(str(i) for i in stack)
# Moving up
if level > len(stack):
if (level > len(stack) + 1) and (blockquote_depth == 0):
err(f"Heading {level} out of place", info, token)
while len(stack) < level:
stack.append(1)
# Same level
elif level == len(stack):
stack[-1] += 1
# Going down
else:
while len(stack) > level:
stack.pop()
stack[-1] += 1
# Report.
return tuple(str(i) for i in stack) | f42dd5c6aae942da687310d7bef81f70bfadad83 | 11,303 |
from typing import Dict
from typing import List
from typing import Union
Data = Dict[str, Dict[str, Union[str, float]]]  # alias inferred from the docstring below
def sin_salida_naive(vuelos: Data) -> List[str]:
    """Return the list of airports that have received flights
    but from which no flights have departed.
    :param vuelos: Flight information.
    :vuelos type: Dict[str, Dict[str, Union[str, float]]]
    :return: List of airports
    :rtype: List[str]
    """
salidas, llegadas, aeropuertos = [], [], []
for vuelo in vuelos.values():
salidas.append(vuelo['origen'])
llegadas.append(vuelo['destino'])
for aeropuerto in llegadas:
if aeropuerto not in salidas:
aeropuertos.append(aeropuerto)
return aeropuertos | 136b7c1e3428cecee5d3bc7046fac815276288e5 | 11,304 |
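# Usage sketch (illustrative addition, not from the original source): minimal
# flight data using the "origen"/"destino" keys the function expects.
_vuelos = {
    "AV100": {"origen": "BOG", "destino": "MDE"},
    "AV200": {"origen": "MDE", "destino": "CTG"},
}
assert sin_salida_naive(_vuelos) == ["CTG"]  # CTG receives a flight but none depart from it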
def converter(doc):
"""
This is a function for converting various kinds of objects we see
inside a graffle document.
"""
if doc.nodeName == "#text":
return str(doc.data)
elif doc.nodeName == "string":
return str(doc.firstChild.data)
elif doc.nodeName == 'integer':
return int(doc.firstChild.data)
elif doc.nodeName == 'real':
return float(doc.firstChild.data)
elif doc.nodeName == 'dict':
return convert_dict(doc)
elif doc.nodeName == 'array':
return convert_list(doc)
elif doc.nodeName == 'plist':
return convert_list(doc)
else:
return 'unknown:' + doc.nodeName | 8820aa739f4b96251033c191ea405157cfe1e9fb | 11,305 |
def printable_cmd(c):
"""Converts a `list` of `str`s representing a shell command to a printable
`str`."""
return " ".join(map(lambda e: '"' + str(e) + '"', c)) | b5e8a68fc535c186fdbadc8a669ed3dec0da3aee | 11,306 |
def details_from_params(
params: QueryParams,
items_per_page: int,
items_per_page_async: int = -1,
) -> common.Details:
"""Create details from request params."""
try:
page = int(params.get('page', 1))
except (ValueError, TypeError):
page = 1
try:
anchor = int(params.get('anchor', 1))
except (ValueError, TypeError):
anchor = -1
return common.Details(
page=max(1, page),
anchor=anchor,
items_per_page=items_per_page,
items_per_page_async=items_per_page_async,
) | 50e20619bc4f32af6811a3416b2d2f93820ba44a | 11,307 |
from typing import List
from typing import Mapping
from typing import Text
from typing import Tuple
def _signature_pre_process_predict(
    signature: _SignatureDef
) -> Tuple[List[Text], Mapping[Text, Text], Mapping[Text, Text]]:
  """Returns input tensor names, input tensor types and output alias tensor
  names from signature.
  Args:
    signature: SignatureDef
  Returns:
    A tuple of input tensor names, input tensor types and output alias tensor
    names.
"""
input_tensor_names = [value.name for value in signature.inputs.values()]
input_tensor_types = dict([
(key, value.dtype) for key, value in signature.inputs.items()
])
output_alias_tensor_names = dict([
(key, output.name) for key, output in signature.outputs.items()
])
return input_tensor_names, input_tensor_types, output_alias_tensor_names | a53af746f7cebc7c3baaa316458dd3e7b88c2c38 | 11,309 |
def style_95_read_mode(line, patterns):
"""Style the EAC 95 read mode line."""
# Burst mode doesn't have multiple settings in one line
if ',' not in line:
return style_setting(line, 'bad')
split_line = line.split(':', 1)
read_mode = split_line[0].rstrip()
line = line.replace(read_mode, '<span class="log5">{}</span>'.format(read_mode), 1)
parts = split_line[1].lstrip().split(' ', 1)
parts[1:] = [part.strip() for part in parts[1].split(',')]
num = 0
p = patterns['95 settings']
for setting in [
p['Read mode'],
p['C2 pointers'],
p['Accurate stream'],
p['Audio cache'],
]:
if num == len(parts):
break
class_ = 'good' if setting in line else 'bad'
line = line.replace(
parts[num], '<span class="{}">{}</span>'.format(class_, parts[num]), 1
)
num += 1
return line | 25c347acb87702f19ebcea85a3b4f0257df101ae | 11,310 |
import pandas as pd
import talib as t  # assumed: the TA-Lib wrapper providing TRANGE
def trange(
client, symbol, timeframe="6m", highcol="high", lowcol="low", closecol="close"
):
"""This will return a dataframe of true range for the given symbol across
the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
trange = t.TRANGE(df[highcol].values, df[lowcol].values, df[closecol].values)
return pd.DataFrame(
{
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"trange": trange,
}
) | 94cf95eb86575a66e015e46fd81a5a8085277255 | 11,311 |
import healpy as hp
def ConvolveUsingAlm(map_in, psf_alm):
"""Convolve a map using a set of pre-computed ALM
Parameters
----------
map_in : array_like
HEALPix map to be convolved
psf_alm : array_like
The ALM represenation of the PSF
Returns
-------
map_out : array_like
The smeared map
"""
norm = map_in.sum()
nside = hp.pixelfunc.npix2nside(map_in.size)
almmap = hp.sphtfunc.map2alm(map_in)
almmap *= psf_alm
outmap = hp.sphtfunc.alm2map(almmap, nside)
outmap *= norm / outmap.sum()
return outmap | 0ebfa49c605f57ea2cc59b2686da8995dc01881f | 11,312 |