content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def read_key_value(file):
    """Read ``key: value`` pairs from a text file. Supports comment lines and non-ASCII (e.g. Chinese) content."""
    return_dict = {}
    with open(file, encoding='utf-8') as f:
        lines = f.readlines()
    for line in lines:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        key, value = line.split(':', 1)
        return_dict[key.strip()] = value.strip()
    return return_dict | 9fdac43783c066872a05cbd59488add7a2dc54c0 | 19,733 |
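# Usage sketch for read_key_value above (hypothetical file name and contents, not part
# of the original snippet). A settings file might contain lines such as
# "# comment", "host: 127.0.0.1" and "名称: 示例".
config = read_key_value('settings.conf')
print(config.get('host'))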
import numpy as np
def binarize_image(image):
"""Binarize image pixel values to 0 and 255."""
unique_values = np.unique(image)
if len(unique_values) == 2:
if (unique_values == np.array([0., 255.])).all():
return image
mean = image.mean()
image[image > mean] = 255
image[image <= mean] = 0
return image | 6e4a621b0a2ff06d6a6bf5c0eb45f1028e6d526f | 19,734 |
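# Usage sketch for binarize_image above (toy data; numpy assumed to be imported as np).
img = np.array([[10., 200.], [30., 180.]])
print(binarize_image(img))  # pixels above the mean become 255, the rest become 0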
from typing import Type
from _pytest.fixtures import FixtureRequest
from _pytest.pytester import LineMatcher
def LineMatcher_fixture(request: FixtureRequest) -> Type["LineMatcher"]:
"""A reference to the :class:`LineMatcher`.
This is instantiable with a list of lines (without their trailing newlines).
This is useful for testing large texts, such as the output of commands.
"""
return LineMatcher | 86c05df8f099ba66e62ae0bb071b2999cbb4f082 | 19,735 |
def Delay(opts, args):
"""Sleeps for a while
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the duration of the sleep
@rtype: int
@return: the desired exit code
"""
delay = float(args[0])
op = opcodes.OpTestDelay(duration=delay,
on_master=opts.on_master,
on_nodes=opts.on_nodes,
repeat=opts.repeat,
interruptible=opts.interruptible,
no_locks=opts.no_locks)
SubmitOrSend(op, opts)
return 0 | c9ecd6cb3dbdcd5ae48c527f2f6769789b05664d | 19,736 |
def logout():
""" Simply loading the logout page while logged in will log the user out """
logout_user()
return render_template(f"{app_name}/logout.html") | 33191c6870a0aac8fcdebb0349b93196e2ed0ba8 | 19,737 |
from datetime import timedelta
def duration_to_timedelta(obj):
"""Converts duration to timedelta
>>> duration_to_timedelta("10m")
datetime.timedelta(0, 600)
"""
matches = DURATION_PATTERN.search(obj)
matches = matches.groupdict(default="0")
matches = {k: int(v) for k, v in matches.items()}
return timedelta(**matches) | fcfa67e6667b232a6647cb71fff543a45a6d3475 | 19,739 |
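# DURATION_PATTERN is module-level state elided from the snippet above. A minimal regex
# that would satisfy the docstring example (an assumption, not the original definition):
import re
DURATION_PATTERN = re.compile(
    r"((?P<days>\d+)d)?((?P<hours>\d+)h)?((?P<minutes>\d+)m)?((?P<seconds>\d+)s)?"
)
print(duration_to_timedelta("1h30m"))  # 1:30:00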
async def create_mock_hlk_sw16_connection(fail):
"""Create a mock HLK-SW16 client."""
client = MockSW16Client(fail)
await client.setup()
return client | 14589398e268a76637994f2883f2cd824a14a81b | 19,740 |
import numpy as np
def inv_dist_weight(distances, b):
"""Inverse distance weight
Parameters
----------
distances : numpy.array of floats
Distances to point of interest
b : float
The parameter of the inverse distance weight. The higher it is,
the greater the influence of nearby stations.
Returns
-------
lambdas : numpy.array of floats
The lambda parameters of the stations
"""
lambdas = 1/distances**b / np.sum(1/distances**b)
return lambdas | c7e857bba312277b193ce5eda7467b8b0bf8bd75 | 19,741 |
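# Usage sketch for inv_dist_weight above (numpy assumed to be imported as np).
d = np.array([1.0, 2.0, 4.0])
lambdas = inv_dist_weight(d, b=2)
print(lambdas, lambdas.sum())  # closer stations get larger weights; the weights sum to 1.0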
import pytz
import pandas as pd
from collections import defaultdict
def load_inferred_fishing(table, id_list, project_id, threshold=True):
"""Load inferred data and generate comparison data
"""
query_template = """
SELECT vessel_id, start_time, end_time, nnet_score FROM
TABLE_DATE_RANGE([{table}],
TIMESTAMP('{year}-01-01'), TIMESTAMP('{year}-12-31'))
WHERE vessel_id in ({ids})
"""
ids = ','.join('"{}"'.format(x) for x in id_list)
ranges = defaultdict(list)
for year in range(2012, 2018):
query = query_template.format(table=table, year=year, ids=ids)
print(query)
for x in pd.read_gbq(query, project_id=project_id).itertuples():
score = x.nnet_score
if threshold:
score = score > 0.5
start = x.start_time.replace(tzinfo=pytz.utc)
end = x.end_time.replace(tzinfo=pytz.utc)
ranges[x.vessel_id].append(FishingRange(score, start, end))
print([(key, len(val)) for (key, val) in ranges.items()])
return ranges | fba7e007b38d141e91c0608cbd609a2d3b474b4b | 19,742 |
from typing import Any
def is_optional(value: Any) -> CheckerReturn:
"""
It is a rather special validator because it never returns False and emits an exception
signal when the value is correct instead of returning True.
Its user should catch the signal to short-circuit the validation chain.
"""
if value is None:
raise exceptions.ValueNotRequired()
return True | 25e45617ca5584dc2470d9e76ef884596c465917 | 19,743 |
from typing import Union
from typing import Tuple
from typing import List
import numpy as np
def approximate_bounding_box_dyn_obstacles(obj: list, time_step=0) -> Union[
Tuple[list], None]:
"""
Compute bounding box of dynamic obstacles at time step
:param obj: All possible objects. DynamicObstacles are filtered.
:return:
"""
def update_bounds(new_point: np.ndarray, bounds: List[list]):
"""Update bounds with new point"""
if new_point[0] < bounds[0][0]:
bounds[0][0] = new_point[0]
if new_point[1] < bounds[1][0]:
bounds[1][0] = new_point[1]
if new_point[0] > bounds[0][1]:
bounds[0][1] = new_point[0]
if new_point[1] > bounds[1][1]:
bounds[1][1] = new_point[1]
return bounds
dynamic_obstacles_filtered = []
for o in obj:
if type(o) == DynamicObstacle:
dynamic_obstacles_filtered.append(o)
elif type(o) == Scenario:
dynamic_obstacles_filtered.extend(o.dynamic_obstacles)
x_int = [np.inf, -np.inf]
y_int = [np.inf, -np.inf]
bounds = [x_int, y_int]
shapely_set = None
for obs in dynamic_obstacles_filtered:
occ = obs.occupancy_at_time(time_step)
if occ is None:
continue
shape = occ.shape
if hasattr(shape, "_shapely_polygon"):
if shapely_set is None:
shapely_set = shape._shapely_polygon
else:
shapely_set = shapely_set.union(shape._shapely_polygon)
elif hasattr(shape, 'center'): # Rectangle, Circle
bounds = update_bounds(shape.center, bounds=bounds)
elif hasattr(shape, 'vertices'): # Polygon, Triangle
v = shape.vertices
bounds = update_bounds(np.min(v, axis=0), bounds=bounds)
bounds = update_bounds(np.max(v, axis=0), bounds=bounds)
if shapely_set is not None:
    envelope_bounds = shapely_set.envelope.bounds
    envelope_bounds = np.array(envelope_bounds).reshape((2, 2))
    bounds = update_bounds(envelope_bounds[0], bounds)
    bounds = update_bounds(envelope_bounds[1], bounds)
if np.inf in bounds[0] or -np.inf in bounds[0] or np.inf in bounds[
1] or -np.inf in bounds[1]:
return None
else:
return tuple(bounds) | 3a8fc28c2a47b50b9d0acc49f0818031e357fffa | 19,744 |
import numpy as np
from sklearn.metrics import accuracy_score
def binary_class_accuracy_score(y_pred, data):
"""LightGBM binary class accuracy-score function.
Parameters
----------
y_pred
LightGBM predictions.
data
LightGBM ``'Dataset'``.
Returns
-------
(eval_name, eval_result, is_higher_better)
``'eval_name'`` : string
is always 'accuracy' - the name of the metric
``'eval_result'`` : float
is the result of the metric
``'is_higher_better'`` : bool
is always 'True' because higher accuracy score is better
See Also
--------
* `sklearn.metrics.accuracy_score: <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html>`
* `LightGBM Training API: <https://lightgbm.readthedocs.io/en/latest/Python-API.html#training-api>`
"""
y_true = data.get_label()
y_pred = np.round(y_pred)
return 'accuracy', accuracy_score(y_true, y_pred), True | 53f68931a96e3d32bed622dae09239ee2b96d762 | 19,746 |
def is_prime(n):
    """Given an integer n, return True if n is prime and False if not.
    """
    if n < 2:
        return False
    return all(n % i for i in range(2, int(n ** 0.5) + 1)) | 17d2d7bdf95a9d3e037e911a3271688013413fb7 | 19,748 |
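# Quick check of is_prime above.
print([n for n in range(2, 20) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19]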
import logging
def get_request_file():
    """
    Method to implement REST API call of GET on address /file
    """
    path = "html/file_get.html"
    try:
        with open(path, "r") as content_file:
            content = content_file.read()
    except OSError:
        logging.info("Could not load source HTML file '%s'", path)
        raise
    return content | 7a084d5455b1797d851388ac40de23c5c35fb381 | 19,752 |
from collections import Counter
from typing import Iterable
def sock_merchant(arr: Iterable[int]) -> int:
"""
>>> sock_merchant([10, 20, 20, 10, 10, 30, 50, 10, 20])
3
>>> sock_merchant([6, 5, 2, 3, 5, 2, 2, 1, 1, 5, 1, 3, 3, 3, 5])
6
"""
count = Counter(arr).values()
ret = sum(n // 2 for n in count)
return ret | 1b3b8d37ccb3494ed774e26a41ebba32c87a632c | 19,753 |
def new_user_registration(email: str) -> dict:
"""Alert the CIDC admin mailing list to a new user registration."""
subject = "New User Registration"
html_content = (
f"A new user, {email}, has registered for the CIMAC-CIDC Data Portal ({ENV}). If you are a CIDC Admin, "
"please visit the accounts management tab in the Portal to review their request."
)
email = {
"to_emails": [CIDC_MAILING_LIST],
"subject": subject,
"html_content": html_content,
}
return email | ee4c57e45d15b8e65bd8e702633d527bc2f6db1f | 19,754 |
def article_detail():
"""文章详情"""
id = request.form.get('id')
if id is None:
raise Exception('ARTICLE_NOT_EXIST')
article = Article.find(id)
if article is None:
raise Exception('ARTICLE_NOT_EXIST')
# Fetch the tags
if article.tags is None:
article.tags = []
else:
all_tags = Tag.find_all({'_id': {'$in': article.tags}})
all_tags = {str(tag._id): {'id': str(tag._id), 'name': tag.name} for tag in all_tags}
article.tags = [all_tags[str(id)] for id in article.tags if str(id) in all_tags]
return {'article': article.filter('title', 'draft', 'tags',
img=lambda article: images.url(article.img) if article.img else '')} | 534cb77384b65cb4b88bedd1a82daeb93766714a | 19,755 |
def add_state_names_column(my_df):
    """
    Add a column of corresponding state names to a dataframe
    Params (my_df) a DataFrame with a column called "abbrev" that has state abbreviations.
    Return a copy of the original dataframe, but with an extra column.
    """
    new_df = my_df.copy()
    names_map = {"CA": "Cali", "CO": "Colorado", "CT": "Connecticut", "NJ": "New Jersey"}
    new_df["name"] = new_df["abbrev"].map(names_map)
    return new_df | 4a9eb49ef2cda11d8135eb33ec43d99422e067b6 | 19,756 |
from glob import glob
def list_subdir_paths(directory):
"""
Generates a list of subdirectory paths
:param directory: str pathname of target parent directory
:return: list of paths for each subdirectory in the target parent
directory
"""
subdir_paths = glob("{}/*/".format(directory))
return subdir_paths | df8ec80096b900ad8ceac3bc013fe47b21b4fd54 | 19,757 |
import math
from collections import defaultdict
def logic_method_with_bkg(plots_per_cycle, cycle_time, sigma_s=160, m=3, n=4):
"""
:param plots_per_cycle:
:param cycle_time:
:param sigma_s:
:param m:
:param n:
:return:
"""
N = plots_per_cycle.shape[0] # number of cycles
tracks = [] # ret
track_cnt = 0
# Slide a window over the cycles
succeed = False
for i in range(2, N - n): # cycle i
if succeed:
break
# Take the sliding window (consecutive cycles)
window = slide_window(plots_per_cycle, n, start_cycle=i, skip_cycle=2)
# ---------- Perform the m/n statistics over the window
# Build the chain of mappings
K = min([cycle_plots.shape[0] for cycle_plots in window]) # minimum common number of plots
mappings = defaultdict(dict)
for j in range(len(window) - 1, 0, -1):
# ----- Build the mapping between adjacent cycles
mapping = matching_plots_nn(window[j], window[j - 1], K)
# -----
if len(set(mapping.values())) != len(set(mapping.keys())):
break
else:
mappings[j] = mapping
if len(mappings) < m: # need at least m cycles of valid data, corresponding to m-1 mappings
continue # slide on to the next window
# Sort the mapping results (by key, in descending order)
mappings = sorted(mappings.items(), key=lambda x: x[0], reverse=True)
# print(mappings)
# Build tentative tracks
for k in range(K): # iterate over each tentative track
# ----- Track state bookkeeping
# Detection count within the window: counted separately for each tentative track
n_pass = 0
# Motion-state record for the window: kept per track (velocity, acceleration, heading deflection angle)
window_states = defaultdict(dict)
# -----
# ----- Collect the plots that make up the tentative track
plot_ids = []
id = -1
# Take the k-th plot id of the first valid cycle in reverse order
keys = mappings[0][1].keys()
keys = sorted(keys, reverse=False) # plot indices of the latest valid cycle of the window, in ascending order
id = keys[k]
plot_ids.append(id)
# Follow the mapping chain to derive the plot ids of the remaining cycles
for (c, mapping) in mappings: # mappings are already sorted by cycle in descending order
id = mapping[id] # back-track the plot id along the mapping chain
plot_ids.append(id)
# print(ids) # ids are ordered by cycle in descending order
# Build the plot chain (the tentative track) from the linked ids
cycle_ids = [c for (c, mapping) in mappings] # cycle numbers in descending order
cycle_ids.extend([mappings[-1][0] - 1])
assert len(cycle_ids) == len(plot_ids)
plots = [window[cycle][plot_id]
for cycle, plot_id in zip(cycle_ids, plot_ids)]
# print(plots)
# Gate tests, one by one, inside the window
# for l, (cycle_id, plot) in enumerate(zip(cycle_ids_to_test, plots_to_test)):
for l in range(len(plots) - 2):
cycle_id = cycle_ids[l]
# Build the plots of three consecutive cycles
# plots_2 = [plots[l + 1], plots[l]]
plots_3 = [plots[l + 2], plots[l + 1], plots[l]]
# plot_plots(plots_2, [cycle_ids[l+1], cycle_ids[l]])
# plot_plots(plots_3, [cycle_ids[l+2], cycle_ids[l+1], cycle_ids[l]])
# Estimate the motion state of the current plot
v, a, angle_in_radians = get_v_a_angle(plots_3, cycle_time)
# v = get_v(plots_2, cycle_time)
# Estimate the heading deflection angle
angle_in_degrees = math.degrees(angle_in_radians)
angle_in_degrees = angle_in_degrees if angle_in_degrees >= 0.0 else angle_in_degrees + 360.0
angle_in_degrees = angle_in_degrees if angle_in_degrees <= 360.0 else angle_in_degrees - 360.0
# Initial-gate check: j is the second scan of the current decision sequence
if start_gate_check(cycle_time, plots[l + 2], plots[l + 1], v0=340):
# --- For tracks that pass the initial gate, build a tentative track and go on to check the correlation gate
# Correlation (tracking) gate check, see pages 71-72
if relate_gate_check(cycle_time, v, a, plots[l + 2], plots[l + 1], plots[l], sigma_s=sigma_s):
n_pass += 1
# Record the motion state for the window
state_dict = {
'cycle': cycle_id,
'x': plots[l][0],
'y': plots[l][1],
'v': v,
'a': a,
'angle_in_degrees': angle_in_degrees
}
window_states[cycle_id] = state_dict
## ----- Record the motion state of the first two plots in the window
if l == len(plots) - 2 - 1:
print('Add plot for the first 2 plots in the window...')
plots_2 = [plots[l + 1], plots[l]]
v = get_v(plots_2, cycle_time)
# Motion state record for plot No.1 of the window
state_dict = {
'cycle': cycle_id - 1,
'x': plots[l + 1][0],
'y': plots[l + 1][1],
'v': v,
'a': -1,
'angle_in_degrees': -1
}
window_states[cycle_id - 1] = state_dict
# Motion state record for plot No.0 of the window
state_dict = {
'cycle': cycle_id - 2,
'x': plots[l + 2][0],
'y': plots[l + 2][1],
'v': -1,
'a': -1,
'angle_in_degrees': -1
}
window_states[cycle_id - 2] = state_dict
else:
print('Track init failed @cycle{:d}, object(plot) is not in relating gate.'.format(i))
else:
print('Track init failed @cycle{:d} @window{:d}, object(plot) is not in the starting gate.'
.format(i, j))
# Decide whether the current track was initialized successfully
if n_pass >= m:
print(
'Track {:d} inited successfully @cycle {:d}.'.format(k, i))
# ----- Initialize the track object
track = Track()
track.id_ = track_cnt # track id
track.state_ = 2 # track state: reliable (confirmed) track
track.init_cycle_ = i # cycle at which the track was initiated
window_states = sorted(window_states.items(
), key=lambda x: x[0], reverse=False) # re-sort in ascending order
# Add the already-initialized plots
for k, v in window_states:
# print(k, v)
plot = Plot(v['cycle'], v['x'], v['y'],
v['v'], v['a'], v['angle_in_degrees'])
plot.state_ = 1 # 'Related'
plot.correlated_track_id_ = track.id_
track.add_plot(plot)
track.quality_counter_ += 1 # update the track quality score
tracks.append(track)
# -----
# Update the track id counter
track_cnt += 1
# Flag that track initiation succeeded
succeed = True
# Clear the window state
window_states = defaultdict(dict)
# Leave the check for this track and move on to the next tentative track
continue
return succeed, tracks | a77e8a9d116187dce674c33ca098012bb6b22363 | 19,758 |
from typing import Union
import pandas as pd
def bias_scan(
data: pd.DataFrame,
observations: pd.Series,
expectations: Union[pd.Series, pd.DataFrame] = None,
favorable_value: Union[str, float] = None,
overpredicted: bool = True,
scoring: Union[str, ScoringFunction] = "Bernoulli",
num_iters: int = 10,
penalty: float = 1e-17,
mode: str = "binary",
**kwargs,
):
"""
scan to find the highest scoring subset of records
:param data (dataframe): the dataset (containing the features) the model was trained on
:param observations (series): ground truth (correct) target values
:param expectations (series, dataframe, optional): pandas series estimated targets
as returned by a model for binary, continuous and ordinal modes.
If mode is nominal, this is a dataframe with columns containing expectations for each nominal class.
If None, model is assumed to be a dumb model that predicts the mean of the targets
or 1/(num of categories) for nominal mode.
:param favorable_value(str, float, optional): Should be high or low or float if the mode in [binary, ordinal, or continuous].
If float, value has to be minimum or maximum in the observations column. Defaults to high if None for these modes.
Support for float left in to keep the intuition clear in binary classification tasks.
If mode is nominal, favorable values should be one of the unique categories in the observations.
Defaults to a one-vs-all scan if None for nominal mode.
:param overpredicted (bool, optional): flag for group to scan for.
True means we scan for a group whose expectations/predictions are systematically higher than observed.
In other words, True means we scan for a group whose observed is systematically lower than the expectations.
False means we scan for a group whose expectations/predictions are systematically lower than observed.
In other words, False means we scan for a group whose observed is systematically higher than the expectations.
:param scoring (str or class): One of 'Bernoulli', 'Gaussian', 'Poisson', or 'BerkJones' or subclass of
:class:`aif360.metrics.mdss.ScoringFunctions.ScoringFunction`.
:param num_iters (int, optional): number of iterations (random restarts). Should be positive.
:param penalty (float, optional): penalty term. Should be positive. The penalty term, as with any regularization parameter, may need to be
tuned for one's use case. The higher the penalty, the less complex (number of features and feature values) the
highest scoring subset that gets returned is.
:param mode: one of ['binary', 'continuous', 'nominal', 'ordinal']. Defaults to binary.
In nominal mode, up to 10 categories are supported by default.
To increase this, pass in keyword argument max_nominal = integer value.
:returns: the highest scoring subset and the score or dict of the highest scoring subset and the score for each category in nominal mode
"""
# Ensure correct mode is passed in.
modes = ["binary", "continuous", "nominal", "ordinal"]
assert mode in modes, f"Expected one of {modes}, got {mode}."
# Set correct favorable value (this tells us if higher or lower is better)
min_val, max_val = observations.min(), observations.max()
uniques = list(observations.unique())
if favorable_value == 'high':
favorable_value = max_val
elif favorable_value == 'low':
favorable_value = min_val
elif favorable_value is None:
if mode in ["binary", "ordinal", "continuous"]:
favorable_value = max_val # Default to higher is better
elif mode == "nominal":
favorable_value = "flag-all" # Default to scan through all categories
assert favorable_value in [
"flag-all",
*uniques,
], f"Expected one of {uniques}, got {favorable_value}."
assert favorable_value in [
min_val,
max_val,
"flag-all",
*uniques,
], f"Favorable_value should be high, low, or one of categories {uniques}, got {favorable_value}."
# Set appropriate direction for scanner depending on mode and overpredicted flag
if mode in ["ordinal", "continuous"]:
if favorable_value == max_val:
kwargs["direction"] = "negative" if overpredicted else "positive"
else:
kwargs["direction"] = "positive" if overpredicted else "negative"
else:
kwargs["direction"] = "negative" if overpredicted else "positive"
# Set expectations to mean targets for non-nominal modes
if expectations is None and mode != "nominal":
expectations = pd.Series(observations.mean(), index=observations.index)
# Set appropriate scoring function
if scoring == "Bernoulli":
scoring = Bernoulli(**kwargs)
elif scoring == "BerkJones":
scoring = BerkJones(**kwargs)
elif scoring == "Gaussian":
scoring = Gaussian(**kwargs)
elif scoring == "Poisson":
scoring = Poisson(**kwargs)
else:
scoring = scoring(**kwargs)
if mode == "binary": # Flip observations if favorable_value is 0 in binary mode.
observations = pd.Series(observations == favorable_value, dtype=int)
elif mode == "nominal":
unique_outs = set(sorted(observations.unique()))
size_unique_outs = len(unique_outs)
if expectations is not None:
expectations_cols = set(sorted(expectations.columns))
assert (
unique_outs == expectations_cols
), f"Expected {unique_outs} in expectation columns, got {expectations_cols}"
else:  # Set expectations to 1/(num of categories) for nominal mode
expectations = pd.Series(
1 / observations.nunique(), index=observations.index
)
max_nominal = kwargs.get("max_nominal", 10)
assert (
size_unique_outs <= max_nominal
), f"Nominal mode only support up to {max_nominal} labels, got {size_unique_outs}. Use keyword argument max_nominal to increase the limit."
if favorable_value != "flag-all": # If favorable flag is set, use one-vs-others strategy to scan, else use one-vs-all strategy
observations = observations.map({favorable_value: 1})
observations = observations.fillna(0)
if isinstance(expectations, pd.DataFrame):
expectations = expectations[favorable_value]
else:
results = {}
orig_observations = observations.copy()
orig_expectations = expectations.copy()
for unique in uniques:
observations = orig_observations.map({unique: 1})
observations = observations.fillna(0)
if isinstance(expectations, pd.DataFrame):
expectations = orig_expectations[unique]
scanner = MDSS(scoring)
result = scanner.scan(
data, expectations, observations, penalty, num_iters, mode=mode
)
results[unique] = result
return results
scanner = MDSS(scoring)
return scanner.scan(data, expectations, observations, penalty, num_iters, mode=mode) | 735ccc6c3054e981a7aee9681d892e226316ed41 | 19,759 |
def int_from_bin_list(lst):
"""Convert a list of 0s and 1s into an integer
Args:
lst (list or numpy.array): list of 0s and 1s
Returns:
int: resulting integer
"""
return int("".join(str(x) for x in lst), 2) | a41b2578780019ed1266442d76462fb89ba2a0fb | 19,760 |
import numpy as np
import pandas as pd
def validate_array_input(arr, dtype, arr_name):
"""Check if array has correct type and is numerical.
This function checks if the input is either a list, numpy.ndarray or
pandas.Series of numerical values, converts it to a numpy.ndarray and
throws an error in case of incorrect data.
Args:
arr: Array of data
dtype: One of numpy's dtypes
arr_name: String specifying the variable name, so that the error
message can be adapted correctly.
Returns:
The input converted to a numpy.ndarray of values with the datatype
specified in the input argument.
Raises:
ValueError: In case non-numerical data is passed
TypeError: If the error is neither a list, a numpy.ndarray nor a
pandas.Series
"""
# Check for correct data type
if isinstance(arr, (list, np.ndarray, pd.Series)):
# Try to convert as numpy array
try:
arr = np.array(arr, dtype=dtype).flatten()
except (ValueError, TypeError):
msg = ["The data in the parameter array '{}'".format(arr_name),
" must be purely numerical."]
raise ValueError("".join(msg))
else:
msg = ["The array {} must be either a list, ".format(arr_name),
"numpy.ndarray or pandas.Series"]
raise TypeError("".join(msg))
# return converted array
return arr | 72829dad46aa6e5054cd0d49ff0206083781bddd | 19,761 |
import numpy as np
from sklearn.manifold import TSNE
from sklearn.cluster import AgglomerativeClustering
from sklearn.preprocessing import StandardScaler
def ClassifyBehavior(data, bp_1="snout",bp_2="ear_L", bp_3="ear_R", bp_4="tail", dimensions = 2,distance=28,**kwargs):
"""
Returns an array with the cluster by frame, an array with the embedding data in low-dimensional
space and the clusterization model.
Parameters
----------
data : pandas DataFrame
The input tracking data.
bp_1 : str
Body part representing snout.
bp_2 : str
Body part representing left ear.
bp_3 : str
Body part representing right ear.
bp_4 : str
Body part representing tail.
dimensions : int
Dimension of the embedded space.
distance : int
The linkage distance threshold above which, clusters will not be merged.
startIndex : int, optional
Initial index.
n_jobs : int, optional
The number of parallel jobs to run for neighbors search.
verbose : int, optional
Verbosity level.
perplexity : float, optional
The perplexity is related to the number of nearest neighbors that is used in other manifold learning algorithms. Larger datasets usually require a larger perplexity.
Returns
-------
cluster_labels : array
Array with the cluster by frame.
X_transformed : array
Embedding of the training data in low-dimensional space.
model : Obj
AgglomerativeClustering model.
See Also
--------
For more information and usage examples: https://github.com/pyratlib/pyrat
Notes
-----
This function was developed based on DLC outputs and is able to support
matplotlib configurations."""
startIndex = kwargs.get('startIndex')
n_jobs = kwargs.get('n_jobs')
verbose = kwargs.get('verbose')
perplexity = kwargs.get("perplexity")
if startIndex is None:
startIndex = 0
if n_jobs is None:
n_jobs=-1
if verbose is None:
verbose=0
if perplexity is None:
perplexity=500
values = (data.iloc[2:,1:].values).astype(float)
lista1 = (data.iloc[0][1:].values +" - " + data.iloc[1][1:].values).tolist()
nose = np.concatenate(((values[:,lista1.index(bp_1+" - x")]).reshape(1,-1).T,(values[:,lista1.index(bp_1+" - y")]).reshape(1,-1).T), axis=1)
earr = np.concatenate(((values[:,lista1.index(bp_2+" - x")]).reshape(1,-1).T,(values[:,lista1.index(bp_2+" - y")]).reshape(1,-1).T), axis=1)
earl = np.concatenate(((values[:,lista1.index(bp_3+" - x")]).reshape(1,-1).T,(values[:,lista1.index(bp_3+" - y")]).reshape(1,-1).T), axis=1)
tail = np.concatenate(((values[:,lista1.index(bp_4+" - x")]).reshape(1,-1).T,(values[:,lista1.index(bp_4+" - y")]).reshape(1,-1).T), axis=1)
bodyparts = [nose, earr, earl, tail]
distances = []
for k in range(len(bodyparts[0])):
frame_distances = []
for i in range(len(bodyparts)):
distance_row = []
for j in range( len(bodyparts) ):
distance_row.append(np.linalg.norm(bodyparts[i][k] - bodyparts[j][k]))
frame_distances.append(distance_row)
distances.append(frame_distances)
distances2 = np.asarray(distances)
for i in range(4):
for k in range(4):
distances2[:, i, k] = distances2[:, i, k]/np.max(distances2[:, i, k])
d = []
for i in range(distances2.shape[0]):
d.append(distances2[i, np.triu_indices(4, k = 1)[0], np.triu_indices(4, k = 1)[1]])
d = StandardScaler().fit_transform(d)
embedding = TSNE(n_components=dimensions, n_jobs=n_jobs, verbose=verbose, perplexity=perplexity)
X_transformed = embedding.fit_transform(d[startIndex:])
model = AgglomerativeClustering(n_clusters=None,distance_threshold=distance)
model = model.fit(d[startIndex:])
cluster_labels = model.labels_
return cluster_labels, X_transformed, model | 002e11bd0b6050fcfa8b50df0f0b24a3cc36bed7 | 19,762 |
def grab_inputs(board):
"""
Asks for inputs and returns a row, col. Also updates the board state.
"""
keepasking = True
while keepasking:
try:
row = int(input("Input row"))
col = int(input("Input column "))
except (EOFError, KeyboardInterrupt):
print('Cya nerd')
exit()
except ValueError:
print("That's not an integer.")
else: # If it's an int
valid_board = board.update_board(row, col)
if valid_board == False:
print("Your row or col is out of range. Try ranges 0-2 and make sure there's nothing there already.")
else: # If it's a valid board
keepasking = False
return row, col | 0fb840348ff645d9f2a48e1c028d99bff0bf31fe | 19,763 |
def start_session():
""" This function is what initializes the application."""
welcome_msg = render_template('welcome')
return question(welcome_msg) | ce666a48f078e49a0df98b5087c71cb1e548e905 | 19,764 |
def solve(filename):
"""
Run a sample, do the analysis and store a program to apply to a test case
"""
arc = Arc(filename)
arc.print_training_outputs()
return arc.solve() | 2a23021bb31508fd67c4be178684bb2da7d1d7c9 | 19,765 |
from pydicom.dataelem import RawDataElement
from pydicom.sequence import Sequence
def extract_item(item, prefix=None, entry=None):
"""a helper function to extract sequence, will extract values from
a dicom sequence depending on the type.
Parameters
==========
item: an item from a sequence.
"""
# First call, we define entry to be a lookup dictionary
if entry is None:
entry = {}
# Skip raw data elements
if not isinstance(item, RawDataElement):
header = item.keyword
# If there is no header or field, we can't evaluate
if header in [None, ""]:
return entry
if prefix is not None:
header = "%s__%s" % (prefix, header)
value = item.value
if isinstance(value, bytes):
value = value.decode("utf-8")
if isinstance(value, Sequence):
return extract_sequence(value, prefix=header)
entry[header] = value
return entry | a4c8c99bcd54baefdbaa95469bb3a289c2811cfc | 19,766 |
def route_counts(session, origin_code, dest_code):
""" Get count of flight routes between origin and dest. """
routes = session.tables["Flight Route"]
# airports = session.tables["Reporting Airport"]
# origin = airports["Reporting Airport"] == origin_code
origin = SelectorClause(
"Reporting Airport", REPORTING_AIRPORT_CODE, [origin_code], session=session
)
dest = routes["Origin Destination"] == dest_code
audience = routes * origin & dest
return audience.select().count | ad35a36b6874bcf45107d7217acdb6bae097b305 | 19,767 |
from pathlib import Path
from astropy.nddata import CCDData
def generate_master_bias(
science_frame : CCDData,
bias_path : Path,
use_cache : bool=True
) -> CCDData:
"""
"""
cache_path = generate_cache_path(science_frame, bias_path) / 'bias'
cache_file = cache_path / 'master.fits'
if use_cache and cache_file.is_file():
ccd = CCDData.read(cache_file)
if ccd is not None:
return ccd
cache_path.mkdir(parents=True, exist_ok=True)
ccd = calibrate_bias(science_frame, bias_path)
if ccd is not None:
ccd.write(cache_file)
return ccd | 207ca95109694d16e154088c7e3a12880f01d037 | 19,768 |
def RetryOnException(retry_checker,
max_retries,
sleep_multiplier=0,
retry_backoff_factor=1):
"""Decorater which retries the function call if |retry_checker| returns true.
Args:
retry_checker: A callback function which should take an exception instance
and return True if functor(*args, **kwargs) should be retried
when such exception is raised, and return False if it should
not be retried.
max_retries: Maximum number of retries allowed.
sleep_multiplier: Will sleep sleep_multiplier * attempt_count seconds if
retry_backoff_factor is 1. Will sleep
sleep_multiplier * (
retry_backoff_factor ** (attempt_count - 1))
if retry_backoff_factor != 1.
retry_backoff_factor: See explanation of sleep_multiplier.
Returns:
The function wrapper.
"""
def _Wrapper(func):
def _FunctionWrapper(*args, **kwargs):
return Retry(retry_checker, max_retries, func, sleep_multiplier,
retry_backoff_factor, *args, **kwargs)
return _FunctionWrapper
return _Wrapper | a721e14c7d5d98e2151f4108dcff18cb0de225e3 | 19,769 |
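# Usage sketch for the RetryOnException decorator above. The retry checker and the
# decorated function are hypothetical; `Retry` is a helper elided from this snippet.
def _is_timeout(exc):
    return isinstance(exc, TimeoutError)

@RetryOnException(_is_timeout, max_retries=3, sleep_multiplier=1)
def FetchDeviceState():
    ...  # remote call that may raise TimeoutError and will be retried up to 3 times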
import csv
import itertools
import numpy as np
def ParseCsvFile(fp):
"""Parse dstat results file in csv format.
Args:
fp: A file object opened on the dstat CSV output.
Returns:
A tuple of list of dstat labels and ndarray containing parsed data.
"""
reader = csv.reader(fp)
headers = list(itertools.islice(reader, 5))
if len(headers) != 5:
raise ValueError(
'Expected exactly 5 header lines got {}\n{}'.format(
len(headers), headers))
if 'Dstat' not in headers[0][0]:
raise ValueError(
'Expected first header cell to contain "Dstat"\n{}'.format(
headers[0]))
if 'Host:' not in headers[2][0]:
raise ValueError(('Expected first cell in third line to be '
'"Host:"\n{}').format(headers[2]))
categories = next(reader)
# Categories are not repeated; copy category name across columns in the
# same category
for i, category in enumerate(categories):
if not categories[i]:
categories[i] = categories[i - 1]
labels = next(reader)
if len(labels) != len(categories):
raise ValueError((
'Number of categories ({}) does not match number of '
'labels ({})\nCategories: {}\nLabels:{}').format(
len(categories), len(labels), categories, labels))
# Generate new column names
labels = ['%s__%s' % x for x in zip(labels, categories)]
data = []
for i, row in enumerate(reader):
# Remove the trailing comma
if len(row) == len(labels) + 1:
if row[-1]:
raise ValueError(('Expected the last element of row {0} to be empty,'
' found {1}').format(row, row[-1]))
row = row[:-1]
if len(labels) != len(row):
raise ValueError(('Number of labels ({}) does not match number of '
'columns ({}) in row {}:\n{}').format(
len(labels), len(row), i, row))
data.append(row)
return labels, np.array(data, dtype=float) | 39381d0f9eaab1ab139d4d660257aeaec6e765ca | 19,770 |
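# Usage sketch for ParseCsvFile above, built only from the format checks inside the
# function (5 header lines, a category row, a label row, then data rows that end with
# a trailing comma). The sample content is hypothetical.
import io
sample = (
    '"Dstat 0.7.3 CSV output"\n'
    '"Author:","..."\n'
    '"Host:","example-host"\n'
    '"Cmdline:","dstat --output out.csv"\n'
    '"Columns:",""\n'
    '"total cpu usage",""\n'
    '"usr","sys"\n'
    '1.0,2.0,\n'
)
labels, data = ParseCsvFile(io.StringIO(sample))
print(labels)  # ['usr__total cpu usage', 'sys__total cpu usage']
print(data)    # [[1. 2.]]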
def uid_to_device_name(uid):
"""
Turn UID into its corresponding device name.
"""
return device_id_to_name(uid_to_device_id(uid)) | e4ec879bb1619fd1e215c94084117b3ce6b237bc | 19,771 |
def zonal_convergence(u, h, dx, dy, dy_u, ocean_u):
"""Compute convergence of zonal flow.
Returns -(hu)_x taking account of the curvature of the grid.
"""
res = create_var(u.shape)
for j in range(u.shape[-2]):
for i in range(u.shape[-1]):
res[j, i] = (-1) * (
h[j, cx(i + 1)] * u[j, cx(i + 1)] * dy_u[j, cx(i + 1)] * ocean_u[j, cx(i + 1)]
- h[j, i] * u[j, i] * dy_u[j, i] * ocean_u[j, i]
) / (dx[j, i] * dy[j, i])
return res | 42a0ee78e0c4d8f78a600a9dd72aa03aa104f560 | 19,772 |
import shapely.geometry
def filterPoints(solutions, corners):
"""Remove solutions that are not within the perimeter.
This function uses shapely, as the mathematical computations for non-rectangular
shapes are quite heavy.
Args:
solutions: A list of candidate points.
corners: The perimeter of the garden (list of LEDs).
Returns:
A list of points filtered.
"""
coords = []
for i in corners:
if i.inPerimeter:
coords.append((i.point.X, i.point.Y))
polygon = shapely.geometry.polygon.Polygon(coords)
solutions_2 = [value.toShapely() for value in solutions
if polygon.contains(value.toShapely())]
return [Point(v.x, v.y) for v in solutions_2] | 55c6e824d46e934eb30c6ecca45f516f09f0bff2 | 19,773 |
from typing import Set
def get_migrations_from_old_config_key_startswith(old_config_key_start: str) -> Set[AbstractPropertyMigration]:
"""
Get all migrations where old_config_key starts with given value
"""
ret = set()
for migration in get_history():
if isinstance(migration, AbstractPropertyMigration) and \
migration.old_config_key and \
migration.old_config_key.startswith(old_config_key_start):
ret.add(migration)
return ret | c8224af6e9a675ed940bf61de984a8dd01f634d5 | 19,774 |
def bbox_mapping(bboxes,
img_shape,
scale_factor,
flip,
flip_direction, # ='horizontal',
tile_offset):
"""Map bboxes from the original image scale to testing scale."""
new_bboxes = bboxes * bboxes.new_tensor(scale_factor)
if flip:
new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)
# add by hui ############################################
assert tile_offset is None or (isinstance(tile_offset, (tuple, list)) and len(tile_offset) == 2), \
"tile_offset must be None or (dx, dy) or [dx, dy]"
if tile_offset is not None:
dx, dy = tile_offset
new_bboxes[:, [0, 2]] -= dx
new_bboxes[:, [1, 3]] -= dy
h, w, c = img_shape
new_bboxes[:, [0, 2]] = new_bboxes[:, [0, 2]].clamp(0, w - 1)
new_bboxes[:, [1, 3]] = new_bboxes[:, [1, 3]].clamp(0, h - 1)
W, H = new_bboxes[:, 2] - new_bboxes[:, 0], new_bboxes[:, 3] - new_bboxes[:, 1]
keep = (W >= 2) & (H >= 2)
new_bboxes = new_bboxes[keep]
# #################################################################
return new_bboxes | a5fb8283eb6c379ef516db3a72c50d34c58ea8e6 | 19,775 |
import torch
def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
"""Convert 3x4 rotation matrix to 4d quaternion vector
This algorithm is based on algorithm described in
https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201
Args:
rotation_matrix (Tensor): the rotation matrix to convert.
Return:
Tensor: the rotation in quaternion
Shape:
- Input: :math:`(N, 3, 4)`
- Output: :math:`(N, 4)`
Example:
>>> input = torch.rand(4, 3, 4) # Nx3x4
>>> output = tgm.rotation_matrix_to_quaternion(input) # Nx4
"""
if not torch.is_tensor(rotation_matrix):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(rotation_matrix)))
if len(rotation_matrix.shape) > 3:
raise ValueError(
"Input size must be a three dimensional tensor. Got {}".format(
rotation_matrix.shape))
if not rotation_matrix.shape[-2:] == (3, 4):
raise ValueError(
"Input size must be a N x 3 x 4 tensor. Got {}".format(
rotation_matrix.shape))
rmat_t = torch.transpose(rotation_matrix, 1, 2)
mask_d2 = rmat_t[:, 2, 2] < eps
mask_d0_d1 = rmat_t[:, 0, 0] > rmat_t[:, 1, 1]
mask_d0_nd1 = rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]
t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
q0 = torch.stack([rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
rmat_t[:, 2, 0] + rmat_t[:, 0, 2]], -1)
t0_rep = t0.repeat(4, 1).t()
t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
q1 = torch.stack([rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]], -1)
t1_rep = t1.repeat(4, 1).t()
t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
q2 = torch.stack([rmat_t[:, 0, 1] - rmat_t[:, 1, 0],
rmat_t[:, 2, 0] + rmat_t[:, 0, 2],
rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2], -1)
t2_rep = t2.repeat(4, 1).t()
t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
q3 = torch.stack([t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
rmat_t[:, 0, 1] - rmat_t[:, 1, 0]], -1)
t3_rep = t3.repeat(4, 1).t()
mask_c0 = mask_d2 * mask_d0_d1
# mask_c1 = mask_d2 * (1 - mask_d0_d1)
mask_c1 = mask_d2 * (~mask_d0_d1)
# mask_c2 = (1 - mask_d2) * mask_d0_nd1
mask_c2 = (~mask_d2) * mask_d0_nd1
# mask_c3 = (1 - mask_d2) * (1 - mask_d0_nd1)
mask_c3 = (~mask_d2) * (~mask_d0_nd1)
mask_c0 = mask_c0.view(-1, 1).type_as(q0)
mask_c1 = mask_c1.view(-1, 1).type_as(q1)
mask_c2 = mask_c2.view(-1, 1).type_as(q2)
mask_c3 = mask_c3.view(-1, 1).type_as(q3)
q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3
q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 + # noqa
t2_rep * mask_c2 + t3_rep * mask_c3) # noqa
q *= 0.5
return q | 3198dcd9f7a058a54be0d607cc66f543ea8e46f8 | 19,777 |
import calendar
import datetime
from dateutil.relativedelta import relativedelta
def get_first_day_and_last_day_by_month(months=0):
    """Get the dates of the first and last day of a month.
    :param months: int, negative for months in the past, positive for months in the future
    :return tuple: (date of the first day of the month, date of the last day of the month)
    """
    day = get_today() + relativedelta(months=months)
    year = day.year
    month = day.month
    # Get the weekday of the first day and the total number of days of the month
    _, month_range = calendar.monthrange(year, month)
    first = datetime.date(year=year, month=month, day=1)
    last = datetime.date(year=year, month=month, day=month_range)
    return first, last | 1aea5aa0c1abcc8382212315f0e34cf0f33968b9 | 19,778 |
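# Usage sketch for get_first_day_and_last_day_by_month above. `get_today` is a module
# helper elided from the snippet (assumed to return today's date).
first, last = get_first_day_and_last_day_by_month(months=-1)
print(first, last)  # e.g. 2021-03-01 2021-03-31 for a call made in April 2021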
import numpy as np
def kmeans(X, C):
"""Lloyd's algorithm for the k-centers problem.
X : data matrix
C : initial centers
"""
C = C.copy()
V = np.zeros(C.shape[0])
for x in X:
idx = np.argmin(((C - x)**2).sum(1))
V[idx] += 1
eta = 1.0 / V[idx]
C[idx] = (1.0 - eta) * C[idx] + eta * x
return C | 3006c10bf9091a39f4808781e4b484fc24f2ae3f | 19,779 |
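# Usage sketch for the online k-centers update above (numpy assumed imported as np).
rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0.0, 0.1, (50, 2)), rng.normal(5.0, 0.1, (50, 2))])
C0 = np.array([[0.5, 0.5], [4.5, 4.5]])
print(kmeans(X, C0))  # one center converges near (0, 0), the other near (5, 5)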
def data_block(block_str):
""" Parses all of the NASA polynomials in the species block of the
mechanism file and subsequently pulls all of the species names
and thermochemical properties.
:param block_str: string for thermo block
:type block_str: str
:return data_block: all the data from the data string for each species
:rtype: list(list(str/float))
"""
thm_dstr_lst = data_strings(block_str)
thm_dat_lst = tuple(zip(
map(species_name, thm_dstr_lst),
map(temperatures, thm_dstr_lst),
map(low_coefficients, thm_dstr_lst),
map(high_coefficients, thm_dstr_lst)))
return thm_dat_lst | bfcf457e164002cd4ab8c6c852117ebe24f437ab | 19,781 |
from functools import reduce
from re import S
def risch_norman(f, x, rewrite=False):
"""Computes indefinite integral using extended Risch-Norman algorithm,
also known as parallel Risch. This is a simplified version of full
recursive Risch algorithm. It is designed for integrating various
classes of functions including transcendental elementary or special
functions like Airy, Bessel, Whittaker and Lambert.
The main difference between this algorithm and the recursive one
is that rather than computing a tower of differential extensions
in a recursive way, it handles all cases in one shot. That's why
it is called parallel Risch algorithm. This makes it much faster
than the original approach.
Another benefit is that it doesn't require to rewrite expressions
in terms of complex exponentials. Rather it uses tangents and so
antiderivatives are being found in a more familiar form.
Risch-Norman algorithm can also handle special functions very
easily without any additional effort. Just differentiation
method must be known for a given function.
Note that this algorithm is not a decision procedure. If it
computes an antiderivative for a given integral then it's a
proof that such function exists. However when it fails then
there still may exist an antiderivative and a fallback to
recursive Risch algorithm would be necessary.
The question if this algorithm can be made a full featured
decision procedure still remains open.
For more information on the implemented algorithm refer to:
[1] K. Geddes, L.Stefanus, On the Risch-Norman Integration
Method and its Implementation in Maple, Proceedings of
ISSAC'89, ACM Press, 212-217.
[2] J. H. Davenport, On the Parallel Risch Algorithm (I),
Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157.
[3] J. H. Davenport, On the Parallel Risch Algorithm (III):
Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6.
[4] J. H. Davenport, B. M. Trager, On the Parallel Risch
Algorithm (II), ACM Transactions on Mathematical
Software 11 (1985), 356-362.
"""
f = Basic.sympify(f)
if not f.has(x):
return f * x
rewritables = {
(sin, cos, cot) : tan,
(sinh, cosh, coth) : tanh,
}
if rewrite:
for candidates, rule in rewritables.iteritems():
f = f.rewrite(candidates, rule)
else:
for candidates in rewritables.iterkeys():
if f.has(*candidates):
break
else:
rewrite = True
terms = components(f)
for g in set(terms):
h = g.diff(x)
if not isinstance(h, Basic.Zero):
terms |= components(h)
terms = [ g for g in terms if g.has(x) ]
V, in_terms, out_terms = [], [], {}
for i, term in enumerate(terms):
V += [ Symbol('x%s' % i) ]
N = term.count_ops(symbolic=False)
in_terms += [ (N, term, V[-1]) ]
out_terms[V[-1]] = term
in_terms.sort(lambda u, v: int(v[0] - u[0]))
def substitute(expr):
for _, g, symbol in in_terms:
expr = expr.subs(g, symbol)
return expr
diffs = [ substitute(g.diff(x)) for g in terms ]
denoms = [ g.as_numer_denom()[1] for g in diffs ]
denom = reduce(lambda p, q: lcm(p, q, V), denoms)
numers = [ normal(denom * g, *V) for g in diffs ]
def derivation(h):
return Basic.Add(*[ d * h.diff(v) for d, v in zip(numers, V) ])
def deflation(p):
for y in p.atoms(Basic.Symbol):
if not isinstance(derivation(p), Basic.Zero):
c, q = p.as_polynomial(y).as_primitive()
return deflation(c) * gcd(q, q.diff(y))
else:
return p
def splitter(p):
for y in p.atoms(Basic.Symbol):
if not isinstance(derivation(y), Basic.Zero):
c, q = p.as_polynomial(y).as_primitive()
q = q.as_basic()
h = gcd(q, derivation(q), y)
s = quo(h, gcd(q, q.diff(y), y), y)
c_split = splitter(c)
if s.as_polynomial(y).degree() == 0:
return (c_split[0], q * c_split[1])
q_split = splitter(normal(q / s, *V))
return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1])
else:
return (S.One, p)
special = []
for term in terms:
if isinstance(term, Basic.Function):
if isinstance(term, Basic.tan):
special += [ (1 + substitute(term)**2, False) ]
elif isinstance(term.func, tanh):
special += [ (1 + substitute(term), False),
(1 - substitute(term), False) ]
#elif isinstance(term.func, Basic.LambertW):
# special += [ (substitute(term), True) ]
ff = substitute(f)
P, Q = ff.as_numer_denom()
u_split = splitter(denom)
v_split = splitter(Q)
s = u_split[0] * Basic.Mul(*[ g for g, a in special if a ])
a, b, c = [ p.as_polynomial(*V).degree() for p in [s, P, Q] ]
candidate_denom = s * v_split[0] * deflation(v_split[1])
monoms = monomials(V, 1 + a + max(b, c))
linear = False
while True:
coeffs, candidate, factors = [], S.Zero, set()
for i, monomial in enumerate(monoms):
coeffs += [ Symbol('A%s' % i, dummy=True) ]
candidate += coeffs[-1] * monomial
candidate /= candidate_denom
polys = [ v_split[0], v_split[1], u_split[0]] + [ s[0] for s in special ]
for irreducibles in [ factorization(p, linear) for p in polys ]:
factors |= irreducibles
for i, irreducible in enumerate(factors):
if not isinstance(irreducible, Basic.Number):
coeffs += [ Symbol('B%s' % i, dummy=True) ]
candidate += coeffs[-1] * Basic.log(irreducible)
h = together(ff - derivation(candidate) / denom)
numerator = h.as_numer_denom()[0].expand()
if not isinstance(numerator, Basic.Add):
numerator = [numerator]
collected = {}
for term in numerator:
coeff, depend = term.as_independent(*V)
if depend in collected:
collected[depend] += coeff
else:
collected[depend] = coeff
solutions = solve(collected.values(), coeffs)
if solutions is None:
if linear:
break
else:
linear = True
else:
break
if solutions is not None:
antideriv = candidate.subs_dict(solutions)
for C in coeffs:
if C not in solutions:
antideriv = antideriv.subs(C, S.Zero)
antideriv = simplify(antideriv.subs_dict(out_terms)).expand()
if isinstance(antideriv, Basic.Add):
return Basic.Add(*antideriv.as_coeff_factors()[1])
else:
return antideriv
else:
if not rewrite:
return risch_norman(f, x, rewrite=True)
else:
return None | 12dd2cbd724566344d73bff48ed46b33d2b84730 | 19,782 |
import numpy as np
def preprocess_spectra(fluxes, interpolated_sn, sn_array, y_offset_array):
"""preprocesses a batch of spectra, adding noise according to specified sn profile, and applies continuum error
INPUTS
fluxes: length n 2D array with flux values for a spectrum
interpolated_sn: length n 1D array with relative sn values for each pixel
sn_array: 2d array dims (num examples, 1) with sn selected for each example
y_offset_array: same as sn array but with y_offsets
OUTPUTS
fluxes: length n 2D array with preprocessed fluxes for a spectrum
"""
n_pixels = np.size(fluxes[0, :])
n_stars = np.size(fluxes[:, 1])
base_stddev = 1.0 / sn_array[:, 0]
for i in range(n_stars):
noise_array = np.random.normal(0.0, scale=base_stddev[i], size=n_pixels)
fluxes[i, :] += noise_array*interpolated_sn
fluxes += y_offset_array
return fluxes | 552d42b3835f3bc60930ae6f05f1544c924e940b | 19,785 |
import json
def read_config(path=None):
"""
Function for reading in the config.json file
"""
#create the filepath
if path:
if "config.json" in path:
file_path = path
else:
file_path = f"{path}/config.json"
else:
file_path = "config.json"
#load in config
try:
with open(file_path, "r") as json_file:
config = json.load(json_file)
except Exception:
raise Exception("Your config file is corrupt (wrong syntax, missing values, ...)")
return config | 3e3612879645509acb74f184085f7e584afbf822 | 19,786 |
import json
def ema_incentive(ds):
"""
Parse stream name 'incentive--org.md2k.ema_scheduler--phone'. Convert json column to multiple columns.
Args:
ds: Windowed/grouped DataStream object
Returns:
ds: Windowed/grouped DataStream object.
"""
schema = StructType([
StructField("timestamp", TimestampType()),
StructField("localtime", TimestampType()),
StructField("user", StringType()),
StructField("version", IntegerType()),
StructField("incentive", FloatType()),
StructField("total_incentive", FloatType()),
StructField("ema_id", StringType()),
StructField("data_quality", FloatType())
])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def parse_ema_incentive(user_data):
all_vals = []
for index, row in user_data.iterrows():
ema = row["incentive"]
if not isinstance(ema, dict):
ema = json.loads(ema)
incentive = ema["incentive"]
total_incentive = ema["totalIncentive"]
ema_id = ema["emaId"]
data_quality = ema["dataQuality"]
all_vals.append([row["timestamp"],row["localtime"], row["user"],1,incentive,total_incentive,ema_id,data_quality])
return pd.DataFrame(all_vals,columns=['timestamp','localtime', 'user', 'version','incentive','total_incentive','ema_id','data_quality'])
# check if datastream object contains grouped type of DataFrame
if not isinstance(ds._data, GroupedData):
raise Exception(
"DataStream object is not grouped data type. Please use 'window' operation on datastream object before running this algorithm")
data = ds._data.apply(parse_ema_incentive)
return DataStream(data=data, metadata=Metadata()) | ad6d6a08906dc5aab5a1ea2d0895eb84eac44f44 | 19,787 |
import numpy as np
def read_fingerprint(finger_name: str) -> np.ndarray:
"""
Given the file "x_y_z" name this function returns a vector with
the fingerprint data.
:param finger_name: A string with the format "x_y_z".
:return: A vector (1x256) containing the fingerprint data.
"""
base_path = "rawData/QFM16_"
path = base_path + finger_name + ".txt"
return read_finger_file(path) | 21b88afffdb016699ad6a0ed635931096ebe8bc1 | 19,788 |
import pandas as pd
def read_data(data_path):
    """This function reads in the histogram data from the provided path
    and returns a pandas dataframe
    """
    # Assumes the histogram data is stored as CSV; adjust the reader to the actual file format.
    histogram_df = pd.read_csv(data_path)
    return histogram_df | 5b927246c9298743c22d8a9fc497175aa9600c24 | 19,790 |
def get_number_of_tickets():
"""Get number of tickets to enter from user"""
num_tickets = 0
while num_tickets == 0:
try:
num_tickets = int(input('How many tickets do you want to get?\n'))
except ValueError:
print("Invalid entry for number of tickets.")
return num_tickets | 3703a4ed64867a9884328c09f0fd32e763265e95 | 19,792 |
import numpy as np
def scrape(file):
""" scrapes rankings, counts from agg.txt file"""
D={}
G={}
with open(file,'r') as f:
for line in f:
L = line.split(' ')
qid = L[1][4:]
if qid not in D:
D[qid]=[]
G[qid]=[]
#ground truth
G[qid].append(int(L[0]))
#extract ranks
ranks=[]
for i in range(2,27):
[l,rank]=L[i].split(':')
if rank != 'NULL':
ranks.append(int(rank))
else:
ranks.append(0)
D[qid].append(ranks)
C={};N={}
for qid in D:
C[qid]=[]
N[qid] = len(D[qid])
A= np.array(D[qid])
assert A.shape[1] == 25
for i in range(25):
l = A[:,i]
ranked = np.where(l>0)[0]
ranking = ranked[np.argsort(l[ranked])]
C[qid].append(ranking)
#pickle.dump(C,open('MQ-lists.p','wb'))
return C,N,G | cad6525a9ae43f8366ae7e0efec14dd8b2921d27 | 19,793 |
import hashlib
import binascii
def private_key_to_WIF(private_key):
"""
Convert the hex private key into Wallet Import Format for easier wallet
importing. This function is only called if a wallet with a balance is
found. Because that event is rare, this function is not significant to the
main pipeline of the program and is not timed.
"""
digest = hashlib.sha256(binascii.unhexlify('80' + private_key)).hexdigest()
var = hashlib.sha256(binascii.unhexlify(digest)).hexdigest()
var = binascii.unhexlify('80' + private_key + var[0:8])
alphabet = chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
value = pad = 0
result = ''
for i, c in enumerate(var[::-1]): value += 256**i * c
while value >= len(alphabet):
div, mod = divmod(value, len(alphabet))
result, value = chars[mod] + result, div
result = chars[value] + result
for c in var:
if c == 0: pad += 1
else: break
return chars[0] * pad + result | 20e7a767fdfb689f586fc566a94ec37f86a88e52 | 19,794 |
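# Usage sketch for private_key_to_WIF above, using the well-known test vector from the
# Bitcoin wiki (uncompressed mainnet WIF, so the result starts with '5').
hex_key = '0c28fca386c7a227600b2fe50b7cae11ec86d3bf1fbe471be89827e19d72aa1d'
print(private_key_to_WIF(hex_key))
# expected: 5HueCGU8rMjxEXxiPuD5BDku4MkFqeZyd4dZ1jvhTVqvbTLvyTJ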
import numpy as np
def woodbury_solve_vec(C, v, p):
""" Vectorized woodbury solve --- overkill
Computes the matrix vector product (Sigma)^{-1} p
where
Sigma = CCt + diag(exp(a))
C = D x r real valued matrix
v = D dimensional real valued vector
The point of this function is that you never have to explicitly
represent the full DxD matrix to do this multiplication --- hopefully
that will cut down on memory allocations, allow for better scaling
in comments below, we write Sigma = CCt + A, where A = diag(exp(v))
"""
# set up vectorization
if C.ndim == 2:
C = np.expand_dims(C, 0)
assert v.ndim == 1, "v shape mismatched"
assert p.ndim == 1, "p shape mismatched"
v = np.expand_dims(v, 0)
p = np.expand_dims(p, 0)
bsize, D, r = np.shape(C)
# compute the inverse of the diagonal component
inv_v = np.exp(-v) # A^{-1}
aC = C*inv_v[:, :, None] # A^{-1} C
# low rank, r x r term: (Ir + Ct A^{-1} C)
r_term = np.einsum('ijk,ijh->ikh', C, aC) + \
np.eye(r)
# compute inverse term (broadcasts over first axis)
# (Ir + Ct A^{-1} C)^{-1} (Ct A^{-1})
# in einsum notation:
# - i indexes minibatch (vectorization)
# - r indexes rank dimension
# - d indexes D dimension (obs dimension)
inv_term = np.linalg.solve(r_term, np.swapaxes(aC, 1, 2))
back_term = np.einsum('idr,id->ir', aC, p) # (Ct A^{-1} p)
Sigvs = inv_v*p - np.einsum('ird,ir->id', inv_term, back_term)
return Sigvs | 875ab6709b82cd8865a4396b88cbd10a2847e608 | 19,795 |
def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope) | 7e0cbcd5d709405b32ba79c93cbf1ef6e98195f6 | 19,796 |
import pandas as pd
def pvfactors_engine_run(data, pvarray_parameters, parallel=0, mode='full'):
"""My wrapper function to launch the pvfactors engine in parallel. It is mostly for Windows use.
In Linux you can directly call run_parallel_engine. It uses MyReportBuilder to generate the output.
Args:
data (pandas DataFrame): The data to fit the model.
pvarray_parameters (dict): The pvfactors dict describing the simulation.
parallel (int, optional): Number of threads to launch. Defaults to 0 (just calls PVEngine.run_all_timesteps)
mode (str): full or fast depending on the type of back irradiances. See pvfactors doc.
Returns:
pandas DataFrame: The results of the simulation, as desired in MyReportBuilder.
"""
n, row = _get_cut(pvarray_parameters['cut'])
rb = Report(n, row)
if parallel>1:
report = run_parallel_engine(rb, pvarray_parameters, data.index,
data.dni, data.dhi,
data.zenith, data.azimuth,
data.surface_tilt, data.surface_azimuth,
data.albedo, n_processes=parallel)
else:
pvarray = OrderedPVArray.init_from_dict(pvarray_parameters)
engine = PVEngine(pvarray)
engine.fit(data.index,
data.dni,
data.dhi,
data.zenith,
data.azimuth,
data.surface_tilt,
data.surface_azimuth,
data.albedo,
data.ghi)
if mode == 'full': report = engine.run_full_mode(rb.build)
else: report = engine.run_fast_mode(rb.build, pvrow_index=0, segment_index=0)
df_report = pd.DataFrame(report, index=data.index).fillna(0)
return df_report | 1118838fed39e19e31997db9102fdba70283bed8 | 19,797 |
def get_service_button(button_text, service, element="#bottom_right_div"):
""" Generate a button that calls the std_srvs/Empty service when pressed """
print "Adding a service button!"
return str(render.service_button(button_text, service, element)) | ce8e2a0ec029762c4e19210c9986aef7e78b55d9 | 19,798 |
from sklearn import model_selection as skMS
def create_train_test_set(data, labels, test_size):
"""
Splits dataframe into train/test set
Inputs:
data: encoded dataframe containing encoded name chars
labels: encoded label dataframe
test_size: percentage of input data set to use for test set
Returns:
data_train: Subset of data set for training
data_test : Subset of data set for test
label_train: Subset of label set for training
label_test: Subset of label set for testing
"""
data_train, data_test, label_train, label_test = skMS.train_test_split(data, labels, test_size=test_size)
return [data_train, data_test, label_train, label_test] | ffeedf0cf4b7b8b1ffa552f0573d33263d216d99 | 19,799 |
def hdf_diff(*args, **kwargs):
""":deprecated: use `diff_blocks` (will be removed in 1.1.1)"""
return diff_blocks(*args, **kwargs) | 3c05a908cc32c2ba4e481ff0de41f78249e2ef02 | 19,801 |
import glob
import numpy as np
import open3d as o3d
def determine_epsilon():
"""
We follow Learning Compact Geometric Features to compute this hyperparameter, which unfortunately we didn't use later.
"""
base_dir = '../dataset/3DMatch/test/*/03_Transformed/*.ply'
files = sorted(glob.glob(base_dir), key=natural_key)
etas = []
for eachfile in files:
pcd = o3d.io.read_point_cloud(eachfile)
pcd = pcd.voxel_down_sample(0.025)
pcd_tree = o3d.geometry.KDTreeFlann(pcd)
distances = []
for i, point in enumerate(pcd.points):
[count, vec1, vec2] = pcd_tree.search_knn_vector_3d(point, 2)
distances.append(np.sqrt(vec2[1]))
etai = np.median(distances)
etas.append(etai)
return np.median(etas) | a6af243ebeb37e046e9f23c86080822bff4f490d | 19,802 |
def sort_ipv4_addresses_with_mask(ip_address_iterable):
"""
Sort IPv4 addresses in CIDR notation
| :param iter ip_address_iterable: An iterable container of IPv4 CIDR notated addresses
| :return list : A sorted list of IPv4 CIDR notated addresses
"""
return sorted(
ip_address_iterable,
key=lambda addr: (
int(addr.split('.')[0]),
int(addr.split('.')[1]),
int(addr.split('.')[2]),
int(addr.split('.')[3].split('/')[0]),
int(addr.split('.')[3].split('/')[1])
)
) | 97517b2518b81cb8ce4cfca19c5512dae6bae686 | 19,803 |
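# Usage sketch for sort_ipv4_addresses_with_mask above.
addrs = ['10.0.0.0/24', '192.168.1.5/32', '10.0.0.0/8', '10.2.0.0/16']
print(sort_ipv4_addresses_with_mask(addrs))
# ['10.0.0.0/8', '10.0.0.0/24', '10.2.0.0/16', '192.168.1.5/32']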
def _subattribute_from_json(data: JsonDict) -> SubAttribute:
"""Make a SubAttribute from JSON data (deserialize)
Args:
data: JSON data received from Tamr server.
"""
cp = deepcopy(data)
d = {}
d["name"] = cp["name"]
d["is_nullable"] = cp["isNullable"]
d["type"] = from_json(cp["type"])
return SubAttribute(**d) | 4fde9e1eb456fd42b8ad8e49ad893a62ba01eba4 | 19,804 |
def compare_asts(ast1, ast2):
"""Compare two ast trees. Return True if they are equal."""
# import leo.core.leoGlobals as g
# Compare the two parse trees.
try:
_compare_asts(ast1, ast2)
except AstNotEqual:
dump_ast(ast1, tag='AST BEFORE')
dump_ast(ast2, tag='AST AFTER')
if g.unitTesting:
raise
return False
except Exception:
g.warning(f"Unexpected exception")
g.es_exception()
return False
return True | 32ab70f1fa31f9ae6cab4e9f5ba91ff71a9e79f8 | 19,805 |
import json
def shit():
"""Ready to go deep into the shit?
Parse --data from -X POST -H 'Content-Type: application/json'
and send it to the space background
"""
try:
body = json.loads(request.data)
except Exception as e:
abort(400, e)
if not body:
abort(400, "Missing data")
if "title" not in body:
abort(400, "Missing `title` param")
if "artist" not in body:
abort(400, "Missing `artist` param")
if "client_id" not in body:
"""client_id is used to send back
the lyriks through the Notifier aka Flash.
"""
abort(400, "Missing `client_id` param")
# send data to our Background Worker aka Iron Rogue
rogue(body["title"], body["artist"], body["client_id"])
return make_response(jsonify({
"code": 202,
"message": "request accepted and send into the shit"
}), 202) | bdc435566aeaafac8144775188478cb28724802b | 19,806 |
def clear_settings(site_name): # untested - do I need/want this?
"""update settings to empty dict instead of initialized)
"""
return update_settings(site_name, {}) | a6cbd9bc43ce5bc7159bc75d4cab8c703d73e8cd | 19,807 |
import numpy as np
def reorder_jmultis_det_terms(jmulti_output, constant, seasons):
"""
In case of seasonal terms and a trend term we have to reorder them to make
the outputs from JMulTi and sm2 comparable.
JMulTi's ordering is: [constant], [seasonal terms], [trend term] while
in sm2 it is: [constant], [trend term], [seasonal terms]
Parameters
----------
jmulti_output : ndarray (neqs x number_of_deterministic_terms)
constant : bool
Indicates whether there is a constant term or not in jmulti_output.
seasons : int
Number of seasons in the model. That means there are seasons-1
columns for seasonal terms in jmulti_output
Returns
-------
reordered : ndarray (neqs x number_of_deterministic_terms)
jmulti_output reordered such that the order of deterministic terms
matches that of sm2.
"""
if seasons == 0:
return jmulti_output
constant = int(constant)
const_column = jmulti_output[:, :constant]
season_columns = jmulti_output[:, constant:constant + seasons - 1].copy()
trend_columns = jmulti_output[:, constant + seasons - 1:].copy()
return np.hstack((const_column,
trend_columns,
season_columns)) | 91fd48e14addf264f00a6e898af8d934bcd84cca | 19,808 |
def test_require_gdal_version_param_values():
"""Parameter values are allowed for all versions >= 1.0"""
for values in [('bar',), ['bar'], {'bar'}]:
@require_gdal_version('1.0', param='foo', values=values)
def a(foo=None):
return foo
assert a() is None
assert a('bar') == 'bar'
assert a(foo='bar') == 'bar' | de11d8f6f0720c1b3ef6aa957f8386e8436b1e73 | 19,809 |
def nav_get_element(nav_expr, side, dts, xule_context):
"""Get the element or set of elements on the from or to side of a navigation expression'
This determines the from/to elements of a navigation expression. If the navigation expression includes the from/to component, this will be evaluated.
The result can be a qname, concept or a set/list of qname or concepts.
Arguments:
nav_expr (dictionary): The navigation expression AST node
side (string): Either 'from' or 'to'.
xule_context (XuleRuleContext): The processing context
Returns:
None - indicates that the side is not in the navigation expression
set of concepts - the set of the concepts if the side evaluates to a set or list of concept/concepts
"""
if side in nav_expr:
side_value = evaluate(nav_expr[side], xule_context)
if side_value.type == 'qname':
concept = XuleProperties.get_concept(dts, side_value.value)
if concept is None:
return set()
else:
return {concept, }
elif side_value.type == 'concept':
return {side_value.value, }
elif side_value.type in ('set', 'list'):
concepts = set()
for item in side_value.value:
if item.type == 'qname':
concept = XuleProperties.get_concept(dts, item.value)
if concept is not None:
concepts.add(concept)
elif item.type == 'concept':
concepts.add(item.value)
else:
raise XuleProcessingError(_(
"In navigation, expecting a collection of concepts or concepts, but found {}.".format(
item.type)))
return concepts
else:
raise XuleProcessingError(
_("In navigation, expecting a concept or qname, but found {}.".format(side_value.type)))
else:
return None | db29b0c2e7832c2b386dd602c77d18a37c2c1307 | 19,810 |
import pandas as pd
def do_pivot(df: pd.DataFrame, row_name: str, col_name: str, metric_name: str):
"""
Works with df.pivot, except preserves the ordering of the rows and columns
in the pivoted dataframe
"""
original_row_indices = df[row_name].unique()
original_col_indices = df[col_name].unique()
pivoted = df.pivot(index=row_name, columns=col_name, values=metric_name)
pivoted = pivoted[original_col_indices]
pivoted = pivoted.reindex(original_row_indices).reset_index()
pivoted.columns.name = None
return pivoted | 70df87eb7d1ca19116ec04854bee635a66f02908 | 19,811 |
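# Illustrative usage sketch: unlike bare df.pivot, the result keeps the input
# row order (b before a) and column order (recall before precision).
example = pd.DataFrame({
    "model": ["b", "b", "a", "a"],
    "metric": ["recall", "precision", "recall", "precision"],
    "score": [0.7, 0.8, 0.9, 0.6],
})
print(do_pivot(example, "model", "metric", "score"))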
def batch_norm_for_fc(inputs, is_training, bn_decay, scope):
""" Batch normalization on FC data.
Args:
inputs: Tensor, 2D BxC input
is_training: boolean tf.Variable, true indicates training phase
bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
return batch_norm_template(inputs, is_training, scope, [0,], bn_decay) | e6a13c50b021785ddcb278c449ba6f9be9271106 | 19,812 |
def stat(file_name):
"""
Read information from a FreeSurfer stats file.
Read information from a FreeSurfer stats file, e.g., `subject/stats/lh.aparc.stats` or `aseg.stats`. A stats file is a text file that contains a data table and various meta data.
Parameters
----------
file_name: string
The path to the stats file.
Returns
-------
dictionary of strings (includes nested sub dicts)
The result dictionary, containing the following 4 keys:
- 'ignored_lines': list of strings. The list of lines that were not parsed in a special way. This is raw data.
- 'measures': string list of dimension (n, m) if there are n measures with m properties each stored in the stats file.
- 'table_data': string list of dimension (i, j) when there are i lines containing j values each in the table stored in the stats file. You may want to convert the columns to the proper data types and put the result into several numpy arrays or a single Pandas data frame.
- 'table_column_headers': string list. The names for the columns for the table_data. This information is parsed from the table_meta_data and given here for convenience.
- 'table_meta_data': dictionary. The full table_meta_data. Stores properties in key, value sub dictionaries. For simple table properties, the dictionaries are keys of the returned dictionary. The only exception is the information on the table columns (header data). This information can be found under the key `column_info_`, which contains one dictionary for each column. In these dictionaries, data is stored as explained for simple table properties.
Examples
--------
Read the `aseg.stats` file for a subject:
>>> import brainload as bl
>>> stats = bl.stats('/path/to/study/subject1/stats/aseg.stats')
Collect some data, just to show the data structures.
>>> print(len(stats['measures'])) # Will print the number of measures.
>>> print("|".join(stats['measures'][0])) # Print all data on the first measure.
Now lets print the table_data:
>>> num_data_rows = len(stats['table_data'])
>>> num_entries_per_row = len(stats['table_data'][0])
And get some information on the table columns (the table header):
>>> print(stats['table_meta_data']['NTableCols']) # will print "10" (from a simple table property stored directly in the dictionary).
Get the names of all the data columns:
>>> print(",".join(stats['table_column_headers']))
Get the name of the first column:
>>> first_column_name = stats['table_column_headers'][0]
More detailed information on the individual columns can be found under the special `column_info_` key if needed:
>>> column2_info_dict = stats['table_meta_data']['column_info_']['2']
>>> print(column2_info_dict['some_key']) # will print the value
Note that all data is returned as string type, you will need to covert it to float (or whatever) yourself.
"""
lines = nit._read_text_file_lines(file_name)
return _parse_stats_lines(lines) | bf64bf488d34a32eebcb66472a3fa567bf5a368d | 19,813 |
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer.
XXX Temporary until marshal's long functions are exposed.
"""
x = int_bytes[0]
x |= int_bytes[1] << 8
x |= int_bytes[2] << 16
x |= int_bytes[3] << 24
return x | 7b40bf05c9d47c7921b1377f0d2235c483e6ba2e | 19,814 |
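# Illustrative sketch: four little-endian bytes decode to one integer.
print(_r_long(bytes([0x78, 0x56, 0x34, 0x12])))  # 305419896 == 0x12345678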
from typing import Union
from typing import Dict
from typing import Any
from typing import Iterable
from typing import Type
from typing import Optional
import dataclasses
from typing import TypeVar
from typing import cast
def parse_model(data: Union[Dict[str, Any], Iterable[Any], Any],
cls: Union[Type[TModel], Type[Any]],
rename_keys: Optional[Dict[str, str]] = None) \
-> Union[TModel, Any]:
"""Instantiates an object of the provided class cls for a provided mapping.
Instantiates an object of a class specifying a model for the provided
mapping. An entry in the mapping must be provided for all non-optional
attribute of the class. Keys are expected to be in CapsWords, matching
the snake_case corresponding class attributes. Any additional entries
found in the mapping that do not correspond to class attributes are
ignored.
Args:
data: Dictionary containing pairs with the names of the attributes
and their respective values provided to instantiate the model
class.
cls: The model class to instantiate.
rename_keys: Key names to rename to match model attribute names,
used when an automated translation of the name from CapsWords
to snake_case is not sufficient. Renaming must provide the name
in CapsWords.
Returns:
The instantiated model class object.
Raises:
TypeError: Cannot parse the value of a class attribute to the
appropriate type.
NotImplementedError: The type of a class attribute is not supported.
"""
if cls is not NoneType and dataclasses.is_dataclass(cls) \
and isinstance(data, dict):
if rename_keys:
for k, r, in rename_keys.items():
if k in data:
data[r] = data.pop(k)
field_names = set(f.name for f in dataclasses.fields(cls))
field_types = {f.name: f.type for f in dataclasses.fields(cls)}
parsed_data: Dict[str, Any] = {}
for key, value in data.items():
key = _to_snake_case(key)
if key in field_names:
field_type = field_types[key]
parsed_data[key] = parse_model(value, field_type)
args = []
for f in dataclasses.fields(cls):
if f.name in parsed_data:
a = parsed_data[f.name]
elif f.default is not dataclasses.MISSING:
a = f.default
else:
fc = getattr(f, 'default_factory')
if fc is not dataclasses.MISSING:
a = fc()
else:
raise TypeError(f'Cannot initialize class {cls}. '
f'Missing required parameter {f.name}')
args.append(a)
return cls(*args)
field_type_origin = getattr(cls, '__origin__', None)
if field_type_origin is Union:
for candidate_cls in getattr(cls, '__args__', []):
try:
return parse_model(data, candidate_cls)
except (TypeError, ValueError):
pass
raise ValueError(f'Cannot parse value {data} as {cls}')
if field_type_origin is list and isinstance(data, Iterable):
list_field_type = getattr(cls, '__args__', [])[0]
if type(list_field_type) is TypeVar:
return list(data)
return [parse_model(v, list_field_type) for v in data]
if field_type_origin is tuple and isinstance(data, Iterable):
tuple_field_types = getattr(cls, '__args__', [])
if not tuple_field_types:
return tuple(data)
return tuple(parse_model(v, tuple_field_types[0]) for v in data)
parsable_classes = tuple(getattr(ParsableClass, '__args__', []))
if cls in parsable_classes:
return _parse_class(data, cast(Type[ParsableClass], cls))
raise NotImplementedError(f'Cannot parse data {data} as {cls}.') | 85ba92ac4c3e9df8e96612017b94db73ea53d19e | 19,815 |
import numpy as np
def findTopEyelid(imsz, imageiris, irl, icl, rowp, rp, ret_top=None):
"""
Description:
Mask for the top eyelid region.
Input:
imsz - Size of the eye image.
imageiris - Image of the iris region.
irl -
icl -
rowp - y-coordinate of the inner circle centre.
rp - radius of the inner circle centre.
ret_top - Just used for returning result when using multiprocess.
Output:
mask - Map of noise that will be masked with NaN values.
"""
topeyelid = imageiris[0: rowp - irl - rp, :]
lines = findline(topeyelid)
mask = np.zeros(imsz, dtype=float)
if lines.size > 0:
xl, yl = linecoords(lines, topeyelid.shape)
yl = np.round(yl + irl - 1).astype(int)
xl = np.round(xl + icl - 1).astype(int)
yla = np.max(yl)
y2 = np.arange(yla)
mask[yl, xl] = np.nan
grid = np.meshgrid(y2, xl)
mask[grid] = np.nan
# Return
if ret_top is not None:
ret_top[0] = mask
return mask | 9c01e0966a1800ed76f5370cbc382a801af08d67 | 19,816 |
def script_with_queue_path(tmpdir):
"""
Pytest fixture to return a path to a script with main() which takes
a queue and procedure as arguments and adds procedure process ID to queue.
"""
path = tmpdir.join("script_with_queue.py")
path.write(
"""
def main(queue, procedure):
queue.put(procedure.pid)
"""
)
return f"file://{str(path)}" | 7c2c2b4c308f91d951496c53c9bdda214f64c776 | 19,817 |
import configparser
def readini(inifile):
""" This function will read in data from a configureation file.
Inputs
inifile- The name of the configuration file.
Outputs
params - A dictionary with keys from INIOPTIONS that holds all of
the plotting parameters.
"""
if inifile is None:
return
config = configparser.ConfigParser()
config.read(inifile)
params={i:None for i in INIOPTIONS}
# Read in data from ini file
for ip in config.options('params'):
# get the original param name
rname = config.get('paramsnames',ip)
# get the parameter and split it up
params[rname] = config.get('params',ip)
params[rname]=params[rname].split(" ")
# If its a single object try to
if len(params[rname])==1:
params[rname]=params[rname][0]
try:
params[rname]=float(params[rname])
except Exception:
pass
else:
for a in range(len(params[rname])):
try:
params[rname][a]=float(params[rname][a])
except Exception:
pass
# turn the time bounds to time stamps
if not params['timebounds']is None:
timelist = params['timebounds']
params['timebounds']=str2posix(timelist)
# which times will have names
if params['TextList'] is None:
params['TextList']=[]
# change param height to a list of lists
if not params['paramheight'] is None:
l1 = params['paramheight'][::2]
l2 = params['paramheight'][1::2]
params['paramheight']=[[i,j] for i,j in zip(l1,l2)]
if not params['paramlim'] is None:
l1 = params['paramlim'][::2]
l2 = params['paramlim'][1::2]
params['paramlim']=[[i,j] for i,j in zip(l1,l2)]
# Default for reinterp is false
if params['reinterp']is None:
params['reinterp']=False
else:
params['reinterp'] = params['reinterp'].lower()=='yes'
return params | eb5800f00cc8e58557e11fb9ff525e7f407c9eab | 19,818 |
def get_param_store():
"""
Returns the ParamStore
"""
return _PYRO_PARAM_STORE | d71ab10f2029fab735268956590094d8c94dd150 | 19,819 |
def secret_add(secret):
"""
Return a lambda that adds the argument from the lambda to the argument passed into secret_add.
:param secret: secret number to add (integer)
:return: lambda that takes a number and adds it to the secret
"""
return lambda addend: secret + addend | 151f1cff9f0e0bbb43650d63592ba0c2cb05611e | 19,820 |
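# Illustrative sketch: the returned lambda closes over `secret`.
add_five = secret_add(5)
print(add_five(3))  # 8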
import numpy as np
def morseToBoolArr(code, sps, wpm, fs=None):
""" morse code to boolean array
Args:
code (str): morse code
sps: Samples per second
wpm: Words per minute
fs: Farnsworth speed
Returns:
boolean numpy array
"""
dps = wpmToDps(wpm) # dots per second
baseSampleCount = sps / dps
samplesPerDot = int(round(baseSampleCount))
samplesPerDash = int(round(baseSampleCount * DASH_WIDTH))
samplesBetweenElements = int(round(baseSampleCount))
farnsworthScale = farnsworthScaleFactor(wpm, fs)
samplesBetweenLetters = int(round(baseSampleCount * CHAR_SPACE * farnsworthScale))
samplesBetweenWords = int(round(baseSampleCount * WORD_SPACE * farnsworthScale))
dotArr = np.ones(samplesPerDot, dtype=bool)
dashArr = np.ones(samplesPerDash, dtype=bool)
eGapArr = np.zeros(samplesBetweenElements, dtype=bool)
cGapArr = np.zeros(samplesBetweenLetters, dtype=bool)
wGapArr = np.zeros(samplesBetweenWords, dtype=bool)
pieces = []
prevWasSpace = False
prevWasElement = False
for c in code:
if (c == DOT or c == DASH) and prevWasElement:
pieces.append(eGapArr)
if c == DOT:
pieces.append(dotArr)
prevWasSpace, prevWasElement = False, True
elif c == DASH:
pieces.append(dashArr)
prevWasSpace, prevWasElement = False, True
else: # Assume the char is a space otherwise
if prevWasSpace:
pieces[-1] = wGapArr
else:
pieces.append(cGapArr)
prevWasSpace, prevWasElement = True, False
return np.concatenate(pieces) | 90b4225a2a9979ac7f813a1f964b64ef9310ee23 | 19,821 |
import csv
from numbers import Number
from numpy import array
from scipy.interpolate import interp1d
def LaserOptikMirrorTransmission(interpolated_wavelengths,refractive_index = "100", shift_spectrum=7,rescale_factor=0.622222):
"""
Can be used for any wavelengths in the range 400 to 800 (UNITS: nm)
Uses supplied calculation from LaserOptik
Interpolate over selected wavelengths: returns a function which takes wavelength (nm) as argument
Shifts transmission spectrum with calibration still to come, likewise for "rescale_factor"
"refractive_index" argument is only for backwards compatibility
"""
reflectivity_folder = data_root_folder + folder_separator+ "calibration_data" + folder_separator
#reflectivity_folder = "./"
reflectivity_filename = "LaserOptik20160129_Theorie_T.DAT"
fname = reflectivity_folder+reflectivity_filename
res = csv.reader(open(fname), delimiter='\t')
refl_text = [x for x in res][1:] #removes column headings
original_wavelengths = array([float(l[0]) for l in refl_text])
original_transmissions = array([float(l[1]) for l in refl_text])
original_reflectivities = 1-original_transmissions
#
wavelength_shift = 0
if shift_spectrum == "planar": #shift to be measured
wavelength_shift = 0
elif shift_spectrum == "spherical":
wavelength_shift = 0 # shift to be measured
elif isinstance(shift_spectrum,Number):
wavelength_shift = shift_spectrum
#
interpolated_transmission_func = interp1d(original_wavelengths,original_transmissions)
interpolated_transmissions = interpolated_transmission_func(interpolated_wavelengths + wavelength_shift)
#Transmission to be calibrated at at least one narrow wavelength
#Assume transmission scales with this factor at all wavelengths [not well justified assumption]
interpolated_transmissions = interpolated_transmissions / rescale_factor
return interpolated_transmissions | a3eafd7a788ddcfdedd8e25081f6e7cc1fd03cc5 | 19,822 |
import math
def menu(prompt, titles, cols=1, col_by_col=True, exc_on_cancel=None,
caption=None, default=None):
"""Show a simple menu.
If the input is not allowed the prompt will be shown again. The
input can be cancelled with EOF (``^D``).
The caller has to take care that the menu will fit in the terminal.
::
def update():
...
def sort(desc=True, duration=True):
...
items = (
('Update', update),
('Sort duration desc', sort),
('Sort duration asc', sort, False),
('Sort size desc', sort, True, False),
('Sort size asc', sort, False, False),
)
i = menu('> ', tuple(x[0] for x in items))
print()
if i is not None:
items[i][1](*items[i][2:])
.. raw:: html
<pre style="color:#FFFFFF;background-color:#000000">[1] Update
[2] Sort duration desc
[3] Sort duration asc
[4] Sort size desc
[5] Sort size asc
> </pre>
:param str prompt: the prompt
:param tuple titles: the titles of the menu options
:param int cols: number of columns
:param bool col_by_col: if ``True`` the menu will be filled
column-by-column, otherwise row-by-row
:param bool exc_on_cancel: if ``True`` an EOF will cause an Exception;
if ``None`` the value of ``exception_on_cancel``
will be used
:param str caption: caption for the menu
:param int default: number of the default menu option
:return: index of the selected option in ``titles`` or None if cancelled
and ``exc_on_cancel=False``
:rtype: int or None
:raises EOFError: if input was cancelled and ``exc_on_cancel=True``
:raises TypeError: if ``titles`` is not a tuple or ``default`` is not
an integer
.. versionadded:: 0.4.0
.. versionchanged:: 0.6.0
Add parameter ``caption``
.. versionchanged:: 0.17.0
Add parameter ``default``
"""
if default is not None:
check_type(default, int, 'default')
if not (0 < default <= (len(titles))):
raise ValueError(
f'default must be > 0 and <= {len(titles)}, got {default}')
check_type(titles, tuple, 'titles')
rows = math.ceil(len(titles) / cols)
num_width = len(str(len(titles)))
title_width = max(map(len, titles))
if col_by_col:
indices = (x + rows * y for x in range(rows) for y in range(cols))
else:
indices = range(len(titles))
lines = []
row = []
for cnt, idx in enumerate(indices, 1):
if idx < len(titles):
row.append(f'[{idx + 1:{num_width}}] {titles[idx]:{title_width}}')
if cnt % cols == 0:
lines.append(' '.join(row))
lines.append('\n')
row.clear()
if row:
lines.append(' '.join(row))
lines.append('\n')
if caption:
width = max(len(caption), max(map(len, lines)))
text = caption.center(width) + '\n' + '-' * width + '\n'
else:
text = ''
text += ''.join(lines) + prompt
def f(s):
i = int(s)
if 0 < i <= len(titles):
return i - 1
raise ValueError
return read(text, check=f, exc_on_cancel=exc_on_cancel,
default=str(default)) | d4d24cddf40f314c31415685c4eecdc51da2aca2 | 19,823 |
from typing import Dict
from typing import Any
def dict_to_annotation(annotation_dict: Dict[str, Any], ignore_extra_keys = True) -> Annotation:
"""Calls specific Category object constructor based on the structure of the `annotation_dict`.
Args:
annotation_dict (Dict[str, Any]): One of COCO Annotation dictionaries.
ignore_extra_keys (bool, optional): Ignore the fact dictionary has more fields than specified in dataset. Defaults to True.
Raises:
ValueError: If `annotation_dict` has unspecified structure.
Returns:
Annotation: Dataclass category generated from the `annotation_dict`.
"""
if set(DICT_TO_ANNOTATION_MAP['object_detection']).issubset(annotation_dict.keys()):
return ObjectDetectionAnnotation.from_dict(annotation_dict, ignore_extra_keys)
elif set(DICT_TO_ANNOTATION_MAP['keypoint_detection']).issubset(annotation_dict.keys()):
return KeypointDetectionAnnotation.from_dict(annotation_dict, ignore_extra_keys)
elif set(DICT_TO_ANNOTATION_MAP['panoptic_segmentation']).issubset(annotation_dict.keys()):
return PanopticSegmentationAnnotation.from_dict(annotation_dict, ignore_extra_keys)
elif set(DICT_TO_ANNOTATION_MAP['image_captioning']).issubset(annotation_dict.keys()):
return ImageCaptioningAnnotation.from_dict(annotation_dict, ignore_extra_keys)
elif set(DICT_TO_ANNOTATION_MAP['dense_pose']).issubset(annotation_dict.keys()):
return DensePoseAnnotation.from_dict(annotation_dict, ignore_extra_keys)
raise ValueError(
"Unexpected annotation structure. Consider manually creating COCO dataset."
"\nAnd extending one of existing objects or create new following one of the Protocols structure."
) | 846c353ab4d15a0558c5cabced30e14a57b5ed64 | 19,824 |
def getAggregation(values):
"""
Produces a dictionary mapping raw states to aggregated states in the form
{raw_state:aggregated_state}
"""
unique_values = list(set(values))
aggregation = {i:unique_values.index(v) for i, v in enumerate(values)}
aggregation['n'] = len(unique_values)
return aggregation | 606bee7c7055b8f2a95bc061dc1883713198b506 | 19,825 |
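# Illustrative sketch: positions holding equal raw values map to the same
# aggregated state; the exact state indices depend on set iteration order.
agg = getAggregation([10, 10, 20])
print(agg["n"], agg[0] == agg[1], agg[0] == agg[2])  # 2 True False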
import boto3
from botocore import UNSIGNED
from botocore.config import Config
def create_anonymous_client():
"""Creates an anonymous s3 client. This is useful if you need to read an object created by an anonymous user, which
the normal client won't have access to.
"""
return boto3.client('s3', config=Config(signature_version=UNSIGNED)) | 1c321d2c42b41b19a4fad66e67199f63c2b04338 | 19,826 |
import numpy as np
def predictionTopK(pdt, k):
"""Top-k entries of the prediction values.
@param pdt prediction results, as a numpy matrix
@param k number of top results to keep
@return top-k results, as a numpy matrix
"""
m, n = np.shape(pdt)
ret = []
for i in range(m):
curNums = pdt[i]
tmp = topK(curNums.tolist()[0], k)
ret.append(tmp)
return np.mat(ret) | 57740bb3e2f4521d14273194dc024e8a91347241 | 19,827 |
import re
def parseCsv(file_content):
"""
parseCsv
========
parses a string file from Shimadzu analysis, returning a
dictionary with current, livetime and sample ID
Parameters
----------
file_content : str
shimadzu output csv content
Returns
-------
dic
dic with irradiation parameters
"""
irradiation_parameters = {}
irradiation_parameters['sample'] = file_content.split(',')[0].split(':')[1].replace("\"", "").strip()
irradiation_parameters['current'] = re.sub(' +',' ',file_content.split(',')[12]).split(' ')[3]
irradiation_parameters['current'] = int(re.findall(r'\d+', irradiation_parameters['current'])[0])
irradiation_parameters['livetime'] = int(re.sub(' +',' ',file_content.split(',')[12]).split(' ')[13])
return(irradiation_parameters) | cc20a906c23093994ce53358d92453cd4a9ab459 | 19,831 |
import pandas as pd
def s_from_v(speed, time=None):
"""
Calculate {distance} from {speed}
The chosen scheme: speed at [i] represents the distance from [i] to [i+1].
This means distance.diff() and time.diff() are shifted by one index from
speed. I have chosen to extrapolate the position at the first index by
assuming we start at a cumulative distance of 0.
Args:
{speed_arg}
{time_arg} Default None.
Returns:
{distance_returns}
"""
if time is None:
time = pd.Series([i for i in range(len(speed))])
# Should this assume the index at position 0 is 0, or should this
# assume the only NaN is at position 0? Assumptions either way...
return (speed.shift(1) * time.diff()).cumsum().fillna(0) | 42fa996371af55c97235c2260f1c7c873e7e9e5b | 19,833 |
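# Illustrative sketch with the default unit-spaced time index; fillna(0) pins
# the first cumulative distance to zero.
speed = pd.Series([0.0, 2.0, 2.0, 3.0])
print(s_from_v(speed).tolist())  # [0.0, 0.0, 2.0, 4.0]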
import netCDF4
import numpy as np
def fake_categorize_file(tmpdir_factory):
"""Creates a simple categorize for testing."""
file_name = tmpdir_factory.mktemp("data").join("categorize.nc")
root_grp = netCDF4.Dataset(file_name, "w", format="NETCDF4_CLASSIC")
n_points = 7
root_grp.createDimension('time', n_points)
var = root_grp.createVariable('time', 'f8', 'time')
var[:] = np.arange(n_points)
var = root_grp.createVariable('category_bits', 'i4', 'time')
var[:] = [0, 1, 2, 4, 8, 16, 32]
var = root_grp.createVariable('quality_bits', 'i4', 'time')
var[:] = [0, 1, 2, 4, 8, 16, 32]
root_grp.close()
return file_name | 17b8e106f19200291bf2a77805542ecf2bb395fe | 19,834 |
import numpy as np
def load_unpack_npz(path):
"""
Simple helper function to circumvent hardcoding of
keyword arguments for NumPy zip loading and saving.
This assumes that the first entry of the zipped array
contains the keys (in-order) for the rest of the array.
Parameters
----------
path : string
Path to load the NumPy zip file
Returns
----------
data : dict
Unpacked dictionary with specified keys inserted
"""
# Load the NumPy zip file at the path
data = dict(np.load(path, allow_pickle=True))
# Extract the key names stored in the dictionary
keys = data.pop(list(data.keys())[0])
# Obtain the names of the saved keys
old_keys = list(data.keys())
# Re-add all of the entries of the data with the specified keys
for i in range(len(keys)):
data[keys[i]] = data.pop(old_keys[i])
return data | ae5d573a480a87d3c09e8de783d9b94558870c15 | 19,836 |
import re
def is_valid_email(email):
"""
Check if a string is a valid email.
Returns a Boolean.
"""
try:
return re.match(EMAIL_RE, email) is not None
except TypeError:
return False | 736b3f141e6f3a99644d51c672738d63d64d604e | 19,837 |
import re
def _create_matcher(utterance):
"""Create a regex that matches the utterance."""
# Split utterance into parts that are type: NORMAL, GROUP or OPTIONAL
# Pattern matches (GROUP|OPTIONAL): Change light to [the color] {name}
parts = re.split(r'({\w+}|\[[\w\s]+\] *)', utterance)
# Pattern to extract name from GROUP part. Matches {name}
group_matcher = re.compile(r'{(\w+)}')
# Pattern to extract text from OPTIONAL part. Matches [the color]
optional_matcher = re.compile(r'\[([\w ]+)\] *')
pattern = ['^']
for part in parts:
group_match = group_matcher.match(part)
optional_match = optional_matcher.match(part)
# Normal part
if group_match is None and optional_match is None:
pattern.append(part)
continue
# Group part
if group_match is not None:
pattern.append(
r'(?P<{}>[\w ]+?)\s*'.format(group_match.groups()[0]))
# Optional part
elif optional_match is not None:
pattern.append(r'(?:{} *)?'.format(optional_match.groups()[0]))
pattern.append('$')
return re.compile(''.join(pattern), re.I) | ecf126488f827c65379efc58794136499dfa87dd | 19,838 |
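# Illustrative sketch: the optional "[the color]" part may be omitted and
# "{name}" becomes a named capture group.
matcher = _create_matcher("Change light to [the color] {name}")
print(matcher.match("Change light to the color red").group("name"))  # red
print(matcher.match("Change light to blue").group("name"))           # blue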
def top_compartment_air_CO2(setpoints: Setpoints, states: States, weather: Weather):
"""
Equation 2.13 / 8.13
cap_CO2_Top * top_CO2 = mass_CO2_flux_AirTop - mass_CO2_flux_TopOut
"""
cap_CO2_Top = Coefficients.Construction.greenhouse_height - Coefficients.Construction.air_height # Note: line 46 / setDepParams / GreenLight
mass_CO2_flux_AirTop = greenhouse_air_and_above_thermal_screen_CO2_flux(states, setpoints, weather)
mass_CO2_flux_TopOut = above_thermal_screen_and_outdoor_CO2_flux(states, setpoints, weather)
return (mass_CO2_flux_AirTop - mass_CO2_flux_TopOut) / cap_CO2_Top | 7f294de2f669c2224ccbd09d8200e9b91ddd6ebe | 19,840 |
import numpy as np
def coning_sculling(gyro, accel, order=1):
"""Apply coning and sculling corrections to inertial readings.
The algorithm assumes a polynomial model for the angular velocity and the
specific force, fitting coefficients by considering previous time
intervals. The algorithm for a linear approximation is well known and
described in [1]_ and [2]_.
The accelerometer readings are also corrected for body frame rotation
during a sampling period.
Parameters
----------
gyro : array_like, shape (n_readings, 3)
Gyro readings.
accel : array_like, shape (n_readings, 3)
Accelerometer readings.
order : {0, 1, 2}, optional
Angular velocity and specific force polynomial model order.
Note that 0 means not applying non-commutative corrections at all.
Default is 1.
Returns
-------
theta : ndarray, shape (n_readings, 3)
Estimated rotation vectors.
dv : ndarray, shape (n_readings, 3)
Estimated velocity increments.
References
----------
.. [1] P. G. Savage, "Strapdown Inertial Navigation Integration Algorithm
Design Part 1: Attitude Algorithms", Journal of Guidance, Control,
and Dynamics 1998, Vol. 21, no. 2.
.. [2] P. G. Savage, "Strapdown Inertial Navigation Integration Algorithm
Design Part 2: Velocity and Position Algorithms", Journal of
Guidance, Control, and Dynamics 1998, Vol. 21, no. 2.
"""
if order not in [0, 1, 2]:
raise ValueError("`order` must be 1, 2 or 3.")
gyro = np.asarray(gyro)
accel = np.asarray(accel)
if order == 0:
coning = 0
sculling = 0
elif order == 1:
coning = np.vstack((np.zeros(3), np.cross(gyro[:-1], gyro[1:]) / 12))
sculling = np.vstack((np.zeros(3),
(np.cross(gyro[:-1], accel[1:]) +
np.cross(accel[:-1], gyro[1:])) / 12))
elif order == 2:
coning = (-121 * np.cross(gyro[2:], gyro[1:-1]) +
31 * np.cross(gyro[2:], gyro[:-2]) -
np.cross(gyro[1:-1], gyro[:-2])) / 720
sculling = (-121 * np.cross(gyro[2:], accel[1:-1]) +
31 * np.cross(gyro[2:], accel[:-2]) -
np.cross(gyro[1:-1], accel[:-2]) -
121 * np.cross(accel[2:], gyro[1:-1]) +
31 * np.cross(accel[2:], gyro[:-2]) -
np.cross(accel[1:-1], gyro[:-2])) / 720
coning = np.vstack((np.zeros((2, 3)), coning))
sculling = np.vstack((np.zeros((2, 3)), sculling))
else:
assert False
rc = 0.5 * np.cross(gyro, accel)
return gyro + coning, accel + sculling + rc | 61f57383488d2d42cb6d63fec5d6d99faa8e2cf2 | 19,841 |
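# Illustrative sketch: inputs are per-sample angular/velocity increments; with
# constant readings the coning and sculling cross products vanish.
gyro = np.full((5, 3), 1e-4)
accel = np.tile([0.0, 0.0, 9.81e-2], (5, 1))
theta, dv = coning_sculling(gyro, accel, order=1)
print(theta.shape, dv.shape)  # (5, 3) (5, 3)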
def add():
"""Add a task.
:url: /add/
:returns: job
"""
job = scheduler.add_job(
func=task2,
trigger="interval",
seconds=10,
id="test job 2",
name="test job 2",
replace_existing=True,
)
return "%s added!" % job.name | 88ef667e05a37ec190ba6e01df23fd617821afe0 | 19,842 |
import numpy as np
def magnitude(x: float, y: float, z: float) -> float:
""" Magnitude of x, y, z acceleration √(x²+y²+z²)
Dispatch <float>
Args:
x (float): X-axis of acceleration
y (float): Y-axis of acceleration
z (float): Z-axis of acceleration
Returns:
float: Magnitude of acceleration
Dispatch <pd.DataFrame>
Args:
df (pd.DataFrame): Dataframe containing acceleration columns
xcol (str): X-axis column name, default 'x'
ycol (str): Y-axis column name, default 'y'
zcol (str): Z-axis column name, default 'z'
Returns:
float: Magnitude of acceleration
"""
return np.sqrt(x**2 + y**2 + z**2) | ecb0543b52c385a9b294e87e4b17346b9705f3f8 | 19,843 |
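# Illustrative sketch: a 3-4-12 triple has magnitude 13.
print(magnitude(3.0, 4.0, 12.0))  # 13.0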
def dauth( bot, input ):
"""Toggle whether channel should be auth enabled by default"""
if not input.admin:
return False
if not input.origin[0] == ID.HON_SC_CHANNEL_MSG:
bot.reply("Run me from channel intended for the default auth!")
else:
cname = bot.id2chan[input.origin[2]]
authed = False
if cname in bot.config.default_auth:
bot.config.set_del( 'default_auth', cname )
else:
bot.config.set_add( 'default_auth', cname )
authed = True
bot.reply( "Default auth in this channel is now " + ( authed and "enabled" or "disabled" ) ) | a031e1feb45956503c30ddacd40d59a5f65f30ca | 19,844 |
import json
def write_to_disk(func):
"""
decorator used to write the data into disk during each checkpoint to help us to resume the operation
Args:
func:
Returns:
"""
def wrapper(*args, **kwargs):
func(*args, **kwargs)
with open("checkpoint.json", "r") as f:
f.write(json.dumps(args[0]))
return wrapper | d3614b7b75adf40021c31263fbbcdfdda025d1a3 | 19,845 |
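# Illustrative sketch (assumes the decorated function's first positional
# argument is a JSON-serialisable dict, since the wrapper serialises args[0]).
@write_to_disk
def advance(state):
    state["step"] = state.get("step", 0) + 1

advance({"step": 1})  # runs advance, then dumps the dict to checkpoint.json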
def get_hourly_total_exchange_volume_in_period_from_db_trades(tc_db, start_time, end_time):
"""
Get the exchange volume for this exchange in this period from our saved version
of the trade history.
"""
# Watch this query for performance.
results = tc_db.query(
func.hour(EHTrade.timestamp),
func.sum(EHTrade._volume),
)\
.filter(EHTrade.timestamp >= start_time)\
.filter(EHTrade.timestamp < end_time)\
.group_by(func.hour(EHTrade.timestamp))\
.all()
formatted_results = []
for row in results:
hour = row[0]
timestamp = Delorean(start_time, 'UTC').next_hour(hour).datetime
volume = Money(row[1], 'BTC')
formatted_results.append([
timestamp,
volume,
])
formatted_results = sorted(formatted_results, key=lambda r: r[0])
return formatted_results | 7d46327e8c89d928d7e208d9a00d7b199345e636 | 19,847 |
import typing
import sqlalchemy.sql
def descending(sorting_func: typing.Any) -> typing.Any:
"""
Modify a sorting function to sort in descending order.
:param sorting_func: the original sorting function
:return: the modified sorting function
"""
def modified_sorting_func(current_columns, original_columns, sorting_func=sorting_func):
return sqlalchemy.sql.desc(sorting_func(current_columns, original_columns))
modified_sorting_func.require_original_columns = getattr(sorting_func, 'require_original_columns', False)
return modified_sorting_func | 30afe7202648950af5d415f3a2da3af5e9ab9d8f | 19,848 |
def circleColor(renderer, x, y, rad, color):
"""Draws an unfilled circle to the renderer with a given color.
If the rendering color has any transparency, blending will be enabled.
Args:
renderer (:obj:`SDL_Renderer`): The renderer to draw on.
x (int): The X coordinate of the center of the circle.
y (int): The Y coordinate of the center of the circle.
rad (int): The radius (in pixels) of the circle.
color (int): The color to draw with as a 32-bit ``0xRRGGBBAA`` integer
(e.g. ``0xFF0000FF`` for solid red).
Returns:
int: 0 on success, or -1 on failure.
"""
return _funcs["circleColor"](renderer, x, y, rad, color) | 6b3475f918cfb0867799710e5c53e5eea326a2c2 | 19,849 |
def _get_ref_init_error(dpde, error, **kwargs):
"""
Function that identifies where the continuous gyro begins, initiates and
then carries the static errors during the continuous modes.
"""
temp = [0.0]
for coeff, inc in zip(dpde[1:, 2], error.survey.inc_rad[1:]):
if inc > kwargs['header']['XY Static Gyro']['End Inc']:
temp.append(temp[-1])
else:
temp.append(coeff)
dpde[:, 2] = temp
return dpde | 45f4072139f007f65872223c624581b7433ea2aa | 19,850 |
def parse_internal_ballot(line):
"""
Parse an internal ballot line (with or without a trailing newline).
This function allows leading and trailing spaces. ValueError is
raised if one of the values does not parse to an integer.
An internal ballot line is a space-delimited string of integers of the
form--
"WEIGHT CHOICE1 CHOICE2 CHOICE3 ...".
"""
ints = parse_integer_line(line)
weight = next(ints)
choices = tuple(ints)
return weight, choices | 9433a496f26dd3511ff343686402e941a617f775 | 19,853 |
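# Illustrative sketch, assuming parse_integer_line (defined elsewhere in this
# module) yields the integers found on the line:
# parse_internal_ballot("2 1 3 2\n")  ->  (2, (1, 3, 2))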
def get_fratio(*args):
"""
"""
cmtr = get_cmtr(*args)
cme = get_cme(*args)
fratio = cmtr / cme
return fratio | 2a93d1211929346fc333837b711ed0eb01f34b2b | 19,854 |