content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M)
---|---|---|
from typing import Callable
def char_pred(pred: Callable[[int], bool]) -> Parser:
"""Parses a single character passing a given predicate."""
def f(x):
if pred(x):
return value(x)
else:
raise Failure(f"Character '{chr(x)}' fails predicate"
" `{pred.__name__}`")
return item >> f | 2b7be4f740e7f7afad1ef66c0d544208d679fc5c | 18,489 |
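A minimal usage sketch (names below are illustrative), assuming the snippet's parser-combinator primitives (Parser, item, value, Failure) are in scope:
# Hypothetical usage: a parser for a single ASCII digit.
def is_digit(c: int) -> bool:
    # Characters arrive as integer code points in this combinator style.
    return chr(c).isdigit()

digit = char_pred(is_digit)
# `digit` succeeds on a single digit character and fails with Failure on anything else.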
def convert_bound(bound, coord_max, coord_var):
"""
    This function will return a converted bound which matches the
range of the given input file.
Parameters
----------
bound : np.array
        1-dimensional 2-element numpy array whose elements represent the
        lower and upper bounds on this coordinate, respectively.
coord_max : integer
The max value which is possible given this coordinate. For
example, the max for longitude is 360.
coord_var : xarray.DataArray
The xarray variable for some coordinate.
Returns
-------
np.array
        1-dimensional 2-element numpy array which represents the lower
        and upper bounds on this coordinate and has been converted
        based on the valid coordinate range of the dataset.
Notes
-----
Assumption that 0 is always on the prime meridian/equator.
"""
scale = coord_var.attrs.get('scale_factor', 1.0)
offset = coord_var.attrs.get('add_offset', 0.0)
valid_min = coord_var.attrs.get('valid_min', None)
if valid_min is None or valid_min > 0:
# If coord var doesn't contain valid min, attempt to find
# manually. Note: Given the perfect storm, this could still fail
# to find the actual bounds.
# Filter out _FillValue from data before calculating min and max
fill_value = coord_var.attrs.get('_FillValue', None)
var_values = coord_var.values
if fill_value:
var_values = np.where(var_values != fill_value, var_values, np.nan)
var_min = np.nanmin(var_values)
var_max = np.nanmax(var_values)
if 0 <= var_min <= var_max <= (coord_max / scale):
valid_min = 0
# If the file coords are 0 --> max
if valid_min == 0:
bound = (bound + coord_max) % coord_max
# If the right/top bound is 0, set to max.
if bound[1] == 0:
bound[1] = coord_max
# If edges are the same, assume it wraps and return all
if bound[0] == bound[1]:
bound = np.array([0, coord_max])
# If the file longitude is -coord_max/2 --> coord_max/2
if valid_min != 0:
# If edges are the same, assume it wraps and return all
if bound[0] == bound[1]:
bound = np.array([-(coord_max / 2), coord_max / 2])
# Calculate scale and offset so the bounds match the coord data
return apply_scale_offset(scale, offset, bound) | 5784167af65b2f406bfa5c428f1421a8915359f3 | 18,490 |
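A small illustration (values chosen here, not from the source) of the wrap-around step applied above when the file's coordinates run from 0 to coord_max, e.g. a 0-360 longitude grid:
import numpy as np

coord_max = 360
bound = np.array([-30, 45])            # requested longitude range in the -180..180 convention
wrapped = (bound + coord_max) % coord_max
print(wrapped)                          # -> [330  45], the same range expressed on the 0-360 grid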
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus'] | 757963dce9d9dc00be54ffbcbf694656f2f770e9 | 18,491 |
from functools import reduce
def cc_filter_set_variables(operator, bk_biz_id, bk_obj_id, bk_obj_value):
"""
    Filter sets by business ID, filter attribute ID and filter attribute value
    :param operator: operator
    :param bk_biz_id: business ID
    :param bk_obj_id: filter attribute ID
    :param bk_obj_value: filter attribute value
:return:
"""
client = get_client_by_user(operator)
obj_value_list = bk_obj_value.split(",")
results = []
    # When multiple filter attribute values are given, call the API once per value
for obj_value in obj_value_list:
kwargs = {
"bk_biz_id": int(bk_biz_id),
"condition": {bk_obj_id: obj_value},
}
result = client.cc.search_set(kwargs)
if not result["result"]:
err_msg = _(
"[cc_filter_set_variables] 调用 cc.search_set 接口获取集群失败, " "kwargs={kwargs}, result={result}"
).format(kwargs=kwargs, result=result)
logger.error(err_msg)
raise ApiRequestError(err_msg)
results += result["data"]["info"]
if not results:
return [], set()
bk_attributes = reduce(set.intersection, [set(result.keys()) for result in results])
return results, bk_attributes | 3e1f849c59d3e3553f1c8b6f725a36921cae9451 | 18,492 |
def distance(mags, spt, spt_unc):
"""
    mags is a dictionary of (magnitude, uncertainty) pairs keyed by filter (e.g. F110W)
set a bias
"""
res={}
f110w=mags['F110W']
f140w=mags['F140W']
f160w=mags['F160W']
relations=POLYNOMIAL_RELATIONS['abs_mags']
nsample=1000
for k in mags.keys():
#take the standard deviation
spt=make_spt_number(spt)
absmag_scatter=relations[k][1]
spts=np.random.normal(spt, spt_unc, nsample)
        #trim out spectral types outside the range of validity
        mask=(spts<15) | (spts>40)
absmags=(relations[k][0])(spts)[~mask]
#total_uncertainty
mag_unc=(absmag_scatter**2+mags[k][1]**2)**0.5
relmags=np.random.normal(mags[k][0], mag_unc, nsample)[~mask]
dists=get_distance(absmags, relmags)
res[str('dist')+k]=np.nanmedian(dists)
res[str('dist_er')+k]=np.nanstd(dists)
return res | cece86e80c03ce8d9753fe864bd746e68500fca3 | 18,493 |
import math
def foo(X):
"""The function to evaluate"""
ret = []
for x in X:
r = 2*math.sqrt(sum([n*n for n in x]));
if r == 0:
ret.append(0)
else:
ret.append(math.sin(r) / r);
return ret | 7b241cf45757cdf9a5a28ee56c59ee41099ccb1e | 18,495 |
import numpy as np
def measure_curv(left_fit, right_fit, plot_points, ym_per_pix, xm_per_pix):
"""
calculates the curvature using a given polynom
Args:
        left_fit (array-like): polynomial coefficients of the left lane line (pixel space)
        right_fit (array-like): polynomial coefficients of the right lane line (pixel space)
        plot_points (array-like): y values (in pixels) along which the lane polynomials are evaluated
        ym_per_pix (float): metres per pixel in the y direction
        xm_per_pix (float): metres per pixel in the x direction
"""
#get the max y value (start of the lane) this is the place we want to calc the curvature
y_curve = np.max(plot_points)
    #calculate/define the new polynomial coefficients so they measure metres instead of pixels
cofA_left = xm_per_pix / (ym_per_pix**2) * left_fit[0]
cofB_left = (xm_per_pix/ym_per_pix) * left_fit[1]
cofA_right = xm_per_pix / (ym_per_pix**2) * right_fit[0]
cofB_right = (xm_per_pix/ym_per_pix) * right_fit[1]
#calculate the curvature using the formula: R = (1+(2Ay+B)^2)^3/2)/|2A| with y = A*y^2+B*y+C
    left_curv_m = ((1+(2*cofA_left*y_curve*ym_per_pix+cofB_left)**2)**(3/2))/np.absolute(2*cofA_left)
    right_curv_m = ((1+(2*cofA_right*y_curve*ym_per_pix+cofB_right)**2)**(3/2))/np.absolute(2*cofA_right)
#calculate the mean curvature (curvatur from the middle of the lane)
curv_mean = (left_curv_m + right_curv_m) / 2
return curv_mean, left_curv_m, right_curv_m | 7ae6d1e390906c3011349716aad0d0640a4c3a65 | 18,496 |
import logging
from datetime import datetime
def get_external_dns(result):
"""
    Function to collect and validate EXTERNAL_DNS server IP address information
Args:
result(dict): Input result dictionary with all network parameters and boolean flags
Returns:
result(dict): The updated result dictionary with network parameters
Raises:
Exception on Invalid IP addresses
"""
    logging.info('[%s] - Collect the external dns.', datetime.today())
try:
is_answer = False
while not is_answer:
external_dns = case_check(input("Do you have public EXTERNAL DNS IP servers? y or n \n"))
if external_dns == 'n' or external_dns == 'y':
result['external_dns_flag'] = external_dns
is_answer = True
if external_dns == 'y':
is_internal = False
while not is_internal:
external = case_check(
input("Enter the EXTERNAL DNS public IP address(s) comma separated or 's' to skip \n"))
if external == 's':
result['external_dns_flag'] = 's'
logging.info("EXTERNAL_DNS option skipped by user ")
break
if len(external) > 0:
result, is_internal = ip_validation('EXTERNAL_DNS', external, result, is_internal)
else:
print(f'{Style.RED}Wrong value! Please input y or n{Style.RESET}')
return result
except Exception as error:
logging.error(error) | 14531bcd17dbc036f417ec7eca6d24e9c7931e6f | 18,497 |
def __process_agent(agent_param):
"""Get the agent id and namespace from an input param."""
if not agent_param.endswith('TEXT'):
param_parts = agent_param.split('@')
if len(param_parts) == 2:
ag, ns = param_parts
elif len(param_parts) == 1:
ag = agent_param
ns = 'HGNC-SYMBOL'
else:
raise DbAPIError('Unrecognized agent spec: \"%s\"' % agent_param)
else:
ag = agent_param[:-5]
ns = 'TEXT'
if ns == 'HGNC-SYMBOL':
original_ag = ag
ag = hgnc_client.get_hgnc_id(original_ag)
if ag is None and 'None' not in agent_param:
raise DbAPIError('Invalid agent name: \"%s\"' % original_ag)
ns = 'HGNC'
return ag, ns | 49ebaa4c435422066c0e2345e4cf056caebbdc9e | 18,498 |
def inference(predictions_op, true_labels_op, display, sess):
""" Perform inference per batch on pre-trained model.
This function performs inference and computes the CER per utterance.
Args:
predictions_op: Prediction op
true_labels_op: True Labels op
display: print sample predictions if True
sess: default session to evaluate the ops.
Returns:
char_err_rate: list of CER per utterance.
"""
char_err_rate = []
# Perform inference of batch worth of data at a time.
[predictions, true_labels] = sess.run([predictions_op,
true_labels_op])
pred_label = sparse_to_labels(predictions[0][0])
actual_label = sparse_to_labels(true_labels)
for (label, pred) in zip(actual_label, pred_label):
char_err_rate.append(distance(label, pred)/len(label))
if display:
# Print sample responses
for i in range(ARGS.batch_size):
print(actual_label[i] + ' vs ' + pred_label[i])
return char_err_rate | 5e58ab3fff91a2fb5450b37f0bf41b2681d297d9 | 18,499 |
from typing import Optional
from typing import Any
def callback_with_answer_and_close_window(
on_change: Optional[OnQuestionChangeCallback], window: Window
) -> OnQuestionChangeCallback:
"""Create a callback that calls both the on_change and window.close methods."""
def inner(answer: Any) -> None:
if on_change is not None:
on_change(answer)
        # Use kill rather than close because we don't want to trigger the on_cancel
        # callback that using the close button would invoke.
window.kill()
return inner | 70ac59a8fcb3634f49b49d2ffa150fa20072b485 | 18,500 |
import json
import requests
def nlu_tuling(question, loc="上海"):
"""图灵 API
"""
url = 'http://www.tuling123.com/openapi/api'
data = {
'key': "fd2a2710a7e01001f97dc3a663603fa1",
'info': question,
"loc": loc,
'userid': mac_address
}
try:
r = json.loads(requests.post(url=url, data=data).text)
except:
return
if not r['code'] in (100000, 200000, 302000, 308000, 313000, 314000): return
    if r['code'] == 100000: # text reply
return '\n'.join([r['text'].replace('<br>','\n')])
    elif r['code'] == 200000: # link reply
return '\n'.join([r['text'].replace('<br>','\n'), r['url']])
    elif r['code'] == 302000: # news reply
l = [r['text'].replace('<br>','\n')]
for n in r['list']: l.append('%s - %s'%(n['article'], n['detailurl']))
return '\n'.join(l)
    elif r['code'] == 308000: # recipe reply
l = [r['text'].replace('<br>','\n')]
for n in r['list']: l.append('%s - %s'%(n['name'], n['detailurl']))
return '\n'.join(l)
    elif r['code'] == 313000: # nursery rhyme reply
return '\n'.join([r['text'].replace('<br>','\n')])
    elif r['code'] == 314000: # poetry reply
return '\n'.join([r['text'].replace('<br>','\n')]) | 6d4ffd7675d27f3316635e72e6f3c02d13e243a6 | 18,501 |
import typing
from typing import Any
from typing import Dict
def Lines(
apply_clip: bool = True,
close_path: bool = False,
color: ndarray = None,
colors: list = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"],
curves_subset: list = [],
display_legend: bool = False,
enable_hover: bool = True,
fill: str = "none",
fill_colors: list = [],
fill_opacities: list = [],
interactions: dict = {"hover": "tooltip"},
interpolation: str = "linear",
labels: list = [],
labels_visibility: str = "none",
line_style: str = "solid",
marker: str = None,
marker_size: int = 64,
opacities: list = [],
preserve_domain: dict = {},
scales: dict = {},
scales_metadata: dict = {
"x": {"orientation": "horizontal", "dimension": "x"},
"y": {"orientation": "vertical", "dimension": "y"},
"color": {"dimension": "color"},
},
selected: ndarray = None,
selected_style: dict = {},
stroke_width: float = 2.0,
tooltip: Element[ipywidgets.widgets.domwidget.DOMWidget] = None,
tooltip_location: str = "mouse",
tooltip_style: dict = {"opacity": 0.9},
unselected_style: dict = {},
visible: bool = True,
x: ndarray = np.array([]),
y: ndarray = np.array([]),
on_apply_clip: typing.Callable[[bool], Any] = None,
on_close_path: typing.Callable[[bool], Any] = None,
on_color: typing.Callable[[ndarray], Any] = None,
on_colors: typing.Callable[[list], Any] = None,
on_curves_subset: typing.Callable[[list], Any] = None,
on_display_legend: typing.Callable[[bool], Any] = None,
on_enable_hover: typing.Callable[[bool], Any] = None,
on_fill: typing.Callable[[str], Any] = None,
on_fill_colors: typing.Callable[[list], Any] = None,
on_fill_opacities: typing.Callable[[list], Any] = None,
on_interactions: typing.Callable[[dict], Any] = None,
on_interpolation: typing.Callable[[str], Any] = None,
on_labels: typing.Callable[[list], Any] = None,
on_labels_visibility: typing.Callable[[str], Any] = None,
on_line_style: typing.Callable[[str], Any] = None,
on_marker: typing.Callable[[str], Any] = None,
on_marker_size: typing.Callable[[int], Any] = None,
on_opacities: typing.Callable[[list], Any] = None,
on_preserve_domain: typing.Callable[[dict], Any] = None,
on_scales: typing.Callable[[dict], Any] = None,
on_scales_metadata: typing.Callable[[dict], Any] = None,
on_selected: typing.Callable[[ndarray], Any] = None,
on_selected_style: typing.Callable[[dict], Any] = None,
on_stroke_width: typing.Callable[[float], Any] = None,
on_tooltip: typing.Callable[[Element[ipywidgets.widgets.domwidget.DOMWidget]], Any] = None,
on_tooltip_location: typing.Callable[[str], Any] = None,
on_tooltip_style: typing.Callable[[dict], Any] = None,
on_unselected_style: typing.Callable[[dict], Any] = None,
on_visible: typing.Callable[[bool], Any] = None,
on_x: typing.Callable[[ndarray], Any] = None,
on_y: typing.Callable[[ndarray], Any] = None,
) -> Element[bqplot.marks.Lines]:
"""Lines mark.
In the case of the Lines mark, scales for 'x' and 'y' MUST be provided.
Attributes
----------
icon: string (class-level attribute)
Font-awesome icon for the respective mark
name: string (class-level attribute)
User-friendly name of the mark
colors: list of colors (default: CATEGORY10)
List of colors of the Lines. If the list is shorter than the number
of lines, the colors are reused.
close_path: bool (default: False)
Whether to close the paths or not.
fill: {'none', 'bottom', 'top', 'inside', 'between'}
Fill in the area defined by the curves
fill_colors: list of colors (default: [])
Fill colors for the areas. Defaults to stroke-colors when no
color provided
opacities: list of floats (default: [])
Opacity for the lines and patches. Defaults to 1 when the list is too
short, or the element of the list is set to None.
fill_opacities: list of floats (default: [])
Opacity for the areas. Defaults to 1 when the list is too
short, or the element of the list is set to None.
stroke_width: float (default: 2)
Stroke width of the Lines
labels_visibility: {'none', 'label'}
Visibility of the curve labels
curves_subset: list of integers or None (default: [])
If set to None, all the lines are displayed. Otherwise, only the items
in the list will have full opacity, while others will be faded.
line_style: {'solid', 'dashed', 'dotted', 'dash_dotted'}
Line style.
interpolation: {'linear', 'basis', 'cardinal', 'monotone'}
Interpolation scheme used for interpolation between the data points
provided. Please refer to the svg interpolate documentation for details
about the different interpolation schemes.
marker: {'circle', 'cross', 'diamond', 'square', 'triangle-down', 'triangle-up', 'arrow', 'rectangle', 'ellipse'}
Marker shape
marker_size: nonnegative int (default: 64)
Default marker size in pixels
Data Attributes
x: numpy.ndarray (default: [])
abscissas of the data points (1d or 2d array)
y: numpy.ndarray (default: [])
ordinates of the data points (1d or 2d array)
color: numpy.ndarray (default: None)
colors of the different lines based on data. If it is [], then the
colors from the colors attribute are used. Each line has a single color
and if the size of colors is less than the number of lines, the
remaining lines are given the default colors.
Notes
-----
The fields which can be passed to the default tooltip are:
name: label of the line
index: index of the line being hovered on
color: data attribute for the color of the line
The following are the events which can trigger interactions:
click: left click of the mouse
hover: mouse-over an element
The following are the interactions which can be linked to the above events:
tooltip: display tooltip
"""
kwargs: Dict[Any, Any] = without_default(Lines, locals())
widget_cls = bqplot.marks.Lines
comp = react.core.ComponentWidget(widget=widget_cls)
return Element(comp, **kwargs) | 8456f09c6d0088d299123eea5545247a8df56ac8 | 18,502 |
import logging
import arrow
from arrow.parser import ParserError
def parse_currencies(row):
"""Clean up and convert currency fields to floats."""
date_columns = (
'Datum van laatste bijwerking',
'Einddatum',
'Begindatum'
)
for key in date_columns:
try:
row[key] = arrow.get(row[key], 'DD.MM.YYYY HH:mm')
except ParserError:
if row[key] != '0000-00-00 00:00:00':
message = 'Could not parse %s to a date, returning None'
logging.warning(message, row[key])
row[key] = None
return row | f0ae86dfc755d5ec4e79bb11ba297a463307ade1 | 18,503 |
import sympy
def as_tuple(item, type=None, length=None):
"""
Force item to a tuple.
Partly extracted from: https://github.com/OP2/PyOP2/.
"""
# Empty list if we get passed None
if item is None:
t = ()
elif isinstance(item, (str, sympy.Function)):
t = (item,)
else:
# Convert iterable to list...
try:
t = tuple(item)
# ... or create a list of a single item
except (TypeError, NotImplementedError):
t = (item,) * (length or 1)
if length and not len(t) == length:
raise ValueError("Tuple needs to be of length %d" % length)
if type and not all(isinstance(i, type) for i in t):
raise TypeError("Items need to be of type %s" % type)
return t | 9ede0fb0abc43829e4ca53dbb2e5aaeb96479a3c | 18,504 |
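A few illustrative calls (values chosen here, not from the original source) showing the coercion behaviour of as_tuple:
# Non-iterable items are repeated to the requested length.
assert as_tuple(3, length=2) == (3, 3)
# Iterables are converted element-wise; strings count as a single item.
assert as_tuple([1, 2]) == (1, 2)
assert as_tuple("ab") == ("ab",)
# Length and type constraints raise when violated.
try:
    as_tuple((1, 2, 3), length=2)
except ValueError:
    pass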
def messages(request):
"""
Return a lazy 'messages' context variable as well as
'DEFAULT_MESSAGE_LEVELS'.
"""
return {
"messages": get_messages(request=request),
"DEFAULT_MESSAGE_LEVELS": DEFAULT_LEVELS,
} | 7c151df9ce2515e34f01886e4100a44a0fa50f36 | 18,505 |
def hash_graph(graph):
""" A hash value of the tupelized version of graph.
Args:
graph (NetworkX graph): A graph
Returns:
int: A hash value of a graph.
Example:
>>> g = dlib.sample(5)
>>> g.nodes
NodeView((0, 1, 2, 3, 4))
>>> g.edges
EdgeView([(0, 1), (0, 3), (1, 2), (1, 3), (2, 3)])
>>> glib.hash_graph(g)
249771633555694270
"""
return hash(str(graph_to_tuple(graph))) | 94039a1c067a2456345b49609f0f18267607e6f8 | 18,506 |
def deserialize(rank: str, suit: str) -> Card.Name:
"""
Convert a serialized card string to a `Card.Name`.
Parameters
----------
rank : str
A, 2, 3, ..., 10, J, Q, K
suit : str
C, D, H, S
"""
suit_map = {
'C': Suit.CLUBS,
'D': Suit.DIAMONDS,
'H': Suit.HEARTS,
'S': Suit.SPADES
}
return Card.Name(_map_rank(rank), suit_map[suit]) | cd4b8c09a2e0bddf3f8ba2079cd16f1cc3dbff9b | 18,507 |
from contextlib import suppress
from functools import wraps
import inspect
def enforce_types(target):
"""Class decorator adding type checks to all member functions
"""
def check_types(spec, *args, **kwargs):
parameters = dict(zip(spec.args, args))
parameters.update(kwargs)
for name, value in parameters.items():
with suppress(KeyError): # Assume un-annotated parameters can be any type
type_hint = spec.annotations[name]
if _is_unparameterized_special_typing(type_hint):
continue
if hasattr(type_hint, "__args__") and type_hint.__args__ is not None:
actual_type = type_hint.__args__
else:
actual_type = type_hint
if not isinstance(value, actual_type):
raise TypeError("Unexpected type for '{}' (expected {} but found {})"
.format(name, type_hint, type(value)))
def decorate(func):
spec = inspect.getfullargspec(func)
@wraps(func)
def wrapper(*args, **kwargs):
check_types(spec, *args, **kwargs)
return func(*args, **kwargs)
return wrapper
if inspect.isclass(target):
members = inspect.getmembers(target, predicate=inspect.isfunction)
for name, func in members:
setattr(target, name, decorate(func))
return target
else:
return decorate(target) | b8aac44b70290e9277935a52c49cce8da93511d0 | 18,508 |
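A usage sketch (class and values invented here). Note the snippet also relies on a _is_unparameterized_special_typing helper that is not shown above, so this is illustrative rather than runnable as-is:
@enforce_types
class Greeter:
    def greet(self, name: str, times: int) -> str:
        return ", ".join(["hello " + name] * times)

g = Greeter()
g.greet("world", 2)     # passes the runtime type checks
g.greet("world", "2")   # raises TypeError: Unexpected type for 'times' ...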
def get_article(URL):
"""
Get an article from one our trusted sources.
Args:
URL: URL string to parse, e.g., http://www.hello.com/world
Returns
        Article object if URL was successfully requested and parsed.
None if it fails to parse or the URL is from a source not
in the trusted list.
"""
try:
output = urlparse(URL)
source = output.netloc.split('.')[1]
except:
print("Failed to parse URL.")
return None
if source not in TRUSTED_SOURCES:
print("URL isn't in TRUSTED_SOURCES")
return None
article = Article(URL)
article.download()
article.parse()
return article | 4fe61fac2cc584819250198ca18c6ad4a640a245 | 18,509 |
def crop_central_whiten_images(images=None, height=24, width=24):
"""Crop the central of image, and normailize it for test data.
They are cropped to central of height * width pixels.
Whiten (Normalize) the images.
Parameters
----------
images : 4D Tensor
The tensor or placeholder of images
height : int
The height for central crop.
width: int
The width for central crop.
Returns
-------
result : tuple Tensor
(Tensor for distorted images, Tensor for while loop index)
Examples
--------
>>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
>>> sess = tf.InteractiveSession()
>>> batch_size = 128
>>> x = tf.placeholder(tf.float32, shape=[batch_size, 32, 32, 3])
>>> central_images_op = tl.preprocess.crop_central_whiten_images(images=x, height=24, width=24)
>>> sess.run(tf.initialize_all_variables())
>>> feed_dict={x: X_train[0:batch_size,:,:,:]}
>>> central_images, idx = sess.run(central_images_op, feed_dict=feed_dict)
>>> tl.visualize.images2d(X_train[0:9,:,:,:], second=2, saveable=False, name='cifar10', dtype=np.uint8, fig_idx=20212)
>>> tl.visualize.images2d(central_images[1:10,:,:,:], second=10, saveable=False, name='central_images', dtype=None, fig_idx=23012)
Notes
------
The first image in 'central_images' should be removed.
Code References
----------------
- ``tensorflow.models.image.cifar10.cifar10_input``
"""
print(" [Warning] crop_central_whiten_images will be deprecated due to speed, see TFRecord tutorial for more info...")
try:
batch_size = int(images._shape[0])
except:
        raise Exception('unknown batch_size of images')
central_x = tf.Variable(tf.constant(0.1, shape=[1, height, width, 3]))
i = tf.Variable(tf.constant(0))
c = lambda central_x, i: tf.less(i, batch_size)
def body(central_x, i):
# 1. Crop the central [height, width] of the image.
image = tf.image.resize_image_with_crop_or_pad(tf.gather(images, i), height, width)
# 2. Subtract off the mean and divide by the variance of the pixels.
image = tf.image.per_image_whitening(image)
        # 3. Append the image to a batch.
image = tf.expand_dims(image, 0)
return tf.concat(0, [central_x, image]), tf.add(i, 1)
result = tf.while_loop(cond=c, body=body, loop_vars=(central_x, i), parallel_iterations=16)
return result | d89a5daa8c40f5e56ff635351fd3ca2f09475dd7 | 18,510 |
def start(event):
"""
Whether or not return was pressed
"""
return event.type == KEYDOWN and event.key == system["ENTER"] | 03c54ac79897fa1d84ffb3fd5cbd335486e81c46 | 18,512 |
def cost(theta, X, y):
"""cost fn is -1(theta) for you to minimize"""
return np.mean(-y * np.log(sigmoid(X @ theta)) - (1 - y) * np.log(1 - sigmoid(X @ theta))) | 476da6562a7083b573037359a8785d2fd99fd785 | 18,513 |
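A minimal numeric check (not from the original source); the sigmoid helper is assumed here because the snippet does not define it. At theta = 0 every prediction is 0.5, so the cost reduces to ln(2):
import numpy as np

def sigmoid(z):
    # Assumed helper: the standard logistic function.
    return 1.0 / (1.0 + np.exp(-z))

X = np.array([[1.0, 2.0], [1.0, -1.0], [1.0, 0.5]])  # toy design matrix with a bias column
y = np.array([1.0, 0.0, 1.0])                        # toy labels
theta = np.zeros(2)
print(cost(theta, X, y))                             # -> ~0.693, i.e. ln(2)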
def enrich_nodes(nodes, vindplaatsen, articles):
"""
Add some attributes to the nodes.
:param nodes:
    :param vindplaatsen:
    :param articles:
:return:
"""
nodes = add_year(nodes)
nodes = add_articles(nodes, articles)
nodes = add_versions(nodes, vindplaatsen)
return nodes | de708d6a0ac5a79431ec142f91d01c9711115df4 | 18,514 |
def has_string(match):
"""Matches if ``str(item)`` satisfies a given matcher.
:param match: The matcher to satisfy, or an expected value for
:py:func:`~hamcrest.core.core.isequal.equal_to` matching.
This matcher invokes the :py:func:`str` function on the evaluated object to
    get its string representation, passing the result to a given matcher for evaluation. If
the ``match`` argument is not a matcher, it is implicitly wrapped in an
:py:func:`~hamcrest.core.core.isequal.equal_to` matcher to check for
equality.
Examples::
has_string(starts_with('foo'))
has_string('bar')
"""
return HasString(wrap_matcher(match)) | 01f9ef9c5b2acd3b54901300e6c6ed80e656a0b0 | 18,515 |
import numpy
def get_land_sea_mask(gridded_geo_box, \
ancillary_path='/g/data/v10/eoancillarydata/Land_Sea_Rasters'):
"""
Return a land/sea 2D numpy boolean array in which Land = True, Sea = False
for the supplied GriddedGeoBox and using the UTM projected data in the
supplied ancillary_path.
If the specified gridded_geo_box has a non-UTM CRS or a non-native
    sample frequency, the data will be reprojected/resampled into the
gridded_geo_box.
"""
# get lat/long of geo_box origin
to_crs = osr.SpatialReference()
to_crs.SetFromUserInput('EPSG:4326')
origin_longlat = gridded_geo_box.transform_coordinates(gridded_geo_box.origin, to_crs)
# get Land/Sea data file for this bounding box
utmZone = abs(get_utm_zone(origin_longlat))
utmDataPath = '%s/WORLDzone%d.tif' % (ancillary_path, utmZone)
# read the land/sea data
with rio.open(utmDataPath) as ds:
# get the gridded box for the full dataset extent
landSeaDataGGB = GriddedGeoBox.from_dataset(ds)
        # read the subset covering the supplied gridded_geo_box
window = landSeaDataGGB.window(gridded_geo_box)
out = numpy.zeros(gridded_geo_box.shape, dtype=numpy.uint8)
ds.read(1, window=window, out=out)
return out | 54bec48a78f969cd9297872c2965d0ca8a62e39a | 18,516 |
import copy
import itertools
def qEI_brute(gp_, true_function, X_=np.linspace(0, 1, 200), q=3,
niterations=10, nsim=1000):
"""
q steps EI performed with brute force: Brute search on vector X_
"""
gp = copy.copy(gp_)
i = 0
nn = X_.shape[0]
rshape = q * [nn]
qEI_to_evaluate = np.asarray([np.vstack(np.array(comb))
for comb in itertools.product(X_, repeat=q)]).squeeze()
while i < niterations:
bplt.plot_gp(gp, X_, true_function=true_function, nsamples=5, show=False)
qEI_computed = acq.gp_qEI_computation_brute(gp, qEI_to_evaluate, nsim).reshape(rshape)
next_to_evaluate = X_[np.asarray(np.unravel_index(qEI_computed.argmax(),
qEI_computed.shape))]
value_evaluated = true_function(next_to_evaluate)
[plt.axvline(nextpoint, ls = '--', color = 'red')
for nextpoint in next_to_evaluate]
X = np.append(gp.X_train_, next_to_evaluate)
X = X[:, np.newaxis]
y = np.append(gp.y_train_, value_evaluated)
gp.fit(X, y)
i += 1
print(' Best value yet ' + str(gp.X_train_[gp.y_train_.argmin()]))
plt.show()
return gp | a89afb5dc2e569d28642492cbacd7183814ca9dc | 18,517 |
def potential_energy_diff(e_in, e_out):
"""Returns difference in potential energy.
arguments:
e_in - dictionary of energy groups from input file
e_out - dictionary of energy groups from output file
returns:
potential energy difference in units of the input
"""
energy_type = 'Potential'
input_energy = e_in[energy_type]
diff = e_out[energy_type].in_units_of(input_energy.unit) - input_energy
return diff._value | 11a2f25d5f034c7d824abd45369ed5a5711ad6ab | 18,518 |
def CA_potential_profile(pot_init: float, pot_step: float, pot_rest: float,
pot_init_time: float, pot_step_time: float, pot_rest_time: float,
buffer_size: int = 1200, samp_rate: int = 3600) -> tuple:
"""
:param pot_init: Initial potential in V
:param pot_step: Step potential in V
:param pot_rest: Rest potential in V
:param pot_init_time: Time to hold the initial potential in s
This will be elongated as needed to round out the total sample number.
:param pot_step_time: Time to hold the step potential in s
:param pot_rest_time: Time to hold the resting potential in s
:param buffer_size: Samples stored in buffer before callback
:param samp_rate: Sampling rate in samples/s; Use an integral multiple of 120/s and at least 3600 per volt
:return: pot_profile, samp_num_tot: An array holding potentials for each sample and the total sample number
"""
# number of samples for each section
samp_num_init = samp_rate * pot_init_time
samp_num_step = samp_rate * pot_step_time
samp_num_rest = samp_rate * pot_rest_time
# create potential profile array for each section
pot_profile_init = pot_init * np.repeat(1, samp_num_init)
pot_profile_step = pot_step * np.repeat(1, samp_num_step)
pot_profile_rest = pot_rest * np.repeat(1, samp_num_rest)
'''Since the total sample number must be a multiple of the buffer_size,
add additional samples to the initial potential step until it is.'''
# additional samples in the hold step to round off potential profile
additional_hold_sample = 0
# total sample size of the potential profile with extra samples as needed
samp_num_tot = additional_hold_sample + len(pot_profile_init) + len(pot_profile_step) + len(pot_profile_rest)
while samp_num_tot % buffer_size != 0:
additional_hold_sample += 1
samp_num_tot = additional_hold_sample + len(pot_profile_init) + len(pot_profile_step) + len(pot_profile_rest)
# Calculate hold profile
h_profile = np.linspace(pot_init, pot_init, int(additional_hold_sample))
'''Construct the potential profile by combining each individual section'''
pot_profile = np.concatenate((h_profile, pot_profile_init, pot_profile_step, pot_profile_rest))
samp_num_tot = int(len(pot_profile)) # must be an integer
'''Check potential profile to be set'''
plt.title('CA Program Potential', fontsize=16)
plt.xlabel('Time / s', fontsize=16)
    plt.ylabel(r'$E_{\mathrm{in}}$ / V', fontsize=16)
plt.tick_params(axis='both', which='both', direction='in', right=True, top=True)
plt.plot(np.arange(0, len(pot_profile), 1) / samp_rate, pot_profile)
return pot_profile, samp_num_tot | b7a4ac226cb5f3f239125d7227013508df51404f | 18,519 |
def generate_model_class(grid_dir, data_dir, Nlon=936, Nlat=1062, Nz=90):
"""
Wrapper function for generating the LLCRegion object describing the
model region. The wrapper automatically reads the grid information.
Default values for grid size are for the Samoan Passage box (Box 12
in Dimitris' notation).
Parameters
----------
grid_dir : str
Path to grid files
data_dir : str
Path to data files
Nlon : int
Number of grid points in the zonal direction
Nlat : int
        Number of grid points in the meridional direction
Nz : int
Number of grid points in the vertical
Returns
-------
m : LLCRegion model class
"""
m = LLCRegion(grid_dir=grid_dir, data_dir=data_dir,
Nlon=Nlon, Nlat=Nlat, Nz=Nz)
print('loading grid...')
m.load_grid()
print(m.grid_size3d)
return m | 442ea60d4c790dc4da65f65648657d8e49dfd6b7 | 18,520 |
import numpy as np
from scipy.integrate import dblquad
def discretize_integrate_2D(model, x_range, y_range):
"""
Discretize model by integrating the model over the pixel.
"""
# Set up grid
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
values = np.empty((y.size - 1, x.size - 1))
# Integrate over all pixels
for i in range(x.size - 1):
for j in range(y.size - 1):
values[j, i] = dblquad(lambda y, x: model(x, y), x[i], x[i + 1],
lambda x: y[j], lambda x: y[j + 1])[0]
return values | e70046afb8c26711045d0ce5c7c00ea482ca972a | 18,521 |
def detail(request, question_id):
"""
    Output the HelloWorld content
"""
question = get_object_or_404(Question, pk=question_id)
context = {'question': question}
return render(request, 'HelloWorld/question_detail.html', context) | 61fe5664baa2a33282ec77aa020332038fc3b040 | 18,523 |
def TDataStd_BooleanArray_GetID(*args):
"""
* Static methods ============== Returns an ID for array.
:rtype: Standard_GUID
"""
return _TDataStd.TDataStd_BooleanArray_GetID(*args) | f5faa03336a07dc366a9c5d9133dac46e6d378db | 18,524 |
def get_axis_order():
"""Get the axis_order set by any containing axis_order_scope.
Returns:
List of strings giving an order to use for axis names, or None, if no axis
order is set.
"""
# By storing axis_order in the graph, we can ensure that axis_order_scope is
# thread-safe.
axis_order_list = ops.get_collection(_AXIS_ORDER_KEY)
if axis_order_list:
axis_order, = axis_order_list
else:
axis_order = None
return axis_order | 6a8ed5661822a40cf99762b098eeb0c3600caf3a | 18,525 |
def check_jax_usage(enabled: bool = True) -> bool:
"""Ensures JAX APIs (e.g. :func:`jax.vmap`) are used correctly with Haiku.
JAX transforms (like :func:`jax.vmap`) and control flow (e.g.
:func:`jax.lax.cond`) expect pure functions to be passed in. Some functions
in Haiku (for example :func:`~haiku.get_parameter`) have side effects and thus
functions using them are only pure after using :func:`~haiku.transform` (et
al).
Sometimes it is convenient to use JAX transforms or control flow before
transforming your function (for example, to :func:`~haiku.vmap` the
application of a module) but when doing so you need to be careful to use the
Haiku overloaded version of the underlying JAX function, which carefully makes
the function(s) you pass in pure functions before calling the underlying JAX
function.
:func:`check_jax_usage` enables checking raw JAX transforms are used
appropriately inside Haiku transformed functions. Incorrect usage of JAX
transforms will result in an error.
Consider the function below, it is not a pure function (a function of its
inputs with no side effects) because we call into a Haiku API
(:func:`~haiku.get_parameter`) which during init will create a parameter and
register it with Haiku.
>>> def f():
... return hk.get_parameter("some_param", [], init=jnp.zeros)
We should not use this with JAX APIs like :func:`jax.vmap` (because it is not
a pure function). :func:`check_jax_usage` allows you to tell Haiku to make
incorrect usages of JAX APIs an error:
>>> previous_value = hk.experimental.check_jax_usage(True)
>>> jax.vmap(f, axis_size=2)()
Traceback (most recent call last):
...
haiku.JaxUsageError: ...
Using the Haiku wrapped version works correctly:
>>> hk.vmap(f, axis_size=2, split_rng=False)()
DeviceArray([0., 0.], dtype=float32)
Args:
enabled: Boolean indicating whether usage should be checked or not.
Returns:
Boolean with the previous value for this setting.
"""
config = get_config()
previous_value, config.check_jax_usage = config.check_jax_usage, enabled
return previous_value | 30d22d616189c6af986373a39d4410d105c222a2 | 18,526 |
def score_numeric_deg_ssetype(omega_a, omega_b):
"""
Return the tableau matching score between two Omega matrix entries
omega_a and omega_b, as per Kamat et al (2008),
    with an effectively negative-infinity score for SSE type mismatch
Parameters:
omega_a - angle in (-pi, pi]
omega_b - angle in (-pi, pi]
Return value:
score betweem omega_a and omega_b
"""
if (omega_a not in [0,1,2,3] and omega_b not in [0,1,2,3]):
return score_numeric_deg(omega_a, omega_b)
else:
if omega_a != omega_b:
return -99999
else:
return 0 | 5347492aadf50eaa1b32726b4f7eace433d47d7c | 18,527 |
def top_menu(context, calling_page=None):
"""
Checks to see if we're in the Play section in order to return pages with
show_in_play_menu set to True, otherwise retrieves the top menu
items - the immediate children of the site root. Also detects 404s in the
Play section.
"""
if (calling_page and in_play(calling_page)) or context.get('play_404', False):
play_models = [
StandardPage,
PersonIndexPage,
WorkIndexPage,
BlogIndexPage
]
menuitems = chain(*[
model.objects.filter(
live=True,
show_in_play_menu=True,
show_in_menus=False
) for model in play_models
])
else:
menuitems = get_site_root(context).get_children().filter(
live=True,
show_in_menus=True
)
return {
'calling_page': calling_page,
'menuitems': menuitems,
# required by the pageurl tag that we want to use within this template
'request': context['request'],
'play_404': context.get('play_404', False)
} | 0479a2f7834f740142330436d04282c19fe6ac20 | 18,528 |
def DefineJacobian(J, DN, x):
""" This method defines a Jacobian
Keyword arguments:
J -- The Jacobian matrix
DN -- The shape function derivatives
x -- The variable to compute the gradient
"""
[nnodes, dim] = x.shape
localdim = dim - 1
if (dim == 2):
if (nnodes == 2):
J[0,0] = 0.5 * (x[1,0] - x[0,0])
J[1,0] = 0.5 * (x[1,1] - x[0,1])
else:
if (nnodes == 3):
J[0,0] = - (x[0,0] + x[1,0])
J[1,0] = - (x[0,1] + x[1,1])
J[2,0] = - (x[0,2] + x[1,2])
J[0,1] = - (x[0,0] + x[2,0])
J[1,1] = - (x[0,1] + x[2,1])
J[2,1] = - (x[0,2] + x[2,2])
else:
for i in range(dim):
for j in range(localdim):
J[i, j] = 0
for i in range(nnodes):
for k in range(dim):
for m in range(localdim):
J[k,m] += x[i,k] * DN[i,m]
return J | e13c092e44db7771a942840083c1628e0f418cb2 | 18,529 |
import struct
def build_reg_text_tree(text, part):
"""Build up the whole tree from the plain text of a single regulation. This
only builds the regulation text part, and does not include appendices or
the supplement. """
title, body = utils.title_body(text)
label = [str(part)]
subparts_list = []
subpart_locations = subparts(body)
if subpart_locations:
pre_subpart = body[:subpart_locations[0][0]]
first_emptypart, children_text = build_subparts_tree(
pre_subpart, part, build_empty_part)
if pre_subpart.strip() and first_emptypart.children:
subparts_list.append(first_emptypart)
else:
children_text = pre_subpart
for start, end in subpart_locations:
subpart_body = body[start:end]
subpart, _ = build_subparts_tree(
subpart_body, part, lambda p: build_subpart(subpart_body, p))
subparts_list.append(subpart)
else:
emptypart, children_text = build_subparts_tree(
body, part, build_empty_part)
if emptypart.children:
subparts_list.append(emptypart)
else:
return struct.Node(
text, [build_empty_part(part)], label, title)
return struct.Node(children_text, subparts_list, label, title) | a9d1068ee061e1a68f47b5bcdcf7ddde9383f330 | 18,530 |
from itertools import count
def solve(banks):
"""Calculate number of steps needed to exit the maze
:banks: list of blocks in each bank
:return: number of redistribtion cycles to loop
>>> solve([0, 2, 7, 0])
4
"""
seen = set()
loops = 0
mark = 0
for cycle in count(1):
# find value and the index of the bank with the largest block
m = max(banks)
i = banks.index(m)
# reset the largest bank
banks[i] = 0
# redistribute its blocks
q, r = divmod(m, len(banks))
banks = [x + q for x in banks]
for j in range(r):
banks[(i + j + 1) % len(banks)] += 1
# check if we've seen this configuration before
b = tuple(banks)
if b in seen:
loops += 1
if loops > 1:
return cycle - mark
else:
seen = set()
mark = cycle
seen.add(b) | 304f4c9294de690a38e517a2fd4455a355db3cb9 | 18,532 |
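A short worked trace of the doctest input (computed by hand here, not taken from the source), showing why solve([0, 2, 7, 0]) returns 4:
# [0, 2, 7, 0] -> [2, 4, 1, 2] -> [3, 1, 2, 3] -> [0, 2, 3, 4] -> [1, 3, 4, 1] -> [2, 4, 1, 2]
# The configuration [2, 4, 1, 2] first appears after cycle 1 and reappears after cycle 5,
# so the redistribution loop has length 5 - 1 = 4.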
def evaluate_python_expression(body):
"""Evaluate the given python expression, returning its result. This is useful if the
front end application needs to do real-time processing on task data. If for instance
there is a hide expression that is based on a previous value in the same form.
The response includes both the result, and a hash of the original query, subsequent calls
of the same hash are unnecessary. """
try:
script_engine = CustomBpmnScriptEngine()
result = script_engine._evaluate(body['expression'], **body['data'])
return {"result": result, "expression": body['expression'], "key": body['key']}
except Exception as e:
return {"result": False, "expression": body['expression'], "key": body['key'], "error": str(e)} | 917490e73d5cd52493128f97d135adb44872a376 | 18,533 |
import collections
def DictFilter(alist, bits):
"""Translate bits from EDID into a list of strings.
Args:
alist: A list of tuples, with the first being a number and second a string.
bits: The bits from EDID that indicate whether each string is supported by
this EDID or not.
Returns:
A dict of strings and bools.
"""
d = collections.OrderedDict()
for x, s in alist:
d[s] = bool(bits & x)
return d | 33d236d649d75ae60ab354c7dc6588e75c855463 | 18,534 |
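An illustrative call (feature names invented here), decoding three feature bits from a single byte:
features = [(0x01, 'Standby'), (0x02, 'Suspend'), (0x04, 'Active-off')]
print(DictFilter(features, 0x05))
# OrderedDict mapping each name to whether its bit is set:
# Standby -> True, Suspend -> False, Active-off -> True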
def mentions(request):
"""Mentions view."""
return render(request, "mentions.html", {"site_title": "Mentions legales"}) | 6bb3dcff6e098127e744d21d67384011595f67c7 | 18,535 |
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""activity assistant form a config entry
is only called once the whole magic has to happen here
"""
#_LOGGER.warning(str(entry.version))
#_LOGGER.warning(str(entry.entry_id))
#_LOGGER.warning(str(entry.title))
#_LOGGER.warning(str(entry.data))
#_LOGGER.warning(str(entry.source))
#_LOGGER.warning(str(entry.connection_class))
hass.data.setdefault(DOMAIN, {})
zeroconf = await async_get_instance(hass)
tmp = zeroconf.get_service_info(ZCNF_TYPE, ZCNF_NAME + '.' + ZCNF_TYPE)
val_dict = zeroconf_Info2Values(tmp)
act_assist = ActAssist(
aiohttp_client.async_get_clientsession(hass),
val_dict[KEY_HOSTNAME],
val_dict['port'],
val_dict[KEY_WEBHOOK]
)
hass.data[DOMAIN].update({entry.entry_id: act_assist})
_LOGGER.warning("saved ActAssistApi in : " + str(entry.entry_id))
# create binary state sensor
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True | 4389f1438a9a9e363f63eae3fe11ec40438569e3 | 18,536 |
def delete(id):
""" Used by the product page to delete a product. Doesn't actually delete it, just sets the quantity to 0. """
db = get_db()
b_id = session.get("user_id")
query = "UPDATE product SET quantity = 0 WHERE product_id = ? AND for_business = ?"
db.execute(query, (id, b_id,))
db.commit()
return redirect(url_for("main.products")) | 25ea594a8d4db6b6040f81033cbd804751d86fce | 18,537 |
def from_meshmaker(filename_or_dict, material="dfalt"):
"""
Generate a mesh from a block MESHM.
Parameters
----------
filename_or_dict: str or dict
Input file name or parameters dict with key "meshmaker".
material : str, optional, default 'dfalt'
Default material name.
"""
if isinstance(filename_or_dict, str):
parameters = read(filename_or_dict, file_format="tough")
else:
parameters = filename_or_dict
if "meshmaker" not in parameters:
raise ValueError()
if "type" not in parameters["meshmaker"]:
raise ValueError()
if parameters["meshmaker"]["type"] not in {"xyz", "rz2d", "rz2dl"}:
raise ValueError()
# XYZ
if parameters["meshmaker"]["type"] == "xyz":
dx_, dy_, dz_ = parse_xyz(parameters["meshmaker"]["parameters"])
dx, dy, dz = [], [], []
for increment in dx_:
append(dx, **increment)
for increment in dy_:
append(dy, **increment)
for increment in dz_:
append(dz, **increment)
if not len(dx):
dx = [1.0]
if not len(dy):
dy = [1.0]
if not len(dz):
dz = [1.0]
return structured_grid(dx, dy, dz, material=material)
# RZ2D
else:
dr_, dz_ = parse_rz2d(parameters["meshmaker"]["parameters"])
dr, dz = [], []
for increment in dr_:
append(dr, **increment)
for increment in dz_:
append(dz, **increment)
if not len(dr):
dr = [1.0]
if not len(dz):
dz = [1.0]
return cylindric_grid(
dr, dz, layer=parameters["meshmaker"]["type"] == "rz2dl", material=material
) | 233d2e5059ebfcd819b526e43bb241e136fbc409 | 18,538 |
def compute_qtys_new_halos_pk(mvir, rvir, redshift, age_yr):
"""
Creates a new galaxy along with the new halo.
Integrates since the start of the Universe.
Updates the initiated quantities with the values of interest.
:param mvir: list of mvir [Msun], length = n.
:param rvir: list of rvir [kpc] , length = n.
:param redshift: redshift of the snapshot replicated n times.
:param age_yr: age of the Universe for the snapshot replicated n times.
Typically inputs should be :
* mvir=self.f1['/halo_properties/mvir'].value[self.mask_f1_new_halos],
* rvir=self.f1['/halo_properties/rvir'].value[self.mask_f1_new_halos],
* age_yr=self.f1.attrs['age_yr']
returns
mvir_dot, rvir_dot, dMdt, dmdt_star, star_formation_rate, stellar_mass
"""
f_b=model.f_b
epsilon = model.epsilon(mvir, redshift )
f_lost = f_loss(age_yr)
# evaluate equation (4)
mvir_dot = mvir / age_yr
# no pseudo evolution correction
dMdt = mvir_dot
# evaluate equation (1)
dmdt_star = f_b * dMdt * epsilon
# evaluate accretion: 0 in this first step
# self.dmdt_star_accretion = n.zeros_like(self.dmdt_star)
# evaluate equation (11)
# equation (12)
# evaluate stellar mass
star_formation_rate = dmdt_star * (1. - f_lost)
return mvir_dot, rvir / age_yr, dMdt, dmdt_star, star_formation_rate, star_formation_rate * age_yr | 8d68a82c08b38596a3ba6defa4633d1c156955b8 | 18,539 |
def rank():
"""A function which returns the Horovod rank of the calling process.
Returns:
An integer scalar with the Horovod rank of the calling process.
"""
rank = MPI_LIB_CTYPES.horovod_tensorflow_rank()
if rank == -1:
raise ValueError(
'Horovod has not been initialized; use horovod.tensorflow.init().')
return rank | 5ce5b0c4ffc644f6c2cb3fc397ea2eb2e68e7c86 | 18,540 |
def getRAMSizeOSX() -> CmdOutput:
"""Returns the RAM size in bytes.
Returns:
CmdOutput: The output of the command, as a `CmdOutput` instance containing
`stdout` and `stderr` as attributes.
"""
return runCommand(exe_args=ExeArgs("sysctl", ["-n", "hw.memsize"])) | 519d6bb5afff6722bdced29df793b36ae6ee734a | 18,541 |
import numpy as np
import pandas as pd
def load_data(filepath, columns=['title','abstract']):
"""Loads specified columns of csv/excel data.
Arguments
---------
filepath: str
Path to file (e.g. 'data.csv')
columns: list
List of strings specifying the column names in the data to load.
Returns
-------
pandas.DataFrame
Pandas object containing the loaded tabular data. If labels are not
loaded, a 'label_included' column is added (filled with -1).
"""
file_type = filepath.split('.')[-1]
if file_type == 'csv':
df = pd.read_csv(filepath, delimiter=';',
encoding='utf-8',
usecols=columns)
elif file_type == 'xlsx':
df = pd.read_excel(filepath, usecols=columns)
else:
raise ValueError('Filetype not supported.')
if 'label_included' not in df.columns:
df['label_included'] = np.full(df.shape[0], -1, dtype=int)
return df | 2c1be3075950ac4ab82cdc7fa0b18cbdb7bf80b2 | 18,542 |
def pre_process(image):
"""
    Invert pixel intensity of 'image' (to compensate for the conversion into an image with imwrite).
"""
return 1 - image * 255 | 7e7227930567c31874d966ce18aeeffa9b73e646 | 18,543 |
from typing import Sequence
from typing import List
def get_examples_to_execute(
predictions: Sequence[inference.Prediction], inference_config: inference.Config
) -> List[official_evaluation.ExecutionInstructions]:
"""
    Converts predictions from a model into sqlite execution instructions. If abstract SQL was used, converts back to fully-specified SQL.
"""
if FLAGS.using_abstract_sql:
predictions = restore_asql_wrapper(predictions)
# Load the database tables.
schema_obj = inference.load_schema_obj(
FLAGS.eval_dataset_name, FLAGS.original_data_directory
)
# Now match with the original data and save
return inference.match_with_dataset(inference_config, predictions, schema_obj) | 9e5ef9ec7fa07433e3f8f14c0164e9858d271461 | 18,545 |
def verify_query(ctx):
"""
Verify a LQL query.
"""
label_widget = ctx.get_state(state="query_builder", key="query_label")
lql_query = ctx.get("lql_query")
evaluator_id = ctx.get("lql_evaluator")
try:
_ = ctx.client.queries.validate(
lql_query, evaluator_id=evaluator_id)
except http_session.ApiError as err:
label_widget.value = "Failure to verify: {0}".format(err)
return False
label_widget.value = "LQL Verified."
return True | 93affbaf7a6049c162850199e5b3f1b55a4f95a9 | 18,546 |
import calendar
def data_feature_engineering(data):
"""
Add features to the data for later use
state_code, weekday, month, year
"""
data['state_code'] = data['state'].map(us_state_abbrev)
data['weekday'] = pd.to_datetime(data['date']).dt.weekday
data['weekday'] = data['weekday'].map(weekday_map)
month_dict = dict(enumerate(calendar.month_abbr))
data['month'] = pd.to_datetime(data['date']).dt.month
data['month'] = data['month'].map(month_dict)
data['year'] = pd.to_datetime(data['date']).dt.year
return data | 92c78a6c976191167d6de39d25762e2307689674 | 18,547 |
import numpy
def train_ei_oc(emotion, model, algorithm, evaluation, finetune, baseline, preprocessor=None):
"""
2. Task EI-oc: Detecting Emotion Intensity (ordinal classification)
Given:
a tweet
an emotion E (anger, fear, joy, or sadness)
Task: classify the tweet into one of four ordinal classes of intensity of E
that best represents the mental state of the tweeter:
0: no E can be inferred
1: low amount of E can be inferred
2: moderate amount of E can be inferred
3: high amount of E can be inferred
For each language: 4 training sets and 4 test sets: one for each emotion E.
:param emotion: emotions = ["anger", "fear", "joy", "sadness"]
    :param model: scikit-learn pipeline to fit (exposes clf__* params)
    :param algorithm: 'LoR' (logistic regression) or 'SVC'
    :param evaluation: evaluation routine forwarded to fit_function
    :param finetune: 'true' to grid-search hyperparameters, otherwise use preset values
    :param baseline: 'true' to use the baseline hyperparameter presets
    :param preprocessor: text preprocessor; defaults to twitter_preprocess()
:return:
"""
if preprocessor is None:
preprocessor = twitter_preprocess()
model_config = TASK1_EIOC
X_train, y_train = parse(task='EI-oc', emotion=emotion, dataset="train")
X_dev, y_dev = parse(task='EI-oc', emotion=emotion, dataset="dev")
X_test, y_test = parse(task='EI-oc', emotion=emotion, dataset="gold")
# keep only scores
y_train = [y[1] for y in y_train]
y_dev = [y[1] for y in y_dev]
y_test = [y[1] for y in y_test]
name = model_config["name"] + "_" + emotion
X_train = preprocessor("{}_{}".format(name, "train"), X_train)
X_dev = preprocessor("{}_{}".format(name, "dev"), X_dev)
X_test = preprocessor("{}_{}".format(name, "test"), X_test)
params = []
params_list = []
res_dev_list = []
res_test_list = []
if algorithm == 'LoR':
if finetune == 'true':
for LoR_C in numpy.arange(10,1000,5)/100:
params = (LoR_C)
print("Now training with parameters: C: {}".format(LoR_C))
model.set_params(clf__C=LoR_C,clf__solver='saga',clf__n_jobs=-1)
fit = fit_function(model, evaluation, X_train, y_train, X_dev, y_dev, X_test, y_test, params, res_dev_list, res_test_list, params_list)
print("Best result on gold set: ", max(res_test_list, key=lambda x:x["pearson"]))
print("Best params: ", params_list[res_test_list.index(max(res_test_list, key=lambda x:x["pearson"]))])
else:
if emotion == 'joy':
if baseline == 'true':
LoR_C = 3.1
else:
LoR_C = 3.5
elif emotion == 'sadness':
LoR_C = 1.085
elif emotion == 'fear':
LoR_C = 3.5
elif emotion == 'anger':
if baseline == 'true':
LoR_C = 2.25
else:
LoR_C = 3.8
params = (LoR_C)
model.set_params(clf__C=LoR_C,clf__solver='saga',clf__n_jobs=-1)
fit = fit_function(model, evaluation, X_train, y_train, X_dev, y_dev, X_test, y_test, params, res_dev_list, res_test_list, params_list)
elif algorithm == 'SVC':
if finetune == 'true':
for SVC_C in numpy.arange(10,50,1)/10:
for SVC_gamma in numpy.arange(65,500,15)/100:
params = (SVC_C,SVC_gamma)
print("Now training with parameters: C: {}, Gamma: {}".format(SVC_C,SVC_gamma))
model.set_params(clf__C=SVC_C,clf__gamma=SVC_gamma)
fit = fit_function(model, evaluation, X_train, y_train, X_dev, y_dev, X_test, y_test, params, res_dev_list, res_test_list, params_list)
print("Best result on gold set: ", max(fit[1], key=lambda x:x["pearson"]))
print("Best params: ", fit[2][res_test_list.index(max(res_test_list, key=lambda x:x["pearson"]))])
else:
if emotion == 'joy':
if baseline == 'true':
SVC_C = 3.1
SVC_gamma = 0.95
else:
SVC_C = 2.7
SVC_gamma = 3.35
elif emotion == 'sadness':
if baseline == 'true':
SVC_C = 2.5
SVC_gamma = 1.25
else:
SVC_C = 2.2
SVC_gamma = 2.6
elif emotion == 'fear':
if baseline == 'true':
SVC_C = 2.1
SVC_gamma = 0.65
else:
SVC_C = 4.9
SVC_gamma = 4.4
elif emotion == 'anger':
if baseline == 'true':
SVC_C = 1.8
SVC_gamma = 1.7
else:
SVC_C = 2.6
SVC_gamma = 4.4
params = (SVC_C,SVC_gamma)
model.set_params(clf__C=SVC_C,clf__gamma=SVC_gamma)
fit = fit_function(model, evaluation, X_train, y_train, X_dev, y_dev, X_test, y_test, params, res_dev_list, res_test_list, params_list)
res_dev = fit[0][fit[1].index(max(fit[1], key=lambda x:x["pearson"]))]
res_test = max(fit[1], key=lambda x:x["pearson"])
return res_dev, res_test | 9843d9100039b082efb62f293d77552c50032b02 | 18,548 |
from random import random
def cross(genom1, genom2, mutation_rate, widths, bounds):
"""
Generates a child_genom by breeding 2 parent_genoms with
a mutation chance = mutation rate = [0, 1].
"""
child_genom = []
for i in range(len(genom1)):
if widths[i] == 0:
child_genom.append(genom1[i])
continue
if random() < mutation_rate:
# Mutation
child_genom.append(random() * widths[i] + bounds[i][0])
else:
# Breeding
rand = round(random())
child_genom.append(rand * genom1[i] + (1-rand) * genom2[i])
return child_genom | 0f973ffeefeec7b346a27ca3fda84210daff7d74 | 18,549 |
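A usage sketch (parent genomes and bounds invented here) for a two-gene search space on [0, 1]:
parent_a = [0.2, 0.9]
parent_b = [0.7, 0.1]
child = cross(parent_a, parent_b, mutation_rate=0.05,
              widths=[1.0, 1.0], bounds=[(0.0, 1.0), (0.0, 1.0)])
# Each gene is either copied from one parent or, with 5% probability,
# redrawn uniformly from its bound, so the child stays inside the search space.
print(child)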
def version_0_2(path_in, path_out_base, skip_if_exists = True):
"""
* name is based on start time (not launch time)
:param path_in:
:param path_out_base:
:param skip_if_exists:
:return:
"""
version = 'v0.2'
content = raw.read_file(path_in)
name_new = generate_name(content)
path_out = path_out_base.joinpath(name_new)
if path_out.is_file() and skip_if_exists:
print('\t File exists')
return None
ds = create_dataset(content, version = version)
# path_out
ds.to_netcdf(path_out)
return ds | add6fadabce12011fc1e5b136a1f3b042092d577 | 18,550 |
def value_or_dash(value):
"""Converts the given value to a unicode dash if the value does
not exist and does not equal 0."""
if not value and value != 0:
return u'\u2013'.encode('utf-8')
return value | 8cadbfd8dcfad9dfeb4112cb8537f0e0d5de49ba | 18,551 |
def null() -> ColumnExpr:
"""Equivalent to ``lit(None)``, the ``NULL`` value
:return: ``lit(None)``
.. admonition:: New Since
:class: hint
**0.6.0**
"""
return lit(None) | d51d861ac165bb5c40e372435bfa6698542a3e30 | 18,554 |
def uCSIsThaana(code):
"""Check whether the character is part of Thaana UCS Block """
ret = libxml2mod.xmlUCSIsThaana(code)
return ret | 3d6c4e712f997648f40ed9647c6b696126cdc99a | 18,555 |
def geomprojlib_Curve2d(*args):
"""
* gives the 2d-curve of a 3d-curve lying on a surface ( uses GeomProjLib_ProjectedCurve ) The 3dCurve is taken between the parametrization range [First, Last] <Tolerance> is used as input if the projection needs an approximation. In this case, the reached tolerance is set in <Tolerance> as output. WARNING : if the projection has failed, this method returns a null Handle.
:param C:
:type C: Handle_Geom_Curve &
:param First:
:type First: float
:param Last:
:type Last: float
:param S:
:type S: Handle_Geom_Surface &
:param UFirst:
:type UFirst: float
:param ULast:
:type ULast: float
:param VFirst:
:type VFirst: float
:param VLast:
:type VLast: float
:param Tolerance:
:type Tolerance: float &
:rtype: Handle_Geom2d_Curve
* gives the 2d-curve of a 3d-curve lying on a surface ( uses GeomProjLib_ProjectedCurve ) The 3dCurve is taken between the parametrization range [First, Last] <Tolerance> is used as input if the projection needs an approximation. In this case, the reached tolerance is set in <Tolerance> as output. WARNING : if the projection has failed, this method returns a null Handle.
:param C:
:type C: Handle_Geom_Curve &
:param First:
:type First: float
:param Last:
:type Last: float
:param S:
:type S: Handle_Geom_Surface &
:param Tolerance:
:type Tolerance: float &
:rtype: Handle_Geom2d_Curve
* gives the 2d-curve of a 3d-curve lying on a surface ( uses GeomProjLib_ProjectedCurve ) The 3dCurve is taken between the parametrization range [First, Last] If the projection needs an approximation, Precision::PApproximation() is used. WARNING : if the projection has failed, this method returns a null Handle.
:param C:
:type C: Handle_Geom_Curve &
:param First:
:type First: float
:param Last:
:type Last: float
:param S:
:type S: Handle_Geom_Surface &
:rtype: Handle_Geom2d_Curve
* gives the 2d-curve of a 3d-curve lying on a surface ( uses GeomProjLib_ProjectedCurve ). If the projection needs an approximation, Precision::PApproximation() is used. WARNING : if the projection has failed, this method returns a null Handle.
:param C:
:type C: Handle_Geom_Curve &
:param S:
:type S: Handle_Geom_Surface &
:rtype: Handle_Geom2d_Curve
* gives the 2d-curve of a 3d-curve lying on a surface ( uses GeomProjLib_ProjectedCurve ). If the projection needs an approximation, Precision::PApproximation() is used. WARNING : if the projection has failed, this method returns a null Handle. can expand a little the bounds of surface
:param C:
:type C: Handle_Geom_Curve &
:param S:
:type S: Handle_Geom_Surface &
:param UDeb:
:type UDeb: float
:param UFin:
:type UFin: float
:param VDeb:
:type VDeb: float
:param VFin:
:type VFin: float
:rtype: Handle_Geom2d_Curve
* gives the 2d-curve of a 3d-curve lying on a surface ( uses GeomProjLib_ProjectedCurve ). If the projection needs an approximation, Precision::PApproximation() is used. WARNING : if the projection has failed, this method returns a null Handle. can expand a little the bounds of surface
:param C:
:type C: Handle_Geom_Curve &
:param S:
:type S: Handle_Geom_Surface &
:param UDeb:
:type UDeb: float
:param UFin:
:type UFin: float
:param VDeb:
:type VDeb: float
:param VFin:
:type VFin: float
:param Tolerance:
:type Tolerance: float &
:rtype: Handle_Geom2d_Curve
"""
return _GeomProjLib.geomprojlib_Curve2d(*args) | 07a48fcad95aabcbbb31dc8563e3a57f51399b5c | 18,556 |
def histogram(ds, x, z=None, **plot_opts):
"""Dataset histogram.
Parameters
----------
ds : xarray.Dataset
The dataset to plot.
x : str, sequence of str
The variable(s) to plot the probability density of. If sequence, plot a
histogram of each instead of using a ``z`` coordinate.
z : str, optional
        If given, range over this coordinate and plot a histogram for each.
row : str, optional
Dimension to vary over as a function of rows.
col : str, optional
Dimension to vary over as a function of columns.
plot_opts
See ``xyzpy.plot.core.PLOTTER_DEFAULTS``.
"""
return Histogram(ds, x, z=z, **plot_opts) | 90e884734d7811258e148df8f8bba5a9dd29ac96 | 18,557 |
import pkg_resources
def get_resource(name):
"""Convenience method for retrieving a package resource."""
return pkg_resources.resource_stream(__name__, name) | 63aada8f6e99956b770bd9ea7f737d90432c3f90 | 18,559 |
def get_team(args):
"""Return authenticated team token data."""
return Team.query.get(args['team_id']) | 160a5aa27a246a740811aec764d7bc52eaa91098 | 18,560 |
def config_sanity_check(config: dict) -> dict:
"""
Check if the given config satisfies the requirements.
:param config: entire config.
"""
    # backward-compatibility support
config = parse_v011(config)
# check model
if config["train"]["method"] == "conditional":
if config["dataset"]["train"]["labeled"] is False: # unlabeled
raise ValueError(
"For conditional model, data have to be labeled, got unlabeled data."
)
return config | 6933dc0687da4fe4d1e3e9cea3f7cbb5caefb69b | 18,561 |
import yaml
def parse_yaml(f) -> Dataset:
    """Test that 'after' parameters are properly read from an open YAML file object ``f``."""
    d = yaml.safe_load(f)
dataset = d.get("dataset")[0]
d: FidesopsDataset = FidesopsDataset.parse_obj(dataset)
return convert_dataset_to_graph(d, "ignore") | 437fb7d9a495b59c21c88962d2f5d5543c041729 | 18,562 |
import pandas as pd
def race_data_cleaning(race_ethnicity_path):
"""Clean and relabel birth data based on race/ ethnicity."""
# Read in CSV.
race_df = pd.read_csv(race_ethnicity_path, na_values='*', engine='python')
# Fill na values with 0.
race_df.fillna(value=0, inplace=True)
# Drop default sort column.
race_df.drop(labels='sort', axis=1, inplace=True)
# Rename columns for ease of access.
race_df.rename(columns={'birth count': 'birth_count',
'birth count_pct': 'birth_percentage',
'county name': 'county',
'ethnicity desc': 'ethnicity',
'low birth weight ind desc': 'weight_indicator',
'race catg desc': 'race',
'year desc': 'year'
},
inplace=True
)
# Rename specific values for ease of access.
race_df.replace(to_replace=['2017 **',
'Low birth weight (<2500g)',
'Normal birth weight (2500g+)',
'African American (Black)',
'Pacific Islander/Hawaiian',
'Unknown/Not Reported'
],
value=[2017, 'low', 'normal',
'African American', 'Pacific Islander',
'Unknown'
],
inplace=True
)
# Clear irrelevant rows.
race_df = race_df[race_df.weight_indicator != 'Total']
race_df = race_df[race_df.year != 'Total']
# Convert years to numbers for ease of access.
race_df.year = pd.to_numeric(race_df.year)
return race_df | 1a7f8e540c14cdb42ff25b270916ef3af45e7790 | 18,564 |
import json
def validate_resource_policy(policy_document):
"""validate policy_document. Between 1 to 5120"""
if not isinstance(policy_document, policytypes):
raise ValueError("PolicyDocument must be a valid policy document")
if isinstance(policy_document, str) and not json_checker(policy_document):
raise ValueError("PolicyDocument must be a valid JSON formated string")
if isinstance(policy_document, dict):
policy_document_text = json.dumps(policy_document)
elif isinstance(policy_document, str):
policy_document_text = policy_document
else:
policy_document_text = policy_document.to_json()
# NB: {} empty dict is 2 length
if len(policy_document_text) < 3:
raise ValueError("PolicyDocument must not be empty")
if len(policy_document_text) > 5120:
raise ValueError("PolicyDocument maximum length must not exceed 5120")
return policy_document | b6b0a18a7e252cf5402aed6e17e7b184aa3b432f | 18,565 |
import numpy as np
def upsample_filt(size):
"""
Make a 2D bilinear kernel suitable for upsampling of the given (h, w) size.
"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor) | 7f19552a5e55a7dbcc5fd99992b9522377628c65 | 18,567 |
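# Illustrative sketch (not from the original source): the bilinear kernel produced by
# upsample_filt, as typically used to initialise a stride-2 transposed convolution.
# Assumes numpy is imported as np, as above.
kernel = upsample_filt(4)
print(kernel.shape)   # (4, 4)
print(kernel[1, 1])   # 0.5625 == 0.75 * 0.75, one of the peak weights around the kernel centre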
def _MAC_hash(mac_str):
"""
Returns MAC hash value in uppercase hexadecimal form and truncated to
32 characters.
"""
return MD5.new(mac_str).hexdigest().upper()[:32] | c60785fe3b41355ada19d08595f71ff0c02ff3ce | 18,568 |
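# Hedged usage sketch: _MAC_hash relies on a module-level MD5 import, assumed here to be
# pycryptodome's Crypto.Hash.MD5, and expects the MAC string as bytes. Note that an MD5 hex
# digest is already 32 characters, so the [:32] truncation only guards the stated contract.
from Crypto.Hash import MD5
print(_MAC_hash(b"00:11:22:33:44:55"))  # 32-character uppercase hex digest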
import numpy as np
from scipy import sparse
def divide_rows(matrix, column, in_place=False):
"""Divide each row of `matrix` by the corresponding element in `column`.
The result is as follows: out[i, j] = matrix[i, j] / column[i]
Parameters
----------
matrix : np.ndarray, scipy.sparse.csc_matrix or csr_matrix, shape (M, N)
The input matrix.
column : a 1D np.ndarray, shape (M,)
The column dividing `matrix`.
in_place : bool (optional, default False)
Do the computation in-place.
Returns
-------
out : same type as `matrix`
The result of the row-wise division.
"""
if in_place:
out = matrix
else:
out = matrix.copy()
if type(out) in [sparse.csc_matrix, sparse.csr_matrix]:
if type(out) == sparse.csr_matrix:
convert_to_csr = True
out = out.tocsc()
else:
convert_to_csr = False
column_repeated = np.take(column, out.indices)
nz = out.data.nonzero()
out.data[nz] /= column_repeated[nz]
if convert_to_csr:
out = out.tocsr()
else:
out /= column[:, np.newaxis]
return out | ae69f91f999cd4f5ea78c30de2ff7c089fe54efd | 18,569 |
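# Minimal usage sketch for divide_rows (assumes numpy as np and scipy.sparse as sparse,
# matching the imports above); the dense and CSR code paths give the same result.
m = np.array([[2.0, 4.0], [3.0, 9.0]])
col = np.array([2.0, 3.0])
print(divide_rows(m, col))                               # [[1. 2.] [1. 3.]]
print(divide_rows(sparse.csr_matrix(m), col).toarray())  # same values via the sparse branch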
def multiSMC(nruns=10, nprocs=0, out_func=None, collect=None, **args):
"""Run SMC algorithms in parallel, for different combinations of parameters.
`multiSMC` relies on the `multiplexer` utility, and obeys the same logic.
A basic usage is::
results = multiSMC(fk=my_fk_model, N=100, nruns=20, nprocs=0)
This runs the same SMC algorithm 20 times, using all available CPU cores.
The output, ``results``, is a list of 20 dictionaries; a given dict corresponds
to a single run, and contains the following (key, value) pairs:
+ ``'run'``: a run identifier (a number between 0 and nruns-1)
+ ``'output'``: the corresponding SMC object (once method run was completed)
Since a `SMC` object may take a lot of space in memory (especially when
the option ``store_history`` is set to True), it is possible to require
`multiSMC` to store only some chosen summary of the SMC runs, using option
`out_func`. For instance, if we only want to store the estimate
of the log-likelihood of the model obtained from each particle filter::
of = lambda pf: pf.logLt
results = multiSMC(fk=my_fk_model, N=100, nruns=20, out_func=of)
It is also possible to vary the parameters. Say::
results = multiSMC(fk=my_fk_model, N=[100, 500, 1000])
will run the same SMC algorithm 30 times: 10 times for N=100, 10 times for
N=500, and 10 times for N=1000. The number 10 comes from the fact that we
did not specify nruns, and its default value is 10. The 30 dictionaries
obtained in results will then contain an extra (key, value) pair that will
give the value of N for which the run was performed.
It is possible to vary several arguments. Each time a list must be
provided. The end result will amount to take a *cartesian product* of the
arguments::
results = multiSMC(fk=my_fk_model, N=[100, 1000], resampling=['multinomial',
'residual'], nruns=20)
In that case we run our algorithm 80 times: 20 times with N=100 and
resampling set to multinomial, 20 times with N=100 and resampling set to
residual and so on.
Finally, if one uses a dictionary instead of a list, e.g.::
results = multiSMC(fk={'bootstrap': fk_boot, 'guided': fk_guided}, N=100)
then, in the output dictionaries, the values of the parameters will be replaced
    by corresponding keys; e.g. in the example above, {'fk': 'bootstrap'}. This is
    convenient in cases like this one, where the parameter value is some non-standard
object.
Parameters
----------
* nruns: int, optional
number of runs (default is 10)
* nprocs: int, optional
number of processors to use; if negative, number of cores not to use.
        Default value is 0 (use all available cores)
* out_func: callable, optional
function to transform the output of each SMC run. (If not given, output
will be the complete SMC object).
* collect: list of collectors, or 'off'
this particular argument of class SMC may be a list, hence it is "protected"
from Cartesianisation
* args: dict
arguments passed to SMC class (except collect)
Returns
-------
A list of dicts
See also
--------
`utils.multiplexer`: for more details on the syntax.
"""
f = _identity if out_func is None else _picklable_f(out_func)
return utils.multiplexer(f=f, nruns=nruns, nprocs=nprocs, seeding=True,
protected_args={'collect': collect},
**args) | fe7aeb4464a207d6d6764c1d9efc9a199b9100cf | 18,570 |
def colon_event_second(colon_word, words, start):
"""The second <something>
<something> can be:
* <day-name> -- the second day of that name in a month
"""
if len(words) != 1:
raise GiveUp('Expected a day name, in {}'.format(
colon_what(colon_word, words)))
elif words[0].capitalize() not in DAYS:
raise GiveUp('Expected a day name, not {!r}. in {}'.format(
words[0], colon_what(colon_word, words)))
day_name = words[0].capitalize()
date = calc_ordinal_day(start, 2, day_name)
event = Event(date)
event.repeat_ordinal.add((2, day_name))
event.colon_date = colon_what(colon_word, words)
return event | d438eb5a2f7f07e9c5cc4ae7e3b95f102bc909d6 | 18,571 |
import csv
import pandas as pd
def outfalls_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
global RELEVANT_NODES
RELEVANT_NODES = get_nodes_from_links(model, model_id)
start = build_groups_dicts(model)['nodes_outfalls']['start']
skip_rows = build_groups_dicts(model)['nodes_outfalls']['line_to_skip']
header = build_groups_dicts(model)['nodes_outfalls']['header']
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif (i == start + 1):
# headers = line
else:
if len(RELEVANT_NODES) == 0:
contents.append(line[0].split())
else:
if line[0].split()[0] in RELEVANT_NODES:
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
print('outfalls','df created!')
return df | cbbbeb52faebd1ebc57de7e13b555f9d664639d9 | 18,572 |
def get_count(path, **kwargs):
"""
    Return the number of items in a dictionary or array
:param path: Path to the dictionary or array to count
This operation is only valid in :cb_bmeth:`lookup_in`
.. versionadded:: 2.2.5
"""
return _gen_3spec(_P.SDCMD_GET_COUNT, path, **kwargs) | 7a8fe5a288cfba28dfcbac523ea4579cd4d8d165 | 18,573 |
def error_message() -> str:
"""Error message for invalid input"""
return 'Invalid input. Use !help for a list of commands.' | 2ffea48dd495d464264bc657ca62cfe6043a1084 | 18,574 |
def has_substring(string):
"""
Validate that the given substring is part of the given string.
>>> f = has_substring('foobarhamjam')
>>> f('arham')
True
>>> f('barham')
True
>>> f('FOO')
False
>>> f('JAMHAM')
False
:param str string: Main string to compare against.
:rtype: A validator function.
"""
def validator(substring):
return substring in string
return validator | 33b7d5b52c6be4185dfdf96bad9a6d33f284ba52 | 18,575 |
def repos():
"""Display And Add Repos"""
page = Repos(ReposTable, dynamodb_table)
return page.display() | 8c5ec3e44caa7e88cb9ed7be4039309ae9e5cfe5 | 18,576 |
def lambda_handler(event, _context):
""" Main Handler. """
microservice_name = event.get('MicroserviceName')
environment_name = event.get('EnvironmentName')
new_vn_sha = event.get('Sha')
failure_threshold_value = event.get('FailureThresholdValue')
if not failure_threshold_value:
failure_threshold_value = 0
failure_threshold_time = event.get('FailureThresholdTime')
if not failure_threshold_time:
failure_threshold_time = 600
return get_healthcheck_status(
microservice_name,
environment_name,
new_vn_sha,
failure_threshold_value,
failure_threshold_time
) | c14fa2ce93d526b1ef5dd0a79a4d7392d1951c5f | 18,577 |
def masked_crc32c(data):
"""Copied from
https://github.com/TeamHG-Memex/tensorboard_logger/blob/master/tensorboard_logger/tensorboard_logger.py"""
x = u32(crc32c(data)) # pylint: disable=invalid-name
return u32(((x >> 15) | u32(x << 17)) + 0xa282ead8) | 24c076033a6c9252b411604a3935d4ba09b5aa16 | 18,578 |
def yaml_dictionary(gra, one_indexed=True):
""" generate a YAML dictionary representing a given graph
"""
if one_indexed:
# shift to one-indexing when we print
atm_key_dct = {atm_key: atm_key+1 for atm_key in atom_keys(gra)}
gra = relabel(gra, atm_key_dct)
yaml_atm_dct = atoms(gra)
yaml_bnd_dct = bonds(gra)
# prepare the atom dictionary
yaml_atm_dct = dict(sorted(yaml_atm_dct.items()))
yaml_atm_dct = dict_.transform_values(
yaml_atm_dct, lambda x: dict(zip(ATM_PROP_NAMES, x)))
    # prepare the bond dictionary
yaml_bnd_dct = dict_.transform_keys(
yaml_bnd_dct, lambda x: tuple(sorted(x)))
yaml_bnd_dct = dict(sorted(yaml_bnd_dct.items()))
yaml_bnd_dct = dict_.transform_keys(
yaml_bnd_dct, lambda x: '-'.join(map(str, x)))
yaml_bnd_dct = dict_.transform_values(
yaml_bnd_dct, lambda x: dict(zip(BND_PROP_NAMES, x)))
yaml_gra_dct = {'atoms': yaml_atm_dct, 'bonds': yaml_bnd_dct}
return yaml_gra_dct | b2efa54c188035971f174c14ce6b3543c9234cde | 18,579 |
import numpy as np
def CreateCloudsWeights( weights = None, names = None, n_clusters = None,
                         save = 1, dirCreate = 1, filename = 'WC',
                         dirName = 'WCC', number = 50 ):
    """SAME AS CreateClouds, but takes as input a list of weights for each class
    and makes the clouds based on them """
dictP, dictN = CalculateWeights( weights = weights, names = names,
n_clusters = n_clusters, number = number)
for i in np.arange( n_clusters ):
filenamePos = filename+'Pos'+str(i)
filenameNeg = filename+'Neg'+str(i)
clouds( counts = dictP[i], filename = filenamePos, dirName = dirName,
dirCreate = dirCreate)
clouds( counts = dictN[i], filename = filenameNeg, dirName = dirName,
dirCreate = dirCreate)
params = {'dictp':dictP, 'dictN': dictN}
return params | 9cfb7a94cb27f4587c24ad35b423af2246c68440 | 18,580 |
import numpy as np
def sigmoid(x, deri=False):
    """
    Sigmoid activation function:
    Parameters:
    x (array) : A numpy array
    deri (boolean): If set to True the function calculates the derivative of sigmoid
    Returns:
    x (array) : Numpy array after applying the appropriate function
"""
if deri:
return x*(1-x)
else:
return 1/(1+np.exp(-x)) | d523a3ec7df31b71ae60609f63314475784e6b38 | 18,581 |
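# Quick usage sketch (assumes numpy as np): the deri=True branch expects the already
# activated values, i.e. it computes s * (1 - s) on the forward-pass output.
x = np.array([-1.0, 0.0, 1.0])
s = sigmoid(x)               # [0.2689..., 0.5, 0.7310...]
ds = sigmoid(s, deri=True)   # derivative of the sigmoid evaluated at x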
from collections import Counter
def palindrome_permutation(string):
"""
All palindromes follow the same rule, they have at most one letter whose
count is odd, this letter being the "pivot" of the palindrome. The letters
with an even count can always be permuted to match each other across the
pivot.
"""
string = string.strip().lower()
c = Counter(string)
l = [1 for letter_count in c.values() if letter_count % 2 == 1]
return sum(l) < 2 | a1e5721d73e9773d802b423747277dd43ee5983f | 18,583 |
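# Illustrative checks: "carrace" has exactly one letter with an odd count ('e'), so some
# permutation ("racecar") is a palindrome; "abc" has three odd counts, so none is.
print(palindrome_permutation("carrace"))  # True
print(palindrome_permutation("abc"))      # False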
import ctypes
def generate_pfm_v2(pfm_header_instance, toc_header_instance, toc_element_list, toc_elements_hash_list,
platform_id_header_instance, flash_device_instance, allowable_fw_list, fw_id_list, hash_type):
"""
Create a PFM V2 object from all the different PFM components
:param pfm_header_instance: Instance of a PFM header
:param toc_header_instance: Instance of a TOC header
:param toc_element_list: List of TOC elements to be included in PFM
:param toc_elements_hash_list: List of TOC hashes to be included in PFM
:param platform_id_header_instance: Instance of a PFM platform header
:param flash_device_instance: Instance of a PFM flash device header
:param allowable_fw_list: List of all allowable FWs to be included in PFM
:param fw_id_list: List of all FW ID instances
    :param hash_type: Hashing algorithm to be used for hashing TOC elements
    :return: Instance of a PFM object
"""
hash_algo = None
if hash_type == 2:
hash_algo = SHA512
elif hash_type == 1:
hash_algo = SHA384
elif hash_type == 0:
hash_algo = SHA256
else:
raise ValueError ("Invalid manifest hash type: {0}".format (hash_type))
toc_elements_size = ctypes.sizeof(toc_element_list[0]) * len (toc_element_list)
toc_hash_size = ctypes.sizeof(toc_elements_hash_list[0]) * len (toc_elements_hash_list)
# Table Hash
    table_hash_buf = (ctypes.c_ubyte * ctypes.sizeof(toc_header_instance))()
    ctypes.memmove(ctypes.addressof(table_hash_buf), ctypes.addressof(toc_header_instance), ctypes.sizeof(toc_header_instance))
table_hash_object = hash_algo.new(table_hash_buf)
offset = 0
toc_elements_buf = (ctypes.c_ubyte * toc_elements_size)()
    for toc_element in toc_element_list:
ctypes.memmove(ctypes.addressof(toc_elements_buf) + offset, ctypes.addressof(toc_element), ctypes.sizeof(toc_element))
offset += ctypes.sizeof(toc_element)
# Update table hash with TOC elements
table_hash_object.update(toc_elements_buf)
toc_hash_buf = (ctypes.c_ubyte * toc_hash_size)()
offset = 0
for toc_hash in toc_elements_hash_list:
ctypes.memmove(ctypes.addressof(toc_hash_buf) + offset, ctypes.addressof(toc_hash), ctypes.sizeof(toc_hash))
offset += ctypes.sizeof(toc_hash)
# Update table hash with TOC
table_hash_object.update(toc_hash_buf)
table_hash_buf_size = ctypes.c_ubyte * table_hash_object.digest_size
table_hash_buf = (ctypes.c_ubyte * table_hash_object.digest_size).from_buffer_copy(table_hash_object.digest())
table_hash_buf_size = ctypes.sizeof(table_hash_buf)
platform_id_size = ctypes.sizeof(platform_id_header_instance)
platform_id_buf = (ctypes.c_ubyte * platform_id_size)()
ctypes.memmove(ctypes.addressof(platform_id_buf), ctypes.addressof(platform_id_header_instance), platform_id_size)
allowable_fw_size = 0
for fw_id in fw_id_list.values():
allowable_fw_size += ctypes.sizeof(fw_id)
for fw_list in allowable_fw_list.values():
for allowable_fw in fw_list:
allowable_fw_size += ctypes.sizeof(allowable_fw)
flash_device_size = 0
if flash_device_instance != None:
flash_device_size = ctypes.sizeof(flash_device_instance)
flash_device_buf = (ctypes.c_ubyte * flash_device_size)()
if flash_device_size:
ctypes.memmove(ctypes.addressof(flash_device_buf), ctypes.addressof(flash_device_instance), flash_device_size)
class pfm_v2(ctypes.LittleEndianStructure):
_pack_ = 1
_fields_ = [('manifest_header', manifest_common.manifest_header),
('toc_header', manifest_common.manifest_toc_header),
('toc_elements', ctypes.c_ubyte * toc_elements_size),
('toc_hash', ctypes.c_ubyte * toc_hash_size),
('table_hash', ctypes.c_ubyte * table_hash_buf_size),
('platform_id', ctypes.c_ubyte * platform_id_size),
('flash_device', ctypes.c_ubyte * flash_device_size),
('allowable_fw', ctypes.c_ubyte * allowable_fw_size)]
offset = 0
fw_buf = (ctypes.c_ubyte * allowable_fw_size)()
for fw_type, fw_id in fw_id_list.items():
ctypes.memmove(ctypes.addressof(fw_buf) + offset, ctypes.addressof(fw_id), ctypes.sizeof(fw_id))
offset += ctypes.sizeof(fw_id)
fw_list = allowable_fw_list.get(fw_type)
for allowed_fw in fw_list:
ctypes.memmove(ctypes.addressof(fw_buf) + offset, ctypes.addressof(allowed_fw), ctypes.sizeof(allowed_fw))
offset += ctypes.sizeof(allowed_fw)
return pfm_v2(pfm_header_instance, toc_header_instance, toc_elements_buf, toc_hash_buf, table_hash_buf,
platform_id_buf, flash_device_buf, fw_buf) | 2b1dfe4150cc18c588c3b8711e7ea94afcfbfbdc | 18,584 |
def symmetrise_AP(AP):
"""
No checks on this since this is a deep-inside-module helper routine.
AP must be a batch of matrices (n, 1, N, N).
"""
return AP + AP.transpose(2, 3) | 4a993f42e576656ec5f450c95af969722f10a58d | 18,585 |
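# Minimal sketch (assumes PyTorch, since transpose(2, 3) is tensor-style): the output of
# symmetrise_AP is symmetric in its last two dimensions.
import torch
AP = torch.randn(2, 1, 4, 4)
S = symmetrise_AP(AP)
print(torch.allclose(S, S.transpose(2, 3)))  # True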
def index():
    """News home page"""
    # ---------------------- 1. Query basic user info for display ----------------------
    # Requirement: the code that queries basic user info is needed in several places,
    # so, to reuse code, the duplicated logic is wrapped in a decorator.
    # # 1. Get the user_id from the session
    # user_id = session.get("user_id")
    #
    # user = None
    # # Define before use, otherwise: local variable 'user_dict' referenced before assignment
    # user_dict = None
    # if user_id:
    #     # 2. Query the user object by user_id
    #     try:
    #         user = User.query.get(user_id)
    #     except Exception as e:
    #         current_app.logger.error(e)
    #         return "Exception while querying the user object"
    # Read the user object from the g object
    user = g.user
    # 3. Convert the user object to a dict
    """
    if user:
        user_dict = user.to_dict()
    """
    user_dict = user.to_dict() if user else None
    # ---------------------- 2. Query the news click-ranking list ----------------------
    # order_by: sort the news by click count in descending order
    try:
        rank_news_list = News.query.order_by(News.clicks.desc()).limit(constants.CLICK_RANK_MAX_NEWS)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="Exception while querying click-ranking data")
    """
    rank_news_list: a list of objects [news_obj1, news_obj2, .....]
    rank_dict_list = []
    if rank_news_list:
        for news in rank_news_list:
            news_dict = news.to_dict()
            rank_dict_list.append(news_dict)
    """
    # Convert the list of objects into a list of dicts
    rank_dict_list = []
    for news in rank_news_list if rank_news_list else []:
        # Convert the object to a dict and append it to the list
        rank_dict_list.append(news.to_dict())
    # ---------------------- 3. Query the news category list ----------------------
    # 1. Query all category data
    try:
        categories = Category.query.all()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="Exception while querying news category objects")
    # 2. Convert the list of category objects into a list of dicts
    category_dict_list = []
    for category in categories if categories else []:
        # Convert the category object to a dict and append it to the list
        category_dict_list.append(category.to_dict())
    # Return the template along with the queried data
    """
    Data format:
    data = {
        "user_info": {
            "id": self.id,
            "nick_name": self.nick_name,
        }
    }
    Usage: data.user_info.nick_name
    data.rank_news_list -- a list of dicts
    """
    # Assemble the response data
    data = {
        "user_info": user_dict,
        "click_news_list": rank_dict_list,
        "categories": category_dict_list
    }
    return render_template("news/index.html", data=data) | 63965ca1caef0efafe859c10ffd4d72724e16504 | 18,586
def formatter_message(message, use_color = True):
""" Method to format the pattern in which the log messages will be
displayed.
@param message: message log to be displayed
@param use_color: Flag to indicates the use of colors or not
@type message: str
@type use_color: boolean
@return: the new formatted message
@rtype: str
"""
if use_color:
message = message.replace('$RESET', RESET_SEQ).replace('$BOLD',
BOLD_SEQ)
else:
message = message.replace('$RESET', '').replace('$BOLD', '')
return message | f171638a60e6a031fde7b249a7e40839da25addc | 18,587 |
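# Hedged usage sketch: RESET_SEQ and BOLD_SEQ are module-level ANSI escape sequences that
# formatter_message assumes; typical definitions are shown here for illustration only.
RESET_SEQ = "\033[0m"
BOLD_SEQ = "\033[1m"
fmt = "$BOLD%(levelname)s$RESET %(message)s"
print(formatter_message(fmt))                   # placeholders replaced with ANSI codes
print(formatter_message(fmt, use_color=False))  # placeholders stripped instead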
def find_offsets(head_mapping):
"""Find the time offsets that align the series in head_mapping
Finds the set of time offsets that minimize the sum of squared
differences in times at which each series crosses a particular
head. Input is a mapping of head id (a hashable value
corresponding to a head, normally an integer) to a sequence of
(series_id, time) pairs wherein series_id is an identifier for a
sequence and time is the time at which the series crossed the
corresponding head value.
The series with the series_id that is largest (last in sort order)
is treated as the reference and given an offset of zero; all other
offsets are relative to that one.
    Returns series_ids, offsets where series_ids are the sorted series identifiers and
    offsets are the corresponding time offsets (zero for the reference series).
"""
# Eliminate all heads with only one series, these are
# uninformative
for head_id, seq in list(head_mapping.items()):
# Don't use "assert seq" here, this is an ndarray
assert len(seq) > 0 # pylint: disable=len-as-condition
if len(seq) == 1:
del head_mapping[head_id]
# Assemble mapping of series ids to row numbers for the
# least-squares problem
series_ids = ((series_id for series_id, t_mean in seq)
for seq in list(head_mapping.values()))
series_ids = sorted(set().union(*series_ids))
series_indices = dict(zip(series_ids,
range(len(series_ids))))
# Reference series corresponds to the highest series id; it
# has the largest initial head, because we sorted them
reference_index = max(series_ids)
LOG.info('Reference index: %s', reference_index)
number_of_equations = sum(len(series_at_head) for series_at_head
in list(head_mapping.values()))
number_of_unknowns = len(series_indices) - 1
LOG.info('%s equations, %s unknowns',
number_of_equations, number_of_unknowns)
A = np.zeros((number_of_equations, number_of_unknowns))
b = np.zeros((number_of_equations,))
row_template = np.zeros((number_of_unknowns,))
row_index = 0
for head_id, series_at_head in list(head_mapping.items()):
row_template[:] = 0
sids, times = list(zip(*series_at_head))
number_of_series_at_head = len(sids)
indices = [series_indices[index] for index in sids
if index != reference_index]
row_template[indices] = 1. / number_of_series_at_head
mean_time = np.mean(times)
for series_id, t in series_at_head:
A[row_index] = row_template
# !!! some redundancy here
if series_id != reference_index:
series_index = series_indices[series_id]
A[row_index, series_index] -= 1
b[row_index] = t - mean_time
row_index += 1
assert row_index == number_of_equations, row_index
ATA = np.dot(A.transpose(), A)
assert ATA.shape == (number_of_unknowns,
number_of_unknowns), ATA.shape
ATd = np.dot(A.transpose(), b)
offsets = linalg_mod.solve(ATA, ATd) # pylint: disable=E1101
# this was the boundary condition, zero offset for
# reference (last) id
offsets = np.concatenate((offsets, [0]))
# offsets are by index, but reverse mapping is trivial
# because series ids are sorted
assert len(series_ids) == len(offsets), \
'{} != {}'.format(len(series_ids), len(offsets))
return (series_ids, offsets) | 93864ec2c9902e28e98eeb6e225e918ffc21dbaa | 18,588 |
def get_argument(value, arg):
"""Get argument by variable"""
return value.get(arg, None) | 0abd48a3a241ab1076c3ca19241df5b7b4346224 | 18,589 |
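# get_argument is a template-filter style helper; a direct call shows the behaviour:
print(get_argument({"page": 2, "q": "news"}, "page"))  # 2
print(get_argument({"page": 2}, "missing"))            # None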
from nltk.tokenize import sent_tokenize, word_tokenize
def tokenize(docs, word_tokenize_flag=1):
    """
    :param docs: list of raw document strings
    :param word_tokenize_flag: 1 to word-tokenize each sentence, 0 to return sentences only
    :return: list of word-token lists (flag 1) or flat list of sentences (flag 0)
    """
sent_tokenized = []
for d_ in docs:
sent_tokenized += sent_tokenize(d_)
if word_tokenize_flag==1:
word_tokenized = []
for sent in sent_tokenized:
word_tokenized.append(word_tokenize(sent))
return word_tokenized
elif word_tokenize_flag==0:
return sent_tokenized | b9484010c3e98aa32d96450122510f46dc0d8d72 | 18,590 |
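# Quick usage sketch (assumes NLTK with the 'punkt' tokenizer data installed, matching the
# sent_tokenize / word_tokenize names used above):
docs = ["First sentence. Second one.", "Another document."]
print(tokenize(docs, word_tokenize_flag=0))  # flat list of sentences
print(tokenize(docs, word_tokenize_flag=1))  # one list of word tokens per sentence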
def is_literal(expr):
"""
Returns True if expr is a literal, else False.
Examples
========
>>> is_literal(a)
True
>>> is_literal(~a)
True
>>> is_literal(a + b)
True
>>> is_literal(Or(a, b))
False
"""
if isinstance(expr, Not):
return not isinstance(expr.args[0], BooleanFunction)
else:
return not isinstance(expr, BooleanFunction) | 7ec6b4a00aea544a05686f228be73fa71e70df6f | 18,591 |
def get_target_proportions_of_current_trial(individuals, target):
"""Get the proportion waiting times within the target for a given trial of
a threshold
Parameters
----------
individuals : object
A ciw object that contains all individuals records
Returns
-------
int
all ambulance patients that finished the simulation
int
all ambulance patients whose waiting times where within the target
int
all other patients that finished the simulation
int
all other patients whose waiting times where within the target
"""
ambulance_waits, ambulance_target_waits = 0, 0
other_waits, other_target_waits = 0, 0
for individual in individuals:
ind_class = len(individual.data_records) - 1
rec = individual.data_records[-1]
if rec.node == 2 and ind_class == 0:
other_waits += 1
if rec.waiting_time < target:
other_target_waits += 1
elif rec.node == 2 and ind_class == 1:
ambulance_waits += 1
if rec.waiting_time < target:
ambulance_target_waits += 1
return ambulance_waits, ambulance_target_waits, other_waits, other_target_waits | 95f3781677f3ca7bb620488778b52502783c6eb9 | 18,592 |
def how_many(aDict):
"""
aDict: A dictionary, where all the values are lists.
returns: int, how many values are in the dictionary.
"""
return sum(len(value) for value in aDict.values()) | ed1729b55411f29626dfe61c6853bc19813ceedc | 18,593 |
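# Example: the count is taken over the values of every list in the dictionary.
print(how_many({'a': [1, 2], 'b': [3, 4, 5]}))  # 5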
def crop_keypoint_by_coords(keypoint, crop_coords, crop_height, crop_width, rows, cols):
"""Crop a keypoint using the provided coordinates of bottom-left and top-right corners in pixels and the
required height and width of the crop.
"""
x, y, a, s = keypoint
x1, y1, x2, y2 = crop_coords
cropped_keypoint = [x - x1, y - y1, a, s]
return cropped_keypoint | 5a2365a611275fea4d0f5d031127426c88c43905 | 18,595 |
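# Minimal sketch: the keypoint (x, y, angle, scale) is shifted into the crop's frame using
# only the (x1, y1) corner of crop_coords; the remaining arguments are accepted but unused.
kp = (100, 80, 0.0, 1.0)
print(crop_keypoint_by_coords(kp, crop_coords=(40, 30, 140, 130),
                              crop_height=100, crop_width=100, rows=200, cols=200))
# [60, 50, 0.0, 1.0]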
import pandas as pd
def get_tcoeff(epd_model, dF):
    """
    Transmission coefficients beta, gamma and delta
    can be computed directly from the time series data.
    Here we do not need reference to any compartmental model.
"""
df = dF.copy()
dfc = pd.DataFrame(columns=['date','beta','gamma','delta'])
df['infected'] = df['confirmed'] \
- df['recovered'] - df['deaths']
I = df['infected']
R = df['recovered']
D = df['deaths']
dI = I.diff(periods=1).iloc[1:]
dR = R.diff(periods=1).iloc[1:]
dD = D.diff(periods=1).iloc[1:]
dfc['beta'] = (dI + dR + dD ) / I
if epd_model == 'SIR':
dfc['gamma'] = (dR+dD) / I
if epd_model == 'SIRD':
dfc['gamma'] = dR / I
dfc['delta'] = dD / I
dfc['date'] = df['date'].to_list()
dfc.index = df['date'].to_list()
return dfc | 22d6290f15efcdeb2e75b421c8d5d0fa17ff57f3 | 18,596 |
from typing import Any
def rx_filter(observable: Observable, predicate: PredicateOperator) -> Observable:
"""Create an observable which event are filtered by a predicate function.
Args:
observable (Observable): observable source
        predicate (Operator): predicate function which takes one argument and returns
            a truthy value
Returns:
(Observable): observable instance
"""
_awaitable = iscoroutinefunction(predicate)
async def _subscribe(an_observer: Observer) -> Subscription:
async def _on_next(item: Any):
nonlocal _awaitable
_test = await predicate(item) if _awaitable else predicate(item) # type: ignore
if _test:
await an_observer.on_next(item)
return await observable.subscribe(an_observer=rx_observer_from(observer=an_observer, on_next=_on_next))
return rx_create(subscribe=_subscribe) | 1d43435d04660b1a05eb906b16f031d270122585 | 18,597 |
from typing import Optional
def map_time_program(raw_time_program, key: Optional[str] = None) \
-> TimeProgram:
"""Map *time program*."""
result = {}
if raw_time_program:
result["monday"] = map_time_program_day(
raw_time_program.get("monday"), key)
result["tuesday"] = map_time_program_day(
raw_time_program.get("tuesday"), key)
result["wednesday"] = map_time_program_day(
raw_time_program.get("wednesday"), key)
result["thursday"] = map_time_program_day(
raw_time_program.get("thursday"), key)
result["friday"] = map_time_program_day(
raw_time_program.get("friday"), key)
result["saturday"] = map_time_program_day(
raw_time_program.get("saturday"), key)
result["sunday"] = map_time_program_day(
raw_time_program.get("sunday"), key)
return TimeProgram(result) | 4a1d97d61e195184f0c3c40e4664a9904d591837 | 18,598 |
def _find_tols(equipment_id, start, end):
"""Returns existing TransportOrderLines matching with given arguments.
Matches only if load_in is matching between start and end."""
#logger.error('Trying to find TOL')
#logger.error(equipment_id)
#logger.error(start_time)
#logger.error(end_time)
tols = TransportOrderLine.objects.filter(
equipment__id=equipment_id).filter(
Q(transport_order__to_loc_load_in__range=(start, end)) | Q(transport_order__to_convention__load_in__range=(start, end))
#Q(transport_order__from_loc_load_out__range=(start, end)) | Q(transport_order__to_loc_load_in__range=(start, end)) | Q(transport_order__from_convention__load_out__range=(start, end)) | Q(transport_order__to_convention__load_in__range=(start, end))
)
return tols | 5ebec8857d56382e377bd9d98ce2bb7aa74a44b2 | 18,599 |
import numpy as np
def MC1(N, g1, x):
    """ Calculate the agent's value of policy 1 by Monte Carlo integration
    Args:
        N (int): Number of iterations/draws
        g1 (callable): Function giving the agent's value of policy 1
        x (float): Drawn from a beta distribution (X); kept for interface compatibility, unused here
    Returns:
        MC1 (float): Monte Carlo estimate of the agent's value of policy 1
    Note:
        par2 and par3 are assumed to be module-level parameters passed through to g1.
    """
    # Draw N random values from the beta distribution (X)
    X = np.random.beta(2, 7, size=N)
return np.mean(g1(X, par2, par3)) | f3a25656f60788e0ec50d75be6d20ff3e497f500 | 18,600 |