content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def index(current_user=None):
""" Display home page """
return render_template('homepage.html', username=current_user['name'], \
logged_in=current_user['is_authenticated'], \
display_error=request.cookies.get('last_attempt_error') == 'True', \
login_banner=APP.config['LOGIN_BANNER']) | 287d8101ef318cb7ca308340e0d11ab157538450 | 17,555 |
def disable_admin_access(session, return_type=None, **kwargs):
"""
Disable Admin access
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
path = "/api/users/admin_access/disable.json"
return session.post_api(path=path, return_type=return_type, **kwargs) | 5ac6c09ed3098f5b99baa2d5749d7b42a465e9f4 | 17,557 |
def _get_bbox_indices(x, y, bbox):
"""
Convert bbox values to array indices
:param x, y: arrays with the X, Y coordinates
:param bbox: minx, miny, maxx, maxy values
:return: bbox converted to array indices
"""
minx, miny, maxx, maxy = bbox
xindices, = np.where((x >= minx) & (x <= maxx))
yindices, = np.where((y >= miny) & (y <= maxy))
return xindices[0], xindices[-1]+1, yindices[0], yindices[-1]+1 | dd71f1852971dbd2d3026c1720f9f477b3093fc8 | 17,558 |
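A minimal usage sketch of the bbox-to-index conversion above, assuming NumPy 1-D coordinate axes (the values are illustrative only):
import numpy as np

x = np.linspace(0.0, 10.0, 11)   # 0, 1, ..., 10
y = np.linspace(0.0, 5.0, 6)     # 0, 1, ..., 5
bbox = (2.0, 1.0, 6.0, 4.0)      # minx, miny, maxx, maxy

minx, miny, maxx, maxy = bbox
xindices, = np.where((x >= minx) & (x <= maxx))
yindices, = np.where((y >= miny) & (y <= maxy))
# slice bounds as returned by _get_bbox_indices
print(xindices[0], xindices[-1] + 1, yindices[0], yindices[-1] + 1)  # 2 7 1 5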
def skill_competencies():
"""
Called by S3OptionsFilter to provide the competency options for a
particular Skill Type
"""
table = s3db.hrm_skill
ttable = s3db.hrm_skill_type
rtable = s3db.hrm_competency_rating
query = (table.id == request.args[0]) & \
(table.skill_type_id == ttable.id) & \
(rtable.skill_type_id == table.skill_type_id)
records = db(query).select(rtable.id,
rtable.name,
orderby = ~rtable.priority,
)
response.headers["Content-Type"] = "application/json"
return records.json() | 7edc87d20d36d25b05337365ed903126ef02742f | 17,559 |
def calc_field_changes(element, np_id):
"""
Walk up the tree of geo-locations, finding the new parents.
These will be set onto all the museumobjects.
"""
fieldname = element._meta.concrete_model.museumobject_set.\
related.field.name
field_changes = {}
field_changes[fieldname] = element.id
if hasattr(element, 'parent'):
field_changes.update(
calc_field_changes(element.parent, element.parent.id))
return field_changes | cba816488dcf10a774bc18b1b3f6498e1d8dc3d8 | 17,560 |
def index(request, _):
"""
路由请求
`` request `` 请求对象
"""
if request.method == 'GET' or request.method == 'get':
return index_page(request)
elif request.method == 'POST' or request.method == 'post':
return send_wxmsg(request)
else:
rsp = JsonResponse({'code': -1, 'errorMsg': '请求方式错误'},
json_dumps_params={'ensure_ascii': False})
logger.info('response result: {}'.format(rsp.content.decode('utf-8')))
return render(request, 'index.html') | 5006cf1e5cb23e49b17e9083fca66c7731f5559b | 17,561 |
def get_daisy_client():
"""Get Daisy client instance."""
endpoint = conf.get('discoverd', 'daisy_url')
return daisy_client.Client(version=1, endpoint=endpoint) | 6ed0df1259672becfca3197f2d115a1c789306a1 | 17,562 |
from pathlib import Path
import multiprocessing
def intermediate_statistics(
scores, ground_truth, audio_durations, *,
segment_length=1., time_decimals=6, num_jobs=1,
):
"""
Args:
scores (dict, str, pathlib.Path): dict of SED score DataFrames
(cf. sed_scores_eval.utils.scores.create_score_dataframe)
or a directory path (as str or pathlib.Path) from where the SED
scores can be loaded.
ground_truth (dict, str or pathlib.Path): dict of lists of ground truth
event tuples (onset, offset, event label) for each audio clip or a
file path from where the ground truth can be loaded.
audio_durations: The duration of each audio file in the evaluation set.
segment_length: the segment length of the segments that are to be
evaluated.
time_decimals (int): the decimal precision used for evaluation. If
chosen too high, detected or ground truth events that have
onsets or offsets right on a segment boundary may swap over to the
adjacent segment because of small deviations due to limited
floating point precision.
num_jobs (int): the number of processes to use. Default is 1 in which
case no multiprocessing is used.
Returns:
dict mapping each event class to a tuple of the sorted segment scores
and the corresponding intermediate statistics (tps, fps, tns, n_ref).
"""
if not isinstance(num_jobs, int) or num_jobs < 1:
raise ValueError(
f'num_jobs has to be an integer greater or equal to 1 but '
f'{num_jobs} was given.'
)
scores, ground_truth, audio_ids = parse_inputs(scores, ground_truth)
if isinstance(audio_durations, (str, Path)):
audio_durations = Path(audio_durations)
assert audio_durations.is_file(), audio_durations
audio_durations = read_audio_durations(audio_durations)
if audio_durations is not None and not audio_durations.keys() == set(audio_ids):
raise ValueError(
f'audio_durations audio ids do not match audio ids in scores. '
f'Missing ids: {set(audio_ids) - audio_durations.keys()}. '
f'Additional ids: {audio_durations.keys() - set(audio_ids)}.'
)
_, event_classes = validate_score_dataframe(scores[audio_ids[0]])
single_label_ground_truths = multi_label_to_single_label_ground_truths(
ground_truth, event_classes)
def worker(audio_ids, output_queue=None):
segment_scores = None
segment_targets = None
for audio_id in audio_ids:
scores_k = scores[audio_id]
timestamps, _ = validate_score_dataframe(
scores_k, event_classes=event_classes)
timestamps = np.round(timestamps, time_decimals)
if segment_scores is None:
segment_scores = {class_name: [] for class_name in event_classes}
segment_targets = {class_name: [] for class_name in event_classes}
scores_k = scores_k[event_classes].to_numpy()
if audio_durations is None:
duration = max(
[timestamps[-1]] + [t_off for t_on, t_off, _ in ground_truth[audio_id]]
)
else:
duration = audio_durations[audio_id]
n_segments = int(np.ceil(duration / segment_length))
segment_boundaries = np.round(
np.arange(n_segments+1) * segment_length,
time_decimals
)
segment_onsets = segment_boundaries[:-1]
segment_offsets = segment_boundaries[1:]
for class_name in event_classes:
gt = single_label_ground_truths[class_name][audio_id]
if len(gt) == 0:
segment_targets[class_name].append(
np.zeros(n_segments, dtype=np.bool_))
else:
segment_targets[class_name].append(
np.any([
(segment_onsets < gt_offset)
* (segment_offsets > gt_onset)
* (segment_offsets > segment_onsets)
for gt_onset, gt_offset in
single_label_ground_truths[class_name][audio_id]
], axis=0)
)
for i in range(n_segments):
idx_on = get_first_index_where(
timestamps, "gt", segment_onsets[i]) - 1
idx_on = max(idx_on, 0)
idx_off = get_first_index_where(
timestamps, "geq", segment_offsets[i])
idx_off = min(idx_off, len(timestamps)-1)
if idx_off <= idx_on:
scores_ki = np.zeros(scores_k.shape[-1])
else:
scores_ki = np.max(scores_k[idx_on:idx_off], axis=0)
for c, class_name in enumerate(event_classes):
segment_scores[class_name].append(scores_ki[c])
if output_queue is not None:
output_queue.put((segment_scores, segment_targets))
return segment_scores, segment_targets
if num_jobs == 1:
segment_scores, segment_targets = worker(audio_ids)
else:
queue = multiprocessing.Queue()
shard_size = int(np.ceil(len(audio_ids) / num_jobs))
shards = [
audio_ids[i*shard_size:(i+1)*shard_size] for i in range(num_jobs)
if i*shard_size < len(audio_ids)
]
processes = [
multiprocessing.Process(
target=worker, args=(shard, queue), daemon=True,
)
for shard in shards
]
try:
for p in processes:
p.start()
segment_scores, segment_targets = None, None
count = 0
while count < len(shards):
seg_scores_i, seg_targets_i = queue.get()
if segment_scores is None:
segment_scores = seg_scores_i
segment_targets = seg_targets_i
else:
for class_name in segment_scores:
segment_scores[class_name].extend(seg_scores_i[class_name])
segment_targets[class_name].extend(seg_targets_i[class_name])
count += 1
finally:
for p in processes:
p.terminate()
stats = {}
for class_name in event_classes:
segment_scores[class_name] = np.array(segment_scores[class_name]+[np.inf])
sort_idx = np.argsort(segment_scores[class_name])
segment_scores[class_name] = segment_scores[class_name][sort_idx]
segment_targets[class_name] = np.concatenate(
segment_targets[class_name]+[np.zeros(1)])[sort_idx]
tps = np.cumsum(segment_targets[class_name][::-1])[::-1]
n_sys = np.arange(len(tps))[::-1]
segment_scores[class_name], unique_idx = np.unique(segment_scores[class_name], return_index=True)
n_ref = tps[0]
fns = n_ref - tps
tns = n_sys[0] - n_sys - fns
stats[class_name] = {
'tps': tps[unique_idx],
'fps': n_sys[unique_idx] - tps[unique_idx],
'tns': tns,
'n_ref': n_ref,
}
return {
class_name: (segment_scores[class_name], stats[class_name])
for class_name in event_classes
} | 5039bec8ceafed7952833aa2f39c5d44d0909790 | 17,563 |
def vnorm(v1):
"""vnorm(ConstSpiceDouble [3] v1) -> SpiceDouble"""
return _cspyce0.vnorm(v1) | 00016eaa6a765f564ce247c4126c4a360aa2b60d | 17,564 |
def mean_iou(y_true, y_pred):
"""F2 loss"""
prec = []
for t in np.arange(0.5, 1.0, 0.05):
y_pred_ = tf.to_int32(y_pred > t)
score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
K.get_session().run(tf.local_variables_initializer())
with tf.control_dependencies([up_opt]):
score = tf.identity(score)
prec.append(score)
return K.mean(K.stack(prec), axis=0) | a2503703bae7c8c83b42ac93406178bc4c52a675 | 17,565 |
def _encode_string_parts(value, encodings):
"""Convert a unicode string into a byte string using the given
list of encodings.
This is invoked if `encode_string` failed to encode `value` with a single
encoding. We try instead to use different encodings for different parts
of the string, using the encoding that can encode the longest part of
the rest of the string as we go along.
Parameters
----------
value : text type
The unicode string as presented to the user.
encodings : list
The encodings needed to encode the string as a list of Python
encodings, converted from the encodings in Specific Character Set.
Returns
-------
byte string
The encoded string, including the escape sequences needed to switch
between different encodings.
Raises
------
ValueError
If `value` could not be encoded with the given encodings.
"""
encoded = bytearray()
unencoded_part = value
while unencoded_part:
# find the encoding that can encode the longest part of the rest
# of the string still to be encoded
max_index = 0
best_encoding = None
for encoding in encodings:
try:
unencoded_part.encode(encoding)
# if we get here, the whole rest of the value can be encoded
best_encoding = encoding
max_index = len(unencoded_part)
break
except UnicodeError as e:
if e.start > max_index:
# e.start is the index of first character failed to encode
max_index = e.start
best_encoding = encoding
# none of the given encodings can encode the first character - give up
if best_encoding is None:
raise ValueError()
# encode the part that can be encoded with the found encoding
encoded_part = unencoded_part[:max_index].encode(best_encoding)
if best_encoding not in handled_encodings:
encoded += ENCODINGS_TO_CODES.get(best_encoding, b'')
encoded += encoded_part
# set remaining unencoded part of the string and handle that
unencoded_part = unencoded_part[max_index:]
# unencoded_part is empty - we are done, return the encoded string
return encoded | 58f514ed7cbd9a6e2c10e6d8b22f32a32d71d6a7 | 17,566 |
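A self-contained sketch of the core mechanism used above: UnicodeError.start marks the first character an encoding cannot handle, i.e. the length of the longest encodable prefix (the sample text and encodings are illustrative only):
value = 'abc\u00c4xyz'  # 'abcÄxyz'
for encoding in ('ascii', 'latin_1'):
    try:
        value.encode(encoding)
        print(encoding, 'encodes the whole string')
    except UnicodeError as e:
        # e.start is the index of the first character that failed to encode
        print(encoding, 'encodes only the first', e.start, 'characters')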
def SocketHandler(qt):
""" `SocketHandler` wraps a websocket connection.
HTTP GET /ws
"""
class _handler(websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def open(self):
qt.log("new socket open ...")
qt.register_socket(self)
def on_close(self):
qt.remove_socket(self)
def on_message(self, msg):
qt.log("Got socket command: %s" % (msg))
qt.command(msg)
return _handler | 001f9dbee77560d4d5970fce731084b5a9cca7af | 17,568 |
def extract_flow_global_roi(flow_x, flow_y, box):
"""
create global roi cropped flow image (for numpy image)
image:
numpy array image
box:
list of [xmin, ymin, xmax, ymax]
"""
flow_x_roi = extract_global_roi(flow_x, box)
flow_y_roi = extract_global_roi(flow_y, box)
if flow_x_roi is None or flow_y_roi is None:
return None
else:
return (flow_x_roi, flow_y_roi) | 1b6d22d413693e978dc31cfbf1708c93d9256cf1 | 17,570 |
from unittest.mock import patch
def patch_shell(response=None, error=False):
"""Mock the `AdbDeviceTcpFake.shell` and `DeviceFake.shell` methods."""
def shell_success(self, cmd):
"""Mock the `AdbDeviceTcpFake.shell` and `DeviceFake.shell` methods when they are successful."""
self.shell_cmd = cmd
return response
def shell_fail_python(self, cmd):
"""Mock the `AdbDeviceTcpFake.shell` method when it fails."""
self.shell_cmd = cmd
raise AttributeError
def shell_fail_server(self, cmd):
"""Mock the `DeviceFake.shell` method when it fails."""
self.shell_cmd = cmd
raise ConnectionResetError
if not error:
return {"python": patch("{}.AdbDeviceTcpFake.shell".format(__name__), shell_success), "server": patch("{}.DeviceFake.shell".format(__name__), shell_success)}
return {"python": patch("{}.AdbDeviceTcpFake.shell".format(__name__), shell_fail_python), "server": patch("{}.DeviceFake.shell".format(__name__), shell_fail_server)} | cdf4df2bb383c4c8b49b59442550e2c73ca828aa | 17,571 |
def __setAdjacent_square__(self, pos):
"""
Sets all adjacencies in the map for a map with square tiles.
"""
self.__checkIndices__(pos)
i, j = pos; adjacent = []
# Function to filter out nonexistent cells.
def filterfn(p):
do_not_filter = 0 <= p[0] < self.__numrows__ and 0 <= p[1] < self.__numcols__
return do_not_filter and not self.__isdisabled__[p[0]][p[1]]
for cell in filter(filterfn, ( (i+1,j), (i-1,j), (i,j+1), (i,j-1) )):
adjacent += [cell]
self.__adjacent__[i][j] = adjacent | ebdd3ee3d0104b5bd26cc48e07760de027615263 | 17,572 |
def model_definition_nested_events():
"""Test model for state- and parameter-dependent heavisides.
ODEs
----
d/dt x_1:
inflow_1 - decay_1 * x1
d/dt x_2:
- decay_2 * x_2
Events:
-------
event_1:
trigger: x_1 > inflow_1 / decay_2
bolus: [[ 0],
[ -1 / time]]
event_2:
trigger: x_2 > 0.5
bolus: [[ bolus],
[ bolus]]
"""
# Model components
species = ['x_1', 'x_2']
initial_assignments = {
'x_1': 'k1',
'x_2': 'k2',
}
rate_rules = {
'x_1': 'inflow_1 - decay_1 * x_1',
'x_2': '- decay_2 * x_2',
}
parameters = {
'k1': 0,
'k2': 0,
'inflow_1': 4,
'decay_1': 2,
'decay_2': 5,
'bolus': 0, # for bolus != 0, nested event sensitivities are off!
}
events = {
'event_1': {
'trigger': 'x_1 > inflow_1 / decay_2',
'target': 'x_2',
'assignment': 'x_2 - 1 / time'
},
'event_2': {
'trigger': 'x_2 < - 0.5',
'target': ['x_1', 'x_2'],
'assignment': ['x_1 + bolus', 'x_2 + bolus'],
}
}
timepoints = np.linspace(0, 1, 101)
# Analytical solution
def x_pected(t, k1, k2, inflow_1, decay_1, decay_2, bolus):
# gather temporary variables
# event_time = x_1 > inflow_1 / decay_2
equil = inflow_1 / decay_1
tmp1 = inflow_1 / decay_2 - inflow_1 / decay_1
tmp2 = k1 - inflow_1 / decay_1
event_time = (- 1 / decay_1) * np.log( tmp1 / tmp2)
def get_early_x(t):
# compute dynamics before event
x_1 = equil * (1 - np.exp(-decay_1 * t)) + k1*np.exp(-decay_1 * t)
x_2 = k2 * np.exp(-decay_2 * t)
return np.array([[x_1], [x_2]])
if t < event_time:
x = get_early_x(t).flatten()
else:
# compute state after event
x_tau = get_early_x(event_time)
tau_x1 = x_tau[0] + bolus
tau_x2 = x_tau[1] - 1 / event_time + bolus
# compute dynamics after event
inhom = np.exp(decay_1 * event_time) * tau_x1
x_1 = equil * (1 - np.exp(decay_1 * (event_time - t))) + \
inhom * np.exp(- decay_1 * t)
x_2 = tau_x2 * np.exp(decay_2 * event_time) * np.exp(-decay_2 * t)
x = np.array([[x_1], [x_2]])
return x.flatten()
def sx_pected(t, parameters):
# get sx, w.r.t. parameters, via finite differences
sx = []
for ip in parameters:
eps = 1e-6
perturbed_params = deepcopy(parameters)
perturbed_params[ip] += eps
sx_p = x_pected(t, **perturbed_params)
perturbed_params[ip] -= 2*eps
sx_m = x_pected(t, **perturbed_params)
sx.append((sx_p - sx_m) / (2 * eps))
return np.array(sx)
return (
initial_assignments,
parameters,
rate_rules,
species,
events,
timepoints,
x_pected,
sx_pected
) | f42a5c7c01fd6f966ecec11b28c9620022dd7aaf | 17,573 |
async def address_balance_history(
request: Request,
address: Address,
token_id: TokenID = Query(None, description="Optional token id"),
timestamps: bool = Query(
False, description="Include timestamps in addition to block heights"
),
flat: bool | None = Query(True, description="Return data as flat arrays."),
limit: int | None = Query(50, gt=0, le=10000),
offset: int | None = Query(0, ge=0),
desc: bool | None = Query(True, description="Most recent first"),
):
"""
ERG or token balance history of an address.
"""
query = f"""
select d.height
{', h.timestamp' if timestamps else ''}
, sum(d.value) over (order by d.height) as balance
from bal.{'erg' if token_id is None else 'tokens'}_diffs d
join core.headers h on h.height = d.height
where d.address = $1
{'' if token_id is None else 'and token_id = $4'}
order by 1 {'desc' if desc else ''}
limit $2 offset $3;
"""
opt_args = [] if token_id is None else [token_id]
async with request.app.state.db.acquire() as conn:
rows = await conn.fetch(query, address, limit, offset, *opt_args)
if not rows:
raise HTTPException(status_code=404, detail=DETAIL_404)
if flat:
if timestamps:
return {
"heights": [r["height"] for r in rows],
"timestamps": [r["timestamp"] for r in rows],
"balances": [r["balance"] for r in rows],
}
else:
return {
"heights": [r["height"] for r in rows],
"balances": [r["balance"] for r in rows],
}
else:
return rows | 2fcae2ab775611e51fd056e98928afbcb6bf1278 | 17,574 |
def load(as_pandas=None):
"""
Loads the Grunfeld data and returns a Dataset class.
Parameters
----------
as_pandas : bool
Flag indicating whether to return pandas DataFrames and Series
or numpy recarrays and arrays. If True, returns pandas.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
Notes
-----
raw_data has the firm variable expanded to dummy variables for each
firm (ie., there is no reference dummy)
"""
return du.as_numpy_dataset(load_pandas(), as_pandas=as_pandas) | 183c37228619b835a36dc4a1cc1e1a7649fca6ec | 17,575 |
def rule_if_system(system_rule, non_system_rule, context):
"""Helper function to pick a rule based on system-ness of context.
This can be used (with functools.partial) to choose between two
rule names, based on whether or not the context has system
scope. Specifically if we will fail the parent of a nested policy
check based on scope_types=['project'], this can be used to choose
the parent rule name for the error message check in
common_policy_check().
"""
if context.system_scope:
return system_rule
else:
return non_system_rule | 2149c2ffdd6afdd64f7d33a2de4c6a23b3143dee | 17,576 |
def find_inactive_ranges(note_sequence):
"""Returns ranges where no notes are active in the note_sequence."""
start_sequence = sorted(
note_sequence.notes, key=lambda note: note.start_time, reverse=True)
end_sequence = sorted(
note_sequence.notes, key=lambda note: note.end_time, reverse=True)
notes_active = 0
time = start_sequence[-1].start_time
inactive_ranges = []
if time > 0:
inactive_ranges.append(0.)
inactive_ranges.append(time)
start_sequence.pop()
notes_active += 1
# Iterate through all note on events
while start_sequence or end_sequence:
if start_sequence and (start_sequence[-1].start_time <
end_sequence[-1].end_time):
if notes_active == 0:
time = start_sequence[-1].start_time
inactive_ranges.append(time)
notes_active += 1
start_sequence.pop()
else:
notes_active -= 1
if notes_active == 0:
time = end_sequence[-1].end_time
inactive_ranges.append(time)
end_sequence.pop()
# if the last note is the same time as the end, don't add it
# remove the start instead of creating a sequence with 0 length
if inactive_ranges[-1] < note_sequence.total_time:
inactive_ranges.append(note_sequence.total_time)
else:
inactive_ranges.pop()
assert len(inactive_ranges) % 2 == 0
inactive_ranges = [(inactive_ranges[2 * i], inactive_ranges[2 * i + 1])
for i in range(len(inactive_ranges) // 2)]
return inactive_ranges | 8db86584908283385958c5f710fb36d95795f7b1 | 17,577 |
def is_connected(G):
"""Returns True if the graph is connected, False otherwise.
Parameters
----------
G : NetworkX Graph
An undirected graph.
Returns
-------
connected : bool
True if the graph is connected, false otherwise.
Raises
------
NetworkXNotImplemented:
If G is directed.
Examples
--------
>>> G = nx.path_graph(4)
>>> print(nx.is_connected(G))
True
See Also
--------
is_strongly_connected
is_weakly_connected
is_semiconnected
is_biconnected
connected_components
Notes
-----
For undirected graphs only.
"""
if len(G) == 0:
raise nx.NetworkXPointlessConcept('Connectivity is undefined ',
'for the null graph.')
return sum(1 for node in _plain_bfs(G, arbitrary_element(G))) == len(G) | 03a2602629db60565702bee044a1d70ba026a8aa | 17,578 |
import math
def show_result(img,
result,
skeleton=None,
kpt_score_thr=0.3,
bbox_color=None,
pose_kpt_color=None,
pose_limb_color=None,
radius=4,
thickness=1,
font_scale=0.5,
win_name='',
show=False,
show_keypoint_weight=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (list[dict]): The results to draw over `img`
(bbox_result, pose_result).
skeleton (list[list]): The connection of keypoints.
kpt_score_thr (float, optional): Minimum score of keypoints
to be shown. Default: 0.3.
pose_kpt_color (np.array[Nx3]`): Color of N keypoints.
If None, do not draw keypoints.
pose_limb_color (np.array[Mx3]): Color of M limbs.
If None, do not draw limbs.
radius (int): Radius of circles.
thickness (int): Thickness of lines.
font_scale (float): Font scales of texts.
win_name (str): The window name.
show (bool): Whether to show the image. Default: False.
show_keypoint_weight (bool): Whether to change the transparency
using the predicted confidence scores of keypoints.
wait_time (int): Value of waitKey param.
Default: 0.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
Tensor: Visualized image only if not `show` or `out_file`
"""
img = cv2.imread(img)
img = img[:,:,::-1]
img = img.copy()
img_h, img_w, _ = img.shape
pose_result = []
for res in result:
pose_result.append(res['keypoints'])
for _, kpts in enumerate(pose_result):
# draw each point on image
if pose_kpt_color is not None:
assert len(pose_kpt_color) == len(kpts)
for kid, kpt in enumerate(kpts):
x_coord, y_coord, kpt_score = int(kpt[0]), int(
kpt[1]), kpt[2]
if kpt_score > kpt_score_thr:
if show_keypoint_weight:
img_copy = img.copy()
r, g, b = pose_kpt_color[kid]
cv2.circle(img_copy, (int(x_coord), int(y_coord)),
radius, (int(r), int(g), int(b)), -1)
transparency = max(0, min(1, kpt_score))
cv2.addWeighted(
img_copy,
transparency,
img,
1 - transparency,
0,
dst=img)
else:
r, g, b = pose_kpt_color[kid]
cv2.circle(img, (int(x_coord), int(y_coord)),
radius, (int(r), int(g), int(b)), -1)
# draw limbs
if skeleton is not None and pose_limb_color is not None:
assert len(pose_limb_color) == len(skeleton)
for sk_id, sk in enumerate(skeleton):
pos1 = (int(kpts[sk[0] - 1, 0]), int(kpts[sk[0] - 1, 1]))
pos2 = (int(kpts[sk[1] - 1, 0]), int(kpts[sk[1] - 1, 1]))
if (pos1[0] > 0 and pos1[0] < img_w and pos1[1] > 0
and pos1[1] < img_h and pos2[0] > 0
and pos2[0] < img_w and pos2[1] > 0
and pos2[1] < img_h
and kpts[sk[0] - 1, 2] > kpt_score_thr
and kpts[sk[1] - 1, 2] > kpt_score_thr):
r, g, b = pose_limb_color[sk_id]
if show_keypoint_weight:
img_copy = img.copy()
X = (pos1[0], pos2[0])
Y = (pos1[1], pos2[1])
mX = np.mean(X)
mY = np.mean(Y)
length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5
angle = math.degrees(
math.atan2(Y[0] - Y[1], X[0] - X[1]))
stickwidth = 2
polygon = cv2.ellipse2Poly(
(int(mX), int(mY)),
(int(length / 2), int(stickwidth)), int(angle),
0, 360, 1)
cv2.fillConvexPoly(img_copy, polygon,
(int(r), int(g), int(b)))
transparency = max(
0,
min(
1, 0.5 *
(kpts[sk[0] - 1, 2] + kpts[sk[1] - 1, 2])))
cv2.addWeighted(
img_copy,
transparency,
img,
1 - transparency,
0,
dst=img)
else:
cv2.line(
img,
pos1,
pos2, (int(r), int(g), int(b)),
thickness=thickness)
if show:
imshow(img, win_name, wait_time)
if out_file is not None:
imwrite(img, out_file)
return img | af90da2b30ff9891613654d70724162ce7b4d702 | 17,579 |
def D2(X, Y, Y2=None, YT=None):
""" Calculate the pointwise (squared) distance.
Arguments:
X: of shape (n_sample, n_feature).
Y: of shape (n_center, n_feature).
Y2: of shape (1, n_center).
YT: of shape (n_feature, n_center).
Returns:
pointwise distances (n_sample, n_center).
"""
X2 = K.sum(K.square(X), axis = 1, keepdims=True)
if Y2 is None:
if X is Y:
Y2 = X2
else:
Y2 = K.sum(K.square(Y), axis = 1, keepdims=True)
Y2 = K.reshape(Y2, (1, K.shape(Y)[0]))
if YT is None:
YT = K.transpose(Y)
d2 = K.reshape(X2, (K.shape(X)[0], 1)) \
+ Y2 - 2 * K.dot(X, YT) # x2 + y2 - 2xy
return d2 | daa8940e939eb2806e043f9b4521bf8cd1aefd2e | 17,580 |
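An illustrative NumPy version of the x2 + y2 - 2xy expansion used above (the array shapes are made up for the example):
import numpy as np

X = np.random.rand(4, 3)   # (n_sample, n_feature)
Y = np.random.rand(2, 3)   # (n_center, n_feature)
X2 = np.sum(np.square(X), axis=1, keepdims=True)    # (4, 1)
Y2 = np.sum(np.square(Y), axis=1, keepdims=True).T  # (1, 2)
d2 = X2 + Y2 - 2 * X @ Y.T                          # (4, 2) squared distances
# sanity check against the explicit pairwise computation
assert np.allclose(d2, ((X[:, None, :] - Y[None, :, :]) ** 2).sum(-1))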
from typing import Dict
from typing import Any
from typing import cast
def spec_from_json_dict(
json_dict: Dict[str, Any]
) -> FieldSpec:
""" Turns a dictionary into the appropriate FieldSpec object.
:param dict json_dict: A dictionary with properties.
:raises InvalidSchemaError:
:returns: An initialised instance of the appropriate FieldSpec
subclass.
"""
try:
if json_dict.get('ignored', False):
return Ignore(json_dict['identifier'])
type_str = json_dict['format']['type']
spec_type = cast(FieldSpec, FIELD_TYPE_MAP[type_str])
except KeyError as e:
raise InvalidSchemaError("the feature definition {} is incomplete. Must contain: {}".format(json_dict, e))
return spec_type.from_json_dict(json_dict) | 9bf557364a7a17cea0c84c65ece5b1d0e3983b2f | 17,582 |
import scipy
def hyp_pfq(A, B, x, out=None, n=0):
"""
This function is decorated weirdly because its extra params are lists.
"""
out = np_hyp_pfq([a+n for a in A], [b+n for b in B], x, out)
with np.errstate(invalid='ignore'):
out *= np.prod([scipy.special.poch(a, n) for a in A])
out /= np.prod([scipy.special.poch(b, n) for b in B])
return out | f1d9e0454fa63d24b1a8a403bbae12e00b818bb2 | 17,583 |
from typing import Optional
from datetime import datetime
def create_new_token(
data: dict,
expires_delta: Optional[timedelta] = None,
page_only: bool = False):
"""Creates a token with the given permission and expiry"""
to_encode = data.copy()
if page_only:
expires = datetime.max
elif expires_delta:
expires = datetime.utcnow() + timedelta(minutes=expires_delta)
else:
expires = datetime.utcnow() + timedelta(minutes=TOKEN_EXPIRATION_TIME)
to_encode.update({"exp": expires})
to_encode.update({"scope": "userauth:none" if page_only else "userauth:full"})
return jwt.encode(to_encode, SECRET, ALGORITHM) | 3a0a2aebc6b814850333a5d4f5db72b1396cf208 | 17,584 |
from pathlib import Path
from typing import Union
import re
def parse_json_year_date(year: Number, fullpath: Path) -> Union[Path, None]:
"""
Filter the JSON files by year.
"""
if not isinstance(fullpath, Path):
raise TypeError("O parâmetro path deve do tipo Path.")
pattern_finder = re.search(f"_{year}\.json", fullpath.name)
if pattern_finder:
return fullpath
else:
return None | 1d482bf916c3574225fdc31e700fb570c47555b1 | 17,585 |
from malaya_speech.utils import describe_availability
def available_fastspeech2():
"""
List available FastSpeech2, Text to Mel models.
"""
return describe_availability(
_fastspeech2_availability,
text = '`husein` and `haqkiem` combined loss from training set',
) | b7fa7f6132eb478cf27068a4377688f8b3ec5c7b | 17,586 |
def solve(A, b, method='gauss', verbose=0, eps=1e-6, max_itration_times=100000, omega=1.9375):
"""
Solve equations in specified method.
:param A: coefficient matrix of the equations
:param b: vector
:param method: the way to solve equations
:param verbose: whether show the running information
:param eps: *epsilon*
:param max_itration_times: the maximum *rounds* of iteration
:param omega: *relaxation factor* for SOR method.
:return: the solution x or 'None' if error occurs
"""
# _show_equations(A, b) # only when dim <= 10
start = dt.now()
global _verbose, _eps, _max_itration_times, _omega
_verbose = verbose
_eps = eps
_max_itration_times = max_itration_times
_omega = omega
func = {
'gauss': gauss,
'lu': lu,
'chase': chase,
'square_root': square_root,
'jacobi': jacobi,
'gauss_seidel': gauss_seidel,
'sor': sor,
'cg': cg,
'qr': qr
}.get(method, 'other_method')
if func == 'other_method':
_raise_equasolerror_no_method(method)
# make a copy of A and b to make sure they will not be changed.
# show_equations(A, b)
A0 = np.copy(A)
b0 = np.copy(b)
answer = func(A0, b0)
if _verbose == 1:
print('[%s] time cost: %.4f s.' % (method, (dt.now() - start).total_seconds()))
return answer | 50c7cdc5a2c8b146a062c028c4cb684c0b7efc2f | 17,587 |
def returns_unknown():
"""Tuples are a not-supported type."""
return 1, 2, 3 | 9fc003c890b4e053362c684b1a5f0dfca59bbe42 | 17,588 |
def get_user(
cmd,
app_id: str,
token: str,
assignee: str,
api_version: str,
central_dns_suffix=CENTRAL_ENDPOINT,
) -> User:
"""
Get information for the specified user.
Args:
cmd: command passed into az
app_id: name of app (used for forming request URL)
token: (OPTIONAL) authorization token to fetch device details from IoTC.
MUST INCLUDE type (e.g. 'SharedAccessToken ...', 'Bearer ...')
assignee: unique ID of the user
central_dns_suffix: {centralDnsSuffixInPath} as found in docs
Returns:
users: dict
"""
result = _make_call(
cmd,
app_id=app_id,
method="get",
path=assignee,
payload=None,
token=token,
central_dns_suffix=central_dns_suffix,
api_version=api_version,
)
return _utility.get_object(result, MODEL, api_version) | cc387259c97ebfecadd5d82dc6acf8f970d19478 | 17,589 |
def get(fg, bg=None, attribute = 0):
"""
Return a string with the ANSI escape code for setting text colors
fg: html code or color index for text color
bg: html code or color index for background color (defaults to black)
attribute: use Attribute class variables
"""
if type(fg) is str:
bg = bg if bg else "#000000"
return by_hex(fg, bg, attribute=attribute)
elif type(fg) is int and 0 <= fg <= 255:
bg = bg if bg else 0
return by_index(fg, bg, attribute=attribute)
else:
raise TypeError("You can use only string or int.") | 16ee7ea3bd5c66c415a6466632cee1c5b337696b | 17,591 |
def get_Qi(Q,i,const_ij,m):
"""
Aim:
----
Equalising two polynomials where one is obtained by a SOS
decomposition in the canonical basis and the other one is expressed
in the Laguerre basis.
Parameters
----------
Q : matrix for the SOS decomposition
i : integer
degree at which we compute the coefficients.
const_ij : list
contains indices of Q at which coefficients i+j= const.
Returns
-------
Real that is a sum of coefficients
"""
return sum(factorial(l)*binom(l,i)*\
sum(Q[j]/sqrt(factorial(j[0])*factorial(j[1])) \
for j in const_ij[2*l]) for l in np.arange(i,m+1)) | a54313c8763777840c4a018dedb2fe6363e09d55 | 17,592 |
def strToBool(s):
"""
Converts string s to a boolean
"""
assert type(s) == str or type(s) == unicode
b_dict = {'true': True, 'false': False, 'yes': True, 'no': False}
return b_dict[s.lower()] | 84e59429523e6e59a90739b0f1b160fa9e84bdc8 | 17,594 |
import json
def publish_to_sns(topic_name, message, region=None):
"""
Post a message to an SNS topic
"""
AWS = AWSCachedClient(region) # cached client object
partition = None
if region:
partition = partition_from_region(region)
else:
partition = 'aws'
region = 'us-east-1'
topic_arn = 'arn:' + partition + ':sns:' + region + ':' + AWS.account + ':' + topic_name
json_message = json.dumps({"default":json.dumps(message)})
message_id = AWS.get_connection('sns', region).publish(
TopicArn=topic_arn,
Message=json_message,
MessageStructure='json'
).get('MessageId', 'error')
return message_id | 5a3c35c0367873e2c0b3c79a176b7c384d2b74ed | 17,595 |
from typing import List
from typing import Tuple
def get_subset(
classes: List,
train_data,
train_labels,
val_data,
val_labels,
test_data,
test_labels,
) -> Tuple:
"""
creates a binary subset of training, validation, and testing set using the specified list of classes to select
:param classes: list of classes in the labels that are to be selected in the subset (only specify two)
:param train_data: list or numpy array containing training data
:param train_labels: list or numpy array containing training labels
:param val_data: list or numpy array containing validation/training phase 2 data
:param val_labels: list or numpy array containing validation/training phase 2 labels
:param test_data: list or numpy array containing testing data
:param test_labels: list or numpy array containing testing labels
:return: tuple of training sub-set, validation/training phase 2 sub-set, testing sub-set.
"sub-set" here is a tuple of training and testing numpy arrays
"""
train_set = np.isin(train_labels, classes)
val_set = np.isin(val_labels, classes)
test_set = np.isin(test_labels, classes)
train_data = train_data[train_set]
train_labels = train_labels[train_set] == classes[0]
val_data = val_data[val_set]
val_labels = val_labels[val_set] == classes[0]
test_data = test_data[test_set]
test_labels = test_labels[test_set] == classes[0]
return (train_data, train_labels), (val_data, val_labels), (test_data, test_labels) | 8857b7f5c4563692b3236b68889201bd3a28507e | 17,597 |
def to_xyzw(matrix):
"""Convenience/readibility function to bring spatial (trailing) axis to start.
Args:
matrix (...x4 array): Input matrix.
Returns:
4x... array
"""
return np.rollaxis(matrix, -1) | 7c74b9bd6dc271db4a5dd925bbcfec4eef7ca791 | 17,598 |
import numpy
def do_3d_pooling(feature_matrix, stride_length_px=2,
pooling_type_string=MAX_POOLING_TYPE_STRING):
"""Pools 3-D feature maps.
:param feature_matrix: Input feature maps (numpy array). Dimensions must be
M x N x H x C or 1 x M x N x H x C.
:param stride_length_px: See doc for `do_2d_pooling`.
:param pooling_type_string: Pooling type (must be accepted by
`_check_pooling_type`).
:return: feature_matrix: Output feature maps (numpy array). Dimensions will
be 1 x m x n x h x C.
"""
error_checking.assert_is_numpy_array_without_nan(feature_matrix)
error_checking.assert_is_integer(stride_length_px)
error_checking.assert_is_geq(stride_length_px, 2)
_check_pooling_type(pooling_type_string)
if len(feature_matrix.shape) == 4:
feature_matrix = numpy.expand_dims(feature_matrix, axis=0)
error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=5)
feature_tensor = K.pool3d(
x=K.variable(feature_matrix), pool_mode=pooling_type_string,
pool_size=(stride_length_px, stride_length_px, stride_length_px),
strides=(stride_length_px, stride_length_px, stride_length_px),
padding='valid', data_format='channels_last'
)
return feature_tensor.numpy() | 180ceae7364dcd1dd55d23a00389d0c3bb43cc38 | 17,599 |
def sin_wave(freq, duration=1, offset=0):
"""Makes a sine wave with the given parameters.
freq: float cycles per second
duration: float seconds
offset: float radians
returns: Wave
"""
signal = SinSignal(freq, offset=offset)
wave = signal.make_wave(duration)
return wave | f0f0e58d0a864a114aafa24f68b683ac4ec2f419 | 17,601 |
def assign_lpvs(lat):
""" Given lattice type return 3 lattice primitive vectors"""
lpv = zeros((3,3))
if lat=='FCC':
lpv[0,1]=1./sqrt(2)
lpv[0,2]=1./sqrt(2)
lpv[1,0]=1./sqrt(2)
lpv[1,2]=1./sqrt(2)
lpv[2,0]=1./sqrt(2)
lpv[2,1]=1./sqrt(2)
elif lat=='SC':
lpv[0,0]=1
lpv[1,1]=1
lpv[2,2]=1
elif lat=='SH':
lpv[0,0]=1./2
lpv[0,1]=-sqrt(3)/2
lpv[1,0]=1./2
lpv[1,1]=sqrt(3)/2
lpv[2,2]=1.
return lpv | ecf599a661446e19e4155f170c41b5ac8271c8cb | 17,602 |
import torch
def flatten_and_batch_shift_indices(indices: torch.LongTensor,
sequence_length: int) -> torch.Tensor:
"""``indices`` of size ``(batch_size, d_1, ..., d_n)`` indexes into dimension 2 of a target tensor,
which has size ``(batch_size, sequence_length, embedding_size)``. This function returns a vector
that correctly indexes into the flattened target. The sequence length of the target must be provided
to compute the appropriate offset.
Args:
indices (torch.LongTensor): of shape ``(batch_size, d_1, ..., d_n)``.
sequence_length (int): the sequence length of the target tensor.
Returns:
offset_indices (torch.LongTensor): of shape ``(batch_size * d_1 * ... * d_n)``.
"""
if torch.max(indices) >= sequence_length or torch.min(indices) < 0:
raise ValueError("All the elements should be in range (0, {}), but found ({}, {})".format(
sequence_length - 1, torch.min(indices).item(), torch.max(indices).item()))
offsets = get_range_vector(indices.size(0), indices.device) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# (batch_size, d_1, ..., d_n) + (batch_size, 1, ..., 1)
offset_indices = indices + offsets
# (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices | 6b283f3baaa4fde17af194f996b7f2dec409fc0b | 17,603 |
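A small illustration of the offset arithmetic, assuming get_range_vector behaves like torch.arange on the right device (the shapes below are made up):
import torch

batch_size, sequence_length = 2, 5
indices = torch.tensor([[1, 3], [0, 4]])               # (batch_size, d_1)
offsets = torch.arange(batch_size) * sequence_length   # tensor([0, 5])
offset_indices = (indices + offsets.unsqueeze(1)).view(-1)
print(offset_indices)  # tensor([1, 3, 5, 9])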
def raveled_affinity_watershed(
image_raveled, marker_coords, offsets, mask, output
):
"""Compute affinity watershed on raveled arrays.
Parameters
----------
image_raveled : 2D array of float(32), shape (npixels, ndim)
The z, y, and x affinities around each pixel.
marker_coords : 1D array of int
The location of each marker along the pixels dimension of
``image_raveled``.
offsets : 1D array of int
The signed offsets to each neighboring pixel.
mask : 1D array of bool, shape (npixels,)
True for pixels to which the watershed should spread.
output : 1D array of int
The output array for markers.
"""
n_neighbors = offsets.shape[0]
age = 0
marker_coords = marker_coords.astype(np.intp)
offsets = offsets.astype(np.intp)
aff_offsets = offsets.copy().astype(np.intp)
aff_offsets[:int(len(offsets) / 2), 1] = 0
heap = [
Element(
image_raveled[0, 0], age, marker_coords[0],
marker_coords[0]
)
]
_ = heappop(heap)
# add each seed to the stack
for i in range(marker_coords.shape[0]):
index = marker_coords[i]
value = np.float32(0.)
source = index
index = index
elem = Element(value, age, index, source)
heappush(heap, elem)
# remove from stack until empty
while len(heap) > 0:
elem = heappop(heap)
for i in range(n_neighbors):
# get the flattened address of the neighbor
# offsets are 2d (size, 2) with columns 0 and 1 corresponding to
# affinities (ie axis) and image neighbour indices respectively
neighbor_index = elem.index + offsets[i, 1]
if not mask[neighbor_index]:
# neighbor is not in mask, move on to next neighbor
continue
if output[neighbor_index]:
# if there is a non-zero value in output, move on to next
# neighbor
continue
# if the neighbor is in the mask and not already labeled,
# label it then add it to the queue
output[neighbor_index] = output[elem.index]
value = image_raveled[aff_offsets[i, 0],
aff_offsets[i, 1] + elem.index]
age += 1
new_elem = Element(value, age, neighbor_index, elem.source)
heappush(heap, new_elem)
return output | bc109b59bec4389a851cfc46a8e02648e1809c60 | 17,604 |
def get_spike_times(units: pynwb.misc.Units, index, in_interval):
"""Use bisect methods to efficiently retrieve spikes from a given unit in a given interval
Parameters
----------
units: pynwb.misc.Units
index: int
in_interval: start and stop times
Returns
-------
np.ndarray: the spike times of the given unit that fall within the interval
"""
st = units['spike_times']
unit_start = 0 if index == 0 else st.data[index - 1]
unit_stop = st.data[index]
start_time, stop_time = in_interval
ind_start = bisect_left(st.target, start_time, unit_start, unit_stop)
ind_stop = bisect_right(st.target, stop_time, ind_start, unit_stop)
return np.asarray(st.target[ind_start:ind_stop]) | c121747deec1fcc9b5e317f6ec5e57349604ebc3 | 17,605 |
def _make_hours(store_hours):
"""Store hours is a dictionary that maps a DOW to different open/close times
Since it's easy to represent disjoing hours, we'll do this by default
Such as, if a store is open from 11am-2pm and then 5pm-10pm
We'll slice the times in to a list of floats representing 30 minute intevals
So for monday, let's assume we have the store hours from 10am - 3pm
We represent this as
monday = [10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5]
"""
week_hrs = {}
for dow in store_hours.keys():
dow_hours = []
for hour_set in store_hours[dow]:
if len(hour_set) < 2:
open_hr = 0.0
close_hr = 24.0
else:
open_hr = float(hour_set[0])
close_hr = float(hour_set[1])
if close_hr < open_hr:
tmp = close_hr
close_hr = open_hr
open_hr = tmp
current_hr_it = open_hr
while((close_hr - current_hr_it) >= .5):
dow_hours.append(current_hr_it)
current_hr_it += .5
week_hrs[dow] = dow_hours
return week_hrs | 4845594e59e5dba2790ac1a3c376ddb8e8290995 | 17,606 |
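A standalone sketch of the 30-minute slot expansion described in the docstring above, using a made-up 10am-3pm schedule:
open_hr, close_hr = 10.0, 15.0
slots = []
current = open_hr
while (close_hr - current) >= 0.5:
    slots.append(current)
    current += 0.5
print(slots)  # [10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5]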
def mul_time(t1, factor):
"""Get the product of the original Time and the number
t1: Time
factor: number
returns: Time
"""
assert valid_time(t1)
seconds = time_to_int(t1) * factor
return int_to_time(seconds) | 43d9c3a52670b8755590693fe6886748665d81ee | 17,607 |
def create_pmf_from_samples(
t_samples_list, t_trunc=None, bin_width=None, num_bins=None):
"""
Compute the probability distribution of the waiting time from the sampled data.
Parameters
----------
t_samples_list : array-like 1-D
Samples of the waiting time.
t_trunc: int
The truncation time.
bin_width: int
The width of the bins for the histogram.
num_bins: int
The number of bins for the histogram.
If num_bins and bin_width are both given, bin_width has priority.
Returns
-------
pmf: array-like 1-D
The probability distribution, i.e. the
normalized histogram of the waiting time.
bin_edges: array-like 1-D
The edge of each bins from ``numpy.histogram``.
"""
if t_trunc is None:
t_trunc = max(t_samples_list)
if bin_width is None:
if num_bins is None:
bin_width = int(np.ceil(t_trunc/200))
else:
bin_width = int(np.ceil(t_trunc/num_bins))
start = np.min(t_samples_list)
pmf, bin_edges = np.histogram(
t_samples_list, bins=np.arange(start, t_trunc+1, bin_width))
return pmf/len(t_samples_list), bin_edges | ce14c169ee719979284b01584b7e0523b19f256a | 17,608 |
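An illustrative run of the histogram/normalisation step above with made-up samples (note that, as in the function, samples beyond the last bin edge are dropped):
import numpy as np

t_samples = np.array([1, 2, 2, 3, 5, 5, 5, 8])
bin_width, t_trunc = 2, 8
start = np.min(t_samples)
pmf, bin_edges = np.histogram(t_samples, bins=np.arange(start, t_trunc + 1, bin_width))
print(bin_edges)             # [1 3 5 7]
print(pmf / len(t_samples))  # [0.375 0.125 0.375]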
def box_corner_to_center(boxes):
"""从(左上,右下)转换到(中间,宽度,高度)"""
x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
w = x2 - x1
h = y2 - y1
boxes = paddle.stack((cx, cy, w, h), axis=-1)
return boxes | c07ef637576e5b9ebd8ba43795535e630ccf8b09 | 17,609 |
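The same corner-to-center conversion sketched with NumPy for a single box (values are illustrative):
import numpy as np

boxes = np.array([[10.0, 20.0, 50.0, 80.0]])   # x1, y1, x2, y2
x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
w, h = x2 - x1, y2 - y1
print(np.stack((cx, cy, w, h), axis=-1))  # [[30. 50. 40. 60.]]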
def irrelevant(condition=None, library=None, weblog_variant=None, reason=None):
""" decorator, allow to mark a test function/class as not relevant """
skip = _should_skip(library=library, weblog_variant=weblog_variant, condition=condition)
def decorator(function_or_class):
if not skip:
return function_or_class
full_reason = "not relevant" if reason is None else f"not relevant: {reason}"
return _get_skipped_item(function_or_class, full_reason)
return decorator | 7d2633247569c4ca5bc20d5249e0b49991ae1047 | 17,610 |
def get_all_approved(self) -> list:
"""Get all appliances currently approved
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - appliance
- GET
- /appliance/approved
:return: Returns approved appliances
:rtype: list
"""
return self._get("/appliance/discovered") | 4c8c00cce144cf73b2a7b63d1e82a13f28de383c | 17,611 |
def bokeh_hover_tooltip(
label=False,
text=False,
image=False,
audio=False,
coords=True,
index=True,
custom=None,
):
"""
???+ note "Create a Bokeh hover tooltip from a template."
- param label: whether to expect and show a "label" field.
- param text: whether to expect and show a "text" field.
- param image: whether to expect and show an "image" (url/path) field.
- param audio: whether to expect and show an "audio" (url/path) field.
- param coords: whether to show xy-coordinates.
- param index: whether to show indices in the dataset.
- param custom: {display: column} mapping of additional (text) tooltips.
"""
# initialize mutable default value
custom = custom or dict()
# prepare encapsulation of a div box and an associated script
divbox_prefix = """<div class="out tooltip">\n"""
divbox_suffix = """</div>\n"""
script_prefix = """<script>\n"""
script_suffix = """</script>\n"""
# dynamically add contents to the div box and the script
divbox = divbox_prefix
script = script_prefix
if label:
divbox += """
<div>
<span style="font-size: 16px; color: #966;">
Label: @label
</span>
</div>
"""
if text:
divbox += """
<div style="word-wrap: break-word; width: 95%; text-overflow: ellipsis; line-height: 90%">
<span style="font-size: 11px;">
Text: @text
</span>
</div>
"""
if image:
divbox += """
<div>
<span style="font-size: 10px;">
Image: @image
</span>
<img
src="@image" height="60" alt="@image" width="60"
style="float: left; margin: 0px 0px 0px 0px;"
border="2"
></img>
</div>
"""
if audio:
divbox += """
<div>
<span style="font-size: 10px;">
Audio: @audio
</span>
<audio autoplay preload="auto" src="@audio">
</audio>
</div>
"""
if coords:
divbox += """
<div>
<span style="font-size: 12px; color: #060;">
Coordinates: ($x, $y)
</span>
</div>
"""
if index:
divbox += """
<div>
<span style="font-size: 12px; color: #066;">
Index: [$index]
</span>
</div>
"""
for _key, _field in custom.items():
divbox += f"""
<div>
<span style="font-size: 12px; color: #606;">
{_key}: @{_field}
</span>
</div>
"""
divbox += divbox_suffix
script += script_suffix
return divbox + script | 198e76e29d62c12c891c0fe51e947d16f39d65bb | 17,613 |
import math
def constant_xavier_initializer(shape, dtype=tf.float32, uniform=True):
"""Initializer function."""
if not dtype.is_floating:
raise TypeError('Cannot create initializer for non-floating point type.')
# Estimating fan_in and fan_out is not possible to do perfectly, but we try.
# This is the right thing for matrix multiply and convolutions.
if shape:
fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])
fan_out = float(shape[-1])
else:
fan_in = 1.0
fan_out = 1.0
for dim in shape[:-2]:
fan_in *= float(dim)
fan_out *= float(dim)
# Average number of inputs and output connections.
n = (fan_in + fan_out) / 2.0
if uniform:
# To get stddev = math.sqrt(factor / n) need to adjust for uniform.
limit = math.sqrt(3.0 * 1.0 / n)
return tf.random_uniform(shape, -limit, limit, dtype, seed=None)
else:
# To get stddev = math.sqrt(factor / n) need to adjust for truncated.
trunc_stddev = math.sqrt(1.3 * 1.0 / n)
return tf.truncated_normal(shape, 0.0, trunc_stddev, dtype, seed=None) | f11403932f04327b77f38930a8a7a235633449da | 17,614 |
from typing import Dict
from typing import Tuple
from typing import Any
def upload_script(name: str, permission_type: str, content: str, entry_id: str) -> Dict:
"""
Uploads a script by either given content or file
:param name: Script name to upload
:param permission_type: Permissions type of script to upload
:param content: PowerShell script content
:param entry_id: Script file to upload
:return: Response JSON which contains errors (if exist) and how many resources were affected
"""
endpoint_url = '/real-time-response/entities/scripts/v1'
body: Dict[str, Tuple[Any, Any]] = {
'name': (None, name),
'permission_type': (None, permission_type)
}
temp_file = None
try:
if content:
body['content'] = (None, content)
else: # entry_id was provided
file_ = demisto.getFilePath(entry_id)
file_name = file_.get('name') # pylint: disable=E1101
temp_file = open(file_.get('path'), 'rb') # pylint: disable=E1101
body['file'] = (file_name, temp_file)
headers = {
'Authorization': HEADERS['Authorization'],
'Accept': 'application/json'
}
response = http_request('POST', endpoint_url, files=body, headers=headers)
return response
finally:
if temp_file:
temp_file.close() | d33aa3a4f19cfac08d8ee5cb559c0088c6f577bb | 17,615 |
def create_orthogonal(left, right, bottom, top, znear, zfar):
"""Create a Mat4 orthographic projection matrix."""
width = right - left
height = top - bottom
depth = zfar - znear
sx = 2.0 / width
sy = 2.0 / height
sz = 2.0 / -depth
tx = -(right + left) / width
ty = -(top + bottom) / height
tz = -(zfar + znear) / depth
return Mat4((sx, 0.0, 0.0, 0.0,
0.0, sy, 0.0, 0.0,
0.0, 0.0, sz, 0.0,
tx, ty, tz, 1.0)) | 3f10bcabe0d95a9832956a7edcef1719c7db0d15 | 17,616 |
def update_image_version(name: str, new_version: str):
"""returns the passed image name modified with the specified version"""
parts = name.rsplit(':', 1)
return f'{parts[0]}:{new_version}' | cde798361a6c74d22f979fe013e963c46028a7e6 | 17,617 |
def compute_entanglement(theta):
"""Computes the second Renyi entropy of circuits with and without a tardigrade present.
Args:
- theta (float): the angle that defines the state psi_ABT
Returns:
- (float): The entanglement entropy of qubit B with no tardigrade
initially present
- (float): The entanglement entropy of qubit B where the tardigrade
was initially present
"""
dev = qml.device("default.qubit", wires=3)
# QHACK #
@qml.qnode(dev)
def circuits(theta, tartigrade):
if not tartigrade:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=0)
return qml.density_matrix(wires=[0])
def partial_trace(rho, qubit_2_keep): # Credits: GitHub @neversakura.
num_qubit = int(np.log2(rho.shape[0]))
qubit_axis = [(i, num_qubit + i) for i in range(num_qubit)
if i not in qubit_2_keep]
minus_factor = [(i, 2 * i) for i in range(len(qubit_axis))]
minus_qubit_axis = [(q[0] - m[0], q[1] - m[1])
for q, m in zip(qubit_axis, minus_factor)]
rho_res = np.reshape(rho, [2, 2] * num_qubit)
qubit_left = num_qubit - len(qubit_axis)
for i, j in minus_qubit_axis:
rho_res = np.trace(rho_res, axis1=i, axis2=j)
if qubit_left > 1:
rho_res = np.reshape(rho_res, [2 ** qubit_left] * 2)
return rho_res
psi_0 = np.array([1, 0])
psi_1 = np.array([0, 1])
g_bt = np.kron(psi_0, psi_0)
e_bt=np.cos(theta/2)*np.kron(psi_1,psi_0)+np.sin(theta/2)*np.kron(psi_0,psi_1)
psi_abt = 1/np.sqrt(2)*(np.kron(psi_0, e_bt)+np.kron(psi_1, g_bt))
rho_abt = np.outer(psi_abt, np.conj(psi_abt))
rho_b = partial_trace(rho_abt, [1])
mu_b = circuits(theta, 0)
s_mub = second_renyi_entropy(mu_b)
s_rhob = second_renyi_entropy(rho_b)
return s_mub, s_rhob
# QHACK # | bc6d70f1ef76666fa3b4d753f13dc04a8a368374 | 17,618 |
import logging
def parse_CDS_info(CDS_info):
"""
Args:
CDS_info (python dict):
'aliases' (list<alias_list (multiple)>):
alias_list
list<'locus_tag', str> AND/OR
list<'old_locus_tag', str> AND/OR
list<'protein_id', str>
'dna_sequence' (str): The actual DNA sequence
'functions' (list<str>): First object of list is the function
'location' (list<scaffold (str), bp (int), strand ("+/-"), length (nt)>)
Returns:
gene_table_list_d (dict):
"locusId":str
"sysName": ?str
"type": 1
"scaffoldId": str
"begin": int
"end": int
"strand": str ("+"/"-")
"name": str (always "unknown" in this case)
"desc": str
"GC": float
"nTA": int
"AA_seq": Amino Acid sequence of gene
"""
gene_table_list_d = {}
#Getting locusId
aliases_l = CDS_info["aliases"]
locusId_obj = aliases_l[0]
if locusId_obj[0] != "locus_tag":
locus_tag_found = False
for i in range(1, len(aliases_l)):
if aliases_l[i][0] == "locus_tag":
locus_tag_found = True
locusId_obj = aliases_l[i]
break
logging.critical(f"Found locus_tag at different loc of list: {i}")
else:
locus_tag_found = True
if not locus_tag_found:
raise Exception("Expecting locus_tag from genome object, did not find it.")
else:
gene_table_list_d["locusId"] = locusId_obj[1]
gene_table_list_d["sysName"] = locusId_obj[1]
# Getting scaffold, location, strand
scaffold, bp_loc, strand, nt_len = get_location_info(CDS_info["location"][0])
gene_table_list_d["scaffoldId"] = scaffold
gene_table_list_d["begin"] = bp_loc
gene_table_list_d["end"] = bp_loc + nt_len
gene_table_list_d["strand"] = strand
# Getting description
gene_table_list_d["desc"] = CDS_info["functions"][0]
# Getting GC and nTA
DNA_seq = CDS_info["dna_sequence"].upper()
gene_table_list_d["GC"] = (DNA_seq.count("G") + DNA_seq.count("C"))/float(len(DNA_seq))
gene_table_list_d["nTA"] = DNA_seq.count("TA")
# Undecidable parts (from the data object)
gene_table_list_d["type"] = 1
gene_table_list_d["name"] = "unknown"
# Adding protein sequence
gene_table_list_d["AA_seq"] = CDS_info["protein_translation"].upper()
return gene_table_list_d | d55e5b2c56b42c89c9abeba63cb7c68213688945 | 17,619 |
import re
def recover_original_schema_name(sql: str, schema_name: str) -> str:
"""Postgres truncates identifiers to 63 characters at parse time and, as pglast
uses bits of PG to parse queries, image names like noaa/climate:64_chars_of_hash
get truncated which can cause ambiguities and issues in provenance. We can't
get pglast to give us back the full identifier, but we can try and figure out
what it used to be and patch the AST to have it again.
"""
if len(schema_name) < POSTGRES_MAX_IDENTIFIER:
return schema_name
candidates = list(set(re.findall(r"(" + re.escape(schema_name) + r"[^.\"]*)[.\"]", sql)))
# Us finding more than one candidate schema is pretty unlikely to happen:
# we'd have to have a truncated schema name that's 63 characters long
# (of kind some_namespace/some_repo:abcdef1234567890....)
# which also somehow features in this query as a non-identifier. Raise an error here if
# this does happen.
assert len(candidates) == 1
return str(candidates[0]) | 041c747e8722dc1e81a94b29b76ee0eded88992c | 17,620 |
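A minimal sketch of the recovery idea above, assuming a schema name that was truncated to Postgres' 63-character identifier limit (the names are made up):
import re

POSTGRES_MAX_IDENTIFIER = 63
full_name = "noaa/climate:" + "a" * 60           # longer than 63 characters
truncated = full_name[:POSTGRES_MAX_IDENTIFIER]  # what the parser would report
sql = f'SELECT * FROM "{full_name}".some_table'
candidates = list(set(re.findall(r"(" + re.escape(truncated) + r"[^.\"]*)[.\"]", sql)))
assert candidates == [full_name]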
def on_method_not_allowed(error):
"""Override the HTML 405 default."""
content = {"msg": "Method not allowed"}
return jsonify(content), 405 | a174592834952beca21c683890ab94c9583544f9 | 17,621 |
def dir_name(dir_path):
"""
Build the temporary, input and result folder paths.
:param dir_path: path of the main directory
:return: [tmp_dir, input_dir, res_dir]
"""
tmp_dir = dir_path + "tmp\\"
input_dir = dir_path + "input\\"
res_dir = dir_path + "result\\"
return tmp_dir, input_dir, res_dir | 9f775b4ace14b178fd7bc0dfa94e5df13e583557 | 17,622 |
def profile_binning(
r,
z,
bins,
z_name="pm",
z_clip=None,
z_quantile=None,
return_bin=True,
plot=True,
):
"""Bin the given quantity z in r.
Parameters
----------
r: 1d array, binned x values
z: 1d array, binned y values
bins: 1d array, bins
Returns
--------
r_rbin : 1d array, mean r in bins
z_rbin : 1d array, mean z in bins
z_bins : dict, numbers for bins
"""
if z_clip is None:
clip = clip_quantile_1d(z, z_quantile, return_func=True)
else:
clip = lambda z_: (z_ > z_clip[0]) & (z_ < z_clip[1])
z_bins = {}
if plot:
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
# Clip by bins
for k, b in enumerate(bins[:-1]):
in_bin = (bins[k] <= r) & (r < bins[k + 1])
clipped = clip(z[in_bin])
z_in_bin = z[in_bin][clipped]
r_in_bin = r[in_bin][clipped]
z_bin = {z_name: z_in_bin, "r": r_in_bin}
z_bins[k] = z_bin
if plot:
lab = "{0:.2f}<r<{1:.2f}".format(bins[k], bins[k + 1])
sns.distplot(
z_in_bin,
hist=False,
kde_kws={"lw": 2, "alpha": 0.9},
label=lab,
)
r_rbin, z_rbin = get_mean_rbins(z_bins, z_name=z_name)
z_bins = z_bins if return_bin else None
return r_rbin, z_rbin, z_bins | f040fe7c7505e628978faf733a91578cb1a04709 | 17,623 |
def sequence_to_synergy_sims(inputs, params):
"""same as sequence to synergy, but prep some other tensors first
"""
# set up orig seq tensor
inputs[DataKeys.ORIG_SEQ] = inputs[DataKeys.FEATURES]
# set up thresholds tensor
num_interpretation_tasks = len(params["importance_task_indices"])
thresholds_shape = [
inputs[DataKeys.FEATURES].get_shape().as_list()[0],
num_interpretation_tasks, 1, 1]
inputs[DataKeys.WEIGHTED_SEQ_THRESHOLDS] = tf.zeros(thresholds_shape)
# move inputs to outputs
outputs = dict(inputs)
# and then run sequence to synergy
outputs, params = sequence_to_synergy(outputs, params)
return outputs, params | 6abb659be6d1977e7d8a3c7b47f2f60997faf951 | 17,624 |
def _gen_roi_func_constant(constant_roi):
"""
Return a RoI function which returns a constant radius.
See :py:func:`map_to_grid` for a description of the parameters.
"""
def roi(zg, yg, xg):
""" constant radius of influence function. """
return constant_roi
return roi | c7c69cf32fb289d5e9c9497474989aa873a231ba | 17,626 |
def less_important_function(num: int) -> str:
"""
Example which is documented in the module documentation but not highlighted on the main page.
:param num: A thing to pass
:return: A return value
"""
return f'{num}' | d6ba0644fc8f4582fb63ceb722b05e824d63312a | 17,627 |
def weth_asset_data(): # pylint: disable=redefined-outer-name
"""Get 0x asset data for Wrapped Ether (WETH) token."""
return asset_data_utils.encode_erc20(
NETWORK_TO_ADDRESSES[NetworkId.GANACHE].ether_token
) | 0341c1f5c46e05a316c99154be82399145ae9d1a | 17,628 |
def match_known_module_name(pattern):
"""
Matching with know module name.
Args:
pattern (Pattern): To be replaced pattern.
Returns:
str, matched module name, return None if not matched.
"""
matched_result = []
for ptn, module_name in BUILT_IN_MODULE_NAME.items():
if pattern.in_degree == ptn.in_degree and pattern.out_degree == ptn.out_degree and \
ptn.head == pattern.head and ptn.tail == pattern.tail:
is_matched, score = pattern_fuzzy_matching(pattern.ptn_items, ptn.ptn_items)
if is_matched:
matched_result.append((module_name, score))
if matched_result:
module_name = (matched_result if len(matched_result) == 1 else
sorted(matched_result, key=lambda x: x[1], reverse=True))[0][0]
if pattern.pattern not in used_module_name:
used_module_name[pattern.pattern] = 1
else:
module_name = f"{module_name}{used_module_name[pattern.pattern]}"
used_module_name[pattern.pattern] += 1
return module_name
return None | 0d76e22517d4fc435101702591e095a96cc5faf7 | 17,629 |
def _get_jones_types(name, numba_ndarray_type, corr_1_dims, corr_2_dims):
"""
Determine which of the following three cases are valid:
1. The array is not present (None) and therefore no Jones Matrices
2. single (1,) or (2,) dual correlation
3. (2, 2) full correlation
Parameters
----------
name: str
Array name
numba_ndarray_type: numba.type
Array numba type
corr_1_dims: int
Number of `numba_ndarray_type` dimensions,
including correlations (first option)
corr_2_dims: int
Number of `numba_ndarray_type` dimensions,
including correlations (second option)
Returns
-------
int
Enumeration describing the Jones Matrix Type
- 0 -- Not Present
- 1 -- (1,) or (2,)
- 2 -- (2, 2)
"""
if is_numba_type_none(numba_ndarray_type):
return JONES_NOT_PRESENT
if numba_ndarray_type.ndim == corr_1_dims:
return JONES_1_OR_2
elif numba_ndarray_type.ndim == corr_2_dims:
return JONES_2X2
else:
raise ValueError("%s.ndim not in (%d, %d)" %
(name, corr_1_dims, corr_2_dims)) | 8a9d6f3441c488e2bf1059dd6fcb506a2285d291 | 17,630 |
def editing_passport_serial_handler(update: Update,
context: CallbackContext) -> int:
"""Get and save passport serial."""
new_state = editing_pd(update, context,
validator=validators.passport_serial_validator,
attribute='passport_serial',
state=PASSPORT_SERIAL,
)
return new_state | 81c86bffa07376f17dd2c013f5eab42856fa4cea | 17,631 |
import requests
def get_overview(ticker: str) -> pd.DataFrame:
"""Get alpha vantage company overview
Parameters
----------
ticker : str
Stock ticker
Returns
-------
pd.DataFrame
Dataframe of fundamentals
"""
# Request OVERVIEW data from Alpha Vantage API
s_req = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
result = requests.get(s_req, stream=True)
# If the returned data was successful
if result.status_code == 200:
# Parse json data to dataframe
if "Note" in result.json():
console.print(result.json()["Note"], "\n")
return pd.DataFrame()
df_fa = pd.json_normalize(result.json())
# Keep json data sorting in dataframe
df_fa = df_fa[list(result.json().keys())].T
df_fa.iloc[5:] = df_fa.iloc[5:].applymap(lambda x: long_number_format(x))
clean_df_index(df_fa)
df_fa = df_fa.rename(
index={
"E b i t d a": "EBITDA",
"P e ratio": "PE ratio",
"P e g ratio": "PEG ratio",
"E p s": "EPS",
"Revenue per share t t m": "Revenue per share TTM",
"Operating margin t t m": "Operating margin TTM",
"Return on assets t t m": "Return on assets TTM",
"Return on equity t t m": "Return on equity TTM",
"Revenue t t m": "Revenue TTM",
"Gross profit t t m": "Gross profit TTM",
"Diluted e p s t t m": "Diluted EPS TTM",
"Quarterly earnings growth y o y": "Quarterly earnings growth YOY",
"Quarterly revenue growth y o y": "Quarterly revenue growth YOY",
"Trailing p e": "Trailing PE",
"Forward p e": "Forward PE",
"Price to sales ratio t t m": "Price to sales ratio TTM",
"E v to revenue": "EV to revenue",
"E v to e b i t d a": "EV to EBITDA",
}
)
return df_fa
return pd.DataFrame() | ddc87f05c8e67f84f2327cf0f06aded0e31e5e8c | 17,632 |
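# Hypothetical call for get_overview: it needs a valid Alpha Vantage key in
# cfg.API_KEY_ALPHAVANTAGE plus the module's pandas/helper imports, and it returns an
# empty DataFrame when the API responds with an error or a rate-limit note.
overview = get_overview("AAPL")
if not overview.empty:
    print(overview.head())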
def effective_sample_size(samples):
"""
Calculates ESS for a matrix of samples.
"""
try:
n_samples, n_params = samples.shape
except (ValueError, IndexError):
raise ValueError('Samples must be given as a 2d array.')
if n_samples < 2:
raise ValueError('At least two samples must be given.')
return [ess_single_param(samples[:, i]) for i in range(0, n_params)] | 7a31d4a2c2bee133ab264dc793f16d0d6bd866f2 | 17,633 |
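# Usage sketch for effective_sample_size: it expects a 2-D array shaped
# (n_samples, n_params) and delegates to ess_single_param, which must be defined in
# the surrounding module for this to run.
import numpy as np

chain = np.random.randn(1000, 3)  # e.g. 1000 MCMC draws of 3 parameters
print(effective_sample_size(chain))  # one ESS estimate per parameter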
def get_credentials(credentials_path):
"""
Gets valid user credentials from storage.
    If nothing has been stored, or if the stored credentials are invalid, it
returns None.
Returns:
Credentials, the obtained credential or None
"""
store = Storage(credentials_path)
credentials = store.get()
if not credentials or credentials.invalid:
return None
return credentials | b6a6fcd20f8def88c554d276e8f07aae3dc1f536 | 17,634 |
def setup(hass, config):
"""Set up this component."""
conf_track = config[DOMAIN][CONF_TRACK]
_LOGGER.info('version %s is starting, if you have ANY issues with this, please report'
' them here: https://github.com/custom-components/custom_updater', __version__)
ha_conf_dir = str(hass.config.path())
card_controller = CustomCards(hass, ha_conf_dir)
components_controller = CustomComponents(hass, ha_conf_dir)
def check_all_service(call):
"""Set up service for manual trigger."""
if not conf_track:
card_controller.cache_versions(call)
components_controller.cache_versions(call)
elif 'cards' in conf_track and 'components' in conf_track:
card_controller.cache_versions(call)
components_controller.cache_versions(call)
elif 'cards' in conf_track:
card_controller.cache_versions(call)
elif 'components' in conf_track:
components_controller.cache_versions(call)
def update_all_service(call):
"""Set up service for manual trigger."""
if not conf_track:
card_controller.update_all()
components_controller.update_all()
elif 'cards' in conf_track and 'components' in conf_track:
card_controller.update_all()
components_controller.update_all()
elif 'cards' in conf_track:
card_controller.update_all()
elif 'components' in conf_track:
components_controller.update_all()
if not conf_track or 'cards' in conf_track:
def upgrade_card_service(call):
"""Set up service for manual trigger."""
card_controller.upgrade_single(call.data.get(ATTR_CARD))
hass.services.register(DOMAIN, 'upgrade_single_card', upgrade_card_service)
if not conf_track or 'components' in conf_track:
def upgrade_component_service(call):
"""Set up service for manual trigger."""
components_controller.upgrade_single(call.data.get(ATTR_COMPONENT))
hass.services.register(DOMAIN, 'upgrade_single_component', upgrade_component_service)
track_time_interval(hass, card_controller.cache_versions, INTERVAL)
track_time_interval(hass, components_controller.cache_versions, INTERVAL)
hass.services.register(DOMAIN, 'check_all', check_all_service)
hass.services.register(DOMAIN, 'update_all', update_all_service)
return True | d0a18b4c2c3e2c94f19afb66e2e9d2a3d18fea07 | 17,635 |
def get_model(**kwargs):
"""
Returns the model.
"""
model = ShuffleNetV2(**kwargs)
return model | 6b226b56fe603a0b703267bc35e2b92f2c6dda7c | 17,637 |
import torch
def absolute_filter_change(baseline_state_dict, target_state_dict):
""" Calculate sum(abs(K2 - K1) / sum(K1))
Args:
baseline_state_dict (dict): state_dict of ori_net
target_state_dict (dict): state_dict of finetune_net
Returns:
sorted_diff (list): sorted values
sorted_index (list): sorted index of kernel
"""
# save all weight to list
baseline_weight_list = []
for key, value in baseline_state_dict.items():
if key.find('weight') != -1:
weight = value.reshape(-1, 3, 3)
baseline_weight_list.append(weight)
# [-1, 3, 3]
baseline_weight_list = torch.cat(baseline_weight_list, dim=0)
target_weight_list = []
for key, value in target_state_dict.items():
if key.find('weight') != -1:
weight = value.reshape(-1, 3, 3)
target_weight_list.append(weight)
# [-1, 3, 3]
target_weight_list = torch.cat(target_weight_list, dim=0)
sum_baseline_weight = torch.sum(torch.sum(abs(baseline_weight_list), dim=1), dim=1)
sum_baseline_weight = sum_baseline_weight.unsqueeze(1).unsqueeze(1)
diff = torch.sum(torch.sum(abs(target_weight_list - baseline_weight_list) / sum_baseline_weight, dim=1), dim=1)
return diff.cpu().numpy() | ad4616a03ef80f5a5430a87fd07d70d6bb10f7b7 | 17,638 |
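# Usage sketch for absolute_filter_change with toy state dicts: every 'weight' tensor
# must reshape cleanly into 3x3 kernels, as the function assumes.
baseline = {"conv.weight": torch.ones(4, 1, 3, 3)}
finetuned = {"conv.weight": torch.full((4, 1, 3, 3), 1.5)}
diff = absolute_filter_change(baseline, finetuned)
print(diff.shape)  # (4,) -- one relative change score per 3x3 kernel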
import torch
def load_checkpoints(checkpoint_name):
"""
Load a pretrained checkpoint.
:param checkpoint_name: checkpoint filename
:return: model.state_dict, source_vocabulary, target_vocabulary,
"""
# Get checkpoint from file
checkpoint = torch.load(checkpoint_name, map_location=torch.device('cpu'))
# The epoch when training has been left
epoch = checkpoint['epoch']
# The time elapsed during training
time_elapsed = checkpoint['time_elapsed']
# Get state_dict of the model
model_state_dict = checkpoint['model_state_dict']
# Get the state_dict of the optimizer
optimizer_state_dict = checkpoint['optimizer_state_dict']
# Get source language vocabulary
src_vocabulary = checkpoint['src_vocabulary']
tgt_vocabulary = checkpoint['tgt_vocabulary']
return model_state_dict, optimizer_state_dict, epoch, time_elapsed, src_vocabulary, tgt_vocabulary | e81f094c811d497504fd1f93a8ee537e6b122bd6 | 17,640 |
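# Round-trip sketch for load_checkpoints: save a dict containing exactly the keys the
# loader expects, then read it back. The values here are placeholders, not real state.
torch.save({
    "epoch": 3,
    "time_elapsed": 120.5,
    "model_state_dict": {},
    "optimizer_state_dict": {},
    "src_vocabulary": ["<pad>", "hello"],
    "tgt_vocabulary": ["<pad>", "hallo"],
}, "demo_checkpoint.pt")
state, opt_state, epoch, elapsed, src_vocab, tgt_vocab = load_checkpoints("demo_checkpoint.pt")
print(epoch, elapsed)  # 3 120.5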
def _extract_data(prices, n_markets):
""" Extract the open, close, high and low prices from the price matrix. """
os = prices[:, :, :n_markets]
cs = prices[:, :, n_markets:2*n_markets]
hs = prices[:, :, 2*n_markets:3*n_markets]
ls = prices[:, :, 3*n_markets:4*n_markets]
return os, cs, hs, ls | 154af0c8270fbe664b3dd5d07a724b753ff02040 | 17,641 |
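# Shape sketch for _extract_data: the price tensor stacks open/close/high/low blocks of
# n_markets columns along the last axis, so that axis must have length 4 * n_markets.
import numpy as np

n_markets = 5
prices = np.random.rand(32, 50, 4 * n_markets)  # (batch, time, 4 * markets)
opens, closes, highs, lows = _extract_data(prices, n_markets)
print(opens.shape, closes.shape, highs.shape, lows.shape)  # each (32, 50, 5)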
import pygame
def make_screen():
"""creates the code for a new screen"""
return pygame.display.set_mode((800,600)) | ca0e23f5583e652207f0297e7363dacaa5a7f085 | 17,642 |
def flip_vert(r, c, row, col, reversed):
"""1번 연산"""
if reversed:
row, col = col, row
return row - 1 - r, c, reversed | 053f6a354e5f6387a528af4ce07290cba370830c | 17,644 |
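# Quick check of flip_vert: row index r maps to row - 1 - r, the column is kept, and
# when `reversed` is set the row/col extents are swapped before flipping.
print(flip_vert(0, 2, row=4, col=3, reversed=False))  # (3, 2, False)
print(flip_vert(0, 2, row=4, col=3, reversed=True))   # (2, 2, True)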
def get_orders(self, **kwargs):
"""
|
| **Current All Open Orders (USER_DATA)**
| *Get all open orders on a symbol. Careful when accessing this with no symbol.*
| *If the symbol is not sent, orders for all symbols will be returned in an array.*
:API endpoint: ``GET /dapi/v1/openOrders``
:API doc: https://binance-docs.github.io/apidocs/delivery/en/#current-all-open-orders-user_data
:parameter symbol: string
:parameter recvWindow: optional int, the value cannot be greater than 60000
|
"""
url_path = "/dapi/v1/openOrders"
params = { **kwargs }
return self.sign_request("GET", url_path, params) | 0561fdeb4863ea08b1644a7695ca7f4ed0622fd9 | 17,645 |
def en_13757(data: bytes) -> int:
"""
Compute a CRC-16 checksum of data with the en_13757 algorithm.
:param bytes data: The data to be computed
:return: The checksum
:rtype: int
:raises TypeError: if the data is not a bytes-like object
"""
_ensure_bytes(data)
return _crc_16_en_13757(data) | 85a7793f475f04cca2d7dcf92eeba523fde9b1c2 | 17,647 |
def get_agent_supported_features_list_for_extensions():
"""
List of features that the GuestAgent currently supports (like Extension Telemetry Pipeline, etc) needed by Extensions.
We need to send this list as environment variables when calling extension commands to inform Extensions of all the
features the agent supports.
:return: Dict containing all Extension supported features with the key as their names and the AgentFeature object as
the value if the feature is supported by the Agent.
Eg: {
CRPSupportedFeatureNames.ExtensionTelemetryPipeline: _ETPFeature()
}
"""
return dict((name, feature) for name, feature in __EXTENSION_ADVERTISED_FEATURES.items() if feature.is_supported) | 8a453286c433b3ecaed2fc402c5d557b335f3935 | 17,648 |
def GCMV(image, mask=None):
"""
:param image: input image, color (3 channels) or gray (1 channel);
:param mask: calc gamma value in the mask area, default is the whole image;
:return: gamma, and output
"""
# Step 1. Check the inputs: image
if np.ndim(image) == 3 and image.shape[-1] == 3: # color image
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
img = hsv[:, :, 2]
color_flag = True
elif np.ndim(image) == 2: # gray image
img = image
color_flag = False
else:
print("ERROR:check the input image of AGT function...")
return 1, None
if mask is not None:
mask = mask<255
else:
mask = np.ones_like(img)
# Step 2. Main steps of GCMV
n_img = img/255.0
mean = np.mean(n_img)
gamma_list = np.arange(0.01,1.01,0.01) if mean<=0.5 else np.arange(1.1,10.1,0.1)
score = np.zeros_like(gamma_list)
for k, gamma in enumerate(gamma_list):
t_img = np.power(n_img, gamma)
m1, v1 = np.mean(t_img, axis=0), np.var(t_img, axis=0)
m2, v2 = np.mean(t_img, axis=1), np.var(t_img, axis=1)
score[k] = np.mean(np.power(m1-0.5077,2)) + np.mean(np.power(m2-0.5077,2))+np.mean(np.power(v1-0.0268,2)) + np.mean(np.power(v2-0.0268,2))
# grid search for the optimal gamma
ind = np.argmin(score)
    best_gamma = gamma_list[ind]
# print(best_gamma)
# Step 2.4 apply gamma transformation
n_img = (img+0.5)/256
output = np.power(n_img, best_gamma)
# Step 3.0 stretch back and post-process
# if mask is not None:
# output = (output * 256 - 0.5) * mask / 255.0
# else:
output = (output * 256 - 0.5)
output = output.round().astype(np.uint8)
if color_flag:
hsv[:, :, 2] = output
output = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return best_gamma, output | 47070fdda8dcb2507fefd6a5aa922d21481c0896 | 17,649 |
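# Usage sketch for GCMV on a synthetic dark grayscale image. The grayscale branch only
# needs NumPy in the module; the color branch additionally requires cv2.
import numpy as np

dark = (np.random.rand(64, 64) * 80).astype(np.uint8)
gamma, corrected = GCMV(dark)
print(gamma, dark.mean(), corrected.mean())  # gamma < 1 brightens the dark image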
from typing import Union
def downloadStaffFile(request: HttpRequest, filename: str) -> Union[HttpResponse, FileResponse]:
"""Serves the specified 'filename' validating the user is logged in and a staff user"""
return _downloadFileFromStorage(storages.StaffStorage(), filename) | 0e0137f5b5e4140c2d9ff300ed97b7a3e3c37602 | 17,650 |
def get_view_renderer_type(*args):
"""
get_view_renderer_type(v) -> tcc_renderer_type_t
Get the type of renderer currently in use in the given view (
'ui_get_renderer_type' )
@param v (C++: TWidget *)
"""
return _ida_kernwin.get_view_renderer_type(*args) | e35269d7b77196ebd8ea325db3d6301ffdb63908 | 17,651 |
async def create(req):
"""
Add a new label to the labels database.
"""
data = req["data"]
async with AsyncSession(req.app["pg"]) as session:
label = Label(
name=data["name"], color=data["color"], description=data["description"]
)
session.add(label)
try:
await session.flush()
document = label.to_dict()
await session.commit()
except IntegrityError:
raise HTTPBadRequest(text="Label name already exists")
document = await apply_transforms(document, [SampleCountTransform(req.app["db"])])
headers = {"Location": f"/labels/{document['id']}"}
return json_response(document, status=201, headers=headers) | 1d7de257f0a3bc1259168821e1fcd6358d4c31c6 | 17,652 |
def process_radial_velocity(procstatus, dscfg, radar_list=None):
"""
    Estimates the radial velocity with respect to the radar from the wind velocity
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : string. Dataset keyword
The input data type
latitude, longitude : float
arbitrary coordinates [deg] from where to compute the radial
velocity. If any of them is None it will be the radar position
altitude : float
arbitrary altitude [m MSL] from where to compute the radial
velocity. If None it will be the radar altitude
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
v_speed_field = None
h_speed_field = None
h_dir_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'wind_vel_v':
v_speed_field = get_fieldname_pyart(datatype)
if datatype == 'WIND_SPEED':
h_speed_field = get_fieldname_pyart(datatype)
if datatype == 'WIND_DIRECTION':
h_dir_field = get_fieldname_pyart(datatype)
if h_speed_field is None or h_dir_field is None:
warn('Horizontal wind speed and direction fields required'
' to estimate radial velocity')
return None, None
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if h_speed_field not in radar.fields or h_dir_field not in radar.fields:
warn('Unable to estimate radial velocity. '
'Missing horizontal wind')
return None, None
h_speed = radar.fields[h_speed_field]['data']
h_dir = radar.fields[h_dir_field]['data']
if v_speed_field is None or v_speed_field not in radar.fields:
warn('Unknown vertical wind speed. Assumed 0')
if v_speed_field is None:
            v_speed_field = 'vertical_wind_component'
v_speed = np.ma.zeros((radar.nrays, radar.ngates))
else:
v_speed = radar.fields[v_speed_field]['data']
# user defined parameters
lat = dscfg.get('latitude', None)
lon = dscfg.get('longitude', None)
alt = dscfg.get('altitude', None)
# get u and v wind components
h_dir_rad = np.deg2rad(h_dir)
speed_h_u = h_speed*np.sin(h_dir_rad) # eastward component
speed_h_v = h_speed*np.cos(h_dir_rad) # northward component
if lat is not None or lon is not None or alt is not None:
# get antenna coordinates respect to new radar location
if lat is None:
lat = radar.latitude['data'][0]
if lon is None:
lon = radar.longitude['data'][0]
if alt is None:
alt = radar.altitude['data'][0]
x, y = pyart.core.geographic_to_cartesian_aeqd(
radar.gate_longitude['data'], radar.gate_latitude['data'], lon,
lat)
z = radar.gate_altitude['data'] - alt
_, azimuths, elevations = pyart.core.cartesian_to_antenna(
x, y, z)
azi_2D_rad = np.deg2rad(azimuths)
ele_2D_rad = np.deg2rad(elevations)
else:
azi_2D_rad = np.broadcast_to(
np.deg2rad(radar.azimuth['data'])[:, np.newaxis],
(radar.nrays, radar.ngates))
ele_2D_rad = np.broadcast_to(
np.deg2rad(radar.elevation['data'])[:, np.newaxis],
(radar.nrays, radar.ngates))
r_speed = pyart.config.get_metadata('velocity')
# assuming no vertical velocity
# r_speed['data'] = h_speed*np.cos(h_dir_rad-azi_2D_rad)*np.cos(ele_2D_rad)
# with vertical velocity included
r_speed['data'] = (
(speed_h_u*np.sin(azi_2D_rad)+speed_h_v*np.cos(azi_2D_rad)) *
np.cos(ele_2D_rad)+np.sin(ele_2D_rad)*v_speed)
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('velocity', r_speed)
return new_dataset, ind_rad | 2114cf4f5524662f80cac69dab45a00729053192 | 17,653 |
def brute_force_diagonalize(answers, wordlist=WORDS, quiet=False):
"""
Find the most cromulent diagonalization for a set of answers, trying all
possible orders. See README.md for a cool example of this with 10 answers.
As a somewhat artificial example, let's suppose we have these seven
answers from the 2000 metas, but don't remember their order:
>>> metas = ['benjamins', 'billgates', 'donors', 'luxor', 'mansion', 'miserly', 'realty']
>>> brute_force_diagonalize(metas)[0] # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
Cromulence Text Info
9.5 RUN EAST
9.2 MIX LAST
9.1 MAX LAST
9.1 BE NOISY
8.8 LINE TO I
...
(9.5, 'RUN EAST', None)
Of course we were looking for the famous red herring "BE NOISY", but
"RUN EAST" sounds like a good way to find the coin also.
"""
results = []
seen = set()
answers = [parse_cell(word) for word in answers]
for i, permutation in enumerate(permutations(answers)):
if not quiet and i > 0 and i % 10000 == 0:
print("Tried %d permutations" % i)
try:
diag = diagonalize(permutation)
except IndexError:
continue
found = wordlist.search(diag, count=1, use_cromulence=True)
if found:
logprob, text = found[0]
slug = slugify(text)
if slug not in seen:
results.append((logprob, text, None))
seen.add(slug)
return wordlist.show_best_results(results) | 25725e34dc328cc605cc5dc147547c84de803873 | 17,654 |
def train():
"""
MNIST training set creator.
It returns a reader creator, each sample in the reader is image pixels in
[-1, 1] and label in [0, 9].
:return: Training reader creator
:rtype: callable
"""
return reader_creator(
paddle.dataset.common.download(TRAIN_IMAGE_URL, 'mnist',
TRAIN_IMAGE_MD5),
paddle.dataset.common.download(TRAIN_LABEL_URL, 'mnist',
TRAIN_LABEL_MD5), 100) | b7008aa61ce49822838c4b30709537396a93f453 | 17,655 |
import base64
import hmac
import hashlib
def sign_v2(key, msg):
"""
    AWS version 2 signing: HMAC-SHA1 hashing followed by base64 encoding.
"""
return base64.b64encode(hmac.new(key, msg.encode("utf-8"), hashlib.sha1).digest()) | 1aa54cc2cd3ce20ad5222a889754efda2f4632c3 | 17,656 |
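# Example for sign_v2: sign a canonical AWS v2 string-to-sign. The key must be bytes;
# the message is UTF-8 encoded inside the helper, and the result is a base64 bytes object.
secret = b"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
string_to_sign = "GET\n\n\n1700000000\n/example-bucket/object.txt"
print(sign_v2(secret, string_to_sign))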
def graph_apply(fun, *args):
"""Currying wrapper around APP(-,-)."""
result = fun
for arg in args:
arg = as_graph(arg)
result = APP(result, arg)
return result | 709306884b37b41c9a7289ad6a372d2b43ede6a9 | 17,657 |
def find_hcf(a, b) :
""" Finds the Highest Common Factor among two numbers """
#print('HCF : ', a, b)
if b == 0 :
return a
return find_hcf(b, a%b) | 818bbc05ab9262e8fd1e8975daf68ca3e0fa6a8b | 17,658 |
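# Euclid's algorithm in action: find_hcf recurses on (b, a % b) until the remainder is 0.
print(find_hcf(48, 36))  # 12
print(find_hcf(17, 5))   # 1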
import numpy as np
def GAU_pdf(x: np.ndarray, mu: float, var: float) -> np.ndarray:
"""
    Probability density function of the Gaussian distribution
:param x: ndarray input parameters
:param mu: float mean of the distribution
:param var: float variance of the distribution
:return: ndarray probability of each sample
"""
k = (1 / (np.sqrt(2 * np.pi * var)))
up = -np.power(x - mu, 2) / (2 * var)
return k * np.exp(up) | 9810da4a05d86ac7895a2947a1890fe111faeae4 | 17,659 |
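# Sanity check for GAU_pdf: the standard normal density peaks at ~0.3989 and integrates
# to roughly 1 over a wide enough interval.
x = np.linspace(-5.0, 5.0, 2001)
pdf = GAU_pdf(x, mu=0.0, var=1.0)
print(pdf.max())         # ~0.3989
print(np.trapz(pdf, x))  # ~1.0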
def version_compare(a, b): # real signature unknown; restored from __doc__
"""
version_compare(a: str, b: str) -> int
Compare the given versions; return a strictly negative value if 'a' is
smaller than 'b', 0 if they are equal, and a strictly positive value if
'a' is larger than 'b'.
"""
return 0 | 97b3fd3bbd542d776b75327c88f9e80d776ba248 | 17,660 |
def line_status():
"""
设备线路详情
:return:
"""
device_id = request.args.get("device_id")
lines = Line.objects(device_id=device_id).all()
result = Monitor.device_status(device_id, lines)
result.pop(0)
return Success(result) | 47ca3cfef469c346ad85b701339941707e2084ea | 17,661 |
def _hist_fig(df, pred, c):
"""
"""
bins = np.linspace(0, 1, 15)
unlabeled = pred[c][pd.isnull(df[c])].values
fig, (ax1, ax2) = plt.subplots(2,1)
# top plot: training data
pos_labeled = pred[c][(df[c] == 1)&(df["validation"] == False)].values
neg_labeled = pred[c][(df[c] == 0)&(df["validation"] == False)].values
train_auc = _auc(pos_labeled, neg_labeled)
if len(pos_labeled) > 0:
ax1.hist(pos_labeled, bins=bins, alpha=0.5,
label="labeled positive (train)", density=True)
if len(neg_labeled) > 0:
ax1.hist(neg_labeled, bins=bins, alpha=0.5,
label="labeled negative (train)", density=True)
if len(unlabeled) > 0:
ax1.hist(unlabeled, bins=bins, alpha=1., label="unlabeled",
density=True, histtype="step", lw=2)
# bottom plot: validation data
pos_labeled = pred[c][(df[c] == 1)&(df["validation"] == True)].values
neg_labeled = pred[c][(df[c] == 0)&(df["validation"] == True)].values
test_auc = _auc(pos_labeled, neg_labeled)
if len(pos_labeled) > 0:
ax2.hist(pos_labeled, bins=bins, alpha=0.5,
label="labeled positive (val)", density=True)
if len(neg_labeled) > 0:
ax2.hist(neg_labeled, bins=bins, alpha=0.5,
label="labeled negative (val)", density=True)
if len(unlabeled) > 0:
ax2.hist(unlabeled, bins=bins, alpha=1., label="unlabeled",
density=True, histtype="step", lw=2)
for a in [ax1, ax2]:
a.legend(loc="upper left")
a.set_xlabel("assessed probability", fontsize=14)
a.set_ylabel("frequency", fontsize=14)
title = "model outputs for '%s'\nAUC train %s, test AUC %s"%(c, train_auc, test_auc)
ax1.set_title(title, fontsize=14)
plt.close(fig)
return fig | 6836c0228f2db705642e5f5fa4da6d318674fd55 | 17,663 |
import requests
def is_responsive(url, code=200):
"""Check if something responds to ``url`` syncronously"""
try:
response = requests.get(url)
if response.status_code == code:
return True
except requests.exceptions.RequestException as _e:
pass
return False | 1ed307d7be468157c880bf7e481f255bac449c34 | 17,664 |
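# Usage sketch for is_responsive: poll an endpoint until it answers with the expected
# status code. The URL below is a placeholder for whatever service you are waiting on.
import time

for _ in range(5):
    if is_responsive("http://localhost:8000/health", code=200):
        print("service is up")
        break
    time.sleep(1.0)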
def fit_and_report(model, X, y, X_valid, y_valid):
"""
It fits a model and returns train and validation scores.
Parameters:
model (sklearn classifier model): The sklearn model
X (numpy.ndarray): The X part of the train set
y (numpy.ndarray): The y part of the train set
X_valid (numpy.ndarray): The X part of the validation set
y_valid (numpy.ndarray): The y part of the validation set
Returns:
scores (list): The list of scores of train and validation
"""
model.fit(X, y)
lr_probs = model.predict_proba(X)
lr_probs = lr_probs[:, 1]
lr_probs_val = model.predict_proba(X_valid)
lr_probs_val = lr_probs_val[:, 1]
# calculate scores
lr_auc = roc_auc_score(y, lr_probs)
lr_auc_val = roc_auc_score(y_valid, lr_probs_val)
scores = [lr_auc, lr_auc_val]
return scores | f993a5410248e5303995f37b5464cb4a57928bcf | 17,665 |
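# Example for fit_and_report with a scikit-learn classifier on synthetic data. Any
# estimator exposing fit/predict_proba works; roc_auc_score is assumed to be imported
# from sklearn.metrics in the function's module.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, n_features=10, random_state=0)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
train_auc, valid_auc = fit_and_report(LogisticRegression(max_iter=1000), X_train, y_train, X_valid, y_valid)
print(train_auc, valid_auc)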
def move_all_generation_to_high_voltage(data):
"""Move all generation sources to the high voltage market.
Uses the relative shares in the low voltage market, **ignoring transmission losses**. In theory, using the production volumes would be more correct, but these numbers are no longer updated since ecoinvent 3.2.
Empties out the medium and low voltage mixes."""
MIXES = {low_voltage_mix, medium_voltage_mix, high_voltage_mix}
mix_filter = lambda ds: ds['name'] in MIXES
for group in toolz.groupby("location", filter(mix_filter, data)).values():
assert len(group) == 3
high, low, medium = sorted(group, key=lambda x: x['name'])
medium_in_low = [ex for ex in low['exchanges']
if ex['name'] == medium_voltage_transformation][0]['amount']
high_in_low = [ex for ex in medium['exchanges']
if ex['name'] == high_voltage_transformation][0]['amount'] * \
medium_in_low
for exc in high['exchanges']:
if (exc['name'] in high_voltage_providers or (
"electricity" in exc['name'] and
"import from" in exc['name'])):
rescale_exchange(exc, high_in_low)
high['exchanges'].extend([rescale_exchange(exc, medium_in_low)
for exc in medium['exchanges']
if exc['name'] in medium_voltage_providers])
high['exchanges'].extend([exc
for exc in low['exchanges']
if exc['name'] in low_voltage_providers])
data = empty_medium_voltage_markets(data)
data = empty_low_voltage_markets(data)
return data | ed9b1fcf60bb1b5645dbd6946fe2e98e6e73ccf3 | 17,666 |
from typing import Union
def parser_first_text_or_content_if_could(html: etree._Element,
query_path: str) -> Union[str, None]:
"""
如果解析出的内容是一个数组,默认取的第一个
"""
nodes = html.xpath(query_path)
if not nodes:
return None
if len(nodes) > 0:
desc = nodes[0]
if hasattr(desc, 'text'):
return str(desc.text)
return str(desc)
return None | 8410280ca71083986af0aa89a312d5082ff36d8d | 17,667 |
def get_all_quantity(results, q_func=None):
"""
"""
quantities = []
for res_name in results:
if q_func is not None:
# We change the quantity function
results[res_name].q_func = q_func
min_quantity = results[res_name].min_quantity
quantities.append(min_quantity)
return quantities | 56d50cacab2dcd7cb1554798a11bb1937436c73e | 17,669 |
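# Hypothetical usage for get_all_quantity: the result objects only need a writable
# `q_func` attribute and a `min_quantity` attribute, so a tiny stand-in class suffices.
class _FakeResult:
    def __init__(self, min_quantity):
        self.q_func = None
        self.min_quantity = min_quantity

results = {"run-a": _FakeResult(0.12), "run-b": _FakeResult(0.34)}
print(get_all_quantity(results))              # [0.12, 0.34]
print(get_all_quantity(results, q_func=min))  # same values, with q_func swapped in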
import random
def generate_example_type_2a(problem, one_step_inferences):
"""Generates a type 2a training example.
Args:
problem: a lib.InferenceProblem instance.
    one_step_inferences: the list of one step inferences that can be reached
      from the premises.
Returns:
An instance of "Example", or None if any issue was found.
"""
premises = problem.premises
example_type = "2a"
name_rule = random.choice([True, False])
inputs = ("What can be inferred from the following premises in a single "
"inference step (ignoring inferences that add new predicates or "
"constants)? ")
if name_rule:
inputs += "Name the inference rule being used: "
inputs += (". ".join([rules.render_logic_clause(x) for x in premises])) + "."
inferences_str = []
for [rule_inference, rule] in one_step_inferences:
rule_name = rule.rule_name
inference_str = rules.render_logic_clause(rule_inference)
if name_rule:
inference_str += f" can be inferred via the {rule_name} rule"
inferences_str.append(inference_str)
targets = (". ".join(inferences_str)) + "."
if not inferences_str:
example_type = "2a-empty"
targets = "Nothing can be inferred from these premises."
elif problem.contains_contradiction:
example_type = "2a-cont"
targets = ("Since the premises are contradictory, we can infer anything "
"from them.")
return lib.Example(inputs, targets, example_type, problem) | fafc05b70c7b2a84a2c1476e51fa783f240f2bd5 | 17,670 |