content | sha1 | id |
---|---|---|
async def get_bot_queue(
request: Request,
state: enums.BotState = enums.BotState.pending,
verifier: int = None,
worker_session = Depends(worker_session)
):
"""Admin API to get the bot queue"""
db = worker_session.postgres
if verifier:
bots = await db.fetch("SELECT bot_id, prefix, description FROM bots WHERE state = $1 AND verifier = $2 ORDER BY created_at ASC", state, verifier)
else:
bots = await db.fetch("SELECT bot_id, prefix, description FROM bots WHERE state = $1 ORDER BY created_at ASC", state)
return {"bots": [{"user": await get_bot(bot["bot_id"]), "prefix": bot["prefix"], "invite": await invite_bot(bot["bot_id"], api = True), "description": bot["description"]} for bot in bots]} | bfbd51933b140bfd60cc7ec2a401d02048ffdeae | 15,584 |
def ask_for_rating():
"""Ask the user for a rating"""
heading = '{} {}'.format(common.get_local_string(30019),
common.get_local_string(30022))
try:
return int(xbmcgui.Dialog().numeric(heading=heading, type=0,
defaultt=''))
except ValueError:
return None | a7a854e02b11ac1313d69f508851d162a7748006 | 15,585 |
def isthai(text,check_all=False):
"""
สำหรับเช็คว่าเป็นตัวอักษรภาษาไทยหรือไม่
isthai(text,check_all=False)
text คือ ข้อความหรือ list ตัวอักษร
check_all สำหรับส่งคืนค่า True หรือ False เช็คทุกตัวอักษร
การส่งคืนค่า
{'thai':% อักษรภาษาไทย,'check_all':tuple โดยจะเป็น (ตัวอักษร,True หรือ False)}
"""
listext=list(text)
i=0
num_isthai=0
if check_all==True:
listthai=[]
while i<len(listext):
cVal = ord(listext[i])
if(cVal >= 3584 and cVal <= 3711):
num_isthai+=1
if check_all==True:
listthai.append(True)
else:
if check_all==True:
listthai.append(False)
i+=1
thai=(num_isthai/len(listext))*100
if check_all==True:
dictthai=tuple(zip(listext,listthai))
data= {'thai':thai,'check_all':dictthai}
else:
data= {'thai':thai}
return data | 6a6bff64ba3b3939414e9f3aa83d169cd026e1c3 | 15,586 |
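A minimal usage sketch of `isthai` above (hypothetical input strings; the test treats Unicode code points 3584–3711, the Thai block, as Thai):

```python
result = isthai("สวัสดีABC")
print(result["thai"])          # ≈66.67 — 6 of the 9 characters are Thai
detailed = isthai("กข1", check_all=True)
print(detailed["check_all"])   # (('ก', True), ('ข', True), ('1', False))
```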
from typing import List
from typing import Optional

import numpy as np
# `lib`, `maybe_cast_to_datetime`, `DtypeObj` and `Scalar` come from pandas
# internals (pandas._libs.lib, pandas.core.dtypes.cast, pandas._typing).
def _convert_object_array(
content: List[Scalar], dtype: Optional[DtypeObj] = None
) -> List[Scalar]:
"""
Internal function to convert object array.
Parameters
----------
content: list of processed data records
dtype: np.dtype, default is None
Returns
-------
arrays: casted content if not object dtype, otherwise return as is in list.
"""
# provide soft conversion of object dtypes
def convert(arr):
if dtype != np.dtype("O"):
arr = lib.maybe_convert_objects(arr)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays | 7b093057b05afa93ced881289d22b1eda91018f0 | 15,587 |
def update_room_time(conn, room_name: str, req_time: int) -> int:
"""部屋のロックを取りタイムスタンプを更新する
トランザクション開始後この関数を呼ぶ前にクエリを投げると、
そのトランザクション中の通常のSELECTクエリが返す結果がロック取得前の
状態になることに注意 (keyword: MVCC, repeatable read).
"""
cur = conn.cursor()
# See page 13 and 17 in https://www.slideshare.net/ichirin2501/insert-51938787
cur.execute("INSERT INTO room_time(room_name, time) VALUES (%s, 0) ON DUPLICATE KEY UPDATE time = time",
(room_name,))
cur.execute("SELECT time FROM room_time WHERE room_name = %s FOR UPDATE", (room_name,))
room_time = cur.fetchone()[0]
current_time = get_current_time(conn)
if room_time > current_time:
raise RuntimeError(f"room_time is future: room_time={room_time}, req_time={req_time}")
if req_time and req_time < current_time:
raise RuntimeError(f"req_time is past: req_time={req_time}, current_time={current_time}")
cur.execute("UPDATE room_time SET time = %s WHERE room_name = %s", (current_time, room_name))
return current_time | 78066e9666ee28217f790fb8c26d2ade8c2ace7c | 15,588 |
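A minimal call-site sketch for `update_room_time`, assuming a MySQL connection with autocommit disabled so the `FOR UPDATE` lock lives inside an explicit transaction (the exact begin/commit calls depend on the driver, e.g. PyMySQL or mysql-connector):

```python
# Hypothetical usage: lock the room first, then run the per-room queries.
conn.begin()                      # or conn.start_transaction(), depending on the driver
try:
    current_time = update_room_time(conn, "room-42", req_time=0)
    # ... queries that rely on the room lock go here ...
    conn.commit()
except Exception:
    conn.rollback()
    raise
```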
def get_layer_coverage(cat, store, store_obj):
"""Get correct layer coverage from a store."""
coverages = cat.mosaic_coverages(store_obj)
# Find the correct coverage
coverage = None
for cov in coverages["coverages"]["coverage"]:
if store == cov['name']:
coverage = cov
break
if coverage is None:
logger.warning("Layer '%s' not found", store)
return coverage | 498c4a8db1a82dafd8569314e4faf13517e75aba | 15,589 |
import logging
import time
def retarget(songs, duration, music_labels=None, out_labels=None,
out_penalty=None, volume=None, volume_breakpoints=None,
springs=None, constraints=None,
min_beats=None, max_beats=None,
fade_in_len=3.0, fade_out_len=5.0,
**kwargs):
"""Retarget a song to a duration given input and output labels on
the music.
Suppose you like one section of a song, say, the guitar solo, and
you want to create a three minute long version of the solo.
Suppose the guitar solo occurs from the 150 second mark to the 200
second mark in the original song.
You can set the label the guitar solo with 'solo' and the rest of
the song with 'other' by crafting the ``music_labels`` input
function. And you can set the ``out_labels`` function to give you
nothing but solo::
def labels(t):
if 150 < t < 200:
return 'solo'
return 'other'
def target(t): return 'solo'
song = Song("sweet-rock-song.wav")
composition, info = retarget(song, 180,
music_labels=labels, out_labels=target)
composition.export(filename="super-long-solo")
You can achieve much more complicated retargetings by adjusting
the ``music_labels``, `out_labels` and ``out_penalty`` functions,
but this should give you a basic sense of how to use the
``retarget`` function.
:param song: Song to retarget
:type song: :py:class:`radiotool.composer.Song`
:param duration: Duration of retargeted song (in seconds)
:type duration: float
:param music_labels: A function that takes a time (in seconds) and
returns the label (str) of the input music at that time
:type music_labels: function
:param out_labels: A function that takes a time (in seconds) and
returns the desired label (str) of the output music at that
time
:type out_labels: function
:param out_penalty: A function that takes a time (in seconds) and
returns the penalty for not matching the correct output label
at that time (default is 1.0)
:type out_penalty: function
:returns: Composition of retargeted song, and dictionary of
information about the retargeting
:rtype: (:py:class:`radiotool.composer.Composition`, dict)
"""
# get song analysis
if isinstance(songs, Track):
songs = [songs]
multi_songs = len(songs) > 1
analyses = [s.analysis for s in songs]
# generate labels for every beat in the input and output
beat_lengths = [a[BEAT_DUR_KEY] for a in analyses]
beats = [a["beats"] for a in analyses]
beat_length = np.mean(beat_lengths)
logging.info("Beat lengths of songs: {} (mean: {})".
format(beat_lengths, beat_length))
if out_labels is not None:
target = [out_labels(i) for i in np.arange(0, duration, beat_length)]
else:
target = ["" for i in np.arange(0, duration, beat_length)]
if music_labels is not None:
if not multi_songs:
music_labels = [music_labels]
music_labels = [item for sublist in music_labels
for item in sublist]
if len(music_labels) != len(songs):
raise ArgumentException("Did not specify {} sets of music labels".
format(len(songs)))
start = [[music_labels[i](j) for j in b] for i, b in enumerate(beats)]
else:
start = [["" for i in b] for b in beats]
if out_penalty is not None:
pen = np.array([out_penalty(i) for i in np.arange(
0, duration, beat_length)])
else:
pen = np.array([1 for i in np.arange(0, duration, beat_length)])
# we're using a valence/arousal constraint, so we need these
in_vas = kwargs.pop('music_va', None)
if in_vas is not None:
if not multi_songs:
in_vas = [in_vas]
in_vas = [item for sublist in in_vas for item in sublist]
if len(in_vas) != len(songs):
raise ArgumentException("Did not specify {} sets of v/a labels".
format(len(songs)))
for i, in_va in enumerate(in_vas):
if callable(in_va):
in_va = np.array([in_va(j) for j in beats[i]])
in_vas[i] = in_va
target_va = kwargs.pop('out_va', None)
if callable(target_va):
target_va = np.array(
[target_va(i) for i in np.arange(0, duration, beat_length)])
# set constraints
if constraints is None:
min_pause_len = 20.
max_pause_len = 35.
min_pause_beats = int(np.ceil(min_pause_len / beat_length))
max_pause_beats = int(np.floor(max_pause_len / beat_length))
constraints = [(
rt_constraints.PauseConstraint(
min_pause_beats, max_pause_beats,
to_penalty=1.4, between_penalty=.05, unit="beats"),
rt_constraints.PauseEntryVAChangeConstraint(target_va, .005),
rt_constraints.PauseExitVAChangeConstraint(target_va, .005),
rt_constraints.TimbrePitchConstraint(
context=0, timbre_weight=1.5, chroma_weight=1.5),
rt_constraints.EnergyConstraint(penalty=0.5),
rt_constraints.MinimumLoopConstraint(8),
rt_constraints.ValenceArousalConstraint(
in_va, target_va, pen * .125),
rt_constraints.NoveltyVAConstraint(in_va, target_va, pen),
) for in_va in in_vas]
else:
max_pause_beats = 0
if len(constraints) > 0:
if isinstance(constraints[0], rt_constraints.Constraint):
constraints = [constraints]
pipelines = [rt_constraints.ConstraintPipeline(constraints=c_set)
for c_set in constraints]
trans_costs = []
penalties = []
all_beat_names = []
for i, song in enumerate(songs):
(trans_cost, penalty, bn) = pipelines[i].apply(song, len(target))
trans_costs.append(trans_cost)
penalties.append(penalty)
all_beat_names.append(bn)
logging.info("Combining tables")
total_music_beats = int(np.sum([len(b) for b in beats]))
total_beats = total_music_beats + max_pause_beats
# combine transition cost tables
trans_cost = np.ones((total_beats, total_beats)) * np.inf
sizes = [len(b) for b in beats]
idx = 0
for i, size in enumerate(sizes):
trans_cost[idx:idx + size, idx:idx + size] =\
trans_costs[i][:size, :size]
idx += size
trans_cost[:total_music_beats, total_music_beats:] =\
np.vstack([tc[:len(beats[i]), len(beats[i]):]
for i, tc in enumerate(trans_costs)])
trans_cost[total_music_beats:, :total_music_beats] =\
np.hstack([tc[len(beats[i]):, :len(beats[i])]
for i, tc in enumerate(trans_costs)])
trans_cost[total_music_beats:, total_music_beats:] =\
trans_costs[0][len(beats[0]):, len(beats[0]):]
# combine penalty tables
penalty = np.empty((total_beats, penalties[0].shape[1]))
penalty[:total_music_beats, :] =\
np.vstack([p[:len(beats[i]), :] for i, p in enumerate(penalties)])
penalty[total_music_beats:, :] = penalties[0][len(beats[0]):, :]
logging.info("Building cost table")
# compute the dynamic programming table (prev python method)
# cost, prev_node = _build_table(analysis, duration, start, target, pen)
# first_pause = 0
# if max_pause_beats > 0:
first_pause = total_music_beats
if min_beats is None:
min_beats = 0
elif min_beats == 'default':
min_beats = int(20. / beat_length)
if max_beats is None:
max_beats = -1
elif max_beats == 'default':
max_beats = int(90. / beat_length)
max_beats = min(max_beats, penalty.shape[1])
tc2 = np.nan_to_num(trans_cost)
pen2 = np.nan_to_num(penalty)
beat_names = []
for i, bn in enumerate(all_beat_names):
for b in bn:
if not str(b).startswith('p'):
beat_names.append((i, float(b)))
beat_names.extend([('p', i) for i in range(max_pause_beats)])
result_labels = []
logging.info("Running optimization (full backtrace, memory efficient)")
logging.info("\twith min_beats(%d) and max_beats(%d) and first_pause(%d)" %
(min_beats, max_beats, first_pause))
song_starts = [0]
for song in songs:
song_starts.append(song_starts[-1] + len(song.analysis["beats"]))
song_ends = np.array(song_starts[1:], dtype=np.int32)
song_starts = np.array(song_starts[:-1], dtype=np.int32)
t1 = time.clock()
path_i, path_cost = build_table_full_backtrace(
tc2, pen2, song_starts, song_ends,
first_pause=first_pause, max_beats=max_beats, min_beats=min_beats)
t2 = time.clock()
logging.info("Built table (full backtrace) in {} seconds"
.format(t2 - t1))
path = []
if max_beats == -1:
max_beats = min_beats + 1
first_pause_full = max_beats * first_pause
n_beats = first_pause
for i in path_i:
if i >= first_pause_full:
path.append(('p', i - first_pause_full))
result_labels.append(None)
# path.append('p' + str(i - first_pause_full))
else:
path.append(beat_names[i % n_beats])
song_i = path[-1][0]
beat_name = path[-1][1]
result_labels.append(
start[song_i][np.where(np.array(beats[song_i]) ==
beat_name)[0][0]])
# path.append(float(beat_names[i % n_beats]))
# else:
# print("Running optimization (fast, full table)")
# # this won't work right now- needs to be updated
# # with the multi-song approach
# # fortran method
# t1 = time.clock()
# cost, prev_node = build_table(tc2, pen2)
# t2 = time.clock()
# print("Built table (fortran) in {} seconds".format(t2 - t1))
# res = cost[:, -1]
# best_idx = N.argmin(res)
# if N.isfinite(res[best_idx]):
# path, path_cost, path_i = _reconstruct_path(
# prev_node, cost, beat_names, best_idx, N.shape(cost)[1] - 1)
# # path_i = [beat_names.index(x) for x in path]
# else:
# # throw an exception here?
# return None
# path = []
# result_labels = []
# if max_pause_beats == 0:
# n_beats = total_music_beats
# first_pause = n_beats
# else:
# n_beats = first_pause
# for i in path_i:
# if i >= first_pause:
# path.append(('p', i - first_pause))
# result_labels.append(None)
# else:
# path.append(beat_names[i % n_beats])
# song_i = path[-1][0]
# beat_name = path[-1][1]
# result_labels.append(
# start[song_i][N.where(N.array(beats[song_i]) ==
# beat_name)[0][0]])
# return a radiotool Composition
logging.info("Generating audio")
(comp, cf_locations, result_full_labels,
cost_labels, contracted, result_volume) =\
_generate_audio(
songs, beats, path, path_cost, start,
volume=volume,
volume_breakpoints=volume_breakpoints,
springs=springs,
fade_in_len=fade_in_len, fade_out_len=fade_out_len)
info = {
"beat_length": beat_length,
"contracted": contracted,
"cost": np.sum(path_cost) / len(path),
"path": path,
"path_i": path_i,
"target_labels": target,
"result_labels": result_labels,
"result_full_labels": result_full_labels,
"result_volume": result_volume,
"transitions": [Label("crossfade", loc) for loc in cf_locations],
"path_cost": cost_labels
}
return comp, info | 8ed317392e74545916d1ef33e282bce5c6846009 | 15,590 |
def st_get_ipfs_cache_path(user_did):
"""
Get the root dir of the IPFS cache files.
:param user_did: The user DID
:return: Path: the path of the cache root.
"""
return _st_get_vault_path(user_did) / 'ipfs_cache' | 4217b178025c395619d9def035d11cc96f2b139a | 15,591 |
def create_img_caption_int_data(filepath):
""" function to load captions from text file and convert them to integer
format
:return: dictionary with image ids and associated captions in int format
"""
print("\nLoading caption data : started")
# load caption data
img_caption_dict = load_img_caption_data(filepath)
# merge caption text data
text_data = " ".join([" ".join(txt) for txt in img_caption_dict.values()])
# create word to int mappings
(word_to_int_map, int_to_word_map) = create_word_mappings(text_data)
# convert caption data to int
img_caption_int_dict = {}
for key, value in img_caption_dict.items():
img_caption_int_dict[key] = [convert_text_to_int(txt, word_to_int_map)
for txt in value]
print("\nLoading caption data : completed")
return img_caption_int_dict | 6d1a449c1b5be7759c65740440865c72546514ef | 15,592 |
def eigenvector_2d_symmetric(a, b, d, eig, eps=1e-8):
"""Returns normalized eigenvector corresponding to the provided eigenvalue.
Note that this a special case of a 2x2 symmetric matrix where every element of the matrix is passed as an image.
This allows the evaluation of eigenvalues to be vectorized over the entire image. This is much more efficient
than calling the numpy function for computing the eigenvectors for each pixel of the image.
This function solves:
| a-lambda b |
| b d-lambda | [x, y] = 0
Which means that:
bx = (lambda - d) y
or
y = (lambda - a)/b x
This solution is invalid for b == 0. Here we expect orthogonal vectors [1 0] and [0 1].
ax + by = l x
bx + dy = l y
so x = 1 iff b = 0 and l = a
and y = 1 iff b = 0 and l = d
"""
ex = np.zeros(a.shape)
ey = np.zeros(a.shape)
ex[np.abs(a - eig) < eps] = 1
ey[np.abs(d - eig) < eps] = 1
mask = np.abs(b) > eps
tx = b[mask]
ty = eig[mask] - a[mask]
length = np.sqrt(tx * tx + ty * ty)
tx = tx / length
ty = ty / length
ex[mask] = tx
ey[mask] = ty
return ex, ey | 88a97af77e3f3097b6742db340f8e9559fb8164a | 15,593 |
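A worked sanity check for `eigenvector_2d_symmetric`, using 1×1 "images" so each matrix element is a single pixel: for [[3, 1], [1, 3]] the larger eigenvalue is 4 and its normalized eigenvector is (1/√2, 1/√2):

```python
import numpy as np

a = np.array([[3.0]]); b = np.array([[1.0]]); d = np.array([[3.0]])
eig = np.array([[4.0]])                       # larger eigenvalue of [[3, 1], [1, 3]]
ex, ey = eigenvector_2d_symmetric(a, b, d, eig)
print(ex, ey)                                 # ≈[[0.7071]] [[0.7071]]
```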
from typing import Optional
def get_protection_path_name(protection: Optional[RouteProtection]) -> str:
"""Get the protection's path name."""
if protection is None:
return DEFAULT_PROTECTION_NAME
return protection | f3abaf21c9ba3cfe6c0ae793afaf018fce20dec9 | 15,594 |
def _get_object_description(target):
"""Return a string describing the *target*"""
if isinstance(target, list):
data = "<list, length {}>".format(len(target))
elif isinstance(target, dict):
data = "<dict, length {}>".format(len(target))
else:
data = target
return data | 57ad3803a702a1199639b8fe950ef14b8278bec1 | 15,595 |
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
""" A map of modules from TF to PyTorch.
I use a map to keep the PyTorch model as
identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, "transformer"):
if hasattr(model, "lm_loss"):
# We will load also the output bias
tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights:
# We will load also the sequence summary
tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight
tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias
if (
hasattr(model, "logits_proj")
and config.finetuning_task is not None
and "model/regression_{}/logit/kernel".format(config.finetuning_task) in tf_weights
):
tf_to_pt_map["model/regression_{}/logit/kernel".format(config.finetuning_task)] = model.logits_proj.weight
tf_to_pt_map["model/regression_{}/logit/bias".format(config.finetuning_task)] = model.logits_proj.bias
# Now load the rest of the transformer
model = model.transformer
# Embeddings and output
tf_to_pt_map.update(
{
"model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
"model/transformer/mask_emb/mask_emb": model.mask_emb,
}
)
# Transformer blocks
for i, b in enumerate(model.layer):
layer_str = "model/transformer/layer_%d/" % i
tf_to_pt_map.update(
{
layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.rel_attn.o,
layer_str + "rel_attn/q/kernel": b.rel_attn.q,
layer_str + "rel_attn/k/kernel": b.rel_attn.k,
layer_str + "rel_attn/r/kernel": b.rel_attn.r,
layer_str + "rel_attn/v/kernel": b.rel_attn.v,
layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
}
)
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
r_s_list = []
seg_embed_list = []
for b in model.layer:
r_r_list.append(b.rel_attn.r_r_bias)
r_w_list.append(b.rel_attn.r_w_bias)
r_s_list.append(b.rel_attn.r_s_bias)
seg_embed_list.append(b.rel_attn.seg_embed)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
r_s_list = [model.r_s_bias]
seg_embed_list = [model.seg_embed]
tf_to_pt_map.update(
{
"model/transformer/r_r_bias": r_r_list,
"model/transformer/r_w_bias": r_w_list,
"model/transformer/r_s_bias": r_s_list,
"model/transformer/seg_embed": seg_embed_list,
}
)
return tf_to_pt_map | b822a5f5effcf1d925dec0e6c6b166ecb89b6627 | 15,596 |
def dynamicviewset(viewset):
"""
The activate route only makes sense if
user activation is required, so remove the
route if activation is turned off.
"""
if not settings['REQUIRE_ACTIVATION'] and hasattr(viewset, 'activate'):
delattr(viewset, 'activate')
return viewset | f31a191c7c4d51163f588fa4e728f92fb7d43816 | 15,597 |
import random
def generate_arabic_place_name(min_length=0):
"""Return a randomly generated, potentially multi-word fake Arabic place name"""
make_name = lambda n_words: ' '.join(random.sample(place_names, n_words))
n_words = 3
name = make_name(n_words)
while len(name) < min_length:
n_words += 1
name = make_name(n_words)
return name | 7efc760b8dcf5f8807e2d203542fa637908cbad2 | 15,598 |
def find_cutoffs(x,y,crdist,deltas):
"""function for identifying locations of cutoffs along a centerline
and the indices of the segments that will become part of the oxbows
from MeanderPy
x,y - coordinates of centerline
crdist - critical cutoff distance
deltas - distance between neighboring points along the centerline"""
diag_blank_width = int((crdist+20*deltas)/deltas)
# distance matrix for centerline points:
dist = distance.cdist(np.array([x,y]).T,np.array([x,y]).T)
dist[dist>crdist] = np.NaN # set all values that are larger than the cutoff threshold to NaN
# set matrix to NaN along the diagonal zone:
for k in range(-diag_blank_width,diag_blank_width+1):
rows, cols = kth_diag_indices(dist,k)
dist[rows,cols] = np.NaN
i1, i2 = np.where(~np.isnan(dist))
ind1 = i1[np.where(i1<i2)[0]] # get rid of unnecessary indices
ind2 = i2[np.where(i1<i2)[0]] # get rid of unnecessary indices
return ind1, ind2 # return indices of cutoff points and cutoff coordinates | 82de02759c70ab746d2adcfafd04313cbb0a8c4e | 15,599 |
def training_set_multiplication(training_set, mult_queue):
"""
Multiply the training set by all methods listed in mult_queue.
Parameters
----------
training_set :
set of all recordings that will be used for training
mult_queue :
list of all algorithms that will take one recording and generate more
than one.
Returns
-------
multiple recordings
"""
logger.info("Multiply data...")
for algorithm in mult_queue:
new_training_set = []
for recording in training_set:
samples = algorithm(recording["handwriting"])
for sample in samples:
new_training_set.append(
{
"id": recording["id"],
"is_in_testset": 0,
"formula_id": recording["formula_id"],
"handwriting": sample,
"formula_in_latex": recording["formula_in_latex"],
}
)
training_set = new_training_set
return training_set | db7105d64ba760ba88088363547795fff833cce6 | 15,601 |
def addDictionaryFromWeb(url, params=None, **kwargs):
"""
Fetch the dictionary metadata (JSON-LD) contained in the page at the given
URL, download the place-name dictionary (a CSV file) from the URL listed in
that metadata, and register it in the database.
If dictionary data with the same identifier is already registered in the
database, it is deleted before the new dictionary data is registered.
To make a registered dictionary available, it must be activated with
``setActivateDictionaries()`` or ``activateDictionaries()``.
Parameters
----------
url : str
URL of the web page containing the dictionary metadata.
params : dict, optional
The params argument passed to requests.get.
**kwargs : dict, optional
Additional keyword arguments passed to requests.get.
Returns
-------
bool
Always True; an exception is raised if registration fails.
Examples
--------
>>> import pygeonlp.api as api
>>> api.init()
>>> api.addDictionaryFromWeb('https://geonlp.ex.nii.ac.jp/dictionary/geoshape-city/')
True
>>> api.updateIndex()
>>> api.activateDictionaries(pattern=r'geoshape-city')
['geonlp:geoshape-city']
>>> geowords = api.searchWord('千代田区')
>>> len(geowords)
1
>>> next(iter(geowords.values()))['dictionary_identifier']
'geonlp:geoshape-city'
"""
_check_initialized()
return _default_service.addDictionaryFromWeb(url, params, **kwargs) | 52ea0c105dd5b5725859cd440f10a1fe163d36be | 15,602 |
# `T` (the token factory) and `BreakType` are provided by the pretty-printer module under test.
def test_nested_blocks(pprint):
"""
Expected result:
procedure test(x, y: Integer);
begin
x:=1;
y:=200;
for z:= 1 to 100 do
begin
x := x + z;
end;
y:=x;
end;
"""
def brk(offset=0):
"force a new line and indent by given offset"
return T.BREAK(blankSpace=9999, offset=offset)
text = [
T.BEGIN(breakType=BreakType.consistent, offset=0),
T.STRING('procedure test(x, y: Integer);'), brk(),
T.STRING("begin"),
brk(2), T.STRING("x:=1;"),
brk(2), T.STRING("y:=200;"),
# indented for loop
brk(2), T.BEGIN(breakType=BreakType.consistent, offset=0),
T.STRING("for z:= 1 to 100 do"), brk(),
T.STRING("begin"),
brk(2), T.STRING("x := x + z;"), brk(),
T.STRING("end;"),
T.END(),
brk(2), T.STRING("y:=x;"), brk(),
T.STRING("end;"),
T.END(),
T.EOF()]
result = pprint(text)
assert result == (
'procedure test(x, y: Integer);\n'
'begin\n'
' x:=1;\n'
' y:=200;\n'
' for z:= 1 to 100 do\n'
' begin\n'
' x := x + z;\n'
' end;\n'
' y:=x;\n'
'end;'
) | 7ef533d66f57483fac98bc249fd121e7c47f3d9a | 15,603 |
from typing import Sequence
import asyncio
async def read(
sensors: Sequence[Sensor], msg: str = "", retry_single: bool = False
) -> bool:
"""Read from the Modbus interface."""
global READ_ERRORS # pylint:disable=global-statement
try:
try:
await SUNSYNK.read(sensors)
READ_ERRORS = 0
return True
except asyncio.TimeoutError:
_LOGGER.error("Read error%s: Timeout", msg)
except ModbusIOException:
# TCP: try to reconnect since it got a fairly serious error
await asyncio.sleep(1)
await SUNSYNK.connect()
except Exception as err: # pylint:disable=broad-except
_LOGGER.error("Read Error%s: %s", msg, err)
READ_ERRORS += 1
if READ_ERRORS > 3:
raise Exception(f"Multiple Modbus read errors: {err}") from err
if retry_single:
_LOGGER.info("Retrying individual sensors: %s", [s.name for s in SENSORS])
for sen in sensors:
await asyncio.sleep(0.02)
await read([sen], msg=sen.name, retry_single=False)
return False | 7e871984d3b86207a2e9c9909b5f83d3ef9c3c4a | 15,604 |
def boxlist_iou_guide_nms(boxlist, nms_thresh, max_proposals=-1, score_field="scores"):
"""
Performs non-maximum suppression on a boxlist, with scores specified
in a boxlist field via score_field.
Arguments:
boxlist(BoxList)
nms_thresh (float)
max_proposals (int): if > 0, then only the top max_proposals are kept
after non-maximum suppression
score_field (str)
"""
if nms_thresh <= 0:
return boxlist
mode = boxlist.mode
boxlist = boxlist.convert("xyxy")
boxes = boxlist.bbox
scores = boxlist.get_field(score_field)
ious = boxlist.get_field('ious')
keep, scores_new = iou_guide_nms(boxes, scores, ious, nms_thresh)
if max_proposals > 0:
keep = keep[: max_proposals]
scores_new = scores_new[:max_proposals]
boxlist = boxlist[keep]
boxlist.add_field("scores", scores_new)
return boxlist.convert(mode) | 1824575d4768b2145730caa0f9f9809dd784260d | 15,605 |
def get_payload_bin(payload, seconds):
"""
Since we can't run the ysoserial.exe file in ubuntu (at least not
easily with mono) we build the different payloads in windows and
save them to the PAYLOADS map above.
:param payload: The payload name
:param seconds: The seconds to wait
:return: The payload
"""
return SAVED_PAYLOADS[payload][seconds] | eec033969157ec2a67b2f9edbba5b355d25f55bc | 15,607 |
def srange(start, step, length, dtype=None):
"""
Like np.arange() but you give the start, the step, and the number
of steps. Saves having to compute the end point yourself.
"""
stop = start + (step * length)
return np.arange(start, stop, step, dtype) | ba71777c720063cbf5429085d8d249d83634b4f7 | 15,608 |
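A quick usage sketch of `srange` (assumes `numpy` is imported as `np`, as the body expects):

```python
import numpy as np  # required by srange's body

print(srange(0.0, 0.5, 4))   # [0.  0.5 1.  1.5]
```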
def get_tool_by_id(tool_id):
"""
returns the tool given the id
"""
tool = ToolType.objects.get(pk=tool_id)
return tool | 7cee1d4a484028db94049227f9a968541974cd1e | 15,609 |
import http
def domainr(text):
"""<domain> - uses domain.nr's API to search for a domain, and similar domains
:type text: str
"""
try:
data = http.get_json('http://domai.nr/api/json/search?q=' + text)
except (http.URLError, http.HTTPError):
return "Unable to get data for some reason. Try again later."
if data['query'] == "":
return "An error occurred: {status} - {message}".format(**data['error'])
domains = [format_domain(domain) for domain in data["results"]]
return "Domains: {}".format(", ".join(domains)) | d44e49c25256a10f242cb8515b1e970cf509676d | 15,610 |
def manage_blog():
""" 博文管理页面路由 """
if 'adminname' in session:
if request.method == 'POST':
del_result = manage_del_blog(db, Post, Comment, request.form.get('edit_id'))
return del_result
else:
blog_list = Post.query.order_by(Post.post_time.desc()).all()
return render_template('admin_blog.html',
page_in='blog',
blog_list=blog_list)
else:
return redirect(url_for('login')) | be73948d29e96413bff987447c5b9baa87177ccf | 15,611 |
def split(nodes, index, axis=0):
"""
Split a array of nodes into two separate, non-overlapping arrays.
Parameters
----------
nodes : numpy.ndarray
An N x M array of individual node coordinates (i.e., the
x-coords or the y-coords only)
index : int
The leading edge of where the split should occur.
axis : int, optional
The axis along which ``nodes`` will be split. Use `axis = 0`
to split along rows and `axis = 1` for columns.
Raises
------
ValueError
Trying to split ``nodes`` at the edge (i.e., resulting in the
original array and an empty array) will raise an error.
Returns
-------
n1, n2 : numpy.ndarrays
The two non-overlapping sides of the original array.
"""
if index + 1 >= nodes.shape[axis] or index == 0:
raise ValueError("cannot split grid at or beyond its edges")
if axis == 0:
n1, n2 = nodes[:index, :], nodes[index:, :]
elif axis == 1:
n1, n2 = nodes[:, :index], nodes[:, index:]
return n1, n2 | 4ba4a078e35c7a4164675eab2fe36c943264bb28 | 15,612 |
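A small usage sketch of `split` on a hypothetical 4×3 node array, split along rows:

```python
import numpy as np

nodes = np.arange(12).reshape(4, 3)
top, bottom = split(nodes, index=2, axis=0)
print(top.shape, bottom.shape)   # (2, 3) (2, 3)
```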
def smart_oracle(oracle, text, code, block_len, max_rand):
"""Call oracle normally, or repeatedly call oracle in case of random prefix.
Returns "clean" oracle ouptut regardless of whether the oracle adds a
random prefix.
"""
if not max_rand:
return oracle(text, code) if code else oracle(text)
# append arbitrary bytes unlikely to occur in attacker-controlled plaintext
text_mod = bytearray([7] * block_len * 2) + text
success = False
while not success:
encrypted = oracle(text_mod, code) if code else oracle(text_mod)
text_start = blocks_aligned(encrypted, block_len, max_rand)
if text_start is not None:
success = True
return encrypted[text_start:] | fb20586236509838333b2723b24ead9fba9f2887 | 15,613 |
def inference_video_feed(request, project_id):
"""inference_video_feed
"""
return Response({
"status": "ok",
"url": "http://" + inference_module_url() + "/video_feed?inference=1",
}) | 7751737093b0f1cd72301e3dcd07c9ab929c9931 | 15,615 |
from datetime import datetime, timezone
def extract_start_timestamp() -> datetime:
"""Define extraction start timestamp.
Returns:
Extraction start timestamp used for testing.
"""
timestamp = datetime(2019, 8, 6, tzinfo=timezone.utc)
return timestamp | f03668c5b19a05c623040b8be1ff6fca23765437 | 15,616 |
def phi_pdf(X, corr=None):
"""
Standard normal PDF/Multivariate pdf.
**Input:**
* **X** (`float`)
Argument.
* **corr** (`ndarray`)
Correlation matrix.
**Output**
Standard normal PDF of X.
"""
norm_pdf = None
if isinstance(X, int) or isinstance(X, float):
norm_pdf = norm.pdf(X, loc=0, scale=1)
else:
if np.trace(corr) != len(X):
shape_error(' X or corr ')
else:
norm_pdf = multivariate_normal.pdf(X, cov=corr)
return norm_pdf | 31566a5e0c50eaae7be367f7ccfd5dc0c1bfcd94 | 15,617 |
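A quick check of `phi_pdf` against known values, assuming `numpy` as `np` and `norm`/`multivariate_normal` from `scipy.stats` as the body expects: the standard normal density at 0 is 1/√(2π) ≈ 0.3989, and the bivariate density at the origin with identity correlation is 1/(2π) ≈ 0.1592.

```python
import numpy as np

print(phi_pdf(0.0))                           # ≈0.39894
print(phi_pdf(np.zeros(2), corr=np.eye(2)))   # ≈0.15915
```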
def computeStatistic( benchmarks, field, func ):
"""
Return the result of func applied to the values of field in benchmarks.
Arguments:
benchmarks: The list of benchmarks to gather data from.
field: The field to gather from the benchmarks.
func: The function to apply to the data, must accept a list and return a single value.
"""
results = []
for benchmark in benchmarks:
results.append( benchmark[ field ] )
return func( results ) | 7eced912d319a3261170f8274c4562db5e28c34c | 15,618 |
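A usage sketch of `computeStatistic` with hypothetical benchmark dictionaries:

```python
import statistics

benchmarks = [{"runtime": 1.2}, {"runtime": 1.4}, {"runtime": 1.0}]
print(computeStatistic(benchmarks, "runtime", statistics.mean))  # ≈1.2
print(computeStatistic(benchmarks, "runtime", max))              # 1.4
```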
import re
def bus_update_request(payload):
"""Parser for `bus_update_request` tracepoint"""
try:
match = re.match(bus_update_request_pattern, payload)
if match:
match_group_dict = match.groupdict()
return BusUpdateRequest(**match_group_dict)
except Exception as e:
raise ParserError(str(e)) | ea5d95eaef4964b900b5201a8878886529f8c132 | 15,619 |
def update_employee(request,id):
"""
Updating the employee profile.
"""
try:
obj = User.objects.get(id=id)
total_cl = obj.no_of_cl
total_sl = obj.no_of_sl
total_wh = obj.no_of_wh
attendance_cl = Attendance.objects.filter(id=id,leave_type='cl',approved_or_not=True).count()
attendance_sl = Attendance.objects.filter(id=id,leave_type='sl',approved_or_not=True).count()
attendance_wh = Attendance.objects.filter(id=id,leave_type='wl',approved_or_not=True).count()
taken_cl = (total_cl-attendance_cl)
taken_sl = (total_sl-attendance_sl)
taken_wh = (total_wh-attendance_wh)
if request.method == "GET":
form = EmployeeCreationForm(instance=obj,initial={'email':obj.email})
context = {
'form':form,
'obj':obj,
'attendance_cl':attendance_cl,
'attendance_sl':attendance_sl,
'attendance_wh':attendance_wh,
'taken_cl':taken_cl,
'taken_sl':taken_sl,
'taken_wh':taken_wh
}
return render (request,'Employees/edit_employee.html', context)
elif request.method == "POST":
form = EmployeeCreationForm(request.POST,request.FILES,instance=obj)
if form.is_valid():
form_save = form.save(commit=False)
form_save.email = form.cleaned_data['email']
form_save.img = form.cleaned_data['img']
form_save.save()
return render(request,'Employees/edit_employee.html',{'form':form})
else:
return render(request,'Employees/edit_employee.html',{'form':form})
else:
return HttpResponseRedirect('/forbidden/')
except Exception:
return HttpResponseRedirect('/error/') | bcbfd2183ad6ea835b8e6fc4d6818b0bd31ea051 | 15,620 |
def func(TI, S0, alpha, T1):
""" exponential function for T1-fitting.
Args
----
x (numpy.ndarray): Inversion times (TI) in the T1-mapping sequence as input for the signal model fit.
Returns
-------
a, b, T1 (numpy.ndarray): signal model fitted parameters.
"""
mz = 1 - alpha * np.exp(-TI*(alpha-1)/T1)
return np.abs(S0 * mz) | 605393be1aaf9f70f7f65c56dc5a31d4a38390e7 | 15,621 |
def make_random_coordinate():
""" Make a random coordinate dictionary"""
return make_coordinate(randint(0, 100), randint(0, 100)) | 7719714d19e94a5be2ae3f93fc5290514bfe4b5e | 15,623 |
def inv_qft_core(qubits):
"""
Generates a Quil program that performs
the inverse quantum Fourier transform on the given qubits
without swapping qubits at the end.
:param qubits: A list of qubit indexes.
:return: A Quil program to compute the inverse QFT of the given qubits without swapping.
"""
qft_quil = Program.inst(qft_core(qubits, coef=-1))
inv_qft_quil = Program()
while(len(qft_quil) > 0):
inst = qft_quil.pop()
inv_qft_quil.inst(inst)
return inv_qft_quil | 3c7982cdb44398e1730a3aaaeee2c323694fae96 | 15,624 |
def analysis_precheck(_id, feature_table, rep_seqs, taxonomy, metadata):
"""
Do prechecks as to decrease the chance of job failing.
Input:
- feature_table: QIIME2 artifact of type FeatureTable[Frequency]
- rep_seqs: QIIME2 artifact of type FeatureData[Sequence]
"""
feature_table_path = save_uploaded_file(_id, feature_table)
rep_seqs_path = save_uploaded_file(_id, rep_seqs)
taxonomy_path = save_uploaded_file(_id, taxonomy)
metadata_path = save_uploaded_file(_id, metadata)
def validate_analysis_input(feature_table, rep_seqs, taxonomy):
"""
Precheck input files prior to running denoise step
Input:
- feature_table: Path to QIIME2 artifact of type FeatureTable[Frequency]
- rep_seqs: Path to QIIME2 artifact of type FeatureData[Sequence]
"""
# Check Artifact type
try:
feature_table_artifact = Artifact.load(feature_table)
rep_seqs_artifact = Artifact.load(rep_seqs)
if(str(feature_table_artifact.type) != "FeatureTable[Frequency]"):
msg = "Input Feature Table is not of type 'FeatureTable[Frequency]'!"
raise ValueError(msg)
if(str(rep_seqs_artifact.type) != "FeatureData[Sequence]"):
msg = "Input Representative Sequences is not of type 'FeatureData[Sequence]'!"
raise ValueError(msg)
except ValueError as err:
message = str(err)
return 400, message
return 200, "Imported data good!"
responseIfError(validate_analysis_input, feature_table=feature_table_path, rep_seqs=rep_seqs_path, taxonomy=taxonomy_path)
return feature_table_path, rep_seqs_path, taxonomy_path, metadata_path | 65dde12b312926d185722a09779c8d11705d71dc | 15,625 |
import socket
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Deluge from a config entry."""
host = entry.data[CONF_HOST]
port = entry.data[CONF_PORT]
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
api = await hass.async_add_executor_job(
DelugeRPCClient, host, port, username, password
)
api.web_port = entry.data[CONF_WEB_PORT]
try:
await hass.async_add_executor_job(api.connect)
except (
ConnectionRefusedError,
socket.timeout,
SSLError,
) as ex:
raise ConfigEntryNotReady("Connection to Deluge Daemon failed") from ex
except Exception as ex: # pylint:disable=broad-except
if type(ex).__name__ == "BadLoginError":
raise ConfigEntryAuthFailed(
"Credentials for Deluge client are not valid"
) from ex
_LOGGER.error("Unknown error connecting to Deluge: %s", ex)
coordinator = DelugeDataUpdateCoordinator(hass, api, entry)
await coordinator.async_config_entry_first_refresh()
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True | 70ab1c52569274eb7125e56f38028db3eb792259 | 15,626 |
import cmath
def powerFactor(n):
"""Function to compute power factor given a complex power value
Will this work if we're exporting power? I think so...
"""
# Real divided by apparent
pf = n.real.__abs__() / n.__abs__()
# Determine lagging vs leading (negative).
# NOTE: cmath.phase returns counter-clockwise angle on interval [-pi, pi],
# so checking sign should be reliable for determining lead vs. lag
p = cmath.phase(n)
if p < 0:
return (pf, 'lead')
else:
return (pf, 'lag') | 1a507818f9c9906d27a1374cc9b757766b3038c1 | 15,627 |
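A worked example for `powerFactor`: for S = 100 + 50j VA, |S| ≈ 111.8, so the power factor is 100 / 111.8 ≈ 0.894, and the sign of the phase decides lagging vs. leading:

```python
print(powerFactor(100 + 50j))   # (≈0.894, 'lag')
print(powerFactor(100 - 50j))   # (≈0.894, 'lead')
```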
import requests
from lxml import html
def send_request(url, raise_errors):
"""
Sends a request to a URL and parses the response with lxml.
"""
try:
response = requests.get(url, headers={'Accept-Language': '*'}, verify=_PEM_PATH)
response.raise_for_status()
doc = html.fromstring(response.text)
return doc
except requests.exceptions.RequestException:
if raise_errors:
raise
return None | ddf8dc7c899b97cebdc45727bd43ca1acf015ecb | 15,628 |
def _static_idx(idx, size):
"""Helper function to compute the static slice start/limit/stride values."""
assert isinstance(idx, slice)
start, stop, step = idx.indices(size)
if (step < 0 and stop >= start) or (step > 0 and start >= stop):
return 0, 0, 1, False # sliced to size zero
if step > 0:
return start, stop, step, False
else:
k = (start - stop - 1) % (-step)
return stop + k + 1, start + 1, -step, True | 8c586375f018be36c0e7688549a551d17d4e2bc8 | 15,629 |
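A worked example for `_static_idx`: for `x[::-2]` on an axis of size 5, `slice(None, None, -2).indices(5)` is `(4, -1, -2)`, so the helper returns start 0, limit 5, stride 2 with the reverse flag set (take elements 0, 2, 4, then reverse):

```python
print(_static_idx(slice(None, None, -2), 5))   # (0, 5, 2, True)
print(_static_idx(slice(1, 4), 5))             # (1, 4, 1, False)
print(_static_idx(slice(3, 1), 5))             # (0, 0, 1, False) -> zero-size slice
```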
def crop_array(input_array, ylength, xlength=None, orgn=(0,0)):
"""Crops an image in numpy array format. Pads crops outside
of input image with zeros if necessary. If no y dimension
is specified, outputs a square image.
"""
if xlength == None:
xlength = ylength
ylength = int(ylength)
xlength = int(xlength)
orgn = (int(orgn[0]), int(orgn[1]))
target = np.zeros((ylength, xlength))
#slice ranges
ymin = max(orgn[0], 0)
xmin = max(orgn[1], 0)
ymax = min(orgn[0] + ylength, input_array.shape[0])
xmax = min(orgn[1] + xlength, input_array.shape[1])
yslice = slice(ymin, ymax)
xslice = slice(xmin, xmax)
#top, left, bottom, right pads
tp = max(-orgn[0], 0)
lp = max(-orgn[1], 0)
bp = max((ylength + orgn[0] - tp - input_array.shape[0]), 0)
rp = max((xlength + orgn[1] - lp - input_array.shape[1]), 0)
#insert slice into the right spot.
target[tp:(ylength-bp),lp:(xlength-rp)] = input_array[yslice, xslice]
return target | 4daeb126a8424fc038a5a42448d17eada9d12ee3 | 15,630 |
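A usage sketch of `crop_array`: cropping a 3×3 array with a 4×4 window whose origin lies off the top-left corner zero-pads the out-of-bounds region (assumes `numpy` as `np`, as the body does):

```python
import numpy as np

img = np.arange(9).reshape(3, 3)
patch = crop_array(img, 4, orgn=(-1, -1))
print(patch)
# [[0. 0. 0. 0.]
#  [0. 0. 1. 2.]
#  [0. 3. 4. 5.]
#  [0. 6. 7. 8.]]
```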
def get_service_defaults(servicename, version, **_):
"""
Load the default configuration for a given service version
Variables:
servicename => Name of the service to get the info
version => Version of the service to get
Data Block:
None
Result example:
{'accepts': '(archive|executable|java|android)/.*',
'category': 'Extraction',
'classpath': 'al_services.alsvc_extract.Extract',
'config': {'DEFAULT_PW_LIST': ['password', 'infected']},
'cpu_cores': 0.1,
'description': "Extracts some stuff"
'enabled': True,
'name': 'Extract',
'ram_mb': 256,
'rejects': 'empty|metadata/.*',
'stage': 'EXTRACT',
'submission_params': [{'default': u'',
'name': 'password',
'type': 'str',
'value': u''},
{'default': False,
'name': 'extract_pe_sections',
'type': 'bool',
'value': False},
{'default': False,
'name': 'continue_after_extract',
'type': 'bool',
'value': False}],
'timeout': 60}
"""
service = STORAGE.service.get(f"{servicename}_{version}", as_obj=False)
if service:
return make_api_response(service)
else:
return make_api_response("", err=f"{servicename} service does not exist", status_code=404) | 2ba13382e2d5a668f1653f90fed5efe01200d6e2 | 15,631 |
def read_nnet3_model(model_path: str) -> nnet3.Nnet:
"""Read in a nnet3 model in raw format.
Actually if this model is not a raw format it will still work, but this is
not an official feature; it was due to some kaldi internal code.
Args:
model_path: Path to a raw nnet3 model, e.g., "data/final.raw"
Returns:
nnet: A neural network AM.
"""
nnet = nnet3.Nnet()
with xopen(model_path) as istream:
nnet.read(istream.stream(), istream.binary)
return nnet | b5dbadeb0f2072dfeccd1f4fdc77990680d12068 | 15,632 |
def flat_list_of_lists(l):
"""flatten a list of lists [[1,2], [3,4]] to [1,2,3,4]"""
return [item for sublist in l for item in sublist] | c121dff7d7d9a4da55dfb8aa1337ceeea191fc30 | 15,633 |
def cancel(api, order_ids=None):
"""
DELETE all orders by api["symbol"] (or) by symbol and order_id:
"""
if DETAIL:
print(cancel.__doc__, "symbol", api['symbol'], "order_ids", order_ids)
if order_ids is None:
order_ids = [] # must be a list
# format remote procedure call to exchange api standards
symbol = symbol_syntax(api["exchange"], api['symbol'])
if not order_ids:
print("Cancel All")
else:
print("Cancel Order Ids:", order_ids)
# Coinbase and Poloniex offer both Cancel All and Cancel One
if api["exchange"] in ["coinbase", "poloniex"]:
if order_ids:
# Cancel a list of orders
ret = []
for order_id in order_ids:
print("Cancel Order", order_id)
if api["exchange"] == "coinbase":
api["endpoint"] = "/orders/" + str(order_id)
api["params"] = {}
api["method"] = "DELETE"
elif api["exchange"] == "poloniex":
api["endpoint"] = "/tradingApi"
api["params"] = {"command": "cancelOrder", "orderNumber": int(order_id)}
api["method"] = "POST"
response = process_request(api)
ret.append({"order_id": order_id, "response": response})
else:
# Cancel All
if api["exchange"] == "coinbase":
api["endpoint"] = "/orders"
api["params"] = {"product_id": symbol}
api["method"] = "DELETE"
elif api["exchange"] == "poloniex":
api["endpoint"] = "/tradingApi"
api["params"] = {"command": "cancelAllOrders", "currencyPair": symbol}
api["method"] = "POST"
ret = process_request(api)
# Handle cases where "Cancel All" in one market is not supported
elif api["exchange"] in ["kraken", "binance", "bittrex", "Bitfinex"]:
if (api["exchange"] == "bitfinex") and not api["symbol"]:
print("WARN: Cancel All in ALL MARKETS")
api["endpoint"] = "/v2/auth/w/order/cancel/multi"
api["params"] = {}
api["method"] = "POST"
ret = process_request(api)
else:
# If we have an order_ids list we'll use it, else make one
if not order_ids:
print("Open Orders call to suppport Cancel All")
orders = get_orders(api)
order_ids = []
for order in orders["asks"]:
order_ids.append(order["order_id"])
for order in orders["bids"]:
order_ids.append(order["order_id"])
ret = []
for order_id in order_ids:
print("Cancel Order", order_id)
if api['exchange'] == "bitfinex":
api["endpoint"] = "/v2/auth/w/order/cancel"
api["params"] = {"id": order_id}
api["method"] = ""
elif api["exchange"] == "binance":
api["endpoint"] = "/api/v3/order"
api["params"] = {"symbol": symbol, "orderId": order_id}
api["method"] = "DELETE"
elif api["exchange"] == "bittrex":
api["endpoint"] = "/api/v1.1/market/cancel"
api["params"] = {"uuid": order_id}
api["method"] = "GET"
elif api["exchange"] == "kraken":
api["endpoint"] = "/0/private/CancelOrder"
api["params"] = {"txid": order_id}
api["method"] = "POST"
response = process_request(api)
ret.append(response)
return ret | 24688256b20d3fdcc40eebb393ab98a963037d96 | 15,634 |
def decrypt(mess, key):
"""Decrypt the cypher text using AES decrypt"""
if len(key) % 16 != 0:
a = 16 - len(key) % 16
key = key.ljust(len(key) + a)
cipher = AES.new(key)
plain_txt = cipher.decrypt(mess)
return plain_txt | eb587960d63539e1e57158f84174af5b5dae3fb5 | 15,636 |
def multiVecMat( vector, matrix ):
"""
Multiplies the matrix by the vector from the right.
Parameters:
----------
vector: list
The vector.
matrix: list
The matrix to be multiplied. Its row length must match the dimension
of the vector.
Returns:
list
A list with one entry per row of the matrix.
"""
# Create a result list with one entry per matrix row
result = [0] * len( matrix )
# Iterate over the matrix rows
for r, row in enumerate( matrix ):
# If the dimensions do not match, stop
if len(row) != len(vector):
return None
# Iterate over each element in the row
for i, elem in enumerate( row ):
# Add to the result, at the index of the current row, the product of the
# current row element and the corresponding element of the vector.
result[r] += elem * vector[i]
return result | 8a10241173ab981d6007d8ff939199f9e86806e5 | 15,637 |
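A worked example of the row-wise accumulation in `multiVecMat`: [[1, 2], [3, 4]] · [5, 6] = [1·5 + 2·6, 3·5 + 4·6] = [17, 39]; a row whose length does not match the vector yields None:

```python
print(multiVecMat([5, 6], [[1, 2], [3, 4]]))   # [17, 39]
print(multiVecMat([5, 6], [[1, 2, 3]]))        # None (dimension mismatch)
```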
def verify_state(
state_prec_gdf,
state_abbreviation,
source,
year,
county_level_results_df,
office,
d_col=None,
r_col=None,
path=None,
):
"""
returns a complete (StateReport) object and a ((CountyReport) list) for the state.
:state_prec_gdf: (GeoDataFrame) containing precinct geometries and election results
:state_abbreviation: (str) e.g. 'MA' for Massachusetts
:source: (str) person or organization that made the 'state_prec_gdf' e.g 'VEST'
:year: (str) 'YYYY' indicating the year the election took place e.g. '2016'
:county_level_results_df: (DataFrame) containing official county-level election results
:office: (str) office to be evaluated in vote validation e.g. 'U.S. Senate'
:d_col: (str) denotes the column for democratic vote counts in each precinct
:r_col: (str) denotes the column for republican vote counts in each precinct
:path: (str) filepath to which the report should be saved (if None it won't be saved)
d_col, r_col are optional - if they are not provided, `get_party_cols` will be used
to guess based on comparing each column in state_prec_gdf to the expected results.
"""
print("Starting verification process for: ", state_abbreviation, source, year)
state_prec_gdf = state_prec_gdf.reset_index()
county_level_results_df = county_level_results_df.reset_index()
# enforce expected schema
assert "geometry" in state_prec_gdf.columns
assert {"county", "GEOID", "party", "votes"}.issubset(
set(county_level_results_df.columns)
)
# assign d_col and r_col
if not d_col or not r_col:
print("Candidate vote count columns are being assigned automatically")
d_col, r_col = get_party_cols(state_prec_gdf, state_abbreviation)
else:
print("Candidate vote count columns are being assigned manually")
print("Choose d_col as: ", d_col)
print("Choose r_col as: ", r_col)
state_prec_gdf = state_prec_gdf.rename(columns={d_col: "d_col", r_col: "r_col"})
# remove unecessary columns
cols_to_keep = ["d_col", "r_col", "geometry"]
if "GEOID" in state_prec_gdf.columns:
cols_to_keep.append("GEOID")
state_prec_gdf = state_prec_gdf[cols_to_keep]
print("Verification will now begin with this GeoDataFrame: \n")
print(state_prec_gdf.head())
# initialize state report
print("Starting Vote Verification")
state_report = StateReport(
county_level_results_df,
state_prec_gdf,
state_abbreviation,
year,
source,
office,
)
# poplulate the report
print("Starting Topology Verification")
state_report = verify_topology(state_prec_gdf, state_report)
print("Starting County Verification")
# assign GEOID
if "GEOID" not in state_prec_gdf.columns:
try:
print("Missing GEOID Column - attempting automatic assignment")
state_prec_gdf = assign_GEOID(state_prec_gdf, state_report.fips)
print("GEOID assignment successful")
except:
pass
else:
print("Using the GEOID Column in the original shapefile.")
assert "GEOID" in state_prec_gdf.columns
state_report, county_reports = verify_counties(
state_prec_gdf, county_level_results_df, state_report
)
if path:
make_report(path, state_report, county_reports)
print("All done!\n")
return state_report, county_reports | 245baf26d1fad646abcee493bfa31935f2d1db59 | 15,638 |
import requests
from requests.auth import HTTPBasicAuth
def remove_profile(serial, profile_id):
"""hubcli doesn't remove profiles so we have to do this server-side."""
r = requests.post(
url=f"https://{ AIRWATCH_DOMAIN }/API/mdm/profiles/{ profile_id }/remove",
json={"SerialNumber": serial},
headers={
"aw-tenant-code": AIRWATCH_KEY,
"Content-Type": "application/json",
"Accept": "application/json",
},
auth=HTTPBasicAuth(AIRWATCH_USER, AIRWATCH_PASSWORD),
)
r.raise_for_status()
return r | 8a47cd5c6588c3140d9e44aac94b59916517345a | 15,639 |
def KELCH(df, n):
"""
Keltner Channel
"""
temp = (df['High'] + df['Low'] + df['Close']) / 3
KelChM = pd.Series(temp.rolling(n).mean(), name='KelChM_' + str(n))
temp = (4 * df['High'] - 2 * df['Low'] + df['Close']) / 3
KelChU = pd.Series(temp.rolling(n).mean(), name='KelChU_' + str(n))
temp = (-2 * df['High'] + 4 * df['Low'] + df['Close']) / 3
KelChD = pd.Series(temp.rolling(n).mean(), name='KelChD_' + str(n))
result = pd.DataFrame([KelChM, KelChU, KelChD]).transpose()
return out(SETTINGS, df, result) | 3335fd45ce073eec33d65b7a6d23c07b6a71a662 | 15,641 |
def wait_until_complete(jobs):
"""wait jobs finish"""
return [j.get() for j in jobs] | 530c3af30ca40025891980191c1f121d8f026a53 | 15,643 |
import io
import pycurl
def torquery(url):
"""
Uses pycurl to fetch a site using the proxy on the SOCKS_PORT.
"""
output = io.BytesIO()
query = pycurl.Curl()
query.setopt(pycurl.URL, url)
query.setopt(pycurl.PROXY, 'localhost')
query.setopt(pycurl.PROXYPORT, SOCKS_PORT)
query.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5_HOSTNAME)
query.setopt(pycurl.WRITEFUNCTION, output.write)
try:
query.perform()
return output.getvalue()
except pycurl.error as exc:
return "Unable to reach %s (%s)" % (url, exc) | 7ae660a9a5c3af7be0fbf8789cc96d481863a2c4 | 15,644 |
def check_for_negative_residual(vel, data, errors, best_fit_list, dct,
signal_ranges=None, signal_mask=None,
force_accept=False, get_count=False,
get_idx=False, noise_spike_mask=None):
"""Check for negative residual features and try to refit them.
We define negative residual features as negative peaks in the residual that were introduced by the fit. These negative peaks have to have a minimum negative signal-to-noise ratio of dct['snr_negative'].
In case of a negative residual feature, we try to replace the Gaussian fit component that is causing the feature with two narrower components. We only accept this solution if it yields a better fit as determined by the AICc value.
Parameters
----------
vel : numpy.ndarray
Velocity channels (unitless).
data : numpy.ndarray
Original data of spectrum.
errors : numpy.ndarray
Root-mean-square noise values.
best_fit_list : list
List containing parameters of the current best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue]
dct : dict
Dictionary containing parameter settings for the improved fitting.
signal_ranges : list
Nested list containing info about ranges of the spectrum that were estimated to contain signal. The goodness-of-fit calculations are only performed for the spectral channels within these ranges.
signal_mask : numpy.ndarray
Boolean array containing the information of signal_ranges.
force_accept : bool
Experimental feature. Default is 'False'. If set to 'True', the new fit will be forced to become the best fit.
get_count : bool
Default is 'False'. If set to 'True', only the number of occurring negative residual features will be returned.
get_idx : bool
Default is 'False'. If set to 'True', the index of the Gaussian fit component causing the negative residual feature is returned. In case of multiple negative residual features, only the index of one of them is returned.
Returns
-------
best_fit_list : list
List containing parameters of the chosen best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue]
"""
params_fit = best_fit_list[0]
ncomps_fit = best_fit_list[2]
# in case a single rms value is given instead of an array
if not isinstance(errors, np.ndarray):
errors = np.ones(len(data)) * errors
if ncomps_fit == 0:
if get_count:
return 0
return best_fit_list
residual = best_fit_list[4]
amps_fit, fwhms_fit, offsets_fit = split_params(params_fit, ncomps_fit)
amp_guesses, fwhm_guesses, offset_guesses = get_initial_guesses(
residual, errors[0], dct['snr_negative'], dct['significance'],
peak='negative')
# check if negative residual feature was already present in the data
remove_indices = []
for i, offset in enumerate(offset_guesses):
if residual[offset] > (data[offset] - dct['snr']*errors[0]):
remove_indices.append(i)
if len(remove_indices) > 0:
amp_guesses, fwhm_guesses, offset_guesses = remove_components_from_sublists(
[amp_guesses, fwhm_guesses, offset_guesses], remove_indices)
if get_count:
return (len(amp_guesses))
if len(amp_guesses) == 0:
return best_fit_list
# in case of multiple negative residual features, sort them in order of increasing amplitude values
sort = np.argsort(amp_guesses)
amp_guesses = np.array(amp_guesses)[sort]
fwhm_guesses = np.array(fwhm_guesses)[sort]
offset_guesses = np.array(offset_guesses)[sort]
for amp, fwhm, offset in zip(amp_guesses, fwhm_guesses, offset_guesses):
idx_low = max(0, int(offset - fwhm))
idx_upp = int(offset + fwhm) + 2
exclude_idx = check_which_gaussian_contains_feature(
idx_low, idx_upp, fwhms_fit, offsets_fit)
if get_idx:
return exclude_idx
if exclude_idx is None:
continue
params_fit = replace_gaussian_with_two_new_ones(
data, vel, errors[0], dct['snr'], dct['significance'],
params_fit, exclude_idx, offset)
best_fit_list = get_best_fit(
vel, data, errors, params_fit, dct, first=False,
best_fit_list=best_fit_list, signal_ranges=signal_ranges,
signal_mask=signal_mask, force_accept=force_accept,
noise_spike_mask=noise_spike_mask)
params_fit = best_fit_list[0]
ncomps_fit = best_fit_list[2]
amps_fit, fwhms_fit, offsets_fit = split_params(params_fit, ncomps_fit)
return best_fit_list | 3901af224a080e99aa0cfdef22a27b8951ee4c3c | 15,645 |
import requests
import shutil
def download(url, filename, proxies=None):
"""
Download the URL into the destination file
:param url: URL to download
:param filename: destination file
"""
error = ''
try:
req = requests.get(url, proxies=proxies, stream=True)
with open(filename, "wb") as f:
shutil.copyfileobj(req.raw, f)
except FileNotFoundError as fnf:
error = f"Error while downloading {url} - I/O Problem with {filename} : FileNotFound -> check path"
except Exception as ex:
error = f"Error while downloading {url}. {str(ex)}"
return len(error) == 0, error, filename | da097b46aef574623ac975aa8d5e9506ff191d53 | 15,647 |
import json
async def validate_devinfo(hass, data):
"""检验配置是否缺项。无问题返回[[],[]],有缺项返回缺项。"""
# print(result)
devtype = data['devtype']
ret = [[],[]]
requirements = VALIDATE.get(devtype)
if not requirements:
return ret
else:
for item in requirements[0]:
if item not in json.loads(data[CONF_MAPPING]):
ret[0].append(item)
for item in requirements[1]:
if item not in json.loads(data[CONF_CONTROL_PARAMS]):
ret[1].append(item)
return ret | 7e0822ae36617f209447221dc49f0485e88759e9 | 15,648 |
import datetime
def worldbank_date_to_datetime(date):
"""Convert given world bank date string to datetime.date object."""
if "Q" in date:
year, quarter = date.split("Q")
return datetime.date(int(year), (int(quarter) * 3) - 2, 1)
if "M" in date:
year, month = date.split("M")
return datetime.date(int(year), int(month), 1)
return datetime.date(int(date), 1, 1) | f36fb2c763da59ae58a08f446e4cbd566d6e87e0 | 15,650 |
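A quick sketch of the three date formats `worldbank_date_to_datetime` accepts:

```python
print(worldbank_date_to_datetime("2019Q3"))   # 2019-07-01
print(worldbank_date_to_datetime("2019M11"))  # 2019-11-01
print(worldbank_date_to_datetime("2019"))     # 2019-01-01
```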
from typing import Sequence
def select(
key: bytes, seq: Sequence[BucketType], *, seed: bytes = DEFAULT_SEED
) -> BucketType:
"""
Select one of the elements in seq based on the hash of ``key``.
Example partitioning of input on ``stdin`` into buckets::
buckets = {} # type: Dict[int, str]
for line in sys.stdin:
buckets[select(line.encode(), [0, 1, 2, 3, 4, 5])] = line
:param key: The bytes to hash.
:param seq: The sequence from which to select an element. Must be non-empty.
:param seed: Seed to hash prior to hashing b.
:raise ValueError: If ``seq`` is empty.
:return: One of the elements in ``seq``.
"""
if not seq:
raise ValueError("non-empty sequence required")
return seq[range(key, len(seq), seed=seed)] | d9f3a2f3aa759f252965478e0f065a555b23a24d | 15,651 |
def unescape(s):
"""
unescape html
"""
html_codes = (
("'", '''),
('"', '"'),
('>', '>'),
('<', '<'),
('&', '&')
)
for code in html_codes:
s = s.replace(code[1], code[0])
return s | c1434498694d90b219c962c0ce75b6c8978533bb | 15,652 |
def ua_mnem(*args):
"""ua_mnem(ea_t ea, char buf) -> char"""
return _idaapi.ua_mnem(*args) | 23322dd3780c9184ebed9f6006a0fad37000f29a | 15,653 |
import time
def tic(*names):
"""
Start timer, use `toc` to get elapsed time in seconds.
Parameters
----------
names : str, str, ...
Names of timers
Returns
-------
out : float
Current timestamp
Examples
--------
.. code-block:: python
:linenos:
:emphasize-lines: 10,11,12
import plazy
def foo():
total = 0
for _ in range(100000):
total += 1
return total
if __name__ == "__main__":
plazy.tic() # T1
plazy.tic("B") # T2
plazy.tic("C", "D", "E") # T3
foo()
dt1 = plazy.toc() # elapsed time since T1
dt2 = plazy.toc("B") # elapsed time since T2
dt3 = plazy.toc("C", "D") # elapsed time since T3
foo()
dt4 = plazy.toc("E") # elapsed time since T3
dt5 = plazy.toc("B") # elapsed time since T2
print(dt1) # 0.009924173355102539
print(dt2) # 0.009925603866577148
print(dt3) # [0.00992727279663086, 0.00992727279663086]
print(dt4) # 0.020497798919677734
print(dt5) # 0.020506620407104492
See also
--------
toc
"""
now_ts = time.time()
name_arr = list(names) + (
[
g_time_store.default_name,
]
if len(names) == 0
else []
)
for n in name_arr:
g_time_store.set_time(name=n, value=now_ts)
return now_ts | af9d93a092bed7849150cb3256a8fac6cc20f7d9 | 15,654 |
import pandas as pd
def retrieve_database():
    """Return the contents of MongoDB (the module-level ``restaurant_collection``) as a dataframe."""
return pd.DataFrame(list(restaurant_collection.find({}))) | 20d53dff1cc3164cedef303f8c4e38f7774e9f5e | 15,657 |
from typing import List
def readAbstractMethodsFromFile(file: str) -> List[AbstractMethod]:
"""
Returns a list of `AbstractMethods` read from the given `file`. The file should have one `AbstractMethod`
per line with tokens separated by spaces.
"""
abstractMethods = []
with open(file, "r") as f:
for line in f:
abstractMethods.append(AbstractMethod(line.strip()))
return abstractMethods | 44a1fb346bfbb0eb71882b623887744dbbfb5143 | 15,658 |
from typing import Callable
import numpy as np
def recursive_descent(data: np.ndarray, function: Callable):
    """
    **Recursively process an `np.ndarray` down to its last dimension.**
    This function applies a callable to the very last dimension of a numpy multidimensional array. It is foreseen
    for time series processing, especially in combination with the function `ts_gaf_transform`.
    + param **data**: multidimensional data, type `np.ndarray`.
    + param **function**: callable, type `Callable`.
    + return **function(data)**: all kind of processed data.
    """
    if len(data.shape) == 1:
        return function(data)
    # Recurse into every slice along the first axis; the original returned inside the
    # loop after the first slice and called the undefined name ``ts_recursive_descent``.
    return [recursive_descent(data[i], function) for i in range(data.shape[0])] | c5955f5c3968aae53ae222cc4c0288320c3fb1c6 | 15,660
def watt_spectrum(a, b):
""" Samples an energy from the Watt energy-dependent fission spectrum.
Parameters
----------
a : float
Spectrum parameter a
b : float
Spectrum parameter b
Returns
-------
float
Sampled outgoing energy
"""
return _dll.watt_spectrum(a, b) | 0682a8791cb1b1cce93b4449bebfa2ac098fde20 | 15,661 |
def get_definition_from_stellarbeat_quorum_set(quorum_set: QuorumSet) -> Definition:
"""Turn a stellarbeat quorum set into a quorum slice definition"""
return {
'threshold': quorum_set['threshold'],
'nodes': set(quorum_set['validators']) if 'validators' in quorum_set else set(),
'children_definitions': [
get_definition_from_stellarbeat_quorum_set(inner_quorum_set)
for inner_quorum_set in quorum_set['innerQuorumSets']
        ] if 'innerQuorumSets' in quorum_set else []
} | 6259c820c4b920672f8b2333826f9996de3cd405 | 15,662 |
from functools import wraps
def values(names):
    """
    Method decorator that allows injecting return values into method parameters.
    It tries to find the desired value by walking nested return values. For convenience, a list containing a single value is injected as that value.
:param names: dict of "value-name": "method-parameter-name"
"""
def wrapper(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
if len(args)>1:
instance=args[1]
else:
instance = kwargs['instance']
def findReturnValues(rvalues):
                for k, v in rvalues.items():
if isinstance(v, dict):
findReturnValues(v) #go deep, to find desired name
if k in names.keys():
if isinstance(v,list) and len(v)==1:
kwargs.update({names[k]: v[0]})
else:
kwargs.update({names[k]: v})
findReturnValues(instance.returnValues)
            #ensure all names were set
missing_params = [k for k, v in names.items() if v not in kwargs]
if missing_params:
raise AttributeError("Parameters {0} for '{1}' were not found".format(missing_params, func.__name__), missing_params)
            return func(*args, **kwargs)
return wrapped_func
return wrapper | 72e958692a14254b26e7ff1241103aa0a1063a33 | 15,663 |
from sqlalchemy import func, or_
def select_user(with_dlslots=True):
"""
Select one random user, if can_download is true then user must have
download slots available
:returns User
"""
with session_scope() as db:
try:
query = db.query(User).filter(User.enabled.is_(True))
if with_dlslots:
query = query.filter(or_(
User.downloads_limit > User.downloads_today,
User.downloads_limit.is_(None)
))
user = query.order_by(func.random()).limit(1).one()
except NoResultFound:
raise OperationInterruptedException('No suitable users found')
else:
db.expunge(user)
return user | 2ba3e6c8b0f5488aa770fa982d3206c2dde5d0e1 | 15,664 |
def silero_number_detector(onnx=False):
"""Silero Number Detector
Returns a model with a set of utils
Please see https://github.com/snakers4/silero-vad for usage examples
"""
if onnx:
url = 'https://models.silero.ai/vad_models/number_detector.onnx'
else:
url = 'https://models.silero.ai/vad_models/number_detector.jit'
model = Validator(url)
utils = (get_number_ts,
save_audio,
read_audio,
collect_chunks,
drop_chunks)
return model, utils | 46fa5a33b9e33cfebdc081e062f98711e3e8be61 | 15,665 |
def etaCalc(T, Tr = 296.15, S = 110.4, nr = 1.83245*10**-5):
"""
Calculates dynamic gas viscosity in kg*m-1*s-1
Parameters
----------
T : float
Temperature (K)
Tr : float
Reference Temperature (K)
S : float
Sutherland constant (K)
nr : float
Reference dynamic viscosity
Returns
-------
eta : float
Dynamic gas viscosity in kg*m-1*s-1
"""
eta = nr * ( (Tr + S) / (T+S) )*(T/Tr)**(3/2)
return eta | 3f8182ea29fd558e86280477f2e435247d09798e | 15,666 |
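# A minimal usage sketch for etaCalc above: Sutherland's law with the default
# reference values gives the dynamic viscosity of air at 300 K.
eta_300 = etaCalc(T=300.0)
print(eta_300)  # ~1.85e-05 kg*m^-1*s^-1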
import cv2
import numpy as np
def refine_markers_harris(patch, offset):
""" Heuristically uses the max Harris response for control point center. """
harris = cv2.cornerHarris(patch, 2, 5, 0.07)
edges = np.where(harris < 0, np.abs(harris), 0)
point = np.array(np.where(harris == harris.max())).flatten()
point += offset
return np.float64(point) | afa080a8292c210f04483cddf8d81e297b5f2aec | 15,667 |
def get_realtime_price(symbol):
"""
    Get the real-time stock price.
    :param symbol: stock code
    :return: latest trade price, or -1 if unavailable
"""
try:
df = get_real_price_dataframe()
df_s = df[df['code'] == symbol]
if len(df_s['trade'].get_values()):
return df_s['trade'].get_values()[0]
else:
return -1
    except Exception:
return -1 | af81787ab00283309829a6af2ef04155f73f360c | 15,668 |
def create_employee(db_session: Session, employee: schemas.EmployeeRequest):
""" Create new employee """
new_employee = Employee(
idir=employee.idir,
status=employee.status,
location=employee.location,
phone=employee.phone)
db_session.add(new_employee)
db_session.commit()
db_session.refresh(new_employee)
return db_session.query(Employee).filter(Employee.idir == employee.idir).first() | 1f858412e0e2c94ca40414054c28d67639b620aa | 15,669 |
import numpy as np
def sim_categorical(var_dist_params, size):
"""
Function to simulate data for
a categorical/Discrete variable.
"""
values = var_dist_params[0]
freq = var_dist_params[1]
data_sim = np.random.choice(a=values, p=freq, size=size)
return data_sim | 1b81eac17f041a9200b8c96b9c86310d6d3b003f | 15,670 |
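# A minimal usage sketch for sim_categorical above: var_dist_params packs the
# category values and their frequencies; here 1000 draws from A/B/C.
samples = sim_categorical((["A", "B", "C"], [0.2, 0.5, 0.3]), size=1000)
print(samples[:10])  # e.g. ['B' 'C' 'B' 'A' ...]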
def validSolution(board: list) -> bool:
"""
A function validSolution/ValidateSolution/valid_solution()
that accepts a 2D array representing a Sudoku board,
and returns true if it is a valid solution, or false otherwise
:param board:
:return:
"""
return all([test_horizontally(board),
test_vertically(board),
test_sub_grids(board)]) | 023741cac4106372e8b4c9b8b7c7e60fb9837a7b | 15,671 |
async def test_create_saves_data(manager):
"""Test creating a config entry."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 5
async def async_step_init(self, user_input=None):
return self.async_create_entry(title="Test Title", data="Test Data")
await manager.async_init("test")
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 1
entry = manager.mock_created_entries[0]
assert entry["version"] == 5
assert entry["handler"] == "test"
assert entry["title"] == "Test Title"
assert entry["data"] == "Test Data"
assert entry["source"] is None | 66905b55dcbf3d04b9a1d7d6cbdf843c8694eafb | 15,672 |
def drawingpad(where=None, x=0, y=0, image=None, color=0xffffff, fillingColor=0x000000, thickness=3):
"""Create a drawing pad.
Args:
where (np.ndarray) : image/frame where the component should be rendered.
x (int) : Position X where the component should be placed.
y (int) : Position Y where the component should be placed.
image (np.ndarray) : Image to be rendered in the specified destination.
color (uint) : Color of the line in the format ``0xRRGGBB``, e.g. ``0xff0000`` for red.
fillingColor (uint) : Color of filling in the format `0xAARRGGBB`, e.g. `0x00ff0000` for red, `0xff000000` for transparent filling.
thickness (int) : Thickness of the lines used to draw a line.
Returns:
np.ndarray : The current ``image`` .
Examples:
>>> import cv2
>>> import numpy as np
>>> from pycharmers.opencv import cvui
...
>>> WINDOW_NAME = 'Drawing Pad'
>>> frame = np.zeros(shape=(400, 650, 3), dtype=np.uint8)
>>> image = np.full(shape=(250,250,3), fill_value=255, dtype=np.uint8)
>>> bgr = [128, 128, 128]
>>> fillingColors = ["White", "Black"]
>>> fillingStates = [True, False]
>>> thickness = [3]
>>> cvui.init(WINDOW_NAME)
>>> cv2.moveWindow(winname=WINDOW_NAME, x=0, y=0)
...
>>> while (True):
... # Fill the frame with a nice color
... frame[:] = (49, 52, 49)
... cvui.text(where=frame, x=320, y=10, text="Thickness")
... cvui.text(where=frame, x=320, y=100, text="Filling Color")
... thick = cvui.trackbar(where=frame, x=320, y=30, width=300, value=thickness, min=1, max=10, options=cvui.TRACKBAR_DISCRETE, discreteStep=1)
... idx = cvui.radiobox(where=frame, x=350, y=120, labels=fillingColors, states=fillingStates)
... bgr = cvui.colorpalette(where=frame, x=320, y=180, bgr=bgr, width=300, height=50)
... image = cvui.drawingpad(where=frame, x=30, y=50, image=image, color=bgr, fillingColor=[0xffffff, 0x000000][idx], thickness=thick)
... cvui.update()
... # Show everything on the screen
... cv2.imshow(WINDOW_NAME, frame)
... # Check if ESC key was pressed
... if cv2.waitKey(20) == cvui.ESCAPE:
... break
>>> cv2.destroyWindow(WINDOW_NAME)
>>> # You can draw a picture as follows by executing the following program while running the above program.
>>> def drawing(path, dsize=(250,250), thresh=127, sleep=3, drawing_val=0, offset=(30,125)):
... \"\"\"
... Args:
... path (str) : Path to binary image.
... dsize (tuple) : The size of drawing pad. ( ``width`` , ``height`` )
... thresh (int) : If you prepare the binary (bgr) image, you can use ``cv2.threshold`` to convert it to binary image. (See :meth:`cvPencilSketch <pycharmers.cli.cvPencilSketch.cvPencilSketch>` for more details.)
... sleep (int) : Delay execution for a given number of seconds. (You have to click the OpenCV window before before entering the for-loop.)
... drawing_val (int) : At what value to draw.
... offset (tuple) : Offset from top left ( ``cv2.moveWindow(winname=WINDOW_NAME, x=0, y=0)`` ) to drawing pad.
... \"\"\"
... import cv2
... import time
... import pyautogui as pgui # Use for controling the mouse. (https://pyautogui.readthedocs.io/en/latest/mouse.html)
... img = cv2.resize(src=cv2.imread(path, 0), dsize=dsize)
... img = cv2.threshold(src=img, thresh=thresh, maxval=255, type=cv2.THRESH_BINARY)[1]
... WINDOW_NAME = "Apotheosis"
        ...     cv2.imshow(winname=WINDOW_NAME, mat=img)
... width,height = dsize
... x_offset, y_offset = offset
... time.sleep(sleep)
... for i in range(height):
... pgui.moveTo(x_offset, y_offset+i)
... prev_val, prev_pos = (0, 0)
... for j in range(width+1):
... if j<width:
... val = img[i,j]
... else:
... val = -1 # Must be different from ``prev_val``
... if prev_val != val:
... # Drawing.
... if prev_val == drawing_val:
... pgui.mouseDown()
... pgui.dragRel(xOffset=j-prev_pos, yOffset=0, button="left", duration=0.0, mouseDownUp=True)
... pgui.mouseUp()
... else:
... pgui.moveRel(xOffset=j-prev_pos, yOffset=0, duration=0.0)
... prev_pos = j
... prev_val = val
... key = cv2.waitKey(1)
... if key == 27: break
... if key == 27: break
... cv2.destroyWindow(WINDOW_NAME)
+--------------------------------------------------------+-------------------------------------------------------+
| Example |
+========================================================+=======================================================+
| .. image:: _images/opencv.cvui.drawingpad-konotaro.gif | .. image:: _images/opencv.cvui.drawingpad-tanziro.gif |
+--------------------------------------------------------+-------------------------------------------------------+
"""
handleTypeError(types=[np.ndarray, NoneType], where=where)
if isinstance(where, np.ndarray):
__internal.screen.where = where
block = __internal.screen
else:
block = __internal.topBlock()
x += block.anchor.x
y += block.anchor.y
return __internal.drawingpad(block, x, y, image, color, fillingColor, thickness) | 25d16ee18b25965dd9d8ecbabb46353950c53297 | 15,673 |
from json import loads
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen
def json_request(url, **kwargs):
"""
Request JSON data by HTTP
:param url: requested URL
:return: the dictionary
"""
if 'auth_creds' in kwargs and 'authentication_enabled' in kwargs['auth_creds']:
if 'sessionToken' in kwargs:
url += "&sessionToken=%s" % kwargs['auth_creds']['sessionToken']
else:
url += "&ignite.login=%s&ignite.password=%s" % (kwargs['auth_creds']['auth_login'],
kwargs['auth_creds']['auth_password'])
req = Request(url)
decoded = {}
try:
r = urlopen(req)
reply = r.read().decode('UTF-8')
decoded = loads(reply)
except HTTPError:
print('')
print("HTTPError %s" % url)
except URLError:
print('')
print("URLError %s" % url)
return decoded | d0a496b36905a0a505d3eab721c8e23ab0eb0e21 | 15,674 |
import io
import json
def list():
"""
    List all added paths
"""
try:
with io.open(FILE_NAME, 'r', encoding='utf-8') as f:
data = json.load(f)
    except (OSError, ValueError):
data = {}
return data | 98d426ece920648d1789c4b2e09ee62eb1cb990d | 15,675 |
import pickle
def load(filename):
"""
Load an EigenM object
"""
with open(filename, 'rb') as f:
return pickle.load(f) | 2653cbbe6a1323725c8ba0f771778e1c738daf12 | 15,676 |
def resize_image_bboxes_with_crop_or_pad(image, bboxes, xs, ys,
target_height, target_width, mask_image=None):
"""Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either centrally
cropping the image or padding it evenly with zeros.
If `width` or `height` is greater than the specified `target_width` or
`target_height` respectively, this op centrally crops along that dimension.
If `width` or `height` is smaller than the specified `target_width` or
`target_height` respectively, this op centrally pads with 0 along that
dimension.
Args:
image: 3-D tensor of shape `[height, width, channels]`
target_height: Target height.
target_width: Target width.
Raises:
ValueError: if `target_height` or `target_width` are zero or negative.
Returns:
Cropped and/or padded image of shape
`[target_height, target_width, channels]`
"""
with tf.name_scope('resize_with_crop_or_pad'):
image = ops.convert_to_tensor(image, name='image')
if mask_image is not None:
print('Image: ', image)
print('MaskImage: ', mask_image)
mask_image = ops.convert_to_tensor(mask_image, name='image')
assert_ops = []
assert_ops += _Check3DImage(image, require_static=False)
assert_ops += _assert(target_width > 0, ValueError,
'target_width must be > 0.')
assert_ops += _assert(target_height > 0, ValueError,
'target_height must be > 0.')
image = control_flow_ops.with_dependencies(assert_ops, image)
# `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks.
# Make sure our checks come first, so that error messages are clearer.
if _is_tensor(target_height):
target_height = control_flow_ops.with_dependencies(
assert_ops, target_height)
if _is_tensor(target_width):
target_width = control_flow_ops.with_dependencies(assert_ops, target_width)
def max_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.maximum(x, y)
else:
return max(x, y)
def min_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.minimum(x, y)
else:
return min(x, y)
def equal_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.equal(x, y)
else:
return x == y
height, width, _ = _ImageDimensions(image)
width_diff = target_width - width
offset_crop_width = max_(-width_diff // 2, 0)
offset_pad_width = max_(width_diff // 2, 0)
height_diff = target_height - height
offset_crop_height = max_(-height_diff // 2, 0)
offset_pad_height = max_(height_diff // 2, 0)
# Maybe crop if needed.
height_crop = min_(target_height, height)
width_crop = min_(target_width, width)
cropped = tf.image.crop_to_bounding_box(image, offset_crop_height, offset_crop_width,
height_crop, width_crop)
if mask_image is not None:
cropped_mask_image = tf.image.crop_to_bounding_box(mask_image, offset_crop_height, offset_crop_width,
height_crop, width_crop)
bboxes, xs, ys = bboxes_crop_or_pad(bboxes, xs, ys,
height, width,
-offset_crop_height, -offset_crop_width,
height_crop, width_crop)
# Maybe pad if needed.
resized = tf.image.pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width,
target_height, target_width)
if mask_image is not None:
resized_mask_image = tf.image.pad_to_bounding_box(cropped_mask_image, offset_pad_height, offset_pad_width,
target_height, target_width)
bboxes, xs, ys = bboxes_crop_or_pad(bboxes, xs, ys,
height_crop, width_crop,
offset_pad_height, offset_pad_width,
target_height, target_width)
# In theory all the checks below are redundant.
if resized.get_shape().ndims is None:
raise ValueError('resized contains no shape.')
resized_height, resized_width, _ = _ImageDimensions(resized)
assert_ops = []
assert_ops += _assert(equal_(resized_height, target_height), ValueError,
'resized height is not correct.')
assert_ops += _assert(equal_(resized_width, target_width), ValueError,
'resized width is not correct.')
resized = control_flow_ops.with_dependencies(assert_ops, resized)
if mask_image is None:
return resized, None, bboxes, xs, ys
else:
return resized, resized_mask_image, bboxes, xs, ys | 80c215d0cb750ce55d4d38ef2748f9b89789519c | 15,677 |
def sharpe_ratio(R_p, sigma_p, R_f=0.04):
"""
    :param R_p: annualized return of the strategy
    :param R_f: risk-free rate (default 0.04)
    :param sigma_p: volatility of the strategy's returns
    :return: Sharpe ratio
"""
sharpe_ratio = 1.0 * (R_p - R_f) / sigma_p
return sharpe_ratio | d197df7aa3b92f3a32cc8f11eb675012ffe8af57 | 15,678 |
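# A minimal usage sketch for sharpe_ratio above: a strategy returning 12% annually
# with 20% volatility against the default 4% risk-free rate.
print(sharpe_ratio(R_p=0.12, sigma_p=0.20))  # (0.12 - 0.04) / 0.20 = 0.4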
import numpy as np
import xarray as xr
def xr_vol_int_regional(xa, AREA, DZ, MASK):
    """ volume integral with regional MASK
input:
xa, AREA, DZ .. same as in 'xr_vol_int'
MASK .. 2D xr DataArray of booleans with the same dimensions as xa
output:
integral, int_levels .. same as in 'xr_vol_int'
"""
assert type(xa)==xr.core.dataarray.DataArray
assert type(AREA)==xr.core.dataarray.DataArray
assert type(DZ)==xr.core.dataarray.DataArray
assert np.shape(AREA)==np.shape(xa)[-2:]
assert np.shape(DZ)==np.shape(xa)[-3:]
    assert MASK.dtype == np.dtype('bool')
# determine min/max i/j of masked region
(imin, imax, jmin, jmax) = find_regional_coord_extent(MASK)
xa_reg = xa.where(MASK)[:,jmin:jmax+1,imin:imax+1]
AREA_reg = AREA.where(MASK)[jmin:jmax+1,imin:imax+1]
DZ_reg = DZ.where(MASK)[:,jmin:jmax+1,imin:imax+1]
integral, int_levels = xr_vol_int(xa_reg, AREA_reg, DZ_reg)
return integral, int_levels | 4840d66ec6164d16df56f9efeebf9d242bc60613 | 15,679 |
from typing import List
def test(
coverage: bool = typer.Option( # noqa: B008
default=False, help='Generate coverage information.'
),
html: bool = typer.Option( # noqa: B008
default=False, help='Generate an html coverage report.'
),
) -> List[Result]:
"""Run tests."""
coverage_flag = [f'--cov={PACKAGE_NAME}'] if coverage else []
return [
execute(['pytest', *coverage_flag, 'tests'], raise_error=False),
*(coverage_html() if coverage and html else ()),
] | 0b9fe2d265fd604ae32df29bcde71041a1f5dfcf | 15,680 |
from typing import Any
def _get_win_folder_from_registry(csidl_name: Any) -> Any:
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names."""
    if PY3:
        import winreg as _winreg
    else:
        import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir | 76dda1c71ea7184a327c0d32a35ecbf45579dd7c | 15,681 |
import numpy as np
def transitions2kernelreward(transitions, num_states, num_actions):
"""Transform a dictionary of transitions to kernel, reward matrices."""
kernel = np.zeros((num_states, num_actions, num_states))
reward = np.zeros((num_states, num_actions))
for (state, action), transition in transitions.items():
for data in transition:
kernel[state, action, data["next_state"]] += data["probability"]
reward[state, action] += data["reward"] * data["probability"]
return kernel, reward | 72165577890342cf1e3f01dbe853cd0024a0324e | 15,682 |
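# A minimal usage sketch for transitions2kernelreward above: a 2-state, 1-action
# chain where taking action 0 in state 0 moves to state 1 with reward 1.
transitions = {
    (0, 0): [{"next_state": 1, "probability": 1.0, "reward": 1.0}],
    (1, 0): [{"next_state": 1, "probability": 1.0, "reward": 0.0}],
}
kernel, reward = transitions2kernelreward(transitions, num_states=2, num_actions=1)
print(kernel[0, 0, 1], reward[0, 0])  # 1.0 1.0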
def _inline_svg(svg: str) -> str:
"""Encode SVG to be used inline as part of a data URI.
Replacements are not complete, but sufficient for this case.
See https://codepen.io/tigt/post/optimizing-svgs-in-data-uris
for details.
"""
replaced = (
svg
.replace('\n', '%0A')
.replace('#', '%23')
.replace('<', '%3C')
.replace('>', '%3E')
.replace('"', '\'')
)
return 'data:image/svg+xml,' + replaced | 4e3c25f5d91dd7691f42f9b9ace4d64a297eb32f | 15,683 |
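# A minimal usage sketch for _inline_svg above: embed a small SVG directly in an
# <img> tag via a data URI (double quotes inside the SVG become single quotes).
icon = '<svg xmlns="http://www.w3.org/2000/svg"><circle r="4" fill="#f00"/></svg>'
img_tag = '<img src="{}" alt="dot">'.format(_inline_svg(icon))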
def contact_infectivity_asymptomatic_00x40():
"""
Real Name: b'contact infectivity asymptomatic 00x40'
Original Eqn: b'contacts per person normal 00x40*infectivity per contact'
Units: b'1/Day'
Limits: (None, None)
Type: component
b''
"""
return contacts_per_person_normal_00x40() * infectivity_per_contact() | 2c71d8955078186636de0d0b369b37a71dcec3fc | 15,684 |
import click
def implemented_verified_documented(function):
""" Common story options """
options = [
click.option(
'--implemented', is_flag=True,
help='Implemented stories only.'),
click.option(
'--unimplemented', is_flag=True,
help='Unimplemented stories only.'),
click.option(
'--verified', is_flag=True,
help='Stories verified by tests.'),
click.option(
'--unverified', is_flag=True,
help='Stories not verified by tests.'),
click.option(
'--documented', is_flag=True,
help='Documented stories only.'),
click.option(
'--undocumented', is_flag=True,
help='Undocumented stories only.'),
click.option(
'--covered', is_flag=True,
help='Covered stories only.'),
click.option(
'--uncovered', is_flag=True,
help='Uncovered stories only.'),
]
for option in reversed(options):
function = option(function)
return function | 8c1dd5aaa0b962d96e9e90336183a29e2cf360db | 15,685 |
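# A minimal usage sketch for implemented_verified_documented above (hypothetical
# ``stories`` command): the decorator stacks the shared flags onto a click command,
# which then receives them as keyword arguments.
@click.command()
@implemented_verified_documented
def stories(implemented, unimplemented, verified, unverified,
            documented, undocumented, covered, uncovered):
    """List stories matching the selected filters."""
    click.echo(f"implemented only: {implemented}")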
import requests
def create_collection(self, name, url, sourceType, **options):
"""Creates a new collection from a web or S3 url. Automatically kick off default indexes"""
(endpoint, method) = self.endpoints['create_collection']
try:
headers = {'Authorization': self.token.authorization_header()}
data = {
'name': name,
'url': url,
'sourceType': sourceType,
'indexWithDefault': 'true' if options.get('indexWithDefault') else 'false'
}
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
except Exception as e:
raise error.APIConnectionError(message=e) | 37f7128526b5b7b1a22a9f946774f934827aa555 | 15,686 |
from typing import Union
import numpy as np
def rmse(estimated: np.ndarray, true: np.ndarray) -> Union[np.ndarray, None]:
"""
Calculate the root-mean-squared error between two arrays.
:param estimated: estimated solution
:param true: 'true' solution
:return: root-mean-squared error
"""
return np.sqrt(((estimated - true) ** 2).mean(axis=1)) | 10eb4974f5d95ca20b5336e2e9637eb6426802ae | 15,687 |
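# A minimal usage sketch for rmse above: per-row RMSE of two 2-D arrays
# (axis=1 assumes shape (n_series, n_samples)).
est = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
ref = np.array([[1.0, 2.0, 5.0], [0.0, 3.0, 4.0]])
print(rmse(est, ref))  # approximately [1.155 2.887]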
import numpy as np
def energy_com(data):
""" Calculate the energy center of mass for each day, and use this quantity
as an estimate for solar noon.
Function infers time stamps from the length of the first axis of the 2-D
data array.
:param data: PV power matrix as generated by `make_2d` from `solardatatools.data_transforms`
:return: A 1-D array, containing the solar noon estimate for each day in the data set
"""
data = np.copy(data)
data[np.isnan(data)] = 0
num_meas_per_hour = data.shape[0] / 24
x = np.arange(0, 24, 1. / num_meas_per_hour)
div1 = np.dot(x, data)
div2 = np.sum(data, axis=0)
com = np.empty_like(div1)
com[:] = np.nan
msk = div2 != 0
com[msk] = np.divide(div1[msk], div2[msk])
return com | 1b276b003f8527672fb95ad03b69536043a7ba17 | 15,688 |
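# A minimal usage sketch for energy_com above: a synthetic 2-day power matrix at
# 5-minute resolution (288 rows) with output symmetric about hour 12 yields a
# solar-noon estimate of 12 for each day.
t = np.arange(0, 24, 24 / 288)
day = np.clip(np.cos((t - 12) / 12 * np.pi), 0, None)
power = np.stack([day, day], axis=1)  # shape (288, 2): one column per day
print(energy_com(power))  # [12. 12.]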
import random
from collections import defaultdict
import numpy as np
def cifar_noniid(dataset, no_participants, alpha=0.9):
"""
Input: Number of participants and alpha (param for distribution)
Output: A list of indices denoting data in CIFAR training set.
Requires: cifar_classes, a preprocessed class-indice dictionary.
Sample Method: take a uniformly sampled 10-dimension vector as parameters for
dirichlet distribution to sample number of images in each class.
"""
np.random.seed(666)
random.seed(666)
cifar_classes = {}
for ind, x in enumerate(dataset):
_, label = x
if label in cifar_classes:
cifar_classes[label].append(ind)
else:
cifar_classes[label] = [ind]
per_participant_list = defaultdict(list)
no_classes = len(cifar_classes.keys())
class_size = len(cifar_classes[0])
datasize = {}
for n in range(no_classes):
random.shuffle(cifar_classes[n])
sampled_probabilities = class_size * np.random.dirichlet(
np.array(no_participants * [alpha]))
for user in range(no_participants):
no_imgs = int(round(sampled_probabilities[user]))
datasize[user, n] = no_imgs
sampled_list = cifar_classes[n][:min(len(cifar_classes[n]), no_imgs)]
per_participant_list[user].extend(sampled_list)
cifar_classes[n] = cifar_classes[n][min(len(cifar_classes[n]), no_imgs):]
train_img_size = np.zeros(no_participants)
for i in range(no_participants):
train_img_size[i] = sum([datasize[i,j] for j in range(10)])
clas_weight = np.zeros((no_participants,10))
for i in range(no_participants):
for j in range(10):
clas_weight[i,j] = float(datasize[i,j])/float((train_img_size[i]))
return per_participant_list, clas_weight | 8ecbb6df113d04b5ff737bb065bc4e578d06c69b | 15,689 |
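# A minimal usage sketch for cifar_noniid above with a synthetic stand-in for
# CIFAR-10: 1000 dummy (sample, label) pairs over the 10 classes, split across
# 5 participants with a Dirichlet(alpha=0.9) per-class allocation.
fake_dataset = [(None, label) for label in range(10) for _ in range(100)]
per_participant, class_weights = cifar_noniid(fake_dataset, no_participants=5, alpha=0.9)
print(len(per_participant[0]))  # dataset indices assigned to participant 0
print(class_weights[0])         # participant 0's per-class data proportions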
from typing import Dict
import pytest
@pytest.fixture(params=["ls5", "ls7", "ls8"])
def example_metadata(
request,
l1_ls5_tarball_md_expected: Dict,
l1_ls7_tarball_md_expected: Dict,
l1_ls8_folder_md_expected: Dict,
):
"""
Test against arbitrary valid eo3 documents.
"""
which = request.param
if which == "ls5":
return l1_ls5_tarball_md_expected
elif which == "ls7":
return l1_ls7_tarball_md_expected
elif which == "ls8":
return l1_ls8_folder_md_expected
raise AssertionError | fd67c395aa7d773bc5757ca5649fed60b023e14f | 15,690 |
from fastapi import FastAPI, Request
def register_middleware(app: FastAPI):
"""
请求响应拦截 hook
https://fastapi.tiangolo.com/tutorial/middleware/
:param app:
:return:
"""
@app.middleware("http")
async def logger_request(request: Request, call_next):
# https://stackoverflow.com/questions/60098005/fastapi-starlette-get-client-real-ip
logger.info(f"request:{request.method} url:{request.url}\nheaders:{request.headers.get('user-agent')}"
f"\nIP:{request.client.host}")
response = await call_next(request)
return response | 3455c7d406c405ae0df681d90bfcf57facddaa03 | 15,691 |
def asfarray(a, dtype=_nx.float_):
"""
Return an array converted to float type.
Parameters
----------
a : array_like
Input array.
dtype : string or dtype object, optional
Float type code to coerce input array `a`. If one of the 'int' dtype,
it is replaced with float64.
Returns
-------
out : ndarray, float
Input `a` as a float ndarray.
Examples
--------
>>> np.asfarray([2, 3])
array([ 2., 3.])
>>> np.asfarray([2, 3], dtype='float')
array([ 2., 3.])
>>> np.asfarray([2, 3], dtype='int8')
array([ 2., 3.])
"""
dtype = _nx.obj2sctype(dtype)
if not issubclass(dtype, _nx.inexact):
dtype = _nx.float_
return asarray(a,dtype=dtype) | 8d26529602853e36dd8dc619d6210c475dcb7cd0 | 15,692 |
def read_geotransform_s2(path, fname='MTD_TL.xml', resolution=10):
"""
Parameters
----------
path : string
location where the meta data is situated
fname : string
file name of the meta-data file
resolution : {float,integer}, unit=meters, default=10
resolution of the grid
Returns
-------
geoTransform : tuple, size=(1,6)
affine transformation coefficients
Notes
-----
The metadata is scattered over the file structure of Sentinel-2, L1C
.. code-block:: text
* S2X_MSIL1C_20XX...
├ AUX_DATA
├ DATASTRIP
│ └ DS_XXX_XXXX...
│ └ QI_DATA
│ └ MTD_DS.xml <- metadata about the data-strip
├ GRANULE
│ └ L1C_TXXXX_XXXX...
│ ├ AUX_DATA
│ ├ IMG_DATA
│ ├ QI_DATA
│ └ MTD_TL.xml <- metadata about the tile
├ HTML
├ rep_info
├ manifest.safe
├ INSPIRE.xml
└ MTD_MSIL1C.xml <- metadata about the product
The following acronyms are used:
- DS : datastrip
- TL : tile
- QI : quality information
- AUX : auxiliary
- MTD : metadata
- MSI : multi spectral instrument
- L1C : product specification,i.e.: level 1, processing step C
"""
root = get_root_of_table(path, fname)
# image dimensions
for meta in root.iter('Geoposition'):
res = float(meta.get('resolution'))
if res == resolution:
ul_X,ul_Y= float(meta[0].text), float(meta[1].text)
d_X, d_Y = float(meta[2].text), float(meta[3].text)
geoTransform = (ul_X, d_X, 0., ul_Y, 0., d_Y)
return geoTransform | 322a5bb149f0cc28dc813adebb3dad861e3a3218 | 15,693 |
from PyQt5.QtWidgets import QHBoxLayout, QWidget  # assumption: PyQt5 binding; use the PySide equivalents if needed
def embed_into_hbox_layout(w, margin=5):
"""Embed a widget into a layout to give it a frame"""
result = QWidget()
layout = QHBoxLayout(result)
layout.setContentsMargins(margin, margin, margin, margin)
layout.addWidget(w)
return result | a7a5182ac6e555f3adcbe7a9b11a6826517d08f4 | 15,694 |
def make_word_ds(grids, trfiles, bad_words=DEFAULT_BAD_WORDS):
"""Creates DataSequence objects containing the words from each grid, with any words appearing
in the [bad_words] set removed.
"""
ds = dict()
stories = grids.keys()
for st in stories:
grtranscript = grids[st].tiers[1].make_simple_transcript()
## Filter out bad words
goodtranscript = [x for x in grtranscript
if x[2].lower().strip("{}").strip() not in bad_words]
d = DataSequence.from_grid(goodtranscript, trfiles[st][0])
ds[st] = d
return ds | 43f405605d461dd6972f131c1474dad6b8acf35c | 15,695 |