content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---
import numpy as np
def ntu_tranform_skeleton(test):
"""
:param test: frames of skeleton within a video sample
"""
remove_frame = False
test = np.asarray(test)
transform_test = []
d = test[0, 0:3]
v1 = test[0, 1 * 3:1 * 3 + 3] - test[0, 0 * 3:0 * 3 + 3]
v1 = v1 / np.linalg.norm(v1)
v2_ = test[0, 12 * 3:12 * 3 + 3] - test[0, 16 * 3:16 * 3 + 3]
if np.equal(np.sum(v2_), 0):
v2_ += 1e-6
proj_v2_v1 = np.dot(v1.T, v2_) * v1 / np.linalg.norm(v1)
v2 = v2_ - np.squeeze(proj_v2_v1)
v2 = v2 / np.linalg.norm(v2)
v3 = np.cross(v2, v1) / np.linalg.norm(np.cross(v2, v1))
v1 = np.reshape(v1, (3, 1))
v2 = np.reshape(v2, (3, 1))
v3 = np.reshape(v3, (3, 1))
R = np.hstack([v2, v3, v1])
for i in range(test.shape[0]):
xyzs = []
for j in range(25):
if np.all(test[i][j * 3:j * 3 + 3] == 0):  # skip frames where this joint is untracked (all coordinates zero)
remove_frame = True
break
xyz = np.squeeze(np.matmul(np.linalg.inv(R), np.reshape(test[i][j * 3:j * 3 + 3] - d, (3, 1))))
xyzs.append(xyz)
if not remove_frame:
xyzs = np.reshape(np.asarray(xyzs), (-1, 75))
transform_test.append(xyzs)
else:
remove_frame = False
transform_test = np.squeeze(np.asarray(transform_test))
return transform_test.tolist() | 6f8e9e3ff0b6fa95b5f3b8c22aef2de05730a78c | 17,438 |
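# A minimal usage sketch, assuming each frame is a flat vector of 25 joints x 3
# coordinates (75 values); the random frames below are illustrative only.
frames = np.random.rand(10, 75) + 0.1  # offset so no joint is exactly all-zero
aligned = ntu_tranform_skeleton(frames.tolist())
print(len(aligned), "frames kept after alignment")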
import random
import time
import requests
import pandas as pd
def request_to_dataframe(UF):
"""Recebe string do estado, retona DataFrame com faixa de CEP do estado"""
#Try to load the proxy list. If after several attempts it still doesn't work, raise an exception and quit.
proxy_pool = proxy_list_to_cycle()
#Set initial values for post request's parameters.
pagini = 1
pagfim = 50
count = 1
while True:
#random sleep times to decrease the chances of being blocked.
num1 = random.randint(2,5)
time.sleep(num1)
try:
#select_proxy from proxy pool.
proxy = next(proxy_pool)
print(f"Proxy atual: {proxy}")
#Set the post fields according to the current page. For the first page the fields "Bairro", "qtdrow", "pagini" and "pagfim" are not used.
if count == 1:
post_fields = {"UF":UF, "Localidade":""}
full_dataframe = pd.DataFrame()
else:
post_fields = {"UF": UF, "Localidade":"**", "Bairro":"", "qtdrow":"50", "pagini":str(pagini),"pagfim": str(pagfim)}
#Makes the post request
request = make_post_request(post_fields, proxy)
#Extract the table with the CEP ranges from the HTML. On the first page the content is at index 1 of the page content; on later pages it is at index 0.
if count == 1:
UF_table = request_text_to_table(request = request, page_content_index = 1)
else:
UF_table = request_text_to_table(request = request, page_content_index = 0)
except requests.exceptions.ProxyError:
print("")
print(f"Error with the proxy: {proxy}")
print(f"Proxies left: {proxy_pool}")
print("Tentando novamente")
print("")
continue
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as err:
print("")
print('Servidor demorando muito')
print("Tentando novamente")
print("")
continue
except Exception as e:
print("")
print(e)
proxy_pool = proxy_list_to_cycle()
continue
#Turning the table into a dataframe.
current_page_df = table_to_df(UF_table)
#Concat DataFrames for each page into one DataFrame
full_dataframe = pd.concat([full_dataframe, current_page_df])
print(f"Total de dados coletados sobre o Estado {UF}: {full_dataframe.shape[0]} ")
#Leave the post-request loop for the current state once the last page is reached.
if current_page_df.shape[0] < 49:
print(f"Última página do estado:{UF}")
break
#Advance the page range and the page counter.
pagini += 50
pagfim += 50
count = count + 1
return full_dataframe | f71de0ec169f375fff1fba87d55aa8021b851990 | 17,439 |
import csv
def read_sto_mot_file(filename):
"""
Read a .sto or .mot file produced by OpenSim.
Parameters
----------
filename: path
Path of the file which has to be read
Returns
-------
dict: Dictionary with the file's data, keyed by column name
"""
data = {}
data_row = []
first_line = ()
end_header = False
with open(f"{filename}", "rt") as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
if len(row) == 0:
pass
elif row[0][:9] == "endheader":
end_header = True
first_line = idx + 1
elif end_header is True and row[0][:9] != "endheader":
row_list = row[0].split("\t")
if idx == first_line:
names = row_list
else:
data_row.append(row_list)
for r in range(len(data_row)):
for col in range(len(names)):
if r == 0:
data[f"{names[col]}"] = [float(data_row[r][col])]
else:
data[f"{names[col]}"].append(float(data_row[r][col]))
return data | 584cff26cb217d5fadfcea025ad58e431f46676a | 17,440 |
def verify_cef_labels(device, route, expected_first_label, expected_last_label=None, max_time=90,
check_interval=10):
""" Verify first and last label on route
Args:
device ('obj'): Device object
route ('str'): Route address
expected_first_label ('str'): Expected first label
expected_last_label ('str'): Expected last label
max_time ('int'): Max time in seconds checking output
check_interval ('int'): Interval in seconds of each checking
Return:
True/False
Raises:
None
"""
reqs = R(
[
'vrf',
'(.*)',
'address_family',
'(.*)',
'prefix',
'(.*{}.*)'.format(route),
'nexthop',
'(.*)',
'outgoing_interface',
'(.*)',
'(?P<val>.*)'
]
)
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
result = True
out = None
try:
out = device.parse('show ip cef {}'.format(route))
except SchemaEmptyParserError:
out = None
if not out:
result = False
log.info('Could not get information about show ip cef {}'.format(route))
timeout.sleep()
continue
found = find([out], reqs, filter_=False, all_keys=True)
if found:
keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
source=found)
for item in keys:
first_label = item.get('val',{}).get('outgoing_label', None)
if first_label and str(expected_first_label) not in str(first_label):
result = False
if expected_last_label:
sid = item.get('val',{}).get('sid', None)
if str(expected_last_label) != str(sid):
result = False
if result:
return True
timeout.sleep()
return False | c082920d0c93ec0c2897dc5a06c9d9d9452151af | 17,441 |
def fcat(*fs):
"""Concatenate a sequence of farrays.
The variadic *fs* input is a homogeneous sequence of functions or arrays.
"""
items = list()
for f in fs:
if isinstance(f, boolfunc.Function):
items.append(f)
elif isinstance(f, farray):
items.extend(f.flat)
else:
raise TypeError("expected Function or farray")
return farray(items) | 440a850ed17b8fc844cafaa765b24620a29fa0fd | 17,442 |
def get_path_to_spix(
name: str,
data_directory: str,
thermal: bool,
error: bool = False,
file_ending: str = "_6as.fits",
) -> str:
"""Get the path to the spectral index
Args:
name (str): Name of the galaxy
data_directory (str): dr2 data directory
thermal (bool): if True, return the path to the non-thermal spectral index map
error (bool): if True, return the path to the error map
file_ending (str, optional): File ending. Defaults to "_6as.fits".
Returns:
str: Path to the spectral index FITS file
"""
return f"{data_directory}/magnetic/{name}/{name}_spix{'_non_thermal' if thermal else ''}{'_error' if error else ''}{file_ending}" | bf8fdff001049ed0738ed856e8234c43ce4511b7 | 17,443 |
import numpy as np
def hexpos(nfibres, diam):
"""
Returns a list of [x,y] positions for a classic packed hex IFU configuration.
"""
positions = [[np.nan,np.nan] for i in range(nfibres)]
# FIND HEX SIDE LENGTH
nhex = 1
lhex = 1
while nhex < nfibres :
lhex += 1
nhex = 3*lhex**2-3*lhex+1
if nhex != nfibres:
lhex -= 1
nhex = 3*lhex**2-3*lhex+1
nextra = nfibres-nhex
n = 0
khex = 2*lhex-1 # NUMBER OF FIBRES IN THE CENTRAL ROW
xhex = (-khex//2)*diam
for i in range(khex) : # CENTRAL ROW
x = xhex+diam*i
positions[n] = [int(x*100)/100,0.]
n += 1
dx = 0.5*diam
dy = diam*np.sqrt(3./4.)
for i in range(1,lhex,1) : # FOR ALL ROWS PAIRS i
khex -= 1 # EACH ROW HAS 1 LESS THAN THE PREVIOUS
xhex += dx
for j in range(khex) : # FOR ALL FIBRES j IN ROWS i
x = xhex+diam*j
y = dy*i
positions[n] = [int(x*100)/100, int(y*100)/100]
positions[n+1] = [int(x*100)/100,-int(y*100)/100]
n += 2
return positions | 4dbf1209d7021c6a4defd1c58e420b362bdbf84c | 17,444 |
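# A small usage sketch, assuming a 7-fibre bundle (one central fibre plus one
# hexagonal ring) and a fibre diameter of 1.0; hexpos itself rounds to two decimals.
for x, y in hexpos(7, 1.0):
    print(x, y)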
from bs4 import BeautifulSoup
def parse_object_properties(html):
"""
Extract key-value pairs from the HTML markup.
"""
if isinstance(html, bytes):
html = html.decode('utf-8')
page = BeautifulSoup(html, "html5lib")
propery_ps = page.find_all('p', {'class': "list-group-item-text"})
obj_props_dict = {}
for p in propery_ps:
if 'data-name' in p.attrs:
key = p.attrs['data-name']
value = p.get_text().strip()
obj_props_dict[key] = value
return obj_props_dict | 8eb2d15cb5f46075ec44ff61265a8f70123a8646 | 17,445 |
def rgb2hex(r, g, b, normalised=False):
"""Convert RGB to hexadecimal color
:param r,g,b: the three color components (0-255, or 0-1 if normalised is True)
:return: a hex version of the RGB triple
.. doctest::
>>> from colormap.colors import rgb2hex
>>> rgb2hex(0,0,255, normalised=False)
'#0000FF'
>>> rgb2hex(0,0,1, normalised=True)
'#0000FF'
.. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
, :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
:func:`hls2rgb`
"""
if normalised:
r, g, b = _denormalise(r, g, b, mode="rgb")
r = int(r)
g = int(g)
b = int(b)
check_range(r, 0, 255)
check_range(g, 0, 255)
check_range(b, 0, 255)
return '#%02X%02X%02X' % (r, g, b) | 03afd09cc280d7731ca6b28098cf3f5605fddda7 | 17,446 |
def test_hookrelay_registry(pm):
"""Verify hook caller instances are registered by name onto the relay
and can be likewise unregistered."""
class Api:
@hookspec
def hello(self, arg):
"api hook 1"
pm.add_hookspecs(Api)
hook = pm.hook
assert hasattr(hook, "hello")
assert repr(hook.hello).find("hello") != -1
class Plugin:
@hookimpl
def hello(self, arg):
return arg + 1
plugin = Plugin()
pm.register(plugin)
out = hook.hello(arg=3)
assert out == [4]
assert not hasattr(hook, "world")
pm.unregister(plugin)
assert hook.hello(arg=3) == [] | 5f7733efbdbaf193b483c108838d2571ff686e52 | 17,447 |
def model_choices_from_protobuf_enum(protobuf_enum):
"""Protobufs Enum "items" is the opposite order djagno requires"""
return [(x[1], x[0]) for x in protobuf_enum.items()] | d3f5431293a9ab3fdf9a92794b1225a0beec40cc | 17,448 |
from sklearn.cluster import KMeans
def kmeans(boxes, k):
"""
Group into k clusters the BB in boxes.
http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans
:param boxes: The BB in format Nx4 where (x1,y1,x2,y2)
:param k: the number of clusters.
:return: k clusters with the element indexes of each clusters.
"""
model = KMeans(n_clusters=k).fit(boxes)
pred = model.labels_
indexes = [[]] * k
for i, v in enumerate(pred):
indexes[v] = indexes[v] + [i]
return indexes | 0d2bcfb2fb7d5639f95db92ac5aa5e73b1b27498 | 17,450 |
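# A usage sketch with made-up bounding boxes in (x1, y1, x2, y2) form; the two obvious
# groups should come back as two clusters of indexes.
boxes = [[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 80, 90], [52, 48, 79, 92]]
print(kmeans(boxes, k=2))  # e.g. [[0, 1], [2, 3]] (cluster order may vary)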
def observation_min_max_in_hex_grid_json(request: HttpRequest):
"""Return the min, max observations count per hexagon, according to the zoom level. JSON format.
This can be useful to dynamically color the grid according to the count
"""
zoom = extract_int_request(request, "zoom")
species_ids, datasets_ids, start_date, end_date, area_ids = filters_from_request(
request
)
sql_template = readable_string(
Template(
"""
WITH grid AS ($jinjasql_fragment_aggregated_grid)
SELECT MIN(count), MAX(count) FROM grid;
"""
).substitute(
jinjasql_fragment_aggregated_grid=JINJASQL_FRAGMENT_AGGREGATED_GRID
)
)
sql_params = {
"hex_size_meters": ZOOM_TO_HEX_SIZE[zoom],
"grid_extent_viewport": False,
"species_ids": species_ids,
"datasets_ids": datasets_ids,
"area_ids": area_ids,
}
if start_date:
sql_params["start_date"] = start_date.strftime(DB_DATE_EXCHANGE_FORMAT_PYTHON)
if end_date:
sql_params["end_date"] = end_date.strftime(DB_DATE_EXCHANGE_FORMAT_PYTHON)
j = JinjaSql()
query, bind_params = j.prepare_query(sql_template, sql_params)
with connection.cursor() as cursor:
cursor.execute(query, bind_params)
r = cursor.fetchone()
return JsonResponse({"min": r[0], "max": r[1]}) | 24a3f4846aceea2df0b724d6bada88315b815ee2 | 17,451 |
from bs4 import BeautifulSoup
def parseHtml(html):
"""
Parse the given HTML with BeautifulSoup.
Parameters
----------
html : str
HTML source string
Returns
-------
soup : BeautifulSoup
BeautifulSoup object
"""
soup = BeautifulSoup(html, 'html.parser')
return soup | e8d7a39a9881606d1dfee810ab1c2cecd11eaba2 | 17,454 |
import numpy as np
from scipy.stats import entropy
def am_score(probs_data, probs_gen):
"""
Calculate AM Score
"""
mean_data = np.mean(probs_data, axis=0)
mean_gen = np.mean(probs_gen, axis=0)
entropy_gen = np.mean(entropy(probs_gen, axis=1))
am_score = entropy(mean_data, mean_gen) + entropy_gen
return am_score | 5e3c3f42ed2402dd2e48ab1ff4f9ff13754d5c31 | 17,455 |
import cv2
import numpy as np
import torch
def load_image(path_image, size=None, bgr_mean=[103.939, 116.779, 123.68]):
"""
Loads and pre-process the image for SalGAN model.
args:
path_image: abs path to image
size: size to input to the network (it not specified, uses SalGAN predifined)
bgr_mean: mean values (BGR) to extract from images
returns:
torch tensor with processed image
original size of the image
"""
# image = cv2.imread(path_image)
image = cv2.imread(path_image) # BGR format
H, W, C = image.shape
if size is None:
size = SALGAN_RESIZE
image = cv2.resize(image, (size[1], size[0]), interpolation=cv2.INTER_AREA)
image = image.astype(np.float32)
bgr_mean=np.array(bgr_mean)
image -= bgr_mean
# convert to torch Tensor
image = torch.FloatTensor(image)
# swap channel dimensions
image = image.permute(2,0,1)
return image, (H, W) | 3a9ca220bb48f26d76ae35fd58897c8e59cdae0c | 17,456 |
def GetWsdlNamespace(version):
""" Get wsdl namespace from version """
return "urn:" + serviceNsMap[version] | bc75fa0e45c4ce4750898db75571de84aa302fc2 | 17,457 |
def is_PC(parcels):
"""
Dummy for Pinal County.
"""
return (parcels.county == 'PC').astype(int) | 60aa7dcc7adaefee177406c7e6bb963a5a4567d9 | 17,458 |
import hashlib
import requests
def check_password(password: str) -> int:
"""Use Have I Been Pwned to determine whether a password is bad.
If the request fails, this function will assume the password is fine, but
log an error so that administrators can diagnose it later.
:param password: The password to validate.
:return: A positive integer indicating the number of times the password has
been found in a breach. Zero is good, >0 is bad.
"""
sha1_hash = hashlib.sha1()
sha1_hash.update(password.encode("utf-8"))
digest = sha1_hash.hexdigest()
digest = digest.upper()
response = requests.get("https://api.pwnedpasswords.com/range/" + digest[0:5])
if response.status_code != 200:
# The docs say this shouldn't happen, but just in case.
return 0
return suffix_in_text(digest[5:], response.text) | 609dd29ee2b252452e31d64b18e835a39e1cbf22 | 17,459 |
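# The helper suffix_in_text is not shown above; a minimal sketch of what it could look
# like, assuming the standard Have I Been Pwned range-API response of one
# "<hash suffix>:<count>" entry per line.
def suffix_in_text(suffix: str, text: str) -> int:
    for line in text.splitlines():
        candidate, _, count = line.partition(":")
        if candidate.strip() == suffix:
            return int(count)
    return 0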
def rqpos(A):
"""
RQ decomp. of A, with phase convention such that R has only positive
elements on the main diagonal.
If A is an MPS tensor (d, chiL, chiR), it is reshaped and
transposed appropriately
before the throughput begins. In that case, Q will be a tensor
of the same size, while R will be a chiL x chiL matrix.
"""
Ashp = A.shape
if len(Ashp) == 2:
return rqmat(A)
elif len(Ashp) != 3:
print("A had invalid dimensions, ", A.shape)
A = fuse_right(A) #chiL, d*chiR
R, Q = qrmat(A, mode="economic")
Q = unfuse_right(Q, Ashp)
return (Q, R) | 026629b6638265daee83e8d8b5ab5b47b61e64d8 | 17,460 |
import os.path as osp
import torch
from collections import OrderedDict
def load_checkpoint(model,
filename,
map_location=None,
strict=False,
logger=None,
show_model_arch=True,
print_keys=True):
""" Note that official pre-trained models use `GroupNorm` in backbone.
"""
if not osp.isfile(filename):
raise IOError('{} is not a checkpoint file'.format(filename))
checkpoint = torch.load(filename, map_location=map_location)
# get state_dict from checkpoint
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
raise RuntimeError(
'No state_dict found in checkpoint file {}'.format(filename))
# strip prefix of state_dict
if list(state_dict.keys())[0].startswith('backbone.'):
state_dict = {}
for k, v in checkpoint['state_dict'].items():
new_k = k
if 'bbox_head.' in new_k:
if 'deconv_layers' in new_k:
new_k = new_k.replace("conv_offset_mask.", "conv_offset.")
new_k = new_k.replace("bbox_head.deconv_layers.", "neck.upsamples.")
if '.0.0.' in new_k:
new_k = new_k.replace(".0.0.", ".0.dcn.")
if '.0.1.' in new_k:
new_k = new_k.replace(".0.1.", ".0.dcn_bn.")
if '.1.0.' in new_k:
new_k = new_k.replace(".1.0.", ".1.dcn.")
if '.1.1.' in new_k:
new_k = new_k.replace(".1.1.", ".1.dcn_bn.")
if '.2.0.' in new_k:
new_k = new_k.replace(".2.0.", ".2.dcn.")
if '.2.1.' in new_k:
new_k = new_k.replace(".2.1.", ".2.dcn_bn.")
if '.shortcut_layers.' in new_k:
new_k = new_k.replace("bbox_head.shortcut_layers.", "neck.shortcuts.")
new_k = new_k.replace(".layers.", ".")
if '.hm.' in new_k:
new_k = new_k.replace(".hm.", ".ct_hm_head.")
if '.wh.' in new_k:
new_k = new_k.replace(".wh.", ".ct_wh_head.")
if print_keys:
print('> key = ', k, ' -> ', new_k)
state_dict[new_k] = v
if show_model_arch:
print('> model = ', model)
# load state_dict
if hasattr(model, 'module'):
load_state_dict(model.module, state_dict, strict, logger)
else:
load_state_dict(model, state_dict, strict, logger)
return checkpoint | 1d948f45f81c93af73394c891dc7e692c24378b3 | 17,461 |
import numpy as np
def basic_image_2():
"""
A 20x20 array with a square (3x3) feature
Equivalent to results of rasterizing basic_geometry with all_touched=True.
Borrowed from rasterio/tests/conftest.py
Returns
-------
numpy ndarray
"""
image = np.zeros((20, 20), dtype=np.uint8)
image[2:5, 2:5] = 1
return image | 8e83070721b38f2a886c7affb4aadc9a053f1748 | 17,462 |
from urllib import request, error
def download(url, verbose, user_agent='wswp', num_retries=2, decoding_format='utf-8', timeout=5):
"""
Function to download contents from a given url
Input:
url: str
string with the url to download from
user_agent: str
Default 'wswp'
num_retries: int
Number of times to retry downloading
if there is an error
verbose: bool
Print out url and errors
decoding: "utf-8"
Output:
returns: str
string with contents of given url
"""
# html_error = False
if verbose:
print('Downloading:', url)
headers = {'User-agent': user_agent}
request_obj = request.Request(url, headers=headers)
try:
with request.urlopen(request_obj, timeout=timeout) as response:
html = response.read()
except error.URLError as e:
if verbose:
print('Download error:', e.reason)
# html = None
# if num_retries > 0:
# if hasattr(e, 'code') and 500 <= e.code < 600:
# # retry 5XX HTTP errors
# return download(url, user_agent, num_retries - 1)[0]
# # elif hasattr(e, 'code') and e.code == 404:
# else:
# html_error = True
raise IOError(e.reason)
return html.decode(decoding_format) | 31592018b6f6f62154444dfc44b723efc1bd7f47 | 17,463 |
from struct import Struct
from typing import List, Union
def _write_deform(model: Union[BDF, OP2Geom], name: str,
loads: List[AEROS], ncards: int,
op2_file, op2_ascii, endian: bytes, nastran_format: str='nx') -> int:
"""
(104, 1, 81)
NX 2019.2
Word Name Type Description
1 SID I Deformation set identification number
2 EID I Element number
3 D RS Deformation
"""
key = (104, 1, 81)
nfields = 3
structi = Struct(endian + b'iif')
nbytes = write_header(name, nfields, ncards, key, op2_file, op2_ascii)
for load in loads:
data = [load.sid, load.eid, load.deformation]
#flutter = model.loads[flutter_id] # type: FLUTTER
#print(flutter.get_stats())
assert None not in data, data
op2_ascii.write(f' DEFORM data={data}\n')
op2_file.write(structi.pack(*data))
return nbytes | 55f2cb18336a940c550ee68bd5148c8d74f5bb93 | 17,464 |
def polygonize(geometries, **kwargs):
"""Creates polygons formed from the linework of a set of Geometries.
Polygonizes an array of Geometries that contain linework which
represents the edges of a planar graph. Any type of Geometry may be
provided as input; only the constituent lines and rings will be used to
create the output polygons.
Lines or rings that when combined do not completely close a polygon
will result in an empty GeometryCollection. Duplicate segments are
ignored.
This function returns the polygons within a GeometryCollection.
Individual Polygons can be obtained using ``get_geometry`` to get
a single polygon or ``get_parts`` to get an array of polygons.
MultiPolygons can be constructed from the output using
``pygeos.multipolygons(pygeos.get_parts(pygeos.polygonize(geometries)))``.
Parameters
----------
geometries : array_like
An array of geometries.
axis : int
Axis along which the geometries are polygonized.
The default is to perform a reduction over the last dimension
of the input array. A 1D array results in a scalar geometry.
**kwargs
For other keyword-only arguments, see the
`NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.
Returns
-------
GeometryCollection or array of GeometryCollections
See Also
--------
get_parts, get_geometry
polygonize_full
Examples
--------
>>> lines = [
... Geometry("LINESTRING (0 0, 1 1)"),
... Geometry("LINESTRING (0 0, 0 1)"),
... Geometry("LINESTRING (0 1, 1 1)"),
... ]
>>> polygonize(lines)
<pygeos.Geometry GEOMETRYCOLLECTION (POLYGON ((1 1, 0 0, 0 1, 1 1)))>
"""
return lib.polygonize(geometries, **kwargs) | 20b883734a1acedb1df3241e1815687640cac8cd | 17,465 |
import numpy as np
def slerp(input_latent1, input_latent2, interpolation_frames=100):
"""Spherical linear interpolation ("slerp", amazingly enough).
Parameters
----------
input_latent1, input_latent2 : NumPy arrays
Two arrays which will be interpolated between.
interpolation_frames : int, optional
Number of frame returned during interpolation.
Returns
-------
list
List of vectors of size interpolation_frames
"""
output_latents = []
for idx in range(interpolation_frames):
val = float(idx) / interpolation_frames
if np.allclose(input_latent1, input_latent2):
output_latents += [input_latent2]
continue
omega = np.arccos(np.dot(input_latent1 / np.linalg.norm(input_latent1), input_latent2 / np.linalg.norm(input_latent2)))
so = np.sin(omega)
output_latents += [np.sin((1.0 - val) * omega) / so * input_latent1 + np.sin(val * omega) / so * input_latent2]
return output_latents | 392b2e61f3369cf1e4038fac4240dca36f848dce | 17,466 |
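# A short usage sketch; the two latent vectors below are arbitrary.
z1 = np.array([1.0, 0.0, 0.0])
z2 = np.array([0.0, 1.0, 0.0])
frames = slerp(z1, z2, interpolation_frames=10)
print(len(frames))  # 10 interpolated vectors from z1 towards z2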
import datetime
def parent_version_config():
"""Return a configuration for an experiment."""
config = dict(
_id="parent_config",
name="old_experiment",
version=1,
algorithms="random",
metadata={
"user": "corneauf",
"datetime": datetime.datetime.utcnow(),
"user_args": ["--x~normal(0,1)"],
},
)
backward.populate_space(config)
return config | ff1f123ce06d687eb3b0031d6bc82c808918c46e | 17,467 |
import re
def sanitize_k8s_name(name):
"""From _make_kubernetes_name
sanitize_k8s_name cleans and converts the names in the workflow.
"""
return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-', name.lower())).lstrip('-').rstrip('-') | edaf6dc3083f0b57aeb1d95a66b5a7f8c1347b55 | 17,468 |
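# Illustrative inputs and the names they are sanitized to:
print(sanitize_k8s_name("My Workflow Step_1"))  # -> "my-workflow-step-1"
print(sanitize_k8s_name("--Already--Clean--"))  # -> "already-clean"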
def main():
""" Process command line arguments and run x86 """
run = X86Run()
result = run.Run()
return result | 7de61875207aa17bcf2ef87ff138540626fc7d2b | 17,469 |
def gen_key(uid, section='s'):
"""
Generate store key for own user
"""
return f'cs:{section}:{uid}'.encode() | 5e6386650f6bbaef681636424fd813f2df93fe58 | 17,470 |
import logging
import numpy as np
logger = logging.getLogger(__name__)
def convert_atom_to_voxel(coordinates: np.ndarray, atom_index: int,
box_width: float, voxel_width: float) -> np.ndarray:
"""Converts atom coordinates to an i,j,k grid index.
This function offsets molecular atom coordinates by
(box_width/2, box_width/2, box_width/2) and then divides by
voxel_width to compute the voxel indices.
Parameters
-----------
coordinates: np.ndarray
Array with coordinates of all atoms in the molecule, shape (N, 3).
atom_index: int
Index of an atom in the molecule.
box_width: float
Size of the box in Angstroms.
voxel_width: float
Size of a voxel in Angstroms
Returns
-------
indices: np.ndarray
A 1D numpy array of length 3 with `[i, j, k]`, the voxel coordinates
of specified atom.
"""
indices = np.floor(
(coordinates[atom_index] + box_width / 2.0) / voxel_width).astype(int)
if ((indices < 0) | (indices >= box_width / voxel_width)).any():
logger.warning('Coordinates are outside of the box (atom id = %s,'
' coords xyz = %s, coords in box = %s' %
(atom_index, coordinates[atom_index], indices))
return indices | 6f08b594f2012aa0ba4a7985d5f4e2049c4629d3 | 17,471 |
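# A worked sketch, assuming a 16 Angstrom box with 1 Angstrom voxels; the coordinates are made up.
coords = np.array([[0.0, 0.0, 0.0], [3.2, -4.7, 7.9]])
print(convert_atom_to_voxel(coords, 0, box_width=16.0, voxel_width=1.0))  # [8 8 8]
print(convert_atom_to_voxel(coords, 1, box_width=16.0, voxel_width=1.0))  # [11  3 15]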
import matplotlib.pyplot as plt
import wandb
from scipy.stats import norm
from sklearn.metrics import auc, det_curve
def plot_det_curve(y_true_arr, y_pred_proba_arr, labels_arr, pos_label=None, plot_thres_for_idx=None,
log_wandb=False):
"""Function for plotting DET curve
Args:
y_true_arr (list/np.array): list of all GT arrays
y_pred_proba_arr (list/np.array): list of all predicted probabilities
labels_arr (list/np.array): list of labels
pos_label (str, optional): What is the label of the positive class. Defaults to 'Yes'.
plot_thres_for_idx (int, optional): If true, best threshold (F1) is plotted
for the DET curve corresponding to this index. Defaults to None.
log_wandb (bool, optional): If true, figure is logged to W&B. Defaults to False.
Returns:
plt.Figure, plt.Axes: The tuple of figure and axes
"""
fig, ax = plt.subplots(figsize=(12, 8))
for i, (y_true, y_pred_proba) in enumerate(zip(y_true_arr, y_pred_proba_arr)):
fpr, fnr, _ = det_curve(
y_true, y_pred_proba[:, 1], pos_label=pos_label)
auc_score = auc(fpr, fnr)
ax.plot(norm.ppf(fpr), norm.ppf(fnr),
label=f'{labels_arr[i]} (AUC - {round(auc_score, 3)})')
if plot_thres_for_idx is not None:
y_true = y_true_arr[plot_thres_for_idx]
y_pred_proba = y_pred_proba_arr[plot_thres_for_idx]
_, idx = get_best_threshold_gmean(
y_true, y_pred_proba, pos_label=pos_label)
fpr, fnr, _ = det_curve(
y_true, y_pred_proba[:, 1], pos_label=pos_label)
ax.plot([norm.ppf(fpr[idx])], [norm.ppf(fnr[idx])], '-o',
c=ax.lines[plot_thres_for_idx].get_color(),
label=f'Best {labels_arr[plot_thres_for_idx]} Threshold (GMean)')
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('False Negative Rate')
ax.set_title('DET Curve')
ax.legend()
ax.grid()
ticks = [0.001, 0.01, 0.05, 0.20, 0.5, 0.80, 0.95, 0.99, 0.999]
tick_locations = norm.ppf(ticks)
tick_labels = [
'{:.0%}'.format(s) if (100*s).is_integer() else '{:.1%}'.format(s)
for s in ticks
]
ax.set_xticks(tick_locations)
ax.set_xticklabels(tick_labels)
ax.set_yticks(tick_locations)
ax.set_yticklabels(tick_labels)
if log_wandb:
wandb.log({"det_curve": [wandb.Image(fig)]})
plt.close(fig)
return fig, ax | 0437d700a9555b48b84cbb6e225bc88f1a57e34d | 17,473 |
import librosa
def harmonic_separation(audio, margin=3.0):
"""
Wraps librosa's `harmonic` function, and returns a new Audio object.
Note that this folds to mono.
Parameters
---------
audio : Audio
The Audio object to act on.
margin : float
The larger the margin, the larger the separation.
The default is `3.0`.
"""
harmonic = librosa.effects.harmonic(
librosa.to_mono(audio.raw_samples), margin=margin
)
harmonic_audio = Audio(raw_samples=harmonic, sample_rate=audio.sample_rate)
return harmonic_audio | 3ac3e0d87f719814ca021f594a21dde08e9fd02f | 17,474 |
def merge(
left,
right,
how: str = "inner",
on=None,
left_on=None,
right_on=None,
left_index: bool = False,
right_index: bool = False,
sort: bool = False,
suffixes=("_x", "_y"),
copy: bool = True,
indicator: bool = False,
validate=None,
): # noqa: PR01, RT01, D200
"""
Merge DataFrame or named Series objects with a database-style join.
"""
if isinstance(left, Series):
if left.name is None:
raise ValueError("Cannot merge a Series without a name")
else:
left = left.to_frame()
if not isinstance(left, DataFrame):
raise TypeError(
f"Can only merge Series or DataFrame objects, a {type(left)} was passed"
)
return left.merge(
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
) | da07b44fb80ee28cc8320c071876ef6ad573d974 | 17,475 |
def generate_modal(title, callback_id, blocks):
"""
Generate a modal view object using Slack's BlockKit
:param title: Title to display at the top of the modal view
:param callback_id: Identifier used to help determine the type of modal view in future responses
:param blocks: Blocks to add to the modal view
:return: View object (Dictionary)
"""
modal = {
"type": "modal",
"callback_id": callback_id,
"title": {
"type": "plain_text",
"text": title,
"emoji": False
},
"submit": {
"type": "plain_text",
"text": "Submit",
"emoji": False
},
"close": {
"type": "plain_text",
"text": "Cancel",
"emoji": False
},
"blocks": blocks
}
return modal | e0caeec1ab1cf82ed6f02ec77a984dcb25e329f5 | 17,476 |
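# A usage sketch with a single illustrative section block.
blocks = [{"type": "section", "text": {"type": "mrkdwn", "text": "Pick an option"}}]
view = generate_modal("Example Modal", "example_modal_callback", blocks)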
import cv2
import numpy as np
def dir_thresh(img, sobel_kernel=3, thresh=(0.7, 1.3)):
"""
#---------------------
# This function applies Sobel x and y,
# then computes the direction of the gradient,
# and then applies a threshold.
#
"""
# Take the gradient in x and y separately
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Take the absolute value of the x and y gradients
# and calculate the direction of the gradient
absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
# Create a binary mask where direction thresholds are met
binary_output = np.zeros_like(absgraddir)
binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 255
# Return the binary image
return binary_output.astype(np.uint8) | 0f5aefdbc9ffbe8e3678145e2926a4fbd7e01629 | 17,477 |
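# A usage sketch, assuming a grayscale input; "road.jpg" is an illustrative path.
gray = cv2.cvtColor(cv2.imread("road.jpg"), cv2.COLOR_BGR2GRAY)
mask = dir_thresh(gray, sobel_kernel=15, thresh=(0.7, 1.3))  # 255 where gradient direction is within the threshold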
from datetime import datetime, timedelta
def seconds_to_time( time ):
"""
Take a datetime/timedelta object or an int() number of seconds and return a
compact duration string such as '42s' or '2m5s'.
"""
if not time:
return "0s"
if isinstance( time, timedelta ) or isinstance( time, datetime ):
if time.days < 0:
diff = timedelta( )
else:
diff = time
else:
diff = timedelta( seconds = int(time if time >= 0 else 0) )
second_diff = diff.seconds
if second_diff < 0:
second_diff = 0
if second_diff > 60:
return "%sm%ss" % ( str( second_diff / 60 ), ( second_diff % 60 ) )
else:
return "%ss" % second_diff | 407fa93f782c8cff142be1ab721969d3e4c2b42f | 17,478 |
def load_txt_into_set(path, skip_first_line=True):
"""Load a txt file (one value per line) into a set."""
result = set()
file = open_file_dir_safe(path)
with file:
if skip_first_line:
file.readline()
for line in file:
line = line.strip()
result.add(line)
return result | 17ad3c15820595b72254dbe4c9097a8857511599 | 17,480 |
def failed(obj):
"""Returns True if ``obj`` is an instance of ``Fail``."""
return isinstance(obj, Fail) | 715fe3ae1154e3e5712b6f4535021b44e8020146 | 17,481 |
def linkCount(tupleOfLists, listNumber, lowerBound, upperBound):
"""Counts the number of links in one of the lists passed.
This function is a speciality function to aid in calculating
statistics involving the number of links that lie in a given
range. It is primarily intended as a private helper function. The
parameters are:
tupleOfLists -- usually a linkograph entry.
listNumber -- a list of the indicies in entry that should be
considered.
lowerBound -- the lowest index that should be considered.
upperBound -- the highest index that should be considered.
Example: a typical tupleOfLists is ({'A', 'B'}, {1,2}, {4,5}) a
listNumber of [1] would only consider the links in {1,2}, a
listNumber of [2] would only consider the links in {4,5} and a
listNumber of [1,2] would consider the links in both {1,2}, and
{4,5}.
"""
summation = 0
for index in listNumber:
summation += len({link for link in tupleOfLists[index]
if link >= lowerBound
and link <= upperBound})
return summation | 239fd8d3c01fe6c88444cfa7369459e3c76005dc | 17,482 |
from hashlib import md5
def encode_md5(plain_text):
"""
Encode the plain text by md5
:param plain_text:
:return: cipher text
"""
plain_text = plain_text + EXT_STRING
encoder = md5()
encoder.update(plain_text.encode('utf-8'))
return encoder.hexdigest() | ad88ebc12334c9438c38719cd7c836edb9736d3c | 17,483 |
import warnings
import numpy as np
def delta(x, y, assume_normal=True, percentiles=[2.5, 97.5],
min_observations=20, nruns=10000, relative=False, x_weights=1, y_weights=1):
"""
Calculates the difference of means between the samples (x-y) in a
statistical sense, i.e. with confidence intervals.
NaNs are ignored: treated as if they weren't included at all. This is done
because at this level we cannot determine what a NaN means. In some cases,
a NaN represents missing data that should be completely ignored, and in some
cases it represents inapplicable (like PCII for non-ordering customers) - in
which case the NaNs should be replaced by zeros at a higher level. Replacing
with zeros, however, would be completely incorrect for return rates.
Computation is done in form of treatment minus control, i.e. x-y
Args:
x (array_like): sample of a treatment group
y (array_like): sample of a control group
assume_normal (boolean): specifies whether normal distribution
assumptions can be made
percentiles (list): list of percentile values for confidence bounds
min_observations (integer): minimum number of observations needed
nruns (integer): only used if assume normal is false
relative (boolean): if relative==True, then the values will be returned
as distances below and above the mean, respectively, rather than the
absolute values. In this case, the interval is mean-ret_val[0] to
mean+ret_val[1]. This is more useful in many situations because it
corresponds with the sem() and std() functions.
x_weights (list): weights for the x vector, in order to calculate
the weighted mean and confidence intervals, which is equivalent
to the overall metric. This weighted approach is only relevant
for ratios.
y_weights (list): weights for the y vector, in order to calculate
the weighted mean and confidence intervals, which is equivalent
to the overall metric. This weighted approach is only relevant
for ratios.
Returns:
tuple:
* mu (float): mean value of the difference
* c_i (dict): percentile levels (index) and values
* ss_x (int): size of x excluding NA values
* ss_y (int): size of y excluding NA values
* _x (float): absolute mean of x
* _y (float): absolute mean of y
"""
# Checking if data was provided
if x is None or y is None:
raise ValueError('Please provide two non-None samples.')
# Coercing missing values to right format
_x = np.array(x, dtype=float) * x_weights
_y = np.array(y, dtype=float) * y_weights
x_nan = np.isnan(_x).sum()
y_nan = np.isnan(_y).sum()
if x_nan > 0:
warnings.warn('Discarding ' + str(x_nan) + ' NaN(s) in the x array!')
if y_nan > 0:
warnings.warn('Discarding ' + str(y_nan) + ' NaN(s) in the y array!')
ss_x = sample_size(_x)
ss_y = sample_size(_y)
# Checking if enough observations are left after dropping NaNs
if min(ss_x, ss_y) < min_observations:
# Set mean to nan
mu = np.nan
# Create nan dictionary
c_i = dict(list(zip(percentiles, np.empty(len(percentiles)) * np.nan)))
else:
# Computing the mean
mu = _delta_mean(_x, _y)
# Computing the confidence intervals
if assume_normal:
c_i = normal_sample_difference(x=_x, y=_y, percentiles=percentiles,
relative=relative)
else:
c_i, _ = bootstrap(x=_x, y=_y, percentiles=percentiles, nruns=nruns,
relative=relative)
# Return the result structure
return mu, c_i, ss_x, ss_y, np.nanmean(_x), np.nanmean(_y) | 37b742775777b5a0bd26f7e8fdf7a189a69b199f | 17,484 |
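# A usage sketch with synthetic samples; sample_size, _delta_mean, normal_sample_difference
# and bootstrap are helpers assumed to be available in the same module.
rng = np.random.RandomState(0)
treatment = rng.normal(loc=1.0, scale=2.0, size=500)
control = rng.normal(loc=0.8, scale=2.0, size=500)
mu, c_i, n_x, n_y, mean_x, mean_y = delta(treatment, control, percentiles=[2.5, 97.5])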
def CircleCircumference(curve_id, segment_index=-1):
"""Returns the circumference of a circle curve object
Parameters:
curve_id = identifier of a curve object
segment_index [opt] = identifies the curve segment if
curve_id identifies a polycurve
Returns:
The circumference of the circle if successful.
"""
return circle.Circumference | 7a9200b089cebab93cbea387a4dd92590157dc45 | 17,485 |
def generate_handshake(info_hash, peer_id):
"""
The handshake is a required message and must be the first message
transmitted by the client. It is (49+len(pstr)) bytes long in the form:
<pstrlen><pstr><reserved><info_hash><peer_id>
Where:
pstrlen: string length of <pstr>, as a single raw byte
pstr: string identifier of the protocol
reserved: eight (8) reserved bytes. All current implementations use all
zeroes. Each bit in these bytes can be used to change the behavior of the
protocol.
info_hash: 20-byte SHA1 hash of the info key in the meta info file. This is
the same info_hash that is transmitted in tracker requests.
peer_id: 20-byte string used as a unique ID for the client. This is usually
the same peer_id that is transmitted in tracker requests
In version 1.0 of the BitTorrent protocol:
pstrlen = 19 and pstr = "BitTorrent protocol".
:param info_hash:
:param peer_id:
:return:
"""
pstr = b"BitTorrent protocol"
pstrlen = bytes([len(pstr)])
reserved = b"\x00" * 8 # 8 zeroes
handshake = pstrlen + pstr + reserved + info_hash + peer_id
assert len(handshake) == 49 + len(pstr)
assert pstrlen == bytes([19])
return handshake | ae13462608f3e2ec47abdb12e87a3bc08faa1cba | 17,486 |
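# A usage sketch with dummy 20-byte values; a real client would use the SHA1 info hash
# from the torrent metainfo and its own peer id.
info_hash = b"\x12" * 20
peer_id = b"-PC0001-" + b"0" * 12
handshake = generate_handshake(info_hash, peer_id)
print(len(handshake))  # 68 bytes: 49 + len(b"BitTorrent protocol")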
from functools import wraps
def tokenizer_decorator(func, **kwargs):
"""
This decorator wraps around a tokenizer function.
It adds the token to the info dict and removes the found token from the given name.
"""
if not callable(func):
raise TypeError(f"func {func} not callable")
@wraps(func)
def wrapper(name, info, **kwargs):
try:
if ("patterns" and "token_name") in kwargs:
token = func(name, **kwargs)
elif "reference_date" in kwargs:
token = func(name, reference_date=kwargs.get("reference_date", None))
elif "template_file_found" in kwargs:
token = func(
name, template_file_found=kwargs.get("template_file_found", None)
)
else:
token = func(name)
except TypeError as ex:
logger.error(f"func: {func.__name__}, name: {name}\n{kwargs}")
raise TokenizerError(ex) from ex
# return name, info
except Exception as ex:
logger.error(f"func: {func.__name__}, name: {name}\n{kwargs}")
raise TokenizerError(ex) from ex
# return name, info
if not token:
# logger.warning(f'Wrapper no token found for {func}, {name}')
return name, info
str_token_values = [i for i in token.values() if isinstance(i, str)]
str_token_values_in_name = [i for i in str_token_values if i in name]
if str_token_values:
for val in str_token_values_in_name:
val_is_subset = [
i
for i in str_token_values_in_name
if val in i and len(i) > len(val)
]
if not val_is_subset:
name = replace_and_strip(name, val, **kwargs)
info.update(**token)
# print("wrapper token:",info,'\nname',name)
return name, info
return wrapper | d1827ab75a12f923c6da69927323d9c5013124c0 | 17,487 |
def reverse_complement( seq ):
"""
Biological reverse complementation. Case in sequences are retained, and
IUPAC codes are supported. Code modified from:
http://shootout.alioth.debian.org/u32/program.php?test=revcomp&lang=python3&id=4
"""
return seq.translate(_nt_comp_table)[::-1] | 86229dfeceecb7e0d2e1215b25074c35fbd38792 | 17,488 |
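# _nt_comp_table is defined elsewhere; a minimal sketch covering the unambiguous bases
# (a full table would also map the IUPAC ambiguity codes), assuming str.maketrans.
_nt_comp_table = str.maketrans("ACGTacgt", "TGCAtgca")
print(reverse_complement("ATGCgc"))  # "gcGCAT"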
def computeLPS(s, n):
"""
Compute the longest proper prefix-suffix (KMP failure function) length for s.
"""
prev = 0 # length of the previous longest prefix suffix
lps = [0]*(n)
i = 1
# the loop calculates lps[i] for i = 1 to n-1
while i < n:
if s[i] == s[prev]:
prev += 1
lps[i] = prev
i += 1
else:
# This is tricky. Consider the example.
# AAACAAAA and i = 7. The idea is similar
# to search step.
if prev != 0:
prev = lps[prev-1]
# Also, note that we do not increment i here
else:
lps[i] = 0
i += 1
print(lps)
return lps[n-1] | 8b4374c9ac29f59cf1f4b0e6e07628776828c11a | 17,489 |
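# Worked examples of the KMP failure-function construction implemented above.
print(computeLPS("AAACAAAA", 8))  # prints [0, 1, 2, 0, 1, 2, 3, 3], returns 3
print(computeLPS("ABABAB", 6))    # prints [0, 0, 1, 2, 3, 4], returns 4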
def roundedCorner(pc, p1, p2, r):
"""
Based on Stackoverflow C# rounded corner post
https://stackoverflow.com/questions/24771828/algorithm-for-creating-rounded-corners-in-a-polygon
"""
def GetProportionPoint(pt, segment, L, dx, dy):
factor = float(segment) / L if L != 0 else segment
return PVector((pt.x - dx * factor), (pt.y - dy * factor))
# Vector 1
dx1 = pc.x - p1.x
dy1 = pc.y - p1.y
# Vector 2
dx2 = pc.x - p2.x
dy2 = pc.y - p2.y
# Angle between vector 1 and vector 2 divided by 2
angle = (atan2(dy1, dx1) - atan2(dy2, dx2)) / 2
# The length of segment between angular point and the
# points of intersection with the circle of a given radius
tng = abs(tan(angle))
segment = r / tng if tng != 0 else r
# Check the segment
length1 = sqrt(dx1 * dx1 + dy1 * dy1)
length2 = sqrt(dx2 * dx2 + dy2 * dy2)
min_len = min(length1, length2)
if segment > min_len:
segment = min_len
max_r = min_len * abs(tan(angle))
else:
max_r = r
# Points of intersection are calculated by the proportion between
# length of vector and the length of the segment.
p1Cross = GetProportionPoint(pc, segment, length1, dx1, dy1)
p2Cross = GetProportionPoint(pc, segment, length2, dx2, dy2)
# Calculation of the coordinates of the circle
# center by the addition of angular vectors.
dx = pc.x * 2 - p1Cross.x - p2Cross.x
dy = pc.y * 2 - p1Cross.y - p2Cross.y
L = sqrt(dx * dx + dy * dy)
d = sqrt(segment * segment + max_r * max_r)
circlePoint = GetProportionPoint(pc, d, L, dx, dy)
# StartAngle and EndAngle of arc
startAngle = atan2(p1Cross.y - circlePoint.y, p1Cross.x - circlePoint.x)
endAngle = atan2(p2Cross.y - circlePoint.y, p2Cross.x - circlePoint.x)
# Sweep angle
sweepAngle = endAngle - startAngle
# Some additional checks
if sweepAngle < 0:
startAngle, endAngle = endAngle, startAngle
sweepAngle = -sweepAngle
if sweepAngle > PI:
startAngle, endAngle = endAngle, startAngle
sweepAngle = TWO_PI - sweepAngle
# Draw result using graphics
# noStroke()
with pushStyle():
noStroke()
beginShape()
vertex(p1.x, p1.y)
vertex(p1Cross.x, p1Cross.y)
vertex(p2Cross.x, p2Cross.y)
vertex(p2.x, p2.y)
endShape(CLOSE)
line(p1.x, p1.y, p1Cross.x, p1Cross.y)
line(p2.x, p2.y, p2Cross.x, p2Cross.y)
arc(circlePoint.x, circlePoint.y, 2 * max_r, 2 * max_r,
startAngle, startAngle + sweepAngle, OPEN) | e77497918025deba211469616d210c23483e2152 | 17,490 |
import numpy as np
from sklearn.utils import check_random_state
def synthetic_data(n_points=1000, noise=0.05,
random_state=None, kind="unit_cube",
n_classes=None, n_occur=1, legacy_labels=False, **kwargs):
"""Make a synthetic dataset
A sample dataset generators in the style of sklearn's
`sample_generators`. This adds other functions found in the Matlab
toolkit for Dimensionality Reduction
Parameters
----------
kind: {'unit_cube', 'swiss_roll', 'broken_swiss_roll', 'twinpeaks', 'difficult'}
The type of synthetic dataset
legacy_labels: boolean
If True, try and reproduce the labels from the Matlab Toolkit for
Dimensionality Reduction. (overrides any value in n_classes)
This usually only works if algorithm-specific coefficient choices
(e.g. `height` for swiss_roll) are left at their default values
n_points : int, optional (default=1000)
The total number of points generated.
n_classes: None or int
If None, target vector is based on underlying manifold coordinate
If int, the manifold coordinate is bucketized into this many classes.
n_occur: int
Number of occurrences of a given class (along a given axis)
ignored if n_classes = None
noise : double or None (default=0.05)
Standard deviation of Gaussian noise added to the data.
If None, no noise is added.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset shuffling and noise.
Pass an int for reproducible output across multiple function calls.
Additional Parameters
---------------------
difficult:
n_dims: int (default 5)
Number of dimensions to embed
swiss_roll:
broken_swiss_roll:
height: float (default 30.)
scaling to apply to y dimension
Returns
-------
X : array of shape [n_points, 2]
The generated samples.
y : array of shape [n_points]
The labels for class membership of each point.
"""
generator = check_random_state(random_state)
metadata = {
"synthetic_type": kind,
"n_points": n_points,
"noise": noise
}
if kind == 'unit_cube':
x = 2 * (generator.rand(n_points) - 0.5)
y = 2 * (generator.rand(n_points) - 0.5)
z = 2 * (generator.rand(n_points) - 0.5)
X = np.column_stack((x, y, z))
shift = np.array([1.])
scale = np.array([2.])
labels = checkerboard(X, shift_factors=shift, scale_factors=scale, n_occur=n_occur, n_classes=n_classes)
metadata['manifold_coords'] = np.concatenate((x,y,z), axis=0).T
elif kind == 'twinpeaks':
inc = 1.5 / np.sqrt(n_points)
x = np.arange(-1, 1, inc)
xy = 1 - 2 * generator.rand(2, n_points)
z = np.sin(np.pi * xy[0, :]) * np.tanh(3 * xy[1, :])
X = np.vstack([xy, z * 10.]).T # + noise * generator.randn(n_points, 3)
t = xy.T
metadata['manifold_coords'] = t
if legacy_labels is True:
labels = np.remainder(np.sum(np.round((X + np.tile(np.min(X, axis=0), (X.shape[0], 1))) / 10.), axis=1), 2)
elif n_classes is None:
labels = 1-z
else:
shift = np.array([1.])
scale = np.array([2.])
labels = checkerboard(t, shift_factors=shift, scale_factors=scale,
n_classes=n_classes, n_occur=n_occur)
elif kind == 'swiss_roll':
height = kwargs.pop('height', 30.)
t = 1.5 * np.pi * (1.0 + 2.0 * generator.rand(n_points))
y = height * generator.rand(*t.shape)
manifold_coords = np.column_stack((t, y))
X = _parameterized_swiss_roll(manifold_coords)
metadata['manifold_coords'] = manifold_coords
if legacy_labels is True:
labels = np.remainder(np.round(t / 2.) + np.round(height / 12.), 2)
else:
scale = np.array([3*np.pi])
shift = np.array([-1.5*np.pi])
labels = checkerboard(t, shift_factors=shift, scale_factors=scale,
n_classes=n_classes, n_occur=n_occur)
elif kind == 'broken_swiss_roll':
height = kwargs.pop('height', 30.)
np1 = int(np.ceil(n_points / 2.0))
t1 = 1.5 * np.pi * (1.0 + 2.0 * (generator.rand(np1) * 0.4))
t2 = 1.5 * np.pi * (1.0 + 2.0 * (generator.rand(n_points - np1) * 0.4 + 0.6))
t = np.concatenate((t1, t2))
y = height * generator.rand(*t.shape)
manifold_coords = np.column_stack((t, y))
X = _parameterized_swiss_roll(manifold_coords)
metadata['manifold_coords'] = manifold_coords
if legacy_labels is True:
labels = np.remainder(np.round(t / 2.) + np.round(height / 12.), 2)
else:
scale = np.array([3*np.pi])
shift = np.array([-1.5*np.pi])
labels = checkerboard(t, shift_factors=shift, scale_factors=scale,
n_classes=n_classes, n_occur=n_occur)
elif kind == 'difficult':
n_dims = kwargs.pop("n_dims", 5)
points_per_dim = int(np.round(float(n_points ** (1.0 / n_dims))))
l = np.linspace(0, 1, num=points_per_dim)
t = np.array(list(_combn(l, n_dims)))
X = np.vstack((np.cos(t[:,0]),
np.tanh(3 * t[:,1]),
t[:,0] + t[:,2],
t[:,3] * np.sin(t[:,1]),
np.sin(t[:,0] + t[:,4]),
t[:,4] * np.cos(t[:,1]),
t[:,4] + t[:,3],
t[:,1],
t[:,2] * t[:,3],
t[:,0])).T
tt = 1 + np.round(t)
# Generate labels for dataset (2x2x2x2x2 checkerboard pattern)
labels = np.remainder(tt.sum(axis=1), 2)
metadata['n_dims'] = n_dims
metadata['manifold_coords'] = t
else:
raise Exception(f"Unknown synthetic dataset type: {kind}")
if noise is not None:
X += noise * generator.randn(*X.shape)
return X, labels, metadata | 740b5d2f708e177ce703f2124806ab7bd0079a09 | 17,491 |
def _load_default_profiles():
# type: () -> Dict[str, Any]
"""Load all the profiles installed on the system."""
profiles = {}
for path in _iter_default_profile_file_paths():
name = _get_profile_name(path)
if _is_abstract_profile(name):
continue
definition = _read_profile_definition(path)
try:
recursively_expand_base_profiles(definition)
except Exception:
logger.error("Could not expand base profile %s", path)
raise
profiles[name] = {'definition': definition}
return profiles | b53411dce6bdf3baba876a626b023a2b93e48c99 | 17,493 |
import torch
from torch import nn, optim
def train_model(model, train_loader, valid_loader, learning_rate, device,
epochs):
"""Trains a model with train_loader and validates it with valid_loader
Arguments:
model -- Model to train
train_loader -- Data to train
valid_loader -- Data to validate the training
learning_rate -- Learning rate
device -- Device where the computations will be executed
epochs -- Number of epochs to train
Returns:
The trained model
"""
# Our loss function will be 'negative log likelihood'
criterion = nn.NLLLoss()
# We only want to optimize our classifier parameters
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
# makes PyTorch use 'device' to compute
model.to(device)
criterion.to(device)
print_every = 25
step = 0
for epoch in range(epochs): # for each epoch
running_loss = 0
print("Epoch: {}/{}".format(epoch+1, epochs))
print("==========")
for inputs, labels in train_loader: # for each batch of data / label
step += 1
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad() # resets gradients to zero
output = model.forward(inputs) # feed forward
loss = criterion(output, labels) # calculate the loss
loss.backward() # back propagate the loss
optimizer.step() # do gradient descent (update weights)
running_loss += loss.item()
if step % print_every == 0:
model.eval() # Turn off dropout to make the validation pass
# Turn off gradients for the validation pass
with torch.no_grad():
valid_loss, accuracy = validate_model(model, valid_loader,
criterion, device)
print("Training Loss: {:.3f}.. ".format(
running_loss/print_every),
"Validation Loss: {:.3f}.. ".format(
valid_loss/len(valid_loader)),
"Validation Accuracy: {:.3f}".format(
accuracy/len(valid_loader)))
running_loss = 0
model.train() # enable dropout back
model.eval() # Turn off dropout to make the validation pass
with torch.no_grad(): # Turn off gradients for the validation pass
valid_loss, accuracy = validate_model(
model, valid_loader, criterion, device)
print("\nEpoch: {}/{}.. ".format(epoch+1, epochs),
"Validation Loss: {:.3f}.. ".format(
valid_loss/len(valid_loader)),
"Validation Accuracy: {:.3f}\n".format(
accuracy/len(valid_loader)))
model.train() # enable dropout back
return model | 3addd258adddcbb43d846dae09d943d9a7016b69 | 17,494 |
def get_weapon_techs(fighter=None):
"""If fighter is None, return list of all weapon techs.
If fighter is given, return list of weapon techs fighter has."""
if fighter is None:
return weapon_tech_names
else:
return [t for t in fighter.techs if get_tech_obj(t).is_weapon_tech] | bbda76e55fdbe80e9883ff05746256fb56767136 | 17,495 |
def xml_to_values(l):
"""
Return a list of values from a list of XML data potentially including null values.
"""
new = []
for element in l:
if isinstance(element, dict):
new.append(None)
else:
new.append(to_float(element))
return new | 30b6af4101f45697e0f074ddedcd051aba37cb99 | 17,496 |
def _get_options(raw_options, apply_config):
"""Return parsed options."""
if not raw_options:
return parse_args([''], apply_config=apply_config)
if isinstance(raw_options, dict):
options = parse_args([''], apply_config=apply_config)
for name, value in raw_options.items():
if not hasattr(options, name):
raise ValueError("No such option '{}'".format(name))
# Check for very basic type errors.
expected_type = type(getattr(options, name))
if not isinstance(expected_type, (str, unicode)):
if isinstance(value, (str, unicode)):
raise ValueError(
"Option '{}' should not be a string".format(name))
setattr(options, name, value)
else:
options = raw_options
return options | e88014f0f5497e72973afbdf669cf14bf4537051 | 17,497 |
def csvdir_equities(tframes=None, csvdir=None):
"""
Generate an ingest function for custom data bundle
This function can be used in ~/.zipline/extension.py
to register bundle with custom parameters, e.g. with
a custom trading calendar.
Parameters
----------
tframes: tuple, optional
The data time frames, supported timeframes: 'daily' and 'minute'
csvdir : string, optional, default: CSVDIR environment variable
The path to the directory of this structure:
<directory>/<timeframe1>/<symbol1>.csv
<directory>/<timeframe1>/<symbol2>.csv
<directory>/<timeframe1>/<symbol3>.csv
<directory>/<timeframe2>/<symbol1>.csv
<directory>/<timeframe2>/<symbol2>.csv
<directory>/<timeframe2>/<symbol3>.csv
Returns
-------
ingest : callable
The bundle ingest function
Examples
--------
This code should be added to ~/.zipline/extension.py
.. code-block:: python
from zipline.data.bundles import csvdir_equities, register
register('custom-csvdir-bundle',
csvdir_equities(["daily", "minute"],
'/full/path/to/the/csvdir/directory'))
"""
return CSVDIRBundle(tframes, csvdir).ingest | 6dc4b76e52f7512074eb044d5505c904a323eb69 | 17,498 |
import numpy as np
def normalize_skeleton(joints):
"""Normalizes joint positions (NxMx2 or NxMx3, where M is 14 or 16) from parent to child order. Each vector from parent to child is normalized with respect to it's length.
:param joints: Position of joints (NxMx2) or (NxMx3)
:type joints: numpy.ndarray
:return: Normalzed position of joints (NxMx2) or (NxMx3)
:rtype: numpy.ndarray
"""
assert len(joints.shape) == 3
assert joints.shape[1] == 14 or joints.shape[1] == 16
assert joints.shape[-1] == 2 or joints.shape[-1] == 3
hip = 0
if joints.shape[1] == 14:
names = NAMES_14
else:
names = NAMES_16
neck = names.index('Neck')
joints_ = joints.copy()
joints_ -= joints_[:, :1, :]
spine = joints_[:, neck, :] - joints_[:, hip, :]
spine_norm = np.linalg.norm(spine, axis=1).reshape(-1, 1)
adjacency = adjacency_list(joints_.shape[1])
queue = []
queue.append(0)
while len(queue) > 0:
current = queue.pop(0)
for child in adjacency[current]:
queue.append(child)
prnt_to_chld = joints[:, child, :] - joints[:, current, :]
prnt_to_chld_norm = np.linalg.norm(prnt_to_chld, axis=1).reshape(-1, 1)
prnt_to_chld_unit = prnt_to_chld / prnt_to_chld_norm
joints_[:, child, :] = joints_[:, current, :] + (prnt_to_chld_unit * (prnt_to_chld_norm / (spine_norm + 1e-8)))
return joints_ | 579862d05814eaa9b04f3e1a4812e727b02175aa | 17,499 |
def is_valid_instruction(instr: int, cpu: Cpu = Cpu.M68000) -> bool:
"""Check if an instruction is valid for the specified CPU type"""
return bool(lib.m68k_is_valid_instruction(instr, cpu.value)) | ae528e503e24698507971334d33dc6abf0f4c39c | 17,501 |
def docker_available():
"""Check if Docker can be run."""
returncode = run.run(["docker", "images"], return_code=True)
return returncode == 0 | 43ce2c7f5cb16657b4607faa5eac61b20e539e53 | 17,503 |
from datetime import datetime
def is_bc(symbol):
"""
Determine whether the symbol shows a divergence (背驰).
:param symbol:
:return:
"""
bars = get_kline(symbol, freq="30min", end_date=datetime.now(), count=1000)
c = CZSC(bars, get_signals=get_selector_signals)
factor_ = Factor(
name="背驰选股",
signals_any=[
Signal("30分钟_倒1笔_三笔形态_向下盘背_任意_任意_0"),
Signal("30分钟_倒1笔_基础形态_底背驰_任意_任意_0"),
Signal("30分钟_倒1笔_类买卖点_类一买_任意_任意_0"),
Signal("30分钟_倒1笔_类买卖点_类二买_任意_任意_0"),
],
signals_all=[
# Signal("30分钟_倒0笔_潜在三买_构成中枢_任意_任意_0")
]
)
# c.open_in_browser()
if factor_.is_match(c.signals):
return True
else:
return False | 07dc2f01374f95544898375b8bc02b6128d70090 | 17,504 |
import time
import calendar
from datetime import timedelta
def IEEE2030_5Time(dt_obj, local=False):
""" Return a proper IEEE2030_5 TimeType object for the dt_obj passed in.
From IEEE 2030.5 spec:
TimeType Object (Int64)
Time is a signed 64 bit value representing the number of seconds
since 0 hours, 0 minutes, 0 seconds, on the 1st of January, 1970,
in UTC, not counting leap seconds.
:param dt_obj: Datetime object to convert to IEEE2030_5 TimeType object.
:param local: dt_obj is in UTC or Local time. Default to UTC time.
:return: Time XSD object
:raises: If utc_dt_obj is not UTC
"""
if dt_obj.tzinfo is None:
raise Exception("IEEE 2030.5 times should be timezone aware UTC or local")
if dt_obj.utcoffset() != timedelta(0) and not local:
raise Exception("IEEE 2030.5 TimeType should be based on UTC")
if local:
return xsd_models.TimeType(valueOf_=int(time.mktime(dt_obj.timetuple())))
else:
return xsd_models.TimeType(valueOf_=int(calendar.timegm(dt_obj.timetuple()))) | fbb9466e927f1162226760efbe609bf3e779e163 | 17,505 |
def learning_rate_schedule(params, global_step):
"""Handles learning rate scaling, linear warmup, and learning rate decay.
Args:
params: A dictionary that defines hyperparameters of model.
global_step: A tensor representing current global step.
Returns:
A tensor representing current learning rate.
"""
base_learning_rate = params['base_learning_rate']
lr_warmup_step = params['lr_warmup_step']
first_lr_drop_step = params['first_lr_drop_step']
second_lr_drop_step = params['second_lr_drop_step']
batch_size = params['batch_size']
scaling_factor = params['gpu_num'] * batch_size / ssd_constants.DEFAULT_BATCH_SIZE
adjusted_learning_rate = base_learning_rate * scaling_factor
learning_rate = (tf.cast(global_step, dtype=tf.float32) /
lr_warmup_step) * adjusted_learning_rate
lr_schedule = [[1.0, lr_warmup_step], [0.1, first_lr_drop_step],
[0.01, second_lr_drop_step]]
for mult, start_global_step in lr_schedule:
learning_rate = tf.where(global_step < start_global_step, learning_rate,
adjusted_learning_rate * mult)
return learning_rate | b88d67dd0d241d26bf183e90e3d3c215e0abd957 | 17,506 |
from functools import wraps
from pyinstrument import Profiler  # assumed profiler backend providing output_html()
def profile(step):
"""
Profiles a Pipeline step and save the results as HTML file in the project output
directory.
Usage:
@profile
def step(self):
pass
"""
@wraps(step)
def wrapper(*arg, **kwargs):
pipeline_instance = arg[0]
project = pipeline_instance.project
with Profiler() as profiler:
result = step(*arg, **kwargs)
output_file = project.get_output_file_path("profile", "html")
output_file.write_text(profiler.output_html())
pipeline_instance.log(f"Profiling results at {output_file.resolve()}")
return result
return wrapper | f300000a0471a2439ae951a2d33b8a03aa61b333 | 17,508 |
from modin.pandas.series import Series
def make_dataframe_wrapper(DataFrame):
"""
Prepares a "delivering wrapper" proxy class for DataFrame.
It makes DF.loc, DF.groupby() and other methods listed below deliver their
arguments to remote end by value.
"""
conn = get_connection()
class ObtainingItems:
def items(self):
return conn.obtain_tuple(self.__remote_end__.items())
def iteritems(self):
return conn.obtain_tuple(self.__remote_end__.iteritems())
ObtainingItems = _deliveringWrapper(Series, mixin=ObtainingItems)
class DataFrameOverrides(_prepare_loc_mixin()):
@classmethod
def _preprocess_init_args(
cls,
data=None,
index=None,
columns=None,
dtype=None,
copy=None,
query_compiler=None,
):
(data,) = conn.deliver((data,), {})[0]
return (), dict(
data=data,
index=index,
columns=columns,
dtype=dtype,
copy=copy,
query_compiler=query_compiler,
)
@property
def dtypes(self):
remote_dtypes = self.__remote_end__.dtypes
return ObtainingItems(__remote_end__=remote_dtypes)
DeliveringDataFrame = _deliveringWrapper(
DataFrame,
[
"groupby",
"agg",
"aggregate",
"__getitem__",
"astype",
"drop",
"merge",
"apply",
"applymap",
],
DataFrameOverrides,
"DataFrame",
)
return DeliveringDataFrame | a2d523f6e9cb9d23ae722195a091d8e2b68139cc | 17,509 |
def download_cmems_ts(lats, lons, t0, tf, variables, fn=None):
"""Subset CMEMS output using OpenDAP
:params:
lats = [south, north] limits of bbox
lons = [west, east] limits of bbox
t0 = datetime for start of time series
tf = datetime for end of time series
variables = list of variables in ["zos", "uo", "vo", "so", "thetao"]
:returns:
Xarray Dataset of selected variables
"""
validate_datetime(t0)
validate_datetime(tf)
try:
validate_cmems_variable(variables)
except NameError:
raise NameError("Input 'variable' needs to be specified")
_variables, zos = fix_zos(variables)
request = (
"https://my.cmems-du.eu/thredds/dodsC/cmems_mod_glo_phy_my_0.083_P1D-m?"
"longitude[0:1:4319],latitude[0:1:2040],depth[0:1:49],time[0:1:10012]"
)
# query dataset to get coordinates and convert bbox to indices for OpenDAP
coords = xr.open_dataset(request)
lon_ll = cmemslon2index(lons[0], coords)  # lower-left longitude of bbox
lon_ur = cmemslon2index(lons[1], coords)
lat_ll = cmemslat2index(lats[0], coords)
lat_ur = cmemslat2index(lats[1], coords)
t0i = time2index(t0, coords)
tfi = time2index(tf, coords)
request = (
f"https://my.cmems-du.eu/thredds/dodsC/cmems_mod_glo_phy_my_0.083_P1D-m?"
f"longitude[{lon_ll}:1:{lon_ur}],latitude[{lat_ll}:1:{lat_ur}],depth[0:1:49],time[{t0i}:1:{tfi}],"
)
request = request + "".join(
[
f"{variable}[{t0i}:1:{tfi}][0:1:49][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}]"
for variable in _variables
]
)
# append surf_el if present
if zos is not None:
request = (
request + f"{zos}[{t0i}:1:{tfi}][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}]"
)
ds = xr.open_dataset(request)
if fn is not None:
ds.to_netcdf(fn)
return ds | b97de3a7428d6e2b50ab36b28e47afe479c24042 | 17,510 |
def construct_gpu_info(statuses):
""" util for unit test case """
m = {}
for status in statuses:
m[status.minor] = status
m[status.uuid] = status
return m | b8b2f41799b863d2e22066005b901f17a610d858 | 17,511 |
def load_data_time_machine(batch_size, num_steps, use_random_iter=False,
max_tokens=10000):
"""Return the iterator and the vocabulary of the time machine dataset."""
data_iter = SeqDataLoader(batch_size, num_steps, use_random_iter,
max_tokens)
return data_iter, data_iter.vocab | ed9d6b63c34cf9d1a750daabbdb81e03e467e939 | 17,512 |
def scan_paths(paths, only_detect, recursive, module_filter):
"""
Scans paths for known bots and dumps information from them
@rtype : dict
@param paths: list of paths to check for files
@param only_detect: only detect known bots, don't process configuration information
@param recursive: recursively traverse folders
@param module_filter: if not None, only modules in list will be used
@return: dictionary of file to dictionary of information for each file
"""
results = {}
while len(paths) != 0:
file_path = abspath(paths[0])
del paths[0]
if isfile(file_path):
with open(file_path, mode='rb') as file_handle:
file_content = file_handle.read()
r = scan_file_data(file_content, module_filter, only_detect)
if r is not None:
results[file_path] = r
elif isdir(file_path):
for p in listdir(file_path):
p = join(file_path, p)
if isfile(p) or (isdir(p) and recursive):
paths.append(p)
return results | f58216f1ed5955828738689fa67522a8cc0e497a | 17,513 |
from numpy import iterable, zeros, zeros_like
def generate_masks(input_size, output_size=1, observed=None):
"""
Generates some basic input and output masks.
If C{input_size} is an integer, the number of columns of the mask will be
that integer. If C{input_size} is a list or tuple, a mask with multiple channels
is created, which can be used with RGB images, for example.
By default, the input region will cover the upper half of the mask, also known as a
*causal neighborhood*. If any of the channels is observed, the input region in that
channel will cover a full square neighborhood around the output region.
Examples:
>>> input_mask, output_mask = generate_masks(8, 2)
>>> input_mask, output_mask = generate_masks([3, 7, 7], 1, [1, 0, 0])
@type input_size: C{int} / C{list}
@param input_size: determines the size of the input region
@type output_size: C{int}
@param output_size: determines the size of the output region
@type observed: C{list}
@param observed: can be used to indicate channels which are observed
@rtype: C{tuple}
@return: one input mask and one output mask
"""
if not iterable(input_size):
if iterable(observed):
input_size = [input_size] * len(observed)
else:
input_size = [input_size]
if observed is None:
observed = [False] * len(input_size)
if len(observed) != len(input_size):
raise ValueError("Incompatible `input_size` and `observed`.")
num_channels = len(input_size)
num_cols = max(input_size)
num_rows = num_cols if any(observed) else (num_cols + 1) // 2 + output_size // 2
input_mask = zeros([num_rows, num_cols, num_channels], dtype='bool')
output_mask = zeros_like(input_mask)
tmp1 = (num_cols + 1) // 2
tmp2 = output_size // 2
tmp3 = (output_size + 1) // 2
for k in range(num_channels):
offset = tmp1 - (input_size[k] + 1) // 2
if observed[k]:
input_mask[
offset:num_cols - offset,
offset:num_cols - offset, k] = True
else:
input_mask[offset:tmp1 + tmp2, offset:num_cols - offset, k] = True
for i in range(output_size):
input_mask[
tmp1 + tmp2 - i - 1,
tmp1 - tmp3:, k] = False
output_mask[
tmp1 + tmp2 - i - 1,
tmp1 - tmp3:tmp1 + output_size // 2, k] = True
if input_mask.shape[2] == 1:
input_mask.resize(input_mask.shape[0], input_mask.shape[1])
output_mask.resize(output_mask.shape[0], output_mask.shape[1])
return input_mask, output_mask | dee12176f72a158e9f39036981fa1dbd6be81817 | 17,514 |
def average(time_array,height_array,data_array,height_bin_size=100,time_bin_size=3600):
"""
average: function that averages the radar signal by height and time
Args:
time_array: numpy 1d array with timestamps
height_array: numpy 1d array with height range
data_array: numpy 2d array size len(time_array) X len(height_array)
height_bin_size: the averaging window in meters
time_bin_size: the averaging window in seconds
Returns:
time: returns the new time dimension
height: returns the new height dimension
averaged: the data averaged size len(time) X len(height)
"""
past_time = time_array[0]
bins_time = []
for time in time_array:
if past_time + time_bin_size > time:
continue
else:
bins_time.append((past_time,time))
past_time = time
bins_time.append((time,time_array[-1]))
bin_range_time = [bini[0] for bini in bins_time]
pixel_in_bin_time = []
for time in time_array:
pixel_in_bin_time.append(find_bin(time,bins_time))
max_val_time = np.max(pixel_in_bin_time)
pixel_in_bin_time = np.array(pixel_in_bin_time)
bins = create_bins(height_array[0],height_array[-1],height_bin_size)
bin_range = [bini[0] for bini in bins]
pixel_in_bin = []
for height in height_array:
pixel_in_bin.append(find_bin(height,bins))
max_val = np.max(pixel_in_bin)
pixel_in_bin = np.array(pixel_in_bin)
averaged = np.zeros((len(bins_time),len(bins)))
for i in range(max_val_time+1):
for j in range(max_val+1):
min_time = np.where(pixel_in_bin_time==i)[0][0]
max_time = np.where(pixel_in_bin_time==i)[0][-1]
min_height = np.where(pixel_in_bin==j)[0][0]
max_height = np.where(pixel_in_bin==j)[0][-1]
temp_selection = data_array[min_time:max_time,min_height:max_height]
temp_average = np.nanmean(temp_selection)
averaged[i,j] = temp_average
time = bin_range_time
height = bin_range
return time,height,averaged | 710f4c8821cffe110511bda0dd3d4fd3052f33a9 | 17,516 |
def new_pitch():
"""
route to new pitch form
:return:
"""
form = PitchForm()
if form.validate_on_submit():
title = form.title.data
pitch = form.pitch.data
category = form.category.data
fresh_pitch = Pitch(title=title, pitch_actual=pitch, category=category, user_id=current_user.id)
fresh_pitch.save_pitch()
return redirect(url_for('.profile', uname=current_user.username))
title = 'New pitch'
return render_template('new_pitch.html' , title=title, pitch_form=form) | a7724149a7e6b9d545559fef643dcc8fd2f5c731 | 17,518 |
def get_entity_bios(seq,id2label):
"""Gets entities from sequence.
note: BIOS
Args:
seq (list): sequence of labels.
Returns:
list: list of (chunk_type, chunk_start, chunk_end).
Example:
>>> seq = ['B-PER', 'I-PER', 'O', 'S-LOC']
>>> get_entity_bios(seq, id2label=None)
[['PER', 0, 1], ['LOC', 3, 3]]
"""
chunks = []
chunk = [-1, -1, -1]
for indx, tag in enumerate(seq):
if not isinstance(tag, str):
tag = id2label[tag]
if tag.startswith("S-"):
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
chunk[1] = indx
chunk[2] = indx
chunk[0] = tag.split('-')[1]
chunks.append(chunk)
chunk = [-1, -1, -1]
if tag.startswith("B-"):
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
chunk[1] = indx
chunk[0] = tag.split('-')[1]
elif tag.startswith('I-') and chunk[1] != -1:
_type = tag.split('-')[1]
if _type == chunk[0]:
chunk[2] = indx
if indx == len(seq) - 1:
chunks.append(chunk)
else:
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
return chunks | 25219d29ba8ecb2d44ca5a8245059432f3220d8d | 17,519 |
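A quick self-contained check; id2label is only consulted when the tags are not already strings, so None is fine here.
seq = ['B-PER', 'I-PER', 'O', 'S-LOC', 'B-ORG', 'I-ORG']
print(get_entity_bios(seq, id2label=None))
# -> [['PER', 0, 1], ['LOC', 3, 3], ['ORG', 4, 5]]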
import torch
import copy
import numpy as np
def early_stopping_train(model, X, Y_, x_test, y_test, param_niter=20001, param_delta=0.1):
"""Arguments:
- X: model inputs [NxD], type: torch.Tensor
- Y_: ground truth [Nx1], type: torch.Tensor
- param_niter: number of training iterations
- param_delta: learning rate
"""
best_model, best_accuracy = None, 0
N, D = X.shape[0], X.shape[1]
C = max(Y_) + 1 # nr_classes
optimizer = torch.optim.SGD(model.parameters(), lr=param_delta)
prev_loss, count = None, 0
for i in range(param_niter):
model.forward(X)
model.get_loss(X, Y_)
model.loss.backward()
if i % 1 == 0:
print("iteration {}: loss {}".format(i, model.loss))
optimizer.step()
optimizer.zero_grad()
if prev_loss is not None: # exit if no move was made for 100 iterations
if abs(model.loss - prev_loss) < 1e-9:
count += 1
else:
count = 0
if count > 100:
break
prev_loss = model.loss
# evaluate the model on the test dataset
probs = eval(model, x_test)
Y = np.argmax(probs, axis=1)
accuracy, recall, matrix = data.eval_perf_multi(Y, y_test)
print("Current accuracy on testset: ", accuracy)
if accuracy > best_accuracy:
best_model = copy.copy(model)
best_accuracy = accuracy
return best_model | 83a8acdd24a4fde3db77184c3b4a99a1c1783349 | 17,520 |
def my_vtk_grid_props(vtk_reader):
"""
Get grid properties from vtk_reader instance.
Parameters
----------
vtk_reader: vtk Reader instance
vtk Reader containing information about a vtk-file.
Returns
----------
step_x : float
For regular grid, stepsize in x-direction.
step_y : float
For regular grid, stepsize in y-direction.
npts_x : float
Number of cells in x-direction.
npts_y : float
Number of cells in y-direction.
low_m_x : float
Middle of first x cell
high_m_x : float
Middle of last x cell
low_m_y : float
Middle of first y cell
high_m_y : float
Middle of last y cell
low_x : float
Edge of first x cell
high_x : float
Edge of last x cell
low_y : float
Edge of first y cell
high_y : float
Edge of last y cell
Notes
----------
0: step_x
1: step_y
2: npts_x
3: npts_y
4: low_m_x - Middle of cells: first x cell
5: high_m_x - Middle of cells: last x cell
6: low_m_y - Middle of cells: first y cell
7: high_m_y - Middle of cells: last y cell
8: low_x - Edge of cells: first x cell
9: high_x - Edge of cells: last x cell
10: low_y - Edge of cells: first y cell
11: high_y - Edge of cells: last y cell
"""
vtk_output = vtk_reader.GetOutput()
# Read attributes of the vtk-Array
# num_cells = vtk_output.GetNumberOfCells()
# num_points = vtk_output.GetNumberOfPoints()
# whole_extent = vtk_output.GetExtent()
grid_bounds = vtk_output.GetBounds()
grid_dims = vtk_output.GetDimensions()
# Grid information
step_x = (grid_bounds[1] - grid_bounds[0]) / (grid_dims[0] - 1)
step_y = (grid_bounds[3] - grid_bounds[2]) / (grid_dims[1] - 1)
if grid_bounds[0] == 0.0: # CELLS
npts_x = grid_dims[0] - 1
npts_y = grid_dims[1] - 1
low_m_x = grid_bounds[0] + 0.5 * step_x
high_m_x = grid_bounds[1] - 0.5 * step_x
low_m_y = grid_bounds[2] + 0.5 * step_y
high_m_y = grid_bounds[3] - 0.5 * step_y
low_x = grid_bounds[0]
high_x = grid_bounds[1]
low_y = grid_bounds[2]
high_y = grid_bounds[3]
else: # POINTS
npts_x = grid_dims[0]
npts_y = grid_dims[1]
low_m_x = grid_bounds[0]
high_m_x = grid_bounds[1]
low_m_y = grid_bounds[2]
high_m_y = grid_bounds[3]
low_x = grid_bounds[0] - 0.5 * step_x
high_x = grid_bounds[1] + 0.5 * step_x
low_y = grid_bounds[2] - 0.5 * step_y
high_y = grid_bounds[3] + 0.5 * step_y
return step_x, step_y, \
npts_x, npts_y, \
low_m_x, high_m_x, low_m_y, high_m_y, \
low_x, high_x, low_y, high_y | 26ef8a51648ea487372ae06b54c8ccf953aeb414 | 17,521 |
def make_env(stack=True, scale_rew=True):
"""
Create an environment with some standard wrappers.
"""
env = grc.RemoteEnv('tmp/sock')
env = SonicDiscretizer(env)
if scale_rew:
env = RewardScaler(env)
env = WarpFrame(env)
if stack:
env = FrameStack(env, 4)
return env | 347376103fa00d4d43714f30097b0d129ef45f43 | 17,522 |
def plot_distr_cumsum(result, measure="degree", scale=['log', 'log'], figures=[], prefix="", show_std=True, show_figs=True, mode="safe", colors=('r', 'b')):
""" plots the cummulative distribution functions
special care has to be taken because averaging these is not trivial in comparison to e.g. degree
"""
maj_name=f'{measure}_distr_cumsum_maj'
min_name=f'{measure}_distr_cumsum_min'
maj_x = f'{measure}_distr_cumsum_maj_x'
min_x = f'{measure}_distr_cumsum_min_x'
tmp=result.groupby(['homophily']).agg({maj_name : list, min_name:list, min_x:list, maj_x:list})
maj = []
for x,y in zip(tmp[maj_x], tmp[maj_name]):
x_out, mean_out, std_out = cumsum_mean(x,y, mode=mode)
maj.append((x_out, mean_out, std_out))
mino = []
for x,y in zip(tmp[min_x], tmp[min_name]):
x_out, mean_out, std_out = cumsum_mean(x,y,mode=mode)
mino.append((x_out, mean_out, std_out))
if len(figures)==0:
figures = [plt.Figure() for _ in range(len(tmp.index))]
for fig in figures:
if len(fig.axes)==0:
ax = fig.add_subplot()
for h, (maj_xx, maj_vals, maj_std), (min_xx, min_vals, min_std), fig in zip(tmp.index, maj, mino, figures):
plt.figure()
x=min_xx
x2=maj_xx
ax = fig.axes[0]
ax.set_xscale(scale[0])
ax.set_yscale(scale[1])
if show_std:
ax.errorbar(x,min_vals, yerr=min_std, label=prefix + "min", color=colors[0])
ax.errorbar(x2,maj_vals,yerr=maj_std, label=prefix + "maj", color=colors[1])
else:
ax.plot(x,min_vals,label=prefix + "min", color=colors[0])
ax.plot(x2,maj_vals, label=prefix + "maj", color=colors[1])
#print(maj_vals)
ax.set_xlabel(f"{measure}")
ax.set_ylabel(f"{measure} distrubution")
ax.set_title(f"h={h}")
ax.legend()
return figures | 6b0a526cf8f09dd66ac7b0988c9445d57416be21 | 17,523 |
def state_space_model(A, z_t_minus_1, B, u_t_minus_1):
"""
Calculates the state at time t given the state at time t-1 and
the control inputs applied at time t-1
"""
state_estimate_t = (A @ z_t_minus_1) + (B @ u_t_minus_1)
return state_estimate_t | 0e04207028df8d4162c88aad6606e792ef618f5a | 17,526 |
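A worked example with illustrative numbers, using a constant-velocity model in which the control input acts as an acceleration.
import numpy as np
A = np.array([[1.0, 1.0], [0.0, 1.0]])   # state transition: position += velocity
B = np.array([[0.5], [1.0]])             # how the control input enters the state
z_prev = np.array([[0.0], [2.0]])        # position 0, velocity 2
u_prev = np.array([[1.0]])               # acceleration command
print(state_space_model(A, z_prev, B, u_prev))   # -> [[2.5], [3.0]]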
def get_post(id , check_author=True):
"""Get a post and its author by id.
Checks that the id exists and optionally that the current user is
the author.
:param id: id of post to get
:param check_author: require the current user to be the author
:return: the post with author information
:raise 404: if a post with the given id doesn't exist
:raise 403: if the current user isn't the author
"""
#u = User.query(User ).filter(User.posts.id == id ).first()
post = db_session.query(Post).filter(Post.id == id).first()
if post is None:
abort(404, "Post id {0} doesn't exist.".format(id))
if check_author and post['author_id'] != g.user['id']:
abort(403)
return post | a15ac3816d134f1dd89bf690c2f800e412d7219b | 17,527 |
def get_pixel(x, y):
"""Get the RGB value of a single pixel.
:param x: Horizontal position from 0 to 7
:param y: Vertical position from 0 to 7
"""
global _pixel_map
return _pixel_map[y][x] | 47a77090683a5b8e7178b3c7d83ae5b1a090342f | 17,528 |
from typing import Callable
import re
def check_for_NAs(func: Callable) -> Callable:
"""
This decorator function checks whether the input string qualifies as an
NA. If it does it will return True immediately. Otherwise it will run
the function it decorates.
"""
def inner(string: str, *args, **kwargs) -> bool:
if re.fullmatch("^|0|NA$", string) is not None:
return True
else:
return func(string, *args, **kwargs)
return inner | e9336cca2e6cd69f81f6aef1d11dc259492774f8 | 17,529 |
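A small usage sketch with a made-up validator; empty strings, "0" and "NA" short-circuit to True, anything else falls through to the wrapped function.
@check_for_NAs
def is_valid_year(string: str) -> bool:
    return string.isdigit() and len(string) == 4
print(is_valid_year("NA"))    # True  (treated as missing, validator never runs)
print(is_valid_year("1999"))  # True  (validator runs and passes)
print(is_valid_year("abc"))   # False (validator runs and fails)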
from typing import Union
from typing import Callable
def integrateEP_w0_ode( w_init: np.ndarray, w0: Union[ Callable, np.ndarray ], w0prime: Union[ Callable, np.ndarray ],
B: np.ndarray, s: np.ndarray, s0: float = 0, ds: float = None,
R_init: np.ndarray = np.eye( 3 ), Binv: np.ndarray = None, arg_check: bool = True,
wv_only: bool = False ) -> (np.ndarray, np.ndarray, np.ndarray):
""" integrate Euler-Poincare equation for needle shape sensing for given intrinsic angular deformation
using scipy.integrate
Author: Dimitri Lezcano
Args:
w_init: 3-D initial deformation vector
w0: Callable function or N x 3 intrinsic angular deformation
w0prime: Callable function or N x 3 d/ds w0
B: 3 x 3 needle stiffness matrix
s: the arclengths desired (Not implemented)
s0: (Default = 0) the initial length to start off with
ds: (Default = None) the arclength increments desired
Binv: (Default = None) inv(B) Can be provided for numerical efficiency
R_init: (Default = 3x3 identity) SO3 matrix for initial rotation angle
arg_check: (Default = True) whether to check if the arguments are valid
wv_only: (Default = False) whether to only integrate wv or not.
Return:
(N x 3 needle shape, N x 3 x 3 SO3 matrices of orientations), N x 3 angular deformation)
(None, None, N x 3 angular deformation) if 'wv_only' is True
"""
if arg_check:
assert (w_init.size == 3)
w_init = w_init.flatten()
assert (B.shape == (3, 3))
assert (geometry.is_SO3( R_init ))
assert (s0 >= 0)
# if
# argument parsing
s = s[ s >= s0 ]
if Binv is None:
Binv = np.linalg.inv( B )
elif arg_check:
assert (Binv.shape == (3, 3))
# setup intrinsic curvature functions
if callable( w0 ):
w0_fn = w0
else:
w0_fn = interpolate.interp1d( s, w0.T, fill_value='extrapolate' )
# w0_fn = lambda t: jit_linear_interp1d( t, w0, s )
if callable( w0prime ):
w0prime_fn = w0prime
else:
w0prime_fn = interpolate.interp1d( s, w0prime.T, fill_value='extrapolate' )
# w0prime_fn = lambda t: jit_linear_interp1d( t, w0prime, s )
# perform integration
ode_EP = lambda s, wv: differential_EPeq( wv, s, w0_fn, w0prime_fn, B, Binv )
wv = odeint( ode_EP, w_init, s, full_output=False, hmin=ds/2, h0=ds/2, tfirst=True )
# wv = solve_ivp( ode_EP, (s0, s.max()), w_init, method='RK45', t_eval=s,
# first_step=ds ) # 'RK23' for speed (all slower than odeint)
# integrate angular deviation vector in order to get the pose
if wv_only:
pmat, Rmat = None, None
else:
pmat, Rmat = integratePose_wv( wv, s=s, s0=s0, ds=ds, R_init=R_init )
return pmat, Rmat, wv | 75a042b94ac46b7ecbb86e23abacde0d4034b9fe | 17,530 |
def change_coordinate_frame(keypoints, window, scope=None):
"""Changes coordinate frame of the keypoints to be relative to window's frame.
Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint
coordinates from keypoints of shape [num_instances, num_keypoints, 2]
to be relative to this window.
An example use case is data augmentation: where we are given groundtruth
keypoints and would like to randomly crop the image to some window. In this
case we need to change the coordinate frame of each groundtruth keypoint to be
relative to this new window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window we should change the coordinate frame to.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'ChangeCoordinateFrame'):
win_height = window[2] - window[0]
win_width = window[3] - window[1]
new_keypoints = scale(keypoints - [window[0], window[1]], 1.0 / win_height,
1.0 / win_width)
return new_keypoints | 2aa69a55d7f8177784afb41f50cd7ccfbffdbde3 | 17,531 |
import random
def _get_name(filename: str) -> str:
"""
Function returns a random name (first or last)
from the filename given as the argument.
Internal function. Not to be imported.
"""
LINE_WIDTH: int = 20 + 1 # 1 for \n
with open(filename) as names:
try:
total_names = int(next(names))
nth_name_to_read: int = random.randint(1, total_names)
# Here 'nth_name_to_read' lines are skipped that include
# the first line (with no of lines) and n-1 names
# Next read would always be the desired name
bytes_to_seek: int = LINE_WIDTH * nth_name_to_read
_ = names.seek(bytes_to_seek) # Now skipped n - 1 names
name: str = next(names).strip()
return name
except StopIteration:
# Return empty string if the file is empty
return '' | 1b4cd75488c6bd1814340aee5669d1631318e77f | 17,533 |
def map_to_udm_section_associations(enrollments_df: DataFrame) -> DataFrame:
"""
Maps a DataFrame containing Canvas enrollments into the Ed-Fi LMS Unified Data
Model (UDM) format.
Parameters
----------
enrollments_df: DataFrame
Pandas DataFrame containing all Canvas enrollments
Returns
-------
DataFrame
A LMSSectionAssociations-formatted DataFrame
DataFrame columns are:
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a the section-association by
the source system
SourceSystem: The system code or name providing the user data
EnrollmentStatus: Possible values are Active, Expired, Invite pending, Request Pending, Archived
LMSUserSourceSystemIdentifier: A unique number or alphanumeric code assigned to a user by the source
system
LMSSectionSourceSystemIdentifier: A unique number or alphanumeric code assigned to a section by the
source system
CreateDate: Date/time at which the record was first retrieved
LastModifiedDate: Date/time when the record was modified, or when first retrieved
SourceCreateDate: Date this record was created in the LMS
SourceLastModifiedDate: Date this record was last updated in the LMS
"""
if enrollments_df.empty:
return enrollments_df
assert "id" in enrollments_df.columns
assert "enrollment_state" in enrollments_df.columns
assert "user_id" in enrollments_df.columns
assert "course_section_id" in enrollments_df.columns
assert "created_at" in enrollments_df.columns
assert "updated_at" in enrollments_df.columns
enrollments_df = enrollments_df[
[
"id",
"enrollment_state",
"user_id",
"course_section_id",
"created_at",
"updated_at",
"CreateDate",
"LastModifiedDate",
]
].copy()
enrollments_df.rename(
columns={
"id": "SourceSystemIdentifier",
"enrollment_state": "EnrollmentStatus",
"user_id": "LMSUserSourceSystemIdentifier",
"course_section_id": "LMSSectionSourceSystemIdentifier",
"created_at": "SourceCreateDate",
"updated_at": "SourceLastModifiedDate",
},
inplace=True,
)
enrollments_df["SourceCreateDate"] = enrollments_df["SourceCreateDate"].apply(
_get_date_formated
)
enrollments_df["SourceLastModifiedDate"] = enrollments_df[
"SourceLastModifiedDate"
].apply(_get_date_formated)
enrollments_df["EnrollmentStatus"] = enrollments_df["EnrollmentStatus"].apply(
_get_enrollment_status
)
enrollments_df["SourceSystem"] = SOURCE_SYSTEM
return enrollments_df | 303223302e326854f7a19b2f3c9d0b626a2807bc | 17,534 |
def plot_electrodes(mris, grid, values=None, ref_label=None, functional=None):
"""
"""
surf = mris.get('pial', None)
if surf is None:
surf = mris.get('dura', None)
pos = grid['pos'].reshape(-1, 3)
norm = grid['norm'].reshape(-1, 3)
labels = grid['label'].reshape(-1)
right_or_left = sign(mean(surf['pos'][:, 0]))
if values is None:
iswire = labels == WIRE
colors = labels.copy()
colors[iswire] = 'red'
colors[~iswire] = 'black'
if ref_label is not None:
colors[labels == ref_label] = 'green'
marker = dict(
size=MARKER_SIZE,
color=colors,
)
hovertext = labels
else:
values = values['value'].reshape(-1)
marker = dict(
size=MARKER_SIZE,
color=values,
colorscale=COLORSCALE,
showscale=True,
cmin=nanmin(values),
cmax=nanmax(values),
colorbar=dict(
title='electrode values',
),
)
hovertext = [f'{x0}<br>{x1:0.3f}' for x0, x1 in zip(labels, values)]
traces = [
go.Mesh3d(
x=surf['pos'][:, 0],
y=surf['pos'][:, 1],
z=surf['pos'][:, 2],
i=surf['tri'][:, 0],
j=surf['tri'][:, 1],
k=surf['tri'][:, 2],
color='pink',
hoverinfo='skip',
flatshading=False,
lighting=dict(
ambient=0.18,
diffuse=1,
fresnel=0.1,
specular=1,
roughness=0.1,
),
lightposition=dict(
x=0,
y=0,
z=-1,
),
),
]
if functional is not None:
traces.append(
go.Scatter3d(
x=functional['pos'][:, 0],
y=functional['pos'][:, 1],
z=functional['pos'][:, 2],
mode='markers',
hoverinfo='skip',
marker=dict(
size=5,
color=functional['value'],
symbol='diamond',
colorscale='RdBu',
reversescale=True,
cmid=0,
colorbar=dict(
x=1.2,
title='functional values',
),
),
opacity=1,
))
elif False:
"""do not show Cone, it's not easy to see"""
traces.append(
go.Cone(
x=pos[:, 0],
y=pos[:, 1],
z=pos[:, 2],
u=norm[:, 0] * -1,
v=norm[:, 1] * -1,
w=norm[:, 2] * -1,
sizeref=2,
sizemode='absolute',
anchor='tail',
text=labels,
showscale=False,
colorscale=[
[0, 'rgb(0, 0, 0)'],
[1, 'rgb(0, 0, 0)'],
],
hoverinfo='skip',
),
)
traces.append(
go.Scatter3d(
x=pos[:, 0],
y=pos[:, 1],
z=pos[:, 2],
text=labels,
mode='markers',
hovertext=hovertext,
hoverinfo='text',
marker=marker,
),
)
fig = go.Figure(
data=traces,
layout=go.Layout(
showlegend=False,
scene=dict(
xaxis=AXIS,
yaxis=AXIS,
zaxis=AXIS,
camera=dict(
eye=dict(
x=right_or_left,
y=0,
z=0.5,
),
projection=dict(
type='orthographic',
),
),
),
),
)
return fig | 0bcc5545c625675be080e6b70bf7a74d247ba1c9 | 17,535 |
from typing import Tuple
def _get_laplace_matrix(bcs: Boundaries) -> Tuple[np.ndarray, np.ndarray]:
"""get sparse matrix for laplace operator on a 1d Cartesian grid
Args:
bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
{ARG_BOUNDARIES_INSTANCE}
Returns:
tuple: A sparse matrix and a sparse vector that can be used to evaluate
the discretized laplacian
"""
dim = bcs.grid.dim
if dim == 1:
result = _get_laplace_matrix_1d(bcs)
elif dim == 2:
result = _get_laplace_matrix_2d(bcs)
else:
raise NotImplementedError(f"{dim:d}-dimensional Laplace matrix not implemented")
return result | 80880c7fb1d54a7d4502e1096c2f2ade4d30ce21 | 17,536 |
import warnings
import numpy as np
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape)) | ef3a5bfe7a1ae07b925c1d9b897bce0eff29b275 | 17,537 |
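Usage sketch; this mirrors the behaviour of scikit-learn's internal helper of the same name.
import numpy as np
print(column_or_1d(np.array([1, 2, 3])).shape)        # (3,)
print(column_or_1d(np.array([[1], [2], [3]])).shape)  # (3,), warns only if warn=True
# column_or_1d(np.ones((3, 2))) raises ValueError: bad input shape (3, 2)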
def conv_tower(
inputs,
filters_init,
filters_end=None,
filters_mult=None,
divisible_by=1,
repeat=1,
**kwargs
):
"""Construct a reducing convolution block.
Args:
inputs: [batch_size, seq_length, features] input sequence
filters_init: Initial Conv1D filters
filters_end: End Conv1D filters
filters_mult: Multiplier for Conv1D filters
divisible_by: Round filters to be divisible by (eg a power of two)
repeat: Tower repetitions
Returns:
[batch_size, seq_length, features] output sequence
"""
def _round(x):
return int(np.round(x / divisible_by) * divisible_by)
# flow through variable current
current = inputs
# initialize filters
rep_filters = filters_init
# determine multiplier
if filters_mult is None:
assert filters_end is not None
filters_mult = np.exp(np.log(filters_end / filters_init) / (repeat - 1))
for ri in range(repeat):
# convolution
current = conv_block(current, filters=_round(rep_filters), **kwargs)
# update filters
rep_filters *= filters_mult
return current | 82ff878423309e2963090a9569f14090a85d30e5 | 17,538 |
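A worked example of the multiplier computation above, with illustrative numbers: growing from 64 to 256 filters over 3 repeats gives a geometric factor of 2.
import numpy as np
filters_init, filters_end, repeat = 64, 256, 3
mult = np.exp(np.log(filters_end / filters_init) / (repeat - 1))
print([int(round(filters_init * mult ** i)) for i in range(repeat)])  # [64, 128, 256]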
def edit_coach(request, coach_id):
""" Edit a coach's information """
if not request.user.is_superuser:
messages.error(request, 'Sorry, only the owners can do that.')
return redirect(reverse('home'))
coach = get_object_or_404(Coach, pk=coach_id)
if request.method == 'POST':
form = CoachForm(request.POST, request.FILES, instance=coach)
if form.is_valid():
coach = form.save()
messages.success(request, 'Successfully updated coach!')
return redirect(reverse('view_coach', args=[coach.id]))
else:
messages.error(request, (
'Failed to update coach. Please ensure the form is valid.'))
else:
form = CoachForm(instance=coach)
messages.info(request, f'You are editing {coach.first_name}')
template = 'coaches/edit_coach.html'
context = {
'form': form,
'coach': coach,
}
return render(request, template, context) | ecaf07df3249d3349928b4e9da9c0524b27e603e | 17,539 |
import torch
import numpy as np
def estimate_translation(S,
joints_2d,
focal_length=5000.,
img_size=224.,
use_all_joints=False,
rotation=None):
"""Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.
Input:
S: (B, 49, 3) 3D joint locations
joints: (B, 49, 3) 2D joint locations and confidence
Returns:
(B, 3) camera translation vectors
"""
device = S.device
if rotation is not None:
S = torch.einsum('bij,bkj->bki', rotation, S)
# Use only joints 25:49 (GT joints)
if use_all_joints:
S = S.cpu().numpy()
joints_2d = joints_2d.cpu().numpy()
else:
S = S[:, 25:, :].cpu().numpy()
joints_2d = joints_2d[:, 25:, :].cpu().numpy()
joints_conf = joints_2d[:, :, -1]
joints_2d = joints_2d[:, :, :-1]
trans = np.zeros((S.shape[0], 3), dtype=np.float32)
# Find the translation for each example in the batch
for i in range(S.shape[0]):
S_i = S[i]
joints_i = joints_2d[i]
conf_i = joints_conf[i]
trans[i] = estimate_translation_np(S_i,
joints_i,
conf_i,
focal_length=focal_length,
img_size=img_size)
return torch.from_numpy(trans).to(device) | 70b5cc75dc28919b6bb6cea70b49eae8ca593452 | 17,540 |
import random
def create_midterm_data(all_students):
"""
Create the midterm data set
Ten questions, two from each topic, a percentage of students did not
show up, use it as an example of merge
Rules:
- International students have a 10% drop out rate
- Performance changes by PROGRAM!
:param all_students:
:return: dictionary with the midterm answers
"""
midterm_choices = ['A', 'B', 'C', 'D']
midterm_solution = []
for _ in range(0, 10):
midterm_solution.append(random.choice(midterm_choices))
# Insert the solution row
midterm_answers = pd.DataFrame(
[[0, '', 'SOLUTION', 'SOLUTION'] + midterm_solution + ['100']],
columns=midterm_answers_fields)
for idx, student_info in all_students.iterrows():
midterm_score = {}
# Detect if a student has to be dropped
skip = False
for enrolment, rate in midterm_dropout_rates:
# print random.random(), rate
if student_info['Enrolment Type'] == enrolment and \
random.random() <= rate:
skip = True
if skip:
continue
midterm_score['SID'] = student_info['SID']
midterm_score['email'] = student_info['email']
midterm_score['Last Name'] = student_info['Surname']
midterm_score['First Name'] = student_info['GivenName']
# Select the score based on the program
prg = student_info['Program']
score = int(round(random.normalvariate(
midterm_score_average[prg][0] / 10,
midterm_score_average[prg][1] / 10)))
if score > 10:
score = 10
if score < 0:
score = 0
# Score contains the number of questions that are correct
text_score = str(10 * score)
midterm_score['Total'] = text_score
# Add the score also to the all_student database for further reference
student_info['MIDTERM_SCORE'] = text_score
# Generate the set of answers for the midterm
correct_answers = random.sample(list(range(0, 10)), score)
for x in range(0, 10):
field = midterm_answers_fields[x + 4]
if x in correct_answers:
answer = midterm_solution[x]
score = 1
else:
incorrect = list(midterm_choices)
incorrect.remove(midterm_solution[x])
answer = random.choice(incorrect)
score = 0
midterm_score[field] = answer
midterm_score[field[1:]] = score
midterm_answers = midterm_answers.append(midterm_score,
ignore_index=True)
return midterm_answers | b1f946ebab616362113ada54a17cc3e857b33f98 | 17,541 |
def identify_outliers(x_vals, y_vals, obj_func, outlier_fraction=0.1):
"""Finds the indices of outliers in the provided data to prune for subsequent curve fitting
Args:
x_vals (np.array): the x values of the data being analyzed
y_vals (np.array): the y values of the data being analyzed
obj_func (str): the objective function to use for curve fitting to determine outliers
outlier_fraction (float): the fractional deviation from predicted value required in
order to classify a data point as an outlier
Returns:
np.array: the indices of the identified outliers"""
# get objective function
objective = create_objective_function(obj_func)
# get fitted values
popt, _ = curve_fit(objective, x_vals, y_vals)
# create generate function
func = create_prediction_function(name=obj_func, weights=popt)
# generate predictions
preds = func(x_vals)
# specify outlier bounds based on multiple of predicted value
upper_bound = preds * (1 + outlier_fraction)
lower_bound = preds * (1 - outlier_fraction)
# identify outliers
outlier_mask = np.logical_or(y_vals > upper_bound, y_vals < lower_bound)
outlier_idx = np.where(outlier_mask)[0]
return outlier_idx | e1742747ac63b34c39d1e57cbc896b9df5af85e0 | 17,542 |
def GetTypeMapperFlag(messages):
"""Helper to get a choice flag from the commitment type enum."""
return arg_utils.ChoiceEnumMapper(
'--type',
messages.Commitment.TypeValueValuesEnum,
help_str=(
'Type of commitment. `memory-optimized` indicates that the '
'commitment is for memory-optimized VMs.'),
default='general-purpose',
include_filter=lambda x: x != 'TYPE_UNSPECIFIED') | f00e645a2dbfcae94a33fc5b016809f72e87c0a9 | 17,543 |
def prepare_concepts_index(create=False):
"""
Creates the settings and mappings in Elasticsearch to support term search
"""
index_settings = {
"settings": {"analysis": {"analyzer": {"folding": {"tokenizer": "standard", "filter": ["lowercase", "asciifolding"]}}}},
"mappings": {
"_doc": {
"properties": {
"top_concept": {"type": "keyword"},
"conceptid": {"type": "keyword"},
"language": {"type": "keyword"},
"id": {"type": "keyword"},
"category": {"type": "keyword"},
"provisional": {"type": "boolean"},
"type": {"type": "keyword"},
"value": {
"analyzer": "standard",
"type": "text",
"fields": {"raw": {"type": "keyword"}, "folded": {"analyzer": "folding", "type": "text"}},
},
}
}
},
}
if create:
se = SearchEngineFactory().create()
se.create_index(index=CONCEPTS_INDEX, body=index_settings)
return index_settings | a33e7e6172c7a7c8577abab77cb467125e629e39 | 17,545 |
def pack_wrapper(module, att_feats, att_masks):
"""
for batch computation, pack sequences with different lenghth with explicit setting the batch size at each time step
"""
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats) | ff5e02ac5977cf525a0e2f2a96714ff8a6cf1fe3 | 17,546 |
def recommendation_inspiredby(film: str, limit: int=20) -> list:
"""Movie recommandations from the same inspiration with selected movie
Args:
film (str): URI of the selected movie
limit (int, optional): Maximum number of results to return. Defaults to 20.
Returns:
list: matching movies with URI, title, inspiration list,
number of awards recieved, score on Rotten Tomato and a "relevance score"
"""
# In the query, we assume that movies have a score < 100
# (removes noise - movies with few reviews)
query = f"""
{get_prefix()}
SELECT ?film ?filmLabel
(GROUP_CONCAT(DISTINCT ?inspiredbyLabel; separator="; ") AS ?inspiredbyList)
(COUNT(DISTINCT ?award) AS ?numAwards)
?score
((?score + ?numAwards)*100/138 AS ?totalScore)
WHERE {{
{{
SELECT ?originInspiredby
WHERE {{ wd:{film} wdt:P941 ?originInspiredby . }}
}}
?film wdt:P31 wd:Q11424;
wdt:P941 ?inspiredby;
wdt:P444 ?brutScore.
OPTIONAL {{?film wdt:P166 ?award.}}
SERVICE wikibase:label {{
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en".
?film rdfs:label ?filmLabel.
?inspiredby rdfs:label ?inspiredbyLabel.
}}
FILTER (?inspiredby IN (?originInspiredby))
FILTER regex(?brutScore, "^[0-9]+%$")
BIND(xsd:integer(REPLACE(?brutScore, "%$", "")) AS ?score)
FILTER (?score != 100)
FILTER(?film != wd:{film})
}}
GROUP BY ?film ?filmLabel ?score
ORDER BY DESC(?totalScore)
LIMIT {limit}
"""
print(query)
sp_wrapper = get_sparql()
sp_wrapper.setQuery(query)
sp_wrapper.setReturnFormat(JSON)
return resp_format(sp_wrapper.query().convert()['results']['bindings']) | d70d6a30eabc5d1a5b5a7c3b0cebc28a9dcb0fa9 | 17,548 |
import string
def str2twixt(move):
""" Converts one move string to a twixt backend class move.
Handles both T1-style coordinates (e.g.: 'd5', 'f18'') as well as tsgf-
style coordinates (e.g.: 'fg', 'bi') as well as special strings
('swap' and 'resign'). It can handle letter in upper as well as lowercase.
Args:
move: string with a move
Returns:
twixt.SWAP or twixt.RESIGN or twixt.Point
Raises
ValueError if the move string can't be parsed in any valid format
Examples:
>>> str2twixt('b3')
b3
>>> str2twixt('i18')
i18
>>> str2twixt('fj')
f10
>>> str2twixt('swap')
'swap'
>>> str2twixt('resign')
'resign'
>>> str2twixt('123')
ValueError: Can't parse move: '123'
>>> str2twixt('invalid')
ValueError: Can't parse move: 'invalid'
"""
# Handle swap and resign
if move.lower() == twixt.SWAP.lower():
return twixt.SWAP
elif move.lower() == twixt.RESIGN.lower():
return twixt.RESIGN
# Handle T1-style moves
elif move[0] in string.ascii_letters and move[-1] in string.digits:
return twixt.Point(move)
# Handle tsgf-style moves
elif len(move) == 2 and all(c in string.ascii_letters for c in move):
return twixt.Point(move[0] + str(ord(move[1].lower()) - ord('a') + 1))
# Can't handle move. Throw exception
raise ValueError(f"Can't parse move: '{move}'") | fe1e644519f7d6fe7df2be8a38754ba230981a91 | 17,549 |
from datetime import datetime
import re
def celery_health_view(request):
"""Admin view that displays the celery configuration and health."""
if request.method == 'POST':
celery_health_task.delay(datetime.now())
messages.success(request, 'Health task created.')
return HttpResponseRedirect(request.path)
capital = re.compile('^[A-Z]')
settings = [key for key in dir(current_app.conf) if capital.match(key)]
sorted_settings = [
{
'key': key,
'value': ('*****' if 'password' in key.lower()
else getattr(current_app.conf, key))
} for key in sorted(settings)
]
return render(request, 'admin/celery_health_view.html', {
'settings': sorted_settings,
'title': 'Celery Settings and Health'
}) | 52f7fb76af5dc5557e22976b1930c19e6249f1cc | 17,550 |
def get_n_runs(slurm_array_file):
"""Reads the run.sh file to figure out how many conformers or rotors were meant to run
"""
with open(slurm_array_file, 'r') as f:
for line in f:
if 'SBATCH --array=' in line:
token = line.split('-')[-1]
n_runs = 1 + int(token.split('%')[0])
return n_runs
return 0 | 5574ef40ef87c9ec5d9bbf2abd7d80b62cead2ab | 17,551 |
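Illustrative check against a throwaway submit script; the script contents below are hypothetical.
import os
import tempfile
script = "#!/bin/bash\n#SBATCH --array=0-49%8\npython run_conformer.py $SLURM_ARRAY_TASK_ID\n"
with tempfile.NamedTemporaryFile("w", suffix=".sh", delete=False) as f:
    f.write(script)
print(get_n_runs(f.name))   # 50
os.remove(f.name)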
def get_field_attribute(field):
"""
Format and return a whole attribute string,
consisting of the attribute name in snake case and the field type.
"""
field_name = get_field_name(field.name.value)
field_type = get_field_type(field)
strawberry_type = get_strawberry_type(
field_name, field.description, field.directives
)
field_type += strawberry_type if strawberry_type else ""
return f"{str_converters.to_snake_case(field.name.value)}: {field_type}" | fbbe2dbdf6c5f0427365fbbb0d5f43df8bb74678 | 17,553 |
def shuffle_data(data):
"""
Shuffle the data
"""
rng_state = np.random.get_state()
for c, d in data.items():
np.random.set_state(rng_state)
np.random.shuffle(d)
data[c] = d
return data | 5a1fa1f81fbec54092c8d7b50ebf75f8edb526c7 | 17,554 |
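Small check that the shared RNG state keeps the arrays row-aligned after shuffling.
import numpy as np
np.random.seed(0)
data = {"x": np.arange(5), "y": np.arange(5) * 10}
shuffled = shuffle_data(data)
assert list(shuffled["x"] * 10) == list(shuffled["y"])   # pairing preserved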