content | sha1 | id
---|---|---
import numpy
import math
def S_inv_eulerZYX_body_deriv(euler_coordinates, omega):
    """Compute d(E^-1(euler_coordinates) * omega) / d(euler_coordinates).
    cfo, 2015/08/13
    """
    y = euler_coordinates[1]
    z = euler_coordinates[2]
    # First version, kept for reference as an inert (no-op) string literal:
    """
w1 = omega[0]; w2 = omega[1]; w3 = omega[2]
J = numpy.zeros((3,3))
J[0,0] = 0
J[0,1] = math.tan(y) / math.cos(y) * (math.sin(z) * w2 + math.cos(z) * w3)
J[0,2] = w2/math.cos(y)*math.cos(z) - w3/math.cos(y)*math.sin(z)
J[1,0] = 0
J[1,1] = 0
J[1,2] = -w2*math.sin(z) - w3*math.cos(z)
J[2,0] = w1
J[2,1] = 1.0/math.cos(y)**2 * (w2 * math.sin(z) + w3 * math.cos(z))
J[2,2] = w2*math.tan(y)*math.cos(z) - w3*math.tan(y)*math.sin(z)
"""
    # second version: x = psi, y = theta, z = phi
# J_x = numpy.zeros((3,3))
J_y = numpy.zeros((3,3))
J_z = numpy.zeros((3,3))
# dE^-1/dtheta
J_y[0,1] = math.tan(y)/math.cos(y)*math.sin(z)
J_y[0,2] = math.tan(y)/math.cos(y)*math.cos(z)
J_y[2,1] = math.sin(z)/(math.cos(y))**2
J_y[2,2] = math.cos(z)/(math.cos(y))**2
# dE^-1/dphi
J_z[0,1] = math.cos(z)/math.cos(y)
J_z[0,2] = -math.sin(z)/math.cos(y)
J_z[1,1] = -math.sin(z)
J_z[1,2] = -math.cos(z)
J_z[2,1] = math.cos(z)*math.tan(y)
J_z[2,2] = -math.sin(z)*math.tan(y)
J = numpy.zeros((3,3))
J[:,1] = numpy.dot(J_y, omega)
J[:,2] = numpy.dot(J_z, omega)
return J | f7688b61084b0421288be002042b7299a7f8e867 | 5,748 |
import torch
def normalize_rotmat(mat: torch.Tensor) -> torch.Tensor:
"""Normalizes rotation matrix to a valid one"""
u, _, v = torch.svd(mat)
s = torch.zeros_like(mat)
s[..., 0, 0] = 1.0
s[..., 1, 1] = 1.0
s[..., 2, 2] = torch.det(u) * torch.det(v)
return u @ s @ v.transpose(-1, -2) | 1ba33c73a943392d6fe06448e81c346a5e7dc9f6 | 5,749 |
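A minimal hedged check of the SVD projection above (illustrative values, torch assumed available):
noisy = torch.eye(3) + 0.05 * torch.randn(3, 3)  # perturb an identity rotation
rot = normalize_rotmat(noisy)
print(torch.allclose(rot @ rot.transpose(-1, -2), torch.eye(3), atol=1e-5))  # True: rows/cols orthonormal
print(torch.det(rot))  # ~1.0: proper rotation, not a reflection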
import requests
def get_status_code(ep):
"""Function that gets an endpoint and returns its HTTP status code"""
req = requests.get(ep)
return req.status_code | bd15853ac4615e96306c2b259c6945a4c46dd17b | 5,750 |
def GetGerritFetchUrl(host):
"""Given a gerrit host name returns URL of a gerrit instance to fetch from."""
return 'https://%s/' % host | caf5c9015a4cd863e407fb889d473ddebd7bbabc | 5,751 |
def rec_pow(a, b):
"""Compute a**b recursively"""
if b == 0:
return 1
if b == 1:
return a
return (rec_pow(a,b//2)**2) * (a if b % 2 else 1) | 42972acab57b3e217dbd10fa32a38125c5eab44d | 5,753 |
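A brief hedged sanity check of the exponentiation-by-squaring recursion (illustrative values):
assert rec_pow(2, 10) == 1024  # even exponent: square of rec_pow(2, 5)
assert rec_pow(3, 5) == 243    # odd exponent: squared half-power times an extra factor of 3
assert rec_pow(7, 0) == 1      # base case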
from typing import Dict
def matrix_str_to_dict(matrix_str: str) -> Dict[str, Dict[str, int]]:
"""Transform dictionary string to 2-d array of ints."""
scoring_matrix = {}
table = [line.split() for line in matrix_str.split("\n") if line[0] != "#"]
aa_tos = table[0]
for row in table[1:]:
aa_from = row[0]
scores = [int(entry) for entry in row[1:]] # convert scores to ints
pairs = dict(list(zip(aa_tos, scores))) # dict of pairs, {aa_to: score}
scoring_matrix[
aa_from
] = pairs # 2-D dictionary scoring_matrix[from][to] == score
return scoring_matrix | de1d4fd581a40cb6e61bd2b13c3a89718a67736f | 5,754 |
def ask_note():
"""Function to ask user for task notes"""
task_note = input("Enter any additional task notes here >")
return task_note | 952010409c0430b697b899edf7c29b5fa2f9bedb | 5,756 |
def cleanup_queryset(queryset):
"""
Remove multiple joins on the same table, if any
WARNING: can alter the origin queryset order
"""
return queryset.model.objects.filter(pk__in=[instance.pk for instance in queryset.all()]) | ecdab862fd67359fab1a5706092fe0d023d31321 | 5,758 |
def _is_greater(list1: list, list2: list):
"""
return True if `list1[i] > list2[i]` for each `i`
"""
return all([list1[i] > list2[i] for i in range(len(list1))]) | 925fb214f741d6503b41b49d57a268506f05a048 | 5,760 |
def _get_pair_nodes(root_node):
"""
Internal method to get "pair" nodes under root_node
"""
method_elem = root_node
in_configs_elem_list = method_elem.getElementsByTagName("inConfigs")
in_configs_elem = in_configs_elem_list[0]
pair_elems_list = in_configs_elem.getElementsByTagName("pair")
return pair_elems_list | c2b74f7a507394d2117cd6292116e62d34f3e556 | 5,761 |
def Shard(ilist, shard_index, num_shards):
"""Shard a given list and return the group at index |shard_index|.
Args:
ilist: input list
shard_index: 0-based sharding index
num_shards: shard count
"""
chunk_size = len(ilist) / num_shards
chunk_start = shard_index * chunk_size
if shard_index == num_shards - 1: # Exhaust the remainder in the last shard.
chunk_end = len(ilist)
else:
chunk_end = chunk_start + chunk_size
return ilist[chunk_start:chunk_end] | 7f79ade521c1264d0ddc8c5a228679d7053d9651 | 5,762 |
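A hedged usage sketch of the sharding above (illustrative values, assuming the integer-division fix):
items = list(range(10))
print(Shard(items, 0, 3))  # [0, 1, 2]
print(Shard(items, 2, 3))  # [6, 7, 8, 9] -- the last shard absorbs the remainder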
def separation_scorer(catalogue,name_TGSS,name_NVSS):
"""given two names, gives separation
by set-up, only gives non-zero for those in catalogue"""
if (name_TGSS,name_NVSS) in catalogue.index:
sep = catalogue.loc[name_TGSS,name_NVSS].separation
sep *= 3600
return max(0,(40-sep)/40)
else:
return 0 | f70a6cf58ec12caba784ff7f51cbdbbf74f536b6 | 5,763 |
def function_d(d, d1, d2=1):
"""doc string"""
return d + d1 + d2 | 92d3bb788191612c6a67f67a05bd703a02f43a04 | 5,765 |
def ppmv2pa(x, p):
"""Convert ppmv to Pa
Parameters
----------
    x : gas pressure [ppmv]
    p : total air pressure [Pa]
Returns
-------
pressure [Pa]
"""
return x * p / (1e6 + x) | 974d79d022a7fb655040c7c2900988cd4a10f064 | 5,767 |
def make_elastic_uri(schema: str, user: str, secret: str, hostname: str, port: int) -> str:
"""Make an Elasticsearch URI.
:param schema: the schema, e.g. http or https.
:param user: Elasticsearch username.
:param secret: Elasticsearch secret.
:param hostname: Elasticsearch hostname.
:param port: Elasticsearch port.
:return: the full Elasticsearch URI.
"""
return f"{schema}://{user}:{secret}@{hostname}:{port}" | be959e98330913e75485006d1f4380a57e990a05 | 5,768 |
def _truncate(s: str, max_length: int) -> str:
"""Returns the input string s truncated to be at most max_length characters
long.
"""
return s if len(s) <= max_length else s[0:max_length] | 52c49c027057024eaa27a705a0d2c013bff7a2ce | 5,769 |
import importlib
def load_model(opt, dataloader):
""" Load model based on the model name.
Arguments:
opt {[argparse.Namespace]} -- options
dataloader {[dict]} -- dataloader class
Returns:
[model] -- Returned model
"""
model_name = opt.model
model_path = f"lib.models.{model_name}"
print('use model:',model_name)
model_lib = importlib.import_module(model_path)
model = getattr(model_lib, model_name.title())
return model(opt, dataloader) | 8ad05c4a0f51c40851a9daecf81ed8bf9862979c | 5,770 |
import pandas
def coerce_integer(df):
"""
Loop through the columns of a df, if it is numeric,
convert it to integer and fill nans with zeros.
This is somewhat heavy-handed in an attempt to force
Esri to recognize sparse columns as integers.
"""
# Numeric columns to not coerce to integer
EXCEPT = ["latitude", "longitude", "zipCode"]
def numeric_column_to_int(series):
return (
series.fillna(0).astype(int)
if pandas.api.types.is_numeric_dtype(series) and series.name not in EXCEPT
else series
)
return df.transform(numeric_column_to_int, axis=0) | d4b5963378a10a4bde6f7e1e2111908b83d90b7d | 5,774 |
def get_maxlevel(divs, maxlevel):
"""
Returns the maximum div level.
"""
for info in divs:
if info['level'] > maxlevel:
maxlevel = info['level']
if info.get('subdivs', None):
maxlevel = get_maxlevel(info['subdivs'], maxlevel)
return maxlevel | b7153ef84cb260a4b48c58315aa63fc5179fc06c | 5,776 |
def get_molecules(topology):
"""Group atoms into molecules."""
if 'atoms' not in topology:
return None
molecules = {}
for atom in topology['atoms']:
idx, mol_id, atom_type, charge = atom[0], atom[1], atom[2], atom[3]
if mol_id not in molecules:
molecules[mol_id] = {'atoms': [], 'types': [], 'charge': []}
molecules[mol_id]['atoms'].append(idx)
molecules[mol_id]['types'].append(atom_type)
molecules[mol_id]['charge'].append(charge)
return molecules | 4bf63000c9d5b56bb9d35922ed521ce81cf3a6c1 | 5,777 |
def g_fam(arr):
    """
    Return the next array: element i is the logical OR of arr[i] and arr[i + 1].
    """
    aux = 0
    hol = []
    while aux + 1 < len(arr):
        hol.append(bool(arr[aux] or arr[aux + 1]))
        aux += 1
    return hol | 4f0ed0d4ba205ef205579a2b150250760e7b38fe | 5,779 |
def get_attribute_distribution():
"""
Attribute weights based on position and prototype, in this order:
[potential, confidence, iq, speed, strength, agility, awareness, stamina,
injury, run_off, pass_off, special_off, run_def, pass_def, special_def]
"""
attr_dist = {
'QB': {
'Gunslinger': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Scrambler': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Field General': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'HB': {
'Elusive': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Power': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'All-Purpose': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'FB': {
'Blocking': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Rushing': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'WR': {
'Possession': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Deep Threat': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Route Runner': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'TE': {
'Blocking': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Receiving': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Hybrid': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'LT': {
'Pass Protector': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Run Blocker': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'LG': {
'Pass Protector': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Run Blocker': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'C': {
'Pass Protector': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Run Blocker': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'RG': {
'Pass Protector': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Run Blocker': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'RT': {
'Pass Protector': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Run Blocker': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'DE': {
'Pass Rusher': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Run Stuffer': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'DT': {
'Pass Rusher': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Run Stuffer': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'OLB': {
'Coverage': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Run Stuffer': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'MLB': {
'Coverage': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Run Stuffer': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'CB': {
'Ball Hawk': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Shutdown': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'FS': {
'Ball Hawk': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Shutdown': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'SS': {
'Ball Hawk': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Run Stuffer': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'K': {
'Accurate': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Power': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'P': {
'Coffin Corner': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'Power': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
}
}
return attr_dist | 25dc83ba2f4bec4caaa88423e2607af300dcfbc4 | 5,781 |
def math_div_str(numerator, denominator, accuracy=0, no_div=False):
"""
    Division rendered as a string.
    :param numerator: numerator
    :param denominator: denominator
    :param accuracy: number of decimal places
    :param no_div: whether to skip dividing; e.g. for 3/5, True gives '3/5', False gives '1/1.6'
:return:
"""
if denominator == 0 or numerator == 0:
return 0
if abs(numerator) < abs(denominator):
if no_div:
return '%d/%d' % (numerator, denominator)
return '1/' + str(int(round(denominator / numerator, 0)))
else:
if not numerator % denominator:
accuracy = 0
t = round(float(numerator) / float(denominator), accuracy)
return str(int(t)) if accuracy == 0 else str(t) | bbcead0ec0f79d8915289b6e4ff23b0d6e4bf8ed | 5,784 |
def zero_fuel(distance_to_pump, mpg, fuel_left):
"""
You were camping with your friends far away from home, but when it's time to go back, you realize that you fuel is
running out and the nearest pump is 50 miles away! You know that on average, your car runs on about 25 miles per
gallon. There are 2 gallons left. Considering these factors, write a function that tells you if it is possible to
get to the pump or not. Function should return true (1 in Prolog) if it is possible and false (0 in Prolog) if not.
The input values are always positive.
:param distance_to_pump: an integer value, positive.
:param mpg: an integer value, positive.
:param fuel_left: an integer value, positive.
:return: True if able to make journey to pump on fuel left, otherwise False.
"""
return distance_to_pump / mpg <= fuel_left | 67a69b59d6f35a872f87e18ee0e8693af886c386 | 5,785 |
def get_union(*args):
"""Return unioin of multiple input lists.
"""
return list(set().union(*args)) | 18025cfd37d64f15daf92aa2ae3e81176cae6e39 | 5,786 |
def is_owner(obj, user):
""" Check if user is owner of the slice """
return obj and user in obj.owners | f0c49ffe8a8879d1d052f6fc37df596efa021a84 | 5,788 |
import string
import random
def password_generator(length=12, chars=None):
"""
    Simple, naive password generator (uses random, not secrets, so not for security-critical use)
"""
if not chars:
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for _ in range(length)) | e94754e8d8ee3cf806ddbe092033f8cbc89496f7 | 5,789 |
import random
import string
def oversized_junk():
"""
Return a string of random lowercase letters that is over 4096 bytes long.
"""
return "".join(random.choice(string.ascii_lowercase) for _ in range(4097)) | a7bbaadde1948e1644f708c0166aa7833bb25037 | 5,790 |
import ast
def get_teams_selected(request, lottery_info):
""" get_teams_selected updates the teams
selected by the user
@param request (flask.request object): Object containing
args attributes
@param lottery_info (dict): Dictionary keyed by
reverse standings order, with dictionary
values containing 'name' and 'id' keys
for the team
Returns:
- teams_selected (list): Teams previously
selected by the user
"""
teams_selected = []
selections = ast.literal_eval(request.args['teams_selected'])
for val in selections:
team_name = selections[val].split(' ')[-1]
if team_name != '':
for x in range(len(lottery_info), 0, -1):
if lottery_info[x]['name'] == team_name:
teams_selected.append(x)
return teams_selected | 35edfab322ce5ad039f869027552c664f9e6b576 | 5,791 |
def parse_tags(source):
"""
extract any substring enclosed in parenthesis
source should be a string
normally would use something like json for this
but I would like to make it easy to specify these tags and their groups
manually (via text box or command line argument)
http://stackoverflow.com/questions/1651487/python-parsing-bracketed-blocks
"""
unmatched_count = 0
start_pos = 0
opened = False
open_pos = 0
cur_pos = 0
finished = []
segments = []
for character in source:
#scan for mismatched parenthesis:
if character == '(':
unmatched_count += 1
if not opened:
open_pos = cur_pos
opened = True
if character == ')':
unmatched_count -= 1
if opened and unmatched_count == 0:
clean = source[start_pos:open_pos]
clean = clean.strip()
if clean:
finished.extend(clean.split())
segment = source[open_pos:cur_pos+1]
#segments.append(segment)
#get rid of bounding parentheses:
pruned = segment[1:-1]
group = pruned.split()
finished.append(group)
opened = False
start_pos = cur_pos+1
cur_pos += 1
assert unmatched_count == 0
if start_pos != cur_pos:
#get anything that was left over here
remainder = source[start_pos:cur_pos].strip()
finished.extend(remainder.split())
## #now check on recursion:
## for item in segments:
## #get rid of bounding parentheses:
## pruned = item[1:-1]
## if recurse:
## results = parse_tags(pruned, recurse)
## finished.expand(results)
## else:
## finished.append(pruned.strip())
return finished | 315ea121cec56a38edc16bfa9e6a7ccaeeab1dc2 | 5,794 |
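A short hedged example of the tag parser above (illustrative input):
print(parse_tags("alpha (beta gamma) delta"))
# ['alpha', ['beta', 'gamma'], 'delta'] -- parenthesized tags stay grouped as one list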
def summarize_data(data):
"""
"""
#subset desired columns
data = data[['scenario', 'strategy', 'confidence', 'decile', 'cost_user']]
#get the median value
data = data.groupby(['scenario', 'strategy', 'confidence', 'decile'])['cost_user'].median().reset_index()
data.columns = ['Scenario', 'Strategy', 'Confidence', 'Decile', 'Cost Per User ($)']
return data | 9964d99ed70a1405f1c94553172fd6830371472a | 5,795 |
from typing import List
import sys
def read_zones() -> List[str]:
"""Read the list of zone_names from the sys.stdin."""
zones: List[str] = []
for line in sys.stdin:
line = line.strip()
if not line:
continue
if line.startswith('#'):
continue
zones.append(line)
return zones | b13c39e87167d54ca731f7b1c19b01cdca6f2943 | 5,796 |
from typing import Iterable
from pathlib import Path
import os
def check_files_exist(file_list: Iterable[str]) -> list[str]:
"""Check if all files exist. Return False if not."""
file_errors: list[str] = []
cwd = Path(os.getcwd())
for file_ in file_list:
if cwd.joinpath(file_).is_file() is False:
file_errors.append(file_)
return sorted(file_errors) | 20fc5caba0fe8ad173020ce18eea109c59425243 | 5,797 |
import math
def _fill_arc_trigonometry_array():
"""
Utility function to fill the trigonometry array used by some arc* functions (arcsin, arccos, ...)
Returns
-------
The array filled with useful angle measures
"""
arc_trig_array = [
-1,
math.pi / 4, # -45°
math.pi / 6, # -30°
0, # 0°
math.pi / 6, # 30°
math.pi / 4, # 45°
1
]
return arc_trig_array | 6b5c39dbacf028d84a397e2911f9c9b7241fe0f4 | 5,798 |
def panel_list_tarefas(context, tarefas, comp=True, aluno=True):
"""Renderiza uma lista de tarefas apartir de um Lista de tarefas"""
tarefas_c = []
for tarefa in tarefas:
tarefas_c.append((tarefa, None))
context.update({'tarefas': tarefas_c, 'comp': comp})
return context | 3de659af41a6d7550104321640526f1970fd415c | 5,799 |
def valueFromMapping(procurement, subcontract, grant, subgrant, mapping):
"""We configure mappings between FSRS field names and our needs above.
This function uses that config to derive a value from the provided
grant/subgrant"""
subaward = subcontract or subgrant
if mapping is None:
return ''
elif isinstance(mapping, str):
return getattr(subaward, mapping)
elif isinstance(mapping, tuple) and subcontract:
return valueFromMapping(procurement, subcontract, grant, subgrant,
mapping[0])
elif isinstance(mapping, tuple) and subgrant:
return valueFromMapping(procurement, subcontract, grant, subgrant,
mapping[1])
else:
raise ValueError("Unknown mapping type: {}".format(mapping)) | 1bf2dda830183d1c8289e957b83b1c0d01619160 | 5,800 |
def convert_data_to_ints(data, vocab2int, word_count, unk_count, eos=True):
"""
Converts the words in the data into their corresponding integer values.
Input:
data: a list of texts in the corpus
        vocab2int: conversion dictionary (token -> int)
word_count: an integer to count the words in the dataset
unk_count: an integer to count the <UNK> tokens in the dataset
eos: boolean whether to append <EOS> token at the end or not (default true)
Returns:
converted_data: a list of corpus texts converted to integers
word_count: updated word count
unk_count: updated unk_count
"""
converted_data = []
for text in data:
converted_text = []
for token in text.split():
word_count += 1
if token in vocab2int:
# Convert each token in the paragraph to int and append it
converted_text.append(vocab2int[token])
else:
# If it's not in the dictionary, use the int for <UNK> token instead
converted_text.append(vocab2int['<UNK>'])
unk_count += 1
if eos:
# Append <EOS> token if specified
converted_text.append(vocab2int['<EOS>'])
converted_data.append(converted_text)
assert len(converted_data) == len(data)
return converted_data, word_count, unk_count | c415aea164f99bc2a44d5098b6dbcc3d723697a6 | 5,801 |
def _endian_char(big) -> str:
"""
    Returns the character that represents either big endian or little endian in struct unpack.
Args:
big: True if big endian.
Returns:
Character representing either big or small endian.
"""
return '>' if big else '<' | 2e1a63ec593ca6359947385019bcef45cb3749c0 | 5,804 |
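A small hedged usage sketch of the endian character with struct (standard library assumed):
import struct
print(struct.unpack(_endian_char(True) + 'I', b'\x00\x00\x00\x01'))   # (1,) big endian
print(struct.unpack(_endian_char(False) + 'I', b'\x00\x00\x00\x01'))  # (16777216,) little endian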
from typing import Dict
from typing import Any
def _clean_parameters(parameters: Dict[str, Any]) -> Dict[str, str]:
""" Removes entries which have no value."""
return {k: str(v) for k, v in parameters.items() if v} | b8e911674baee7a656f2dc7ba68514c63f84290c | 5,805 |
import random
def get_initators(filepath, n_lines):
"""
Open text file with iniator words and sample random iniator for each line in the poem.
"""
with open(filepath, "r", encoding = "utf-8") as file:
# save indices of all keywords
loaded_text = file.read() # load text file
        lines = loaded_text.splitlines()  # separate initiator lines
        initiators_list = list(random.sample(lines, n_lines))  # sample random initiators
return initiators_list | 94792679a6ea4e0bb14afd5eb38b656a2cc8af67 | 5,806 |
import traceback
import time
def wrapLoop(loopfunc):
"""Wraps a thread in a wrapper function to restart it if it exits."""
def wrapped():
while True:
try:
loopfunc()
except BaseException:
print(f"Exception in thread {loopfunc},"
" restarting in 10s...")
traceback.print_exc()
else:
print(f"Thread {loopfunc} exited, restarting in 10s...")
time.sleep(10)
return wrapped | 86c48bc850bb1cf17121130ee9349dd529acf5e3 | 5,807 |
def _decode_feed_ids(option_feeds):
"""
>>> _decode_feed_ids('123,456')
[123, 456]
"""
return [int(x) for x in option_feeds.strip().split(',')] | 9218a170c445b3b8d83f08c39d1547c3ff6e2d20 | 5,808 |
def splits_for_blast(target, NAME):
"""Create slices for BLAST
    This function creates multiple slices of 400 nucleotides from a given fasta
    sequence, with a step size of 50. Gaps are excluded from the sequence,
    which is why sequences with fewer than 400 nucleotides are excluded.
Args:
target (np.array): Fasta sequence in an array.
NAME (str): Global variable. Internal index of SNAPPy for this fasta.
Returns:
List of fasta files slices. Each is a proper fasta.
"""
target_seq = target[1:]
no_gap_t = target_seq[target_seq != '-']
target_length = no_gap_t.shape[0]
    sub_aligns = [[[f'>{NAME}_{x}'], no_gap_t[x:x + 400]] for x in range(0, target_length, 50) if len(no_gap_t[x:x + 400]) == 400]
return sub_aligns | 6ad193fe494a6387fbb06d2c2a3b6a059b903a5f | 5,809 |
def protobuf_get_constant_type(proto_type) :
"""About protobuf write types see :
https://developers.google.com/protocol-buffers/docs/encoding#structure
+--------------------------------------+
+ Type + Meaning + Used For +
+--------------------------------------+
+ + + int32, int64, uint32+
+ 0 + Varint + uint64,sint32,sint64+
+ + + boolean, enum +
+--------------------------------------+
+ + + +
+ 1 + 64-bit + fixed64, sfixed64, +
+ + + double +
+--------------------------------------+
+ 2 + string + string +
+--------------------------------------+
+ 5 + 32-bit + float +
+--------------------------------------+
"""
if 'uInt32' == proto_type or \
'sInt32' == proto_type or \
'int32' == proto_type :
return 0
elif 'double' == proto_type :
return 1
elif 'string' == proto_type :
return 2
elif 'float' == proto_type :
return 5
return 2 | 46ce7e44f8499e6c2bdcf70a2bc5e84cb8786956 | 5,811 |
import pprint
def format_locals(sys_exc_info):
"""Format locals for the frame where exception was raised."""
current_tb = sys_exc_info[-1]
while current_tb:
next_tb = current_tb.tb_next
if not next_tb:
frame_locals = current_tb.tb_frame.f_locals
return pprint.pformat(frame_locals)
current_tb = next_tb | b5a21f42c8543d9de060ff7be2b3ad6b23065de9 | 5,812 |
def binarize_garcia(label: str) -> str:
"""
Streamline Garcia labels with the other datasets.
:returns (str): streamlined labels.
"""
if label == 'hate':
return 'abuse'
else:
return 'not-abuse' | 5cc26303e0c496d46b285e266604a38a0c88e8d7 | 5,813 |
import numpy as np
def clean_time_series(time, val, nPoi):
"""
Clean doubled time values and checks with wanted number of nPoi
:param time: Time.
:param val: Variable values.
:param nPoi: Number of result points.
"""
# Create shift array
Shift = np.array([0.0], dtype='f')
# Shift time to right and left and subtract
time_sr = np.concatenate((Shift, time))
time_sl = np.concatenate((time, Shift))
time_d = time_sl - time_sr
time_dn = time_d[0:-1]
# Get new values for time and val
tol = 1E-5
timen = time[time_dn > tol]
valn = val[time_dn > tol]
if len(timen) != nPoi:
raise ValueError(
"Error: In clean_time_series, length and number of results \
points do not match.")
return timen, valn | 35a4cea11a0dbf33916f3df6f8aae5c508a0c838 | 5,814 |
def data_count():
"""
    :return: size of the dataset
"""
return 300 | 1582c3782cd77ee79727a7874afbb74539f3ff9e | 5,815 |
def fbx_mat_properties_from_texture(tex):
"""
    Returns a set of FBX material properties that are affected by the given texture.
Quite obviously, this is a fuzzy and far-from-perfect mapping! Amounts of influence are completely lost, e.g.
Note tex is actually expected to be a texture slot.
"""
# Mapping Blender -> FBX (blend_use_name, blend_fact_name, fbx_name).
blend_to_fbx = (
# Lambert & Phong...
("diffuse", "diffuse", b"DiffuseFactor"),
("color_diffuse", "diffuse_color", b"DiffuseColor"),
("alpha", "alpha", b"TransparencyFactor"),
("diffuse", "diffuse", b"TransparentColor"), # Uses diffuse color in Blender!
("emit", "emit", b"EmissiveFactor"),
("diffuse", "diffuse", b"EmissiveColor"), # Uses diffuse color in Blender!
("ambient", "ambient", b"AmbientFactor"),
# ("", "", b"AmbientColor"), # World stuff in Blender, for now ignore...
("normal", "normal", b"NormalMap"),
# Note: unsure about those... :/
# ("", "", b"Bump"),
# ("", "", b"BumpFactor"),
# ("", "", b"DisplacementColor"),
# ("", "", b"DisplacementFactor"),
# Phong only.
("specular", "specular", b"SpecularFactor"),
("color_spec", "specular_color", b"SpecularColor"),
# See Material template about those two!
("hardness", "hardness", b"Shininess"),
("hardness", "hardness", b"ShininessExponent"),
("mirror", "mirror", b"ReflectionColor"),
("raymir", "raymir", b"ReflectionFactor"),
)
tex_fbx_props = set()
for use_map_name, name_factor, fbx_prop_name in blend_to_fbx:
# Always export enabled textures, even if they have a null influence...
if getattr(tex, "use_map_" + use_map_name):
tex_fbx_props.add(fbx_prop_name)
return tex_fbx_props | 363c9f60084a55aa8d9c01c2f06d4d30d5e45993 | 5,816 |
import os
def get_stretch_directory(sub_directory=''):
"""Returns path to stretch_user dir if HELLO_FLEET_PATH env var exists
Parameters
----------
sub_directory : str
valid sub_directory within stretch_user/
Returns
-------
str
dirpath to stretch_user/ or dir within it if stretch_user/ exists, else /tmp
"""
base_path = os.environ.get('HELLO_FLEET_PATH', None)
full_path = base_path + '/' + sub_directory if base_path is not None else '/tmp/'
return full_path | 0af8b46c160008750c62b4aada700ed46b87aff9 | 5,817 |
def xroot(x, mu):
"""The equation of which we must find the root."""
return -x + (mu * (-1 + mu + x))/abs(-1 + mu + x)**3 - ((-1 + mu)*(mu + x))/abs(mu + x)**3 | 5db07cc197f1bc4818c4591597099cd697576df2 | 5,818 |
import random
def spliter(data_dict, ratio=[6, 1, 1], shuffle=True):
"""split dict dataset into train, valid and tests set
Args:
data_dict (dict): dataset in dict
ratio (list): list of ratio for train, valid and tests split
shuffle (bool): shuffle or not
"""
if len(ratio) != 3:
        raise ValueError('ratio must include three int numbers')
train = {'x': list(), 'y': list()}
valid = {'x': list(), 'y': list()}
tests = {'x': list(), 'y': list()}
for _, [samples, labels] in data_dict.items():
samples_lens = len(samples)
train_ratio = round(samples_lens * (ratio[0] / sum(ratio)))
tests_ratio = round(samples_lens * (ratio[2] / sum(ratio)))
valid_ratio = samples_lens - train_ratio - tests_ratio
data = list(zip(samples, labels))
if shuffle:
random.shuffle(data)
x, y = zip(*data)
train['x'].extend(x[:train_ratio])
train['y'].extend(y[:train_ratio])
valid['x'].extend(x[train_ratio:train_ratio + valid_ratio])
valid['y'].extend(y[train_ratio:train_ratio + valid_ratio])
        # slice from the end of the valid split so an empty test split stays empty
        tests['x'].extend(x[train_ratio + valid_ratio:])
        tests['y'].extend(y[train_ratio + valid_ratio:])
return train, valid, tests | 793af274e3962d686f2ef56b34ae5bc0a53aac5b | 5,819 |
import os
def _get_start_command(city: str) -> str:
"""Returns the docker run command needed for the MTS in order to train on the given city.
Parameters
----------
city: str
City the MTS should train on.
Returns
-------
docker_run_command: str
Corresponding docker run command.
"""
return f'sudo sh mts/mts.sh {os.environ["PGHOST"]} ' \
f'{int(os.environ["PGPORT"])} ' \
f'{os.environ["PGDATABASE"]} ' \
f'{os.environ["PGUSER"]} ' \
f'{os.environ["PGPASSWORD"]} ' \
f'{city} ' \
f'{int(os.environ["MTS_EPOCHS"])} ' \
f'{int(os.environ["MIN_IMAGE_NUMBER_PER_LABEL"])}' | df4b54732115f9dd7ed814fe1075dff3cb050c15 | 5,820 |
import torch
def freeze_layers(
model: torch.nn.Sequential,
n_layers_to_train: int
) -> torch.nn.Sequential:
"""
Function to freeze given number of layers for selected model
:param model: Instance of Pytorch model
:param n_layers_to_train: number of layers to train, counting from the last one.
The rest of the layers is going to be frozen.
:return: Model with frozen layers.
"""
n_layers = len(list(model.children()))
for idx, child in enumerate(model.children()):
if idx < (n_layers - n_layers_to_train):
for param in child.parameters():
param.requires_grad = False
return model | bfeca127c684de0815493ef621dce790b3a090f3 | 5,821 |
import subprocess
def is_figlet() -> bool:
"""Ensure figlet exists"""
process = subprocess.Popen(
['which', 'figlet'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
return process.wait() == 0 | e89db43304ee01a8230f18e0af1a40321a52b41d | 5,822 |
def get_headers(wsgi_env):
"""
Extracts HTTP_* variables from a WSGI environment as
title-cased header names.
"""
return {
key[5:].replace('_', '-').title(): value
        for key, value in wsgi_env.items() if key.startswith('HTTP_')} | 01e7140a670957c691fec01dd90d53bdc29425bd | 5,823 |
import torch
def bilinear_sampling(img, dp):
"""
warp rgbd images using projected depth * pixel and bilinear sampling
"""
b, hw, _ = dp.shape
_, _, h, w = img.shape
dp = dp.reshape(-1, 3) # reshape dp to (b*h*w, 3)
# homogeneous coord (wx, wy, w) -> real coord (x, y, 1)
    # clamp depth so the division never hits zero or negative values
u = dp[:, 0] / torch.clip(dp[:, 2], 1e-4, 10000)
v = dp[:, 1] / torch.clip(dp[:, 2], 1e-4, 10000)
# in deepvoxel, x, y is opposite (ignore this comment)
u, v = v, u
u0 = u.type(torch.int32)
u1 = u0 + 1
v0 = v.type(torch.int32)
v1 = v0 + 1
# define weights
w1 = (u1 - u) * (v1 - v)
w2 = (u - u0) * (v1 - v)
w3 = (u1 - u) * (v - v0)
w4 = (u - u0) * (v - v0)
# make image coord for all images in batch size
img_coord = torch.divide(torch.arange(b * hw), hw, rounding_mode='floor').type(torch.long)
# find a point that is not in out-of-grid section after warping
not_out = (u >= 0) * (u < h - 1) * (v >= 0) * (v < w - 1) * (dp[:, 2] > 1e-4)
# make out-points to 0 using not_out
u0 = (u0 * not_out).type(torch.long)
u1 = (u1 * not_out).type(torch.long)
v0 = (v0 * not_out).type(torch.long)
v1 = (v1 * not_out).type(torch.long)
w1 = (w1 * not_out)
w2 = (w2 * not_out)
w3 = (w3 * not_out)
w4 = (w4 * not_out)
# bilinear sampling
warped = w1[:, None] * img[img_coord, :, u0, v0] + w2[:, None] * img[img_coord, :, u1, v0] + \
w3[:, None] * img[img_coord, :, u0, v1] + w4[:, None] * img[img_coord, :, u1, v1]
return warped, not_out | 529f24e80ca2bf785efc95db4d20cbbc13d66e2f | 5,825 |
def mname(obj):
""" Get the full dotted name of the test method """
mod_name = obj.__class__.__module__.replace('h5py.tests.','')
return "%s.%s.%s" % (mod_name, obj.__class__.__name__, obj._testMethodName) | 85a4a4f1aec25d57212f31968d5a42f8dc8d39e0 | 5,826 |
import re
def config_file_has_self_install_dirs(config_file):
"""Config file has self install dirs"""
has_self_install_dirs = False
with open(config_file) as _f:
for line in _f:
if re.search(r'^hard_drive_\d+_label\s*=\s*(amigaosdir|kickstartdir|userpackagesdir)', line, re.I) or \
re.search(r'^(hardfile2|uaehf\d+|filesystem2)=.*[,:](amigaosdir|kickstartdir|userpackagesdir)[,:]', line, re.I):
has_self_install_dirs = True
break
return has_self_install_dirs | 5e095570ea20156cc3d38cf7379f199b5b8af5bc | 5,827 |
import os
def gen_cmd(dali_root_dir, file_list, process_includes=False):
"""
Command for calling cpplint.py
"""
if not file_list:
return ["true"]
cmd = ["python",
os.path.join(dali_root_dir, "third_party", "cpplint.py"),
"--quiet",
"--linelength=100",
"--root=" + os.path.join(dali_root_dir, "include" if process_includes else "")]
cmd.extend(file_list)
return cmd | c9b6dca4fca75e7bba0016cb4f2c395ea2608d27 | 5,828 |
def call(value):
"""Call is meant to be used with the Method filter. It attempts to call
the method specified.
Usage:
{% load custom_filters %}
{% if foo|method:"has_access"|call %}
This will invoke foo.has_access()
"""
if not callable(value):
return "[%s is not callable]" % value
return value() | 1bb4217b74bf69b55d4c2cae1c29a396e19f5153 | 5,829 |
def permute_by_indices(list_of_things, list_of_index_transpositions):
"""Given a list_of_things and a list of pairs of transpositions of indices
[(i, j), (k, m), ...], return the list_of_things with the i-th an j-th
values swapped, the k-th- and m-th values swapped, and so on.
Examples
--------
>>> permute_by_indices(['a', 'b', 'c'], [(0, 1)])
['b', 'a', 'c']
>>> permute_by_indices(['a', 'b', 'c'], [(0, 2), (1, 2)])
['c', 'a', 'b']
"""
result = list_of_things
for i, j in list_of_index_transpositions:
result[j], result[i] = result[i], result[j]
return result | 31d7f73028fcb4c3a43750d1ade0c27e1b563dbb | 5,830 |
import re
def _string_to_int(string: str) -> int:
"""
    a helper function to convert a string to an int, like S1 -> 1
Args:
string (str): input string
Returns:
(int): return value if this is a int, return 0 if this is not a int
"""
    r = re.findall(r'\d+', string)
if (len(r) > 0):
return int(r[0])
else:
return 0 | d4dbea658e6092edb27b85154b319e098c588a76 | 5,831 |
from ospgrillage.load import Path  # ospgrillage's moving-load Path class, not pathlib.Path
def create_moving_path(**kwargs):
"""
User interface function to create Path object for moving load.
:keyword:
* start_point (`Point`): Start point of path
* end_point (`Point`): End point of path
* increments (`int`): Increment of path steps. Default is 50
* mid_point (`Point`): Default = None
:returns: :class:`~ospgrillage.load.Path` object
"""
return Path(**kwargs) | 05de795c61e7b3fc4c3f4c2aa14505b4a6fcf986 | 5,832 |
def _no_negative_zero(val):
"""Make sure -0 is never output. Makes diff tests easier."""
if val == 0:
return 0
return val | 345802e297cc1e1c77a5b1db664715bfc42f3da6 | 5,833 |
import os
def join_paths(*paths):
"""
    Join one or more path components.
    :param paths: path components to join
    :return: the joined path
"""
return os.path.join(*paths) | 010c48cf8f12e0bc5c542b3f952e8dcd664fd80f | 5,834 |
def find_all_indexes_r(text, pattern, itext=0, ipattern=0, indices=None):
"""Recursive implementation of find_all_indexes. The time complexity should
be equialent to find all indexs not recursive."""
if indices is None:
indices = []
if len(text) == itext + ipattern:
if len(pattern) == ipattern and len(pattern) != 0:
indices.append(itext)
return indices
elif len(pattern) == 0:
indices.append(itext)
itext += 1
elif len(pattern) == ipattern:
indices.append(itext)
ipattern = 0
itext += 1
elif pattern[ipattern] == text[ipattern + itext]:
ipattern += 1
else:
ipattern = 0
itext += 1
return find_all_indexes_r(text, pattern, itext, ipattern, indices) | bcd447140c92d8ffbbe0577e469d2e9e4cc9edad | 5,835 |
def get_txt_version():
"""Get version string from version.txt."""
try:
with open("version.txt", "r") as fp:
return fp.read().strip()
except IOError:
return None | 62951a878bfb52ae6b00543e1816b9ff298bb907 | 5,837 |
import argparse
def get_args():
"""Get arguments from the command line."""
parser = argparse.ArgumentParser(description='Train ResNet-50. Fast.')
parser.add_argument('--output-dir', type=str, required=True,
help='Output directory')
parser.add_argument('--job-id', type=str, default=None,
help='Job identifier')
parser.add_argument('--print-freq', type=int, default=None,
help='Frequency for printing batch info')
parser.add_argument('--seed', type=int, default=42,
help='Random seed')
parser.add_argument('--no-eval', default=False, action='store_true',
help='Do not evaluate on validation set each epoch')
parser.add_argument('--save-stats', default=False, action='store_true',
help='Save all performance statistics to files')
parser.add_argument('--cache-dir', type=str, required=True,
help='Directory (on local disk/PFS) containing cached data')
# Data/training details.
parser.add_argument('--data-dir', type=str, required=True,
help='Directory containing data to train with')
parser.add_argument('--dataset', type=str, default='imagenet',
choices=['imagenet', 'imagenet-22k'],
help='Dataset to use')
parser.add_argument('--synth-data', default=False, action='store_true',
help='Use synthetic data')
parser.add_argument('--no-augmentation', default=False, action='store_true',
help='Disable data augmentation')
parser.add_argument('--batch-size', type=int, default=120,
help='Per-GPU batch size')
parser.add_argument('--epochs', type=int, default=90,
help='Number of epochs to train for')
parser.add_argument('--drop-last', default=False, action='store_true',
help='Drop last small mini-batch')
# Optimization.
parser.add_argument('--lr', type=float, default=None,
help='Learning rate')
parser.add_argument('--start-lr', type=float, default=0.1,
help='Initial learning rate for warmup')
parser.add_argument('--base-batch', type=int, default=256,
help='Base batch size for learning rate scaling')
parser.add_argument('--momentum', type=float, default=0.9,
help='Momentum')
parser.add_argument('--warmup-epochs', type=int, default=10,
help='Number of epochs to warm up')
parser.add_argument('--decay-epochs', type=int, nargs='+',
default=[30, 60, 80],
help='Epochs at which to decay the learning rate')
parser.add_argument('--decay-factors', type=float, nargs='+',
default=[0.1, 0.1, 0.1],
help='Factors by which to decay the learning rate')
parser.add_argument('--label-smoothing', type=float, default=0.0,
help='Label smoothing factor')
parser.add_argument('--decay', type=float, default=0.0001,
help='L2 weight decay')
# Performance.
parser.add_argument('--dist', default=False, action='store_true',
help='Do distributed training')
parser.add_argument('-r', '--rendezvous', type=str, default='file',
help='Distributed initialization scheme (file, tcp)')
parser.add_argument('--fp16', default=False, action='store_true',
help='Use FP16/AMP training')
parser.add_argument('--workers', type=int, default=8,
help='Number of workers for reading samples')
parser.add_argument('--no-cudnn-bm', default=False, action='store_true',
help='Do not do benchmarking to select cuDNN algos')
parser.add_argument('--no-prefetch', default=False, action='store_true',
help='Do not use fast prefetch pipeline')
parser.add_argument('--hdmlp', default=False, action='store_true',
help='Use HDMLP for I/O')
parser.add_argument('--hdmlp-config-path', type=str,
help='Config path for HDMLP')
parser.add_argument('--hdmlp-lib-path', type=str, default=None,
help='Library path for HDMLP')
parser.add_argument('--hdmlp-stats', default=False, action='store_true',
help='Save HDMLP statistics every epoch')
parser.add_argument('--channels-last', default=False, action='store_true',
help='Use channels-last memory order')
parser.add_argument('--dali', default=False, action='store_true',
help='Use DALI for I/O and data augmentation')
parser.add_argument('--bucket-cap', type=int, default=25,
help='Communication max bucket size (in MB)')
return parser.parse_args() | e6bf1debbbd86b3aaac669b1b5191e8f0b0f947e | 5,838 |
import json
def load_json_from_string(string):
"""Load schema from JSON string"""
try:
json_data = json.loads(string)
except ValueError as e:
raise ValueError('Given string is not valid JSON: {}'.format(e))
else:
return json_data | 66f96373a8e02bf69289e5e4594ac319906475f5 | 5,839 |
def _parse_detector(detector):
"""
Check and fix detector name strings.
Parameters
----------
detector : `str`
The detector name to check.
"""
oklist = ['n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9',
'n10', 'n11']
altlist = [str(i) for i in range(12)]
if detector in oklist:
return detector
elif detector in altlist:
return 'n' + detector
else:
raise ValueError('Detector string could not be interpreted') | f78d7eb5004b3cb6d3276b0c701263c71668e36e | 5,840 |
def rel_ordered(x1,x2,x3,x4):
"""
given 4 collinear points, return true if the direction
    from x1->x2 is the same as x3->x4
    requires x1 != x2, and x3 != x4
"""
if x1[0]!=x2[0]:
i=0 # choose a coordinate which is varying
else:
i=1
assert x1[i]!=x2[i]
assert x3[i]!=x4[i]
return (x1[i]<x2[i]) == (x3[i]<x4[i]) | 2649250e2ea2619c7f6c21b8dd2cebaeec10647b | 5,841 |
def min_rl(din):
"""
A MIN function should "go high" when any of its
inputs arrives. Thus, OR gates are all that is
needed for its implementation.
Input: a list of 1-bit WireVectors
Output: a 1-bit WireVector
"""
if len(din) == 1:
dout = din[0]
else:
dout = din[0] | min_rl(din[1:])
return dout | 06f0bbce664367307669ddb28c60c65b79de91d3 | 5,842 |
import math
def shoulders_up(x, y, max_angle=10):
"""
1:"Neck",
2:"RShoulder",
5:"LShoulder".
looks at line from left shoulder to neck, and
line from right shoulder to neck
if either are not straight returns 1
if both are flat (slope of 0 or close to 0) returns 1
"""
left_degrees = math.degrees(math.atan2(y[5]-y[1], x[5]-x[1]))
right_degrees = math.degrees(math.atan2(y[1]-y[2], x[1]-x[2]))
slope_shoulder = (y[5]-y[2])/(x[5]-x[2])
if (left_degrees <= max_angle and
right_degrees <= max_angle) \
and slope_shoulder <= 0.25:
return left_degrees, right_degrees, 0.0
else:
return left_degrees, right_degrees, 1.0 | 2a6adce5dad431c91cac77bd79e4011964f76341 | 5,843 |
import re
def _validate_eida_token(token):
"""
Just a basic check if the string contains something that looks like a PGP
message
"""
if re.search(pattern='BEGIN PGP MESSAGE', string=token,
flags=re.IGNORECASE):
return True
return False | 746fbd011b38abab43be983a1a054505526dcf78 | 5,844 |
def bounds(gdf):
"""Calculates the bounding coordinates (left, bottom, right, top) in the given GeoDataFrame.
Args:
gdf: A GeoDataFrame containing the input points.
Returns:
An array [minx, miny, maxx, maxy] denoting the spatial extent.
"""
bounds = gdf.total_bounds
return bounds | 48242e870edd1db9b1191518c4b9ba7433420610 | 5,845 |
import re
def simplestr(text):
"""convert a string into a scrubbed lower snakecase. Intended use is converting
human typed field names deterministically into a string that can be used for a
key lookup.
:param text: type str text to be converted
"""
text = text.strip()
text = text.replace(' ', '_')
text = text.lower()
    return re.sub(r'\W+', '', text) | b030c50cd300dd97d69a9d2b8421892bb1f0c23a | 5,846 |
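A quick hedged example of the scrubbing above (illustrative inputs):
print(simplestr("  First Name "))  # 'first_name'
print(simplestr("Email (work)"))   # 'email_work' -- non-word characters are stripped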
def is_hr_between(time: int, time_range: tuple) -> bool:
"""
Calculate if hour is within a range of hours
Example: is_hr_between(4, (24, 5)) will match hours from 24:00:00 to 04:59:59
"""
if time_range[1] < time_range[0]:
return time >= time_range[0] or time <= time_range[1]
return time_range[0] <= time <= time_range[1] | 70d874f0a5dee344d7638559101fc6be2bcca875 | 5,848 |
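Hedged illustrative checks of the wrap-around logic above:
print(is_hr_between(4, (24, 5)))   # True -- the range wraps past midnight
print(is_hr_between(10, (24, 5)))  # False
print(is_hr_between(10, (8, 17)))  # True -- plain non-wrapping range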
def mld(returns_array, scale=252):
"""
Maximum Loss Duration
Maximum number of time steps when the returns were below 0
:param returns_array: array of investment returns
:param scale: number of days required for normalization. By default in a year there are 252 trading days.
:return: MLD
"""
max_loss = 0
curr = 0
for i in range(returns_array.shape[0]):
# If first returns is negative, add this occurrence to max loss counter
# If it's positive, continue
if i == 0 and returns_array[0] < 0:
curr += 1
max_loss = curr
# If the equity continues dropping
elif (i > 0) and (returns_array[i-1] < 0) and (returns_array[i] < 0):
curr += 1
if max_loss < curr:
max_loss = curr
# If the equity stops dropping
elif (i > 0) and (returns_array[i-1] < 0) and (returns_array[i] > 0):
curr = 0
# Normalize over the number of trading days in a year
return max_loss / scale | 2d78d76c1456ebb4df606a9450f45e47b5e49808 | 5,849 |
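A hedged worked example of the loss-duration counter (hypothetical returns, numpy assumed):
import numpy as np
r = np.array([0.01, -0.02, -0.01, 0.03, -0.01])
# the loss streak lengthens only at index 2, so max_loss == 1
print(mld(r))  # 1/252, roughly 0.004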
import math
def get_distance_wgs84(lon1, lat1, lon2, lat2):
"""
    Based on the WGS-84 algorithm from https://github.com/googollee/eviltransform.
    :param lon1: longitude 1
    :param lat1: latitude 1
    :param lon2: longitude 2
    :param lat2: latitude 2
    :return: distance in meters
"""
earthR = 6378137.0
pi180 = math.pi / 180
arcLatA = lat1 * pi180
arcLatB = lat2 * pi180
x = (math.cos(arcLatA) * math.cos(arcLatB) *
math.cos((lon1 - lon2) * pi180))
y = math.sin(arcLatA) * math.sin(arcLatB)
s = x + y
if s > 1:
s = 1
if s < -1:
s = -1
alpha = math.acos(s)
distance = alpha * earthR
return distance | 8da67a3a690ff0cb548dc31fb65f3b2133fa3e3f | 5,850 |
def resource_method_wrapper(method):
"""
Wrap a 0-ary resource method as a generic renderer backend.
>>> @resource_method_wrapper
... def func(resource):
    ...     print(repr(resource))
>>> action = "abc"
>>> resource = "def"
>>> func(action, resource)
'def'
"""
def generic_renderer_backend(action, resource):
return method(resource)
return generic_renderer_backend | e07bd139586a7b80d48c246ea831b39c3183224e | 5,851 |
def fib_lista(n):
"""
    Return a list containing the Fibonacci sequence numbers
    up to (but not including) n.
"""
lista = []
i, j = 0, 1
while i < n:
lista.append(i)
i, j = j, i + j
return lista | ec307ce80ae70e5fba81d2e26b140f1b86c95619 | 5,852 |
import os
def _get_json_file(module_path):
"""
    Returns the path of the JSON file for a module, empty if it doesn't exist.
"""
json_file = '%s.json' % module_path.rsplit('.', 1)[0]
if os.path.isfile(module_path) and os.path.isfile(json_file):
return json_file
else:
return '' | 4a98fc9358d88817311fc0a09c44b8ea54529d74 | 5,853 |
def make_item_accessor(idx):
"""
Returns a property that mirrors access to the idx-th value of an object.
"""
@property
def attr(self):
return self[idx]
@attr.setter
def attr(self, value):
self[idx] = value
return attr | 7cd1248b3f9402fc9be10d277dee849dc47840c0 | 5,854 |
def calc_correlation(data, data2):
"""
Calculate the correlations between 2 DataFrames().
Parameters:
- data: The first dataframe.
- data2: The second dataframe.
Returns:
A Series() object.
"""
return (
data.corrwith(data2).
loc[lambda x: x.notnull()]
) | 7f47592a4525efa9db2fba317d095448d5288399 | 5,855 |
def commandLine(Argv):
"""
Method converting a list of arguments/parameter in a command line format (to include in the execution of a program for exemple).
list --> str
"""
assert type(Argv) is list, "The argument of this method are the arguments to convert in the command line format. (type List)"
commandLine = ''
for i in Argv[1::]:
commandLine += i+" "
return(commandLine) | 4b27e73fd43ec914f75c22f2482271aafd0848ac | 5,856 |
def prob(n: int, p: float) -> float:
"""
Parameters:
        - n (int): number of trials performed
        - p (float): probability that a trial succeeds
    Returns:
        - float: the geometric probability
"""
pr = p * (1 - p) ** (n - 1)
return pr | fca3fab45ec852c8910619889ac19b0753f5b498 | 5,857 |
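A hedged worked example: the probability that the first success lands on trial 3 with p = 0.5:
print(prob(3, 0.5))  # 0.5 * (1 - 0.5)**2 == 0.125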
def map_code(func):
"""
Map v to an Ontology code
"""
def mapper(v):
if v is None:
return v
else:
return func(str(v))
return mapper | 76eb3c6756c983fd73c180b57c1c998a348d32eb | 5,858 |
def find_nth(s, x, n):
"""
find the nth occurence in a string
takes string where to search, substring, nth-occurence
"""
i = -1
for _ in range(n):
i = s.find(x, i + len(x))
if i == -1:
break
return i | b54998db817272ec534e022a9f04ec8d350b08fb | 5,859 |
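With the start-offset fix above, a hedged illustrative check (non-overlapping matches assumed):
print(find_nth("ababab", "ab", 1))  # 0
print(find_nth("ababab", "ab", 2))  # 2
print(find_nth("ababab", "ab", 9))  # -1 -- fewer than nine occurrences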
def ReadFile(filename):
"""
description: Read program from file
param {*} filename
return {*} file
"""
input_file = open(filename, "r")
result = []
while True:
line = input_file.readline()
if not line:
break
result.append(line)
for line_index in range(len(result)):
result[line_index] = result[line_index][:-1] # delete the '\n' of every line
input_file.close()
return result | fd7d7faab401f335579719f6e015bf7b9d82c2e2 | 5,861 |
def gen_nested_prop_getter(val_name, throws, klass):
"""
generates a nested property getter, it
actually returns an _Internal object
"""
def _internal(self):
try:
getattr(self, val_name)
except AttributeError:
setattr(self, val_name, klass())
return getattr(self, val_name)
return _internal | 54f766ae1dfcbc0e491355a4c741ccbadff6d26f | 5,862 |
def get_maf(variant):
"""
Gets the MAF (minor allele frequency) tag from the info field for the
variant.
Args:
variant (cyvcf2.Variant)
Returns:
maf (float): Minor allele frequency
"""
return variant.INFO.get("MAF") | 1d25f577a3cec14b8d05095d320fad6584484718 | 5,863 |
import statistics
def coverageCalc(coverageList,minCov):
"""Function parsing coverageList for
:param coverageList: List of pacbam coverage information
:param minCov: Int of minimum passing coverage
:return:
covCount: Int of bases with coverage
minCovCount: Int of bases with minimum coverage
meanDepth: Int mean coverage stat
"""
covCount = 0
minCovCount = 0
meanDepth = statistics.mean(coverageList)
for i in coverageList:
if i != 0:
covCount +=1
if i >= minCov:
minCovCount +=1
return(covCount,minCovCount,round(meanDepth,2)) | e20dc1e1f0b6f7e328501afe9921455a705f196a | 5,864 |
def parse_rsync_url(location):
"""Parse a rsync-style URL."""
if ':' in location and '@' not in location:
# SSH with no user@, zero or one leading slash.
(host, path) = location.split(':', 1)
user = None
elif ':' in location:
# SSH with user@host:foo.
user_host, path = location.split(':', 1)
if '@' in user_host:
user, host = user_host.rsplit('@', 1)
else:
user = None
host = user_host
else:
raise ValueError('not a valid rsync-style URL')
return (user, host, path) | fc315c1a6b376cbb83b047246fee51ae936b68ef | 5,868 |
def setup_go_func(func, arg_types=None, res_type=None):
"""
    Set up a Go function so ctypes knows what types it takes and returns.
    :param func: Specify Go function from library.
    :param arg_types: List containing the ctypes argument types the function takes. Default: None.
    :param res_type: The ctypes type the function returns. Default: None.
    :return: Returns func back, for cases when you want to set up the function and assign it to a variable in one line.
"""
if arg_types is not None:
func.argtypes = arg_types
if res_type is not None:
func.restype = res_type
return func | 05f48f4dfecdf0133613f76f235b1e82f14bc5a9 | 5,869 |
import argparse
def positive_int(val):
"""
ArgumentParse positive int check
"""
try:
ival = int(val)
assert ival > 0
return ival
except (ValueError, AssertionError):
raise argparse.ArgumentTypeError("'%s' is not a valid positive int" % val) | cf98daeeb9876bc768e9c3ad0d227ce39386e8b4 | 5,870 |
def jaccard_similarity(x, y):
""" Returns the Jaccard Similarity Coefficient (Jarccard Index) between two
lists.
From http://en.wikipedia.org/wiki/Jaccard_index: The Jaccard
coefficient measures similarity between finite sample sets, as is defined as
the size of the intersection divided by the size of the union of the sample
sets.
"""
intersection_cardinality = len(set.intersection(*[set(x), set(y)]))
union_cardinality = len(set.union(*[set(x), set(y)]))
return intersection_cardinality / float(union_cardinality) | 81cf0c882ff4b06e79b102abb2d8f13755b68873 | 5,873 |
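A hedged worked example of the Jaccard index above (illustrative lists):
print(jaccard_similarity([1, 2, 3], [2, 3, 4]))  # |{2, 3}| / |{1, 2, 3, 4}| == 0.5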
import shlex
def tokenizer_word(text_string, keep_phrases=False):
"""
Tokenizer that tokenizes a string of text on spaces and new lines (regardless of however many of each.)
:param text_string: Python string object to be tokenized.
    :param keep_phrases: Boolean; will not split "quoted" text
:return: Array of strings, each is a word
"""
text_string = str(text_string)
if keep_phrases:
tokens = shlex.split(text_string.replace('\n', ' ').replace('/', ' '))
else:
tokens = text_string.replace('\n', ' ').replace('/', ' ').split()
return tokens | 940f716072e9b2ce522c9854b2394327fbd1e934 | 5,875 |
def weighted_mean(values, weights):
"""Calculate the weighted mean.
:param values: Array of values
:type values: numpy.ndarray
:param weights: Array of weights
:type weights: numpy.ndarray
:rtype: float
"""
weighted_mean = (values * weights).sum() / weights.sum()
return weighted_mean | 886d7cff1555c40b448cda03e08620a0e2d69ede | 5,876 |
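A hedged worked example of the weighted mean (illustrative numpy arrays):
import numpy as np
print(weighted_mean(np.array([1.0, 2.0, 3.0]), np.array([1.0, 1.0, 2.0])))  # (1 + 2 + 6) / 4 == 2.25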
def init_json():
"""
    This function initializes the JSON dict.
Return : Dictionnary
"""
data_json = {}
data_json['login'] = ""
data_json['hash'] = ""
data_json['duration'] = 0
data_json['nbFiles'] = 0
data_json['nbVirus'] = 0
data_json['nbErrors'] = 0
data_json['uuidUsb'] = ""
data_json['viruses'] = []
return data_json | 9411f3a525df9e68f53fba94da679bd8d5b34013 | 5,878 |