content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k)
---|---|---
def score_vectore(vector):
    """
    :type vector: list of float
    :return: tuple (x, y) of accumulated scores
    """
    x = 0.
    y = 0.
    for i in range(len(vector)):
        if i % 2 == 0:
            if vector[i] > 12:
                x += vector[i]
            else:
                # Note: for i == 0, vector[i - 1] wraps around to the last element.
                y += vector[i] + min(vector[i - 1], vector[i])
    return x, y | 8fc60cb3cec65d5a3b8f0fea62777739e17249a4 | 6,576 |
def _compound_register(upper, lower):
"""Return a property that provides 16-bit access to two registers."""
def get(self):
return (upper.fget(None) << 8) | lower.fget(None)
    def set(self, value):
        upper.fset(None, value >> 8)
        # Mask so only the low byte reaches the lower register.
        lower.fset(None, value & 0xff)
return property(get, set) | 00f315cc4c7f203755689adb5004f152a8b26823 | 6,577 |
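A minimal usage sketch (the register class and its byte-masking properties below are hypothetical, invented for illustration); note that `fget`/`fset` are called with `None` as `self`, so the wrapped properties must tolerate that:

class Regs:
    _hi, _lo = 0, 0
    hi = property(lambda self: Regs._hi, lambda self, v: setattr(Regs, '_hi', v & 0xff))
    lo = property(lambda self: Regs._lo, lambda self, v: setattr(Regs, '_lo', v & 0xff))
    hl = _compound_register(hi, lo)

r = Regs()
r.hl = 0x1234
assert (r.hi, r.lo) == (0x12, 0x34) and r.hl == 0x1234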
import os
def get_file_extension(path: str) -> str:
    """Return the file's extension (the text after the last dot)."""
    # Note: returns the whole basename when the name contains no dot.
    return os.path.basename(path).split(".")[-1]
import pandas
def label_by_wic(grasp_wic, exclude_C0=False):
"""Label each grasp by the whiskers it contains
grasp_wic : DataFrame
Index: grasp keys. Columns: whisker. Values: binarized contact.
exclude_C0 : bool
If False, group by all whiskers.
If True, ignore C0, and group only by C1, C2, and C3.
But label C0-only contacts as C0.
"""
# Set grouping_whiskers
if exclude_C0:
grouping_whiskers = ['C1', 'C2', 'C3']
else:
grouping_whiskers = ['C0', 'C1', 'C2', 'C3']
# Init return variable
res = pandas.Series(
['blank'] * len(grasp_wic), index=grasp_wic.index).rename('label')
# Group
gobj = grasp_wic[['C0', 'C1', 'C2', 'C3']].groupby(grouping_whiskers)
for included_mask, sub_grasp_wic in gobj:
# Generate label by joining all whiskers in this group
label = '-'.join(
[w for w, w_in in zip(grouping_whiskers, included_mask) if w_in])
if label == '':
# This should only happen if exclude_C0 and on the C0 group
assert exclude_C0
assert (sub_grasp_wic['C0'] == 1).all()
            assert (sub_grasp_wic.drop('C0', axis=1) == 0).all().all()
# So label it C0
label = 'C0'
# Assign
res.loc[sub_grasp_wic.index] = label
# Error check
assert 'blank' not in res.values
return res | 0f1e552c68be0b77bad442b2432e071a74db4947 | 6,580 |
def _chordfinisher(*args, **kwargs):
"""
    Needs to run at the end of a chord to delay the variant parsing step.
    http://stackoverflow.com/questions/15123772/celery-chaining-groups-and-subtasks-out-of-order-execution
"""
return "FINISHED VARIANT FINDING." | b68d09e755c2da468b98ab0466821770d2f7f4a7 | 6,581 |
import re
def check_id(id):
"""
    Check whether an id is valid (a 24-character lowercase hex string)
:param id: The id
:return: The result
"""
return bool(re.match(r"^[a-f0-9]{24}$", id)) | f336d34de12f4f5520d4c88a838ebdb396857d2b | 6,582 |
import unicodedata
def normalize_text(text: str) -> str:
"""Normalize the text to remove accents
and ensure all the characters are valid
ascii symbols.
Args:
text : Input text
Returns:
Output text
"""
nfkd_form = unicodedata.normalize("NFKD", text)
only_ascii = nfkd_form.encode("ASCII", "ignore")
return only_ascii.decode() | fa1c5362caa9946e79152f9e14ccf2131754f258 | 6,584 |
def rotate_y(x, z, cosangle, sinangle):
"""3D rotaion around *y* (roll). *x* and *z* are values or arrays.
Positive rotation is for positive *sinangle*. Returns *xNew, zNew*."""
return cosangle*x + sinangle*z, -sinangle*x + cosangle*z | 0a1b28548f771b9ca8cec29ba4060be7b0919182 | 6,585 |
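A quick sanity check (a sketch with plain floats, though arrays work the same way): rotating the unit x-vector by 90 degrees about y should land on -z, up to floating-point error.

import math
x_new, z_new = rotate_y(1.0, 0.0, math.cos(math.pi / 2), math.sin(math.pi / 2))
assert abs(x_new) < 1e-12 and abs(z_new + 1.0) < 1e-12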
def or_operator():
"""|: Bitwise "or" operator."""
class _Operand:
def __or__(self, other):
return " ~or~ ".join(('east coast', other))
return _Operand() | 'dirty south' | 9ccfc124dd6c7aae8035b336788cc07cdff983d1 | 6,587 |
import re
def _get_version(basename):
"""Returns the _get_next_version of a file."""
match = re.search(r"\(\d*\)", basename)
if match:
v = int(match.group(0)
.replace('(', '')
.replace(')', ''))
return v
return 0 | 7340b74dca04ecb5520c03b046ec650c34527b4c | 6,588 |
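A couple of illustrative calls (the filenames are made up):

assert _get_version("report (3).txt") == 3
assert _get_version("report.txt") == 0  # no "(n)" marker present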
def utf8_product_page():
"""
Single product page with utf8 content.
"""
    with open('data/product_utf8.html', encoding='utf-8') as f:
return ''.join(f) | 56a70e463cebdaef632ebd2997be4a523289da02 | 6,589 |
def get_formatted_timestamp(app_type):
"""Different services required different date formats - return the proper format here"""
if app_type in {'duo', 'duo_admin', 'duo_auth'}:
return 1505316432
elif app_type in {'onelogin', 'onelogin_events'}:
return '2017-10-10T22:03:57Z'
elif app_type in {'gsuite', 'gsuite_admin', 'gsuite_drive',
'gsuite_login', 'gsuite_token', 'salesforce'}:
return '2017-06-17T15:39:18.460Z'
elif app_type in {'box', 'box_admin_events'}:
return '2017-10-27T12:31:22-07:00' | f5d4f2ac1d30383849b6149a46525e67439229df | 6,595 |
import sys
import os
def UseWin64():
"""Check if we are on 64 bit windows."""
if sys.platform != 'win32':
return False
arch32 = os.environ.get('PROCESSOR_ARCHITECTURE', 'unk')
arch64 = os.environ.get('PROCESSOR_ARCHITEW6432', 'unk')
    return arch32 == 'AMD64' or arch64 == 'AMD64' | 1a122fa6fff489a1c857082648de2b10c7d8adb1 | 6,596 |
def parse_address(address):
"""Convert host:port or port to address to pass to connect."""
if ':' not in address:
return ('', int(address))
host, port = address.rsplit(':', 1)
return (host, int(port)) | 06eb172974c4e75d33ae205f952e8533c88acfeb | 6,597 |
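Example calls (host and port values are illustrative):

assert parse_address("8080") == ('', 8080)
assert parse_address("example.com:443") == ('example.com', 443)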
import os
import glob
def directory_contents(path_to_dir):
"""
Returns list of paths to files and folders relatively from path_to_dir.
"""
    cur_dir_backup = os.getcwd()
    os.chdir(path_to_dir)
    try:
        files = glob.glob('**', recursive=True)
    finally:
        # Always restore the original working directory, even if glob fails.
        os.chdir(cur_dir_backup)
    return files | 669ac9c59582b8e2764f4f37a64a76f1ec17309b | 6,598 |
def version():
"""donghuangzhong version"""
return "0.0.1" | ad5d9834dddad46c2f4add31f46ea470bf370304 | 6,599 |
def limit_to_value_max(value_max, value):
"""
:param
    1.(int) value_max -- value that should not be exceeded
2.(int) value -- actual value
:return
1. return a value in the given range bound with value_max
"""
if value > value_max:
return value_max
elif value < -value_max:
return -value_max
else:
return value | a568bc1febe9a0cb6115efb4c95c0e1705787bfe | 6,601 |
import numpy as np
def extract_municipality_hashtags(df):
""" This function takes a twitter dataframe as an input then the output is the dataframe with 2 new columns namely a hashtag
column and a municipality column.
Example
------
    if the tweet contains the @mention '@CityPowerJhb' then the corresponding output in the municipality column should be
    Johannesburg.
    The function also extracts hashtags and saves them under the hashtags column as a list in the dataframe.
"""
mun_dict = {
'@CityofCTAlerts' : 'Cape Town',
'@CityPowerJhb' : 'Johannesburg',
'@eThekwiniM' : 'eThekwini' ,
'@EMMInfo' : 'Ekurhuleni',
'@centlecutility' : 'Mangaung',
'@NMBmunicipality' : 'Nelson Mandela Bay',
'@CityTshwane' : 'Tshwane'
}
g = df['Tweets'].str.findall(r'@.*?(?=\s|$)') # finds all @mentions
    df['municipality'] = g.apply(lambda x: [mun_dict[k] for k in mun_dict.keys() if k in x])  # look up municipality names for the @mentions found in each tweet
df["municipality"]= df['municipality'].apply(lambda x: ''.join(x) if len(x) > 0 else np.nan) # removes the list format to string format
df['hashtags'] = df['Tweets'].str.findall(r'#.*?(?=\s|$)') # finds all the hashtags and stores them in the newly created hashtags column
df["hashtags"]= df['hashtags'].apply(lambda x: ','.join(x).lower().split(',') if len(x) > 0 else np.nan) # makes all the hashtags lowercase
return df | 1c58e3154f57ad82a8129c5ed765a622b12b8d08 | 6,602 |
import re
import pandas
def wig_to_dataframe(infile, step, format):
"""Read a wig file into a Pandas dataframe
infile(str): Path to file
Returns:
Dataframe
"""
fs = open(infile, 'r')
coverage_data = []
pos = 0
chr = ""
for line in fs.readlines():
try:
f = float(line)
coverage_data.append([chr, f, pos])
pos += 5000
except ValueError:
reresult = re.search("chrom=(\w*)", line) # find chromosome name in line
if reresult:
last_pos = [chr, 0, 249255001] # writen in every set to give same scale when plotting
coverage_data.append(last_pos)
chr = reresult.group(1) # start working on next chromosome
pos =0
fs.close()
df = pandas.DataFrame(coverage_data, columns= format)
return df | 07873b340b450ef3d0eb3d7715afb9b204a8277e | 6,603 |
def _t_P(P):
"""Define the boundary between Region 2 and 3, T=f(P)
>>> "%.2f" % _t_P(16.52916425)
'623.15'
"""
    n = [0, 0.34805185628969e3, -0.11671859879975e1, 0.10192970039326e-2, 0.57254459862746e3, 0.1391883977870e2]
return n[4]+((P-n[5])/n[3])**0.5 | 196f4fae80d9425b0f3a06213c21f77d3049e401 | 6,606 |
import inspect
def add_as_function(cls):
""" Decorator for classes. Automatically adds functional interface for `call` method of class.
For example, `ConvBlock` class is transformed into `conv_block` function, while
`Conv1DTranspose` class is transformed into `conv1d_transpose` function.
"""
name = cls.__name__
func_name = ''.join('_' + c.lower()
if (c.isupper() and (i != len(name)-1) and name[i+1].islower()) else c.lower()
for i, c in enumerate(name)).strip('_')
def func(inputs, *args, **kwargs):
# We also want to use `training` or `is_training` indicators as arguments for call
call_args = [inputs]
training = kwargs.get('training') or kwargs.get('is_training')
if (training is not None) and (len(inspect.getfullargspec(cls.__call__)[0]) > 2):
call_args.append(training)
return cls(*args, **kwargs)(*call_args)
func.__doc__ = cls.__doc__
module = inspect.getmodule(inspect.stack()[1][0])
setattr(module, func_name, func)
return cls | 38f2e604e03e5a356450569bbfe7d0764bd784cb | 6,607 |
def expand_basic(state):
"""
Simple function which returns child states by appending an available move to
current state.
"""
assert(len(state) < 9)
    # Calculate set difference to get remaining moves.
n = tuple(set(range(9)) - set(state))
# Create tuple of available new states and return to caller.
c = tuple(state + (q,) for q in n)
return c | 0889a21b043f6f675d133fed6e3c825eb69f4a82 | 6,608 |
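A small illustration for a tic-tac-toe-style state (assuming states are tuples of cell indices already played):

children = expand_basic((0, 4))
assert len(children) == 7          # 9 cells minus the 2 already used
assert (0, 4, 8) in children       # each child appends one remaining move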
def get_reachable_observed_variables_for_inferred_variables(model, observed=set()):
"""
After performing inference on a BayesianModel, get the labels of observed variables
("reachable observed variables") that influenced the beliefs of variables inferred
to be in a definite state.
Args
model: instance of BayesianModel class or subclass
observed: set,
set of labels (strings) corresponding to variables pinned to a definite
state during inference.
Returns
dict,
key, value pairs {source_label_id: reachable_observed_vars}, where
source_label_id is an int or string, and reachable_observed_vars is a list
of label_ids
"""
if not observed:
return {}
source_vars = model.get_unobserved_variables_in_definite_state(observed)
return {var: model.reachable_observed_variables(var, observed) for var in source_vars} | a693d6c57969b38b357a4a57fe2e868650b514b6 | 6,610 |
def get_stopwords():
"""common stopwords to skip when checking for article names (derived from nltk)
"""
return [
"i",
"me",
"my",
"myself",
"we",
"our",
"out",
"ours",
"ourselves",
"you",
"your",
"he",
"him",
"his",
"himself",
"it",
"its",
"itself",
"they",
"them",
"that",
"their",
"what",
"which",
"who",
"whom",
"this",
"that",
"those",
"these",
"am",
"is",
"are",
"what",
"was",
"be",
"been",
"being",
"have",
"has",
"had",
"having",
"do",
"does",
"did",
"doing",
"a",
"an",
"the",
"and",
"but",
"if",
"or",
"because",
"as",
"until",
"while",
"of",
"at",
"by",
"for",
"with",
"about",
"against",
"between",
"into",
"through",
"during",
"before",
"after",
"above",
"below",
"to",
"from",
"up",
"down",
"in",
"out",
"on",
"off",
"over",
"under",
"again",
"further",
"then",
"once",
"here",
"there",
"when",
"where",
"why",
"how",
"all",
"any",
"both",
"each",
"few",
"more",
"most",
"other",
"some",
"such",
"no",
"nor",
"not",
"own",
"same",
"so",
"than",
"too",
"very",
"s",
"t",
"can",
"will",
"just",
"dont",
"should",
"not",
] | 861037bad40204961f205f03399b4b6bbe0e6b2d | 6,611 |
def get_device_number(connection_str):
"""Return the integer device number from the connection string or raise ValueError
if the connection string is not in the format "device <n>" with positive n."""
try:
prefix, num = connection_str.split(' ')
num = int(num)
if prefix != 'device' or num <= 0:
raise ValueError
except (TypeError, ValueError):
msg = f"""Connection string '{connection_str}' not in required format 'device
<n>' with n > 0"""
raise ValueError(dedent(msg)) from None
return num | 396a13d4449166e0d63e830b17b07b3b22a208e7 | 6,612 |
import os
def get_secret_id(source="~/.vault-id"):
""" Reads a vault user-id (UUID) from a file."""
source = os.path.abspath(os.path.expanduser(source))
user_id = None
# pylint: disable=invalid-name
    if os.path.isfile(source):
        with open(source, "r") as fd:
            user_id = fd.read().strip()
return user_id | 6d584be71cbc52fe43b826690348441d4f54c5fd | 6,614 |
from typing import Any
def resolve_mock_target(target: Any) -> str:
"""
`mock.patch` uses a str-representation of an object to find it, but this doesn't play well with
refactors and renames. This method extracts the str-representation of an object.
This method will not handle _all_ kinds of objects, in which case an AttributeError will most likely be raised.
"""
return f"{target.__module__}.{target.__name__}" | 4c7520d2b17daaf79d1de2d9eca4f615e401fb12 | 6,615 |
import os
import subprocess
def build_and_push_docker_image(args):
"""docker-py doesn't seem to work, so use subprocess to call Docker"""
# This could be configurable, but there isn't much point.
HTTP_PORT = 8081
image_name = f"{args.dockerhub_repo}/scpca_portal_api"
# Change dir so docker can see the code.
os.chdir("../api")
system_version_build_arg = "SYSTEM_VERSION={}".format(args.system_version)
http_port_build_arg = "HTTP_PORT={}".format(HTTP_PORT)
# check_call() will raise an exception for us if this fails.
completed_command = subprocess.check_call(
[
"docker",
"build",
"--tag",
image_name,
"--build-arg",
system_version_build_arg,
"--build-arg",
http_port_build_arg,
"-f",
"Dockerfile.prod",
".",
],
)
docker_login_command = ["docker", "login"]
if "DOCKER_ID" in os.environ:
docker_login_command.extend(["--username", os.environ["DOCKER_ID"]])
if "DOCKER_PASSWORD" in os.environ:
docker_login_command.extend(["--password", os.environ["DOCKER_PASSWORD"]])
try:
completed_command = subprocess.check_call(docker_login_command)
    except subprocess.CalledProcessError:
        print("Failed to login to docker.")
        return 1
if completed_command != 0:
return completed_command
completed_command = subprocess.check_call(["docker", "push", image_name])
# Change dir back so terraform is run from the correct location:
os.chdir("../infrastructure")
return completed_command | 3878599b53323dcb6dcddced6eda0ec421ba7781 | 6,616 |
import importlib
def getattr_in_module(module_name: str, func_name: str):
""" 在某个模块中获取属性
Args:
module_name: 模块名
func_name: 属性名
Returns:
属性
"""
m = importlib.import_module(module_name)
return getattr(m, func_name) | e0ceec50c063cea8350c04a4f048ca53d75ab5f6 | 6,617 |
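For example, pulling a constant out of the standard library by name:

import math
assert getattr_in_module("math", "pi") == math.pi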
import numpy
def cp_ls_cholesky_factor_objective(beta_gamma, norb, nthc, cholesky_factor, calcgrad=False):
"""cholesky_factor is reshaped into (norb, norb, num_cholesky)
Cholesky factor B_{ab,x}
Least squares fit objective ||B_{ab,x} - \sum_{r}beta_{a,x}beta_{b,x}gamma_{ab,x}||
This function provides the objective function value and gradient with respect to beta and gamma
"""
# compute objective
num_cholfactors = cholesky_factor.shape[-1]
beta_bR = beta_gamma[:norb*nthc].reshape((norb, nthc))
gamma_yR = beta_gamma[norb*nthc:norb*nthc+nthc*num_cholfactors].reshape((num_cholfactors, nthc))
beta_abR = numpy.einsum('aR,bR->abR', beta_bR, beta_bR)
chol_approx = numpy.einsum('abR,XR->abX', beta_abR, gamma_yR)
delta = cholesky_factor - chol_approx
fval = 0.5 * numpy.sum((delta)**2)
if calcgrad:
# compute grad
# \partial O / \partial beta_{c,s}
grad_beta = -2 * numpy.einsum('Cbx,bS,xS->CS', delta, beta_bR, gamma_yR, optimize=['einsum_path', (0, 2), (0, 1)])
grad_gamma = -numpy.einsum('abY,aS,bS->YS', delta, beta_bR, beta_bR, optimize=['einsum_path', (1, 2), (0, 1)])
grad = numpy.hstack((grad_beta.ravel(), grad_gamma.ravel()))
return fval, grad
else:
return fval | cfa02ca214c0d0638243f916afdbfa052dbc9efe | 6,619 |
import torch
def _get_product_features(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
Get outer product of 2 tensors along the last dimension.
All dimensions except last are preserved. The last dimension is replaced
with flattened outer products of last-dimension-vectors from input tensors
This is a vectorized implementation of (for 2D case):
for i in range(x.shape[0]):
out[i, :] = torch.outer(x[i, :], y[i, :]).flatten()
For 2D inputs:
Input shapes:
x: (batch, feature_dim_x)
y: (batch, feature_dim_y)
Output shape:
(batch, feature_dim_x*feature_dim_y)
"""
return torch.einsum("...i,...j->...ij", (x, y)).flatten(start_dim=-2) | cf438a799b749563ea9509184cf117f4075730ab | 6,620 |
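A shape check (batch and feature sizes are arbitrary):

import torch
x = torch.randn(4, 3)
y = torch.randn(4, 5)
assert _get_product_features(x, y).shape == (4, 15)  # 3 * 5 per batch row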
def slice_or_index(index):
"""Return index or slice the array [:]."""
return slice(None) if index is None else index | 7bdf34a0667cfcc387c41bfcfc000d0881c3f6cd | 6,622 |
def get_bits(byte_size, number):
"""Returns last byte_size*8 bits from number."""
res = []
for i in range(byte_size * 8):
res.append(str(number % 2))
number //= 2
res.reverse()
return res | ad41d3c9f1192b2f7026caa0f42a084ea39c82fa | 6,623 |
import copy
def greedy_find_path(entrance, exits, corridors):
""" Find ANY path connecting input and output, """
print("corridors at start: ", corridors)
#max_flow = float("inf")
# Not the case.
#if entrance in exits:
# return 1/0
cur_pos = entrance
path = [entrance]
max_flows = [2000000]
# Create a local copy of corridors leading to not visited rooms.
not_visited_corridors = copy.deepcopy(corridors)
# Find path - greedy select corridors with biggest flows.
while (cur_pos not in exits):
# Remove all corridors leading to visited rooms, including entrance and "recurrent corridors"!
not_visited_corridors[cur_pos][entrance] = 0
not_visited_corridors[cur_pos][cur_pos] = 0 # recurrent
#for visited in path:
# not_visited_corridors[cur_pos][visited] = 0
print("not_visited_corridors: ", not_visited_corridors)
# Find max flow in remaining corridors.
max_flow = max(not_visited_corridors[cur_pos])
corridor = not_visited_corridors[cur_pos].index(max_flow)
print ("found corridor {} with flow {}".format(corridor, max_flow))
# Reset that path.
not_visited_corridors[cur_pos][corridor] = 0
# If there is no flow - move back!
if max_flow == 0:
# Move back.
path.pop()
max_flows.pop()
# if we came back to entrance - there is no other path.
if not path:
return 0
cur_pos = path[-1]
print ("moving back to ", cur_pos)
continue
# "Select that corridor.
max_flows.append(max_flow)
path.append(corridor)
print ("selecting corridor {} with flow {}".format(corridor, max_flow))
print ("path = ", path)
print ("max_flows = ", max_flows)
# Move! ;)
cur_pos = corridor
print ("whole path = ", path)
print ("whole max_flows = ", max_flows)
min_flow = min(max_flows)
# Check whether there in fact is any flow!
if min_flow == 0:
        return 0
# "Realize" flow from entrance to exit - in original corridors!
print ("sending {} bunnies from {} through {}".format(min_flow, entrance, path))
cur_pos = entrance
for corridor, max_flow in zip(path, max_flows):
corridors[cur_pos][corridor] = corridors[cur_pos][corridor] - min_flow
cur_pos = corridor
print("corridors at end: ", corridors)
# Return number of bunnies that reached the escape pods;)
return min_flow | c47065d2d9e6913009cb4e41daf0730c7a04c3ef | 6,624 |
import json
from networkx.readwrite import json_graph
def write_nxgraph_to_json(g, output):
"""
Write a networkx graph as JSON to the specified output
Args:
g (networkx.Graph): graph to write as JSON
output (filelike): output to write to
"""
jsond = json_graph.node_link_data(g)
return json.dump(jsond, output) | a909fd3f4e8c87bb3fe059b310819570758c553e | 6,625 |
from typing import Optional
from typing import List
def clean_eisenhower(raw_eisen: Optional[List[str]]) -> List[str]:
"""Clean the raw Eisenhower values from Notion."""
if raw_eisen is None:
return []
return [e for e in raw_eisen if e != ''] | a8dd48a307455f20b8dd7afbf5b5aec1835c7a2d | 6,626 |
import os
def list_keys(key_prefix, n, marker=''):
    """
    List keys that start with key_prefix (<> key_prefix itself)
    @n = number of items to return
    @marker = name of last item
    """
    key_list = []
    i = 0
    for file in sorted(os.listdir(key_prefix)):
        key = os.path.join(key_prefix, file)
        if marker and key <= marker:
            continue  # resume listing after the marker
        key_list.append(key)
        i += 1
        if i == n:
            break
    # The second element reports whether the listing was truncated at n items.
    return key_list, (i == n) | 26b0d282ac745da523854e6a59773aa136fc54f3 | 6,627 |
def get_instance_type(entity_name, instance_dict=None):
"""
:param entity_name: name of an entity;
:param instance_dict: dictionary that contains the instance type of each entity;
:return: the instance type of the provided entity;
Get the instance type of a given entity, as specified by the instance_dict;
If the entity is not present, return "owl#Thing";
"""
if (instance_dict is None) or (entity_name not in instance_dict):
return "owl#Thing"
else:
return instance_dict[entity_name] | 0fead313271ee8b2b0d7be0d8048d506657b4944 | 6,628 |
def file_keyword(request):
"""Return multiple possible styles for the bumpsemver:file keyword."""
return request.param | 86700f811786a99b290557e9498bba03b800618d | 6,630 |
def summarize_results(warmup_rewards, rewards):
"""
Print a summary of running a Bandit algorithm for a number of runs
"""
warmup_reward = warmup_rewards.sum()
rewards = rewards.sum(axis=-1)
r_mean = rewards.mean()
r_std = rewards.std()
r_total = r_mean + warmup_reward
print(f"Expected Reward : {r_total:0.2f} ± {r_std:0.2f}")
return r_total, r_std | 2a3b786fdc835d312fe826600f46f4ab7a7ccaa7 | 6,631 |
def add_scalebar(
ax, left, right, label, fontsize=15,
ax_y=-0.01,
):
"""
"""
ax.hlines(ax_y, left, right, color='k', linewidth=3,
transform=ax.get_xaxis_transform(),
clip_on=False,
)
ax.text(right, ax_y-0.01, label,
va='top', ha='right',
transform=ax.get_xaxis_transform(),
fontsize=fontsize)
# end scale bar
return ax | 077d8a095c548085e1544050f1144bcb51f307b9 | 6,633 |
def find_keyword(URL, title, keywords):
""" find keyword helper function of history_list """
for keyword in keywords:
        # case insensitive; parentheses make the length guard apply to both checks
        if len(keyword) > 0 and ((URL is not None and keyword.lower() in URL.lower()) or (title is not None and keyword.lower() in title.lower())):
return True
return False | b956cc3744411a409a227cb80423dcf52ca9d248 | 6,634 |
def parsetypes(dtype):
"""
Parse the types from a structured numpy dtype object.
Return list of string representations of types from a structured numpy
dtype object, e.g. ['int', 'float', 'str'].
Used by :func:`tabular.io.saveSV` to write out type information in the
header.
**Parameters**
**dtype** : numpy dtype object
Structured numpy dtype object to parse.
**Returns**
**out** : list of strings
List of strings corresponding to numpy types::
[dtype[i].name.strip('1234567890').rstrip('ing') \
for i in range(len(dtype))]
"""
return [dtype[i].name.strip('1234567890').rstrip('ing')
for i in range(len(dtype))] | 6f373135f751b243104cc7222326d995048d7c93 | 6,635 |
import numpy as np
def get_swarm_yspans(coll, round_result=False, decimals=12):
"""
Given a matplotlib Collection, will obtain the y spans
for the collection. Will return None if this fails.
Modified from `get_swarm_spans` in plot_tools.py.
"""
_, y = np.array(coll.get_offsets()).T
try:
if round_result:
            return np.around(y.min(), decimals), np.around(y.max(), decimals)
else:
return y.min(), y.max()
except ValueError:
return None | 2561f04243e63dfa87896e891ae337ab9be310a7 | 6,637 |
def secondes(heure):
"""Prend une heure au format `H:M:S` et renvoie le nombre de secondes
correspondantes (entier).
On suppose que l'heure est bien formattée. On aura toujours un nombre
d'heures valide, un nombre de minutes valide et un nombre de secondes valide.
"""
H, M, S = heure.split(":")
return (3600 * int(H)) + (60 * int(M)) + int(S) | 33d380005479d66041e747130a4451c555baf497 | 6,638 |
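For instance:

assert secondes("1:02:03") == 3723  # 3600 + 120 + 3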
def create_mapping(alphabet):
"""
Change list of chars to list of ints, taking sequencial natural numbers
:param alphabet: list of char
:return: dictionary with keys that are letters from alphabet and ints as values
"""
mapping = {}
for (letter, i) in zip(alphabet, range(len(alphabet))):
mapping[letter] = i
return mapping | 20ef12101597206e08ca0ea399d97af0f5c8b760 | 6,639 |
def triplets(a, b, c):
"""
    Time: O(n lg n), dominated by sorting
    Space: O(n)
-
n = a_len + b_len + c_len
"""
a = list(sorted(set(a)))
b = list(sorted(set(b)))
c = list(sorted(set(c)))
ai = bi = ci = 0
a_len, c_len = len(a), len(c)
answer = 0
while bi < len(b):
while ai < a_len and a[ai] <= b[bi]:
ai += 1
while ci < c_len and b[bi] >= c[ci]:
ci += 1
answer += ai * ci
bi += 1
return answer | d15d340d0a4b870124bbfd8ca6f40358a27f7555 | 6,641 |
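A worked example with small inputs: for b = 2 there is one valid a (1) and two valid c (1, 2); for b = 4 there are two valid a and three valid c.

assert triplets([1, 3, 5], [2, 4], [1, 2, 3]) == 1 * 2 + 2 * 3  # == 8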
import csv
def csv_to_list(filename: str) -> list:
"""Receive an csv filename and returns rows of file with an list"""
with open(filename) as csv_file:
reader = csv.DictReader(csv_file)
csv_data = [line for line in reader]
return csv_data | d7344496271de6edcb3fc1df30bb78dd00980c30 | 6,643 |
def sqrt(x):
"""
Calculate the square root of argument x.
"""
    # initial guess for square root
    z = x/2.0
    # Continuously improve the guess
    # Adapted from https://tour.golang.org/flowcontrol/8
while abs(x - (z*z)) > 0.0001:
z = z-(z*z - x) / (2*z)
return z | 5598eb37bc56e3f514f75be0deae0b6a94c3831e | 6,644 |
import math
def eq11 (A):
"""Chemsep equation 11
:param A: Equation parameter A"""
return math.exp(A) | 354b33a14f17de2862e5674edc421045c3dd21a9 | 6,645 |
def extractor_to_question(extractor: str):
"""
return questions for a extractor in a tuple
:param extractor:
:return:
"""
if extractor == 'action':
return ('who', 'what')
elif extractor == 'cause':
return ('why',)
elif extractor == 'environment':
return ('where', 'when')
elif extractor == 'method':
return ('how',)
else:
return ('no_mapping',) | 9f32562b426b59c4e44efab32064045796ec27ed | 6,646 |
import copy
def update_dict(original, new):
"""
Update nested dictionary (dictionary possibly containing dictionaries)
If a field is present in new and original, take the value from new.
If a field is present in new but not original, insert this field
:param original: source dictionary
:type original: dict
:param new: dictionary to take new values from
:type new: dict
:return: updated dictionary
:rtype: dict
"""
    updated = copy.deepcopy(original)
    for key, value in original.items():
        if key in new.keys():
            if isinstance(value, dict):
                updated[key] = update_dict(value, new[key])
            else:
                updated[key] = new[key]
    # Insert fields present in new but missing from original, per the docstring.
    for key, value in new.items():
        if key not in original:
            updated[key] = copy.deepcopy(value)
    return updated | 1608d28321d294943f4c955e42939b054966751f | 6,648 |
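A small illustration of the nested merge (the values are made up); note that the top-level key present only in `new` is inserted, as the docstring describes:

original = {"a": 1, "nested": {"x": 1, "y": 2}}
new = {"nested": {"y": 20}, "extra": 5}
assert update_dict(original, new) == {"a": 1, "nested": {"x": 1, "y": 20}, "extra": 5}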
def solve():
"""
Replace this with a nice docstring
that describes what this function is supposed
to do.
:return: The answer required.
"""
return -1 | f054515e7bb23bb84ecfb1847410fa111ec431c6 | 6,649 |
def _compute_third(first, second):
""" Compute a third coordinate given the other two """
return -first - second | 57ea03c71f13f3847d4008516ec8f0f5c02424af | 6,651 |
def decipher(criptotext):
"""
    Deciphers the message, recovering the plaintext as long as it was
    encrypted with XOR (key 1).
    Parameter:
    criptotext -- the message to decipher.
    """
messagedecrip = ""
for elem in criptotext:
code = ord(elem)^1
messagedecrip += chr(code)
return messagedecrip | c90fc56fda9e65690a0a03ea7f33008883feb3f4 | 6,652 |
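Since XOR with key 1 just flips the low bit of each character code, the cipher is its own inverse; for example:

assert decipher("Idmmn") == "Hello"
assert decipher(decipher("Hello")) == "Hello"  # applying it twice round-trips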
def mag(initial, final):
"""
calculate magnification for a value
"""
return float(initial) / float(final) | ab996ee84ff588ce41086927b4da1a74e164278a | 6,653 |
import numpy
def _pfa_check_stdeskew(PFA, Grid):
"""
Parameters
----------
PFA : sarpy.io.complex.sicd_elements.PFA.PFAType
Grid : sarpy.io.complex.sicd_elements.Grid.GridType
Returns
-------
bool
"""
if PFA.STDeskew is None or not PFA.STDeskew.Applied:
return True
cond = True
if Grid.TimeCOAPoly is not None:
timecoa_poly = Grid.TimeCOAPoly.get_array(dtype='float64')
if timecoa_poly.shape == (1, 1) or numpy.all(timecoa_poly.flatten()[1:] < 1e-6):
PFA.log_validity_error(
'PFA.STDeskew.Applied is True, and the Grid.TimeCOAPoly is essentially constant.')
cond = False
# the Row DeltaKCOAPoly and STDSPhasePoly should be essentially identical
if Grid.Row is not None and Grid.Row.DeltaKCOAPoly is not None and \
PFA.STDeskew.STDSPhasePoly is not None:
stds_phase_poly = PFA.STDeskew.STDSPhasePoly.get_array(dtype='float64')
delta_kcoa = Grid.Row.DeltaKCOAPoly.get_array(dtype='float64')
rows = max(stds_phase_poly.shape[0], delta_kcoa.shape[0])
cols = max(stds_phase_poly.shape[1], delta_kcoa.shape[1])
exp_stds_phase_poly = numpy.zeros((rows, cols), dtype='float64')
exp_delta_kcoa = numpy.zeros((rows, cols), dtype='float64')
exp_stds_phase_poly[:stds_phase_poly.shape[0], :stds_phase_poly.shape[1]] = stds_phase_poly
exp_delta_kcoa[:delta_kcoa.shape[0], :delta_kcoa.shape[1]] = delta_kcoa
if numpy.max(numpy.abs(exp_delta_kcoa - exp_stds_phase_poly)) > 1e-6:
PFA.log_validity_warning(
'PFA.STDeskew.Applied is True,\n'
'and the Grid.Row.DeltaKCOAPoly ({}) and PFA.STDeskew.STDSPhasePoly ({})\n'
'are not in good agreement.'.format(delta_kcoa, stds_phase_poly))
cond = False
return cond | 987c492e1210114bf8eb129f60711f280b116a75 | 6,654 |
def create_list_from_dict(mydict):
"""
Converts entities dictionary to flat list.
Args:
mydict (dict): Input entities dictionary
Returns:
list
"""
outputs = []
for k, v in mydict.items():
if len(v) > 0:
for i in v:
outputs.append(i)
return outputs | 50fba98b7590bd7d243464cf45be24c4405f2cef | 6,656 |
from typing import List
def containsDuplicate(nums: List[int]) -> bool:
"""
Time: O(n)
Space: O(n)
"""
visited = set()
for n in nums:
if n in visited:
return True
else:
visited.add(n)
return False | 673544bcd10d31d185b65cb7c4b4330a0a7199a4 | 6,657 |
from datetime import datetime
def get_modtime(ftp, filename):
"""
Get the modtime of a file.
:rtype : datetime
"""
resp = ftp.sendcmd('MDTM ' + filename)
if resp[:3] == '213':
s = resp[3:].strip()
        mod_time = datetime.strptime(s, '%Y%m%d%H%M%S')
return mod_time
return datetime.min | 2a69d0448c093319392afafcfff96dc04ec225d0 | 6,659 |
def conform_json_response(api, json_response):
"""Get the right data from the json response. Expects a list, either like [[],...], or like [{},..]"""
if api=='cryptowatch':
return list(json_response['result'].values())[0]
elif api=='coincap':
return json_response['data']
elif api in {'poloniex', 'hitbtc', 'bitfinex', 'coinbase'}:
return json_response
else:
        raise Exception('API not supported', api, 'Response was ', json_response) | a9a2ec51edc13843d0b8b7ce5458bb44f4efd242 | 6,660 |
import os
import fnmatch
def find_matching(root_path,
relative_paths_to_search,
file_pattern):
"""
Given an absolute `root_path`, a list of relative paths to that
absolute root path (`relative_paths_to_search`), and a `file_pattern`
like '*.sql', returns information about the files. For example:
> find_matching('/root/path', 'models', '*.sql')
[ { 'absolute_path': '/root/path/models/model_one.sql',
'relative_path': 'models/model_one.sql',
'searched_path': 'models' },
{ 'absolute_path': '/root/path/models/subdirectory/model_two.sql',
'relative_path': 'models/subdirectory/model_two.sql',
'searched_path': 'models' } ]
"""
matching = []
for relative_path_to_search in relative_paths_to_search:
absolute_path_to_search = os.path.join(
root_path, relative_path_to_search)
walk_results = os.walk(absolute_path_to_search)
for current_path, subdirectories, local_files in walk_results:
for local_file in local_files:
absolute_path = os.path.join(current_path, local_file)
relative_path = os.path.relpath(
absolute_path, absolute_path_to_search)
if fnmatch.fnmatch(local_file, file_pattern):
matching.append({
'searched_path': relative_path_to_search,
'absolute_path': absolute_path,
'relative_path': relative_path,
})
return matching | 696e8b29a3d367f98498efef5d016e73a3359ff4 | 6,661 |
def json_format(subtitle, data):
"""
Format json to string
:param subtitle: description to text
:type subtitle: string
:param data: content to format
:type data: dictionary
"""
    msg = subtitle + ':\n'
    for name in data:
        msg += name + ': ' + data[name] + '\n'
    return msg.strip() | bb3392d7ad57a482b4175838858d316ecc5f56e1 | 6,665 |
def remove_dihedral(mol, a, b, c, d):
"""
utils.remove_dihedral
Remove a specific dihedral in RDkit Mol object
Args:
mol: RDkit Mol object
        a, b, c, d: Atom indices identifying the dihedral to remove (int)
Returns:
boolean
"""
if not hasattr(mol, 'dihedrals'):
return False
    for i, dihedral in enumerate(mol.dihedrals):
        if ((dihedral.a == a and dihedral.b == b and dihedral.c == c and dihedral.d == d) or
                (dihedral.d == a and dihedral.c == b and dihedral.b == c and dihedral.a == d)):
            del mol.dihedrals[i]
            return True
    # No matching dihedral was found.
    return False | 7e26e995fec97c5c6d2304e11d06fec03b990942 | 6,666 |
import torch
def thresh_ious(gt_dists, pred_dists, thresh):
"""
Computes the contact intersection over union for a given threshold
"""
gt_contacts = gt_dists <= thresh
pred_contacts = pred_dists <= thresh
inter = (gt_contacts * pred_contacts).sum(1).float()
    union = (gt_contacts | pred_contacts).sum(1).float()
iou = torch.zeros_like(union)
iou[union != 0] = inter[union != 0] / union[union != 0]
return iou | 9bd6244325acae0d3ebb5ffca46e0453a71000d1 | 6,667 |
def normalize_parameters(raw_parameters, minvals, maxvals):
"""takes in a list of parameters and does simple min/max normalization according to min/max values
INPUTS
raw_parameters: length n, containing parameters for a star
minvals: length n, minimum parameter values
maxvals: length n, max parameter values
OUTPUTS
normed_parameters: length n, normalized parameters
"""
normed_parameters = (raw_parameters - minvals) / (maxvals-minvals)
return normed_parameters | 53249bcecc8c3fd88beae7f377c6d5490693fba9 | 6,668 |
from numpy import cos, sin, array
def euler(a, b, c):
"""
Calculate a three dimensional rotation matrix from the euler angles.
@param a: alpha, angle between the x-axis and the line of nodes
@param b: beta, angle between the z axis of the different coordinate systems
@param c: gamma, angle between the line of nodes and the X-axis
"""
ca, cb, cc = cos(a), cos(b), cos(c)
sa, sb, sc = sin(a), sin(b), sin(c)
return array([[ cc * cb * ca - sc * sa, cc * cb * sa + sc * ca, -cc * sb],
[-sc * cb * ca - cc * sa, -sc * cb * sa + cc * ca, sc * sb],
[ sb * ca, sb * sa, cb ]]) | 406786a62b798b8e7dbbf57bd33f7243b5671e06 | 6,669 |
def make_stepped_schedule(steps):
"""
Helper to generate a schedule function to perform step-wise changes of a given optimizer hyper-parameter.
:param steps: List of tuples (start_epoch, value);
start_epochs should be increasing, starting at 0. E.g.
momentum_schedule = make_stepped_schedule([(0, 0.5), (5, 0.9)])
"""
    def get_value_for_epoch(epoch):
        # Search backwards for the last step whose start_epoch <= current epoch
        # and return the associated hyper-parameter value.
        return next(x[1][1] for x in reversed(list(enumerate(steps))) if x[1][0] <= epoch)
return get_value_for_epoch | 33fcd823bba6bcbd5fbf9edfbad780d79a1875f5 | 6,671 |
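Reusing the schedule from the docstring:

momentum_schedule = make_stepped_schedule([(0, 0.5), (5, 0.9)])
assert momentum_schedule(3) == 0.5   # before the step at epoch 5
assert momentum_schedule(7) == 0.9   # after it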
def prettyprint(s, toUpper=False):
"""Given a string, replaces underscores with spaces and uppercases the
first letter of each word ONLY if the string is composed of lowercased
letters. If the param, toUpper is given then s.upper is returned.
Examples: "data_quality" -> "Data Quality"
"copy_number_123" -> "Copy Number 123"
"My_own_title" -> "My own title"
"Hla" -> "Hla"
"""
    if toUpper:
        s = s.upper()
    else:
        s = s.title()
    s = s.replace("_", " ")
return s | 18a57e74a2e3df66db4ede337663f9d8993a986b | 6,672 |
import math
def _rsqrt(step_number, tail_start, body_value):
"""Computes a tail using a scaled reciprocal square root of step number.
Args:
step_number: Absolute step number from the start of training.
tail_start: Step number at which the tail of the curve starts.
body_value: Value relative to which the tail should be computed.
Returns:
A learning rate value that falls as the reciprocal square root of the step
number, scaled so that it joins smoothly with the body of a BodyAndTail
instance.
"""
return body_value * (math.sqrt(tail_start) / math.sqrt(step_number)) | 99138c88ae8d0fc0d49a5ac55e389cd5a40d5f90 | 6,674 |
def set_formula_in_row(ws, num, row, mr_col=1):
"""
This "loops" through single cells in a column and applies mixing ratio formulas. The format of the formulas is quite
    convoluted for legacy reasons. Existing procedures made adhering to this format easier; the gist is below.
Samples come in sets of 10, from a GC Run. They were kept in two columns in a spreadsheet, where the below is
repeated in sets of 5 rows (per GC Run). One GC Run looks like:
col1 | col2
-------------
samp1 | samp2
std1 | samp4
samp5 _______ # all beyond this line are quantified with standard 2
______| samp6
samp7 | std2
samp9 | samp10
Samples 1-5 use the first standard (sample 3) to quantify themselves, and samples 6-10 use the second standard
(sample 8) to quantify themselves. These rules are applied in the integration code itself, but the mixing ratios
and relevant statistics need to be calculated within the spreadsheet so the person integrating has access to them as
they integrate manually.
The sheet is loaded by add_formulas_and_format_sheet(), then the two columns are passed row-by-row to this function
to add the formulae before saving.
:param ws: object, open worksheet with openpyxl as the engine
:param num: int, absolute row number (excluding header
:param row: object, the row object generated by iterating over ws
:param mr_col: int, in [1,2]
:return: ws, the modified worksheet is passed back
"""
    assert mr_col in [1, 2], "Invalid mixing ratio column. It must be either 1 or 2"
    std_relnum = 1 if mr_col == 1 else 3
    # if it's the first mixing ratio column, the standard will be in the second row (0-indexed: 1)
    # if it's the second mixing ratio column, the standard will be in the fourth row (0-indexed: 3)
    standard_div_line = 2 if mr_col == 1 else 1
# samples 1-5 (excluding the standardd) are quantified using the first standard (sample 3)
# samples 6-10 (excluding the stnadrad) are quantified using the second standard (sample 8)
# so, in column 1, every sample up to (0-indexed) 2 should be quantified with standard 1, and
# everything after is quantified with standard 2. In column 2, that number changes to 1
relnum = num % 5
# num is 0-indexed, relnum is the position in this group of 5 rows (one run is 5r x 2c for 10 total runs)
    if relnum == std_relnum: return ws  # skip the standard for this column
for cell in row:
if cell.value is None: # assume cells with some value have been modified and should not be changed
rownum = cell.row # retrieve the real row number
            pa_cell = f'C{rownum}' if mr_col == 1 else f'D{rownum}'
# the peak area for a mixing ratio cell will always be C for column 1 and D for column 2, always same row
if relnum <= standard_div_line: # this is should be quantified by standard 1, in this column
std_pa_cell = f'C{rownum - relnum + 1}'
else: # it should be quantified by standard 2, in the next column
std_pa_cell = f'D{rownum - relnum + 3}'
cell.value = f'={pa_cell}/{std_pa_cell} * 2067.16'
        if relnum == 0 and mr_col == 1:  # the first line of every 5-row batch needs additional statistics added
# this does not need to be done twice, which is why it's done only for MR col 1
run_range = f'E{rownum}:F{rownum+4}' # all mixing cells in the run
std_range = f'C{rownum+1}, D{rownum+3}' # the two standards
run_median_cell = ws[f'G{rownum}']
run_rsd_cell = ws[f'H{rownum}']
std_med_cell = ws[f'I{rownum}']
std_rsd_cell = ws[f'J{rownum}']
run_rsd_cell.number_format = '0.00%'
std_rsd_cell.number_format = '0.00%'
run_median_cell.value = f'=MEDIAN({run_range})' # set formulas
run_rsd_cell.value = f'=STDEV({run_range})/{run_median_cell.coordinate}'
std_med_cell.value = f'=MEDIAN({std_range})'
std_rsd_cell.value = f'=STDEV({std_range})/{std_med_cell.coordinate}'
return ws | 2d4d438670e0760ce0158d5930390824634cce52 | 6,675 |
from datetime import datetime
import random
def partial_generator(api_list, seed = datetime.now().microsecond):
"""
    Randomly denies access to certain API functions
:param api_list: The list of functions in the api
:param seed: An int, allows for seeding the tests with a certain seed to create predictable results
:return: Returns an api where roughly 1/2 the functions are denied access
"""
random_gen = random.Random()
random_gen.seed(seed)
return {func: (lambda x: False) for
func in api_list if
random_gen.random() > 0.5} | c32f8e072d35a79028cd9dcbe1edb2b28edf867c | 6,677 |
def has_other_useful_output(content_text):
"""Returns whether |content_text| has other useful output.
Namely, console errors/warnings & alerts/confirms/prompts.
"""
prefixes = ('CONSOLE ERROR:', 'CONSOLE WARNING:', 'ALERT:', 'CONFIRM:',
'PROMPT:')
def is_useful(line):
return any(line.startswith(prefix) for prefix in prefixes)
lines = content_text.strip().splitlines()
return any(is_useful(line) for line in lines) | c1abfdaf681816314134ae33b5fd0fc48757dcc5 | 6,679 |
def to_nbsphinx(s):
"""Use the sphinx naming style for anchors of headings"""
s = s.replace(" ", "-").lower()
return "".join(filter(lambda c : c not in "()", s)) | 87b266c84f9b32c1d7357c5ed23ba4058ba33673 | 6,681 |
def ret_comb_index(bi_tot, get_indices=False, isotope=None):
"""
:param bi_tot:
:return:
"""
bi_1 = int(str(bi_tot)[-2:])
bi_2 = int(str(bi_tot)[0:-2])
if get_indices:
return (bi_1, bi_2 - 1, isotope)
else:
return (bi_1, bi_2 - 1) | 5647b273d4807aa876f6cd3bd28d287f60cf6296 | 6,682 |
def dct2spatial(image):
"""
Rearrange DCT image from [H//8, W//8, 64] to [H,W]
"""
assert image.shape[2] == 64
block_view = (image.shape[0], image.shape[1], 8, 8)
image_shape = (image.shape[0] * 8, image.shape[1] * 8)
block_permute = 0, 2, 1, 3
result = image.reshape(block_view).transpose(*block_permute).reshape(image_shape)
return result | a1052b2f851a59eabb28c672f01547b5c4797bbc | 6,683 |
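A shape check with random data (the block-grid dimensions are arbitrary):

import numpy as np
dct = np.random.rand(4, 6, 64)             # [H//8, W//8, 64]
assert dct2spatial(dct).shape == (32, 48)  # [H, W]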
import logging
def get_warning_logger() -> logging.Logger:
"""
Set up a logger to capture warnings and log them
"""
logging.captureWarnings(True)
warning_logger = logging.getLogger('py.warnings')
    # Reset handlers so repeated calls don't duplicate log output.
    if warning_logger.handlers:
        warning_logger.handlers = []
return warning_logger | 5cdf76ff66963851715b3302c02681340d2f6a72 | 6,685 |
def check_type_match(actual_val, expected_val) -> bool:
"""Check actual_val matches type of expected_val
The exception here is that expected_val can be
float, and in that case actual_val can be either
int or float
Args:
actual_val (Any): Actual type
expected_val (Any): Expected type
Returns:
bool: Whether the type matches
"""
if type(actual_val) == type(expected_val):
return True
# Make an exception here since int can be represented as float
# But not vice versa (for example, index)
if type(expected_val) == float and type(actual_val) == int:
return True
return False | 90f74b1978deb0c55b65a4faa2569f54fe6bceee | 6,686 |
def fieldtype(field):
"""Get the type of a django form field (thus helps you know what class to apply to it)"""
return field.field.widget.__class__.__name__ | e2d68cbdd72219de1a23095100c054a46c6c191b | 6,687 |
def get_team_repo(remote_url):
"""
    Takes remote URL (e.g., `git@github.com:mozilla/fireplace.git`) and
returns team/repo pair (e.g., `mozilla/fireplace`).
"""
if ':' not in remote_url:
return remote_url
return remote_url.split(':')[1].replace('.git', '') | 5e0120881557e9d95b697ab194fd7a8e8a84c68d | 6,689 |
import re
def check_if_partial(comments):
"""
Checks if comments contain info about Dat being a part of a series of dats (i.e. two part entropy scans where first
part is wide and second part is narrow with more repeats)
Args:
comments (string): Sweeplogs comments (where info on part#of# should be found)
Returns:
bool: True or False
"""
assert type(comments) == str
comments = comments.split(',')
comments = [com.strip() for com in comments]
part_comment = [com for com in comments if re.match('part*', com)]
if part_comment:
return True
else:
return False | 6f565bae6fe1cf5da4a11e0d511a25daa07aadd1 | 6,690 |
def read_file_info(filename):
"""
Read an info file.
Parameters
----------
filename : string
The name of file with cross-sectional area and length information.
Returns
-------
info : dict
The values of cross-sectional area and length of the specimens,
"""
    info = {}
    with open(filename, 'r') as fd:
        for line in fd:
            if line and (not line.isspace()) and (line[0] != '#'):
                key, val = line.split()
                info[key] = float(val)
    return info | c3f8c106126b45845c1202b34b19cad2ce2ae036 | 6,691 |
def position_specializations(position_block):
"""
:param bs4.Tag position_block: position block
:return: list
"""
position_block = position_block.find("div", {"class": "bloko-gap bloko-gap_bottom"})
profarea_name = position_block.find("span", {"data-qa": "resume-block-specialization-category"})
profarea_name = profarea_name.getText()
profarea_specializations = position_block.find("ul")
profarea_specializations = profarea_specializations.findAll("li", {"class": "resume-block__specialization",
"data-qa": "resume-block-position-specialization"})
profarea_specializations = [item.getText() for item in profarea_specializations]
profarea_specializations = [{"name": specialization_name, "profarea_name": profarea_name}
for specialization_name in profarea_specializations]
return profarea_specializations | c37a43b2c139780d0bcb0db2cb758165d155a526 | 6,692 |
def nice_layer_name(weight_key):
"""Takes a tuple like ('weights', 2) and returns a nice string like "2nd layer weights"
for use in plots and legends."""
return "Layer {num} {name}".format(num=weight_key[1] + 1, name=weight_key[0]) | c88dd554c2a3cf35e6d6e96131833738c19766ac | 6,693 |
def get_image(microscopy_collection, series):
"""Return microscopy image."""
image = microscopy_collection.image(s=series)
image = image[:, :, 0]
return image | d95b213c8db49e89d2bcaa3f7b69c0f4d546ac1c | 6,695 |
import argparse
def parse_command_line_args(args):
"""Parse command line arguments.
Args:
args ([str]): List of command line arguments and flags.
Returns
(argparse.Namespace): Parsed arguments from argparse
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-s', '--schema', type=str,
help='Schema name of the data')
parser.add_argument(
'-t', '--table', type=str,
help='Table name of the data')
parser.add_argument(
'-c', '--categorical', type=int, default=10,
help='Max number of distinct values in all categorical columns')
parser.add_argument(
'-f', '--input_file', type=str,
help='JSON file containing input parameters')
out = parser.parse_args(args)
# Validation
msg = ('Either an input file or both a table name and schema name must be '
'provided.')
if out.input_file is None:
if (out.schema is None) or (out.table is None):
raise ValueError(msg)
if out.input_file is not None:
if (out.schema is not None) or (out.table is not None):
raise ValueError(msg)
return out | 9cbc6c91e5c2f15baadb342a0e09e87e6be3da3b | 6,696 |
def convert_into_decimal(non_binary_number: str, base: int) -> int:
"""
Converts a non binary string number into a decimal number
:param non_binary_number:
:param base:
:return: a decimal number
"""
decimal_number = 0
for digit in range(len(non_binary_number)):
decimal_number += int(non_binary_number[digit]) * base ** abs((digit - (len(non_binary_number) - 1)))
return decimal_number | fecdd152000399fbc33a259a647118b0607daac9 | 6,700 |
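For example:

assert convert_into_decimal("101", 2) == 5
assert convert_into_decimal("777", 8) == 511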
def stringify(num):
"""
Takes a number and returns a string putting a zero in front if it's
single digit.
"""
num_string = str(num)
if len(num_string) == 1:
num_string = '0' + num_string
return num_string | 7cf37776bc774d02bce0b2016d41b26b8ab94cf7 | 6,701 |
def euc_distance(vertex, circle_obstacle):
"""
Finds the distance between the point and center of the circle.
vertex: Vertex in question.
circle_obstacle: Circle obstacle in question.
return: Distance between the vertex and the center of the circle.
"""
x = vertex[0] - circle_obstacle.position[0]
y = vertex[1] - circle_obstacle.position[1]
dist = ((x ** 2) + (y ** 2)) ** 0.5
return dist | 60ed338eb7a81fc282196c38d41cecda8f28efb7 | 6,702 |
def get_mvarg(size_pos, position="full"):
"""Take xrandrs size&pos and prepare it for wmctrl (MVARG) format
MVARG: <G>,<X>,<Y>,<W>,<H>
* <G> - gravity, 0 is default
"""
allowed = ["left", "right", "top", "bottom", "full"]
if position not in allowed:
raise ValueError(f"Position has to be one of {allowed}")
size, x, y = size_pos.split("+")
w, h = size.split("x")
if position == "left":
w = int(w) // 2
if position == "right":
w = int(w) // 2
x = int(x) + w
return f"0,{x},{y},{w},{h}" | 0b8a9c3f5ca7e24212502a3f2c76b18167deef6e | 6,704 |
import json
def get_json(file_path):
"""
Faz a leitura de um arquivo Json com codificacao utf-8,
apenas para arquivos dentro do diretorio folhacerta_settings
:param file_path: (string) nome do arquivo json com extensao
:return: Dicionario com os dados do json
"""
with open(file_path, encoding='utf-8') as data_json:
return json.load(data_json) | bd475d7427705026ad17d32d25a1a016d6c6f93d | 6,705 |
def bytes_to_str(s, encoding='latin-1'):
"""Extract null-terminated string from bytes."""
if b'\0' in s:
s, _ = s.split(b'\0', 1)
return s.decode(encoding, errors='replace') | 7d98d91443ab16478f1b8ecba39311110e92009c | 6,706 |
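For example:

assert bytes_to_str(b"hello\0garbage") == "hello"   # stops at the NUL
assert bytes_to_str(b"plain") == "plain"            # no NUL, decoded whole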
def lens_of(data):
"""Apply len(x) to elemnts in data."""
return len(data) | e10cba2bd801afd8f41dd0b15bfc23b7849c06ba | 6,707 |
def check_in_all_models(models_per_expt):
"""
Check intersection of which models are in all experiments
:param models_per_expt: an ordered dictionary of expts as keys with a list of valid models
:return: list of models that appear in all experiments
"""
in_all = None
for key, items in models_per_expt.items():
if in_all is None:
in_all = set(items)
else:
in_all.intersection_update(items)
return in_all | 75e17b8558a592471dda8a855959f60c40b06759 | 6,708 |
def is_class_name(class_name):
"""
Check if the given string is a python class.
The criteria to use is the convention that Python classes start with uppercase
:param class_name: The name of class candidate
:type class_name: str
:return: True whether the class_name is a python class otherwise False
"""
return class_name.capitalize()[0] == class_name[0] | 2b4b6a09f2a112f7e8163f3caf97fdfca0c93e12 | 6,709 |
import mimetypes
def guess_type(filename, strict=False, default="application/octet-stream"):
""" Wrap std mimetypes.guess_type to assign a default type """
content_type, encoding = mimetypes.guess_type(filename, strict=strict)
if content_type is None:
content_type = default
return content_type, encoding | ae3dae1d005797b2dc96cd893f4a79d20922e6a2 | 6,710 |
def _lookup_alias(aliases, value):
"""
Translate to a common name if our value is an alias.
:type aliases: dict of (str, [str])
:type value: str
:rtype: str
>>> _lookup_alias({'name1': ['alias1']}, 'name1')
'name1'
>>> _lookup_alias({'name1': ['alias1', 'alias2']}, 'alias1')
'name1'
>>> _lookup_alias({'name1': ['alias1', 'alias2']}, 'alias2')
'name1'
>>> _lookup_alias({'name1': ['alias1']}, 'name2')
'name2'
"""
    better_name = [name for (name, alias_list) in aliases.items()
                   if value in alias_list]
    return better_name[0] if better_name else value | df0641b1f8aca964f76afd2a83fb91a587d52e1d | 6,711 |
def missing_columns(df, missing_threshold=0.6):
"""Find missing features
Parameters
----------
df : pd.DataFrame, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and n_features is the number of features.
missing_threshold : float, default=0.6
Count all features with a missing rate greater than `missing_threshold`.
Returns
-------
t : All features with a missing rate greater than `missing_threshold`
"""
    assert 1 >= missing_threshold >= 0, "`missing_threshold` should be in [0, 1]."
t = (1-df.count()/len(df)).reset_index()
t.columns = ['feature_name', 'missing_rate']
t = t[t.missing_rate>=missing_threshold].reset_index(drop=True)
return t | 4d31673670d894556b6571a0233ec36c8452570a | 6,712 |
def is_ordered(treap):
""" Utility to check that every node in the given Treap satisfies the following:
Rules:
- if v is a child of u, then v.priority <= u.priority
- if v is a left child of u, then v.key < u.key
- if v is a right child of u, then v.key > u.key
"""
# iterate through all nodes in the heap
for node in treap:
# check parent (if not root)
if node != treap.root and (node.priority > node.parent.priority):
print("Node {} and parent ({}) have mismatched priorities.".format(node, node.parent))
return False
# check left and right. All are optional, technically
if node.left and (node.key < node.left.key):
print("Node {} and left child ({}) have mismatched keys.".format(node, node.left))
return False
if node.right and (node.key > node.right.key):
print("Node {} and right child ({}) have mismatched keys.".format(node, node.right))
return False
return True | 38b7fd7690931e017e9ece52b6cba09dbb708400 | 6,713 |
from typing import Sequence
import fnmatch
def _should_ignore(fd_name: str, patterns: Sequence[str]) -> bool:
"""Return whether `fd_name` should be ignored according to `patterns`.
Examples
--------
>>> fd_name = "google/protobuf/empty.proto"
>>> pattern = "google/protobuf/*"
>>> _should_ignore(fd_name, [pattern])
True
>>> fd_name = "foo/bar"
>>> _should_ignore(fd_name, [pattern])
False
"""
return any(fnmatch.fnmatchcase(fd_name, pattern) for pattern in patterns) | 8bf698afddbda869e26ebcaa98e1f4e950117c08 | 6,714 |