content | sha1 | id |
---|---|---|
stringlengths 35-762k | stringlengths 40 | int64 0-3.66M |
from typing import Dict
from typing import Any
import toml
from pathlib import Path
import textwrap
def load_configuration() -> Dict[str, Any]:
"""
Return dict from TOML formatted string or file.
Returns:
The dict configuration.
"""
default_config = """
[key_bindings]
AUTOCLEAR = "c"
CANCEL = "esc"
ENTER = "enter"
FILTER = ["F4", "\\\\"]
FOLLOW_ROW = "F"
HELP = ["F1", "?"]
MOVE_DOWN = ["down", "j"]
MOVE_DOWN_STEP = "J"
MOVE_END = "end"
MOVE_HOME = "home"
MOVE_LEFT = ["left", "h"]
MOVE_RIGHT = ["right", "l"]
MOVE_UP = ["up", "k"]
MOVE_UP_STEP = "K"
NEXT_SORT = ["p", ">"]
PREVIOUS_SORT = "<"
PRIORITY_DOWN = ["F8", "d", "]"]
PRIORITY_UP = ["F7", "u", "["]
QUIT = ["F10", "q"]
REMOVE_ASK = ["del", "F9"]
RETRY = "r"
RETRY_ALL = "R"
REVERSE_SORT = "I"
SEARCH = ["F3", "/"]
SELECT_SORT = "F6"
SETUP = "F2"
TOGGLE_EXPAND_COLLAPSE = "x"
TOGGLE_EXPAND_COLLAPSE_ALL = "X"
TOGGLE_RESUME_PAUSE = "space"
TOGGLE_RESUME_PAUSE_ALL = "P"
TOGGLE_SELECT = "s"
UN_SELECT_ALL = "U"
ADD_DOWNLOADS = "a"
[colors]
BRIGHT_HELP = "CYAN BOLD BLACK"
FOCUSED_HEADER = "BLACK NORMAL CYAN"
FOCUSED_ROW = "BLACK NORMAL CYAN"
HEADER = "BLACK NORMAL GREEN"
METADATA = "WHITE UNDERLINE BLACK"
SIDE_COLUMN_FOCUSED_ROW = "BLACK NORMAL CYAN"
SIDE_COLUMN_HEADER = "BLACK NORMAL GREEN"
SIDE_COLUMN_ROW = "WHITE NORMAL BLACK"
STATUS_ACTIVE = "CYAN NORMAL BLACK"
STATUS_COMPLETE = "GREEN NORMAL BLACK"
STATUS_ERROR = "RED BOLD BLACK"
STATUS_PAUSED = "YELLOW NORMAL BLACK"
STATUS_WAITING = "WHITE BOLD BLACK"
"""
config_dict = {}
config_dict["DEFAULT"] = toml.loads(default_config)
# Check for configuration file
config_file_path = Path(user_config_dir("aria2p")) / "config.toml"
if config_file_path.exists():
try:
config_dict["USER"] = toml.load(config_file_path)
except Exception as error: # noqa: W0703 (too broad exception)
logger.error(f"Failed to load configuration file: {error}")
else:
# Write initial configuration file if it does not exist
config_file_path.parent.mkdir(parents=True, exist_ok=True)
with config_file_path.open("w") as fd:
fd.write(textwrap.dedent(default_config).lstrip("\n"))
return config_dict | a7a53382dd43023b74fbb88b9c2540499c9beb4f | 8,900 |
def type_weapon(stage, bin, data=None):
    """Weapon"""
    if data is None:
        return 1
    if stage == 1:
        return (str(data), '')
    try:
        v = int(data)
        if v < 0 or v > 255:
            raise ValueError()
    except (ValueError, TypeError):
        raise PyMSError('Parameter', "Invalid Weapon value '%s', it must be 1 for ground attack or not 1 for air attack." % data)
    return v | 51ad1c627b05b57ad67f5558bb76de3fe6e48f27 | 8,901 |
def to_square_feet(square_metres):
"""Convert metres^2 to ft^2"""
return square_metres * 10.7639 | 50510aad230efcb47662936237a232662fef5596 | 8,902 |
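A quick usage sketch for the conversion above (the input value is illustrative):

print(to_square_feet(50))  # ~538.195, since 1 m^2 is about 10.7639 ft^2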
def middle_name_handler(update: Update, context: CallbackContext) -> str:
"""Get and save patronymic of user. Send hello with full name."""
u = User.get_user(update, context)
name = (f'{context.user_data[LAST_NAME]} {context.user_data[FIRST_NAME]} '
f'{context.user_data[MIDDLE_NAME]}')
context.bot.send_message(
chat_id=u.user_id,
text=static_text.HELLO_FULL_NAME.format(name=name)
)
update.message.reply_text(
text=static_text.ASK_GENDER,
parse_mode=ParseMode.HTML,
reply_markup=keyboard_utils.get_keyboard_for_gender()
)
return GENDER | dab2144282aeb63c2a3c4218236d04c3bb940ac8 | 8,903 |
def submit_barcodes(barcodes):
"""
Submits a set of {release1: barcode1, release2:barcode2}
Must call auth(user, pass) first
"""
query = mbxml.make_barcode_request(barcodes)
return _do_mb_post("release", query) | 6e975e791196ed31ef6f52cdd0ca04d71a8d19eb | 8,904 |
from typing import Counter
def get_idf_dict(arr, tokenizer, nthreads=4):
"""
Returns mapping from word piece index to its inverse document frequency.
Args:
- :param: `arr` (list of str) : sentences to process.
- :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
- :param: `nthreads` (int) : number of CPU threads to use
"""
idf_count = Counter()
num_docs = len(arr)
process_partial = partial(process, tokenizer=tokenizer)
with Pool(nthreads) as p:
idf_count.update(chain.from_iterable(p.map(process_partial, arr)))
idf_dict = defaultdict(lambda: log((num_docs + 1) / (1)))
idf_dict.update({idx: log((num_docs + 1) / (c + 1)) for (idx, c) in idf_count.items()})
return idf_dict | e98a9578695781e4965b36d713c4c0a4351e53da | 8,905 |
import json
def load_id_json_file(json_path):
"""
load the JSON file and get the data inside
all this function does is to call json.load(f)
inside a with statement
Args:
json_path (str): where the target JSON file is
Return:
ID list (list): all the data found in the file
"""
with open(json_path, 'r') as f:
return json.load(f) | fd0f7fb73636cdf407b4de3e1aa3ae66dcc8f964 | 8,906 |
def check_github_scopes(exc: ResponseError) -> str:
"""
Parse github3 ResponseError headers for the correct scopes and return a
warning if the user is missing.
@param exc: The exception to process
@returns: The formatted exception string
"""
user_warning = ""
has_wrong_status_code = exc.response.status_code not in (403, 404)
if has_wrong_status_code:
return user_warning
token_scopes = get_oauth_scopes(exc.response)
# Gist resource won't return X-Accepted-OAuth-Scopes for some reason, so this
# string might be `None`; we discard the empty string if so.
accepted_scopes = exc.response.headers.get("X-Accepted-OAuth-Scopes") or ""
accepted_scopes = set(accepted_scopes.split(", "))
accepted_scopes.discard("")
request_url = urlparse(exc.response.url)
if not accepted_scopes and request_url.path == "/gists":
accepted_scopes = {"gist"}
missing_scopes = accepted_scopes.difference(token_scopes)
if missing_scopes:
user_warning = f"Your token may be missing the following scopes: {', '.join(missing_scopes)}\n"
# This assumes we're not on enterprise and 'api.github.com' == request_url.hostname
user_warning += (
"Visit Settings > Developer settings > Personal access tokens to add them."
)
return user_warning | ebb3fffcaddc792dac7c321d9029b5042a42be86 | 8,907 |
def user_login():
    """
    Page display setup.
    :return: uses the session info received from the frontend to decide which page to display
    """
    # Get parameters
    name = session.get("name")
    if name is not None:
        return jsonify(errno=RET.OK, errmsg="True", data={"name": name})
    else:
        return jsonify(errno=RET.SESSIONERR, errmsg="user is not logged in") | 213ad2338260364186c0539a9e995b84ee889b42 | 8,908 |
def sample_conditional(node: gtsam.GaussianConditional, N: int, parents: list = [], sample: dict = {}):
"""Sample from conditional """
# every node ~ exp(0.5*|R x + S p - d|^2)
# calculate mean as inv(R)*(d - S p)
d = node.d()
n = len(d)
rhs = d.reshape(n, 1)
if len(parents) > 0:
rhs = rhs - node.S() @ np.vstack([sample[p] for p in parents])
# sample from conditional Gaussian
invR = np.linalg.inv(node.R())
return invR @ (rhs + np.random.normal(size=(n, N))) | b9ab05ea50eea05a779c6d601db4643a86b343d5 | 8,909 |
def _liftover_data_path(data_type: str, version: str) -> str:
"""
Paths to liftover gnomAD Table.
:param data_type: One of `exomes` or `genomes`
:param version: One of the release versions of gnomAD on GRCh37
:return: Path to chosen Table
"""
return f"gs://gnomad-public-requester-pays/release/{version}/liftover_grch38/ht/{data_type}/gnomad.{data_type}.r{version}.sites.liftover_grch38.ht" | 8da0f93c86568d56b3211bcb9e226b9cb495c8e2 | 8,910 |
def valueinfo_to_tensor(vi):
"""Creates an all-zeroes numpy tensor from a ValueInfoProto."""
dims = [x.dim_value for x in vi.type.tensor_type.shape.dim]
return np.zeros(
dims, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[vi.type.tensor_type.elem_type]
) | b814373e7c9d4f1e43f9d1af0c6e48b82989602e | 8,911 |
def signup_email():
"""Create a new account using data encoded in the POST body.
Expects the following form data:
first_name: E.g. 'Taylor'
last_name: E.g. 'Swift'
email: E.g. '[email protected]'
password: E.g. 'iknewyouweretrouble'
Responds with the session cookie via the `set-cookie` header on success.
Send the associated cookie for all subsequent API requests that accept
user authentication.
"""
# Prevent a CSRF attack from replacing a logged-in user's account with
# a new account with known credentials
current_user = view_helpers.get_current_user()
if current_user:
return api_util.jsonify({'message': 'A user is already logged in.'})
params = flask.request.form.copy()
# Don't log the password
password = params.pop('password', None)
rmclogger.log_event(
rmclogger.LOG_CATEGORY_API,
rmclogger.LOG_EVENT_SIGNUP, {
'params': params,
'type': rmclogger.LOGIN_TYPE_STRING_EMAIL,
},
)
first_name = params.get('first_name')
last_name = params.get('last_name')
email = params.get('email')
if not first_name:
raise api_util.ApiBadRequestError('Must provide first name.')
if not last_name:
raise api_util.ApiBadRequestError('Must provide last name.')
if not email:
raise api_util.ApiBadRequestError('Must provide email.')
if not password:
raise api_util.ApiBadRequestError('Must provide password.')
try:
user = m.User.create_new_user_from_email(
first_name, last_name, email, password)
except m.User.UserCreationError as e:
raise api_util.ApiBadRequestError(e.message)
view_helpers.login_as_user(user)
return api_util.jsonify({
'message': 'Created and logged in user %s' % user.name
}) | e3ecca4bd244d1d20ad166a153a6c3f5c80f4876 | 8,912 |
def calculate_multi_rmse(regressor, n_task):
"""
Method which calculate root mean squared error value for trained model
Using regressor attributes
Return RMSE metrics as dict for train and test datasets
:param regressor: trained regression model object
:param n_task:
:type regressor: TrainedModel, TrainedModelDNN, TrainedModelCV
:return: rmse metrics
:rtype: dict
"""
# calculate mse metric
test_mse_tmp = mean_squared_error(
regressor.y_test.values[:, n_task],
regressor.predict_classes['test'][:, n_task]
)
train_mse_tmp = mean_squared_error(
regressor.y_train.values[:, n_task],
regressor.predict_classes['train'][:, n_task]
)
# convert mse to rmse
return {
(str(n_task), 'train', 'RMSE'): train_mse_tmp ** 0.5,
(str(n_task), 'test', 'RMSE'): test_mse_tmp ** 0.5,
} | 53daee6abb97a96af44831df59767a447fd2786e | 8,913 |
import torch
import torchvision.transforms as T
def detr_predict(model, image, thresh=0.95):
"""
Function used to preprocess the image, feed it into the detr model, and prepare the output draw bounding boxes.
Outputs are thresholded.
Related functions: detr_load, draw_boxes in coco.py
Args:
model -- the detr model from detr_load()
image -- Array the original image from openCV [width, height, channels]
Returns:
boxes -- Torch tensor of coordinates of the top left and bottom right of the bounding box ordered as [(x1, y1, x2, y2)]
labels -- Torch tensor of index labels for each bounding box [<label indices>]
scores -- Torch tensor of class confidence scores for each bounding box [<class scores>]. For COCO, expects 91 different classes
"""
def box_cxcywh_to_xyxy(x):
# Converts bounding boxes to (x1, y1, x2, y2) coordinates of top left and bottom right corners
# (center_x, center_y, h, w)
x_c, y_c, w, h = x.unbind(1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=1)
def rescale_bboxes(out_bbox, size):
# Scale the bounding boxes to the image size
img_w, img_h = size
b = box_cxcywh_to_xyxy(out_bbox)
b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
return b
# Preprocess image
transform = T.Compose([
T.ToPILImage(),
T.Resize(800),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
t_image = transform(image).unsqueeze(0)
# output is a dict containing "pred_logits" of [batch_size x num_queries x (num_classes + 1)]
# and "pred_boxes" of shape (center_x, center_y, height, width) normalized to be between [0, 1]
output = model(t_image)
# Scale the class probabilities to add up to 1
probas = output['pred_logits'].softmax(-1)[0,:,:-1]
# Create outputs
boxes = rescale_bboxes(output['pred_boxes'][0], (image.shape[1], image.shape[0])).detach()
labels = probas.max(-1).indices
conf = probas.max(-1).values.detach()
### Threshold scores
conf = conf.detach()
keep = conf > thresh
# Filter out scores, boxes, and labels using threshold
conf = conf[keep]
boxes = boxes.detach()[keep]
labels = labels.detach()[keep]
return boxes, labels, conf | 394824358138eb66b69569963b21ccc2d0f5a4d3 | 8,914 |
def comp_fill_factor(self):
"""Compute the fill factor of the winding"""
if self.winding is None:
return 0
else:
(Nrad, Ntan) = self.winding.get_dim_wind()
S_slot_wind = self.slot.comp_surface_wind()
S_wind_act = (
self.winding.conductor.comp_surface_active()
* self.winding.Ntcoil
* Nrad
* Ntan
)
return S_wind_act / S_slot_wind | 55be8ac7aa2961ad970cd16de961fdcf857016fd | 8,915 |
def idewpt(vp):
"""
Calculate the dew point given the vapor pressure
Args:
vp - array of vapor pressure values in [Pa]
Returns:
dewpt - array same size as vp of the calculated
dew point temperature [C] (see Dingman 2002).
"""
# ensure that vp is a numpy array
vp = np.array(vp)
# take the log and convert to kPa
vp = np.log(vp/float(1000))
# calculate the vapor pressure
Td = (vp + 0.4926) / (0.0708 - 0.00421*vp)
return Td | 68b58d7702a50472a4851e1a7ecdd6ba13fe540a | 8,916 |
def _hexify(num):
    """
    Converts and formats to hexadecimal, returning the corresponding bytes
    """
    num = "%x" % num
    if len(num) % 2:
        num = '0' + num
    return bytes.fromhex(num) | 71fabff1191f670ec503c76a3be916636e8045ce | 8,917 |
def syn_ucbpe(num_workers, gp, acq_optimiser, anc_data):
""" Returns a recommendation via UCB-PE in the synchronous setting. """
# Define some internal functions.
beta_th = _get_ucb_beta_th(gp.input_dim, anc_data.t)
# 1. An LCB for the function
def _ucbpe_lcb(x):
""" An LCB for GP-UCB-PE. """
mu, sigma = gp.eval(x, uncert_form='std')
return mu - beta_th * sigma
# 2. A modified UCB for the function using hallucinated observations
def _ucbpe_2ucb(x):
""" An LCB for GP-UCB-PE. """
mu, sigma = gp.eval(x, uncert_form='std')
return mu + 2 * beta_th * sigma
# 3. UCB-PE acquisition for the 2nd point in the batch and so on.
def _ucbpe_acq(x, yt_dot, halluc_pts):
""" Acquisition for GP-UCB-PE. """
_, halluc_stds = gp.eval_with_hallucinated_observations(x, halluc_pts,
uncert_form='std')
return (_ucbpe_2ucb(x) > yt_dot).astype(np.double) * halluc_stds
# Now the algorithm
yt_dot_arg = _optimise_acquisition(_ucbpe_lcb, acq_optimiser, anc_data)
yt_dot = _ucbpe_lcb(yt_dot_arg.reshape((-1, gp.input_dim)))
recommendations = [asy_ucb(gp, acq_optimiser, anc_data)]
for _ in range(1, num_workers):
curr_acq = lambda x: _ucbpe_acq(x, yt_dot, np.array(recommendations))
new_rec = _optimise_acquisition(curr_acq, acq_optimiser, anc_data)
recommendations.append(new_rec)
return recommendations | 2c12a608c87d61f64b219aaf301189b6c8ee73a2 | 8,918 |
def get_reward(intervention, state, time):
"""Compute the reward based on the observed state and choosen intervention."""
A_1, A_2, A_3 = 60, 500, 60
C_1, C_2, C_3, C_4 = 25, 20, 30, 40
discount = 4.0 / 365
cost = (
A_1 * state.asymptomatic_humans
+ A_2 * state.symptomatic_humans
+ A_3 * state.mosquito_population
)
cost += 0.5 * (
C_1 * intervention.updates["treated_bednet_use"] ** 2
+ C_2 * intervention.updates["condom_use"] ** 2
+ C_3 * intervention.updates["treatment_of_infected"] ** 2
+ C_4 * intervention.updates["indoor_spray_use"] ** 2
)
return -cost * np.exp(-discount * time) | 72803b1a5f09d0856d29601bc766b6787a8255e7 | 8,919 |
def array_of_floats(f):
"""Read an entire file of text as a list of floating-point numbers."""
words = f.read().split()
return [builtin_float(x) for x in words] | 8b357afb3f977761118f7df2632a4f1c198d721a | 8,920 |
def change_currency():
""" Change user's currency """
form = CurrencyForm()
if form.validate_on_submit():
currency = form.rate.data
redirected = redirect(url_for('cashtrack.overview'))
redirected.set_cookie('filter', currency)
symbol = rates[currency]['symbol']
flash(f'Currency has been changed to {currency} ({symbol})', 'success')
return redirected
return rnd_tmp('currency.html', form=form, rates=rates) | 08a23e47a603ee5d5e49cff0259a83f4a2ffc3e0 | 8,921 |
import subprocess
def get_host_checks():
"""
Returns lxc configuration checks.
"""
out = subprocess.check_output('lxc-checkconfig', shell=True)
response = []
if out:
for line in out.splitlines():
response.append(line.decode('utf-8'))
info = {
'checks': response,
}
return jsonify(info) | d94b3b94ec6f32e2706eaf7b67f570de2fc34f14 | 8,922 |
import pandas as pd
def q2_1(df: pd.DataFrame) -> int:
    """
    Finds # of entries (rows) in df
    """
    return df.shape[0] | d98a3d5592994e7dd3758dfab683cb96b532ce6d | 8,923 |
def is_shell(command: str) -> bool:
"""Check if command is shell."""
return command.startswith(get_shell()) | 0cc1497dc17e1535fdfb23c1b160bfcd63141eb1 | 8,924 |
import argparse
def parse_args():
"""set and check parameters."""
parser = argparse.ArgumentParser(description="bert process")
parser.add_argument("--pipeline_path", type=str, default="./config/fat_deepffm.pipeline", help="SDK infer pipeline")
parser.add_argument("--data_dir", type=str, default="../data/input/Criteo_bin/",
help="Dataset contain batch_spare batch_label batch_dense")
args_opt = parser.parse_args()
return args_opt | d93eca1a5c36d73944762967bf6557ee5e15d346 | 8,925 |
def board_init():
"""
Initializes board with all available values 1-9 for each cell
"""
board = [[[i for i in range(1,n+1)] for j in range(n)] for k in range(n)]
return board | e4b7192c02e298de915eb3024f32f194942a061b | 8,926 |
def gen_int_lists(num):
"""
Generate num list strategies of integers
"""
return [
s.lists(s.integers(), max_size=100)
for _ in range(num)
] | f1bd151a09f78b1eee9803ce2a077a4f01d34aaa | 8,927 |
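A minimal sketch of using one of the generated strategies in a property test, assuming `s` above is `hypothesis.strategies`:

from hypothesis import given, strategies as s

int_lists = gen_int_lists(3)

@given(int_lists[0])
def test_reversing_twice_is_identity(xs):
    # Reversing a list twice should give back the original list.
    assert list(reversed(list(reversed(xs)))) == xs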
def is_blob(bucket: str, file:str):
""" checking if it's a blob """
client = storage.Client()
blob = client.get_bucket(bucket).get_blob(file)
return hasattr(blob, 'exists') and callable(getattr(blob, 'exists')) | ba9bb07f1f15175a28027907634c37b402c6b292 | 8,928 |
def rotate(q, p):
"""
Rotation of vectors in p by quaternions in q
Format: The last dimension contains the quaternion components which
are ordered as (i,j,k,w), i.e. real component last.
The other dimensions follow the default broadcasting rules.
"""
iw = 3
ii = 0
ij = 1
ik = 2
qi = q[...,ii]
qj = q[...,ij]
qk = q[...,ik]
qw = q[...,iw]
pi = p[...,ii]
pj = p[...,ij]
pk = p[...,ik]
# FIXME: This part does not export to the onnx model, i.e. the shape
# of the tensors will be hardcoded according to the input during
# the export. Not a problem though.
shape = tuple(np.maximum(pi.shape, qi.shape))
tmp = q.new_empty(shape + (4,))
out = q.new_empty(shape + (3,))
# Compute tmp = q*p, identifying p with a purly imaginary quaternion.
tmp[...,iw] = - qi*pi - qj*pj - qk*pk
tmp[...,ii] = qw*pi + qj*pk - qk*pj
tmp[...,ij] = qw*pj - qi*pk + qk*pi
tmp[...,ik] = qw*pk + qi*pj - qj*pi
# Compute tmp*q^-1.
out[...,ii] = -tmp[...,iw]*qi + tmp[...,ii]*qw - tmp[...,ij]*qk + tmp[...,ik]*qj
out[...,ij] = -tmp[...,iw]*qj + tmp[...,ii]*qk + tmp[...,ij]*qw - tmp[...,ik]*qi
out[...,ik] = -tmp[...,iw]*qk - tmp[...,ii]*qj + tmp[...,ij]*qi + tmp[...,ik]*qw
return out | da8c715276d5bef0340ad3378aa127c9ddb75f96 | 8,929 |
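A small worked example, assuming torch tensors (the function allocates with `q.new_empty`): rotating the x unit vector by 90 degrees about the z axis should give the y unit vector. Quaternion components are ordered (i, j, k, w) as documented above.

import numpy as np
import torch

q = torch.tensor([[0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4)]])  # 90 deg about z
p = torch.tensor([[1.0, 0.0, 0.0]])
print(rotate(q, p))  # approximately [[0., 1., 0.]]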
from typing import Union
def _is_whitelisted(name: str, doc_obj: Union['Module', 'Class']):
"""
Returns `True` if `name` (relative or absolute refname) is
contained in some module's __pdoc__ with a truish value.
"""
refname = doc_obj.refname + '.' + name
module = doc_obj.module
while module:
qualname = refname[len(module.refname) + 1:]
if module.__pdoc__.get(qualname) or module.__pdoc__.get(refname):
return True
module = module.supermodule
return False | c54c69ae0180c1764c8885d00e96640f1bfff0f8 | 8,930 |
import copy
def permute_bond_indices(atomtype_vector):
"""
Permutes the set of bond indices of a molecule according to the complete set of valid molecular permutation cycles
atomtype_vector: array-like
A vector of the number of each atoms, the length is the total number of atoms.
An A3B8C system would be [3, 8, 1]
Returns many sets permuted bond indices, the number of which equal to the number of cycles
"""
natoms = sum(atomtype_vector)
bond_indices = generate_bond_indices(natoms)
cycles_by_atom = molecular_cycles(atomtype_vector)
bond_indice_permutations = [] # interatomic distance matrix permutations
for atom in cycles_by_atom:
for cycle in atom:
tmp_bond_indices = copy.deepcopy(bond_indices) # need a deep copy, list of lists
for subcycle in cycle:
for i, bond in enumerate(tmp_bond_indices):
tmp_bond_indices[i] = permute_bond(bond, subcycle)
bond_indice_permutations.append(tmp_bond_indices)
return bond_indice_permutations | ebf398e55d8a80a2e4ce2cef4f48d957e47d68a3 | 8,931 |
def get_cell_integer_param(device_resources,
cell_data,
name,
force_format=None):
"""
Retrieves definition and decodes value of an integer cell parameter. The
function can optionally force a specific encoding format if needed.
"""
# Get the parameter definition to determine its type
param = device_resources.get_parameter_definition(cell_data.cell_type,
name)
# Force the format if requested by substituting the paraameter
# definition object.
if not param.is_integer_like() and force_format is not None:
if force_format != param.string_format:
param = ParameterDefinition(
name=name,
string_format=force_format,
default_value=cell_data.attributes[name])
# Decode
return param.decode_integer(cell_data.attributes[name]) | 6ab281004f324e8c40e176d5676cd7e42f50eaa9 | 8,932 |
import hashlib
def get_md5(filename):
""" Calculates the MD5 sum of the passed file
Args:
filename (str): File to hash
Returns:
str: MD5 hash of file
"""
# Size of buffer in bytes
BUF_SIZE = 65536
md5 = hashlib.md5()
# Read the file in 64 kB blocks
with open(filename, "rb") as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
md5.update(data)
return md5.hexdigest() | c43538aee954f670c671c2e26e18f4a17e298455 | 8,933 |
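A minimal usage sketch; the file name is hypothetical and created just for the example:

with open("example.txt", "w") as f:  # illustrative file
    f.write("hello")
print(get_md5("example.txt"))  # md5 of b"hello": 5d41402abc4b2a76b9719d911017c592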
import os
def get_py_path(pem_path):
"""Returns the .py filepath used to generate the given .pem path, which may
or may not exist.
Some test files (notably those in verify_certificate_chain_unittest/ have a
"generate-XXX.py" script that builds the "XXX.pem" file. Build the path to
the corresponding "generate-XXX.py" (which may or may not exist)."""
file_name = os.path.basename(pem_path)
file_name_no_extension = os.path.splitext(file_name)[0]
py_file_name = 'generate-' + file_name_no_extension + '.py'
return os.path.join(os.path.dirname(pem_path), py_file_name) | 0bc97d23138c44e051282fdfa22517f1289ab65a | 8,934 |
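An illustrative call (POSIX-style path, purely hypothetical):

print(get_py_path("/certs/intermediate-ca.pem"))
# -> /certs/generate-intermediate-ca.py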
def is_recurrent(sequence):
"""
Returns true if the given sequence is recurrent (elements can exist more than once), otherwise returns false.
Example
---------
>>> sequence = [1,2,3,4,5]
>>> ps.is_recurrent(sequence)
False
>>> sequence = [1,1,2,2,3]
>>> ps.is_recurrent(sequence)
True
"""
element_counts = get_element_counts(sequence)
truths = [count > 1 for element, count in element_counts.items()]
if True in truths:
return True
return False | e123ddd960b262651b20e54ccbd3d5b11fe3695e | 8,935 |
import numpy as np
import torch
def flex_stack(items, dim=0):
    """
    Stack a homogeneous list of lists, torch tensors, or numpy arrays along `dim`.
    """
    if len(items) < 1:
        raise ValueError("items is empty")
    if len(set([type(item) for item in items])) != 1:
        raise TypeError("items are not of the same type")
    if isinstance(items[0], list):
        return items
    elif isinstance(items[0], torch.Tensor):
        return torch.stack(items, dim=dim)
    elif isinstance(items[0], np.ndarray):
        return np.stack(items, axis=dim)
    else:
        raise TypeError(f"Unrecognized type {type(items[0])}") | 47ca0e47647ce86619f1cdc86eef560fbbb9304e | 8,936 |
from pathlib import Path
def download_image_data(gpx_file,
padding,
square,
min_lat,
min_long,
max_lat,
max_long,
cache_dir):
"""
Download satellite imagery from USGS
Args:
gpx_file: (str) A file containing one of more tracks to use to determine the area of terrain to model
padding: (float) Padding to add around the GPX track, in miles
min_lat (float) Southern boundary of the region to model
min_long (float) Eastern boundary of the region to model
max_lat (float) Northern boundary of the region to model
max_long (float) Western boundary of the region to model
cache_dir (str) Directory to download the files to
"""
log = GetLogger()
# Determine the bounds of the output
if gpx_file:
log.info("Parsing GPX file")
gpx = GPXFile(gpx_file)
try:
min_lat, min_long, max_lat, max_long = gpx.GetBounds(padding, square)
except ApplicationError as ex:
log.error(ex)
return False
if None in (min_lat, min_long, max_lat, max_long):
raise InvalidArgumentError("You must specify an area to download")
log.info(f"Requested boundaries top(max_lat)={max_lat} left(min_long)={min_long} bottom(min_lat)={min_lat} right(max_long)={max_long}")
# Get the image data
cache_dir = Path(cache_dir)
image_filename = Path(get_cropped_image_filename(max_lat, min_long, min_lat, max_long))
try:
get_image_data(image_filename, min_lat, min_long, max_lat, max_long, cache_dir)
except ApplicationError as ex:
log.error(ex)
return False
log.passed("Successfully downloaded images")
return True | 4ceef45da21622ab716031e8f68ed4724e168062 | 8,937 |
def find_nearest_values(array, value):
"""Find indexes of the two nearest values of an array to a given value
Parameters
----------
array (numpy.ndarray) : array
value (float) : value
Returns
-------
idx1 (int) : index of nearest value in the array
idx2 (int) : index of second nearest value in the array
"""
# index of nearest value in the array
idx1 = (np.abs(array-value)).argmin()
# check if value is bigger or smaller than nearest value
if array[idx1] >= value:
idx2 = idx1 - 1
else:
idx2 = idx1 + 1
return idx1, idx2 | 9c873692878ef3e4de8762bb89306e7ef907f90a | 8,938 |
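A small worked example: for value 2.7, the nearest grid value is 3.0 (index 2) and the second nearest is 2.0 (index 1).

import numpy as np

arr = np.array([1.0, 2.0, 3.0, 4.0])
print(find_nearest_values(arr, 2.7))  # (2, 1)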
def channel_info(channel_id):
"""
Get Slack channel info
"""
channel_info = slack_client.api_call("channels.info", channel=channel_id)
if channel_info:
return channel_info['channel']
return None | 260eeaa2849350e2ede331ddecd68aead798f76c | 8,939 |
from typing import Callable
from typing import Any
import logging
def log(message: str) -> Callable:
"""Returns a decorator to log info a message before function call.
Parameters
----------
message : str
message to log before function call
"""
def decorator(function: Callable) -> Callable:
@wraps(function)
def wrapper(*args: Any, **kwargs: Any) -> None:
logging.info(message)
return function(*args, **kwargs)
return wrapper
return decorator | c8ed8f8119be8d6e80935d73034f752ad2cb1dd9 | 8,940 |
def client(identity: PrivateIdentity) -> Client:
"""Client for easy access to iov42 platform."""
return Client(PLATFORM_URL, identity) | a0ad172765b50a76485bd3ec630a2c3ffeae85ef | 8,941 |
def init_weights(module, init='orthogonal'):
"""Initialize all the weights and biases of a model.
:param module: any nn.Module or nn.Sequential
:param init: type of initialize, see dict below.
:returns: same module with initialized weights
:rtype: type(module)
"""
if init is None: # Base case, no change to default.
return module
init_dict = {
'xavier_uniform': nn.init.xavier_uniform_,
'xavier_normal': nn.init.xavier_normal_,
'orthogonal': nn.init.orthogonal_,
'kaiming_normal': nn.init.kaiming_normal_,
'kaiming_uniform': nn.init.kaiming_uniform_,
}
for m in module.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
# print("initializing {} with {} init.".format(m, init))
init_dict[init](m.weight)
if hasattr(m, 'bias') and m.bias is not None:
# print("initial bias from ", m, " with zeros")
nn.init.constant_(m.bias, 0.0)
elif isinstance(m, (nn.Sequential, nn.ModuleList, nn.ModuleDict)):
for mod in m:
init_weights(mod, init)
return module | e8cd95743b8a36dffdb53c7f7b9723e896d2071d | 8,942 |
def getsoundchanges(reflex, root): # requires two ipastrings as input
"""
Takes a modern-day L1 word and its reconstructed form and returns \
a table of sound changes.
:param reflex: a modern-day L1-word
:type reflex: str
:param root: a reconstructed proto-L1 word
:type root: str
:return: table of sound changes
:rtype: pandas.core.frame.DataFrame
:Example:
>>> from loanpy import reconstructor as rc
>>> rc.getsoundchanges("ɟɒloɡ", "jɑlkɑ")
+---+--------+------+
| # | reflex | root |
+---+--------+------+
| 0 | #0 | 0 |
+---+--------+------+
| 1 | #ɟ | j |
+---+--------+------+
| 2 | ɒ | ɑ |
+---+--------+------+
| 3 | l | lk |
+---+--------+------+
| 4 | o | ɑ |
+---+--------+------+
| 5 | ɡ# | 0 |
+---+--------+------+
"""
reflex = ipa2clusters(reflex)
root = ipa2clusters(root)
reflex[0], reflex[-1] = "#" + reflex[0], reflex[-1] + "#"
reflex, root = ["#0"] + reflex, ["0"] + root
if reflex[1][1:] in vow and root[1] in cns:
root = root[1:]
elif reflex[1][1:] in cns and root[1] in vow:
reflex = reflex[1:]
diff = abs(len(root) - len(reflex)) # "a,b","c,d,e,f,g->"a,b,000","c,d,efg
if len(reflex) < len(root):
reflex += ["0#"]
root = root[:-diff] + ["".join(root[-diff:])]
elif len(reflex) > len(root):
root += ["0"]
reflex = reflex[:-diff] + ["".join(reflex[-diff:])]
else:
reflex, root = reflex + ["0#"], root + ["0"]
return pd.DataFrame({"reflex": reflex, "root": root}) | 8230e836e109ed8453c6fdbc72e6a4f77833f69b | 8,943 |
def compute_normals(filename, datatype='cell'):
"""
Given a file, this method computes the surface normals of the mesh stored
in the file. It allows to compute the normals of the cells or of the points.
The normal computed in a point is the interpolation of the cell normals of
the cells adiacent to the point.
:param str filename: the name of the file to parse in order to extract
the geometry information.
:param str datatype: indicate if the normals have to be computed for the
points or the cells. The allowed values are: 'cell', 'point'. Default
value is 'cell'.
:return: the array that contains the normals.
:rtype: numpy.ndarray
"""
points, cells = FileHandler(filename).get_geometry(get_cells=True)
normals = np.array(
[normalize(normal(*points[cell][0:3])) for cell in cells])
if datatype == 'point':
normals_cell = np.empty((points.shape[0], 3))
for i_point in np.arange(points.shape[0]):
cell_adiacent = [cells.index(c) for c in cells if i_point in c]
normals_cell[i_point] = normalize(
np.mean(normals[cell_adiacent], axis=0))
normals = normals_cell
return normals | e0cfc90a299f6db52d9cec2f39eebfc96158265c | 8,944 |
from typing import List
from typing import Optional
def build_layers_url(
layers: List[str], *, size: Optional[LayerImageSize] = None
) -> str:
"""Convenience method to make the server-side-rendering URL of the provided layer URLs.
Parameters
-----------
layers: List[:class:`str`]
The image urls, in ascending order of Zone ID's
size: Optional[:class:`LayerImageSize`]
The desired size for the render. If one is not supplied, it defaults to `LayerImageSize.SIZE_600`.
"""
size_str = str(size or LayerImageSize.SIZE_600)[-3:]
joined = ",".join(quote(layer) for layer in layers)
return f"https://impress-2020.openneo.net/api/outfitImage?size={size_str}&layerUrls={joined}" | 2cc7ab58af2744a4c898903d9a035c77accbae2e | 8,945 |
def SyncBatchNorm(*args, **kwargs):
"""In cpu environment nn.SyncBatchNorm does not have kernel so use nn.BatchNorm2D instead"""
if paddle.get_device() == 'cpu':
return nn.BatchNorm2D(*args, **kwargs)
else:
return nn.SyncBatchNorm(*args, **kwargs) | f08a7141700b36286893bbbc82b28686d1ca88a9 | 8,946 |
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_get(uuid, local_id): # noqa: E501
"""data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_get
returns tapi.common.CapacityValue # noqa: E501
:param uuid: Id of connectivity-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: TapiCommonCapacityValue
"""
return 'do some magic!' | 340189bc76bdbbc14666fe542aa05d467c7d4898 | 8,947 |
import re
def parse_path_length(path):
"""
parse path length
"""
matched_tmp = re.findall(r"(S\d+)", path)
return len(matched_tmp) | 762e2b86fe59689800ed33aba0419f83b261305b | 8,948 |
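An illustrative call (the path string is hypothetical); the regex counts "S<number>" segments:

print(parse_path_length("S1->S2->S10"))  # 3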
def check_permisions(request, allowed_groups):
""" Return permissions."""
try:
profile = request.user.id
print('User', profile, allowed_groups)
is_allowed = True
except Exception:
return False
else:
return is_allowed | 4bdb54bd1edafd7a0cf6f50196d470e0d3425c66 | 8,949 |
def kanji2digit(s):
    """
    Convert kanji numerals from 1 to 99 into Arabic numerals.
    """
    k2d = lambda m, i: _kanjitable[m.group(i)]
    s = _re_kanjijiu1.sub(lambda m: k2d(m,1) + k2d(m,2), s)
    s = _re_kanjijiu2.sub(lambda m: u'1' + k2d(m,1), s)
    s = _re_kanji.sub(lambda m: k2d(m,1), s)
    s = s.replace(u'十', u'10')
    return s | 27589cee8a9b4f14ad7120061f05077b736b8632 | 8,950 |
def calc_rdm_segment(t, c, segment_id_beg, segment_id_end, segment_id, ph_index_beg, segment_ph_cnt, debug=0):
"""
Function to calculate radiometry (rdm)
Input:
t - time or delta_time of ATL03, for a given gt num
c - classification of ATL03 for a given gt num
ensure that no nans exist
segment_id_beg - segment_id_beg from ATL08
segment_id_end - segment_id_end from ATL08
segment_id - segment_id from ATL03 geolocation/
ph_index_beg - ph_index_beg from ATL03 geolocation/
segment_ph_cnt - segment_ph_cnt from ATL03 geolocation/
debug - val != 0 enables print statements if segments
do not match from 03 to 08 (see caveats)
Output:
n_shots_unique - total number of unique ttg per ATL08 100m bin
rdm_ground - rdm of ground photons (c==1)
rdm_veg - rdm of veg photons (c==2)
rdm_canopy - rdm of canopy photons (c==3)
Example:
n_shots_unique, rdm_ground, rdm_veg, rdm_canopy = \
calc_rdm(t, c, segment_id_beg, segment_id_end, ph_index_beg, segment_ph_cnt, debug=0)
Caveats:
Ensure that no nans exist in classification c
rdm_ground/veg/canopy and n_shots_unique are floating point
b/c it's possible to have no overlap in 03 and 08 data, in
which case the radiometry value is NaN; this is implemented by
initializing rdm vectors are NaN. Thus, they are floating-point-
valued.
This functions can handle when 03/08 do not totally overlap,
or when there is no overlap. That said, one should proceed with
caution knowing 03 and 08 do not overlap at all. NaN values are
initialized in rdm vectors based on these cases.
"""
if np.isnan(c).sum() > 0 and debug:
print('warning: NaN values found in c')
rdm_ground = np.full(segment_id_beg.shape, np.nan)
rdm_veg = np.full(segment_id_beg.shape, np.nan)
rdm_canopy = np.full(segment_id_beg.shape, np.nan)
n_shots_unique = np.full(segment_id_beg.shape, np.nan)
n_id = len(segment_id)
for s in range(len(segment_id_beg)):
# _, k0 = iu.getClosest(segment_id, [segment_id_beg[s]])
# _, k1 = iu.getClosest(segment_id, [segment_id_end[s]])
_, k0 = getClosest(segment_id, [segment_id_beg[s]])
_, k1 = getClosest(segment_id, [segment_id_end[s]])
k0, k1 = int(k0), int(k1)
warn = False
b_edge = False
if segment_id[k0] < segment_id_beg[s]:
# left side incomplete
# cm.pause('beg')
k = k0
while segment_id[k] < segment_id_beg[s]:
k += 1
if k >= n_id:
b_edge = True
break
elif segment_id[k0] > segment_id_beg[s]:
# print('warning: 03 seg id beg %d > 08 seg id beg %d' % (segment_id[k0], segment_id_beg[s]))
warn = True
# else:
# equal, totally fine
# if segment_id[k1] != segment_id_end[s]:
if segment_id[k1] > segment_id_end[s]:
# right side incomplete
# cm.pause('end')
k = k1
while segment_id[k] > segment_id_end[s]:
k -= 1
if k < 0:
b_edge = True
break
elif segment_id[k1] < segment_id_end[s]:
# print('warning: 03 seg id beg %d < 08 seg id beg %d' % (segment_id[k0], segment_id_beg[s]))
warn = True
# else:
# equal, totally fine
if b_edge and debug:
# 08 segment is entirely outside of 03 segment data
print('outside')
print('03: [%d, %d]' % (segment_id[k0], segment_id[k1]))
print('08: [%d, %d]' % (segment_id_beg[s], segment_id_end[s]))
# cm.pause()
input('enter to continue')
continue
if warn and debug:
print('partial')
print('03: [%d, %d]' % (segment_id[k0], segment_id[k1]))
print('08: [%d, %d]' % (segment_id_beg[s], segment_id_end[s]))
# cm.pause()
input('enter to continue')
i0, i1 = ph_index_beg[k0], ph_index_beg[k1] + segment_ph_cnt[k1] - 1
t_seg = t[i0:i1+1] # inclusive index
c_seg = c[i0:i1+1]
n_shots_total_uq = len(np.unique(t_seg))
n_shots_ground = (c_seg == 1).sum()
n_shots_veg = (c_seg == 2).sum()
n_shots_canopy = (c_seg == 3).sum()
n_shots_unique[s] = n_shots_total_uq
rdm_ground[s] = float(n_shots_ground / n_shots_total_uq)
rdm_veg[s] = float(n_shots_veg / n_shots_total_uq)
rdm_canopy[s] = float(n_shots_canopy / n_shots_total_uq)
return n_shots_unique, rdm_ground, rdm_veg, rdm_canopy | 47ac61f816a5f8e9c3f86c5b1e8ad2f9f660f8a2 | 8,951 |
def load_featurizer(pretrained_local_path):
"""Load pretrained model."""
return CNN_tf("vgg", pretrained_local_path) | 1f39acdae01e484302d8f8051c2f55a178aa2301 | 8,952 |
import os
import zipfile
def create_zipfile(dir_to_zip, savepath=''):
"""Create a zip file from all the files under 'dir_to_zip'.
The output zip file will be saved to savepath.
If savepath ends with '.zip', then the output zip file will be
saved AS 'savepath'. Necessary tree subdirectories are created automatically.
Else, savepath is assumed to be a directory path,
hence the output zip file will be saved TO 'savepath'
directory. Necessary tree subdirectories are created automatically.
:return absolute savepath
"""
save_cwd = os.getcwd()
dir_to_zip = os.path.abspath(dir_to_zip)
if dir_to_zip in os.path.split(savepath)[0]: raise ValueError(
'To avoid recursion, the resultant "savepath" should not be located inside "dir_to_zip"',
dict(dir_to_zip=dir_to_zip, savepath=savepath))
parent_dir, dir_name = os.path.split(dir_to_zip)
os.chdir(parent_dir)
if savepath:
if savepath.endswith('.zip'):
create_path(savepath, stop_depth=1)
else:
create_path(savepath, stop_depth=0)
savepath = os.path.join(savepath, dir_name + '.zip')
else:
savepath = dir_to_zip + '.zip'
pwd_length = len(os.getcwd())
with zipfile.ZipFile(savepath, "w", compression=zipfile.ZIP_DEFLATED) as zf:
for dirpath, dirnames, filenames in os.walk(dir_to_zip):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, path[pwd_length + 1:])
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, path[pwd_length + 1:])
os.chdir(save_cwd)
return os.path.abspath(savepath) | dd18a1272a25c3fb272345fc8b71a303c6cc4053 | 8,953 |
from templateflow.conf import setup_home
def make_cmdclass(basecmd):
"""Decorate setuptools commands."""
base_run = basecmd.run
def new_run(self):
setup_home()
base_run(self)
basecmd.run = new_run
return basecmd | dc66370f19e2d1b3dbc2da3942f8923a07d8d9a6 | 8,954 |
def rmse(predictions, targets):
"""Compute root mean squared error"""
rmse = np.sqrt(((predictions - targets) ** 2).mean())
return rmse | 1a5fe824c5ef768f3df34463724fdd057d37901a | 8,955 |
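A worked example: the errors are (1, -1, 1), so RMSE = sqrt(mean([1, 1, 1])) = 1.0.

import numpy as np

print(rmse(np.array([2.0, 1.0, 3.0]), np.array([1.0, 2.0, 2.0])))  # 1.0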
import math
def format_timedelta(value, time_format=None):
""" formats a datetime.timedelta with the given format.
Code copied from Django as explained in
http://stackoverflow.com/a/30339105/932593
"""
if time_format is None:
time_format = "{days} days, {hours2}:{minutes2}:{seconds2}"
if hasattr(value, 'seconds'):
seconds = value.seconds + value.days * 24 * 3600
else:
seconds = int(value)
seconds_total = seconds
minutes = int(math.floor(seconds / 60))
minutes_total = minutes
seconds -= minutes * 60
hours = int(math.floor(minutes / 60))
hours_total = hours
minutes -= hours * 60
days = int(math.floor(hours / 24))
days_total = days
hours -= days * 24
years = int(math.floor(days / 365))
years_total = years
days -= years * 365
return time_format.format(**{
'seconds': seconds,
'seconds2': str(seconds).zfill(2),
'minutes': minutes,
'minutes2': str(minutes).zfill(2),
'hours': hours,
'hours2': str(hours).zfill(2),
'days': days,
'years': years,
'seconds_total': seconds_total,
'minutes_total': minutes_total,
'hours_total': hours_total,
'days_total': days_total,
'years_total': years_total,
}) | 0ee6a48e0eee5e553e665d44173f0a4843b4007f | 8,956 |
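A usage sketch with the default format string:

from datetime import timedelta

print(format_timedelta(timedelta(days=1, hours=2, minutes=3, seconds=4)))
# -> "1 days, 02:03:04"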
def categorical_log_likelihood(probs: chex.Array, labels: chex.Array):
"""Computes joint log likelihood based on probs and labels."""
num_data, unused_num_classes = probs.shape
assert len(labels) == num_data
assigned_probs = probs[jnp.arange(num_data), jnp.squeeze(labels)]
return jnp.sum(jnp.log(assigned_probs)) | 6209fc59dc6a76f8afc49788b9e5c5a11f58354f | 8,957 |
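A small worked example: the assigned probabilities are 0.5 and 0.2, so the result is log(0.5) + log(0.2) ~= -2.303.

import jax.numpy as jnp

probs = jnp.array([[0.5, 0.5], [0.8, 0.2]])
labels = jnp.array([[0], [1]])
print(categorical_log_likelihood(probs, labels))  # ~ -2.3026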
def ask_name(question: str = "What is your name?") -> str:
"""Ask for the users name."""
return input(question) | 1cc9ec4d3bc48d7ae4be1b2cf8eb64a0b4f94b23 | 8,958 |
from typing import Sequence
def _maxcut(g: Graph, values: Sequence[int]) -> float:
"""
cut by given values $$\pm 1$$ on each vertex as a list
:param g:
:param values:
:return:
"""
cost = 0
for e in g.edges:
cost += g[e[0]][e[1]].get("weight", 1.0) / 2 * (1 - values[e[0]] * values[e[1]])
return cost | 1ca8d2cfce6a741fb4eab55f7fcd9d9db5e3578f | 8,959 |
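A minimal sketch, assuming `Graph` above is a networkx-style graph (it only needs `g.edges` and `g[u][v].get("weight", 1.0)`): with the assignment (+1, -1, +1) both edges of a 3-node path are cut.

import networkx as nx

g = nx.Graph()
g.add_edge(0, 1)
g.add_edge(1, 2)
print(_maxcut(g, [1, -1, 1]))  # 2.0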
def cp_als(X, rank, random_state=None, init='randn', **options):
"""Fits CP Decomposition using Alternating Least Squares (ALS).
Parameters
----------
X : (I_1, ..., I_N) array_like
A tensor with ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
random_state : integer, ``RandomState``, or ``None``, optional (default ``None``)
If integer, sets the seed of the random number generator;
If RandomState instance, random_state is the random number generator;
If None, use the RandomState instance used by ``numpy.random``.
init : str, or KTensor, optional (default ``'randn'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
Alternating Least Squares (ALS) is a very old and reliable method for
fitting CP decompositions. This is likely a good first algorithm to try.
References
----------
Kolda, T. G. & Bader, B. W.
"Tensor Decompositions and Applications."
SIAM Rev. 51 (2009): 455-500
http://epubs.siam.org/doi/pdf/10.1137/07070111X
Comon, Pierre & Xavier Luciani & Andre De Almeida.
"Tensor decompositions, alternating least squares and other tales."
Journal of chemometrics 23 (2009): 393-405.
http://onlinelibrary.wiley.com/doi/10.1002/cem.1236/abstract
Examples
--------
```
import tensortools as tt
I, J, K, R = 20, 20, 20, 4
X = tt.randn_tensor(I, J, K, rank=R)
tt.cp_als(X, rank=R)
```
"""
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'CP_ALS', **options)
# Main optimization loop.
while result.still_optimizing:
# Iterate over each tensor mode.
for n in range(X.ndim):
# i) Normalize factors to prevent singularities.
U.rebalance()
# ii) Compute the N-1 gram matrices.
components = [U[j] for j in range(X.ndim) if j != n]
grams = sci.multiply.reduce([sci.dot(u.T, u) for u in components])
# iii) Compute Khatri-Rao product.
kr = khatri_rao(components)
# iv) Form normal equations and solve via Cholesky
c = linalg.cho_factor(grams, overwrite_a=False)
p = unfold(X, n).dot(kr)
U[n] = linalg.cho_solve(c, p.T, overwrite_b=False).T
# U[n] = linalg.solve(grams, unfold(X, n).dot(kr).T).T
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[-1].T.dot(U[-1])
# obj = np.sqrt(np.sum(grams) - 2*sci.sum(p*U[-1]) + normX**2) / normX
obj = linalg.norm(U.full() - X) / normX
# Update result
result.update(obj)
# Finalize and return the optimization result.
return result.finalize() | b6402f03ba4e8be7d0abb2b13232d88b07a73be9 | 8,960 |
import os
def load_station_enu(
station_name,
start_date=None,
end_date=None,
download_if_missing=True,
force_download=False,
zero_by="mean",
to_cm=True,
):
"""Loads one gps station's ENU data since start_date until end_date as a dataframe
Args:
station_name (str): 4 Letter name of GPS station
See http://geodesy.unr.edu/NGLStationPages/gpsnetmap/GPSNetMap.html for map
start_date (datetime or str): Optional. cutoff for beginning of GPS data
end_date (datetime or str): Optional. cut off for end of GPS data
download_if_missing (bool): default True
force_download (bool): default False
"""
# start_date, end_date = _parse_dates(start_date, end_date)
if zero_by not in ("start", "mean"):
raise ValueError("'zero_by' must be either 'start' or 'mean'")
station_name = station_name.upper()
gps_data_file = os.path.join(GPS_DIR, GPS_FILE.format(station=station_name))
if force_download:
try:
os.remove(gps_data_file)
logger.info(f"force removed {gps_data_file}")
except FileNotFoundError:
pass
if not os.path.exists(gps_data_file):
if download_if_missing:
logger.info(f"Downloading {station_name} to {gps_data_file}")
download_station_data(station_name)
else:
raise ValueError(
"{gps_data_file} does not exist, download_if_missing = False"
)
df = pd.read_csv(gps_data_file, header=0, sep=r"\s+", engine="c")
clean_df = _clean_gps_df(df, start_date, end_date)
if to_cm:
# logger.info("Converting %s GPS to cm" % station_name)
clean_df[["east", "north", "up"]] = 100 * clean_df[["east", "north", "up"]]
if zero_by.lower() == "mean":
mean_val = clean_df[["east", "north", "up"]].mean()
# enu_zeroed = clean_df[["east", "north", "up"]] - mean_val
clean_df[["east", "north", "up"]] -= mean_val
elif zero_by.lower() == "start":
start_val = clean_df[["east", "north", "up"]].iloc[:10].mean()
# enu_zeroed = clean_df[["east", "north", "up"]] - start_val
clean_df[["east", "north", "up"]] -= start_val
# Finally, make the 'date' column a DateIndex
return clean_df.set_index("date") | 208b6b6776ad3eb0ef765a5b56d287e7ed06e63f | 8,961 |
def decode_hint(hint: int) -> str:
"""Decodes integer hint as a string.
The format is:
⬜ (GRAY) -> .
🟨 (YELLOW) -> ?
🟩 (GREEN) -> *
Args:
hint: An integer representing the hint.
Returns:
A string representing the hint.
"""
hint_str = []
for _ in range(_WORD_LENGTH):
hint_chr = hint % 3
hint //= 3
if hint_chr == 0:
hint_str.append(_HINT_NOT_IN_ANY_SPOT)
elif hint_chr == 1:
hint_str.append(_HINT_WRONG_SPOT)
else:
hint_str.append(_HINT_CORRECT_SPOT)
return ''.join(hint_str[::-1]) | 4180b847cd252a1e3c762431327b1b6d359dac3d | 8,962 |
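A worked example, assuming `_WORD_LENGTH` is 5 and the hint symbols are '.', '?', '*' as the docstring implies. 192 in base 3 is 21010, read most-significant digit first:

print(decode_hint(192))  # '*?.?.'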
import os
import tempfile
import subprocess
def validate_notebook(nb_path, timeout=60):
""" Executes the notebook via nbconvert and collects the output
Args:
nb_path (string): path to the notebook of interest
timeout (int): max allowed time (in seconds)
Returns:
(parsed nbformat.NotebookNode object, list of execution errors)
"""
dirname, __ = os.path.split(nb_path)
os.chdir(dirname)
kname = find_kernel(nb_path)
if kname is None:
raise OSError("No kernel found")
# Set delete=False as workaround for Windows OS
with tempfile.NamedTemporaryFile(suffix=".ipynb", delete=False) as tf:
args = [
"jupyter",
"nbconvert",
"--to",
"notebook",
"--execute",
f"--ExecutePreprocessor.timeout={timeout}",
f"--ExecutePreprocessor.kernel_name={kname}",
"--ExecutePreprocessor.allow_errors=True",
"--output",
tf.name,
nb_path,
]
subprocess.check_call(args)
tf.seek(0)
nb = nbformat.read(tf, nbformat.current_nbformat)
errors = list_errors(nb)
# broken urls are currently counted as errors; consider including as
# warnings
broken_urls = find_broken_urls(nb)
if any(broken_urls):
broken_urls = ["broken url: " + u for u in broken_urls]
errors += broken_urls
return nb, errors | 475a11036a5330df8f82d9876731d3d688749aa8 | 8,963 |
def is_symmetric(arr, i_sym=True, j_sym=True):
"""
Takes in an array of shape (n, m) and check if it is symmetric
Parameters
----------
arr : 1D or 2D array
i_sym : array
symmetric with respect to the 1st axis
j_sym : array
symmetric with respect to the 2nd axis
Returns
-------
a binary array with the symmetry condition for the corresponding quadrants.
The globa
Notes
-----
If both **i_sym** = ``True`` and **j_sym** = ``True``, the input array is
checked for polar symmetry.
See `issue #34 comment
<https://github.com/PyAbel/PyAbel/issues/34#issuecomment-160344809>`_
for the defintion of a center of the image.
"""
Q0, Q1, Q2, Q3 = tools.symmetry.get_image_quadrants(
arr, reorient=False)
if i_sym and not j_sym:
valid_flag = [np.allclose(np.fliplr(Q1), Q0),
np.allclose(np.fliplr(Q2), Q3)]
elif not i_sym and j_sym:
valid_flag = [np.allclose(np.flipud(Q1), Q2),
np.allclose(np.flipud(Q0), Q3)]
elif i_sym and j_sym:
valid_flag = [np.allclose(np.flipud(np.fliplr(Q1)), Q3),
np.allclose(np.flipud(np.fliplr(Q0)), Q2)]
else:
raise ValueError('Checking for symmetry with both i_sym=False \
and j_sym=False does not make sense!')
return np.array(valid_flag) | 488744d34d851b690eb6114b06d754e46b04e36f | 8,964 |
def linear_powspec(k, a):
"""linear power spectrum P(k) - linear_powspec(k in h/Mpc, scale factor)"""
return _cosmocalc.linear_powspec(k, a) | 9abe99ef5251b4b8ef04e7113a30dd26ed86d14a | 8,965 |
def light_eff(Pmax, Iz, I0, Ik):
"""
Photosynthetic efficiency based on the light conditions. By definition, the
efficiency has a value between 0 and 1.
Parameters
----------
Pmax : numeric
Maximum photosynthetic rate [-].
Iz : numeric
Coral biomass-averaged light-intensity [mol photons m^-2 s^-1].
I0 : numeric
Light-intensity at the surface water (but within the water column)
[mol photons m^-2 s^-1].
Ik : numeric
Saturation light-intensity [mol photons m^-2 s^-1].
Returns
-------
PI : numeric
Photo-efficiency [-].
"""
# # calculations
try:
if Ik > 0:
PI = Pmax * (np.tanh(Iz / Ik) - np.tanh(.01 * I0 / Ik))
else:
PI = 0.
except ValueError:
PI = np.zeros(len(Ik))
PI[Ik > 0] = Pmax[Ik > 0] * (np.tanh(Iz[Ik > 0] / Ik[Ik > 0]) -
np.tanh(.01 * I0 / Ik[Ik > 0]))
# # Output
return PI | a2e0de2cb0791d3afea15f4c78b7d673200504b3 | 8,966 |
import array
import time
def radial_kernel_evaluate(rmax, kernel, pos, wts, log=null_log, sort_data=False,
many_ngb_approx=None):
"""
Perform evaluation of radial kernel over neighbours.
Note you must set-up the linear-interpolation kernel before calling this
function.
rmax - radius to evaluate within
kernel - kernel table
pos - (N,3) array of positions
wts - (N,) array of weights
[many_ngb_approx - guess for number of neighbours. If this is included and
large, i.e. >140, we will use combine the kernels due to
particles in non-adjacent cells (=monopole approximation
for the 1/r^2 force)]
returns pairs, f
where
pairs - the number of pairs within rmax
f - An (N,3) array s.t.
f_i = Sum_j wts_j (pos_j - pos_i) * kernel(|pos_j - pos_i|)
"""
pos_arr = array(pos)
num_pts = len(pos)
if len(pos) != len(wts):
raise Exception('Number of weights ({:,}) must be the same as number of points ({:,})'.format(len(wts),num_pts))
stencil = None
# Choose a stencil based on number of neighbours
if many_ngb_approx is not None:
guess_ngb = int(many_ngb_approx)
if guess_ngb>400:
# 7x7x7 stencil (only inner 3x3x3 direct)
stencil = 7
ngrid = int(3.0/rmax)
elif guess_ngb>140:
# 5x5x5 stencil (inner 3x3x3 direct)
stencil = 5
ngrid = int(2.0/rmax)
else:
# 3x3x3, all direct
ngrid = int(1.0/rmax)
else:
ngrid = int(1.0/rmax) # 3x3x3 by direct summation
# Avoid nasty hashing problems, make sure ngrid&3 == 3
if ngrid&3!=3 and ngrid >=3:
ngrid = (ngrid//4)*4 -1
print('Using hash grid of size {:,}^3 bins, binning particles.'.format(ngrid), file=log)
cells = get_cells(pos_arr, ngrid, log)
sort_idx, cellbin_data = _bin_id_data(cells, log)
pos = pos_arr[sort_idx].copy()
wts= array(wts)[sort_idx].copy()
print(MU.OKBLUE+'Kernel evalations at {:,} positions'.format(num_pts)+MU.ENDC,
file=log)
t0 = time()
lattice_setup_kernel(rmax, kernel, log)
pairs, accel = lattice_kernel(pos, cellbin_data, ngrid, masses=wts, stencil=stencil)
t1 = time()
dt = t1-t0
if stencil is None:
mean_num_ngb = pairs * 2.0 / num_pts
print('Within r=%.4f, mean number of neighbours was'%rmax,
MU.OKBLUE+'%.2f'%(mean_num_ngb)+MU.ENDC, file=log)
print('{:,} pairs in'.format(pairs), '%.2f seconds'%dt,
'i.e. {:,} positions/sec, {:,} kernels/sec'.format(int(num_pts/dt), int(2*pairs/dt)), file=log)
else:
print('%dx%dx%d monopole approximation, so no exact count for neighbours\n'%((stencil,)*3),
'but {:,} force-pairs in'.format(pairs), '%.2f seconds'%dt,
'i.e. {:,} positions/sec, {:,} kernels/sec'.format(int(num_pts/dt), int(2*pairs/dt)), file=log)
if sort_data:
# return the sort index along with sorted positions and masses, and corresponding accelerations.
# If you want to unsort you need to do it yourself
return pairs, sort_idx, pos, wts, accel
# indices for 'un'-sorting
unsort = empty_like(sort_idx)
unsort[sort_idx] = arange(num_pts)
return pairs, accel[unsort] | 41c4600be3a5684c97d69acb4ebe15846dcc4b0d | 8,967 |
def get_referents(source, exclude=None):
"""
:return: dict storing lists of objects referring to source keyed by type.
"""
res = {}
for obj_cls, ref_cls in [
(models.Language, models.LanguageSource),
(models.ValueSet, models.ValueSetReference),
(models.Sentence, models.SentenceReference),
(models.Contribution, models.ContributionReference),
]:
if obj_cls.mapper_name().lower() in (exclude or []):
continue
q = DBSession.query(obj_cls).join(ref_cls).filter(ref_cls.source_pk == source.pk)
if obj_cls == models.ValueSet:
q = q.options(
joinedload_all(models.ValueSet.parameter),
joinedload_all(models.ValueSet.language))
res[obj_cls.mapper_name().lower()] = q.all()
return res | 2aeccbbe61cdcb2b3183682a5cce8ed959fc14c9 | 8,968 |
import array
def asarray(buffer=None, itemsize=None, shape=None, byteoffset=0,
bytestride=None, padc=" ", kind=CharArray):
"""massages a sequence into a chararray.
If buffer is *already* a chararray of the appropriate kind, it is
returned unaltered.
"""
if isinstance(buffer, kind) and buffer.__class__ is kind:
return buffer
else:
return array(buffer, itemsize, shape, byteoffset, bytestride,
padc, kind) | 346eaaa9ece9671f5b2fa0633552f72e40300adc | 8,969 |
import zipfile
import os
import sys
def _extract_symlink(zipinfo: zipfile.ZipInfo,
pathto: str,
zipfile: zipfile.ZipFile,
nofixlinks: bool=False) -> str:
"""
Extract: read the link path string, and make a new symlink.
'zipinfo' is the link file's ZipInfo object stored in zipfile.
'pathto' is the extract's destination folder (relative or absolute)
'zipfile' is the ZipFile object, which reads and parses the zip file.
"""
assert zipinfo.external_attr >> 28 == SYMLINK_TYPE
zippath = zipinfo.filename
linkpath = zipfile.read(zippath)
linkpath = linkpath.decode('utf8')
# drop Win drive + unc, leading slashes, '.' and '..'
zippath = os.path.splitdrive(zippath)[1]
zippath = zippath.lstrip(os.sep)
allparts = zippath.split(os.sep)
okparts = [p for p in allparts if p not in ('.', '..')]
zippath = os.sep.join(okparts)
# where to store link now
destpath = os.path.join(pathto, zippath)
destpath = os.path.normpath(destpath)
# make leading dirs if needed
upperdirs = os.path.dirname(destpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
# adjust link separators for the local platform
if not nofixlinks:
linkpath = linkpath.replace('/', os.sep).replace('\\', os.sep)
# test+remove link, not target
if os.path.lexists(destpath):
os.remove(destpath)
# windows dir-link arg
isdir = zipinfo.external_attr & SYMLINK_ISDIR
if (isdir and
sys.platform.startswith('win') and
int(sys.version[0]) >= 3):
dirarg = dict(target_is_directory=True)
else:
dirarg ={}
# make the link in dest (mtime: caller)
os.symlink(linkpath, destpath, **dirarg)
return destpath | da3e470cb78474bf56b82f9deb2e1febdd53f668 | 8,970 |
import os
def read_file(fname, ObsClass, verbose=False):
"""This method is used to read the file.
"""
if verbose:
print('reading menyanthes file {}'.format(fname))
if ObsClass == observation.GroundwaterObs:
_rename_dic = {'xcoord': 'x',
'ycoord': 'y',
'upfiltlev': 'bovenkant_filter',
'lowfiltlev': 'onderkant_filter',
'surflev': 'maaiveld',
'filtnr': 'filternr',
'meetpunt': 'measpointlev'
}
_keys_o = ['name', 'x', 'y', 'locatie', 'filternr',
'metadata_available', 'maaiveld', 'meetpunt',
'bovenkant_filter', 'onderkant_filter']
elif ObsClass == observation.WaterlvlObs:
_rename_dic = {'xcoord': 'x',
'ycoord': 'y',
'meetpunt': 'measpointlev'
}
_keys_o = ['name', 'x', 'y', 'locatie']
# Check if file is present
if not (os.path.isfile(fname)):
print('Could not find file ', fname)
mat = loadmat(fname, struct_as_record=False, squeeze_me=True,
chars_as_strings=True)
d_h = read_oseries(mat)
locations = d_h.keys()
obs_list = []
for location in locations:
if verbose:
print('reading location -> {}'.format(location))
metadata = d_h[location]
metadata['projection'] = 'epsg:28992'
metadata['metadata_available'] = True
s = metadata.pop('values')
df = DataFrame(s, columns=['stand_m_tov_nap'])
for key in _rename_dic.keys():
if key in metadata.keys():
metadata[_rename_dic[key]] = metadata.pop(key)
meta_o = {k: metadata[k] for k in _keys_o if k in metadata}
o = ObsClass(df, meta=metadata, **meta_o)
obs_list.append(o)
return obs_list | c35b31048cd823fa1f0157cedd6ce88331be5bf0 | 8,971 |
import numpy as np
import pandas as pd

# EffectFnType, linear_effect and cumulative_effect_curve are assumed to be provided
# by the surrounding module (fklearn-style causal-curve utilities).
def cumulative_gain_curve(df: pd.DataFrame,
treatment: str,
outcome: str,
prediction: str,
min_rows: int = 30,
steps: int = 100,
effect_fn: EffectFnType = linear_effect) -> np.ndarray:
"""
Orders the dataset by prediction and computes the cumulative gain (effect * proportional sample size) curve
according to that ordering.
Parameters
----------
df : Pandas' DataFrame
A Pandas' DataFrame with target and prediction scores.
treatment : Strings
The name of the treatment column in `df`.
outcome : Strings
The name of the outcome column in `df`.
prediction : Strings
The name of the prediction column in `df`.
min_rows : Integer
Minimum number of observations needed to have a valid result.
steps : Integer
The number of cumulative steps to iterate when accumulating the effect
effect_fn : function (df: pandas.DataFrame, treatment: str, outcome: str) -> int or Array of int
A function that computes the treatment effect given a dataframe, the name of the treatment column and the name
of the outcome column.
Returns
----------
cumulative gain curve: float
The cumulative gain according to the predictions ordering.
"""
size = df.shape[0]
n_rows = list(range(min_rows, size, size // steps)) + [size]
cum_effect = cumulative_effect_curve(df=df, treatment=treatment, outcome=outcome, prediction=prediction,
min_rows=min_rows, steps=steps, effect_fn=effect_fn)
return np.array([effect * (rows / size) for rows, effect in zip(n_rows, cum_effect)]) | ca493a85d1aa7d74335b1ddb65f2f2a94fcaa152 | 8,972 |
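# Hypothetical usage of cumulative_gain_curve (added; not part of the dataset entry).
# It assumes the surrounding module supplies linear_effect and cumulative_effect_curve;
# the column names and data below are made up.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
toy = pd.DataFrame({
    "treatment": rng.integers(0, 2, size=500),
    "outcome": rng.normal(size=500),
    "prediction": rng.uniform(size=500),
})
gain = cumulative_gain_curve(toy, "treatment", "outcome", "prediction")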
def last(*args):
"""Return last value from any object type - list,tuple,int,string"""
if len(args) == 1:
return int(''.join(map(str,args))) if isinstance(args[0],int) else args[0][-1]
return args[-1] | ad8d836597dd6a5dfe059756b7d8d728f6ea35fc | 8,973 |
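# Illustrative calls for last() (added; not part of the dataset entry).
assert last([1, 2, 3]) == 3
assert last("abc") == "c"
assert last(1, 2, 3) == 3
assert last(42) == 42   # a single int argument is returned unchanged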
from matplotlib.patheffects import withStroke
def load_ann_kwargs():
"""emboss text"""
myeffect = withStroke(foreground="w", linewidth=3)
ann_kwargs = dict(path_effects=[myeffect])
return ann_kwargs | a4ff019fe234b44da20e3b8f686f852554018546 | 8,974 |
import sys

import cv2

def color_conversion(img_name, color_type="bgr2rgb"):
    """
    Convert an image between color spaces.
    Parameters
    ----------
    img_name : numpy.ndarray
        Input image.
    color_type : str
        Conversion type:
        bgr2rgb, bgr2hsv, bgr2gray, rgb2bgr,
        rgb2hsv, rgb2gray, hsv2bgr, hsv2rgb
    Returns
    -------
    conversion_img : numpy.ndarray
        Converted image.
    """
if color_type == "bgr2rgb":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_BGR2RGB)
elif color_type == "bgr2hsv":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_BGR2HSV)
elif color_type == "bgr2gray":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_BGR2GRAY)
elif color_type == "rgb2bgr":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_RGB2BGR)
elif color_type == "rgb2hsv":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_RGB2HSV)
elif color_type == "rgb2gray":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_RGB2GRAY)
elif color_type == "hsv2bgr":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_HSV2BGR)
elif color_type == "hsv2rgb":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_HSV2RGB)
else:
sys.exit(1)
return conversion_img | 66abc2c44721e262a948b86221063fb57bf1e148 | 8,975 |
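# Hypothetical usage of color_conversion (added; not part of the dataset entry): read
# an image with OpenCV (BGR channel order) and convert it to RGB. The file path is
# made up; cv2.imread returns None if the file does not exist.
import cv2

bgr_img = cv2.imread("example.jpg")
rgb_img = color_conversion(bgr_img, color_type="bgr2rgb")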
def predict(self, celldata):
"""
This is the method that's to perform prediction based on a model
For now it just returns dummy data
:return:
"""
ai_model = load_model_parameter()
ret = predict_unseen_data(ai_model, celldata)
print("celldata: ", celldata)
print("Classification: ", ret)
return ret | 435be195c765aa3823a710982bdc6f7954a24178 | 8,976 |
from typing import List
def statements_to_str(statements: List[ASTNode], indent: int) -> str:
"""Takes a list of statements and returns a string with their C representation"""
stmt_str_list = list()
for stmt in statements:
stmt_str = stmt.to_str(indent + 1)
if not is_compound_statement(stmt) and not isinstance(stmt, Label):
stmt_str += ";" + NEW_LINE
stmt_str_list.append(stmt_str)
return "".join(stmt_str_list) | 01bd0546be8b7a212dbb73fae3c505bbe0086b48 | 8,977 |
import win32gui

def _make_filter(class_name: str, title: str):
    """https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-enumwindows"""
def enum_windows(handle: int, h_list: list):
if not (class_name or title):
h_list.append(handle)
if class_name and class_name not in win32gui.GetClassName(handle):
return True # continue enumeration
if title and title not in win32gui.GetWindowText(handle):
return True # continue enumeration
h_list.append(handle)
return enum_windows | 3b9d5f3fe4afd666cfa7ed43f8abe103b9575249 | 8,978 |
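# Hypothetical usage of _make_filter (added; not part of the dataset entry): collect
# handles of windows whose title contains "Notepad". Requires the pywin32 package.
import win32gui

notepad_handles = []
win32gui.EnumWindows(_make_filter(class_name="", title="Notepad"), notepad_handles)
print(notepad_handles)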
def is_float(s):
"""
Detertmine if a string can be converted to a floating point number.
"""
try:
float(s)
except:
return False
return True | 2df52b4f8e0835d9f169404a6cb4f003ca661fff | 8,979 |
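# Quick sanity checks for is_float (added; not part of the dataset entry).
assert is_float("3.14") is True
assert is_float("1e-9") is True
assert is_float("abc") is False
assert is_float(None) is False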
def build_lm_model(config):
"""
"""
if config["model"] == "transformer":
model = build_transformer_lm_model(config)
elif config["model"] == "rnn":
model = build_rnn_lm_model(config)
else:
raise ValueError("model not correct!")
return model | 03a84f28ec4f4a7cd847575fcbcf278943b72b8a | 8,980 |
def __virtual__():
"""
Only load if boto3 libraries exist.
"""
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__["boto3.assign_funcs"](__name__, "cloudfront")
return has_boto_reqs | 63d2f1102713b8da66e75b28c4c642427fe69e8a | 8,981 |
import glob
import os

import pandas as pd

def extract_binaries(pbitmap, psamples):
"""
Extract sample binaries from subdirectories according to dataset defined in bitmap.
"""
bins = glob.glob(psamples+'/**/*.bin', recursive=True)
bitmap = pd.read_csv(pbitmap) if '.tsv' not in pbitmap else pd.read_csv(pbitmap, sep='\t')
hashes = bitmap['sha1sum'].tolist()
if not os.path.exists('bins'):
os.makedirs('bins')
missed = []
for hash in hashes:
found = False
for bin in bins:
if hash in bin:
cmd = 'cp %s bins/%s.bin' % (bin, hash)
os.system(cmd)
found = True
break
if not found:
missed += [hash]
print('Sample not found: %s' % hash)
res = os.listdir('bins')
print('Total found =', len(res))
return res | e3086e1ec5dcda0b89f34fb917eb422c8cde285b | 8,982 |
def search_range(nums, target):
"""
Find first and last position of target in given array by binary search
:param nums: given array
:type nums : list[int]
:param target: target number
:type target: int
:return: first and last position of target
:rtype: list[int]
"""
result = [-1, -1]
left, right = 0, len(nums) - 1
while left <= right:
mid = (left + right) // 2
# note that we move right pointer when nums[mid] == target
# to find the first occurrence of target
if nums[mid] >= target:
right = mid - 1
else:
left = mid + 1
if 0 <= left < len(nums) and nums[left] == target:
result[0] = left
left, right = 0, len(nums) - 1
while left <= right:
mid = (left + right) // 2
# note that we move left pointer when nums[mid] == target
# to find the last occurrence of target
if nums[mid] > target:
right = mid - 1
else:
left = mid + 1
if 0 <= right < len(nums) and nums[right] == target:
result[1] = right
return result | 8165e3a2f33741c15494d5d98a82a85c2fb610ff | 8,983 |
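# Quick self-checks for search_range (added; not part of the dataset entry).
assert search_range([5, 7, 7, 8, 8, 10], 8) == [3, 4]
assert search_range([5, 7, 7, 8, 8, 10], 6) == [-1, -1]
assert search_range([], 1) == [-1, -1]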
def process_mean_results(data, capacity, constellation, scenario, parameters):
"""
Process results.
"""
output = []
adoption_rate = scenario[1]
overbooking_factor = parameters[constellation.lower()]['overbooking_factor']
constellation_capacity = capacity[constellation]
max_capacity = constellation_capacity['capacity_kmsq']
number_of_satellites = constellation_capacity['number_of_satellites']
satellite_coverage_area = constellation_capacity['satellite_coverage_area']
for idx, item in data.iterrows():
users_per_km2 = item['pop_density_km2'] * (adoption_rate / 100)
active_users_km2 = users_per_km2 / overbooking_factor
if active_users_km2 > 0:
per_user_capacity = max_capacity / active_users_km2
else:
per_user_capacity = 0
output.append({
'scenario': scenario[0],
'constellation': constellation,
'number_of_satellites': number_of_satellites,
'satellite_coverage_area': satellite_coverage_area,
'iso3': item['iso3'],
'GID_id': item['regions'],
'population': item['population'],
'area_m': item['area_m'],
'pop_density_km2': item['pop_density_km2'],
'adoption_rate': adoption_rate,
'users_per_km2': users_per_km2,
'active_users_km2': active_users_km2,
'per_user_capacity': per_user_capacity,
})
return output | 0619c397a21d27440988c4b23284e44700ba69eb | 8,984 |
def identify_ossim_kwl(ossim_kwl_file):
"""
    Parse a geom file to determine whether it describes an OSSIM model.
    :param ossim_kwl_file : ossim keyword list file
    :type ossim_kwl_file : str
    :return ossim kwl info : the ossim model type string, or None if not an ossim kwl file
:rtype str
"""
try:
with open(ossim_kwl_file, encoding="utf-8") as ossim_file:
content = ossim_file.readlines()
geom_dict = {}
for line in content:
(key, val) = line.split(": ")
geom_dict[key] = val.rstrip()
if "type" in geom_dict:
if geom_dict["type"].strip().startswith("ossim"):
return geom_dict["type"].strip()
return None
except Exception: # pylint: disable=broad-except
return None | 9a63a8b5e7ece79b11336e71a8afa5a703e3acbc | 8,985 |
# Note: this helper targets the pre-1.0 TensorFlow API, where tf.concat takes
# (axis, values); in TF >= 1.0 the equivalent call is tf.concat([...], axis=3).
import tensorflow as tf

def conv_cond_concat(x, y):
""" Concatenate conditioning vector on feature map axis.
# Arguments
x: 4D-Tensor
y: 4D-Tensor
# Return
4D-Tensor
"""
x_shapes = x.get_shape()
y_shapes = y.get_shape()
return tf.concat(3, [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])]) | c30a4328d3a6e8cf2b1e38cf012edca045e9de69 | 8,986 |
import os
import shutil
import sys

def get(args, syn):
    """TODO_Sphinx."""
    entity = syn.get(args.id)
    ## TODO: Is this part even necessary?
    ## (Other than the print statements)
    if 'files' in entity:
        for file in entity['files']:
            src = os.path.join(entity['cacheDir'], file)
            dst = os.path.join('.', file.replace(".R_OBJECTS/", ""))
            print('creating %s' % dst)
            if not os.path.exists(os.path.dirname(dst)):
                # create the destination's parent directory, not dst itself
                os.mkdir(os.path.dirname(dst))
            shutil.copyfile(src, dst)
    else:
        sys.stderr.write('WARNING: No files associated with entity %s\n' % (args.id,))
        syn.printEntity(entity)
return entity | 61bb507faaa2821619e77972dc23158fdc3228ba | 8,987 |
import numpy as np

# create_dataset, flags_band, swath_resample, write_tif and gdal_translate are assumed
# to be provided by the surrounding module.
def swath_pyresample_gdaltrans(file: str, var: str, subarea: dict, epsilon: float, src_tif: str, dst_tif: str):
"""Reprojects swath data using pyresample and translates the image to EE ready tif using gdal
Parameters
----------
file: str
file to be resampled and uploaded to GC -> EE
var: str
input variable name
subarea: dict
string name of the projection to resample the data onto (pyproj supported)
epsilon: float
The distance to a found value is guaranteed to be no further than (1 + eps)
times the distance to the correct neighbour. Allowing for uncertainty decreases execution time.
src_tif: str
temporary target geotif file
dst_tif: str
final geotif output, GDAL processed
Returns
-------
dict:
global and var attributes
"""
# -----------
# get dataset
# -----------
resample_dst = create_dataset(file=file, key=var, subarea=subarea)
resample_dst['epsilon'] = epsilon
# ---------------
# resample swaths
# ---------------
if var in ('l2_flags', 'QA_flag'):
meta = flags_band(dataset=resample_dst,
key=var,
src_tif=src_tif,
dst_tif=dst_tif)
else:
attrs = resample_dst.pop(var)
glob_attrs = resample_dst.pop('glob_attrs')
proj = resample_dst.pop('proj')
fill_value = attrs['_FillValue']
result = swath_resample(swath=resample_dst, trg_proj=proj)
np.ma.set_fill_value(result, fill_value=fill_value)
# ---------------------
# write out the g-tif-f
# ---------------------
meta = write_tif(file=src_tif,
dataset=result,
data_type='Float32',
metadata={var: attrs, 'glob_attrs': glob_attrs},
area_def=proj)
gdal_translate(src_tif=src_tif,
dst_tif=dst_tif,
ot='Float32',
nodata=fill_value)
return meta | b716a6b45cf48457d0c6ca5849997b7c37c6c795 | 8,988 |
def run_drc(cell_name, gds_name, sp_name=None, extract=True, final_verification=False):
"""Run DRC check on a cell which is implemented in gds_name."""
global num_drc_runs
num_drc_runs += 1
write_drc_script(cell_name, gds_name, extract, final_verification, OPTS.openram_temp, sp_name=sp_name)
(outfile, errfile, resultsfile) = run_script(cell_name, "drc")
# Check the result for these lines in the summary:
# Total DRC errors found: 0
# The count is shown in this format:
# Cell replica_cell_6t has 3 error tiles.
# Cell tri_gate_array has 8 error tiles.
# etc.
try:
f = open(resultsfile, "r")
except FileNotFoundError:
debug.error("Unable to load DRC results file from {}. Is klayout set up?".format(resultsfile), 1)
results = f.readlines()
f.close()
errors=len([x for x in results if "<visited>" in x])
# always display this summary
result_str = "DRC Errors {0}\t{1}".format(cell_name, errors)
if errors > 0:
debug.warning(result_str)
else:
debug.info(1, result_str)
return errors | ac44030fc343b50d1035f5b584fc4f64f319aa27 | 8,989 |
from OpenSSL import crypto

def getKeyPairPrivateKey(keyPair):
    """Extracts the private key from a key pair.
    @type keyPair: OpenSSL.crypto.PKey
    @param keyPair: public/private key pair
    @rtype: string
    @return: private key PEM text
    """
return crypto.dump_privatekey(crypto.FILETYPE_PEM, keyPair) | 0decc2dbb77343a7a200ace2af9277ee7e5717a5 | 8,990 |
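# Hypothetical usage of getKeyPairPrivateKey (added; not part of the dataset entry),
# assuming the pyOpenSSL package. The 2048-bit RSA key is generated on the fly.
from OpenSSL import crypto

key_pair = crypto.PKey()
key_pair.generate_key(crypto.TYPE_RSA, 2048)
pem_bytes = getKeyPairPrivateKey(key_pair)
print(pem_bytes.decode("ascii"))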
def playbook_input(request, playbook_id, config_file=None, template=None):
"""Playbook input view."""
# Get playbook
playbook = Playbook.objects.get(pk=playbook_id)
# Get username
user = str(request.user)
# Check user permissions
if user not in playbook.permissions.users:
return playbooks(request)
# Get asset name if provided
asset_name = request.POST.get('asset_name', None)
# Get Assets
if playbook.asset_filter != '*':
inventory = netspot.NetSPOT()
assets = inventory.search(playbook.asset_filter, key='asset')
else:
assets = None
  # Get config if config_file is provided
config = None
if config_file:
with open(config_file, 'r') as file_handle:
config = file_handle.read().strip()
variables = PlaybookVariable.objects.filter(playbook=playbook)
return render(
request,
'playbook.htm',
context={'playbook': playbook.name,
'playbook_id': playbook.id,
'assets': assets,
'asset_name': asset_name,
'asset_filter': playbook.asset_filter,
'user_auth': playbook.user_auth,
'inputs': variables,
'config_file': config_file,
'config': config,
'template': template,
'description': playbook.description},
) | 4b01e08414f38bdaad45245043ec30adb876e40e | 8,991 |
def _filter_gtf_df(GTF_df, col, selection, keep_columns, silent=False):
"""
Filter a GTF on a specific feature type (e.g., genes)
Parameters:
-----------
GTF_df
pandas DataFrame of a GTF
type: pd.DataFrame
col
colname on which df.loc will be performed
type: str
selection
value in df[col]
type: str, int, float, etc. (most likely str)
    keep_columns
        A list of column names to keep in the returned DataFrame.
        type: list of str
silent
default: False
type: bool
Returns:
--------
GTF_filtered
type: pandas.DataFrame
"""
msg = _Messages(silent)
msg.filtering(col, selection)
return GTF_df.loc[GTF_df[col] == selection][keep_columns] | 5f41141d69c0c837e396ec95127500a826013500 | 8,992 |
def validation_generator_for_dir(data_dir, model_dict):
"""Create a Keras generator suitable for validation
No data augmentation is performed.
:param data_dir: folder with subfolders for the classes and images therein
:param model_dict: dict as returned by `create_custom_model`
:returns: a generator for batches suitable for validating the model
:rtype: ??
"""
return _generator_for_dir(test_datagen, data_dir, model_dict) | 57b0a83e98438b8e397377a5626094f69ea21083 | 8,993 |
def convert_cbaois_to_kpsois(cbaois):
"""Convert coordinate-based augmentables to KeypointsOnImage instances.
Parameters
----------
cbaois : list of imgaug.augmentables.bbs.BoundingBoxesOnImage or list of imgaug.augmentables.bbs.PolygonsOnImage or list of imgaug.augmentables.bbs.LineStringsOnImage or imgaug.augmentables.bbs.BoundingBoxesOnImage or imgaug.augmentables.bbs.PolygonsOnImage or imgaug.augmentables.bbs.LineStringsOnImage
Coordinate-based augmentables to convert, e.g. bounding boxes.
Returns
-------
list of imgaug.augmentables.kps.KeypointsOnImage or imgaug.augmentables.kps.KeypointsOnImage
``KeypointsOnImage`` instances containing the coordinates of input
`cbaois`.
"""
if not isinstance(cbaois, list):
return cbaois.to_keypoints_on_image()
kpsois = []
for cbaoi in cbaois:
kpsois.append(cbaoi.to_keypoints_on_image())
return kpsois | 6eee2715de3bfc76fac9bd3c246b0d2352101be1 | 8,994 |
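# Hypothetical usage of convert_cbaois_to_kpsois (added; not part of the dataset
# entry), assuming the imgaug package; the box coordinates and image shape are made up.
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage

bbsoi = BoundingBoxesOnImage(
    [BoundingBox(x1=10, y1=20, x2=50, y2=60)], shape=(100, 100, 3))
kpsoi = convert_cbaois_to_kpsois(bbsoi)         # single instance in -> single instance out
kpsoi_list = convert_cbaois_to_kpsois([bbsoi])  # list in -> list out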
import zipfile
import os
import io
import pickle

import numpy as np
def gen_stream_from_zip(zip_path, file_extension='wav', label_files=None, label_names=None, utt2spk=None,
corpus_name=None, is_speech_corpus=True, is_rir=False, get_duration=False):
""" Generate speech stream from zip file and utt2spk. The zip file contains wavfiles.
Parameters
-----------
zip_path: path of the zip file that contains the waveforms.
    label_files: list of label files. Each line of label_files contains the label for one utterance and has the
        following format:
utt_id_1 label_1
utt_id_2 label_2
...
where utt_id_1 and utt_id_2 are utterance IDs of the sentences and can be any string as far as each utterance
has an unique ID. The utt_ids must be compatible with the file_names (excluding extension) in the zip file.
file_extension: define the extension of the files in the zip file. Used to filter out non-waveform files.
label_names: list of strings specifying the name of the label_files, e.g. "frame_label', 'word_label', etc.
utt2spk: a dictionary mapping from utterance ID to speaker ID. If not provided, corpus_name must be provided.
is_speech_corpus: bool, whether the zip contains speech.
is_rir: bool, whether the zip contains RIR. If True, expect a config file in the zip that contains the meta data
info about the RIRs.
get_duration: bool, whether to get duration of the waveforms
    Returns:
An object of type SpeechDataStream, RIRDataStream, or DataStream.
"""
wav_reader = reader.ZipWaveIO(precision="float32")
zip_file = zipfile.ZipFile(zip_path)
all_list = zip_file.namelist()
wav_list = [i for i in all_list if os.path.splitext(i)[1][1:].lower() == file_extension]
utt_id_wav = wavlist2uttlist(wav_list)
# sort wav_list by utterance ID
tmp = sorted(zip(utt_id_wav, wav_list))
utt_id_wav = [i[0] for i in tmp]
wav_list = [i[1] for i in tmp]
def get_label(label_lines, selected_utt_id):
selected_label_list = []
for line in label_lines:
tmp = line.split(" ")
utt_id = tmp[0]
if utt_id in selected_utt_id:
tmp_label = np.asarray([int(j) for j in tmp[1:] if len(j)>0])[np.newaxis,:]
selected_label_list.append(tmp_label)
return selected_label_list
if label_files is not None:
# Find the intersection of the utterance IDs
selected_utt_id = set(utt_id_wav)
utt_id_label = []
label_file_lines = []
for i in range(len(label_files)):
lines = my_cat(label_files[i])
lines.sort() # each lines start with utterance ID, hence effectively sort the labels with utterance ID.
curr_utt_id_label = [i.split(" ")[0] for i in lines]
selected_utt_id = set(curr_utt_id_label) & selected_utt_id
utt_id_label.append(curr_utt_id_label)
label_file_lines.append(lines)
# Build DataStream for each label types
label_streams = dict()
if label_names is None:
label_names = ['label_'+str(i) for i in range(len(label_files))]
for i in range(len(label_files)):
selected_label_list = get_label(label_file_lines[i], selected_utt_id) # selected_label_list is sorted, as label_file_lines[i] is sorted.
label_streams[label_names[i]] = DataStream(selected_label_list, is_file=False, reader=None)
selected_wav_list = [wav_list[i] for i in range(len(wav_list)) if utt_id_wav[i] in selected_utt_id]
selected_utt_id = list(selected_utt_id)
selected_utt_id.sort()
# note that selected_wav_list, selected_label_list, and selected_utt_id are all sorted by utterance ID. So they
# are guaranteed to have one-to-one correspondence if the utterance IDs are unique.
else:
label_streams = None
selected_utt_id = utt_id_wav
selected_wav_list = wav_list
root_dir = zip_path + '@/'
if is_speech_corpus:
assert utt2spk is not None or corpus_name is not None
data_stream = DataStream(selected_wav_list, is_file=True, reader=wav_reader, root=root_dir)
if corpus_name == 'LibriSpeech':
corpus_stream = LibriDataStream(selected_utt_id, data_stream, label_streams=label_streams)
elif corpus_name == 'WSJ':
corpus_stream = WSJDataStream(selected_utt_id, data_stream, label_streams=label_streams)
elif corpus_name == 'TIMIT':
corpus_stream = TimitDataStream(selected_utt_id, data_stream, label_streams=label_streams)
else: # for unknown corpus, you need to provide the utt2spk mapping.
corpus_stream = SpeechDataStream(selected_utt_id, data_stream, utt2spk=utt2spk, label_streams=label_streams)
elif is_rir:
for i in all_list:
if os.path.splitext(i)[1][1:] == 'pkl':
config_file = i
break
byte_chunk = zip_file.read(config_file)
byte_stream = io.BytesIO(byte_chunk)
config = pickle.load(byte_stream)
zip_base = os.path.splitext(os.path.basename(zip_path))[0]
wav_list = [zip_base+'/'+i['file'] for i in config]
data_stream = RIRStream(wav_list, config=config, is_file=True, reader=wav_reader, root=root_dir)
corpus_stream = data_stream
else:
data_stream = DataStream(selected_wav_list, is_file=True, reader=wav_reader, root=root_dir)
corpus_stream = data_stream
if get_duration:
if is_speech_corpus:
corpus_stream.data_stream.set_data_len()
corpus_stream.data_stream.reader = reader.ZipWaveIO(precision="float32")
else:
corpus_stream.set_data_len()
corpus_stream.reader = reader.ZipWaveIO(precision="float32")
return corpus_stream | 93a81a8d102c0b76593bcd44b64675c1c1e1fce7 | 8,995 |
def get_query_dsl(
query_string, global_filters=None, facets_query_size=20, default_operator='and'):
"""
returns an elasticsearch query dsl for a query string
param: query_string : an expression of the form
type: person title:foo AND description:bar
where type corresponds to an elastic search document type
which gets added as a filter
param: global_filters : a dictionary of the form
{user_id: 1234}. This gets added as a filter to the query
so that the query can be narrowed down to fewer documents.
It is translated into an elastic search term filter.
"""
global FACETS_QUERY_SIZE, DEFAULT_OPERATOR
FACETS_QUERY_SIZE = facets_query_size
DEFAULT_OPERATOR = default_operator
global_filters = global_filters if global_filters else {}
expression = tokenizer.tokenize(query_string)
bool_lists = expression['query']['filtered']['filter']['bool']
[bool_lists['should'].append({"term": orele}) for orele in global_filters.get('or', [])]
[bool_lists['must'].append({"term": andele}) for andele in global_filters.get('and', [])]
[bool_lists['must_not'].append({"term": notele}) for notele in global_filters.get('not', [])]
    if 'sort' in global_filters:  # dict.has_key() was removed in Python 3
expression['sort'] = global_filters.get('sort')
return expression | 9f6c1371e0de1f28737415c0454f645748af054f | 8,996 |
def prune_visualization_dict(visualization_dict):
"""
Get rid of empty entries in visualization dict
:param visualization_dict:
:return:
"""
new_visualization_dict = {}
# when the form is left blank the entries of visualization_dict have
# COLUMN_NAME key that points to an empty list
for vis_key, vis_dict in visualization_dict.items():
if vis_dict.get(COLUMN_NAME):
new_visualization_dict[vis_key] = vis_dict
return new_visualization_dict | fae81eb69fc25d61282eb151d931d740c51b8bae | 8,997 |
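# Hypothetical usage of prune_visualization_dict (added; not part of the dataset
# entry), assuming the surrounding module defines COLUMN_NAME = "column_name".
vis = {
    "chart_1": {"column_name": ["price", "date"]},
    "chart_2": {"column_name": []},   # blank form entry, gets pruned
}
pruned = prune_visualization_dict(vis)   # only "chart_1" survives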
def _LocationListToGoTo( request_data, positions ):
"""Convert a LSP list of locations to a ycmd GoTo response."""
try:
if len( positions ) > 1:
return [
responses.BuildGoToResponseFromLocation(
*_PositionToLocationAndDescription( request_data, position ) )
for position in positions
]
return responses.BuildGoToResponseFromLocation(
*_PositionToLocationAndDescription( request_data, positions[ 0 ] ) )
except ( IndexError, KeyError ):
raise RuntimeError( 'Cannot jump to location' ) | 2ee39fdadd721920a3737561979308223a64b57a | 8,998 |
def calculate_average_grades_and_deviation(course):
"""Determines the final average grade and deviation for a course."""
avg_generic_likert = []
avg_contribution_likert = []
dev_generic_likert = []
dev_contribution_likert = []
avg_generic_grade = []
avg_contribution_grade = []
dev_generic_grade = []
dev_contribution_grade = []
for __, contributor, __, results, __ in calculate_results(course):
average_likert = avg([result.average for result in results if result.question.is_likert_question])
deviation_likert = avg([result.deviation for result in results if result.question.is_likert_question])
average_grade = avg([result.average for result in results if result.question.is_grade_question])
deviation_grade = avg([result.deviation for result in results if result.question.is_grade_question])
(avg_contribution_likert if contributor else avg_generic_likert).append(average_likert)
(dev_contribution_likert if contributor else dev_generic_likert).append(deviation_likert)
(avg_contribution_grade if contributor else avg_generic_grade).append(average_grade)
(dev_contribution_grade if contributor else dev_generic_grade).append(deviation_grade)
# the final total grade will be calculated by the following formula (GP = GRADE_PERCENTAGE, CP = CONTRIBUTION_PERCENTAGE):
# final_likert = CP * likert_answers_about_persons + (1-CP) * likert_answers_about_courses
# final_grade = CP * grade_answers_about_persons + (1-CP) * grade_answers_about_courses
# final = GP * final_grade + (1-GP) * final_likert
final_likert_avg = mix(avg(avg_contribution_likert), avg(avg_generic_likert), settings.CONTRIBUTION_PERCENTAGE)
final_likert_dev = mix(avg(dev_contribution_likert), avg(dev_generic_likert), settings.CONTRIBUTION_PERCENTAGE)
final_grade_avg = mix(avg(avg_contribution_grade), avg(avg_generic_grade), settings.CONTRIBUTION_PERCENTAGE)
final_grade_dev = mix(avg(dev_contribution_grade), avg(dev_generic_grade), settings.CONTRIBUTION_PERCENTAGE)
final_avg = mix(final_grade_avg, final_likert_avg, settings.GRADE_PERCENTAGE)
final_dev = mix(final_grade_dev, final_likert_dev, settings.GRADE_PERCENTAGE)
return final_avg, final_dev | 95b26efedba076e0b9b54c565fe2e0787d5fbb0e | 8,999 |