content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
import numpy as np
def _feature_normalization(features, method, feature_type):
    """Normalize the given feature vector `features`, with the stated normalization `method`.
Args:
features (np.ndarray): The signal array
method (str): Normalization method.
'global': Uses global mean and standard deviation values from `train.txt`.
The normalization is being applied element wise.
([sample] - [mean]^T) / [std]^T
Where brackets denote matrices or vectors.
'local': Use local (in sample) mean and standard deviation values, and apply the
normalization element wise, like in `global`.
'local_scalar': Uses only the mean and standard deviation of the current sample.
The normalization is being applied by ([sample] - mean_scalar) / std_scalar
'none': No normalization is being applied.
feature_type (str): Feature type, see `load_sample` for details.
Returns:
np.ndarray: The normalized feature vector.
"""
if method == 'none':
return features
elif method == 'global':
# Option 'global' is applied element wise.
if feature_type == 'mel':
global_mean = __global_mean_mel
global_std = __global_std_mel
elif feature_type == 'mfcc':
global_mean = __global_mean_mfcc
global_std = __global_std_mfcc
else:
raise ValueError('Unsupported global feature type: {}'.format(feature_type))
return (features - global_mean) / global_std
elif method == 'local':
return (features - np.mean(features, axis=0)) / np.std(features, axis=0)
elif method == 'local_scalar':
        # Option 'local_scalar' uses scalar mean and standard deviation values.
return (features - np.mean(features)) / np.std(features)
else:
raise ValueError('Invalid normalization method: {}'.format(method)) | 0479363651a4bcf1622e7bdb0906b55e3adb1cce | 12,500 |
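A minimal usage sketch for the 'local' paths, assuming NumPy; the 'global' path additionally requires the module-level `__global_mean_*` / `__global_std_*` statistics computed from `train.txt`.
import numpy as np

features = np.random.rand(100, 80)   # e.g. 100 frames of an 80-bin mel spectrogram
normalized = _feature_normalization(features, method='local', feature_type='mel')
# Each feature dimension now has roughly zero mean and unit variance.
print(normalized.mean(axis=0).round(6), normalized.std(axis=0).round(6))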
def get_constraint(name):
"""
Lookup table of default weight constraint functions.
Parameters
----------
name : Constraint, None, str
Constraint to look up. Must be one of:
        - 'unit' : Unit-norm constraint.
        - 'maxnorm' : Max-norm constraint.
        - 'minmax' : Min-max-norm constraint.
- Constraint : A custom implementation.
- None : Return None.
Custom Constraint must implement `constrain`
function.
Returns
-------
constraint : Constraint or None
The constraint function.
"""
if name == 'unit' : return UnitNorm
elif name == 'maxnorm' : return MaxNorm
elif name == 'minmax' : return MinMaxNorm
    elif name is None or isinstance(name, Constraint) : return name
    else : raise ValueError("Invalid constraint") | 09927531f4c6770e86ad603063e4edb0b0c4ff48 | 12,501
def player_count(conn, team_id):
"""Returns the number of players associated with a particular team"""
c = conn.cursor()
c.execute("SELECT id FROM players WHERE team_id=?", (team_id,))
return len(c.fetchall()) | cfced6da6c8927db2ccf331dca7d23bba0ce67e5 | 12,502 |
def _RedisClient(address):
"""
Return a connection object connected to the socket given by `address`
"""
h1, h2 = get_handle_pair(conn_type=REDIS_LIST_CONN)
c = _RedisConnection(h1)
#redis_client = util.get_redis_client()
redis_client = util.get_cache_client()
ip, port = address
chan = '{}:{}'.format(ip, port)
redis_client.publish(chan, bytes(h2, 'utf-8'))
ack = c.recv()
assert ack == 'OK'
return c | fc8bab786bb521fbd0715da3ab690575d1df865e | 12,503 |
import math
def format_timedelta(value,
time_format="{days} days, {hours2}:{minutes2}:{seconds2}"):
"""Format a datetie.timedelta. See """
if hasattr(value, 'seconds'):
seconds = value.seconds + value.days * 24 * 3600
else:
seconds = int(value)
seconds_total = seconds
minutes = int(math.floor(seconds / 60))
minutes_total = minutes
seconds -= minutes * 60
hours = int(math.floor(minutes / 60))
hours_total = hours
minutes -= hours * 60
days = int(math.floor(hours / 24))
days_total = days
hours -= days * 24
years = int(math.floor(days / 365))
years_total = years
days -= years * 365
return time_format.format(
**{
'seconds': seconds,
'seconds2': str(seconds).zfill(2),
'minutes': minutes,
'minutes2': str(minutes).zfill(2),
'hours': hours,
'hours2': str(hours).zfill(2),
'days': days,
'years': years,
'seconds_total': seconds_total,
'minutes_total': minutes_total,
'hours_total': hours_total,
'days_total': days_total,
'years_total': years_total,
}) | 19dc2b175beb1d030f14ae7fe96cb16d66f6c219 | 12,504 |
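A short usage sketch; with the default format string, one day, one hour, one minute and one second renders as shown below.
from datetime import timedelta

print(format_timedelta(timedelta(days=1, hours=1, minutes=1, seconds=1)))
# -> 1 days, 01:01:01
print(format_timedelta(3725, time_format="{hours}h {minutes2}m {seconds2}s"))
# -> 1h 02m 05s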
def random_account_user(account):
"""Get a random user for an account."""
account_user = AccountUser.objects.filter(account=account).order_by("?").first()
return account_user.user if account_user else None | 5fe918af67710d0d1519f56eee15811430a0e139 | 12,505 |
def overwrite(main_config_obj, args):
"""
Overwrites parameters with input flags
Args:
main_config_obj (ConfigClass): config instance
args (dict): arguments used to overwrite
Returns:
ConfigClass: config instance
"""
# Sort on nested level to override shallow items first
args = dict(sorted(args.items(), key=lambda item: item[0].count('.')))
for argument_key, val in args.items():
        # Separate nested keys into outer and inner
outer_keys = argument_key.split('.')
inner_key = outer_keys.pop(-1)
base_err_msg = f"Can't set '{argument_key} = {val}'"
# Check that the nested config has the attribute and is a config class
config_obj = main_config_obj
config_class = type(config_obj).__name__
for key_idx, key_part in enumerate(argument_key.split('.')):
err_msg = f"{base_err_msg}. '{key_part}' isn't an attribute in '{config_class}'"
assert hasattr(config_obj, key_part), err_msg
# Check if the config allows the argument
figutils.check_allowed_input_argument(config_obj, key_part, argument_key)
# Check if the outer attributes are config classes
if key_idx < len(outer_keys):
config_obj = getattr(config_obj, key_part)
config_class = type(config_obj).__name__
err_msg = f"{base_err_msg}. '{'.'.join(outer_keys)}' isn't a registered Anyfig config class"
assert figutils.is_config_class(config_obj), err_msg
value_class = type(getattr(config_obj, inner_key))
base_err_msg = f"Input argument '{argument_key}' with value {val} can't create an object of the expected type"
# Create new anyfig class object
if figutils.is_config_class(value_class):
value_obj = create_config(val)
# Create new object that follows the InterfaceField's rules
elif issubclass(value_class, fields.InterfaceField):
field = getattr(config_obj, inner_key)
            if issubclass(value_class, fields.InputField):
value_class = field.type_pattern
else:
value_class = type(field.value)
try:
val = value_class(val)
except Exception as e:
err_msg = f"{base_err_msg} {field.type_pattern}. {e}"
raise RuntimeError(err_msg) from None
field = field.update_value(inner_key, val, config_class)
value_obj = field.finish_wrapping_phase(inner_key, config_class)
# Create new object of previous value type with new value
else:
try:
if isinstance(val, dict): # Keyword specified cli-arguments
value_obj = value_class(**val)
else:
value_obj = value_class(val)
except Exception as e:
err_msg = f"{base_err_msg} {value_class}. {e}"
raise RuntimeError(err_msg) from None
# Overwrite old value
setattr(config_obj, inner_key, value_obj)
return main_config_obj | 98ee9cf034a9b714ae18e737761b06bfd669bfa4 | 12,506 |
def max_delta(model, new_model):
"""Return the largest difference between any two corresponding
values in the models"""
return max( [(abs(model[i] - new_model[i])).max() for i in range(len(model))] ) | faf4a9fb2b24f7e7b4f357eef195e435950ea218 | 12,507 |
import numpy as np
def wiener_khinchin_transform(power_spectrum, frequency, time):
    """
    A function to transform the power spectrum to a correlation function by the Wiener-Khinchin transformation.
    **Input:**
* **power_spectrum** (`list or numpy.array`):
The power spectrum of the signal.
* **frequency** (`list or numpy.array`):
The frequency discretizations of the power spectrum.
* **time** (`list or numpy.array`):
The time discretizations of the signal.
**Output/Returns:**
* **correlation_function** (`list or numpy.array`):
The correlation function of the signal.
"""
frequency_interval = frequency[1] - frequency[0]
fac = np.ones(len(frequency))
fac[1: len(frequency) - 1: 2] = 4
fac[2: len(frequency) - 2: 2] = 2
fac = fac * frequency_interval / 3
correlation_function = np.zeros(len(time))
for i in range(len(time)):
correlation_function[i] = 2 * np.dot(fac, power_spectrum * np.cos(frequency * time[i]))
return correlation_function | 3cf8916c75632e3a0db52f907ce180eb766f9f2e | 12,508 |
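A small usage sketch, assuming NumPy; the `fac` weights implement Simpson's rule, so the frequency grid should be uniformly spaced with an odd number of points.
import numpy as np

frequency = np.linspace(0, 10, 129)            # uniform grid, odd number of points
time = np.linspace(0, 5, 256)
power_spectrum = np.exp(-frequency**2 / 2.0)   # toy one-sided power spectrum
correlation = wiener_khinchin_transform(power_spectrum, frequency, time)
print(correlation.shape)   # (256,)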
def child_is_flat(children, level=1):
"""
    Check if all children in a section are at the same level.
    children - list of section children.
    level - integer, current level of depth.
    Returns True if all children are at the same level, False otherwise.
"""
return all(
len(child) <= level + 1 or child[(level + 1) :][0].isalpha()
for child in children
) | e14f9210a90b40b419d21fffa1542212429d80be | 12,509 |
from pathlib import Path
def load_dataset(name, other_paths=[]):
"""Load a dataset with given (file) name."""
if isinstance(name, Dataset):
return name
path = Path(name)
# First, try if you have passed a fully formed dataset path
if path.is_file():
return _from_npy(name, classes=classes)
# Go through the dataset paths, return the first dataset found
all_paths = dataset_path + other_paths
for p in all_paths:
try:
file = p / path
return _from_npy(file, classes=classes)
except FileNotFoundError:
pass
raise FileNotFoundError(
"Could not find dataset {} in paths {}".format(name, all_paths)
) | 3f3d2e7e7ec577098e1a1599c74638ced5d3c103 | 12,510 |
def isqrtcovresnet101b(**kwargs):
"""
iSQRT-COV-ResNet-101 model with stride at the second convolution in bottleneck block from 'Towards Faster Training
of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,'
https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=101, conv1_stride=False, model_name="isqrtcovresnet101b", **kwargs) | fdf166fa3ce9e893e8e97d1057dac89d084d2217 | 12,511 |
def get_data(name: str, level: int, max_level: int) -> str:
"""從維基頁面爬取資料
參數:
name: 程式或節點名稱
level: 欲查詢的等級
回傳:
爬到的資料
"""
reply_msg = []
for dataframe in read_html(generate_url(name)):
if (max_level < dataframe.shape[0] < max_level + 3 and
dataframe.iloc[level, 0].isdigit() and
level == int(dataframe.iloc[level, 0])):
reply_msg.append(zip(*dataframe.iloc[[0, level], 1:].values))
return '\n'.join(':'.join(pair) for data in reply_msg for pair in data) | 4e0f11a33c81993132d45f3fdad5f42c1288bbe5 | 12,512 |
import os
def is_processable(path: str, should_match_extension: str):
"""
    Check whether `path` is a regular file whose extension matches `should_match_extension` (case-insensitive).
"""
if not os.path.isfile(path):
return False
filename = os.path.basename(path)
_, extension = os.path.splitext(filename)
if extension.lower() != should_match_extension.lower():
return False
return True | 5d99b821d3653ff452acac1e5fe48cab559c509e | 12,513 |
def insert_data(context, data_dict):
"""
:raises InvalidDataError: if there is an invalid value in the given data
"""
data_dict['method'] = _INSERT
result = upsert_data(context, data_dict)
return result | c631016be36f1988bfa9c98cea42a7f63fddc276 | 12,514 |
import time
def timestamp():
"""Get the unix timestamp now and retuen it.
Attention: It's a floating point number."""
timestamp = time.time()
return timestamp | 8e56a61659da657da9d5dda364d4d9e8f3d58ed2 | 12,515 |
from datetime import datetime
def _n64_to_datetime(n64):
"""Convert Numpy 64 bit timestamps to datetime objects. Units in seconds"""
return datetime.utcfromtimestamp(n64.tolist() / 1e9) | a25327f2cd0093635f86f3145f5674cc1945d3f8 | 12,516 |
import itertools
def cycle(iterable):
"""Make an iterator returning elements from the iterable and saving a copy of each.
When the iterable is exhausted, return elements from the saved copy. Repeats indefinitely.
This function uses single dispatch.
.. seealso:: :func:`itertools.cycle`
"""
return itertools.cycle(iterable) | 13f479fca709dffa77eeca3d32ff7265c81588bf | 12,517 |
def get_availability_zone(name=None, state=None, zone_id=None, opts=None):
"""
`.getAvailabilityZone` provides details about a specific availability zone (AZ)
in the current region.
This can be used both to validate an availability zone given in a variable
and to split the AZ name into its component parts of an AWS region and an
AZ identifier letter. The latter may be useful e.g. for implementing a
consistent subnet numbering scheme across several regions by mapping both
the region and the subnet letter to network numbers.
This is different from the `.getAvailabilityZones` (plural) data source,
which provides a list of the available zones.
:param str name: The full name of the availability zone to select.
:param str state: A specific availability zone state to require. May
be any of `"available"`, `"information"` or `"impaired"`.
:param str zone_id: The zone ID of the availability zone to select.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/d/availability_zone.html.markdown.
"""
__args__ = dict()
__args__['name'] = name
__args__['state'] = state
__args__['zoneId'] = zone_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:index/getAvailabilityZone:getAvailabilityZone', __args__, opts=opts).value
return AwaitableGetAvailabilityZoneResult(
name=__ret__.get('name'),
name_suffix=__ret__.get('nameSuffix'),
region=__ret__.get('region'),
state=__ret__.get('state'),
zone_id=__ret__.get('zoneId'),
id=__ret__.get('id')) | 6cb20524c1e0a2539e221711f1153949ab72f8e1 | 12,518 |
def _add_u_eq(blk, uex=0.8):
"""Add heat transfer coefficent adjustment for feed water flow rate.
This is based on knowing the heat transfer coefficent at a particular flow
and assuming the heat transfer coefficent is porportial to feed water
flow rate raised to certain power (typically 0.8)
Args:
blk: Heat exchanger block to add correlation to
uex: Correlation parameter value (defalut 0.8)
Returns:
None
"""
ti = blk.flowsheet().time
blk.U0 = pyo.Var(ti)
blk.f0 = pyo.Var(ti)
blk.uex = pyo.Var(ti, initialize=uex)
for t in ti:
blk.U0[t].value = blk.overall_heat_transfer_coefficient[t].value
blk.f0[t].value = blk.tube.properties_in[t].flow_mol.value
blk.overall_heat_transfer_coefficient.unfix()
blk.U0.fix()
blk.uex.fix()
blk.f0.fix()
@blk.Constraint(ti)
def U_eq(b, t):
return (
b.overall_heat_transfer_coefficient[t] ==
b.U0[t]*(b.tube.properties_in[t].flow_mol/b.f0[t])**b.uex[t]
) | f6b34a8e75367b43dbe759d273aa4be7dc371c12 | 12,519 |
def find_process_in_list( proclist, pid ):
"""
    Searches for the given 'pid' in 'proclist' (which should be the output
    from get_process_list()). If not found, None is returned. Otherwise the
    matching entry is returned as a list:
        [ user, pid, ppid ]
    """
for L in proclist:
if pid == L[1]:
return L
return None | 19eab54b4d04b40a54a39a44e50ae28fbff9457c | 12,520 |
def solution(s, start_pos, end_pos):
"""
Find the minimal nucleotide from a range of sequence DNA.
:param s: String consisting of the letters A, C, G and T,
which correspond to the types of successive nucleotides in the sequence
:param start_pos: array with the start indexes for the intervals to check
:param end_pos: array with the end indexes for the intervals to check
:return: a list with the minimal nucleotide for each interval defined by start_pos and end_pos
"""
highest_class = 'T'
highest_class_value = 4
# The array below must be in ascending order regarding the value assigned to the classes in the challenge description
# (not necessarily in alphabetic order)
other_classes = ['A', 'C', 'G']
other_classes_values = [1, 2, 3]
# We create a prefix_sum list for each class, so we can identify when a range has that specific class
prefix_sums = __class_based_prefix_sums(s, other_classes)
result = []
for i in range(len(start_pos)):
# We don't need to create a prefix_sum list for the class with highest value,
# because we can always use it as a fallback
current_result = highest_class_value
for j in range(len(other_classes)):
if __class_is_present(prefix_sums, j, start_pos[i], end_pos[i]):
current_result = other_classes_values[j]
break
result.append(current_result)
return result | 25ef2f7e9b009de0534f8dde132c0eb44e3fe374 | 12,521 |
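A usage sketch with the classic example (this assumes the private helpers `__class_based_prefix_sums` and `__class_is_present` are defined alongside this function):
# Ranges of "CAGCCTA": [2, 4] -> "GCC" has C (2); [5, 5] -> "T" (4); [0, 6] contains A (1).
print(solution("CAGCCTA", [2, 5, 0], [4, 5, 6]))   # -> [2, 4, 1]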
def validate_address(value: str, context: dict = {}) -> str:
"""
    Default address validator function. Can be overridden by providing a
dotted path to a function in ``SALESMAN_ADDRESS_VALIDATOR`` setting.
Args:
value (str): Address text to be validated
context (dict, optional): Validator context data.
Raises:
ValidationError: In case address is not valid
Returns:
str: Validated value
"""
if not value:
raise ValidationError(_("Address is required."))
return value | 65e04a4780432608aa049687da98bd05a527fbad | 12,522 |
import os
import random
def select_images(img_dir, sample_size=150, random_seed=42):
"""Selects a random sample of image paths."""
img_paths = []
for file in os.listdir(img_dir):
if file.lower().endswith('.jpeg'):
img_paths.append(os.path.join(img_dir, file))
if sample_size is not None:
if random_seed is not None:
random.seed(a=random_seed)
img_paths = random.sample(img_paths, sample_size)
return img_paths | 999bf71eb6b8072bd91cbb98d9fe1b50d5e9b8ac | 12,523 |
import os
import json
def load_period_data(period):
""" Load period data JSON file
    If the file does not exist, an empty dictionary is returned.
"""
filename = os.path.join(PROTOCOL_DIR, PERIOD_FILE_TEMPLATE % period)
if not os.path.exists(filename):
return {}
with open(filename, 'r', encoding='utf-8') as f:
return json.load(f) | c6c4c7121c55e1fd54d47a964054520f1f2fde97 | 12,524 |
from pathlib import Path
from configparser import ConfigParser
def _get_hg_repo(path_dir):
"""Parse `hg paths` command to find remote path."""
if path_dir == "":
return ""
hgrc = Path(path_dir) / ".hg" / "hgrc"
if hgrc.exists():
config = ConfigParser()
config.read(str(hgrc))
if "paths" in config:
return config["paths"].get("default", "hgrc: no default path?")
else:
return "hgrc: no [paths] section?"
else:
return "not a hg repo" | 773ab4b45ba6883446c8e4a7725b7ac9d707440f | 12,525 |
import numpy as np
def array_to_string(array,
col_delim=' ',
row_delim='\n',
digits=8,
value_format='{}'):
"""
Convert a 1 or 2D array into a string with a specified number
of digits and delimiter. The reason this exists is that the
basic numpy array to string conversions are surprisingly bad.
Parameters
------------
array : (n,) or (n, d) float or int
Data to be converted
If shape is (n,) only column delimiter will be used
col_delim : str
What string should separate values in a column
row_delim : str
What string should separate values in a row
digits : int
How many digits should floating point numbers include
value_format : str
Format string for each value or sequence of values
If multiple values per value_format it must divide
into array evenly.
Returns
----------
formatted : str
String representation of original array
"""
# convert inputs to correct types
array = np.asanyarray(array)
digits = int(digits)
row_delim = str(row_delim)
col_delim = str(col_delim)
value_format = str(value_format)
# abort for non-flat arrays
if len(array.shape) > 2:
        raise ValueError('conversion only works on 1D/2D arrays not %s!' %
                         str(array.shape))
# allow a value to be repeated in a value format
repeats = value_format.count('{}')
if array.dtype.kind == 'i':
# integer types don't need a specified precision
format_str = value_format + col_delim
elif array.dtype.kind == 'f':
# add the digits formatting to floats
format_str = value_format.replace(
'{}', '{:.' + str(digits) + 'f}') + col_delim
else:
        raise ValueError('dtype %s not convertible!' %
                         array.dtype.name)
# length of extra delimiters at the end
end_junk = len(col_delim)
# if we have a 2D array add a row delimiter
if len(array.shape) == 2:
format_str *= array.shape[1]
# cut off the last column delimiter and add a row delimiter
format_str = format_str[:-len(col_delim)] + row_delim
end_junk = len(row_delim)
# expand format string to whole array
format_str *= len(array)
# if an array is repeated in the value format
# do the shaping here so we don't need to specify indexes
shaped = np.tile(array.reshape((-1, 1)),
(1, repeats)).reshape(-1)
# run the format operation and remove the extra delimiters
formatted = format_str.format(*shaped)[:-end_junk]
return formatted | 9e7f189049b1ad3eff3679568a84e7151e2c643c | 12,526 |
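A brief usage sketch, assuming NumPy:
import numpy as np

print(array_to_string(np.arange(6).reshape(3, 2)))
# 0 1
# 2 3
# 4 5
print(array_to_string(np.array([1.5, 2.25]), digits=3, col_delim=', '))
# 1.500, 2.250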
def get_dp_logs(logs):
"""Get only the list of data point logs, filter out the rest."""
filtered = []
compute_bias_for_types = [
"mouseout",
"add_to_list_via_card_click",
"add_to_list_via_scatterplot_click",
"select_from_list",
"remove_from_list",
]
for log in logs:
if log["type"] in compute_bias_for_types:
filtered.append(log)
return filtered | e0a7c579fa9218edbf942afdbdb8e6cf940d1a0c | 12,527 |
from typing import List
from typing import Dict
def assign_reports_to_watchlist(cb: CbThreatHunterAPI, watchlist_id: str, reports: List[Dict]) -> Dict:
"""Set a watchlist report IDs attribute to the passed reports.
Args:
cb: Cb PSC object
watchlist_id: The Watchlist ID to update.
reports: The Intel Reports.
Returns:
        The Watchlist in dict form, or None/False if the lookup or update fails.
"""
watchlist_data = get_watchlist(cb, watchlist_id)
if not watchlist_data:
return None
watchlist_data["report_ids"] = [r["id"] for r in reports]
watchlist_data = update_watchlist(cb, watchlist_data)
if not watchlist_data:
LOGGER.error(f"unexpected problem updating watchlist with report IDs.")
return False
return watchlist_data | 92bb0369211c1720fa4d9baa7a4e3965851339f2 | 12,528 |
def visualize_filter(
image,
model,
layer,
filter_index,
optimization_parameters,
transformation=None,
regularization=None,
threshold=None,
):
"""Create a feature visualization for a filter in a layer of the model.
Args:
image (array): the image to be modified by the feature vis process.
model (object): the model to be used for the feature visualization.
layer (string): the name of the layer to be used in the visualization.
filter_index (number): the index of the filter to be visualized.
optimization_parameters (OptimizationParameters): the optimizer class to be applied.
        transformation (function): a function defining the transformations to be performed.
regularization (function): customized regularizers to be applied. Defaults to None.
threshold (list): Intermediate steps for visualization. Defaults to None.
Returns:
tuple: activation and result image for the process.
"""
image = tf.Variable(image)
feature_extractor = get_feature_extractor(model, layer)
_threshold_figures = figure(figsize=(15, 10), dpi=200)
print("Starting Feature Vis Process")
for iteration in range(optimization_parameters.iterations):
pctg = int(iteration / optimization_parameters.iterations * 100)
if transformation:
if not callable(transformation):
raise ValueError("The transformations need to be a function.")
image = transformation(image)
else:
image = trans.standard_transformation(image)
activation, image = gradient_ascent_step(
image, feature_extractor, filter_index, regularization,
optimization_parameters
)
print('>>', pctg, '%', end="\r", flush=True)
# Routine for creating a threshold image for Jupyter Notebooks
if isinstance(threshold, list) and (iteration in threshold):
threshold_image = _threshold_figures.add_subplot(
1, len(threshold), threshold.index(iteration) + 1
)
threshold_image.title.set_text(f"Step {iteration}")
threshold_view(image)
print('>> 100 %')
if image.shape[1] < 299 or image.shape[2] < 299:
image = tf.image.resize(image, [299, 299])
# Decode the resulting input image
image = imgs.deprocess_image(image[0].numpy())
return activation, image | 09940c0484361240929f61f04c9a96771b440033 | 12,529 |
def subtraction(x, y):
"""
Subtraction x and y
>>> subtraction(-20, 80)
-100
"""
assert isinstance(x, (int, float)), "The x value must be an int or float"
assert isinstance(y, (int, float)), "The y value must be an int or float"
return x - y | 203233897d31cb5bc79fca0f8c911b03d7deb5ba | 12,530 |
import aiohttp
async def paste(text: str) -> str:
    """Return an online bin of given text."""
    async with aiohttp.ClientSession() as session:
        async with session.post("https://hasteb.in/documents", data=text) as post:
            if post.status == 200:
                response = await post.text()
                return f"https://hasteb.in/{response[8:-2]}"
        post = await session.post("https://bin.drlazor.be", data={"val": text})
        if post.status == 200:
            return post.url | d204f6f1db3aa33c98c4ebeae9888acc438f7dc3 | 12,531
def lr_step(base_lr, curr_iter, decay_iters, warmup_iter=0):
"""Stepwise exponential-decay learning rate policy.
Args:
base_lr: A scalar indicates initial learning rate.
curr_iter: A scalar indicates current iteration.
        decay_iters: A list of scalars indicates the numbers of
            iterations when the learning rate is decayed.
warmup_iter: A scalar indicates the number of iteration
before which the learning rate is not adjusted.
Return:
A scalar indicates the current adjusted learning rate.
"""
if curr_iter < warmup_iter:
alpha = curr_iter / warmup_iter
return base_lr * (1 / 10.0 * (1 - alpha) + alpha)
else:
return base_lr * (0.1 ** get_step_index(curr_iter, decay_iters)) | b8cfe670aba0bed1f84ae09c6271e681fad42864 | 12,532 |
def apo(coalg):
"""
Extending an anamorphism with the ability to halt.
In this version, a boolean is paired with the value that indicates halting.
"""
def run(a):
stop, fa = coalg(a)
return fa if stop else fa.map(run)
return run | a1e64d9ed49a8641095c8a8c20ae08c1cc6e9c19 | 12,533 |
import numpy as np
import numpy.random as nr
def cat_sample(ps):
"""
sample from categorical distribution
ps is a 2D array whose rows are vectors of probabilities
"""
r = nr.rand(len(ps))
out = np.zeros(len(ps),dtype='i4')
cumsums = np.cumsum(ps, axis=1)
for (irow,csrow) in enumerate(cumsums):
for (icol, csel) in enumerate(csrow):
if csel > r[irow]:
out[irow] = icol
break
return out | 30009b31dba0eff23010bfe6d531e8c55e46873c | 12,534 |
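A short usage sketch; each row of `ps` is a probability vector and the output holds one sampled column index per row:
import numpy as np
import numpy.random as nr

nr.seed(0)
ps = np.array([[0.1, 0.9],
               [0.8, 0.2]])
print(cat_sample(ps))   # e.g. [1 0]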
def download_network(region, network_type):
"""Download network from OSM representing the region.
Arguments:
region {string} -- Location. E.g., "Manhattan Island, New York City, New York, USA"
network_type {string} -- Options: drive, drive_service, walk, bike, all, all_private
Returns:
networkx -- downloaded networkx
"""
# Download graph
G = ox.graph_from_place(region, network_type=network_type)
return G | d77c9464641c90cc029adc0649739b499298d173 | 12,535 |
def extract_text(text):
""" """
l = []
res = []
i = 0
while i < len(text) - 2:
h, i, _ = next_token(text, i)
obj = text[h:i]
l.append(obj)
for j, tok in enumerate(l):
if tok == b'Tf':
font = l[j-2]
fsize = float(l[j-1])
elif tok == b'Td':
x = float(l[j-2])
y = float(l[j-1])
elif tok == b'Tj':
text = l[j-1]
res.append((x, y, font, fsize, text[1:-1]))
return res | 9b0746be6f6fa39548fd34f3bffda7e8baf4a6ef | 12,536 |
def add_pruning_arguments_to_parser(parser):
"""Add pruning arguments to existing argparse parser"""
parser.add_argument('--do_prune', action='store_true',
help="Perform pruning when training a model")
parser.add_argument('--pruning_config', type=str,
default='', help="Path to a pruning config")
parser.add_argument('--pruning_override', type=str, nargs='*', action=ConcatenateStringAction,
default='', help="JSON string to override pruning configuration file")
return parser | 2a94e0986564f4af8fe580ca3500f06c04598f14 | 12,537 |
import itertools
from functools import reduce
import copy
def invokeRule(priorAnswers,
bodyLiteralIterator,
sip,
otherargs,
priorBooleanGoalSuccess=False,
step=None,
debug=False,
buildProof=False):
"""
    Continue invocation of the rule using (given) prior answers and the list of
    remaining body literals (& rule sip). If prior answers is a list,
    computation is split disjunctively.
    [..] By combining the answers to all these subqueries, we generate
    answers for the original query involving the rule head.
    Can also take a PML step and update it as it navigates the
    top-down proof tree (passing it on and updating it where necessary).
"""
assert not buildProof or step is not None
proofLevel, memoizeMemory, sipCollection, \
factGraph, derivedPreds, processedRules = otherargs
remainingBodyList = [i for i in bodyLiteralIterator]
lazyGenerator = lazyGeneratorPeek(priorAnswers, 2)
if lazyGenerator.successful:
# There are multiple answers in this step, we need to call invokeRule
# recursively for each answer, returning the first positive attempt
success = False
rt = None
_step = None
ansNo = 0
for priorAns in lazyGenerator:
ansNo += 1
try:
if buildProof:
newStep = InferenceStep(step.parent,
step.rule,
source=step.source)
newStep.antecedents = [ant for ant in step.antecedents]
else:
newStep = None
for rt, _step in\
invokeRule([priorAns],
iter([i for i in remainingBodyList]),
sip,
otherargs,
priorBooleanGoalSuccess,
newStep,
debug=debug,
buildProof=buildProof):
if rt:
yield rt, _step
except RuleFailure:
pass
if not success:
# None of prior answers were successful
# indicate termination of rule processing
raise RuleFailure(
"Unable to solve either of %s against remainder of rule: %s" % (
ansNo, remainingBodyList))
# yield False, _InferenceStep(step.parent, step.rule, source=step.source)
else:
lazyGenerator = lazyGeneratorPeek(lazyGenerator)
projectedBindings = lazyGenerator.successful and first(lazyGenerator) or {}
# First we check if we can combine a large group of subsequent body literals
# into a single query
# if we have a template map then we use it to further
# distinguish which builtins can be solved via
# cumulative SPARQl query - else we solve
# builtins one at a time
def sparqlResolvable(literal):
if isinstance(literal, Uniterm):
return not literal.naf and GetOp(literal) not in derivedPreds
else:
return isinstance(literal, N3Builtin) and \
literal.uri in factGraph.templateMap
def sparqlResolvableNoTemplates(literal):
if isinstance(literal, Uniterm):
return not literal.naf and GetOp(literal) not in derivedPreds
else:
return False
conjGroundLiterals = list(
itertools.takewhile(
hasattr(factGraph, 'templateMap') and sparqlResolvable or \
sparqlResolvableNoTemplates,
remainingBodyList))
bodyLiteralIterator = iter(remainingBodyList)
if len(conjGroundLiterals) > 1:
# If there are literals to combine *and* a mapping from rule
# builtins to SPARQL FILTER templates ..
basePredicateVars = set(
reduce(lambda x, y: x + y,
[list(GetVariables(arg, secondOrder=True)) for arg in conjGroundLiterals]))
if projectedBindings:
openVars = basePredicateVars.intersection(projectedBindings)
else:
# We don't have any given bindings, so we need to treat
# the body as an open query
openVars = basePredicateVars
queryConj = EDBQuery([copy.deepcopy(lit) for lit in conjGroundLiterals],
factGraph,
openVars,
projectedBindings)
query, answers = queryConj.evaluate(debug)
if isinstance(answers, bool):
combinedAnswers = {}
rtCheck = answers
else:
if projectedBindings:
combinedAnswers = (mergeMappings1To2(ans,
projectedBindings,
makeImmutable=True) for ans in answers)
else:
combinedAnswers = (MakeImmutableDict(ans) for ans in answers)
combinedAnsLazyGenerator = lazyGeneratorPeek(combinedAnswers)
rtCheck = combinedAnsLazyGenerator.successful
if not rtCheck:
raise RuleFailure("No answers for combined SPARQL query: %s" % query)
else:
# We have solved the previous N body literals with a single
# conjunctive query, now we need to make each of the literals
# an antecedent to a 'query' step.
if buildProof:
queryStep = InferenceStep(None, source='some RDF graph')
queryStep.groundQuery = subquery
queryStep.bindings = {} # combinedAnswers[-1]
queryHash = URIRef(
"tag:[email protected]:Queries#" + \
makeMD5Digest(subquery))
queryStep.identifier = queryHash
for subGoal in conjGroundLiterals:
subNs = NodeSet(subGoal.toRDFTuple(),
identifier=BNode())
subNs.steps.append(queryStep)
step.antecedents.append(subNs)
queryStep.parent = subNs
for rt, _step in invokeRule(
isinstance(answers, bool) and [projectedBindings] or combinedAnsLazyGenerator,
iter(remainingBodyList[len(conjGroundLiterals):]),
sip,
otherargs,
isinstance(answers, bool),
step,
debug=debug,
buildProof=buildProof):
yield rt, _step
else:
# Continue processing rule body condition
# one literal at a time
try:
bodyLiteral = next(bodyLiteralIterator) if py3compat.PY3 else bodyLiteralIterator.next()
# if a N3 builtin, execute it using given bindings for boolean answer
# builtins are moved to end of rule when evaluating rules via sip
if isinstance(bodyLiteral, N3Builtin):
lhs = bodyLiteral.argument
rhs = bodyLiteral.result
lhs = isinstance(lhs, Variable) and projectedBindings[lhs] or lhs
rhs = isinstance(rhs, Variable) and projectedBindings[rhs] or rhs
assert lhs is not None and rhs is not None
if bodyLiteral.func(lhs, rhs):
if debug:
print("Invoked %s(%s, %s) -> True" % (
bodyLiteral.uri, lhs, rhs))
# positive answer means we can continue processing the rule body
if buildProof:
ns = NodeSet(bodyLiteral.toRDFTuple(),
identifier=BNode())
step.antecedents.append(ns)
for rt, _step in invokeRule(
[projectedBindings],
bodyLiteralIterator,
sip,
otherargs,
step,
priorBooleanGoalSuccess,
debug=debug,
buildProof=buildProof):
yield rt, _step
else:
if debug:
print("Successfully invoked %s(%s, %s) -> False" % (
bodyLiteral.uri, lhs, rhs))
raise RuleFailure("Failed builtin invokation %s(%s, %s)" %
(bodyLiteral.uri, lhs, rhs))
else:
# For every body literal, subqueries are generated according
# to the sip
sipArcPred = URIRef(GetOp(bodyLiteral) + \
'_' + '_'.join(GetArgs(bodyLiteral)))
assert len(list(IncomingSIPArcs(sip, sipArcPred))) < 2
subquery = copy.deepcopy(bodyLiteral)
subquery.ground(projectedBindings)
for N, x in IncomingSIPArcs(sip, sipArcPred):
#That is, each subquery contains values for the bound arguments
#that are passed through the sip arcs entering the node
#corresponding to that literal
#Create query out of body literal and apply sip-provided bindings
subquery = copy.deepcopy(bodyLiteral)
subquery.ground(projectedBindings)
if literalIsGround(subquery):
#subquery is ground, so there will only be boolean answers
#we return the conjunction of the answers for the current
#subquery
answer = False
ns = None
answers = first(
itertools.dropwhile(
lambda item: not item[0],
SipStrategy(
subquery.toRDFTuple(),
sipCollection,
factGraph,
derivedPreds,
MakeImmutableDict(projectedBindings),
processedRules,
network=step is not None and \
step.parent.network or None,
debug=debug,
buildProof=buildProof,
memoizeMemory=memoizeMemory,
proofLevel=proofLevel)))
if answers:
answer, ns = answers
if not answer and not bodyLiteral.naf or \
(answer and bodyLiteral.naf):
#negative answer means the invokation of the rule fails
#either because we have a positive literal and there
#is no answer for the subgoal or the literal is
#negative and there is an answer for the subgoal
raise RuleFailure(
"No solutions solving ground query %s" % subquery)
else:
if buildProof:
if not answer and bodyLiteral.naf:
ns.naf = True
step.antecedents.append(ns)
#positive answer means we can continue processing the rule body
#either because we have a positive literal and answers
#for subgoal or a negative literal and no answers for the
#the goal
for rt, _step in invokeRule(
[projectedBindings],
bodyLiteralIterator,
sip,
otherargs,
True,
step,
debug=debug):
yield rt, _step
else:
_answers = \
SipStrategy(subquery.toRDFTuple(),
sipCollection,
factGraph,
derivedPreds,
MakeImmutableDict(projectedBindings),
processedRules,
network=step is not None and \
step.parent.network or None,
debug=debug,
buildProof=buildProof,
memoizeMemory=memoizeMemory,
proofLevel=proofLevel)
# solve (non-ground) subgoal
def collectAnswers(_ans):
for ans, ns in _ans:
if isinstance(ans, dict):
try:
map = mergeMappings1To2(
ans, projectedBindings,
makeImmutable=True)
yield map
except:
pass
combinedAnswers = collectAnswers(_answers)
answers = lazyGeneratorPeek(combinedAnswers)
if not answers.successful \
and not bodyLiteral.naf \
or (bodyLiteral.naf and answers.successful):
raise RuleFailure(
"No solutions solving ground query %s" % subquery)
else:
# Either we have a positive subgoal and answers
# or a negative subgoal and no answers
if buildProof:
if answers.successful:
goals = set([g for a, g in answers])
assert len(goals) == 1
step.antecedents.append(goals.pop())
else:
newNs = NodeSet(
bodyLiteral.toRDFTuple(),
network=step.parent.network,
identifier=BNode(),
naf=True)
step.antecedents.append(newNs)
for rt, _step in invokeRule(
answers,
bodyLiteralIterator,
sip,
otherargs,
priorBooleanGoalSuccess,
step,
debug=debug,
buildProof=buildProof):
yield rt, _step
except StopIteration:
#Finished processing rule
if priorBooleanGoalSuccess:
yield projectedBindings and projectedBindings or True, step
elif projectedBindings:
#Return the most recent (cumulative) answers and the given step
yield projectedBindings, step
else:
raise RuleFailure("Finished processing rule unsuccessfully") | 66be56a818a620c5c47a7537b612e91341c7d334 | 12,538 |
def read_ult_meta(filebase):
"""Convenience fcn for output of targeted metadata."""
meta = _parse_ult_meta(filebase)
return (meta["NumVectors"],
meta["PixPerVector"],
meta["ZeroOffset"],
meta["Angle"],
meta["PixelsPerMm"],
meta["FramesPerSec"],
meta["TimeInSecsOfFirstFrame"]) | b2237a2dab9faf98179f69de9e9a5f1dc7289f78 | 12,539 |
from typing import Iterable
from typing import List
def safe_identifiers_iterable(val_list: Iterable[str]) -> List[str]:
"""
Returns new list, all with safe identifiers.
"""
return [safe_identifier(val) for val in val_list] | 6b80d90cfac2ea527ace38cc6550571b5f120a7f | 12,540 |
def encode_varint(value, write):
""" Encode an integer to a varint presentation. See
https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
on how those can be produced.
Arguments:
value (int): Value to encode
        write (function): Called per byte that needs to be written
Returns:
int: Number of bytes written
"""
value = (value << 1) ^ (value >> 63)
if value <= 0x7f: # 1 byte
write(value)
return 1
if value <= 0x3fff: # 2 bytes
write(0x80 | (value & 0x7f))
write(value >> 7)
return 2
if value <= 0x1fffff: # 3 bytes
write(0x80 | (value & 0x7f))
write(0x80 | ((value >> 7) & 0x7f))
write(value >> 14)
return 3
if value <= 0xfffffff: # 4 bytes
write(0x80 | (value & 0x7f))
write(0x80 | ((value >> 7) & 0x7f))
write(0x80 | ((value >> 14) & 0x7f))
write(value >> 21)
return 4
if value <= 0x7ffffffff: # 5 bytes
write(0x80 | (value & 0x7f))
write(0x80 | ((value >> 7) & 0x7f))
write(0x80 | ((value >> 14) & 0x7f))
write(0x80 | ((value >> 21) & 0x7f))
write(value >> 28)
return 5
else:
# Return to general algorithm
bits = value & 0x7f
value >>= 7
i = 0
while value:
write(0x80 | bits)
bits = value & 0x7f
value >>= 7
i += 1
write(bits)
return i | 075286208008a0b7507eafe19158eebdb2af66b7 | 12,541 |
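A minimal sketch of driving the encoder: `write` can be any one-byte sink, here `bytearray.append`. Note the first line of the function applies ZigZag mapping, so small negative values also encode compactly.
buf = bytearray()
encode_varint(1, buf.append)     # ZigZag maps 1 -> 2, one byte
encode_varint(-1, buf.append)    # ZigZag maps -1 -> 1, one byte
encode_varint(300, buf.append)   # 300 -> 600, two bytes
print(buf.hex())                 # -> 0201d804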
def heap_sort(li):
""" [list of int] => [list of int]
Heap sort: divides its input into a sorted and an unsorted region,
and it iteratively shrinks the unsorted region by extracting the
largest element from it and inserting it into the sorted region.
It does not waste time with a linear-time scan of the unsorted region;
rather, heap sort maintains the unsorted region in a heap data structure
to more quickly find the largest element in each step.
To implement a heap using arrays, we will use the rule
li[k] >= li[2*k+1] and li[k] >= li[2*k+2] (left child and right child
respectively).
More generally, the array must satisfy the heap quality:
For any given node C, if P is a parent node of C, then
the value of P is greater than or equal to the key of C
(for max heaps)
Graphically, this would look like:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
"""
def heapify(lst, heap_size, root):
""" ([list of int], int, int) => [list of int]
Rearranges the list to satisfy the heap quality.
Root is index of the largest element in the lst.
"""
# the largest node
largest = root
left_child = 2 * largest + 1
right_child = 2 * largest + 2
# check if left_child and root need to be swapped
if left_child < heap_size and lst[largest] < lst[left_child]:
largest = left_child
# check if right_child and root need to be swapped
if right_child < heap_size and lst[largest] < lst[right_child]:
largest = right_child
# change root, if needed
if largest != root:
lst[root], lst[largest] = lst[largest], lst[root]
# continue to heapify the root
heapify(lst, heap_size, largest)
# Build a maxheap by iterating through the list backwards
for i in range(len(li), -1, -1):
heapify(li, len(li), i)
# extract elements one by one
for i in range(len(li) - 1, 0, -1):
"""remember, heap sort differs from insertion sort in that
# it searches for the maximum, rather than minimum, element.
li[0:end] is a heap (like a tree, but elements are not guaranteed
to be sorted) and li[end:len(li)] is in sorted order."""
li[i], li[0] = li[0], li[i]
# return to heap, since the heap was messed up by swapping
heapify(li, i, 0)
return li | a72be31e5256c880c157636aa7a15df013ce651d | 12,542 |
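A quick usage check:
print(heap_sort([5, 1, 8, 3, 2]))   # -> [1, 2, 3, 5, 8]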
import numpy as np
def vector_field(v, t, inf_mat, state_meta):
"""vector_field returns the temporal derivative of a flatten state vector
:param v: array of shape (1,mmax+1+(nmax+1)**2) for the flatten state vector
:param t: float for time (unused)
:param inf_mat: array of shape (nmax+1,nmax+1) representing the infection rate
:param state_meta: tuple of arrays encoding information of the structure.
:returns vec_field: array of shape (1,(nmax+1)**2) for the flatten
vector field.
"""
mmax = state_meta[0]
nmax = state_meta[1]
m = state_meta[2]
gm = state_meta[3]
pn = state_meta[4]
imat = state_meta[5]
nmat = state_meta[6]
pnmat = state_meta[7]
sm = v[:mmax+1]
fni = v[mmax+1:].reshape(nmax+1,nmax+1)
fni_field = np.zeros(fni.shape) #matrix field
sm_field = np.zeros(sm.shape)
#calculate mean-field quantities
r = np.sum(inf_mat[2:,:]*(nmat[2:,:]-imat[2:,:])*fni[2:,:]*pnmat[2:,:])
r /= np.sum((nmat[2:,:]-imat[2:,:])*fni[2:,:]*pnmat[2:,:])
rho = r*excess_susceptible_membership(m,gm,sm)
#contribution for nodes
#------------------------
sm_field = 1 - sm - sm*m*r
#contribution for groups
#------------------------
#contribution from above
fni_field[2:,:nmax] += imat[2:,1:]*fni[2:,1:]
#contribution from equal
fni_field[2:,:] += (-imat[2:,:]
-(nmat[2:,:] - imat[2:,:])
*(inf_mat[2:,:] + rho))*fni[2:,:]
#contribution from below
fni_field[2:,1:nmax+1] += ((nmat[2:,:nmax] - imat[2:,:nmax])
*(inf_mat[2:,:nmax] + rho))*fni[2:,:nmax]
return np.concatenate((sm_field,fni_field.reshape((nmax+1)**2))) | 31c8023966fd3e5c35b734759a3747f0d2752390 | 12,543 |
import numpy as np
def newton(start, loss_fn, *args, lower=0, upper=None, epsilon=1e-9):
"""
Newton's Method!
"""
theta, origin, destination = args[0], args[1], args[2]
if upper is None:
upper = 1
start = lower
while True:
if loss_fn(start, theta, origin, destination) > 0:
start = (upper+start)/2
else:
start = (lower+start)/2
# print("START", start)
x_cur = start
x_prev = -1
try:
while np.abs(x_cur-x_prev) >= epsilon:
# print(x)
x_prev = x_cur
x_cur = newton_single(x_cur, loss_fn, theta, origin, destination)
# print(x, x-x_prev, np.abs(x-x_prev)>=epsilon)
if np.isnan(x_cur):
continue
return x_cur
except ZeroDivisionError:
print(start, x_cur) | bbd04297639fbc964c55a8c964e5bd5fb24d6e22 | 12,544 |
import numpy as np
import torch
def eval_det_cls(pred, gt, iou_thr=None):
    """Generic function to compute precision/recall for object detection for a
single class.
Args:
pred (dict): Predictions mapping from image id to bounding boxes \
and scores.
gt (dict): Ground truths mapping from image id to bounding boxes.
iou_thr (list[float]): A list of iou thresholds.
Return:
tuple (np.ndarray, np.ndarray, float): Recalls, precisions and \
average precision.
"""
# {img_id: {'bbox': box structure, 'det': matched list}}
class_recs = {}
npos = 0
img_id_npos = {}
for img_id in gt.keys():
cur_gt_num = len(gt[img_id])
if cur_gt_num != 0:
gt_cur = torch.zeros([cur_gt_num, 7], dtype=torch.float32)
for i in range(cur_gt_num):
gt_cur[i] = gt[img_id][i].tensor
bbox = gt[img_id][0].new_box(gt_cur)
else:
bbox = gt[img_id]
det = [[False] * len(bbox) for i in iou_thr]
npos += len(bbox)
img_id_npos[img_id] = img_id_npos.get(img_id, 0) + len(bbox)
class_recs[img_id] = {'bbox': bbox, 'det': det}
# construct dets
image_ids = []
confidence = []
ious = []
for img_id in pred.keys():
cur_num = len(pred[img_id])
if cur_num == 0:
continue
pred_cur = torch.zeros((cur_num, 7), dtype=torch.float32)
box_idx = 0
for box, score in pred[img_id]:
image_ids.append(img_id)
confidence.append(score)
pred_cur[box_idx] = box.tensor
box_idx += 1
pred_cur = box.new_box(pred_cur)
gt_cur = class_recs[img_id]['bbox']
if len(gt_cur) > 0:
# calculate iou in each image
iou_cur = pred_cur.overlaps(pred_cur, gt_cur)
for i in range(cur_num):
ious.append(iou_cur[i])
else:
for i in range(cur_num):
ious.append(np.zeros(1))
confidence = np.array(confidence)
# sort by confidence
sorted_ind = np.argsort(-confidence)
image_ids = [image_ids[x] for x in sorted_ind]
ious = [ious[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp_thr = [np.zeros(nd) for i in iou_thr]
fp_thr = [np.zeros(nd) for i in iou_thr]
for d in range(nd):
R = class_recs[image_ids[d]]
iou_max = -np.inf
BBGT = R['bbox']
cur_iou = ious[d]
if len(BBGT) > 0:
# compute overlaps
for j in range(len(BBGT)):
# iou = get_iou_main(get_iou_func, (bb, BBGT[j,...]))
iou = cur_iou[j]
if iou > iou_max:
iou_max = iou
jmax = j
for iou_idx, thresh in enumerate(iou_thr):
if iou_max > thresh:
if not R['det'][iou_idx][jmax]:
tp_thr[iou_idx][d] = 1.
R['det'][iou_idx][jmax] = 1
else:
fp_thr[iou_idx][d] = 1.
else:
fp_thr[iou_idx][d] = 1.
ret = []
# Return additional information for custom metrics.
new_ret = {}
new_ret["image_ids"] = image_ids
new_ret["iou_thr"] = iou_thr
new_ret["ious"] = [max(x.tolist()) for x in ious]
new_ret["fp_thr"] = [x.tolist() for x in fp_thr]
new_ret["tp_thr"] = [x.tolist() for x in tp_thr]
new_ret["img_id_npos"] = img_id_npos
for iou_idx, thresh in enumerate(iou_thr):
# compute precision recall
fp = np.cumsum(fp_thr[iou_idx])
tp = np.cumsum(tp_thr[iou_idx])
recall = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = average_precision(recall, precision)
ret.append((recall, precision, ap))
return ret, new_ret | 762f70d95261509778a1b015af30eab68f951b15 | 12,545 |
import pathlib
from typing import List
from typing import Dict
import numpy as onp
import jax.numpy as jnp
import jaxfg
import jaxlie
from tqdm import tqdm
def parse_g2o(path: pathlib.Path, pose_count_limit: int = 100000) -> G2OData:
"""Parse a G2O file. Creates a list of factors and dictionary of initial poses."""
with open(path) as file:
lines = [line.strip() for line in file.readlines()]
pose_variables: List[jaxfg.geometry.LieVariableBase] = []
initial_poses: Dict[jaxfg.geometry.LieVariableBase, jaxlie.MatrixLieGroup] = {}
factors: List[jaxfg.core.FactorBase] = []
for line in tqdm(lines):
parts = [part for part in line.split(" ") if part != ""]
variable: jaxfg.geometry.LieVariableBase
between: jaxlie.MatrixLieGroup
if parts[0] == "VERTEX_SE2":
if len(pose_variables) > pose_count_limit:
continue
# Create SE(2) variable
_, index, x, y, theta = parts
index = int(index)
x, y, theta = map(float, [x, y, theta])
assert len(initial_poses) == index
variable = jaxfg.geometry.SE2Variable()
initial_poses[variable] = jaxlie.SE2.from_xy_theta(x, y, theta)
pose_variables.append(variable)
elif parts[0] == "EDGE_SE2":
# Create relative offset between pair of SE(2) variables
before_index = int(parts[1])
after_index = int(parts[2])
if before_index > pose_count_limit or after_index > pose_count_limit:
continue
between = jaxlie.SE2.from_xy_theta(*(float(p) for p in parts[3:6]))
precision_matrix_components = onp.array(list(map(float, parts[6:])))
precision_matrix = onp.zeros((3, 3))
precision_matrix[onp.triu_indices(3)] = precision_matrix_components
precision_matrix = precision_matrix.T
precision_matrix[onp.triu_indices(3)] = precision_matrix_components
sqrt_precision_matrix = onp.linalg.cholesky(precision_matrix).T
factors.append(
jaxfg.geometry.BetweenFactor.make(
variable_T_world_a=pose_variables[before_index],
variable_T_world_b=pose_variables[after_index],
T_a_b=between,
noise_model=jaxfg.noises.Gaussian(
sqrt_precision_matrix=sqrt_precision_matrix
),
)
)
elif parts[0] == "VERTEX_SE3:QUAT":
# Create SE(3) variable
_, index, x, y, z, qx, qy, qz, qw = parts
index = int(index)
assert len(initial_poses) == index
variable = jaxfg.geometry.SE3Variable()
initial_poses[variable] = jaxlie.SE3(
wxyz_xyz=onp.array(list(map(float, [qw, qx, qy, qz, x, y, z])))
)
pose_variables.append(variable)
elif parts[0] == "EDGE_SE3:QUAT":
# Create relative offset between pair of SE(3) variables
before_index = int(parts[1])
after_index = int(parts[2])
numerical_parts = list(map(float, parts[3:]))
assert len(numerical_parts) == 7 + 21
# between = jaxlie.SE3.from_xy_theta(*(float(p) for p in parts[3:6]))
xyz = numerical_parts[0:3]
quaternion = numerical_parts[3:7]
between = jaxlie.SE3.from_rotation_and_translation(
rotation=jaxlie.SO3.from_quaternion_xyzw(onp.array(quaternion)),
translation=onp.array(xyz),
)
precision_matrix = onp.zeros((6, 6))
precision_matrix[onp.triu_indices(6)] = numerical_parts[7:]
precision_matrix = precision_matrix.T
precision_matrix[onp.triu_indices(6)] = numerical_parts[7:]
sqrt_precision_matrix = onp.linalg.cholesky(precision_matrix).T
factors.append(
jaxfg.geometry.BetweenFactor.make(
variable_T_world_a=pose_variables[before_index],
variable_T_world_b=pose_variables[after_index],
T_a_b=between,
noise_model=jaxfg.noises.Gaussian(
sqrt_precision_matrix=sqrt_precision_matrix
),
)
)
else:
assert False, f"Unexpected line type: {parts[0]}"
# Anchor start pose
factors.append(
jaxfg.geometry.PriorFactor.make(
variable=pose_variables[0],
mu=initial_poses[pose_variables[0]],
noise_model=jaxfg.noises.DiagonalGaussian(
jnp.ones(pose_variables[0].get_local_parameter_dim()) * 100.0
),
)
)
return G2OData(factors=factors, initial_poses=initial_poses) | 6c766401220849e337279e8b465f9d67477a1599 | 12,546 |
import numpy as np
import keras
def _som_actor(env):
"""
Construct the actor part of the model and return it.
"""
nactions = np.product(env.action_shape)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(keras.layers.Dense(400))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(200))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(nactions))
model.add(keras.layers.Activation('sigmoid'))
return model | e3bc1f675b16b2d728b1c070324139f0d99071a7 | 12,547 |
def sendEmail():
"""email sender"""
send_email('Registration ATS',
['[email protected]'],
'Thanks for registering ATS!',
'<h3>Thanks for registering with ATS!</h3>')
return "email sent to [email protected]" | e9125c32adac8267aaa550e59e27db4a10746ace | 12,548 |
import scipy
def Pvalue(chi2, df):
"""Returns the p-value of getting chi2 from a chi-squared distribution.
chi2: observed chi-squared statistic
df: degrees of freedom
"""
return 1 - scipy.stats.chi2.cdf(chi2, df) | 1a2198e5d47396fc785a627d96513ded1d6894e0 | 12,549 |
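A quick sanity check: the 5% critical value of chi-squared with one degree of freedom is about 3.84, so the p-value there should be about 0.05.
print(round(Pvalue(3.84, 1), 3))   # -> 0.05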
def template(template_lookup_key: str) -> str:
"""Return template as string."""
with open(template_path(template_lookup_key), "r") as filepath:
template = filepath.read()
return template | d03bbc2baa8cb18174a468579bdea1da906de09d | 12,550 |
def filter_rows(df, condition, reason):
"""
    :param df: dataframe to filter
    :param condition: boolean Series, True for rows to keep
    :param reason: short description of the filter, used in the printed message
    :return: filtered df
"""
n_dropped = (condition == False).sum()
print(
f"\nexcluding {n_dropped} locations ({n_dropped / df.shape[0]:.1%}) due to {reason}"
)
return df[condition] | 7e5e6925bfb7d90bc90b42fda202d80e8ef5e3f6 | 12,551 |
import numpy as np
from aiida import orm
def parse_projected_dos(f):
"""Parse `projected_dos.dat` output file."""
data = np.loadtxt(f)
projected_dos = {"frequency_points": data[:, 0], "projected_dos": data[:, 1:].T}
pdos = orm.XyData()
pdos_list = [pd for pd in projected_dos["projected_dos"]]
pdos.set_x(projected_dos["frequency_points"], "Frequency", "THz")
pdos.set_y(
pdos_list,
[
"Projected DOS",
]
* len(pdos_list),
[
"1/THz",
]
* len(pdos_list),
)
pdos.label = "Projected DOS"
return pdos | 89c280e92c7598e3947d8ccda20b921c601c9b10 | 12,552 |
import numpy as np
def get_from_parameters(a, b, c, alpha, beta, gamma):
"""
Create a Lattice using unit cell lengths and angles (in degrees).
This code is modified from the pymatgen source code [1]_.
Parameters
----------
a : :class:`float`:
*a* lattice parameter.
b : :class:`float`:
*b* lattice parameter.
c : :class:`float`:
*c* lattice parameter.
alpha : :class:`float`:
*alpha* angle in degrees.
beta : :class:`float`:
*beta* angle in degrees.
gamma : :class:`float`:
*gamma* angle in degrees.
Returns
-------
:class:`tuple` of three :class:`numpy.ndarray`
Tuple of cell lattice vectors of shape (3, ) in Angstrom.
"""
angles_r = np.radians([alpha, beta, gamma])
cos_alpha, cos_beta, cos_gamma = np.cos(angles_r)
sin_alpha, sin_beta, sin_gamma = np.sin(angles_r)
val = (cos_alpha * cos_beta - cos_gamma) / (sin_alpha * sin_beta)
# Sometimes rounding errors result in values slightly > 1.
val = cap_absolute_value(val)
gamma_star = np.arccos(val)
vector_a = np.array([a * sin_beta, 0.0, a * cos_beta])
vector_b = np.array([
-b * sin_alpha * np.cos(gamma_star),
b * sin_alpha * np.sin(gamma_star),
b * cos_alpha,
])
vector_c = np.array([0.0, 0.0, float(c)])
return tuple([vector_a, vector_b, vector_c]) | 076763f30da86b12747ede930993d99fc3b742d8 | 12,553 |
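A small usage sketch, assuming the module-level helper `cap_absolute_value` (which clamps to [-1, 1]) is available: a cubic cell with 5 Angstrom edges and 90 degree angles yields three orthogonal lattice vectors, up to ~1e-15 rounding.
import numpy as np

a_vec, b_vec, c_vec = get_from_parameters(5.0, 5.0, 5.0, 90.0, 90.0, 90.0)
print(np.round(a_vec, 6), np.round(b_vec, 6), np.round(c_vec, 6))
# [5. 0. 0.] [0. 5. 0.] [0. 0. 5.]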
import random
def random_chinese_name():
"""生成随机中文名字
包括的名字格式:2个字名字**,3个字名字***,4个字名字****
:return:
"""
name_len = random.choice([i for i in range(4)])
if name_len == 0:
name = random_two_name()
elif name_len == 1:
name = random_three_name()
elif name_len == 2:
name = random_three_names()
else:
name = random_four_name()
return name | c86232cb81c492e2301837f5e330e6140ee503f3 | 12,554 |
def power_list(lists: [list]) -> list:
""" power set across the options of all lists """
if len(lists) == 1:
return [[v] for v in lists[0]]
grids = power_list(lists[:-1])
new_grids = []
for v in lists[-1]:
for g in grids:
new_grids.append(g + [v])
return new_grids | 135e3cde20388d999456e2e8a2fed4d98fac581d | 12,555 |
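A short usage sketch; the result is the Cartesian product of the option lists, one combination per inner list:
print(power_list([[1, 2], [3, 4]]))
# -> [[1, 3], [2, 3], [1, 4], [2, 4]]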
import time
from django.core.mail import EmailMessage
def send_email(from_email, to, subject, message, html=True):
"""
Send emails to the given recipients
:param from_email:
:param to:
:param subject:
:param message:
:param html:
:return: Boolean value
"""
try:
email = EmailMessage(subject, message, from_email, to)
print("Sending email..")
if html:
email.content_subtype = 'html'
email.send()
return True
except Exception as e:
print("Error in sending email: {0}".format(str(e)))
if 'rate exceeded' in str(e):
time.sleep(2)
send_email(from_email, to, subject, message)
return False | 28751bc30f51148c0389d4127229e6352a18cacb | 12,556 |
import random
def attack(health, power, percent_to_hit):
"""Calculates health from percent to hit and power of hit
Parameters:
health - integer defining health of attackee
power - integer defining damage of attacker
percent to hit - float defining percent chance to hit of attacker
Returns: new health
"""
random_number = random.random() # number between 0.0 and 1.0
# if our random number falls between 0 and percent to hit
if random_number <= percent_to_hit:
# then a hit occurred so we reduce health by power
health = health - power
# return the new health value
return health | 83a74908f76f389c798b28c5d3f9035d2d8aff6a | 12,557 |
def signal_requests_mock_factory(requests_mock: Mocker) -> Mocker:
"""Create signal service mock from factory."""
def _signal_requests_mock_factory(
success_send_result: bool = True, content_length_header: str = None
) -> Mocker:
requests_mock.register_uri(
"GET",
"http://127.0.0.1:8080/v1/about",
status_code=HTTPStatus.OK,
json={"versions": ["v1", "v2"]},
)
if success_send_result:
requests_mock.register_uri(
"POST",
"http://127.0.0.1:8080" + SIGNAL_SEND_PATH_SUFIX,
status_code=HTTPStatus.CREATED,
)
else:
requests_mock.register_uri(
"POST",
"http://127.0.0.1:8080" + SIGNAL_SEND_PATH_SUFIX,
status_code=HTTPStatus.BAD_REQUEST,
)
if content_length_header is not None:
requests_mock.register_uri(
"GET",
URL_ATTACHMENT,
status_code=HTTPStatus.OK,
content=CONTENT,
headers={"Content-Length": content_length_header},
)
else:
requests_mock.register_uri(
"GET",
URL_ATTACHMENT,
status_code=HTTPStatus.OK,
content=CONTENT,
)
return requests_mock
return _signal_requests_mock_factory | 543f73ec004911c87e9986cbd940a733f03287bf | 12,558 |
def test_dwt_denoise_trace():
""" Check that sample data fed into dwt_denoise_trace() can be processed
and that the returned signal is reasonable (for just one trace)"""
# Loma Prieta test station (nc216859)
data_files, origin = read_data_dir('geonet', 'us1000778i', '*.V1A')
trace = []
trace = read_data(data_files[0])
dataOut = dwt.denoise_trace(tr=trace)
# Look at frequency content? Samples?
return dataOut | 4c526e7e76c8672322bec0323974ca2ee20e25dd | 12,559 |
def get_networks(project_id=None,
auth_token=None):
"""
Get a list of all routed networks
"""
url = CATALOG_HOST + "/routednetwork"
try:
response_body = _api_request(url=url,
http_method="GET",
project_id=project_id,
auth_token=auth_token)
except CommandExecutionError as e:
log.exception(e)
return None
networks = [
network
for network
in response_body
if network['internalDeploymentStatus']['phase'] in list(map(str, POSITIVE_PHASES))
]
return networks | c2c9bfe05cfa416c9e37d04aefcc640d5d2250f7 | 12,560 |
def feature_registration(source,target, MIN_MATCH_COUNT = 12):
"""
Obtain the rigid transformation from source to target
first find correspondence of color images by performing fast registration
using SIFT features on color images.
The corresponding depth values of the matching keypoints is then used to
obtain rigid transformation through a ransac process.
Parameters
----------
source : ((n,m) uint8, (n,m) float)
The source color image and the corresponding 3d pointcloud combined in a list
target : ((n,m) uint8, (n,m) float)
The target color image and the corresponding 3d pointcloud combined in a list
MIN_MATCH_COUNT : int
The minimum number of good corresponding feature points for the algorithm to
trust the pairwise registration result with feature matching only
Returns
----------
transform: (4,4) float or None
The homogeneous rigid transformation that transforms source to the target's
frame
if None, registration result using feature matching only cannot be trusted
either due to no enough good matching feature points are found, or the ransac
process does not return a solution
"""
cad_src, depth_src = source
cad_des, depth_des = target
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
    # find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(cad_src,None)
kp2, des2 = sift.detectAndCompute(cad_des,None)
    # find good matches
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1,des2, k=2)
good = []
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
# if number of good matching feature point is greater than the MIN_MATCH_COUNT
if len(good)>MIN_MATCH_COUNT:
src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
matchesMask = mask.ravel().tolist()
bad_match_index = np.where(np.array(matchesMask) == 0)
src_index=np.vstack(src_pts).squeeze()
src_index = np.delete(src_index, tuple(bad_match_index[0]), axis=0)
src_index[:,[0, 1]] = src_index[:,[1, 0]]
src_index = tuple(src_index.T.astype(np.int32))
src_depths = depth_src[src_index]
dst_index=np.vstack(dst_pts).squeeze()
dst_index = np.delete(dst_index, tuple(bad_match_index[0]), axis=0)
dst_index[:,[0, 1]] = dst_index[:,[1, 0]]
dst_index = tuple(dst_index.T.astype(np.int32))
dst_depths = depth_des[dst_index]
dst_good=[]
src_good=[]
dst_depths=dst_depths[matchesMask>0][0]
src_depths=src_depths[matchesMask>0][0]
        for i in range(len(dst_depths)):
if np.sum(dst_depths[i])!=0 and np.sum(src_depths[i])!=0:
dst_good.append(dst_depths[i].tolist())
src_good.append(src_depths[i].tolist())
# get rigid transforms between 2 set of feature points through ransac
transform = match_ransac(np.asarray(src_good),np.asarray(dst_good))
return transform
else:
return None | d5839ef3586acd84c57341f19700de38660f9a9f | 12,561 |
def set_metadata(testbench_config, testbench):
"""
Perform the direct substitutions from the sonar testbench metadata into the
the testbench
Args:
testbench_config (Testbench): Sonar testbench description
testbench (str): The testbench template
"""
for key, value in testbench_config.metadata.items():
if value is None:
replace_str = ""
else:
replace_str = str(value)
search_str = "SONAR_" + key.upper()
testbench = replace_in_testbenches(testbench, search_str, replace_str)
return testbench | 375712b92f7467ee4d49e5d9e91250464c81337d | 12,562 |
from bisect import bisect_left
def index(a, x):
"""Locate the leftmost value exactly equal to x"""
i = bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError | f77aed5c55750b848fdf51b66b38f3774c812e23 | 12,563 |
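A usage sketch for index, which relies on bisect_left for an O(log n) lookup in a sorted list:
a = [1, 3, 5, 7]
index(a, 5)   # -> 2
index(a, 4)   # raises ValueError because 4 is not present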
def convert_secondary_type_list(obj):
"""
:type obj: :class:`[mbdata.models.ReleaseGroupSecondaryType]`
"""
type_list = models.secondary_type_list()
    for t in obj:
        type_list.add_secondary_type(convert_secondary_type(t))
return type_list | d84d20f6d82b462bda5bf04f6784effea47a0265 | 12,564 |
import matplotlib.pyplot as plt
def run(inputs, parameters = None):
"""Function to be callled by DOE and optimization. Design Variables are
the only inputs.
:param inputs: {'sma', 'linear', 'sigma_o'}"""
def thickness(x, t, chord):
y = af.Naca00XX(chord, t, [x], return_dict = 'y')
thickness_at_x = y['u'] - y['l']
return thickness_at_x
    if parameters is not None:
eng = parameters[0]
import_matlab = False
else:
eng = None
import_matlab = True
sma = inputs['sma']
linear = inputs['linear']
R = inputs['R']
sigma_o = 100e6
airfoil = "naca0012"
chord = 1.#0.6175
J = {'x':0.75, 'y':0.}
#Adding the area key to the dictionaries
sma['area'] = math.pi*(0.000381/2.)**2
linear['area'] = 0.001
# Design constants
#arm length to center of gravity
r_w = 0.10
    #Aircraft weight (mass times gravity)
W = 0.0523*9.8 #0.06*9.8
alpha = 0.
V = 10 #m/s
altitude = 10000. #feet
# Temperature
T_0 = 273.15 + 30.
T_final = 273.15 + 140.
#Initial martensitic volume fraction
MVF_init = 1.
# Number of steps and cycles
n = 200
n_cycles = 0
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Parameters to select how to output stuff
all_outputs = True
save_data = True
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if all_outputs:
eps_s, eps_l, theta, sigma, MVF, T, eps_t, theta, F_l, k, L_s = flap(airfoil,
chord, J, sma, linear, sigma_o,
W, r_w, V, altitude, alpha, T_0,
T_final, MVF_init, n, R, all_outputs = True,
import_matlab = import_matlab, eng=eng,
n_cycles = n_cycles)
plt.figure()
plt.plot(np.rad2deg(theta), eps_s, lw=2., label = "$\epsilon_s$")
plt.plot(np.rad2deg(theta), eps_l, 'b--',lw=2, label = "$\epsilon_l$")
# plt.scatter(theta, eps_s, c = 'b')
# plt.scatter(theta, eps_l, c = 'b')
plt.ylabel('$\epsilon$', fontsize=24)
plt.xlabel(r'$\theta ({}^{\circ})$', fontsize=20)
plt.legend(loc = 'best', fontsize = 'x-large')
plt.grid()
        print(len(T), len(eps_s), len(eps_l), len(theta), len(eps_t))
plt.figure()
plt.plot(np.rad2deg(theta), eps_t, lw=2.)
# plt.scatter(theta, eps_t, c = 'b')
plt.ylabel('$\epsilon_t$', fontsize=24)
plt.xlabel(r'$\theta ({}^{\circ})$', fontsize=20)
plt.legend(loc = 'best', fontsize = 'x-large')
plt.grid()
plt.figure()
plt.plot(np.rad2deg(theta), MVF, lw=2.)
# plt.scatter(theta, MVF, c = 'b')
plt.ylabel('$MVF$', fontsize=24)
plt.xlabel(r'$\theta ({}^{\circ})$', fontsize=20)
plt.legend(loc = 'best', fontsize = 'x-large')
plt.grid()
plt.figure()
plt.plot(T, MVF, lw=2.)
# plt.scatter(T, MVF, c = 'b')
plt.ylabel('$MVF$', fontsize=24)
plt.xlabel('$T (K)$', fontsize=20)
plt.legend(loc = 'best', fontsize = 'x-large')
plt.grid()
plt.figure()
plt.plot(T, sigma, lw=2.)
# plt.scatter(T, sigma, c = 'b')
plt.ylabel('$\sigma$', fontsize=24)
plt.xlabel('$T (K)$', fontsize=20)
plt.legend(loc = 'best', fontsize = 'x-large')
plt.grid()
plt.figure()
plt.plot(T, eps_s, 'b', lw=2., label = "$\epsilon_s$")
plt.plot(T, eps_l, 'b--',lw=2, label = "$\epsilon_l$")
# plt.scatter(T, eps_s, c = 'b')
# plt.scatter(T, eps_l, c = 'b')
plt.xlabel('$T (K)$', fontsize=20)
plt.ylabel('$\epsilon$', fontsize=24)
plt.legend(loc = 'best', fontsize = 'x-large')
plt.grid()
plt.figure()
plt.plot(T, np.rad2deg(theta), lw=2.)
# plt.scatter(T, theta, c = 'b')
plt.xlabel('$T (K)$', fontsize=20)
plt.ylabel(r'$\theta ({}^{\circ})$', fontsize=20)
plt.grid()
F_s = []
for i in range(len(sigma)):
F_s.append(sigma[i]*sma['area'])
# sigma_MPa = []
# for sigma_i in sigma:
# sigma_MPa.append(sigma_i/1e6)
plt.figure()
plt.plot(theta, F_s, 'b', lw=2., label = "$F_s$")
plt.plot(theta, F_l, 'b--', lw=2., label = "$F_l$")
# plt.scatter(theta, F_s, c = 'b')
# plt.scatter(theta, F_l, c = 'b')
plt.ylabel('$F (N)$', fontsize=20)
plt.xlabel(r'$\theta ({}^{\circ})$', fontsize=20)
plt.legend(loc = 'best', fontsize = 'x-large')
plt.grid()
else:
theta, k= flap(airfoil, chord, J, sma, linear, sigma_o,
W, r_w, V, altitude, alpha, T_0,
T_final, MVF_init, n, R, all_outputs = False,
import_matlab = import_matlab, eng=eng,
n_cycles = n_cycles)
if save_data == True:
Data = {'theta': theta, 'eps_s': eps_s, 'eps_l': eps_l,
'sigma': sigma, 'xi': MVF, 'T': T, 'eps_t': eps_t,
'F_l': F_l, 'k': k, 'L_s':L_s}
pickle.dump(Data, open( "data.p", "wb" ) )
return {'theta': theta, 'k': k} | 65c1951fb4bbed8ab2832ce4892a6c6308439f79 | 12,565 |
import json
def load_data(path):
"""Load JSON data."""
with open(path) as inf:
return json.load(inf) | 531fc2b27a6ab9588b1f047e25758f359dc21b6d | 12,566 |
from os.path import exists
from pathlib import Path
def get_extension(file_path):
"""
get_extension(file)
Gets the extension of the given file.
Parameters
----------
file_path
A path to a file
Returns
-------
str
Returns the extension of the file if it exists or None otherwise.
The Returning extension contains a dot. Ex: .csv
"""
if exists(file_path):
return Path(file_path).suffix
else:
return None | 7b1c4ba4f20ac913bb38292d4a704869cab6937e | 12,567 |
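A brief usage sketch for get_extension (the path is illustrative); the extension is returned with its leading dot only when the file exists:
ext = get_extension("reports/summary.csv")
# ext == ".csv" if the file exists, otherwise None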
import pandas as pd
def rank_in_group(df, group_col, rank_col, rank_method="first"):
"""Ranks a column in each group which is grouped by another column
Args:
df (pandas.DataFrame): dataframe to rank-in-group its column
group_col (str): column to be grouped by
rank_col (str): column to be ranked for
rank_method (str): rank method to be the "method" argument of pandas.rank() function
Returns:
pandas.DataFrame: dataframe after the rank-in-group operation
"""
df = df.copy()
df_slice = df[[group_col, rank_col]].drop_duplicates()
df_slice["ranked_{}".format(rank_col)] = df_slice[rank_column].rank(
method=rank_method
)
df = pd.merge(
df,
df_slice[[group_col, "ranked_{}".format(rank_col)]],
how="left",
on=group_col,
)
return df | f2ae45641339bf4bc71bc48a415a28602ccf8da3 | 12,568 |
import six
def get_layer_options(layer_options, local_options):
"""
Get parameters belonging to a certain type of layer.
Parameters
----------
layer_options : list of String
Specifies parameters of the layer.
local_options : list of dictionary
Specifies local parameters in a model function.
"""
layer_options_dict = {}
for key, value in six.iteritems(local_options):
if key in layer_options:
layer_options_dict[key] = value
return layer_options_dict | e40945395c4a96c0a0b9447eeb1d0b50cf661bd7 | 12,569 |
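A short usage sketch for get_layer_options with made-up option names; only keys listed in layer_options are kept:
layer_opts = get_layer_options(['n_filters', 'act'],
                               {'n_filters': 32, 'act': 'relu', 'lr': 0.01})
# layer_opts == {'n_filters': 32, 'act': 'relu'}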
def expr(term:Vn,add:Vt,expr:Vn)->Vn:
"""
expr -> term + expr
"""
return {"add":[term,expr]} | f66475ecbd255ac4c4a04b0d705f1c052c4ee123 | 12,570 |
import json
def gene_box(cohort, order='median', percentage=False):
"""Box plot with counts of filtered mutations by gene.
percentage computes fitness as the increase with respect to
the self-renewing replication rate lambda=1.3.
Color allows you to use a dictionary of colors by gene.
Returns a figure."""
# Load gene color dictionary
with open('../Resources/gene_color_dict.json') as json_file:
color_dict = json.load(json_file)
# Create a dictionary with all filtered genes
gene_list = []
for traj in cohort:
gene_list.append(traj.gene)
gene_dict = {element: [] for element in set(gene_list)}
# update the counts for each gene
if percentage is False:
y_label = 'Fitness'
for traj in cohort:
fitness = traj.fitness
gene_dict[traj.gene].append(fitness)
if percentage is True:
y_label = 'fitness_percentage'
for traj in cohort:
fitness = traj.fitness_percentage
gene_dict[traj.gene].append(fitness)
# sort dictionary in descending order
if order == 'mean':
gene_dict = dict(sorted(gene_dict.items(),
key=lambda item: np.mean(item[1]),
reverse=True))
if order == 'median':
gene_dict = dict(sorted(gene_dict.items(),
key=lambda item: np.median(item[1]),
reverse=True))
if order == 'max':
gene_dict = dict(sorted(gene_dict.items(),
key=lambda item: np.max(item[1]),
reverse=True))
# Bar plot
fig = go.Figure()
# color_dict = dict()
# if isinstance(color, dict):
# color_dict = color
for i, key in enumerate(gene_dict):
fig.add_trace(
go.Box(y=gene_dict[key],
marker_color=color_dict[key],
name=key, boxpoints='all', showlegend=False))
fig.update_layout(title='Gene distribution of filtered mutations',
yaxis_title=y_label,
template="simple_white")
fig.update_xaxes(linewidth=2)
fig.update_yaxes(linewidth=2)
if percentage is False:
fig.update_yaxes(type='log', tickvals=[0.05, 0.1, 0.2, 0.4])
fig.update_layout(xaxis_tickangle=-45)
return fig, gene_dict | 851c166246144b14d51863b4c775baa88ab87205 | 12,571 |
from typing import Union
from typing import List
def _clip_and_count(
adata: AnnData,
target_col: str,
*,
groupby: Union[str, None, List[str]] = None,
clip_at: int = 3,
inplace: bool = True,
key_added: Union[str, None] = None,
fraction: bool = True,
) -> Union[None, np.ndarray]:
"""Counts the number of identical entries in `target_col`
for each group in `group_by`.
"""
if target_col not in adata.obs.columns:
raise ValueError("`target_col` not found in obs.")
groupby = [groupby] if isinstance(groupby, str) else groupby
groupby_cols = [target_col] if groupby is None else groupby + [target_col]
clonotype_counts = (
adata.obs.groupby(groupby_cols, observed=True)
.size()
.reset_index(name="tmp_count")
.assign(
tmp_count=lambda X: [
">= {}".format(min(n, clip_at)) if n >= clip_at else str(n)
for n in X["tmp_count"].values
]
)
)
clipped_count = adata.obs.merge(clonotype_counts, how="left", on=groupby_cols)[
"tmp_count"
].values
if inplace:
key_added = (
"{}_clipped_count".format(target_col) if key_added is None else key_added
)
adata.obs[key_added] = clipped_count
else:
return clipped_count | 20673965557afdcf75b3201cf743fff100981ec3 | 12,572 |
def create_training_patches(images, patch_size, patches_per_image=1, patch_stride=None):
"""
Returns a batch of image patches, given a batch of images.
Args:
images (list, numpy.array): Batch of images.
patch_size (tuple, list): The (width, height) of the patch to
return.
patches_per_image (int): Number of random patches to
generate from each image in the input batch. Default is 1.
patch_stride (int): Stride to use in strided patching. Default
is None, which does not use strided patching. If integer is passed
then strided patching will be used regardless of what is passed
to 'patches_per_image'.
Returns:
(numpy.array): Batch of image patches.
"""
image_patches = []
for im in images:
if patch_stride is None:
for i in range(patches_per_image):
image_patches.append(get_random_patch(im, patch_size))
else:
image_patches += list(get_stride_patches(im, patch_size, patch_stride, 2))
return np.array(image_patches) | 5fce19d2d13f790500e0cbd42934dd6e83c6b084 | 12,573 |
import time
def get_prover_options(prover_round_tag='manual',
prover_round=-1) -> deephol_pb2.ProverOptions:
"""Returns a ProverOptions proto based on FLAGS."""
if not FLAGS.prover_options:
tf.logging.fatal('Mandatory flag --prover_options is not specified.')
if not tf.gfile.Exists(FLAGS.prover_options):
tf.logging.fatal('Required prover options file "%s" does not exist.',
FLAGS.prover_options)
prover_options = deephol_pb2.ProverOptions()
if FLAGS.max_theorem_parameters is not None:
tf.logging.warning(
'Overring max_theorem_parameters in prover options to %d.',
FLAGS.max_theorem_parameters)
prover_options.action_generator_options.max_theorem_parameters = (
FLAGS.max_theorem_parameters)
with tf.gfile.Open(FLAGS.prover_options) as f:
text_format.MergeLines(f, prover_options)
if prover_options.builtin_library:
tf.logging.warning('builtin_library is deprecated. Do not provide.')
if str(prover_options.builtin_library) not in ['core']:
tf.logging.fatal('Unsupported built in library: %s',
prover_options.builtin_library)
if FLAGS.timeout_seconds is not None:
prover_options.timeout_seconds = FLAGS.timeout_seconds
if not FLAGS.output:
tf.logging.fatal('Missing flag --output [recordio_pattern]')
prover_options.prover_round = deephol_pb2.ProverRound(
start_seconds=int(round(time.time())),
tag=prover_round_tag,
round=prover_round)
_verify_prover_options(prover_options)
# Log prover options.
tf.logging.info('Using prover_options:\n %s', str(prover_options))
return prover_options | ee1ee9fb7ce573c543f0750d6b8fd1eed98deec9 | 12,574 |
from re import findall
def bracketpy(pystring):
"""Find CEDICT-style pinyin in square brackets and correct pinyin.
Looks for square brackets in the string and tries to convert its
contents to correct pinyin. It is assumed anything in square
brackets is CC-CEDICT-format pinyin.
e.g.: "拼音[pin1 yin1]" will be converted into "拼音 pīnyīn".
"""
if len(findall("(\[.+?\])", pystring)) >= 1:
cedpylist = findall("(\[.+?\])", pystring)
for item in cedpylist:
pystring = pystring.replace(item, " " + pyjoin(item[1:-1]))
return pystring
if len(findall("(\[.+?\])", pystring)) < 1:
return pystring
else:
return None | 0499047ec45e6b9c66c27dd89663667b13ddbfb1 | 12,575 |
import psutil
def get_ram_usage_bytes(size_format: str = 'M'):
"""
Size formats include K = Kilobyte, M = Megabyte, G = Gigabyte
"""
total = psutil.virtual_memory().total
available = psutil.virtual_memory().available
used = total - available
# Apply size
if size_format == 'K':
used = used / 1024
if size_format == 'M':
used = used / 1024 / 1024
if size_format == 'G':
used = used / 1024 / 1024 / 1024
return int(used) | 990987078b0ad3c2ac2ee76dcf96b7cdf01f0354 | 12,576 |
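A usage sketch for get_ram_usage_bytes; it reports the currently used RAM in the requested unit:
print(get_ram_usage_bytes('M'), "MiB in use")
print(get_ram_usage_bytes('G'), "GiB in use")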
def weighted_crossentropy(weights, name='anonymous'):
"""A weighted version of tensorflow.keras.objectives.categorical_crossentropy
Arguments:
weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.
name: string identifying the loss to differentiate when models have multiple losses
Returns:
keras loss function named name+'_weighted_loss'
"""
string_globe = 'global ' + name + '_weights\n'
string_globe += 'global ' + name + '_kweights\n'
string_globe += name + '_weights = np.array(weights)\n'
string_globe += name + '_kweights = K.variable('+name+'_weights)\n'
exec(string_globe, globals(), locals())
fxn_postfix = '_weighted_loss'
string_fxn = 'def ' + name + fxn_postfix + '(y_true, y_pred):\n'
string_fxn += '\ty_pred /= K.sum(y_pred, axis=-1, keepdims=True)\n'
string_fxn += '\ty_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())\n'
string_fxn += '\tloss = y_true * K.log(y_pred) * ' + name + '_kweights\n'
string_fxn += '\tloss = -K.sum(loss, -1)\n'
string_fxn += '\treturn loss\n'
exec(string_fxn, globals(), locals())
loss_fxn = eval(name + fxn_postfix, globals(), locals())
return loss_fxn | 071ab00a723d54194b4b27c889c638debe82f10a | 12,577 |
def _get_package_type(id):
"""
Given the id of a package this method will return the type of the
package, or 'dataset' if no type is currently set
"""
pkg = model.Package.get(id)
if pkg:
return pkg.type or u'dataset'
return None | c84e137e2b0adaf8719d757f178aa47d4a63c46a | 12,578 |
def _find_protruding_dimensions(f, care, fol):
"""Return variables along which `f` violates `care`."""
vrs = joint_support([f, care], fol)
dims = set()
for var in vrs:
other_vars = vrs - {var}
f_proj = fol.exist(other_vars, f)
care_proj = fol.exist(other_vars, care)
if (care_proj | ~ f_proj) != fol.true:
dims.add(var)
return dims | 02da5718645652288c9fa6fcedc07198afe49a58 | 12,579 |
import os
import glob
def prepare_data(train_mode, dataset="Train"):
"""
Args:
dataset: choose train dataset or test dataset
For train dataset, output data would be ['.../t1.bmp',
'.../t2.bmp',..., 't99.bmp']
"""
# Defines list of data path lists for different folders of training data
dataPaths = []
# If mode is train, dataPaths from each folder in Train directory are
# stored into a list which is then appended to dataPaths
# Join the Train dir to current directory
if dataset == "Train":
data_dir = os.path.join(os.getcwd(), dataset)
for root, dirs, files in os.walk(data_dir):
if dirs != []:
for folder in dirs:
dataFolderDir = os.path.join(data_dir, folder)
# make set of all dataset file path
data = glob.glob(os.path.join(dataFolderDir, "*.png"))
# Sorts by number in file name
data.sort(key=lambda f: int(''.join(filter(str.isdigit,
os.path.basename(f)))))
dataPaths.append(data)
    else:
        if train_mode in (0, 1, 2, 3, 4):
            data_dir = os.path.join(os.path.join(os.getcwd(), dataset),
                                    "Mode{}".format(train_mode))
            # make set of all dataset file path
            data = glob.glob(os.path.join(data_dir, "*.png"))
            # Sorts by number in file name
            data.sort(key=lambda f: int(''.join(filter(str.isdigit,
                                                os.path.basename(f)))))
            dataPaths.append(data)
        elif train_mode in (5, 6):
            # Prepares testing data paths for modes with per-folder data
            data_dir = os.path.join(os.path.join(os.getcwd(), dataset),
                                    "Mode{}".format(train_mode))
            for root, dirs, files in os.walk(data_dir):
                if dirs != []:
                    for folder in dirs:
                        dataFolderDir = os.path.join(data_dir, folder)
                        # make set of all dataset file path
                        data = glob.glob(os.path.join(dataFolderDir, "*.png"))
                        # Sorts by number in file name
                        data.sort(key=lambda f: int(''.join(filter(str.isdigit,
                                                            os.path.basename(f)))))
                        dataPaths.append(data)
print(dataPaths)
return dataPaths | 08a17868b36b20c495253e366fcb00a1d105cfa0 | 12,580 |
import random
def simulate_relatedness(genotypes, relatedness=.5, n_iter=1000, copy=True):
"""
Simulate relatedness by randomly copying genotypes between individuals.
Parameters
----------
genotypes : array_like
An array of shape (n_variants, n_samples, ploidy) where each
element of the array is an integer corresponding to an allele index
(-1 = missing, 0 = reference allele, 1 = first alternate allele,
2 = second alternate allele, etc.).
relatedness : float, optional
Fraction of variants to copy genotypes for.
n_iter : int, optional
Number of times to randomly copy genotypes between individuals.
copy : bool, optional
If False, modify `genotypes` in place.
Returns
-------
genotypes : ndarray, shape (n_variants, n_samples, ploidy)
The input genotype array but with relatedness simulated.
"""
# check genotypes array
genotypes = np.asarray(genotypes)
assert genotypes.ndim >= 2
n_variants = genotypes.shape[0]
n_samples = genotypes.shape[1]
# copy input array
if copy:
genotypes = genotypes.copy()
else:
# modify in place
pass
# determine the number of variants to copy genotypes for
n_copy = int(relatedness * n_variants)
# iteratively introduce relatedness
for i in range(n_iter):
# randomly choose donor and recipient
donor_index = random.randint(0, n_samples-1)
donor = genotypes[:, donor_index]
recip_index = random.randint(0, n_samples-1)
recip = genotypes[:, recip_index]
# randomly pick a set of variants to copy
variant_indices = random.sample(range(n_variants), n_copy)
# copy across genotypes
recip[variant_indices] = donor[variant_indices]
return genotypes | e319f4e15c4c08eb90260b77efc25ea330aac4c9 | 12,581 |
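A call-only sketch for simulate_relatedness with a synthetic genotype array; with relatedness=0.5, each of the n_iter iterations copies 50% of the variants from a random donor to a random recipient:
import numpy as np
genotypes = np.random.randint(0, 2, size=(1000, 20, 2))
related = simulate_relatedness(genotypes, relatedness=0.5, n_iter=100)
# `related` has the same shape as `genotypes`; the input is left untouched because copy=True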
def pages_substitute(content):
"""
Substitute tags in pages source.
"""
if TAG_USERGROUPS in content:
usergroups = UserGroup.objects.filter(is_active=True).order_by('name')
replacement = ", ".join(f"[{u.name}]({u.webpage_url})" for u in usergroups)
content = content.replace(TAG_USERGROUPS, replacement)
return content | 13c2138256a0e1afa0ad376994849f2716020540 | 12,582 |
def vcfanno(vcf, out_file, conf_fns, data, basepath=None, lua_fns=None):
"""
annotate a VCF file using vcfanno (https://github.com/brentp/vcfanno)
"""
if utils.file_exists(out_file):
return out_file
if lua_fns is None:
lua_fns = []
vcfanno = config_utils.get_program("vcfanno", data)
with file_transaction(out_file) as tx_out_file:
conffn = _combine_files(conf_fns, tx_out_file)
luafn = _combine_files(lua_fns, tx_out_file)
luaflag = "-lua {0}".format(luafn) if luafn and utils.file_exists(luafn) else ""
basepathflag = "-base-path {0}".format(basepath) if basepath else ""
cores = dd.get_num_cores(data)
cmd = "{vcfanno} -p {cores} {luaflag} {basepathflag} {conffn} {vcf} | sed -e 's/Number=A/Number=1/g' | bgzip -c > {tx_out_file}"
message = "Annotating {vcf} with vcfanno, using {conffn}".format(**locals())
do.run(cmd.format(**locals()), message)
return out_file | 808488bd07c56b541694715193df4ae1cb51869c | 12,583 |
def clean(params: dict) -> str:
"""
Build clean rules for Makefile
"""
clean = "\t@$(RM) -rf $(BUILDDIR)\n"
if params["library_libft"]:
clean += "\t@make $@ -C " + params["folder_libft"] + "\n"
if params["library_mlx"] and params["compile_mlx"]:
clean += "\t@make $@ -C " + params["folder_mlx"] + "\n"
return clean | fb7dd0e7a2fbb080dd8b0d5d4489e9c5ef1367ec | 12,584 |
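A usage sketch for clean with a hypothetical params dict; it emits one extra make-recursion line per enabled library:
params = {"library_libft": True, "folder_libft": "libft",
          "library_mlx": False, "compile_mlx": False}
print(clean(params))
# \t@$(RM) -rf $(BUILDDIR)
# \t@make $@ -C libft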
import re
def mathematica(quero: str, meta: str = '') -> bool:
"""mathematica
Rudimentar mathematical operations (boolean result)
Args:
quero (_type_, optional): _description_. Defaults to str.
Returns:
bool: True if evaluate to True.
"""
# neo_quero = quero.replace(' ', '').replace('(', '').replace(')', '')
neo_quero = quero.replace(' ', '')
if quero == 'True':
return True
if quero == 'False':
return False
if neo_quero.find('&&') > -1:
parts = neo_quero.split('&&')
# print(parts)
# return bool(parts[0]) and bool(parts[1])
return logicum(parts[0]) and logicum(parts[1])
if neo_quero.find('||') > -1:
parts = neo_quero.split('||')
# return bool(parts[0]) or bool(parts[1])
return logicum(parts[0]) or logicum(parts[1])
# regula = r"(\d*)(.{1,2})(\d*)"
regula = r"(?P<n1>(\d*))(?P<op>(\D{1,2}))(?P<n2>(\d*))"
r1 = re.match(regula, neo_quero)
if r1.group('op') == '==':
return int(r1.group('n1')) == int(r1.group('n2'))
if r1.group('op') == '!=':
return int(r1.group('n1')) != int(r1.group('n2'))
if r1.group('op') == '<=':
return int(r1.group('n1')) <= int(r1.group('n2'))
if r1.group('op') == '>=':
return int(r1.group('n1')) >= int(r1.group('n2'))
if r1.group('op') == '<':
return int(r1.group('n1')) < int(r1.group('n2'))
if r1.group('op') == '>':
return int(r1.group('n1')) > int(r1.group('n2'))
raise ValueError(
'mathematica: <quaero> [{1}] <op>? [{0}]'.format(str(quero), meta)) | b05777e880688d8f3fc90ba9d54098f341054bd7 | 12,585 |
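A few usage sketches for mathematica, limited to plain comparisons (the '&&' and '||' branches defer to an external logicum helper that is not shown here):
mathematica("3 < 14")    # True  (spaces are stripped before matching)
mathematica("10>=10")    # True
mathematica("False")     # False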
import sys
from Bio import SeqIO
import pandas as pd
def fasta2vcf(f):
"""convert fasta to vcf dataframe
Input
-----
Fasta file, _ref is recognized as ref and _alt is used as alt, these are two keywords
Output
------
vcf dataframe: chr, pos, name, ref, alt, reference sequence
"""
my_dict = {}
for r in SeqIO.parse(f, "fasta"):
my_dict[r.id] = str(r.seq).upper()
print (my_dict)
vcf = pd.DataFrame()
index_list = []
chr_list = []
pos_list = []
ref_list = []
alt_list = []
seq_list = []
for k in my_dict:
if not "_ref" in k:
continue
name = k.replace("_ref","")
if not name+"_alt" in my_dict:
print (k,"alt sequence not found. Please use _ref and _alt keywords. Skip...")
continue
ref_seq,alt_seq = my_dict[k],my_dict[name+"_alt"]
if len(ref_seq) < 30:
print (k,"Please input sequence length at least 30bp. Skip...")
continue
if ref_seq == alt_seq:
print (k,"Ref and Alt sequence is the same. Please check. Skip...")
continue
pos,ref,alt = find_pos_ref_alt(ref_seq,alt_seq)
index_list.append(name)
chr_list.append(k)
seq_list.append(ref_seq)
pos_list.append(pos)
ref_list.append(ref)
alt_list.append(alt)
vcf[0] = chr_list
vcf[1] = pos_list
vcf[2] = index_list
vcf[3] = ref_list
vcf[4] = alt_list
vcf[5] = seq_list
vcf = vcf[vcf[1]!=-1]
if vcf.shape[0] == 0:
print ("no valid sequences in:",f)
print ("Exit...")
sys.exit(1)
return vcf | 673e69e644b6caf68f0738cfaa8204b80877a412 | 12,586 |
def time_pet(power,energy):
"""Usage: time_pet(power,energy)"""
return energy/power | 11e9c82b8c1be84995f9517e04ed5e1270801e27 | 12,587 |
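A one-line usage sketch for time_pet (units are whatever the caller uses consistently, e.g. watts and joules):
time_pet(100, 500)   # -> 5.0, i.e. 500 J at 100 W takes 5 s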
def compute_sigma0(
T,
S,
**kwargs,
):
"""
compute the density anomaly referenced to the surface
"""
return compute_rho(T, S, 0, **kwargs) - 1000 | 300d5552c70e6fd6d8708345aa3eed53795309cf | 12,588 |
def get_neighbors_radius(nelx, nely, coord, connect, radius):
""" Check neighboring elements that have the centroid within the predetermined radius.
Args:
nelx (:obj:`int`): Number of elements on the x axis.
        nely (:obj:`int`): Number of elements on the y axis.
coord (:obj:`numpy.array`): Coordinates of the element.
connect (:obj:`numpy.array`): Element connectivity.
radius (:obj:`float`): Radius to get elements in the vicinity of each element.
Returns:
neighbors, H, centroids
"""
el_number = nelx * nely
centroids = np.empty((el_number, 2))
idx = connect[:, 1:] - 1
centroids[:, 0] = np.sum(coord[idx, 1], axis = 1)/4
centroids[:, 1] = np.sum(coord[idx, 2], axis = 1)/4
ind_rows = []
ind_cols = []
data = []
cols = 0
neighbors = []
for el in range(el_number):
distance = np.sqrt(np.sum((centroids[el] - centroids)**2, axis=1))
mask = distance <= radius
neighbor = mask.nonzero()[0] + 1
neighbors.extend(neighbor - 1)
hi = radius - distance
hi_max = np.maximum(0, hi)
data.extend(hi_max[mask])
aux = len(hi_max[mask])
rows = np.repeat(el, aux) #.tolist()
columns = np.arange(0, aux)
ind_rows.extend(rows)
ind_cols.extend(columns)
if aux > cols:
cols = aux
H = csc_matrix((data, (ind_rows, ind_cols)), shape=(nelx*nely, cols)).toarray()
neighbors = csc_matrix((neighbors, (ind_rows, ind_cols)), shape=(nelx*nely, cols), dtype='int').toarray()
return neighbors, H, centroids | 669e11d3a2890f1485e33e021b2671ff6f197c03 | 12,589 |
import copy
def merge_with(obj, *sources, **kwargs):
"""
This method is like :func:`merge` except that it accepts customizer which is invoked to produce
the merged values of the destination and source properties. If customizer returns ``None``,
merging is handled by this method instead. The customizer is invoked with five arguments:
``(obj_value, src_value, key, obj, source)``.
Args:
obj (dict): Destination object to merge source(s) into.
sources (dict): Source objects to merge from. subsequent sources
overwrite previous ones.
Keyword Args:
iteratee (callable, optional): Iteratee function to handle merging
(must be passed in as keyword argument).
Returns:
dict: Merged object.
Warning:
`obj` is modified in place.
Example:
>>> cbk = lambda obj_val, src_val: obj_val + src_val
>>> obj1 = {'a': [1], 'b': [2]}
>>> obj2 = {'a': [3], 'b': [4]}
>>> res = merge_with(obj1, obj2, cbk)
>>> obj1 == {'a': [1, 3], 'b': [2, 4]}
True
.. versionadded:: 4.0.0
.. versionchanged:: 4.9.3
Fixed regression in v4.8.0 that caused exception when `obj` was ``None``.
"""
if obj is None:
return None
sources = list(sources)
iteratee = kwargs.pop("iteratee", None)
if iteratee is None and sources and callable(sources[-1]):
iteratee = sources.pop()
sources = [copy.deepcopy(source) for source in sources]
if callable(iteratee):
iteratee = partial(callit, iteratee, argcount=getargcount(iteratee, maxargs=5))
else:
iteratee = None
return _merge_with(obj, *sources, iteratee=iteratee, **kwargs) | 94a16ae7d3f3e73ef8e27b32cd38a09c61ad1b2b | 12,590 |
def count_class_nbr_patent_cnt(base_data_list, calculate_type):
"""
    Count the number of patents associated with each distinct classification number across all data.
    :param base_data_list:
    :param calculate_type:
    :return:
"""
class_number_patent_cnt_dict = dict()
for base_data in base_data_list:
class_number_value = base_data[const.CLASS_NBR]
calculate_class_number_patent_count_dict(class_number_value, class_number_patent_cnt_dict, calculate_type)
return class_number_patent_cnt_dict | 6dfe06c2233fbfafc8083dc968d32520564319f8 | 12,591 |
import numpy as np
import plotly.graph_objects as go
def plot_pta_L(df):
"""
INPUTS
-df: pandas dataframe containing the data to plot
OUTPUTS
-saves pta graphs in .html
"""
title = generate_title_run_PTA(df, "Left Ear", df.index[0])
labels = {"title": title,
"x": "Frequency (Hz)",
"y": "Hearing Threshold (dB HL)"}
fig = go.Figure()
fig.update_layout(title=labels["title"],
xaxis_title=labels["x"],
yaxis_title=labels["y"],
xaxis_type="log",
xaxis_range=[np.log10(100), np.log10(20000)],
yaxis_range=[80, -20],
yaxis_dtick=10,
xaxis_showline=True,
xaxis_linecolor="black",
yaxis_showline=True,
yaxis_linecolor="black",
yaxis_zeroline=True,
yaxis_zerolinewidth=1,
yaxis_zerolinecolor="black")
x, y = data_to_plot_PTA(df, "LE_")
fig.add_trace(go.Scatter(x=x,
y=y,
line_color="blue",
mode='lines+markers',
name=labels["title"],
hovertemplate="%{x:1.0f} Hz<br>" +
"%{y:1.0f} dB HL"))
completed = save_graph_PTA(fig, df, "Left Ear")
if completed is True:
return True
else:
return False | ec82b58a6a476bee5e864b8678bb90d2998d4a02 | 12,592 |
def create_graph(edge_num: int, edge_list: list) -> dict:
"""
Create a graph expressed with adjacency list
:dict_key : int (a vertex)
:dict_value : set (consisted of vertices adjacent to key vertex)
"""
a_graph = {i: set() for i in range(edge_num)}
for a, b in edge_list:
a_graph[a - 1].add(b - 1) # All graphs always need this line
a_graph[b - 1].add(a - 1) # Only undirected graph needs this line
return a_graph | 6ec1a71cf82a3a669090df42ac7d53e1286fda2d | 12,593 |
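A usage sketch for create_graph; note that the first argument is used as the number of vertices (despite the edge_num name) and that vertex labels are shifted to be 0-based:
g = create_graph(3, [(1, 2), (2, 3)])
# g == {0: {1}, 1: {0, 2}, 2: {1}}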
from typing import Optional
from typing import Callable
from typing import Any
from typing import List
import inspect
def multikey_fkg_allowing_type_hints(
namespace: Optional[str],
fn: Callable,
to_str: Callable[[Any], str] = repr) -> Callable[[Any], List[str]]:
"""
Equivalent of :func:`dogpile.cache.util.function_multi_key_generator`, but
using :func:`inspect.signature` instead.
Also modified to make the cached function unique per INSTANCE for normal
methods of a class.
"""
namespace = get_namespace(fn, namespace)
sig = inspect.signature(fn)
argnames = [p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]
has_self = bool(argnames and argnames[0] in ('self', 'cls'))
def generate_keys(*args: Any, **kw: Any) -> List[str]:
if kw:
raise ValueError("This dogpile.cache key function generator, "
"multikey_fkg_allowing_type_hints, "
"does not accept keyword arguments.")
if has_self:
# Unlike dogpile's default, make it instance- (or class-) specific
# by including a representation of the "self" or "cls" argument:
args = [hex(id(args[0]))] + list(args[1:])
keys = [namespace + "|" + key for key in map(to_str, args)]
if DEBUG_INTERNALS:
log.debug(
"multikey_fkg_allowing_type_hints.generate_keys() -> {!r}",
keys)
return keys
return generate_keys | 33e04169618bae3e5d7239e5256afaa9f65355f6 | 12,594 |
def get_current_version() -> str:
"""Read the version of the package.
See https://packaging.python.org/guides/single-sourcing-package-version
"""
version_exports = {}
with open(VERSION_FILE) as file:
exec(file.read(), version_exports) # pylint: disable=exec-used
return version_exports["VERSION"] | c283d58881aa381503bb3500bd7d745f25df0f7e | 12,595 |
import random
def seed_story(text_dict):
"""Generate random seed for story."""
story_seed = random.choice(list(text_dict.keys()))
return story_seed | 0c0f41186f6eaab84a1d197e9335b4c28fd83785 | 12,596 |
from pathlib import Path
import sys
def detect_conda_env():
"""Inspect whether `sys.executable` is within a conda environment and if it is,
return the environment name and Path of its prefix. Otherwise return None, None"""
prefix = Path(sys.prefix)
if not (prefix / 'conda-meta').is_dir():
# Not a conda env
return None, None
if (prefix / 'envs').is_dir():
# It's the base conda env:
return 'base', prefix
# Not the base env: its name is the directory basename:
return prefix.name, prefix | 2cb88ebfbb8a2919300e1d0072540e448dcf35ad | 12,597 |
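A call-only sketch for detect_conda_env; both values are None when the running interpreter is not inside a conda environment:
env_name, prefix = detect_conda_env()
if env_name is None:
    print("not a conda environment")
else:
    print(f"conda env {env_name} at {prefix}")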
def _get_rel_att_inputs(d_model, n_heads): # pylint: disable=invalid-name
"""Global relative attentions bias initialization shared across the layers."""
assert d_model % n_heads == 0 and d_model % 2 == 0
d_head = d_model // n_heads
bias_initializer = init.RandomNormalInitializer(1e-6)
context_bias_layer = core.Weights(bias_initializer,
shape=(1, n_heads, 1, d_head))
location_bias_layer = core.Weights(bias_initializer,
shape=(1, n_heads, 1, d_head))
return context_bias_layer, location_bias_layer | 57f58f29a586571f1cc8fa1fc69956a4168cbf16 | 12,598 |
def two_time_pad():
"""A one-time pad simply involves the xor of a message with a key to produce a ciphertext: c = m ^ k.
It is essential that the key be as long as the message, or in other words that the key not be repeated for two distinct message blocks.
Your task:
In this problem you will break a cipher when the one-time pad is re-used.
c_1 = 3801025f45561a49131a1e180702
c_2 = 07010051455001060e551c571106
These are two hex-encoded ciphertexts that were formed by applying a “one-time pad” to two different messages with
the same key. Find the two corresponding messages m_1 and m_2.
Okay, to make your search simpler, let me lay out a few ground rules. First, every character in the text is either
a lowercase letter or a space, aside from perhaps the first character in the first message which might be capitalized.
As a consequence, no punctuation appears in the messages. Second, the messages consist of English words in ASCII.
Finally, all of the words within each message is guaranteed to come from the set of the 100 most
common English words: https://en.wikipedia.org/wiki/Most_common_words_in_English.
Returns:
Output the concatenation of strings m_1 and m_2. (Don't worry if words get smashed together as a result.)
"""
c_1 = '3801025f45561a49131a1e180702'
c_2 = '07010051455001060e551c571106'
    # convert the hexadecimal representation to integers two hex digits at a time, since the xor operations are done on integers
c_1_int = [int(c_1[i] + c_1[i+1], 16) for i in range(0, len(c_1), 2)]
c_2_int = [int(c_2[i] + c_2[i+1], 16) for i in range(0, len(c_1), 2)]
xord = [c_1_int[i] ^ c_2_int[i] for i in range(len(c_1_int))] #xor of the two lists which are integer representations
result = construct('',xord)
if result == None:
return None
else:
print(result)
new_string = ''.join([chr(ord(result[i]) ^ xord[i]) for i in range(len(result))])
return new_string + result | d4c45312f32b372a065365c78a991969e2bc53be | 12,599 |