content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def collect_ips():
"""Fill IP addresses into people list. Return if all addresses collected or not."""
out, rc, _ = run_cmd('sudo nmap -sn ' + net, log_error=False)
if rc:
print "Error: nmap is required. Run following command:"
print "sudo apt-get -y install nmap"
sys.exit(4)
# Regex seeks IP @ pos 0 and MAC @ pos 2.
addrs = re.findall(r'(?is)((\d+\.){3}\d+).*?(([\da-fA-F]{2}:?){6})', out)
for a, b in enumerate(people):
if b.mac in out.upper() and not b.ip:
for g in addrs:
if b.mac in g[2].upper():
people[a].ip = g[0]
people[a].presence = True # Avoid extra ping
people[a].ltsip = time.time()
return all(get_ips())
|
52d1369d4af469a62af465b000489ad43f71d2e3
| 30,186 |
def roles(*role_list):
"""
Decorator defining a list of role names, used to look up host lists.
A role is simply defined as a key in `env` whose value is a list of one or
more host connection strings. For example, the following will ensure that,
barring an override on the command line, ``my_func`` will be executed
against the hosts listed in the ``webserver`` and ``dbserver`` roles::
env.webserver = ['www1', 'www2']
env.dbserver = ['db1']
@roles('webserver', 'dbserver')
def my_func():
pass
Note that this decorator actually just sets the function's ``.roles``
attribute, which is then read prior to executing the function.
"""
def attach_roles(func):
@wraps(func)
def inner_decorator(*args, **kwargs):
return func(*args, **kwargs)
inner_decorator.roles = list(role_list)
return inner_decorator
return attach_roles
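# A minimal usage sketch of the decorator above (assuming everything lives in one module;
# `wraps` comes from functools, which the decorator itself relies on): decorating a
# function only tags it with a `.roles` attribute, it does not change the call behaviour.
from functools import wraps

@roles('webserver', 'dbserver')
def my_task():
    pass

print(my_task.roles)  # -> ['webserver', 'dbserver']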
|
2e30be0cb8876085c0c071b61a0a62061904816e
| 30,187 |
def stairmaster_mets(setting):
"""
For use in submaximal tests on the StairMaster 4000 PT step ergometer.
Howley, Edward T., Dennis L. Colacino, and Thomas C. Swensen. "Factors Affecting the Oxygen Cost of Stepping on an Electronic Stepping Ergometer." Medicine & Science in Sports & Exercise 24.9 (1992): n. pag. NCBI. Web. 10 Nov. 2016.
args:
setting (int): the setting of the step ergometer
Returns:
float: VO2max in kcal/kg*hour
"""
return 0.556 * 7.45 * setting
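# Worked example of the formula above: at setting 7 the estimate is 0.556 * 7.45 * 7.
print(stairmaster_mets(7))  # -> approximately 28.995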
|
1d6cc9fc846773cfe82dfacb8a34fb6f46d69903
| 30,189 |
from typing import Optional
from typing import List
from pathlib import Path
from typing import Protocol
def get_uri_for_directory(directory: str,
excludes: Optional[List[str]] = None) -> str:
"""Get a content-addressable URI from a directory's contents.
This function will generate the name of the package by the directory.
It'll go through all the files in the directory and hash the contents
of the files to get the hash value of the package.
The final package name is: _ray_pkg_<HASH_VAL>.zip of this package.
e.g., _ray_pkg_029f88d5ecc55e1e4d64fc6e388fd103.zip
Examples:
.. code-block:: python
>>> get_uri_for_directory("/my_directory")
.... _ray_pkg_af2734982a741.zip
Args:
directory (str): The directory.
excludes (list[str]): The dir or files that should be excluded.
Returns:
URI (str)
Raises:
ValueError if the directory doesn't exist.
"""
if excludes is None:
excludes = []
directory = Path(directory).absolute()
if not directory.exists() or not directory.is_dir():
raise ValueError(f"directory {directory} must be an existing"
" directory")
hash_val = _hash_directory(directory, directory,
_get_excludes(directory, excludes))
return "{protocol}://{pkg_name}.zip".format(
protocol=Protocol.GCS.value, pkg_name=RAY_PKG_PREFIX + hash_val.hex())
|
acb7586d9adf210563ba73c3aed46c8ac695be26
| 30,190 |
def clean_cancer_dataset(df_training):
"""
Checks and cleans the dataset of any potential impossible values, e.g. bi-rads columns, the 1st only allows
values in the range of 1-5, ordinal
Age, 2nd column, cannot be negative, integer
Shape, 3rd column, only allows values between 1 and 4, nominal
Margin, only allows a range of 1 to 5, nominal
Density only allows values between 1-4,ordinal.
All deletions will be performed in place.
:return: cleaned up dataframe, count of removed points
"""
rows_pre_cleaning = df_training.shape[0]
df_training.drop(df_training.index[df_training['bi_rads'] > 5], inplace=True)
df_training.drop(df_training.index[df_training['shape'] > 4], inplace=True)
df_training.drop(df_training.index[df_training['margin'] > 5], inplace=True)
df_training.drop(df_training.index[df_training['density'] > 4], inplace=True)
rows_removed = rows_pre_cleaning - df_training.shape[0]
return df_training, rows_removed
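# A small illustrative call with hypothetical data (column names as used above); the
# second row violates the bi-rads 1-5 range and is dropped in place.
import pandas as pd
toy = pd.DataFrame({"bi_rads": [4, 9], "age": [45, 60], "shape": [2, 3],
                    "margin": [3, 3], "density": [2, 2]})
cleaned, removed = clean_cancer_dataset(toy)
print(removed)  # -> 1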
|
a30f377b48bb665f42f3efa58b15d289f7e7f9b3
| 30,191 |
from datetime import datetime
def str_to_date(date, form=None):
"""
Return Date with datetime format
:param form:
:param date: str date
:return: datetime date
"""
if form is None:
form = get_form(date)
return datetime.strptime(date, form)
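# Example call with an explicit format string; the `get_form` helper referenced above is
# not part of this snippet, so the format is supplied directly.
print(str_to_date("2021-05-03", form="%Y-%m-%d"))  # -> 2021-05-03 00:00:00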
|
acda6e393b468ffaf8eceb689c859440a53e486e
| 30,192 |
def get_model_results(corpus, texts, ldamodel=None):
"""function extract model result such as topics, percentage distribution and return it as pandas dataframe
in: corpus : encoded features
in: text : main text
in: ldamodel: the trained model
out: dataframe
"""
topics_df = pd.DataFrame()
# Extract the main topic in each document
for i, row_list in enumerate(ldamodel[corpus]):
row = row_list[0] if ldamodel.per_word_topics else row_list
row = sorted(row, key=lambda x: (x[1]), reverse=True)
# Get the Dominant topic, Percentage Contribution and Topic Keywords for each document
for j, (topic_num, prop_topic) in enumerate(row):
if j == 0: # => dominant topic
wp = ldamodel.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
topics_df = topics_df.append(pd.Series([int(topic_num), round(prop_topic, 4), topic_keywords]),
ignore_index=True)
else:
break
topics_df.columns = ['Dominant_Topic_Number', 'Percentage_Contribution', 'Topic_Keywords']
# concat original text to topics_df
contents = pd.Series(texts)
topics_df = pd.concat([topics_df, contents], axis=1)
topics_df.columns = ['Dominant_Topic_Number', 'Percentage_Contribution', 'Topic_Keywords', 'Text']
topics_df = topics_df[["Text", "Topic_Keywords", "Dominant_Topic_Number", "Percentage_Contribution"]]
return topics_df
|
31aa99db41193d2e25bd723720b68eb30606517f
| 30,193 |
def rest_notify():
"""Github rest endpoint."""
sdkid = request.args.get("sdkid")
if not sdkid:
return {'message': 'sdkid is a required query parameter'}
sdkbase = request.args.get("sdkbase", "master")
sdk_tag = request.args.get("repotag", sdkid.split("/")[-1].lower())
rest_bot = RestAPIRepoHandler(sdkid, sdk_tag, sdkbase)
bot = BotHandler(rest_bot)
github_index = {
'ping': ping,
'push': push,
'pull_request': rest_pull_request,
'issue_comment': bot.issue_comment,
'issues': bot.issues
}
if not _WORKER_THREAD.is_alive():
_WORKER_THREAD.start()
return handle_github_webhook(
github_index,
request.headers['X-GitHub-Event'],
request.get_json()
)
|
4f7c15186fbb2d0a4a3dd7199045178b62da362f
| 30,194 |
def lib_pt_loc(sys_chars_vals, tolerance = 1e-12):
"""Computes Non-Dimensionalized Libration Points Location for P1-P2 system
Parameters
----------
sys_chars_vals: object
Object of Class sys_char
tolerance: float
convergence tolerance for Newton-Raphson Method
Returns
-------
lib_loc: numpy ndarray (5x3)
5 Libration Points, [nd]
"""
mu = sys_chars_vals.mu
lib_loc = np.zeros((5, 3))
lib_loc[3, :] = [0.5 - mu, 3**0.5 / 2, 0] # L4, analytical solution known
lib_loc[4, :] = [0.5 - mu, -(3**0.5) / 2, 0] # L5, analytical solution known
# 5th degree polynomial of L1, L2 and L3
f_lib = np.array(
[
[1, mu - 3, 3 - 2 * mu, -mu, 2 * mu, -mu],
[1, 3 - mu, 3 - 2 * mu, -mu, -2 * mu, -mu],
[1, 2 + mu, 1 + 2 * mu, mu - 1, 2 * mu - 2, -1 + mu],
]
)
# First-order derivative of the polynomial defined in f_lib
fd_lib = np.array(
[
[0, 5, 4 * (mu - 3), 3 * (3 - 2 * mu), 2 * -mu, 2 * mu],
[0, 5, 4 * (3 - mu), 3 * (3 - 2 * mu), 2 * -mu, -2 * mu],
[0, 5, 4 * (2 + mu), 3 * (1 + 2 * mu), 2 * (mu - 1), 2 * mu - 2],
]
)
initial_guess = np.array([0.9, 1.1, -1])
for i in range(3):
val = np.vander([initial_guess[i]], 6)
h = np.dot(val, f_lib[i, :]) / np.dot(val, fd_lib[i, :])
while abs(h) >= tolerance:
val = np.vander([initial_guess[i]], 6)
h = np.dot(val, f_lib[i, :]) / np.dot(val, fd_lib[i, :])
lib_loc[i, 0] = initial_guess[i] - h
initial_guess[i] = lib_loc[i, 0]
if i == 0:
lib_loc[i, 0] = 1 - mu - lib_loc[i, 0]
elif i == 1:
lib_loc[i, 0] = 1 - mu + lib_loc[i, 0]
elif i == 2:
lib_loc[i, 0] = -mu - lib_loc[i, 0]
return lib_loc
|
2ee92c8f6e91353a675236a7f63eed4d7f807846
| 30,195 |
def _find_connection_file(connection_file):
"""Return the absolute path for a connection file
- If nothing specified, return current Kernel's connection file
- Otherwise, call jupyter_client.find_connection_file
"""
if connection_file is None:
# get connection file from current kernel
return get_connection_file()
else:
return jupyter_client.find_connection_file(connection_file)
|
2e4adfd67e0d2b35545cab1e82def271175b9de3
| 30,197 |
from typing import List
def _symbols_of_input(label: str) -> List[str]:
"""Extracts FST symbols that compose complex input label of the rewrite rule.
FST symbols of a complex input label is;
- Epsilon symbol if the complex input label is an epsilon symbol
(e.g. ['<eps>'] for label '<eps>').
- Digits of the complex input label if it is only composed of digits
without any feature analysis tags (e.g. ['9', '0'] for the label '90').
- Tokenized inflectional group boundaries, inflectional or derivational
morphemes, proper noun and feature analyses tags, numbers, and punctuation
if the complex input label is composed of these units (e.g. [')([VN]',
'-YAn[Derivation=PresNom]'] for the label
')([VN]-YAn[Derivation=PresNom]').
Args:
label: complex input label of a morphotactics FST rewrite rule.
Returns:
FST symbols that are used in the complex input label of the rewrite rule.
For labels that do not represent epsilon, FST symbols are returned in the
same order as they appear in the complex input label, and duplicate symbols
are preserved.
"""
if label == common.EPSILON:
return [label]
# We add a state transition arc for each digit of a multi-digit number.
if "[" not in label:
return list(label)
# We add a state transition arc for each inflectional or derivational
# morpheme, inflectional group boundary, and proper noun analysis tag.
return _SYMBOLS_REGEX.findall(label)
|
8298a242701aa586ba50ffa6059a8e33e4cf01f3
| 30,198 |
def preset_select_func(area, preset):
"""Create preset selection packet."""
return DynetPacket.select_area_preset_packet(area, preset, 0)
|
9ae5e162cb32c3f3b0ab1d07e1d5cd2961e1e91e
| 30,199 |
def init_model(model_name,
network_config,
classes,
word_dict,
init_weight=None,
log_path=None,
learning_rate=0.0001,
optimizer='adam',
momentum=0.9,
weight_decay=0,
metric_threshold=0.5,
monitor_metrics=None,
silent=False,
save_k_predictions=0):
"""Initialize a `Model` class for initializing and training a neural network.
Args:
model_name (str): Model to be used such as KimCNN.
network_config (dict): Configuration for defining the network.
classes(list): List of class names.
word_dict(torchtext.vocab.Vocab): A vocab object which maps tokens to indices.
init_weight (str): Weight initialization method from `torch.nn.init`.
For example, the `init_weight` of `torch.nn.init.kaiming_uniform_`
is `kaiming_uniform`. Defaults to None.
log_path (str): Path to a directory holding the log files and models.
learning_rate (float, optional): Learning rate for optimizer. Defaults to 0.0001.
optimizer (str, optional): Optimizer name (i.e., sgd, adam, or adamw). Defaults to 'adam'.
momentum (float, optional): Momentum factor for SGD only. Defaults to 0.9.
weight_decay (int, optional): Weight decay factor. Defaults to 0.
metric_threshold (float, optional): Threshold to monitor for metrics. Defaults to 0.5.
monitor_metrics (list, optional): Metrics to monitor while validating. Defaults to None.
silent (bool, optional): Enable silent mode. Defaults to False.
save_k_predictions (int, optional): Save top k predictions on test set. Defaults to 0.
Returns:
Model: A class that implements `MultiLabelModel` for initializing and training a neural network.
"""
network = getattr(networks, model_name)(
embed_vecs=word_dict.vectors,
num_classes=len(classes),
**dict(network_config)
)
if init_weight is not None:
init_weight = networks.get_init_weight_func(
init_weight=init_weight)
network.apply(init_weight)
model = Model(
classes=classes,
word_dict=word_dict,
network=network,
log_path=log_path,
learning_rate=learning_rate,
optimizer=optimizer,
momentum=momentum,
weight_decay=weight_decay,
metric_threshold=metric_threshold,
monitor_metrics=monitor_metrics,
silent=silent,
save_k_predictions=save_k_predictions
)
return model
|
c3fae65c54d12b30ef34b79fbe600d60d7837042
| 30,200 |
import io
def boxed_img(img_data):
"""return base64 encoded boxed image."""
if isinstance(img_data, str):
img_path = img_data
else:
img_path = img_buffer(img_data)
img, result = obj_detect(img_path)
boxed_np_image = draw_boxes(
img.numpy(),
boxes=result["detection_boxes"],
classes=result["detection_class_entities"],
scores=result["detection_scores"],
)
result = Image.fromarray(boxed_np_image, "RGB")
binary_buffer = io.BytesIO()
result.save(binary_buffer, format="JPEG")
return b2a_base64(binary_buffer.getvalue())
|
b5dbfaf2297b2faff776550d252b3010350c09db
| 30,201 |
def exec(statement, table_name=None, commit=True):
""" execute a SQL statement in the database and commit the transaction.
If a table_name is passed in, then the query will be checked for proper completion, returning a boolean """
conn = connection()
cursor = conn.cursor(buffered=True)
if table_name is not None:
cursor.execute(f"SELECT COUNT(message_id) FROM {table_name}")
num_before = cursor.fetchall()
cursor.execute(statement)
display_keywords = ["SELECT", "DESCRIBE"]
is_displayable = statement.split(' ')[0].upper() in display_keywords # Adi's special code
if not is_displayable:
if commit:
conn.commit()
if table_name is not None:
statement_kind = statement.split(' ')[0].upper()
cursor.execute(f"SELECT COUNT(message_id) FROM {table_name}")
num_after = cursor.fetchall()
if statement_kind == "INSERT":
return num_before < num_after
elif statement_kind == "DELETE":
return num_before > num_after
elif statement_kind == "UPDATE":
return num_before == num_after
return cursor.fetchall()
|
c62ec09961cd4f284497f88d91f3b64100eef1ff
| 30,202 |
def isvalid(gridstr, x, y, test_value):
""" Check if it would be legal to place a in pos x,y """
sq_indexes = ((0, 1, 2), (3, 4, 5), (6, 7, 8))
group_indexes = [(x_ind, y_ind)
for x_ind in sq_indexes[x // 3]
for y_ind in sq_indexes[y // 3]]
for index in range(9):
# Check squares in the same column
if gridstr[x + 9 * index] == test_value:
return False
# Check the row
if gridstr[index + 9 * y] == test_value:
return False
# Check the group
x_index, y_index = group_indexes[index]
if gridstr[x_index + 9 * y_index] == test_value:
return False
return True
|
a8481bbb18409814e54ad669bbb14b71e32b1139
| 30,203 |
def GilmoreEick(R0_in, v0_in, Requ, \
t_start, t_end, t_step, \
T_l=20.):
"""Run the calculation (Gilmore + Eick)
with the given initial conditions and parameters.
returns: t, R, R_dot, pg, T, i
"""
global T
global T_gas_0, sc_pvapour
T_gas_0 = T0_Kelvin + T_l # initial gas temperature inside bubble [K]
# Compute vapour pressure using liquid temperature T_l
pvapour_in = get_vapour_pressure(T_l)
print("p_v = {0} Pa".format(pvapour_in))
# scale initial conditions and parameters
set_scale(Requ)
# parameters
scale_parameters(pvapour_in)
#print pvapour_in, sc_pvapour
# initial conditions
scale_initconds(R0_in, v0_in, Requ, pvapour_in)
#print scale_R, R0
# solve system of ODEs
T = np.zeros(0)
t_data = create_tdata(t_start, t_end, t_step)
xsol = odeint(GilmoreEick_deriv, [R0, v0, p0], t_data,
#full_output = True,
)
R = xsol[:, 0] * scale_R
R_dot = xsol[:, 1] * scale_U
pg = xsol[:, 2] * scale_p
t = t_data * scale_t
T = np.reshape(T, (-1, 2))
# np.savetxt('GilmoreEick_result.dat', (t / 1e-6, R / 1e-6, R_dot, pg), \
# delimiter = '\t')
# np.savetxt('GilmoreEick_Temp.dat', (T[:, 0], T[:, 1]))
return (t, R, R_dot, pg, T)
|
3cd06254a67b5ba76674aa59dff190e99e5e6075
| 30,205 |
def gamma(x):
"""
element-wise gamma function
"""
return Gamma().forward(x)
|
73f53c8010974e171ee7a11bdc2ea705dd3c1eb5
| 30,206 |
def build_model_with_precision(pp, mm, ii, tt, *args, **kwargs):
"""Build model with its inputs/params for a specified precision context.
This is highly specific to this codebase, and not intended to be general API.
Advanced users only. DO NOT use it if you don't know what it does.
NOTE: short argument names are intended to avoid conflicts with kwargs.
Args:
pp: A string, precision policy name, such as "mixed_float16".
mm: A function, for model builder.
ii: A tensor, for model inputs.
tt: A bool, If true, it is for training; otherwise, it is for eval.
*args: A list of model arguments.
**kwargs: A dict, extra model parameters.
Returns:
the output of mm model.
"""
if pp == 'mixed_bfloat16':
set_precision_policy(pp)
inputs = tf.cast(ii, tf.bfloat16)
with tf.compat.v1.tpu.bfloat16_scope():
outputs = mm(inputs, *args, **kwargs)
set_precision_policy('float32')
elif pp == 'mixed_float16':
set_precision_policy(pp, loss_scale=tt)
inputs = tf.cast(ii, tf.float16)
with float16_scope():
outputs = mm(inputs, *args, **kwargs)
set_precision_policy('float32')
elif not pp or pp == 'float32':
outputs = mm(ii, *args, **kwargs)
else:
raise ValueError('Unknown precision name {}'.format(pp))
# Users are responsible to convert the dtype of all outputs.
return outputs
|
2af975ce06560dc0637da8b8e24b1ca3e9213d65
| 30,207 |
def bisection(a, b, poly, tolerance):
"""
Assume that poly(a) <= 0 and poly(b) >= 0.
Modify a and b so that abs(b-a) < tolerance and poly(b) >= 0 and poly(a) <= 0.
Return (a+b)/2
:param a: poly(a) <= 0
:param b: poly(b) >= 0
:param poly: polynomial coefficients, low order first
:param tolerance: greater than 0
:return: an approximate root of the polynomial
"""
if evaluate(a, poly) > 0:
raise Exception("poly(a) must be <= 0")
if evaluate(b, poly) < 0:
raise Exception("poly(b) must be >= 0")
mid = (a + b) / 2
if abs(b - a) <= tolerance:
return mid
else:
val = evaluate(mid, poly)
if val <= 0:
return bisection(mid, b, poly, tolerance)
else:
return bisection(a, mid, poly, tolerance)
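# Usage sketch: `evaluate` is a helper from the original module that is not shown here,
# so a plausible stand-in (low-order coefficients first, as the docstring states) is
# defined to make the call runnable.
def evaluate(x, poly):
    return sum(c * x ** i for i, c in enumerate(poly))

print(bisection(0, 2, [-2, 0, 1], 1e-9))  # -> ~1.4142135 (root of x**2 - 2)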
|
e4068887f41078e00006905512e42645a6bc5405
| 30,208 |
from typing import Iterable
def get_roc_with_band(quotes: Iterable[Quote], lookback_periods: int, ema_periods: int, std_dev_periods: int):
"""Get ROCWB calculated.
Rate of Change with Bands (ROCWB) is the percent change of Close price
over a lookback window with standard deviation bands.
Parameters:
`quotes` : Iterable[Quote]
Historical price quotes.
`lookback_periods` : int
Number of periods in the lookback window.
`ema_periods` : int
Number of periods for the ROC EMA line.
`std_dev_periods` : int
Number of periods the standard deviation for upper/lower band lines.
Returns:
`ROCWBResults[ROCWBResult]`
ROCWBResults is list of ROCWBResult with providing useful helper methods.
See more:
- [ROCWB Reference](https://daveskender.github.io/Stock.Indicators.Python/indicators/Roc/#content)
- [Helper Methods](https://daveskender.github.io/Stock.Indicators.Python/utilities/#content)
"""
results = CsIndicator.GetRocWb[Quote](CsList(Quote, quotes), lookback_periods, ema_periods, std_dev_periods)
return ROCWBResults(results, ROCWBResult)
|
ffed73567f17645fb35e257a69c3ab64002483c4
| 30,209 |
from bs4 import BeautifulSoup
def get_poetry_page_links(html):
"""Read in the html from a poetry page and return an array of links"""
clean_links = []
html_soup = BeautifulSoup(html, 'html.parser')
# remove the table of contents
try:
[e.extract() for e in html_soup.find("div", {"id": "toc"})]
except:
pass
# parse out the links
for list_type in ["ol", "ul"]:
ol_elements = html_soup.findAll(list_type)
for ol_element in ol_elements:
links = ol_element.findAll("a")
for link in links:
# links with .new class are not written, so skip them
if link.has_attr("class"):
if "new" in link["class"]:
continue
clean_links.append(link["href"])
return clean_links
|
bfa0dc5aa4e63b8aeb42f785dd2afb67e8816474
| 30,210 |
def _exponential_rv(t, tau, T):
"""Generate truncated exponential random variable from uniform [0, 1) random variable.
Parameters
----------
t : array-like
Uniform [0, 1) random variable.
tau : array-like
Lifetime.
T : array-like
Truncation parameter.
"""
return -tau * np.log(1 - t * (1 - np.exp(-T / tau)))
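# Usage sketch (assumes numpy is imported as np, which the function body requires):
# uniform [0, 1) draws map to exponential samples truncated to [0, T).
import numpy as np
u = np.random.default_rng(0).random(5)
print(_exponential_rv(u, tau=2.0, T=10.0))  # five samples, all below 10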
|
356e542f83b2a1b78a11b1dcf65c21dcdd2803a6
| 30,212 |
from typing import Any
import signal
def apply_noise_filtering(
fully_calibrated_gmr: NDArray[(2, Any), int],
scipy_filter_sos_coefficients: NDArray[(Any, Any), float],
) -> NDArray[(2, Any), int]:
"""Apply the result of an empty plate calibration.
Actual empty plate calibration will be performed once information obtained from Jason.
Args:
fully_calibrated_gmr: an 2D array of Time and GMR readings after applying the Empty Plate calibration.
scipy_filter_sos_coefficients: The 'second order system' coefficient array that scipy filters generate when created
Returns:
A 2D array of the Time and GMR readings after empty plate calibration. Data will be rounded to integers if calibration results in slight decimal behavior.
"""
time_readings = fully_calibrated_gmr[0, :]
gmr_readings = fully_calibrated_gmr[1, :]
float_array = signal.sosfiltfilt(scipy_filter_sos_coefficients, gmr_readings)
int_array = np.rint(float_array).astype(np.int32)
filtered_data: NDArray[(2, Any), int] = np.vstack((time_readings, int_array))
return filtered_data
|
f44331ce6286f8c1cbe08d243ee6c52e06d52c11
| 30,213 |
from typing import Any
import io
def read_file(filename: Text, encoding: Text = "utf-8") -> Any:
"""Read text from a file."""
with io.open(filename, encoding=encoding) as f:
return f.read()
|
de51cb0edd53dbddb3458adbceafddcb5fc3d6e0
| 30,214 |
def pattern_note_to_midi_note(pattern_note_byte, octave_offset=0):
"""
Convert pattern note byte value into midi note value
:param pattern_note_byte: GT note value
:type pattern_note_byte: int
:param octave_offset: Should always be zero unless some weird midi offset exists
:type octave_offset: int
:return: Midi note number
:rtype: int
"""
midi_note = pattern_note_byte - (GT_NOTE_OFFSET - constants.C0_MIDI_NUM) + (octave_offset * 12)
if not (0 <= midi_note < 128):
raise ChiptuneSAKValueError(f"Error: illegal midi note value {midi_note} from gt {pattern_note_byte}")
return midi_note
|
372f15e9b8b94ac6f37900b85f7d63d75b256669
| 30,215 |
def FileHole(thisMesh, topologyEdgeIndex, multiple=False):
"""
Given a starting "naked" edge index, this function attempts to determine a "hole"
by chaining additional naked edges together until if returns to the start index.
Then it triangulates the closed polygon and either adds the faces to the mesh.
Args:
topologyEdgeIndex (int): Starting naked edge index.
Returns:
bool: True if successful, False otherwise.
"""
url = "rhino/geometry/mesh/filehole-mesh_int"
if multiple: url += "?multiple=true"
args = [thisMesh, topologyEdgeIndex]
if multiple: args = list(zip(thisMesh, topologyEdgeIndex))
response = Util.ComputeFetch(url, args)
return response
|
1a3da0c9c96147e5a18a228cc01558b4d9daca68
| 30,216 |
def wide_to_narrow(X, Y, bins):
"""
Convert data from predicting a Y(Zbin,Cbin) as a vector to
individual predictions of Y(Zbin,Cbin) given a Zbin and Cbin label
in the input data.
"""
varname = "variable"
valname = "Y"
x_vars = get_xnames()
dev = pd.concat([X, Y], axis=1)
left = pd.melt(
dev.reset_index(),
id_vars=x_vars + ["index"],
value_vars=Y.columns,
var_name=varname,
value_name=valname,
)
right = pd.concat([bins, pd.DataFrame(Y.columns, columns=[varname])], axis=1)
narrow = pd.merge(left, right, on=[varname]).set_index(["index", varname])
narrow = narrow.reindex(X.index, level="index")
return narrow.drop(columns=[valname]), narrow[valname]
|
3543e2428f4c38eb668428eb68cc1ccdea9fcb0f
| 30,217 |
def get_coords(p):
"""Function to get coordinates of N, Ca, C.
It also calculates Cb positions from those.
"""
nres = pyrosetta.rosetta.core.pose.nres_protein(p)
# three anchor atoms to build local reference frame
N = np.stack([np.array(p.residue(i).atom('N').xyz()) for i in range(1,nres+1)])
Ca = np.stack([np.array(p.residue(i).atom('CA').xyz()) for i in range(1,nres+1)])
C = np.stack([np.array(p.residue(i).atom('C').xyz()) for i in range(1,nres+1)])
# recreate Cb given N,Ca,C
ca = -0.58273431
cb = 0.56802827
cc = -0.54067466
b = Ca - N
c = C - Ca
a = np.cross(b, c)
Cb = ca * a + cb * b + cc * c
return N, Ca, C, Ca+Cb
|
d87eed8793536b7858ad9aa870c9a5c086b6c8d8
| 30,218 |
import re
import statistics
import string
import copy
import random
import nltk
from nltk.corpus import wordnet as wn
def expand_dataset(sentences_file, scores_file, category_getter_fn):
""" Expands Stanford Sentiment Treebank dataset file by substituting nouns, verbs and adjectives in each sentence with synonyms
retrieved from WordNet. Processes into a set of of sentence strings and a set of scores.
"""
space_regex = re.compile(r"^.[\ ]*")
regex = re.compile(r"\d+")
data_x = []
data_y = []
entries_count = 0
progress = 0
for score_line, sentence_line in zip(scores_file.readlines(), sentences_file.readlines()):
if progress % 1000 == 0:
print(f"Finished processing line {progress}. So far there are {entries_count} lines.")
scores = [int(score) for score in score_line.split(",")[1:]]
score = statistics.mean(scores)
score = (score - 1) / 24
category = category_getter_fn(score)
sentence = sentence_line.split(",")[1].translate(str.maketrans('','', string.punctuation)).lower().strip("\n").strip()
sentence = regex.sub('0', sentence)
if space_regex.match(sentence) == None:
progress += 1
continue
tokenized = nltk.word_tokenize(sentence)
tagged = nltk.pos_tag(tokenized)
data_x.append(" ".join(tokenized))
data_y.append(category)
word_index = 0
for tag in tagged:
alternatives = set()
if tag[1].startswith("N") or tag[1].startswith("V") or tag[1].startswith('J'):
synonyms = wn.synsets(tag[0])
for synonym in synonyms:
if synonym.pos() == 'v' and tag[1].startswith("V"):
alternatives.add(synonym.lemmas()[0].name())
elif synonym.pos() == 'n' and tag[1].startswith("N"):
alternatives.add(synonym.lemmas()[0].name())
elif synonym.pos() == 'j' and tag[1].startswith('J'):
alternatives.add(synonym.lemmas()[0].name())
alternative_sentences = set()
skip_first = 0
for alternative in alternatives:
if skip_first == 0:
skip_first += 1
continue
alt_sentence = copy.deepcopy(tokenized)
alt_sentence[word_index] = alternative
alternative_sentences.add(" ".join(alt_sentence))
if len(alternative_sentences) > 0:
for alt_sentence in alternative_sentences:
data_x.append(alt_sentence)
data_y.append(category)
word_index += 1
entries_count = len(data_x)
progress += 1
zipped = list(zip(data_x, data_y))
random.shuffle(zipped)
data_x, data_y = zip(*zipped)
return data_x, data_y
|
9fcfd01769a8330ffd2fe533ff66b699cef637aa
| 30,219 |
import numpy as np
from skimage import color, io
def load_as_hsv(fname: str) -> np.ndarray:
"""
Load a file into HSV colour space.
Takes a file path and opens the image then converts to HSV colour space.
returns numpy array dtype float 64. Strips the alpha (fourth) channel if it exists.
Input must be colour image. One channel images will be rejected.
:param: fname str -- path to the image
:return: np.ndarray -- numpy array containing image
"""
img = io.imread(fname)
if img.shape[-1] == 4:
img = img[:,:,:3]
assert len(img.shape) == 3, "Image at: {} does not appear to be a 3 channel colour image.".format(fname)
hsv_img = color.rgb2hsv(img)
return hsv_img
|
c71ed010bcce47f756f5d539f73100257dcae2c0
| 30,220 |
def get_databases ():
"""
Returns a list of all Database objects stored.
"""
return _dbobjects[:]
|
b5c3d84fc4a58b0a78a3f8f2c4a5a4974a18e337
| 30,221 |
def GenomicRegions_FilterToOverlapping(
new_name, gr_a, other_grs, summit_annotator=None, sheet_name="Overlaps"
):
"""Filter to just those that overlap one in *all* other_grs.
Note that filtering does not change the coordinates, it only filters,
non annotator additional rows are kept, annotators are recalculated.
"""
if isinstance(other_grs, GenomicRegions):
other_grs = [other_grs]
verify_same_genome([gr_a] + other_grs)
def filter_func(df):
keep = np.ones((len(df)), dtype=bool)
for ii, row in enumerate(df[["chr", "start", "stop"]].itertuples()):
for gr in other_grs:
keep[ii] &= gr.has_overlapping(row.chr, row.start, row.stop)
return keep
if gr_a.load_strategy.build_deps:
deps = [other_gr.load() for other_gr in other_grs] + [
ppg.ParameterInvariant(
"GenomicRegions_%s_parents" % new_name,
(gr_a.name, [other_gr.name for other_gr in other_grs]),
) # so if you swap out the gr, it's detected...
]
else:
for other_gr in other_grs:
other_gr.load()
deps = []
return gr_a.filter(
new_name,
df_filter_function=filter_func,
dependencies=deps,
summit_annotator=summit_annotator,
sheet_name=sheet_name,
)
|
192443e04d5bb3be91574e3494248020cf28be37
| 30,222 |
def findPowerPlant(mirror, name):
"""Return power plant agent, if it exists"""
if name in mirror.ppDict:
return mirror.ppDict[name]
else:
print("*** Power Plant '%s' not found." % name)
return None
|
35e432c7ab6dbe57488e2d7f84c3b6d077f2079a
| 30,224 |
import struct
from ctypes import create_string_buffer
def create_cruise_adjust_msg(spdCtrlLvr_stat, turnIndLvr_Stat, real_steering_wheel_stalk):
"""Creates a CAN message from the cruise control stalk.
Simulates pressing the cruise control stalk (STW_ACTN_RQ.SpdCtrlLvr_Stat)
and turn signal stalk (STW_ACTN_RQ.TurnIndLvr_Stat)
It is probably best not to flood these messages so that the real
stalk works normally.
Args:
spdCtrlLvr_stat: Int value of dbc entry STW_ACTN_RQ.SpdCtrlLvr_Stat
(allowing us to simulate pressing the cruise stalk up or down)
None means no change
TurnIndLvr_Stat: Int value of dbc entry STW_ACTN_RQ.TurnIndLvr_Stat
(allowing us to simulate pressing the turn signal up or down)
None means no change
real_steering_wheel_stalk: Previous STW_ACTN_RQ message sent by the real
stalk. When sending these artificial messages for cruise control, we want
to mimic whatever windshield wiper and highbeam settings the car is
currently sending.
"""
msg_id = 0x045 # 69 in hex, STW_ACTN_RQ
msg_len = 8
msg = create_string_buffer(msg_len)
# Do not send messages that conflict with the driver's actual actions on the
# steering wheel stalk. To ensure this, copy all the fields you can from the
# real cruise stalk message.
fake_stalk = real_steering_wheel_stalk.copy()
if spdCtrlLvr_stat is not None:
# if accelerating, override VSL_Enbl_Rq to 1.
if spdCtrlLvr_stat in [4, 16]:
fake_stalk['VSL_Enbl_Rq'] = 1
fake_stalk['SpdCtrlLvr_Stat'] = spdCtrlLvr_stat
if turnIndLvr_Stat is not None:
fake_stalk['TurnIndLvr_Stat'] = turnIndLvr_Stat
# message count should be 1 more than the previous (and loop after 16)
fake_stalk['MC_STW_ACTN_RQ'] = (int(round(fake_stalk['MC_STW_ACTN_RQ'])) + 1) % 16
# CRC should initially be 0 before a new one is calculated.
fake_stalk['CRC_STW_ACTN_RQ'] = 0
# Set the first byte, containing cruise control
struct.pack_into('B', msg, 0,
(fake_stalk['SpdCtrlLvr_Stat']) +
(int(round(fake_stalk['VSL_Enbl_Rq'])) << 6))
# Set the 2nd byte, containing DTR_Dist_Rq
struct.pack_into('B', msg, 1, fake_stalk['DTR_Dist_Rq'])
# Set the 3rd byte, containing turn indicator, highbeams, and wiper wash
struct.pack_into('B', msg, 2,
int(round(fake_stalk['TurnIndLvr_Stat'])) +
(int(round(fake_stalk['HiBmLvr_Stat'])) << 2) +
(int(round(fake_stalk['WprWashSw_Psd'])) << 4) +
(int(round(fake_stalk['WprWash_R_Sw_Posn_V2'])) << 6)
)
# Set the 7th byte, containing the wipers and message counter.
struct.pack_into('B', msg, 6,
int(round(fake_stalk['WprSw6Posn'])) +
(fake_stalk['MC_STW_ACTN_RQ'] << 4))
# Finally, set the CRC for the message. Must be calculated last!
fake_stalk['CRC_STW_ACTN_RQ'] = add_tesla_crc(msg=msg, msg_len=7)
struct.pack_into('B', msg, msg_len-1, fake_stalk['CRC_STW_ACTN_RQ'])
return [msg_id, 0, msg.raw, 0]
|
145f5841cebf1db7a80faa36225e9abc92b9ea96
| 30,225 |
import logging
def get_build_history(build_ids):
"""Returns build object for the last finished build of project."""
build_getter = BuildGetter()
history = []
last_successful_build = None
for build_id in reversed(build_ids):
project_build = build_getter.get_build(build_id)
if project_build['status'] not in ('SUCCESS', 'FAILURE', 'TIMEOUT'):
continue
if (not last_successful_build and is_build_successful(project_build)):
last_successful_build = {
'build_id': build_id,
'finish_time': project_build['finishTime'],
}
if not upload_log(build_id):
log_name = f'log-{build_id}'
logging.error('Missing build log file %s', log_name)
continue
history.append({
'build_id': build_id,
'finish_time': project_build['finishTime'],
'success': is_build_successful(project_build)
})
if len(history) == MAX_BUILD_LOGS:
break
project = {'history': history}
if last_successful_build:
project['last_successful_build'] = last_successful_build
return project
|
faf833ecd6250d0bd90b3477d99502f1cc7a7597
| 30,226 |
def point_translate(point_in, vector_in):
""" Translates the input points using the input vector.
:param point_in: input point
:type point_in: list, tuple
:param vector_in: input vector
:type vector_in: list, tuple
:return: translated point
:rtype: list
"""
try:
if point_in is None or len(point_in) == 0 or vector_in is None or len(vector_in) == 0:
raise ValueError("Input arguments cannot be empty")
except TypeError as e:
print("An error occurred: {}".format(e.args[-1]))
raise TypeError("Input must be a list or tuple")
except Exception:
raise
# Translate the point using the input vector
point_out = [coord + comp for coord, comp in zip(point_in, vector_in)]
return point_out
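# Simple call with plain lists: each coordinate is shifted by the matching vector component.
print(point_translate([1.0, 2.0, 3.0], [0.5, 0.5, -1.0]))  # -> [1.5, 2.5, 2.0]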
|
3b5346062c47f45736d38dce0219f0543b54da6e
| 30,227 |
def pcmh_1_1b__1_2_3_4():
"""Clinical advice (telephone encounters)"""
telephone_encounter_table_url = URL('init', 'word', 'telephone_log.doc',
vars=dict(type="meeting", **request.get_vars),
hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["app_id"])
telephone_encounter_log_usage = MultiQNA(
1, 1,
True,
'telephone_encounter_log_usage',
"Does {practice} use <a href='{url}'>this telephone encounter log</a> (or an equivalent system) to track "
"<b>telephone encounters</b>?"
.format(practice=APP.practice_name, url=telephone_encounter_table_url)
)
telephone_encounter_log_usage.set_template("{please_choose}")
telephone_encounter_log_usage.add_warning(
getattr(telephone_encounter_log_usage.row, "please_choose", None) in NOT_YES,
"{practice} must keep a log of all telephone encounters and document advice given to patents into the patient "
"record (refill requests alone do not satisfy the PCMH standard).{carousel}".format(
practice=APP.practice_name,
carousel=
CAROUSEL(
"telephone_encounter_log",
[("Document time and date information",
"Immediately after a telephone call, record the date and approximate time of when the call was "
"received and when the call was addressed. If there are no telephone encounters for a given day, "
"write <i>No Clinical Advice</i>. We recommend using this <a href='{url}'>this telephone encounter "
"log</a> (or an equivalent system) to track telephone encounters.".format(
url=telephone_encounter_table_url),
URL('static', _get_emr_image_rel_url('telephone_encounter_log.png')))]
)
)
)
telephone_encounter_in_record = MultiQNA(
1, 1,
True,
'telephone_encounter_in_record',
"Does {practice} document telephone advice given to patients into the patient record?"
.format(practice=APP.practice_name, url=telephone_encounter_table_url)
)
telephone_encounter_in_record.add_warning(
getattr(telephone_encounter_in_record.row, "please_choose", None) in NOT_YES,
"{practice} must document <b>advice</b> given to patents into the patient "
"record (refill requests alone do not satisfy the PCMH standard).{carousel}".format(
practice=APP.practice_name,
carousel=
CAROUSEL(
"telephone_encounter",
[("1. Create Encounter",
"In the patient's chart, create a new encounter.",
URL('static', _get_emr_image_rel_url('telephone_encounter_create2.png'))),
("2. Describe Encounter",
"Describe when the call was received, when the call was ended/addressed, and a summary of the call. "
"Note if patient expresses understanding of the advice given.",
URL('static', _get_emr_image_rel_url('telephone_encounter_ex.png')))]
)
)
)
telephone_encounter_in_record.set_template("{please_choose}")
telephone_encounter_log = MultiQNA(
1, float("inf"),
getattr(telephone_encounter_log_usage.row, "please_choose", None) == "Yes",
'telephone_encounter_log',
"Please upload logs consisting of at least 7 consecutive "
"business days' worth of telephone encounters. Only include incoming encounters seeking advice (refill "
"requests do not meet the standard). Be sure to document the advice and time/date information into the "
"corresponding patient record."
)
telephone_encounter_log.set_template("{choose_file}")
# telephone encounter examples
# completed_log_file_description = getattr(telephone_encounter_log.row, "file_description", "")
completed_log_file_name = getattr(telephone_encounter_log.rows.last(), "choose_file", "")
completed_log_url = URL("init", request.controller, "download", args=[completed_log_file_name],
vars=dict(**request.get_vars))
temp = "Please provide <b>%s</b> patient%s <a href='{url}'>from your telephone encounter log</a> where the " \
"advice was documented into the patient record <b>%s business hours</b>".format(url=completed_log_url)
telephone_encounter_during_hours_example = MultiQNA(
2, 2, telephone_encounter_log.rows,
'telephone_encounter_during_hours_example',
temp % (2, "s", "during")
)
telephone_encounter_during_hours_example\
.set_template("{patient_name}: {patient_dob}<br>Serviced on: {service_date} {screenshot}")
telephone_encounter_after_hours_example = MultiQNA(
1, 1, telephone_encounter_log.rows,
'telephone_encounter_after_hours_example',
temp % (1, "", "after")
)
telephone_encounter_after_hours_example \
.set_template("{patient_name}: {patient_dob}<br>Serviced on: {service_date} {screenshot}")
return dict(documents=[
dict(
description="Telephone Encounter Log",
url=telephone_encounter_table_url,
permissions=["IS_TEAM"]
),
])
|
21d8c9a0a05aef963d858eca572abe4d23566a52
| 30,228 |
import math
def distance(x1, y1, x2, y2):
"""
l2 distance
"""
return math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2))
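# Sanity check of the Euclidean distance with the 3-4-5 triangle.
print(distance(0, 0, 3, 4))  # -> 5.0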
|
709c32bb9bad08d0413c7de80019514f75fc739d
| 30,229 |
def slice_signal(signal, window_size, stride=0.5):
""" Return windows of the given signal by sweeping in stride fractions
of window
"""
assert signal.ndim == 1, signal.ndim
n_samples = signal.shape[0]
offset = int(window_size * stride)
slices = []
for beg_i, end_i in zip(range(0, n_samples, offset),
range(window_size, n_samples + offset,
offset)):
if end_i - beg_i < window_size:
break
slice_ = signal[beg_i:end_i]
if slice_.shape[0] == window_size:
slices.append(slice_)
return np.array(slices, dtype=np.int32)
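# Usage sketch (assumes numpy is imported as np, as the body requires): a 10-sample
# signal with a window of 4 and 50% stride yields windows starting every 2 samples.
import numpy as np
print(slice_signal(np.arange(10), window_size=4, stride=0.5))
# -> [[0 1 2 3]
#     [2 3 4 5]
#     [4 5 6 7]
#     [6 7 8 9]]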
|
4341fc45df28dd336001252cc8618018257cd257
| 30,230 |
def get_railway() -> User:
"""鉄道用ユーザ情報を取得する
Raises:
DynamoDBError: 鉄道用ユーザ情報が登録されていない
Returns:
User: 鉄道用ユーザ情報
"""
railway_user = get_user("railway")
if railway_user is None:
raise DynamoDBError("The railway user record has not been registered.")
return railway_user
|
23315e567f8136510a8ee821c3167f53aea9556c
| 30,231 |
def check_threshold(service, config_high_threshold, config_low_threshold, curr_util):
""" Checks whether Utilization crossed discrete threshold
Args:
service: Name of the micro/macroservice
config_high_threshold: Upper limit threshold to utilization set in config file
config_low_threshold: Lower limit threshold to utilization set in config file
curr_util: value of the current utilization
Returns:
String "High" if upper limit crossed
String "Low" if lower limit crossed
String "Normal" if none crossed
"""
if float(curr_util) > float(config_high_threshold):
return "High"
elif float(curr_util) < float(config_low_threshold):
return "Low"
else:
return "Normal"
|
80bf8ab4f5b2bbac35df7c48764114e213fba580
| 30,232 |
import requests
import json
def is_human(captcha_response):
""" Validating recaptcha response from google server.
Returns True captcha test passed for the submitted form
else returns False.
"""
secret = RECAPTCHA_KEYS["secret_key"]
if secret != "":
payload = {'response':captcha_response, 'secret':secret}
response = requests.post("https://www.google.com/recaptcha/api/siteverify", payload)
response_text = json.loads(response.text)
return response_text['success']
|
547eca43bf9994539f2e95b4605df24432bf3998
| 30,233 |
def ols_data():
"""
draper and smith p.8
"""
xs = [35.3, 29.7, 30.8, 58.8, 61.4, 71.3, 74.4, 76.7, 70.7, 57.5,
46.4, 28.9, 28.1, 39.1, 46.8, 48.5, 59.3, 70, 70, 74.5, 72.1,
58.1, 44.6, 33.4, 28.6]
ys = [10.98, 11.13, 12.51, 8.4, 9.27, 8.73, 6.36, 8.50,
7.82, 9.14, 8.24, 12.19, 11.88, 9.57, 10.94, 9.58,
10.09, 8.11, 6.83, 8.88, 7.68, 8.47, 8.86, 10.36, 11.08]
# self.Xk = 28.6
# self.ypred_k = 0.3091
solution = {'slope': -0.0798,
'y_intercept': 13.623,
'n': len(xs),
'pred_x': 28.6,
'pred_error': 0.309}
return xs, ys, solution
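# The documented solution can be cross-checked with an ordinary least-squares fit
# (numpy is assumed only for this check; the function itself has no dependencies).
import numpy as np
xs, ys, solution = ols_data()
slope, intercept = np.polyfit(xs, ys, 1)
print(round(slope, 4), round(intercept, 3))  # -> roughly -0.0798 13.623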
|
d741195075a51d1485c9f98031ca405cadf1db93
| 30,235 |
def get_as_type(obj):
"""Find the name of ActionScript class mapped to the class of given python object.
If the mapping for the given object class is not found, return the class name of the object."""
type = obj.__module__ + '.' + obj.__class__.__name__
if class_mappings:
for as_class, py_class in class_mappings.items():
if type == py_class:
return as_class
return type
|
83d5019cb9e54a6ad3d8e4e926edb285953ea53d
| 30,236 |
def svn_auth_get_ssl_client_cert_pw_file_provider2(*args):
"""
svn_auth_get_ssl_client_cert_pw_file_provider2(svn_auth_plaintext_passphrase_prompt_func_t plaintext_passphrase_prompt_func,
void prompt_baton,
apr_pool_t pool)
"""
return _core.svn_auth_get_ssl_client_cert_pw_file_provider2(*args)
|
585c3b14505a109908fdb109b0fb4c1f5e3df5cd
| 30,237 |
def valid_field(obj, field):
"""Returns ``True`` if given object (BaseDocument subclass or an instance thereof) has given field defined."""
return object.__getattribute__(obj, 'nanomongo').has_field(field)
|
32e662c5c0e666b7455aacdd6809e31cd20017fe
| 30,238 |
def construct_location_name(location: dict) -> str:
"""
Constructs a location name based on the supplied dictionary of elements, ensuring that
they are in the correct format
"""
if location["type"] == "location":
city_name = capwords(location["city"])
if "country" in location:
return f"{city_name},{location['country']}"
else:
return city_name
elif location["type"] == "geocoords":
return f"{location['latitude']},{location['longitude']}"
elif location["type"] == "place_id":
# Even if we have a place_id, if the city & country key is set, we want to return the city
# and country name instead
if "country" in location and "city" in location:
city_name = capwords(location["city"])
return f"{city_name},{location['country']}"
elif "city" in location:
city_name = capwords(location["city"])
return location["city"]
return str(location["place_id"])
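# Examples with hypothetical inputs (`capwords`, used by the function above but not
# imported in this snippet, comes from the standard string module).
from string import capwords
print(construct_location_name({"type": "location", "city": "new york", "country": "US"}))
# -> New York,US
print(construct_location_name({"type": "geocoords", "latitude": 51.5, "longitude": -0.1}))
# -> 51.5,-0.1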
|
d5728bf409a4520f0b8a508797d67cd00e6676d1
| 30,239 |
def anammox(k_anammox, o2s_dn, nh4, no2, o2):
"""Anammox: NO2- + NH4+ -> N2 + 2H2O
k_anammox - velocity of anammox
o2s_dn - half-saturation oxygen inhibitor constant for anammox and denitrification"""
return k_anammox*nh4*no2*hyper_inhibitor(o2s_dn, o2, 1)
|
d65fa90d9aaf83982157811dfb7a30d461da75bd
| 30,240 |
def courbe_vers_c(l, l2, n0,CP): #c,C
"""
B=Bez()
B.co=[l[0],l[1],l[2],l[3],l[4],l[5]]
B.ha=[0,0]
courbes.ITEM[n0].beziers_knot.append(B)
"""
B=Bez()
B.co=[G_move(l[2],0),
G_move(l[3],1),
G_move(l[4],0),
G_move(l[5],1),
G_move(l[0],0),
G_move(l[1],1)]
if len(courbes.ITEM[n0].beziers_knot)==1:
CP=[l[0],l[1]]
courbes.ITEM[n0].Origine=[l[0],l[1]]
if l[-1]=='C':
B.ha=[2,2]
else:
B.ha=[0,0]
courbes.ITEM[n0].beziers_knot.append(B)
if len(l2)>1 and l2[-1] in Actions.keys():
B.co[-2]=G_move(l2[0],0)
B.co[-1]=G_move(l2[1],1)
else:
B.co[-2]=G_move(CP[0],0)
B.co[-1]=G_move(CP[1],1)
return courbes,n0,CP
|
3e3577a90a2fca8d0868c2abf0a96ee73ecdd124
| 30,241 |
def _synthesize_human_beta_vj_background(ts,fn = None, df = None):
"""
_build_vj_background
Parameters
----------
ts: tcrsampler.TCRsampler()
a TCRsampler instance, with gene usage frequencies (ideally computed get_stratified_gene_usage_frequency()
fn: str
file path to MIRA set of TCRs
df : pandas DataFrame
MIRA set of TCRs
Returns
-------
df_vj_bkgd : Pandas DataFrame
A set of background TCRs with the same V and J gene usage as the input set.
These are generated using OLGA (Sethna et al.)
"""
if fn is not None and df is not None:
raise ValueError("_build_vj_background can accept <df> or <fn> arguments but not both")
if fn is not None:
# Load a set set of TCRs.
df_target = pd.read_csv(fn)
if df is not None:
df_target = df.copy()
# Subset columns
df_target = df_target[['v_b_gene','j_b_gene','cdr3_b_aa']]
# Make a gene usage counter
gene_usage_counter = make_gene_usage_counter(df_target) # 2
print("MAKING A V-GENE, J-GENE MATCHED BACKGROUND.")
# Check that sampler is using sample stratified frequencies.
assert ts.v_occur_freq is ts.v_occur_freq_stratified
print("USING STRATIFIED FREQUENCIES.")
# Make a vj matched background.
# Note: <size> argument should be greater than desired, because Olga can return none due to non-productive CDR3s.
df_vj_bkgd = make_vj_matched_background(ts = ts,
gene_usage_counter = gene_usage_counter,
size = 150000,
recomb_type="VDJ",
chain_folder = "human_T_beta",
cols = ['v_b_gene', 'j_b_gene', 'cdr3_b_aa'])
# Sample to get the desired number of TCRs from the v,j matched set
df_vj_bkgd = df_vj_bkgd.sample(100000, random_state = 1).reset_index(drop = True)
print("CALCULATE INVERSE PROBABILITY WEIGHT ADJUSTMENT.")
# Calculate the inverse weighting adjustment
df_vj_bkgd['weights'] = calculate_adjustment(df = df_vj_bkgd, adjcol = "pVJ")
df_vj_bkgd['source'] = "vj_matched"
# Combine
return df_vj_bkgd
|
c4f0126ddeb1f0d3fd2ea05e031316de7c0118b2
| 30,242 |
def swissPairings():
"""Returns a list of pairs of players for the next round of a match.
Args:
tournament: the id number of the tournament played
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
standings = playerStandings()
swiss_pairings = []
for player1, player2 in zip(standings[0::2], standings[1::2]):
swiss_pairings.append((player1[0], player1[1], player2[0], player2[1]))
return swiss_pairings
|
ed55ccffc866a9d7bab93dbf5b0989709a31d287
| 30,243 |
def double_bin_pharmacophore_graph(distance, bins, delta):
""" Assign two bin values to the distance between pharmacophoric points.
Parameters
----------
distance : float
The distance that will be binned.
bins : np.ndarray
Array of bins. It has to be one dimensional and monotonic.
delta : float
The tolerance from which a distance value is considered to belong to
the lower and upper bin. It has to be a value between 0 and 0.5
Returns
-------
2-tuple of int
The two bins assigned to the distance.
"""
for ii in range(bins.shape[0] - 1):
if distance == bins[ii]:
return (bins[ii], bins[ii])
elif distance > bins[ii] and distance < bins[ii + 1]:
if distance - bins[ii] > delta:
return (bins[ii], bins[ii + 1])
else:
return (bins[ii], bins[ii])
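# Sketch with a uniform bin array (numpy assumed for the `bins` argument): 4.6 lies more
# than delta above bin 4.0 and is assigned to both neighbouring bins, while 4.3 stays
# within the tolerance of the lower bin.
import numpy as np
bins = np.arange(0.0, 10.0, 1.0)
print(double_bin_pharmacophore_graph(4.6, bins, delta=0.5))  # -> (4.0, 5.0)
print(double_bin_pharmacophore_graph(4.3, bins, delta=0.5))  # -> (4.0, 4.0)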
|
b7dedf4f31b5cd08c9875139df837a57a8117001
| 30,244 |
def get_help(path):
"""
Context Sensitive Help (currently not implemented).
"""
try:
helpitem = HelpItem.objects.get(link=path)
except HelpItem.DoesNotExist:
helpitem = ""
return { 'helpitem': helpitem }
|
f2fc3599c86e408e3341870539e1572412b5c8f4
| 30,245 |
def _GKEConnectNamespace(kube_client, project_id):
"""Returns the namespace into which to install or update the connect agent.
Connect namespaces are identified by the presence of the hub.gke.io/project
label. If there is one existing namespace with this label in the cluster, its
name is returned; otherwise, a connect agent namespace with the project
number as a suffix is returned. If there are multiple namespaces with the
hub.gke.io/project label, an error is raised.
Args:
kube_client: a KubernetesClient
project_id: A GCP project identifier
Returns:
a string, the namespace
Raises:
exceptions.Error: if there are multiple Connect namespaces in the cluster
"""
selector = '{}={}'.format(CONNECT_RESOURCE_LABEL, project_id)
namespaces = kube_client.NamespacesWithLabelSelector(selector)
if not namespaces:
return 'gke-connect-{}'.format(p_util.GetProjectNumber(project_id))
if len(namespaces) == 1:
return namespaces[0]
raise exceptions.Error(
'Multiple GKE Connect namespaces in cluster: {}'.format(namespaces))
|
3a98c72fac0f0ae297f4fb026368c137779eb5f6
| 30,247 |
def triplet_loss_compute_semihard(feature1, feature2, labels, margin=1.0):
""" triplet loss with semi-hard negative pairs
"""
batch_size = labels.get_shape().as_list()[0]
labels = tf.cast(tf.reshape(labels, [batch_size, 1]), tf.float32)
feature1 = tf.nn.l2_normalize(tf.reshape(feature1, [batch_size, -1]), dim=-1)
feature2 = tf.nn.l2_normalize(tf.reshape(feature2, [batch_size, -1]), dim=-1)
cross_feaD = 1.0 - tf.matmul(feature1, tf.transpose(feature2)) # cosine distance
labelD = pairwise_distance(labels, labels)
label_mask = tf.cast(tf.greater(labelD, 0.5), tf.float32) # 0-similar 1-dissimilar
# num_match = batch_size*batch_size-tf.reduce_sum(tf.reduce_sum(label_mask,0))
cross_feaD_pos = tf.multiply(1.0 - label_mask, cross_feaD)
cross_feaD_neg = tf.multiply(label_mask, cross_feaD)
# haha = tf.concat([cross_feaD_pos,cross_feaD_neg],1)
cross_pos_col = tf.reduce_max(cross_feaD_pos, axis=0, keep_dims=True)
cross_pos_row = tf.reduce_max(cross_feaD_pos, axis=1, keep_dims=True)
semihard_negD_select = find_semihard_exmaple(cross_pos_col, cross_pos_row, cross_feaD_neg)
cross_posD_select = tf.concat([tf.squeeze(cross_pos_col), tf.squeeze(cross_pos_row)], axis=0)
margin = FLAGS.margin # + hist_distance_compute(cross_posD_select, semihard_negD_select)
pos_select_dist = tf.reduce_mean(cross_posD_select)
neg_select_dist = tf.reduce_mean(semihard_negD_select)
loss = tf.reduce_mean(tf.maximum(margin + cross_posD_select - semihard_negD_select, 0.0))
return loss, pos_select_dist, neg_select_dist, margin
|
153cb805c3aa7ec2b87d29ae15a7c91a3874c95f
| 30,249 |
from typing import List
from typing import Type
def get_all_markups() -> List[Type[AbstractMarkup]]:
"""
:returns: list of all markups (both standard and custom ones)
"""
try: # Python 3.10+
entrypoints = entry_points(group="pymarkups")
except TypeError: # Older versions
entrypoints = entry_points()["pymarkups"]
return [entry_point.load() for entry_point in entrypoints]
|
78bcdb402f52b7a6d6e1b78816ba0d215f449e83
| 30,250 |
def esg_route_list(client_session, esg_name):
"""
This function return the configured static routes
:param client_session: An instance of an NsxClient Session
:param esg_name: The name of the ESG of which the routes should be listed
:return: returns a tuple, the first item of the tuple contains a list of 1 tuple with
item 0 containing the routes network,
item 1 containing the next hop IP as string,
item 2 containing the vnic used by the route as string,
item 3 containing the admin distance of the route as string,
item 4 containing the mtu of the route as string
The second item in the tuple contains a dict with all the static routing config details
"""
esg_id, esg_params = get_edge(client_session, esg_name)
if not esg_id:
return False
rtg_cfg = client_session.read('routingConfigStatic', uri_parameters={'edgeId': esg_id})['body']
if not rtg_cfg['staticRouting']['staticRoutes']:
return [()], {}
routes = []
routes_api = client_session.normalize_list_return(rtg_cfg['staticRouting']['staticRoutes']['route'])
for route in routes_api:
if 'vnic' in route.keys():
vnic = route['vnic']
else:
vnic = ''
add_route = (route['network'], route['nextHop'], vnic, route['adminDistance'], route['mtu'])
routes.append(add_route)
return routes, rtg_cfg['staticRouting']['staticRoutes']
|
fc8565aba651dbb452a9cab2d80d774130c08b00
| 30,251 |
def create_instance_profile(profile_name, role_name=None):
""" Creates IAM instance profile
:param profile_name: Name of profile to be created
:param role_name: Name of role to attach to instance profile
:return: API response
"""
try:
create_instance_profile_response = iam_client.create_instance_profile(
InstanceProfileName=profile_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'EntityAlreadyExists':
print('Instance profile already exists ' + profile_name)
return
else:
raise e
if role_name:
iam_client.add_role_to_instance_profile(
InstanceProfileName=profile_name,
RoleName=role_name
)
print('Created instance profile: ' + profile_name)
return create_instance_profile_response['InstanceProfile']
|
0088f5b0c7b0ac35cff12b859bc4c27662761705
| 30,252 |
def is_primary(flag):
"""
:return bool: Returns whether the current record is primary alignment
"""
if flag is None:
raise ValueError("No flag associated with this record")
return not SamFlag.IS_UNMAPPED & flag and not SamFlag.IS_SECONDARY_ALIGNMENT & flag
|
09d6cfae6568bd10315f10f5b19790db07d05b58
| 30,254 |
from typing import Optional
from typing import Set
def extract_years(text: str, default: Optional[str] = None) -> Set[str]:
"""Try to locate year numbers in a string such as 'circa 1990'. This will fail if
any numbers that don't look like years are found in the string, a strong indicator
that a more precise date is encoded (e.g. '1990 Mar 03')."""
years: Set[str] = set()
for match in NUMBERS.finditer(text):
year = match.group()
number = int(year)
if number < 1800 or number > 2100:
if default is not None:
return set([default])
return set()
years.add(year)
return years
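# Behaviour sketch: `NUMBERS` is a module-level regex not included in this snippet, so a
# plausible stand-in that matches runs of digits is defined here to make the calls run.
import re
NUMBERS = re.compile(r"\d+")
print(extract_years("circa 1990"))                   # -> {'1990'}
print(extract_years("1990 Mar 03", default="1990"))  # -> {'1990'} (falls back to default)
print(extract_years("1990 Mar 03"))                  # -> set()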
|
854f62d5c40d3411a7e792c02d7e03db86c68026
| 30,256 |
def is_absolute_url(parsed_url):
""" check if it is an absolute url """
return all([parsed_url.scheme, parsed_url.netloc])
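# The parsed-URL argument is assumed to come from urllib.parse.urlparse (any object with
# .scheme and .netloc attributes behaves the same way).
from urllib.parse import urlparse
print(is_absolute_url(urlparse("https://example.com/page")))  # -> True
print(is_absolute_url(urlparse("/relative/path")))            # -> False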
|
578c1443ec18f9b741cd205763604cba2242ac48
| 30,257 |
import torch
def so3_exp_map(log_rot: torch.Tensor, eps: float = 0.0001) -> torch.Tensor:
"""
Convert a batch of logarithmic representations of rotation matrices `log_rot`
to a batch of 3x3 rotation matrices using Rodrigues formula [1].
In the logarithmic representation, each rotation matrix is represented as
a 3-dimensional vector (`log_rot`) who's l2-norm and direction correspond
to the magnitude of the rotation angle and the axis of rotation respectively.
The conversion has a singularity around `log(R) = 0`
which is handled by clamping controlled with the `eps` argument.
Args:
log_rot: Batch of vectors of shape `(minibatch, 3)`.
eps: A float constant handling the conversion singularity.
Returns:
Batch of rotation matrices of shape `(minibatch, 3, 3)`.
Raises:
ValueError if `log_rot` is of incorrect shape.
[1] https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
"""
return _so3_exp_map(log_rot, eps=eps)[0]
|
e839d6398502e920bbc7841b1a9fe8f48e9cfce9
| 30,258 |
import pkg_resources
def get_example_summary_file():
"""Convenience wrapper to retrieve file path from package data."""
return pkg_resources.resource_filename('tempset', 'data/electric/summary.zip')
|
a6fcb3a1f9c78f07e3a5861a93ac8f5a2983bf93
| 30,259 |
def set_pH(in_smi):
"""Function to set the pH of a molecule
Currently uses OpenBabel to perform protonation (to pH 7.4)
Takes a smiles string
Returns the protonated smiles string"""
# Attempt to use the babel that has been included in the distro
try:
d = sys._MEIPASS
babel_path = os.path.join(d, "babel")
# If not use the linux babel or the windows babel in defined locations
except AttributeError:
if "linux" in sys.platform:
babel_path = "/usr/bin/babel"
# Check for mac babel
elif "darwin" in sys.platform:
babel_path = "/usr/local/bin/babel"
if os.path.isfile(babel_path):
pass
else:
print "PY2APP FIX THIS"
# Find the babel path
else:
sys.stderr.write("USING SYSTEM BABEL")
babel_path = r"C:\Program Files (x86)\OpenBabel-2.3.2\babel.exe"
in_f = tempfile.NamedTemporaryFile("w", delete=False)
in_f.write(in_smi)
in_f.close()
out_f = tempfile.NamedTemporaryFile("w", delete=False)
out_f.close()
my_list = [babel_path, "-ismi", in_f.name, "-p", "7.4", "-osmi", out_f.name]
subprocess.call(my_list, stderr=tempfile.NamedTemporaryFile())
out_smi = open(out_f.name).read().rstrip()
return out_smi
|
f815f661efe5f660260f4625d586b81e2cf14d13
| 30,260 |
import requests
import json
def get_hardware_status(ip, port, username, password) -> dict:
"""Gets CPU memory statuses IOS-XE\n
Cisco-IOS-XE-platform-software-oper:cisco-platform-software/control-processes/control-process"""
###### Future Use
data = {}
try:
uri = f"https://{ip}:{port}/restconf/data/Cisco-IOS-XE-platform-software-oper:cisco-platform-software/control-processes/control-process"
response = requests.get(uri, headers=headers, verify=False, auth=(username, password))
hardware_status = json.loads(response.text)
check_error = _check_api_error(hardware_status)
if check_error:
raise AttributeError
get_keys = dict.fromkeys(hardware_status)
parent_key = list(get_keys.keys())[0]
data = hardware_status[parent_key]
except AttributeError:
pass
return data
|
e1b305871c773a1bbf69e60681a4b2718a7e0dcd
| 30,261 |
def q2_2(df: pd.DataFrame) -> tuple:
"""
Calculates mean and median for V2 of df, returns tuple of (mean, median)
"""
V2 = df["V2"]
return V2.mean(), V2.median()
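# Usage sketch (hypothetical data):
df_example = pd.DataFrame({"V2": [1.0, 2.0, 3.0, 10.0]})
mean_v2, median_v2 = q2_2(df_example)
print(mean_v2, median_v2)  # 4.0 2.5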
|
3e72b2fe71c4afe2e608c80f7aefc529b9681af8
| 30,262 |
def dup_rr_primitive(f, K):
"""Returns content and a primitive polynomial over a ring. """
cont = dup_content(f, K)
if not f or K.is_one(cont):
return cont, f
else:
return cont, dup_exquo_ground(f, cont, K)
|
7c590daabb04baba51675a3681013434baec87a7
| 30,263 |
from uuid import uuid4
from plumbum import local
from plumbum.cmd import convert
from weasyprint import HTML
def mk_png(html: str, folder=None) -> str:
"""Return generated PNG file path"""
folder = (local.path(folder) if folder else local.path('/tmp/ccb_png')) / uuid4()
folder.mkdir()
png = folder / 'code.png'
(
convert['-trim', '-trim', '-', png]
<< HTML(string=html, media_type='screen').write_png(resolution=384)
)()
return png
|
448bbfbfff6184648542af6426f063e6f5adf2e9
| 30,264 |
def entry_breadcrumbs(entry):
"""
Breadcrumbs for an Entry.
"""
date = entry.publication_date
if is_aware(date):
date = localtime(date)
return [year_crumb(date), month_crumb(date),
day_crumb(date), Crumb(entry.title)]
|
204b061b48622c74bb67e7af90b11b2aa93e3cc7
| 30,265 |
def w_desired_supply_line():
"""
Real Name: b'W Desired Supply Line'
Original Eqn: b'W Delivery Delay*W Expected Customer Orders'
Units: b'SKU'
Limits: (None, None)
Type: component
b''
"""
return w_delivery_delay() * w_expected_customer_orders()
|
c71161e4e306bf8aa6d7a1b2ea679a228c5a991c
| 30,266 |
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
|
ce21fb4826fc6626ade140d84195c7db8b616e39
| 30,267 |
import math
import MDAnalysis as mda
def ReadMartiniMolecules(GroFile, First, Last):
    """ Generate the normalized coordinates, name, and vector of the Martini molecule
    Accesses the Martini 3 small-molecules library and reads the parameterized coordinates from it,
    with a future view of automatically generating Martini 3 representations
    from SMILES strings.
    One needs to specify the bead attaching to the main core and the atom furthest away from the
    core, to create the directional vector along which the structure will be placed on the surface of the NP
    core.
    Args:
      GroFile:
        path to the GROMACS .gro file of the ligand
Returns:
Placeholder
Raises:
Placeholder
"""
TransformationList= []
#GroPath = "/home/sang/Desktop/GIT/Martini3-small-molecules/models/gros"
#ItpPath = "/home/sang/Desktop/GIT/Martini3-small-molecules/models/itps/cog-mono"
MartiniUniverse = mda.Universe(GroFile) # Load the Martini gro file in as a universe
ids = [i.name for i in MartiniUniverse.atoms]
Molecule = MartiniUniverse.select_atoms('all')
# In this case, the atoms will be N1 and R3
FirstAtom = Molecule.select_atoms('name {}'.format(First))
LastAtom = Molecule.select_atoms('name {}'.format(Last))
LigandAlignmentVector = (FirstAtom.positions - LastAtom.positions)[0] # Get the alignment vector created from the first and COM
# Loop over the positions
for i,j in enumerate(Molecule.positions):
vector = (j - FirstAtom.positions)[0]
vector[0] = LigandAlignmentVector[0] - vector[0]
vector[1] = LigandAlignmentVector[1] - vector[1]
vector[2] = LigandAlignmentVector[2] - vector[2]
        # Skip degenerate entries: the attachment bead itself (zero vector) or invalid (-inf) values
        if vector[0] == -math.inf or vector[0] == 0.0:
            continue
        TransformationList.append([vector, Molecule.atoms[i].type])
# Return the universe, the transformed (normalized) coordinate list of the ligand molecule, and the
# alignment vector that shows the arrow of direction of the vector, which we will be able to reorientate
return Molecule, TransformationList, LigandAlignmentVector
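# Usage sketch (the file name and bead names "N1"/"R3" are placeholders; requires
# MDAnalysis and a Martini 3 ligand .gro file on disk):
if __name__ == "__main__":
    mol, transforms, align_vec = ReadMartiniMolecules("ligand.gro", "N1", "R3")
    print(align_vec)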
|
e9979c5cd82f8ff2a63d6f74bb19db3fced3e89d
| 30,268 |
def average_gen_fock(filename, fock_type='plus', estimator='back_propagated',
                     eqlb=1, skip=1, ix=None):
    """Average AFQMC generalised Fock matrix.
Parameters
----------
filename : string
QMCPACK output containing density matrix (*.h5 file).
fock_type : string
Which generalised Fock matrix to extract. Optional (plus/minus).
Default: plus.
estimator : string
Estimator type to analyse. Options: back_propagated or mixed.
Default: back_propagated.
eqlb : int
Number of blocks for equilibration. Default 1.
skip : int
Number of blocks to skip in between measurements equilibration.
Default 1 (use all data).
ix : int
Back propagation path length to average. Optional.
Default: None (chooses longest path).
Returns
-------
    gfock : :class:`numpy.ndarray`
        Averaged generalised Fock matrix.
    gfock_err : :class:`numpy.ndarray`
        Error bars for the generalised Fock matrix elements.
"""
md = get_metadata(filename)
name = 'gen_fock_' + fock_type
mean, err = average_observable(filename, name, eqlb=eqlb, skip=skip,
estimator=estimator, ix=ix)
nbasis = md['NMO']
wt = md['WalkerType']
try:
walker = WALKER_TYPE[wt]
    except IndexError:
        print('Unknown walker type {}'.format(wt))
        return None
if walker == 'closed':
return mean.reshape(1,nbasis,nbasis), err.reshape(1,nbasis, nbasis)
elif walker == 'collinear':
return mean.reshape((2,nbasis,nbasis)), err.reshape((2, nbasis, nbasis))
elif walker == 'non_collinear':
return mean.reshape((1,2*nbasis,2*nbasis)), err.reshape((1,2*nbasis, 2*nbasis))
else:
print('Unknown walker type.')
return None
|
6e9869663ed0866e8d5ee81e0b1adfc9f4ee6fa2
| 30,269 |
def start_ignite(test_context, ignite_version: str, rebalance_params: RebalanceParams) -> IgniteService:
"""
Start IgniteService:
:param test_context: Test context.
:param ignite_version: Ignite version.
:param rebalance_params: Rebalance parameters.
:return: IgniteService.
"""
node_count = test_context.available_cluster_size - rebalance_params.preloaders
if rebalance_params.persistent:
data_storage = DataStorageConfiguration(
max_wal_archive_size=2 * rebalance_params.data_region_max_size,
default=DataRegionConfiguration(
persistent=True,
max_size=rebalance_params.data_region_max_size
)
)
else:
data_storage = DataStorageConfiguration(
default=DataRegionConfiguration(max_size=rebalance_params.data_region_max_size)
)
node_config = IgniteConfiguration(
version=IgniteVersion(ignite_version),
data_storage=data_storage,
metric_exporters={"org.apache.ignite.spi.metric.jmx.JmxMetricExporterSpi"},
rebalance_thread_pool_size=rebalance_params.thread_pool_size,
rebalance_batch_size=rebalance_params.batch_size,
rebalance_batches_prefetch_count=rebalance_params.batches_prefetch_count,
rebalance_throttle=rebalance_params.throttle)
ignites = IgniteService(test_context, config=node_config,
num_nodes=node_count if rebalance_params.trigger_event else node_count - 1,
jvm_opts=rebalance_params.jvm_opts)
ignites.start()
return ignites
|
5cbd9e6946fcbaf063d9de72074a14794f65d234
| 30,270 |
def simplify(expr, *exprs, **kwargs):
"""Simplify the given expression[s]."""
if exprs:
return _coconut_tail_call((tuple), (map)(lambda x: x.simplify(**kwargs), (expr,) + exprs))
else:
return _coconut_tail_call(expr.simplify, **kwargs)
|
7982869f98f5296e4e596cfbed909a5c4caa9ba7
| 30,271 |
def ratek_fit_info(rxn_dstr):
""" Read the information describing features of the fits to the
rate constants
"""
# Read the temperatures and the Errors from the lines
pressure_ptt = (
'Pressure:' + app.SPACES +
app.capturing(app.one_of_these([app.FLOAT, 'High']))
)
trange_ptt = (
'Temps: ' + app.SPACES +
app.capturing(app.INTEGER) + '-' + app.capturing(app.INTEGER) +
app.SPACES + 'K'
)
mean_ptt = (
'MeanAbsErr:' + app.SPACES +
app.capturing(app.FLOAT) + app.escape('%') +
','
)
max_ptt = (
'MaxErr:' + app.SPACES +
app.capturing(app.FLOAT) + app.escape('%')
)
pressure_caps = apf.all_captures(pressure_ptt, rxn_dstr)
trange_caps = apf.all_captures(trange_ptt, rxn_dstr)
mean_caps = apf.all_captures(mean_ptt, rxn_dstr)
max_caps = apf.all_captures(max_ptt, rxn_dstr)
pressures = []
for pressure in pressure_caps:
if pressure != 'High':
pressures.append(float(pressure))
elif pressure == 'High':
pressures.append(pressure)
trange_vals = []
for cap in trange_caps:
temp1, temp2 = cap
trange_vals.append([int(temp1), int(temp2)])
if mean_caps is not None:
mean_vals = [float(val) for val in mean_caps]
else:
mean_vals = []
if max_caps is not None:
max_vals = [float(val) for val in max_caps]
else:
max_vals = []
# Build the inf_dct
inf_dct = {}
for idx, pressure in enumerate(pressures):
inf_dct[pressure] = {'temps': trange_vals[idx]}
if mean_vals:
inf_dct[pressure].update({'mean_err': mean_vals[idx]})
if max_vals:
inf_dct[pressure].update({'max_err': max_vals[idx]})
return inf_dct
|
dbed1c66b68a6ddbe0d4d8af3e91736131220383
| 30,272 |
import numpy as np
from numpy import newaxis, roll
def ifrt2(a):
"""Compute the 2-dimensional inverse finite radon transform (iFRT) for
an (n+1) x n integer array.
Parameters
----------
a : array_like
A 2-D (n+1) row x n column integer array.
Returns
-------
iFRT : 2-D n x n ndarray
Inverse Finite Radon Transform array of n x n integer coefficients.
See Also
--------
frt2 : The two-dimensional FRT
Notes
-----
The FRT has a unique inverse iff n is prime.
See [1]_ for an overview.
The idea for this algorithm is due to Vlad Negnevitski.
Examples
--------
>>> SIZE = 59
>>> img = np.tri(SIZE, dtype=np.int32)
Apply the Finite Radon Transform:
>>> f = frt2(img)
Apply the Inverse Finite Radon Transform to recover the input
>>> fi = ifrt2(f)
Check that it's identical to the original
>>> assert len(np.nonzero(img-fi)[0]) == 0
References
----------
.. [1] A. Kingston and I. Svalbe, "Projective transforms on periodic
discrete image arrays," in P. Hawkes (Ed), Advances in Imaging
and Electron Physics, 139 (2006)
"""
if a.ndim != 2 or a.shape[0] != a.shape[1] + 1:
raise ValueError("Input must be an (n+1) row x n column, 2-D array")
ai = a.copy()[:-1]
n = ai.shape[1]
f = np.empty((n, n), np.uint32)
f[0] = ai.sum(axis=0)
for m in range(1, n):
# Rolls the pth row of ai right by p places.
for row in range(1, ai.shape[0]):
ai[row] = roll(ai[row], row)
f[m] = ai.sum(axis=0)
f += a[-1][newaxis].T
f = (f - ai[0].sum()) / n
return f
|
2fa7f3c8fc3b6ed0e7ee9c58d9587644314d6608
| 30,274 |
def get_levelized_cost(solution, cost_class='monetary', carrier='power',
group=None, locations=None,
unit_multiplier=1.0):
"""
Get the levelized cost per unit of energy produced for the given
``cost_class`` and ``carrier``, optionally for a subset of technologies
given by ``group`` and a subset of ``locations``.
Parameters
----------
solution : solution container
cost_class : str, default 'monetary'
carrier : str, default 'power'
group : str, default None
Limit the computation to members of the given group (see the
groups table in the solution for valid groups).
locations : str or iterable, default None
Limit the computation to the given location or locations.
unit_multiplier : float or int, default 1.0
Adjust unit of the returned cost value. For example, if model units
are kW and kWh, ``unit_multiplier=1.0`` will return cost per kWh, and
``unit_multiplier=0.001`` will return cost per MWh.
"""
if group is None:
group = 'supply'
members = solution.groups.to_pandas().at[group, 'members'].split('|')
if locations is None:
locations_slice = slice(None)
elif isinstance(locations, (str, float, int)):
# Make sure that locations is a list if it's a single value
locations_slice = [locations]
else:
locations_slice = locations
cost = solution['costs'].loc[dict(k=cost_class, x=locations_slice, y=members)]
ec_prod = solution['ec_prod'].loc[dict(c=carrier, x=locations_slice, y=members)]
if locations is None:
cost = cost.sum(dim='x').to_pandas()
ec_prod = ec_prod.sum(dim='x').to_pandas()
else:
cost = cost.to_pandas()
ec_prod = ec_prod.to_pandas()
return (cost / ec_prod) * unit_multiplier
|
96b8f9a9fceaa932bcee72033e73ad8b9551759d
| 30,275 |
import numpy as np
def weighting_system_c():
"""C-weighting filter represented as polynomial transfer function.
:returns: Tuple of `num` and `den`.
See equation E.1 of the standard.
"""
f1 = _POLE_FREQUENCIES[1]
f4 = _POLE_FREQUENCIES[4]
offset = _NORMALIZATION_CONSTANTS['C']
numerator = np.array([(2.0*np.pi*f4)**2.0 * (10**(-offset/20.0)), 0.0, 0.0])
part1 = [1.0, 4.0*np.pi*f4, (2.0*np.pi*f4)**2.0]
part2 = [1.0, 4.0*np.pi*f1, (2.0*np.pi*f1)**2.0]
    denominator = np.convolve(part1, part2)
    return numerator, denominator
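# Usage sketch: evaluate the magnitude response at 1 kHz, which should be close
# to 0 dB. The two module constants are assumptions here; the values below are
# approximate IEC 61672 pole frequencies and the C-weighting normalization,
# included only to make the example self-contained.
from scipy.signal import freqs

_POLE_FREQUENCIES = {1: 20.598997, 4: 12194.217}
_NORMALIZATION_CONSTANTS = {'C': -0.062}

num, den = weighting_system_c()
_, h = freqs(num, den, worN=[2.0 * np.pi * 1000.0])
print(20.0 * np.log10(abs(h[0])))  # approximately 0 dB at 1 kHz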
|
02b77bafbbba2671c15667f81bad965383b21f33
| 30,276 |
def make_fully_qualified_url(url):
""" Ensure url is qualified """
if url.startswith("//"):
return "https:" + url
if url.startswith("/"):
return "https://en.wikipedia.org" + url
assert url.startswith("http"), "Bad URL (relative to unknown location): " + url
return url
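# Usage sketch:
print(make_fully_qualified_url("/wiki/Python_(programming_language)"))
# -> https://en.wikipedia.org/wiki/Python_(programming_language)
print(make_fully_qualified_url("//upload.wikimedia.org/logo.png"))
# -> https://upload.wikimedia.org/logo.png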
|
9b87adaa0a30c5a09e81dd73aea3f282af92ac53
| 30,277 |
def reduce_range_overlaps(ranges):
    """Given a list whose elements are 2-tuples of (min, max), returns a similar list simplified where possible. """
ranges = [ea for ea in ranges if ea]
if len(ranges) < 2:
return ranges
first, *ranges_ordered = list(reversed(sorted(ranges, key=lambda ea: ea[1] - ea[0])))
r_min = first[0]
r_max = first[1]
disjointed_ranges = []
for r in ranges_ordered:
if r_min <= r[0] <= r_max:
r_max = max(r[1], r_max)
elif r_min <= r[1] <= r_max:
r_min = min(r[0], r_min)
# Since we already looked at 'first' sorted by max range, not possible: r[0] < r_min and r[1] > r_max
else: # range is possibly disjointed from other ranges. There may be a gap.
disjointed_ranges.append(r)
big_range = (r_min, r_max)
clean_ranges = [big_range, *disjointed_ranges]
return clean_ranges
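# Usage sketch: ranges overlapping the widest range are merged into it,
# disjoint ranges are kept as-is.
print(reduce_range_overlaps([(1, 10), (8, 15), (20, 22)]))  # [(1, 15), (20, 22)]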
|
fe62dd8bbb1fd0a985757cc417c9c230659294c5
| 30,278 |
def get_averages_by_addon_from_bigquery(today, exclude=None):
"""This function is used to compute the 'hotness' score of each add-on (see
also `update_addon_hotness()` cron task). It returns a dict with top-level
keys being add-on GUIDs and values being dicts containing average
values."""
client = create_client()
one_week_date = today - timedelta(days=7)
four_weeks_date = today - timedelta(days=28)
query = f"""
WITH
this_week AS (
SELECT
addon_id,
AVG(dau) AS avg_this_week
FROM
`{get_amo_stats_dau_view_name()}`
WHERE
submission_date >= @one_week_date
GROUP BY
addon_id),
three_weeks_before_this_week AS (
SELECT
addon_id,
AVG(dau) AS avg_three_weeks_before
FROM
`{get_amo_stats_dau_view_name()}`
WHERE
submission_date BETWEEN @four_weeks_date AND @one_week_date
GROUP BY
addon_id)
SELECT
*
FROM
this_week
JOIN
three_weeks_before_this_week
USING
(addon_id)
"""
query_parameters = [
bigquery.ScalarQueryParameter('one_week_date', 'DATE', one_week_date),
bigquery.ScalarQueryParameter('four_weeks_date', 'DATE', four_weeks_date),
]
if exclude and len(exclude) > 0:
query = f'{query} WHERE addon_id NOT IN UNNEST(@excluded_addon_ids)'
query_parameters.append(
bigquery.ArrayQueryParameter('excluded_addon_ids', 'STRING', exclude)
)
rows = client.query(
query,
job_config=bigquery.QueryJobConfig(query_parameters=query_parameters),
).result()
return {
row['addon_id']: {
'avg_this_week': row['avg_this_week'],
'avg_three_weeks_before': row['avg_three_weeks_before'],
}
for row in rows
if row['addon_id']
}
|
5c50f10ffa3c15beab6ae204bbd796050e85e66a
| 30,279 |
def main_menu():
"""Dialog for the ATM Main Menu."""
# Determines action taken by application.
action = questionary.select(
"Would you like to check your balance, make a deposit or make a withdrawal?",
choices=["check balance", "deposit", "withdrawal"],
).ask()
return action
|
add02dfc371c24e89c73e336c5efe3ff800e6d00
| 30,280 |
def load_sentences_from_naf(iteration, root, naf_entity_layer, modify_entities):
"""Load sentences from a single NAF file (already loaded). Potentially replace entity mentions with their identity."""
if modify_entities:
        to_replace = map_mentions_to_identity(root, naf_entity_layer)
# Create list of lists of sentences in a file
token_layer = root.find('text')
old_sent = '1'
sentences = []
current_sentence = []
for w in token_layer.findall('wf'):
idx = w.get('id').replace('w', 't')
sent = w.get('sent')
txt = w.text
if old_sent != sent:
sentences.append(current_sentence)
current_sentence = []
if not modify_entities or idx not in to_replace:
current_sentence.append(txt)
elif idx in to_replace and to_replace[idx]:
current_sentence.append(to_replace[idx])
old_sent = sent
sentences.append(current_sentence)
return sentences
|
054a5adadcd170ac5525bc0bc6d5dd32bcdf14ae
| 30,281 |
import numpy as np
def get_detection_eff_matrix(summary_table, num):
"""Computes the detection efficiency matrix for the input detection summary table.
    Input argument num sets the maximum number of true objects per blend in the
    test set for which the detection efficiency matrix is to be created.
    Detection efficiency is
computed for a number of true objects in the range (0-num) as columns and
the detection percentage as rows. The percentage values in a column sum to
100.
The input summary table must be a numpy array of shape [N, 5], where N is
the test set size. The 5 columns in the summary_table are number of true
objects, detected sources, undetected objects, spurious detections and
shredded objects for each of the N blend scenes in the test set.
Args:
summary_table (`numpy.array`): Detection summary as a table [N, 5].
num (int): Maximum number of true objects to create matrix for. Number
of columns in efficiency matrix will be num+1. The first column
will correspond to no true objects.
Returns:
numpy.ndarray of size[num+2, num+1] that shows detection efficiency.
"""
eff_matrix = np.zeros((num + 2, num + 1))
for i in range(0, num + 1):
(q_true,) = np.where(summary_table[:, 0] == i)
for j in range(0, num + 2):
if len(q_true) > 0:
(q_det,) = np.where(summary_table[q_true, 1] == j)
eff_matrix[j, i] = len(q_det)
norm = np.sum(eff_matrix, axis=0)
# If no detections along a column, set sum to 1 to avoid dividing by zero.
norm[norm == 0.0] = 1
# normalize over columns.
eff_matrix = eff_matrix / norm[np.newaxis, :] * 100.0
return eff_matrix
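# Usage sketch (hypothetical summary table: two blend scenes, each with 2 true
# objects; one scene fully detected, the other with a single detection):
summary = np.array([
    [2, 2, 0, 0, 0],
    [2, 1, 1, 0, 0],
])
eff = get_detection_eff_matrix(summary, num=2)
print(eff.shape)  # (4, 3)
print(eff[:, 2])  # [ 0. 50. 50.  0.]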
|
8fa28c1f278bc14ee6877f50297f6554bc413392
| 30,282 |
def unicode_to_base64(text, strip_newlines=True):
"""Safe conversion of ``text`` to base64 representation using
utf-8 bytes.
Strips newlines from output unless ``strip_newlines`` is `False`.
"""
text = to_unicode(text)
if strip_newlines:
return text.encode('utf-8').encode('base64').replace('\n', '')
return text.encode('utf-8').encode('base64')
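# Note: the str.encode('base64') codec above is Python 2 only. A rough Python 3
# equivalent (a sketch, not the original library's API; the `to_unicode`
# coercion is omitted and `text` is assumed to already be a str):
import base64

def unicode_to_base64_py3(text, strip_newlines=True):
    encoded = base64.encodebytes(text.encode("utf-8")).decode("ascii")
    return encoded.replace("\n", "") if strip_newlines else encoded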
|
34593c1811dac718bc82f47e590da62d1d704de4
| 30,283 |
def _valid_path(g, path, master_nodes):
"""
    Test that the path starts and ends on master nodes and that no intermediate node is a via.
"""
valid = True
if path[0] not in master_nodes: valid = False
if path[-1] not in master_nodes: valid = False
for n in path[1:-1]:
if 'pin' in g.node[n]:
# if _is_master(g, n):
# masternodes = (spira.JunctionDevice, spira.UserNode, spira.PortNode)
if isinstance(g.node[n]['pin'], BaseVia):
valid = False
return valid
|
e91c0a53b994316073f97e854b9e4933d760c078
| 30,284 |
import io
import pandas as pd
def usgs_stonecr_call(*, resp, year, **_):
    """
    Convert the response from the url call to a pandas dataframe, begin parsing df
    into FBA format
    :param resp: df, response from url call
    :param year: year
    :return: pandas dataframe of original source data
    """
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[5:15]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
if len(df_data_1.columns) > 11:
for x in range(11, len(df_data_1.columns)):
col_name = "Unnamed: " + str(x)
del df_data_1[col_name]
    if len(df_data_1.columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['stonecrushed'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
return df_data_1
|
41d63f8175eaa0b5a5b2d03e40ea0bfb73ed70d4
| 30,285 |
def read_line(**kwargs):
"""
Gets next line in input. If `skip_empty` is True, only lines with
at least one non-whitespace character are returned.
:return: str
"""
tokenizer = _get_tokenizer(kwargs)
skip_empty, rstrip = kwargs['skip_empty'], kwargs['rstrip']
try:
if skip_empty:
return tokenizer.get_nonempty_line()
line = tokenizer.get_line()
return line.rstrip('\r\n') if rstrip else line
except EOFError:
return _handle_eof()
|
82a24906872076afc26193dcd8bfb3be225c70d9
| 30,286 |
def _coco17_category():
"""
Get class id to category id map and category id
to category name map of COCO2017 dataset
"""
clsid2catid = {
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
11: 11,
12: 13,
13: 14,
14: 15,
15: 16,
16: 17,
17: 18,
18: 19,
19: 20,
20: 21,
21: 22,
22: 23,
23: 24,
24: 25,
25: 27,
26: 28,
27: 31,
28: 32,
29: 33,
30: 34,
31: 35,
32: 36,
33: 37,
34: 38,
35: 39,
36: 40,
37: 41,
38: 42,
39: 43,
40: 44,
41: 46,
42: 47,
43: 48,
44: 49,
45: 50,
46: 51,
47: 52,
48: 53,
49: 54,
50: 55,
51: 56,
52: 57,
53: 58,
54: 59,
55: 60,
56: 61,
57: 62,
58: 63,
59: 64,
60: 65,
61: 67,
62: 70,
63: 72,
64: 73,
65: 74,
66: 75,
67: 76,
68: 77,
69: 78,
70: 79,
71: 80,
72: 81,
73: 82,
74: 84,
75: 85,
76: 86,
77: 87,
78: 88,
79: 89,
80: 90
}
catid2name = {
0: 'background',
1: 'person',
2: 'bicycle',
3: 'car',
4: 'motorcycle',
5: 'airplane',
6: 'bus',
7: 'train',
8: 'truck',
9: 'boat',
10: 'traffic light',
11: 'fire hydrant',
13: 'stop sign',
14: 'parking meter',
15: 'bench',
16: 'bird',
17: 'cat',
18: 'dog',
19: 'horse',
20: 'sheep',
21: 'cow',
22: 'elephant',
23: 'bear',
24: 'zebra',
25: 'giraffe',
27: 'backpack',
28: 'umbrella',
31: 'handbag',
32: 'tie',
33: 'suitcase',
34: 'frisbee',
35: 'skis',
36: 'snowboard',
37: 'sports ball',
38: 'kite',
39: 'baseball bat',
40: 'baseball glove',
41: 'skateboard',
42: 'surfboard',
43: 'tennis racket',
44: 'bottle',
46: 'wine glass',
47: 'cup',
48: 'fork',
49: 'knife',
50: 'spoon',
51: 'bowl',
52: 'banana',
53: 'apple',
54: 'sandwich',
55: 'orange',
56: 'broccoli',
57: 'carrot',
58: 'hot dog',
59: 'pizza',
60: 'donut',
61: 'cake',
62: 'chair',
63: 'couch',
64: 'potted plant',
65: 'bed',
67: 'dining table',
70: 'toilet',
72: 'tv',
73: 'laptop',
74: 'mouse',
75: 'remote',
76: 'keyboard',
77: 'cell phone',
78: 'microwave',
79: 'oven',
80: 'toaster',
81: 'sink',
82: 'refrigerator',
84: 'book',
85: 'clock',
86: 'vase',
87: 'scissors',
88: 'teddy bear',
89: 'hair drier',
90: 'toothbrush'
}
clsid2catid = {k - 1: v for k, v in clsid2catid.items()}
catid2name.pop(0)
return clsid2catid, catid2name
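# Usage sketch: map a zero-based class id to its COCO category id and name.
clsid2catid, catid2name = _coco17_category()
print(clsid2catid[0], catid2name[clsid2catid[0]])  # 1 person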
|
0a242332391c5fbf21be0c434c64c104db1b3055
| 30,287 |
def identical_sort(df_true, df_subm):
"""
Check if 2 DataFrames are sorted the same
This check is only conducted whenever a perfect F1 score is
achieved.
It only verifies whether or not the first and last tuple have the same
relative order in both DataFrames. It is thus not an explicit check! The
reason for this is the indeterminacy on the database side.
Albeit rough, it suffices for our purposes.
"""
try:
first_tuple_subm = tuple(df_subm.values[0])
idx_first = idx_tuple_in_df(first_tuple_subm, df_true)
except:
idx_first = None
try:
final_tuple_subm = tuple(df_subm.values[-1])
idx_last = idx_tuple_in_df(final_tuple_subm, df_true)
except:
idx_last = None
check_1 = isinstance(idx_first, int) & isinstance(idx_last, int)
if check_1:
check_2 = idx_first <= idx_last
else:
        check_2 = False
return check_2
|
94363275f66fe84829f90f49afa57237fa05c1a5
| 30,288 |
import numpy as np
import tensorflow as tf
def bigbird_block_sparse_attention(
query_layer, key_layer, value_layer, band_mask, from_mask, to_mask,
from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads,
num_rand_blocks, size_per_head, batch_size, from_seq_length, to_seq_length,
from_block_size, to_block_size):
"""BigBird attention sparse calculation using blocks in linear time.
Assumes from_seq_length//from_block_size == to_seq_length//to_block_size.
Args:
query_layer: float Tensor of shape [batch_size, num_attention_heads,
from_seq_length, size_per_head]
key_layer: float Tensor of shape [batch_size, num_attention_heads,
to_seq_length, size_per_head]
value_layer: float Tensor of shape [batch_size, num_attention_heads,
to_seq_length, size_per_head]
band_mask: (optional) int32 Tensor of shape [batch_size, 1,
from_seq_length//from_block_size-4, from_block_size, 3*to_block_size]. The
values should be 1 or 0. The attention scores will effectively be set to
-infinity for any positions in the mask that are 0, and will be unchanged
for positions that are 1.
from_mask: (optional) int32 Tensor of shape [batch_size, 1, from_seq_length,
1]. The values should be 1 or 0. The attention scores will effectively be
set to -infinity for any positions in the mask that are 0, and will be
unchanged for positions that are 1.
to_mask: (optional) int32 Tensor of shape [batch_size, 1, 1, to_seq_length].
The values should be 1 or 0. The attention scores will effectively be set
to -infinity for any positions in the mask that are 0, and will be
unchanged for positions that are 1.
from_blocked_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size]. Same as from_mask,
just reshaped.
to_blocked_mask: (optional) int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size]. Same as to_mask, just
reshaped.
rand_attn: [batch_size, num_attention_heads,
from_seq_length//from_block_size-2, num_rand_blocks]
num_attention_heads: int. Number of attention heads.
num_rand_blocks: int. Number of random chunks per row.
size_per_head: int. Size of each attention head.
batch_size: int. Batch size for computation.
from_seq_length: int. length of from sequence.
to_seq_length: int. length of to sequence.
from_block_size: int. size of block in from sequence.
to_block_size: int. size of block in to sequence.
Returns:
float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
size_per_head].
"""
rand_attn = tf.expand_dims(rand_attn, 0)
rand_attn = tf.repeat(rand_attn, batch_size, 0)
rand_mask = create_rand_mask_from_inputs(
from_blocked_mask,
to_blocked_mask,
rand_attn,
num_attention_heads,
num_rand_blocks,
batch_size,
from_seq_length,
from_block_size,
)
# Define shorthands
h = num_attention_heads
r = num_rand_blocks
d = size_per_head
b = batch_size
m = from_seq_length
n = to_seq_length
wm = from_block_size
wn = to_block_size
dtype = query_layer.dtype
query_layer = tf.transpose(query_layer, perm=[0, 2, 1, 3])
key_layer = tf.transpose(key_layer, perm=[0, 2, 1, 3])
value_layer = tf.transpose(value_layer, perm=[0, 2, 1, 3])
blocked_query_matrix = tf.reshape(query_layer, (b, h, m // wm, wm, -1))
blocked_key_matrix = tf.reshape(key_layer, (b, h, n // wn, wn, -1))
blocked_value_matrix = tf.reshape(value_layer, (b, h, n // wn, wn, -1))
gathered_key = tf.reshape(
tf.gather(blocked_key_matrix, rand_attn, batch_dims=2, name="gather_key"),
(b, h, m // wm - 2, r * wn, -1)) # [b, h, n//wn-2, r, wn, -1]
gathered_value = tf.reshape(
tf.gather(
blocked_value_matrix, rand_attn, batch_dims=2, name="gather_value"),
(b, h, m // wm - 2, r * wn, -1)) # [b, h, n//wn-2, r, wn, -1]
first_product = tf.einsum(
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, 0],
key_layer) # [b, h, wm, -1] x [b, h, n, -1] ==> [b, h, wm, n]
first_product = tf.multiply(first_product, 1.0 / np.sqrt(d))
first_product += (1.0 - tf.cast(to_mask, dtype=dtype)) * -10000.0
first_attn_weights = tf.nn.softmax(first_product) # [b, h, wm, n]
first_context_layer = tf.einsum(
"BHQK,BHKD->BHQD", first_attn_weights,
value_layer) # [b, h, wm, n] x [b, h, n, -1] ==> [b, h, wm, -1]
first_context_layer = tf.expand_dims(first_context_layer, 2)
second_key_mat = tf.concat([
blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1],
blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :,
-1], gathered_key[:, :, 0]
], 2) # [b, h, (4+r)*wn, -1]
second_value_mat = tf.concat([
blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1],
blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1],
gathered_value[:, :, 0]
], 2) # [b, h, (4+r)*wn, -1]
second_product = tf.einsum(
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, 1], second_key_mat
) # [b, h, wm, -1] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, (4+r)*wn]
second_seq_pad = tf.concat([
to_mask[:, :, :, :3 * wn], to_mask[:, :, :, -wn:],
tf.ones([b, 1, 1, r * wn], dtype=dtype)
], 3)
second_rand_pad = tf.concat([
tf.ones([b, h, wm, 4 * wn], dtype=dtype), rand_mask[:, :, 0]
], 3)
second_product = tf.multiply(second_product, 1.0 / np.sqrt(d))
second_product += (1.0 -
tf.minimum(second_seq_pad, second_rand_pad)) * -10000.0
second_attn_weights = tf.nn.softmax(second_product) # [b , h, wm, (4+r)*wn]
second_context_layer = tf.einsum(
"BHQK,BHKD->BHQD", second_attn_weights, second_value_mat
) # [b, h, wm, (4+r)*wn] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, -1]
second_context_layer = tf.expand_dims(second_context_layer, 2)
exp_blocked_key_matrix = tf.concat([
blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2],
blocked_key_matrix[:, :, 3:-1]
], 3) # [b, h, m//wm-4, 3*wn, -1]
exp_blocked_value_matrix = tf.concat([
blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2],
blocked_value_matrix[:, :, 3:-1]
], 3) # [b, h, m//wm-4, 3*wn, -1]
middle_query_matrix = blocked_query_matrix[:, :, 2:-2]
inner_band_product = tf.einsum(
"BHLQD,BHLKD->BHLQK", middle_query_matrix, exp_blocked_key_matrix
) # [b, h, m//wm-4, wm, -1] x [b, h, m//wm-4, 3*wn, -1]
# ==> [b, h, m//wm-4, wm, 3*wn]
inner_band_product = tf.multiply(inner_band_product, 1.0 / np.sqrt(d))
rand_band_product = tf.einsum(
"BHLQD,BHLKD->BHLQK", middle_query_matrix,
gathered_key[:, :,
1:-1]) # [b, h, m//wm-4, wm, -1] x [b, h, m//wm-4, r*wn, -1]
# ==> [b, h, m//wm-4, wm, r*wn]
rand_band_product = tf.multiply(rand_band_product, 1.0 / np.sqrt(d))
first_band_product = tf.einsum(
"BHLQD,BHKD->BHLQK", middle_query_matrix, blocked_key_matrix[:, :, 0]
) # [b, h, m//wm-4, wm, -1] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, wn]
first_band_product = tf.multiply(first_band_product, 1.0 / np.sqrt(d))
last_band_product = tf.einsum(
"BHLQD,BHKD->BHLQK", middle_query_matrix, blocked_key_matrix[:, :, -1]
) # [b, h, m//wm-4, wm, -1] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, wn]
last_band_product = tf.multiply(last_band_product, 1.0 / np.sqrt(d))
inner_band_product += (1.0 - band_mask) * -10000.0
first_band_product += (1.0 -
tf.expand_dims(to_mask[:, :, :, :wn], 3)) * -10000.0
last_band_product += (1.0 -
tf.expand_dims(to_mask[:, :, :, -wn:], 3)) * -10000.0
rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * -10000.0
band_product = tf.concat([
first_band_product, inner_band_product, rand_band_product,
last_band_product
], -1) # [b, h, m//wm-4, wm, (5+r)*wn]
attn_weights = tf.nn.softmax(band_product) # [b, h, m//wm-4, wm, (5+r)*wn]
context_layer = tf.einsum(
"BHLQK,BHLKD->BHLQD", attn_weights[:, :, :, :,
wn:4 * wn], exp_blocked_value_matrix
) # [b, h, m//wm-4, wm, 3*wn] x [b, h, m//wm-4, 3*wn, -1]
# ==> [b, h, m//wm-4, wm, -1]
context_layer += tf.einsum(
"BHLQK,BHLKD->BHLQD", attn_weights[:, :, :, :,
4 * wn:-wn], gathered_value[:, :, 1:-1]
) # [b, h, m//wm-4, wm, r*wn] x [b, h, m//wm-4, r*wn, -1]
# ==> [b, h, m//wm-4, wm, -1]
context_layer += tf.einsum(
"BHLQK,BHKD->BHLQD", attn_weights[:, :, :, :, :wn],
blocked_value_matrix[:, :, 0]
) # [b, h, m//wm-4, wm, wn] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, -1]
context_layer += tf.einsum(
"BHLQK,BHKD->BHLQD", attn_weights[:, :, :, :,
-wn:], blocked_value_matrix[:, :, -1]
) # [b, h, m//wm-4, wm, wn] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, -1]
second_last_key_mat = tf.concat([
blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3],
blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1],
gathered_key[:, :, -1]
], 2) # [b, h, (4+r)*wn, -1]
second_last_value_mat = tf.concat([
blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3],
blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1],
gathered_value[:, :, -1]
], 2) # [b, h, (4+r)*wn, -1]
second_last_product = tf.einsum(
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, -2], second_last_key_mat
) # [b, h, wm, -1] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, (4+r)*wn]
second_last_seq_pad = tf.concat([
to_mask[:, :, :, :wn], to_mask[:, :, :, -3 * wn:],
tf.ones([b, 1, 1, r * wn], dtype=dtype)
], 3)
second_last_rand_pad = tf.concat(
[tf.ones([b, h, wm, 4 * wn], dtype=dtype), rand_mask[:, :, -1]], 3)
second_last_product = tf.multiply(second_last_product, 1.0 / np.sqrt(d))
second_last_product += (
1.0 - tf.minimum(second_last_seq_pad, second_last_rand_pad)) * -10000.0
second_last_attn_weights = tf.nn.softmax(
second_last_product) # [b, h, wm, (4+r)*wn]
second_last_context_layer = tf.einsum(
"BHQK,BHKD->BHQD", second_last_attn_weights, second_last_value_mat
) # [b, h, wm, (4+r)*wn] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, -1]
second_last_context_layer = tf.expand_dims(second_last_context_layer, 2)
last_product = tf.einsum(
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, -1],
key_layer) # [b, h, wm, -1] x [b, h, n, -1] ==> [b, h, wm, n]
last_product = tf.multiply(last_product, 1.0 / np.sqrt(d))
last_product += (1.0 - to_mask) * -10000.0
last_attn_weights = tf.nn.softmax(last_product) # [b, h, wm, n]
last_context_layer = tf.einsum(
"BHQK,BHKD->BHQD", last_attn_weights,
value_layer) # [b, h, wm, n] x [b, h, n, -1] ==> [b, h, wm, -1]
last_context_layer = tf.expand_dims(last_context_layer, 2)
context_layer = tf.concat([
first_context_layer, second_context_layer, context_layer,
second_last_context_layer, last_context_layer
], 2)
context_layer = tf.reshape(context_layer, (b, h, m, -1)) * from_mask
context_layer = tf.transpose(context_layer, (0, 2, 1, 3))
return context_layer
|
73a486a26510137de84017063e624de5379e4fbe
| 30,289 |
import numpy as np
def color_diff(rgb_x: np.array, rgb_y: np.array) -> float:
    """
    Computes the distance between two colors using Euclidean distance.
    :param rgb_x: a vector of one rgb color
    :param rgb_y: a vector of another rgb color
    :return: the distance between two vectors
    """
    return np.sum((rgb_x - rgb_y) ** 2) ** 0.5  # square root; '** 1 / 2' parses as (x ** 1) / 2
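# Usage sketch (hypothetical RGB values):
black = np.array([0, 0, 0])
white = np.array([255, 255, 255])
print(color_diff(black, white))  # ~441.67 (255 * sqrt(3))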
|
0e9136c68a00f5d85d2a13311f083d8036329b0a
| 30,290 |
def functions(node):
""":returns: list of function tags for node, or an empty list."""
if not node.source:
return []
a = node.source[FUNC]
if a == '--' or a == '' or a is None:
return []
return a.split('-')
|
3167ebd1d382b5ac8b46dd4565cd2184844840f1
| 30,292 |
def _calibrate_bg_and_psf_im(im, divs=5, keep_dist=8, peak_mea=11, locs=None):
"""
Run background & PSF calibration for one image.
These are typically combined from many fields and for each channel
to get a complete calibration.
This returns the accepted locs so that a z-stack can be estimated
by using the most in-focus frame for the locations
Arguments:
im: One image
divs: Spatial divisions
        keep_dist: Pixel distance under which two peaks are considered a collision
peak_mea: n pixel width and height to hold the peak image
locs: If None it will use the peak finder; otherwise these
            locs are being passed in and are expected to correspond
to the peak locs found in a previous step.
Returns:
locs (location of accepted peaks)
regional_bg_mean
regional_bg_std
regional_psf_zstack
"""
check.array_t(im, ndim=2)
stats = _regional_bg_fg_stats(im, divs=divs)
reg_bg_mean = stats[:, :, 0]
reg_bg_std = stats[:, :, 1]
check.array_t(reg_bg_mean, shape=(divs, divs))
check.array_t(reg_bg_std, shape=(divs, divs))
bg_im = imops.interp(reg_bg_mean, im.shape[-2:])
im = im - bg_im
if locs is None:
locs = _peak_find(im)
n_locs = locs.shape[0]
accepted = np.zeros((n_locs,))
# In each region gather a PSF estimate and a list of
# locations that were accepted. These locs can be
# re-used when analyzing other z slices
reg_psfs = np.zeros((divs, divs, peak_mea, peak_mea))
for win_im, y, x, coord in imops.region_enumerate(im, divs):
mea = win_im.shape[0]
assert win_im.shape[1] == mea
local_locs = locs - coord
local_locs_mask = np.all((local_locs > 0) & (local_locs < mea), axis=1)
local_locs = local_locs[local_locs_mask]
n_local_locs = local_locs.shape[0]
psfs, reasons = _psf_estimate(
win_im, local_locs, peak_mea, keep_dist=keep_dist, return_reasons=True
)
reg_psfs[y, x] = psfs
# for reason in (
# PSFEstimateMaskFields.accepted,
# # PSFEstimateMaskFields.skipped_near_edges,
# # PSFEstimateMaskFields.skipped_too_crowded,
# # PSFEstimateMaskFields.skipped_has_nan,
# # PSFEstimateMaskFields.skipped_empty,
# # PSFEstimateMaskFields.skipped_too_dark,
# # PSFEstimateMaskFields.skipped_too_oval,
# ):
# n_local_rejected = (reasons[:, reason] > 0).sum()
# print(f"y,x={y},{x} {str(reason)}:, {n_local_rejected}")
# Go backwards from local to global space.
local_accepted_iz = np.argwhere(
reasons[:, PSFEstimateMaskFields.accepted] == 1
).flatten()
local_loc_i_to_global_loc_i = np.arange(n_locs)[local_locs_mask]
assert local_loc_i_to_global_loc_i.shape == (n_local_locs,)
global_accepted_iz = local_loc_i_to_global_loc_i[local_accepted_iz]
accepted[global_accepted_iz] = 1
return locs[accepted > 0], reg_bg_mean, reg_bg_std, reg_psfs
|
3eb73736470253c72b03875415ea413802470153
| 30,293 |
def get_default_database_engine(rm: ResourceManager, database_name: str) -> Engine:
"""
Get the default engine of the database. If the default engine doesn't exists
raise FireboltError
"""
database = rm.databases.get_by_name(name=database_name)
bindings = rm.bindings.get_many(database_id=database.database_id)
if len(bindings) == 0:
raise FireboltError("No engines attached to the database")
for binding in bindings:
if binding.is_default_engine:
return rm.engines.get(binding.engine_id)
raise FireboltError("No default engine is found.")
|
c26d7bcac96b0272b38126cae824d7d0f1df0261
| 30,294 |
def _execute_onestep(seq, order=2, verbose=False):
"""
This function runs one full step of the non-sequential recursive window-
substitution algorithm. (For pairs or order=2, called NSRPS).
For internal use only, as this function does not carry out sanity checks.
For general/external usage, refer to run_once_NSRPS.
Parameters
----------
seq : list
Sequence of integers.
order : int, optional
Number of elements in window for substitution.
The default is 2 for pairs.
verbose : bool, optional
If True, returns most frequent pair with counts. The default is False.
Returns
-------
reduced_seq : list
Sequence of integers with most frequent window substituted.
The following are returned if verbose=True
freq_win : tuple
Most frequent window in seq.
count : int
Number of times the most frequent window occurred in seq.
"""
# Get sliding overlapping windows
windows = _find_overlapping_windows(seq, order)
if order == 2:
# Get mask that filters overlapping pairs out
mask = _filter_pairs(windows)
else:
# Get mask that filters overlapping windows out
mask = _filter_windows(windows, order)
# Apply mask, get filtered windows and indices
filt_win, filt_idx = _apply_filter_mask(windows, mask)
# Get most frequent window, with counts and indices
freq_win, count, idx_freq_win = _find_frequent_windows(filt_win, filt_idx)
# Carry out substitution of most frequent window at given indices
reduced_seq = _substitute_window(seq, idx_freq_win, order)
# Return substituted sequence with optional outputs
if verbose:
return reduced_seq, freq_win, count
return reduced_seq
|
5f655cc5f67ddbd9ae27b0461a9932e87d2a91b9
| 30,295 |
def getModulePower():
"""Returns the current power consumption of the entire module in mW."""
return float(readValue(i2cAddr='0041', channel='0', valType='power'))
|
4bfb362ffdeb9a210cb8bfc5cc203d50f9c28582
| 30,296 |
def roots(repo, subset, x):
"""``roots(set)``
Changesets in set with no parent changeset in set.
"""
s = set(getset(repo, repo.changelog, x))
subset = [r for r in subset if r in s]
cs = _children(repo, subset, s)
return [r for r in subset if r not in cs]
|
2acb2f803966c3024a5d148e4856d44c13287cef
| 30,297 |