content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---
def build_delete(table, where):
"""
Build a delete request.
Parameters
----------
table : str
Table where query will be directed.
    where : iterable
The list of conditions to constrain the query.
Returns
-------
str
Built query.
"""
sql_q = "DELETE "
sql_q += 'FROM \"' + table + '\"'
sql_q += ' WHERE '
sql_q += ' AND '.join('{0} = :{0}'.format(w) for w in where)
return sql_q
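# Hedged usage sketch ('users', 'id' and 'email' are made-up names); the :name
# placeholders assume a DB-API driver with named-parameter style, e.g. sqlite3:
# build_delete('users', ['id', 'email'])
# -> 'DELETE FROM "users" WHERE id = :id AND email = :email'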
|
1f065b5905b6c7af4e19863ae48e228358278f06
| 32,370 |
def filter_resources_sets(used_resources_sets, resources_sets, expand_resources_set, reduce_resources_set):
""" Filter resources_set used with resources_sets defined.
It will block a resources_set from resources_sets if an used_resources_set in a subset of a resources_set"""
resources_expand = [expand_resources_set(resources_set) for resources_set in resources_sets]
used_resources_expand = [expand_resources_set(used_resources_set) for used_resources_set in used_resources_sets]
real_used_resources_sets = []
for resources_set in resources_expand:
for used_resources_set in used_resources_expand:
if resources_set.intersection(used_resources_set):
real_used_resources_sets.append(reduce_resources_set(resources_set))
break
return list(set(resources_sets).difference(set(real_used_resources_sets)))
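# Hedged usage sketch with identity expand/reduce callables and frozensets
# (made-up data): any set that overlaps a used set is dropped.
# used = [frozenset({1})]
# available = [frozenset({1, 2}), frozenset({3, 4})]
# filter_resources_sets(used, available, lambda s: s, lambda s: s)
# -> [frozenset({3, 4})]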
|
2ecd752a0460fff99ecc6b8c34ed28782e848923
| 32,371 |
def get_index_base():
"""获取上海及深圳指数代码、名称表"""
url_fmt = 'http://quotes.money.163.com/hs/service/hsindexrank.php?host=/hs/service/'
url_fmt += 'hsindexrank.php&page={page}&query=IS_INDEX:true;EXCHANGE:CNSE{ex}&fields=no,SYMBOL,NAME&'
url_fmt += 'sort=SYMBOL&order=asc&count={count}&type=query'
    one_big_int = 10000  # pick a reasonably large integer
def get_index_from(ex):
url = url_fmt.format_map({'page': 0, 'ex': ex, 'count': one_big_int})
#response = get_response(url, 'get', 'json')
response = get_page_response(url, method='post')
df = pd.DataFrame(response.json()['list'])
return df.loc[:, ['SYMBOL', 'NAME']]
    # query codes (Shenzhen +1, Shanghai +0)
dfs = [get_index_from('SH'), get_index_from('SZ')]
df = pd.concat(dfs)
df.columns = df.columns.str.lower()
df.rename(columns={'symbol': 'code'}, inplace=True)
df.set_index('code', inplace=True, drop=True)
return df
|
4639e94d9412967c0a5403a5e49fc43c8033c40b
| 32,372 |
def merge_list_of_dicts(old, new, key):
"""
Merge a list of dictionary items based on a specific key.
Dictionaries inside the list with a matching key get merged together.
Assumes that a value for the given key is unique and appears only once.
Example:
list1 = [{"name": "one", "data": "stuff"}, {"name": "two", "data": "stuff2"}]
list2 = [{"name": "one", "data": "newstuff"}]
    merge_list_of_dicts(list1, list2, "name") returns:
[{"name": "one", "data": "newstuff"}, {"name": "two", "data": "stuff2"}]
"""
for old_item in reversed(old):
matching_val = old_item[key]
for new_item in new:
if new_item[key] == matching_val:
object_merge(old_item, new_item)
break
else:
new.append(old_item)
return new
|
a56c0b3476ea67d6b77126a34c14005aad345cfa
| 32,373 |
from lxml import etree
from masci_tools.util.schema_dict_util import read_constants, eval_simple_xpath
from masci_tools.util.schema_dict_util import evaluate_text, evaluate_attribute
from masci_tools.util.xml.common_functions import clear_xml
def get_kpoints_data_max4(xmltree, schema_dict, logger=None, convert_to_angstroem=True):
"""
Get the kpoint sets defined in the given fleur xml file.
.. note::
        This function is specific to file versions before and including the
Max4 release of fleur
:param xmltree: etree representing the fleur xml file
:param schema_dict: schema dictionary corresponding to the file version
of the xmltree
:param logger: logger object for logging warnings, errors
    :param convert_to_angstroem: bool, if True the Bravais matrix is converted to Angstroem
:returns: tuple containing the kpoint information
The tuple contains the following entries:
1. :kpoints: list containing the coordinates of the kpoints
2. :weights: list containing the weights of the kpoints
3. :cell: numpy array, bravais matrix of the given system
4. :pbc: list of booleans, determines in which directions periodic boundary conditions are applicable
"""
if isinstance(xmltree, etree._ElementTree):
xmltree, _ = clear_xml(xmltree)
root = xmltree.getroot()
else:
root = xmltree
constants = read_constants(root, schema_dict, logger=logger)
cell, pbc = get_cell(root, schema_dict, logger=logger, convert_to_angstroem=convert_to_angstroem)
kpointlist = eval_simple_xpath(root,
schema_dict,
'kPointList',
list_return=True,
not_contains='altKPoint',
logger=logger)
if len(kpointlist) == 0:
raise ValueError('No Kpoint lists found in the given inp.xml')
kpointlist = kpointlist[0]
kpoints = evaluate_text(kpointlist,
schema_dict,
'kPoint',
constants=constants,
not_contains='altKPoint',
list_return=True,
logger=logger)
weights = evaluate_attribute(kpointlist,
schema_dict,
'weight',
constants=constants,
not_contains='altKPoint',
list_return=True,
logger=logger)
return kpoints, weights, cell, pbc
|
23001a430e8cb1b2434fce7de67e5249f345806c
| 32,374 |
def contains_badwords(string):
"""
Return whether a string contains bad words
"""
    return any(x in string for x in bad_words)
|
499e338599441e24845a19ba8504a77bd7838d8e
| 32,376 |
def _learn_individual_mixture_weights(n_users, alpha, multinomials, max_iter, tol, val_mat, prior_strength, num_proc):
"""
    Learns the mixing weights for each individual user; uses multiple processes to make it faster.
:param n_users: Int, total number of users.
:param alpha: prior (learned through global weights) for the pi's
:param multinomials: List of components (Arrays of vectors).
:param max_iter: max number of em iterations
:param tol: convergence threshold
:param val_mat: validation data to optimize on. U x C matrix.
:param prior_strength: float, how much to increase the strength of the prior.
:param num_proc: number of processes to be used.
:return: 1. Matrix of mixing weights (Users x Components)
2. Event log likelihood for validation data.
"""
lls = np.ones(n_users)
pis = np.tile(alpha, n_users).reshape(n_users, len(multinomials))
pis = normalize(pis, 'l1', axis=1) # pi's for each user.
log.info('Doing individual weights with %d proc' % num_proc)
mix_weights = []
alpha *= prior_strength
if any(alpha < 1):
alpha += 1
# multi-process. Essentially calls _mp_learn_user_mix for a set of users.
batch_size = int(np.ceil(1. * n_users / num_proc)) # how many users per process
args = (alpha, multinomials, val_mat, max_iter, tol)
uids = range(n_users)
queue = Queue()
num_eof = 0
proc_pool = []
# set-up the processes
for i in range(num_proc):
p_uids = uids[i * batch_size:(i + 1) * batch_size] # define which users this process will handle.
if len(p_uids) == 0:
break
proc = Process(target=_mp_learn_user_mix, args=(queue, p_uids, args))
proc_pool.append(proc)
# start the processes
[proc.start() for proc in proc_pool]
# collect end tokens
while num_eof < len(proc_pool):
resp = queue.get()
if type(resp) == str:
num_eof += 1
else:
mix_weights.append(resp)
[proc.join() for proc in proc_pool]
queue.close()
# end multi-process
for id, u_mix_weights, u_ll in mix_weights:
pis[id] = np.array(u_mix_weights)
lls[id] = u_ll
mask = np.where(lls != 1)
lls = lls[mask] * np.squeeze(np.array(val_mat.sum(axis=1)))[mask]
event_ll = np.sum(lls) / np.sum(val_mat)
return pis, event_ll
|
7ee9020685ec8fc0538ce4695fcefedc6280d55e
| 32,379 |
def banana(cls):
"""
A decorator for a class that adds the ability to create Permissions and Handlers
from their Checks.
"""
cls.__checks = set()
# Basically tell checks that we are the class, not a medium to pass things through
cls.__banana = True
cls_annotations = cls.__dict__.get("__annotations__", {})
for name in cls_annotations.keys():
check = get_check(cls, name)
if check is None:
continue
cls.__checks.add(name)
setattr(cls, name, check)
for base in cls.__bases__:
        if base is Permission or base is PermissionHandler:
setattr(cls, "from_checks", classmethod(from_checks(cls)))
break
return cls
|
6392d5a7e029dca556c92f4d7546fb6f76078858
| 32,380 |
def residual_v2_conv(
kernel_size: int,
stride: int,
depth: int,
is_deconv: bool,
add_max_pool: bool,
add_bias: bool,
is_train: bool,
input_op: tf.Tensor,
name: str = None,
) -> tf.Tensor:
"""Creates a residual convolution in the style of He et al. April 2016.
This is the second version of their proposed residual structure, where the
order of operations is batch_norm -> activation -> convolution.
We use RELU and TANH activations, and we optionally add a max pool.
Args:
kernel_size: The size of the kernel.
stride: The stride of the convolution.
depth: The depth of the reduction layer.
is_deconv: Whether this is a deconvolution.
add_max_pool: Whether to add a parallel max pool with the same parameters
as the convolution.
add_bias: Whether to add bias to the convolution.
is_train: Whether we're training this graph.
input_op: The input.
name: An optional op name.
Returns:
The tensor output of residual convolution.
"""
with tf.variable_scope(name, 'residual_v2_conv', [input_op]) as scope:
[_, num_rows, num_columns, _] = input_op.shape.as_list()
if not is_deconv:
assert num_rows >= kernel_size
assert num_columns >= kernel_size
# Make sure we can do a valid convolution.
assert (num_rows - kernel_size) % stride == 0
assert (num_columns - kernel_size) % stride == 0
# In the future it may be necessary to set epsilon to a larger value
# than the default here.
bn_op = slim.batch_norm(input_op, is_training=is_train, scale=True)
concat_op = tf.concat([tf.nn.relu(bn_op), tf.nn.tanh(bn_op)], 3)
with slim.arg_scope(
[slim.conv2d, slim.conv2d_transpose, slim.max_pool2d],
kernel_size=kernel_size,
stride=stride,
padding='VALID'):
if add_bias:
biases_initializer = tf.zeros_initializer()
else:
biases_initializer = None
with slim.arg_scope(
[slim.conv2d, slim.conv2d_transpose],
inputs=concat_op,
num_outputs=depth,
activation_fn=None,
biases_initializer=biases_initializer):
if is_deconv:
conv_op = slim.conv2d_transpose()
else:
conv_op = slim.conv2d()
if add_max_pool:
assert not is_deconv
assert kernel_size > 1
return tf.concat(
[conv_op, slim.max_pool2d(input_op)], 3, name=scope.name)
else:
return tf.identity(conv_op, name=scope.name)
|
63d7589caed876ed9b0d3617442e6d676555c791
| 32,381 |
def dump_cups_with_first(cups: list[int]) -> str:
"""Dump list of cups with highlighting the first one
:param cups: list of digits
:return: list of cups in string format
"""
dump_cup = lambda i, cup: f'({cup})' if i == 0 else f' {cup} '
ret_val = ''.join([dump_cup(i, cup) for i, cup in enumerate(cups)])
return ret_val
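# Hedged usage sketch (digits are made up):
# dump_cups_with_first([3, 8, 9, 1]) -> '(3) 8  9  1 '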
|
5fe4111f09044c6afc0fbd0870c2b5d548bd3c1a
| 32,382 |
def init(strKernel, iKernelPar=1, iALDth=1e-4, iMaxDict=1e3):
"""
Function initializes krls dictionary. |br|
Args:
strKernel (string): Type of the kernel
iKernelPar (float): Kernel parameter [default = 1]
iALDth (float): ALD threshold [default = 1e-4]
iMaxDict (int): Max size of the dictionary [default = 1e3]
Returns:
dAldKRLS (dictionary): Python dictionary which contains all the data of the current KRLS algorithm.
Fields in the output dictionary:
        - a. **iALDth** (*float*): ALD threshold
        - b. **iMaxDt** (*int*): Max size of the dictionary
- c. **strKernel** (*string*): Type of the kernel
- d. **iKernelPar** (*float*): Kernel parameter
- e. **bInit** (*int*): Initialization flag = 1. This flag is cleared with a first call to the 'train' function.
"""
dAldKRLS = {} # Initialize dictionary with data for aldkrls algorithm
# Store all the parameters in the dictionary
    dAldKRLS['iALDth'] = iALDth        # ALD threshold
    dAldKRLS['iMaxDt'] = iMaxDict      # Maximum size of the dictionary
dAldKRLS['strKernel'] = strKernel # Type of the kernel
dAldKRLS['iKernelPar'] = iKernelPar # Kernel parameter
dAldKRLS['bInit'] = 0 # Clear 'Initialization done' flag
return dAldKRLS
|
652e0c498b4341e74bcd30ca7119163345c7f2cc
| 32,383 |
def prune_scope():
"""Provides a scope in which Pruned layers and models can be deserialized.
For TF 2.X: this is not needed for SavedModel or TF checkpoints, which are
the recommended serialization formats.
For TF 1.X: if a tf.keras h5 model or layer has been pruned, it needs to be
within this
scope to be successfully deserialized. This is not needed for loading just
keras weights.
Returns:
Object of type `CustomObjectScope` with pruning objects included.
Example:
```python
pruned_model = prune_low_magnitude(model, **self.params)
keras.models.save_model(pruned_model, keras_file)
with prune_scope():
loaded_model = keras.models.load_model(keras_file)
```
"""
return tf.keras.utils.custom_object_scope(
{'PruneLowMagnitude': pruning_wrapper.PruneLowMagnitude})
|
64569464611640ac5c13cbb0bf41c3f7ba16424a
| 32,384 |
import re
def is_valid_br_cnpj(cnpj):
"""
Accept an string parameter cnpj and
Check if is brazilian CNPJ valid.
Return True or False
"""
    # Strip dots, hyphens and slashes
    cnpj = re.sub(r'[./-]', '', str(cnpj))
# if does not contain numerical characters
if not re.match(r'^\d{14}$', cnpj) or cnpj in _INVALID_CNPJ:
return False
# checks if all digits are equal
for i in range(10):
text = str(i) * 14
if text == cnpj:
return False
    # first checksum
multi = 5
result = 0
for i in cnpj[:12]:
result += int(i) * multi
multi -= 1
if multi < 2:
multi = 9
remainder = result % 11
if remainder < 2:
checksum1 = 0
else:
checksum1 = 11 - remainder
assemble_cnpj = cnpj[:12] + str(checksum1)
    # second checksum
multi = 6
result = 0
for i in assemble_cnpj:
result += int(i) * multi
multi -= 1
if multi < 2:
multi = 9
remainder = result % 11
if remainder < 2:
checksum2 = 0
else:
checksum2 = 11 - remainder
assemble_cnpj += str(checksum2)
    return cnpj == assemble_cnpj
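# Hedged usage sketch with the commonly used example CNPJ (valid check digits),
# assuming it is not listed in the module-level _INVALID_CNPJ blacklist:
# is_valid_br_cnpj('11.222.333/0001-81') -> True
# is_valid_br_cnpj('11.222.333/0001-80') -> False (wrong check digit)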
|
f41f9814cfef7d75e287834ac2a5514d03cd8fdb
| 32,386 |
def get_supported_locales():
"""
Returns a list of Locale objects that the Web Interfaces supports
"""
locales = BABEL.list_translations()
locales.append(Locale("en"))
sorted_locales = sorted(locales, key=lambda x: x.language)
return sorted_locales
|
3068889d0c7888b23f207d3397e0aec58418cef2
| 32,387 |
from typing import Optional
from pathlib import Path
import site
def get_pipx_user_bin_path() -> Optional[Path]:
"""Returns None if pipx is not installed using `pip --user`
Otherwise returns parent dir of pipx binary
"""
# NOTE: using this method to detect pip user-installed pipx will return
# None if pipx was installed as editable using `pip install --user -e`
# https://docs.python.org/3/install/index.html#inst-alt-install-user
# Linux + Mac:
# scripts in <userbase>/bin
# Windows:
# scripts in <userbase>/Python<XY>/Scripts
# modules in <userbase>/Python<XY>/site-packages
pipx_bin_path = None
script_path = Path(__file__).resolve()
userbase_path = Path(site.getuserbase()).resolve()
try:
_ = script_path.relative_to(userbase_path)
except ValueError:
pip_user_installed = False
else:
pip_user_installed = True
if pip_user_installed:
test_paths = (
userbase_path / "bin" / "pipx",
Path(site.getusersitepackages()).resolve().parent / "Scripts" / "pipx.exe",
)
for test_path in test_paths:
if test_path.exists():
pipx_bin_path = test_path.parent
break
return pipx_bin_path
|
ccf9b886af41b73c7e2060704d45781938d8e811
| 32,388 |
def _normalize_int_key(key, length, axis_name=None):
"""
Normalizes an integer signal key.
Leaves a nonnegative key as it is, but converts a negative key to
the equivalent nonnegative one.
"""
axis_text = '' if axis_name is None else axis_name + ' '
if key < -length or key >= length:
raise IndexError(
f'Index {key} is out of bounds for signal {axis_text}axis with '
f'length {length}.')
return key if key >= 0 else key + length
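# Hedged usage sketch: negative keys wrap around, out-of-range keys raise.
# _normalize_int_key(-1, 5) -> 4
# _normalize_int_key(2, 5)  -> 2
# _normalize_int_key(5, 5)  -> raises IndexError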
|
9b58b09e70c20c9ac5ee0be059333dd5058802ef
| 32,389 |
def create_interview_in_jobma(interview):
"""
Create a new interview on Jobma
Args:
interview (Interview): An interview object
"""
client = get_jobma_client()
url = urljoin(settings.JOBMA_BASE_URL, "interviews")
job = interview.job
first_name, last_name = get_first_and_last_names(interview.applicant)
response = client.post(
url,
json={
"interview_template_id": str(job.interview_template_id),
"job_id": str(job.job_id),
"job_code": job.job_code,
"job_title": job.job_title,
"callback_url": urljoin(
settings.SITE_BASE_URL,
reverse("jobma-webhook", kwargs={"pk": interview.id}),
),
"candidate": {
"first_name": first_name,
"last_name": last_name,
"phone": "",
"email": interview.applicant.email,
},
},
)
response.raise_for_status()
result = response.json()
interview_link = result.get("interview_link")
if interview_link is not None:
interview.interview_url = interview_link
else:
log.error("Interview link not found in payload - %s", result)
interview_token = result.get("interview_token")
if interview_token is not None:
interview.interview_token = interview_token
interview.save_and_log(None)
return interview_link
|
36834c0e6557627a52a179b9d8529d5693cc92cb
| 32,390 |
def get_solutions(N, K, W_hat, x):
"""
Get valid indices of x that sum up to S
"""
# Scalar form of y = W_hat * x
S = scalar(W_hat @ x)
# print(f'Scalar value = {S}')
solutions = []
for partition in sum_to_S(S, K):
if len(set(partition)) == len(partition) and max(partition) < N:
partition = sorted(partition)
if partition not in solutions:
solutions.append(partition)
x_vectors = []
for sol in solutions:
tmp = np.zeros(N)
tmp[sol] = 1
x_vectors.append(tmp)
return x_vectors
|
6de6b0f77070b40f6e0028009f9b96264f6daa64
| 32,391 |
def get_actual_order(geometry, order):
"""
Return the actual integration order for given geometry.
Parameters
----------
geometry : str
The geometry key describing the integration domain,
        see the keys of `quadrature_tables`.
    order : int
        The requested integration order.
Returns
-------
order : int
If `order` is in quadrature tables it is this
value. Otherwise it is the closest higher order. If no
higher order is available, a warning is printed and the
highest available order is used.
"""
table = quadrature_tables[geometry]
if order not in table:
orders = list(table.keys())
ii = nm.searchsorted(orders, order)
if ii >= len(orders):
omax = max(orders)
output(_msg1 % (order, geometry))
output(_msg2 % omax)
order = omax
else:
order = orders[ii]
return order
|
876c9a70418de7d4768ab0234abb86bf676884c0
| 32,392 |
def getcwd(*args,**kw):
"""getcwd() -> path
Return a unicode string representing the current working directory."""
return __BRYTHON__.brython_path
|
1d0e9491a2a35b326ec87314887fb1dede23c927
| 32,393 |
def sample_graph(B, logvars, n_samp):
"""
Generate data given B matrix, variances
"""
p = len(logvars)
N = np.random.normal(0, np.sqrt(np.exp(logvars)), size=(n_samp, p))
return (np.linalg.inv(np.eye(p) - B.T)@N.T).T
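# Hedged usage sketch (made-up 2-node linear SEM, x2 = 0.8 * x1 + noise),
# assuming numpy is imported as np at module level:
# B = np.array([[0.0, 0.8], [0.0, 0.0]])
# X = sample_graph(B, logvars=np.zeros(2), n_samp=100)   # X.shape == (100, 2)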
|
2e798035bcb807e670ff9b9f4a39236ffe6b1157
| 32,394 |
def rotate_ne_rt(n, e, ba):
"""
Rotates horizontal components of a seismogram.
The North- and East-Component of a seismogram will be rotated in Radial
and Transversal Component. The angle is given as the back-azimuth, that is
defined as the angle measured between the vector pointing from the station
to the source and the vector pointing from the station to the North.
:type n: :class:`~numpy.ndarray`
:param n: Data of the North component of the seismogram.
:type e: :class:`~numpy.ndarray`
:param e: Data of the East component of the seismogram.
:type ba: float
:param ba: The back azimuth from station to source in degrees.
:return: Radial and Transversal component of seismogram.
"""
if len(n) != len(e):
raise TypeError("North and East component have different length.")
if ba < 0 or ba > 360:
raise ValueError("Back Azimuth should be between 0 and 360 degrees.")
ba = radians(ba)
r = - e * sin(ba) - n * cos(ba)
t = - e * cos(ba) + n * sin(ba)
return r, t
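# Sanity check of the sign convention (made-up data): for ba = 0 (source due
# north of the station) the rotation reduces to r = -n and t = -e, e.g.
# rotate_ne_rt(np.array([1.0, 2.0]), np.array([3.0, 4.0]), 0.0)
# -> (array([-1., -2.]), array([-3., -4.]))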
|
c374ad762e122b519698bd1c199e2aa773e295cb
| 32,395 |
def pwgen(pw_len=16):
""" Generate a random password with the given length.
    The allowed characters do not include "I" or "O" or letters and
    digits that look similar -- just to avoid confusion.
"""
return get_random_string(pw_len, 'abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789')
|
747bb049ad3cca47d3898f0ea6b52108938aa2b2
| 32,396 |
import requests
from bs4 import BeautifulSoup
def get_property_data(sch=""):
"""Get property id and return dictionary with data
Attributes:
sch: property id
"""
property_url = "http://ats.jeffco.us/ats/displaygeneral.do?sch={0}".format(sch)
r = requests.get(property_url)
property_page = BeautifulSoup(r.text, "lxml")
property_dict = {}
# Get data from the single data fields
data_cells = property_page.find_all("td")
for i, data_cell in enumerate(data_cells):
try:
cell_text = data_cell.text.strip()
if "PIN/Schedule" in cell_text:
property_dict["PIN"] = ":".join(cell_text.split(":")[1:]).strip()
elif "status:" in cell_text:
property_dict["Status"] = ":".join(cell_text.split(":")[1:]).strip()
elif "AIN/Parcel ID:" in cell_text:
property_dict["AIN"] = ":".join(cell_text.split(":")[1:]).strip()
elif "Property Type:" in cell_text:
property_dict["property_type"] = ":".join(cell_text.split(":")[1:]).strip()
elif "Neighborhood:" in cell_text:
property_dict["neighborhood"] = " ".join(":".join(cell_text.split(":")[1:]).strip().split())
elif "Subdivision Name:" in cell_text:
property_dict["subdivision_name"] = ":".join(cell_text.split(":")[1:]).strip()
elif "Adjusted Year Built:" in cell_text:
property_dict["adjusted_year_built"] = ":".join(cell_text.split(":")[1:]).strip()
elif "Year Built:" in cell_text:
property_dict["year_built"] = ":".join(cell_text.split(":")[1:]).strip()
except (AttributeError, IndexError):
continue
# Define data from tables
data_tables = property_page.find_all("table")
for data_table in data_tables:
try:
table_header = data_table.find("tr", class_="tableheaders").text
if "Owner Name(s)" in table_header:
property_dict["owners"] = parse_one_column_table(data_table)
elif "Assessor Parcel Maps Associated" in table_header:
property_dict["Assessor Parcel Maps Associated with Schedule"] = parse_one_column_table(data_table)
elif "Land Characteristics" in table_header:
property_dict["land_characteristics"] = parse_one_column_table(data_table)
elif (
"Block" in table_header and
"Lot" in table_header and
"Key" in table_header
):
property_dict["property_description"] = parse_many_columns_table(data_table, name="property_description")
elif (
"Item" in table_header and
"Quality" in table_header
):
property_dict["property_inventory_1"] = parse_many_columns_table(data_table)
elif (
"Areas" in table_header and
"Quality" in table_header
):
property_dict["property_inventory_2"] = parse_many_columns_table(data_table)
elif (
"Adjustment Code" in table_header and
"Adjustment SqFt" in table_header
):
property_dict["property_inventory_3"] = parse_many_columns_table(data_table)
elif (
"Sale Date" in table_header and
"Sale Amount" in table_header
):
property_dict["sales_history"] = parse_many_columns_table(data_table)
elif (
"Payable" in table_header and not data_table.table
):
property_dict["tax_information"] = parse_many_columns_table(data_table, name="tax_info")
elif (
"Mill Levy" in table_header and not data_table.table
):
property_dict["mill_levy_information"] = parse_many_columns_table(data_table, name="mill_levy_information")
except AttributeError:
pass
if "Property Address:" in data_table.text and not data_table.table:
address_data = parse_address_table(data_table)
property_dict["property_address"] = address_data[0]
property_dict["mailing_address"] = address_data[1]
return property_dict
|
d7a0f462340c75d14f00a1712923988b415258fb
| 32,398 |
def tcl_delta_remote(curef):
"""
Prepare remote version for delta scanning.
:param curef: PRD of the phone variant to check.
:type curef: str
"""
remotedict = networkutilstcl.remote_prd_info()
fvver = remotedict.get(curef, "AAA000")
if fvver == "AAA000":
print("NO REMOTE VERSION FOUND!")
raise SystemExit
return fvver
|
65d1aeb25ce58c066465c3b7eb3e560a54224ba7
| 32,400 |
from typing import Iterable
def prodi(items: Iterable[float]) -> float:
"""Imperative product
>>> prodi( [1,2,3,4,5,6,7] )
5040
"""
p: float = 1
for n in items:
p *= n
return p
|
3b8e52f40a760939d5b291ae97c4d7134a5ab450
| 32,401 |
def transformer_prepare_encoder(inputs, target_space, hparams):
"""Prepare one shard of the model for the encoder.
Args:
inputs: a Tensor.
target_space: a Tensor.
hparams: run hyperparameters
Returns:
encoder_input: a Tensor, bottom of encoder stack
encoder_self_attention_bias: a Tensor, containing large negative values
      to implement masked attention and possibly biases for diagonal
alignments
encoder_padding: a Tensor
"""
# Flatten inputs.
ishape_static = inputs.shape.as_list()
encoder_input = inputs
encoder_padding = common_attention.embedding_to_padding(encoder_input)
encoder_self_attention_bias = common_attention.attention_bias_ignore_padding(
encoder_padding)
# Append target_space_id embedding to inputs.
emb_target_space = common_layers.embedding(
target_space, 32, ishape_static[-1], name="target_space_embedding")
emb_target_space = tf.reshape(emb_target_space, [1, 1, -1])
encoder_input += emb_target_space
if hparams.pos == "timing":
encoder_input = common_attention.add_timing_signal_1d(encoder_input)
return (encoder_input, encoder_self_attention_bias, encoder_padding)
|
af9c5e2ab8fe3508722af822f19671461e92e62d
| 32,402 |
from typing import Tuple
def get_bottom_left_coords(
text_width: int,
text_height: int,
text_x: int,
text_y: int,
) -> Tuple[TextOrg, BoxCoords]:
"""Get coordinates for text and background in bottom left corner.
Args:
text_width: Width of the text to be drawn.
text_height: Height of the text to be drawn.
text_x: X coordinate of the bottom-left corner of the text.
text_y: Y coordinate of the bottom-left corner of the text.
Returns:
A tuple consisting of tuples for text point and text box coordinates.
"""
text_offset_x = (
text_x +
config.settings.annotation_padding +
config.settings.annotation_margin
)
text_offset_y = (
text_y -
config.settings.annotation_padding -
config.settings.annotation_margin
)
box_coords = (
(
text_offset_x - config.settings.annotation_padding,
text_offset_y + config.settings.annotation_padding,
),
(
text_offset_x + text_width + config.settings.annotation_padding,
text_offset_y - text_height - config.settings.annotation_padding,
),
)
text_org = (text_offset_x, text_offset_y)
return AnnotationCoords(text_org, box_coords)
|
8cf1f88f98990bb727de86152ea5239b725c0fbc
| 32,403 |
def read_gcs_zarr(zarr_url, token='/opt/gcsfuse_tokens/impactlab-data.json', check=False):
"""
takes in a GCSFS zarr url, bucket token, and returns a dataset
Note that you will need to have the proper bucket authentication.
"""
fs = gcsfs.GCSFileSystem(token=token)
store_path = fs.get_mapper(zarr_url, check=check)
ds = xr.open_zarr(store_path)
return ds
|
f6ac1e149639afbc10af052a288eb6536a43a13a
| 32,404 |
from itertools import accumulate
import operator
def bor(*args: int) -> int:
"""Bitwise or.
Example:
bor(0x01, 0x10) == 0x01 | 0x10
Returns:
        int: Bitwise OR of all inputs.
"""
return list(accumulate(args, operator.or_))[-1]
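# Hedged usage sketch:
# bor(0x01, 0x10, 0x100) -> 0x111 (i.e. 273)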
|
6e40612b0117fef5584857c947910fa4a0fa865f
| 32,405 |
from Scikit.ML.DocHelperMlExt import MamlHelper
def mlnet_components_kinds():
"""
Retrieves all kinds.
"""
kinds = list(MamlHelper.GetAllKinds())
kinds += ["argument", "command"]
kinds = list(set(kinds))
titles = {
'anomalydetectortrainer': 'Anomaly Detection',
'binaryclassifiertrainer': 'Binary Classification',
'clusteringtrainer': 'Clustering',
'dataloader': 'Data Loader',
'datasaver': 'Data Saver',
'datascorer': 'Scoring',
'datatransform': 'Transforms (all)',
'ensembledataselector': 'Data Selection',
'evaluator': 'Evaluation',
'multiclassclassifiertrainer': 'Multiclass Classification',
'ngramextractorfactory': 'N-Grams',
'rankertrainer': 'Ranking',
'regressortrainer': 'Regression',
'tokenizetransform': 'Tokenization',
'argument': 'Arguments',
'command': 'Commands',
}
return {k: titles[k] for k in kinds if k in titles}
|
f5a365f2054a263786ca17a96d58d9f39c7061fe
| 32,406 |
def hexString(s):
"""
    Output the bytes of s in hex
s -- string
return -- string with hex value
"""
return ":".join("{:02x}".format(ord(c)) for c in s)
|
22c1e94f0d54ca3d430e0342aa5b714f28a5815b
| 32,410 |
def hydrate_board_from_model(a, radius, rect_width):
"""
:type a: ndarray
    :type radius: int
    :type rect_width: int
    :return: Board
"""
b = Board(radius)
for cellId in b.cells:
thid = get_thid_from_cellId(cellId, rect_width)
value = a[thid.y][thid.x]
b.change_ownership(cellId, get_player_name_from_resource(value), int(abs(value)))
return b
|
d7486e43beb2676aed32da627d03f018b4b91d65
| 32,411 |
from pathlib import Path
def tree_walk():
"""Walk the source folder using pathlib. Populate 3 dicts, a folder dict,
a file dict, and a stats dict.
- Returns:
- [dict]: k: folders; v: size
- [dict]: k: files; v: size
- [dict]:
'file_size'
'num_dirs'
'num_files'
"""
try:
# ~~~ # -variables-
walk_dirs_dict, walk_files_dict = dd(list), dd(list)
stat_dict = {'file_size': 0, 'num_dirs': 0, 'num_files': 0}
# create exdir and exfile lists
if args.exdir:
exdir = args.exdir.split(',')
if args.exfile:
exfile = args.exfile.split(',')
p = Path(args.source)
# ~~~ # -rglob-
for item in p.rglob('*'):
if item.is_dir():
# add folders if no folder exclusions
if not args.exdir:
walk_dirs_dict[item] = item.stat().st_size
stat_dict['num_dirs'] += 1
                else:
                    # add folders only if no exclusion string appears in the folder path
                    if not any(z in str(item) for z in exdir):
                        walk_dirs_dict[item] = item.stat().st_size
                        stat_dict['num_dirs'] += 1
else:
# add files if no file exclusions
if not args.exfile:
walk_files_dict[item] = item.stat().st_size
stat_dict['num_files'] += 1
stat_dict['file_size'] += item.stat().st_size
                else:
                    # add files only if no exclusion string appears in the file path
                    if not any(z in str(item) for z in exfile):
                        walk_files_dict[item] = item.stat().st_size
                        stat_dict['num_files'] += 1
                        stat_dict['file_size'] += item.stat().st_size
except OSError as e:
bp([f'tree walk failure: {args.source}\n{e}', Ct.RED], err=2)
return walk_dirs_dict, walk_files_dict, stat_dict
|
e7edf9897560ee5d0ddab5344e0993e4185e6009
| 32,412 |
def handle_response(response, content_type, file_path=None):
"""handle response. Extract, transform and emit/write to file"""
if content_type == "application/json":
if file_path is None:
return response.json()
else:
save_json(response.json(), file_path)
elif content_type == "image/jpeg":
if file_path is None:
return response._content
else:
save_bytes_as_png(response._content, file_path)
elif content_type == "text/csv":
if file_path is None:
return response.text
else:
save_csv(response.text, file_path)
|
43291fdf367f27ee3c982235518d6bf28d600691
| 32,413 |
import random
def randomCaptchaText(char_set=CAPTCHA_LIST, captcha_size=CAPTCHA_LENGTH):
"""
    Generate a random string of fixed length
    :param char_set: list of candidate characters
    :param captcha_size: length of the string
    :return: string
"""
captcha_text = [random.choice(char_set) for _ in range(captcha_size)]
return ''.join(captcha_text)
|
ee426c26051e720636659cd013617abce2f77a5e
| 32,415 |
def integral_total(Nstrips):
"""
The total integral.
"""
return integral_4(Nstrips) + integral_1(Nstrips)
|
cc2468c69a3e6c98ee139125afc2f4a571cc588b
| 32,416 |
def calculate_amplitude(dem, Template, scale, age, angle):
"""Calculate amplitude and SNR of features using a template
Parameters
----------
dem : DEMGrid
Grid object of elevation data
Template : WindowedTemplate
Class representing template function
scale : float
Scale of template function in DEM cell units
age : float
Age parameter for template function
angle : float
Orientation of template in radians
Returns
-------
amp : np.array
2-D array of amplitudes for each DEM pixel
snr : np.array
2-D array of signal-to-noise ratios for each DEM pixel
"""
ny, nx = dem._griddata.shape
de = dem._georef_info.dx
t = Template(scale, age, angle, nx, ny, de)
template = t.template()
curv = dem._calculate_directional_laplacian(angle)
amp, age, angle, snr = match_template(curv, template)
mask = t.get_window_limits()
amp[mask] = 0
snr[mask] = 0
return amp, snr
|
b286ed97952667052a8ecfacb152b70a7a1be2ba
| 32,417 |
from typing import OrderedDict
def number_limit_sub_validator(entity_config: OrderedDict) -> OrderedDict:
"""Validate a number entity configurations dependent on configured value type."""
value_type = entity_config[CONF_TYPE]
min_config: float | None = entity_config.get(NumberSchema.CONF_MIN)
max_config: float | None = entity_config.get(NumberSchema.CONF_MAX)
step_config: float | None = entity_config.get(NumberSchema.CONF_STEP)
dpt_class = DPTNumeric.parse_transcoder(value_type)
if dpt_class is None:
raise vol.Invalid(f"'type: {value_type}' is not a valid numeric sensor type.")
    # Infinity is not supported by the Home Assistant frontend so user defined
    # config is required if the xknx DPTNumeric subclass defines it as a limit.
if min_config is None and dpt_class.value_min == float("-inf"):
raise vol.Invalid(f"'min' key required for value type '{value_type}'")
if min_config is not None and min_config < dpt_class.value_min:
raise vol.Invalid(
f"'min: {min_config}' undercuts possible minimum"
f" of value type '{value_type}': {dpt_class.value_min}"
)
if max_config is None and dpt_class.value_max == float("inf"):
raise vol.Invalid(f"'max' key required for value type '{value_type}'")
if max_config is not None and max_config > dpt_class.value_max:
raise vol.Invalid(
f"'max: {max_config}' exceeds possible maximum"
f" of value type '{value_type}': {dpt_class.value_max}"
)
if step_config is not None and step_config < dpt_class.resolution:
raise vol.Invalid(
f"'step: {step_config}' undercuts possible minimum step"
f" of value type '{value_type}': {dpt_class.resolution}"
)
return entity_config
|
96c33af5e3764cc6cfe0f355de216945b0ab3920
| 32,419 |
from typing import Tuple
from typing import Union
def patch_2D_aggregator(
patches: np.ndarray,
orig_shape: Tuple[int],
patch_loc: np.array,
count_ndarray: Union[np.array, None] = None,
) -> np.ndarray:
"""
Aggregate patches to a whole 2D image.
Args:
patches: shape is [patch_num, Channel, patch_size, patch_size]
orig_shape: the image shape after aggregating
patch_loc: the starting position where each patch in the original images
count_ndarray: using to divide the aggregating image to average the overlapped regions
"""
NUM_PATCH = 4
dim_stack = []
for dim in range(patches.shape[1]):
orig = np.zeros(orig_shape)
for idx in range(NUM_PATCH):
orig[
patch_loc[idx][0] : patch_loc[idx][0] + PATCH_SIZE,
patch_loc[idx][1] : patch_loc[idx][1] + PATCH_SIZE,
] += patches[idx, dim, :, :]
dim_stack.append(orig)
orig = np.stack(dim_stack)
if count_ndarray is not None:
orig = np.divide(orig, count_ndarray)
return orig.squeeze()
|
3fd7d98c7b792cb3df646045ad32d0cde2c94e56
| 32,420 |
from typing import Callable
def vmap_grad(forward_fn: Callable, params: PyTree, samples: Array) -> PyTree:
"""
compute the jacobian of forward_fn(params, samples) w.r.t params
as a pytree using vmapped gradients for efficiency
"""
complex_output = nkjax.is_complex(jax.eval_shape(forward_fn, params, samples))
real_params = not nkjax.tree_leaf_iscomplex(params)
if real_params and complex_output:
return vmap_grad_rc(forward_fn, params, samples)
else:
return vmap_grad_rr_cc(forward_fn, params, samples)
|
411a3ce1a38c31fef9422ed740d1a7d0a4cf887b
| 32,421 |
def random_crop_list(images, size, pad_size=0, order="CHW", boxes=None):
"""
Perform random crop on a list of images.
Args:
images (list): list of images to perform random crop.
size (int): size to crop.
pad_size (int): padding size.
order (string): order of the 'height', 'width' and 'channel'.
boxes (list): optional. Corresponding boxes to images.
Dimension is 'num boxes' x 4.
Returns:
cropped (ndarray): the cropped list of images with dimension of
'height' x 'width' x 'channel'.
boxes (list): optional. Corresponding boxes to images. Dimension
is 'num boxes' x 4.
"""
assert order in ["CHW", "HWC"], "order {} is not supported".format(order)
# explicitly dealing processing per image order to avoid flipping images.
if pad_size > 0:
images = [
pad_image(pad_size=pad_size, image=image, order=order)
for image in images
]
# image format should be CHW.
if order == "CHW":
if images[0].shape[1] == size and images[0].shape[2] == size:
return images, boxes
height = images[0].shape[1]
width = images[0].shape[2]
y_offset = 0
if height > size:
y_offset = int(np.random.randint(0, height - size))
x_offset = 0
if width > size:
x_offset = int(np.random.randint(0, width - size))
cropped = [
image[:, y_offset: y_offset + size, x_offset: x_offset + size]
for image in images
]
assert cropped[0].shape[1] == size, "Image not cropped properly"
assert cropped[0].shape[2] == size, "Image not cropped properly"
elif order == "HWC":
        if images[0].shape[0] == size and images[0].shape[1] == size:
return images, boxes
height = images[0].shape[0]
width = images[0].shape[1]
y_offset = 0
if height > size:
y_offset = int(np.random.randint(0, height - size))
x_offset = 0
if width > size:
x_offset = int(np.random.randint(0, width - size))
cropped = [
image[y_offset: y_offset + size, x_offset: x_offset + size, :]
for image in images
]
        assert cropped[0].shape[0] == size, "Image not cropped properly"
        assert cropped[0].shape[1] == size, "Image not cropped properly"
else:
raise NotImplementedError("Unknown order {}".format(order))
if boxes is not None:
boxes = [crop_boxes(proposal, x_offset, y_offset) for proposal in boxes]
return cropped, boxes
|
e4e7933a02c356c509bbd3d7dbc54814ec1f7bc1
| 32,422 |
from typing import List
def normalize_resource_paths(resource_paths: List[str]) -> List[str]:
"""
Takes a list of resource relative paths and normalizes to lowercase
and with the "ed-fi" namespace prefix removed.
Parameters
----------
resource_paths : List[str]
The list of resource relative paths
Returns
-------
List[str]
A list of normalized resource relative paths.
For example: ["studentschoolassociations", "tpdm/candidates"]
"""
return list(
map(
lambda r: r.removeprefix("/").removeprefix("ed-fi/").lower(),
resource_paths,
)
)
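# Hedged usage sketch (made-up resource paths):
# normalize_resource_paths(["/ed-fi/StudentSchoolAssociations", "/tpdm/Candidates"])
# -> ['studentschoolassociations', 'tpdm/candidates']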
|
ec7e5020ae180cbbdc5b35519106c0cd0697a252
| 32,423 |
def GetDegenerateSites(seq1, seq2,
degeneracy=4,
position=3):
"""returns two new sequenes containing only degenerate sites.
Only unmutated positions are counted.
"""
new_seq1 = []
new_seq2 = []
for x in range(0, len(seq1), 3):
c1 = seq1[x:x + 3]
c2 = seq2[x:x + 3]
if c1 in GeneticCodeAA and c2 in GeneticCodeAA:
if GeneticCodeAA[c1] == GeneticCodeAA[c2]:
if Degeneracy[c1][position] == degeneracy \
and Degeneracy[c2][position] == degeneracy:
new_seq1.append(c1[position - 1])
new_seq2.append(c2[position - 1])
return "".join(new_seq1), "".join(new_seq2)
|
de9aa02b6ef46cb04e64094b67436922e86e10bb
| 32,424 |
def ransac(a, b, model: str ='rigid', inlier_threshold: float = 1.0, ransac_it: int = 100):
"""Estimates parameters of given model by applying RANSAC on corresponding point sets A and B
(preserves handedness).
:param a: nx4 array of points
:param b: nx4 array of points
:param model: Specify the model for RANSAC. Can be 'translation', 'rigid' or 'affine'
:param inlier_threshold: Specify the inlier threshold in RANSAC process
:param ransac_it: number of ransac iterations
:return: corresponding transformation matrix (None if no transformation was found)
:raise: NotImplementedError for models which are not implemented yet"""
max_ransac_it = ransac_it
num_samples = 0
estimate_transformation = None
assert a.shape == b.shape
if a.shape[1] == 3:
a = np.concatenate((a, np.ones((a.shape[0], 1))), axis=1)
b = np.concatenate((b, np.ones((a.shape[0], 1))), axis=1)
if model == 'translation':
num_samples = 1
estimate_transformation = translation_transformation
elif model == 'rigid':
num_samples = 4
estimate_transformation = rigid_transformation
    elif model == 'affine':
        num_samples = 4
        estimate_transformation = affine_transformation
    else:
        raise NotImplementedError("model '%s' is not implemented" % model)
assert a.shape[0] >= num_samples
best_inlier = 0
best_inlier_idx = []
best_t = None
for _ in range(max_ransac_it):
# random sample data for generating hypothetical inliers
hyp_inliers_idx = np.random.choice(a.shape[0], size=num_samples, replace=False)
hyp_inliers_a = np.array([a[i] for i in hyp_inliers_idx])
hyp_inliers_b = np.array([b[i] for i in hyp_inliers_idx])
# calculate transformation based on hypothetical inliers and selected model
try:
t = estimate_transformation(hyp_inliers_a, hyp_inliers_b)
except AssertionError:
t = np.eye(4)
# calculate consensus set for this transformation
b_ = np.matmul(t, a.T).T
dists = [np.linalg.norm((x - y)[:3]) for x, y in zip(b_, b)]
inlier_idx = [i for i, x in enumerate(dists) if x < inlier_threshold]
# save better consensus set
if len(inlier_idx) > best_inlier:
best_inlier = len(inlier_idx)
best_inlier_idx = inlier_idx
best_t = t
# recalculate transformation with best consensus set
if len(best_inlier_idx) > 0:
consensus_set_a = np.array([a[i] for i in best_inlier_idx])
consensus_set_b = np.array([b[i] for i in best_inlier_idx])
try:
best_t = estimate_transformation(consensus_set_a, consensus_set_b)
except AssertionError:
pass
return best_t, best_inlier_idx
|
bbbf3c7695437ef00f4fc4570033575808d84604
| 32,425 |
from typing import List
from datetime import datetime
def create_telescope_types(session: scoped_session, telescope_types: List, created: datetime):
"""Create a list of TelescopeType objects.
:param session: the SQLAlchemy session.
:param telescope_types: a list of tuples of telescope type id and names.
    :param created: the created datetime in UTC.
:return: a list of TelescopeType objects.
"""
items = []
for type_id, name in telescope_types:
item = TelescopeType(name=name, type_id=type_id, created=created, modified=created)
items.append(item)
session.add(item)
session.commit()
return items
|
011a8f3950fd0f4bdd3809085d74c45ae5756716
| 32,426 |
from functools import reduce
def update(*p):
""" Update dicts given in params with its precessor param dict
in reverse order """
return reduce(lambda x, y: x.update(y) or x,
(p[i] for i in range(len(p)-1,-1,-1)), {})
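# Hedged usage sketch: earlier arguments take precedence over later ones.
# update({'a': 1}, {'a': 2, 'b': 3}) -> {'a': 1, 'b': 3}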
|
de7f5adbe5504dd9b1be2bbe52e14d11e05ae86f
| 32,427 |
def alphabet_to_use(three_letter_code, parity, direction):
"""Return tuple of alphabet to be used for glue in given direction on tile of
given parity.
Note that this refers to the alphabet used for the CANONICAL direction, which
may be the opposite of direction."""
    if parity not in (0, 1):
raise ValueError('parity must be 0 or 1, cannot be %s' % parity)
    if direction not in directions:
raise ValueError('direction must be in %s, cannot be %s' % (directions, direction))
if not three_letter_code:
return ('A','C','G','T')
if (parity == 1 and is_canonical(direction)) or (parity == 0 and not is_canonical(direction)):
return ('A','C','T')
else:
return ('A','G','T')
|
b7bb02d5a5b9d5144ab8a6026600bd16096680aa
| 32,428 |
def get_tipranks_sentiment(collection):
"""
:param collection: "100-most-popular", "upcoming-earnings", "new-on-robinhood", "technology", "oil-and-gas",
"finance", "software-service", "energy", "manufacturing", "consumer-products", "etf", "video-games", "social-media",
"health", "entertainment"
:return: pandas dataframe of collection stocks
"""
url = f'https://robinhood.com/collections/{collection}'
[df] = pd.read_html(url)
symbols = list(df.Symbol.values)
for i, s in enumerate(symbols):
# print("Processing {}".format(s))
url = "https://www.tipranks.com/api/stocks/getNewsSentiments/?ticker={}".format(s)
s2 = pd.read_json(url, orient="index", typ="series")
df2 = pd.DataFrame(s2).T
# print("Processing {}: cols={}".format(s, df2.columns))
if df2.shape[1] > 0:
if len(df2.buzz) > 0:
df.loc[i, 'buzz'] = df2.buzz.iloc[0]['buzz']
if (df2.sentiment.any()):
df.loc[i, 'bullish_pct'] = df2.sentiment.iloc[0]['bullishPercent']
df.loc[i, 'sector_avg_bullish_pct'] = df2.sectorAverageBullishPercent.iloc[0]
df.loc[i, 'score'] = df2.score.iloc[0]
df.loc[i, 'sector_avg_news_score'] = df2.sectorAverageNewsScore.iloc[0]
return df
|
a6a6527314d2610f20de640aa8e17ad9234d5664
| 32,429 |
def lBoundedForward(x, lower):
"""
    Transform from transformed (unconstrained) parameters to physical ones with a lower limit
Args:
x (float): vector of transformed parameters
lower (float): vector with lower limits
Returns:
Float: transformed variables and log Jacobian
"""
return np.exp(x) + lower, x
|
af3b7613f4b08917c835c51c38b6e506f619ab6a
| 32,430 |
import datetime
def date_range(begin_date, end_date):
"""
    :param begin_date: start date, string
    :param end_date: end date, string
    :return: dates: list of dates within the given range, elements are strings
"""
dates = []
dt = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
date = begin_date[:]
while date <= end_date:
dates.append(date)
dt = dt + datetime.timedelta(days=1)
date = dt.strftime("%Y-%m-%d")
return dates
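# Hedged usage sketch:
# date_range("2021-01-30", "2021-02-02")
# -> ['2021-01-30', '2021-01-31', '2021-02-01', '2021-02-02']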
|
e168f291226fa00806992f85b3ac2c89f96b8426
| 32,431 |
import collections
def createNeighDict(rP, lP, b, c):
"""Finds the neighbours nearest to a lost packet in a particular tensor plane
# Arguments
rP: packets received in that tensor plane
        lP: packets lost in that tensor plane
b,c : batch and channel number denoting the tensor plane
# Returns
Dictionary containing the neighbours nearest to the lost packets
"""
insertPos = np.searchsorted(rP, lP)
neighDict = collections.OrderedDict()
if len(rP)==0:
return neighDict
for i in range(len(lP)):
ind = insertPos[i] #position at which lP is to be inserted in rP
if ind==0: #check if insert position is at beginning i.e no top neighbour
k = ((b, -1, c), (b, rP[ind], c))
# k = (tuple((b, -1, c)), tuple((b, rP[ind], c)))
v = np.array([b, lP[i], c])
if k not in neighDict:
neighDict[k] = v
else:
neighDict[k] = np.vstack((neighDict[k], v))
continue
if ind==len(rP): #check if insert position is at the end i.e no bottom neighbour
k = ((b, rP[-1], c), (b, 0, c))
# k = (tuple((b, rP[-1], c)), tuple((b, 0, c)))
v = np.array([b, lP[i], c])
if k not in neighDict:
neighDict[k] = v
else:
neighDict[k] = np.vstack((neighDict[k], v))
continue
k = ((b, rP[ind-1], c), (b, rP[ind], c))
# k = (tuple((b, rP[ind-1], c)), tuple((b, rP[ind], c)))
v = np.array([b, lP[i], c])
        if k not in neighDict:
neighDict[k] = v
else:
neighDict[k] = np.vstack((neighDict[k], v))
return neighDict
|
0b67024dac04678b8cb084eb18312ed8df468d93
| 32,433 |
def get_norm_3d(norm: str, out_channels: int, bn_momentum: float = 0.1) -> nn.Module:
"""Get the specified normalization layer for a 3D model.
Args:
norm (str): one of ``'bn'``, ``'sync_bn'`` ``'in'``, ``'gn'`` or ``'none'``.
out_channels (int): channel number.
bn_momentum (float): the momentum of normalization layers.
Returns:
nn.Module: the normalization layer
"""
assert norm in ["bn", "sync_bn", "gn", "in", "none"], \
"Get unknown normalization layer key {}".format(norm)
if norm == "gn": assert out_channels%8 == 0, "GN requires channels to separable into 8 groups"
norm = {
"bn": nn.BatchNorm3d,
"sync_bn": nn.SyncBatchNorm,
"in": nn.InstanceNorm3d,
"gn": lambda channels: nn.GroupNorm(8, channels),
"none": nn.Identity,
}[norm]
if norm in ["bn", "sync_bn", "in"]:
return norm(out_channels, momentum=bn_momentum)
else:
return norm(out_channels)
|
2def355c8b775512fec9d58a4fa43a0b54734f96
| 32,434 |
import json
def cancel_cheque():
"""取消支票"""
user_id = '96355632'
sn = request.values['sn']
result = pay_client.app_cancel_cheque(user_id, sn, ret_result=True)
return render_template('sample/info.html', title='取消支票结果',
msg=json.dumps({'status_code': result.status_code, 'data': result.data}))
|
6734aa86a1300f678a22b45407a8597255ad0a33
| 32,435 |
def to_relative_engagement(lookup_table, duration, wp_score, lookup_keys=None):
""" Convert watch percentage to relative engagement.
:param lookup_table: duration ~ watch percentage table, in format of dur: [1st percentile, ..., 1000th percentile]
:param duration: target input duration
:param wp_score: target input watch percentage score
:param lookup_keys: pre-computed duration split points, for faster computation
"""
if lookup_keys is None:
lookup_keys = lookup_table['duration']
lookup_keys = np.array(lookup_keys)
if isinstance(wp_score, list):
re_list = []
if isinstance(duration, list):
for d, s in zip(duration, wp_score):
re_list.append(to_relative_engagement(lookup_table, d, s, lookup_keys=lookup_keys))
elif isinstance(duration, int):
for s in wp_score:
re_list.append(to_relative_engagement(lookup_table, duration, s, lookup_keys=lookup_keys))
return re_list
else:
bin_idx = np.sum(lookup_keys < duration)
duration_bin = np.array(lookup_table[bin_idx])
re = np.sum(duration_bin <= wp_score) / 1000
# re = (np.sum(duration_bin < wp_score) + np.sum(duration_bin <= wp_score)) / 2000
return re
|
cfecebe5830a7681417d6fbd14485adc6908cb5d
| 32,436 |
import dmsky.factory
def factory(ptype, **kwargs):
    Factory method to build `DensityProfile` objects
Keyword arguments are passed to class c'tor
Parameters
----------
ptype : str
Density profile type
Returns
-------
profile : `DensityProfile`
Newly created object
"""
prof_copy = kwargs.copy()
units = prof_copy.pop('units', None)
if units:
density, distance = units.rsplit('_', 1)
scale_density = getattr(Units, density)
scale_distance = getattr(Units, distance)
scale_dict_param(prof_copy, 'rhos', scale_density, DensityProfile._params['rhos'].default)
scale_dict_param(prof_copy, 'rs', scale_distance, DensityProfile._params['rs'].default)
scale_dict_param(prof_copy, 'rmin', scale_distance, DensityProfile._params['rmin'].default)
scale_dict_param(prof_copy, 'rmax', scale_distance, DensityProfile._params['rmax'].default)
scale_dict_param(prof_copy, 'rhomax', scale_density,
DensityProfile._params['rhomax'].default)
return dmsky.factory.factory(ptype, module=__name__, **prof_copy)
|
9acb4b93fc3e82e22ec0360a38def9058cea5640
| 32,437 |
def r2_score(y, y_predicted):
"""Calculate the R2 score.
Parameters
----------
y : array-like of shape = number_of_outputs
Represent the target values.
y_predicted : array-like of shape = number_of_outputs
Target values predicted by the model.
Returns
-------
loss : float
R2 output can be non-negative values or negative value.
Becoming 1.0 means your model outputs are exactly
matched by true target values. Lower values means worse results.
Notes
-----
This is not a symmetric function.
References
----------
[1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
    >>> y = np.array([3, -0.5, 2, 7])
    >>> y_predicted = np.array([2.5, 0.0, 2, 8])
    >>> r2_score(y, y_predicted)
0.948
"""
numerator = ((y - y_predicted) ** 2).sum(axis=0, dtype=np.float64)
denominator = ((y - np.average(y, axis=0)) **
2).sum(axis=0, dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y.shape[0]])
output_scores[valid_score] = (1 - (numerator[valid_score] /
denominator[valid_score]))
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
return np.average(output_scores)
|
00e8004f076e8147f70896bd5304cd73c389522e
| 32,439 |
def vcg_solve(goal):
"""Compute the verification conditions for a hoare triple, then
solves the verification conditions using SMT.
"""
assert goal.is_comb("Valid", 3), "vcg_solve"
P, c, Q = goal.args
T = Q.get_type().domain_type()
pt = vcg_norm(T, goal)
vc_pt = [ProofTerm("z3", vc, []) for vc in pt.assums]
return ProofTerm("vcg", goal, vc_pt)
|
9f496edd1a3725640582f4016a086fd5dfe70d72
| 32,440 |
def ism_extinction(av_mag: float,
rv_red: float,
wavelengths: np.ndarray) -> np.ndarray:
"""
Function for calculating the optical and IR extinction with the empirical relation from
Cardelli et al. (1989).
Parameters
----------
av_mag : float
Extinction (mag) in the V band.
rv_red : float
Reddening in the V band, ``R_V = A_V / E(B-V)``.
wavelengths : np.ndarray
Array with the wavelengths (um) for which the extinction is calculated.
Returns
-------
np.ndarray
Extinction (mag) at ``wavelengths``.
"""
x_wavel = 1./wavelengths
y_wavel = x_wavel - 1.82
a_coeff = np.zeros(x_wavel.size)
b_coeff = np.zeros(x_wavel.size)
indices = np.where(x_wavel < 1.1)[0]
if len(indices) > 0:
a_coeff[indices] = 0.574*x_wavel[indices]**1.61
b_coeff[indices] = -0.527*x_wavel[indices]**1.61
indices = np.where(x_wavel >= 1.1)[0]
if len(indices) > 0:
a_coeff[indices] = 1. + 0.17699*y_wavel[indices] - 0.50447*y_wavel[indices]**2 - \
0.02427*y_wavel[indices]**3 + 0.72085*y_wavel[indices]**4 + \
0.01979*y_wavel[indices]**5 - 0.77530*y_wavel[indices]**6 + 0.32999*y_wavel[indices]**7
b_coeff[indices] = 1.41338*y_wavel[indices] + 2.28305*y_wavel[indices]**2 + \
1.07233*y_wavel[indices]**3 - 5.38434*y_wavel[indices]**4 - \
0.62251*y_wavel[indices]**5 + 5.30260*y_wavel[indices]**6 - 2.09002*y_wavel[indices]**7
return av_mag * (a_coeff + b_coeff/rv_red)
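# Sanity check (approximate, made-up inputs): at the V band (0.55 um) the
# Cardelli et al. (1989) relation returns roughly A_V itself, e.g.
# ism_extinction(1.0, 3.1, np.array([0.55])) -> approx. array([1.0])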
|
cb2bba0cbb396fbac900492fe9b49d70646a4255
| 32,441 |
def rangify(values):
"""
Given a list of integers, returns a list of tuples of ranges (interger pairs).
:param values:
:return:
"""
previous = None
start = None
ranges = []
for r in values:
if previous is None:
previous = r
start = r
elif r == previous + 1:
pass
else: # r != previous + 1
ranges.append((start, previous))
start = r
previous = r
ranges.append((start, previous))
return ranges
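# Hedged usage sketch (assumes the input is sorted and strictly increasing):
# rangify([1, 2, 3, 7, 8, 10]) -> [(1, 3), (7, 8), (10, 10)]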
|
672b30d4a4ce98d2203b84db65ccebd53d1f73f5
| 32,442 |
def load_balancers_with_instance(ec2_id):
"""
@param ec2_id: ec2 instance id
@return: list of elb names with the ec2 instance attached
"""
elbs = []
client = boto3.client('elb')
paginator = client.get_paginator('describe_load_balancers')
for resp in paginator.paginate():
for elb in resp['LoadBalancerDescriptions']:
# filter for ec2_instance
ec2_ids = [i['InstanceId'] for i in elb['Instances']]
if ec2_id in ec2_ids:
elbs.append(elb['LoadBalancerName'])
return elbs
|
b9ad53b7cafdbc44f88044e976a700a187605b2d
| 32,443 |
def parse_json_frequency_high(df, column, key):
"""
Takes a JETS dataframe and column containing JSON strings
and finds the highest 'Mode' or 'Config' frequency.
Excludes intermediate frequencies
Parameters
----------
df : pandas dataframe
JETS dataframe
column : str
The column where the frequencies are located
key : str
A substring of the key you are looking for in the json record(ex. 'FREQ' or 'MIN_FREQ')
Returns
-------
float
"""
parsed_json = json_extract_with_key(df['{}'.format(column)], key)
arr = []
for i in range(len(parsed_json)):
if search("MODE", parsed_json[i][0]):
arr.append(parsed_json[i][1])
elif search("CONFIG", parsed_json[i][0]):
arr.append(parsed_json[i][1])
try:
return max(arr)
    except ValueError:  # max() of an empty sequence
return ''
|
e8489ed7bca2357d4be3421932898696daf27bac
| 32,444 |
def adapter_checker(read, args):
"""
Retrieves the end sequences and sorts adapter information for each end.
"""
cigar = read.cigartuples
seq = read.query_sequence
leftend, check_in_softl, left_match = get_left_end(seq, cigar, args)
rightend, check_in_softr, right_match = get_right_end(seq, cigar, args)
left_cigar_info = cigar
right_cigar_info = list(cigar)[::-1]
if leftend != "no softclip":
three_left = get_adapter_info(Seq(leftend).complement(),
args.three_adapter,
args.three_score,
left_cigar_info,
args,
check_in_softl,
Seq(left_match).complement())
five_left = get_adapter_info(leftend,
args.five_adapter,
args.five_score,
left_cigar_info,
args,
check_in_softl,
left_match)
else:
three_left = (0, 0, 0, "no softclip")
five_left = (0, 0, 0, "no softclip")
if rightend != "no softclip":
three_right = get_adapter_info(rightend[::-1],
args.three_adapter,
args.three_score,
right_cigar_info,
args,
check_in_softr,
right_match[::-1])
five_right = get_adapter_info(Seq(rightend).reverse_complement(),
args.five_adapter,
args.five_score,
right_cigar_info,
args,
check_in_softr,
Seq(right_match).reverse_complement())
else:
three_right = (0, 0, 0, "no softclip")
five_right = (0, 0, 0, "no softclip")
return {"l3": three_left,
"r3": three_right,
"l5": five_left,
"r5": five_right}
|
096fff89eafe7ce7915daac55e95a2ea51e7f302
| 32,445 |
def create_pane(widgets, horizontal, parent_widget=None, compact=False,
compact_spacing=2):
"""Create a widget containing an aligned set of widgets.
Args:
widgets (list of `QWidget`).
horizontal (bool).
        compact (bool): If True, use tight spacing and margins.
        compact_spacing (int): Spacing/margins used when `compact` is True.
parent_widget (`QWidget`): Owner widget, QWidget is created if this
is not provided.
Returns:
`QWidget`
"""
pane = parent_widget or QtWidgets.QWidget()
type_ = QtWidgets.QHBoxLayout if horizontal else QtWidgets.QVBoxLayout
layout = type_()
if compact:
layout.setSpacing(compact_spacing)
layout.setContentsMargins(compact_spacing, compact_spacing,
compact_spacing, compact_spacing)
for widget in widgets:
stretch = 0
if isinstance(widget, tuple):
widget, stretch = widget
if isinstance(widget, int):
layout.addSpacing(widget)
elif widget:
layout.addWidget(widget, stretch)
else:
layout.addStretch()
pane.setLayout(layout)
return pane
|
f291b6482c8d5bb8ecb312b5f5747cf6c4e36e53
| 32,446 |
def image_TOKEN_search_by_word_query_TOKEN(query_snd_ix, multi_distances,
snd_fnames, img_fnames,
id2pic):
"""map a word token query into the embedding space and find images in the same space
return rank of first neighbor whose TOKEN is in the picture list of the id"""
n_images, n_sounds = multi_distances.shape
query_id = snd_fnames[query_snd_ix]
img_neighbors = np.argsort(multi_distances[:, query_snd_ix])
pictures_for_query = id2pic[query_id]
rank = img_neighbors.shape[0]
    for i in range(img_neighbors.shape[0]):
if img_fnames[img_neighbors[i]] in pictures_for_query:
rank = i + 1
break
return rank
|
81fcd8ef466e4712cbab396ed55626e61f297fac
| 32,448 |
from packaging import version
def _evolve_angles_forwards(
mass_1, mass_2, a_1, a_2, tilt_1, tilt_2, phi_12, f_start, final_velocity,
tolerance, dt, evolution_approximant
):
"""Wrapper function for the SimInspiralSpinTaylorPNEvolveOrbit function
Parameters
----------
mass_1: float
primary mass of the binary
mass_2: float
secondary mass of the binary
a_1: float
primary spin magnitude
a_2: float
secondary spin magnitude
tilt_1: float
primary spin tilt angle from the orbital angular momentum
tilt_2: float
secondary spin tilt angle from the orbital angular momentum
phi_12: float
the angle between the in-plane spin components
f_start: float
frequency to start the evolution from
final_velocity: float
Final velocity to evolve the spins up to
tolerance: float
Only evolve spins if at least one spins magnitude is greater than
tolerance
dt: float
steps in time for the integration, in terms of the mass of the binary
evolution_approximant: str
name of the approximant you wish to use to evolve the spins.
"""
if np.logical_or(a_1 > tolerance, a_2 > tolerance):
# Total mass in seconds
total_mass = (mass_1 + mass_2) * MTSUN_SI
f_final = final_velocity ** 3 / (total_mass * np.pi)
_approx = getattr(lalsimulation, evolution_approximant)
if version.parse(lalsimulation.__version__) >= version.parse("2.5.2"):
spinO = 6
else:
spinO = 7
data = SimInspiralSpinTaylorPNEvolveOrbit(
deltaT=dt * total_mass, m1=mass_1 * MSUN_SI,
m2=mass_2 * MSUN_SI, fStart=f_start, fEnd=f_final,
s1x=a_1 * np.sin(tilt_1), s1y=0.,
s1z=a_1 * np.cos(tilt_1),
s2x=a_2 * np.sin(tilt_2) * np.cos(phi_12),
s2y=a_2 * np.sin(tilt_2) * np.sin(phi_12),
s2z=a_2 * np.cos(tilt_2), lnhatx=0., lnhaty=0., lnhatz=1.,
e1x=1., e1y=0., e1z=0., lambda1=0., lambda2=0., quadparam1=1.,
quadparam2=1., spinO=spinO, tideO=0, phaseO=7, lscorr=0,
approx=_approx
)
# Set index to take from array output by SimInspiralSpinTaylorPNEvolveOrbit:
# -1 for evolving forward in time and 0 for evolving backward in time
if f_start <= f_final:
idx_use = -1
else:
idx_use = 0
a_1_evolve = np.array(
[
data[2].data.data[idx_use], data[3].data.data[idx_use],
data[4].data.data[idx_use]
]
)
a_2_evolve = np.array(
[
data[5].data.data[idx_use], data[6].data.data[idx_use],
data[7].data.data[idx_use]
]
)
Ln_evolve = np.array(
[
data[8].data.data[idx_use], data[9].data.data[idx_use],
data[10].data.data[idx_use]
]
)
tilt_1_evol, tilt_2_evol, phi_12_evol = \
tilt_angles_and_phi_12_from_spin_vectors_and_L(
a_1_evolve, a_2_evolve, Ln_evolve
)
else:
tilt_1_evol, tilt_2_evol, phi_12_evol = tilt_1, tilt_2, phi_12
return tilt_1_evol, tilt_2_evol, phi_12_evol
|
b4a000db741aab65076ca2257230aaac45634465
| 32,451 |
import logging
def get_execution(execution):
"""Get an execution"""
logging.info('[ROUTER]: Getting execution: '+execution)
include = request.args.get('include')
include = include.split(',') if include else []
exclude = request.args.get('exclude')
exclude = exclude.split(',') if exclude else []
try:
execution = ExecutionService.get_execution(execution, current_identity)
except ExecutionNotFound as e:
logging.error('[ROUTER]: '+e.message)
return error(status=404, detail=e.message)
except Exception as e:
logging.error('[ROUTER]: '+str(e))
return error(status=500, detail='Generic Error')
return jsonify(data=execution.serialize(include, exclude)), 200
|
dfaba70a41e74423f86eca2c645f71dd2c4117ac
| 32,452 |
def fz_Kd_singlesite(K: float, p: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Fit function for Cl titration."""
return (p[0] + p[1] * x / K) / (1 + x / K)
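
# A hedged usage sketch for fz_Kd_singlesite above: fitting a single-site
# binding curve with scipy.optimize.curve_fit. The data points are
# illustrative, not from the original project.
import numpy as np
from scipy.optimize import curve_fit

cl_conc = np.array([0.0, 5.0, 10.0, 40.0, 160.0])
signal = np.array([2.00, 1.62, 1.45, 1.14, 1.03])
popt, _ = curve_fit(
    lambda x, K, a, b: fz_Kd_singlesite(K, np.array([a, b]), x),
    cl_conc, signal, p0=[10.0, 2.0, 1.0],
)
K_fit, unbound_plateau, bound_plateau = popt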
|
8054447e87c70adb4d6f505c45336ccd839a69c9
| 32,453 |
def show_exam_result(request, course_id, submission_id):
""" Returns exam result template """
course_obj = get_object_or_404(Course, pk=course_id)
submission_obj = get_object_or_404(Submission, pk=submission_id)
submission_choices = submission_obj.choices.all()
choice_ids = [choice_obj.id for choice_obj in submission_choices]
max_score, question_score = 0, 0
course_questions = course_obj.question_set.all()
for question in course_questions:
max_score += question.grade
if question.is_get_score(choice_ids):
question_score += question.grade
context = {
"course": course_obj,
"choices": submission_choices,
"grade": int(question_score / max_score * 100),
}
print(question_score, max_score)
return render(request, 'onlinecourse/exam_result_bootstrap.html', context)
|
e61ffc8748e3a9aa2cb62e4ed277a08f0be05c07
| 32,454 |
def createInvoiceObject(account_data: dict, invoice_data: dict) -> dict:
"""
example: https://wiki.wayforpay.com/view/852498
param: account_data: dict
merchant_account: str
merchant_password: str
    param: invoice_data: dict
        regularMode -> one of [
            'once',
            'daily',
            'weekly',
            'quartenly',
            'monthly',
            'halfyearly',
            'yearly'
        ]
        amount : str
        currency : str
        dateBegin -> dd.mm.yyyy : str
dateEnd -> dd.mm.yyyy : str
orderReference -> timestamp : str
email -> client email to notify
return: object for invoice creation
"""
return {
"requestType": "CREATE",
"merchantAccount": account_data['merchant_account'],
"merchantPassword": account_data['merchant_password'],
"regularMode": invoice_data['regularMode'],
"amount": str(invoice_data['currency']),
"currency": invoice_data['currency'],
"dateBegin": invoice_data['dateBegin'],
"dateEnd": invoice_data['dateEnd'],
"orderReference": str(invoice_data['orderReference']),
"email": invoice_data['email']
}
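
# A hedged usage sketch for createInvoiceObject above; all credential and
# invoice values are placeholders, not real WayForPay data.
account_data = {
    "merchant_account": "test_merch_n1",
    "merchant_password": "secret_password",
}
invoice_data = {
    "regularMode": "monthly",
    "amount": "100.00",
    "currency": "UAH",
    "dateBegin": "01.01.2025",
    "dateEnd": "01.01.2026",
    "orderReference": 1712345678,
    "email": "client@example.com",
}
payload = createInvoiceObject(account_data, invoice_data)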
|
0ea61d68916c5b6f43e568cb1978bcb05b8eba04
| 32,455 |
from sklearn.cluster import KMeans
def _kmeans_seed_points(points, D, d, C, K, trial=0):
"""A seed point generation function that puts the seed points at customer
node point cluster centers using k-Means clustering."""
kmeans = KMeans(n_clusters=K, random_state=trial).fit(points[1:])
return kmeans.cluster_centers_.tolist()
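
# A hedged usage sketch for _kmeans_seed_points above; points[0] is the depot
# and is excluded from clustering. D, d and C are unused by this function, so
# None is passed for them. Coordinates are illustrative.
pts = [(0.0, 0.0), (1.0, 1.0), (1.2, 0.9), (5.0, 5.1), (4.9, 5.0)]
seeds = _kmeans_seed_points(pts, D=None, d=None, C=None, K=2)
# seeds -> two cluster centres, roughly (1.1, 0.95) and (4.95, 5.05)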
|
7131b53b9cf0c8719daa577f7a03c41f068df90d
| 32,456 |
def site_geolocation(site):
""" Obtain lat-lng coordinate of active trials in the Cancer NCI API"""
try:
latitude = site['org_coordinates']['lat']
longitude = site['org_coordinates']['lon']
lat_lng = tuple((latitude, longitude))
return lat_lng
except KeyError: # key ['org_coordinates'] is missing
return None
|
9fefcd3f49d82233005c88e645efd1c00e1db564
| 32,457 |
def get_scaling_desired_nodes(sg):
"""
    Returns the number of desired nodes the scaling group will have in the future
"""
return sg.get_state()["desired_capacity"]
|
5a417f34d89c357e12d760b28243714a50a96f02
| 32,458 |
def _BitmapFromBufferRGBA(*args, **kwargs):
"""_BitmapFromBufferRGBA(int width, int height, buffer data) -> Bitmap"""
return _gdi_._BitmapFromBufferRGBA(*args, **kwargs)
|
91fc08c42726ad101e1d060bf4e60d498d1f0b0f
| 32,459 |
def get_user_analysis_choice():
"""
Function gets the user input to determine what kind of data
quality metrics s/he wants to investigate.
:return:
analytics_type (str): the data quality metric the user wants to
investigate
percent_bool (bool): determines whether the data will be seen
as 'percentage complete' or individual instances of a
particular error
target_low (bool): determines whether the number displayed should
be considered a desirable or undesirable characteristic
"""
analysis_type_prompt = \
"\nWhat kind of analysis over time report would you like " \
"to generate for each site?\n\n" \
"A. Duplicates\n" \
"B. Amount of data following death dates\n" \
"C. Amount of data with end dates preceding start dates\n" \
"D. Success rate for concept_id field\n" \
"E. Population of the 'unit' field in the measurement table (" \
"only for specified measurements)\n" \
"F. Population of the 'route' field in the drug exposure table\n" \
"G. Percentage of expected drug ingredients observed\n" \
"H. Percentage of expected measurements observed\n" \
"I. Date consistency across tables \n\n" \
"Please specify your choice by typing the corresponding letter."
user_command = input(analysis_type_prompt).lower()
choice_dict = {
'a': 'duplicates',
'b': 'data_after_death',
'c': 'end_before_begin',
'd': 'concept',
'e': 'measurement_units',
'f': 'drug_routes',
'g': 'drug_success',
'h': 'sites_measurement',
'i': 'visit_date_disparity'}
while user_command not in choice_dict.keys():
print("\nInvalid choice. Please specify a letter that corresponds "
"to an appropriate analysis report.\n")
user_command = input(analysis_type_prompt).lower()
# NOTE: This dictionary needs to be expanded in the future
percentage_dict = {
'duplicates': False,
'data_after_death': True,
'end_before_begin': True,
'concept': True,
'measurement_units': True,
'drug_routes': True,
'drug_success': True,
'sites_measurement': True,
'visit_date_disparity': True
}
# dictionary indicates if the target is to minimize or maximize number
target_low = {
'duplicates': True,
'data_after_death': True,
'end_before_begin': True,
'concept': False,
'measurement_units': False,
'drug_routes': False,
'drug_success': False,
'sites_measurement': False,
'visit_date_disparity': False
}
analytics_type = choice_dict[user_command]
percent_bool = percentage_dict[analytics_type]
target_low = target_low[analytics_type]
return analytics_type, percent_bool, target_low
|
58ebda03cd4eb12c92951649fc946b00eb1a8075
| 32,460 |
def points_to_segments(points):
"""Convert a list of points, given in clockwise order compared to the inside of the system to a list of segments.
The last point being linked to the first one.
Args:
points (list): list of lists of size 2
Returns:
[np.ndarray]: 2D-array of segments - each row is [x1,y1,x2,y2].
"""
nb_of_points = len(points)
points = np.array(points)
first, last = points[0], points[-1]
segments = np.concatenate((points[:nb_of_points-1],points[1:]), axis = 1)
segments = np.concatenate((segments, np.expand_dims(np.concatenate((last,first)), axis = 0)), axis = 0)
return segments
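
# A hedged usage sketch for points_to_segments above: the four corners of a
# unit square (clockwise) become four segments, the last one closing the loop.
square = [[0, 0], [0, 1], [1, 1], [1, 0]]
segs = points_to_segments(square)
# segs -> array([[0, 0, 0, 1],
#                [0, 1, 1, 1],
#                [1, 1, 1, 0],
#                [1, 0, 0, 0]])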
# --------------------- Utils functions -------------------- #
|
1e560d8e752d34250f73c6e2305c7741a14afe04
| 32,461 |
def resnet_v1_34(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v1_34', **kwargs):
"""ResNet-34 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_simple_block('block1', out_depth=64, num_units=3, stride=1),
resnet_v1_simple_block('block2', out_depth=128, num_units=4, stride=2),
resnet_v1_simple_block('block3', out_depth=256, num_units=6, stride=2),
resnet_v1_simple_block('block4', out_depth=512, num_units=3, stride=2),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope, **kwargs)
|
26e7d866a14d6a17acf92c10e4c5d48883c6b5c7
| 32,462 |
from typing import Union
def from_dlpack(x: Union[ivy.Array, ivy.NativeArray]) -> ivy.Array:
"""Returns a new array containing the data from another (array) object with a
``__dlpack__`` method.
Parameters
----------
    x: object
input (array) object.
Returns
-------
ret
an array containing the data in `x`.
.. admonition:: Note
:class: note
The returned array may be either a copy or a view. See
:ref:`data-interchange` for details.
"""
return _cur_backend(x).from_dlpack(x)
|
c3213a607eb150791e74ffb8a5781e789dcd989f
| 32,463 |
def render_webpage_string(vegalite_spec: str) -> str:
"""
Renders the given Vega-lite specification into a string of an HTML webpage
that displays the specified plots.
:param vegalite_spec str: The Vega-lite plot specification to create a
webpage for.
:returns: A string of a webpage with the specified plots.
:rtype: str
"""
with open(PLOTS_TEMPLATE_FILE, "r") as template_file:
return chevron.render(template_file, {SPEC_TAG: vegalite_spec})
|
6b9c410046d6aba3b3de04bcb2cce4779f55b3d1
| 32,464 |
def _parse_blog(element):
"""
    Parse and return general blog data (title, tagline etc).
"""
title = element.find("./title").text
tagline = element.find("./description").text
language = element.find("./language").text
site_url = element.find("./{%s}base_site_url" % WP_NAMESPACE).text
blog_url = element.find("./{%s}base_blog_url" % WP_NAMESPACE).text
return {
"title": title,
"tagline": tagline,
"language": language,
"site_url": site_url,
"blog_url": blog_url,
}
|
a2678c0e55a8db5aee042744f1f343c96c7fe6f1
| 32,465 |
def summarize_block(block):
"""
Return the sentence that best summarizes block.
"""
sents = nltk.sent_tokenize(block)
    # Materialize the token lists so they can be iterated more than once (Python 3).
    word_sents = list(map(nltk.word_tokenize, sents))
d = dict((compute_score(word_sent, word_sents), sent)
for sent, word_sent in zip(sents, word_sents))
return d[max(d.keys())]
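
# A hedged usage sketch for summarize_block above; it needs the NLTK 'punkt'
# tokenizer data (nltk.download('punkt')) and the module's compute_score
# helper. The text is illustrative.
text = ("The reactor shut down at noon. "
        "Engineers traced the fault to a corroded relay in the cooling loop. "
        "Repairs are expected to take two days.")
best_sentence = summarize_block(text)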
|
991d389366f0587f7dc7fe2eaf6966d0b531012f
| 32,466 |
def get_club_result() -> dict:
    """ Returns the Ion activity entry for the club (name containing 'cube'), or None if not found. """
d = api_call("ion", "activities")
while "next" in d and d["next"] is not None:
for result in d["results"]:
if "cube" in result["name"].lower():
return result
d = api_call("ion", d["next"], False)
|
3f335aeb2c476dc29e0d335b00722e5b56eb6716
| 32,468 |
def DiffuserConst_get_decorator_type_name():
"""DiffuserConst_get_decorator_type_name() -> std::string"""
return _RMF.DiffuserConst_get_decorator_type_name()
|
97c9a68d35b079a1ecbf71a742c6799c4b6411bb
| 32,469 |
from nltk.stem import WordNetLemmatizer
from tqdm import tqdm
def lemmatizer():
"""
Substitutes words by their lemma
"""
lemmatizer = WordNetLemmatizer()
    preprocessor = lambda text: [lemmatizer.lemmatize(w) for w in text.split(" ")]
def preprocess(name, dataset):
description = " Running NLTK Lemmatizer - preprocessing dataset "
description += "{}...".format(name)
data = [preprocessor(x) for x in tqdm(dataset, desc=description)]
return data
return preprocess
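
# A hedged usage sketch for lemmatizer above; it needs the NLTK 'wordnet'
# corpus (nltk.download('wordnet')). The sentences are illustrative.
preprocess = lemmatizer()
tokens = preprocess("demo", ["the cats are running", "two geese crossed"])
# tokens -> [['the', 'cat', 'are', 'running'], ['two', 'goose', 'crossed']]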
|
627ce460abb71969ac3f19832f2854a1a00db7c3
| 32,470 |
import io
def convert_numpy_array(numpy_array: np.ndarray):
"""
Converts a numpy array into compressed bytes
:param numpy_array: An array that is going to be converted into bytes
:return: A BytesIO object that contains compressed bytes
"""
compressed_array = io.BytesIO() # np.savez_compressed() requires a file-like object to write to
np.save(compressed_array, numpy_array, allow_pickle=True, fix_imports=False)
return compressed_array
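
# A hedged usage sketch for convert_numpy_array above: round-tripping an array
# through the in-memory buffer (assumes numpy is imported as np, as in the
# function itself).
arr = np.arange(6).reshape(2, 3)
buf = convert_numpy_array(arr)
buf.seek(0)  # rewind before reading the buffer back
restored = np.load(buf, allow_pickle=True)
assert np.array_equal(arr, restored)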
|
1fe24003d00736b86361cf5eef03da304edc6bf6
| 32,471 |
def notas(* valores, sit=False):
"""
-> Função para analisar notas e situações de vários alunos.
:param valores: uma ou mais notas dos alunos (aceita várias)
:param sit: valor opcional, indicando se deve ou não adicionar a situação
:return: dicionário com várias informações sobre a situação da turma.
"""
dicionario = dict()
dicionario["Quantidade de notas"] = len(valores)
dicionario["Maior nota"] = max(valores)
dicionario["Menor nota"] = min(valores)
dicionario["Média da turma"] = sum(valores)/len(valores)
    if sit:
if dicionario["Média da turma"] >= 7.0:
dicionario["A situação"] = 'BOA'
elif 5.0 <= dicionario["Média da turma"] < 7.0:
dicionario["A situação"] = 'RAZOÁVEL'
else:
dicionario["A situação"] = 'RUIM'
return dicionario
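
# A hedged usage sketch for notas above, with illustrative grades.
resumo = notas(8.0, 6.5, 9.0, sit=True)
# resumo -> {'Quantidade de notas': 3, 'Maior nota': 9.0, 'Menor nota': 6.5,
#            'Média da turma': 7.83..., 'A situação': 'BOA'}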
|
a6915e9b7b1feef0db2be6fdf97b6f236d73f282
| 32,472 |
def append_OrbitSection(df):
"""Use OrbitDirection flags to identify 4 sections in each orbit."""
df["OrbitSection"] = 0
ascending = (df["OrbitDirection"] == 1) & (df["QDOrbitDirection"] == 1)
descending = (df["OrbitDirection"] == -1) & (df["QDOrbitDirection"] == -1)
df["OrbitSection"].mask(
(df["QDLat"] > 50) & ascending, 1, inplace=True
)
df["OrbitSection"].mask(
(df["QDLat"] > 50) & descending, 2, inplace=True
)
df["OrbitSection"].mask(
(df["QDLat"] < -50) & descending, 3, inplace=True
)
df["OrbitSection"].mask(
(df["QDLat"] < -50) & ascending, 4, inplace=True
)
return df
|
4f2cad6cb2facf6a7a8c7a89ed7b3df0a56a54c2
| 32,473 |
def _get_old_time(request):
"""
Get's the alarm time the user wants to change
Args:
request (Request): contains info about the conversation up to this point
(e.g. domain, intent, entities, etc)
Returns:
string: resolved 24-hour time in XX:XX:XX format
"""
old_time_entity = next(
(e for e in request.entities if e['role'] == 'old_time'), None)
if old_time_entity:
duckling_result = parse_numerics(old_time_entity['text'].lower(), dimensions=['time'])
for candidate in duckling_result[0]:
if candidate['body'] == old_time_entity['text'].lower():
return candidate['value']['value'][TIME_START_INDEX:TIME_END_INDEX]
else:
return None
|
6a2929ccffb4b397bd9f1dd044e70c871e302e33
| 32,474 |
def sxxxxx(p, nss):
"""
Defines a scalar wavefunction. Input momenta have shape (num events, 4).
Parameters
----------
p: tf.Tensor, scalar boson four-momenta of shape=(None,4)
nss: tf.Tensor, final|initial state of shape=(), values=(+1|-1)
Returns
-------
phi: tf.Tensor, scalar wavefunction of shape=(3,None)
"""
v0 = tf.expand_dims(complex_tf(p[:, 0] * nss, p[:, 3] * nss), 1)
v1 = tf.expand_dims(complex_tf(p[:, 1] * nss, p[:, 2] * nss), 1)
v = tf.expand_dims(complex_tf(1.0, 0.0), 1)
phi = tf.concat([v0, v1, v], axis=1)
return tf.transpose(phi)
|
429fe82c9781ec8918fe57a68e899f899df8f32f
| 32,475 |
def target_risk_contributions(target_risk, cov):
"""
    Returns the weights of the portfolio such that the contributions to
    portfolio risk are as close as possible to the target_risk, given the
    covariance matrix
"""
n = cov.shape[0]
init_guess = np.repeat(1 / n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def msd_risk(weights, target_risk, cov):
"""
Returns the Mean Squared Difference in risk contributions
between weights and target_risk
"""
w_contribs = risk_contribution(weights, cov)
return ((w_contribs - target_risk) ** 2).sum()
weights = minimize(msd_risk, init_guess,
args=(target_risk, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
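
# A hedged usage sketch for target_risk_contributions above: an equal-risk-
# contribution portfolio over a toy 3-asset covariance matrix (values
# illustrative). Assumes numpy, scipy.optimize.minimize and the module's
# risk_contribution helper are available, as in the function itself.
cov = np.array([
    [0.040, 0.006, 0.002],
    [0.006, 0.090, 0.010],
    [0.002, 0.010, 0.160],
])
target = np.repeat(1 / 3, 3)  # equal risk budget across the three assets
erc_weights = target_risk_contributions(target, cov)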
|
82d338f2bc8c6b712e7489b70a3122eee21d0aab
| 32,477 |
import urllib, datetime
import xarray as xr
import numpy as np
def read_monthly_indices_from_CLIMEXP(name_of_index):
"""
Try reading various monthly indices from KNMI's Climate Explorer
"""
name_to_url = {
'M1i': 'http://climexp.knmi.nl/data/iM1.dat', # 1910 ->
'M2i': 'http://climexp.knmi.nl/data/iM2.dat', # 1910 ->
'M3i': 'http://climexp.knmi.nl/data/iM3.dat',
'M4i': 'http://climexp.knmi.nl/data/iM4.dat', # 1910 ->
'M5i': 'http://climexp.knmi.nl/data/iM5.dat',
'M6i': 'http://climexp.knmi.nl/data/iM6.dat', # 1910 ->
'NAO': 'http://climexp.knmi.nl/data/inao.dat', # 1821 ->
'NINO12': 'http://climexp.knmi.nl/data/inino2.dat',
'NINO3': 'http://climexp.knmi.nl/data/inino3.dat',
'NINO34': 'http://climexp.knmi.nl/data/inino5.dat',
'NINO4': 'http://climexp.knmi.nl/data/inino4.dat',
'AMO1': 'http://climexp.knmi.nl/data/iamo_hadsst.dat',
'AMO2': 'http://climexp.knmi.nl/data/iamo_hadsst_ts.dat',
'PDO1': 'http://climexp.knmi.nl/data/ipdo.dat',
'PDO2': 'http://climexp.knmi.nl/data/ipdo_hadsst3.dat',
'SOI': 'http://climexp.knmi.nl/data/isoi.dat',
}
url_string = name_to_url[name_of_index]
try:
fp2 = urllib.request.urlopen(url_string)
data_extracted = fp2.readlines()
    except Exception:
        pass
data_asarray = []
for row in range(len(data_extracted)):
try:
dline = np.array(data_extracted[row].split()).astype(float)
        except ValueError:
            dline = []
if (len(dline) > 0):
data_asarray.append(np.array(data_extracted[row].split()).astype(float))
data = np.array(data_asarray);
dates = np.array([])
data_years = data[:, 0].astype(int);
if (data.shape[1] > 3):
data_tser = data[:, 1:13].ravel()
for y in data_years:
for m in range(1, 13):
dates = np.append(dates, datetime.date(y, m, 1))
if (data.shape[1] <= 3):
data_tser = data[:, 2]
for row in data:
dates = np.append(dates, datetime.date(int(row[0]), int(row[1]), 1))
data_tser[data_tser < -990] = np.nan
if (name_of_index == 'Volc'):
data_tser[data_tser == 0] = np.nan
data_tser = np.sqrt(data_tser)
data_tser[np.isinf(data_tser)] = np.nan
data_tser = inpaint_nans(data_tser)
date_range = [1800, 2150];
idxs = np.zeros(dates.shape, bool)
for i, date in enumerate(dates):
if ((date.year >= date_range[0]) & (date.year <= date_range[1])): idxs[i] = True
ds = xr.Dataset(data_vars={name_of_index: ('time', data_tser[idxs])},
coords={'time': dates[idxs].astype(np.datetime64)})
return ds.resample(time='1M').mean()
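
# A hedged usage sketch for read_monthly_indices_from_CLIMEXP above; it needs
# network access to climexp.knmi.nl and the module's inpaint_nans helper.
nao = read_monthly_indices_from_CLIMEXP('NAO')
print(nao['NAO'].sel(time=slice('1990-01-01', '1999-12-31')).mean().item())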
|
8ea42dbca11e587267ef8e2c13ee1787be9db430
| 32,478 |
def generate_xdataEMX(parm):
"""
Generate the x data from the parameters dictionary
Parameters:
parm: [dict] parameters
Returns:
xdata = nd.array[XNbPoints]
"""
# Extracts the x axis data from the parameter file
try:
xpoints = parm['SSX']
except KeyError:
xpoints = parm['ANZ']
try:
xwid = parm['GSI']
xstart = parm['GST']
except KeyError:
xwid = parm['XXWI']
xstart = parm['XXLB']
xdata = np.linspace(xstart, xstart+xwid, int(xpoints))
return xdata
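
# A hedged usage sketch for generate_xdataEMX above, with an illustrative
# EMX-style parameter dictionary (a 100 G field sweep starting at 3350 G).
parm = {'SSX': 1024, 'GST': 3350.0, 'GSI': 100.0}
field_axis = generate_xdataEMX(parm)  # 1024 points from 3350 to 3450 G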
|
a65b48e51f5013fe82d0b9baafe70330b15f0477
| 32,479 |
import csv
from math import cos, sin
def csv_2d_cartesian(filename, polar=False, scan=False):
"""extract 2d cartesian coordinates from a file"""
x_values = []
y_values = []
with open(filename) as data_file:
odom_data = csv.reader(data_file)
for row in odom_data:
# if scan:
# row[1] = pi/2 + float(row[1])
if polar:
x = float(row[0]) * cos(float(row[1]))
y = float(row[0]) * sin(float(row[1]))
else:
x = float(row[0])
y = float(row[1])
x_values.append(x)
y_values.append(y)
return x_values, y_values
|
648a8f9bbee8b0b61284bf5a8b93c729bb085c9d
| 32,480 |
def get_edge_similarity(node_pos,neighbor_positions):
"""
    Useful for finding approximately collinear neighbors.
"""
displacements = get_displacement_to_neighbors(node_pos,neighbor_positions)
n_neighbors = neighbor_positions.shape[0]
# Quick and dirty, can reduce computation by factor 2.
similarity = []
for d1 in displacements:
for d2 in displacements:
similarity+=[np.sum(d1*d2)/(np.linalg.norm(d1)*np.linalg.norm(d2))]
similarity = np.array(similarity).reshape(n_neighbors,n_neighbors)
return similarity
|
b1b64384d84ffbdd6a042b1e2c3a8a9a2212e61e
| 32,481 |
def high_pass_filter(x_vals, y_vals, cutoff, inspectPlots=True):
"""
    Replicate Origin directly:
    http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm

    "Rotate" the data set so it ends at 0, enforcing a periodicity in the
    data; otherwise oscillatory artifacts result at the ends.

    This uses a 50th-order Butterworth filter.
"""
x_vals, y_vals = fourier_prep(x_vals, y_vals)
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Non-nan Data")
zeroPadding = len(x_vals)
# This needs to be this way because truncation is bad and actually
# zero padding
print("zero padding", zeroPadding)
N = len(x_vals)
onePerc = int(0.01 * N)
x1 = np.mean(x_vals[:onePerc])
x2 = np.mean(x_vals[-onePerc:])
y1 = np.mean(y_vals[:onePerc])
y2 = np.mean(y_vals[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x_vals + b
y_vals -= flattenLine
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Rotated Data")
# even_data = np.column_stack((x_vals, y_vals))
# Perform the FFT and find the appropriate frequency spacing
x_fourier = fft.fftfreq(zeroPadding, x_vals[1] - x_vals[0])
y_fourier = fft.fft(y_vals) # , n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(x_fourier, np.abs(y_fourier), label="Raw FFT")
# Define where to remove the data
band_start = cutoff
band_end = int(max(abs(x_fourier))) + 1
print(abs(y_fourier[-10:]))
butterworth = 1 - np.sqrt(1 / (1 + (x_fourier / cutoff) ** 50))
y_fourier *= butterworth
if inspectPlots:
plt.plot(x_fourier, np.abs(y_fourier), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
print("y_fourier", len(y_fourier))
# invert the FFT
y_vals = fft.ifft(y_fourier, n=zeroPadding)
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y_vals = y_vals[:len(x_vals)]
# unshift the data
y_vals += flattenLine
y_vals = np.abs(y_vals)
if inspectPlots:
plt.figure("Real Space")
print(x_vals.size, y_vals.size)
plt.plot(x_vals, y_vals, label="Smoothed Data")
a = plt.legend()
a.draggable(True)
return np.column_stack((x_vals, y_vals))
|
8a114e1868c28f1de8ee4ac445bd620cb45482ff
| 32,482 |
def hyetograph(dataframe, col="precipitation", freq="hourly", ax=None, downward=True):
"""Plot showing rainfall depth over time.
Parameters
----------
dataframe : pandas.DataFrame
Must have a datetime index.
    col : string, optional (default = 'precipitation')
        The name of the column in *dataframe* that contains the
        rainfall series.
freq : str, optional (default = 'hourly')
The frequency to which the rainfall depth should be
accumulated.
ax : matplotlib.Axes object, optional
The Axes on which the plot will be placed. If not provided,
a new Figure and Axes will be created.
downward : bool, optional (default = True)
Inverts the y-axis to show the rainfall depths "falling"
from the top.
Returns
-------
fig : matplotlib.Figure
"""
ylabel = "%s Rainfall Depth (in)" % freq.title()
fig = _plotter(
dataframe, col, ylabel, freq=freq, fillna=0, how="sum", ax=ax, downward=downward
)
return fig
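
# A hedged usage sketch for hyetograph above, with a synthetic minute-level
# rainfall series; assumes pandas/numpy and the module's _plotter helper.
import numpy as np
import pandas as pd

idx = pd.date_range('2024-06-01', periods=720, freq='min')
rain = pd.DataFrame(
    {'precipitation': np.random.default_rng(0).exponential(0.01, size=720)},
    index=idx,
)
fig = hyetograph(rain, col='precipitation', freq='hourly')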
|
17deb837058ddd8ad8db9ed47c960cacfde957db
| 32,483 |
def objective(z, x):
""" Objective. """
return park2_3_mf(z, x)
|
fb65c09f084b0af8848e78582703a1bb4e11e735
| 32,484 |
import json
import re
def validate_config(crawler_path):
"""
Validates config
"""
with open(crawler_path) as file:
config = json.load(file)
if 'total_articles_to_find_and_parse' not in config:
raise IncorrectNumberOfArticlesError
if 'seed_urls' not in config:
raise IncorrectURLError
urls = config["seed_urls"]
articles = config["total_articles_to_find_and_parse"]
if not urls:
raise IncorrectURLError
if not isinstance(articles, int) or articles <= 0:
raise IncorrectNumberOfArticlesError
if articles > 100:
raise NumberOfArticlesOutOfRangeError
for url in urls:
check = re.search(DOMAIN, url)
if not check:
raise IncorrectURLError
return urls, articles
|
ba46667fcc0d75be6b28d19c0f5fa2d41f9123dd
| 32,485 |
def parse_FORCE_SETS(natom=None, filename="FORCE_SETS", to_type2=False):
"""Parse FORCE_SETS from file.
to_type2 : bool
dataset of type2 is returned when True.
Returns
-------
dataset : dict
Displacement dataset. See Phonopy.dataset.
"""
with open(filename, "r") as f:
return _get_dataset(
f,
natom=natom,
to_type2=to_type2,
)
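
# A hedged usage sketch for parse_FORCE_SETS above, reading a phonopy
# FORCE_SETS file from the working directory (path is illustrative).
dataset = parse_FORCE_SETS(filename="FORCE_SETS")
n_displacements = len(dataset["first_atoms"])  # type-1 dataset layout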
|
54b39f8b111292f53c6231facafb177109972965
| 32,486 |
import time
def DeserializeFileAttributesFromObjectMetadata(obj_metadata, url_str):
"""Parses the POSIX attributes from the supplied metadata.
Args:
obj_metadata: The metadata for an object.
url_str: File/object path that provides context if a warning is thrown.
Returns:
A POSIXAttribute object with the retrieved values or a default value for
any attribute that could not be found.
"""
posix_attrs = POSIXAttributes()
# Parse atime.
found, atime = GetValueFromObjectCustomMetadata(obj_metadata, ATIME_ATTR,
NA_TIME)
try:
atime = long(atime)
if found and atime <= NA_TIME:
WarnNegativeAttribute('atime', url_str)
atime = NA_TIME
elif atime > long(time.time()) + SECONDS_PER_DAY:
WarnFutureTimestamp('atime', url_str)
atime = NA_TIME
except ValueError:
WarnInvalidValue('atime', url_str)
atime = NA_TIME
posix_attrs.atime = atime
# Parse gid.
DeserializeIDAttribute(obj_metadata, GID_ATTR, url_str, posix_attrs)
# Parse uid.
DeserializeIDAttribute(obj_metadata, UID_ATTR, url_str, posix_attrs)
found, mode = GetValueFromObjectCustomMetadata(obj_metadata, MODE_ATTR,
NA_MODE)
if found and MODE_REGEX.match(mode):
try:
# Parse mode into a 3-digit base-8 number.
posix_attrs.mode = POSIXMode(int(mode))
except ValueError:
WarnInvalidValue('mode', url_str)
return posix_attrs
|
3fb3d3f4e45a622cc12ce1d65b6f02d59efd3f58
| 32,487 |