content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
def set_edge_font_size_mapping(table_column, table_column_values=None, sizes=None, mapping_type='c', default_size=None,
style_name=None, network=None, base_url=DEFAULT_BASE_URL):
"""Map table column values to sizes to set the edge size.
Args:
table_column (str): Name of Cytoscape table column to map values from
table_column_values (list): List of values from Cytoscape table to be used in mapping
sizes (list): List of size values to map to ``table_column_values``
        mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous
default_size (int): Size value to set as default
style_name (str): name for style
network (SUID or str or None): Name or SUID of a network or view. Default is the
"current" network active in Cytoscape.
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
str: ''
Raises:
        CyError: if the table column doesn't exist, the table column values don't match the values list, or the style name, network, mapping type or size is invalid
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> set_edge_font_size_mapping('EdgeBetweenness', table_column_values=[2.0, 20000.0], sizes=[20, 80], style_name='galFiltered Style')
''
>>> set_edge_font_size_mapping('interaction', table_column_values=['pp', 'pd'], sizes=[40, 90], mapping_type='d', style_name='galFiltered Style')
''
>>> set_edge_font_size_mapping(**gen_edge_size_map('interaction', mapping_type='d'))
''
>>> set_edge_font_size_mapping(**gen_edge_size_map('EdgeBetweenness', scheme_c_number_continuous(100, 200), style_name='galFiltered Style'))
''
>>> set_edge_font_size_mapping('PassthruCol', mapping_type='p', default_size=20, style_name='galFiltered Style')
''
See Also:
:meth:`gen_edge_size_map`
        `Value Generators <https://py4cytoscape.readthedocs.io/en/0.0.9/concepts.html#value-generators>`_ in the Concepts section in the py4cytoscape User Manual.
"""
verify_dimensions('size', sizes)
if default_size is not None:
style_defaults.set_edge_font_size_default(default_size, style_name=style_name, base_url=base_url)
return _update_visual_property('EDGE_LABEL_FONT_SIZE', table_column, table_column_values=table_column_values,
range_map=sizes, mapping_type=mapping_type, style_name=style_name, network=network,
base_url=base_url, table='edge')
|
7360f57d1e4921d58eaf5af4e57a6c5f636fefdb
| 20,500 |
def compute_time_overlap(appointment1, appointment2):
"""
Compare two appointments on the same day
"""
assert appointment1.date_ == appointment2.date_
print("Checking for time overlap on \"{}\"...".
format(appointment1.date_))
print("Times to check: {}, {}".
format(appointment1.time_range_, appointment2.time_range_))
latest_start = max(appointment1.start_time_, appointment2.start_time_)
earliest_end = min(appointment1.end_time_, appointment2.end_time_)
    delta = (earliest_end - latest_start).total_seconds()
overlap = max(0, delta)
if overlap == 0:
print("No time overlap.")
return False
print("\033[93mFound time overlap.\033[0m")
return True
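# --- Usage sketch (editor's addition, not part of the original snippet) ---
# The function only needs objects exposing date_, time_range_, start_time_ and
# end_time_; a namedtuple stand-in over datetimes is assumed here for illustration.
from collections import namedtuple
from datetime import datetime
Appt = namedtuple("Appt", "date_ time_range_ start_time_ end_time_")
_a = Appt("2024-01-05", "09:00-10:00",
          datetime(2024, 1, 5, 9, 0), datetime(2024, 1, 5, 10, 0))
_b = Appt("2024-01-05", "09:30-11:00",
          datetime(2024, 1, 5, 9, 30), datetime(2024, 1, 5, 11, 0))
assert compute_time_overlap(_a, _b) is True  # 09:30-10:00 overlaps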
|
c459ef52d78bc8dd094d5be9c9f4f035c4f9fcaa
| 20,501 |
def set_up_prior(data, params):
"""
Function to create prior distribution from data
Parameters
----------
data: dict
catalog dictionary containing bin endpoints, log interim prior, and log
interim posteriors
params: dict
dictionary of parameter values for creation of prior
Returns
-------
prior: chippr.mvn object
prior distribution as multivariate normal
"""
zs = data['bin_ends']
# print(str(len(zs))+' input redshift bin ends')
log_nz_intp = data['log_interim_prior']
# modify above line if testing implicit prior misspecification
print('reading implicit prior '+str(log_nz_intp))
log_z_posts = data['log_interim_posteriors']
z_difs = zs[1:]-zs[:-1]
z_mids = (zs[1:]+zs[:-1])/2.
n_bins = len(z_mids)
# print(str(n_bins)+' bin centers')
n_pdfs = len(log_z_posts)
    a = 1.  # amplitude
    b = 5.  # inverse wavelength
    c = 1.e-2  # random fluctuations
prior_var = np.eye(n_bins)
for k in range(n_bins):
# print(k)
prior_var[k] = a * np.exp(-0.5 * b * (z_mids[k] - z_mids) ** 2)
prior_var += c * np.eye(n_bins)
prior_mean = log_nz_intp
# print('prior dimensions: '+str((np.shape(prior_mean), np.shape(prior_var))))
prior = mvn(prior_mean, prior_var)
    if params['prior_mean'] == 'sample':
prior_mean = prior.sample_one()
prior = mvn(prior_mean, prior_var)
return (prior, prior_var)
|
eb75965b9425bbf9fe3a33b7f0c850e27e2d454a
| 20,502 |
def is_nonincreasing(arr):
""" Returns true if the sequence is non-increasing. """
return all([x >= y for x, y in zip(arr, arr[1:])])
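# Quick examples (editor's addition): adjacent elements are compared pairwise;
# an empty or single-element sequence is trivially non-increasing.
assert is_nonincreasing([5, 5, 3, 1])
assert not is_nonincreasing([1, 2, 2])
assert is_nonincreasing([])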
|
6d78ef5f68ca93767f3e204dfea2c2be8b3040af
| 20,503 |
def restore_missing_features(nonmissing_X, missing_features):
"""Insert columns corresponding to missing features.
Parameters
----------
nonmissing_X : array-like, shape (n_samples, n_nonmissing)
Array containing data with missing features removed.
missing_features : array-like, shape (n_missing,)
Array containing the column indices in the original
data that correspond to missing features.
Returns
-------
X : array-like, shape (n_samples, n_features)
Array with missing features inserted.
"""
if missing_features is None:
missing_features = []
n_samples, n_nonmissing = nonmissing_X.shape
n_missing = len(missing_features)
n_features = n_missing + n_nonmissing
# Ensure missing indices are consistent with the
# inferred number of features.
if len(missing_features) > 0:
if min(missing_features) < 0 or max(missing_features) >= n_features:
raise ValueError(
'Missing features are inconsistent with '
'number of features in complete data')
if is_dask_array(nonmissing_X):
cols = []
idx = 0
for i in range(n_features):
if i in missing_features:
cols.append(dask.array.full((n_samples, 1), np.NaN))
else:
cols.append(nonmissing_X[:, idx].reshape((n_samples, 1)))
idx += 1
X = dask.array.hstack(cols)
else:
nonmissing_features = [i for i in range(n_features)
if i not in missing_features]
X = np.full((n_samples, n_features), np.NaN)
X[:, nonmissing_features] = nonmissing_X
return X
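# Usage sketch (editor's addition): with the module's imports available (np, dask,
# is_dask_array), dropped columns come back as NaN in their original positions:
#   nonmissing = np.array([[1.0, 2.0], [3.0, 4.0]])
#   restore_missing_features(nonmissing, missing_features=[1, 3])
#   -> array([[ 1., nan,  2., nan],
#             [ 3., nan,  4., nan]])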
|
733fd72d36ea86269472949eaa12a306835578f9
| 20,504 |
def get_sample_type_from_recipe(recipe):
"""Retrieves sample type from recipe
Args:
recipe: Recipe of the project
Returns:
        sample_type_mapping (dict): Sample type of the project
For Example:
{ TYPE: "RNA" } , { TYPE: "DNA" }, { TYPE: "WGS" }
"""
return find_mapping(recipe_type_mapping, recipe)
|
57cef2f592d530ad15aed47cc51da4441430f3d2
| 20,505 |
import yaml
def _is_download_necessary(path, response):
"""Check whether a download is necessary.
    There are three criteria.
1. If the file is missing, download it.
2. The following two checks depend on each other.
1. Some files have an entry in the header which specifies when the file was
modified last. If the file has been modified, download it.
2. If the header has no entry for the last modified date, we compare file sizes.
If the file sizes do not match, the file is downloaded.
"""
path_yaml = path.with_suffix(".yaml")
if path_yaml.exists():
last_modified_offline = pd.to_datetime(
yaml.safe_load(path_yaml.read_text())["last_modified"]
)
else:
last_modified_offline = None
last_modified_online = pd.to_datetime(response.headers.get("last-modified", None))
path.with_suffix(".yaml").write_text(
yaml.dump({"last_modified": response.headers.get("last-modified", None)})
)
if not path.exists():
is_necessary = True
reason = f"The file {path.name} does not exist."
elif (
last_modified_online is not None
and last_modified_online > last_modified_offline
):
is_necessary = True
reason = f"{path.name} has been modified online."
elif last_modified_online is None:
file_size_offline = path.stat().st_size
file_size_online = int(response.headers.get("content-length", 0))
if file_size_online != file_size_offline:
is_necessary = True
reason = f"File sizes differ for {path.name}"
else:
is_necessary = False
reason = f"File {path.name} is already downloaded."
else:
is_necessary = False
reason = f"File {path.name} is already downloaded."
return is_necessary, reason
|
69cd2778fb6d4706ff88bd35c1b5c1abda9a39ba
| 20,506 |
import hashlib
def hash64(s):
"""ะััะธัะปัะตั ั
ะตั - 8 ัะธะผะฒะพะปะพะฒ (64 ะฑะธัะฐ)
"""
hex = hashlib.sha1(s.encode("utf-8")).hexdigest()
return "{:x}".format(int(hex, 16) % (10 ** 8))
|
e35a367eac938fdb66584b52e1e8da59582fdb9a
| 20,507 |
def course_runs():
"""Fixture for a set of CourseRuns in the database"""
return CourseRunFactory.create_batch(3)
|
1ffd4efe008e44f9e2828c9128acfe3cdafb5160
| 20,508 |
import json
def _response(data=None, status_code=None):
"""Build a mocked response for use with the requests library."""
response = MagicMock()
if data:
response.json = MagicMock(return_value=json.loads(data))
if status_code:
response.status_code = status_code
response.raise_for_status = MagicMock()
return response
|
0ccd38a954d28a4f010becc68319b49896323de0
| 20,509 |
def findtailthreshold(v, figpath=None):
"""
    f, mns, sds, gmfit = findtailthreshold(v, figpath=None)
    <v> is a vector of values
    <figpath> (optional) is a file path for saving a diagnostic figure. Default: None (no figure).
Fit a Gaussian Mixture Model (with n=2)
to the data and find the point that is greater than
the median and at which the posterior probability
is equal (50/50) across the two Gaussians.
This serves as a nice "tail threshold".
To save on computational load, we take a random subset of
size 1000000 if there are more than that number of values.
We also use some discretization in computing our solution.
return:
<f> as the threshold
<mns> as [A B] with the two means (A < B)
<sds> as [C D] with the corresponding std devs
<gmfit> with the gmdist object (the order might not
be the same as A < B)
example:
from numpy.random import randn
f, mns, sds, gmfit = findtailthreshold(np.r_[randn(1000), 5+3*randn(500)], figpath='test.png')
"""
# internal constants
numreps = 3 # number of restarts for the GMM
maxsz = 1000000 # maximum number of values to consider
nprecision = 500
# linearly spaced values between median and upper robust range
# inputs
if figpath is None:
wantfig = 0
else:
wantfig = 1
# quick massaging of input
v2 = v[np.isfinite(v)]
if len(v2) > maxsz:
print('warning: too big, so taking a subset')
v2 = picksubset(v2, maxsz)
# fit mixture of two gaussians
gmfit = gmdist(n_components=2, n_init=numreps).fit(v2.reshape(-1, 1))
# figure out a nice range
rng = robustrange(v2.flatten())[0]
# evaluate posterior
allvals = np.linspace(np.median(v2), rng[1], num=nprecision)
checkit = gmfit.predict_proba(allvals.reshape(-1, 1))
# figure out crossing
np.testing.assert_equal(
np.any(checkit[:, 0] > .5) and np.any(checkit[:, 0] < .5),
True,
err_msg='no crossing of 0.5 detected')
ix = np.argmin(np.abs(checkit[:, 0]-.5))
# return it
f = allvals[ix]
# prepare other outputs
mns = gmfit.means_.flatten()
sds = np.sqrt(gmfit.covariances_.flatten())
if mns[1] < mns[0]:
mns = mns[[1, 0]]
sds = sds[[1, 0]]
# start the figure
if wantfig:
# make figure
plt.plot(allvals, checkit)
plt.plot([allvals[ix], allvals[ix]], plt.ylim(), 'k-', linewidth=2)
plt.title('Posterior Probabilities')
plt.savefig(figpath)
plt.close('all')
return f, mns, sds, gmfit
|
8ef0c3267582d604621bd98fa7c81976b76b2c51
| 20,510 |
from typing import Optional
from typing import Dict
import asyncio
async def make_request_and_envelope_response(
app: web.Application,
method: str,
url: URL,
headers: Optional[Dict[str, str]] = None,
data: Optional[bytes] = None,
) -> web.Response:
"""
Helper to forward a request to the catalog service
"""
session = get_client_session(app)
try:
async with session.request(method, url, headers=headers, data=data) as resp:
payload = await resp.json()
try:
resp.raise_for_status()
resp_data = wrap_as_envelope(data=payload)
except ClientResponseError as err:
if 500 <= err.status:
raise err
resp_data = wrap_as_envelope(error=payload["errors"])
return web.json_response(resp_data, status=resp.status, dumps=json_dumps)
except (asyncio.TimeoutError, ClientConnectionError, ClientResponseError) as err:
logger.warning(
"Catalog service errors upon request %s %s: %s", method, url.relative(), err
)
raise web.HTTPServiceUnavailable(
reason="catalog is currently unavailable"
) from err
|
22d18de671cc84d3471120273a7b0599fda26210
| 20,511 |
import os
def _app_node(app_id, existing=True):
"""Returns node path given app id."""
path = os.path.join(z.SCHEDULED, app_id)
if not existing:
path = path + '#'
return path
|
dca3abc7376c50f015a64767333432b89a2d7009
| 20,512 |
def get_provincial_miif_sets(munis):
"""
collect set of indicator values for each province, MIIF category and year
returns dict of the form {
'cash_coverage': {
'FS': {
'B1': {
'2015': [{'result': ...}]
}
}
}
}
"""
prov_sets = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(list))))
dev_cat_key = lambda muni: muni['municipality.miif_category']
dev_cat_sorted = sorted(munis, key=dev_cat_key)
prov_key = lambda muni: muni['municipality.province_code']
for calculator in get_indicator_calculators(has_comparisons=True):
name = calculator.indicator_name
for dev_cat, dev_cat_group in groupby(dev_cat_sorted, dev_cat_key):
prov_sorted = sorted(dev_cat_group, key=prov_key)
for prov_code, prov_group in groupby(prov_sorted, prov_key):
for muni in prov_group:
for period in muni[name]['values']:
if period['result'] is not None:
prov_sets[name][prov_code][dev_cat][period['date']].append(period)
return prov_sets
|
a4484a40a30f8fe9e702735f6eccf78c395ea446
| 20,513 |
def create_kernel(radius=2, invert=False):
"""Define a kernel"""
if invert:
value = 0
k = np.ones((2*radius+1, 2*radius+1))
else:
value = 1
k = np.zeros((2*radius+1, 2*radius+1))
y,x = np.ogrid[-radius:radius+1, -radius:radius+1]
mask = x**2 + y**2 <= radius**2
k[mask] = value
return k
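# Example (editor's addition): radius=1 gives a 3x3 cross-shaped disk; with
# invert=True the disk is zeroed inside a field of ones instead.
import numpy as np  # the function above already relies on np being in scope
print(create_kernel(radius=1))
# [[0. 1. 0.]
#  [1. 1. 1.]
#  [0. 1. 0.]]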
|
33bbfa6141eb722180ffa9111555e4e00fbdac6a
| 20,514 |
def edimax_get_power(ip_addr="192.168.178.137"):
"""
    Source: http://sun-watch.net/index.php/eigenverbrauch/ipschalter/edimax-protokoll/
"""
req = """<?xml version="1.0" encoding="UTF8"?><SMARTPLUG id="edimax"><CMD id="get">
<NOW_POWER><Device.System.Power.NowCurrent>
</Device.System.Power.NowCurrent><Device.System.Power.NowPower>
</Device.System.Power.NowPower></NOW_POWER></CMD></SMARTPLUG>
"""
r = requests.post("http://{0}:10000/smartplug.cgi".format(ip_addr), auth=("admin","1234"), data=req)
soup = BeautifulSoup(r.text, features="xml")
power = soup.find(name="Device.System.Power.NowPower").get_text()
    print(r.text)
return float(power)
|
9de1720f00ca4194b66f79048314bac1cac950ed
| 20,515 |
import torch
def get_class_inst_data_params_n_optimizer(nr_classes, nr_instances, device):
"""Returns class and instance level data parameters and their corresponding optimizers.
Args:
nr_classes (int): number of classes in dataset.
nr_instances (int): number of instances in dataset.
device (str): device on which data parameters should be placed.
Returns:
class_parameters (torch.Tensor): class level data parameters.
inst_parameters (torch.Tensor): instance level data parameters
optimizer_class_param (SparseSGD): Sparse SGD optimizer for class parameters
optimizer_inst_param (SparseSGD): Sparse SGD optimizer for instance parameters
"""
class_parameters = torch.tensor(
np.ones(nr_classes) * np.log(1.0),
dtype=torch.float32,
requires_grad=True,
device=device
)
optimizer_class_param = SparseSGD(
[class_parameters],
lr=0.1,
momentum=0.9,
skip_update_zero_grad=True
)
inst_parameters = torch.tensor(
np.ones(nr_instances) * np.log(1.0),
dtype=torch.float32,
requires_grad=True,
device=device
)
optimizer_inst_param = SparseSGD(
[inst_parameters],
lr=0.1,
momentum=0.9,
skip_update_zero_grad=True
)
return class_parameters, inst_parameters, optimizer_class_param, optimizer_inst_param
|
f97e12b99a42f32bb3629df5567ca44477d71dc0
| 20,516 |
def inc(x):
""" Add one to the current value """
return x + 1
|
c8f9a68fee2e8c1a1d66502ae99e42d6034b6b5c
| 20,517 |
def regionError(df, C, R):
"""Detects if a selected region is not part of one of the selected countries
Parameters:
-----------
df : Pandas DataFrame
the original dataset
C : str list
list of selected countries
R : str list
list of selected regions
Returns
-----------
bool
True if the error is detected
"""
    if C is None:
C = ['USA']
available_regions = list(regions_of_country(df, C)) + ['All_regions', 'All']
for region in R:
        if region not in available_regions:
return True
return False
|
53e237bba7c1696d23b5f1e3c77d4b2d2a4c9390
| 20,518 |
def lherzolite():
"""
Elastic constants of lherzolite rock (GPa) from
Peselnick et al. (1974), in Voigt notation
- Abbreviation: ``'LHZ'``
Returns:
(tuple): tuple containing:
* C (np.ndarray): Elastic stiffness matrix (shape ``(6, 6)``)
* rho (float): Density (3270 kg/m^3)
Example
-------
>>> from telewavesim import elast
>>> elast.lherzolite()[0]
array([[ 1.8740e+02, 6.3710e+01, 6.3870e+01, 7.8000e-01, 2.0200e+00,
-3.2000e+00],
[ 6.3710e+01, 2.1125e+02, 6.4500e+01, -3.0700e+00, 8.7000e-01,
-5.7800e+00],
[ 6.3870e+01, 6.4500e+01, 1.9000e+02, 3.8000e-01, 2.3800e+00,
-1.2000e-01],
[ 7.8000e-01, -3.0700e+00, 3.8000e-01, 6.7900e+01, -2.1200e+00,
1.6000e+00],
[ 2.0200e+00, 8.7000e-01, 2.3800e+00, -2.1200e+00, 6.3120e+01,
-5.5000e-01],
[-3.2000e+00, -5.7800e+00, -1.2000e-01, 1.6000e+00, -5.5000e-01,
6.6830e+01]])
>>> elast.lherzolite()[1]
3270.0
"""
rho = 3270.
C = np.zeros((6,6), dtype=float)
C[0,0] = 187.4; C[0,1] = 63.71; C[0,2] = 63.87; C[0,3] = 0.78; C[0,4] = 2.02; C[0,5] = -3.2
C[1,0] = C[0,1]; C[1,1] = 211.25; C[1,2] = 64.5; C[1,3] = -3.07; C[1,4] = 0.87; C[1,5] = -5.78
C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 190.; C[2,3] = 0.38; C[2,4] = 2.38; C[2,5] = -0.12
C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 67.9; C[3,4] = -2.12; C[3,5] = 1.6
C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 63.12; C[4,5] = -0.55
C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.83
return C, rho
|
4d7e16fcfc1732ee3a881d4b1c2d755bbd9035f3
| 20,519 |
def int_to_bit(x_int, nbits, base=2):
"""Turn x_int representing numbers into a bitwise (lower-endian) tensor."""
x_l = tf.expand_dims(x_int, axis=-1)
x_labels = []
for i in range(nbits):
x_labels.append(
tf.floormod(
tf.floordiv(tf.to_int32(x_l),
tf.to_int32(base)**i), tf.to_int32(base)))
res = tf.concat(x_labels, axis=-1)
return tf.to_float(res)
|
a9529c737e058da664d31055aa85cc7e6179f585
| 20,520 |
def create_ldap_external_user_directory_config_content(server=None, roles=None, role_mappings=None, **kwargs):
"""Create LDAP external user directory configuration file content.
"""
entries = {
"user_directories": {
"ldap": {
}
}
}
entries["user_directories"]["ldap"] = []
if server:
entries["user_directories"]["ldap"].append({"server": server})
if roles:
entries["user_directories"]["ldap"].append({"roles": [{r: None} for r in roles]})
if role_mappings:
for role_mapping in role_mappings:
entries["user_directories"]["ldap"].append({"role_mapping": role_mapping})
return create_xml_config_content(entries, **kwargs)
|
baaf3d7a02f2f4e18c3ccddb7f3ff4d5b379c1d4
| 20,521 |
def intersection(lst1, lst2):
"""!
\details Finds hashes that are common to both lists and stores their location in both documents
Finds similarity that is measured by
sim(A,B) = number of hashes in intersection of both hash sets divided by minimum of the number of hashes in lst1 and lst2
\param lst1 : 1st list whose elements are of the form [hash, start location, end location]
\param lst2: 2nd list whose elements are of the form [hash, start location, end location]
\return l3: list of common hashes and their locations in both documents. This is a list whose elements are of the form
[common hash, [start location in 1, end location in 1], [start location in 2, end location in 2]]
\return sim: similarity measure evaluated
"""
l1h = [h[0] for h in lst1]
l2h = [h[0] for h in lst2]
l1loc = {h[0]:h[1:] for h in lst1}
l2loc = {h[0]:h[1:] for h in lst2}
l3h = list(set(l1h)&set(l2h))
l3 = [[h, l1loc[h], l2loc[h]] for h in l3h]
sim = len(l3)/min(len(set(l1h)), len(set(l2h)))
return l3, sim
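# Usage sketch (editor's addition): hashes 11 and 13 appear in both documents,
# so sim = |intersection| / min(|set1|, |set2|) = 2 / 2 = 1.0.
_common, _sim = intersection(
    [[11, 0, 5], [12, 6, 9], [13, 10, 14]],   # doc 1: [hash, start, end]
    [[13, 2, 6], [11, 7, 11]],                # doc 2: [hash, start, end]
)
print(_sim)      # 1.0
print(_common)   # [[11, [0, 5], [7, 11]], [13, [10, 14], [2, 6]]] (order may vary)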
|
7288e523e743fda89596e56f217aac8c87899b50
| 20,522 |
def allrad2(F_nm, hull, N_sph=None, jobs_count=1):
"""Loudspeaker signals of All-Round Ambisonic Decoder 2.
Parameters
----------
F_nm : ((N_sph+1)**2, S) numpy.ndarray
Matrix of spherical harmonics coefficients of spherical function(S).
hull : LoudspeakerSetup
N_sph : int
Decoding order, defaults to hull.characteristic_order.
jobs_count : int or None, optional
Number of parallel jobs, 'None' employs 'cpu_count'.
Returns
-------
ls_sig : (L, S) numpy.ndarray
Loudspeaker L output signal S.
References
----------
Zotter, F., & Frank, M. (2018). Ambisonic decoding with panning-invariant
loudness on small layouts (AllRAD2). In 144th AES Convention.
Examples
--------
.. plot::
:context: close-figs
ls_setup = spa.decoder.LoudspeakerSetup(ls_x, ls_y, ls_z)
ls_setup.pop_triangles(normal_limit=85, aperture_limit=90,
opening_limit=150)
ls_setup.ambisonics_setup(update_hull=True)
spa.plots.decoder_performance(ls_setup, 'ALLRAD2')
"""
if not hull.ambisonics_hull:
raise ValueError('Run LoudspeakerSetup.ambisonics_setup() first!')
if hull.kernel_hull:
kernel_hull = hull.kernel_hull
else:
raise ValueError('Run LoudspeakerSetup.ambisonics_setup() first!')
if N_sph is None:
N_sph = hull.characteristic_order
N_sph_in = int(np.sqrt(F_nm.shape[0]) - 1)
assert(N_sph == N_sph_in) # for now
if N_sph_in > kernel_hull.N_kernel:
warn("Undersampling the sphere. Needs higher N_Kernel.")
# virtual t-design loudspeakers
J = len(kernel_hull.points)
# virtual speakers expressed as phantom sources (Kernel)
G_k = allrap2(src=kernel_hull.points, hull=hull, N_sph=N_sph,
jobs_count=jobs_count)
# tapering already applied in kernel, sufficient?
# virtual Ambisonic decoder
_k_azi, _k_colat, _k_r = utils.cart2sph(kernel_hull.points[:, 0],
kernel_hull.points[:, 1],
kernel_hull.points[:, 2])
# band-limited Dirac
Y_bld = sph.sh_matrix(N_sph, _k_azi, _k_colat, SH_type='real')
# ALLRAD2 Decoder
D = 4 * np.pi / J * G_k.T @ Y_bld
# loudspeaker output signals
ls_sig = D @ F_nm
return ls_sig
|
a34cfd7719c36dd8abf1313e3eca4aa2ce49477b
| 20,523 |
import os
import numpy as np
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
from skimage.exposure import match_histograms
def match_histogram(reference, image, ref_mask=None, img_mask=None):
"""Match the histogram of the T2-like anatomical with the EPI."""
nii_img = nb.load(image)
img_data = np.asanyarray(nii_img.dataobj)
ref_data = np.asanyarray(nb.load(reference).dataobj)
ref_mask = (
np.ones_like(ref_data, dtype=bool)
if ref_mask is None
else np.asanyarray(nb.load(ref_mask).dataobj) > 0
)
img_mask = (
np.ones_like(img_data, dtype=bool)
if img_mask is None
else np.asanyarray(nb.load(img_mask).dataobj) > 0
)
out_file = fname_presuffix(image, suffix="_matched", newpath=os.getcwd())
img_data[img_mask] = match_histograms(
img_data[img_mask],
ref_data[ref_mask],
)
nii_img.__class__(
img_data,
nii_img.affine,
nii_img.header,
).to_filename(out_file)
return out_file
|
3af65929e05955d297ae11ea6e8cd77884b544b9
| 20,524 |
def outfeed(token, xs):
"""Outfeeds value `xs` to the host. Experimental.
`token` is used to sequence infeed and outfeed effects.
"""
flat_xs, _ = pytree.flatten(xs)
return outfeed_p.bind(token, *flat_xs)
|
1b4b8c289ebd5dbb90ddd7b565c98ea9ebaec038
| 20,525 |
def add_gaussian_noise(images: list, var: list, random_var: float=None, gauss_noise: list=None):
"""
Add gaussian noise to input images. If random_var and gauss_noise are given, use them to compute the final images.
Otherwise, compute random_var and gauss_noise.
:param images: list of images
:param var: variance range from which the variance value is uniformly sampled if random_var is None.
:param random_var: optional value specifying the variance multiplier.
:param gauss_noise: optional value specifying the additive gaussian noise per image.
:return: transformed image, random_var value, gauss_noise_out list
"""
if random_var is None:
random_var = np.random.uniform(var[0], var[1])
mean = 0
new_images = []
gauss_noise_out = []
for i,image in enumerate(images):
row, col, c = image.shape
if gauss_noise is None or \
(gauss_noise is not None and row*col*c !=
gauss_noise[i].shape[0]*gauss_noise[i].shape[1] * gauss_noise[i].shape[2]):
gauss = np.random.normal(mean, random_var * 127.5, (row, col, c))
else:
gauss = gauss_noise[i]
gauss_noise_out.append(gauss)
gauss = gauss.reshape(row, col, c)
image1 = np.clip(image + gauss, 0., 255.)
new_images.append(image1)
return new_images, random_var, gauss_noise_out
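# Usage sketch (editor's addition): perturb one fake RGB image; passing the returned
# random_var and gauss_noise back in reproduces exactly the same perturbation.
import numpy as np  # the function above already relies on np being in scope
_img = np.full((4, 4, 3), 128.0)
_noisy, _rv, _noise = add_gaussian_noise([_img], var=[0.01, 0.05])
_same, _, _ = add_gaussian_noise([_img], var=[0.01, 0.05], random_var=_rv, gauss_noise=_noise)
assert np.allclose(_noisy[0], _same[0])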
|
e453f1fda24ec428eb1e33aec87db9456fa015b2
| 20,526 |
def get_sso_backend():
"""
Return SingleSignOnBackend class instance.
"""
return get_backend_instance(cfg.CONF.auth.sso_backend)
|
4c2a9b857006405f804826b1c096aa8d828d6e42
| 20,527 |
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
"""
Convenience layer that performs a convolution, a ReLU, and a pool.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
- pool_param: Parameters for the pooling layer
Returns a tuple of:
- out: Output from the pooling layer
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
s, relu_cache = relu_forward(a)
out, pool_cache = max_pool_forward_fast(s, pool_param)
cache = (conv_cache, relu_cache, pool_cache)
return out, cache
|
20d5f3d4b30edd91ef54b53a7b09882b3e2ab9b8
| 20,528 |
def ConnectWithReader(readerName, mode):
""" ConnectWithReader """
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
if hresult != SCARD_S_SUCCESS:
raise EstablishContextException(hresult)
hresult, hcard, dwActiveProtocol = SCardConnect(hcontext, readerName,
mode, SCARD_PROTOCOL_ANY)
return hresult, hcontext, hcard
|
af7606fb9ad669185c7a655967d0193b59404fe1
| 20,529 |
def interpolate(points: type_alias.TensorLike,
weights: type_alias.TensorLike,
indices: type_alias.TensorLike,
normalize: bool = True,
allow_negative_weights: bool = False,
name: str = "weighted_interpolate") -> type_alias.TensorLike:
"""Weighted interpolation for M-D point sets.
Given an M-D point set, this function can be used to generate a new point set
that is formed by interpolating a subset of points in the set.
Note:
In the following, A1 to An, and B1 to Bk are optional batch dimensions.
Args:
points: A tensor with shape `[B1, ..., Bk, M]` and rank R > 1, where M is
the dimensionality of the points.
weights: A tensor with shape `[A1, ..., An, P]`, where P is the number of
points to interpolate for each output point.
indices: A tensor of dtype tf.int32 and shape `[A1, ..., An, P, R-1]`, which
contains the point indices to be used for each output point. The R-1
dimensional axis gives the slice index of a single point in `points`. The
first n+1 dimensions of weights and indices must match, or be broadcast
compatible.
normalize: A `bool` describing whether or not to normalize the weights on
the last axis.
allow_negative_weights: A `bool` describing whether or not negative weights
are allowed.
name: A name for this op. Defaults to "weighted_interpolate".
Returns:
A tensor of shape `[A1, ..., An, M]` storing the interpolated M-D
points. The first n dimensions will be the same as weights and indices.
"""
with tf.name_scope(name):
points = tf.convert_to_tensor(value=points)
weights = tf.convert_to_tensor(value=weights)
indices = tf.convert_to_tensor(value=indices)
shape.check_static(
tensor=points, tensor_name="points", has_rank_greater_than=1)
shape.check_static(
tensor=indices,
tensor_name="indices",
has_rank_greater_than=1,
has_dim_equals=(-1, points.shape.ndims - 1))
shape.compare_dimensions(
tensors=(weights, indices),
axes=(-1, -2),
tensor_names=("weights", "indices"))
shape.compare_batch_dimensions(
tensors=(weights, indices),
last_axes=(-2, -3),
tensor_names=("weights", "indices"),
broadcast_compatible=True)
if not allow_negative_weights:
weights = asserts.assert_all_above(weights, 0.0, open_bound=False)
if normalize:
sums = tf.reduce_sum(input_tensor=weights, axis=-1, keepdims=True)
sums = asserts.assert_nonzero_norm(sums)
weights = safe_ops.safe_signed_div(weights, sums)
point_lists = tf.gather_nd(points, indices)
return vector.dot(
point_lists, tf.expand_dims(weights, axis=-1), axis=-2, keepdims=False)
|
13d49a570ac482a0b2b76ab0bb49bf7994df7204
| 20,530 |
def calcADPs(atom):
"""Calculate anisotropic displacement parameters (ADPs) from
anisotropic temperature factors (ATFs).
*atom* must have ATF values set for ADP calculation. ADPs are returned
as a tuple, i.e. (eigenvalues, eigenvectors)."""
linalg = importLA()
if not isinstance(atom, Atom):
raise TypeError('atom must be of type Atom, not {0:s}'
.format(type(atom)))
anisou = atom.getAnisou()
if anisou is None:
raise ValueError('atom does not have anisotropic temperature factors')
element = zeros((3,3))
element[0,0] = anisou[0]
element[1,1] = anisou[1]
element[2,2] = anisou[2]
element[0,1] = element[1,0] = anisou[3]
element[0,2] = element[2,0] = anisou[4]
element[1,2] = element[2,1] = anisou[5]
vals, vecs = linalg.eigh(element)
return vals[[2,1,0]], vecs[:, [2,1,0]]
|
0a0a0db2ca99d4a3b754acb9cd22ec4659af948f
| 20,531 |
def _create_groups(properties):
"""Create a tree of groups from a list of properties.
Returns:
Group: The root group of the tree. The name of the group is set to None.
"""
# We first convert properties into a dictionary structure. Each dictionary
# represents a group. The None key corresponds to the fields directly stored
# on that group. The other keys map from group name to another dictionary.
# For example:
# {
# None: [field1, field2, ...]
# 'groupA': { None: [field3] },
# 'groupB': {
# None: [],
# 'groupC': { None: [field4] },
# },
# }
#
# We then recursively convert this dictionary into a tree of Groups.
# TODO(shend): Skip the first step by changing Group attributes into methods.
def _dict_to_group(name, group_dict):
fields_in_current_group = group_dict.pop(None)
subgroups = [_dict_to_group(subgroup_name, subgroup_dict) for subgroup_name, subgroup_dict in group_dict.items()]
return Group(name, subgroups, _reorder_fields(fields_in_current_group))
root_group_dict = {None: []}
for property_ in properties:
current_group_dict = root_group_dict
if property_['field_group']:
for group_name in property_['field_group'].split('->'):
current_group_dict[group_name] = current_group_dict.get(group_name, {None: []})
current_group_dict = current_group_dict[group_name]
current_group_dict[None].extend(_create_fields(property_))
return _dict_to_group(None, root_group_dict)
|
bb3c582074c718250a1eb16a29830242b4ccaa5f
| 20,532 |
def bitset(array, bits):
"""
To determine if the given bits are set in an array.
Input Parameters
----------------
array : array
A numpy array to search.
bits : list or array
A list or numpy array of bits to search.
Note that the "first" bit is denoted as zero,
while the "second" bit is denoted as 1.
Optional Parameters:
None
Returns
--------
array
Returns a byte array of the same size as array. A pixel is
set if any of the bits requested are set in the same pixel
of array.
Procedure
---------
Uses the Gumley IDL ishft technique.
Example
--------
>>> bitset(np.array([3,4,1]),[0])
array([1, 0, 1])
Modification History
--------------------
2022-03-09 - Written by M. Cushing, University of Toledo.
Based on the mc_bitset.pro IDL program.
"""
# Define empty mask
mask = np.zeros_like(array, dtype=np.int8)
# Loop over every bit requested and identify those pixels for which that bit is set.
for val in bits:
tmp = (array >> val) & 1
mask = mask | tmp
return mask
|
cbae61dabfbe0789ff349f12b0df43860db72df7
| 20,533 |
import os
def build_classifier_pipeline(
input_files,
output_files,
config,
use_fake_tables = False,
converter_impl = ConverterImplType.PYTHON,
):
"""Pipeline for converting finetuning examples."""
if len(output_files) != len(input_files):
raise ValueError(f'Size mismatch: {output_files} {input_files}')
def _pipeline(root):
"""Pipeline."""
for (input_file, output_file) in zip(input_files, output_files):
name = os.path.basename(input_file)
interactions = read_interactions(root, input_file, name)
if use_fake_tables:
interactions = (
interactions
| f'InsertFakeTable_{name}' >> beam.Map(insert_fake_table_fn))
examples = (
interactions
| f'CheckTableId_{name}' >> beam.FlatMap(
pretrain_utils.check_table_id_fn)
| f'AddNumericValues_{name}' >> beam.Map(
pretrain_utils.add_numeric_values_fn)
| f'ToClassifierTensorflowExample_{name}' >> beam.ParDo(
ToClassifierTensorflowExample(
config,
name,
convert_impl_value=converter_impl.value,
)))
pretrain_utils.write_proto_outputs(output_file, f'WriteExamples_{name}',
examples, tf.train.Example)
return _pipeline
|
04c62ac25e3161316858f0e77cafc6796130d160
| 20,534 |
from typing import OrderedDict
def sample_gene_matrix(request, variant_annotation_version, samples, gene_list,
gene_count_type, highlight_gene_symbols=None):
""" highlight_gene_symbols - put these genes 1st """
# 19/07/18 - Plotly can't display a categorical color map. See: https://github.com/plotly/plotly.js/issues/1747
# So just doing as HTML table
if gene_list:
genes = gene_list.get_genes(variant_annotation_version.gene_annotation_release)
gene_symbols = set(gene_list.get_gene_names())
else:
# This was originally designed around a gene list, but now we need to support no gene list (only when uses
# variant classifications)
genes = []
gene_symbols = []
qs = gene_count_type.get_variant_queryset(variant_annotation_version)
GS_PATH = "variantannotation__transcript_version__gene_version__gene_symbol"
qs = qs.filter(**{GS_PATH + "__isnull": False})
for gene, gene_symbol in qs.values_list("variantannotation__gene", GS_PATH).distinct():
genes.append(gene)
gene_symbols.append(gene_symbol)
gene_values = list(gene_count_type.genevalue_set.all().order_by("id"))
default_color = "#d9d9d9"
default_text = ""
empty_gene_value = list(filter(lambda x: x.use_as_empty_value, gene_values))
if len(empty_gene_value) == 1:
default_color = empty_gene_value[0].rgb
phenotypes = ["Age", "HPO", "OMIM"]
highlight_gene_labels = []
other_gene_labels = []
gene_links_lookup = OrderedDict()
for gene_symbol in sorted(gene_symbols):
gene_classes_list = ["gene-label", gene_symbol]
highlight = highlight_gene_symbols and gene_symbol in highlight_gene_symbols
if highlight:
gene_classes_list.append("highlight-gene")
gene_classes = ' '.join(gene_classes_list)
if request.user.is_authenticated: # Only display links to logged in users
url = reverse('view_gene_symbol', kwargs={"gene_symbol": gene_symbol})
gene_symbol_text = f'<a class="{gene_classes}" href="{url}">{gene_symbol}</a>'
else:
gene_symbol_text = f"<span class='{gene_classes}'>{gene_symbol}</span>"
if highlight:
highlight_gene_labels.append(gene_symbol_text)
else:
other_gene_labels.append(gene_symbol_text)
gene_links_lookup[gene_symbol] = gene_symbol_text
matrix_rows = phenotypes + highlight_gene_labels + other_gene_labels
color_df = pd.DataFrame(index=matrix_rows, dtype='O')
text_df = pd.DataFrame(index=matrix_rows)
sample_names = []
used_sample_names = set()
for i, sample in enumerate(samples):
try:
can_access = False
if request.user.is_authenticated: # Only display links to logged in users
try:
Sample.get_for_user(request.user, sample.pk) # Throws exception
can_access = True
except (Sample.DoesNotExist, PermissionDenied):
pass
source = SampleAnnotationVersionVariantSource.objects.get(sample=sample,
variant_annotation_version=variant_annotation_version)
gvcc = GeneValueCountCollection.objects.get(source=source,
gene_count_type=gene_count_type)
gvc_qs = gvcc.genevaluecount_set.filter(gene__in=genes)
sample_code = "%03d" % i
if can_access:
view_sample_url = reverse('view_sample', kwargs={'sample_id': sample.pk})
sample_link = f'<a href="{view_sample_url}">{sample.name}</a>'
if sample_link in used_sample_names:
uniq_sample_name = sample.name + "_" + sample_code
sample_link = f'<a href="{view_sample_url}">{uniq_sample_name}</a>'
sample_name = sample_link
else:
sample_name = "S" + sample_code
sample_names.append(sample_name)
used_sample_names.add(sample_name)
color_df[sample_name] = default_color
color_df.loc["Age", sample_name] = '#FFFFFF'
color_df.loc["HPO", sample_name] = '#FFFFFF'
color_df.loc["OMIM", sample_name] = '#FFFFFF'
text_df[sample_name] = default_text
if sample.patient:
try:
# Check you have Patient permissions
patient = Patient.get_for_user(request.user, sample.patient.pk)
def format_ontology(ontology_term):
return f"<div title='{ontology_term}'>{ontology_term.name}</div>"
hpo, omim = OntologyTerm.split_hpo_and_omim(patient.get_ontology_term_ids())
hpo_text = " ".join(map(format_ontology, hpo))
omim_text = " ".join(map(format_ontology, omim))
try:
age = sample.specimen.age_at_collection_date
except:
age = None
text_df.loc["Age", sample_name] = age or ''
text_df.loc["HPO", sample_name] = hpo_text
text_df.loc["OMIM", sample_name] = omim_text
except PermissionDenied:
pass
except Patient.DoesNotExist:
pass
FIELDS = ["gene__geneversion__gene_symbol", "value__rgb", "value__show_counts", "count"]
for gene_symbol, rgb, show_counts, count in gvc_qs.values_list(*FIELDS):
gene_link = gene_links_lookup[gene_symbol]
color_df.loc[gene_link, sample_name] = rgb
if show_counts:
text_df.loc[gene_link, sample_name] = count
except (SampleAnnotationVersionVariantSource.DoesNotExist, GeneValueCountCollection.DoesNotExist):
pass
def set_style(s):
color_series = color_df[s.name]
styles = []
for color in color_series:
styles.append(f"color: {rgb_invert(color)}; background-color: {color};")
return styles
style = text_df.style.apply(set_style)
style = style.set_table_attributes('class="sample-gene-matrix"')
text_table_html = style.render()
context = {"text_table_html": text_table_html,
"gene_values": gene_values}
return render(request, 'snpdb/patients/cohort_gene_counts_matrix.html', context)
|
70ed9387ebba73664efdf3c94fe08a67ed07acc9
| 20,535 |
import requests
import yaml
import os
def validate_yaml_online(data, schema_uri=None):
"""
Validates the given data structure against an online
schema definition provided by schema_uri.
If schema_uri is not given, we try to get it from
the 'descriptor_schema' field in 'data'.
Returns: True/False
"""
if schema_uri is None:
# try to get schema_uri from data
schema_uri = data.get("descriptor_schema", None)
if schema_uri is None:
LOG.error("Cannot find URI pointing to schema.")
return False
try:
# try to download schema
r = requests.get(schema_uri, timeout=3)
# try to parse schema
        schema = yaml.safe_load(r.text)
except BaseException as e:
LOG.warning("Couldn't fetch schema from '{}': {}".format(
schema_uri, e))
# ok, no internet? lets try to use a local NAPD schema
try:
path = os.path.join(
os.path.expanduser("~"),
".tng-schema/package-specification/napd-schema.yml")
LOG.info("Using local schema: {}".format(path))
with open(path, "r") as f:
            schema = yaml.safe_load(f)
except BaseException as e:
LOG.error("Get schema from '{}' or '{}': {}".format(
schema_uri, path, e))
return False
try:
if schema is None:
raise BaseException("No schema found online and offile")
# validate data against schema
validate(data, schema)
except BaseException as e:
LOG.error("Couldn't validate against schema from '{}': {}".format(
schema_uri, e))
return False
return True
|
624b9aa706fc0afda24bf9c0afc8756637c8a9bd
| 20,536 |
import random
def normal218(startt,endt,money2,first,second,third,forth,fifth,sixth,seventh,zz1,zz2,bb1,bb2,bb3,aa1,aa2):
"""
for source and destination id generation
"""
"""
for type of banking work,label of fraud and type of fraud
"""
idvariz=random.choice(zz2)
idgirande=random.choice(bb2)
first.append("transfer")
second.append(idvariz)
third.append(idgirande)
sixth.append("0")
seventh.append("none")
"""
for amount of money generation
"""
numberofmoney=random.randrange(50000,money2)
forth.append(numberofmoney)
"""
for date and time generation randomly between two dates
"""
final=randomDate(startt,endt, random.random())
fifth.append(final)
return (first,second,third,forth,fifth,sixth,seventh)
|
aec17f55306691395dc2797cf30e175e0cb5b9e8
| 20,537 |
def get_fast_loaders(dataset, batch_size, test, device, data_path=None, train_transform=None, validation_transform=None,
train_percentage=0.85, workers=4):
"""Return :class:`FastLoader` for training and validation, outfitted with a random sampler.
If set to run on the test set, :param:`train_percentage` will be ignored and set to 1.
The transforms should only include operations on PIL images and should not convert the images to a tensor, nor
handle normalization of the tensors. This is handled at runtime by the fast loaders.
If you are not looking for high-performance, prefer :func:`get_loaders`.
:param dataset: name of the dataset, (MNIST, FashionMNIST, CIFAR-10, CIFAR-100, ImageNette, ImageWoof, ImageNet)
are available.
:type dataset: str
:param batch_size: batch size for training and validation.
:type batch_size: int
:param test: run validation on the test set.
:type test: bool
:param data_path: path to folder containing dataset.
:type data_path: str
:param train_transform: PyTorch transform to apply to images for training.
:type train_transform: torchvision.transforms.Compose
:param validation_transform: PyTorch transform to apply to images for validation.
:type validation_transform: torchvision.transforms.Compose
:param train_percentage: percentage of the data in the training set.
:type train_percentage: float
:param workers: number of subprocesses to use for data loading. Use 0 for loading in the main process.
:type workers: int
:return: training and validation fast data loaders.
:rtype: (FastLoader, FastLoader)
"""
# Check if any parameters has been set to its default value, and if so, setup the defaults.
data_path, train_transform, validation_transform = _setup_defaults(dataset, data_path, train_transform,
validation_transform, fast=True)
# Get all of the training data available.
train_data = _get_train_data(dataset, data_path, train_transform)
log.log("Training data succesfully fetched!", LOGTAG, log.Level.DEBUG)
if not test:
# Perform a train/validation split on the training data available:
# For performance reasons, the train/validation split will always be the same.
# TODO: Implement random train/validation split with fast loading and distributed training.
log.log("Running in standard training/validation mode.", LOGTAG, log.Level.INFO)
dataset_size = len(train_data)
split_index = int(dataset_size * train_percentage)
log.log("{0}:{1}".format(dataset_size, split_index), LOGTAG, log.Level.HIGHLIGHT)
validation_data = train_data[split_index:]
train_data = train_data[:split_index]
log.log("Validation data succesfully fetched!", LOGTAG, log.Level.DEBUG)
else:
# Fetch the test data:
log.log("Running in <b>test</b> mode. All training data available will be used, and "
"validation will be done on the test set. Are you really ready to publish?", LOGTAG, log.Level.WARNING)
validation_data = _get_test_data(dataset, data_path, validation_transform)
log.log("Test data succesfully fetched!", LOGTAG, log.Level.DEBUG)
if distributed.is_initialized():
# If running in distributed mode, use a DistributedSampler:
log.log("Running in <b>distributed</b> mode. This hasn't been thoroughly tested, beware!",
LOGTAG, log.Level.WARNING)
train_sampler = data_utils.distributed.DistributedSampler(train_data)
else:
# Otherwise, default to a RandomSampler:
train_sampler = data_utils.RandomSampler(train_data)
# Build the train and validation loaders, using pinned memory and a custom collate function to build the batches.
train_loader = data_utils.DataLoader(train_data, batch_size=batch_size, num_workers=workers, pin_memory=True,
sampler=train_sampler, collate_fn=_fast_collate, drop_last=True)
log.log("Train loader succesfully created!", LOGTAG, log.Level.DEBUG)
validation_loader = data_utils.DataLoader(validation_data, batch_size=batch_size, num_workers=workers,
pin_memory=True, collate_fn=_fast_collate)
log.log("Validation loader succesfully created!", LOGTAG, log.Level.DEBUG)
# Wrap the PyTorch loaders in the custom FastLoader class and feed it the normalization parameters associated
# with the dataset.
return FastLoader(train_loader, device, *NORMALIZATION[DATASETS[dataset]]), \
FastLoader(validation_loader, device, *NORMALIZATION[DATASETS[dataset]])
|
444ff949ba247ebc21f1e34fcfcb752ccbf6360d
| 20,538 |
from typing import List
def find_paths(root: TreeNode, required_sum: int) -> List[List[int]]:
"""
Time Complexity: O(N^2)
Space Complexity: O(N)
Parameters
----------
root : TreeNode
Input binary tree.
required_sum : int
Input number 'S'.
Returns
-------
all_paths : List[List[int]]
All paths from root-to-leaf such that the sum of all the node values of each path equals 'S'.
"""
def find_paths_recursive(cur_node: TreeNode, path_sum: int, cur_path: List[int], ins_all_paths: List[List[int]]):
if not cur_node:
return
cur_path.append(cur_node.val)
if cur_node.val == path_sum and not cur_node.left and not cur_node.right:
ins_all_paths.append(cur_path.copy())
else:
find_paths_recursive(cur_node.left, path_sum - cur_node.val, cur_path, ins_all_paths)
find_paths_recursive(cur_node.right, path_sum - cur_node.val, cur_path, ins_all_paths)
del cur_path[-1]
all_paths = []
find_paths_recursive(root, required_sum, [], all_paths)
return all_paths
|
71cd37db1be97015173748e8d2142601e306552b
| 20,539 |
import time
import requests
import json
def macro_bank_switzerland_interest_rate():
"""
็ๅฃซๅคฎ่กๅฉ็ๅณ่ฎฎๆฅๅ, ๆฐๆฎๅบ้ดไป20080313-่ณไป
https://datacenter.jin10.com/reportType/dc_switzerland_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_switzerland_interest_rate_decision_all.js?v=1578582240
    :return: current value (%) from the Swiss central bank interest rate decision report
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_switzerland_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["瑞士央行利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "25",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "switzerland_interest_rate"
temp_df = temp_df.astype("float")
return temp_df
|
14eb2ce7dc85e0611a8d55147172256ed8a2c71e
| 20,540 |
def create_dataframe(message):
"""Create Pandas DataFrame from CSV."""
dropdowns = []
df = pd.DataFrame()
if message != "":
df = pd.read_csv(message)
        df = df.sample(n=50, random_state=2)  # subsample to reduce data load (Heroku free tier)
    if len(df) == 0:
        return pd.DataFrame(), dropdowns
df.insert(0,"Index", df.index)
for column in df.columns:
dropdowns.append({"label":column, "value":column})
return df, dropdowns
|
c0d764ed47cba31d0129d1c61e645065ba2e99b5
| 20,541 |
def load_model(path):
"""
    Load a model from file, dispatching on the model class named in the file's first line.
    :param path: path to the model file
    :return: the loaded SersicModel, ExponentialDiskModel or DeprojectionModel
"""
# Get the first line of the file
with open(path, 'r') as f: first_line = f.readline()
# Create the appropriate model
if "SersicModel" in first_line: return SersicModel.from_file(path)
elif "ExponentialDiskModel" in first_line: return ExponentialDiskModel.from_file(path)
elif "DeprojectionModel" in first_line: return DeprojectionModel.from_file(path)
else: raise ValueError("Unrecognized model file")
|
425113abe09b1de1efa1d5cf1ca2df4d999886c2
| 20,542 |
import functools
def add_metaclass(metaclass):
"""
Class decorator for creating a class with a metaclass.
Borrowed from `six` module.
"""
@functools.wraps(metaclass)
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
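# Usage sketch (editor's addition): attach a registering metaclass in a way that is
# compatible with both Python 2 and 3 (the decorator rebuilds the class via the metaclass).
class _Registry(type):
    classes = []
    def __new__(mcs, name, bases, attrs):
        cls = super(_Registry, mcs).__new__(mcs, name, bases, attrs)
        _Registry.classes.append(cls)
        return cls
@add_metaclass(_Registry)
class _Plugin(object):
    pass
assert type(_Plugin) is _Registry
assert _Plugin in _Registry.classes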
|
f6ee3feef418d5bff4f0495fdbfc98c9a8f48665
| 20,543 |
import torch
def max_pool_nd_inverse(layer, relevance_in : torch.Tensor, indices : torch.Tensor = None,
max : bool = False) -> torch.Tensor :
"""
    Inversion of a max pooling layer
Arguments
---------
relevance : torch.Tensor
        Input relevance
indices : torch.Tensor
Maximum feature indexes obtained when max pooling
max : bool
Implement winner takes all scheme in relevance re-distribution
Returns
-------
torch.Tensor
Output relevance
"""
if indices is None :
indices = layer.indices
out_shape = layer.out_shape
bs = relevance_in.size(0)
relevance_in = torch.cat([r.view(out_shape) for r in relevance_in ], dim=0)
indices = torch.cat([indices] * bs, dim=0)
return ( winner_takes_all(relevance_in, layer.in_shape, layer.indices)
if max else relevance_in )
|
ae3ad1b2a3791063c90568f0c954d3de6f3985f8
| 20,544 |
def aroon_up(close, n=25, fillna=False):
"""Aroon Indicator (AI)
Identify when trends are likely to change direction (uptrend).
Aroon Up - ((N - Days Since N-day High) / N) x 100
https://www.investopedia.com/terms/a/aroon.asp
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
return AroonIndicator(close=close, n=n, fillna=fillna).aroon_up()
|
2a44c15b06e9a1d2facaa800a186b780fc226717
| 20,545 |
import os
import hashlib
def hash_file(filename):
"""
computes hash value of file contents, to simplify pytest assert statements for
complex test cases that output files. For cross-platform compatibility, make sure
files are read/written in binary, and use unix-style line endings, otherwise hashes
will not match despite content being same in ASCII.
Args:
filename
Returns:
hashnumber
"""
if os.path.isfile(filename) is False:
raise Exception("File not found for hash operation")
# open file for reading in binary mode
with open(filename, "rb") as file:
return hashlib.sha512(file.read()).hexdigest()
|
e26f92869a44c0e60fcd58764069b0c7828dee95
| 20,546 |
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
"""
Computes the forward pass for spatial batch normalization.
Inputs:
- x: Input data of shape (N, C, H, W)
- gamma: Scale parameter, of shape (C,)
- beta: Shift parameter, of shape (C,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance. momentum=0 means that
old information is discarded completely at every time step, while
momentum=1 means that new information is never incorporated. The
default of momentum=0.9 should work well in most situations.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: Output data, of shape (N, C, H, W)
- cache: Values needed for the backward pass
"""
out, cache = None, None
###########################################################################
# TODO: Implement the forward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should#
# be very short; ours is less than five lines. #
###########################################################################
N, C, H, W = x.shape
out = np.zeros((N, C, H, W))
cache = []
for c in range(C):
xc = x[:, c, :, :]
outc, cachec = batchnorm_forward(xc.reshape(N, H * W), gamma[c], beta[c], bn_param)
out[:,c,:,:] = outc.reshape(N, H, W)
cache += [cachec]
###########################################################################
# END OF YOUR CODE #
###########################################################################
return out, cache
|
0b27b4b00e21de2117a1f079741f0b968fd30042
| 20,547 |
def triangulate(points):
""" triangulate the plane for operation and visualization
"""
num_points = len(points)
    indices = np.arange(num_points, dtype=int)
segments = np.vstack((indices, np.roll(indices, -1))).T
tri = pymesh.triangle()
tri.points = np.array(points)
tri.segments = segments
tri.verbosity = 0
tri.run()
return tri.mesh
|
d18a7d171715217b59056337c86bf5b49609b664
| 20,548 |
def immutable():
""" Get group 1. """
allowed_values = {'NumberOfPenguins', 'NumberOfSharks'}
return ImmutableDict(allowed_values)
|
851853b54b106cbc3ee621119a863a7b2862e8d5
| 20,549 |
def test_plot_colors_sizes_proj(data, region):
"""
Plot the data using z as sizes and colors with a projection.
"""
fig = Figure()
fig.coast(region=region, projection="M15c", frame="af", water="skyblue")
fig.plot(
x=data[:, 0],
y=data[:, 1],
color=data[:, 2],
size=0.5 * data[:, 2],
style="cc",
cmap="copper",
)
return fig
|
4a9f2727d046d91445504d8f147fcef95a261cb5
| 20,550 |
def predict_to_score(predicts, num_class):
"""
    Convert per-class probabilities into an expected score. Column i-1 holds the
    probability of score i (for i = 1 .. num_class-1), so the last column corresponds
    to score 0 and contributes nothing to the weighted sum.
    ===
    Example: num_class=3 (scores 0-2): (0.8, 0.2, 0.0) * (1, 2, 0) -> 1.2
    :param predicts: (N, num_class) array of per-class probabilities
    :param num_class: number of score classes
    :return: (N,) array of expected scores
"""
scores = 0.
i = 0
while i < num_class:
scores += i * predicts[:, i - 1]
i += 1
return scores
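# Worked example (editor's addition): with num_class=3, column 0 weights score 1,
# column 1 weights score 2, and the last column (score 0) contributes nothing:
# 0.8*1 + 0.2*2 + 0.0*0 = 1.2.
import numpy as np  # the function above relies on array slicing
print(predict_to_score(np.array([[0.8, 0.2, 0.0]]), 3))   # [1.2]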
|
ee4038583404f31bed42bed4eaf6d0c25684c0de
| 20,551 |
def quasi_diagonalize(link):
"""sort clustered assets by distance"""
link = link.astype(int)
sort_idx = pd.Series([link[-1, 0], link[-1, 1]])
num_items = link[-1, 3] # idx of original items
while sort_idx.max() >= num_items:
sort_idx.index = list(range(0, sort_idx.shape[0] * 2, 2)) # make space
df0 = sort_idx[sort_idx >= num_items] # find clusters
i = df0.index
j = df0.values - num_items
sort_idx[i] = link[j, 0] # item 1
df0 = pd.Series(link[j, 1], index=i + 1)
        sort_idx = pd.concat([sort_idx, df0])  # item 2
sort_idx = sort_idx.sort_index() # re-sort
sort_idx.index = list(range(sort_idx.shape[0])) # re-index
return sort_idx.tolist()
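# Usage sketch (editor's addition): the function expects a SciPy hierarchical-clustering
# linkage matrix; observations that cluster together end up adjacent in the returned order.
import numpy as np
import pandas as pd  # the function above relies on pd being in scope
from scipy.cluster.hierarchy import linkage
_obs = np.array([[0.0], [0.1], [5.0], [5.1], [9.9]])
print(quasi_diagonalize(linkage(_obs, method="single")))   # a permutation of [0, 1, 2, 3, 4]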
|
8f10f62d5f0b3dc7b8687134497dd42f183194b4
| 20,552 |
import os
import fnmatch
def dataset_files(rootdir, pattern):
"""Returns a list of all image files in the given directory"""
matches = []
for root, dirnames, filenames in os.walk(rootdir):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return matches
|
b99d24d48b26393cc7cbf4df9e7cd7f1d7c018e0
| 20,553 |
def keyword(variable):
"""
    Verify that the variable name is not a reserved SQL/NoSQL keyword for any backend and is a valid field name
    :param variable: String
    :return: confirmation string if the name is valid; raises SyntaxError otherwise
"""
for backend in ADAPTERS:
if variable.upper() in ADAPTERS[backend]:
msg = (
f'Variable "{variable}" is a "{backend.upper()}" '
f"reserved SQL/NOSQL keyword"
)
raise SyntaxError(msg)
if not VALID_TABLE_FIELD.match(variable) or PYTHON_KEYWORDS.match(variable):
raise SyntaxError(f"Field: invalid field name: {variable}")
return f"{variable} isn't a known keyword"
|
b1c6322d3ce3c9ee4bda4eff251af44ca3e2c699
| 20,554 |
import logging
import json
def gcp_api_main(request):
"""Responds to any HTTP request.
Args:
request (flask.Request): HTTP request object.
Returns:
The response text or any set of values that can be turned into a
Response object using
`make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
"""
logging.basicConfig(level=logging.INFO)
try:
request_json = request.get_json()
if request.args and 'message' in request.args:
return request.args.get('message')
elif request_json and 'message' in request_json:
return request_json['message']
elif request_json and 'stock_data' in request_json and 'name' in request_json:
logging.info('run_fb_prophet')
return json.dumps(
FBProphetThread.run_fb_prophet(
json.dumps(request_json['model_input']))).replace('NaN', '"-"')
else:
return f'Hello World!'
except Exception as ex:
err_msg = 'Generated an exception: {ex}'.format(ex=ex)
logging.error(err_msg)
return err_msg
|
21ec4b1dba4ad6f5dac518a3907cd15579a0ba00
| 20,555 |
def box_area_3d(boxes: Tensor) -> Tensor:
"""
Computes the area of a set of bounding boxes, which are specified by its
(x1, y1, x2, y2, z1, z2) coordinates.
Arguments:
boxes (Union[Tensor, ndarray]): boxes for which the area will be computed. They
are expected to be in (x1, y1, x2, y2, z1, z2) format. [N, 6]
Returns:
area (Union[Tensor, ndarray]): area for each box [N]
"""
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 5] - boxes[:, 4])
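# Illustrative usage (added sketch, not from the original source): one 2 x 2 x 3 box.
import torch
example_boxes = torch.tensor([[0., 0., 2., 2., 0., 3.]])
example_area = box_area_3d(example_boxes)  # -> tensor([12.])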
|
be8b3c4d58d301d2044e7cfe2844516933c1247f
| 20,556 |
from sqlalchemy import create_mock_engine
import re
def mock_engine(dialect_name=None):
"""Provides a mocking engine based on the current testing.db.
This is normally used to test DDL generation flow as emitted
by an Engine.
It should not be used in other cases, as assert_compile() and
assert_sql_execution() are much better choices with fewer
moving parts.
"""
if not dialect_name:
dialect_name = config.db.name
buffer = []
def executor(sql, *a, **kw):
buffer.append(sql)
def assert_sql(stmts):
recv = [re.sub(r"[\n\t]", "", str(s)) for s in buffer]
assert recv == stmts, recv
def print_sql():
d = engine.dialect
return "\n".join(str(s.compile(dialect=d)) for s in engine.mock)
engine = create_mock_engine(dialect_name + "://", executor)
assert not hasattr(engine, "mock")
engine.mock = buffer
engine.assert_sql = assert_sql
engine.print_sql = print_sql
return engine
|
d773a6e2cd0b2060e5dd66d5ec4e758ac7f1f504
| 20,557 |
def get_feedback_thread_reply_info_by_reply_to_id(reply_to_id):
"""Gets the domain object corresponding to the model which is fetched by
reply-to-id field.
Args:
reply_to_id: str. The reply_to_id to search for.
Returns:
FeedbackThreadReplyInfo or None. The corresponding domain object.
"""
model = email_models.GeneralFeedbackEmailReplyToIdModel.get_by_reply_to_id(
reply_to_id)
if model is None:
return None
return get_feedback_thread_reply_info_from_model(model)
|
e27521030717a1dc15cd9e678dabafba86007f90
| 20,558 |
def _cross_correlations(n_states):
"""Returns list of crosscorrelations
Args:
n_states: number of local states
Returns:
list of tuples for crosscorrelations
>>> l = _cross_correlations(np.arange(3))
>>> assert l == [(0, 1), (0, 2), (1, 2)]
"""
l = n_states
cross_corr = [[(l[i], l[j]) for j in l[1:][i:]] for i in l[:-1]]
return [item for sublist in cross_corr for item in sublist]
|
c11c5655ba655a29991421c6627a3eaca4f7681d
| 20,559 |
def select_interface(worker):
    """
    Prompt the user to select one of the worker's interfaces and return its
    1-based index, or -1 if no interface is available or none is selected.
    """
    interfaces = worker.interfaces_list()
    if len(interfaces) == 0:
        print(' Error. Worker without interface known.')
        return -1
    elif len(interfaces) == 1:
        return 1
    option = input(' Select interface -> ')
    if option == '':
        return -1
    while not option.isdigit() or int(option) < 1 or int(option) > len(interfaces):
        print(' Error. No worker interface was selected.')
        option = input(' Select interface -> ')
        if option == '':
            return -1
    return int(option)
|
97d90670dd69d57b4e1f85df250a0abc56106fb6
| 20,560 |
import numpy as np
def get_middle(arr):
    """
    Return a "middle" value of a 2-D array: either the element at index
    shape // 2 or the average of a small neighbouring block, depending on
    the parity of shape / 2 in each dimension.
    """
n_val = np.array(arr.shape) / 2.0
n_int = n_val.astype(np.int0)
# print(n_int)
if n_val[0] % 2 == 1 and n_val[1] % 2 == 1:
return arr[n_int[0], n_int[1]]
if n_val[0] % 2 == 0 and n_val[1] % 2 == 0:
return np.average(arr[n_int[0]:n_int[0] + 2, n_int[1]:n_int[1] + 2])
if n_val[0] % 2 == 1 and n_val[1] % 2 == 0:
return np.average(arr[n_int[0], n_int[1]:n_int[1]+2])
return np.average(arr[n_int[0]:n_int[0]+2, n_int[1]])
|
9651bcadc991bbf7a0c635a8870356f422d43e7e
| 20,561 |
def annotate_segmentation(image, segmentation):
"""Return annotated segmentation."""
annotation = AnnotatedImage.from_grayscale(image)
for i in segmentation.identifiers:
region = segmentation.region_by_identifier(i)
color = pretty_color()
annotation.mask_region(region.border.dilate(), color)
props = skimage.measure.regionprops(segmentation)
for p in props:
try:
minr, minc, maxr, maxc = p.bbox
cval = int(p.centroid[1])
line = skimage.draw.line(minr, cval, maxr, cval)
annotation.mask_region(line, (0, 255, 0))
except IndexError:
# Don't draw line if it falls outside of the image.
pass
return annotation
|
2fadbe8d2339e37bea0dbfe054199002a3997b20
| 20,562 |
def get_champ_data(champ: str, tier: int, rank: int):
"""
Gives Champ Information by their champname, tier, and rank.
"""
champ_info = NewChampsDB()
try:
champ_info.get_data(champ, tier, rank)
champs_dict = {
"name": f"{champ_info.name}",
"released": champ_info.released,
"class": champ_info.class_type,
"tier": champ_info.tier,
"rank": champ_info.rank,
"prestige": champ_info.prestige,
"hp": champ_info.hp,
"attack": champ_info.attack,
"crit_rate": champ_info.crit_rate,
"crit_dmge": champ_info.crit_dmge,
"armor": champ_info.armor,
"block_prof": champ_info.block_prof,
"energy_resist": champ_info.energy_resist,
"physical_resist": champ_info.physical_resist,
"crit_resist": champ_info.crit_resist,
"sig_info": champ_info.sig_info,
"abilities": champ_info.abilities,
"challenger_rating": champ_info.challenger_rating,
"find": champ_info.find,
"tags": champ_info.tags,
"abilities": champ_info.abilities,
"contact": champ_info.contact,
"url_page": f"{champ_info.url_page}",
"img_portrait": f"{champ_info.img_portrait}",
"champid": f"{champ_info.champid}",
}
champs_dict.update({"status": 200, "detail": "Successful"})
return champs_dict
except Exception as e:
if isinstance(e, FileNotFoundError):
raise HTTPException(status_code=404, detail="404: " + champ_info.error)
elif isinstance(e, KeyError):
raise HTTPException(status_code=400, detail="400: " + champ_info.error)
else:
raise e
|
7d810fc5ced3d187c68533f42c2443ef8bec651b
| 20,563 |
import http
import os
def handler(event, context):
"""
Handler method for insert resource function.
"""
if event is None:
return response(http.HTTPStatus.BAD_REQUEST, Constants.error_insufficient_parameters())
if event is None or Constants.event_body() not in event or Constants.event_http_method() not in event:
return response(http.HTTPStatus.BAD_REQUEST, Constants.error_insufficient_parameters())
    if event[Constants.event_body()] is None or len(event[Constants.event_body()]) == 0:
return response(http.HTTPStatus.BAD_REQUEST, Constants.error_insufficient_parameters())
global _dynamodb
if _dynamodb is None:
try:
ddb = DynamoDB()
_dynamodb = ddb.connect(os.environ[Constants.env_var_region()])
except Exception as e:
return response(http.HTTPStatus.INTERNAL_SERVER_ERROR, str(e))
try:
request_handler = RequestHandler(_dynamodb)
except Exception as e:
return response(http.HTTPStatus.INTERNAL_SERVER_ERROR, str(e))
return request_handler.handler(event, context)
|
671144bed701c8bd2480b86522bd2002ccd35e73
| 20,564 |
def serving_input_receiver_fn():
"""This is used to define inputs to serve the model.
Returns:
        A ServingInputReceiver object.
"""
csv_row = tf.placeholder(shape=[None], dtype=tf.string)
features, _ = _make_input_parser(with_target=False)(csv_row)
return tf.estimator.export.ServingInputReceiver(features,
{'csv_row': csv_row})
|
bcc6f0c4050d40df114ba4e5d895524f736b463a
| 20,565 |
import time
def offsetTimer():
"""
'Starts' a timer when called, returns a timer function that returns the
time in seconds elapsed since the timer was started
"""
start_time = time.monotonic()
def time_func():
return time.monotonic() - start_time
return time_func
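# Illustrative usage (added sketch, not from the original source):
timer = offsetTimer()
time.sleep(0.1)
elapsed = timer()  # roughly 0.1 seconds since offsetTimer() was called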
|
348105a408ccedd1fcb840b73d5a58dfd59dd8cc
| 20,566 |
import os
def _get_sender(pusher_email):
"""Returns "From" address based on env config and default from."""
use_author = 'GITHUB_COMMIT_EMAILER_SEND_FROM_AUTHOR' in os.environ
if use_author:
sender = pusher_email
else:
sender = os.environ.get('GITHUB_COMMIT_EMAILER_SENDER')
return sender
|
09167bfd09ff031d60b4ca033fbb0cad206393f9
| 20,567 |
from typing import Callable
import functools
def find_resolution(func: Callable = None) -> Callable:
"""Decorator that gives the decorated function the image resolution."""
@functools.wraps(func)
def wrapper(self: MultiTraceChart, *args, **kwargs):
if 'width' not in kwargs:
kwargs['width'] = self.resolution[0]
if 'height' not in kwargs:
kwargs['height'] = self.resolution[1]
if 'resolution' in kwargs:
kwargs['width'] = kwargs['resolution'][0]
kwargs['height'] = kwargs['resolution'][1]
del kwargs['resolution']
if 'size' in kwargs:
kwargs['width'] = kwargs['size'][0]
kwargs['height'] = kwargs['size'][1]
del kwargs['size']
return func(self, *args, **kwargs)
return wrapper
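# Illustrative usage (added sketch, not from the original source). `MultiTraceChart`
# is not defined in this snippet, so a minimal stand-in is declared first; the
# decorator only needs the object to expose a `resolution` attribute.
class MultiTraceChart:
    resolution = (800, 600)

    def render(self, width=None, height=None):
        return width, height

MultiTraceChart.render = find_resolution(MultiTraceChart.render)
chart = MultiTraceChart()
assert chart.render() == (800, 600)                  # falls back to self.resolution
assert chart.render(size=(400, 300)) == (400, 300)   # `size` is unpacked into width/height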
|
70edffcec5ac772bd52cb819db589d26497fda87
| 20,568 |
def transform_spikes_to_isi(self, spikes, time_epoch, last_event_is_spike=False):
"""Convert spike times to data array, which is a suitable format for optimization.
Parameters
----------
spikes : numpy array (num_neuron,N), dtype=np.ndarray
A sequence of spike times for each neuron on each trial. Each entry is 1D array of floats.
time_epoch : list of tuples
List of N tuples, where N is the number of trials. Each tuple consists of the trial's start time and end time in seconds.
Note that the end time should be an actual end time, but not the timeout in the case of last_event_is_spike is True.
last_event_is_spike : bool
If true, trial termination time will not be recorded. Otherwise, trial termination time will be recorded.
Returns
-------
data : numpy array (N,2),dtype=np.ndarray.
Spike data packed as numpy array of the size (N,2), where each elements is a 1D array of floats.
N is the number of trials, and for each trial the first column contains the interspike intervals (ISIs),
and the second column contains the corresponding neuronal indices.
"""
num_neuron, num_trial = spikes.shape
# initialize data array
data = np.empty((num_trial, 2), dtype=np.ndarray)
# indices of neurons that spiked
spike_ind = np.empty(num_neuron, dtype=np.ndarray)
# transform spikes to interspike intervals format
for iTrial in range(num_trial):
for iCell in range(num_neuron):
            spike_ind[iCell] = iCell * np.ones(len(spikes[iCell, iTrial]), dtype=int)
all_spikes = np.concatenate(spikes[:, iTrial], axis=0)
all_spike_ind = np.concatenate(spike_ind[:], axis=0)
# create data array
data[iTrial, 0] = np.zeros(len(all_spikes) + (not last_event_is_spike))
if all_spikes.shape[0] == 0:
data[iTrial, 1] = np.zeros(0)
# If no spikes emitted, set to trial beginning time
last_spike_time = time_epoch[iTrial][0]
else:
# sort spike times and neuron index arrays
ind_sort = np.argsort(all_spikes)
all_spikes = all_spikes[ind_sort]
all_spike_ind = all_spike_ind[ind_sort]
data[iTrial, 0][1:len(all_spikes)] = all_spikes[1:] - all_spikes[:-1]
data[iTrial, 0][0] = all_spikes[0] - time_epoch[iTrial][0] # handle the first ISI
last_spike_time = all_spikes[-1]
if not last_event_is_spike:
data[iTrial, 0][-1] = time_epoch[iTrial][1] - last_spike_time
        # assign indices of the neurons which fired, -1 to the absorption event
data[iTrial, 1] = all_spike_ind if last_event_is_spike else np.concatenate((all_spike_ind, [-1]))
return data
|
cc2b54e80e00b10b8cabf79093509fde1980b804
| 20,569 |
def api_github_v2(user_profile, event, payload, branches, default_stream, commit_stream, issue_stream, topic_focus = None):
"""
processes github payload with version 2 field specification
`payload` comes in unmodified from github
`default_stream` is set to what `stream` is in v1 above
`commit_stream` and `issue_stream` fall back to `default_stream` if they are empty
This and allowing alternative endpoints is what distinguishes v1 from v2 of the github configuration
"""
if not commit_stream:
commit_stream = default_stream
if not issue_stream:
issue_stream = default_stream
target_stream = commit_stream
repository = payload['repository']
if not topic_focus:
topic_focus = repository['name']
# Event Handlers
if event == 'pull_request':
pull_req = payload['pull_request']
subject = github_generic_subject('pull request', topic_focus, pull_req)
content = github_generic_content('pull request', payload, pull_req)
elif event == 'issues':
# in v1, we assume that this stream exists since it is
# deprecated and the few realms that use it already have the
# stream
target_stream = issue_stream
issue = payload['issue']
subject = github_generic_subject('issue', topic_focus, issue)
content = github_generic_content('issue', payload, issue)
elif event == 'issue_comment':
# Comments on both issues and pull requests come in as issue_comment events
issue = payload['issue']
if 'pull_request' not in issue or issue['pull_request']['diff_url'] is None:
# It's an issues comment
target_stream = issue_stream
noun = 'issue'
else:
# It's a pull request comment
noun = 'pull request'
subject = github_generic_subject(noun, topic_focus, issue)
comment = payload['comment']
content = ("%s [commented](%s) on [%s %d](%s)\n\n~~~ quote\n%s\n~~~"
% (comment['user']['login'],
comment['html_url'],
noun,
issue['number'],
issue['html_url'],
comment['body']))
elif event == 'push':
subject, content = build_message_from_gitlog(user_profile, topic_focus,
payload['ref'], payload['commits'],
payload['before'], payload['after'],
payload['compare'],
payload['pusher']['name'],
forced=payload['forced'],
created=payload['created'])
elif event == 'commit_comment':
comment = payload['comment']
subject = "%s: commit %s" % (topic_focus, comment['commit_id'])
content = ("%s [commented](%s)"
% (comment['user']['login'],
comment['html_url']))
if comment['line'] is not None:
content += " on `%s`, line %d" % (comment['path'], comment['line'])
content += "\n\n~~~ quote\n%s\n~~~" % (comment['body'],)
return (target_stream, subject, content)
|
bed307903d7ddcce216919d18accb3ecfd94937d
| 20,570 |
from typing import Iterable
from typing import Optional
from typing import Callable
from typing import Dict
def concatenate(
iterable: Iterable[Results],
callback: Optional[Callable] = None,
modes: Iterable[str] = ("val", "test"),
reduction: str = "none",
) -> Results:
"""Returns a concatenated Results.
Args:
iterable (iterable of Results): Iterable of `Results` instance.
callback (callable, optional): Called for each `Results`. Must take
(`mode`, `index`, `output`, `target`) arguments and return a tuple
of ('index', `output`, `target`).
modes (iterable of str): Specify modes to concatenate.
reduction (str, optional): Reduction. `none` or `mean`.
"""
modes = list(modes)
indexes: Dict[str, list] = {mode: [] for mode in modes}
outputs: Dict[str, list] = {mode: [] for mode in modes}
targets: Dict[str, list] = {mode: [] for mode in modes}
for results in iterable:
for mode in modes:
if mode not in results:
continue
result = results[mode]
index, output, target = result["index"], result["output"], result["target"]
if callback:
index, output, target = callback(index, output, target)
indexes[mode].append(index)
outputs[mode].append(output)
targets[mode].append(target)
results = Results()
for mode in modes:
index = np.concatenate(indexes[mode])
output = np.concatenate(outputs[mode])
target = np.concatenate(targets[mode])
dict = ivory.core.collections.Dict()
results[mode] = dict(index=index, output=output, target=target)
if reduction != "none":
results = getattr(results, reduction)()
return results
|
6833a50ddc84d44c942c6e85c1ebbdb793bd78a9
| 20,571 |
def parse_version(s: str) -> tuple[int, ...]:
"""poor man's version comparison"""
return tuple(int(p) for p in s.split('.'))
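# Illustrative usage (added sketch, not from the original source): tuple comparison
# orders versions correctly where plain string comparison would not.
assert parse_version('3.10.1') == (3, 10, 1)
assert parse_version('3.10.1') > parse_version('3.9.7')   # as strings, '3.10.1' < '3.9.7'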
|
445cd029efa3c8d4331e916f9925daddbc277ada
| 20,572 |
def replay_train(DQN, train_batch):
"""
    Train the DQN on one random minibatch of experience.
    `train_batch` holds the samples drawn from the replay buffer (minibatch).
    `x_stack` collects the states and `y_stack` collects the target
    (deterministic Q-learning) values; both start out as empty arrays.
"""
x_stack = np.empty(0).reshape(0, DQN.input_size) # array(10, 4)
y_stack = np.empty(0).reshape(0, DQN.output_size) # array(10, 2)
# Get stored information from the buffer
"""for๋ฅผ ํตํด์ minibatch(train_batch)์์ ๊ฐ์ ธ์จ ๊ฐ๋ค์ ํ๋์ฉ ๊บผ๋
๋๋ค."""
for state, action, reward, next_state, done in train_batch:
Q = DQN.predict(state)
# terminal
if done:
Q[0, action] = reward
else :
# Obtain the Q' values by feeding the new state through our network
Q[0, action] = reward + dis * np.max(DQN.predict(next_state))
"""
์ฌ๊ธฐ์ mian์ ์๋ action = np.argmax(mainDQN.predict(state))๊ณผ
predict๊ฐ ๊ฐ์ด ์ฐ์ด๊ณ ์๊ธฐ ๋๋ฌธ์ Non-stationary targets์ ๋ฌธ์ ๊ฐ ์๊น๋๋ค.
"""
"""np.vstack๋ y_stack์ ์๊ธฐ ์ํ numpyํจ์์
๋๋ค."""
y_stack = np.vstack([y_stack, Q])
x_stack = np.vstack([x_stack, state])
# Train our network using target and predicted Q values on each episode
"""
์์ stack๋ค์ ๋ฐ๋ก update๋ก ๋๋ ค์ ํ์ต์ ์ํต๋๋ค.
ํ์ต์ ์์์ ๋ง๋ค์๋ neural network(linear regression)์ ํตํด์ ํ์ต์ด ๋๊ฒ ์ง์.
"""
return DQN.update(x_stack, y_stack)
|
05b85aab223b82637a23853d15cd8e073ecca845
| 20,573 |
def make_reverse_macro_edge_name(macro_edge_name):
"""Autogenerate a reverse macro edge name for the given macro edge name."""
if macro_edge_name.startswith(INBOUND_EDGE_FIELD_PREFIX):
raw_edge_name = macro_edge_name[len(INBOUND_EDGE_FIELD_PREFIX) :]
prefix = OUTBOUND_EDGE_FIELD_PREFIX
elif macro_edge_name.startswith(OUTBOUND_EDGE_FIELD_PREFIX):
raw_edge_name = macro_edge_name[len(OUTBOUND_EDGE_FIELD_PREFIX) :]
prefix = INBOUND_EDGE_FIELD_PREFIX
else:
raise AssertionError("Unreachable condition reached: {}".format(macro_edge_name))
reversed_macro_edge_name = prefix + raw_edge_name
return reversed_macro_edge_name
|
807efcc26fb21e553241b2de4d2c6633a24548a2
| 20,574 |
def unescaped_split(pattern,
string,
max_split=0,
remove_empty_matches=False,
use_regex=False):
"""
Splits the given string by the specified pattern. The return character (\\n)
is not a natural split pattern (if you don't specify it yourself).
This function handles escaped split-patterns (and so splits only patterns
that are unescaped).
:param pattern: A pattern that defines where to split.
:param string: The string to split by the defined pattern.
:param max_split: Defines the maximum number of splits. If 0 or
less is provided, the number of splits is not
limited.
:param remove_empty_matches: Defines whether empty entries should
be removed from the result.
:param use_regex: Specifies whether to treat the split pattern
as a regex or simple string.
:return: An iterator returning the split up strings.
"""
return _split(string,
max_split,
remove_empty_matches,
unescaped_search_for,
pattern,
string,
0,
0,
use_regex)
|
5a5cec1a54b94840e13ddec3ca8796a73e908898
| 20,575 |
import networkx as nx
import numpy as np
import pandas as pd
def citation_distance_matrix(graph):
"""
:param graph: networkx graph
:returns: distance matrix, node labels
"""
sinks = [key for key, outdegree in graph.out_degree() if outdegree==0]
paths = {s: nx.shortest_path_length(graph, target=s) for s in sinks}
paths_df = pd.DataFrame(paths)#, index=graph.nodes)
paths_nonzero_df = 1*~paths_df.isnull()
a_paths_nonzero = paths_nonzero_df.values
m = a_paths_nonzero
intersect = m.dot(m.T)
union = m.dot(np.ones(m.shape).T) + np.ones(m.shape).dot(m.T) -intersect
union[union==0] = 1
dist = 1 - intersect/union
return dist, paths_nonzero_df.index
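# Illustrative usage (added sketch, not from the original source): a toy citation
# graph in which papers 1 and 2 both cite paper 3.
toy_graph = nx.DiGraph([(1, 3), (2, 3)])
toy_dist, toy_labels = citation_distance_matrix(toy_graph)  # 3x3 distance matrix + node index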
|
b3c41164c2081704b3b36ce0c5b1ca55440a88be
| 20,576 |
from typing import IO
def read_into_dataframe(file: IO, filename: str = "", nrows: int = 100,max_characters: int = 50) -> pd.DataFrame:
"""Reads a file into a DataFrame.
Infers the file encoding and whether a header column exists
Args:
file (IO): file buffer.
filename (str): filename. Used to infer compression.
nrows (int, optional): number of rows to peek. Default: 100.
max_characters (int, optional): max characters a column name can have to be distinguished from a real text value
Returns:
A pandas.DataFrame.
"""
detector = UniversalDetector()
for line, text in enumerate(file):
detector.feed(text)
if detector.done or line > nrows:
break
detector.close()
encoding = detector.result.get("encoding")
compression = infer_compression(filename, "infer")
file.seek(0, SEEK_SET)
contents = file.read()
with BytesIO(contents) as file:
df0 = pd.read_csv(
file,
encoding=encoding,
compression=compression,
sep=None,
engine="python",
header="infer",
nrows=nrows,
)
df0_cols = list(df0.columns)
    # Check if all columns are strings and short strings (text values tend to be long)
column_names_checker = all([type(item) == str for item in df0_cols])
if column_names_checker:
column_names_checker = all([len(item) < max_characters for item in df0_cols])
    # Check if any column can be converted to float
    conversion_checker = True
for item in df0_cols:
try:
item = float(item)
conversion_checker = False
break
except:
pass
#Prefix and header
final_checker = True if (column_names_checker and conversion_checker) else False
header = "infer" if final_checker else None
prefix = None if header else "col"
with BytesIO(contents) as file:
df = pd.read_csv(
file,
encoding=encoding,
compression=compression,
sep=None,
engine="python",
header=header,
prefix=prefix,
)
return df
|
fe95c60870779353f2aa751c20ed331a2e0156bf
| 20,577 |
import torch
def load_generator(config: dict):
"""
Create the generator and load its weights using the function `load_weights`.
Args:
config (dict): Dictionary with the configurations.
Returns:
BigGAN.Generator: The generator.
"""
# GPU
device = "cuda"
torch.backends.cudnn.benchmark = True
# TODO: how to handle seed?
# Seed RNG
utils.seed_rng(config["seed"])
# Import the model
model_name = "BigGAN" # ! Code rewrite only supports BigGAN
model = __import__(model_name)
# Create generator and load it to the GPU
G = model.Generator(**config).to(device)
# If using EMA, prepare it
if config["ema"]:
G_ema = model.Generator(**{**config, "skip_init": True, "no_optim": True}).to(
device
)
utils.ema(G, G_ema, config["ema_decay"], config["ema_start"])
else:
G_ema = None
# If loading from a pre-trained model, load weights
try:
load_weights(G, config, G_ema=G_ema if config["ema"] else None)
except:
load_weights(G, config, G_ema=None)
G_ema.load_state_dict(G.state_dict())
# Switch to eval mode
G.eval()
if config["ema"]:
G_ema.eval()
return G_ema if config["ema"] and config["use_ema"] else G
|
ad6ae9536610e12106e53ce94d9d1d60beff2fc5
| 20,578 |
from typing import OrderedDict
import torch
def Navigatev0_action_to_tensor(act: OrderedDict, task=1):
"""
Creates the following (batch_size, seq_len, 11) action tensor from Navigatev0 actions:
0. cam left
1. cam right
2. cam up
3. cam down
4. place + jump
5. place
6. forward + attack
7. attack
8. forward + jump
9. jump
10. forward
"""
batch_size, seq_len = act["jump"].shape
PLACE_OPTIONS = {"none": 0, "dirt": 1}
# ONE_HOT = {0: np.array([1, 0]), 1: np.array([0, 1])}
out = torch.zeros((batch_size,seq_len,11))
for b in range(batch_size):
for s in range(seq_len):
c = act["camera"]
# We don't need to check if 0, 1, and 10 are in task actions
# since they always will be
task_acts = TASK_ACTIONS[task]
# Set camera left
if c[b,s][0] < -10 and abs(c[b,s][0]) >= abs(c[b,s][1]):
out[b,s][0] = 1
# Set camera right
elif c[b,s][0] > 10 and abs(c[b,s][0]) >= abs(c[b,s][1]):
out[b,s][1] = 1
# Set camera up
elif 2 in task_acts and c[b,s][1] < -10 and abs(c[b,s][1]) >= abs(c[b,s][0]):
out[b,s][2] = 1
elif 3 in task_acts and c[b,s][1] > 10 and abs(c[b,s][1]) >= abs(c[b,s][0]):
out[b,s][3] = 1
elif PLACE_OPTIONS[act["place"][b,s]] == 1:
if 4 in task_acts and act["jump"][b,s] == 1:
out[b,s][4] = 1
elif 5 in task_acts:
out[b,s][5] = 1
elif act["attack"][b,s] == 1:
if 6 in task_acts and act["forward"][b,s] == 1:
out[b,s][6] = 1
elif 7 in task_acts:
out[b,s][7] = 1
elif act["jump"][b,s] == 1:
if 8 in task_acts and act["forward"][b,s] == 1:
out[b,s][8] = 1
elif 9 in task_acts:
out[b,s][9] = 1
else:
out[b,s][10] = 1
return out
|
39d481d2e8597902b18695de97f041606f24f035
| 20,579 |
def asfarray(a, dtype=mstype.float32):
"""
Similar to asarray, converts the input to a float tensor.
If non-float dtype is defined, this function will return a float32 tensor instead.
Args:
a (Union[int, float, bool, list, tuple, numpy.ndarray]): Input data, in
any form that can be converted to a `Tensor`. This includes lists, lists of
tuples, tuples, tuples of tuples, tuples of lists and numpy.ndarray.
dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can
be in format of np.int32, or \'int32\'. If dtype is :class:`None`, the data type
of the new tensor will be inferred from `a`. Default is :class:`mindspore.float32`.
Returns:
Tensor, generated tensor with the specified float dtype.
Raises:
TypeError: If input arguments have types not specified above.
ValueError: If input `a` has different sizes at different dimensions.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> print(np.asfarray([1,2,3]))
[1. 2. 3.]
"""
_check_input_for_asarray(a)
if dtype is None:
return asarray(a)
dtype = _check_dtype(dtype)
if dtype not in (mstype.float16, mstype.float32, mstype.float64):
dtype = mstype.float32
if isinstance(a, (list, tuple)):
# Convert all tuple/nested tuples to lists
a = _deep_list(a)
# Convert all tensor sub-elements to numpy arrays
a = _deep_tensor_to_nparray(a)
a = onp.asarray(a)
if a.dtype is onp.dtype('object'):
raise TypeError(f"For Tensor conversion, the input_data is {a} that contains unsupported element.")
if isinstance(a, onp.ndarray):
a = Tensor.from_numpy(a)
return Tensor(a, dtype)
|
4da49b2bcab9686b2757cf1b9066c21876f992e6
| 20,580 |
from typing import Optional
from typing import Callable
import click
def _verify_option(value: Optional[str], value_proc: Callable) -> Optional[str]:
"""Verifies that input value via click.option matches the expected value.
This sets ``value`` to ``None`` if it is invalid so the rest of the prompt
can flow smoothly.
Args:
value (Optional[str]): Input value.
value_proc (Callable): A function to check the validity of ``value``.
Returns:
(Optional[str]): ``value`` if it is a valid value. ``None`` if it is
not.
Raises:
click.exceptions.UsageError: When ``value`` is invalid.
"""
if value is None:
return value
try:
value = value_proc(value)
except click.exceptions.UsageError as error:
click.echo(f"Error: {error.message}", err=True)
value = None
return value
|
4d0f58827982924a9d027112ffa3aaeef7634fe8
| 20,581 |
def batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):
"""Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.
e.g.
x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
y = x * x
jacobian = batch_jacobian(y, x)
# => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
Args:
output: A tensor with shape [b, y1, ..., y_n]. `output[i,...]` should
only depend on `inp[i,...]`.
inp: A tensor with shape [b, x1, ..., x_m]
use_pfor: If true, uses pfor for computing the Jacobian. Else uses a tf.while_loop.
parallel_iterations: A knob to control how many iterations and dispatched in
parallel. This knob can be used to control the total memory usage.
Returns:
A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`
is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked
per-example jacobians.
Raises:
ValueError: if first dimension of `output` and `inp` do not match.
(NL) This function is taken from the following (and minimally modified to be used):
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/parallel_for/gradients.py#L81
"""
output_shape = output.shape
if not output_shape[0].is_compatible_with(inp.shape[0]):
raise ValueError("Need first dimension of output shape (%s) and inp shape "
"(%s) to match." % (output.shape, inp.shape))
if output_shape.is_fully_defined():
batch_size = int(output_shape[0])
output_row_size = output_shape.num_elements() // batch_size
else:
output_shape = tf.shape(output)
batch_size = output_shape[0]
output_row_size = tf.size(output) // batch_size
inp_shape = tf.shape(inp)
# Flatten output to 2-D.
with tf.control_dependencies([tf.assert_equal(batch_size, inp_shape[0])]):
output = tf.reshape(output, [batch_size, output_row_size])
def loop_fn(i):
y = tf.gather(output, i, axis=1)
return tf.gradients(y, inp)[0]
#if use_pfor:
if False:
pfor_output = tf.pfor(loop_fn, output_row_size,
parallel_iterations=parallel_iterations)
else:
pfor_output = for_loop(
loop_fn, output.dtype,
output_row_size,
parallel_iterations=parallel_iterations)
if pfor_output is None:
return None
pfor_output = tf.reshape(pfor_output, [output_row_size, batch_size, -1])
output = tf.transpose(pfor_output, [1, 0, 2])
new_shape = tf.concat([output_shape, inp_shape[1:]], axis=0)
return tf.reshape(output, new_shape)
|
dd42fcc9542bba8033a1eb204bf0d3a91b192dbc
| 20,582 |
def declare(baseFamily=None, baseDefault=0, derivedFamily=None, derivedDefault=""):
"""
Declare a pair of components
"""
# the declaration
class base(pyre.component, family=baseFamily):
"""a component"""
b = pyre.properties.int(default=baseDefault)
class derived(base, family=derivedFamily):
"""a derived component"""
d = pyre.properties.str(default=derivedDefault)
# return the pair to the caller
return base, derived
|
30c8d8f7d264a0e908f4305198b07c3d76a3cfac
| 20,583 |
from datetime import datetime
def parse_iso8601(dtstring: str) -> datetime:
"""naive parser for ISO8061 datetime strings,
Parameters
----------
dtstring
the datetime as string in one of two formats:
* ``2017-11-20T07:16:29+0000``
* ``2017-11-20T07:16:29Z``
"""
return datetime.strptime(
dtstring,
'%Y-%m-%dT%H:%M:%SZ' if len(dtstring) == 20 else '%Y-%m-%dT%H:%M:%S%z')
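# Illustrative usage (added sketch, not from the original source): both supported formats.
assert parse_iso8601('2017-11-20T07:16:29Z') == datetime(2017, 11, 20, 7, 16, 29)
parse_iso8601('2017-11-20T07:16:29+0000')  # returns a timezone-aware datetime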
|
415a4f3a9006109e31ea344cf99e885a3fd2738d
| 20,584 |
def CalcCurvature(vertices,faces):
"""
    CalcCurvature receives arrays of vertices and faces, computes the normal at
    each vertex, and calculates the second fundamental matrix and the curvature
    by least squares, by inverting the 3x3 normal matrix
INPUT:
vertices -nX3 array of vertices
faces -mX3 array of faces
VertexNormals - nX3 matrix (n=number of vertices) containing the normal at each vertex
FaceNormals - mX3 matrix (m = number of faces) containing the normal of each face
OUTPUT:
FaceSFM - a list of 2x2 np arrays of (m = number of faces) second fundamental tensor at the faces
VertexSFM - a list of 2x2 np arrays (n = number of vertices) second fundamental tensor at the vertices
Other Parameters
wfp : mx3 array of vertex voronoi cell area/Mixed area weights as given in Meyer 2002
up,vp : local coordinate system at each vertex
e0,e1,e2 : edge vectors
"""
#list of 2x2 arrays for each vertex
VertexSFM = [np.zeros([2,2]) for i in vertices]
up = np.zeros(vertices.shape)
e0=vertices[faces[:,2]]-vertices[faces[:,1]]
e1=vertices[faces[:,0]]-vertices[faces[:,2]]
e2=vertices[faces[:,1]]-vertices[faces[:,0]]
e0_norm=normr(e0)
e1_norm=normr(e1)
e2_norm=normr(e2)
FaceNormals=0.5*fastcross(e1,e2) #not unit length. holds the area which is needed next
VertNormals,wfp=GetVertexNormalsExtra(vertices,faces,FaceNormals,e0,e1,e2)
FaceNormals=normr(FaceNormals)
#Calculate initial coordinate system
up[faces[:,0]]=e2_norm
up[faces[:,1]]=e0_norm
up[faces[:,2]]=e1_norm
#Calculate initial vertex coordinate system
up=fastcross(up,VertNormals)
up=normr(up)
vp=fastcross(VertNormals,up)
B=normr(fastcross(FaceNormals,e0_norm))
nfaces=faces.shape[0]
# Build a least square problem at each face to get the SFM at each face and solve it using the normal equation
scale=1.0/np.sqrt(np.sum((e0[0,:]**2+e1[0,:]**2+e2[0,:]**2)/3.0))
AT = scale*np.array([[inner1d(e0,e0_norm), inner1d(e0,B), np.zeros(nfaces)],
[np.zeros(nfaces), inner1d(e0,e0_norm), inner1d(e0,B)],
[inner1d(e1,e0_norm), inner1d(e1,B), np.zeros(nfaces)],
[np.zeros(nfaces), inner1d(e1,e0_norm), inner1d(e1,B)],
[inner1d(e2,e0_norm), inner1d(e2,B), np.zeros(nfaces)],
[np.zeros(nfaces), inner1d(e2,e0_norm), inner1d(e2,B)]]).T
A = np.transpose(AT,axes=(0,2,1)).copy()
dn0=VertNormals[faces[:,2]]-VertNormals[faces[:,1]]
dn1=VertNormals[faces[:,0]]-VertNormals[faces[:,2]]
dn2=VertNormals[faces[:,1]]-VertNormals[faces[:,0]]
b= scale*np.array([inner1d(dn0,e0_norm),
inner1d(dn0,B ),
inner1d(dn1,e0_norm),
inner1d(dn1,B ),
inner1d(dn2,e0_norm),
inner1d(dn2,B )]).T[:,:,np.newaxis]
X1=np.array([np.linalg.pinv(a,-1) for a in A])
X = np.matmul(X1,b)
#now calculate curvature per vertex as weighted sum of the face curvature
for i,f in enumerate(faces):
for j in [0,1,2]:
new_ku,new_kuv,new_kv = ProjectCurvatureTensor(e0_norm[i],B[i],FaceNormals[i],X[i][0],X[i][1],X[i][2],up[f[j]],vp[f[j]])
VertexSFM[f[j]]+=wfp[i,j]*np.array([[new_ku,new_kuv],[new_kuv,new_kv]]).squeeze()
return VertexSFM,VertNormals
|
b0e31073fe8aff61e60d0393098cca390bb95708
| 20,585 |
from typing import Optional
def query_abstracts(
q: Optional[str] = None,
n_results: Optional[int] = None,
index: str = "agenda-2020-1",
fields: list = ["title^2", "abstract", "fullname", "institution"],
):
"""
Query abstracts from a given Elastic index
q: str, query
    n_results: int, number of results to return
index: str, index of ElasticSearch
fields: list, list of fields that are included in the search
"""
responses = query(q, n_results, index, fields)
return responses
|
4ed554231c863c3164c5368978da900e3647570d
| 20,586 |
import typing
import pickle
def PretrainedEmbeddingIndicesDictionary() -> typing.Dict[str, int]:
"""Read and return the embeddings indices dictionary."""
with open(INST2VEC_DICITONARY_PATH, "rb") as f:
return pickle.load(f)
|
d4c0c8f5d7c83d99927342c5cacd8fd80a4f7d56
| 20,587 |
def color_negative_red(val):
"""
Takes a scalar and returns a string with
the css property `'color: red'` for negative
    values, black otherwise.
"""
color = 'red' if val < 0 else 'black'
return 'color: %s' % color
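# Illustrative usage (added sketch, not from the original source): apply element-wise
# to a DataFrame's Styler. Assumes pandas with Jinja2 installed; note that
# Styler.applymap was renamed to Styler.map in pandas 2.1.
import pandas as pd
example_df = pd.DataFrame({'pnl': [1.5, -0.3, 0.0]})
styled = example_df.style.applymap(color_negative_red)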
|
1806af9c915740612a6a11df723f1439c73bde2f
| 20,588 |
import os
import fnmatch
def globpattern(dir, pattern):
"""
Return leaf names in the specified directory which match the pattern.
"""
if not hasglob(pattern):
if pattern == '':
if os.path.isdir(dir):
return ['']
return []
if os.path.exists(util.normaljoin(dir, pattern)):
return [pattern]
return []
leaves = os.listdir(dir) + ['.', '..']
# "hidden" filenames are a bit special
if not pattern.startswith('.'):
leaves = [leaf for leaf in leaves
if not leaf.startswith('.')]
leaves = fnmatch.filter(leaves, pattern)
leaves = [l for l in leaves if os.path.exists(util.normaljoin(dir, l))]
leaves.sort()
return leaves
|
b5d37fc5ffa69df67e3370cbb7abb2884c482d08
| 20,589 |
def get_student_discipline(person_id: str = None):
"""
Returns student discipline information for a particular person.
:param person_id: The numeric ID of the person you're interested in.
:returns: String containing xml or an lxml element.
"""
return get_anonymous('getStudentDiscipline', person_id=person_id)
|
4e96fb4e9d566af7094b16b29540617bbb230f67
| 20,590 |
import argparse
import torch
import os
import tqdm
import time
import json
def main():
"""evaluate gpt-model fine-tune on qa dataset"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='gpt2',
help='pretrained model name')
parser.add_argument("--using_cache", type=bool, default=False)
parser.add_argument(
"--importance", type=float, help="LifeLong Learning need its (Lambda)")
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help=
"The output directory where the model predictions and checkpoints will be written."
)
parser.add_argument("--do_eval",action="store_true")
# parser.add_argument("--old_dataset", type=str, default="")
parser.add_argument('--eval_dataset', type=str, default='')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--eval_batch_size', type=int, default=8)
# parser.add_argument('--old_batch_size', type=int, default=1)
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=8)
parser.add_argument("--top_p", type=float, default=0.9)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training \
steps to perform. Override num_train_epochs.")
parser.add_argument(
'--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before\
performing a backward/update pass.")
parser.add_argument("--no_cuda",action="store_true")
parser.add_argument("--argmax",action="store_true")
parser.add_argument("--sample",type=int,default=1)
args = parser.parse_args()
args.device = torch.device(
"cuda:0" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
print(args)
set_seed(args.seed)
device=args.device
n_gpu = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_eval:
raise ValueError("At least `do_eval` must be True.")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
special_tokens = ['_context_', '_question_', '_ans_','_eos_','_pad_']
load_dir, tokenizer, model, special_tokens_ids = load_tool(args.model_name,special_tokens,device)
print(special_tokens_ids)
def tokenize_and_encode(obj):
""" Tokenize and encode a nested object """
if isinstance(obj, str):
# print("str ",obj)
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
elif isinstance(obj, set):
return obj
return list(tokenize_and_encode(o) for o in obj)
logger.info("Encoding dataset...")
eval_dataset = load_squad_dataset(
args.eval_dataset, using_cache=args.using_cache)
datasets = (eval_dataset, )
encoded_datasets = tokenize_and_encode(datasets)
max_length, q_length, a_length = longest_length(model)
input_length = max(len(story[:max_length]) + len(question[:q_length]) + 5 \
for dataset in encoded_datasets for story, question, ans, _ in dataset)
input_length = min(input_length, model.config.n_positions-2)
# Load and encode the datasets
# Prepare inputs tensors and dataloaders
tensor_datasets, ID_list = pre_process_datasets(encoded_datasets, input_length,*special_tokens_ids)
eval_data = TensorDataset(*tensor_datasets[0])
eval_sampler = SequentialSampler(eval_data)
eval_sampler=BatchSampler(eval_sampler,batch_size=args.eval_batch_size,drop_last=False)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler,num_workers=8)
if args.do_eval:
model.eval()
answer_dict = dict()
compared_dict = dict()
tqdm_bar = tqdm(eval_dataloader, desc="Evaluating")
for step, data in enumerate(tqdm_bar):
start_time = time.time()
sentence, answer, ID_index = tuple(t.to(device) for t in data)
sentence = sentence[sentence != special_tokens_ids[4]].long()
answer = answer[answer != special_tokens_ids[4]].long()
# print(answer)
# pdb.set_trace()
out = sample_sequence(
model=model,
context=sentence,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
is_xlnet=False,
tokenizer=tokenizer,
argmax=args.argmax,num_samples=args.sample)
end_time = time.time()
# print("It costs {} seconds for generate data!!".format(end_time-start_time))
out_ = out[:, :].tolist()
answer_ = tokenizer.decode(answer.tolist(),clean_up_tokenization_spaces=True)
for i in range(len(out_)):
text = tokenizer.decode(out_[i], clean_up_tokenization_spaces=True,skip_special_tokens=True)
answer_dict[ID_list[ID_index[0][i]]] = text
compared_dict[ID_list[ID_index[0][i]]] = (text,answer_)
if step % 50 == 0:
print("step:", step)
print(" prediction: ",text)
print(" groundtrut: ",answer_)
with open(args.output_dir + "/predictions.json", "w") as outfile:
json.dump(answer_dict, outfile)
with open(args.output_dir + "/compared_answer.json","w") as outfile:
json.dump(compared_dict, outfile)
return
|
59be7014ff8d677261263ee8ab69ec1e0a1771e4
| 20,591 |
X, Y = 0, 1  # 2-D coordinate indices
def dot(p1, p2):
"""
Dot product
:param p1:
:param p2:
:return:
"""
return p1[X] * p2[X] + p1[Y] * p2[Y]
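# Illustrative usage (added sketch, not from the original source):
assert dot((1, 2), (3, 4)) == 11  # 1*3 + 2*4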
|
13ba17e8757ebf9022f07d21b58a26376520f84a
| 20,592 |
def logout():
"""View function which handles a logout request."""
tf_clean_session()
if current_user.is_authenticated:
logout_user()
# No body is required - so if a POST and json - return OK
if request.method == "POST" and _security._want_json(request):
return _security._render_json({}, 200, headers=None, user=None)
return redirect(get_post_logout_redirect())
|
0343be8ec063b5c215a0a019003cbf137588171a
| 20,593 |
import os
import pkgutil
def _lookup_configuration():
"""Lookup the configuration file.
:return: opened configuration file
:rtype: stream
"""
for pth in CONFIG_PATH:
path = os.path.abspath(os.path.expanduser(pth))
LOGGER.debug('Checking for %s', path)
if os.path.exists(path):
LOGGER.info('Config file: %s', path)
return open(path)
return pkgutil.get_data('picdb', 'resources/config_app.yaml')
|
1f3e23baa2947bf5ffd8df080fe52c37235e94b6
| 20,594 |
def splinter_session_scoped_browser():
"""Make it test scoped."""
return False
|
a7587f6edff821bab3052dca73929201e98dcf56
| 20,595 |
from collections import Counter
def sample_mask(source, freq_vocab, threshold=1e-3, min_freq=0, seed=None, name=None):
"""Generates random mask for downsampling high frequency items.
Args:
source: string `Tensor` of any shape, items to be sampled.
freq_vocab: `Counter` with frequencies vocabulary.
threshold: `float`, items occurrence threshold.
min_freq: `int`, items below that frequency will be treated as unique.
seed: `int`, used to create a random seed (optional).
See @{tf.random.set_seed} for behavior.
name: `string`, a name for the operation (optional).
Returns:
A boolean `Tensor` of same shape as source: "keep" flags.
"""
with tf.name_scope(name or 'sample_mask'):
source = tf.convert_to_tensor(source, dtype=tf.string, name='source')
seed1, seed2 = random_seed.get_seed(seed)
if not isinstance(freq_vocab, Counter):
raise ValueError('Frequency vocabulary should be a Counter instance')
keys, freqs = zip(*freq_vocab.most_common())
return tfmiss_ops.miss_sample_mask(
source=source,
keys=keys,
freqs=freqs,
threshold=threshold,
min_freq=min_freq,
seed=seed1,
seed2=seed2
)
|
30fca98f95ac7a6aa2f3a3576f32abf271a693bb
| 20,596 |
def _xList(l):
"""
"""
if l is None:
return []
return l
|
ef09d779c7ebc2beb321d90726f43603c0ac8315
| 20,597 |
def IABN2Float(module: nn.Module) -> nn.Module:
"""If `module` is IABN don't use half precision."""
if isinstance(module, InplaceAbn):
module.float()
for child in module.children():
IABN2Float(child)
return module
|
587565ad78afd08d3365f637ab5b98b17e977566
| 20,598 |
from datetime import date, datetime
def start_of_day(val):
"""
Return a new datetime.datetime object with values that represent
a start of a day.
    :param val: Date or datetime to truncate to the start of its day.
:type val: datetime.datetime | datetime.date
:rtype: datetime.datetime
"""
if type(val) == date:
val = datetime.fromordinal(val.toordinal())
return val.replace(hour=0, minute=0, second=0, microsecond=0)
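# Illustrative usage (added sketch, not from the original source):
assert start_of_day(datetime(2021, 5, 4, 13, 45, 7)) == datetime(2021, 5, 4)
assert start_of_day(date(2021, 5, 4)) == datetime(2021, 5, 4)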
|
74e302513edf428f825f9e24567e23b3a5e5d4f5
| 20,599 |