content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
def default(typ, default=None, frm=None):
""" optional value """
def _internal(val):
if typ(val) is Consts.Fail:
return default
else:
return val
return u_condition_checker(frm, _internal)
|
07374d4e71693640eacf7ccf003843d37284231b
| 27,400 |
def fltflag(*args):
"""fltflag() -> flags_t"""
return _idaapi.fltflag(*args)
|
1cb3f8849523c9811c0e3fa2b4bf50e5f72750cc
| 27,401 |
import io
def get_mopac_deltaH0(lines):
"""
Return delta H in kcal/mol from mopac output.
#>>> s = io.read_file('test/input.out')
    #>>> print(get_mopac_deltaH0(s))
-13.02534
"""
if isinstance(lines, str):
lines = lines.splitlines()
keyword = 'FINAL HEAT OF FORMATION'
n = io.get_line_number(keyword, lines=lines)
return float(lines[n].split()[5])
|
a5b01de232b8e3f8c6d74c1e48095af8b9880696
| 27,403 |
import logging
from functools import wraps
logger = logging.getLogger(__name__)  # module-level logger assumed by this snippet
def abstract(f):
"""
make method abstract.
class Foo(object):
@abstract
def bar(self):
pass
foo = Foo()
foo.bar() # NotImplementedError: can't invoke abstract method 'bar'
"""
@wraps(f)
def wrapper(*args, **kwargs):
msg = "can't invoke abstract method '%s'" % f.__name__
logger.error(msg)
raise NotImplementedError(msg)
return wrapper
|
a652e2440a2b9b0606f18dd91ea77b3ebafb25fe
| 27,404 |
def capitalize_1(string):
"""
Capitalizes a string using a combination of the upper and lower methods.
:author: jrg94
:param string: any string
:return: a string with the first character capitalized and the rest lowercased
"""
    if not string:  # avoid IndexError on the empty string
        return string
    return string[0].upper() + string[1:].lower()
|
9ad830a6d38e19b195cd3dff9a38fe89c49bd5c8
| 27,405 |
def edit_current_stock(current_stock_id, current_stock_data):
"""
    Edit (update) a current-stock record.
:param current_stock_id:
:param current_stock_data:
:return: Number of affected rows (Example: 0/1)
:except:
"""
return db_instance.edit(STCurrentStock, current_stock_id, current_stock_data)
|
bdc96043b74c2c02ae633700e0ec9860b61c7f11
| 27,406 |
import numpy as np
def fit_via_yule_walker(x, order, acf_method="mle", demean=True):
"""
    Estimate AR(p) parameters of a sequence x using the Yule-Walker equations.
Parameters
----------
x : 1d numpy array
order : integer
The order of the autoregressive process.
acf_method : {'unbiased', 'mle'}, optional
Method can be 'unbiased' or 'mle' and this determines denominator in
estimating autocorrelation function (ACF) at lag k. If 'mle', the
denominator is `n = x.shape[0]`, if 'unbiased' the denominator is `n - k`.
    demean : bool
        If True, the mean is subtracted from `x` before estimation.
"""
if demean:
x = x.copy()
x -= x.mean()
if acf_method == "unbiased":
denom = lambda lag: len(x) - lag
else:
denom = lambda lag: len(x)
if x.ndim > 1 and x.shape[1] != 1:
raise ValueError("expecting a vector to estimate AR parameters")
auto_cov = np.zeros(order + 1, np.float64)
auto_cov[0] = (x ** 2).sum() / denom(0)
for lag in range(1, order + 1):
auto_cov[lag] = np.sum(x[0:-lag] * x[lag:]) / denom(lag)
if order == 0:
ar_coef = None
innovation_var = auto_cov[0]
else:
ar_coef = _solve_yule_walker(auto_cov)
innovation_var = auto_cov[0] - (auto_cov[1:] * ar_coef).sum()
aic = compute_aic(innovation_var, order, len(x))
return ar_coef, aic
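# Note: _solve_yule_walker and compute_aic above are project helpers that are not
# shown in this snippet. A minimal sketch of what _solve_yule_walker might do,
# assuming it solves the symmetric Toeplitz system R @ a = r of the Yule-Walker
# equations (the name and signature below are assumptions, not the original code):
from scipy.linalg import toeplitz

def _solve_yule_walker_sketch(auto_cov):
    R = toeplitz(auto_cov[:-1])    # (order x order) autocovariance matrix
    r = auto_cov[1:]               # autocovariances at lags 1..order
    return np.linalg.solve(R, r)   # AR coefficients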
|
c96f29d72d88427fa4c9e345e22e41913218113f
| 27,407 |
def beta_create_Bookstore_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('endpoints.examples.bookstore.Bookstore', 'CreateBook'): CreateBookRequest.FromString,
('endpoints.examples.bookstore.Bookstore', 'CreateShelf'): CreateShelfRequest.FromString,
('endpoints.examples.bookstore.Bookstore', 'DeleteBook'): DeleteBookRequest.FromString,
('endpoints.examples.bookstore.Bookstore', 'DeleteShelf'): DeleteShelfRequest.FromString,
('endpoints.examples.bookstore.Bookstore', 'GetBook'): GetBookRequest.FromString,
('endpoints.examples.bookstore.Bookstore', 'GetShelf'): GetShelfRequest.FromString,
('endpoints.examples.bookstore.Bookstore', 'ListBooks'): ListBooksRequest.FromString,
('endpoints.examples.bookstore.Bookstore', 'ListShelves'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
}
response_serializers = {
('endpoints.examples.bookstore.Bookstore', 'CreateBook'): Book.SerializeToString,
('endpoints.examples.bookstore.Bookstore', 'CreateShelf'): Shelf.SerializeToString,
('endpoints.examples.bookstore.Bookstore', 'DeleteBook'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('endpoints.examples.bookstore.Bookstore', 'DeleteShelf'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('endpoints.examples.bookstore.Bookstore', 'GetBook'): Book.SerializeToString,
('endpoints.examples.bookstore.Bookstore', 'GetShelf'): Shelf.SerializeToString,
('endpoints.examples.bookstore.Bookstore', 'ListBooks'): ListBooksResponse.SerializeToString,
('endpoints.examples.bookstore.Bookstore', 'ListShelves'): ListShelvesResponse.SerializeToString,
}
method_implementations = {
('endpoints.examples.bookstore.Bookstore', 'CreateBook'): face_utilities.unary_unary_inline(servicer.CreateBook),
('endpoints.examples.bookstore.Bookstore', 'CreateShelf'): face_utilities.unary_unary_inline(servicer.CreateShelf),
('endpoints.examples.bookstore.Bookstore', 'DeleteBook'): face_utilities.unary_unary_inline(servicer.DeleteBook),
('endpoints.examples.bookstore.Bookstore', 'DeleteShelf'): face_utilities.unary_unary_inline(servicer.DeleteShelf),
('endpoints.examples.bookstore.Bookstore', 'GetBook'): face_utilities.unary_unary_inline(servicer.GetBook),
('endpoints.examples.bookstore.Bookstore', 'GetShelf'): face_utilities.unary_unary_inline(servicer.GetShelf),
('endpoints.examples.bookstore.Bookstore', 'ListBooks'): face_utilities.unary_unary_inline(servicer.ListBooks),
('endpoints.examples.bookstore.Bookstore', 'ListShelves'): face_utilities.unary_unary_inline(servicer.ListShelves),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
|
91f15dc278a8f273232334e75e729819ec33f521
| 27,409 |
import yaml
import json
def domain_parser_yml_to_json(path_to_domain_yml, path_to_domain_json):
"""
Chatbot Domain Parser, will parse 'domain.yml' file to json object and write to file
:param path_to_domain_yml: Complete path to YML domain file
:param path_to_domain_json: Complete path to file where Json Object will be written
:return: Json string of parsed object
"""
with open(path_to_domain_yml, "r") as file:
        json_domain = yaml.safe_load(file)  # safe_load avoids constructing arbitrary objects
json_domain["entities"] = parse_entities_to_json(path_to_domain_yml)
with open(path_to_domain_json, "w") as file:
json.dump(json_domain, file)
return json.dumps(json_domain)
|
9ea6459136eba27d1b60cfc590a3d3f526e9222b
| 27,410 |
def get_url_and_token(string):
""" extract url and token from API format """
try:
[token, api] = string.split(":", 1)
[_, _, addr, _, port, proto] = api.split("/", 5)
url = f"{proto}://{addr}:{port}/rpc/v0"
except Exception:
raise ValueError(f"malformed API string : {string}")
return (url, token)
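# Quick illustration with a made-up Lotus-style API string (values are hypothetical):
#   get_url_and_token("TOKEN:/ip4/127.0.0.1/tcp/1234/http")
#   -> ("http://127.0.0.1:1234/rpc/v0", "TOKEN")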
|
f3abd327c9de2d098100e539f701bf2fff1742f5
| 27,411 |
def checkZone(false, usrdata):
""" Check the cloudflare zone record for entries to remove """
# Call api
request = [('a', 'rec_load_all')]
json_data = callAPI(request, usrdata)
# Dicts and Lists for later
falserecs = {}
names = {}
recs = []
falsedata = {}
recdata = {}
# Parse the response and tally false entries
# Check if the json request is successful
if json_data["result"] == "success":
# each zone is a dict in objs dict
for line in json_data["response"]["recs"]["objs"]:
# Added this to avoid duplicate zone names (i.e. MX records)
# example key is A-somedomain.com
key = line["type"] + "-" + line["name"]
try:
names[key] = names[key] + 1
except KeyError:
names[key] = 1
if line["content"] == false:
falserecs[line["rec_id"]] = key
falsedata[line["rec_id"]] = {'type': line["type"],
'name': line["name"],
'content': line["content"],
'service_mode': line["service_mode"],
'ttl': line["ttl"],
'prio': line["prio"]}
# Go through false records
for rec in falserecs.keys():
if names[falserecs[rec]] > 1:
# if record isn't the last entry add for removal
recs.append(rec)
recdata[rec] = falsedata[rec]
# Return the records that have false
return recs, recdata
|
4f36ba5c7f20b15bfe80a80eda41ae8bf6141e16
| 27,412 |
import io
def read_temp(buffer: io.BytesIO, offset: int = None) -> float:
"""Retrieve temperature [C] value (2 bytes) from buffer"""
    if offset is not None:  # 0 is a valid offset
buffer.seek(offset)
value = int.from_bytes(buffer.read(2), byteorder="big", signed=True)
return float(value) / 10
|
e7cb28977af49fd3c52438357b02eb11f05d1e9d
| 27,413 |
def convert_netaddr(netaddr: str) -> str:
"""
Converts network address from hex_ip:hex_port format to ip:port
    e.g. 573B1FAC:BE46 -> 172.31.59.87:48710
"""
try:
if check_netaddr_pattern(netaddr):
addr, port = netaddr.split(':')
addr = convert_addr(addr)
port = convert_port(port)
return '{}:{}'.format(addr, port)
except BaseException:
raise InvalidNetAddrFormat
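# check_netaddr_pattern, convert_addr and convert_port are helpers defined elsewhere
# in this module. Assuming the /proc/net/tcp convention (little-endian hex IPv4,
# big-endian hex port), minimal sketches of the two converters could look like:
def _convert_addr_sketch(hex_ip: str) -> str:
    # '573B1FAC' -> '172.31.59.87': reverse the byte order, then print decimal octets
    return '.'.join(str(int(hex_ip[i:i + 2], 16)) for i in range(6, -2, -2))

def _convert_port_sketch(hex_port: str) -> str:
    # 'BE46' -> '48710'
    return str(int(hex_port, 16))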
|
9d6979f0303ede07fcb7b92a4cfc8579b90fca84
| 27,414 |
def make_bsa_2d(betas, theta=3., dmax=5., ths=0, thq=0.5, smin=0,
method='simple',verbose = 0):
"""
Function for performing bayesian structural analysis
on a set of images.
Parameters
----------
betas, array of shape (nsubj, dimx, dimy) the data used
Note that it is assumed to be a t- or z-variate
theta=3., float,
first level threshold of betas
dmax=5., float, expected between subject variability
ths=0, float,
null hypothesis for the prevalence statistic
thq=0.5, float,
p-value of the null rejection
smin=0, int,
        threshold on the number of contiguous voxels
to make regions meaningful structures
method= 'simple', string,
        estimation method used; to be chosen among
        'simple', 'dev', 'loo', 'ipmi', 'simple_quick', 'sbf'
verbose=0, verbosity mode
Returns
-------
AF the landmark_regions instance describing the result
BF: list of hroi instances describing the individual data
"""
ref_dim = np.shape(betas[0])
nsubj = betas.shape[0]
    xyz = np.array(np.where(betas[:1])).T.astype(int)
nvox = np.size(xyz, 0)
    # create the field structure that encodes image topology
Fbeta = ff.Field(nvox)
Fbeta.from_3d_grid(xyz, 18)
# Get coordinates in mm
    coord = xyz.astype(float)
# get the functional information
lbeta = np.array([np.ravel(betas[k]) for k in range(nsubj)]).T
# the voxel volume is 1.0
g0 = 1.0/(1.0*nvox)*1./np.sqrt(2*np.pi*dmax**2)
affine = np.eye(4)
shape = (1, ref_dim[0], ref_dim[1])
lmax=0
bdensity = 1
if method=='ipmi':
group_map, AF, BF, likelihood = \
bsa.compute_BSA_ipmi(Fbeta, lbeta, coord, dmax, xyz,
affine, shape, thq,
smin, ths, theta, g0, bdensity)
if method=='simple':
group_map, AF, BF, likelihood = \
bsa.compute_BSA_simple(Fbeta, lbeta, coord, dmax, xyz,
affine, shape, thq, smin, ths,
theta, g0)
if method=='loo':
mll, ll0 = bsa.compute_BSA_loo(Fbeta, lbeta, coord, dmax, xyz,
affine, shape, thq, smin, ths,
theta, g0)
return mll, ll0
if method=='dev':
group_map, AF, BF, likelihood = \
bsa.compute_BSA_dev(Fbeta, lbeta, coord, dmax, xyz,
affine, shape, thq,
smin, ths, theta, g0, bdensity)
if method=='simple_quick':
likelihood = np.zeros(ref_dim)
group_map, AF, BF, coclustering = \
bsa.compute_BSA_simple_quick(Fbeta, lbeta, coord, dmax, xyz,
affine, shape, thq, smin, ths,
theta, g0)
if method=='sbf':
likelihood = np.zeros(ref_dim)
group_map, AF, BF = sbf.Compute_Amers (Fbeta, lbeta, xyz, affine, shape,
coord, dmax=dmax, thr=theta,
ths=ths , pval=thq)
    if method not in ['loo', 'dev', 'simple', 'ipmi', 'simple_quick', 'sbf']:
        raise ValueError('method is not correctly defined')
if verbose==0:
return AF,BF
    if AF is not None:
lmax = AF.k+2
AF.show()
group_map.shape = ref_dim
mp.figure()
mp.subplot(1,3,1)
mp.imshow(group_map, interpolation='nearest', vmin=-1, vmax=lmax)
mp.title('Blob separation map')
mp.colorbar()
    if AF is not None:
group_map = AF.map_label(coord,0.95,dmax)
group_map.shape = ref_dim
mp.subplot(1,3,2)
mp.imshow(group_map, interpolation='nearest', vmin=-1, vmax=lmax)
mp.title('group-level position 95% \n confidence regions')
mp.colorbar()
mp.subplot(1,3,3)
likelihood.shape = ref_dim
mp.imshow(likelihood, interpolation='nearest')
mp.title('Spatial density under h1')
mp.colorbar()
mp.figure()
if nsubj==10:
for s in range(nsubj):
mp.subplot(2, 5, s+1)
lw = -np.ones(ref_dim)
            if BF[s] is not None:
nls = BF[s].get_roi_feature('label')
nls[nls==-1] = np.size(AF)+2
for k in range(BF[s].k):
xyzk = BF[s].xyz[k].T
lw[xyzk[1],xyzk[2]] = nls[k]
mp.imshow(lw, interpolation='nearest', vmin=-1, vmax=lmax)
mp.axis('off')
mp.figure()
if nsubj==10:
for s in range(nsubj):
mp.subplot(2,5,s+1)
mp.imshow(betas[s],interpolation='nearest',vmin=betas.min(),
vmax=betas.max())
mp.axis('off')
return AF, BF
|
37d2727ce71530e70750696fb1e852b822e5d54e
| 27,416 |
def test_hhs_hospital_dataset_non_default_start_date():
"""Tests HHSHopsitalStateDataset imports adult_icu_beds_capacity
correctly for a state with a non-default start date (Alaska) verifying
that data prior to its start date (2020-10-06) is dropped."""
variable = ccd_helpers.ScraperVariable(
variable_name="adult_icu_beds_capacity",
measurement="current",
unit="beds",
provider="hhs",
common_field=CommonFields.ICU_BEDS,
)
source_url = UrlStr("http://foo.com")
# Do location Alaska and start at 2020-10-05 so the first date gets dropped.
input_data = build_can_scraper_dataframe(
{variable: [10, 20, 30]},
source_url=source_url,
start_date="2020-10-05",
location=2,
location_id="iso1:us#iso2:us-ak",
)
class CANScraperForTest(hhs_hospital_dataset.HHSHospitalStateDataset):
@staticmethod
def _get_covid_county_dataset() -> ccd_helpers.CanScraperLoader:
return ccd_helpers.CanScraperLoader(input_data)
ds = CANScraperForTest.make_dataset()
    # The 2020-10-05 point (before the 2020-10-06 start date) should have been dropped, leaving [20, 30]
icu_beds = test_helpers.TimeseriesLiteral(
[20, 30], source=taglib.Source(type="HHSHospitalState", url=source_url)
)
expected_ds = test_helpers.build_default_region_dataset(
{CommonFields.ICU_BEDS: icu_beds},
region=pipeline.Region.from_fips("02"),
start_date="2020-10-06",
static={CommonFields.ICU_BEDS: 30},
)
test_helpers.assert_dataset_like(ds, expected_ds)
|
19de6db3884f5d0fea55c72bfee01502462fd029
| 27,419 |
def fibonacci(n: int) -> int:
"""
Iteratively compute fibonacci of n
"""
if n==0: return 0
if n==1: return 1
fp2 = 0
fp1 = 1
for _ in range(1,n):
f = fp1 + fp2
fp2 = fp1
fp1 = f
return f
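# Sanity checks (standard Fibonacci values):
#   fibonacci(0) == 0, fibonacci(1) == 1, fibonacci(10) == 55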
|
e9f60ad7ae5187c516dba5e473fec86b154347d5
| 27,420 |
from typing import List
from typing import Dict
from typing import Any
from typing import Tuple
def do_make_label_group(
text: List[str], **kwargs: Dict[str, Any]
) -> Tuple[int, List[RAMSTKLabel]]:
"""Make and place a group of labels.
The width of each label is set using a natural request. This ensures the
label doesn't cut off letters. The maximum size of the labels is
determined and used to set the left position of widget displaying the data
    described by the label. This ensures everything lines up. The maximum
    width and the list of RAMSTKLabel() instances are returned so the caller
    can place the corresponding widgets.
:param text: a list containing the text for each label.
:return: (_max_x, _lst_labels)
the width of the label with the longest text and a list of the
RAMSTKLabel() instances.
:rtype: tuple of (integer, list of RAMSTKLabel())
"""
_bold = kwargs.get("bold", True)
_justify = kwargs.get("justify", Gtk.Justification.RIGHT)
_wrap = kwargs.get("wrap", True)
_lst_labels = []
_max_x = 0
_char_width = max(len(_label_text) for _label_text in text)
# pylint: disable=unused-variable
for _label_text in text:
_label = RAMSTKLabel(_label_text)
_label.do_set_properties(
bold=_bold, height=-1, justify=_justify, width=-1, wrap=_wrap
)
_label.set_width_chars(_char_width)
_max_x = max(_max_x, _label.get_attribute("width"))
_lst_labels.append(_label)
return _max_x, _lst_labels
|
200f65febc4dddf9c2f72e64963a34c2295bfcc8
| 27,421 |
import tensorflow as tf
def _variable_with_weight_decay(name, shape, stddev, wd, index):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float64
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
#i=var.op.name.find('/')-2
'''
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses'+index, weight_decay)
'''
return var
|
bdd51083cb93557c3588c5b6dea75968f000a02f
| 27,423 |
def make_admin_versionable(cls):
"""Make Admin class versionable"""
class AdminVersionable(AdminVersionableMixIn, cls):
pass
return AdminVersionable
|
c36d1a18aba44baddbd449b9f673241923251a01
| 27,425 |
import numpy as np
def generate_model(d):
    """Returns a set of (random) d+1 linear model coefficients."""
    return np.random.rand(d+1, 1)
|
b7b6e85cdd033e5e89aaaae5db258d97a1a61534
| 27,427 |
import json
def handle_error(ex, hed_info=None, title=None, return_as_str=True):
"""Handles an error by returning a dictionary or simple string
Parameters
----------
ex: Exception
The exception raised.
hed_info: dict
A dictionary of information.
title: str
A title to be included with the message.
return_as_str: bool
If true return as string otherwise as dictionary
Returns
-------
str or dict
"""
if not hed_info:
hed_info = {}
if hasattr(ex, 'error_type'):
error_code = ex.error_type
else:
error_code = type(ex).__name__
if not title:
title = ''
if hasattr(ex, 'message'):
message = ex.message
else:
message = str(ex)
hed_info['message'] = f"{title}[{error_code}: {message}]"
if return_as_str:
return json.dumps(hed_info)
else:
return hed_info
|
4b7bc24c9b4fd83d39f4447e29e383d1769e6b0f
| 27,428 |
def get_parent_epic(issue):
"""Get the parent epic of `issue`.
Different boards have different meta data formats.
"""
    return (
        # DEV board
        getattr(issue.fields, JIRA_FIELD_EPIC_LINK, None)
        # BS board
        or getattr(getattr(issue.fields, 'parent', None), 'key', None)
    )
|
807763ee13fb1dd639155f6b9efca057a0d2b5a1
| 27,430 |
def get_elements(driver,
selector,
text='',
selector_type=By.CSS_SELECTOR,
timeout=DEFAULT_TIMEOUT,
must_be_visible=True):
"""
Pauses execution until one or more elements matching the selector is visible.
:param driver: webdriver
:param selector: str, CSS selector
:param text: text that the element should contain
:param selector_type: selector format. Default is By.CSS_SELECTOR
:param timeout: int, time to wait before raising exception
:param must_be_visible: bool, true if the returned components must be visible
:return: the matched element
"""
callback = ElementCriteriaCondition(
(selector_type, selector),
text,
must_be_visible=must_be_visible,
return_all_matching=True)
message = "Expected at least one element matching {} `{}` to become " \
"visible".format(selector_type, selector)
if text:
message += ' containing text `{}`'.format(text)
try:
return wait_until(driver, callback, message, timeout)
except TimeoutException as e:
raise WebException(e.msg) from e
|
a9b3a5bad8ad8a9d32e6460ff72b089b20b5b713
| 27,432 |
import re
import pandas as pd
def get_date_from_folder(file_str):
"""
get datetime from file folder of .et3, .erd, i.e.,
'DATA2015-01-29-16-57-30/', '2020-10-11-03-48-52/'
"""
f = file_str.strip()
f = f[:-1] # remove trailing '/'
f = f.replace("DATA", "")
# replace the 3rd occurrence of '-'
w = [m.start() for m in re.finditer(r"-", f)][2]
# before w do not change, after w, '-' -> ':'
f = f[:w] + " " + f[w + 1 :].replace("-", ":")
# now f becomes '2015-01-29 16:57:30'
return pd.to_datetime(f)
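# Examples based on the folder names in the docstring:
#   get_date_from_folder('DATA2015-01-29-16-57-30/')  # -> Timestamp('2015-01-29 16:57:30')
#   get_date_from_folder('2020-10-11-03-48-52/')      # -> Timestamp('2020-10-11 03:48:52')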
|
1176501b771e1f7b9721f8b78516c69878417ab5
| 27,433 |
def translate_api2db(namespace, alias):
"""
>>> translate_api2db("ga4gh", "SQ.1234")
[('VMC', 'GS_1234')]
"""
if namespace.lower() == "refseq":
return [("NCBI", alias)]
if namespace == "ensembl":
return [("Ensembl", alias)]
if namespace == "lrg":
return [("LRG", alias)]
if namespace == "sha512t24u":
return [
("VMC", "GS_" + alias if alias else None),
]
if namespace == "ga4gh":
return [
("VMC", "GS_" + alias[3:]),
]
return []
|
843f67f12024222f271f9f1826e2530b5a7834b4
| 27,434 |
import requests
def register_view(request):
"""Renders the register page."""
if request.method == 'GET':
# Get signup form to display
form = SignUpForm()
return render(request, 'myroot/registration/register.html',
{'form': form,
'title': "Register | " + settings.SITE_SHORT_NAME,
'meta_desc': """A step-by-step guide on how to create a user registration form using Django 2.1+ with Python 3.7+""",
})
data = dict()
if request.method == 'POST':
form = SignUpForm(request.POST)
username = request.POST.get('username')
email = request.POST.get('email')
password1 = request.POST.get('password1')
password2 = request.POST.get('password2')
is_pass_valid, msg, title = is_password_valid(password1, password2)
is_user_name_valid, msg1, title1 = is_username_valid(username)
if not is_user_name_valid:
# Return some json response back to user
data = dict_alert_msg('False', title1, msg1, 'error')
elif not is_pass_valid:
# Return some json response back to user
data = dict_alert_msg('False', title, msg, 'error')
# Check if email exist in our users list
elif User.objects.filter(email=email):
# Return some json response back to user
msg = """A user with that email address already exist."""
data = dict_alert_msg('False', 'Invalid Email!', msg, 'error')
elif User.objects.filter(username=username):
# Return some json response back to user
msg = """Username already taken, please try another one."""
data = dict_alert_msg('False', 'Invalid Username!',
msg, 'error')
# To check prohibited username match with our list
elif SiteConfig.objects.filter(property_name=username):
# Return some json response back to user
msg = """A username you have entered is not allowed."""
data = dict_alert_msg('False', 'Prohibited Username!',
msg, 'error')
# To check if Prohibited email match with our list
elif SiteConfig.objects.filter(property_name=email):
# Return some json response back to user
msg = """The email you have entered is not allowed."""
data = dict_alert_msg('False', 'Prohibited Email!',
msg, 'error')
else:
''' Begin reCAPTCHA validation '''
recaptcha_response = request.POST.get('g-recaptcha-response')
data = {
'secret': settings.GRECAP_SECRET_KEY,
'response': recaptcha_response
}
r = requests.post(settings.GRECAP_VERIFY_URL, data=data)
result = r.json()
''' End reCAPTCHA validation '''
if result['success']:
# Validate email address if exist from an email server.
is_email_real = is_email_valid(email)
if is_email_real:
# Proceed with the rest of registering new user
user = form.save(commit=False)
user.is_active = False
user.save() # Finally save the form data
user.pk # Get the latest id
current_site = get_current_site(request)
subject = 'Activate Your ' + \
str(settings.SITE_SHORT_NAME) + ' Account'
message = render_to_string(
'myroot/account/account_activation_email.html',
{
'user': user,
'domain': current_site.domain,
'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),
'token': account_activation_token.make_token(user),
})
user.email_user(subject, message, settings.APP_EMAIL_FROM)
# Return some json response back to user
msg = """New user has been created successfully!"""
data = dict_alert_msg('True', 'Awesome', msg, 'success')
else:
# Return some json response back to user
msg = """Invalid or non-existed email address."""
data = dict_alert_msg('False', 'Oops, Invalid Email Address', msg, 'error')
else:
# Return some json response back to user
msg = """Invalid reCAPTCHA, please try again."""
data = dict_alert_msg('False', 'Oops, Error', msg, 'error')
return JsonResponse(data)
|
aa854a427f8900a6b19ffef37598d1fa8f7b8d4d
| 27,435 |
def make_environment():
"""Creates an OpenAI Gym environment."""
# Load the gym environment.
environment = fakes.ContinuousEnvironment(
action_dim=1, observation_dim=2, episode_length=10
)
return environment
|
61156d212a14f5214e017f34792532c1dfa8d6b8
| 27,436 |
def device_get(context, device_id):
"""get a device according to specify device_id"""
return IMPL.device_get(context, device_id)
|
6bdc60e0fd177b29568c181870b1825746953751
| 27,437 |
import scipy.ndimage
def convolve_gaussian_2d(image, gaussian_kernel_1d):
"""Convolve 2d gaussian."""
result = scipy.ndimage.filters.correlate1d(
image, gaussian_kernel_1d, axis=0)
result = scipy.ndimage.filters.correlate1d(
result, gaussian_kernel_1d, axis=1)
return result
|
00046cd47d324a3f391ad0a84f31eb0368857afb
| 27,438 |
from subprocess import call
def refresh_viewer(viewer, pdf_path,
tm_bundle_support=getenv('TM_BUNDLE_SUPPORT')):
"""Tell the specified PDF viewer to refresh the PDF output.
If the viewer does not support refreshing PDFs (e.g. “Preview”) then this
command will do nothing. This command will return a non-zero value if the
the viewer could not be found or the PDF viewer does not support a
“manual” refresh. For this method to work correctly ``viewer`` needs to be
open beforehand.
Arguments:
viewer
The viewer for which we want to refresh the output of the PDF file
specified in ``pdf_path``.
pdf_path
The path to the PDF file for which we want to refresh the output.
tm_bundle_support
The location of the “LaTeX Bundle” support folder
Returns: ``int``
Examples:
>>> # The viewer application needs to be open before we call the
>>> # function
>>> call('open -a Skim', shell=True)
0
>>> refresh_viewer('Skim', 'test.pdf',
... tm_bundle_support=realpath('Support'))
<p class="info">Tell Skim to refresh 'test.pdf'</p>
0
"""
print('<p class="info">Tell {} to refresh \'{}\'</p>'.format(viewer,
pdf_path))
if viewer in ['Skim', 'TeXShop']:
return call("osascript '{}/bin/refresh_viewer.scpt' {} {} ".format(
tm_bundle_support, viewer, shellquote(pdf_path)),
shell=True)
return 1
|
629e5b8677ad8355acf7372690abff8c7864cd5f
| 27,439 |
def getLineagesFromChangeo(changeodb, print_summary):
"""subsets the changeo_db output by bracer by only those cells which are within lineages (non singletons)"""
df = changeodb
_df = df[df.CLONE != "None"] # get rid of unassigned cells (no BCR reconstructed)
    _df = (_df.CLONE.value_counts() > 1)  # find clones with more than 1 member (uses the frame filtered above)
if print_summary == True:
print( "There are", len(_df[_df == 1]), "lineages with more than one member")
CHANGEO_confidentlineages = df[df.CLONE.isin(_df[_df == 1].index)].sort_values('CLONE')
CHANGEO_confidentlineages = CHANGEO_confidentlineages[CHANGEO_confidentlineages.CLONE != 'None']
if print_summary == True:
print("number of cells in original dataframe", df.shape[0])
print("number of distinct Clones in original dataframe", df.drop_duplicates('CLONE').shape[0] -1) #subtract 1 for the 'None' entry
print(CHANGEO_confidentlineages.shape[0]/df.shape[0], 'percent of cells in a lineage' )
return CHANGEO_confidentlineages
|
1a497b084118ce0993cf6509889831cab78d2a36
| 27,440 |
def pop():
""" Clear the current execution environment for whatever parallel mechanism is used. """
with _lock:
ident = identifier()
envs = _current_envs.get(ident)
if envs:
env = envs.pop()
env.deactivate()
if _current_envs[ident]:
current = _current_envs[ident][-1]
current.activate()
return env
raise ValueError('No environment to clear.')
|
70192457f7ac15eeb763650415ee82ac2b6409e8
| 27,441 |
def _run_cnvkit_cancer(items, background, access_file):
"""Run CNVkit on a tumor/normal pair.
"""
paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
work_dir = _sv_workdir(items[0])
ckout = _run_cnvkit_shared(items[0], [paired.tumor_bam], [paired.normal_bam],
access_file, work_dir, background_name=paired.normal_name)
ckout = theta.run(ckout, paired)
return _associate_cnvkit_out(ckout, items)
|
99ac686b0a24cfd87fdebd59fc691fbbe839323e
| 27,442 |
def meanAdjustELE(site_residuals, azSpacing=0.5,zenSpacing=0.5):
"""
PWL piece-wise-linear interpolation fit of phase residuals
cdata -> compressed data
"""
tdata = res.reject_absVal(site_residuals,100.)
del site_residuals
data = res.reject_outliers_elevation(tdata,5,0.5)
del tdata
numd = np.shape(data)[0]
numZD = int(90.0/zenSpacing) + 1
Neq = np.eye(numZD,dtype=float) * 0.01
Apart = np.zeros((numd,numZD))
sd = np.zeros(numd)
for i in range(0,numd):
        iz = int(np.floor(data[i,2]/zenSpacing))  # zenith bin index (must be an int for indexing)
sd[i] = np.sin(data[i,2]/180.*np.pi)
Apart[i,iz] = 1.#-(data[i,2]-iz*zenSpacing)/zenSpacing)
prechi = np.dot(data[:,3].T,data[:,3])
Neq = np.add(Neq, np.dot(Apart.T,Apart) )
Bvec = np.dot(Apart.T,data[:,3])
Cov = np.linalg.pinv(Neq)
Sol = np.dot(Cov,Bvec)
postchi = prechi - np.dot(Bvec.T,Sol)
pwl = Sol
pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)
model = np.dot(Apart,Sol)
f = loglikelihood(data[:,3],model)
dof = numd - np.shape(Sol)[0]
aic = calcAIC(f,dof)
bic = calcBIC(f,dof,numd)
#print("My loglikelihood:",f,aic,bic,dof,numd)
#print("STATS:",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)
stats = {}
stats['prechi'] = np.sqrt(prechi/numd)
stats['postchi'] = np.sqrt(postchi/numd)
stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)
stats['aic'] = aic
stats['bic'] = bic
return pwl,pwlsig,stats
|
5c543247d64e9c644f6b9401f2d662f62de23bd7
| 27,443 |
import math
import cv2
def rotateImage(input_img, angle):
"""
Rotate the input_img by angle degrees, Rotate center is image center.
:param input_img:np.array, the image to be rotated
:param angle:float, the counterclockwise rotate angle
:return:np.array, the rotated image
"""
radian = angle * math.pi / 180.0
a, b = math.sin(radian), math.cos(radian)
h, w = input_img.shape
w_r = int(math.ceil(h * math.fabs(a) + w * math.fabs(b)))
h_r = int(math.ceil(w * math.fabs(a) + h * math.fabs(b)))
    dx, dy = max(0, (w_r - w) // 2), max(0, (h_r - h) // 2)  # integer border widths for copyMakeBorder
img_rotate = cv2.copyMakeBorder(input_img, dy, dy, dx, dx, cv2.BORDER_CONSTANT, value=(0,0,0))
center = (img_rotate.shape[1] / 2.0, img_rotate.shape[0] / 2.0)
affine_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
img_rotate = cv2.warpAffine(img_rotate, affine_matrix, (img_rotate.shape[1], img_rotate.shape[0]), flags=cv2.INTER_NEAREST)
return img_rotate
|
4c16e6a0af3637d21ffb27fbe0f5fad8e72c5bd2
| 27,444 |
def get_content_model_prefetch(content_model, content_attr='content'):
""" Returns the fields that should be prefetched, for a relation that
starts with '<content_attr>__'. If the model has MTURK_PREFETCH, then that
is used. Otherwise, some common attributes are tested (photo, shape) and
used if those foreign keys exist. """
if hasattr(content_model, 'MTURK_PREFETCH'):
return ['%s__%s' % (content_attr, k)
for k in content_model.MTURK_PREFETCH]
else:
# guess if there is no default
prefetch = []
if has_foreign_key(content_model, 'photo'):
prefetch.append('%s__photo' % content_attr)
if has_foreign_key(content_model, 'shape'):
prefetch.append('%s__shape' % content_attr)
prefetch.append('%s__shape__photo' % content_attr)
return prefetch
|
5abeb44353723ffc8cd17ab9509e5a747d01dd24
| 27,445 |
def dispatch(obj, replacements):
"""make the replacement based on type
:param obj: an obj in which replacements will be made
:type obj: any
:param replacements: the things to replace
:type replacements: tuple of tuples
"""
if isinstance(obj, dict):
obj = {k: dispatch(v, replacements) for k, v in obj.items()}
elif isinstance(obj, list):
obj = [dispatch(l, replacements) for l in obj]
elif isinstance(obj, str):
for replacement in replacements:
obj = obj.replace(replacement[0], replacement[1])
return obj
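# Example: replace '-' with '_' throughout a nested structure:
#   dispatch({"a": ["foo-bar", "baz"]}, (("-", "_"),))
#   -> {"a": ["foo_bar", "baz"]}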
|
8542214f83b6f04f1bbe21baa0710e7885a3b259
| 27,446 |
def process(
mode,
infile,
outdir,
genes,
genes_drop,
genes_bed,
igh,
mmrf,
bolli,
lohr,
normals,
mytype):
"""Main function to process myTYPE SNV and indel output"""
## IMPORTING DATA
variants = import_variants(infile)
## ANNOTATIONS
variants = annotate_cosmic(variants)
if genes:
# Only runs if a path was passed to optional argument "gene"
variants = annotate_genefreq(variants, genes)
# Replace this with mutation frequency from MMRF? (and other raw data?)
variants = annotate_maf(variants)
variants = annotate_normals(variants, normals)
variants = annotate_mmrf(variants, mmrf)
variants = annotate_bolli(variants, bolli)
variants = annotate_lohr(variants, lohr)
if mytype:
# Only runs if a path was passed to optional argument "mytype"
variants = annotate_mytype(variants, mytype)
variants = annotate_known(variants, mytype)
## FILTERS
variants = filter_panel(variants, genes_bed)
if genes_drop:
variants = filter_drop(variants, genes_drop)
variants = filter_igh(variants, igh)
variants = filter_maf(variants)
variants = filter_maf_cosmic(variants, mode)
variants = filter_nonpass(variants, mode)
variants = filter_normals(variants)
variants = filter_vaf(variants)
variants = filter_bidir(variants)
## OUTPUT
name = namecore(infile)
filter_export(variants, outdir, name, mode)
print('Variant processing complete')
return(variants)
|
606cbf58fa9974e968897a9c8289fbe5f1197d77
| 27,447 |
def set_axis_formatter(axis, params=None, **kwargs):
"""
Set axis formatter.
:param axis: Matplotlib axis instance.
:type axis: :class:`matplotlib.axes.Axes`
:param params: Axis formatter parameters.
:type params: dict, optional
Example config:
.. code-block:: python
'formatter': {
'x': {
'major': {
'format': '%.3f'
},
'minor': {
'name': 'PercentFormatter',
'params': [],
'keyword_params': {
}
}
}
}
Default formatter is FormatStrFormatter and it reads 'format' value. Other
formatter is specified with params and keyword_params to pass these values
into formatter class.
"""
config = params or {}
axis_methods = {
'x': 'get_xaxis',
'y': 'get_yaxis',
}
formatter_methods = {
'major': 'set_major_formatter',
'minor': 'set_minor_formatter'
}
instances = {}
need_locator_instances = [
matplotlib.dates.AutoDateFormatter,
]
try:
# ConciseDateFormatter is not supported on Python 3.5 and Matplotlib
# below version 3.1.0. So import it only when it is available.
need_locator_instances.append(matplotlib.dates.ConciseDateFormatter)
    except (ImportError, AttributeError):
pass
locators = kwargs.get('locators', {})
def get_locator_instance(locators, on, which):
instance = locators.get(on, {}).get(which)
if instance is None:
raise ChartError("Could'nt find locator instance "
"required for formatter class")
return instance
for key, value in config.items():
if key not in axis_methods:
continue
gca = getattr(axis, axis_methods[key])()
for which, data in value.items():
if which in formatter_methods:
tick_format = data.get('format')
if tick_format:
name = data.get('name', 'FormatStrFormatter')
formatter = getattr(matplotlib.ticker, name)
getattr(gca, formatter_methods[which])(
formatter(data.get('format')))
else:
name = data.get('name')
if name is None:
continue
for attr in [matplotlib.ticker, matplotlib.dates]:
formatter = getattr(attr, name, None)
if formatter:
break
if formatter is None:
raise ChartError(
'Unsupported formatter class {}'.format(name))
if formatter in need_locator_instances:
locator = get_locator_instance(locators, key, which)
instance = formatter(
locator,
*data.get('params', []),
**data.get('keyword_params', {})
)
else:
instance = formatter(*data.get('params', []),
**data.get('keyword_params', {}))
try:
instances[key].update({which: instance})
except KeyError:
instances.update({key: {which: instance}})
getattr(gca, formatter_methods[which])(instance)
return instances
|
8e5a1ca49b1ed6c29950a8d357664a7902f793ba
| 27,448 |
import typing as tp
from datetime import datetime, time, tzinfo
def convert_tzaware_time(t: time, tz_out: tp.Optional[tzinfo]) -> time:
"""Return as non-naive time.
`datetime.time` should have `tzinfo` set."""
return datetime.combine(datetime.today(), t).astimezone(tz_out).timetz()
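# Example (a sketch assuming a fixed UTC+2 target zone):
#   from datetime import time, timezone, timedelta
#   t = time(12, 0, tzinfo=timezone.utc)
#   convert_tzaware_time(t, timezone(timedelta(hours=2)))  # -> 14:00:00+02:00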
|
8dd59b59d4679789687b3c94d28e4dafbbc1fd01
| 27,449 |
def total_posts():
"""
A simple template tag that shows the number
of posts that have been uploaded so far
"""
return Post.published.count()
|
77ebf33ec5e646e461e57ae18e606766e4679a82
| 27,450 |
from typing import Tuple
def make_policy_prior_network(
spec: specs.EnvironmentSpec, hidden_layer_sizes: Tuple[int, ...] = (64, 64)
) -> networks.FeedForwardNetwork:
"""Creates a policy prior network used by the agent."""
action_size = np.prod(spec.actions.shape, dtype=int)
def _policy_prior_fn(observation_t, action_tm1, is_training=False, key=None):
# is_training and key allows to defined train/test dependant modules
# like dropout.
del is_training
del key
network = hk.nets.MLP(hidden_layer_sizes + (action_size,))
# Policy prior returns an action.
return network(jnp.concatenate([observation_t, action_tm1], axis=-1))
policy_prior = hk.without_apply_rng(hk.transform(_policy_prior_fn))
return make_network_from_module(policy_prior, spec)
|
7720bb9b182a3c19d80e3ecd241e7197fdec6c14
| 27,451 |
import numpy as np
import pandas as pd
from scipy import stats
def simulate_moment_contributions(params, x, y):
"""Calculate moment contributions for example from Honore, DePaula, Jorgensen."""
y_estimated = x.to_numpy() @ (params["value"].to_numpy())
x_np = x.T.to_numpy()
residual = y.T.to_numpy() - stats.norm.cdf(y_estimated)
mom_value = []
length = len(x_np)
for i in range(length):
for j in range(i, length):
moment = residual * x_np[i] * x_np[j]
mom_value.append(moment)
mom_value = np.stack(mom_value, axis=1)[0]
mom_value = pd.DataFrame(data=mom_value)
return mom_value
|
b3c38c3fb85d353eecf4185bd600cb3a6eade732
| 27,452 |
def get_app_host_list(app_id):
"""
    Get all host information for the specified business (app)
    :param app_id: business ID
:return dict: {"InnerIP": host_info}
"""
cc_result = bk.cc.get_app_host_list(app_id=app_id)
return cc_result
|
ce5b305c76b320c60963fa135734ac55e2960896
| 27,453 |
def staking_product_quota(self, product: str, productId: str, **kwargs):
"""Get Personal Left Quota of Staking Product (USER_DATA)
Weight(IP): 1
GET /sapi/v1/staking/personalLeftQuota
https://binance-docs.github.io/apidocs/spot/en/#get-personal-left-quota-of-staking-product-user_data
Args:
product (str)
productId (str)
Keyword Args:
recvWindow (int, optional): The value cannot be greater than 60000
"""
check_required_parameters([[product, "product"], [productId, "productId"]])
params = {"product": product, "productId": productId, **kwargs}
url_path = "/sapi/v1/staking/personalLeftQuota"
return self.sign_request("GET", url_path, params)
|
64587a64a25fedea39e53aa8d5f8857d1844da89
| 27,454 |
import numpy as np
def softmax(x):
    """
    Improved (numerically stable) softmax function
"""
max_x = np.max(x)
y = np.exp(x - max_x) / np.sum(np.exp(x-max_x))
return y
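# Subtracting the max before exponentiating keeps large inputs from overflowing, e.g.:
#   softmax(np.array([1000.0, 1000.0]))  # -> array([0.5, 0.5]) instead of nan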
|
8912601dd73a10dfeb0391b9e292be0de6c1165a
| 27,456 |
def _FloatTraitsBaseClass_write_values_attribute(a, v):
"""_FloatTraitsBaseClass_write_values_attribute(hid_t a, Floats v)"""
return _RMF_HDF5._FloatTraitsBaseClass_write_values_attribute(a, v)
|
8e93038969b901dc9b2696e89a6c8110ed5becb4
| 27,458 |
def get_cifar(root, dataset, transform, mode = 'train'):
"""Get cifar data set
Args :
--root: data root
--dataset: dataset name
--transform: transformation
--mode: 'train'/'test'
"""
assert dataset.count('cifar')
if dataset == 'cifar10':
dataset = tv.datasets.CIFAR10(root = root,
download = True,
train = True if mode == 'train' else False,
transform = transform)
elif dataset == 'cifar100':
dataset = tv.datasets.CIFAR100(root = root,
download = True,
train = True if mode == 'train' else False,
transform = transform)
else:
raise Exception('No other data sets!')
return dataset
|
daf68549c2d6719f4a1fe7ae8c252e264288a5da
| 27,459 |
from pathlib import Path
import textwrap
def config(base_config):
""":py:class:`nemo_nowcast.Config` instance from YAML fragment to use as config for unit tests."""
config_file = Path(base_config.file)
with config_file.open("at") as f:
f.write(
textwrap.dedent(
"""\
file group: allen
temperature salinity:
download:
status file url template: 'https://liveocean.apl.uw.edu/output/f{yyyymmdd}/ubc_done.txt'
bc file url template: 'https://liveocean.apl.uw.edu/output/f{yyyymmdd}/ubc.nc'
file name: low_passed_UBC.nc
dest dir: forcing/LiveOcean/downloaded
"""
)
)
config_ = nemo_nowcast.Config()
config_.load(config_file)
return config_
|
9c23b47ff355a68562446d41bca3d01b5a90d2d1
| 27,460 |
def mark_cell_function(fun, mesh, foc, regions):
"""
Iterates over the mesh and stores the
region number in a meshfunction
"""
if foc is None:
foc = calculus.estimate_focal_point(mesh)
for cell in dolfin.cells(mesh):
# Get coordinates to cell midpoint
x = cell.midpoint().x()
y = cell.midpoint().y()
z = cell.midpoint().z()
T = calculus.cartesian_to_prolate_ellipsoidal(x, y, z, foc)
fun[cell] = calculus.strain_region_number(T, regions)
return fun
|
0d91fa752350de5f51cf841770234675712f6f06
| 27,462 |
import json
def incorrect_format():
"""for handling incorrect json format"""
js = json.dumps({'error': 'Incorrect format.'})
return Response(js, status=422, mimetype='application/json')
|
971096fde565d66786196773d99d3d7849fd6d14
| 27,463 |
import multiprocessing
def start_multiprocessing(function_list, data_list):
"""
Creates and runs a multiprocessing pool for (1..n) functions all of which
use the same data_list (e.g. YouTube video links). Returns a dictionary
of results indexed by function.
"""
with multiprocessing.Pool(initializer=set_global_session) as pool:
results = {}
for function in function_list:
print(f"\n* Setting up multiprocessing for: {function.__name__} ({len(data_list)} items).\n")
results[function] = pool.map(function, data_list)
print(f"\n* {function.__name__}: multiprocessing complete.\n")
return(results)
|
d02e00107f52bf0fa0a6bbc0da834c5c7638b33c
| 27,464 |
def GetNodeProperty(line):
"""
Get node property from a string.
:param line: a string
:return: name, size, and position of the node
"""
name, attr = NameAndAttribute(line)
name = ProcessName(name, False)
position = GetAttributeValue("pos", attr)[:-1].replace(",", "-")
attr = CleanAttribute(attr)
width = GetAttributeValue("width", attr)
#group = GetAttributeValue("color", attr)
size = SizeScale(GetSize(width))
return name, size, position
|
2f63af91864236033783d773439f829b9d7f405a
| 27,465 |
def login(request):
"""
Logs in a user.
"""
if request.method != "GET":
return _methodNotAllowed()
options = _validateOptions(request, {})
if type(options) is str:
return _response(options)
user = userauth.authenticateRequest(request, storeSessionCookie=True)
if type(user) is str:
return _response(user)
elif user == None:
return _unauthorized()
else:
return _response("success: session cookie returned")
|
abde5c258899cc8fcfdef0e0286e5e846d075a27
| 27,466 |
import numpy as np
from scipy.optimize import fsolve
def wavenumber(f, h, g=9.81):
""" solves the dispersion relation, returns the wave number k
INPUTS:
      f: wave frequency [Hz], scalar or array-like (converted internally to omega = 2*pi*f)
h : water depth [m]
g: gravity [m/s^2]
OUTPUTS:
k: wavenumber
"""
omega = 2*np.pi*f
if hasattr(omega, '__len__'):
k = np.array([fsolve(lambda k: om**2/g - k*np.tanh(k*h), (om**2)/g)[0] for om in omega])
else:
func = lambda k: omega**2/g - k*np.tanh(k*h)
k_guess = (omega**2)/g
k = fsolve(func, k_guess)[0]
return k
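# Deep-water sanity check: for k*h >> 1, tanh(k*h) ~ 1 and the dispersion relation
# reduces to k ~ omega**2 / g, so wavenumber(0.1, 1000.0) should be close to
# (2*np.pi*0.1)**2 / 9.81 ~ 0.040 rad/m.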
|
a07541e5327cd778cf34b792380f2f25f9617c05
| 27,467 |
def command(cmd, label, env={}):
"""Create a Benchpress command, which define a single benchmark execution
This is a help function to create a Benchpress command, which is a Python `dict` of the parameters given.
Parameters
----------
cmd : str
The bash string that makes up the command
label : str
The human readable label of the command
env : dict
The Python dictionary of environment variables to define before execution'
Returns
-------
command : dict
The created Benchpress command
"""
return {'cmd': cmd,
'label': label,
'env': env}
|
487e7b8518ae202756177fc103561ea03ded7470
| 27,468 |
def adcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Northward Velocity Profile (VELPROF-VLN)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: Christopher Wingard. Initial code.
2014-02-03: Christopher Wingard. Formatting and adjusting to use
magnetic declination values calculated use the WMM 2010.
2014-03-28: Russell Desiderio. Corrected documentation only.
2014-04-04: Russell Desiderio. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: Christopher Wingard. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: Russell Desiderio.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
vv_cor = adcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
vv_corr = north velocity profiles in Earth coordinates corrected for the
magnetic declination (VELPROF-VLN_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
_, vv_cor = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
vv_cor = vv_cor / 1000. # mm/s -> m/s
# return the Northward Velocity Profile
return vv_cor
|
9f7c427a7f3ddbbfbdfe1f56269ddf04f99ea2bb
| 27,469 |
import re
def get_text(string):
"""
normalizing white space and stripping HTML markups.
"""
text = re.sub('\s+',' ',string)
text = re.sub(r'<.*?>',' ',text)
return text
|
5ef04effe14ee9b0eee90de791b3e3f3be6c15e3
| 27,470 |
import struct
def decode_ia(ia: int) -> str:
""" Decode an individual address into human readable string representation
>>> decode_ia(4606)
'1.1.254'
See also: http://www.openremote.org/display/knowledge/KNX+Individual+Address
"""
if not isinstance(ia, int):
ia = struct.unpack('>H', ia)[0]
return '{}.{}.{}'.format((ia >> 12) & 0x1f, (ia >> 8) & 0x07, (ia) & 0xff)
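# An encoder for the opposite direction is not part of this snippet; assuming the
# same 5/3/8-bit area/line/device layout, a sketch could be:
def encode_ia_sketch(area: int, line: int, device: int) -> int:
    return ((area & 0x1f) << 12) | ((line & 0x07) << 8) | (device & 0xff)

# encode_ia_sketch(1, 1, 254) == 4606, which decode_ia maps back to '1.1.254'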
|
6f107f47110a59ca16fe8cf1a7ef8f061bf117c7
| 27,471 |
from numpy import array
def dd_to_dms(deg):
"""Return degrees, minutes, seconds from degrees decimal"""
mins, secs = divmod(deg * 3600, 60)
degs, mins = divmod(mins, 60)
return array((degs, mins, secs), dtype='i4')
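# Example: 30.5 degrees -> 30 deg, 30 min, 0 sec
#   dd_to_dms(30.5)  # -> array([30, 30, 0], dtype=int32)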
|
f0bb8287ffb14f5d5643d5f9709dbad37381a8ae
| 27,472 |
def _compare_labels(primary, label1, label2):
"""
Compare two labels disposition
:param primary: the primary label
:param label1: the first label
:param label2: the second label
:return A tuple containing True if there is no difference False otherwise
and the output text
"""
output = ""
try:
l1_disp = label1['disp'][primary]
l2_disp = label2['disp'][primary]
except KeyError:
raise InvalidSymmetry()
if l1_disp != l2_disp:
output += "Change in disposition for {cat} {label} [{cp}] (2->1):" \
" {l2disp} => {l1disp}\n".format(cat=label2['cat'],
label=label2['bidi'],
cp=label2['cp_out'],
l2disp=l2_disp,
l1disp=l1_disp)
if label2['cat'] == 'Primary':
return False, output
try:
output += "\nRules for LGR2:\n{}\n".format(label2['rules'][primary])
output += "\nRules for LGR1:\n{}".format(label1['rules'][primary])
except KeyError:
raise InvalidSymmetry()
return False, output
return True, output
|
9b1802d4b9bab0a71e1509bb5e49388267a96a29
| 27,473 |
def set_pause(df, index, ts_name):
"""
:param df:
Spark DataFrame object with timestamp data
:param index:
fix index type: 'i1', 'i2', 'i3', 'j1', 'j2', 'j3'
:param ts_name:
column with timestamp data
:return:
Spark DataFrame object with timestamp data
"""
"""
run after proc_segment
start from set: 3 3 duration 0.0
"""
## calculate distance between two fixes
app_fun = F.udf(lambda a, b, c, d: calc_distance(a, b, c, d))
w2 = Window.partitionBy('segment').orderBy(ts_name)
pcol = 'pause2'
df2 = df.withColumn(pcol, F.when(F.col(index).isNotNull() &
(F.lag('state', 1).over(w2) == 3) &
(F.lag('tripType', 1).over(w2) == 2) &
(F.col('state') == 3) &
(F.col('tripType') == 3) &
(F.lead('tripType', 1).over(w2) != 3) & ####### CHECK
(F.col('pause') == F.col('duration')),
F.col('duration')
)
).orderBy(ts_name)
## avoid to pick up multiple lines with pcol non-null
df2 = df2.withColumn('check', F.when(F.col(index).isNotNull(),
F.last(pcol, ignorenulls=True)
.over(w2.rowsBetween(Window.unboundedPreceding, 0))
).otherwise(F.col(pcol))
).orderBy(ts_name)
df2 = df2.withColumn(pcol, F.when(F.col(index).isNotNull() &
F.col(pcol).isNotNull() &
F.lag('check', 1).over(w2).isNull(),
F.col(pcol)
)
).drop('check').orderBy(ts_name)
df2 = df2.withColumn('pause_cp', F.when(F.col(index).isNotNull(), F.col('pause')))
df2 = df2.withColumn('pause_dist_cp', F.when(F.col(index).isNotNull(), F.col('pause_dist')))
df2 = df2.withColumn(pcol, F.when(F.col(index).isNotNull(),
F.last(pcol, ignorenulls=True)
.over(w2.rowsBetween(0, Window.unboundedFollowing))
).otherwise(F.col(pcol))
).orderBy(ts_name)
## calculate pause-time for merged segments
df2 = df2.withColumn('pause', F.when(F.col(index).isNotNull() &
F.col(pcol).isNotNull() &
F.lead(pcol, 1).over(w2).isNull() &
(F.col('pause') == F.col('duration')),
F.col('cum_pause') - F.col('duration')
).otherwise(F.col('pause'))
).orderBy(ts_name)
df2 = df2.withColumn('pause', F.when(F.col(index).isNotNull() &
F.col(pcol).isNull() &
(F.col('pause') != F.col('cum_pause') - F.col('duration')),
None
).otherwise(F.col('pause'))
)
df2 = df2.withColumn('pause', F.when(F.col(index).isNotNull() &
F.col(pcol).isNull(),
F.last('pause', ignorenulls=True).over(w2)
).otherwise(F.col('pause'))
).orderBy(ts_name)
df2 = df2.withColumn(pcol, F.when(F.col(index).isNotNull() &
(F.col('pause') == F.col('cum_pause') - F.col('duration')),
None
).otherwise(F.col(pcol))
)
df2 = df2.withColumn('pause', F.when(F.col(index).isNotNull() &
F.col(pcol).isNull(),
F.col('cum_pause') - F.col('pause')
).otherwise(F.col('pause'))
)
## compute the distance traveled from the beginning of a pause
df2 = df2.withColumn('pause_dist', F.when(F.col(index).isNotNull() &
F.col(pcol).isNull() &
(F.col('pause') != F.col('duration')),
None
).otherwise(F.col('pause_dist'))
)
df2 = df2.withColumn('lat2', F.when(F.col('pause_dist').isNotNull(), F.col('lat')))
df2 = df2.withColumn('lat2', F.when(F.col('pause_dist').isNull(),
F.last('lat2', ignorenulls=True).over(w2)
)
.otherwise(F.col('lat'))
).orderBy(ts_name)
df2 = df2.withColumn('lat2', F.when(F.col('lat2').isNull(), F.col('lat')).otherwise(F.col('lat2')))
df2 = df2.withColumn('lon2', F.when(F.col('pause_dist').isNotNull(), F.col('lon')))
df2 = df2.withColumn('lon2', F.when(F.col('pause_dist').isNull(),
F.last('lon2', ignorenulls=True).over(w2)
).otherwise(F.col('lon'))
).orderBy(ts_name)
df2 = df2.withColumn('lon2', F.when(F.col('lon2').isNull(), F.col('lon')).otherwise(F.col('lon2')))
df2 = df2.withColumn('pause_dist', F.when(F.col(index).isNotNull() &
F.col(pcol).isNull(),
app_fun(F.col('lat'), F.col('lon'), F.col('lat2'), F.col('lon2'))
).otherwise(F.col('pause_dist'))
)
df2 = df2.withColumn('pause_dist', F.when(F.col(index).isNotNull() &
F.col('pause').isNull(),
F.col('pause_dist_cp')).otherwise(F.col('pause_dist'))
).orderBy(ts_name)
df2 = df2.withColumn('pause', F.when(F.col(index).isNotNull() &
F.col('pause').isNull(),
F.col('pause_cp')).otherwise(F.col('pause'))
).orderBy(ts_name)
df2 = df2.drop(*['lat2', 'lon2', 'pause_cp', 'pause_dist_cp', pcol])
return df2
|
cd7356ca134b5b0d3c91351478a9bd202cf2fcad
| 27,474 |
from pathlib import Path
def _validate_magics_flake8_warnings(actual: str, test_nb_path: Path) -> bool:
"""Validate the results of notebooks with warnings."""
expected = (
f"{str(test_nb_path)}:cell_1:1:1: F401 'random.randint' imported but unused\n"
f"{str(test_nb_path)}:cell_1:2:1: F401 'IPython.get_ipython' imported but unused\n"
f"{str(test_nb_path)}:cell_3:6:21: E231 missing whitespace after ','\n"
f"{str(test_nb_path)}:cell_3:11:10: E231 missing whitespace after ','\n"
)
return actual == expected
|
4baa419ad4e95bf8cc794298e70211c0fa148e5b
| 27,475 |
def _drop_index(index, autogen_context):
"""
Generate Alembic operations for the DROP INDEX of an
:class:`~sqlalchemy.schema.Index` instance.
"""
text = "%(prefix)sdrop_index(%(name)r, "\
"table_name='%(table_name)s'%(schema)s)" % {
'prefix': _alembic_autogenerate_prefix(autogen_context),
'name': _render_gen_name(autogen_context, index.name),
'table_name': index.table.name,
'schema': ((", schema='%s'" % index.table.schema)
if index.table.schema else '')
}
return text
|
43b6b5391b69896b1e7409108d79661a13697391
| 27,476 |
def add_metabolite_drain_reactions(mod_, metab_list, prefix_id="MDS"):
"""Take in a model and add metabolite drain reactions for a list of metabolites
in the model. These metabolite drain reactions will have the identification
of (MDS)__(metabolite id). (i.e., MDS__atp_c for the atp_c metabolite)
"""
mod = mod_.copy()
metab_obj_names=[]
for metab in metab_list:
obj_name = prefix_id+"__"+metab
metab_drain = Reaction(obj_name)
metab_drain.lower_bound = 0
metab_drain.upper_bound = 1000.
metab_drain.add_metabolites({mod.metabolites.get_by_id(metab): -1.0})
mod.add_reaction(metab_drain)
metab_obj_names.append(obj_name)
return mod, metab_obj_names
|
ea8cdf76896d4ab70dbb5f7fd4c15f4e83be7ba5
| 27,477 |
import numpy as np
def tax_total(par):
"""
Finds total tax burden in a log normal distributed population
Args:
par: simplenamespace containing relevant parameters
phi (float): C-D weights
epsilon (float): public housing assement factor
r (float): mortgage interest
tau_g (float): base housing tax
tau_p (float): progressive housing tax
p_bar (float): cutoff price
m (float): cash-on-hand
seed (int): seed number for random draws
mu (float): mean value for the distribution
sigma (float): standard deviation for the distribution
Local variables:
h_cit (float): housing quality choice of one citizen in the population
c_cit (float): other consumption choice of one citizen in the population
u_cit (float): utility for one citizen in the population given chice of h and c
Returns:
T (float): total tax burden
"""
# Set seed and tax = 0
np.random.seed(par.seed)
T = 0
# Loop through every citizen in the population and calculate optimal choices
# and tax given those choices
for i in range(par.pop):
par.m = np.random.lognormal(par.mu, par.sigma)
h_cit, c_cit, u_cit = u_optimize(par)
T += par.tau_g*(h_cit*par.epsilon) + par.tau_p*max((par.epsilon*h_cit)-par.p_bar, 0)
return T
|
f3a2ace349bcf25bffda855d3bad11cbeef9b6c0
| 27,478 |
def _split_uri(uri):
"""
Get slash-delimited parts of a ConceptNet URI.
Args:
uri (str)
Returns:
List[str]
"""
uri = uri.lstrip("/")
if not uri:
return []
return uri.split("/")
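# Examples:
#   _split_uri("/c/en/dog")  # -> ["c", "en", "dog"]
#   _split_uri("/")          # -> []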
|
91b48fff83041fe225a851a9e3016e3722bd9771
| 27,479 |
def file_len(fname):
""" Calculate the length of a file
Arguments:
Filename: Name of the file wanting to count the rows of
Returns:
i+1: Number of lines in file
"""
    with open(fname) as f:
        i = -1  # handles an empty file (the loop body never runs)
        for i, l in enumerate(f):
            pass
    return i + 1
|
d571b048649c636c359e731d72693aed26ef1595
| 27,480 |
def create_initializer(initializer_range=0.001):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range)
|
70eb43744202c5abb34fca4bb0bce6b2b2d31d72
| 27,481 |
from typing import List
def collapse_columns(
df: ContactsTable, names: List[str], new_name: str
) -> ContactsTable:
"""
This function assumes that df has both columns and indexes identified by the same `names`. They will all be added
together to create a new column and row named `new_name`. Eg.:
>>> df = ContactsTable(pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}, index=list("abc")))
>>> collapse_columns(df, ["a", "b"], "a'")
a' c
a' 12 15
c 9 9
:param df: a contacts table type table. That means it's a upper triangle matrix
:param names: name of the columns and indexes to aggregate
:param new_name: name of the new column and row that will be created
:return: A dataframe with collapsed columns and indexes
"""
if not names:
raise ValueError("Names must be a non-empty list")
missing_columns = set(names) - set(df.columns)
if missing_columns:
raise ValueError(f"Names mismatch: {missing_columns}")
if not all(df.columns == df.index):
raise ValueError("Indexes and columns must match")
agg = df.copy()
agg[names[0]] = df[names].sum(axis=1)
agg = agg.rename({names[0]: new_name}, axis=1)
agg = agg.drop(columns=names[1:])
agg.loc[names[0]] = agg.loc[names].sum()
agg = agg.rename({names[0]: new_name}, axis=0)
agg = agg.drop(index=names[1:])
return ContactsTable(agg)
|
7dd38ec18ca26b4757f7b743813401e6d7b2a7d4
| 27,482 |
import time
def current_date(pattern="%Y-%m-%d %H:%M:%S"):
"""
    Get the current date and time as a formatted string.
    :param pattern: format string for the returned date
    :return: a string such as "2020-06-15 14:57:23"
"""
return time.strftime(pattern, time.localtime(time.time()))
|
9a554e91e0842fe52f822f4403366695c92a609b
| 27,483 |
def ubi(funding_billions=0, percent=0):
""" Calculate the poverty rate among the total US population by:
-passing a total level of funding for a UBI proposal (billions USD),
-passing a percent of the benefit recieved by a child and the benefit
recieved by an adult
AND
taking into account that funding will be raise by a flat tax leveled on each households
taxable income """
percent = percent / 100
funding = funding_billions * 1e9
target_persons = person.copy(deep=True)
    # percent is the share of total funding allocated to children, not the per-person benefit
adult_ubi = ((1 - percent) * funding) / adult_pop
child_ubi = (percent * funding) / child_pop
tax_rate = funding / total_taxable_income
target_persons['hh_new_tax'] = target_persons.hh_tax_income * tax_rate
target_persons['hh_ubi'] = (target_persons.hh_adults * adult_ubi +
target_persons.hh_children * child_ubi)
target_persons['new_spm_resources'] = (target_persons.spm_resources +
target_persons.hh_ubi -
target_persons.hh_new_tax)
target_persons['new_spm_resources_pp'] = (target_persons.new_spm_resources /
(target_persons.hh_total_people))
# Calculate poverty rate
target_persons['poor'] = (target_persons.new_spm_resources <
target_persons.spm_povthreshold)
total_poor = (target_persons.poor * target_persons.weight).sum()
poverty_rate = (total_poor / pop * 100)
# Calculate poverty gap
target_persons['poverty_gap'] = target_persons.spm_povthreshold - target_persons.new_spm_resources
poverty_gap = (((target_persons.poor * target_persons.poverty_gap
* target_persons.weight).sum()))
# Calculate Gini
gini = mdf.gini(target_persons, 'new_spm_resources_pp', w='weight')
# Percent winners
target_persons['better_off'] = (target_persons.new_spm_resources > target_persons.spm_resources)
total_better_off = (target_persons.better_off * target_persons.weight).sum()
percent_better_off = total_better_off / pop
return pd.Series([poverty_rate, gini, poverty_gap, percent_better_off, adult_ubi, child_ubi])
|
3cdb9ea40085dce37f5746ff74b37f18c87e6c48
| 27,484 |
import numpy as np
def local_disp_centr(x, y, image, disp_n, size_k, mode):
    """
    Compute the local dispersion and central tendency of the size_k x size_k
    window centred at (x, y). Returns a tuple: (disp_l, centr_l)
    """
border_pixels = image[x-size_k//2 : x+size_k//2+1, y-size_k//2 : y+size_k//2+1]
if mode == 'robust':
percentiles = np.percentile(border_pixels, [75, 50, 25])
disp_l = percentiles[2] - percentiles[0] # interquartile range
centr_l = percentiles[1] # the 50 percentile is the median
elif mode == 'average':
disp_l = np.std(border_pixels)
centr_l = np.mean(border_pixels)
else:
raise ValueError('Invalid denoising mode')
if disp_l == 0:
disp_l = disp_n
return disp_l, centr_l
|
c74c72d732b3c7ec9cf7c4b10a13e9b38af1a3d9
| 27,486 |
import aiohttp
async def get_music_list() -> MusicList:
"""
    Fetch all maimaiDX music data and chart statistics.
"""
async with aiohttp.request("GET", 'https://www.diving-fish.com/api/maimaidxprober/music_data') as obj_data:
if obj_data.status != 200:
            raise aiohttp.ClientResponseError('Failed to fetch maimaiDX music data, please check your network connection')
else:
data = await obj_data.json()
async with aiohttp.request("GET", 'https://www.diving-fish.com/api/maimaidxprober/chart_stats') as obj_stats:
if obj_stats.status != 200:
            raise aiohttp.ClientResponseError('Failed to fetch maimaiDX chart statistics, please check your network connection')
else:
stats = await obj_stats.json()
total_list: MusicList = MusicList(data)
for i in range(len(total_list)):
total_list[i] = Music(total_list[i])
total_list[i]['stats'] = stats[total_list[i].id]
for j in range(len(total_list[i].charts)):
total_list[i].charts[j] = Chart(total_list[i].charts[j])
total_list[i].stats[j] = Stats(total_list[i].stats[j])
return total_list
|
44b6f42377391ab3eccda8f8fb770dc2ab238019
| 27,487 |
import csv
import ast
def split_data(data_index,data_dir,amount):
"""
Split data into 'train' and 'test' sets.
    data_index = csv filename listing labels and file paths
    data_dir = directory in which the data is stored
    amount = percentage of the data to import (to reduce memory usage)
"""
lfiles = []
with open(data_index, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
lfiles = list(reader)
dfiles = []
dimages = []
dlabels = []
dlabel_names = []
total_l = len(lfiles)
amount = int(total_l / 100 * int(amount))
for idx,val in enumerate(lfiles):
if idx < amount:
labels = ast.literal_eval(val[0])
#labels = [n.strip() for n in x]
labels_names = ast.literal_eval(val[2])
#label_names = [n.strip() for n in y]
dlabels.append(labels)
# Fix file path to full path depending data_dir argv:
dfiles.append(data_dir + val[1])
dlabel_names.append(labels_names)
ds_train = {
'files':dfiles[:int(len(dfiles)/2)],
'images':[],
'labels':dlabels[:int(len(dlabels)/2)],
'label_names':dlabel_names[:int(len(dlabel_names)/2)]
}
ds_test = {
'files':dfiles[int(len(dfiles)/2):],
'images':[],
'labels':dlabels[int(len(dlabels)/2):],
'label_names':dlabel_names[int(len(dlabel_names)/2):]
}
return [ds_train,ds_test]
|
70aa1df0b666b231b0ec5d8b3d6bfec7409e26c9
| 27,488 |
def isValidPasswordPartTwo(firstIndex: int, secondIndex: int, targetLetter: str, password: str) -> int:
"""
Takes a password and returns 1 if valid, 0 otherwise. Second part of the puzzle
"""
bool1: bool = password[firstIndex - 1] == targetLetter
bool2: bool = password[secondIndex - 1] == targetLetter
return 1 if bool1 ^ bool2 else 0
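
# A minimal usage sketch, using the well-known "1-3 a: abcde" example policy:
# position 1 holds 'a' and position 3 does not, so the password is valid.
if __name__ == "__main__":
    print(isValidPasswordPartTwo(1, 3, "a", "abcde"))  # prints 1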
|
81f85c3848909b5037f13ed641ec3a1b77dff3b1
| 27,489 |
import json
from typing import OrderedDict
def read_yaml_or_json(url: str) -> dict:
"""Read a YAML or JSON document.
:param url: URL or path; if `url` ends with '.yaml', the document is interpreted
as YAML, otherwise it is assumed to be JSON.
:return: a dictionary with the document contents.
"""
with open_url(url, decode=True) as fp:
if url.endswith(".yaml"):
return yaml_ordered_load(fp)
else:
return json.load(fp, object_pairs_hook=OrderedDict)
|
27cb578e7194e1f1635cfd4d083c846f4bb5fdf2
| 27,492 |
import numpy as np
def diagEst(matFun, n, k=None, approach='Probing'):
"""
Estimate the diagonal of a matrix, A. Note that the matrix may be a
function which returns A times a vector.
Three different approaches have been implemented:
1. Probing: cyclic permutations of vectors with 1's and 0's (default)
2. Ones: random +/- 1 entries
3. Random: random vectors
:param callable matFun: takes a (numpy.array) and multiplies it by a matrix to estimate the diagonal
:param int n: size of the vector that should be used to compute matFun(v)
:param int k: number of vectors to be used to estimate the diagonal
:param str approach: approach to be used for getting vectors
:rtype: numpy.array
:return: est_diag(A)
Based on Saad http://www-users.cs.umn.edu/~saad/PDF/umsi-2005-082.pdf,
and http://www.cita.utoronto.ca/~niels/diagonal.pdf
"""
if type(matFun).__name__ == 'ndarray':
A = matFun
def matFun(v):
return A.dot(v)
if k is None:
        k = int(np.floor(n / 10.))
if approach.upper() == 'ONES':
def getv(n, i=None):
v = np.random.randn(n)
v[v < 0] = -1.
v[v >= 0] = 1.
return v
elif approach.upper() == 'RANDOM':
def getv(n, i=None):
return np.random.randn(n)
else: # if approach == 'Probing':
def getv(n, i):
v = np.zeros(n)
v[i:n:k] = 1.
return v
Mv = np.zeros(n)
vv = np.zeros(n)
for i in range(0, k):
vk = getv(n, i)
Mv += matFun(vk)*vk
vv += vk*vk
d = Mv/vv
return d
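
# A minimal usage sketch with hypothetical sizes: probe the diagonal of a small
# random matrix and report the largest deviation from the exact diagonal.
if __name__ == "__main__":
    A = np.random.randn(100, 100)
    d_est = diagEst(A, n=100, k=20, approach='Probing')
    print(np.abs(d_est - np.diag(A)).max())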
|
631d11b6f9201404ec75d9ffbc04b8c77b82e244
| 27,493 |
def delete_a_recipe(category_id, recipe_id):
"""Method to handle delete permanently a single recipe"""
user_id = decode_auth_token(request.headers.get("Authorization"))
if isinstance(user_id, int):
recipe = Recipe()
response = recipe.delete_recipe(category_id, recipe_id)
return response
else:
return invalid_token()
|
216e1b40a49bd477d4bdd5c748672ae4286b8e46
| 27,494 |
def hydrate_datatrust_state(data={}):
"""
Given a dictionary, allow the viewmodel to hydrate the data needed by this view
"""
vm = State()
return vm.hydrate(data)
|
69091f66cce513eafb7e14c13e43fa5a4ce9d2f6
| 27,495 |
def preprocess_data(X, Y):
""" This method has the preprocess to train a model """
X_p = X
# changind labels to one-hot representation
Y_p = K.utils.to_categorical(Y, 9)
return (X_p, Y_p)
|
f2288be9fdb752ea67ddd0d493375d019a5981ca
| 27,496 |
import json
def readLog(logPath):
"""Reads a file containing an array of json objects. \
Used with the 'gcloudformatter' function.
Args:
logPath: file system path to map file
Returns:
An array of json objects
"""
logger.info("Reading log {}".format(logPath))
with open(logPath, 'r') as f:
log = json.load(f)
return log
|
9e946ae1735025be2f9b4b5f271c19bbc64d4a61
| 27,497 |
from typing import List
import csv
from datetime import datetime
def save_traces_file(traces_file_name, animals_list: List[Animal]):
"""
Saves TRACES compatible import file, based on animals in animals_list
:param traces_file_name:
:param animals_list:
:return:
"""
def save_traces_file_cattles():
fd = open(traces_file_name, 'w')
try:
writer = csv.writer(fd, dialect='excel', delimiter=';', quoting=csv.QUOTE_NONE)
writer.writerow(['[COLUMNS]'])
writer.writerow(['official_ident', 'numpassportemp'])
writer.writerow([])
writer.writerow(['[DATA]'])
for animal in animals_list:
row = [animal.animal_id, animal.passport_id]
writer.writerow(row)
writer.writerow(['[DATA]'])
finally:
fd.close()
def save_traces_file_sheeps():
fd = open(traces_file_name, 'w')
try:
writer = csv.writer(fd, dialect='excel', delimiter=';', quoting=csv.QUOTE_NONE)
writer.writerow(['[COLUMNS]'])
writer.writerow(['official_indiv', 'age_month', 'sexinfo'])
writer.writerow([''])
writer.writerow(['[DATA]'])
now = datetime.now()
for animal in animals_list:
row = [animal.animal_id, animal.age_in_months(now), animal.get_traces_sex()]
writer.writerow(row)
writer.writerow(['[DATA]'])
finally:
fd.close()
if not animals_list:
return False
if animals_list[0].species == ANIMAL_SPECIES_CATTLE:
save_traces_file_cattles()
elif animals_list[0].species == ANIMAL_SPECIES_SHEEP:
save_traces_file_sheeps()
else:
raise NotImplementedError
return True
|
8af33024ee413300147c7dcb6cd34a211609ef03
| 27,498 |
import numpy as np
from sklearn.neighbors import NearestNeighbors
def get_KNN(X, k):
"""Identify nearest neighbours
Parameters
----------
    X : array, [n_samples, n_features]
input data
k : int
number of nearest neighbours
Returns
-------
knn_graph : array, [n_samples, n_samples]
Connectivity matrix with binary edges connecting nearest neighbours
"""
knn = NearestNeighbors(n_neighbors=k, metric='cosine')
# sparse neighbourhood graph
W = knn.fit(X).kneighbors_graph(mode='connectivity')
# into square matrix
W = W.toarray()
# enforce symmetry (not true kNN)
W = np.fmax(W,W.T)
#knn_graph = W*(1-D) # similarity not distance
return W
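
# A minimal usage sketch with hypothetical data: build a symmetric 5-nearest-neighbour
# connectivity matrix for 20 random feature vectors.
if __name__ == "__main__":
    X = np.random.rand(20, 8)
    W = get_KNN(X, k=5)
    print(W.shape)  # (20, 20), binary entries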
|
9b62be61837a0c500451f286eb1a043a81939e19
| 27,499 |
def createMode():
"""Required to initialize the module. RV will call this function to create your mode."""
return AudioForSequence()
|
aa3095a7b0754da8cb4739c3e3198f8304a21661
| 27,500 |
def pre_process(cpp_line):
"""预处理"""
# 预处理
cpp_line = cpp_line.replace('\t', ' ')
cpp_line = cpp_line.replace('\n', '')
cpp_line = cpp_line.replace(';', '')
return cpp_line
|
4c0db8ae834286106472aba425c45a8eeded3183
| 27,501 |
def actions_db(action_id):
"""
replaces the actual db call
"""
if action_id == 'not found':
return None
elif action_id == 'state error':
return {
'id': 'state error',
'name': 'dag_it',
'parameters': None,
'dag_id': 'state error',
'dag_execution_date': '2017-09-06 14:10:08.528402',
'user': 'robot1',
'timestamp': '2017-09-06 14:10:08.528402',
'context_marker': '8-4-4-4-12a'
}
else:
return {
'id': '59bb330a-9e64-49be-a586-d253bb67d443',
'name': 'dag_it',
'parameters': None,
'dag_id': 'did2',
'dag_execution_date': '2017-09-06 14:10:08.528402',
'user': 'robot1',
'timestamp': '2017-09-06 14:10:08.528402',
'context_marker': '8-4-4-4-12a'
}
|
cd9e8e87ce5535648b4e7a5e58d0333b80e9ae1c
| 27,502 |
def parse(argv):
"""
Parse command line options. Returns the name of the command
and any additional data returned by the command parser.
May raise CommandParsingError if there are problems.
"""
# Find the command
if len(argv) < 1:
full = None
cmd = command_dict[None]
else:
try:
full = complete(argv[0])
        except KeyError as e:
raise CommandParsingError(str(e))
if full is not None and full in command_dict:
cmd = command_dict[full]
argv = argv[1:]
else:
cmd = command_dict[None]
try:
return None, cmd.parse(*getopt.gnu_getopt(argv, cmd.short_opts, cmd.long_opts))
            except getopt.GetoptError as e:
raise CommandParsingError("unrecognised command '%s'" % argv[0])
# Process arguments
try:
return full, cmd.parse(*getopt.gnu_getopt(argv, cmd.short_opts, cmd.long_opts))
    except getopt.GetoptError as e:
raise CommandParsingError(str(e))
|
09867eeb1a17f7609c5f99fc9c5b2d80442a7b44
| 27,504 |
def _get_answer_spans(text, ref_answer):
"""
Based on Rouge-L Score to get the best answer spans.
:param text: list of tokens in text
:param ref_answer: the human's answer, also tokenized
:returns max_spans: list of two numbers, marks the start and end position with the max score
"""
max_score = -1.
max_spans = [0, len(text)-1]
for start, _token in enumerate(text):
if _token not in ref_answer: continue
for end in range(len(text)-1, start-1, -1):
scorer = recall_score # rouge_score, rouge score is too slow
_score = scorer(text[start: end+1], ref_answer)
if _score > max_score:
max_score = _score
max_spans = [start, end]
if max_score > 0.9:
return max_spans
    # Warning: the end position is inclusive in the returned answer span
return max_spans
|
aa99c85dec40bcb01457b31b358aac9b50238284
| 27,505 |
from functools import cmp_to_key
def order(list, cmp=None, key=None, reverse=False):
""" Returns a list of indices in the order as when the given list is sorted.
For example: ["c","a","b"] => [1, 2, 0]
This means that in the sorted list, "a" (index 1) comes first and "c" (index 0) last.
"""
if cmp and key:
f = lambda i, j: cmp(key(list[i]), key(list[j]))
elif cmp:
f = lambda i, j: cmp(list[i], list[j])
elif key:
f = lambda i, j: int(key(list[i]) >= key(list[j])) * 2 - 1
else:
f = lambda i, j: int(list[i] >= list[j]) * 2 - 1
return sorted(range(len(list)), key=cmp_to_key(f), reverse=reverse)
|
7bcc6f44f02be4fb329b211b5caadf057d6d9b9a
| 27,506 |
import requests
def curl_post(method, txParameters=None, RPCaddress=None, ifPrint=False):
"""
call Ethereum RPC functions
"""
payload = {"jsonrpc": "2.0",
"method": method,
"id": 1}
if txParameters:
payload["params"] = [txParameters]
headers = {'Content-type': 'application/json'}
response = requests.post(RPCaddress, json=payload, headers=headers)
response_json = response.json()
if ifPrint:
print('raw json response: {}'.format(response_json))
if "error" in response_json:
raise MethodNotExistentError()
else:
return response_json['result']
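
# A minimal usage sketch (assumes a JSON-RPC node reachable at the given address;
# "eth_blockNumber" is a standard Ethereum JSON-RPC method that takes no parameters):
if __name__ == "__main__":
    block_number = curl_post("eth_blockNumber", RPCaddress="http://localhost:8545", ifPrint=True)
    print(block_number)  # hex-encoded block number, e.g. '0x10d4f'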
|
1b403e91cf542127038b7a79b54a28b69105be39
| 27,508 |
from scipy.integrate import quad
def intRoexpwt2(g1, g2, p, w, t):
""" Integral of the roexpwt filter Oxenham & Shera (2003) equation (3)
Parameters
----------
g1, g2 - Limits of the integral in normalized terms (eg.: g1=0.1,g2=0.35)
    p - Slope parameter
    t - Factor by which the second slope is shallower than the first
    w - Relative weight of the two slopes (determines where the 2nd starts to dominate)
Returns
-------
I - Integral of the function
"""
(I, err) = quad(roexpwt, g1, g2, args=(p, w, t))
return I
|
fc6e312a5f5134e43d63569b35fc1e6e7af74084
| 27,509 |
import pymysql
# Config (database connection settings) is assumed to be provided by the surrounding project
def create_connection(query):
"""
    Decorator that executes the SQL built by the decorated function
    :param query: function that returns a query string
    :return: list of result rows (dicts, via DictCursor)
"""
def wrapper(*args, **kargs):
config = Config()
connection = pymysql.connect(host= config.nijo_db_host,
user= config.nijo_db_user,
password= config.nijo_db_pass,
db= config.nijo_db_name,
charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
with connection.cursor() as cursor:
cursor.execute(query(*args, **kargs))
rows = cursor.fetchall()
connection.commit()
connection.close()
return rows
return wrapper
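
# A minimal usage sketch (hypothetical table and columns): the decorated function
# only builds the SQL string; the decorator opens the connection, executes it and
# returns the rows as a list of dicts (DictCursor).
@create_connection
def select_users():
    return "SELECT id, name FROM users LIMIT 10"
# rows = select_users()  # requires a reachable MySQL server configured via Config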
|
8e6d0733eef3210b2ee074430cf79dd3bdbf8dfc
| 27,510 |
def numentries(arrays):
"""
    Counts the number of entries in a typical `arrays` mapping read from a ROOT file,
by looking at the length of the first key
"""
return arrays[list(arrays.keys())[0]].shape[0]
|
e3c9f2e055f068f12039741ff9bb1091716263d5
| 27,511 |
import torch
def gen_gcam_target(imgs, model, target_layer='layer4', target_index=None, classes=get_imagenet_classes(), device='cuda', prep=True):
"""
Visualize model responses given multiple images
"""
# Get model and forward pass
gcam, probs, ids, images = gen_model_forward(imgs, model, device=device, prep=prep, type='gcam')
ids_ = torch.LongTensor([[x] for x in target_index]).to(device)
gcam.backward(ids=ids_)
regions = gcam.generate(target_layer=target_layer)
masks=[]
for j in range(len(images)):
mask = save_gradcam(
gcam=regions[j, 0]
)
masks += [mask]
if len(masks) == 1:
return masks[0]
return masks
|
f685b17b030643fbd29eed0b69be140422b3e730
| 27,513 |
def get_insert_query(table_name):
"""Build a SQL query to insert a RDF triple into a PostgreSQL dataset"""
return f"INSERT INTO {table_name} (subject,predicate,object) VALUES (%s,%s,%s) ON CONFLICT (subject,predicate,object) DO NOTHING"
|
423ccbf1d69e85316abdb81207d6e0f04729c2b8
| 27,514 |
import torch
def q_mult(q1, q2):
"""Quaternion multiplication."""
w = q1[0] * q2[0] - q1[1] * q2[1] - q1[2] * q2[2] - q1[3] * q2[3]
x = q1[1] * q2[0] + q1[0] * q2[1] + q1[2] * q2[3] - q1[3] * q2[2]
y = q1[0] * q2[2] - q1[1] * q2[3] + q1[2] * q2[0] + q1[3] * q2[1]
z = q1[0] * q2[3] + q1[1] * q2[2] - q1[2] * q2[1] + q1[3] * q2[0]
return torch.stack((w, x, y, z))
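
# A minimal usage sketch: multiplying by the identity quaternion (w=1, x=y=z=0)
# leaves a quaternion unchanged; components are ordered (w, x, y, z).
if __name__ == "__main__":
    identity = torch.tensor([1.0, 0.0, 0.0, 0.0])
    q = torch.tensor([0.7071, 0.7071, 0.0, 0.0])
    print(q_mult(identity, q))  # ~ tensor([0.7071, 0.7071, 0.0000, 0.0000])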
|
cafafa392d9e41e7680c703415ed6df87207f0d0
| 27,515 |
def is_object_group(group):
"""True if the group's object name is not one of the static names"""
return not group.name.value in (IMAGE, EXPERIMENT, OBJECT_RELATIONSHIPS)
|
d111781f39feef74698b625186f3937a85fa8713
| 27,516 |
import time
def plugin_poll(handle):
""" Extracts data from the sensor and returns it in a JSON document as a Python dict.
Available for poll mode only.
Args:
handle: handle returned by the plugin initialisation call
Returns:
returns a sensor reading in a JSON document, as a Python dict, if it is available
None - If no reading is available
Raises:
Exception
"""
global _handle, _restart_config
bluetooth_adr = _handle['bluetoothAddress']['value']
tag = _handle['tag']
asset_prefix = '{}'.format(_handle['assetNamePrefix']['value']).replace('%M', bluetooth_adr)
try:
if not tag.is_connected:
raise RuntimeError("SensorTagCC2650 {} not connected".format(bluetooth_adr))
time_stamp = utils.local_timestamp()
data = list()
# In this method, cannot use "handle" as it might have changed due to restart. Hence use "_handle".
if _handle['temperatureSensor']['value'] == 'true':
count = 0
while count < SensorTagCC2650.reading_iterations:
object_temp_celsius, ambient_temp_celsius = tag.hex_temp_to_celsius(
tag.char_read_hnd(_handle['characteristics']['temperature']['data']['handle'], "temperature"))
time.sleep(0.5) # wait for a while
count = count + 1
data.append({
'asset': '{}{}'.format(asset_prefix, _handle['temperatureSensorName']['value']),
'timestamp': time_stamp,
'readings': {"object": object_temp_celsius, 'ambient': ambient_temp_celsius}
})
if _handle['luminanceSensor']['value'] == 'true':
lux_luminance = tag.hex_lux_to_lux(
tag.char_read_hnd(_handle['characteristics']['luminance']['data']['handle'], "luminance"))
data.append({
'asset': '{}{}'.format(asset_prefix, _handle['luminanceSensorName']['value']),
'timestamp': time_stamp,
'readings': {"lux": lux_luminance}
})
if _handle['humiditySensor']['value'] == 'true':
rel_humidity, rel_temperature = tag.hex_humidity_to_rel_humidity(
tag.char_read_hnd(_handle['characteristics']['humidity']['data']['handle'], "humidity"))
data.append({
'asset': '{}{}'.format(asset_prefix, _handle['humiditySensorName']['value']),
'timestamp': time_stamp,
'readings': {"humidity": rel_humidity, "temperature": rel_temperature}
})
if _handle['pressureSensor']['value'] == 'true':
bar_pressure = tag.hex_pressure_to_pressure(
tag.char_read_hnd(_handle['characteristics']['pressure']['data']['handle'], "pressure"))
data.append({
'asset': '{}{}'.format(asset_prefix, _handle['pressureSensorName']['value']),
'timestamp': time_stamp,
'readings': {"pressure": bar_pressure}
})
if _handle['movementSensor']['value'] == 'true':
gyro_x, gyro_y, gyro_z, acc_x, acc_y, acc_z, mag_x, mag_y, mag_z, acc_range = tag.hex_movement_to_movement(
tag.char_read_hnd(_handle['characteristics']['movement']['data']['handle'], "movement"))
data.append({
'asset': '{}{}'.format(asset_prefix, _handle['gyroscopeSensorName']['value']),
'timestamp': time_stamp,
'readings': {"x": gyro_x, "y": gyro_y, "z": gyro_z}
})
data.append({
'asset': '{}{}'.format(asset_prefix, _handle['accelerometerSensorName']['value']),
'timestamp': time_stamp,
'readings': {"x": acc_x, "y": acc_y, "z": acc_z}
})
data.append({
'asset': '{}{}'.format(asset_prefix, _handle['magnetometerSensorName']['value']),
'timestamp': time_stamp,
'readings': {"x": mag_x, "y": mag_y, "z": mag_z}
})
if _handle['batteryData']['value'] == 'true':
battery_level = tag.get_battery_level(
tag.char_read_hnd(_handle['characteristics']['battery']['data']['handle'], "battery"))
data.append({
'asset': '{}{}'.format(asset_prefix, _handle['batterySensorName']['value']),
'timestamp': time_stamp,
'readings': {"percentage": battery_level}
})
except (Exception, RuntimeError, pexpect.exceptions.TIMEOUT) as ex:
_plugin_restart(bluetooth_adr)
raise ex
return data
|
05f3974c919fdd9c5ee4b853b37bf07bbd45738c
| 27,517 |
def info(token, customerid=None):
""" Returns the info for your account
:type token: string
:param token: Your NodePing API token
:type customerid: string
:param customerid: Optional subaccount ID for your account
:return: Return contents from the NodePing query
:rtype: dict
"""
url = "{0}accounts".format(API_URL)
valid_token = _query_nodeping_api.get(
_utils.create_url(token, url, customerid))
return valid_token
|
d106bf166e846bc2b3627af4e43e74074a519cd1
| 27,519 |