content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def merge_schemas(a, b, path=None):
"""Recursively zip schemas together
"""
path = path if path is not None else []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge_schemas(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass
else:
print("Overriding '{}':\n\t- {}\n\t+ {}".format(
'.'.join(path + [str(key)]), a[key], b[key]))
a[key] = b[key]
else:
print("Adding '{}':\n\t+ {}".format(
'.'.join(path + [str(key)]), b[key]))
a[key] = b[key]
return a
|
8915f5e6fa0c352379852b088a9fe111fc27a719
| 35,847 |
def resample_dataarray2d_to_vertex_grid(da_in, gridprops=None,
xyi=None, cid=None,
method='nearest',
**kwargs):
"""resample a 2d dataarray (xarray) from a structured grid to a new
dataaraay of a vertex grid.
Parameters
----------
da_in : xarray.DataArray
data array with dimensions (y, x). y and x are from the original
grid
gridprops : dictionary, optional
dictionary with grid properties output from gridgen.
xyi : numpy.ndarray, optional
        array with x and y coordinates of cell centers, shape (len(cid), 2). If
        xyi is None, xyi is calculated from the grid properties.
cid : list or numpy.ndarray, optional
        list with cell ids. If cid is None, cid is calculated from the
        grid properties.
method : str, optional
type of interpolation used to resample. The default is 'nearest'.
Returns
-------
da_out : xarray.DataArray
data array with dimension (cid).
"""
if (xyi is None) or (cid is None):
xyi, cid = mgrid.get_xyi_cid(gridprops)
# get x and y values of all cells in dataarray
mg = np.meshgrid(da_in.x.data, da_in.y.data)
points = np.vstack((mg[0].ravel(), mg[1].ravel())).T
# regrid
arr_out = griddata(points, da_in.data.flatten(), xyi, method=method,
**kwargs)
# new dataset
da_out = xr.DataArray(arr_out, dims=('cid'),
coords={'cid': cid})
return da_out
|
f2dacc6cad1fa10ee9014668b4aab853ab60f01f
| 35,848 |
def an_capabilities(b: bytes) -> list:
""" Decode autonegotiation capabilities
Args:
        b: coded autonegotiation capabilities (2 bytes)
Returns:
        human readable list of capabilities
"""
cap: list = []
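    # Combine the two bytes into a 16-bit capability bitmap (high byte first).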
i: int = (b[0] << 8) + b[1]
cap_list = ['1000BASE-T (full duplex mode)',
'1000BASE-T (half duplex mode)',
'1000BASE-X (-LX, -SX, -CX full duplex mode)',
'1000BASE-X (-LX, -SX, -CX half duplex mode)',
'Asymmetric and Symmetric PAUSE (for full-duplex links)',
'Symmetric PAUSE (for full-duplex links)',
'Asymmetric PAUSE (for full-duplex links)',
'PAUSE (for full-duplex links)',
'100BASE-T2 (full duplex mode)',
'100BASE-T2 (half duplex mode)',
'100BASE-TX (full duplex mode)',
'100BASE-TX (half duplex mode)',
'100BASE-T4',
'10BASE-T (full duplex mode)',
'10BASE-T (half duplex mode)',
'Other or unknown']
for bit in range(len(cap_list)):
if (i & (2**bit) ) > 0:
cap.append(cap_list[bit])
return cap
|
1e8b60582ad27ab6c1feafaac3992c4dc0550bbf
| 35,851 |
def peer_count(interface_name: str) -> int:
"""Number of peers in the mesh"""
with mesh_client() as client:
return len(client.getPeers(interface_name))
|
1caa3f916344a6fc919dbe18559e6f4606f53316
| 35,852 |
def german_words():
"""Provides same known words list as used by the main script."""
return get_dictionary(language="de")
|
4a87fed3c451fa481373c209b788d1032e3bf7c0
| 35,853 |
def result():
"""Get results when ready regarding a previously submitted task."""
# Retrieve JSON parameters data.
data = request.get_json() or {}
data.update(dict(request.values))
tid = data.get("tid")
if not tid:
raise abort(400, "missing 'tid' data")
# Get the result (if exists and finished).
result = tasks.process_message.AsyncResult(tid)
# Return status and result if available.
resp = {
"status": result.status,
"result": None,
}
if result.ready():
resp["result"] = result.get()
return resp
|
809ac8bc762fee45e4fcfb237085a4b998b328e9
| 35,855 |
def pushWPStoArray(aln, halfWPSwindow, start, end, tssWindow, isize, wpsWindow):
"""
for a given alignment, compute the regions that can be fully aligned and not
    e.g. [-1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1] for a wps window -f 6 (half window 3);
    this will be added to the defined transcription start site wps score array after
    adjusting for the position
"""
tssWindow = end - start
transcriptAlnWPS = np.zeros(tssWindow) # setting the tss window as zeros wps array
alnWPS = np.zeros(isize + wpsWindow) #adding halfwindow to both side of the alignment
alnWPS[wpsWindow:-wpsWindow] = 1 # only half window after aln start, the alignment is fully covering the wps window
# and we added half window on the previous line
alnWPS[alnWPS != 1 ] = -1 #making the alignment wps with ends containing windows
alnShift = start - (aln.pos - halfWPSwindow) # the distance between alignment start and right side of the wps window:
# + is left side of the window start, - is to the right
if alnShift >= 0:
wps = alnWPS[alnShift:]
end = len(wps) if len(wps) < tssWindow else tssWindow
transcriptAlnWPS[:end] += wps[:end]
else:
baseShifted = abs(alnShift)
end = tssWindow if baseShifted + len(alnWPS) > tssWindow else baseShifted + len(alnWPS)
alignedBases = tssWindow + alnShift
wps = alnWPS[:alignedBases]
transcriptAlnWPS[baseShifted:end] += wps
return transcriptAlnWPS
|
2f94dfa762f9997793a9a0d11bc47a375a06283f
| 35,856 |
def usgs_lithium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Exports3", "Imports3", "Production"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['lithium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports3":
prod = "exports"
elif df.iloc[index]["Production"].strip() == "Imports3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
|
17da5e3f8f25b2a2477f6eea4656ad28c307b760
| 35,858 |
import collections
def convert_keys_to_string(dictionary):
""" Recursively converts dictionary keys to strings.
Utility to help deal with unicode keys in dictionaries created from json requests.
In order to pass dict to function as **kwarg we should transform key/value to str.
"""
if isinstance(dictionary, basestring):
return str(dictionary)
elif isinstance(dictionary, collections.Mapping):
return dict(map(convert_keys_to_string, dictionary.iteritems()))
elif isinstance(dictionary, collections.Iterable):
return type(dictionary)(map(convert_keys_to_string, dictionary))
else:
return dictionary
|
01f15be1419d21758e215216b41819e5c4dbcf0f
| 35,859 |
def detectron2_available() -> bool:
"""
Returns True if Detectron2 is installed
"""
return bool(_DETECTRON2_AVAILABLE)
|
16531363e7728b02fb5639d93d66f82cc7647b8b
| 35,860 |
from operator import or_
def delete_user_by_id_and_name(id):
""" username ๆ่
id ๅ ้ค็จๆท """
user = User.query.filter(or_(User.id.like(id), User.username.like(id))).first_or_404()
user.delete()
return user
|
d23485963e1b2b54957532aa8ea3ccb7575e0d07
| 35,861 |
def branch(default='master'):
"""used in the `git.latest` state for the `branch` attribute.
If a specific revision exists DON'T USE THE BRANCH VALUE.
There will always be a branch value, even if it's the 'master' default"""
if cfg('project.revision'):
return '' # results in a None value for git.latest
return cfg('project.branch', default)
|
f1e1cbe73e27955c4e571115a72108aba9ba7b00
| 35,862 |
import requests
def instance_types(gvar):
"""
List EC2 instance types for the specified cloud.
"""
mandatory = ['-cn']
required = []
optional = ['-CSEP', '-CSV', '-g', '-H', '-h', '-itc', '-itf', '-itmn', '-itmx', '-itos', '-itp', '-itpm', '-NV', '-ok', '-r', '-s', '-V', '-VC', '-v', '-v', '-x509', '-xA', '-w']
if gvar['retrieve_options']:
return mandatory + required + optional
key_map = {
'-cn': 'cloud_name',
'-itc': 'cores',
'-itf': 'families',
'-itmn': 'memory_min_gigabytes_per_core',
'-itmx': 'memory_max_gigabytes_per_core',
'-itos': 'operating_systems',
'-itp': 'processors',
'-itpm': 'processor_manufacturers',
}
# Check for missing arguments or help required.
form_data, updates = get_form_data_and_update_count(
gvar,
mandatory,
required,
optional,
key_map=key_map,
query_keys=['cloud_name'])
# Retrieve data (possibly after changing the filters).
if updates > 0:
response = requests(
gvar,
'/ec2/instance-types/',
form_data
)
else:
response = requests(gvar, '/ec2/instance-types/', query_data={'cloud_name': gvar['user_settings']['cloud-name']})
if response['message']:
print(response['message'])
# Print report.
show_active_user_groups(gvar, response)
show_table(
gvar,
response['ec2_instance_type_filters'],
[
'group_name/Group,k',
'cloud_name/Cloud,k',
'families/Families',
        'operating_systems/Operating Systems',
'processors/Processors',
'processor_manufacturers/Processor Manufacturers',
'cores/Cores',
'memory_min_gigabytes_per_core/Min/Memory (GiB per core)',
'memory_max_gigabytes_per_core/Max/Memory (GiB per core)',
# 'owner_aliases/Aliases/Owner',
# 'owner_ids/IDs/Owner',
# 'like/Like/Images',
# 'not_like/Not Like/Images',
# 'operating_systems/Operating Systems',
# 'architectures/Architectures',
],
title="EC2 Instance Type Filters",
)
show_table(
gvar,
response['families'],
[
'instance_family/Family',
],
title="Family Filter",
optional=True,
)
show_table(
gvar,
response['operating_systems'],
[
'operating_system/Operating System',
],
title="Operating System Filter",
optional=True,
)
show_table(
gvar,
response['processors'],
[
'processor/Processor',
],
title="Processor Filter",
optional=True,
)
show_table(
gvar,
response['manufacturers'],
[
'processor_manufacturer/Manufacturer',
],
title="Manufacturer Filter",
optional=True,
)
show_table(
gvar,
response['cores'],
[
'cores/Cores',
],
title="Cores Filter",
optional=True,
)
show_table(
gvar,
response['ec2_instance_types'],
[
'region/Region,k',
'instance_type/Instance Type',
'operating_system/Operating System',
'instance_family/Family',
'processor/Processor',
'processor_manufacturer/Manufacturer',
'cores/Cores',
'memory/Memory',
'memory_per_core/Memory per Core',
'storage/Storage',
'cost_per_hour/Cost per Hour',
],
title="EC2 Instance Types",
)
|
676282fa68986a7545ff000f935714e3cae65dca
| 35,863 |
import math
def math_logsumexp(data):
"""
    compute logsumexp using the math module
Args:
data: float array
Returns:
Float
"""
res = []
for i in data:
res.append(math.exp(i))
return math.log(sum(res))
|
44a056d2aaa0298c62cc21ae2e224a974956ed8b
| 35,864 |
def only_numbers(iterable):
"""Returns whether the given iterable contains numbers (or strings that
can be converted into numbers) only."""
return not any(lenient_float(item) is None for item in iterable)
|
d73735ed69aa85bd3982eade6a15de1bf76afcf4
| 35,865 |
def gaussian(birth, pers, mu=None, sigma=None):
""" Optimized bivariate normal cumulative distribution function for computing persistence images using a Gaussian kernel.
Parameters
----------
birth : (M,) numpy.ndarray
Birth coordinate(s) of pixel corners.
pers : (N,) numpy.ndarray
Persistence coordinates of pixel corners.
mu : (2,) numpy.ndarray
Coordinates of the distribution mean (birth-persistence pairs).
sigma : float or (2,2) numpy.ndarray
Distribution's covariance matrix or the equal variances if the distribution is standard isotropic.
Returns
-------
float
Value of joint CDF at (birth, pers), i.e., P(X <= birth, Y <= pers).
"""
if mu is None:
mu = np.array([0.0, 0.0], dtype=np.float64)
if sigma is None:
sigma = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float64)
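    # Zero cross-covariance: the joint CDF factorises, so the cheaper diagonal-covariance routine can be used.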
if sigma[0][1] == 0.0:
return sbvn_cdf(birth, pers,
mu_x=mu[0], mu_y=mu[1], sigma_x=sigma[0][0], sigma_y=sigma[1][1])
else:
return bvn_cdf(birth, pers,
mu_x=mu[0], mu_y=mu[1], sigma_xx=sigma[0][0], sigma_yy=sigma[1][1], sigma_xy=sigma[0][1])
|
1c5321b31b7efdc501df72ee77fad17fbcc34056
| 35,866 |
def login():
"""
TASKS: write the logic here to parse a json request
and send the parsed parameters to the appropriate service.
return a json response and an appropriate status code.
"""
data = request.get_json()
user = User.objects(username=data.get('username')).first()
if user:
auth = check_password_hash(pwhash=user.password, password=data.get('password'))
if auth is False:
return jsonify({"message": "username or password does not match, please try again"}), 401
return jsonify({"message": "User has logged in successfully"}), 200
return jsonify({"message": "User does not exist"}), 400
|
3a8e2c385d77705c5823e771c36311856bce2cae
| 35,867 |
def _process_rules_list(rules, match_rule):
"""Recursively walk a policy rule to extract a list of match entries."""
if isinstance(match_rule, policy.RuleCheck):
rules.append(match_rule.match)
elif isinstance(match_rule, policy.AndCheck):
for rule in match_rule.rules:
_process_rules_list(rules, rule)
return rules
|
67ffaac731e709e39b3120fd32ee3a0ad8dae299
| 35,869 |
def get_memoryview_and_address(data):
"""Get a memoryview for the given data and its memory address.
The data object must support the buffer protocol.
"""
# To get the address from a memoryview, there are multiple options.
# The most obvious is using ctypes:
#
# c_array = (ctypes.c_uint8 * nbytes).from_buffer(m)
# address = ctypes.addressof(c_array)
#
# Unfortunately, this call fails if the memoryview is readonly, e.g. if
# the data is a bytes object or readonly numpy array. One could then
# use from_buffer_copy(), but that introduces an extra data copy, which
# can hurt performance when the data is large.
#
# Another alternative that can be used for objects implementing the array
# interface (like numpy arrays) is to directly read the address:
#
# address = data.__array_interface__["data"][0]
#
# But what seems to work best (at the moment) is using cffi.
# Convert data to a memoryview. That way we have something consistent
# to work with, which supports all objects implementing the buffer protocol.
m = memoryview(data)
# Test that the data is contiguous.
# Note that pypy does not have the contiguous attribute, so we assume it is.
if not getattr(m, "contiguous", True):
raise ValueError("The given texture data is not contiguous")
# Get the address via ffi. In contrast to ctypes, this also
# works for readonly data (e.g. bytes)
c_data = ffi.from_buffer("uint8_t []", m)
address = int(ffi.cast("uintptr_t", c_data))
return m, address
|
7898bb5d79e8b839b1009ff549eb58ee8c2f71ed
| 35,870 |
def get_inv_dist_mat(node_list):
"""
    Get pairwise inverse distance matrix for specified nodes in node list.
Args:
node_list (list): Nodes for which to compute the pairwise distances
Returns:
        (numpy.ndarray): Matrix of pairwise inverse distances
"""
# Initialize array.
dist_mat = np.zeros((len(node_list), len(node_list)), dtype=float)
# Compute pairwise distances
for idx1 in range(len(node_list)-1):
for idx2 in range(idx1+1, len(node_list)):
dist_mat[idx1, idx2] = dist_mat[idx2, idx1] = 1/dist_func(node_list[idx1], node_list[idx2])
# Return computed distance matrix.
return dist_mat
|
27b600ef8ce03a0ea3a8c791e6cedcd7091e3a5b
| 35,871 |
import torch
def stableSoftMax(x):
"""
stableSoftMax computes a normalized softmax
:param x: Tensor List
:return: Tensor List
"""
x = torch.exp(x - torch.max(x))
return x/torch.sum(x)
|
fa1e017812b7fd0c4e964eafb3fd59eae141203b
| 35,872 |
def is_item(var):
"""
is this a single item
"""
return is_str(var) or (not is_iterable(var))
|
3a56cb9832a77c77271087b5f653a9ef7b1a40a9
| 35,873 |
def _str_plot_fields(val, f, field_filter):
"""
get CSV representation of fields used by _str_plot
:returns: list of fields as a CSV string, ``str``
"""
s = _sub_str_plot_fields(val, f, field_filter)
if s is not None:
return "time,"+s
else:
return 'time,'
|
2d1be13fb801ea03ec34ef3810d6cc6dd11782b9
| 35,874 |
def cypher_repr(value, **kwargs):
""" Return the Cypher representation of a value.
This function attempts to convert the supplied value into a Cypher
literal form, as used in expressions.
"""
encoder = CypherEncoder(**kwargs)
return encoder.encode_value(value)
|
7e0a986206236399901c77d467c1c726573a1b33
| 35,875 |
def compareResultByTimeTupleRangesAndFlags(result, check, dateOnly=False):
"""
Ensures that flags are an exact match and time tuples a close match when
given data in the format ((timetuple), (timetuple), flag)
"""
return (_compareTimeTuples(result[0], check[0], dateOnly) and
_compareTimeTuples(result[1], check[1], dateOnly) and
_compareFlags(result[2], check[2]))
|
e2be1b7bdec5dcdaef8746349b4e7a56d54c8c19
| 35,876 |
def check_valid_title(title):
"""Checks if the title contains valid content"""
title_issues = TitleIssues(title_contains_nsfw=title_contains_nsfw(title))
return title_issues
|
39bc77dce1a9136a7ab70c802bb90416655043b8
| 35,877 |
from typing import Any
import typing
from typing import Dict
def ValueWidget(value: Any = None, on_value: typing.Callable[[Any], Any] = None) -> Element[ipywidgets.widgets.valuewidget.ValueWidget]:
"""Widget that can be used for the input of an interactive function
:param value: The value of the widget.
"""
kwargs: Dict[Any, Any] = without_default(ValueWidget, locals())
widget_cls = ipywidgets.widgets.valuewidget.ValueWidget
comp = react.core.ComponentWidget(widget=widget_cls)
return Element(comp, **kwargs)
|
6317cdb731902f9ae24435ad290e136f75680005
| 35,878 |
def list_all_volumes():
"""Lists all available issues and volumes from the VA registry webpage. Only returns
volumes that are available on HTML.
Returns
-------
volumes : dict
A dictionary where the volumes are the keys and the issues are list entries.
"""
volumes = set()
html = get_page(VA_REGISTRY_PAGE)
details = html.find_all(class_="archiveDetail")
for line in details:
links = line.find_all("a")
for link in links:
if link.text != "PDF":
volume, issue = tuple(link["href"].split("=")[-1].split(":"))
volumes.add((volume, issue))
break
return volumes
|
5dea8c7db1089a427282c7c76f780d37895f689f
| 35,879 |
def get_distribution(dist):
"""Return a PyMC distribution."""
if isinstance(dist, str):
if hasattr(pm, dist):
dist = getattr(pm, dist)
else:
raise ValueError(f"The Distribution '{dist}' was not found in PyMC")
return dist
|
fe8f120be29690d3ae33dfb974369b9f068cb325
| 35,880 |
from datetime import datetime, timedelta
def login_required(f):
"""
Validates the JWT and ensures that is has not expired and the user is still active.
:param f:
:return:
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not request.headers.get("Authorization"):
response = jsonify(message="Missing authorization header")
response.status_code = 401
return response
try:
token = request.headers.get("Authorization").split()[1]
except Exception as e:
return dict(message="Token is invalid"), 403
try:
header_data = fetch_token_header(token)
payload = jwt.decode(token, current_app.config["LEMUR_TOKEN_SECRET"], algorithms=[header_data["alg"]])
except jwt.DecodeError:
return dict(message="Token is invalid"), 403
except jwt.ExpiredSignatureError:
return dict(message="Token has expired"), 403
except jwt.InvalidTokenError:
return dict(message="Token is invalid"), 403
if "aid" in payload:
access_key = api_key_service.get(payload["aid"])
if access_key.revoked:
return dict(message="Token has been revoked"), 403
if access_key.ttl != -1:
current_time = datetime.utcnow()
# API key uses days
expired_time = datetime.fromtimestamp(access_key.issued_at) + timedelta(days=access_key.ttl)
if current_time >= expired_time:
return dict(message="Token has expired"), 403
if access_key.application_name:
g.caller_application = access_key.application_name
user = user_service.get(payload["sub"])
if not user.active:
return dict(message="User is not currently active"), 403
g.current_user = user
if not g.current_user:
return dict(message="You are not logged in"), 403
# Tell Flask-Principal the identity changed
identity_changed.send(
current_app._get_current_object(), identity=Identity(g.current_user.id)
)
return f(*args, **kwargs)
return decorated_function
|
e5582f78afa7acb0d2f7a8875d6f15bf2d9b73b1
| 35,882 |
from typing import List
from typing import Dict
def single__intent_topk_accuracy_score(
intent_prediction: List[Dict[str, str]],
y_true: List[str],
k: int = 1,
) -> float:
"""Compute the Accuracy of a single utterance with multi-intents
Accuracy of a single utterance is defined as the proportion of
correctly predicted labels to the total number (predicted and true)
of labels. It can be formulated as
.. math::
        \\text{Accuracy of single utterance}=\\frac{|\\text{pred}_i \\cap \\text{true}_i|}{|\\text{true}_i \\cup \\text{pred}_i|}
Args:
intent_prediction (a list of dictionaries):
A sorted intent prediction (by score) of a single utterance.
y_true (a list of strings):
The corresponding true intent of that utterance.
Note that it can be more than one intents.
k (an integer):
The top k prediction of intents we take for computing accuracy.
Returns:
accuracy score (a float):
accuracy of a single utterance given top k prediction.
Examples:
>>> intent_prediction, _ = model.predict("I like apple.")
>>> print(intent_prediction)
[
{"intent": "blabla", "score": 0.7},
{"intent": "ohoh", "score": 0.2},
{"intent": "preference", "score": 0.1},
]
>>> accuracy = single__intent_topk_accuracy_score(
intent_prediction=intent_prediction,
y_true=["preference", "ohoh", "YY"],
k=2,
)
>>> print(accuracy)
0.2499999
""" # noqa
top_k_pred = [pred["intent"] for pred in intent_prediction[: k]]
accuracy_score = (
len(set(y_true) & set(top_k_pred)) /
len(set(y_true) | set(top_k_pred))
)
return accuracy_score
|
c3a1c79692ef2031efc41cf6eb00c1e7139e0b20
| 35,883 |
def safeCellValue(cell, level=warning):
"""
์
๊ฐ์ฒด๋ก๋ถํฐ ๊ฐ์ ์์ ํ๊ฒ ์ป๊ธฐ ์ํ ํจ์
- ์์ ํ์ง ์์ ๊ฒฝ์ฐ, ๊ณต๋ฐฑ ๋ฌธ์์ด์ ๋ฐํ
"""
cellType = type(cell)
if cellType is openpyxl.cell.Cell or cellType is ReadOnlyCell:
noneSpaceString = str(cell.value).strip()
if noneSpaceString not in kIgnoreCharacters:
return noneSpaceString
else:
# level.print("์
๊ฐ์ด ์กด์ฌํ์ง ์์ต๋๋ค (%s)" % cell.coordinate)
pass
else:
level.print("์
์ด ์๋ ๊ฐ์ฒด๋ฅผ ํธ์ถํ๊ณ ์์ต๋๋ค (safeCellValue)")
return ""
|
df7e636d269c29180faec548d7acf308fed4351d
| 35,884 |
def _get_port(config):
"""Get the server's port from configuration."""
if not config.has_option("server", "port"):
return None
port = config.getint("server", "port")
return port
|
bee579fcfc82ea80c593dc7bd93ff3d39e63ef7b
| 35,885 |
def add_rectangular_plane(
center_loc=(0, 0, 0), point_to=(0, 0, 1), size=(2, 2), name=None):
"""Adds a rectangular plane specified by its center location, dimensions,
and where its +z points to.
Args:
center_loc (array_like, optional): Plane center location in world
coordinates.
point_to (array_like, optional): Point in world coordinates to which
plane's +z points.
size (array_like, optional): Sizes in x and y directions (0 in z).
name (str, optional): Plane name.
Returns:
bpy_types.Object: Plane added.
"""
bpy = preset_import('bpy', assert_success=True)
Vector = preset_import('Vector', assert_success=True)
center_loc = np.array(center_loc)
point_to = np.array(point_to)
size = np.append(np.array(size), 0)
bpy.ops.mesh.primitive_plane_add(location=center_loc)
plane_obj = bpy.context.object
if name is not None:
plane_obj.name = name
plane_obj.dimensions = size
# Point it to target
direction = Vector(point_to) - plane_obj.location
# Find quaternion that rotates plane's 'Z' so that it aligns with
# `direction`. This rotation is not unique because the rotated plane can
# still rotate about direction vector. Specifying 'Y' gives the rotation
# quaternion with plane's 'Y' pointing up
rot_quat = direction.to_track_quat('Z', 'Y')
plane_obj.rotation_euler = rot_quat.to_euler()
# Scene update necessary, as matrix_world is updated lazily
bpy.context.view_layer.update()
return plane_obj
|
66d5410949d6702284e5b8027150129006691e86
| 35,886 |
def locations_3d_to_view(locations, extrinsic_matrix, intrinsic_matrix):
""" Transforms 3D locations to 2D camera view."""
world_points = np.ones((4, len(locations)))
for i in range(len(locations)):
world_points[0][i] = locations[i].x
world_points[1][i] = locations[i].y
world_points[2][i] = locations[i].z
# Convert the points to the sensor coordinates.
transformed_points = np.dot(
np.linalg.inv(extrinsic_matrix), world_points)
# Convert the points to an unreal space.
unreal_points = np.concatenate([
transformed_points[1, :],
-transformed_points[2, :],
transformed_points[0, :]
])
# Convert to screen points.
screen_points = np.dot(intrinsic_matrix, unreal_points)
screen_points[0] /= screen_points[2]
screen_points[1] /= screen_points[2]
screen_locations = []
for i in range(len(locations)):
screen_locations.append(Location(float(screen_points[0, i]),
float(screen_points[1, i]),
float(screen_points[2, i])))
return screen_locations
|
fc5f3c781641366a840bcd1db72704ff804b17c2
| 35,887 |
def superkeyword_presence(document, superkeywords):
"""Return 1 if document contains any superkeywords, 0 if not."""
for word in superkeywords:
if word in document.split():
return True
return False
|
4b3223190651873d27562cc475ff623aa4cb5b47
| 35,888 |
def runTest(numClients, numServers, scripts, numFailures, runIndex):
"""
Run a single test of the Fault Tolerant SimpleFileLockService
This takes numClients, numServers, and the test scripts to run.
It SSHs into the requested number of servers and starts up the
Fault Tolerant SimpleFileLockService on each server. It then
starts up the requested number of clients.
"""
clientSSH = [None]*numClients
thread = [None]*numClients
serverSSH = [None]*numServers
# Loop through clients and kill any previously running processes
for i in range(0,numClients):
# Open Client SSH sessions
host = clientPrefix + str(i + 1)
clientSSH[i] = paramiko.SSHClient()
clientSSH[i].set_missing_host_key_policy(paramiko.AutoAddPolicy())
clientSSH[i].connect(hostname=host, username="client", pkey=sshKey)
stdin, stdout, stderr = clientSSH[i].exec_command("killall " + clientBinaryName)
stdout.channel.exit_status_ready()
# Loop through servers and kill any previously running processes
for i in range(0,numServers):
# Open Server SSH sessions
host = serverPrefix + str(i + 1)
serverSSH[i] = paramiko.SSHClient()
serverSSH[i].set_missing_host_key_policy(paramiko.AutoAddPolicy())
serverSSH[i].connect(hostname=host, username="server", pkey=sshKey)
# Need to kill log cabin, remove storage directory, and kill FT SFL service
stdin, stdout, stderr = serverSSH[i].exec_command("bash")
stdin.write("killall " + logCabinBinaryName + "\n")
stdin.write("killall " + ft_serverBinaryName + "\n")
stdin.write("killall " + serverBinaryName + "\n")
stdin.write("exit\n")
stdout.channel.exit_status_ready()
time.sleep(1)
if i == (numServers - 1):
stdin, stdout, stderr = serverSSH[i].exec_command("bash")
stdin.write("rm -rf " + testPath + "/storage\n")
stdin.write("rm -rf " + testPath + "/*.txt\n")
stdin.write("rm -rf " + testPath + "/incarnation*\n")
stdin.write("exit\n")
stdout.channel.exit_status_ready()
time.sleep(1)
# Loop through servers and start up logcabin cluster
for i in range(0,numServers):
host = serverPrefix + str(i + 1)
# Need to startup log cabin as well as FT SFL service
if numServers > 1:
# Bootstrap logCabin to designate an inital leader.
# We'll use the first server for this
if i == 0:
stdin, stdout, stderr = serverSSH[i].exec_command("bash")
cmd = "cd " + testPath + "\n"
print cmd
stdin.write("cd " + testPath + "\n")
cmd = "../logcabin/build/" + logCabinBinaryName + " --config logCabin-" + host + ".conf --bootstrap > log/" + str(runIndex) + "_" + host + "_bootstrap.log 2>&1\n"
print cmd
stdin.write(cmd)
stdin.write("exit\n")
stdout.channel.exit_status_ready()
time.sleep(1)
# Bring up logCabin server on each server
stdin, stdout, stderr = serverSSH[i].exec_command("bash")
stdin.write("cd " + testPath + "\n")
cmd = "nohup ../logcabin/build/" + logCabinBinaryName + " --config logCabin-" + host + ".conf > log/" + str(runIndex) + "_" + host + "_logcabin.log 2>&1 &\n"
print cmd
stdin.write(cmd)
stdin.write("exit\n")
stdout.channel.exit_status_ready()
time.sleep(1)
if i == (numServers - 1):
# Reconfigure the cluster to include all logCabin servers
stdin, stdout, stderr = clientSSH[0].exec_command("bash")
stdin.write("cd " + testPath + "\n")
cmd = "../logcabin/build/Examples/Reconfigure --cluster=" + clusterServersNoSpace + " set " + clusterServers + " > log/" + str(runIndex) + "_client_1_reconfigure.log 2>&1\n"
print cmd
stdin.write(cmd)
stdin.write("exit\n")
stdout.channel.exit_status_ready()
time.sleep(1)
# Start up FT Simple File Locking Service
stdin, stdout, stderr = serverSSH[i].exec_command("bash")
stdin.write("cd " + testPath + "\n")
stdin.write("nohup ../simpleFileLockService/bin/" + ft_serverBinaryName + " > log/" + str(runIndex) + "_" + host + "_simplefilelockservice.log 2>&1 &\n")
stdin.write("exit\n")
stdout.channel.exit_status_ready()
time.sleep(1)
# Just need to startup SFL service
else:
# Start up Simple File Locking Service
stdin, stdout, stderr = serverSSH[i].exec_command("bash")
stdin.write("cd " + testPath + "\n")
cmd = "nohup ../simpleFileLockService/bin/" + serverBinaryName + " 9001 > log/" + str(runIndex) + "_" + host + "_simplefilelockservice.log 2>&1 &\n"
print cmd
stdin.write(cmd)
stdin.write("exit\n")
stdout.channel.exit_status_ready()
time.sleep(.5)
# If a failure is requested, trigger the failure(s)
if numFailures > 0:
print("Failure requested")
# Delay a little to ensure the threads spawned and started executing
time.sleep(.2)
# Kill requested number of servers
for i in range(0,numFailures):
if numServers > 1:
stdin, stdout, stderr = serverSSH[i].exec_command("killall " + logCabinBinaryName)
stdout.channel.exit_status_ready()
# Wait for logcabin to reconfigure without that server
time.sleep(1)
else:
stdin, stdout, stderr = serverSSH[i].exec_command("killall " + serverBinaryName)
stdout.channel.exit_status_ready()
# Loop through the client list and kick off the client threads
for i in range(0,numClients):
clientName = clientPrefix + str(i + 1)
serverName = serverPrefix + str(numServers)
thread[i] = clientThread(i, clientName, serverName, clientSSH[i], scripts[i], runIndex)
thread[i].start()
print "Client " + str(i) + " started"
if numFailures > 0:
# Need to kill clients as well so the script won't hang on the "join" call
time.sleep(5)
for i in range(0,numClients):
stdin, stdout, stderr = clientSSH[i].exec_command("killall " + clientBinaryName)
stdout.channel.exit_status_ready()
print "waiting to join"
# Wait for client threads to exit
for i in range(0,numClients):
thread[i].join()
print "done!"
testPassed = True
time.sleep(2)
if numServers > 1:
for i in range(0,numClients):
outfile = "client_" + str(i+1) + ":BestSpaceOpera.txt"
stdin, stdout, stderr = clientSSH[i].exec_command("bash")
stdin.write("cd " + testPath + "\n")
cmd = "../logcabin/build/Examples/TreeOps --timeout=5 --cluster=" + clusterServersNoSpace + " read " + outfile + " --verbosity=SILENT\n"
print cmd
stdin.write(cmd)
stdin.write("exit\n")
stdout.channel.exit_status_ready()
output = stdout.read()
print output.rstrip()
print goldenFiles[i].rstrip()
if output.rstrip() != goldenFiles[i].rstrip():
testPassed = False
else:
for i in range(0,numClients):
outFile = "/nfsShare/test/client_" + str(i+1) + ':BestSpaceOpera.txt'
try:
f = open(outFile, 'r')
output = f.read()
except:
output = ""
if output != goldenFiles[i]:
testPassed = False
# Close SSH sessions
for i in range(0,numClients):
clientSSH[i].close()
for i in range(0,numServers):
serverSSH[i].close()
return testPassed
|
f8de155564d415e327589bd1d551e3353faca22c
| 35,889 |
def shuf_device_inclusive_scan(data, temp):
"""
Args
----
data: scalar
input for tid
temp: shared memory for temporary work, requires at least
threadcount/wavesize storage
"""
tid = roc.get_local_id(0)
lane = tid & (_WARPSIZE - 1)
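    # The shift by 6 below assumes _WARPSIZE == 64 (one wavefront), matching the lane mask above.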
warpid = tid >> 6
roc.barrier()
# Scan warps in parallel
warp_scan_res = shuf_wave_inclusive_scan(data)
roc.barrier()
# Store partial sum into shared memory
if lane == (_WARPSIZE - 1):
temp[warpid] = warp_scan_res
roc.barrier()
# Scan the partial sum by first wave
if warpid == 0:
temp[lane] = shuf_wave_inclusive_scan(temp[lane])
roc.barrier()
# Get block sum for each wave
blocksum = 0 # first wave is 0
if warpid > 0:
blocksum = temp[warpid - 1]
return warp_scan_res + blocksum
|
a45d889202804493ebb653fce4cf66c3b58a941d
| 35,891 |
def chicago(return_X_y=True):
"""Chicago air pollution and death rate data
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains [['time', 'tmpd', 'pm10median', 'o3median']], with no NaNs
y contains 'death', the deaths per day, with no NaNs
Source:
R gamair package
`data(chicago)`
Notes
-----
https://cran.r-project.org/web/packages/gamair/gamair.pdf
https://rdrr.io/cran/gamair/man/chicago.html
Columns:
death : total deaths (per day).
pm10median : median particles in 2.5-10 per cubic m
pm25median : median particles < 2.5 mg per cubic m (more dangerous).
o3median : Ozone in parts per billion
        so2median : Median sulphur dioxide measurement
time : time in days
tmpd : temperature in fahrenheit
"""
# recommend PoissonGAM
chi = pd.read_csv(PATH + '/chicago.csv', index_col=0).astype(float)
if return_X_y:
chi = chi[['time', 'tmpd', 'pm10median', 'o3median', 'death']].dropna()
X = chi[['time', 'tmpd', 'pm10median', 'o3median']].values
y = chi['death'].values
return X, y
else:
return chi
|
5e31c114f1e935927c052f7ff3fecee4222f33ab
| 35,892 |
def Normalize(normThisData: np.array,toThisData: np.array):
"""Normalize one dataset to another to produce a unitless output.
Args:
normThisData (np.array):
toThisData (np.array):
Returns:
np.array: normalized_data
Notes:
        Currently appears to only work for 1D arrays of shape (m,) and returns a 1D array of the same shape
"""
return (toThisData - normThisData) / toThisData
|
2867d1835caf183b51e7c5dae0766cf6703312ea
| 35,893 |
def bbiboll(df, n=10, k=3):
"""
BBIๅค็ฉบๅธๆ็บฟ bbiboll(10,3)
BBI={MA(3)+ MA(6)+ MA(12)+ MA(24)}/4
ๆ ๅๅทฎMD=ๆ นๅท[โ๏ผBBI-MA(BBI๏ผN)๏ผ^2/N]
UPR= BBI๏ผkรMD
DWN= BBI๏ผkรMD
"""
# pd.set_option('display.max_rows', 1000)
_bbiboll = pd.DataFrame()
_bbiboll['date'] = df.date
_bbiboll['bbi'] = (_ma(df.close, 3) + _ma(df.close, 6) + _ma(df.close, 12) + _ma(df.close, 24)) / 4
_bbiboll['md'] = _md(_bbiboll.bbi, n)
_bbiboll['upr'] = _bbiboll.bbi + k * _bbiboll.md
_bbiboll['dwn'] = _bbiboll.bbi - k * _bbiboll.md
return _bbiboll
|
26b6d83f50ebeecee0f7d671ba896dbc89df0a33
| 35,894 |
def calc_distance(y1, x1, y2, x2):
"""
Calculate distance between two locations using great circle distance.
Notes:
y1 = lat1, x1 = long1
y2 = lat2, x2 = long2
all assumed to be in decimal degrees
        string inputs are accepted and converted to floats below
"""
if y1 == x1 == y2 == x2:
return float(0)
y1 = float(y1)
x1 = float(x1)
y2 = float(y2)
x2 = float(x2)
R = 3958.76 # miles = 6371 km
y1 *= pi / 180.0
x1 *= pi / 180.0
y2 *= pi / 180.0
x2 *= pi / 180.0
# approximate great circle distance with law of cosines
return acos(sin(y1) * sin(y2) + cos(y1) * cos(y2) * cos(x2 - x1)) * R
|
6fbe2a02e1e3fa196e138bcbb28e676a55461cce
| 35,896 |
import unittest
def suite():
"""suite of unittest"""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestDBManager))
return suite
|
9c0a313f137fc2c9530ea3f1a7ce8ad64d4f4a8f
| 35,897 |
def convert_size_string_to_bytes(size):
"""
Convert the given size string to bytes.
"""
units = [item.lower() for item in SIZE_UNITS]
parts = size.strip().replace(' ', ' ').split(' ')
amount = float(parts[0])
unit = parts[1]
factor = units.index(unit.lower())
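    # factor 0 corresponds to plain bytes; each larger unit scales by another power of 1024.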
if not factor:
return amount
return int((1024 ** factor) * amount)
|
5763a4cc266a66e64f63d5bbe8d73f10edf0a397
| 35,898 |
def dumps(data, expires):
"""ๅ ๅฏ"""
    # create the serializer object
serializer = TimedJSONWebSignatureSerializer(settings.SECRET_KEY, expires)
    # encrypt
token = serializer.dumps(data).decode()
return token
|
d38daca8895e544acea0de22e5e76926c21b34b5
| 35,899 |
def align_data(data):
"""Given dict with lists, creates aligned strings
Args:
data: (dict) data["x"] = ["I", "love", "you"]
(dict) data["y"] = ["O", "O", "O"]
Returns:
data_aligned: (dict) data_align["x"] = "I love you"
data_align["y"] = "O O O "
"""
spacings = [max([len(seq[i]) for seq in data.values()])
for i in range(len(data[list(data.keys())[0]]))]
data_aligned = dict()
# for each entry, create aligned string
for key, seq in data.items():
str_aligned = ''
for token, spacing in zip(seq, spacings):
str_aligned += token + ' ' * (spacing - len(token) + 1)
data_aligned[key] = str_aligned
return data_aligned
|
6be0d9854cf73b4c44a91b2bbe6b4ac1bb76157e
| 35,900 |
def average_surface_distance(mflo, mref):
""" average on points so not reliable if the sampling is unhomogeneous """
pd = polydata_distance(mflo, mref, do_signed=False)
xv = pd.GetPointData().GetArray("Distance")
xn = nps.vtk_to_numpy(xv)
return xn.mean()
|
42e17183d58f05b1c4810f8d995b57e5b24a7bf8
| 35,901 |
def CleanUserUrl(user_url: str) -> str:
"""ๆธ
็ user_url๏ผๅป้คๅ
ถไธญ็็ฉบๆ ผๅๆ ็จๅๆฐ
"""
user_url = user_url.strip()
return user_url.split("?")[0]
|
7c5c2cf5879d4ddfdbd1a60a679a747f162ebe35
| 35,902 |
def get_aqi_pb_24h(pb_24h: float) -> (int, str, str):
"""
Calculates Pb (24h) India AQI
:param pb_24h: Pb average (24h), ppm
:return: Pb India AQI, Effect message, Caution message
"""
cp = __round_down(pb_24h * 1000, 3)
return __get_aqi_general_formula_texts(cp, IN_PB_24H, IN_AQI_EFFECTS, IN_AQI_EFFECTS, IN_AQI)
|
dbd8beadaf57c5a14b1f91854cf004f877389ec4
| 35,903 |
def id_to_ec2_snap_id(snapshot_id):
"""Get or create an ec2 volume ID (vol-[base 16 number]) from uuid."""
if uuidutils.is_uuid_like(snapshot_id):
ctxt = context.get_admin_context()
int_id = get_int_id_from_snapshot_uuid(ctxt, snapshot_id)
return id_to_ec2_id(int_id, 'snap-%08x')
else:
return id_to_ec2_id(snapshot_id, 'snap-%08x')
|
d13a1a4c608c0baf8727a2b5c1d928b8722f71f7
| 35,905 |
def delete_nonestimator_parameters(parameters):
"""Delete non-estimator parameters.
Delete all parameters in a parameter dictionary that are not used for the
actual estimator.
"""
if 'Number' in parameters.keys():
del parameters['Number']
if 'UsePCA' in parameters.keys():
del parameters['UsePCA']
del parameters['PCAType']
if 'ReliefUse' in parameters.keys():
del parameters['ReliefUse']
del parameters['ReliefNN']
del parameters['ReliefSampleSize']
del parameters['ReliefDistanceP']
del parameters['ReliefNumFeatures']
if 'OneHotEncoding' in parameters.keys():
del parameters['OneHotEncoding']
del parameters['OneHotEncoding_feature_labels_tofit']
if 'Imputation' in parameters.keys():
del parameters['Imputation']
del parameters['ImputationMethod']
del parameters['ImputationNeighbours']
if 'SelectFromModel' in parameters.keys():
del parameters['SelectFromModel']
del parameters['SelectFromModel_lasso_alpha']
del parameters['SelectFromModel_estimator']
del parameters['SelectFromModel_n_trees']
if 'Featsel_Variance' in parameters.keys():
del parameters['Featsel_Variance']
if 'FeatPreProcess' in parameters.keys():
del parameters['FeatPreProcess']
if 'FeatureScaling' in parameters.keys():
del parameters['FeatureScaling']
if 'StatisticalTestUse' in parameters.keys():
del parameters['StatisticalTestUse']
del parameters['StatisticalTestMetric']
del parameters['StatisticalTestThreshold']
if 'Resampling_Use' in parameters.keys():
del parameters['Resampling_Use']
del parameters['Resampling_Method']
del parameters['Resampling_sampling_strategy']
del parameters['Resampling_n_neighbors']
del parameters['Resampling_k_neighbors']
del parameters['Resampling_threshold_cleaning']
del parameters['Resampling_n_cores']
if 'random_seed' in parameters.keys():
del parameters['random_seed']
return parameters
|
d84984d182a5945167b8e7880e493aa8fad832b7
| 35,906 |
from pathlib import Path
def get_gene_sequence(gene_name: Path) -> str:
"""Okay, I don't understand how this is suppose to work.
\f
Parameters
----------
gene_name :
Return
------
seq : `str`
"""
    seq = ""
    try:
        with open(gene_name, "r") as f:
            seq = f.read()
        seq = seq.replace("\r\n", "")
    except OSError:
        print(
            f"could not find gene sequence file {gene_name}, "
            f"please see examples and generate one for your gene "
            f"as needed, with this filename"
        )
return seq
|
1ec4b9a2945b3e14dc87ce440734799c87560906
| 35,907 |
def get_service(credentials):
"""Get the service object corresponding to GMail."""
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
user = service.users().getProfile(userId="me").execute()
print("Authenticated user: {0}".format(user["emailAddress"]))
return service
|
f59eaeb46a3f922855c9adc96a10ef2797ce8cbc
| 35,908 |
from typing import Optional
from typing import Sequence
def get_private_application_packages(display_name: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetPrivateApplicationPackagesFilterArgs']]] = None,
package_types: Optional[Sequence[str]] = None,
private_application_id: Optional[str] = None,
private_application_package_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateApplicationPackagesResult:
"""
This data source provides the list of Private Application Packages in Oracle Cloud Infrastructure Service Catalog service.
Lists the packages in the specified private application.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_private_application_packages = oci.servicecatalog.get_private_application_packages(private_application_id=oci_service_catalog_private_application["test_private_application"]["id"],
display_name=var["private_application_package_display_name"],
package_types=var["private_application_package_package_type"],
private_application_package_id=oci_service_catalog_private_application_package["test_private_application_package"]["id"])
```
:param str display_name: Exact match name filter.
:param Sequence[str] package_types: Name of the package type. If multiple package types are provided, then any resource with one or more matching package types will be returned.
:param str private_application_id: The unique identifier for the private application.
:param str private_application_package_id: The unique identifier for the private application package.
"""
__args__ = dict()
__args__['displayName'] = display_name
__args__['filters'] = filters
__args__['packageTypes'] = package_types
__args__['privateApplicationId'] = private_application_id
__args__['privateApplicationPackageId'] = private_application_package_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:servicecatalog/getPrivateApplicationPackages:getPrivateApplicationPackages', __args__, opts=opts, typ=GetPrivateApplicationPackagesResult).value
return AwaitableGetPrivateApplicationPackagesResult(
display_name=__ret__.display_name,
filters=__ret__.filters,
id=__ret__.id,
package_types=__ret__.package_types,
private_application_id=__ret__.private_application_id,
private_application_package_collections=__ret__.private_application_package_collections,
private_application_package_id=__ret__.private_application_package_id)
|
3dc97aa7d3fc27cc2cf12a797d7e7d364cffe210
| 35,909 |
def load_data():
"""
:return: Data frame
"""
# load data
engine = create_engine('sqlite:///data/disaster_response.db')
df = pd.read_sql_table('disaster_response', engine)
return df
|
231a0233d65717d4b171223d1870e2ba7c2822c4
| 35,910 |
def start_child_span(
operation_name: str, tracer=None, parent=None, span_tag=None
):
"""
Start a new span as a child of parent_span. If parent_span is None,
start a new root span.
:param operation_name: operation name
:param tracer: Tracer or None (defaults to opentracing.tracer)
:param parent: parent or None
:param span_tag: optional tags
:return: new span
"""
tracer = tracer or opentracing.tracer
return tracer.start_span(
operation_name=operation_name, child_of=parent, tags=span_tag
)
|
e5465d45800560e601fde1d513c204d5ca1284de
| 35,911 |
def demslv08old():
"""Nonlinear complementarity problem methods
Solve nonlinear complementarity problem on R^2 using semismooth and minmax methods
"""
''' function to be solved'''
def f(z):
x, y = z
fval = np.array([200 * x * (y - x ** 2) + 1 - x,
100 * (x ** 2 - y)])
fjac = np.array([[200 * (y - x ** 2) - 400 * x ** 2 - 1, 200 * x],
[200 * x, -100]])
return fval, fjac
# Generate problem test data
z = 2 * np.random.randn(2, 2)
a = 1 + np.min(z, 0)
b = 1 + np.max(z, 0)
x0 = np.random.randn(2)
hdr = 'Hundreds of seconds required to solve nonlinear complementarity \n' \
'problem on R^2 using minmax and semismooth formulations, with \n' \
'randomly generated bounds \n\ta = [{:4.2f}, {:4.2f}] \n\tb = [{:4.2f}, {:4.2f}]'
print(hdr.format(a[0], a[1], b[0], b[1]))
print('\nAlgorithm Time Norm x1 x2\n{}'.format('-' * 56));
'''Solve my applying Newton method to minmax formulation '''
t1 = tic()
x, z = ncpsolve(f, a, b, x0, kind='minmax', maxit=1500)
print('Newton minmax {:6.2f} {:8.0e} {:5.2f} {:5.2f}'.format(100*toc(t1), norm(minmax(x, a, b, z)), *x))
'''Solve my applying Newton method to semismooth formulation '''
t2 = tic()
x, z = ncpsolve(f, a, b, x0, maxit=1500)
print('Newton semismooth {:6.2f} {:8.0e} {:5.2f} {:5.2f}'.format(100*toc(t2), norm(minmax(x, a, b, z)), *x))
|
4bf4b37ca8dbf7503443292996cfaf2a1ef4d368
| 35,912 |
import math

# Assumed constants (as in mercantile): WGS84 semi-major axis and radians-to-degrees factor.
A = 6378137.0
R2D = 180.0 / math.pi
def from_mercator(x, y):
"""Convert x,y coordinate from Spherical Mercator to lon, lat
Ported from mercantile.
Parameters
----------
x : float
y : float
Returns
-------
(longitude, latitude)
"""
return (x * R2D / A, ((math.pi * 0.5) - 2.0 * math.atan(math.exp(-y / A))) * R2D)
|
09a6a5ae85290fc230398de5ee0cfa28e8893366
| 35,913 |
def GetMaxIndex(tree):
"""get maximum node number."""
return tree.id
|
f443a9006765dface834aa9120c3ab38cd1d4369
| 35,914 |
def audit_name_starts_with(prefixes):
"""
Given a list of prefixes, returns a function that takes a folder and prints the folder path if the folder name does not start with one of the prefixes.
"""
def action(folder):
        if not any(folder.name.startswith(prefix) for prefix in prefixes):
print(f"{folder.id}, {path_str(folder)}")
return action
|
886af28239aa989bed15d8f5a4462dd94cd1363b
| 35,915 |
def error_handler(error):
"""
Handle errors in views.
"""
return render('error.html'), 500
|
307846d8445f03ac8acb48585ccab5ffdbab972e
| 35,916 |
def save_work(work_id):
"""Save a work"""
if not auth.session_user().can_manage_works:
return Response('User not logged in or not authorized to manage works.',
401)
if int(work_id) == -1:
# New work
work = Work()
else:
# Existing work
work = Work.find(work_id)
work.name = request.form['name']
work.save()
db.get_db().commit()
return redirect(url_for("view_work", work_id=work.id))
|
296ed0eff93ea64b4541b4a84c62f8be673b6e89
| 35,917 |
def get_feedback_expertise_levels(): # noqa: E501
"""Request a list of allowable expertise levels
# noqa: E501
:rtype: ExpertiseLevels
"""
rtxFeedback = RTXFeedback()
return rtxFeedback.getExpertiseLevels()
|
fc76ea2daf885159f4461ae694b7b68e203befdc
| 35,918 |
def get_coord_limits(coord):
"""get cooordinate limits"""
lower_limit = float('.'.join([str(coord).split('.')[0], str(coord).split('.')[1][:2]]))
if lower_limit > 0:
upper_limit = lower_limit + 0.01
else:
tmp = lower_limit - 0.01
upper_limit = lower_limit
lower_limit = tmp
return lower_limit, upper_limit
|
803c0804e34a97d46a9555b4566be72949f55e8d
| 35,919 |
import torch
def decode(loc, priors, use_yolo_regressors:bool=False):
"""
Decode predicted bbox coordinates using the same scheme
employed by Yolov2: https://arxiv.org/pdf/1612.08242.pdf
b_x = (sigmoid(pred_x) - .5) / conv_w + prior_x
b_y = (sigmoid(pred_y) - .5) / conv_h + prior_y
b_w = prior_w * exp(loc_w)
b_h = prior_h * exp(loc_h)
Note that loc is inputed as [(s(x)-.5)/conv_w, (s(y)-.5)/conv_h, w, h]
while priors are inputed as [x, y, w, h] where each coordinate
is relative to size of the image (even sigmoid(x)). We do this
in the network by dividing by the 'cell size', which is just
the size of the convouts.
Also note that prior_x and prior_y are center coordinates which
is why we have to subtract .5 from sigmoid(pred_x and pred_y).
Args:
- loc: The predicted bounding boxes of size [num_priors, 4]
- priors: The priorbox coords with size [num_priors, 4]
Returns: A tensor of decoded relative coordinates in point form
form with size [num_priors, 4]
"""
if use_yolo_regressors:
# Decoded boxes in center-size notation
boxes = torch.cat((
loc[:, :2] + priors[:, :2],
priors[:, 2:] * torch.exp(loc[:, 2:])
), 1)
boxes = point_form(boxes)
else:
variances = [0.1, 0.2]
boxes = torch.cat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
|
8158b29c7557f1bde0e7873a6eace78d039c6d5d
| 35,920 |
def read_relative_file(filename):
"""Returns contents of the given file, whose path is supposed relative
to this module."""
with open(join(dirname(abspath(__file__)), filename)) as f:
return f.read()
|
b20d61e4ddc049c4beeacf106b04d30e3d0cc966
| 35,921 |
import torch
from tqdm import tqdm
def get_activations(data_loader, model, device=None, batch_size=32, resize=False, n_samples=None):
"""Computes the activation of the given images
Args:
        data_loader: Torch data loader yielding (3xHxW) images normalized in the
            range [-1, 1]
        model: network used to compute the activations
        device: torch device to run the model on
        batch_size: batch size used by the data loader
        resize: whether to upsample inputs to 299x299 before the forward pass
        n_samples: optional cap on the number of samples to process
"""
try:
n_batches = len(data_loader)
except TypeError: # data_loader can also be a generator object
n_batches = float('inf')
assert batch_size > 0
if n_samples is not None:
assert n_samples <= n_batches * batch_size
n_batches = int(np.ceil(n_samples / batch_size))
model = model.to(device)
model.eval()
up = nn.Upsample(size=(299, 299), mode='bilinear', align_corners=False).to(device)
def get_feat(x):
with torch.no_grad():
x = x.to(device)
if resize:
x = up(x)
_, out = model(x)
out = out[0].flatten(1, 3)
return out.cpu().numpy()
# Get predictions
feat = []
for batch in tqdm(data_loader, 'Compute activations', total=n_batches):
if len(feat) >= n_batches:
break
if isinstance(batch, tuple) or isinstance(batch, list): # img, label
batch = batch[0]
batch = batch.to(device)
feat_i = get_feat(batch[:, :3]) # rgb only
feat.append(feat_i)
feat = np.concatenate(feat)
if n_samples is not None:
feat = feat[:n_samples]
return feat
|
2755bd34f05c3fb72f4c1da83bdb25d3df2f7662
| 35,922 |
from pathlib import Path
def write(
# Basic setup
input_path,
# preset_nickname=None,
stream_name="",
stream_description="",
output_directory=None,
output_mode="video",
stream_name_file_output=False,
max_cpu_cores=0,
# Stream configuration
compression_enabled=True,
# error_correction=False, -> to be implemented
file_mask_enabled=False,
# Encryption
encryption_key="",
scrypt_n=14,
scrypt_r=8,
scrypt_p=1,
# Stream geometry, color, general config
stream_palette_id='6',
# stream_palette_nickname=None,
pixel_width=24,
block_height=45,
block_width=80,
# Video rendering
frames_per_second=30,
# Logging
logging_level='info',
logging_stdout_output=True,
logging_txt_output=False,
# Session Data
save_statistics=False,
# App
bg_version='1.0.0'
):
"""This is the primary function in creating BitGlitter streams from files. Please see Wiki page or project README
for more information.
"""
config = session.query(Config).first() #<--- only used for logging, see below
constants = session.query(Constants).first()
# Initializing logging, must be up front for logging to work properly.
logging_setter(logging_level, logging_stdout_output, logging_txt_output, Path(config.log_txt_dir))
# This sets the name of the temporary folder while the file is being written, as well as the default output path.
working_directory = Path(constants.WORKING_DIR)
refresh_directory(working_directory)
# This is what takes the raw input files and runs them through several processes in preparation for rendering.
pre_processor = PreProcessor(working_directory, input_path, encryption_key, compression_enabled, scrypt_n, scrypt_r,
scrypt_p)
# This is where the final steps leading up to rendering as well as rendering itself takes place.
render_handler = RenderHandler(stream_name, stream_description, working_directory, output_directory, encryption_key,
scrypt_n, scrypt_r, scrypt_p, block_height, block_width, pixel_width,
stream_palette_id, max_cpu_cores, pre_processor.stream_sha256,
pre_processor.size_in_bytes, compression_enabled, pre_processor.encryption_enabled,
file_mask_enabled, pre_processor.datetime_started, bg_version,
pre_processor.manifest, constants.PROTOCOL_VERSION, output_mode, output_directory,
stream_name_file_output, save_statistics)
# Video render
if output_mode == 'video':
render_video(output_directory, stream_name_file_output, working_directory, render_handler.total_frames,
frames_per_second, pre_processor.stream_sha256, block_width, block_height, pixel_width,
stream_name, render_handler.total_operations)
# Removing temporary files
remove_working_folder(working_directory)
write_done_http()
return pre_processor.stream_sha256
|
f9b7335b5a4fe95d9f53631d78f2b1c9708d5eea
| 35,923 |
def shell_escape(string):
"""
Escape double quotes, backticks and dollar signs in given ``string``.
For example::
        >>> shell_escape('abc$')
        'abc\\\\$'
        >>> shell_escape('"')
'\\\\"'
"""
for char in ('"', '$', '`'):
string = string.replace(char, '\\{}'.format(char))
return string
|
03fcec5cdd99685e821fea11a69a234f3123fd9b
| 35,924 |
def boundary_and_obstacles(start, goal, top_vertex, bottom_vertex, obs_number):
"""
:param start: start coordinate
:param goal: goal coordinate
:param top_vertex: top right vertex coordinate of boundary
:param bottom_vertex: bottom left vertex coordinate of boundary
:param obs_number: number of obstacles generated in the map
:return: boundary_obstacle array, obstacle list
"""
# below can be merged into a rectangle boundary
ay = list(range(bottom_vertex[1], top_vertex[1]))
ax = [bottom_vertex[0]] * len(ay)
cy = ay
cx = [top_vertex[0]] * len(cy)
bx = list(range(bottom_vertex[0] + 1, top_vertex[0]))
by = [bottom_vertex[1]] * len(bx)
dx = [bottom_vertex[0]] + bx + [top_vertex[0]]
dy = [top_vertex[1]] * len(dx)
# generate random obstacles
ob_x = np.random.randint(bottom_vertex[0] + 1,
top_vertex[0], obs_number).tolist()
ob_y = np.random.randint(bottom_vertex[1] + 1,
top_vertex[1], obs_number).tolist()
# x y coordinate in certain order for boundary
x = ax + bx + cx + dx
y = ay + by + cy + dy
obstacle = np.vstack((ob_x, ob_y)).T.tolist()
# remove start and goal coordinate in obstacle list
obstacle = [coor for coor in obstacle if coor != start and coor != goal]
obs_array = np.array(obstacle)
bound = np.vstack((x, y)).T
bound_obs = np.vstack((bound, obs_array))
return bound_obs, obstacle
|
b0203b782e7655184c60d3a9e6c277ea880a2184
| 35,925 |
import logging
import time
def build_uncertain_table(args, scores, timestamp_list, image_path_list):
"""phase 3: build table from detection prediction"""
logging.info('phase 3 start.')
start = time.time()
uncertain_scores = build_uncertain_table_fast(scores)
save_uncertain_table(timestamp_list, uncertain_scores, image_path_list, args.table_path)
end = time.time()
elapsed = end - start
return elapsed
|
43edf61e10718dc6156060851fed3173ab47159f
| 35,926 |
def mvg_logpdf_fixedcov(x, mean, inv_cov):
"""
Log-pdf of the multivariate Gaussian where the determinant and inverse of the covariance matrix are precomputed
and fixed.
Note that this neglects the additive constant: -0.5 * (len(x) * log(2 * pi) + log_det_cov), because it is
irrelevant when comparing pdf values with a fixed covariance, but it means that this is not the normalised pdf.
Args:
x (1D numpy array): Vector value at which to evaluate the pdf.
mean (1D numpy array): Mean vector of the multivariate Gaussian distribution.
inv_cov (2D numpy array): Inverted covariance matrix.
Returns:
float: Log-pdf value.
"""
dev = x - mean
return -0.5 * (dev @ inv_cov @ dev)
|
3b2d256d58f9dce655d8aae9e04f3f4f87030981
| 35,928 |
def exec_flat_python_func(func, *args, **kwargs):
"""Execute a flat python function (defined with def funcname(args):...)"""
# Prepare a small piece of python code which calls the requested function
# To do this we need to prepare two things - a set of variables we can use to pass
# the values of arguments into the calling function, and the list of arguments for
# the function being called
context = {}
funcargs = []
# Handle unnamed arguments
aidx = 1
for arg in args:
argname = 'arg_%s' % aidx
context[argname] = arg
funcargs.append(argname)
aidx += 1
# Handle keyword arguments
context.update(kwargs)
funcargs.extend(['%s=%s' % (arg, arg) for arg in kwargs.keys()])
code = 'retval = %s(%s)' % (func, ', '.join(funcargs))
comp = bb.utils.better_compile(code, '<string>', '<string>')
bb.utils.better_exec(comp, context, code, '<string>')
return context['retval']
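
A hedged usage sketch: this helper is written against BitBake's bb.utils, so it only runs where the bb module is importable, and the function name must resolve in the generated code's execution context (builtins do).

# assumes a BitBake environment where bb.utils is importable
result = exec_flat_python_func('min', 10, 3)   # builds and runs "retval = min(arg_1, arg_2)" -> 3
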
|
9c494daec2172fe59d65e625cc3a1b98656de5df
| 35,929 |
def robust_scale(df):
"""Return copy of `df` scaled by (df - df.median()) / MAD(df) where MAD is a function returning the median absolute deviation."""
median_subtracted = df - df.median()
mad = median_subtracted.abs().median()
return median_subtracted/mad
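
A quick usage sketch with made-up data: the median/MAD scaling leaves the bulk of the column near zero while the single outlier stands out, instead of inflating the spread the way a standard deviation would.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3, 4, 100]})
print(robust_scale(df))
# expected values in column "a": -2.0, -1.0, 0.0, 1.0, 97.0
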
|
ba9ce747612c99997d890930e7ac7c582ba1af70
| 35,930 |
def generate_context_menu_mainmenu(menu_id):
"""Generate context menu items for a listitem"""
items = []
if menu_id == 'myList':
items.append(_ctx_item('force_update_mylist', None))
return items
|
d1cd169ec71a33c7bccf50b611d61b533ebcca54
| 35,931 |
def unroll_edges(domain, xgrid):
"""If necessary, "unroll" intervals that cross boundary of periodic domain.
"""
xA, xB = domain
assert all(np.diff(xgrid) >= 0)
assert xA < xB
assert xA <= xgrid[0]
assert xgrid[-1] <= xB
if xgrid[0] == xA and xgrid[-1] == xB:
return xgrid
elif xgrid[0] != xA:
return np.array([xgrid[-1] - (xB-xA), *xgrid])
elif xgrid[-1] != xB:
return np.array([*xgrid, xgrid[0] + (xB-xA)])
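
A small sketch of the "unrolling" (illustrative numbers): when the grid does not start at the left edge of the periodic domain, the last grid point is mapped one period down and prepended, so the interval that wraps around the boundary is represented explicitly.

import numpy as np

domain = (0.0, 10.0)
xgrid = np.array([2.0, 5.0, 8.0])
print(unroll_edges(domain, xgrid))   # [-2.  2.  5.  8.]  (8 - (10 - 0) prepended)
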
|
274c446f60bb953e2b864bed06e23bb0153fac94
| 35,932 |
def create_correct_bias_pipe(params={}, name="correct_bias_pipe"):
"""
Description: Correct bias using T1 and T2 images
Same as bash_regis.T1xT2BiasFieldCorrection
Params:
- smooth (see `MathsCommand <https://nipype.readthedocs.io/en/0.12.1/\
interfaces/generated/nipype.interfaces.fsl.maths.html#mathscommand>`_)
- norm_smooth (see `MultiMathsCommand <https://nipype.readthedocs.io/\
en/0.12.1/interfaces/generated/nipype.interfaces.fsl.maths.html\
#multiimagemaths>`_)
- smooth_bias (see `IsotropicSmooth <https://nipype.readthedocs.io/en/\
0.12.1/interfaces/generated/nipype.interfaces.fsl.maths.html#\
isotropicsmooth>`_)
Inputs:
inputnode:
preproc_T1:
preprocessed T1 file name
preproc_T2:
preprocessed T2 file name
arguments:
params:
dictionary of node sub-parameters (from a json file)
name:
pipeline name (default = "correct_bias_pipe")
Outputs:
outputnode.debiased_T1:
T1 after bias correction
outputnode.debiased_T2:
T2 after bias correction
"""
# creating pipeline
correct_bias_pipe = pe.Workflow(name=name)
# creating inputnode
inputnode = pe.Node(
niu.IdentityInterface(fields=['preproc_T1', 'preproc_T2']),
name='inputnode')
# BinaryMaths
mult_T1_T2 = pe.Node(fsl.BinaryMaths(), name='mult_T1_T2')
mult_T1_T2.inputs.operation = "mul"
mult_T1_T2.inputs.args = "-abs -sqrt"
mult_T1_T2.inputs.output_datatype = "float"
correct_bias_pipe.connect(inputnode, 'preproc_T1', mult_T1_T2, 'in_file')
correct_bias_pipe.connect(inputnode, 'preproc_T2',
mult_T1_T2, 'operand_file')
# Mean Brain Val
meanbrainval = pe.Node(fsl.ImageStats(), name='meanbrainval')
meanbrainval.inputs.op_string = "-M"
correct_bias_pipe.connect(mult_T1_T2, 'out_file', meanbrainval, 'in_file')
# norm_mult
norm_mult = pe.Node(fsl.BinaryMaths(), name='norm_mult')
norm_mult.inputs.operation = "div"
correct_bias_pipe.connect(mult_T1_T2, 'out_file', norm_mult, 'in_file')
correct_bias_pipe.connect(meanbrainval, 'out_stat',
norm_mult, 'operand_value')
# smooth
smooth = NodeParams(fsl.maths.MathsCommand(),
params=parse_key(params, "smooth"),
name='smooth')
correct_bias_pipe.connect(norm_mult, 'out_file', smooth, 'in_file')
# norm_smooth
norm_smooth = NodeParams(fsl.MultiImageMaths(),
params=parse_key(params, "norm_smooth"),
name='norm_smooth')
correct_bias_pipe.connect(norm_mult, 'out_file', norm_smooth, 'in_file')
correct_bias_pipe.connect(smooth, 'out_file', norm_smooth, 'operand_files')
# modulate
modulate = pe.Node(fsl.BinaryMaths(), name='modulate')
modulate.inputs.operation = "div"
correct_bias_pipe.connect(norm_mult, 'out_file',
modulate, 'in_file')
correct_bias_pipe.connect(norm_smooth, 'out_file',
modulate, 'operand_file')
# std_modulate
std_modulate = pe.Node(fsl.ImageStats(), name='std_modulate')
std_modulate.inputs.op_string = "-S"
correct_bias_pipe.connect(modulate, 'out_file', std_modulate, 'in_file')
# mean_modulate
mean_modulate = pe.Node(fsl.ImageStats(), name='mean_modulate')
mean_modulate.inputs.op_string = "-M"
correct_bias_pipe.connect(modulate, 'out_file', mean_modulate, 'in_file')
# compute_lower_val
def compute_lower_val(mean_val, std_val):
return mean_val - (std_val*0.5)
# compute_lower
lower = pe.Node(niu.Function(input_names=['mean_val', 'std_val'],
output_names=['lower_val'],
function=compute_lower_val),
name='lower')
correct_bias_pipe.connect(mean_modulate, 'out_stat', lower, 'mean_val')
correct_bias_pipe.connect(std_modulate, 'out_stat', lower, 'std_val')
# thresh_lower
thresh_lower = pe.Node(fsl.Threshold(), name='thresh_lower')
correct_bias_pipe.connect(lower, 'lower_val', thresh_lower, 'thresh')
correct_bias_pipe.connect(modulate, 'out_file', thresh_lower, 'in_file')
# mod_mask
mod_mask = pe.Node(fsl.UnaryMaths(), name='mod_mask')
mod_mask.inputs.operation = "bin"
mod_mask.inputs.args = "-ero -mul 255"
correct_bias_pipe.connect(thresh_lower, 'out_file', mod_mask, 'in_file')
# bias
bias = pe.Node(fsl.MultiImageMaths(), name='bias')
bias.inputs.op_string = "-mas %s -dilall"
bias.inputs.output_datatype = "float"
correct_bias_pipe.connect(norm_mult, 'out_file', bias, 'in_file')
correct_bias_pipe.connect(mod_mask, 'out_file', bias, 'operand_files')
# smooth_bias
smooth_bias = NodeParams(fsl.IsotropicSmooth(),
params=parse_key(params, "smooth_bias"),
name='smooth_bias')
correct_bias_pipe.connect(bias, 'out_file', smooth_bias, 'in_file')
# debiased_T1
debiased_T1 = pe.Node(fsl.BinaryMaths(), name='debiased_T1')
debiased_T1.inputs.operation = "div"
debiased_T1.inputs.output_datatype = "float"
correct_bias_pipe.connect(inputnode, 'preproc_T1',
debiased_T1, 'in_file')
correct_bias_pipe.connect(smooth_bias, 'out_file',
debiased_T1, 'operand_file')
# debiased_T2
debiased_T2 = pe.Node(fsl.BinaryMaths(), name='debiased_T2')
debiased_T2.inputs.operation = "div"
debiased_T2.inputs.output_datatype = "float"
correct_bias_pipe.connect(inputnode, 'preproc_T2',
debiased_T2, 'in_file')
correct_bias_pipe.connect(smooth_bias, 'out_file',
debiased_T2, 'operand_file')
# outputnode
outputnode = pe.Node(
niu.IdentityInterface(
fields=["debiased_T1", "debiased_T2"]),
name='outputnode')
correct_bias_pipe.connect(debiased_T1, 'out_file',
outputnode, 'debiased_T1')
correct_bias_pipe.connect(debiased_T2, 'out_file',
outputnode, 'debiased_T2')
return correct_bias_pipe
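
A hedged usage sketch, assuming nipype and FSL are installed and that the module's NodeParams/parse_key helpers are importable; the paths are placeholders, not from the source.

pipe = create_correct_bias_pipe(params={}, name="correct_bias_pipe")
pipe.base_dir = "/tmp/correct_bias_work"                    # hypothetical working directory
pipe.inputs.inputnode.preproc_T1 = "preproc_T1.nii.gz"      # placeholder file names
pipe.inputs.inputnode.preproc_T2 = "preproc_T2.nii.gz"
pipe.run()                                                  # debiased_T1/debiased_T2 appear on the outputnode
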
|
ec0be858a900f1b38ea6c61d9bcbcb8ac8535b85
| 35,933 |
def infer_gaps_in_tree(df_seq, tree, id_col='id', sequence_col='sequence'):
"""Adds a character matrix to DendroPy tree and infers gaps using
Fitch's algorithm.
Infer gaps in sequences at ancestral nodes.
"""
taxa = tree.taxon_namespace
# Get alignment as fasta
alignment = df_seq.phylo.to_fasta(id_col=id_col, id_only=True,
sequence_col=sequence_col)
# Build a Sequence data matrix from Dendropy
data = dendropy.ProteinCharacterMatrix.get(
data=alignment,
schema="fasta",
taxon_namespace=taxa)
# Construct a map object between sequence data and tree data.
taxon_state_sets_map = data.taxon_state_sets_map(gaps_as_missing=False)
# Fitch algorithm to determine placement of gaps
dendropy.model.parsimony.fitch_down_pass(tree.postorder_node_iter(),
taxon_state_sets_map=taxon_state_sets_map)
dendropy.model.parsimony.fitch_up_pass(tree.preorder_node_iter())
return tree
|
ccd71b7b10977441a25a7d753f5a228287ed8d28
| 35,934 |
def _sklearn_booster_to_model(booster: GradientBoostingClassifier):
"""
Load a scikit-learn gradient boosting classifier as a Model instance. A multiclass booster gets turned into a one-vs-all representation inside the JSON.
Parameters
----------
booster : sklearn.ensemble.GradientBoostingClassifier
Gradient boosting ensemble to export
"""
init = booster.init_
if (
not (isinstance(init, DummyClassifier) and init.strategy == "prior")
and not init == "zero"
):
raise ValueError("Only 'zero' or prior DummyClassifier init is supported")
json_trees = []
if booster.loss_.K == 1:
if init != "zero":
# For the binary case sklearn inverts the sigmoid function
json_trees.append(
{
"nodeid": 0,
"leaf": _sigmoid_inverse(init.class_prior_[1]),
}
)
json_trees.extend(
[
_sklearn_tree_to_dict(
tree[0], classifier=False, learning_rate=booster.learning_rate
)
for tree in booster.estimators_
]
)
else:
json_trees = []
if init != "zero":
for i in range(booster.loss_.K):
# For the multiclass case sklearn uses the log prior probability
json_trees.append(
{
"nodeid": 0,
"leaf": np.log(init.class_prior_[i]),
}
)
for round_estimators in booster.estimators_:
for tree in round_estimators:
json_tree = _sklearn_tree_to_dict(
tree, classifier=False, learning_rate=booster.learning_rate
)
json_trees.append(json_tree)
return Model(json_trees, booster.n_classes_)
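
A hedged usage sketch; it assumes the surrounding module provides Model, _sklearn_tree_to_dict and _sigmoid_inverse, and a scikit-learn version that still exposes booster.loss_ (pre-1.1).

from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
booster = GradientBoostingClassifier(n_estimators=25, random_state=0).fit(X, y)
model = _sklearn_booster_to_model(booster)   # JSON-tree model as defined by this module
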
|
0c1b0cb1eb396db707d22a1bb509f3592647ddb2
| 35,935 |
def encode_utf8_with_error_log(arg):
"""Return byte string encoded with UTF-8, but log and replace on error.
The text is encoded, but if that fails, an error is logged, and the
offending characters are replaced with "?".
Parameters
----------
arg : str
Text to be encoded.
Returns
-------
encoded : byte str
UTF-8 encoded version of the text. May include "?" replacement chars.
"""
try:
return arg.encode('utf-8')
except UnicodeEncodeError:
logger.error("Error encoding message argument to byte str! "
"Replacing bad characters with '?'.")
return arg.encode('utf-8', 'replace')
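
A tiny sketch (assuming a module-level logger such as logger = logging.getLogger(__name__), as referenced in the except branch); a lone surrogate is one of the few things UTF-8 refuses to encode, so it exercises the fallback path.

import logging
logger = logging.getLogger(__name__)

print(encode_utf8_with_error_log("plain text"))      # b'plain text'
print(encode_utf8_with_error_log("bad\ud800char"))   # b'bad?char', after logging the error
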
|
9a2ebc69f00220cfba92e0561b526ee3b5fd23d2
| 35,936 |
def gearys_c(adata, vals):
"""
Compute Geary's C statistics for an AnnData.
Adopted from https://github.com/ivirshup/scanpy/blob/metrics/scanpy/metrics/_gearys_c.py
:math:`C=\\frac{(N - 1)\\sum_{i,j} w_{i,j} (x_i - x_j)^2}{2W \\sum_i (x_i - \\bar{x})^2}`
Parameters
----------
adata : AnnData object
adata.obsp["Connectivities] should contain the connectivity graph,
with shape (n_obs, n_obs).
vals : array-like
Values to calculate Geary's C for. If one dimensional, should have
shape (n_obs,).
Returns
-------
C : float
Geary's C statistics.
"""
graph = adata.obsp["connectivities"]
assert graph.shape[0] == graph.shape[1]
graph_data = graph.data.astype(np.float_, copy=False)
assert graph.shape[0] == vals.shape[0]
assert np.ndim(vals) == 1
W = graph_data.sum()
N = len(graph.indptr) - 1
vals_bar = vals.mean()
vals = vals.astype(np.float_)
# numerators
total = 0.0
for i in range(N):
s = slice(graph.indptr[i], graph.indptr[i + 1])
# indices of corresponding neighbors
i_indices = graph.indices[s]
# corresponding connecting weights
i_data = graph_data[s]
total += np.sum(i_data * ((vals[i] - vals[i_indices]) ** 2))
numer = (N - 1) * total
denom = 2 * W * ((vals - vals_bar) ** 2).sum()
C = numer / denom
return C
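
A hedged sketch, assuming anndata and scipy are installed (and NumPy < 2.0, since the function relies on np.float_); the connectivity graph is a toy symmetric ring rather than a real kNN graph.

import numpy as np
import scipy.sparse as sp
import anndata as ad

n_obs = 5
adata = ad.AnnData(np.zeros((n_obs, 1)))
ring = sp.lil_matrix((n_obs, n_obs))
for i in range(n_obs):                         # connect each cell to its two ring neighbours
    ring[i, (i + 1) % n_obs] = 1.0
    ring[i, (i - 1) % n_obs] = 1.0
adata.obsp["connectivities"] = ring.tocsr()
vals = np.arange(n_obs, dtype=float)
print(gearys_c(adata, vals))                   # 0.8 for this toy ring graph
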
|
ea5441619f0c893242b6d851781251b93a63b05d
| 35,937 |
def eq_kinematic_src():
"""
Factory associated with EqKinSrc.
"""
return EqKinSrc()
|
be15e4169f90c28323dd881f67b325182547ad43
| 35,938 |
def generate_materials_string(materials, mtlfilename, basename):
"""Generate final materials string.
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
return generate_materials(mtl, materials)
|
e417a3a07576b61dd6b247251834e2c04ba0a947
| 35,939 |
def _mktyperef(obj):
"""Return a typeref dictionary. Used for references.
>>> from jsonpickle import tags
>>> _mktyperef(AssertionError)[tags.TYPE].rsplit('.', 1)[0]
'exceptions'
>>> _mktyperef(AssertionError)[tags.TYPE].rsplit('.', 1)[-1]
'AssertionError'
"""
return {tags.TYPE: '%s.%s' % (obj.__module__, obj.__name__)}
|
0c89d3771c3531773e13475487b4466231e067e5
| 35,940 |
from typing import Tuple
def colorize(img: np.ndarray, color: Tuple) -> np.ndarray:
"""colorize a single-channel (alpha) image into a 4-channel RGBA image"""
    # ensure color is RGBA (append full alpha when only RGB is given)
if len(color) == 3:
color = (color[0], color[1], color[2], 255)
    # create result image filled with solid "color"
res = np.zeros((img.shape[0], img.shape[1], 4), dtype=np.ubyte)
res[:, :, 0:4] = color
# scale the alpha component by the image
# (this comes into play if "color" has alpha < 255)
res[:, :, 3] = color[3] / 255.0 * img
# set the RGB of completely transparent pixels to zero
res[res[:, :, 3] == 0, 0:3] = (0, 0, 0)
return res
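
A short sketch with a toy alpha ramp (illustrative, not from the source):

import numpy as np

alpha = np.tile(np.linspace(0, 255, 4, dtype=np.ubyte), (4, 1))   # 4x4 alpha gradient
rgba = colorize(alpha, (255, 0, 0))        # opaque red, modulated by the ramp
print(rgba.shape, rgba.dtype)              # (4, 4, 4) uint8
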
|
88e79e7adc7785f1391db94c9a295b2a55943c7c
| 35,941 |
def prepare_template_stream(stream, base_url):
"""Prepares the stream to be stored in the DB"""
document_tree = _get_document_tree(stream)
_make_links_absolute(document_tree, base_url)
return _serialize_stream(document_tree)
|
1072573ee0856725351a015f104e20604ef3b9c3
| 35,942 |
def _jsarr(x):
"""Return a string that would work for a javascript array"""
return "[" + ", ".join(['"{}"'.format(i) for i in x]) + "]"
|
9c9b6df65bf4c01fa1c321445bbb7c86c6d28c5a
| 35,943 |
def PyApp_SetMacPreferencesMenuItemId(*args, **kwargs):
"""PyApp_SetMacPreferencesMenuItemId(long val)"""
return _core_.PyApp_SetMacPreferencesMenuItemId(*args, **kwargs)
|
7704e544439e27f362fe703027af7d393e1ab647
| 35,945 |
def isnum(value):
"""
    Check if a value is a number (float or int)
value:
The value to check
"""
try:
return bool(isinstance(value, (float, int)))
except BaseException:
return False
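
Brief examples; note that bool is a subclass of int, so isnum(True) is also True and callers that need to exclude booleans must check for them separately.

print(isnum(3), isnum(2.5), isnum("7"), isnum(None))   # True True False False
print(isnum(True))                                     # True, because bool subclasses int
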
|
6116d0d4c61f5f3afefe311b06d9b23b03ab9bfc
| 35,946 |
import httpx
import json
async def _async_json_object(api_call):
"""async function to make a request to the wiki api and return a json object with article information
Args:
        api_call (str): link to the API call
Returns:
dict: json content from the api call
"""
    async with httpx.AsyncClient() as client:
        response = await client.get(api_call, headers=headers)
        content = response.text
    # the context manager already closes the client, so no explicit aclose() is needed
    print(".", flush=True, end="")
    return json.loads(content)
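
A hedged usage sketch; it assumes a module-level headers dict is defined next to the function (the request references it), network access, and a standard MediaWiki query URL built by the caller.

import asyncio

headers = {"User-Agent": "wiki-fetch-example/0.1"}   # hypothetical module-level headers
api_call = ("https://en.wikipedia.org/w/api.php"
            "?action=query&titles=Python_(programming_language)&format=json")
data = asyncio.run(_async_json_object(api_call))
print(list(data.keys()))
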
|
a047bbf16af314bd96c5d12c03ff8c2d0a16d17b
| 35,947 |
def yices_model_set_bv_int64(model, var, val):
"""Assign an integer value to a bitvector uninterpreted term.
"""
return libyices.yices_model_set_bv_int64(model, var, val)
|
5575adec31c297ff685884283bce7d1d9c47e386
| 35,948 |
def generate_histograms(
num_users: int,
counts_iid_param: float,
avg_count: float,
ref_distribution: np.ndarray,
hist_iid_param: float,
rng=np.random.default_rng()) -> np.ndarray:
"""Generate histograms with different total counts and distributions.
Args:
num_users: An integer indicating the total number of users. Must be
positive.
counts_iid_param: A float which controls the similarity of total counts.
Must be non-negative.
avg_count: A float indicating the expected average total count. Must be at
least 1.
ref_distribution: reference distribution over the domain
hist_iid_param: A non-negative float. Level of perturbation around the
reference distribution.
rng: A numpy random generator.
Returns:
histograms: list of numpy arrays which contains all histograms.
"""
  if num_users <= 0:
    raise ValueError(f'num_users must be positive. '
                     f'Found num_users={num_users}.')
  if counts_iid_param < 0:
    raise ValueError(f'counts_iid_param must be non-negative. '
                     f'Found counts_iid_param={counts_iid_param}.')
  if avg_count < 1:
    raise ValueError(f'avg_count must be at least 1. '
                     f'Found avg_count={avg_count}.')
  if hist_iid_param < 0:
    raise ValueError(f'hist_iid_param must be non-negative. '
                     f'Found hist_iid_param={hist_iid_param}.')
  if ref_distribution.ndim != 1:
    raise ValueError(f'ref_distribution must be a 1-D array. '
                     f'Found dimension={ref_distribution.ndim}.')
  if (ref_distribution < 0).any() | (ref_distribution > 1).any():
    raise ValueError('Expecting elements in ref_distribution to be in [0, 1].')
  if np.sum(ref_distribution) != 1:
    raise ValueError(f'ref_distribution should sum up to 1. '
                     f'Found the sum to be {np.sum(ref_distribution)}.')
# Make sure that each user has at least 1 item
counts = generate_non_iid_poisson_counts(num_users, counts_iid_param,
avg_count - 1, rng) + 1
distributions = generate_non_iid_distributions_dirichlet(
num_users, ref_distribution, hist_iid_param, rng)
histograms = []
for i in range(num_users):
histograms.append(rng.multinomial(counts[i], distributions[i]))
return np.array(histograms)
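
A hedged usage sketch; it assumes the companion helpers generate_non_iid_poisson_counts and generate_non_iid_distributions_dirichlet from the same module are importable. A 4-bin uniform reference is used so the distribution sums to exactly 1 in floating point.

import numpy as np

rng = np.random.default_rng(0)
ref = np.full(4, 0.25)                   # uniform reference distribution over 4 bins
hists = generate_histograms(num_users=100, counts_iid_param=1.0, avg_count=50.0,
                            ref_distribution=ref, hist_iid_param=5.0, rng=rng)
print(hists.shape)                       # (100, 4); row i sums to user i's total count
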
|
0e3c00a908cfe139dd273326d63a2c18cd0b1294
| 35,949 |
import requests
def neo_create_bucket(**kwargs):
"""Create a bucket with headers.
:param auth: Tuple, consists of auth object and endpoint string
:param acl: Input for canned ACL, defaults to "private"
    :param policy_id: String for `x-gmt-policyid`, which determines how data in the bucket will be distributed, defaults to an empty string
    :param bucket_name: Bucket name
    :param random_name: A flag deciding whether the bucket name should be suffixed with a random string, defaults to False
"""
auth = kwargs.get("auth")
acl = kwargs.get("acl", "private")
policy_id = kwargs.get("policy_id", "")
bucket_name = kwargs.get("bucket_name")
if kwargs.get("random_name"):
bucket_name = gen_random_name(bucket_name)
endpoint = auth_lib.get_endpoint("storage", bucket_name)
headers = {"x-gmt-policyid": policy_id, "x-amz-acl": acl}
if "." in bucket_name:
response = requests.put(endpoint, auth=auth, headers=headers, verify=False)
else:
response = requests.put(endpoint, auth=auth, headers=headers)
return response
|
52d5c6f3c49731c7fdfa95e85cdfc06c96a84f0a
| 35,951 |
def babi_handler(data_dir, task_number):
"""
    Handler for the bAbI task.
Args:
data_dir (string) : Path to bAbI data directory.
task_number (int) : The task ID from the bAbI dataset (1-20).
Returns:
BABI : Handler for bAbI task.
"""
task = task_list[task_number - 1]
return BABI(path=data_dir, task=task, subset=subset)
|
12472b8b2430bf7d04e51e41f1b34124583935aa
| 35,952 |
def _full_ner(text_analyzer):
"""
Run complete NER.
This includes extraction of different entity types and geotagging.
:param class text_analyzer: the text_analyzer of nlp_components
:return dict: json with persons, geotagged locations and metadata,
    readable by the viewer
"""
named_entites = text_analyzer.do_ner()
persons = _convert_list_of_objs_to_list_of_dicts(
text_analyzer.get_persons(named_entites))
locations = text_analyzer.get_locations(named_entites)
geotagged_locations = _convert_list_of_objs_to_list_of_dicts(
text_analyzer.geoparse(locations))
return _generatere_viewer_json(persons, geotagged_locations)
|
e182e4d4bbc61ce9d7eca52b1a490d8705a81808
| 35,953 |
def frequency_impulse_response(magnitudes: tf.Tensor,
window_size: int = 0) -> tf.Tensor:
"""Get windowed impulse responses using the frequency sampling method.
Follows the approach in:
https://ccrma.stanford.edu/~jos/sasp/Windowing_Desired_Impulse_Response.html
Args:
magnitudes: Frequency transfer curve. Float32 Tensor of shape [batch,
n_frames, n_frequencies] or [batch, n_frequencies]. The frequencies of the
      last dimension are ordered as [0, f_nyquist / (n_frequencies - 1), ...,
f_nyquist], where f_nyquist is (sample_rate / 2). Automatically splits the
audio into equally sized frames to match frames in magnitudes.
window_size: Size of the window to apply in the time domain. If window_size
is less than 1, it defaults to the impulse_response size.
Returns:
impulse_response: Time-domain FIR filter of shape
[batch, frames, window_size] or [batch, window_size].
Raises:
ValueError: If window size is larger than fft size.
"""
# Get the IR (zero-phase form).
magnitudes = tf.complex(magnitudes, tf.zeros_like(magnitudes))
impulse_response = tf.signal.irfft(magnitudes)
# Window and put in causal form.
impulse_response = apply_window_to_impulse_response(impulse_response,
window_size)
return impulse_response
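
A hedged sketch, assuming TensorFlow and the module's apply_window_to_impulse_response are available (the code follows the DDSP-style filter design API); 65 frequency bins yield a 2 * (65 - 1) = 128-tap impulse response.

import tensorflow as tf

magnitudes = tf.ones([1, 65])                               # flat magnitude response, batch of 1
ir = frequency_impulse_response(magnitudes, window_size=0)  # window defaults to the IR size
print(ir.shape)                                             # (1, 128)
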
|
9e307778511b7fb4b79caab338b016a1f7e91120
| 35,954 |
def parse_station_list_to_json(filepath_or_buffer) -> str:
""" Return JSON-formatted data """
return _parse_station_list(filepath_or_buffer).to_json(orient="records")
|
32d3e6e122433692a1dd16bd3750cda528c70596
| 35,955 |
from typing import List
from typing import Callable
from typing import Set
import numpy
def get_minimal_intactness_ls_centralities(
nodes: List[Node], definitions: Definitions,
get_ill_behaved_weight: Callable[[Set[Node]], float],
get_mu: Callable[[numpy.array], float]
) -> numpy.array:
"""Compute minimal intactness linear system centralities"""
M = get_minimal_intactness_matrix(nodes, definitions, get_ill_behaved_weight)
A = numpy.eye(len(nodes)) - get_mu(M) * M
centralities = numpy.linalg.solve(A, numpy.ones(len(nodes)))
return centralities / numpy.max(centralities)
|
ac2d01a8d5ba1842379044c77398c93292e8a4f9
| 35,956 |
def join_kwargs(**kwargs) -> str:
"""
Joins keyword arguments and their values in parenthesis.
Example: key1{value1}_key2{value2}
"""
return "_".join(key + "{" + value + "}" for key, value in kwargs.items())
|
3054573ec51676bb8d93e2fcabd4cb5097e4b897
| 35,958 |
def zipped_lambda_function():
"""Return a simple test lambda function, zipped."""
func_str = """
def lambda_handler(event, context):
print("testing")
return event
"""
zip_output = BytesIO()
with ZipFile(zip_output, "w", ZIP_DEFLATED) as zip_file:
zip_file.writestr("lambda_function.py", func_str)
zip_file.close()
zip_output.seek(0)
return zip_output.read()
|
38f2862c4e9401a32866ec54d5db420265bda2a1
| 35,959 |