content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
from grouper.permissions import permission_intersection
def get_public_key_permissions(session, public_key):
# type: (Session, PublicKey) -> List[Permission]
"""Returns the permissions that this public key has. Namely, this the set of permissions
that the public key's owner has, intersected with the permissions allowed by this key's
tags
Returns:
a list of all permissions this public key has
"""
# TODO: Fix circular dependency
my_perms = user_permissions(session, public_key.user)
for tag in get_public_key_tags(session, public_key):
my_perms = permission_intersection(my_perms, get_public_key_tag_permissions(session, tag))
return list(my_perms) | 9e9d33cda6fb342d00fb3e00b668ba89c654bc68 | 8,100 |
def _format_breed_name(name):
"""
    Format breed name for display
INPUT
name: raw breed name, str
OUTPUT
name : cleaned breed name, str
"""
return name.split('.')[1].replace('_', ' ') | 0c2680de9bd19e61d717fb84c1ce01e5095ddf35 | 8,101 |
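A minimal usage sketch of the formatter above; the 'NNN.Breed_name' directory-style input is an assumption, not something the snippet states:

# Hypothetical inputs in the assumed 'NNN.Breed_name' format
assert _format_breed_name('097.Lakeland_terrier') == 'Lakeland terrier'
assert _format_breed_name('001.Affenpinscher') == 'Affenpinscher'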
import os
import scipy.io as sio
def loadIndianPinesData():
    """
    Load the Indian Pines dataset.
    :return: data, labels
    """
data_path = os.path.join(os.getcwd( ), '../Indian Pines')
data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']
labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']
return data, labels | 3d54f79417fc5cec98708a38c47c3e34100ad639 | 8,102 |
def create_pid_redirected_error_handler():
"""Creates an error handler for `PIDRedirectedError` error."""
def pid_redirected_error_handler(e):
try:
# Check that the source pid and the destination pid are of the same
# pid_type
assert e.pid.pid_type == e.destination_pid.pid_type
# Redirection works only for the item route of the format
# `/records/<pid_value>`
location = url_for(
request.url_rule.endpoint,
pid_value=e.destination_pid.pid_value
)
data = dict(
status=301,
message='Moved Permanently.',
location=location,
)
response = make_response(jsonify(data), data['status'])
response.headers['Location'] = location
return response
except (AssertionError, BuildError, KeyError):
raise e
return pid_redirected_error_handler | 3137c4d6447c5da9f500b3d4cd7a6a3a68325a92 | 8,103 |
def is_var_name_with_greater_than_len_n(var_name: str) -> bool:
"""
Given a variable name, return if this is acceptable according to the
filtering heuristics.
Here, we try to discard variable names like X, y, a, b etc.
:param var_name:
:return:
"""
unacceptable_names = {}
if len(var_name) < min_var_name_len:
return False
elif var_name in unacceptable_names:
return False
return True | c4509b33cc7326c1709f526137e04a590cf3c7ad | 8,104 |
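A brief usage sketch of the filter above, assuming the module-level constant min_var_name_len is set to 2 (purely illustrative):

min_var_name_len = 2  # assumed threshold; defined elsewhere in the original module
assert is_var_name_with_greater_than_len_n('y') is False       # too short, discarded
assert is_var_name_with_greater_than_len_n('total_count') is True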
# NOTE: `L` below is expected to be the rotation-error helper from the surrounding kinematics
# utilities; the original `from re import L` (the regex LOCALE flag) is not callable as used here.
def stacked_L(robot: RobotPlanar, q: list, q_goal: list):
    """
    Stacks the L matrices for convenience
    """
LL = []
LLinv = []
Ts_ee = robot.get_full_pose_fast_lambdify(list_to_variable_dict(q))
Ts_goal = robot.get_full_pose_fast_lambdify(list_to_variable_dict(q_goal))
for ee in robot.end_effectors:
T_0_ee = SE2_to_SE3(Ts_ee[ee[0]])
Re = T_0_ee[0:3, 0:3]
T_0_goal = SE2_to_SE3(Ts_goal[ee[0]])
Rd = T_0_goal[0:3, 0:3]
ll, llinv = L(Rd, Re)
LL.append(np.eye(3))
LLinv.append(np.eye(3))
LL.append(ll)
LLinv.append(llinv)
LL = block_diag(*LL)
LLinv = block_diag(*LLinv)
return LL, LLinv | a329ad79b9add95307195329b937a85cf9eeda50 | 8,105 |
from typing import Dict
async def help() -> Dict:
"""Shows this help message."""
return {
'/': help.__doc__,
'/help': help.__doc__,
'/registration/malaysia': format_docstring(get_latest_registration_data_malaysia.__doc__),
'/registration/malaysia/latest': format_docstring(get_latest_registration_data_malaysia.__doc__),
'/registration/malaysia/{date}': format_docstring(get_registration_data_malaysia.__doc__),
'/vaccination/malaysia': format_docstring(get_latest_vax_data_malaysia.__doc__),
'/vaccination/malaysia/latest': format_docstring(get_latest_vax_data_malaysia.__doc__),
'/vaccination/malaysia/{date}': format_docstring(get_vax_data_malaysia.__doc__),
'/registration/state': format_docstring(get_latest_registration_data_state.__doc__),
'/registration/state/all/latest': format_docstring(get_latest_registration_data_state.__doc__),
'/registration/state/all/{date}': format_docstring(get_registration_data_all_state.__doc__),
'/registration/state/{state}/latest': format_docstring(get_latest_registration_data_for_state.__doc__),
'/registration/state/{state}/{date}': format_docstring(get_registration_data_state.__doc__),
'/vaccination/state': format_docstring(get_latest_vax_data_state.__doc__),
'/vaccination/state/all/latest': format_docstring(get_latest_vax_data_state.__doc__),
'/vaccination/state/all/{date}': format_docstring(get_vax_data_all_state.__doc__),
'/vaccination/state/{state}/latest': format_docstring(get_latest_vax_data_for_state.__doc__),
'/vaccination/state/{state}/{date}': format_docstring(get_vax_data_state.__doc__),
} | 4d72c66069956c469aea1d39fd68e20454f68e40 | 8,106 |
def eliminate_arrays(clusters, template):
"""
Eliminate redundant expressions stored in Arrays.
"""
mapper = {}
processed = []
for c in clusters:
if not c.is_dense:
processed.append(c)
continue
# Search for any redundant RHSs
seen = {}
for e in c.exprs:
f = e.lhs.function
if not f.is_Array:
continue
v = seen.get(e.rhs)
if v is not None:
# Found a redundant RHS
mapper[f] = v
else:
seen[e.rhs] = f
if not mapper:
# Do not waste time
processed.append(c)
continue
# Replace redundancies
subs = {}
for f, v in mapper.items():
for i in filter_ordered(i.indexed for i in c.scope[f]):
subs[i] = v[f.indices]
exprs = []
for e in c.exprs:
if e.lhs.function in mapper:
# Drop the write
continue
exprs.append(e.xreplace(subs))
processed.append(c.rebuild(exprs))
return processed | 2d5a59d9d5758963e029cc102d6a123c62ed8758 | 8,107 |
import tensorflow as tf
from tensorflow.keras.datasets import imdb
def data_generator(batch_size):
    """
    Build IMDB train/test tf.data pipelines of ragged sequences.
    Args:
        batch_size: Size of batch
    Returns:
        train_dataset, test_dataset, vocab_size
    """
vocab_size = 20000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size)
x_train, y_train, x_test, y_test = tf.ragged.constant(x_train), tf.constant(y_train[..., None]), \
tf.ragged.constant(x_test), tf.constant(y_test[..., None])
# Shuffle only train dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) \
.shuffle(batch_size * 100).batch(batch_size)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)
return train_dataset, test_dataset, vocab_size | ed1edfd0cbeac01bd1fcad6cc1fe36a94dc006e8 | 8,108 |
from datetime import datetime
def now():
""" Get current timestamp
Returns:
str: timestamp string
"""
current_time = datetime.now()
str_date = current_time.strftime("%d %B %Y, %I:%M:%S %p")
return str_date | 4c487416fa119cae0c5310678dfd96e0f737b937 | 8,109 |
def open_mfdataset(
fname,
convert_to_ppb=True,
mech="cb6r3_ae6_aq",
var_list=None,
fname_pm25=None,
surf_only=False,
**kwargs
):
# Like WRF-chem add var list that just determines whether to calculate sums or not to speed this up.
"""Method to open RFFS-CMAQ dyn* netcdf files.
Parameters
----------
fname : string or list
fname is the path to the file or files. It will accept hot keys in
strings as well.
convert_to_ppb : boolean
If true the units of the gas species will be converted to ppbv
mech: str
Mechanism to be used for calculating sums. Mechanisms supported:
"cb6r3_ae6_aq"
var_list: list
        List of variables to include in output. MELODIES-MONET only reads in
        the variables needed to plot in order to save on memory and simulation cost,
especially for vertical data. If None, will read in all model data and
calculate all sums.
fname_pm25: string or list
Optional path to the file or files for precalculated PM2.5 sums. It
will accept hot keys in strings as well.
surf_only: boolean
Whether to save only surface data to save on memory and computational
cost (True) or not (False).
Returns
-------
xarray.DataSet
RRFS-CMAQ model dataset in standard format for use in MELODIES-MONET
"""
# Get dictionary of summed species for the mechanism of choice.
dict_sum = dict_species_sums(mech=mech)
if var_list is not None:
# Read in only a subset of variables and only do calculations if needed.
var_list_orig = var_list.copy() # Keep track of the original list before changes.
list_calc_sum = []
list_remove_extra = [] # list of variables to remove after the sum to save in memory.
for var_sum in [
"PM25",
"PM10",
"noy_gas",
"noy_aer",
"nox",
"pm25_cl",
"pm25_ec",
"pm25_ca",
"pm25_na",
"pm25_nh4",
"pm25_no3",
"pm25_so4",
"pm25_om",
]:
if var_sum in var_list:
if var_sum == "PM25":
var_list.extend(dict_sum["aitken"])
var_list.extend(dict_sum["accumulation"])
var_list.extend(dict_sum["coarse"])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum["aitken"])
list_remove_extra.extend(dict_sum["accumulation"])
list_remove_extra.extend(dict_sum["coarse"])
elif var_sum == "PM10":
var_list.extend(dict_sum["aitken"])
var_list.extend(dict_sum["accumulation"])
var_list.extend(dict_sum["coarse"])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum["aitken"])
list_remove_extra.extend(dict_sum["accumulation"])
list_remove_extra.extend(dict_sum["coarse"])
else:
var_list.extend(dict_sum[var_sum])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum[var_sum])
var_list.remove(var_sum)
list_calc_sum.append(var_sum)
# append the other needed species.
var_list.append("lat")
var_list.append("lon")
var_list.append("phalf")
var_list.append("tmp")
var_list.append("pressfc")
var_list.append("dpres")
var_list.append("hgtsfc")
var_list.append("delz")
# Remove duplicates just in case:
var_list = list(dict.fromkeys(var_list))
list_remove_extra = list(dict.fromkeys(list_remove_extra))
# Select only those elements in list_remove_extra that are not in var_list_orig
list_remove_extra_only = list(set(list_remove_extra) - set(var_list_orig))
# If variables in pm25 files are included remove these as these are not in the main file
# And will be added later.
for pm25_var in [
"PM25_TOT",
"PM25_TOT_NSOM",
"PM25_EC",
"PM25_NH4",
"PM25_NO3",
"PM25_SO4",
"PM25_OC",
"PM25_OM",
]:
if pm25_var in var_list:
var_list.remove(pm25_var)
# open the dataset using xarray
dset = xr.open_mfdataset(fname, concat_dim="time", combine="nested", **kwargs)[var_list]
else:
# Read in all variables and do all calculations.
dset = xr.open_mfdataset(fname, concat_dim="time", combine="nested", **kwargs)
list_calc_sum = [
"PM25",
"PM10",
"noy_gas",
"noy_aer",
"nox",
"pm25_cl",
"pm25_ec",
"pm25_ca",
"pm25_na",
"pm25_nh4",
"pm25_no3",
"pm25_so4",
"pm25_om",
]
if fname_pm25 is not None:
# Add the processed pm2.5 species.
dset_pm25 = xr.open_mfdataset(fname_pm25, concat_dim="time", combine="nested", **kwargs)
dset_pm25 = dset_pm25.drop(
labels=["lat", "lon", "pfull"]
) # Drop duplicate variables so can merge.
# Slight differences in pfull value between the files, but I assume that these still represent the
# same pressure levels from the model dynf* files.
# Attributes are formatted differently in pm25 file so remove attributes and use those from dynf* files.
dset_pm25.attrs = {}
dset = dset.merge(dset_pm25)
# Standardize some variable names
dset = dset.rename(
{
"grid_yt": "y",
"grid_xt": "x",
"pfull": "z",
"phalf": "z_i", # Interface pressure levels
"lon": "longitude",
"lat": "latitude",
"tmp": "temperature_k", # standard temperature (kelvin)
"pressfc": "surfpres_pa",
"dpres": "dp_pa", # Change names so standard surfpres_pa and dp_pa
"hgtsfc": "surfalt_m",
"delz": "dz_m",
}
) # Optional, but when available include altitude info
# Calculate pressure. This has to go before sorting because ak and bk
# are not sorted as they are in attributes
dset["pres_pa_mid"] = _calc_pressure(dset)
# Adjust pressure levels for all models such that the surface is first.
dset = dset.sortby("z", ascending=False)
dset = dset.sortby("z_i", ascending=False)
    # Note: these altitude calculations always need to go after the resorting above.
# Altitude calculations are all optional, but for each model add values that are easy to calculate.
dset["alt_msl_m_full"] = _calc_hgt(dset)
dset["dz_m"] = dset["dz_m"] * -1.0 # Change to positive values.
# Set coordinates
dset = dset.reset_index(
["x", "y", "z", "z_i"], drop=True
) # For now drop z_i no variables use it.
dset["latitude"] = dset["latitude"].isel(time=0)
dset["longitude"] = dset["longitude"].isel(time=0)
dset = dset.reset_coords()
dset = dset.set_coords(["latitude", "longitude"])
# These sums and units are quite expensive and memory intensive,
# so add option to shrink dataset to just surface when needed
if surf_only:
dset = dset.isel(z=0).expand_dims("z", axis=1)
# Need to adjust units before summing for aerosols
# convert all gas species to ppbv
if convert_to_ppb:
for i in dset.variables:
if "units" in dset[i].attrs:
if "ppmv" in dset[i].attrs["units"]:
dset[i] *= 1000.0
dset[i].attrs["units"] = "ppbv"
# convert "ug/kg to ug/m3"
for i in dset.variables:
if "units" in dset[i].attrs:
if "ug/kg" in dset[i].attrs["units"]:
# ug/kg -> ug/m3 using dry air density
dset[i] = dset[i] * dset["pres_pa_mid"] / dset["temperature_k"] / 287.05535
dset[i].attrs["units"] = r"$\mu g m^{-3}$"
# add lazy diagnostic variables
    # Note: because there are so many species to sum, summing the aerosols slows down the code.
if "PM25" in list_calc_sum:
dset = add_lazy_pm25(dset, dict_sum)
if "PM10" in list_calc_sum:
dset = add_lazy_pm10(dset, dict_sum)
if "noy_gas" in list_calc_sum:
dset = add_lazy_noy_g(dset, dict_sum)
if "noy_aer" in list_calc_sum:
dset = add_lazy_noy_a(dset, dict_sum)
if "nox" in list_calc_sum:
dset = add_lazy_nox(dset, dict_sum)
if "pm25_cl" in list_calc_sum:
dset = add_lazy_cl_pm25(dset, dict_sum)
if "pm25_ec" in list_calc_sum:
dset = add_lazy_ec_pm25(dset, dict_sum)
if "pm25_ca" in list_calc_sum:
dset = add_lazy_ca_pm25(dset, dict_sum)
if "pm25_na" in list_calc_sum:
dset = add_lazy_na_pm25(dset, dict_sum)
if "pm25_nh4" in list_calc_sum:
dset = add_lazy_nh4_pm25(dset, dict_sum)
if "pm25_no3" in list_calc_sum:
dset = add_lazy_no3_pm25(dset, dict_sum)
if "pm25_so4" in list_calc_sum:
dset = add_lazy_so4_pm25(dset, dict_sum)
if "pm25_om" in list_calc_sum:
dset = add_lazy_om_pm25(dset, dict_sum)
# Change the times to pandas format
dset["time"] = dset.indexes["time"].to_datetimeindex(unsafe=True)
# Turn off warning for now. This is just because the model is in julian time
# Drop extra variables that were part of sum, but are not in original var_list
# to save memory and computational time.
    # This is only relevant if var_list is provided.
if var_list is not None:
if bool(list_remove_extra_only): # confirm list not empty
dset = dset.drop_vars(list_remove_extra_only)
return dset | 59639b4bb45d4c1306ea8ecfa1241b86247ce16b | 8,110 |
def projection(projection_matrix: tf.Tensor,
flattened_vector: tf.Tensor) -> tf.Tensor:
"""Projects `flattened_vector` using `projection_matrix`.
Args:
projection_matrix: A rank-2 Tensor that specifies the projection.
flattened_vector: A flat Tensor to be projected
Returns:
A flat Tensor returned from projection.
"""
return tf.reshape(
projection_matrix @ (tf.transpose(projection_matrix) @ tf.reshape(
flattened_vector, [-1, 1])), [-1]) | 7954247be2f3d130ac79f53e44b0509608fe85d6 | 8,111 |
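In plain math, the value returned above is simply M Mᵀ applied to the flattened vector; when the projection matrix M has orthonormal columns (an assumption the docstring does not state), this is the orthogonal projection onto the column space of M:

\[ \mathrm{projection}(M, v) \;=\; M\,M^{\top} v \]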
def free_vars(e):
"""Get free variables from expression e.
Parameters
----------
e: tvm.relay.Expr
The input expression
Returns
-------
free : List[tvm.relay.Var]
The list of free variables
"""
return _ir_pass.free_vars(e) | bfab6f5ff0ccadf8dba7af518401e6026efbcb20 | 8,112 |
def image_as_uint(im, bitdepth=None):
""" Convert the given image to uint (default: uint8)
If the dtype already matches the desired format, it is returned
as-is. If the image is float, and all values are between 0 and 1,
    the values are multiplied by np.power(2.0, bitdepth) - 1. In all other
situations, the values are scaled such that the minimum value
becomes 0 and the maximum value becomes np.power(2.0, bitdepth)-1
(255 for 8-bit and 65535 for 16-bit).
"""
if not bitdepth:
bitdepth = 8
if not isinstance(im, np.ndarray):
raise ValueError("Image must be a numpy array")
if bitdepth == 8:
out_type = np.uint8
elif bitdepth == 16:
out_type = np.uint16
else:
raise ValueError("Bitdepth must be either 8 or 16")
dtype_str1 = str(im.dtype)
dtype_str2 = out_type.__name__
if (im.dtype == np.uint8 and bitdepth == 8) or (
im.dtype == np.uint16 and bitdepth == 16
):
# Already the correct format? Return as-is
return im
if dtype_str1.startswith("float") and np.nanmin(im) >= 0 and np.nanmax(im) <= 1:
_precision_warn(dtype_str1, dtype_str2, "Range [0, 1].")
im = im.astype(np.float64) * (np.power(2.0, bitdepth) - 1) + 0.499999999
elif im.dtype == np.uint16 and bitdepth == 8:
_precision_warn(dtype_str1, dtype_str2, "Losing 8 bits of resolution.")
im = np.right_shift(im, 8)
elif im.dtype == np.uint32:
_precision_warn(
dtype_str1,
dtype_str2,
"Losing {} bits of resolution.".format(32 - bitdepth),
)
im = np.right_shift(im, 32 - bitdepth)
elif im.dtype == np.uint64:
_precision_warn(
dtype_str1,
dtype_str2,
"Losing {} bits of resolution.".format(64 - bitdepth),
)
im = np.right_shift(im, 64 - bitdepth)
else:
mi = np.nanmin(im)
ma = np.nanmax(im)
if not np.isfinite(mi):
raise ValueError("Minimum image value is not finite")
if not np.isfinite(ma):
raise ValueError("Maximum image value is not finite")
if ma == mi:
return im.astype(out_type)
_precision_warn(dtype_str1, dtype_str2, "Range [{}, {}].".format(mi, ma))
# Now make float copy before we scale
im = im.astype("float64")
# Scale the values between 0 and 1 then multiply by the max value
im = (im - mi) / (ma - mi) * (np.power(2.0, bitdepth) - 1) + 0.499999999
assert np.nanmin(im) >= 0
assert np.nanmax(im) < np.power(2.0, bitdepth)
return im.astype(out_type) | a906eb7022a1823cd49bedb3858bac34e59fdf02 | 8,113 |
def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
"""
# Only negate is currently supported.
valid_ops = {'-'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype {dtypename!r}.\n"
"{op!r} is only supported for Factors of dtype "
"'float64'.".format(
op=op,
typename=type(self).__name__,
dtypename=self.dtype.name,
)
)
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{op}x_0".format(op=op),
(self,),
dtype=float64_dtype,
)
return unary_operator | 04bc43c2f5e84db29b7a913de35d7a366464dfda | 8,114 |
import numpy as np
def compute_cost_with_regularization(A3, Y, parameters, lambd):
"""
Implement the cost function with L2 regularization. See formula (2) above.
Arguments:
A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
Y -- "true" labels vector, of shape (output size, number of examples)
parameters -- python dictionary containing parameters of the model
Returns:
cost - value of the regularized loss function (formula (2))
"""
m = Y.shape[1]
W1 = parameters["W1"]
W2 = parameters["W2"]
W3 = parameters["W3"]
cross_entropy_cost = compute_cost(A3, Y) # This gives you the cross-entropy part of the cost
L2_regularization_cost = (1. / m * lambd / 2) * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)))
cost = cross_entropy_cost + L2_regularization_cost
return cost | 5904cab44af1768779ed983fa001876d14faeb1d | 8,115 |
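For reference, the quantity computed above (the "formula (2)" the docstring points to, reconstructed here from the code, with compute_cost assumed to be the usual binary cross-entropy) is:

\[ J_{\mathrm{regularized}} = \underbrace{-\frac{1}{m}\sum_{i=1}^{m}\Big(y^{(i)}\log a^{[3](i)} + (1-y^{(i)})\log\big(1-a^{[3](i)}\big)\Big)}_{\text{cross-entropy cost}} + \underbrace{\frac{\lambda}{2m}\Big(\lVert W_1\rVert_F^2 + \lVert W_2\rVert_F^2 + \lVert W_3\rVert_F^2\Big)}_{\text{L2 regularization cost}} \]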
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Point, MultiPolygon, MultiLineString
from scipy import spatial
def catalog_area(ra=[], dec=[], make_plot=True, NMAX=5000, buff=0.8, verbose=True):
"""Compute the surface area of a list of RA/DEC coordinates
Parameters
----------
ra, dec : `~numpy.ndarray`
RA and Dec. coordinates in decimal degrees
make_plot : bool
Make a figure.
NMAX : int
If the catalog has more then `NMAX` entries, draw `NMAX` random
samples.
buff : float
Buffer in arcmin to add around each catalog point.
Returns
-------
area : float
Computed catalog area in square arcminutes
fig : `~matplotlib.figure.Figure`
Figure object returned if `make_plot==True`.
"""
points = np.array([ra, dec])*1.
center = np.mean(points, axis=1)
points = (points.T - center)*60. # arcmin
points[:, 0] *= np.cos(center[1]/180*np.pi)
hull = spatial.ConvexHull(points)
edge = points[hull.vertices, :]
#pbuff = 1
if len(ra) > NMAX:
rnd_idx = np.unique(np.cast[int](np.round(np.random.rand(NMAX)*len(ra))))
else:
rnd_idx = np.arange(len(ra))
poly = Point(points[rnd_idx[0], :]).buffer(buff)
for i, ix in enumerate(rnd_idx):
if verbose:
print(NO_NEWLINE + '{0} {1}'.format(i, ix))
poly = poly.union(Point(points[ix, :]).buffer(buff))
# Final (multi)Polygon
pjoin = poly.buffer(-buff)
if make_plot:
fig = plt.figure()
ax = fig.add_subplot(111)
if isinstance(pjoin, MultiPolygon):
for p_i in pjoin:
if isinstance(p_i.boundary, MultiLineString):
for s in p_i.boundary:
p = s.xy
ax.plot(p[0], p[1])
else:
p = p_i.boundary.xy
ax.plot(p[0], p[1])
else:
p_i = pjoin
if isinstance(p_i.boundary, MultiLineString):
for s in p_i.boundary:
p = s.xy
ax.plot(p[0], p[1])
else:
p = p_i.boundary.xy
ax.plot(p[0], p[1])
ax.scatter(points[rnd_idx, 0], points[rnd_idx, 1], alpha=0.1, marker='+')
ax.set_xlim(ax.get_xlim()[::-1])
ax.set_xlabel(r'$\Delta$RA ({0:.5f})'.format(center[0]))
ax.set_ylabel(r'$\Delta$Dec. ({0:.5f})'.format(center[1]))
ax.set_title('Total area: {0:.1f} arcmin$^2$'.format(pjoin.area))
ax.grid()
fig.tight_layout(pad=0.1)
return pjoin.area, fig
else:
return pjoin.area | bcc8f834b5ea7404213c629c2837e45ba42cd690 | 8,116 |
from operator import and_
def user_page(num_page=1):
"""Page with list of users route."""
form = SearchUserForm(request.args, meta={'csrf': False})
msg = False
if form.validate():
search_by = int(request.args.get('search_by'))
order_by = int(request.args.get('order_by'))
search_string = str(request.args.get('search'))
if len(search_string) >= MIN_SEARCH_STR:
condition = user_search(search_string, search_by)
else:
condition = ""
if search_string != "":
msg = True
order_list = [User.id, User.role_id, User.delete_date]
order = order_list[order_by]
search_users = db.session.query(User, Role).filter(and_(
User.role_id == Role.id, condition)).order_by(order).paginate(
per_page=PAGINATE_PAGE, page=num_page, error_out=True)
if msg:
flash("Search string is too small", category="danger")
return render_template('user_page.html', form=form, users=search_users,
get="?" + urlencode(request.args))
else:
users = db.session.query(User, Role).filter(
User.role_id == Role.id).order_by(User.id).paginate(
per_page=PAGINATE_PAGE, page=num_page, error_out=True)
return render_template('user_page.html', form=form, users=users,
get="?" + urlencode(request.args)) | 3acf9cc4a274de456ad5ea0ee609857f550867ee | 8,117 |
def validate_input(data: ConfigType) -> dict[str, str] | None:
"""Validate the input by the user."""
try:
SIAAccount.validate_account(data[CONF_ACCOUNT], data.get(CONF_ENCRYPTION_KEY))
except InvalidKeyFormatError:
return {"base": "invalid_key_format"}
except InvalidKeyLengthError:
return {"base": "invalid_key_length"}
except InvalidAccountFormatError:
return {"base": "invalid_account_format"}
except InvalidAccountLengthError:
return {"base": "invalid_account_length"}
except Exception as exc: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception from SIAAccount: %s", exc)
return {"base": "unknown"}
if not 1 <= data[CONF_PING_INTERVAL] <= 1440:
return {"base": "invalid_ping"}
return validate_zones(data) | 56ced7cf0d3b02a484910b599c266e928303ddd7 | 8,118 |
import importlib
def file_and_path_for_module(modulename):
"""Find the file and search path for `modulename`.
Returns:
filename: The filename of the module, or None.
path: A list (possibly empty) of directories to find submodules in.
"""
filename = None
path = []
try:
spec = importlib.util.find_spec(modulename)
except ImportError:
pass
else:
if spec is not None:
if spec.origin != "namespace":
filename = spec.origin
path = list(spec.submodule_search_locations or ())
return filename, path | 4e8d0edb3a5844bb3c523aed66f8eb7f0c646aaa | 8,119 |
def hello_page(request):
"""Simple view to say hello.
It is used to check the authentication system.
"""
text = "Welcome to test_project"
if not request.user.is_anonymous:
text = "Welcome '%s' to test_project" % request.user.username
return HttpResponse(text, content_type='text/plain') | fee98ccca3c89d1f110bc828521cbc26af004325 | 8,120 |
from typing import Dict
def hash_all(bv: Binary_View) -> Dict[str, Function]:
"""
Iterate over every function in the binary and calculate its hash.
:param bv: binary view encapsulating the binary
:return: a dictionary mapping hashes to functions
"""
sigs = {}
for function in bv.functions:
sigs[hash_function(function)] = function
return sigs | 3a3a046c9c7fe786c55d3e0d3993679f8ee71465 | 8,121 |
from typing import Union
from typing import List
def apply_deformation(
deformation_indices: Union[List[bool], np.ndarray], bsf: np.ndarray
) -> np.ndarray:
"""Return Hadamard-deformed bsf at given indices."""
n = len(deformation_indices)
deformed = np.zeros_like(bsf)
if len(bsf.shape) == 1:
if bsf.shape[0] != 2*n:
raise ValueError(
f'Deformation index length {n} does not match '
f'bsf shape {bsf.shape}, which should be {(2*n,)}'
)
for i, deform in enumerate(deformation_indices):
if deform:
deformed[i] = bsf[i + n]
deformed[i + n] = bsf[i]
else:
deformed[i] = bsf[i]
deformed[i + n] = bsf[i + n]
else:
if bsf.shape[1] != 2*n:
raise ValueError(
f'Deformation index length {n} does not match '
f'bsf shape {bsf.shape}, which should be '
f'{(bsf.shape[0], 2*n)}.'
)
for i, deform in enumerate(deformation_indices):
if deform:
deformed[:, i] = bsf[:, i + n]
deformed[:, i + n] = bsf[:, i]
else:
deformed[:, i] = bsf[:, i]
deformed[:, i + n] = bsf[:, i + n]
return deformed | 87e6a3403190f1139fef223d374df5f7e5f59257 | 8,122 |
def index(request):
"""Return the index.html file"""
return render(request, 'index.html') | 7ac6c9418e332aebe29a25c3954152adca3f7716 | 8,123 |
import logging
def execute(cx, stmt, args=(), return_result=False):
"""
Execute query in 'stmt' over connection 'cx' (with parameters in 'args').
Be careful with query statements that have a '%' in them (say for LIKE)
since this will interfere with psycopg2 interpreting parameters.
Printing the query will not print AWS credentials IF the string used
matches "CREDENTIALS '[^']*'".
So be careful or you'll end up sending your credentials to the logfile.
"""
with cx.cursor() as cursor:
executable_statement = mogrify(cursor, stmt, args)
printable_stmt = remove_password(executable_statement.decode())
logger.debug("QUERY:\n%s\n;", printable_stmt) # lgtm[py/clear-text-logging-sensitive-data]
with Timer() as timer:
cursor.execute(executable_statement)
if cursor.rowcount is not None and cursor.rowcount > 0:
logger.debug(
"QUERY STATUS: %s [rowcount=%d] (%s)", cursor.statusmessage, cursor.rowcount, timer
)
else:
logger.debug("QUERY STATUS: %s (%s)", cursor.statusmessage, timer)
if cx.notices and logger.isEnabledFor(logging.DEBUG):
for msg in cx.notices:
logger.debug("QUERY " + msg.rstrip("\n"))
del cx.notices[:]
if return_result:
return cursor.fetchall() | eaf02dd3177d56d7ca313cec476f3b3b110ee007 | 8,124 |
from typing import Optional
from typing import Iterable
from typing import Dict
from typing import Type
import click
def parse_custom_builders(builders: Optional[Iterable[str]]) -> Dict[str, Type[AbstractBuilder]]:
"""
Parse the custom builders passed using the ``--builder NAME`` option on the command line.
:param builders:
"""
custom_builders: Dict[str, Type[AbstractBuilder]] = {}
if builders is None:
return custom_builders
entry_points = get_entry_points()
for builder_name in builders:
if builder_name not in entry_points:
raise click.BadArgumentUsage(
f"Unknown builder {builder_name!r}. \n"
f"Is it registered as an entry point under 'whey.builder'?"
)
else:
custom_builders[builder_name] = entry_points[builder_name].load()
return custom_builders | 95216d12dfeacf319464b4f14be249ab3f12f10a | 8,125 |
def construct_user_rnn_inputs(document_feature_size=10,
creator_feature_size=None,
user_feature_size=None,
input_reward=False):
"""Returns user RNN inputs.
Args:
document_feature_size: Integer, length of document features.
creator_feature_size: Integer or None, length of creator features. If None,
no features about creators will be input.
user_feature_size: Integer or None, length of user features. If None, no
features about users will be input.
input_reward: Boolean, whether to input previous reward to RNN layer.
"""
# Previous consumed document.
rnn_input_doc_feature = tf.keras.layers.Input(
shape=(None, document_feature_size), name='user_consumed_doc_feature')
merged_embedding = rnn_input_doc_feature
inputs = [rnn_input_doc_feature]
# Previous consumed document-associated creator.
if creator_feature_size is not None:
# This vector includes creator's observable features and/or creator's hidden
# states inferred by creator model.
merged_embedding, inputs = _merge_inputs(
(None, creator_feature_size), 'user_consumed_doc-creator_feature',
merged_embedding, inputs)
# User current context.
if user_feature_size is not None:
merged_embedding, inputs = _merge_inputs(
(None, user_feature_size), 'user_current_feature', merged_embedding,
inputs)
# Previous reward.
if input_reward:
merged_embedding, inputs = _merge_inputs((None, 1), 'user_previous_reward',
merged_embedding, inputs)
return merged_embedding, inputs | cdc42e86ff7fee9a7487d05badeae1ef995a3357 | 8,126 |
import numpy as np
def numpy_read(DATAFILE, BYTEOFFSET, NUM, PERMISSION, DTYPE):
"""
Read NumPy-compatible binary data.
Modeled after MatSeis function read_file in util/waveread.m.
"""
f = open(DATAFILE, PERMISSION)
f.seek(BYTEOFFSET, 0)
data = np.fromfile(f, dtype=np.dtype(DTYPE), count=NUM)
f.close()
return data | 9fc4b2de3eecefc649cb78fe7d8b545a09b8f786 | 8,127 |
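A small usage sketch of the reader above; the file name, offset, and dtype string are illustrative assumptions:

# Read 100 little-endian 32-bit floats starting 512 bytes into the file
samples = numpy_read('waveform.bin', BYTEOFFSET=512, NUM=100, PERMISSION='rb', DTYPE='<f4')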
from typing import Optional
def _process_pmid(s: str, sep: str = '|', prefix: str = 'pubmed:') -> Optional[str]:
    """Filter for PubMed ids.
    :param s: string of PubMed ids
    :param sep: separator between PubMed ids
    :param prefix: prefix identifying a PubMed id
    :return: the first PubMed id found, or None
    """
for identifier in s.split(sep):
identifier = identifier.strip()
if identifier.startswith(prefix):
return identifier | 9a1fc49bf570c81f10b6b5470620d7fc0b54275e | 8,128 |
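A quick usage sketch of the filter above:

assert _process_pmid('pubmed:12345 | doi:10.1000/xyz') == 'pubmed:12345'
assert _process_pmid('doi:10.1000/xyz') is None  # nothing with the expected prefix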
import ast
def _get_import(name, module: ast.Module):
"""
get from import by name
"""
for stm in ast.walk(module):
if isinstance(stm, ast.ImportFrom):
for iname in stm.names:
if isinstance(iname, ast.alias):
if iname.name == name:
return 'from ' + str(stm.module) + ' import ' + name
if isinstance(stm, ast.Import):
pass
return None | bc33a882c65f7fe44d446376db3a71631629ff04 | 8,129 |
import os
import pandas as pd
def load_FIPS_data():
""" Load FIPS ref table """
directory = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
filename = os.path.join(directory, 'ref_data', 'FIPS_ref_data.csv')
df = pd.read_csv(filename)
df['fips'] = df['fips'].astype(str)
return df | e6e819cb8e820b96af2850f23b9f69af6fa4136e | 8,130 |
from typing import Optional
from typing import Tuple
def number_of_qucosa_metadata_in_elasticsearch(
host: str = SLUB_ELASTICSEARCH_SERVER_URL,
http_auth: Optional[Tuple[str, str]] = None,
index_name: str = "fulltext_qucosa",
) -> int:
"""Return the number of qucosa documents currently available at the SLUB elastic search server.
Parameters
----------
host: str = SLUB_ELASTICSEARCH_SERVER_URL
The hostname of the ElasticSearch server
http_auth: Optional[Tuple[str, str]]
Http basic auth parameters as tuple of username and password. If http_auth is None, but environment variables
`SLUB_ELASTICSEARCH_SERVER_USER` and `SLUB_ELASTICSEARCH_SERVER_PASSWORD` are set, then these are used as
username and password.
index_name: str = "fulltext_qucosa"
The name of the ElasticSearch index to be queried.
Returns
-------
int
the number of qucosa documents
"""
es_server = _initialize_elasticsearch_connection(host, http_auth)
return es_server.count(index=index_name, body={"query": {"match_all": {}}})["count"] | 9e3628998f7c93d12b4855ec2d2c88278b1a5e2a | 8,131 |
from typing import Tuple
from typing import Sequence
from typing import List
from typing import Any
from typing import get_type_hints
from typing import Optional
def codegen_py(typeit_schema: TypeitSchema,
top: bool = True,
indent: int = 4) -> Tuple[str, Sequence[str]]:
"""
    :param typeit_schema: A typeit schema wrapping a type (NamedTuple definition) to generate a source for.
:param top: flag to indicate that a toplevel structure is to be generated.
When False, a sub-structure of the toplevel structure is to be generated.
:param indent: keep indentation for source lines.
:return:
"""
typ = typeit_schema.typ
overrides = typeit_schema.overrides
wrappers = typeit_schema.sequence_wrappers
overrides_source: List[str] = []
if typ is None:
type_name = 'None'
elif typ is Any:
type_name = 'Any'
else:
type_name = typ.__name__
required_imports = [
'# ------- generated by typeit -------',
'from typing import Any, NamedTuple, Optional, Sequence',
]
wrapped_type_literal = ('Sequence[' * wrappers) + type_name + (']' * wrappers)
if typ in PythonPrimitives:
required_imports.extend([
'from typeit import TypeConstructor',
])
if wrappers:
generated_definitions = [
f'Main = {wrapped_type_literal}'
]
else:
generated_definitions = []
elif typ is Any:
required_imports.extend([
'from typeit import TypeConstructor',
])
generated_definitions = [
f'Main = {wrapped_type_literal}'
]
else:
required_imports.extend([
'from typeit import TypeConstructor',
])
ind = ' ' * indent
generated_definitions = [f'class {type_name}(NamedTuple):']
hints = get_type_hints(typ)
if not hints:
generated_definitions.extend([
f'{ind}...',
])
for field_name, field_type in hints.items():
# 1. Generate source code for the field
type_literal = literal_for_type(field_type)
if field_type not in BUILTIN_LITERALS_FOR_TYPES:
# field_type: Union[NamedTuple, Sequence]
# TODO: Sequence/List/PVector flag-based
folded_lists_count = type_literal.count('Sequence[')
if folded_lists_count:
# field_type: Sequence[T]
# traverse to the folded object
for __ in range(folded_lists_count):
field_type = field_type.__args__[0]
if field_type not in BUILTIN_LITERALS_FOR_TYPES:
sub, folded_overrides = codegen_py(
TypeitSchema(field_type, overrides, wrappers), False
)
generated_definitions.insert(0, f'{sub}{NEW_LINE}{NEW_LINE}')
overrides_source.extend(folded_overrides)
else:
# field_type: NamedTuple
# Generate a folded structure definition in the global scope
# and then use it for the current field
sub, folded_overrides = codegen_py(
TypeitSchema(field_type, overrides, wrappers), False
)
generated_definitions.insert(0, f'{sub}{NEW_LINE}{NEW_LINE}')
overrides_source.extend(folded_overrides)
generated_definitions.append(f'{ind}{field_name}: {type_literal}')
# 2. Check if the field included into overrides
field_override: Optional[str] = overrides.get(getattr(typ, field_name))
if field_override:
overrides_source.append(
f"{ind}{type_name}.{field_name}: '{field_override}',"
)
if top:
if wrappers:
type_literal = 'Main'
else:
type_literal = type_name
if overrides_source:
overrides_part = [
LINE_SKIP,
LINE_SKIP,
'overrides = {' +
NEW_LINE +
NEW_LINE.join(overrides_source) +
NEW_LINE +
'}'
]
constructor_part = f'TypeConstructor & overrides ^ {type_literal}'
else:
overrides_part = []
constructor_part = f'TypeConstructor ^ {type_literal}'
generated_definitions.extend(overrides_part)
constructor_serializer_def = (
f'mk_{inflection.underscore(type_literal)}, '
f'serialize_{inflection.underscore(type_literal)} = {constructor_part}'
)
generated_definitions.extend([
LINE_SKIP,
LINE_SKIP,
constructor_serializer_def,
LINE_SKIP,
])
# TODO: import Sequence/List/PVector flag-based
generated_definitions = ( required_imports
+ [LINE_SKIP, LINE_SKIP]
+ generated_definitions )
return NEW_LINE.join(generated_definitions), overrides_source | 55a58893cb05a21e1d0c9715de7bad73ceec8fe4 | 8,132 |
def _tuple_to_string(tup):
"""
Converts a tuple of pitches to a string
Params:
* tup (tuple): a tuple of pitch classes, like (11, 10, 5, 9, 3)
Returns:
* string: e.g., 'et593'
"""
def _convert(pitch):
pitch = mod_12(pitch)
if pitch not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11):
# should never happen
raise ValueError('unexpected pitch found: %s' % pitch)
if pitch == 10:
return 't'
elif pitch == 11:
return 'e'
else:
return str(pitch)
output = []
for pitch in tup:
output.append(_convert(pitch))
return ''.join(output) | 61ee32199b85fe5ec645887641d6b28ff701eabd | 8,133 |
def dashboard():
"""Logged in Dashboard screen."""
session["redis_test"] = "This is a session variable."
return render_template(
"dashboard.jinja2",
title="Flask-Session Tutorial.",
template="dashboard-template",
current_user=current_user,
body="You are now logged in!",
) | 48472e2ad8c3b81adab98524103959a812ab9b30 | 8,134 |
def choose_top_k(scores_flat, config):
"""Chooses the top-k beams as successors.
"""
next_beam_scores, word_indices = tf.nn.top_k(scores_flat, k=config.beam_width)
return next_beam_scores, word_indices | e8bbf86c8452b0b2153f591968370612986673e2 | 8,135 |
def train_valid_test_split(data, proportions='50:25:25'):
"""
Splits the data into 3 parts - training, validation and test sets
:param proportions: proportions for the split, like 2:1:1 or 50:30:20
:param data: preprocessed data
:return: X_train, Y_train, target_rtns_train, X_valid, Y_valid, target_rtns_valid, X_test, Y_test, target_rtns_test
"""
features = [c for c in data.columns if c not in ('ret','bin')]
n = len(data)
borders = [float(p) for p in proportions.split(':')]
borders = borders / np.sum(borders)
train_ids = (0, int(np.floor(n * borders[0])))
valid_ids = (train_ids[1] + 1, int(np.floor(n * np.sum(borders[:2]))))
test_ids = (valid_ids[1] + 1, n)
X_train = data[features].iloc[train_ids[0]:train_ids[1], :]
X_valid = data[features].iloc[valid_ids[0]:valid_ids[1], :]
X_test = data[features].iloc[test_ids[0]:test_ids[1], :]
Y_train = data.bin.iloc[train_ids[0]:train_ids[1]]
Y_valid = data.bin.iloc[valid_ids[0]:valid_ids[1]]
Y_test = data.bin.iloc[test_ids[0]:test_ids[1]]
target_rtns_train = data.ret.iloc[train_ids[0]:train_ids[1]]
target_rtns_valid = data.ret.iloc[valid_ids[0]:valid_ids[1]]
target_rtns_test = data.ret.iloc[test_ids[0]:test_ids[1]]
return X_train, Y_train, target_rtns_train, X_valid, Y_valid, target_rtns_valid, X_test, Y_test, target_rtns_test | b8a9d160860aea9c224b72af32ef843b43b44656 | 8,136 |
def basis_ders_on_quad_grid(knots, degree, quad_grid, nders, normalization):
"""
Evaluate B-Splines and their derivatives on the quadrature grid.
If called with normalization='M', this uses M-splines instead of B-splines.
Parameters
----------
knots : array_like
Knots sequence.
degree : int
Polynomial degree of B-splines.
quad_grid: 2D numpy.ndarray (ne,nq)
Coordinates of quadrature points of each element in 1D domain,
which can be given by quadrature_grid() or chosen arbitrarily.
nders : int
Maximum derivative of interest.
normalization : str
Set to 'B' for B-splines, and 'M' for M-splines.
Returns
-------
basis: 4D numpy.ndarray
Values of B-Splines and their derivatives at quadrature points in
each element of 1D domain. Indices are
. ie: global element (0 <= ie < ne )
. il: local basis function (0 <= il <= degree)
. id: derivative (0 <= id <= nders )
. iq: local quadrature point (0 <= iq < nq )
"""
# TODO: add example to docstring
ne,nq = quad_grid.shape
basis = np.zeros((ne, degree+1, nders+1, nq))
if normalization == 'M':
scaling = 1. / basis_integrals(knots, degree)
for ie in range(ne):
xx = quad_grid[ie, :]
for iq, xq in enumerate(xx):
span = find_span(knots, degree, xq)
ders = basis_funs_all_ders(knots, degree, xq, span, nders)
if normalization == 'M':
ders *= scaling[None, span-degree:span+1]
basis[ie, :, :, iq] = ders.transpose()
return basis | be1678ab5e758d9d7f9fa7710336fa892c46f9bf | 8,137 |
import opcode
def analyze_jumps(jumps):
"""takes the list of Jump tuples from group_jumps. returns JumpCmp.
fails if input is weird (tell me more).
"""
# todo: more of a decompile, AST approach here? look at uncompyle.
if jumps[-1].head is not None: raise BadJumpTable("last jump not an else")
if len(jumps) < 3: raise BadJumpTable("too few, what's the point")
head0 = jumps[0].head
if head0[-2].code != opcode.opmap['COMPARE_OP'] or head0[-2].arg != 2: raise BadJumpTable('cmp not ==',0)
if head0[-3].code != opcode.opmap['LOAD_CONST']: raise BadJumpTable('cmp right not LOAD_CONST',0)
def compare_head(headi, i):
if len(head0) != len(headi): raise BadJumpTable('length mismatch',i)
if headi[-2].code != opcode.opmap['COMPARE_OP'] or headi[-2].arg != 2: raise BadJumpTable('cmp not ==',i)
# todo below: it would be great if this didn't have to be a constant
if headi[-3].code != opcode.opmap['LOAD_CONST']: raise BadJumpTable('cmp right not LOAD_CONST',i)
if any(h0[1:]!=hi[1:] for h0,hi in zip(head0[:-3],headi[:-3])): raise BadJumpTable('preamble mismatch',i)
for i in range(1,len(jumps)-1): compare_head(jumps[i].head,i)
load_left = head0[:-3] # sans the const, sans the compare, sans the jump
const2offset = {j.head[-3].arg:j.head[0].pos for j in jumps[:-1]}
return JumpCmp(load_left, const2offset) | 3f8730c957ad89b649281cf615ff826529c55b3c | 8,138 |
def data(*args, **kwargs):
"""
The HTML <data> Element links a given content with a
machine-readable translation. If the content is time- or
date-related, the <time> must be used.
"""
return el('data', *args, **kwargs) | c948ea946b29369b78fbda0564a822d7b9bb0a06 | 8,139 |
import traceback
import os
def process_dir(dir, doc_type = 'Annual Return', parallel = False):
"""
Process all document directories in a directory.
Parameters
----------
dir : str
Relative path to directory containing the document directories
doc_type : str
Type of documents (default = 'Annual Return')
parallel : bool
Process directories in parallel for faster performance
Returns
-------
data_df : pandas.DataFrame
Dataframe containing information about all document directories
processed successfully
failed_df : pandas.DataFrame
Dataframe containing information about all document directories
processed unsuccessfully and their corresponding traceback
"""
doc_data_list = []
failed_list = []
if parallel:
completed = 0
def worker(input, output, failed):
nonlocal completed
for doc_dir in iter(input.get, 'STOP'):
completed += 1
try:
doc_data = process_doc_dir(doc_dir, doc_type)
assert (isinstance(doc_data, pd.DataFrame) or isinstance(doc_data, pd.Series))
output.put(doc_data)
except:
exception = traceback.format_exc(7)
failed.put((doc_dir, exception))
print(f'\t\t****{mp.current_process().name} is at iteration {completed}****')
NUMBER_OF_PROCESSES = mp.cpu_count()
doc_list = [f'{dir}/{doc_dir}' for doc_dir in os.listdir(dir) if os.path.isdir(f'{dir}/{doc_dir}')]
num_doc = len(doc_list)
print(f"\t\t****Total documents to be processed: {num_doc}****\n\n")
task_manager = mp.Manager()
done_manager = mp.Manager()
failed_manager = mp.Manager()
task_queue = task_manager.Queue()
done_queue = done_manager.Queue()
failed_queue = failed_manager.Queue()
for doc_dir in doc_list:
task_queue.put(doc_dir)
for i in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
process_list = [mp.Process(name=f'Process {str(i)}',
target=worker,
args=(task_queue, done_queue, failed_queue))
for i in range(NUMBER_OF_PROCESSES)]
for process in process_list:
process.start()
for process in process_list:
process.join()
while not done_queue.empty():
doc_data_list.append(done_queue.get())
while not failed_queue.empty():
failed_list.append(failed_queue.get())
else:
doc_list = [f'{dir}/{doc_dir}'.replace('//', '/') for doc_dir in os.listdir(dir) if os.path.isdir(f'{dir}/{doc_dir}'.replace('//', '/'))]
num_doc = len(doc_list)
print(f"\t\t****Total documents to be processed: {num_doc}****\n\n")
for count, doc_dir in enumerate(doc_list):
print(f'\t\t****{count} items processed out of {num_doc}****')
try:
doc_data = process_doc_dir(doc_dir, doc_type = doc_type)
doc_data_list.append(doc_data)
except:
exception = traceback.format_exc(7)
failed_list.append((doc_dir, exception))
if len(failed_list) != 0:
failed_df = pd.Series(dict(failed_list))
else:
failed_df = pd.Series(['There were no exceptions'])
if len(doc_data_list) != 0:
data_df = pd.concat(doc_data_list, axis = 0, sort=False)
else:
data_df = pd.Series(['No documents were scraped successfully'])
print('\t\t****Task completed****')
print(data_df)
return (data_df, failed_df) | 5d9dbb2d2df553b26ad94c306d282fa52c81baf1 | 8,140 |
from copy import copy
def get_crops(nodules, fmt='raw', nodule_shape=(32, 64, 64), batch_size=20, share=0.5, histo=None,
variance=(36, 144, 144), hu_lims=(-1000, 400), **kwargs):
""" Get pipeline that performs preprocessing and crops cancerous/non-cancerous nodules in
a chosen proportion.
Parameters
----------
nodules : pd.DataFrame
contains:
- 'seriesuid': index of patient or series.
- 'z','y','x': coordinates of nodules center.
- 'diameter': diameter, in mm.
fmt : str
can be either 'raw', 'blosc' or 'dicom'.
nodule_shape : tuple, list or ndarray of int
crop shape along (z,y,x).
batch_size : int
number of nodules in batch generated by pipeline.
share : float
share of cancer crops in the batch.
histo : tuple
:func:`numpy.histogramdd` output.
Used for sampling non-cancerous crops
variance : tuple, list or ndarray of float
variances of normally distributed random shifts of
nodules' start positions
hu_lims : tuple, list of float
seq of len=2, representing limits of hu-trimming in normalize_hu-action.
**kwargs
spacing : tuple
(z,y,x) spacing after resize.
shape : tuple
(z,y,x) shape after crop/pad.
method : str
interpolation method ('pil-simd' or 'resize').
See :func:`~radio.CTImagesBatch.resize`.
order : None or int
order of scipy-interpolation (<=5), if used.
padding : str
mode of padding, any supported by :func:`numpy.pad`.
Returns
-------
pipeline
"""
# update args of unify spacing
args_unify_spacing = copy(kwargs_default)
args_unify_spacing.update(kwargs)
# set up other args-dicts
args_sample_nodules = dict(nodule_size=nodule_shape, batch_size=batch_size, share=share,
histo=histo, variance=variance)
# set up the pipeline
pipeline = (Pipeline()
.load(fmt=fmt)
.fetch_nodules_info(nodules=nodules)
.unify_spacing(**args_unify_spacing)
.create_mask()
.normalize_hu(min_hu=hu_lims[0], max_hu=hu_lims[1])
.sample_nodules(**args_sample_nodules)
.run(lazy=True, batch_size=RUN_BATCH_SIZE, shuffle=True)
)
return pipeline | 51bc314a8675790f83d0b6b7276e094986317187 | 8,141 |
def get_dict_from_args(args):
"""Extracts a dict from task argument string."""
d = {}
if args:
for k,v in [p.strip().split('=') for p in args.split(',')]:
d[k] = v
return d | 8fb05329f6119393f94215808c6ab9b3116ec759 | 8,142 |
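A quick usage sketch of the parser above:

assert get_dict_from_args('k1=v1, k2=v2') == {'k1': 'v1', 'k2': 'v2'}
assert get_dict_from_args('') == {}  # empty or None argument strings yield an empty dict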
import warnings
import numpy as np
import tensorflow as tf
import cupy as cp
from netver.utils.cuda_code import cuda_code
def multi_area_propagation_gpu(input_domain, net_model, thread_number=32):
"""
Propagation of the input domain through the network to obtain the OVERESTIMATION of the output bound.
The process is performed applying the linear combination node-wise and the necessary activation functions.
The process is on GPU, completely parallelized on NVIDIA CUDA GPUs and c++ code.
Parameters
----------
input_domain : list
the input domain expressed as a 3-dim matrix. (a) a list of list for each splitted domain;
(b) a list of bound for each input node and (c) a list of two element for the node, lower and upper
net_model : tf.keras.Model
tensorflow model to analyze, the model must be formatted in the 'tf.keras.Model(inputs, outputs)' format
thread_number : int
        number of CUDA threads to use for each CUDA block; the choice is free and does not affect
        the results, but can affect performance
Returns:
--------
reshaped_bound : list
the propagated bound in the same format of the input domain (3-dim)
"""
# Ignore the standard warning from CuPy
warnings.filterwarnings("ignore")
# Import the necessary library for the parallelization (Cupy) and also the c++ CUDA code.
# Load network shape, activations and weights
layer_sizes = []
activations = []
full_weights = np.array([])
full_biases = np.array([])
    # Iterate over each layer of the network, excluding the input (tf2 stuff)
for layer in net_model.layers[1:]:
# Obtain the activation function list
if layer.activation == tf.keras.activations.linear: activations.append(0)
elif layer.activation == tf.keras.activations.relu: activations.append(1)
elif layer.activation == tf.keras.activations.tanh: activations.append(2)
elif layer.activation == tf.keras.activations.sigmoid: activations.append(3)
        # Obtain the network shape as a list
layer_sizes.append(layer.input_shape[1])
# Obtain all the weights for paramters and biases
weight, bias = layer.get_weights()
full_weights = np.concatenate((full_weights, weight.T.reshape(-1)))
full_biases = np.concatenate((full_biases, bias.reshape(-1)))
    # Fix the last layer size
layer_sizes.append( net_model.output.shape[1] )
# Initialize the kernel loading the CUDA code
my_kernel = cp.RawKernel(cuda_code, 'my_kernel')
    # Convert all the data to cupy arrays before the kernel call
max_layer_size = max(layer_sizes)
results_cuda = cp.zeros(layer_sizes[-1] * 2 * len(input_domain), dtype=cp.float32)
layer_sizes = cp.array(layer_sizes, dtype=cp.int32)
activations = cp.array(activations, dtype=cp.int32)
input_domain = cp.array(input_domain, dtype=cp.float32)
full_weights = cp.array(full_weights, dtype=cp.float32)
full_biases = cp.array(full_biases, dtype=cp.float32)
# Define the number of CUDA block
block_number = int(len(input_domain) / thread_number) + 1
# Create and launch the kernel, wait for the sync of all threads
kernel_input = (input_domain, len(input_domain), layer_sizes, len(layer_sizes), full_weights, full_biases, results_cuda, max_layer_size, activations)
my_kernel((block_number, ), (thread_number, ), kernel_input)
cp.cuda.Stream.null.synchronize()
# Reshape the results and convert in numpy array
reshaped_bound = cp.asnumpy(results_cuda).reshape((len(input_domain), net_model.layers[-1].output_shape[1], 2))
#
return reshaped_bound | a81aad5e05b7054c5b7fc5016941ffc6abea5948 | 8,143 |
def opensslCmsSignedDataCreate( conveyedInfoFile, cert, privateKey ):
"""Create a signed CMS encoded object given a conveyed-info file and
base64 encode the response."""
opensslCmdArgs = [ "openssl", "cms", "-sign", "-in", conveyedInfoFile,
"-signer", cert,
"-inkey", privateKey,
"-outform", "der", "-nodetach" ]
conveyedInfoCmsSignedDerBase64 = runOpensslCmd( opensslCmdArgs, [ "base64" ] )
return conveyedInfoCmsSignedDerBase64 | 905ddbec7c252de6169f4fdedab19e0c6818fb39 | 8,144 |
import os
def compute_kv(config):
"""Parse log data and calling draw"""
result = {}
for _cfg in config['data']:
data = data_parser.log_kv(_cfg['path'], _cfg['phase'], _cfg['keys'])
# clip from start idx
if 'start_iter' in _cfg:
start_idx = 0
for idx, iteration in enumerate(data['iter']):
if iteration >= _cfg['start_iter']:
start_idx = idx
break
data = utils.process_keys(utils.clip, data, start_idx)
# downsampling all points including iter
if 'iter_invl' in _cfg:
invl = int(_cfg['iter_invl'] / (data['iter'][1]-data['iter'][0]))
assert invl >= 1
data = utils.process_keys(utils.downsampling, data, invl)
res_list = {}
# compute max
if _cfg['task'] == 'max':
idx, value = _kv_max(data, _cfg['sort_key'])
# broadcast to other key
res_list['iter'] = data['iter'][idx]
for key in _cfg['keys']:
res_list[key] = data[key][idx]
elif _cfg['task'] == 'min':
idx, value = _kv_min(data, _cfg['sort_key'])
# broadcast to other key
res_list['iter'] = data['iter'][idx]
for key in _cfg['keys']:
res_list[key] = data[key][idx]
# print
print(_cfg['path'])
for res in res_list:
print(' ', res, res_list[res])
# add-in result
result[os.path.basename(_cfg['path'])] = data
return result | 074563650c701820a8c8cf88b5cb56e25259f164 | 8,145 |
def change_app_header(uri, headers, body):
""" Add Accept header for preview features of Github apps API """
headers["Accept"] = "application/vnd.github.machine-man-preview+json"
return uri, headers, body | 3610d1d482e057ba73a1901aed8430ff35d98f3b | 8,146 |
def fib_fail(n: int) -> int:
"""doesn't work because it's missing the base case"""
return fib_fail(n - 1) + fib_fail(n - 2) | 6e8138b7ce330c9ab191367e3911fe8146240c25 | 8,147 |
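For contrast, a sketch of the same recursion with the missing base case added:

def fib_fixed(n: int) -> int:
    """works: n < 2 stops the recursion"""
    if n < 2:
        return n
    return fib_fixed(n - 1) + fib_fixed(n - 2)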
def int2str(num, radix=10, alphabet=BASE85):
"""helper function for quick base conversions from integers to strings"""
return NumConv(radix, alphabet).int2str(num) | 6a7b6e7e090cccc20a0e0e3196e81f79cc5dabc5 | 8,148 |
def randomize_onesample(a, n_iter=10000, h_0=0, corrected=True,
random_seed=None, return_dist=False):
"""Nonparametric one-sample T test through randomization.
On each iteration, randomly flip the signs of the values in ``a``
and test the mean against 0.
If ``a`` is two-dimensional, it is assumed to be shaped as
(n_observations, n_tests), and a max-statistic based approach
is used to correct the p values for multiple comparisons over tests.
Parameters
----------
a : array-like
input data to test
n_iter : int
number of randomization iterations
h_0 : float, broadcastable to tests in a
null hypothesis for the group mean
corrected : bool
correct the p values in the case of multiple tests
random_seed : int or None
seed to use for random number generator
return_dist : bool
if True, return the null distribution of t statistics
Returns
-------
obs_t : float or array of floats
group mean T statistic(s) corresponding to tests in input
obs_p : float or array of floats
one-tailed p value that the population mean is greater than h_0
(1 - percentile under the null)
dist : ndarray, optional
if return_dist is True, the null distribution of t statistics
"""
a = np.asarray(a, np.float)
if a.ndim < 2:
a = a.reshape(-1, 1)
n_samp, n_test = a.shape
a -= h_0
rs = np.random.RandomState(random_seed)
flipper = (rs.uniform(size=(n_samp, n_iter)) > 0.5) * 2 - 1
flipper = (flipper.reshape(n_samp, 1, n_iter) *
np.ones((n_samp, n_test, n_iter), int))
rand_dist = a[:, :, None] * flipper
err_denom = np.sqrt(n_samp - 1)
std_err = rand_dist.std(axis=0) / err_denom
t_dist = rand_dist.mean(axis=0) / std_err
obs_t = a.mean(axis=0) / (a.std(axis=0) / err_denom)
if corrected:
obs_p = 1 - percentile_score(t_dist.max(axis=0), obs_t) / 100
else:
obs_p = []
for obs_i, null_i in zip(obs_t, t_dist):
obs_p.append(1 - percentile_score(null_i, obs_i) / 100)
obs_p = np.array(obs_p)
if a.shape[1] == 1:
obs_t = np.asscalar(obs_t)
obs_p = np.asscalar(obs_p)
t_dist = t_dist.squeeze()
if return_dist:
return obs_t, obs_p, t_dist
return obs_t, obs_p | 2af8b9592f82b14dda1f59e41663e7253eb7dbe8 | 8,149 |
from typing import Optional
from typing import Dict
def git_get_project(
directory: str, token: Optional[str] = None, revisions: Optional[Dict[str, str]] = None
) -> BuiltInCommand:
"""
Create an Evergreen command to clones the tracked project and check current revision.
Also, applies patches if the task was created by a patch build.
:param directory: Directory to clone into.
:param token: Use a token to clone instead of ssh key.
:param revisions: Map of revisions to use for modules.
"""
params = {
"directory": directory,
}
add_if_exists(params, "token", token)
add_if_exists(params, "revisions", revisions)
return BuiltInCommand("git.get_project", params) | b4cc4e3335c6c91556d02c76761082d95baee775 | 8,150 |
import sys
def main(args=None):
"""ec2mc script's entry point
Args:
args (list): Arguments for argparse. If None, set to sys.argv[1:].
"""
if args is None:
args = sys.argv[1:]
try:
# Classes of available commands in the commands directory
commands = [
configure_cmd.Configure,
aws_setup_cmd.AWSSetup,
server_cmds.Server,
servers_cmds.Servers,
address_cmds.Address,
user_cmds.User
]
# Use argparse to turn args into namedtuple of arguments
cmd_args = _argv_to_cmd_args(args, commands)
# If basic configuration being done, skip config validation
if cmd_args.command != "configure":
# Validate config's config.json
validate_config.main()
# Validate config's aws_setup.json and YAML instance templates
validate_setup.main()
# Create an instance from the appropriate command class
chosen_cmd = next(cmd(cmd_args) for cmd in commands
if cmd.cmd_name() == cmd_args.command)
# Validate IAM user has needed permissions to use the command
halt.assert_empty(chosen_cmd.blocked_actions(cmd_args))
# Use the command
chosen_cmd.main(cmd_args)
except SystemExit:
return False
return True | a59c99d1c8f63f64ef69d45b7e19f239584ed7b0 | 8,151 |
import cv2
import numpy as np
def body_contour(binary_image):
"""Helper function to get body contour"""
contours = find_contours(binary_image)
areas = [cv2.contourArea(cnt) for cnt in contours]
body_idx = np.argmax(areas)
return contours[body_idx] | 0ccfa7340d492f89c6c0090c296e7aede379754a | 8,152 |
def rule_like(rule, pattern):
"""
Check if JsonLogic rule matches a certain 'pattern'.
Pattern follows the same structure as a normal JsonLogic rule
with the following extensions:
- '@' element matches anything:
1 == '@'
"jsonlogic" == '@'
[1, 2] == '@'
{'+': [1, 2]} == '@'
{'+': [1, 2]} == {'@': [1, 2]}
{'+': [1, 2]} == {'+': '@'}
{'+': [1, 2]} == {'+': ['@', '@']}
{'+': [1, 2]} == {'@': '@'}
- 'number' element matches any numeric value:
1 == 'number'
2.34 == 'number'
[1, 2] == ['number', 'number']
{'+': [1, 2]} == {'+': ['number', 'number']}
- 'string' element matches any string value:
"name" == 'string'
        {'cat': ["json", "logic"]} == {'cat': ['string', 'string']}
- 'array' element matches an array of any length:
[] == 'array'
        [1, 2, 3] == 'array'
{'+': [1, 2]} == {'+': 'array'}
Use this method to make sure JsonLogic rule is correctly constructed.
"""
if pattern == rule:
return True
if pattern == '@':
return True
if pattern == 'number':
return _is_numeric(rule)
if pattern == 'string':
return _is_string(rule)
if pattern == "array":
return _is_array(rule)
if is_logic(pattern):
if is_logic(rule):
# Both pattern and rule are a valid JsonLogic rule, go deeper
pattern_operator = _get_operator(pattern)
rule_operator = _get_operator(rule)
if pattern_operator == '@' or pattern_operator == rule_operator:
# Operators match, go deeper and try matching values
return rule_like(
_get_values(rule, rule_operator, normalize=False),
_get_values(pattern, pattern_operator, normalize=False))
return False # All above assumptions failed
if _is_array(pattern):
if _is_array(rule):
# Both pattern and rule are arrays, go deeper
if len(pattern) == len(rule):
# Length of pattern and rule arrays are the same,
# go deeper and try matching each value
return all(
rule_like(rule_elem, pattern_elem)
for rule_elem, pattern_elem in zip(rule, pattern))
return False # All above assumptions failed
return False | cd058d799cdee4d548c3e2075e1555ea28e594f1 | 8,153 |
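A small usage sketch for `rule_like` above, restating the docstring examples as assertions (it assumes the module's private helpers such as `is_logic` and `_is_numeric` are available alongside it).
assert rule_like({'+': [1, 2]}, {'+': ['number', 'number']})
assert rule_like({'+': [1, 2]}, {'@': '@'})
assert rule_like({'cat': ['json', 'logic']}, {'cat': ['string', 'string']})
assert not rule_like('name', 'number')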
def apt_repo(module, *args):
"""run apt-repo with args and return its output"""
# make args list to use in concatenation
args = list(args)
rc, out, err = module.run_command([APT_REPO_PATH] + args)
if rc != 0:
module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err))
return out | d4572cb9d586b973d461e4ac33709e582c26dda7 | 8,154 |
from typing import Optional
from typing import Dict
from typing import Any
from urllib.parse import urlencode
import httpx
async def get_data(
*,
config: Box,
region: Region,
start: Optional[int] = None,
end: Optional[int] = None,
) -> Dict[Any, Any]:
"""Return a new consumer token."""
lookup = f"awattar.{region.name.lower()}"
awattar_config = config[lookup]
endpoint = f"{awattar_config.host}{awattar_config.url}" + "{}"
params = {}
if start:
params["start"] = str(start)
if end:
params["end"] = str(end)
if params:
url = endpoint.format("?" + urlencode(params))
else:
url = endpoint.format("")
timeout = 10.0
log.debug(f"Awattar URL: {url}")
try:
async with httpx.AsyncClient() as client:
response = await client.get(url, timeout=timeout)
except Exception as e:
log.error(f"Caught an exception while fetching data from the Awattar API: {e}")
raise
try:
data = response.json()
except Exception as e:
log.error(f"Could not JSON decode the Awattar response: {e}")
raise
return data | ed3795e1ae85f7bd65ce2f71fd922c261e993452 | 8,155 |
def get_rasterization_params() -> RasterizationParams:
"""
Construct the RasterizationParams namedtuple
from the static configuration file
:return: the rasterization parameters
"""
if cfg is None:
load_cfg()
# get rasterization section
rasterization_dict = cfg[compute_dsm_tag][rasterization_tag]
rasterization_params = RasterizationParams(*rasterization_dict.values())
return rasterization_params | 9adea0ffd838cbf3425cad5a0ab30bfe92829bc7 | 8,156 |
def rain_attenuation_probability(lat, lon, el, hs=None, Ls=None, P0=None):
"""
The following procedure computes the probability of non-zero rain
attenuation on a given slant path Pr(Ar > 0).
Parameters
----------
lat : number, sequence, or numpy.ndarray
Latitudes of the receiver points
lon : number, sequence, or numpy.ndarray
Longitudes of the receiver points
el : sequence, or number
Elevation angle (degrees)
hs : number, sequence, or numpy.ndarray, optional
        Height above mean sea level of the earth station (km). If local data for
the earth station height above mean sea level is not available, an
estimate is obtained from the maps of topographic altitude
given in Recommendation ITU-R P.1511.
Ls : number, sequence, or numpy.ndarray, optional
Slant path length from the earth station to the rain height (km). If
data about the rain height is not available, this value is estimated
automatically using Recommendation ITU-R P.838
P0 : number, sequence, or numpy.ndarray, optional
Probability of rain at the earth station, (0 ≤ P0 ≤ 1)
Returns
-------
p: Quantity
Probability of rain attenuation on the slant path (%)
References
----------
[1] Propagation data and prediction methods required for the design of
Earth-space telecommunication systems:
https://www.itu.int/dms_pubrec/itu-r/rec/p/R-REC-P.618-12-201507-I!!PDF-E.pdf
"""
type_output = get_input_type(lat)
lat = prepare_input_array(lat)
lon = prepare_input_array(lon)
lon = np.mod(lon, 360)
el = prepare_quantity(prepare_input_array(el), u.deg, 'Elevation angle')
    hs = prepare_quantity(
        hs, u.km, 'Height above mean sea level of the earth station')
    Ls = prepare_quantity(
        Ls, u.km, 'Slant path length from the earth station to the rain height')
P0 = prepare_quantity(P0, u.pct, 'Point rainfall rate')
val = __model.rain_attenuation_probability(lat, lon, el, hs, Ls, P0)
return prepare_output_array(val, type_output) * 100 * u.pct | 728879dc2b51de813f8e1c83a99a8117883c423f | 8,157 |
import os
import glob
def discover(paths=None):
"""Get the full list of files found in the registered folders
Args:
paths (list, Optional): directories which host preset files or None.
When None (default) it will list from the registered preset paths.
Returns:
list: valid .json preset file paths.
"""
presets = []
for path in paths or preset_paths():
path = os.path.normpath(path)
if not os.path.isdir(path):
continue
# check for json files
glob_query = os.path.abspath(os.path.join(path, "*.json"))
filenames = glob.glob(glob_query)
for filename in filenames:
            # skip private files (glob returns full paths, so check the basename)
            if os.path.basename(filename).startswith("_"):
                continue
# check for file size
if not check_file_size(filename):
log.warning("Filesize is smaller than 1 byte for file '%s'",
filename)
continue
if filename not in presets:
presets.append(filename)
return presets | c793fb1b6096cc99f5a5f337517a34b34008e71f | 8,158 |
import itertools
def largets_prime_factor(num):
"""
Returns the largest prime factor of num.
"""
prime_factors = []
for n in itertools.count(2):
if n > num:
break
if num%n == 0:
prime_factors.append(n)
            while (num % n == 0):
                num = num // n  # integer division keeps num exact for large inputs
return max(prime_factors) | 12100b6cdc2e0553295c1803e699544aa930bbfb | 8,159 |
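A quick check of the function above, using the classic Project Euler #3 warm-up value.
# 13195 = 5 * 7 * 13 * 29, so the largest prime factor is 29.
assert largets_prime_factor(13195) == 29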
import os
def delete_md5(md5):
"""Delete the data of the file that has the MD5 hash."""
file = File.query.filter(File.md5 == md5).one_or_none()
schema = FileSchema()
result = schema.dump(file)
if file is not None:
filename = f"{result['file_name']}.{result['file_type']}"
if not os.path.exists(app.config['UPLOAD_FOLDER']):
os.makedirs(app.config['UPLOAD_FOLDER'])
folder = app.config['UPLOAD_FOLDER']
file_path = os.path.join(folder, filename)
os.remove(file_path)
db.session.delete(file)
db.session.commit()
return make_response(f"File with MD5 hash {md5} deleted.", 200)
else:
abort(404, f"File not found with MD5 hash: {md5}") | 7bfd1491805d7430ee02423cc739d364c17264ed | 8,160 |
def compute_eigenvectors(exx, exy, eyy):
"""
exx, eyy can be 1d arrays or 2D arrays
:param exx: strain component, float or 1d array
:param exy: strain component, float or 1d array
:param eyy: strain component, float or 1d array
:rtype: list
"""
e1, e2 = np.zeros(np.shape(exx)), np.zeros(np.shape(exx)); # eigenvalues
v00, v01 = np.zeros(np.shape(exx)), np.zeros(np.shape(exx));
v10, v11 = np.zeros(np.shape(exx)), np.zeros(np.shape(exx)); # eigenvectors
dshape = np.shape(exx);
if len(dshape) == 1:
for i in range(len(exx)):
[e11, e22, v] = eigenvector_eigenvalue(exx[i], exy[i], eyy[i]);
            e1[i], e2[i] = e11, e22;  # convention of this code returns negative eigenvalues compared to my other codes
v00[i], v10[i] = v[0][0], v[1][0];
v01[i], v11[i] = v[0][1], v[1][1];
elif len(dshape) == 2:
for j in range(dshape[0]):
for i in range(dshape[1]):
[e11, e22, v] = eigenvector_eigenvalue(exx[j][i], exy[j][i], eyy[j][i]);
e1[j][i], e2[j][i] = e11, e22;
v00[j][i], v01[j][i] = v[0][0], v[0][1];
v10[j][i], v11[j][i] = v[1][0], v[1][1];
return [e1, e2, v00, v01, v10, v11]; | 524fe0cabeda91ca3086c3e46e88f19a919ff489 | 8,161 |
from re import fullmatch
def name_looks_valid(name: str) -> bool:
"""
Guesses if a name field is valid. Valid is defined as being at least two words, each beginning with a capital
letter and ending with a lowercase letter.
:param name: the name to check
:return: whether this name is considered valid
"""
existing_parts = name.split()
parts_that_look_like_names = list(
filter(lambda part: fullmatch(r"[A-Z](?:[A-Za-z-']+)?[a-z]", part), existing_parts)
)
if len(existing_parts) < 2 or len(parts_that_look_like_names) < 2:
return False
if len(parts_that_look_like_names) > 2 or len(existing_parts) == len(parts_that_look_like_names):
return True
return False | 3f980ac4db9623c599733794253e6563abe698cb | 8,162 |
import geopandas as gpd
from tqdm import tqdm
from pathlib import Path
def convert_polygons_to_lines(src_polygons, dst_lines, crs=None, add_allone_col=False):
"""Convert polygons to lines.
Arguments:
src_polygons {path to geopandas-readable file} -- Filename of the the polygon vector dataset to be
converted to lines.
dst_lines {[type]} -- Filename where to write the line vector dataset to.
Keyword Arguments:
crs {dict or str} -- Output projection parameters as string or in dictionary format.
This will reproject the data when a crs is given (not {None}) (default: {None}).
add_allone_col {bool} -- Add an additional attribute column with all ones.
            This is useful, e.g. in case you want to use the lines with gdal_proximity afterwards (default: {False}).
Returns:
int -- Exit code 0 if successeful.
"""
gdf = gpd.read_file(src_polygons)
geom_coords = gdf["geometry"] # featureset.get(5)["geometry"]["coordinates"]
lines = []
row_ids = []
for i_row, pol in tqdm(enumerate(geom_coords), total=len(geom_coords)):
boundary = pol.boundary
if boundary.type == 'MultiLineString':
for line in boundary:
lines.append(line)
row_ids.append(i_row)
else:
lines.append(boundary)
row_ids.append(i_row)
gdf_lines = gdf.drop("geometry", axis=1).iloc[row_ids, :]
gdf_lines["Coordinates"] = lines
gdf_lines = gpd.GeoDataFrame(gdf_lines, geometry='Coordinates', crs=gdf.crs)
if crs is not None:
gdf_lines = gdf_lines.to_crs(crs)
if add_allone_col:
gdf_lines["ALLONE"] = 1
Path(dst_lines).parent.mkdir(exist_ok=True, parents=True)
gdf_lines.to_file(dst_lines)
return 0 | 7340eccc3b02f70d38967f3c325c968bcec67f26 | 8,163 |
def format_decimal(amount):
""" jinja2 filter function for decimal number treatment """
amt_whole = int(amount)
amt_whole_len = len(str(amt_whole))
if amount < 1:
amt_str = '{:0.15f}'.format(amount).rstrip("0").rstrip(".")
elif amt_whole_len < 4:
amt_str = '{:0.3f}'.format(amount).rstrip("0").rstrip(".")
elif amt_whole_len < 6:
amt_str = '{:0.2f}'.format(amount).rstrip("0").rstrip(".")
elif amt_whole_len < 9:
amt_str = '{:0.1f}'.format(amount).rstrip("0").rstrip(".")
else:
amt_str = '{}'.format(amt_whole)
return amt_str | 55ee4b6134abd409ade396233fa07061d0a30764 | 8,164 |
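A few illustrative calls to the `format_decimal` filter above, showing how the precision shrinks as the whole part grows.
assert format_decimal(0.000123) == '0.000123'        # < 1: up to 15 decimals, trailing zeros stripped
assert format_decimal(12.5) == '12.5'                # short whole part: 3 decimals
assert format_decimal(12345.678) == '12345.68'       # 5-digit whole part: 2 decimals
assert format_decimal(1234567890.4) == '1234567890'  # very large: whole part only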
def remove_special_char(df, col):
"""Removes special characters such as % and $ from numeric variables and converts them into float"""
df[col] = df[col].replace(regex = True, to_replace = r'[^0-9.\-]', value=r'')
df[col] = df[col].astype("float")
return df[col] | c6c4c86eb480d2f045e40b3eb831d0b8d5381d33 | 8,165 |
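A minimal sketch of how the cleaner above might be used on a messy currency/percentage column; the column name is illustrative.
import pandas as pd

raw = pd.DataFrame({"price": ["$1,200.50", "95%", "-3.5"]})
raw["price"] = remove_special_char(raw, "price")
# raw["price"] now holds the floats 1200.50, 95.0 and -3.5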
def getNonlinearInfo(numHiddenLayers, numBinary, unaryPerBinary):
"""
Generates a 2D list to be used as a nonlinearInfo argument in building an
EQL/EQL-div model
# Arguments
numHiddenLayers: integer, number of hidden layers (i.e. layers
including nonlinear keras layer components)
numBinary: list of integers, available numbers to be used as number of
binary functions in a nonlinear layer component
unaryPerBinary: integer, number of unary function per binary function
in a nonlinear layer component
# Returns
A 2D list of integers with dimension numHiddenLayers x 2. Rows
represent layers, first column is number of unary functions, second
column is number of binary functions
"""
nonlinearInfo = [0 for i in range(numHiddenLayers)]
for i in range(numHiddenLayers):
v = np.random.choice(numBinary) # binary nodes
u = unaryPerBinary * v # unary nodes
nonlinearInfo[i] = [u, v]
return nonlinearInfo | e62f8d016501ad48aeae09ebd8e61b659618e0b0 | 8,166 |
import types
def construct_magmad_gateway_payload(gateway_id: str,
hardware_id: str) -> types.Gateway:
"""
Returns a default development magmad gateway entity given a desired gateway
ID and a hardware ID pulled from the hardware secrets.
Args:
gateway_id: Desired gateway ID
hardware_id: Hardware ID pulled from the VM
Returns:
Gateway object with fields filled in with reasonable default values
"""
return types.Gateway(
name='TestGateway',
description='Test Gateway',
tier='default',
id=gateway_id,
device=types.GatewayDevice(
hardware_id=hardware_id,
key=types.ChallengeKey(
key_type='ECHO',
),
),
magmad=types.MagmadGatewayConfigs(
autoupgrade_enabled=True,
autoupgrade_poll_interval=60,
checkin_interval=60,
checkin_timeout=30,
),
) | 87826b72fd2f33a4a862ffbacbbce14f206dc086 | 8,167 |
import uuid
import types
import sys
import traceback
import cherrypy
from functools import wraps
def endpoint(fun):
"""
REST HTTP method endpoints should use this decorator. It converts the return
value of the underlying method to the appropriate output format and
sets the relevant response headers. It also handles RestExceptions,
which are 400-level exceptions in the REST endpoints, AccessExceptions
resulting from access denial, and also handles any unexpected errors
using 500 status and including a useful traceback in those cases.
If you want a streamed response, simply return a generator function
from the inner method.
"""
@wraps(fun)
def endpointDecorator(self, *path, **params):
_setCommonCORSHeaders()
cherrypy.lib.caching.expires(0)
cherrypy.request.girderRequestUid = str(uuid.uuid4())
setResponseHeader('Girder-Request-Uid', cherrypy.request.girderRequestUid)
try:
_preventRepeatedParams(params)
val = fun(self, path, params)
# If this is a partial response, we set the status appropriately
if 'Content-Range' in cherrypy.response.headers:
cherrypy.response.status = 206
val = _mongoCursorToList(val)
if callable(val):
# If the endpoint returned anything callable (function,
# lambda, functools.partial), we assume it's a generator
# function for a streaming response.
cherrypy.response.stream = True
_logRestRequest(self, path, params)
return val()
if isinstance(val, cherrypy.lib.file_generator):
# Don't do any post-processing of static files
return val
if isinstance(val, types.GeneratorType):
val = list(val)
except RestException as e:
val = _handleRestException(e)
except AccessException as e:
val = _handleAccessException(e)
except GirderException as e:
val = _handleGirderException(e)
except ValidationException as e:
val = _handleValidationException(e)
except cherrypy.HTTPRedirect:
raise
except Exception:
# These are unexpected failures; send a 500 status
logger.exception('500 Error')
cherrypy.response.status = 500
val = dict(type='internal', uid=cherrypy.request.girderRequestUid)
if config.getServerMode() == ServerMode.PRODUCTION:
# Sanitize errors in production mode
val['message'] = 'An unexpected error occurred on the server.'
else:
# Provide error details in non-production modes
t, value, tb = sys.exc_info()
val['message'] = '%s: %s' % (t.__name__, repr(value))
val['trace'] = traceback.extract_tb(tb)
resp = _createResponse(val)
_logRestRequest(self, path, params)
return resp
return endpointDecorator | 794e99aede341eef177cf1b5e617613a0ee5aeb1 | 8,168 |
def script_rename_number(config):
""" The scripting version of `rename_number`. This function
applies the rename to the entire directory. It also adds the
tags to the header file of each fits.
Parameters
----------
config : ConfigObj
The configuration object that is to be used for this
function.
Returns
-------
None
"""
# Extract the configuration parameters.
data_directory = core.config.extract_configuration(
config_object=config, keys=['data_directory'])
begin_garbage = core.config.extract_configuration(
config_object=config, keys=['renaming','begin_garbage'])
# Obtain the labels.
labels, raw = rename_number(data_directory=data_directory,
begin_garbage=begin_garbage)
# Add to all file headers. Assume that the order has not
# changed between renaming steps.
core.error.ifas_info("Adding the file number under the `NUMBER` card "
"in the headers of the fits files in {data_dir} "
"based on the file order."
.format(data_dir=data_directory))
fits_files = core.io.get_fits_filenames(data_directory=data_directory)
for (filedex, headerdex) in zip(fits_files, raw):
__ = core.io.append_astropy_header_card(
file_name=filedex, header_cards={'NUMBER':headerdex})
# Finally rename the files based on parallel appending. Glob
# provides the directory.
core.error.ifas_info("Appending the file number to the end of "
"the files in {data_dir}."
.format(data_dir=data_directory))
core.io.rename_by_parallel_append(file_names=fits_files,
appending_names=labels,
directory=None)
return None | bd0c14cbec43644ed5b6e7b9e23e1c1f71f51984 | 8,169 |
def jasper10x4(**kwargs):
"""
Jasper 10x4 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
https://arxiv.org/abs/1904.03288.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_jasper(version=("jasper", "10x4"), model_name="jasper10x4", **kwargs) | 4b8e210a619a28ca0d28ee6ba85fd7a7acf15335 | 8,170 |
def swap(lst, idx1, idx2):
"""
>>> swap([0, 1, 2], 0, 1)
[1, 0, 2]
>>> swap([0, 1, 2], 0, 0)
[0, 1, 2]
"""
# print("Swapping [{}, {}] from {}".format(idx1, idx2, lst))
lst[idx1], lst[idx2] = lst[idx2], lst[idx1]
# print("resulting to {}".format(lst))
return lst | 81dee804db05eedaa1a9b5611e836a4c1da89b4b | 8,171 |
def substring_index(column, delim=' ', cnt=1):
"""
Returns the substring from string ``column`` before ``cnt`` occurrences of the delimiter ``delim``.
If ``cnt`` is positive, everything the left of the final delimiter (counting from left) is
returned. If ``cnt`` is negative, every to the right of the final delimiter (counting from the
right) is returned. substring_index performs a case-sensitive match when searching for ``delim``.
"""
return _with_expr(exprs.SubstringIndex, column, delim, cnt) | b17fe73e19ece0d9e2511f8b45c43accb65f4138 | 8,172 |
def askPrize(mon: int) -> str:
    """
    Args:
        mon: the draw period (month) to query
    Returns:
        the formatted query result string for that period
    """
(date, data) = initData(mon)
date = f"{date}月\n"
    ssp_prize = f"特別獎:{data[0]}\n"  # Special Top Prize
    sp_prize = f"特獎:{data[1]}\n"  # Grand Prize
    first_prize = f"頭獎:{data[2]}、{data[3]}、{data[4]}\n"  # First Prize (three winning numbers)
    six_prize = f"六獎:{data[2][5:]}、{data[3][5:]}、{data[4][5:]}、{data[5]}\n"  # Sixth Prize (last three digits)
return date + ssp_prize + sp_prize + first_prize + six_prize | 1fcd388a38823a53719e8b50b02d2758b8ebe6dc | 8,173 |
import pickle
import tokenize
def get_letters_df(letters_dict_pickle):
"""Get the letters Pandas Dataframe
Parameters
----------
letters_dict_pickle: string
Path to the dict with the letters text
Returns
-------
Pandas DataFrame
Pandas DataFrame with a columns with the tokens
"""
with open(letters_dict_pickle, 'rb') as handle:
letters_dict = pickle.load(handle)
letters_df = pd.DataFrame(letters_dict, index=[const.LETTER_TEXT]).T
letters_df[const.TOKENIZED] = letters_df[const.LETTER_TEXT].apply(tokenize)
return letters_df | f6c40627ae917d51ce30cd572bb02c378ca7f7e2 | 8,174 |
def lorenzmod1(XYZ, t, a=0.1, b=4, dz=14, d=0.08):
"""
The Lorenz Mod 1 Attractor.
x0 = (0,1,0)
"""
x, y, z = XYZ
x_dt = -a * x + y**2 - z**2 + a * dz
y_dt = x * (y - b * z) + d
z_dt = -z + x * (b * y + z)
return x_dt, y_dt, z_dt | 17dbd87b25968ca0e24b6e6fc602007932983f54 | 8,175 |
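A short integration sketch for the attractor above, assuming SciPy is available; `odeint` passes the state and time in the same order the function expects.
import numpy as np
from scipy.integrate import odeint

t = np.linspace(0, 100, 20000)
trajectory = odeint(lorenzmod1, y0=(0, 1, 0), t=t)  # shape (20000, 3)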
def binomial_p(x, n, p0, reps=10**5, alternative='greater', keep_dist=False, seed=None):
"""
Parameters
----------
sample : array-like
list of elements consisting of x in {0, 1} where 0 represents a failure and
1 represents a seccuess
p0 : int
hypothesized number of successes in n trials
n : int
number of trials
reps : int
number of repetitions (default: 10**5)
alternative : {'greater', 'less', 'two-sided'}
alternative hypothesis to test (default: 'greater')
    keep_dist : boolean
flag for whether to store and return the array of values of the test statistics (default: false)
seed : RandomState instance or {None, int, RandomState instance}
If None, the pseudorandom number generator is the RandomState
instance used by `np.random`;
If int, seed is the seed used by the random number generator;
If RandomState instance, seed is the pseudorandom number generator
Returns
-------
float
estimated p-value
float
test statistic
list
distribution of test statistics (only if keep_dist == True)
"""
if n < x:
raise ValueError("Cannot observe more ones than the population size")
prng = get_prng(seed)
def generate():
return prng.binomial(n, p0, 1)[0]
if keep_dist:
permutations = np.empty(reps)
for i in range(reps):
permutations[i] = generate()
if alternative == 'two-sided':
hits_up = np.sum(permutations >= x)
hits_low = np.sum(permutations <= x)
p_value = 2*np.min([hits_up/reps, hits_low/reps, 0.5])
elif alternative == 'greater':
p_value = np.mean(permutations >= x)
else:
p_value = np.mean(permutations <= x)
return p_value, x, permutations
else:
hits_up = 0
hits_low = 0
for i in range(reps):
ts = generate()
hits_up += (ts >= x)
hits_low += (ts <= x)
if alternative == 'two-sided':
p_value = 2*np.min([hits_up/reps, hits_low/reps, 0.5])
elif alternative == 'greater':
p_value = hits_up/reps
else:
p_value = hits_low/reps
return p_value, x | 486257dfc1c517313556d08c8e2ad4ed3e85980d | 8,176 |
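A hedged usage sketch for `binomial_p` above; it relies on the module-level `get_prng` helper, so this only shows the intended call pattern.
# Test whether 530 successes out of 1000 trials are consistent with p0 = 0.5.
p_value, observed = binomial_p(x=530, n=1000, p0=0.5, reps=10**4,
                               alternative='greater', seed=42)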
def is_guild_owner() -> commands.check:
"""
Returns True under the following conditions:
- **ctx.author** is the owner of the guild where this command was called from
"""
    def predicate(ctx):
        if ctx.guild is None:
            raise commands.NoPrivateMessage('This command can only be used in a server.')
        author: Member = ctx.author
        if author.id != ctx.guild.owner_id:
            # reject the check for anyone other than the guild owner
            raise commands.CheckFailure('This command can only be run by the owner of this guild.')
        return True
return commands.check(predicate) | 3f3c9a5d5990794bced7b021c646041e514e72ed | 8,177 |
def round_grade(grade: int) -> int:
"""
Round the grade according to policy.
Parameters
----------
grade: int
Raw grade.
Returns
-------
rounded_grade: int
Rounded grade.
"""
if grade < 38:
rounded_grade = grade
else:
closest_multiple_5 = (grade // 5 + 1) * 5
if (closest_multiple_5 - grade) >= 3:
rounded_grade = grade
else:
rounded_grade = closest_multiple_5
return rounded_grade | 8f1be9575d98b4ed24ff1e5904a5345d7ebc3e48 | 8,178 |
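Worked examples of the rounding policy above.
assert round_grade(84) == 85  # 85 is the next multiple of 5 and only 1 away
assert round_grade(57) == 57  # 60 is 3 away, so no rounding
assert round_grade(29) == 29  # below 38: never rounded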
import tensorflow as tf
def patch_indecies(i_max: int, j_max: int, ps: int, pstr: int):
"""
Given the sizes i_max and j_max of an image, it extracts the top-left corner pixel
location of all the patches of size (ps,ps) and distant "pstr"
pixels away from each other. If pstr < ps, the patches are overlapping.
Input:
i_max, j_max - int, sizes of the image
ps - int, patch size
pstr - int, patch stride
Output:
idx - int, array of [total_num_patches, 2], pixels locations
"""
idx = []
for i in range(0, i_max - ps + 1, pstr):
for j in range(0, j_max - ps + 1, pstr):
idx.append([i, j])
return tf.convert_to_tensor(idx) | 6b760311513b3ded56f85690bab6622c999cc40d | 8,179 |
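A small example of the patch-corner enumeration above: non-overlapping 2x2 patches on a 4x4 image.
corners = patch_indecies(i_max=4, j_max=4, ps=2, pstr=2)
# corners is a 4x2 int tensor: [[0, 0], [0, 2], [2, 0], [2, 2]]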
# Imports below assume the tf.keras API; the original may have used standalone Keras instead.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, Lambda, Conv2D, MaxPooling2D, Dropout, Flatten, Dense
def model_fn():
"""
Defines a convolutional neural network for steering prediction.
"""
model = Sequential()
# Input layer and normalization
model.add(InputLayer(input_shape=(20, 80, 1)))
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
# Convolutional layer 1
model.add(Conv2D(filters=48, kernel_size=(3,3), strides=(1,1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
# Convolutional layer 2
model.add(Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
# Dropout for regularization.
model.add(Dropout(0.2))
# Full connected layer
model.add(Flatten())
model.add(Dense(100))
# Predicted steering
model.add(Dense(1))
print(model.summary())
return model | 42b85775635ee71ac6ed76f64170776eb36b5953 | 8,180 |
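A minimal training sketch around the model factory above; the optimizer and loss are assumptions, not taken from the original project.
model = model_fn()
model.compile(optimizer='adam', loss='mse')
# model.fit(x_train, y_train, ...) would follow, with 20x80x1 grayscale crops as input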
import jwt
def authenticate(request):
"""Return the user model instance associated with the given request
If no user is retrieved, return an instance of `AnonymousUser`
"""
token, _ = get_token_from_request(request)
jwt_info = {
'token': token,
'case': TokenCase.OK,
'payload': None,
}
if not token:
jwt_info['case'] = TokenCase.NO_TOKEN
return get_user(), jwt_info
try:
payload = decode(token)
user_pk = payload[JWT_CONFIG.user_pk_key]
return get_user(user_pk=user_pk), jwt_info
except jwt.ExpiredSignatureError:
jwt_info['case'] = TokenCase.EXPIRED
except jwt.DecodeError:
jwt_info['case'] = TokenCase.DECODE_ERROR
except jwt.InvalidTokenError:
jwt_info['case'] = TokenCase.INVALID_TOKEN
except KeyError:
jwt_info['case'] = TokenCase.MISSING_KEY
return get_user(), jwt_info | 15d2d4343673cd30f2b201b834bd26889813d4ab | 8,181 |
def _urpc_test_func_2(buf):
"""!
@brief u-RPC variable length data test function.
@param buf A byte string buffer
@return The same byte string repeated three times
"""
return buf*3 | f13f7dcf45eaa0706b69eb09c63d29ba2bbd3d60 | 8,182 |
from datetime import datetime
def main():
"""
Use Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this
"""
start_time = datetime.now()
for device in devices:
print()
print('#' * 40)
output = show_version(device)
print(output)
print()
print('#' * 40)
    print("\nElapsed time: " + str(datetime.now() - start_time))
return None | 9182a63ea3e6a98d995c4ae00126175164cd6dfc | 8,183 |
from typing import Iterable
import warnings
def infer_data_type(data_container: Iterable):
"""
For a given container of data, infer the type of data as one of
continuous, categorical, or ordinal.
For now, it is a one-to-one mapping as such:
- str: categorical
- int: ordinal
- float: continuous
There may be better ways that are not currently implemented below. For
example, with a list of numbers, we can check whether the number of unique
entries is less than or equal to 12, but has over 10000+ entries. This
would be a good candidate for floats being categorical.
:param data_container: A generic container of data points.
:type data_container: `iterable`
"""
warnings.warn(
"`infer_data_type` is deprecated! " "Please use `infer_data_family` instead!"
)
# Defensive programming checks.
# 0. Ensure that we are dealing with lists or tuples, and nothing else.
assert isinstance(data_container, list) or isinstance(
data_container, tuple
), "data_container should be a list or tuple."
# 1. Don't want to deal with only single values.
assert (
len(set(data_container)) > 1
), "There should be more than one value in the data container."
# 2. Don't want to deal with mixed data.
assert is_data_homogenous(data_container), "Data are not of a homogenous type!"
# Once we check that the data type of the container is homogenous, we only
# need to check the first element in the data container for its type.
datum = data_container[0]
# Return statements below
# treat binomial data as categorical
# TODO: make tests for this.
if len(set(data_container)) == 2:
return "categorical"
elif isinstance(datum, str):
return "categorical"
elif isinstance(datum, int):
return "ordinal"
elif isinstance(datum, float):
return "continuous"
else:
raise ValueError("Not possible to tell what the data type is.") | 9618aef33e45908dcb29981c52e8d53821c98642 | 8,184 |
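Illustrative calls to the (deprecated) helper above, following its one-to-one type mapping; this assumes the module's `is_data_homogenous` helper is importable alongside it, and each call emits a DeprecationWarning.
assert infer_data_type(['a', 'b', 'c']) == 'categorical'
assert infer_data_type([1, 2, 3]) == 'ordinal'
assert infer_data_type([1.5, 2.5, 3.0]) == 'continuous'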
import typing
import subprocess
def Preprocess(
src: str,
cflags: typing.List[str],
timeout_seconds: int = 60,
strip_preprocessor_lines: bool = True,
):
"""Run input code through the compiler frontend to inline macros.
This uses the repository clang binary.
Args:
src: The source code to preprocess.
cflags: A list of flags to be passed to clang.
timeout_seconds: The number of seconds to allow before killing clang.
strip_preprocessor_lines: Whether to strip the extra lines introduced by
the preprocessor.
Returns:
The preprocessed code.
Raises:
ClangException: In case of an error.
ClangTimeout: If clang does not complete before timeout_seconds.
"""
cmd = [
"timeout",
"-s9",
str(timeout_seconds),
str(CLANG),
"-E",
"-c",
"-",
"-o",
"-",
] + cflags
app.Log(2, "$ %s", " ".join(cmd))
process = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process.communicate(src)
if process.returncode == 9:
raise errors.ClangTimeout(
f"Clang preprocessor timed out after {timeout_seconds}s"
)
elif process.returncode != 0:
raise errors.ClangException(stderr)
if strip_preprocessor_lines:
return StripPreprocessorLines(stdout)
else:
return stdout | bc9bb6b451cf4d9883c4d76637871d244b490e86 | 8,185 |
def versionString(version):
"""Create version string."""
ver = [str(v) for v in version]
numbers, rest = ver[:2 if ver[2] == '0' else 3], ver[3:]
return '.'.join(numbers) + '-'.join(rest) | 2feec3f8ac5a1f2b848d0805dfa0c3ff53a44ead | 8,186 |
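Quick examples of the version formatting above: a trailing zero patch level is dropped.
assert versionString((1, 2, 3)) == '1.2.3'
assert versionString((1, 2, 0)) == '1.2'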
import warnings
import builtins
def any(iterable, pred):
"""Returns True if ANY element in the given iterable is True for the
given pred function"""
warnings.warn(
"pipe.any is deprecated, use the builtin any(...) instead.",
DeprecationWarning,
stacklevel=4,
)
return builtins.any(pred(x) for x in iterable) | 32f48ab7a6be329b8758ba3dbbe6721923890e11 | 8,187 |
import argparse
import os
def get_parser():
"""
Parser of nuth kaab independent main
TODO: To clean with main. Keep independent main ?
"""
parser = argparse.ArgumentParser(
os.path.basename(__file__),
        description="Universal co-registration method "
        "presented in Nuth & Kaab 2011."
        "NB : 1) It is supposed that both dsms share common reference"
        " (whether it is geoid or ellipsoid)."
        " 2) DSMs must be georeferenced.",
)
parser.add_argument("dsm_to", type=str, help="master dsm")
parser.add_argument(
"dsm_from", type=str, help="slave dsm you wish to coregister to dsm_to"
)
parser.add_argument(
"-outfile", action="store_true", help="saves output coregistered DSM"
)
parser.add_argument(
"-nb_iters",
dest="nb_iters",
type=int,
default=6,
help="number of iterations",
)
parser.add_argument(
"-dirplot",
dest="plot",
type=str,
default=None,
help="path to output plot directory. "
"Plots are printed if set to None (default)",
)
parser.add_argument(
"-nodata1",
dest="nodata1",
type=str,
default=None,
help="no data value for DSM to compare "
"(default value is read in metadata)",
)
parser.add_argument(
"-nodata2",
dest="nodata2",
type=str,
default=None,
help="no data value for Reference DSM "
"(default value is read in metadata)",
)
parser.add_argument(
"-save_diff",
action="store_true",
help="store on file system a ./initial_dh.tiff and a ./final_dh.tiff "
"with dsms differences before and after coregistration",
)
return parser | 9bc9e8318704ecd50bf504c54c24a1684146db42 | 8,188 |
def build_prev_df_n(
dispositions) -> pd.DataFrame:
"""Build admissions dataframe from Parameters."""
days = np.array(range(0, n_days))
data_dict = dict(
zip(
["day", "hosp", "icu", "vent"],
[days] + [disposition for disposition in dispositions],
)
)
projection = pd.DataFrame.from_dict(data_dict)
# New cases
projection_admits = projection.iloc[:-1, :] - projection.shift(1)
projection_admits["day"] = range(projection_admits.shape[0])
projection_admits.loc[0,'hosp'] = 25
return projection_admits | e17ca1ab78e16aeaeac0afa5a1a9fa193cb9777f | 8,189 |
import html
def main() -> VDOMNode:
"""Main entry point."""
vdom = html("<{Heading} />")
return vdom | c29d55ec4d469373e5504cc089e8370d0573a719 | 8,190 |
def GT(x=None, y=None):
"""
Compares two values and returns:
true when the first value is greater than the second value.
false when the first value is less than or equivalent to the second value.
See https://docs.mongodb.com/manual/reference/operator/aggregation/gt/
for more details
:param x: first value or expression
:param y: second value or expression
:return: Aggregation operator
"""
if x is None and y is None:
return {'$gt': []}
return {'$gt': [x, y]} | 62a4321d5d36306b9cc5b910e7eac0eec4d914f3 | 8,191 |
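Example expressions produced by the aggregation helper above; the field name is illustrative.
assert GT('$qty', 250) == {'$gt': ['$qty', 250]}
assert GT() == {'$gt': []}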
def pymongo_formatter(credentials):
"""Returns a DSN for a pymongo-MongoDB connection.
Note that the username and password will still be needed separately in the constructor.
Args:
credentials (dict):
The credentials dictionary from the relationships.
Returns:
(string) A formatted pymongo DSN.
"""
return '{0}:{1}/{2}'.format(
credentials['host'],
credentials['port'],
credentials['path']
) | 69216575258f297c368ec3015c1c14569bb82cd2 | 8,192 |
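A small sketch of the DSN builder above with made-up credentials.
creds = {'host': 'mongodb.internal', 'port': 27017, 'path': 'main'}
assert pymongo_formatter(creds) == 'mongodb.internal:27017/main'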
def sigma_disp_over_vcirc(gal, R=None):
"""The velocity dispersion over circular velocity computed at R=x*Rs [km/s]. Isotropic NFW is assumed.
:param R: radius [kpc]
:param gal: galaxy object
"""
# get Rs
(rho, rs, c) = reconstruct_density_DM(gal, DM_profile='NFW')
# make array of r, preferably with gal.R
if R is None:
x_arr = np.array(gal.R / rs)
ratio_arr = sigma_over_vcirc(x_arr)
else:
R, is_scalar = tl.treat_as_arr(R)
x_arr = np.array(R / rs)
ratio_arr = sigma_over_vcirc(x_arr)
if is_scalar:
ratio_arr = np.squeeze(ratio_arr)
return ratio_arr | f05bb1f1a7ca2e0899ab67bb1c2a355236e3e810 | 8,193 |
def filters(param: str, default_value: str, base_key: str, key_manager: KeyManager) -> list:
"""Filter combo box selector for parameter"""
update_type = '|filters|'
row = combo_row(param, default_value, base_key, key_manager, update_type)
return row | 6645d369116d10cc4392810c9228f0e72fc21fd5 | 8,194 |
def get_scanner(fs_id):
""" get scanner 3T or 1.5T"""
sc = fs_id.split("_")[2]
if sc in ("15T", "1.5T", "15t", "1.5t"):
scanner = "15T"
elif sc in ("3T", "3t"):
scanner = "3T"
else:
print("scanner for subject " + fs_id + " cannot be identified as either 1.5T or 3T...")
print("Please double check the IDs in the list of subjects")
scanner = "false"
return scanner | f905bd16f3103b0c6c02193d30fb945646afb54c | 8,195 |
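Example IDs for the scanner lookup above; the ID layout (scanner field in the third underscore-separated token) is inferred from the code, and the ID strings themselves are made up.
assert get_scanner("ADNI_0001_3T_baseline") == "3T"
assert get_scanner("ADNI_0002_15T_baseline") == "15T"
assert get_scanner("ADNI_0003_7T_baseline") == "false"  # prints a warning and falls through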
def _find_query_rank(similarities, library_keys, query_keys):
"""tf.py_func wrapper around _find_query_rank_helper.
Args:
similarities: [batch_size, num_library_elements] float Tensor. These are not
assumed to be sorted in any way.
library_keys: [num_library_elements] string Tensor, where each column j of
similarities corresponds to library_key j.
query_keys: [num_queries] string Tensor
Returns:
query_ranks: a dictionary with keys 'highest', 'lowest' and 'avg', where
each value is a [batch_size] Tensor. The 'lowest' Tensor contains
for each batch the lowest index of a library key that matches the query
key for that batch element when the library keys are sorted in descending
order by similarity score. The 'highest' and 'avg'
Tensors are defined similarly. The first two are tf.int32 and the
final is a tf.float32.
Note that the behavior of these metrics is undefined when there are ties
within a row of similarities.
best_query_similarities: the value of the similarities evaluated at
the lowest query rank.
"""
(highest_rank, lowest_rank, avg_rank, best_query_similarities) = tf.py_func(
_find_query_rank_helper, [similarities, library_keys, query_keys],
(tf.int32, tf.int32, tf.float32, tf.float32),
stateful=False)
query_ranks = {
'highest': highest_rank,
'lowest': lowest_rank,
'avg': avg_rank
}
return query_ranks, best_query_similarities | f3b002b77c77845681b35c3d6f629f6290324a47 | 8,196 |
def random_adjust_brightness(image, max_delta=0.2, seed=None):
"""Randomly adjusts brightness. """
delta = tf.random_uniform([], -max_delta, max_delta, seed=seed)
image = tf.image.adjust_brightness(image / 255, delta) * 255
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
return image | 9d371ebb268708b983a523ce71a64103d3e46717 | 8,197 |
import re
def get_assign_ops_and_restore_dict(filename, restore_all=False):
"""Helper function to read variable checkpoints from filename.
Iterates through all vars in restore_all=False else all trainable vars. It
attempts to match variables by name and variable shape. Returns a possibly
empty list of assign_ops, and a possibly empty dictionary for tf.train.Saver()
"""
def check_name_and_shape(name, var, shape_map):
if name in shape_map:
# Cannot check variables with unknown sizes such as cudnn rnns
if str(var.shape) == "<unknown>":
# Just return True and hope the shapes match
return True
if var.shape == shape_map[name]:
return True
return False
assign_ops = []
restore_dict = {}
try:
reader = tf.train.NewCheckpointReader(filename)
var_to_shape_map = reader.get_variable_to_shape_map()
variables = tf.trainable_variables()
if restore_all:
variables = tf.get_collection(tf.GraphKeys.VARIABLES)
for var in variables:
idx = var.name.find(":")
if idx != -1:
true_name = var.name[:idx]
loss_idx = re.search("Loss_Optimization", true_name)
if 'EmbeddingMatrix' in true_name:
embed_restore, assign = _restore_embed(var, var_to_shape_map, reader)
if assign:
assign_ops.append(embed_restore)
else:
restore_dict[true_name] = embed_restore
if check_name_and_shape(true_name, var, var_to_shape_map):
tensor = reader.get_tensor(true_name)
if tensor.dtype != var.dtype.as_numpy_dtype():
assign_ops.append(var.assign(tf.cast(tensor, var.dtype)))
else:
restore_dict[true_name] = var
elif loss_idx:
loss_idx = loss_idx.end()
if FP32_TEST.search(true_name):
true_name = FP32_TEST.sub("", true_name)
else:
true_name = (true_name[:loss_idx]
+ "/Loss_Optimization/FP32-master-copy"
+ true_name[loss_idx:])
if check_name_and_shape(true_name, var, var_to_shape_map):
tensor = reader.get_tensor(true_name)
if tensor.dtype != var.dtype.as_numpy_dtype():
assign_ops.append(var.assign(tf.cast(tensor, var.dtype)))
else:
restore_dict[true_name] = var
else:
print("Not restoring {}".format(var.name))
if true_name not in var_to_shape_map:
print("true name [{}] was not in shape map".format(true_name))
else:
if var.shape != var_to_shape_map[true_name]:
print(("var.shape [{}] does not match var_to_shape_map[true_name]"
"[{}]").format(var.shape, var_to_shape_map[true_name]))
print("WARNING: Run will mostly error out due to this")
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
if ("Data loss" in str(e) and
(any([e in filename for e in [".index", ".meta", ".data"]]))):
proposed_file = ".".join(filename.split(".")[0:-1])
v2_file_error_template = """
It's likely that this is a V2 checkpoint and you need to provide the
filename *prefix*. Try removing the '.' and extension. Try:
inspect checkpoint --file_name = {}"""
print(v2_file_error_template.format(proposed_file))
raise ValueError("Error in loading checkpoint")
return assign_ops, restore_dict | d93583c914bbe066b6a62d1f2041ab60cd511ab6 | 8,198 |
def log_creations(model, **extra_kwargs_for_emit):
"""
Sets up signal handlers so that whenever an instance of `model` is created, an Entry will be emitted.
Any further keyword arguments will be passed to the constructor of Entry as-is. As a special case,
if you specify the sentinel value `INSTANCE` as the value of a keyword argument, the newly created
instance of `model` will be passed instead. If the value of the keyword argument is a function,
it will be called with the newly created instance to determine the value of the keyword argument to
the Entry constructor.
For examples on usage, see `feedback/handlers/feedback_message.py`.
"""
meta = model._meta
entry_type_name = '{app_label}.{model_name}.created'.format(
app_label=meta.app_label,
model_name=meta.model_name,
)
@receiver(post_save, sender=model, weak=False)
def on_save_emit_event_log_entry(sender, instance, created, **kwargs):
if not created:
return
kwargs_for_emit = dict()
for key, value in extra_kwargs_for_emit.items():
if value is INSTANCE:
value = instance
elif callable(value):
value = value(instance)
kwargs_for_emit[key] = value
emit(entry_type_name, **kwargs_for_emit)
return on_save_emit_event_log_entry | 4eee202ccb335c658c1f6bf15b02f00955eb3da7 | 8,199 |