content (string, length 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
import sympy
import warnings
def _add_aliases_to_namespace(namespace, *exprs):
"""
Given a sequence of sympy expressions,
find all aliases in each expression and add them to the namespace.
"""
for expr in exprs:
if hasattr(expr, 'alias') and isinstance(expr, sympy.FunctionClass):
if str(expr) in namespace:
if namespace[str(expr)] != expr.alias:
warnings.warn('two aliases with the same name were found')
namespace[str(expr)] = expr.alias
if hasattr(expr, 'func'):
if isinstance(expr.func, sympy.FunctionClass) and hasattr(expr.func, 'alias'):
if expr.func.__name__ in namespace:
if namespace[expr.func.__name__] != expr.func.alias:
warnings.warn('two aliases with the same name were found')
namespace[expr.func.__name__] = expr.func.alias
if hasattr(expr, 'args'):
try:
_add_aliases_to_namespace(namespace, *expr.args)
except TypeError:
pass
return namespace | e90e311aacd9c9c41363badc690ad25c18501251 | 23,195 |
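A minimal usage sketch for _add_aliases_to_namespace, assuming a hypothetical `alias` callable attached to a sympy Function subclass (sympy itself defines no such attribute):
import sympy
class MyFunc(sympy.Function):
    pass
MyFunc.alias = lambda x: x + 1  # hypothetical alias callable
ns = _add_aliases_to_namespace({}, MyFunc(sympy.Symbol('x')))
print(ns)  # {'MyFunc': <lambda>}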
def rotICA(V, kmax=6, learnrate=.0001, iterations=10000):
""" ICA rotation (using basicICA) with default parameters and normalization of
outputs.
:Example:
>>> Vica, W = rotICA(V, kmax=6, learnrate=.0001, iterations=10000)
"""
V1 = V[:, :kmax].T
[W, changes_s] = basicICA(V1, learnrate, iterations)
Vica = (W.dot(V1)).T
for n in range(kmax):
imax = abs(Vica[:, n]).argmax()
Vica[:, n] = np.sign(Vica[imax, n]) * Vica[:, n] / np.linalg.norm(
Vica[:, n])
return Vica, W | 2db4bb0d5c5c5f70c9f7cf5a20b27fd2b146e26f | 23,196 |
import socket
def getipbyhost(hostname):
""" return the IP address for a hostname
"""
return socket.gethostbyname(hostname) | 9556f537e16fd710a566a96a51d4262335967893 | 23,197 |
import numpy as np
import pandas as pd
def reduce_mem_usage(df) -> pd.DataFrame:
"""Function to reduce the memory usage of a DataFrame.
Arguments:
df {DataFrame} -- the target DataFrame
Returns:
[DataFrame] -- the DataFrame after memory reduction
"""
numerics = [
'int8', 'int16', 'int32', 'int64', 'float16', 'float32', 'float64'
]
start_mem = df.memory_usage(deep=True).sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type not in numerics:
continue
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
np_int_type_list = [np.int8, np.int16, np.int32, np.int64]
for np_int_type in np_int_type_list:
if c_min > np.iinfo(np_int_type).min and c_max < np.iinfo(
np_int_type).max:
df[col] = df[col].astype(np_int_type)
break
else:
np_float_type_list = [np.float16, np.float32, np.float64]
for np_float_type in np_float_type_list:
if c_min > np.finfo(np_float_type).min and c_max < np.finfo(
np_float_type).max:
df[col] = df[col].astype(np_float_type)
break
end_mem = df.memory_usage(deep=True).sum() / 1024**2
if (start_mem - end_mem) > 0:
print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(
end_mem, 100 * (start_mem - end_mem) / start_mem))
return df | b317c85aee9f51d221b3895bcc9ac1a6bc3535f6 | 23,198 |
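A short usage sketch for reduce_mem_usage, assuming numpy and pandas are available:
import numpy as np
import pandas as pd
df = pd.DataFrame({
    "a": np.arange(1000, dtype="int64"),               # fits in int16
    "b": np.linspace(0., 1., 1000).astype("float64"),  # fits in float16
})
df = reduce_mem_usage(df)
print(df.dtypes)  # a -> int16, b -> float16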
import typing
def findparam(
parameters: _TYPE_FINDITER_PARAMETERS,
selector: _TYPE_FINDITER_SELECTOR
) -> typing.Iterator[_T_PARAM]:
"""
Return an iterator yielding those parameters (of type
:class:`inspect.Parameter` or :class:`~forge.FParameter`) that are
matched by the selector.
:paramref:`~forge.findparam.selector` is used differently based on what is
supplied:
- str: a parameter is found if its :attr:`name` attribute equals the selector
- Iterable[str]: a parameter is found if its :attr:`name` attribute is
contained in the selector
- callable: a parameter is found if the callable (which receives the
parameter), returns a truthy value.
:param parameters: an iterable of :class:`inspect.Parameter` or
:class:`~forge.FParameter`
:param selector: an identifier which is used to determine whether a
parameter matches.
:returns: an iterator yielding the matched parameters
"""
if isinstance(selector, str):
return filter(lambda param: param.name == selector, parameters)
elif isinstance(selector, typing.Iterable):
selector = list(selector)
return filter(
lambda param: param.name in selector, # type: ignore
parameters,
)
return filter(selector, parameters) | 61da9c7e453d04bf2db9c5f923e815e250da4b53 | 23,199 |
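A brief usage sketch for findparam using plain inspect.Parameter objects (the forge-specific types are only needed for the annotations):
import inspect
def f(a, b, c=1):
    pass
params = list(inspect.signature(f).parameters.values())
print([p.name for p in findparam(params, "b")])         # ['b']
print([p.name for p in findparam(params, ["a", "c"])])  # ['a', 'c']
print([p.name for p in findparam(params, lambda p: p.default is not inspect.Parameter.empty)])  # ['c']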
def FSA(profile_exp, profile_sm, diffsys, time, Xlim=[], n=[400, 500], w=None, f=None, alpha=0.3, name=''):
"""
Forward Simulation Analysis
Extract diffusion coefficients based on a diffusion profile.
Please do not close any plot window during the FSA process.
This is the final step of FSA.
Parameters
----------
profile_exp : DiffProfile
Experimental diffusion profile, used for comparison with simulation
results.
profile_sm : DiffProfile
Diffusion profile after data smooth on experimental profile.
diffsys : DiffSystem
Diffusion coefficients
time : float
Diffusion time in seconds
Xlim : list (float), optional
Passed to 'pydiffusion.Dtools.SF', 'pydiffusion.utils.step'.
Indicates the left and right concentration limits for calculation.
Default value = [profile.X[0], profile.X[-1]].
n : list, optional
Passed to 'pydiffusion.utils.automesh'.
Meshing number range, default = [400, 500].
w : list, optional
Weights of each phase to calculate error.
Passed to 'pydiffusion.utils.error_profile'.
f : meshing function, optional
Keyword argument of automesh()
alpha : float
Keyword argument of automesh()
name : str, optional
Name the output DiffProfile
Returns
-------
profile_sim : DiffProfile
Simulated diffusion profile after FSA.
diffsys_sim : DiffSystem
Calculated diffusion coefficients by FSA.
Examples
--------
After datasmooth() and Dmodel(), FSA can be performed to calculate accurate diffusion coefficients:
>>> ds = datasmooth(exp)
>>> dsys = Dmodel(ds, time)
>>> fsa = FSA(exp, ds, dsys, time)
"""
# Create step profile on meshed grids
dism = automesh(profile=profile_sm, diffsys=diffsys, n=n, f=f, alpha=alpha)
matano = matanocalc(profile_sm, Xlim)
if Xlim == [] and profile_sm.X[-1] < profile_sm.X[0]:
profile_init = step(dism, matano, diffsys, [diffsys.Xr[-1, 1], diffsys.Xr[0, 0]])
else:
profile_init = step(dism, matano, diffsys, Xlim)
# Determine the stop criteria of forward simulations
error_sm = error_profile(profile_sm, profile_exp)
ipt = input('Default error = %.6f\nInput the stop criteria of error: [%.6f]\n'
% (error_sm, error_sm*2))
error_stop = error_sm*2 if ipt == '' else float(ipt)
# If there is no Xspl info in diffsys, use Phase Mode
# else: ask if use Phase or Point Mode
if diffsys.Xspl is not None:
ipt = input('Use Phase Mode? [n]\n(The shape of diffusivity curve does not change)\n')
pp = False if 'y' in ipt or 'Y' in ipt else True
else:
pp = False
if name == '':
name = profile_exp.name+'_FSA'
# Diffusion coefficients used for forward simulations
diffsys_sim = DiffSystem(diffsys.Xr, diffsys.Dfunc, Xspl=diffsys.Xspl, name=name)
# Plot FSA status
fig = plt.figure('FSA', figsize=(16, 6))
ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
profileplot(profile_exp, ax1, ls='none', marker='o', c='b', fillstyle='none')
profileplot(profile_sm, ax1, ls='-', c='g', lw=1)
SFplot(profile_sm, time, Xlim, ax2, ls='none', c='b', marker='.')
DCplot(diffsys_sim, ax2, ls='-', c='r', lw=2)
plt.draw()
plt.tight_layout()
plt.pause(0.1)
n_sim = 0
while True:
# Simulation
n_sim += 1
profile_sim = mphSim(profile_init, diffsys_sim, time, name=name)
error_sim = error_profile(profile_sim, profile_exp, w)
print('Simulation %i, error = %f(%f)' % (n_sim, error_sim, error_stop))
# Plot simulation results
ax1.cla()
ax2.cla()
profileplot(profile_exp, ax1, ls='none', marker='o', c='b', fillstyle='none')
profileplot(profile_sm, ax1, ls='-', c='g', lw=1)
profileplot(profile_sim, ax1, ls='-', c='r', lw=2)
SFplot(profile_sm, time, Xlim, ax2, ls='none', c='b', marker='.')
DCplot(diffsys_sim, ax2, ls='-', c='r', lw=2)
plt.draw()
plt.tight_layout()
# DC adjust
Dfunc_adjust = [0] * diffsys_sim.Np
# If error > stop criteria, continue simulation by auto DC adjustment
if error_sim > error_stop:
for ph in range(diffsys_sim.Np):
try:
Dfunc_adjust[ph] = Dadjust(profile_sm, profile_sim, diffsys_sim, ph, pp)
except (ValueError, TypeError) as error:
ita_finish()
raise error
diffsys_sim.Dfunc = Dfunc_adjust
# If error < stop criteria or simulate too many times
if error_sim <= error_stop or n_sim > 9:
ita_start()
# Ask if exit
ipt = ask_input('Satisfied with FSA? [n]')
if 'y' in ipt or 'Y' in ipt:
ita_finish()
break
# If use Point Mode
if diffsys_sim.Xspl is not None:
ipt = ask_input('Use Point Mode (y) or Phase Mode (n)? [y]')
pp = False if 'n' in ipt or 'N' in ipt else True
if pp:
for ph in range(diffsys_sim.Np):
try:
Dfunc_adjust[ph] = Dadjust(profile_sm, profile_sim, diffsys_sim, ph, pp)
except (ValueError, TypeError) as error:
ita_finish()
raise error
diffsys_sim.Dfunc = Dfunc_adjust
DCplot(diffsys_sim, ax2, ls='-', c='m', lw=2)
plt.draw()
plt.pause(0.1)
ita_finish()
continue
# Phase Mode, ask if use manual input for each phase
pp = False
ipt = input('Phase Mode\nManually input for each phase? [n]')
manual = True if 'y' in ipt or 'Y' in ipt else False
for ph in range(diffsys_sim.Np):
if manual:
ipt = input('Input deltaD for phase # %i:\n(DC = DC * 10^deltaD, default deltaD = auto)\n' % (ph+1))
deltaD = float(ipt) if ipt != '' else None
else:
deltaD = None
try:
Dfunc_adjust[ph] = Dadjust(profile_sm, profile_sim, diffsys_sim, ph, pp, deltaD)
except (ValueError, TypeError) as error:
ita_finish()
raise error
# Apply the adjustment to diffsys_sim
diffsys_sim.Dfunc = Dfunc_adjust
DCplot(diffsys_sim, ax2, ls='-', c='m', lw=2)
plt.draw()
plt.pause(0.1)
ita_finish()
return profile_sim, diffsys_sim | 049475203d30ac02dadc0cb281d38909ba32039c | 23,201 |
def permutacion_matriz(U, fila_i, idx_max, verbose=False, P=None, r=None):
"""Efectua una permutación por filas de una matriz
Args:
U (matriz): MAtriz a permutar
fila_i (int): indice de fila origen
idx_max (int): indice de fila a la que permutar
verbose (bool, optional): verbose. Defaults to False.
P (mat, optional): matriz de permutación. Defaults to None.
r (mat, optional): rhs de ecuación. Defaults to None.
Returns:
(U, P, r): Matrices U, P y r cambiadas
"""
print_verbose(
[
f"Permutamos fila {fila_i} con {idx_max}",
f"U antes:\n {np.array(U)}",
f"P antes:\n {np.array(P)}",
],
verbose,
)
if fila_i != idx_max:
fila_origen, fila_destino = U[fila_i, :].copy(), U[idx_max, :].copy()
U[idx_max, :], U[fila_i, :] = fila_origen, fila_destino
if P is not None:
fila_origen, fila_destino = P[fila_i, :].copy(), P[idx_max, :].copy()
P[idx_max, :], P[fila_i, :] = fila_origen, fila_destino
if r is not None:
fila_origen, fila_destino = r[fila_i, :].copy(), r[idx_max, :].copy()
r[idx_max, :], r[fila_i, :] = fila_origen, fila_destino
print_verbose(
[f"U despues:\n {np.array(U)}", f"P despues:\n {np.array(P)}"], verbose
)
return U, P, r | 2ff9ae6a0b789de24c3479df49275fa254876dd2 | 23,202 |
def get_compliance_by_rules(scan_id):
"""
Lists compliance results by rule for a scan.
"""
items = []
offset = 0
while True:
params = {'offset': offset}
response = get('scans/%s/compliance_by_rules' % scan_id, params)
items.extend(response['items'])
if not response['is_truncated']:
break
offset = response['next_offset']
return items | 205dccc9b7714eb9a2179272b2fc1cec4cbc26e3 | 23,203 |
def QuadRemeshBrep1(brep, parameters, guideCurves, multiple=False):
"""
Create Quad Remesh from a Brep
Args:
brep (Brep): Set Brep Face Mode by setting QuadRemeshParameters.PreserveMeshArrayEdgesMode
guideCurves (IEnumerable<Curve>): A curve array used to influence mesh face layout
The curves should touch the input mesh
Set Guide Curve Influence by using QuadRemeshParameters.GuideCurveInfluence
"""
url = "rhino/geometry/mesh/quadremeshbrep-brep_quadremeshparameters_curvearray"
if multiple: url += "?multiple=true"
args = [brep, parameters, guideCurves]
if multiple: args = list(zip(brep, parameters, guideCurves))
response = Util.ComputeFetch(url, args)
response = Util.DecodeToCommonObject(response)
return response | 042e89c1da55fd74f18c9592fc18bd2e0a18e2a8 | 23,204 |
def tags2turbo(lon, lat, dist, bdim=155, timeout=60, pretty_print=False, maxsize=None, tags=[]):
""" """
gtypes = ('node', 'way', 'relation',)
turbo = Turbo()
qconditions = [{
"query": filter2query(tags),
"distance": dist,
"gtypes": gtypes, # Optional. Possible values:
# "node", "way", "relation", "way-node", node-relation",
# "relation-way", "relation-relation", "relation-backwards"
# "amplitude": 0,
"newer": "%Y-%m-%ddT%H:%M:%SZ" #
}]
query = turbo.build_query(
Turbo.optimize_centralized_query_by_base_tile(lon, lat, qconditions, bdim=bdim),
timeout=timeout, maxsize=maxsize
)
return dict(query=query) | a9eae6c63266818f9e05993100432bdce2df851e | 23,205 |
async def get_people(from_number: int = None, up_to_number: int = None):
"""
Endpoint to get all people from-to given number
:return: list of people from-to numbers
"""
return _people[from_number:up_to_number] | 0ea214227493642eace1a1dc864268176cf872af | 23,206 |
import chunk
def window(x, y, width, overlap=0., x_0=None, expansion=None, cap_left=True,
cap_right=True, ret_x=True):
"""Break arrays x and y into slices.
Parameters
----------
x : array_like
Monotonically increasing numbers. If x is not monotonically increasing
then it will be flipped, beware that this may not have the desired
effect.
y : array_like
Arbitrary values, same size as x.
width : float
Window width in the same units as x.
overlap : float, optional
Overlap of windows in the same units as x. If negative, the window
steps along x values rather than binning.
x_0 : float, optional
Position in x at which to start windowing. (untested)
expansion : polynomial coefficients, optional
Describes the rate of change of window size with x. (not implemented)
The idea is that width = width*np.polyval(expansion, x). Overlap is
similarly increased.
cap_left : boolean, optional
Stop window exceeding left most (minimum) value of x. Only applies when
overlap is positive.
cap_right : boolean, optional
Stop window exceeding right most (maximum) value of x. Only applies
when overlap is positive.
Returns
-------
vals : numpy.array
Contains all the windowed chunks of x and y.
Notes
-----
The current check on monotonicity is whether more than 20% of points in
x are not monotonic. This is a sort of hack to avoid flipping for the
occasional erroneous non-monotonic point.
"""
if x.size != y.size:
raise ValueError('x and y must be of equal size.')
if overlap > width:
raise ValueError('The overlap cannot be larger than the width.')
# Incredibly bad check for monotonicity.
not_monotonic = np.sum(np.diff(x) < 0) > 0.2*len(x)
if not_monotonic:
x = utils.flip_padded(x)
y = utils.flip_padded(y)
if x_0 is not None:
idxs = ~np.isnan(x) & (x >= x_0)
else:
idxs = ~np.isnan(x)
x = x[idxs]
y = y[idxs]
if overlap < 0.:
left = x - width/2.
right = left + width
elif overlap >= 0.:
step = width - overlap
if cap_left:
xmin = x[0]
else:
xmin = x[0] - width
if cap_right:
# Take away slightly less than the full width to allow for the last
# bin to complete the full range.
xmax = x[-1] - 0.99*width
else:
xmax = x[-1]
left = np.arange(xmin, xmax, step)
right = left + width
bins = np.transpose(np.vstack((left, right)))
if ret_x:
vals = np.asarray([chunk(x, b, y) for b in bins])
else:
vals = np.asarray([chunk(x, b, y)[1] for b in bins])
if not_monotonic:
vals = np.flipud(vals)
return vals | 947b84a33e351e7e75fbdaf4997de689f44c83ec | 23,209 |
import json
def mark_property_purchased(request):
"""
Api to mark a property as purchased by the buyer without page reload using vue or htmx
"""
data = json.loads(request.body)
property = Property.objects.get(id=data['property_id'])
if property.property_status != Property.SOLD and property.property_sold:
property.property_status = Property.SOLD
property.save()
messages.success(request, f"You have successfully completed {property.property_title} purchase.")
return JsonResponse({"success": True}) | 2ce7e22b8cf566361d307c53423b0b0a26ab2dbc | 23,210 |
def argmax_unique(arr, axis):
"""Return a mask so that we can exclude the nonunique maximums, i.e. the nodes that aren't completely resolved"""
arrm = np.argmax(arr, axis)
arrs = np.sum(arr, axis)
nonunique_mask = np.ma.make_mask(arrs != 1)
uni_argmax = np.ma.masked_array(arrm, mask=nonunique_mask, fill_value=-1)
return uni_argmax, nonunique_mask | e6691fea688c6f0044fd9e0a70e4651ae56aaa49 | 23,212 |
from typing import Dict
import json
def get_json(response: func.HttpResponse) -> Dict:
"""Get JSON from an HttpResponse."""
return json.loads(response.get_body().decode("utf-8")) | 58fa7916d6b988b2e949e55db838f3f6b9430fb3 | 23,213 |
def _float_feature(value):
"""Returns a float_list from a float / double."""
if isinstance(value, list):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
return tf.train.Feature(float_list=tf.train.FloatList(value=[value])) | 2caaa8b572ee2744cf6deeaff6a287fb472cef7f | 23,214 |
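A short sketch showing how the returned Feature plugs into a tf.train.Example:
import tensorflow as tf
example = tf.train.Example(features=tf.train.Features(feature={
    "x": _float_feature([0.5, 1.5]),
    "y": _float_feature(2.0),
}))
print(example.features.feature["x"].float_list.value)  # [0.5, 1.5]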
def sendVillasNodeOutput(message, output_mapping_vector, powerflow_results, state_estimation_results, scenario_flag):
"""
to create the payload according to "villas_node_output.json"
@param message: received message from the server (json.loads(msg.payload)[0])
@param output_mapping_vector: according to villas_node_output.json (see function read_mapping_file)
@param powerflow_results: results of powerflow (type acs.state_estimation.results.Results)
@param state_estimation_results: results of state_estimation (type acs.state_estimation.results.Results)
@param scenario_flag:
@return: string formatted according to "villas_node_output.json"
"""
VillasNodeOutput = {}
VillasNodeOutput["ts"] = {}
VillasNodeOutput["ts"]["origin"] = message["ts"]["origin"]
if "sequence" in message:
VillasNodeOutput["sequence"] = message["sequence"]
else:
print('Sequence no. not available.')
VillasNodeOutput["sequence"] = 1
# calculate Vmag_err
Vmag_err = np.zeros(len(powerflow_results.nodes))
for idx, elem in enumerate(powerflow_results.nodes):
uuid_pf = elem.topology_node.uuid
Vmag_true = np.absolute(elem.voltage)
Vmag_est = np.absolute(state_estimation_results.get_node(uuid=uuid_pf).voltage)
Vmag_err[idx] = np.absolute(Vmag_est - Vmag_true)
Vmag_err[idx] = 100 * np.divide(Vmag_err[idx], Vmag_true)
max_err = np.amax(Vmag_err)
mean_err = np.mean(Vmag_err)
data = [None] * len(output_mapping_vector)
for idx, elem in enumerate(output_mapping_vector):
if elem[0] == "max_err":
data[idx] = max_err
continue
elif elem[0] == "mean_err":
data[idx] = mean_err
continue
elif elem[0] == "scenario_flag":
data[idx] = float(scenario_flag)
continue
else: # elem = ["N4", "V", "phase", "est"] or elem = ["N4", "V", "phase", "pf"]
node = None
if elem[3] == "est":
node = state_estimation_results.get_node(uuid=elem[0])
elif elem[3] == "pf":
node = powerflow_results.get_node(uuid=elem[0])
value = None
if elem[2] == "mag": # elem_data[2] = "mag" or "phase"
value = np.absolute(node.voltage)
elif elem[2] == "phase":
value = np.angle(node.voltage)
data[idx] = value
VillasNodeOutput["data"] = data
return "[" + dumps(VillasNodeOutput) + "]" | dc992622a625219c6befb865cee87c4d3ffa0aef | 23,215 |
def find_org_rooms(dbs, user_id, meeting_date):
"""
Get the organizations that can be assigned to the user
:param dbs:
:param user_id:
:param meeting_date:
:return:
"""
orgs = dbs.query(SysOrg.id, SysOrg.org_name, SysOrg.parent_id)\
.outerjoin(SysUserOrg, (SysUserOrg.org_id == SysOrg.id))\
.filter(SysUserOrg.user_id == user_id).all()
rooms = dbs.query(HasBoardroom.id, HasBoardroom.name, HasBoardroom.org_id) \
.outerjoin(SysOrg, SysOrg.id == HasBoardroom.org_id)\
.outerjoin(SysUserOrg, (SysUserOrg.org_id == SysOrg.id)) \
.filter(SysUserOrg.user_id == user_id).all()
meetings = dbs.query(HasMeeting.id, HasMeeting.name, HasMeeting.description, HasMeetBdr.boardroom_id,
HasMeetBdr.meeting_date, HasMeeting.start_time,
HasMeeting.end_time, HasMeeting.repeat, HasMeeting.create_user, HasMeeting.create_time,
SysUser.user_name, SysUser.phone, SysOrg.org_name)\
.outerjoin(SysUser, HasMeeting.create_user == SysUser.id)\
.outerjoin(SysOrg, SysUser.org_id == SysOrg.id)\
.outerjoin(HasMeetBdr, HasMeetBdr.meeting_id == HasMeeting.id)\
.outerjoin(HasBoardroom, HasBoardroom.id == HasMeetBdr.boardroom_id)\
.filter(HasMeetBdr.meeting_date == meeting_date).all()
lists = []
for org in orgs:
org_id = org.id
org_name = org.org_name
parent_id = org.parent_id
# room_list = []
# for room in rooms:
# # merge the meeting rooms into the organization list
# if org_id == room.org_id:
# room_dict = {
# 'id': room.id,
# 'name': room.name,
# 'org_id': org_id
# }
# room_list.append(room_dict)
temp_dict = {
'org_id': org_id,
'org_name': org_name,
'parent_id': parent_id
# 'rooms': room_list
}
lists.append(temp_dict)
return lists | 18f3c9c75377a4c212141c0b14f4e4802d7782b5 | 23,216 |
def config_openapi(app: FastAPI, settings: ApiSettings):
"""Config openapi."""
def custom_openapi():
"""Config openapi."""
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(
title="Arturo STAC API", version="0.1", routes=app.routes
)
if settings.api_extension_is_enabled(ApiExtensions.fields):
openapi_schema["paths"]["/search"]["get"]["responses"]["200"]["content"][
"application/json"
]["schema"] = {"$ref": "#/components/schemas/ItemCollection"}
openapi_schema["paths"]["/search"]["post"]["responses"]["200"]["content"][
"application/json"
]["schema"] = {"$ref": "#/components/schemas/ItemCollection"}
app.openapi_schema = openapi_schema
return app.openapi_schema
app.openapi = custom_openapi | d57f37ce33cef4639f635330c6313a9fea5ab7dc | 23,217 |
from typing import Sequence
def twelve_tone_matrix(
row: Sequence,
) -> DataFrame:
""" Returns a twelve-tone matrix in the form of a Pandas DataFrame.
"""
inverted_row = inversion(row)
inv_mat = transposition(inverted_row, row[0]-inverted_row[0])
new = [row]
for i in range(1, 12):
k = transposition(row, (inv_mat[i] - row[0]) % 12)
new.append(k)
m = reshape(new, (12, 12))
df = DataFrame(m)
return df | 3ab247e7e347aa3c84e4b040380f0529e8081625 | 23,218 |
async def upstream_http_exception_handler(request, exc: HTTPError):
"""Handle http exceptions from upstream server"""
logger.warning(f"Upstream HTTP error [{request.query_params['url']}]: {repr(exc)}")
# Convert to FastApi exception
exc = HTTPException(502, f"Upstream server returned: [{exc.status}] {exc.message}")
return await http_exception_handler(request, exc) | 9f96e83c5b120bdcb8f6f7173ec928a33be1b210 | 23,219 |
from typing import List
from pathlib import Path
import re
def extract_latest_checkpoint_and_epoch(available_files: List[Path]) -> PathAndEpoch:
"""
Checkpoints are saved as recovery_epoch={epoch}.ckpt, find the latest ckpt and epoch number.
:param available_files: all available checkpoints
:return: path the checkpoint from latest epoch and epoch number
"""
recovery_epochs = [int(re.findall(r"[\d]+", f.stem)[0]) for f in available_files]
idx_max_epoch = int(np.argmax(recovery_epochs))
return available_files[idx_max_epoch], recovery_epochs[idx_max_epoch] | f11d627b79baef580c0dc354a9d9be28552fe4d9 | 23,220 |
def bark_filter_banks(nfilts=20,
nfft=512,
fs=16000,
low_freq=0,
high_freq=None,
scale="constant"):
"""
Compute Bark-filterbanks. The filters are stored in the rows, the columns
correspond to fft bins.
Args:
nfilts (int) : the number of filters in the filterbank.
(Default 20)
nfft (int) : the FFT size.
(Default is 512)
fs (int) : sample rate/ sampling frequency of the signal.
(Default 16000 Hz)
low_freq (int) : lowest band edge of mel filters.
(Default 0 Hz)
high_freq (int) : highest band edge of mel filters.
(Default samplerate/2)
scale (str) : choose if max bins amplitudes ascend, descend or are constant (=1).
Default is "constant"
Returns:
a numpy array of size nfilts * (nfft/2 + 1) containing filterbank.
Each row holds 1 filter.
"""
# init freqs
high_freq = high_freq or fs / 2
low_freq = low_freq or 0
# run checks
if low_freq < 0:
raise ParameterError(ErrorMsgs["low_freq"])
if high_freq > (fs / 2):
raise ParameterError(ErrorMsgs["high_freq"])
# compute points evenly spaced in Bark scale (points are in Bark)
low_bark = hz2bark(low_freq)
high_bark = hz2bark(high_freq)
bark_points = np.linspace(low_bark, high_bark, nfilts + 4)
# we use fft bins, so we have to convert from Bark to fft bin number
bins = np.floor(bark2fft(bark_points, fs, nfft))
fbank = np.zeros([nfilts, nfft // 2 + 1])
# init scaler
if scale == "descendant" or scale == "constant":
c = 1
else:
c = 0
for j in range(2, nfilts + 2):
# compute scaler
if scale == "descendant":
c -= 1 / nfilts
c = c * (c > 0) + 0 * (c < 0)
elif scale == "ascendant":
c += 1 / nfilts
c = c * (c < 1) + 1 * (c > 1)
for i in range(int(bins[j - 2]), int(bins[j + 2])):
fc = bark_points[j]
fb = fft2bark(i, fs, nfft)
fbank[j - 2, i] = c * Fm(fb, fc)
return np.abs(fbank) | 227edcca91c64515b73531d80e1bd9db7faf9bb0 | 23,221 |
import json
def auth_check_response_fixture():
"""Define a fixture to return a successful authorization check."""
return json.loads(load_fixture("auth_check_response.json")) | 18bb02313534376f4244430725135d089941693f | 23,222 |
def cancel_session(session_id):
"""
Cancel all tasks within a session
Args:
string: session_id
Returns:
dict: results
"""
lambda_response = {}
all_cancelled_tasks = []
for state in task_states_to_cancel:
res = cancel_tasks_by_status(session_id, state)
print("Cancelling session: {} status: {} result: {}".format(
session_id, state, res))
lambda_response["cancelled_{}".format(state)] = len(res)
all_cancelled_tasks += res
lambda_response["tatal_cancelled_tasks"] = len(all_cancelled_tasks)
return(lambda_response) | 5d07d9038023ad15eaca33bb409b5a4c4db66089 | 23,223 |
def get_cli_args():
"""Gets, parses, and returns CLI arguments"""
parser = ArgumentParser(description='Check modules formatting')
parser.add_argument('filepath', help='path to a file to check')
parser.add_argument('-n', '--fqcn',
dest='fqcn',
metavar='FQCN',
default=False,
required=False,
help='FQCN to check examples')
parser.add_argument("-c", "--comments",
dest="check_comments",
action="store_true",
required=False,
help="check comments")
parser.add_argument("-l", "--length",
dest="check_length",
action="store_true",
required=False,
help="check description length")
parser.add_argument("-s", "--spelling",
dest="check_spelling",
action="store_true",
required=False,
help="check spelling")
return parser.parse_args() | 4e54a7141e19ebba9c0502e4bb40293a583e2d96 | 23,225 |
def __virtual__():
"""
Determine whether or not to load this module
"""
return __virtualname__ | 3b5f873a504d44aba03691f58d8f19a834287eff | 23,226 |
def load_glove_embeddings():
"""
Load the glove embeddings into a array and a dictionary with words as
keys and their associated index as the value. Assumes the glove
embeddings are located in the same directory and named "glove.6B.50d.txt"
RETURN: embeddings: the array containing word vectors
word_index_dict: a dictionary matching a word in string form to
its index in the embeddings array, e.g. {"apple": 119}
"""
#if you are running on the CSE machines, you can load the glove data from here
#data = open("/home/cs9444/public_html/17s2/hw2/glove.6B.50d.txt",'r',encoding="utf-8")
data = open("glove.6B.50d.txt",'r',encoding="utf-8")
word_index_dict = {}
word_index_dict['UNK'] = 0
embeddings = np.ndarray(shape=(500001, 50), dtype='float32')
embeddings_list = []
i = 1
for line in data:
load_array = line.split()
# Sets the word to the 0th value in array
word = load_array[0]
# Other values are the assigned index
values = np.asarray(load_array[1:], dtype='float32')
# Put values in row of array
embeddings[i] = values
# E.g. word_index_dict["the"] = 0
word_index_dict[word] = i
i = i+1
data.close()
return embeddings, word_index_dict | eaac7465e7a4d9658add81ea7a17e62684d38bed | 23,227 |
def _f2_rsub_ ( self , other ) :
"""Operator for ``2D-function - other''"""
return _f2_rop_ ( self , other , Ostap.MoreRooFit.Subtraction , "Subtract_" ) | 2b771d39ea3d1cd3cb7f8c4bc88c11d3814f2e4e | 23,228 |
def attenuate(source, factor=0.01, duration=1.0, srate=None):
"""Exponential attenuation towards target value within 'factor' in time 'duration' for constant signals."""
if srate is None:
srate = get_srate()
return onepole(source, 1.0, -factor ** (srate / duration), 1.0 - factor ** (srate / duration)) | d67c141ec36bfcaf9dde2518488f8cc5d2b24ef5 | 23,229 |
def IsMultiPanel(hcuts, vcuts) -> bool:
"""
Check if the image is multi-panel or not.
Could have more logic.
"""
return bool(hcuts or vcuts) | fc62a31007445eac90b6f5ceb3a7c9c006dd2eef | 23,231 |
def is_subject_mutable(context, subject):
"""Return True if the subject is mutable in this context."""
if context.is_admin:
return True
if subject.owner is None or context.owner is None:
return False
return subject.owner == context.owner | 74e36a2f79111c9d09ea9f90579ce9b51beb3e61 | 23,232 |
from datetime import datetime
def time_filter(df, start_date, end_date):
"""Remove times that are not within the start/end bounds."""
if start_date:
datetime_start = datetime.strptime(start_date, '%Y-%m-%d')
start_selection = df.index >= datetime_start
if end_date:
datetime_end = datetime.strptime(end_date, '%Y-%m-%d')
end_selection = df.index <= datetime_end
if start_date and end_date:
selection = start_selection & end_selection
filtered_df = df[selection]
elif start_date:
filtered_df = df[start_selection]
elif end_date:
filtered_df = df[end_selection]
else:
filtered_df = df
return filtered_df | 13e66add286ddad649e4ae09dc9d6ec5dd013d8a | 23,233 |
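A small usage sketch for time_filter with a date-indexed frame:
import pandas as pd
df = pd.DataFrame({"v": range(5)}, index=pd.date_range("2021-01-01", periods=5))
print(time_filter(df, "2021-01-02", "2021-01-04"))  # keeps the middle three rows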
def collapse_umi(cells):
"""
Input set of genotypes for each read
Return list with one entry for each UMI, per cell barcode
"""
collapsed_data = {}
for cell_barcode, umi_set in cells.items():
for _, genotypes in umi_set.items():
if len(set(genotypes)) > 1:
pass
else:
try:
collapsed_data[cell_barcode]
except KeyError:
collapsed_data[cell_barcode] = [genotypes[0]]
else:
collapsed_data[cell_barcode].append(genotypes[0])
# count total ref, total alt UMIs for each genotype
for key, value in collapsed_data.items():
collapsed_data[key] = [value.count("ref"), value.count("alt")]
assert len(collapsed_data[key]) == 2
return collapsed_data | e98b44193487691fb04e8e0f4ec25c3438175c65 | 23,235 |
def get_corrected_PRES(PRES: np.ndarray, ele_gap: float, TMP: np.ndarray) -> np.ndarray:
"""気圧の標高補正
Args:
PRES (np.ndarray): 補正前の気圧 [hPa]
ele_gap (float): 標高差 [m]
TMP (np.ndarray): 気温 [℃]
Returns:
np.ndarray: 標高補正後の気圧 [hPa]
Notes:
気温減率の平均値を0.0065℃/mとする。
"""
return PRES * np.power(1 - ((ele_gap * 0.0065) / (TMP + 273.15)), 5.257) | 1be9c2bd5a07714463ac7a6b05bc4d7ca1f84e70 | 23,236 |
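A quick sketch of get_corrected_PRES for a 100 m elevation gap at 15 ℃:
import numpy as np
print(get_corrected_PRES(np.array([1013.25]), ele_gap=100.0, TMP=np.array([15.0])))
# ~[1001.3] hPa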
def mishra_bird(x, *args):
"""Mishra's Bird constrained function with 2 parameters.
To be used in the constrained optimization examples.
When subject to:
(x[0] + 5) ** 2 + (x[1] + 5) ** 2 < 25
the global minimum is at f(-3.1302, -1.5821) = -106.7645
Bounds: -10 <= x[0] <= 0
-6.5 <= x[1] <= 0
Reference:
https://en.wikipedia.org/wiki/Test_functions_for_optimization
"""
fx = np.sin(x[1]) * np.exp((1 - np.cos(x[0])) ** 2) + \
np.cos(x[0]) * np.exp((1 - np.sin(x[1])) ** 2) + (x[0] - x[1]) ** 2
return fx | ca6fd1211f8715ef1cbc39c75e252107774da54d | 23,237 |
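Evaluating mishra_bird at the constrained global minimum quoted in its docstring:
import numpy as np
print(mishra_bird(np.array([-3.1302, -1.5821])))  # ~ -106.7645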
def find_target_migration_file(database=DEFAULT_DB_ALIAS, changelog_file=None):
"""Finds best matching target migration file"""
if not database:
database = DEFAULT_DB_ALIAS
if not changelog_file:
changelog_file = get_changelog_file_for_database(database)
try:
doc = minidom.parse(changelog_file)
except ExpatError as ex:
raise InvalidChangelogFile(
'Could not parse XML file %s: %s' % (changelog_file, ex))
try:
dbchglog = doc.getElementsByTagName('databaseChangeLog')[0]
except IndexError:
raise InvalidChangelogFile(
'Missing <databaseChangeLog> node in file %s' % (
changelog_file))
else:
nodes = list(filter(lambda x: x.nodeType is x.ELEMENT_NODE,
dbchglog.childNodes))
if not nodes:
return changelog_file
last_node = nodes[-1]
if last_node.tagName == 'include':
last_file = last_node.attributes.get('file').firstChild.data
return find_target_migration_file(
database=database, changelog_file=last_file)
else:
return changelog_file | 39a3ed89dacd9393f69e081aaf44c64f34852592 | 23,238 |
from functools import reduce
def encode_message(ctl, addr, src_id, msg_code, data=""):
"""Encode a message for the PIM, assumes data formatted"""
ctl = create_control_word(addr.is_link) if ctl == -1 else ctl
length = 7 + len(data)
ctl = ctl | (length << 8)
msg = bytearray(length)
msg[0:2] = ctl.to_bytes(2, byteorder="big")
msg[2] = addr.network_id
msg[3] = addr.upb_id
msg[4] = src_id
msg[5] = msg_code
if data:
msg[6 : len(data) + 6] = data
msg[-1] = (256 - reduce(lambda x, y: x + y, msg)) % 256 # Checksum
return msg.hex().upper() | 3ffef7a1b65e5dabc6ae4c237018d446b7852cc1 | 23,239 |
def rule_16(l, r):
"""
Rule for "vyaṁjana sandhi - ghośī karaṇaya"
:return:
"""
l_suffix = utils.endswith(l, letters.AGOSHA_LETTERS)
r_prefix = utils.startswith(r, letters.GOSHA_LETTERS)
if l_suffix is not None and r_prefix is not None:
if r_prefix in letters.VOWELS:
return l[:-len(l_suffix)] + letters.AGOSHA_TO_GOSHA_MAPPING[l_suffix][0] + \
letters.DIACRITICS_MAPPING[r_prefix] + r[len(r_prefix):]
return l[:-len(l_suffix)] + letters.AGOSHA_TO_GOSHA_MAPPING[l_suffix] + r | ea151d8697128e933ecd927ac2e18d039f578a1c | 23,240 |
def is_vertex_cover(G, vertex_cover):
"""Determines whether the given set of vertices is a vertex cover of graph G.
A vertex cover is a set of vertices such that each edge of the graph
is incident with at least one vertex in the set.
Parameters
----------
G : NetworkX graph
The graph on which to check the vertex cover.
vertex_cover :
Iterable of nodes.
Returns
-------
is_cover : bool
True if the given iterable forms a vertex cover.
Examples
--------
This example checks two covers for a graph, G, of a single Chimera
unit cell. The first uses the set of the four horizontal qubits, which
do constitute a cover; the second set removes one node.
>>> import dwave_networkx as dnx
>>> G = dnx.chimera_graph(1, 1, 4)
>>> cover = [0, 1, 2, 3]
>>> dnx.is_vertex_cover(G,cover)
True
>>> cover = [0, 1, 2]
>>> dnx.is_vertex_cover(G,cover)
False
"""
cover = set(vertex_cover)
return all(u in cover or v in cover for u, v in G.edges) | 4213db1953ec976b1606c3756fa73ff0cae9f578 | 23,241 |
def datetimeformat(value, formatstring='%Y-%m-%d %H:%M', nonchar=''):
"""Formates a datetime.
Tries to convert the given ``value`` to a ``datetime`` object and then formats
it according to ``formatstring``::
{{ datetime.now()|datetimeformat }}
{{ "20171224T235959"|datetimeformat('%H:%M') }}
"""
if not value:
return nonchar
return Markup(convert_to_datetime(value).strftime(formatstring).replace('-', '‑')) | a95510b5734168899d81c893f647b5b836ee3b27 | 23,242 |
def get_interface_for_name(protocols, target_interface_name):
# type: (Iterable[Protocol], str) -> Optional[Interface]
"""Given a name string, gets the interface that has that name, or None."""
for protocol in protocols:
for interface in protocol.interfaces:
if interface.name == target_interface_name:
return interface
return None | 22c8d4ad64058a068700fd8f16a1dee49efe4001 | 23,243 |
def extract_validation_set(x: ndarray, y: ndarray, size=6000):
"""Will extract a validation set of "size" from given x,y pair
Parameters:
x (ndarray): numpy array
y (ndarray): numpy array
size (int): Size of validation set. Must be smaller than examples count
in x, y and multiple of label_count
"""
assert x.shape[0] == y.shape[0]
assert (
x.shape[0] % size == 0
), f"number of examples ({x.shape[0]}) needs to be evenly divisible by parameter size ({size})"
assert size % len(set(y)) == 0, "size must be a multiple of number of labels"
x_balanced, y_balanced = classes_balanced_randomized_per_partition(x, y)
xy_val = (x_balanced[:size], y_balanced[:size])
xy_train = (x_balanced[size:], y_balanced[size:])
return xy_train, xy_val | a5cc15cdd7a5889a29196c2bcee7b50aae5b3bc5 | 23,245 |
def send_update(peer_ip, attr, nlri, withdraw):
"""
send update message
:param peer_ip: peer ip address
:return:
"""
if cfg.CONF.bgp.running_config['factory'].fsm.protocol.send_update({
'attr': attr, 'nlri': nlri, 'withdraw': withdraw}):
return {
'status': True
}
else:
return {
'status': False,
'code': 'failed when send this message out'
} | f53f4ba009d21e3d36867d5292131fcffb73dc5e | 23,246 |
def generate_basic_blame_experiment_actions(
project: Project,
bc_file_extensions: tp.Optional[tp.List[BCFileExtensions]] = None,
extraction_error_handler: tp.Optional[PEErrorHandler] = None
) -> tp.List[actions.Step]:
"""
Generate the basic actions for a blame experiment.
- handle caching of BC files
- compile project, if needed
Args:
project: reference to the BB project
bc_file_extensions: list of bitcode file extensions (e.g. opt, no opt)
extraction_error_handler: handler to manage errors during the
extraction process
"""
return get_bc_cache_actions(
project, bc_file_extensions, extraction_error_handler
) | bd2785e3e611fea95f9e64b8f68ecf8746905bff | 23,247 |
from typing import Any
from typing import Optional
from typing import Union
from typing import OrderedDict
from typing import Mapping
from typing import Literal
def is_json_encodable(t: Any) -> bool:
""" Checks whether a type is json encodable. """
# pylint:disable=invalid-name,too-many-return-statements,too-many-branches
if not is_typecheckable(t):
return False
if t in JSON_BASE_TYPES:
return True
if t in (None, type(None)):
return True
if t is ...:
return True
if is_namedtuple(t):
field_types = getattr(t, "_field_types")
return all(is_json_encodable(field_types[field]) for field in field_types)
if hasattr(t, "__origin__") and hasattr(t, "__args__"):
if t.__origin__ in (list, set, frozenset, deque, Optional):
return is_json_encodable(t.__args__[0])
if t.__origin__ is tuple:
if len(t.__args__) == 2 and t.__args__[1] is ...: # pylint:disable=no-else-return
return is_json_encodable(t.__args__[0])
else:
return all(is_json_encodable(s) for s in t.__args__)
if t.__origin__ is Union:
return all(is_json_encodable(s) for s in t.__args__)
if t.__origin__ in (dict, OrderedDict, Mapping):
return t.__args__[0] == str and is_json_encodable(t.__args__[1])
if t.__origin__ is Literal:
return all(isinstance(s, JSON_BASE_TYPES+(type(None),)) for s in t.__args__)
return False | bdebbebd28d16949c2786386c4be666f3445783e | 23,248 |
def request_parse_platform_id(validated_request):
"""Parses the PlatformID from a provided visibility API request.
Args:
validated_request (obj:Request): A Flask request object that has been generated for a
visibility/opportunity endpoint.
Requires:
The request object MUST have been validated against the requested schema.
Returns:
A list of Satellite model objects.
Throws:
InputError: If any provided platform ID(s) are invalid.
"""
if 'PlatformID' not in validated_request.json:
return Satellite.query.all()
satellites = []
for satellite in validated_request.json['PlatformID']:
satellite = Satellite.query.get(satellite)
if satellite is None:
raise InputError('PlatformID', 'No such platform')
satellites.append(satellite)
return satellites | 68fd6847549cf68edddfed2fca4e9177a6e41e6f | 23,249 |
def crear_comentario_submeta(request, pk):
""" Crea y agrega un comentario a una meta identificada por su id """
# meta = get_object_or_404(Meta, pk=pk)
meta = Submeta.objects.get(pk=pk)
# if the form was submitted, save the comment and redirect the browser to
# the goal
if request.method == "POST":
form = ComentarioFormulario(request.POST)
if form.is_valid():
# Create the comment from the form data
comentario = form.save(commit=False)
# Link it to the goal (foreign key)
comentario.meta = meta
# Save the comment to the database
comentario.save()
# comentarios = Comentario.objects.filter(meta__pk=pk)
return redirect('info_submeta', pk=meta.id)
# otherwise, create an empty form and send it to the crear_comentario
# template so that the user can create the comment
# by filling in the data.
else:
form = ComentarioFormulario(instance=meta)
return render(request, 'crear_comentario_submeta.html', {'form': form}) | 56452834478c43560eaf168553ac4723091d400d | 23,250 |
def load_mask_from_shapefile(filename, shape, transform):
"""Load a mask from a shapefile."""
multipolygon, _ = load_shapefile2multipolygon(filename)
mask = multipolygon2mask(multipolygon, shape, transform)
return mask | 82af816cb92828862003929a42ddb62f5153cada | 23,251 |
def _spectra_resample(spectra, wvl_orig, wvl_target):
"""
:param spectra: input flux values
:param wvl_orig: original wavelength grid
:param wvl_target: target wavelength grid
:return: flux linearly resampled onto wvl_target (NaN outside the original finite range)
"""
idx_finite = np.isfinite(spectra)
min_wvl_s = np.nanmin(wvl_orig[idx_finite])
max_wvl_s = np.nanmax(wvl_orig[idx_finite])
idx_target = np.logical_and(wvl_target >= min_wvl_s,
wvl_target <= max_wvl_s)
new_flux = np.interp(wvl_target[idx_target], wvl_orig[idx_finite], spectra[idx_finite])
nex_flux_out = np.ndarray(len(wvl_target))
nex_flux_out.fill(np.nan)
nex_flux_out[idx_target] = new_flux
return nex_flux_out | 5dedfce082d1d417cd609e53a2c73fafa69c451a | 23,252 |
from datetime import datetime
def test_standard_surface():
"""Test to read a standard surface file."""
def dtparse(string):
return datetime.strptime(string, '%y%m%d/%H%M')
skip = ['text']
gsf = GempakSurface(get_test_data('gem_std.sfc'))
gstns = gsf.sfjson()
gempak = pd.read_csv(get_test_data('gem_std.csv'),
index_col=['STN', 'YYMMDD/HHMM'],
parse_dates=['YYMMDD/HHMM'],
date_parser=dtparse)
for stn in gstns:
idx_key = (stn['properties']['station_id'],
stn['properties']['date_time'])
gemsfc = gempak.loc[idx_key, :]
for param, val in stn['values'].items():
if param not in skip:
assert val == pytest.approx(gemsfc[param.upper()]) | 4538481d85b313826d488e09c5504956f06b7865 | 23,253 |
def calc_mu(Rs):
""" Calculates mu for use in LinKK """
neg_sum = sum(abs(x) for x in Rs if x < 0)
pos_sum = sum(abs(x) for x in Rs if x >= 0)
return 1 - neg_sum/pos_sum | 915cd4718d255e963fede2b73b5637a1afc13d4b | 23,254 |
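A tiny sketch of calc_mu on a mixed-sign list of fitted resistances:
print(calc_mu([1.0, 2.0, -0.5]))  # 1 - 0.5/3.0 ~= 0.833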
def computeAnomaly(data):
"""
Remove the seasonality
"""
period = _get_period(data)
meanclim = computeMeanClimatology(data)
anom = data.groupby(f'time.{period}') - meanclim
return anom | 2d42fb7c2f219f78e2971a554eaddb593f5dbc9c | 23,255 |
def product(*args):
"""Calculate product of args.
@param args: list of floats to multiply
@type args: list of float
@return: product of args
@rtype: float
"""
r = args[0]
for x in args[1:]:
r *= x
return r | 3862c4c9ac2ccd8336f70d86a17ae9fee4c7fed5 | 23,256 |
def unpack_into_tensorarray(value, axis, size=None):
"""
unpacks a given tensor along a given axis into a TensorArray
Parameters:
----------
value: Tensor
the tensor to be unpacked
axis: int
the axis to unpack the tensor along
size: int
the size of the array to be used if shape inference resulted in None
Returns: TensorArray
the unpacked TensorArray
"""
shape = value.get_shape().as_list()
rank = len(shape)
dtype = value.dtype
array_size = shape[axis] if not shape[axis] is None else size
if array_size is None:
raise ValueError("Can't create TensorArray with size None")
array = tf.TensorArray(dtype=dtype, size=array_size)
dim_permutation = [axis] + list(range(1, axis)) + [0] + list(range(axis + 1, rank))
unpack_axis_major_value = tf.transpose(value, dim_permutation)
full_array = array.unstack(unpack_axis_major_value)
return full_array | e6320350d5963a47ec8f853e5b9c819b730c352f | 23,257 |
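A small sketch unpacking a (2, 3, 4) tensor along axis 1 (runs eagerly in TF 2):
import tensorflow as tf
value = tf.reshape(tf.range(24, dtype=tf.float32), (2, 3, 4))
ta = unpack_into_tensorarray(value, axis=1)
print(ta.size().numpy())  # 3
print(ta.read(0).shape)   # (2, 4)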
from datetime import datetime
def get_error_page(status_code, message):
"""
获取错误页面
:param status_code:
:param message:
:return:
"""
context = {
'site_web': settings.SITE_TITLE,
'site_url': reverse(settings.SITE_NAME),
'status_code': status_code,
'message': message,
'date': datetime.now().year
}
return context | 76da02826b026fd63562c2d51976ee77ad86f794 | 23,258 |
def fits_difference(*args, **keys):
"""Difference two FITS files with parameters specified as Differencer class."""
differ = FitsDifferencer(*args, **keys)
return differ.difference() | 4ccfd9c521d76e5b3d47a2be8afefe594895e570 | 23,259 |
def esta_balanceada(expressao):
"""
Function that checks whether an expression has balanced parentheses, brackets and braces
The student must state the time and space complexity of the function
Only the stack built in the previous class may be used as a data structure
:param expressao: string with the expression to be checked for balance
:return: boolean, True if the expression is balanced and False otherwise
"""
#Complexity analysis
#Time and memory are O(n)
pilha = Pilha()
if expressao == "":
return True
elif expressao[0] in ')}]':
return False
else:
for caracter in expressao:
if caracter in '({[':
pilha.empilhar(caracter)
else:
try:
desenpilhado = pilha.desempilhar()
except PilhaVaziaErro:
return pilha.vazia()
if caracter == '}' and desenpilhado != '{':
return False
elif caracter == ']' and desenpilhado != '[':
return False
elif caracter == ')' and desenpilhado != '(':
return False
return pilha.vazia() | c35a2f8ca4afef76e722d03809edca9f4dbac3fd | 23,260 |
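Example calls to esta_balanceada, assuming the Pilha stack class from the previous class is importable:
print(esta_balanceada("({[]})"))  # True
print(esta_balanceada("(]"))      # False
print(esta_balanceada(""))        # True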
def create_doc(im_src, tag, coords, fea_arr, fea_bin_arr):
"""
Create elasticsearch doc
Params:
im_src: image file name
tag: tag or class for image
coords: list of boxes corresponding to a tag
fea_arr: list of ImFea objects
fea_bin_arr: list of ImFeaBin objects
"""
doc = {}
doc['coords'] = coords
f_bin = ImFeaBinArr()
f = ImFeaArr()
f.arr.extend(fea_arr)
f_bin.arr.extend(fea_bin_arr)
obj_bin_str = b64encode(f_bin.SerializeToString())
obj_str = b64encode(f.SerializeToString())
doc['sigs'] = obj_str
doc['bin_sigs'] = obj_bin_str
doc['im_src'] = im_src
doc['cl'] = tag
return doc | 4e7afe795d30873516840b66a0e4a54b4599fe8c | 23,261 |
def scaled_mouse_pos(mouse): # pragma: no cover
"""
Renvoie la position de la souris mise à l'échelle de l'image.
Parameters
----------
mouse : int * int
La position réelle de la souris
Returns
-------
int * int
La position mise à l'échelle
"""
# Récupération de la dimension de la fenêtre
window_dimensions = ut.get_screen_size()
# Calcul du facteur d'échelle
scale_factor_x = cf.SCREEN_WIDTH / window_dimensions[0]
scale_factor_y = cf.SCREEN_HEIGHT / window_dimensions[1]
return mouse[0] * scale_factor_x, mouse[1] * scale_factor_y | ad216c50f1492bb0248c04f3195aceed4622bfe1 | 23,262 |
import re
def is_valid_zcs_image_id(zcs_image_id):
"""
Validates Zadara Container Services (ZCS) image IDs, also known as the ZCS
image "name". A valid ZCS image name should look like: img-00000001 - It
should always start with "img-" and end with 8 hexadecimal characters in
lower case.
:type zcs_image_id: str
:param zcs_image_id: The ZCS image name to be validated.
:rtype: bool
:return: True or False depending on whether zcs_image_id passes
validation.
"""
if zcs_image_id is None:
return False
match = re.match(r'^img-[0-9a-f]{8}$', zcs_image_id)
if not match:
return False
return True | 4b2e689c5ff62c32c147dec1c05b77cf0df31c9a | 23,263 |
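Two quick checks of is_valid_zcs_image_id against the img- prefix and 8 lowercase hex characters:
print(is_valid_zcs_image_id("img-0000001a"))  # True
print(is_valid_zcs_image_id("img-XYZ12345"))  # False (uppercase / non-hex)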
from random import random
def get_topology2(gid: int, cfg: Config):
"""
Create a uniformly and randomly sampled genome of fixed topology:
Sigmoid with bias 1.5 --> Actuation default of 95,3%
(key=0, bias=1.5) (key=1, bias=?)
____ / /
/ /
GRU /
| _____/
| /
(key=-1)
"""
# Create an initial dummy genome with fixed configuration
genome = Genome(
key=gid,
num_outputs=cfg.genome.num_outputs,
bot_config=cfg.bot,
)
# Setup the parameter-ranges
conn_range = cfg.genome.weight_max_value - cfg.genome.weight_min_value
bias_range = cfg.genome.bias_max_value - cfg.genome.bias_min_value
rnn_range = cfg.genome.rnn_max_value - cfg.genome.rnn_min_value
# Create the nodes
genome.nodes[0] = OutputNodeGene(key=0, cfg=cfg.genome) # OutputNode 0
genome.nodes[0].bias = 1.5 # Drive with 0.953 actuation by default
genome.nodes[1] = OutputNodeGene(key=1, cfg=cfg.genome) # OutputNode 1
genome.nodes[1].bias = random() * bias_range + cfg.genome.bias_min_value # Uniformly sampled bias
genome.nodes[2] = GruNodeGene(key=2, cfg=cfg.genome, input_keys=[-1], input_keys_full=[-1]) # Hidden node
genome.nodes[2].bias = 0 # Bias is irrelevant for GRU-node
# Uniformly sample the genome's GRU-component
genome.nodes[2].bias_h = rand_arr((3,)) * bias_range + cfg.genome.bias_min_value
genome.nodes[2].weight_xh_full = rand_arr((3, 1)) * rnn_range + cfg.genome.weight_min_value
genome.nodes[2].weight_hh = rand_arr((3, 1)) * rnn_range + cfg.genome.weight_min_value
# Create the connections
genome.connections = dict()
# input2gru
key = (-1, 2)
genome.connections[key] = ConnectionGene(key=key, cfg=cfg.genome)
genome.connections[key].weight = 1 # Simply forward distance
genome.connections[key].enabled = True
# gru2output - Uniformly sampled
key = (2, 1)
genome.connections[key] = ConnectionGene(key=key, cfg=cfg.genome)
genome.connections[key].weight = 3 # Enforce capabilities of full spectrum
genome.connections[key].enabled = True
# input2output - Uniformly sampled
key = (-1, 1)
genome.connections[key] = ConnectionGene(key=key, cfg=cfg.genome)
genome.connections[key].weight = random() * conn_range + cfg.genome.weight_min_value
genome.connections[key].enabled = True
genome.update_rnn_nodes(config=cfg.genome)
return genome | 4b8d21b8e22857c0bec06f58d6d13857a3834649 | 23,264 |
def dry_press(
H,
Pv,
alt_setting=P0,
alt_units=default_alt_units,
press_units=default_press_units,
):
"""
Returns dry air pressure, i.e. the total air pressure, less the water
vapour pressure.
"""
HP = pressure_alt(H, alt_setting, alt_units=alt_units)
P = alt2press(HP, press_units=press_units, alt_units=alt_units)
Pd = P - Pv
return Pd | eceffd1bf8c13edc77c3a3bbb4131acf6c6b9bca | 23,266 |
def InvocationAddCallerAuthid(builder, callerAuthid):
"""This method is deprecated. Please switch to AddCallerAuthid."""
return AddCallerAuthid(builder, callerAuthid) | 63144e4311430009c419543c4ebb6a4f83f60281 | 23,267 |
def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):
"""Performs the max pooling on the input.
Note internally this op reshapes and uses the underlying 2d operation.
Args:
input: A 3-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1` or `3`. The size of the
window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1` or `3`. The stride of
the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool1d", [input]) as name:
if data_format is None:
data_format = "NWC"
channel_index = 1 if data_format.startswith("NC") else 2
ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
expanding_dim = 1 if data_format == "NWC" else 2
data_format = "NHWC" if data_format == "NWC" else "NCHW"
input = expand_dims_v2(input, expanding_dim)
result = gen_nn_ops.max_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
return array_ops.squeeze(result, expanding_dim) | d20db7475284f9b52ce1bc0d10d68f6ab96555a0 | 23,268 |
from tensorflow.keras.layers import Concatenate
def DeepLabV3Plus(shape):
""" Inputs """
inputs = Input(shape)
""" Pre-trained ResNet50 """
base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=inputs)
""" Pre-trained ResNet50 Output """
image_features = base_model.get_layer('conv4_block6_out').output
x_a = ASPP(image_features)
x_a = UpSampling2D((4, 4), interpolation="bilinear")(x_a)
""" Get low-level features """
x_b = base_model.get_layer('conv2_block2_out').output
x_b = Conv2D(filters=48, kernel_size=1, padding='same', use_bias=False)(x_b)
x_b = BatchNormalization()(x_b)
x_b = Activation('relu')(x_b)
x = Concatenate()([x_a, x_b])
x = Conv2D(filters=256, kernel_size=3, padding='same', activation='relu',use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(filters=256, kernel_size=3, padding='same', activation='relu', use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = UpSampling2D((4, 4), interpolation="bilinear")(x)
""" Outputs """
x = Conv2D(1, (1, 1), name='output_layer')(x)
x = Activation('sigmoid')(x)
""" Model """
model = Model(inputs=inputs, outputs=x)
return model | da838d36b7926fc394bb8153ff3e6ed67926059b | 23,269 |
def white_noise(template, rms_uKarcmin_T, rms_uKarcmin_pol=None):
"""Generate a white noise realisation corresponding to the template pixellisation
Parameters
----------
template: ``so_map`` template
the template for the white noise generalisation
rms_uKarcmin_T: float
the white noise temperature rms in uK.arcmin
rms_uKarcmin_pol: float
the white noise polarisation rms in uK.arcmin
if None set it to sqrt(2)*rms_uKarcmin_T
"""
noise = template.copy()
rad_to_arcmin = 60 * 180 / np.pi
if noise.pixel == "HEALPIX":
nside = noise.nside
pixArea = hp.pixelfunc.nside2pixarea(nside) * rad_to_arcmin ** 2
if noise.pixel == "CAR":
pixArea = noise.data.pixsizemap() * rad_to_arcmin ** 2
if noise.ncomp == 1:
if noise.pixel == "HEALPIX":
size = len(noise.data)
noise.data = np.random.randn(size) * rms_uKarcmin_T / np.sqrt(pixArea)
if noise.pixel == "CAR":
size = noise.data.shape
noise.data = np.random.randn(size[0], size[1]) * rms_uKarcmin_T / np.sqrt(pixArea)
if noise.ncomp == 3:
if rms_uKarcmin_pol is None:
rms_uKarcmin_pol = rms_uKarcmin_T * np.sqrt(2)
if noise.pixel == "HEALPIX":
size = len(noise.data[0])
noise.data[0] = np.random.randn(size) * rms_uKarcmin_T / np.sqrt(pixArea)
noise.data[1] = np.random.randn(size) * rms_uKarcmin_pol / np.sqrt(pixArea)
noise.data[2] = np.random.randn(size) * rms_uKarcmin_pol / np.sqrt(pixArea)
if noise.pixel == "CAR":
size = noise.data[0].shape
noise.data[0] = np.random.randn(size[0], size[1]) * rms_uKarcmin_T / np.sqrt(pixArea)
noise.data[1] = np.random.randn(size[0], size[1]) * rms_uKarcmin_pol / np.sqrt(pixArea)
noise.data[2] = np.random.randn(size[0], size[1]) * rms_uKarcmin_pol / np.sqrt(pixArea)
return noise | 2e72d5362e66409081e0a4a222d49d18034006e2 | 23,271 |
def index():
"""process request to the root."""
return render_template('index.html') | ea02cfd380d51670ca69cbec74f9b299dd650e88 | 23,272 |
import torch
def aromatic_bonds(mol: IndigoObject) -> dict:
"""Get whether bonds in a molecule are aromatic or not.
Args:
IndigoObject: molecule object
Returns:
dict: key - feature name, value - torch.tensor of booleans
"""
is_aromatic = []
for bond in mol.iterateBonds():
is_aromatic.append(bond.bondOrder() == 4)
return {"is_aromatic": torch.tensor(is_aromatic * 2).unsqueeze(1).float()} | 92bae4b5b5a67f8165732ab64e345f85ddaa7d28 | 23,273 |
import warnings
def get_unitroot(df: pd.DataFrame, fuller_reg: str, kpss_reg: str) -> pd.DataFrame:
"""Calculate test statistics for unit roots
Parameters
----------
df : pd.DataFrame
DataFrame of target variable
fuller_reg : str
Type of regression of ADF test
kpss_reg : str
Type of regression for KPSS test
Returns
-------
pd.DataFrame
Dataframe with results of ADF test and KPSS test
"""
# The Augmented Dickey-Fuller test
# Used to test for a unit root in a univariate process in the presence of serial correlation.
try:
result = adfuller(df, regression=fuller_reg)
except MissingDataError:
df = df.dropna(axis=0)
result = adfuller(df, regression=fuller_reg)
cols = ["Test Statistic", "P-Value", "NLags", "Nobs", "ICBest"]
vals = [result[0], result[1], result[2], result[3], result[5]]
data = pd.DataFrame(data=vals, index=cols, columns=["ADF"])
# Kwiatkowski-Phillips-Schmidt-Shin test
# Test for level or trend stationarity
# This test seems to produce an Interpolation Error which says
# The test statistic is outside of the range of p-values available in the
# look-up table. The actual p-value is greater than the p-value returned.
# Wrap this in catch_warnings to prevent
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res2 = kpss(df, regression=kpss_reg, nlags="auto")
vals2 = [res2[0], res2[1], res2[2], "", ""]
data["KPSS"] = vals2
return data | 2d459e0980cb8983a03269f4e6b57e831065e6a0 | 23,274 |
def lat_avg(data, lat_wgt):
"""Perform latitude average of data:
Inputs:
data - n dimensional spatial data. The last 2 dimensions are assumed to lat and
lon respectively
lat_wgt - weights by latitudes"""
lat_shape = lat_wgt.shape
data_shape = data.shape
# If one dimensional:
if(len(lat_wgt) == 1):
lat_wgt_re = np.broadcast_to(lat_wgt, data.shape)
elif(len(lat_shape) > 1):
raise ValueError ("lat_wgt must be 1 dimensional latitude weights")
else:
lat_2d = np.broadcast_to(lat_wgt.reshape(len(lat_wgt), 1), data_shape[-2:])
lat_wgt_re = np.broadcast_to(lat_2d, data_shape)
return (data * lat_wgt_re).mean(axis = -2) | 2cef0a7e0eeead1983a8ae9dfc9d4cd7954a2c29 | 23,275 |
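A minimal sketch of lat_avg with a (time, lat, lon) array and cosine-latitude weights:
import numpy as np
data = np.random.rand(4, 3, 5)  # (time, lat, lon)
wgt = np.cos(np.deg2rad(np.array([-30., 0., 30.])))
print(lat_avg(data, wgt).shape)  # (4, 5)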
def create_line(line_coefficients, height=5, step=0.5, vis=False):
"""
Args:
line_coefficients: A dictionary containing cylindrical coefficients:
(r, x0, y0, z0_, a, b, c
r not used: to keep the same form between cylinder coefficients and line coefficients,
so that a same group of coefficients can generate a cylinder and a line, then the line is
the Central axis of the cylinder
x0,y0,z0 the Starting center of the cylinder
a, b, c the axis coefficient of the cylinder)
height: length of the line
step: Density of line point cloud
vis: whether to visualize the cylinder
Returns:
numpy form of the line point cloud: n x 3
@Author: Carlos_Lee 202111
"""
x0 = line_coefficients['x0']
y0 = line_coefficients['y0']
z0 = line_coefficients['z0']
a = line_coefficients['a']
b = line_coefficients['b']
c = line_coefficients['c']
v = np.arange(0, height, step)
npy = np.zeros((len(v), 3))
for idx_, i in enumerate(v):
x = x0 + a / np.power(a * a + b * b + c * c, 0.5) * i
y = y0 + b / np.power(a * a + b * b + c * c, 0.5) * i
z = z0 + c / np.power(a * a + b * b + c * c, 0.5) * i
npy[idx_] = [x, y, z]
if vis:
coordinate_ = o3d.geometry.TriangleMesh.create_coordinate_frame(size=height / 2., origin=[0.0, 0.0, 0.0])
pcd_ = o3d.geometry.PointCloud()
pcd_.points = o3d.utility.Vector3dVector(npy)
o3d.visualization.draw_geometries([coordinate_, pcd_], window_name="generate line",
width=960, height=900, left=960, top=100)
return npy | 0cafddb6263841152d506c08177ef8cc0691dd89 | 23,276 |
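A usage sketch with illustrative coefficients; with a=b=0, c=1 the generated points lie along the +z axis (open3d is only required when vis=True).
coeffs = {"r": 0.5, "x0": 0.0, "y0": 0.0, "z0": 0.0, "a": 0.0, "b": 0.0, "c": 1.0}
pts = create_line(coeffs, height=2.0, step=0.1, vis=False)
print(pts.shape)   # (20, 3)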
def get_result_summaries_query(start, end, sort, state, tags):
"""Returns TaskResultSummary.query() with these filters.
Arguments:
start: Earliest creation date of retrieved tasks.
end: Most recent creation date of retrieved tasks, normally None.
sort: Order to use. Must default to 'created_ts' to use the default. Cannot
be used along start and end.
state: One of State enum value as str. Use 'all' to get all tasks.
tags: List of search for one or multiple task tags.
"""
# Disable the in-process local cache. This is important, as there can be up to
# a thousand entities loaded in memory, and this is a pure memory leak, as
# there's no chance this specific instance will need these again, therefore
# this leads to 'Exceeded soft memory limit' AppEngine errors.
q = TaskResultSummary.query(
default_options=ndb.QueryOptions(use_cache=False))
# Filter by one or more tags.
if tags:
# Add TaskResultSummary indexes if desired.
if sort != 'created_ts':
raise ValueError(
'Add needed indexes for sort:%s and tags if desired' % sort)
for tag in tags:
parts = tag.split(':', 1)
if len(parts) != 2 or any(i.strip() != i or not i for i in parts):
raise ValueError('Invalid tags')
values = parts[1].split(OR_DIM_SEP)
separated_tags = ['%s:%s' % (parts[0], v) for v in values]
q = q.filter(TaskResultSummary.tags.IN(separated_tags))
return filter_query(TaskResultSummary, q, start, end, sort, state) | 45e0d7c09ea2c2a4cd9f3827fff5eaf09b12d98a | 23,277 |
import numpy as np
def approx_jacobian(tform, image, delta=0.01):
"""approximate the image pixel gradient wrt tform using central differences
(This has been so helpful while troubleshooting jacobians,
let's keep it around for unit testing.
Parameters
----------
tform : TForm
current transform, to be applied to image and its gradient
image : ndarray(h, v)
untransformed image
delta : real or ndarray(nparams)
stepsize
Returns
-------
jacobian : ndarray(h * v, nparams)
transformation parameter derivatives at each image pixel.
out-of-bounds points will be populated with 0's
"""
if not isinstance(delta, np.ndarray):
delta = np.ones(len(tform.paramv)) * delta
npixels = np.prod(tform.output_shape)
gradvecs = np.empty((npixels, len(tform.paramv)))
for i in range(len(tform.paramv)):
dimage = np.zeros(tform.output_shape)
for sign in (-1, 1):
paramv = tform.paramv.copy()
paramv[i] += delta[i] * sign
stepT = tform.clone(paramv)
dimage += stepT.imtransform(image) * sign
gradvecs[:, i] = (dimage / (2 * delta[i])).flatten()
return np.nan_to_num(gradvecs) | 4153471fe4f94255a21b4d63c564aabe0db3b1d6 | 23,278 |
# `Collection` and the extensions below come from pystac, not typing
from pystac import Collection
from pystac.extensions.eo import EOExtension
from pystac.extensions.item_assets import ItemAssetsExtension
from pystac.extensions.raster import RasterExtension
from pystac.extensions.scientific import ScientificExtension
from pystac.extensions.view import ViewExtension
def create_collection(collection_id: str) -> Collection:
"""Creates a STAC Collection for Landsat Collection 2 Level-1 or Level-2
data.
Args:
collection_id (str): ID of the STAC Collection. Must be one of
"landsat-c2-l1" or "landsat-c2-l2".
Returns:
Collection: The created STAC Collection.
"""
if collection_id not in COLLECTION_IDS:
raise ValueError(f"Invalid collection id: {collection_id}")
fragment = CollectionFragments(collection_id).collection()
collection = Collection(id=collection_id,
title=fragment["title"],
description=fragment["description"],
license=fragment["license"],
keywords=fragment["keywords"],
providers=fragment["providers"],
extent=fragment["extent"],
summaries=fragment["summaries"])
collection.add_links(fragment["links"])
item_assets = ItemAssetsExtension(collection)
item_assets.item_assets = fragment["item_assets"]
ItemAssetsExtension.add_to(collection)
ViewExtension.add_to(collection)
ScientificExtension.add_to(collection)
RasterExtension.add_to(collection)
EOExtension.add_to(collection)
return collection | 111992f59a9a69aac0742350a0bce8b66b1ebb5d | 23,279 |
import numpy as np
def rician_noise(image, sigma, rng=None):
"""
Add Rician distributed noise to the input image.
Parameters
----------
image : array-like, shape ``(dim_x, dim_y, dim_z)`` or ``(dim_x, dim_y,
dim_z, K)``
sigma : double
rng : random number generator (a numpy.random.RandomState instance).
"""
n1 = rng.normal(loc=0, scale=sigma, size=image.shape)
n2 = rng.normal(loc=0, scale=sigma, size=image.shape)
return np.sqrt((image + n1)**2 + n2**2) | a7f5962a8c388cd69f1bdb364fa8fd5dcd1e2dd4 | 23,280 |
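A short usage sketch (numpy imported as above); the rng argument is expected to be a numpy RandomState-like generator.
rng = np.random.RandomState(42)
volume = np.zeros((32, 32, 8))                      # noise-free volume
noisy = rician_noise(volume, sigma=0.05, rng=rng)
print(noisy.min() >= 0)                             # Rician noise is non-negative on a zero image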
import pandas as pd
from pyteomics import parser  # assumed source of amino_acid_composition / std_amino_acids
def compute_composition_df(seq_df):
"""
Compute the composition matrix for all proteins.
Args:
seq_df: df, dataframe with sequences
Returns:
df, with the composition of the proteins
"""
# get composition table
df_seq_comp = pd.DataFrame(
list(seq_df["sequence"].apply(parser.amino_acid_composition).values)) * 1.0
# add column with 0s for amino acids that didnt occur in the protein fasta file
for i in parser.std_amino_acids:
if i not in df_seq_comp.columns:
df_seq_comp[i] = 0
df_seq_comp = df_seq_comp.fillna(0.0)
df_seq_comp.index = seq_df.index
return df_seq_comp | 4d68dc4568914df8349dd32d9e24a55f74896023 | 23,282 |
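A small usage sketch, assuming the pyteomics-based imports added above.
seq_df = pd.DataFrame({"sequence": ["PEPTIDE", "ACDEFGHIK"]})
comp = compute_composition_df(seq_df)
print(comp[["P", "E", "K"]])    # per-protein residue counts, zero-filled for absent amino acids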
def make_gradient_squared(
grid: CylindricalSymGrid, central: bool = True
) -> OperatorType:
"""make a discretized gradient squared operator for a cylindrical grid
{DESCR_CYLINDRICAL_GRID}
Args:
grid (:class:`~pde.grids.cylindrical.CylindricalSymGrid`):
The grid for which the operator is created
central (bool):
Whether a central difference approximation is used for the gradient
operator. If this is False, the squared gradient is calculated as
the mean of the squared values of the forward and backward
derivatives.
Returns:
A function that can be applied to an array of values
"""
# use processing for large enough arrays
dim_r, dim_z = grid.shape
parallel = dim_r * dim_z >= config["numba.parallel_threshold"]
if central:
# use central differences
scale_r, scale_z = 0.25 / grid.discretization ** 2
@jit(parallel=parallel)
def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:
"""apply gradient operator to array `arr`"""
for i in nb.prange(1, dim_r + 1): # iterate radial points
for j in range(1, dim_z + 1): # iterate axial points
term_r = (arr[i + 1, j] - arr[i - 1, j]) ** 2
term_z = (arr[i, j + 1] - arr[i, j - 1]) ** 2
out[i, j] = term_r * scale_r + term_z * scale_z
else:
# use forward and backward differences
scale_r, scale_z = 0.5 / grid.discretization ** 2
@jit(parallel=parallel)
def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:
"""apply gradient operator to array `arr`"""
for i in nb.prange(1, dim_r + 1): # iterate radial points
for j in range(1, dim_z + 1): # iterate axial points
arr_z_l, arr_c, arr_z_h = arr[i, j - 1], arr[i, j], arr[i, j + 1]
term_r = (arr[i + 1, j] - arr_c) ** 2 + (arr_c - arr[i - 1, j]) ** 2
term_z = (arr_z_h - arr_c) ** 2 + (arr_c - arr_z_l) ** 2
out[i, j] = term_r * scale_r + term_z * scale_z
return gradient_squared | a5e64b5e217200b9b5d22b20484217c2483420f2 | 23,283 |
def state(git_root):
"""Return a hash of the current state of the .git directory. Only considers
fsck verbose output and refs.
"""
if not git_root.is_dir():
return 0
rc, stdout, stderr = util.captured_run(*"git fsck --full -v".split(), cwd=git_root)
refs = "".join([ref.name + ref.value for ref in collect_refs(git_root)])
return hash(stdout + stderr + refs) | c86f528dc80cdb12e88e83ce0bfdb7b393d8ede5 | 23,284 |
def search_handler(data_type_name, search_key=None, search_value=None):
"""
Purpose: Adapt PathError and QueryError to appropriate Django error types.
Input Parameters:
data_type_name - One of the searchable types 'PasswordData' or 'GroupData'.
search_key - Name of searchable field for type specified Optional, default = None.
search_value - Value of defined field to match from data, default = None.
Return: HttpResponse with json representation of returned values.
Exceptions: Http404 on QueryError,
ImproperlyConfigured on PathError """
result_list = []
try:
search_type = settings.PWDSVC_SEARCH
if search_type == 'DataBaseSearch':
db_search = DataBaseSearch(DATAMGR)
result_list = db_search.search(data_type_name, search_key, search_value)
else:
result_list = DATAMGR.search(data_type_name, search_key, search_value)
    except PathError as path_error:
        raise ImproperlyConfigured(path_error)
    except QueryError as query_error:
        raise Http404(query_error)
if not result_list:
raise Http404('No results.')
return result_list | 5c2fd5a9e3199418febe215ee10524f70a5e9af3 | 23,285 |
def render_CardsCounter_edit(self, h, comp, *args):
"""Render the title of the associated object"""
text = var.Var(self.text)
with h.div(class_='list-counter'):
with h.div(class_='cardCounter'):
with h.form(onsubmit='return false;'):
action = h.input(type='submit').action(lambda: self.validate(text(), comp)).get('onclick')
id_ = h.generate_id()
h << h.input(id=id_, type='text', value=self.column.nb_max_cards or '', onblur=action).action(text)
h << h.script(
"""YAHOO.util.Event.on(%s, 'keyup', function (e) {
if (e.keyCode == 13) {
e.preventDefault();
this.blur();
}
var result = this.value.replace(/[^0-9]/g, '')
if (this.value !=result) {
this.value = result;
}
});""" % ajax.py2js(id_)
)
h << h.script(
"YAHOO.kansha.app.selectElement(%s);" % ajax.py2js(id_)
)
if self.error is not None:
with h.div(class_='nagare-error-message'):
h << self.error
return h.root | fca04ae6551961f1c0187d016ac06614d1a77388 | 23,286 |
def get_embedding_tids(tids, mapping):
"""Obtain token IDs based on our own tokenization, through the mapping to BERT tokens."""
mapped = []
for t in tids:
mapped += mapping[t]
return mapped | a31c9b0cf5b791590d6e30d8238cf0eb6ae2272b | 23,287 |
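A tiny illustration with a made-up token-to-wordpiece mapping.
mapping = {0: [0], 1: [1, 2], 2: [3]}        # own token id -> BERT wordpiece ids
print(get_embedding_tids([0, 2], mapping))   # [0, 3]
print(get_embedding_tids([1], mapping))      # [1, 2]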
import requests
def extract_stream_url(ashx_url):
""" Extract real stream url from tunein stream url """
r = requests.get(ashx_url)
for l in r.text.splitlines():
if len(l) != 0:
return l | 679ca261510413f652d0953551b65db8e5c2a62e | 23,289 |
def check_for_rematch(player_id1, player_id2):
"""Checks whether the two players specified have played a match before.
Args:
player_id1: ID of first player
player_id2: ID of second player
Returns:
Bool: True if they have met before, False if they have not.
"""
query = """SELECT EXISTS(SELECT 1
FROM matches
WHERE winner_pid=%(id1)s AND loser_pid=%(id2)s
OR winner_pid=%(id2)s AND loser_pid=%(id1)s);"""
parameter = {'id1': player_id1, 'id2': player_id2}
with connect_to_db() as database:
database['cursor'].execute(query, parameter)
is_rematch = database['cursor'].fetchone()[0]
return is_rematch
# Credits
# Idea for using the EXISTS PSQL keyword found on this Stack Overflow page:
# http://stackoverflow.com/questions/7471625/ | 8ee0652dc089cb286021f1d54672439881e86e56 | 23,290 |
def nextrandombitsAES(cipher, bitlength):
"""
<Purpose>
generate random bits using AES-CTR
<Arguments>
      bitlength: the length of the random string in BITS
<Side Effects>
Increases the AES counter
<Returns>
A random string with the supplied bitlength (the rightmost bits are zero if bitlength is not a multiple of 8)
"""
# offset for the last byte
bytelength = bits_to_bytes(bitlength)
bitoffset = bitlength % 8
if bitoffset > 0:
# if the bitlength is not a multiple of 8, clear the rightmost bits
pt = (bytelength - 1) * b'\0'
randombytes = cipher.encrypt(pt)
b = cipher.encrypt(b'\0')
b = (b[0] & ((0xff00 >> bitoffset) & 0xff)).to_bytes(1, byteorder = 'big')
randombytes += b
return randombytes
else:
pt = bytelength * b'\0'
return cipher.encrypt(pt) | ccc8a0ca2e2af595452e996d17e2d51125bf1d97 | 23,291 |
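A usage sketch with PyCryptodome's AES in CTR mode; bits_to_bytes is assumed to be the module's ceil(bits / 8) helper, and the fixed key is for demonstration only.
from Crypto.Cipher import AES   # PyCryptodome

key = bytes(32)                                       # demo key - never use a fixed key in practice
cipher = AES.new(key, AES.MODE_CTR, nonce=bytes(8))
rnd = nextrandombitsAES(cipher, 20)                   # 20 bits -> 3 bytes, low 4 bits of last byte zero
print(len(rnd), format(rnd[-1], "08b"))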
def _binparams2img(mc, param):
"""
    Build a 2D map of the requested parameter across all bins of the molecular cloud.
    Parameters
    ----------
    mc : dict
        Molecular cloud data (binned or unbinned)
    param : str
        Name of the parameter to extract; must be one of sos.all_params
    ----------
    """
    if param not in sos.all_params:
raise Exception('Parameter not valid')
# Get binned or full dimensions
dims, nbins, mc_binned = _get_mc_dims(mc)
sx, sy = dims
# Define paremeter matrix
param_matrix = np.zeros((sx, sy))
# Scan all the bins
for b in range(nbins):
if mc_binned:
# Get bin name
name = 'B'+str(b)
# Get coordinates
i, j = mc[name]['pos']
if not mc[name]['flag']:
# Get parameter value
m = mc[name][param]
else:
m = np.nan
else:
# Get coordinate
i, j = 0, 0
# Get parameter value
m = mc[param]
#if param == 'den':
# m = m/(1.28*1e3*1.672622e-27) # Neutral gas * proton mass [g]
param_matrix[i][j] = m
return param_matrix, dims | 70224eeb3e9a6096d4ac19232f55966873078174 | 23,292 |
def arccos(x):
"""
Compute the inverse cosine of x.
Return the "principal value" (for a description of this, see
`numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
`abs(x) <= 1`, this is a real number in the closed interval
    :math:`[0, \\pi]`. Otherwise, the complex principal value is returned.
Parameters
----------
x : array_like or scalar
The value(s) whose arccos is (are) required.
Returns
-------
out : ndarray or scalar
The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
is `out`, otherwise an array object is returned.
See Also
--------
numpy.arccos
Notes
-----
For an arccos() that returns ``NAN`` when real `x` is not in the
interval ``[-1,1]``, use `numpy.arccos`.
Examples
--------
>>> np.set_printoptions(precision=4)
>>> np.emath.arccos(1) # a scalar is returned
0.0
>>> np.emath.arccos([1,2])
array([ 0.-0.j , 0.+1.317j])
"""
x = _fix_real_abs_gt_1(x)
return nx.arccos(x) | 67b387ce0ee0b3b7927d97f163bd3258f87388bf | 23,293 |
def request_authentication(user, organization_id, short_code):
"""
Request for an authentication token from Safaricom's MPesa API
"""
mpesa_api_account = get_object_or_404(
MpesaAPIAccount.objects.filter(
organization__owner=user,
linked_account__identifier=short_code,
organization__organization_id=organization_id
))
return api.authenticate(
env="production" if mpesa_api_account.in_production else "sandbox",
app_key=mpesa_api_account.consumer_key,
app_secret=mpesa_api_account.consumer_secret) | afc012a8160c24d44b6e8986d09ffdf61ad25554 | 23,294 |
def none_to_null(value):
""" Returns None if the specified value is null, else returns the value
"""
return "null" if value == None else value | 394b1f9620cf69c862905171f4aec96838ffc631 | 23,295 |
def get_dsd_url():
"""Returns the remote URL to the global SDMX DSD for the SDGs."""
return 'https://registry.sdmx.org/ws/public/sdmxapi/rest/datastructure/IAEG-SDGs/SDG/latest/?format=sdmx-2.1&detail=full&references=children' | 996568a92825aa7a7bf1be1db8ac2cac0828360a | 23,296 |
from jcvi.utils.orderedcollections import SortedCollection
def range_closest(ranges, b, left=True):
"""
Returns the range that's closest to the given position. Notice that the
behavior is to return ONE closest range to the left end (if left is True).
This is a SLOW method.
>>> ranges = [("1", 30, 40), ("1", 33, 35), ("1", 10, 20)]
>>> b = ("1", 22, 25)
>>> range_closest(ranges, b)
('1', 10, 20)
>>> range_closest(ranges, b, left=False)
('1', 33, 35)
>>> b = ("1", 2, 5)
>>> range_closest(ranges, b)
"""
key = (lambda x: x) if left else (lambda x: (x[0], x[2], x[1]))
rr = SortedCollection(ranges, key=key)
try:
if left:
s = rr.find_le(b)
assert key(s) <= key(b), (s, b)
else:
s = rr.find_ge(b)
assert key(s) >= key(b), (s, b)
except ValueError:
s = None
return s | c32d19a4725d733855cf86bc7edd62133c42fa0f | 23,297 |
import numpy as np
def _fill_array(data, mask=None, fill_value=None):
"""
Mask numpy array and/or fill array value without demasking.
Additionally set fill_value to value.
If data is not a MaskedArray and mask is None returns silently data.
:param mask: apply mask to array
:param fill_value: fill value
"""
if mask is not None and mask is not False:
data = np.ma.MaskedArray(data, mask=mask, copy=False)
if np.ma.is_masked(data) and fill_value is not None:
data._data[data.mask] = fill_value
np.ma.set_fill_value(data, fill_value)
# elif not np.ma.is_masked(data):
# data = np.ma.filled(data)
return data | de6190f9960a854e6cb67fe5eb61fd6f984cb147 | 23,298 |
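A short sketch of the mask-and-fill behaviour (numpy imported as above).
arr = np.array([1.0, 2.0, 3.0, 4.0])
masked = _fill_array(arr, mask=[False, True, False, True], fill_value=-999.0)
print(masked)          # [1.0 -- 3.0 --]
print(masked.data)     # masked positions hold -999.0 without being demasked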
def make_Dog(size, name):
"""Create dog entity."""
new_dog = Dog(size=size, name=str(name))
if new_dog.called() == "":
return f"The {size} dog says {new_dog.talk()}."
return f"{new_dog.called()}, the {size} dog says {new_dog.talk()}." | 4c091b09d045fc8d354beebffbb1ef12b9d63840 | 23,299 |
import numpy as np
import pandas as pd
from sklearn.metrics import mutual_info_score
def one_way_mi(df, feature_list, group_column, y_var, bins):
"""
Calculates one-way mutual information group variable and a
target variable (y) given a feature list regarding.
Parameters
----------
df : pandas DataFrame
df with features used to train model, plus a target variable
and a group column.
feature_list : list DataFrame
List of strings, feature names.
group_column : string
name of column for testing bias, should contain numeric categories
y_var : string
name of target variable column
bins : tuple
number of bins for each dimension
Returns
-------
mi_table : pandas DataFrame
data frame with mutual information values, with one row per feature
in the feature_list, columns for group and y.
"""
group_cats = df[group_column].values
y_cats = df[y_var].values
c_g = [
np.histogramdd([np.array(df[feature]), group_cats], bins=bins)[0]
for feature in feature_list
]
c_y = [
np.histogramdd([np.array(df[feature]), y_cats], bins=bins)[0]
for feature in feature_list
]
# compute mutual information (MI) between trait and gender/eth/y
mi_g = [mutual_info_score(None, None, contingency=i) for i in c_g]
mi_y = [mutual_info_score(None, None, contingency=i) for i in c_y]
mi_table = pd.DataFrame({'feature': feature_list,
group_column: mi_g,
y_var: mi_y})
# NOTE: Scale group and y where the highest MI is scaled to 1 to
# facilitate interpreting relative importance to bias and performance
mi_table["{}_scaled".format(group_column)] = (
mi_table[group_column] / mi_table[group_column].max()
)
mi_table["{}_scaled".format(y_var)] = (
mi_table[y_var] / mi_table[y_var].max()
)
return mi_table | 2b3e8336a5843a2e4bedc4f42699c233bb2118d3 | 23,301 |
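A usage sketch on synthetic data, assuming the imports added above; with purely random features the mutual-information values should be small.
rng = np.random.default_rng(1)
n = 500
df = pd.DataFrame({
    "f1": rng.normal(size=n),
    "f2": rng.normal(size=n),
    "group": rng.integers(0, 2, size=n),
    "target": rng.integers(0, 2, size=n),
})
print(one_way_mi(df, ["f1", "f2"], group_column="group", y_var="target", bins=(10, 2)))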
import numpy as np
from tqdm import tqdm
def draw_parametric_bs_reps_mle(
mle_fun, gen_fun, data, args=(), size=1, progress_bar=False
):
"""Draw parametric bootstrap replicates of maximum likelihood estimator.
Parameters
----------
mle_fun : function
Function with call signature mle_fun(data, *args) that computes
a MLE for the parameters
gen_fun : function
Function to randomly draw a new data set out of the model
distribution parametrized by the MLE. Must have call
signature `gen_fun(*params, size)`.
data : one-dimemsional Numpy array
Array of measurements
args : tuple, default ()
Arguments to be passed to `mle_fun()`.
size : int, default 1
Number of bootstrap replicates to draw.
progress_bar : bool, default False
Whether or not to display progress bar.
Returns
-------
output : numpy array
Bootstrap replicates of MLEs.
"""
params = mle_fun(data, *args)
if progress_bar:
iterator = tqdm(range(size))
else:
iterator = range(size)
return np.array(
[mle_fun(gen_fun(*params, size=len(data), *args)) for _ in iterator]
) | 75569e4be203fe4614f55f077c5d0abff20b468e | 23,302 |
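A worked sketch for an exponential model, where the MLE of the rate is 1 / mean and gen_fun redraws data from the fitted distribution.
import numpy as np

rng = np.random.default_rng(0)
data = rng.exponential(scale=2.0, size=200)

def exp_mle(data):
    return np.array([1.0 / np.mean(data)])            # rate parameter

def exp_gen(rate, size):
    return rng.exponential(scale=1.0 / rate, size=size)

bs_reps = draw_parametric_bs_reps_mle(exp_mle, exp_gen, data, size=100)
print(bs_reps.shape)                                   # (100, 1)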
def parse_subpalette(words):
"""Turn palette entry into a list of color-to-index mappings.
For example, #AAA=2 or #AAAAAA=2 means that (170, 170, 170) will be
recognized as color 2 in that subpalette.
If no =number is specified, indices are recognized sequentially from 1.
Return a list of ((r, g, b), index) tuples.
"""
out = []
for i, word in enumerate(words):
color_index = word.split("=", 1)
color = parse_color(color_index[0])
index = int(color_index[1]) if len(color_index) > 1 else i + 1
out.append((color, index))
return out | 13d52a4b8092755cac28401356183025dac7dfb3 | 23,303 |
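An illustration using a hypothetical stand-in for parse_color (the real helper is defined elsewhere in this module).
def _demo_parse_color(s):                # hypothetical stand-in for parse_color
    s = s.lstrip("#")
    s = "".join(ch * 2 for ch in s) if len(s) == 3 else s
    return tuple(int(s[i:i + 2], 16) for i in (0, 2, 4))

parse_color = _demo_parse_color          # for this demonstration only
print(parse_subpalette(["#AAA", "#FFF=3"]))
# [((170, 170, 170), 1), ((255, 255, 255), 3)]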
import json
def data_word2vec(input_file, num_labels, word2vec_model):
"""
Create the research data tokenindex based on the word2vec model file.
Return the class _Data() (includes the data tokenindex and data labels).
Args:
input_file: The research data
num_labels: The number of classes
word2vec_model: The word2vec model file
Returns:
The Class _Data() (includes the data tokenindex and data labels)
Raises:
IOError: If the input file is not the .json file
"""
vocab = dict([(k, v.index) for (k, v) in word2vec_model.wv.vocab.items()])
def _token_to_index(content):
result = []
for item in content:
word2id = vocab.get(item)
if word2id is None:
word2id = 0
result.append(word2id)
return result
def _create_onehot_labels(labels_index):
label = [0] * num_labels
for item in labels_index:
label[int(item)] = 1
return label
if not input_file.endswith('.json'):
raise IOError("[Error] The research data is not a json file. "
"Please preprocess the research data into the json file.")
with open(input_file) as fin:
testid_list = []
content_index_list = []
labels_list = []
onehot_labels_list = []
labels_num_list = []
total_line = 0
for eachline in fin:
data = json.loads(eachline)
testid = data['testid']
features_content = data['features_content']
labels_index = data['labels_index']
labels_num = data['labels_num']
testid_list.append(testid)
content_index_list.append(_token_to_index(features_content))
labels_list.append(labels_index)
onehot_labels_list.append(_create_onehot_labels(labels_index))
labels_num_list.append(labels_num)
total_line += 1
class _Data:
def __init__(self):
pass
@property
def number(self):
return total_line
@property
def testid(self):
return testid_list
@property
def tokenindex(self):
return content_index_list
@property
def labels(self):
return labels_list
@property
def onehot_labels(self):
return onehot_labels_list
@property
def labels_num(self):
return labels_num_list
return _Data() | b5330fce3c71a62896fdfdc40a20743df0a313aa | 23,304 |
def object_difference():
"""Compute the difference parts between selected shapes.
- Select two objects.
Original code from HighlightDifference.FCMacro
https://github.com/FreeCAD/FreeCAD-macros/blob/master/Utility/HighlightDifference.FCMacro
Authors = 2015 Gaël Ecorchard (Galou)
"""
global verbose
msg = verbose
m_actDoc = get_ActiveDocument(info=msg)
if m_actDoc is None:
return None
createFolders('WorkObjects')
error_msg =\
"INCORRECT Object(s) Selection:\n" +\
"You Must Select Two(2) Objects !"
result_msg = ": Difference object created into WorkFeatures/WorkObjects/"
name = "Part"
part = "Part::Feature"
grp = "WorkObjects"
try:
selectionObjects = Gui.Selection.getSelection()
if len(selectionObjects) < 2:
printError_msg(error_msg)
return
object_list = []
for obj in selectionObjects:
object_list.append(obj)
for i, object_a in enumerate(object_list):
shape_a = object_a.Shape
label_a = object_a.Label
for object_b in object_list[(i + 1):]:
shape_b = object_b.Shape
label_b = object_b.Label
shape_addition = shape_a.cut(shape_b)
if shape_addition.Volume < 1e-6:
print_gui_msg("No Cut of " +\
str(label_a.encode('utf-8')) +\
" by " +\
str(label_b.encode('utf-8')))
else:
print_msg("Volume of the red " +\
str(label_a.encode('utf-8')) +\
" Cut by " +\
str(label_b.encode('utf-8')) +\
": " +\
str(shape_addition.Volume) + "\n")
if not(App.ActiveDocument.getObject(grp)):
App.ActiveDocument.addObject("App::DocumentObjectGroup", grp)
added = FreeCAD.ActiveDocument.addObject(part)
added.Label = "Cut red (" +\
str(label_a.encode('utf-8')) +\
"-" +\
str(label_b.encode('utf-8')) +\
")"
added.Shape = shape_addition
App.ActiveDocument.getObject(grp).addObject(added)
added.ViewObject.ShapeColor = (1.0, 0.0, 0.0, 1.0)
shape_removal = shape_b.cut(shape_a)
if shape_removal.Volume < 1e-6:
print_gui_msg("No Cut of " +\
str(label_b.encode('utf-8')) +\
" by " +\
str(label_a.encode('utf-8')))
else:
print_msg("Volume of the green " +\
str(label_b.encode('utf-8')) +\
" Cut by " +\
str(label_a.encode('utf-8')) +\
": " +\
str(shape_removal.Volume) + "\n")
if not(App.ActiveDocument.getObject(grp)):
App.ActiveDocument.addObject("App::DocumentObjectGroup", grp)
removed = FreeCAD.ActiveDocument.addObject(part)
removed.Label = "Cut green (" +\
str(label_b.encode('utf-8')) +\
"-" +\
str(label_a.encode('utf-8')) +\
")"
removed.Shape = shape_removal
App.ActiveDocument.getObject(grp).addObject(removed)
removed.ViewObject.ShapeColor = (0.0, 0.5, 0.0, 1.0)
object_a.ViewObject.Transparency = 80
object_b.ViewObject.Transparency = 80
except:
printError_msg(error_msg) | b99fb61e0d85cc66ae395273f785f8f7441fd297 | 23,306 |
import numpy as np
def diffusionkernel(sigma, N=4, returnt=False):
""" diffusionkernel(sigma, N=4, returnt=False)
A discrete analog to the continuous Gaussian kernel,
as proposed by Toni Lindeberg.
N is the tail length factor (relative to sigma).
"""
# Make sure sigma is float
sigma = float(sigma)
    # Often referred to as the scale parameter, or t
sigma2 = sigma*sigma
# Where we start, from which we go backwards
# This is also the tail length
if N > 0:
nstart = int(np.ceil(N*sigma)) + 1
else:
nstart = abs(N) + 1
# Allocate kernel and times
t = np.arange(-nstart, nstart+1, dtype='float64')
k = np.zeros_like(t)
# Make a start
n = nstart # center (t[nstart]==0)
k[n+nstart] = 0
n = n-1
k[n+nstart] = 0.01
# Iterate!
for n in range(nstart-1,0,-1):
# Calculate previous
k[(n-1)+nstart] = 2*n/sigma2 * k[n+nstart] + k[(n+1)+nstart]
# The part at the left can be erroneous, so let's use the right part only
k[:nstart] = np.flipud(k[-nstart:])
# Remove the tail, which is zero
k = k[1:-1]
t = t[1:-1]
# Normalize
k = k / k.sum()
# the function T that we look for is T = e^(-sigma2) * I(n,sigma2)
# We found I(n,sigma2) and because we normalized it, the normalization term
    # e^(-sigma2) is no longer necessary.
# Done
if returnt:
return k, t
else:
return k | 3fe620454963eec357072bd0ecd64fd66b392600 | 23,307 |
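A usage sketch (numpy imported as above): build the kernel, check normalisation, and smooth a signal with it.
k, t = diffusionkernel(2.0, N=4, returnt=True)
print(len(k), t[0], t[-1])        # odd length, symmetric support
print(np.isclose(k.sum(), 1.0))   # kernel is normalised
signal = np.random.randn(100)
smoothed = np.convolve(signal, k, mode="same")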
from rdkit.Chem.AtomPairs import Torsions
def CalculateTopologicalTorsionFingerprint(mol):
"""
#################################################################
Calculate Topological Torsion Fingerprints
Usage:
result=CalculateTopologicalTorsionFingerprint(mol)
Input: mol is a molecule object.
Output: result is a tuple form. The first is the number of
fingerprints. The second is a dict form whose keys are the
position which this molecule has some substructure. The third
is the DataStructs which is used for calculating the similarity.
#################################################################
"""
res = Torsions.GetTopologicalTorsionFingerprint(mol)
return res.GetLength(), res.GetNonzeroElements(), res | 0c646ea977ea257c523425a7f8526c16f8531e14 | 23,308 |
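A quick usage sketch with RDKit, assuming the import added above.
from rdkit import Chem

mol = Chem.MolFromSmiles("CCOC(=O)c1ccccc1")    # ethyl benzoate
n_bits, nonzero, fp = CalculateTopologicalTorsionFingerprint(mol)
print(n_bits, len(nonzero))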