def is_modified(filename: str) -> bool:
    """
    Given a filename, return True if the file is new or has been
    modified, based on the module-level hash registries.
    """
    global new_hashes
    global old_hashes
    if filename in old_hashes:
        if old_hashes[filename] == new_hashes.get(filename):
            return False
    return True | f5f191a9fc714d0431d8c464630ab6b0c95f13dd | 1,500 |
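A quick, hypothetical driver for is_modified; old_hashes and new_hashes are assumed to be module-level dicts mapping filenames to content digests:

import hashlib

old_hashes = {'a.txt': hashlib.sha1(b'v1').hexdigest()}
new_hashes = {'a.txt': hashlib.sha1(b'v2').hexdigest(),
              'b.txt': hashlib.sha1(b'new').hexdigest()}

print(is_modified('a.txt'))  # True: the digest changed
print(is_modified('b.txt'))  # True: the file is new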
def _is_url_without_path_query_or_fragment(url_parts):
    """
    Determines if a URL has a blank (or bare '/search') path, an empty
    query string and an empty fragment.
    :param url_parts: A parsed URL.
    :type url_parts: :class:`urlparse.ParseResult`
    """
    return url_parts.path.strip('/') in ['', 'search'] and url_parts.query == '' \
        and url_parts.fragment == '' | 4bad1f230adfa77df019519db276a181d57682dd | 1,501 |
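A minimal check of the predicate, assuming Python 3's urllib.parse in place of the Python 2 urlparse module named in the docstring:

from urllib.parse import urlparse

print(_is_url_without_path_query_or_fragment(urlparse('https://example.com/')))        # True
print(_is_url_without_path_query_or_fragment(urlparse('https://example.com/search')))  # True: bare '/search' counts as blank
print(_is_url_without_path_query_or_fragment(urlparse('https://example.com/a?q=1')))   # False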
import copy

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import OrderedDict as odict

from hera_pspec import utils  # assumed source of get_bl_lens_angs used below
def delay_waterfall(uvp, blpairs, spw, pol, component='abs-real',
average_blpairs=False, fold=False, delay=True,
deltasq=False, log=True, lst_in_hrs=True,
vmin=None, vmax=None, cmap='YlGnBu', axes=None,
figsize=(14, 6), force_plot=False, times=None,
title_type='blpair', colorbar=True, **kwargs):
"""
Plot a 1D delay spectrum waterfall (or spectra) for a group of baselines.
Parameters
----------
uvp : UVPspec
UVPSpec object, containing delay spectra for a set of baseline-pairs,
times, polarizations, and spectral windows.
blpairs : list of tuples or lists of tuples
List of baseline-pair tuples, or groups of baseline-pair tuples.
spw, pol : int or str
Which spectral window and polarization to plot.
component : str
Component of complex spectra to plot, options=['abs', 'real', 'imag', 'abs-real', 'abs-imag'].
abs-real is abs(real(data)), whereas 'real' is real(data)
Default: 'abs-real'.
average_blpairs : bool, optional
If True, average over the baseline pairs within each group.
fold : bool, optional
Whether to fold the power spectrum in :math:`|k_\parallel|`.
Default: False.
delay : bool, optional
Whether to plot the power spectrum in delay units (ns) or cosmological
units (h/Mpc). Default: True.
deltasq : bool, optional
If True, plot dimensionless power spectra, Delta^2. This is ignored if
delay=True. Default: False.
log : bool, optional
Whether to plot the log10 of the data. Default: True.
lst_in_hrs : bool, optional
        If True, LST is plotted in hours, otherwise it's plotted in radians.
vmin, vmax : float, optional
Clip the color scale of the delay spectrum to these min./max. values.
If None, use the natural range of the data. Default: None.
cmap : str, optional
Matplotlib colormap to use. Default: 'YlGnBu'.
axes : array of matplotlib.axes, optional
Use this to pass in an existing Axes object or array of axes, which
the power spectra will be added to. (Warning: Labels and legends will
not be altered in this case, even if the existing plot has completely different axis
labels etc.) If None, a new Axes object will be created. Default: None.
figsize : tuple
len-2 integer tuple specifying figure size if axes is None
force_plot : bool
If plotting a large number of blpairs (>20), this routine will quit
unless force_plot == True.
times : array_like, optional
Float ndarray containing elements from time_avg_array to plot.
title_type : str, optional
Type of title to put above plot(s). Options = ['blpair', 'blvec']
blpair : "bls: {bl1} x {bl2}"
blvec : "bl len {len} m & ang {ang} deg"
colorbar : bool, optional
Whether to make a colorbar. Default: True
kwargs : keyword arguments
Additional kwargs to pass to ax.matshow()
Returns
-------
fig : matplotlib.pyplot.Figure
Matplotlib Figure instance if input ax is None.
"""
# assert component
assert component in ['real', 'abs', 'imag', 'abs-real', 'abs-imag'], "Can't parse specified component {}".format(component)
fix_negval = component in ['real', 'imag'] and log
# Add ungrouped baseline-pairs into a group of their own (expected by the
# averaging routines)
blpairs_in = blpairs
blpairs = [] # Must be a list, not an array
for i, blpgrp in enumerate(blpairs_in):
if not isinstance(blpgrp, list):
blpairs.append([blpairs_in[i],])
else:
blpairs.append(blpairs_in[i])
# iterate through and make sure they are blpair integers
_blpairs = []
for blpgrp in blpairs:
_blpgrp = []
for blp in blpgrp:
if isinstance(blp, tuple):
blp_int = uvp.antnums_to_blpair(blp)
else:
blp_int = blp
_blpgrp.append(blp_int)
_blpairs.append(_blpgrp)
blpairs = _blpairs
# Select times if requested
if times is not None:
uvp = uvp.select(times=times, inplace=False)
# Average over blpairs or times if requested
blpairs_in = copy.deepcopy(blpairs) # Save input blpair list
if average_blpairs:
uvp_plt = uvp.average_spectra(blpair_groups=blpairs,
time_avg=False, inplace=False)
else:
uvp_plt = copy.deepcopy(uvp)
# Fold the power spectra if requested
if fold:
uvp_plt.fold_spectra()
# Convert to Delta^2 units if requested
if deltasq and not delay:
uvp_plt.convert_to_deltasq()
# Get x-axis units (delays in ns, or k_parallel in Mpc^-1 or h Mpc^-1)
if delay:
dlys = uvp_plt.get_dlys(spw) * 1e9 # ns
x = dlys
else:
k_para = uvp_plt.get_kparas(spw)
x = k_para
# Extract power spectra into array
waterfall = odict()
for blgrp in blpairs:
# Loop over blpairs in group and plot power spectrum for each one
for blp in blgrp:
# make key
key = (spw, blp, pol)
# get power data
power = uvp_plt.get_data(key, omit_flags=False)
# set flagged power data to nan
flags = np.isclose(uvp_plt.get_integrations(key), 0.0)
power[flags, :] = np.nan
# get component
if component == 'abs':
power = np.abs(power)
elif component == 'real':
power = np.real(power)
elif component == 'abs-real':
power = np.abs(np.real(power))
elif component == 'imag':
power = np.imag(power)
            elif component == 'abs-imag':
                power = np.abs(np.imag(power))
# if real or imag and log is True, set negative values to near zero
# this is done so that one can use cmap.set_under() and cmap.set_bad() separately
if fix_negval:
power[power < 0] = np.abs(power).min() * 1e-6 + 1e-10
# assign to waterfall
waterfall[key] = power
# If blpairs were averaged, only the first blpair in the group
# exists any more (so skip the rest)
if average_blpairs: break
# check for reasonable number of blpairs to plot...
Nkeys = len(waterfall)
    if Nkeys > 20 and not force_plot:
        raise ValueError("Nblps > 20 and force_plot == False, quitting...")
# Take logarithm of data if requested
if log:
for k in waterfall:
waterfall[k] = np.log10(np.abs(waterfall[k]))
logunits = "\log_{10}"
else:
logunits = ""
# Create new Axes if none specified
new_plot = False
if axes is None:
new_plot = True
# figure out how many subplots to make
Nkeys = len(waterfall)
Nside = int(np.ceil(np.sqrt(Nkeys)))
fig, axes = plt.subplots(Nside, Nside, figsize=figsize)
# Ensure axes is an ndarray
    if isinstance(axes, matplotlib.axes.Axes):
axes = np.array([[axes]])
if isinstance(axes, list):
axes = np.array(axes)
# Ensure its 2D and get side lengths
if axes.ndim == 1:
axes = axes[:, None]
assert axes.ndim == 2, "input axes must have ndim == 2"
Nvert, Nhorz = axes.shape
# Get LST range: setting y-ticks is tricky due to LST wrapping...
y = uvp_plt.lst_avg_array[
uvp_plt.key_to_indices(list(waterfall.keys())[0])[1] ]
y = np.unwrap(y)
    if y[0] > np.pi:
        # if the start is closer to 2*pi than to 0, wrap the axis down by a full cycle
        y -= 2 * np.pi
if lst_in_hrs:
lst_units = "Hr"
y *= 24 / (2 * np.pi)
else:
lst_units = "rad"
# get baseline vectors
blvecs = dict(zip([uvp_plt.bl_to_antnums(bl) for bl in uvp_plt.bl_array], uvp_plt.get_ENU_bl_vecs()))
# Sanitize power spectrum units
psunits = uvp_plt.units
if "h^-1" in psunits: psunits = psunits.replace("h^-1", "h^{-1}")
if "h^-3" in psunits: psunits = psunits.replace("h^-3", "h^{-3}")
if "Hz" in psunits: psunits = psunits.replace("Hz", r"{\rm Hz}")
if "str" in psunits: psunits = psunits.replace("str", r"\,{\rm str}")
if "Mpc" in psunits and "\\rm" not in psunits:
psunits = psunits.replace("Mpc", r"{\rm Mpc}")
if "pi" in psunits and "\\pi" not in psunits:
psunits = psunits.replace("pi", r"\pi")
if "beam normalization not specified" in psunits:
psunits = psunits.replace("beam normalization not specified",
r"{\rm unnormed}")
# Iterate over waterfall keys
keys = list(waterfall.keys())
k = 0
for i in range(Nvert):
for j in range(Nhorz):
# set ax
ax = axes[i, j]
# turn off subplot if no more plots to make
if k >= Nkeys:
ax.axis('off')
continue
# get blpair key for this subplot
key = keys[k]
blp = uvp_plt.blpair_to_antnums(key[1])
# plot waterfall
cax = ax.matshow(waterfall[key], cmap=cmap, aspect='auto',
vmin=vmin, vmax=vmax,
extent=[x[0], x[-1], y[-1], y[0]], **kwargs)
# ax config
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(labelsize=12)
if ax.get_title() == '':
if title_type == 'blpair':
ax.set_title("bls: {} x {}".format(*blp), y=1)
elif title_type == 'blvec':
blv = 0.5 * (blvecs[blp[0]] + blvecs[blp[1]])
lens, angs = utils.get_bl_lens_angs([blv], bl_error_tol=1.0)
ax.set_title("bl len {len:0.2f} m & {ang:0.0f} deg".format(len=lens[0], ang=angs[0]), y=1)
# set colorbar
if colorbar:
if fix_negval:
cb_extend = 'min'
else:
cb_extend = 'neither'
cbar = ax.get_figure().colorbar(cax, ax=ax, extend=cb_extend)
cbar.ax.tick_params(labelsize=14)
if fix_negval:
cbar.ax.set_title("$< 0$",y=-0.05, fontsize=16)
# configure left-column plots
if j == 0:
# set yticks
ax.set_ylabel(r"LST [{}]".format(lst_units), fontsize=16)
else:
ax.set_yticklabels([])
# configure bottom-row plots
if k + Nhorz >= Nkeys:
if ax.get_xlabel() == "":
if delay:
ax.set_xlabel(r"$\tau$ $[{\rm ns}]$", fontsize=16)
else:
ax.set_xlabel("$k_{\parallel}\ h\ Mpc^{-1}$", fontsize=16)
else:
ax.set_xticklabels([])
k += 1
# make suptitle
if axes[0][0].get_figure()._suptitle is None:
        if deltasq:
            units = r"$%s\Delta^2$ $[%s]$" % (logunits, psunits)
        else:
            units = r"$%sP(k_\parallel)$ $[%s]$" % (logunits, psunits)
spwrange = np.around(np.array(uvp_plt.get_spw_ranges()[spw][:2]) / 1e6, 2)
axes[0][0].get_figure().suptitle("{}\n{} polarization | {} -- {} MHz".format(units, pol, *spwrange),
y=1.03, fontsize=14)
# Return Axes
if new_plot:
return fig | 2d67c74c53096a1b1451008d27b1e5cf9a2f7110 | 1,502 |
import math

EARTH_RADIUS = 6371000  # mean Earth radius in meters; not defined in the original snippet

def wgs84_distance(lat1, lon1, lat2, lon2):
    """Distance (in meters) between two points in WGS84 coord system."""
dLat = math.radians(lat2 - lat1)
dLon = math.radians(lon2 - lon1)
a = (math.sin(dLat / 2) * math.sin(dLat / 2) +
math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *
math.sin(dLon / 2) * math.sin(dLon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = EARTH_RADIUS * c
return d | b700c218c172843922762b741f37b25996fdc047 | 1,503 |
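A rough sanity check with approximate city coordinates, using the EARTH_RADIUS constant defined above:

print(round(wgs84_distance(48.8566, 2.3522, 51.5074, -0.1278) / 1000))  # Paris to London, roughly 344 km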
from botorch.acquisition import AcquisitionFunction
from botorch.optim import optimize_acqf

def optimize_acq_func(acq_func: AcquisitionFunction, bounds=None, options=None):
    """Optimizes the acquisition function and returns the best candidate point."""
# optimize
candidates, _ = optimize_acqf(
acq_function=acq_func,
bounds=bounds,
q=1,
num_restarts=20,
raw_samples=512,
options=options,
)
new_x = candidates.detach()
return new_x | 9aef150a89f646f8f65efe656a2987a7afe9f917 | 1,504 |
import json

import netaddr
from netaddr import AddrConversionError, AddrFormatError
def _recover_distributor(lb_id):
"""Get cached Distributor object or generate from ovs external_ids
{
'dist-lb-id': lb_id,
'dist-vip': vip,
'dist-size': size,
'dist-status': status,
'dist-mac': mac,
'dist-hash-fields': field-list,
'dist-ofport': ofport, # of external iface
'slot-100': 'amphora_id,mac',
'slot-101': 'amphora_id,mac',
'slot-...': 'amphora_id,mac',
}
"""
if _provision_state.state == DISTRIBUTOR_BOOTING:
msg = _('Error while recovering loadbalancer %(lb)s.'
' Server status is %(status)s'
) % dict(lb=lb_id, status=_provision_state.state)
LOG.error(msg)
raise DistributorUsageError(msg)
if lb_id in _distributors:
return _distributors[lb_id]
ret, out, err = _run_vsctl(
VSCTL_FIND_EXTERNAL_ID.format(key='dist-lb-id',
value=lb_id),
extra_args=[VSCTL_JSON_FORMAT])
if ret != 0:
msg = _('Error while recovering loadbalancer %(lb)s.'
' Find failed with exit_status=%(ret)d'
                '\nstderr=%(err)s'
) % dict(lb=lb_id, ret=ret, err=err)
LOG.error(msg)
_provision_state.go_error(msg)
raise DistributorFatalError(msg)
    # ovs json is a nested [type, value] list
# br_list = {'data': [[br_name,
# ['map',
# [['dist-lb-id', lb_id],
# ['dist-vip', vip],
# ['dist-size', size],
# ['dist-status', status],
# ['dist-mac', mac],
# ['dist-hash-fields', field-list],
# ['dist-ofport', ofport],
# ['slot-100', amphora_id,mac],
# ['slot-101', amphora_id,mac],
# ['slot-...', amphora_id,mac]]]]]
# 'headings': ['name', 'external_ids']}
try:
br_list = json.loads(out)
br_name = br_list['data'][0][0]
br_properties = dict(br_list['data'][0][1][1])
except (ValueError, KeyError, IndexError, TypeError):
msg = _('Error while recovering loadbalancer %(lb)s.'
' Could not parse find results %(out)s.'
) % dict(lb=lb_id, out=out)
LOG.error(msg)
_provision_state.go_error(msg)
raise DistributorFatalError(msg)
found_id = br_properties.pop('dist-lb-id', None)
if lb_id != found_id or len(br_list['data']) != 1:
msg = _('Error while recovering loadbalancer %(lb)s. None or'
' duplicate bridge found. out=%(out)s'
) % dict(lb=lb_id, out=br_list)
LOG.error(msg)
return None
# one error type for all property parsing issues, catch all
# expected errors
try:
vip = netaddr.IPAddress(br_properties.pop('dist-vip'))
size = int(br_properties.pop('dist-size'))
status = br_properties.pop('dist-status')
assert status in (ONLINE, DEGRADED, ERROR, NO_MONITOR)
mac = netaddr.EUI(br_properties.pop('dist-mac'),
dialect=netaddr.mac_unix)
iface = _interface_by_mac(mac)
hash_selection_fields = br_properties.pop(
'dist-hash-fields').split(',')
ofport = int(br_properties.pop('dist-ofport'))
except (AssertionError, KeyError, ValueError, UnicodeDecodeError,
AddrFormatError, TypeError, IndexError,
NotImplementedError, AddrConversionError, StopIteration):
# we have a bridge name so we should try to delete it
ret, out, err = _run_vsctl(VSCTL_DEL_BR.format(br_name))
killed = 'killed' if ret == 0 else 'kill failed: stderr=%s' % err
msg = _('Error while recovering loadbalancer %(lb)s.'
' bad bridge properties %(props)s.'
' Killing bridge %(kill_msg)s'
) % dict(lb=lb_id, props=br_properties, kill_msg=killed)
LOG.error(msg)
raise DistributorInstanceError(msg)
distributor = _Distributor(name=br_name, lb_id=lb_id, vip=vip,
mac=mac, iface=iface, size=size)
for slot in range(DST_GROUPS_OFFSET, DST_GROUPS_OFFSET + size):
slot_key = SLOT_KEY_FORMAT.format(slot)
if slot_key in br_properties:
amphora_id, amphora_mac = br_properties[slot_key].split(',')
# mac = netaddr.EUI(amphora_mac, dialect=netaddr.mac_unix)
distributor.destinations[amphora_id] = slot, amphora_mac
else:
distributor.free_slots.add(slot)
distributor.hash_selection_fields = hash_selection_fields
distributor.fail = (ERROR == status)
distributor.ofport = ofport
_distributors[lb_id] = distributor
return distributor | a97cb4843515cf83314044af72a91b344d475a2d | 1,505 |
import logging
import warnings
from typing import Literal

import numpy as np
def rprecision_score(
y_true, y_pred, ratio: float = 1.0, negative_class=-1,
zero_division: Literal["warn", 0, 1] = "warn"
):
"""Calculate r-precision score for multiclass classification.
The variables y_true and y_pred are the true and predicted labels
respectively. The variable ratio defines the expected number of
samples in the negative class relative to the foreground class.
See the paper:
T. Wang, "High Precision Open-World Website Fingerprinting," in
2020 IEEE Symposium on Security and Privacy (SP), Los Alamitos,
CA, USA, 2020, pp. 231–246, doi: 10.1109/SP.2020.00015.
for more information.
"""
# pylint: disable=too-many-locals
logger = logging.getLogger(__name__)
pos_labels = (y_true != negative_class)
pos_predictions = (y_pred != negative_class)
n_true_positive = np.sum(pos_labels & (y_true == y_pred))
logger.debug("n_true_positive: %d", n_true_positive)
# Positive predictions which were not correct for positive classes
n_wrong_positive = np.sum(pos_labels & pos_predictions & (y_true != y_pred))
n_false_positive = np.sum(~pos_labels & pos_predictions)
logger.debug("n_wrong_positive: %d, n_false_positive: %d",
n_wrong_positive, n_false_positive)
n_positive = np.sum(pos_labels)
n_negative = len(y_true) - n_positive
logger.debug("n_positive: %d, n_negative: %d", n_positive, n_negative)
true_positive_rate = n_true_positive / n_positive
wrong_positive_rate = n_wrong_positive / n_positive
false_positive_rate = n_false_positive / n_negative
if n_true_positive == n_wrong_positive == n_false_positive == 0:
if zero_division == "warn":
warnings.warn("Attempted division by zero in rprecision. "
"Returning 0 instead.", RuntimeWarning)
zero_division = 0
return zero_division
logger.debug("r_%d-precision = %.3g / (%.3g + %.3g + %d * %.3g)",
ratio, true_positive_rate, true_positive_rate,
wrong_positive_rate, ratio, false_positive_rate)
return true_positive_rate / (
true_positive_rate + wrong_positive_rate + ratio * false_positive_rate) | c16b7cd3ee1226276e336a27649bf45bf105898a | 1,506 |
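A small worked example: with the arrays below, TPR = 2/3, WPR = 1/3 and FPR = 1/3, so the r=1 precision is (2/3) / (2/3 + 1/3 + 1/3) = 0.5:

import numpy as np

y_true = np.array([0, 0, 1, -1, -1, -1])
y_pred = np.array([0, 1, 1,  0, -1, -1])
print(rprecision_score(y_true, y_pred, ratio=1.0))  # 0.5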
def setup_dispatcher(dp):
"""
Adding handlers for events from Telegram
"""
# commands
dp.add_handler(CommandHandler("start", commands.command_start))
dp.add_handler(CommandHandler("help", commands.command_help))
# admin & mod commands
dp.add_handler(CommandHandler("admin", admin.admin_command))
dp.add_handler(CommandHandler("bot_stats", admin.bot_user_stats))
dp.add_handler(CommandHandler(f"{broadcast_command[1:]}", broadcast_command_with_message))
dp.add_handler(CommandHandler('add_mod', admin.add_moderator))
dp.add_handler(CommandHandler('remove_mod', admin.remove_moderator))
# conversations
pass
# callback queries
dp.add_handler(CallbackQueryHandler(broadcast_decision_handler, pattern=f"^{CONFIRM_DECLINE_BROADCAST}"))
return dp | 1b37f48a8e3f9cfe451edb321b20dbde88853a84 | 1,507 |
import re
import ast
def get_version():
"""Gets the current version"""
_version_re = re.compile(r"__VERSION__\s+=\s+(.*)")
with open("leaked/__init__.py", "rb") as init_file:
version = str(ast.literal_eval(_version_re.search(
init_file.read().decode("utf-8")).group(1)))
return version | a6c5a94ca3cb728af38075ac98105be6d82dd3cf | 1,508 |
import re

def dir_keys(path):
    """Take a path and return the list of all numbers it contains, as floats.
    Mainly used for sorting paths by the parameters they contain."""
    regex = r'[-+]?[0-9]+(?:\.[0-9]+)?(?:[eE][-+]?[0-9]+)?'  # matches any floating point number
    m = re.findall(regex, path)
    if not m:
        raise ValueError('Your path does not contain any numbers')
    return list(map(float, m)) | c2c32772771c9bae23a1fcc949a509eaaf36d602 | 1,509 |
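For example, every number in the path is extracted in order:

print(dir_keys('runs/T300K_lr0.01/seed-2'))  # [300.0, 0.01, -2.0]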
import numpy as np

def generate_data(n=5, T=1000, random_state=None, initial_data=None):
"""
Parameter
---------
n : int
number of variables
T : int
number of samples
random_state : int
seed for np.random.seed
initial_data : list of np.ndarray
dictionary of initial datas
"""
    T_spurious = 20
    expon = 1.5
    if random_state is not None:
        np.random.seed(random_state)
    if initial_data is None:
permutation = np.random.permutation(n)
value = np.random.uniform(low=0.05, high=0.5, size=(n, n))
sign = np.random.choice([-1, 1], size=(n, n))
B0 = np.multiply(value, sign)
B0 = np.multiply(B0, np.random.binomial(1, 0.4, size=(n, n)))
B0 = np.tril(B0, k=-1)
B0 = B0[permutation][:, permutation]
value = np.random.uniform(low=0.05, high=0.5, size=(n, n))
sign = np.random.choice([-1, 1], size=(n, n))
B1 = np.multiply(value, sign)
B1 = np.multiply(B1, np.random.binomial(1, 0.4, size=(n, n)))
causal_order = np.empty(len(permutation))
causal_order[permutation] = np.arange(len(permutation))
causal_order = causal_order.astype(int)
    else:
        B0 = initial_data['B0']
        B1 = initial_data['B1']
        causal_order = initial_data['causal_order']
    M1 = np.dot(np.linalg.inv(np.eye(n) - B0), B1)
    ee = np.empty((n, T + T_spurious))
    for i in range(n):
        ee[i, :] = np.random.normal(size=(1, T + T_spurious))
        ee[i, :] = np.multiply(np.sign(ee[i, :]), abs(ee[i, :]) ** expon)
        ee[i, :] = ee[i, :] - np.mean(ee[i, :])
        ee[i, :] = ee[i, :] / np.std(ee[i, :])
    std_e = np.random.uniform(size=(n,)) + 0.5
    nn = np.dot(np.dot(np.linalg.inv(np.eye(n) - B0), np.diag(std_e)), ee)
    xx = np.zeros((n, T + T_spurious))
    xx[:, 0] = np.random.normal(size=(n, ))
    for t in range(1, T + T_spurious):
        xx[:, t] = np.dot(M1, xx[:, t - 1]) + nn[:, t]
    data = xx[:, T_spurious + 1 : T_spurious + T]
    return data.T, B0, B1, causal_order | 5e5c09de44f6db1ba28cd953d6549bb8d31aa3ec | 1,510 |
from typing import List
import re
def _get_paragraphs(paragraphs: List[str]) -> List[str]:
"""
Returns the paragraphs of an article's body, annotated with HTML tags.
Args:
paragraphs (:obj:`List[str]`):
List of strings denoting paragraphs.
Returns:
:obj:`List[str]`:
List of paragraphs annotated with HTML tags.
"""
    paragraphs = [_add_html_tag(paragraph, 'p') for paragraph in paragraphs
                  if not re.findall('trends.embed.renderExploreWidget', paragraph)]
return paragraphs | a4030efd2145fb15435912a1e08354cabba209e8 | 1,511 |
import pandas as pd
from scipy.stats import gaussian_kde
from tstoolbox import tsutils  # assumed home of the common_kwds/error_wrapper helpers
def calculate_kde(
ascending: bool = True,
evaluate: bool = False,
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
source_units=None,
target_units=None,
names=None,
):
"""Return the kernel density estimation (KDE) curve."""
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
source_units=source_units,
target_units=target_units,
clean=clean,
)
if len(tsd.columns) > 1:
raise ValueError(
tsutils.error_wrapper(
"""
Right now "calculate_kde" only support one time-series at a time.
You gave {}.
""".format(
tsd.columns
)
)
)
tmptsd = tsd.dropna()
ndf = tmptsd.sort_values(tmptsd.columns[0], ascending=ascending)
gkde = gaussian_kde(ndf.iloc[:, 0])
if evaluate is True:
y = gkde.evaluate(tmptsd.iloc[:, 0])
ndf = pd.DataFrame(y, index=tmptsd.index)
else:
y = gkde.evaluate(ndf.iloc[:, 0])
ndf = pd.DataFrame(y)
return ndf | d654fe75030b8c99361096650c71835aad2d6b3a | 1,512 |
import numpy as np

def EPmulk(a, da, k):
    """
    Error propagation for multiplication by a constant: C = A * k.
    Returns the product and the propagated uncertainty |da * k|.
    """
    return a * k, np.absolute(da * k) | 4fb2b7ff28db1ff13fa2aa0c68f5d0c25e9ba3d9 | 1,513 |
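For instance, scaling measurements by a constant scales the uncertainty by its magnitude:

val, err = EPmulk(np.array([2.0, 4.0]), np.array([0.1, 0.2]), -3.0)
print(val)  # [ -6. -12.]
print(err)  # [0.3 0.6]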
import os
import sys

from Cython.Build import cythonize
from numpy.distutils.misc_util import Configuration
def configuration(parent_package='', top_path=None):
"""
A utility function from numpy.distutils.misc_util to compile Fortran and C
codes. This function will be passed to numpy.distutil.core.setup().
"""
config = Configuration(None, parent_package, top_path)
# Define extern directory where external libraries source codes are.
package_name = 'special_functions'
extern_dir_name = '_extern'
extern_dir = os.path.join('.', package_name, extern_dir_name)
macros = []
if sys.platform == 'win32':
macros.append(('_USE_MATH_DEFINES', None))
# amos (fortran library)
config.add_library(
'amos',
sources=[
os.path.join(extern_dir, 'amos', 'mach', '*.f'),
os.path.join(extern_dir, 'amos', 'double_precision', '*.f'),
os.path.join(extern_dir, 'amos', 'single_precision', '*.f')
],
macros=macros)
# cephes (c library)
config.add_library(
'cephes',
sources=[
os.path.join(extern_dir, 'cephes', 'bessel', '*.c'),
os.path.join(extern_dir, 'cephes', 'cprob', '*.c'),
os.path.join(extern_dir, 'cephes', 'eval', '*.c'),
os.path.join(extern_dir, 'cephes', 'cmath', '*.c')
],
include_dirs=[
os.path.join(extern_dir, 'cephes', 'eval')
],
macros=macros)
    # If environment variable "CYTHON_BUILD_IN_SOURCE" exists, cython creates *.c
    # files in the source tree, otherwise in /build.
cython_build_in_source = os.environ.get('CYTHON_BUILD_IN_SOURCE', None)
if bool(cython_build_in_source):
cython_build_dir = None # builds *.c in source alongside *.pyx files
else:
cython_build_dir = 'build'
# Cythontize *.pyx files to generate *.c files.
extensions = cythonize(
os.path.join('.', package_name, '*.pyx'),
build_dir=cython_build_dir,
include_path=[os.path.join('.', package_name)],
language_level="3",
compiler_directives={
'boundscheck': False,
'cdivision': True,
'wraparound': False,
'nonecheck': False,
'embedsignature': True,
'linetrace': True
})
# Add extensions to config per each *.c file
for extension in extensions:
config.add_extension(
extension.name,
sources=extension.sources,
include_dirs=extension.include_dirs,
libraries=['amos', 'cephes'],
language=extension.language,
define_macros=macros)
# Additional files, particularly, the API files to (c)import (*.pxd, *.py)
config.add_data_files(os.path.join(package_name, '*.pxd')) # cython API
config.add_data_files(os.path.join(package_name, '*.py')) # python API
config.add_data_files((package_name, 'LICENSE.txt'))
config.add_data_files((package_name, 'AUTHORS.txt'))
config.add_data_files((package_name, 'README.rst'))
config.add_data_files((package_name, 'CHANGELOG.rst'))
return config | 9b6604d6124947da8322fd7f7de76bd44fff2e7d | 1,514 |
from urllib import parse

def decrypt_location(location):
"""Decrypts the `location` field in Xiami responses to URL."""
if not location:
return None
rows, url = int(location[:1]), location[1:]
urllen = len(url)
cols_base = urllen // rows # basic column count
rows_ex = urllen % rows # count of rows that have 1 more column
matrix = []
for r in range(rows):
length = cols_base + 1 if r < rows_ex else cols_base
matrix.append(url[:length])
url = url[length:]
url = ''
for i in range(urllen):
url += matrix[i % rows][i // rows]
return parse.unquote(url).replace('^', '0') | 2fc3062df2786550e2b4839fae4aee5668963cc1 | 1,515 |
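A worked example of the column-major unscrambling (no URL-quoting involved, so unquote is a no-op): with rows=2, 'abcdef' is laid out as 'abc' / 'def' and read column by column:

print(decrypt_location('2abcdef'))  # 'adbecf'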
import numpy as np

def sqd_yinfast(samples):
    """ compute approximate sum of squared difference
    Using complex convolution (fast, cost O(n log n)) """
# yin_t(tau) = (r_t(0) + r_(t+tau)(0)) - 2r_t(tau)
B = len(samples)
W = B//2
yin = np.zeros(W)
sqdiff = np.zeros(W)
kernel = np.zeros(B)
# compute r_(t+tau)(0)
squares = samples**2
for tau in range(W):
sqdiff[tau] = squares[tau:tau+W].sum()
# add r_t(0)
sqdiff += sqdiff[0]
# compute r_t(tau) using kernel convolution in complex domain
samples_fft = np.fft.fft(samples)
kernel[1:W+1] = samples[W-1::-1] # first half, reversed
kernel_fft = np.fft.fft(kernel)
r_t_tau = np.fft.ifft(samples_fft * kernel_fft).real[W:]
# compute yin_t(tau)
yin = sqdiff - 2 * r_t_tau
return yin | c97e130960336074f6b0c30590ab8a044b8d63e5 | 1,516 |
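A quick pitch check on a pure tone whose period divides the sample rate exactly; the deepest YIN minimum in a plausible lag range should sit at the 100-sample period:

sr = 44100
samples = np.sin(2 * np.pi * 441.0 * np.arange(2048) / sr)  # period = exactly 100 samples
yin = sqd_yinfast(samples)
lag = 20 + np.argmin(yin[20:150])  # search a plausible pitch range, skipping the trivial minimum at tau = 0
print(sr / lag)                    # 441.0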
def get_colours_extend(graph_size, start_set, end_set, source, target, reachable=None):
"""
Get colours for nodes including source and target nodes.
Blue nodes are those in the source set.
Orange nodes are those in the start set, not in the source set.
Green nodes are those reachable from the source that are in target.
Red nodes are those in target that are not reachable from the source.
All other nodes are grey.
"""
# Setup the colours
c = []
if reachable is None:
reachable = end_set
for acc_val in range(graph_size):
if acc_val in start_set:
if acc_val in source:
c.append("dodgerblue")
else:
c.append("darkorange")
elif acc_val in target:
if acc_val in reachable:
c.append("g")
else:
c.append("r")
else:
c.append("gray")
return c | d366ed6c4c387d0b4de4440d34d358d5a142661a | 1,517 |
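For example, with node 0 as source, nodes 0-1 as the start set, and node 3 the only reachable target (reachable defaults to end_set):

c = get_colours_extend(5, start_set={0, 1}, end_set={3}, source={0}, target={3, 4})
print(c)  # ['dodgerblue', 'darkorange', 'gray', 'g', 'r']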
def suspend_circuit():
"""
Suspends the circuits for some seconds, allowing the user to exit the house without playing the song.
"""
circuit.suspend()
return render_template("suspend.html", seconds=EXIT_HOUSE_TIMER, name=get_guest_name()) | 2336207150163ecd302dda6c56758a5405152aec | 1,518 |
import h5py
import numpy as np
import time as time_mod

def get_scalar_data_from_path(udatapath, name='pressure', x0=0, x1=None, y0=0, y1=None, z0=0, z1=None,
                              t0=0, t1=None, inc=1, frame=None, return_xy=False, verbose=True,
                              slicez=None, crop=None, mode='r',
                              reverse_x=False, reverse_y=False, reverse_z=False):
"""
Returns a scalar data from a path of udata
... There could be a case that a scalar data such as temperature and pressure is also stored in udata.h5
... This function serves as a reader of such a quantity
If return_xy is True, it returns udata, xx(2d grid), yy(2d grid)
Parameters
----------
udatapath: str, a path to udata
name: str, name of the dataset in the udata h5
x0: int
x1: int
y0: int
y1: int
t0: int
t1: int
inc: int
time increment of data to load from udatapath, default: 1
frame: array-like or int, default: None
If an integer is given, it returns a velocity field at that instant of time
If an array or a list is given, it returns a velocity field at the given time specified by the array/list.
By default, it loads data by a specified increment "inc".
If "frame" is given, it is prioritized over the incremental loading.
    return_xy: bool, default: False
verbose: bool
If True, return the time it took to load udata to memory
Returns
-------
pdata, (optional- xx, yy, zz(if 3D)
"""
f = h5py.File(udatapath, 'r')
keys = list(f.keys())
f.close()
###
if not name in keys:
raise ValueError('%s does not exist in the given path' % name)
else:
if verbose:
tau0 = time_mod.time()
print('... reading %s from the path' % name)
if crop is not None and [x0, x1, y0, y1, z0, z1] == [0, None, 0, None, 0, None]:
x0, x1, y0, y1, z0, z1 = crop, -crop, crop, -crop, crop, -crop
if mode == 'w' or mode == 'wb':
raise ValueError('... w was passed to h5Py.File(...) which would delete the file if it exists. \n'
'Probably, this is not what you want. Pass r for read-only')
with h5py.File(udatapath, 'r') as f:
if 'z' in f.keys():
dim = 3
else:
dim = 2
if dim == 2:
if frame is None:
pdata = f[name][y0:y1, x0:x1, t0:t1:inc]
else:
frame = np.asarray(frame)
pdata = f[name][y0:y1, x0:x1, frame]
if return_xy:
xx, yy = f['x'][y0:y1, x0:x1], f['y'][y0:y1, x0:x1]
elif dim == 3:
if frame is None and slicez is None:
pdata = f[name][y0:y1, x0:x1, z0:z1, t0:t1:inc]
elif frame is None and slicez is not None:
pdata = f[name][y0:y1, x0:x1, slicez, t0:t1:inc]
elif frame is not None and slicez is not None:
frame = np.asarray(frame)
pdata = f[name][y0:y1, x0:x1, slicez, frame]
else:
frame = np.asarray(frame)
pdata = f[name][y0:y1, x0:x1, z0:z1, frame]
if return_xy:
if slicez is None:
xx, yy, zz = f['x'][y0:y1, x0:x1, z0:z1], f['y'][y0:y1, x0:x1, z0:z1], f['z'][y0:y1, x0:x1,
z0:z1]
else:
xx, yy, zz = f['x'][y0:y1, x0:x1, slicez], f['y'][y0:y1, x0:x1, slicez], f['z'][0, 0, slicez]
tau1 = time_mod.time()
if verbose:
print('... time took to load udata in sec: ', tau1 - tau0)
if return_xy:
if dim == 2:
if reverse_x:
pdata[...] = pdata[:, ::-1, :]
xx[...] = xx[:, ::-1]
yy[...] = yy[:, ::-1]
            if reverse_y:
                pdata[...] = pdata[::-1, :, :]
                xx[...] = xx[::-1, :]
                yy[...] = yy[::-1, :]
return pdata, xx, yy
elif dim == 3:
if reverse_x:
pdata[...] = pdata[:, ::-1, :, :]
xx[...] = xx[:, ::-1, :]
yy[...] = yy[:, ::-1, :]
zz[...] = zz[:, ::-1, :]
if reverse_y:
pdata[...] = pdata[::-1, :, :, :]
xx[...] = xx[::-1, :, :]
yy[...] = yy[::-1, :, :]
zz[...] = zz[::-1, :, :]
if reverse_z:
pdata[...] = pdata[:, :, ::-1, :]
xx[...] = xx[:, :, ::-1]
yy[...] = yy[:, :, ::-1]
zz[...] = zz[:, :, ::-1]
return pdata, xx, yy, zz
else:
return pdata | ef99d0e3dcd8a15b5c7759dac39fb3b7fbe09632 | 1,519 |
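A minimal sketch of the file layout this reader expects, with hypothetical dataset names and shapes; 'pressure' is stored as (y, x, t) alongside 2D 'x'/'y' grids:

with h5py.File('udata_demo.h5', 'w') as f:
    xx, yy = np.meshgrid(np.arange(5.0), np.arange(4.0))
    f['x'] = xx
    f['y'] = yy
    f['pressure'] = np.random.rand(4, 5, 10)  # (y, x, t)

pdata, xx, yy = get_scalar_data_from_path('udata_demo.h5', name='pressure',
                                          t0=0, t1=5, return_xy=True, verbose=False)
print(pdata.shape)  # (4, 5, 5)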
from statistics import mean

import pandas as pd

def create_transformed_df(old_df, elem_list, features_list):
    """elem_list names a column whose cells contain lists of elements."""
new_dict = {}
for index, elems in zip(old_df.index, old_df[elem_list]):
for elem in elems:
if elem in new_dict.keys():
for j, feature in enumerate(features_list):
new_dict[elem][j].append(float(old_df.loc[index, feature]))
else:
new_dict[elem] = [[] for i in range(len(features_list))]
for j, feature in enumerate(features_list):
new_dict[elem][j].append(float(old_df.loc[index, feature]))
headers = [elem_list]
for i in features_list:
headers.append(f'avg_movie_{i}')
    headers.append('number_of_movies')
new_df = pd.DataFrame(columns=headers)
for key in new_dict:
row = []
row.append(key)
for i, col in enumerate(headers[1:-1]):
mean_val = mean(new_dict[key][i])
row.append(mean_val)
num = len(new_dict[key][0])
row.append(num)
length = len(new_df)
new_df.loc[length] = row
return new_df | c5d825f446839d9b6d921bf064bb07c102b82905 | 1,520 |
def sem_id_semester_get(semester, obs_id):
"""
retrieves all the sem_id associated with an observer for the semester.
:param semester: semester id
:type semester: str
:param obs_id: observer id
:type obs_id: int
:rtype: List[str]
"""
semester_list = []
sem_ids = utils.get_proposal_ids(obs_id)
for semid in sem_ids:
if semester in semid:
semester_list.append(semid)
return semester_list | d15b36ccbe1e7a6d2f2cb5016419e259df922881 | 1,521 |
import numpy as np

def getLabels(dataMatrix, classOfInterest):
"""
Gets labels on a per class basis that will inputted to the randomForest function
Parameters
----------
dataMatrix : anndata object
The data file of interest
classOfInterest : str
The class you will split the data by in the set of dataMatrix.obs
Returns
-------
labelsDict : dict
Dictionary with labels for each class
"""
    dataMatrix = filterNormalize(dataMatrix, classOfInterest)
labelsDict = {}
for label in np.unique(dataMatrix.obs[classOfInterest]):
lists = []
for obs in dataMatrix.obs[classOfInterest]:
if obs == label:
lists.append('A')
else:
lists.append('B')
        labelsDict[label] = lists
return labelsDict | bf7bcfc4afcd16deedbfcf27c9e1eb1a5dfa603a | 1,522 |
def load_file(file_location):
"""
Opens a given file and returns its contents.
:param str file_location: The absolute path to the file
:rtype: str
:return: The contents of the file
"""
with open(file_location, 'r') as file_contents:
contents = file_contents.read()
return contents | 61b78432cffa4c22adc9af31bbad63bf8777737b | 1,523 |
from os import path as op

def create_bam(data, args):
aligner and conversion to BAM file
"""
workdir = safe_makedir("align")
sample = data['name']
# workdir = op.join("align", sample)
data['final_bam'] = _align(data['trimmed'], sample, op.abspath(workdir),
args.index, args.is_directional, args.bowtie2,
args.reference, data['config'])
data['order_bam'] = data['final_bam']
return data | 81e77af7317f29277d42a37e46f0e5aa719cab3c | 1,524 |
import numpy as np
import pandas as pd

def calculateStorageLocationsDistance(D_loc: pd.DataFrame, input_loccodex: float,
                                      input_loccodey: float, output_loccodex: float,
                                      output_loccodey: float) -> pd.DataFrame:
"""
calculate the sum of the rectangular distances from
Input point -> physical location -> Output point
Args:
D_loc (pd.DataFrame): Input location DataFrame.
input_loccodex (float): Input X coordinate.
input_loccodey (float): Input Y coordinate.
output_loccodex (float): Output X coordinate.
output_loccodey (float): Output Y coordinate.
    Returns:
        D_loc (pd.DataFrame): the input DataFrame with added INPUT_DISTANCE
        and OUTPUT_DISTANCE columns.
"""
D_loc = D_loc.dropna(subset=['LOCCODEX', 'LOCCODEY'])
D_loc['INPUT_DISTANCE'] = np.abs(input_loccodex - D_loc['LOCCODEX']) + np.abs(input_loccodey - D_loc['LOCCODEY'])
D_loc['OUTPUT_DISTANCE'] = np.abs(output_loccodex - D_loc['LOCCODEX']) + np.abs(output_loccodey - D_loc['LOCCODEY'])
return D_loc | 3432036119007cb1f33f69106cae8c2cf28d697b | 1,525 |
def join(words, sep = ' '):
"""join(list [,sep]) -> string
Return a string composed of the words in list, with
intervening occurrences of sep. The default separator is a
single space.
(joinfields and join are synonymous)
"""
return sep.join(words) | 2b6a293bc5faba31428f66f214e1991dd9878027 | 1,526 |
import codecs
try:
    import cPickle as pkl  # Python 2, as mentioned in the docstring
except ImportError:
    import pickle as pkl

def pickle(obj):
""" Creates a serialization of the provided object
Serialization is done by :mod:`pickle` module. If :mod:`cPickle` package is
available, that package will be used instead, yielding a gain in speed.
Parameters
----------
obj: :obj:`obj`
Object to be serialized.
Returns
-------
pickle: :obj:`pickle.pickle`
Serialized version of the provided object. """
return codecs.encode(pkl.dumps(obj), "base64").decode() | 3a36e7d3c1f0fd31a417df21701eb150e3c611a8 | 1,527 |
def calc_E_E_AP_d_t(n_p):
"""1 時間当たりの家電の消費電力量
Args:
n_p(float): 仮想居住人数 仮想居住人数
Returns:
ndarray: 1 時間当たりの家電の消費電力量
"""
schedule = load_schedule()
schedule_app = get_schedule_app(schedule)
if 1 <= n_p and n_p <= 2:
E_E_AP_1_d_t = get_E_E_AP_p_d_t(1, schedule_app)
E_E_AP_2_d_t = get_E_E_AP_p_d_t(2, schedule_app)
return E_E_AP_1_d_t * (2 - n_p) / (2 - 1) + E_E_AP_2_d_t * (n_p - 1) / (2 - 1)
elif 2 <= n_p and n_p <= 3:
E_E_AP_2_d_t = get_E_E_AP_p_d_t(2, schedule_app)
E_E_AP_3_d_t = get_E_E_AP_p_d_t(3, schedule_app)
return E_E_AP_2_d_t * (3 - n_p) / (3 - 2) + E_E_AP_3_d_t * (n_p - 2) / (3 - 2)
elif 3 <= n_p and n_p <= 4:
E_E_AP_3_d_t = get_E_E_AP_p_d_t(3, schedule_app)
E_E_AP_4_d_t = get_E_E_AP_p_d_t(4, schedule_app)
return E_E_AP_3_d_t * (4 - n_p) / (4 - 3) + E_E_AP_4_d_t * (n_p - 3) / (4 - 3)
else:
raise ValueError(n_p) | 645052eaedf7cc93d4b171f710d0a29e119fe7cf | 1,528 |
from typing import List
import torch
def Squeeze_forward(op: Operation, values: List[torch.Tensor], ctx: TorchBackendContext = None, **kwargs) -> torch.Tensor:
"""
Remove single-dimensional entries from the shape of a tensor.
Takes an input axes with a list of axes to squeeze.
If axes is not provided, all the single dimensions will be removed from the shape.
If an axis is selected with shape entry not equal to one, an error is raised.
Inputs (1 - 2)
data (differentiable) : T
Tensors with at least max(dims) dimensions.
axes (optional, non-differentiable) : tensor(int64)
List of integers indicating the dimensions to squeeze.
Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).
Outputs
squeezed (differentiable) : T
Reshaped tensor with same data as input.
Args:
op (Operation): [description]
        values (List[torch.Tensor]): [description]
Returns:
torch.Tensor: [description]
"""
ASSERT_ALL_TENSORS_AT_SAME_DEVICE(op=op, values=values)
ASSERT_NUM_OF_INPUT(op=op, values=values, min_num_of_input=1, max_num_of_input=2)
[squeezing_tensor], axes = values, GET_ATTRIBUTE_FROM_OPERATION(op=op, attribute='axes', compulsive=True)
if isinstance(axes, list):
for squeezing_dim in sorted(axes, reverse=True):
squeezing_tensor = torch.squeeze(squeezing_tensor, squeezing_dim)
elif isinstance(axes, int):
squeezing_tensor = torch.squeeze(squeezing_tensor, axes)
else: raise TypeError(f'Parameter axes of operation {op.name} misunderstood, '
f'expect int value of list of int, while {type(axes)} was given.')
return squeezing_tensor | f20c5565aafde993e011efc4e037d6a253a79d30 | 1,529 |
def format_test_output(test_name, test_res, H0_unit_root=True):
"""
Helper function to format output. Return a dictionary with specific keys. Will be used to
construct the summary data frame for all unit root tests.
TODO: Add functionality of choosing based on the max lag order specified by user.
:param test_name: name of the test
:param test_res: object that contains corresponding test information. Can be None if test failed.
:param H0_unit_root: does the null hypothesis of the test assume a unit root process? Some tests do (ADF),
some don't (KPSS).
:return: dictionary of summary table for all tests and final decision on stationary vs non-stationary.
If test failed (test_res is None), return empty dictionary.
"""
# Check if the test failed by trying to extract the test statistic
if test_name in ('ADF', 'KPSS'):
try:
test_res['statistic']
except BaseException:
test_res = None
else:
try:
test_res.stat
except BaseException:
test_res = None
if test_res is None:
return {}
# extract necessary information
if test_name in ('ADF', 'KPSS'):
statistic = test_res['statistic']
crit_val = test_res['critical']['5%']
p_val = test_res['pval']
lags = test_res['resstore'].usedlag if test_name == 'ADF' else test_res['lags']
else:
statistic = test_res.stat
crit_val = test_res.critical_values['5%']
p_val = test_res.pvalue
lags = test_res.lags
if H0_unit_root:
H0 = 'The process is non-stationary'
stationary = "yes" if p_val < 0.05 else "not"
else:
H0 = 'The process is stationary'
stationary = "yes" if p_val > 0.05 else "not"
out = {
'test_name': test_name,
'statistic': statistic,
'crit_val': crit_val,
'p_val': p_val,
'lags': int(lags),
'stationary': stationary,
'Null Hypothesis': H0
}
return out | 9d4211475016497659873ef6b2ab87fda34b2af2 | 1,530 |
import functools

import tensorflow as tf
def build_dataset(instruction_dicts,
dataset_from_file_fn,
shuffle_files=False,
parallel_reads=64):
"""Constructs a `tf.data.Dataset` from TFRecord files.
Args:
instruction_dicts: `list` of {'filepath':, 'mask':, 'offset_mask':}
containing the information about which files and which examples to use.
The boolean mask will be repeated and zipped with the examples from
filepath.
dataset_from_file_fn: function returning a `tf.data.Dataset` given a
filename.
shuffle_files: `bool`, Whether to shuffle the input filenames.
parallel_reads: `int`, how many files to read in parallel.
Returns:
`tf.data.Dataset`
"""
# First case: All examples are taken (No value skipped)
if _no_examples_skipped(instruction_dicts):
# Only use the filenames as instruction
instruction_ds = tf.data.Dataset.from_tensor_slices([
d["filepath"] for d in instruction_dicts
])
build_ds_from_instruction = dataset_from_file_fn
# Second case: Use the instructions to read the examples
else:
instruction_ds = _build_instruction_ds(instruction_dicts)
build_ds_from_instruction = functools.partial(
_build_ds_from_instruction,
ds_from_file_fn=dataset_from_file_fn,
)
# If shuffle is True, we shuffle the instructions/shards
if shuffle_files:
instruction_ds = instruction_ds.shuffle(len(instruction_dicts))
# Use interleave to parallel read files and decode records
ds = instruction_ds.interleave(
build_ds_from_instruction,
cycle_length=parallel_reads,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return ds | 6918db594b74d75d5fbbebf70b0f2811366c20b5 | 1,531 |
def _SetRunOptionInRequest(run_option, run_schedule, request, messages):
"""Returns request with the run option set."""
if run_option == 'manual':
arg_utils.SetFieldInMessage(
request,
'googleCloudDatacatalogV1alpha3Crawler.config.adHocRun',
messages.GoogleCloudDatacatalogV1alpha3AdhocRun())
elif run_option == 'scheduled':
scheduled_run_option = arg_utils.ChoiceToEnum(
run_schedule,
(messages.GoogleCloudDatacatalogV1alpha3ScheduledRun
.ScheduledRunOptionValueValuesEnum))
arg_utils.SetFieldInMessage(
request,
'googleCloudDatacatalogV1alpha3Crawler.config.scheduledRun.scheduledRunOption',
scheduled_run_option)
return request | 9f93aaa6b9ec3ba9350c10b914439b16ec7c19a9 | 1,532 |
from unittest.mock import patch

from pytest import raises

# assumed to live in the package under test
from loanpy.helpers import Etym, InventoryMissingError
"""test if phoneme-inventory is ranked correctly
according to feature vectore distance to a given phoneme"""
# set up custom class, create instance of it
class EtymMonkeyrank_closest:
def __init__(self):
self.phoneme_inventory, self.dm_called_with = None, []
self.dm_return = iter([1, 0, 2])
def distance_measure(self, *args):
arglist = [*args]
self.dm_called_with.append(arglist)
return next(self.dm_return)
mocketym = EtymMonkeyrank_closest()
# assert exception and exception message
with raises(InventoryMissingError) as inventorymissingerror_mock:
Etym.rank_closest(
self=mocketym,
ph="d",
howmany=float("inf"),
inv=None)
assert str(inventorymissingerror_mock.value
) == "define phoneme inventory or forms.csv"
# set up2: mock pick_minmax
with patch("loanpy.helpers.pick_minmax") as pick_minmax_mock:
pick_minmax_mock.return_value = ["b", "a", "c"]
# assert
assert Etym.rank_closest(
self=mocketym, ph="d", inv=[
"a", "b", "c"]) == "b, a, c"
# assert calls
assert mocketym.dm_called_with == [['d', 'a'], ['d', 'b'], ['d', 'c']]
pick_minmax_mock.assert_called_with(
[('a', 1), ('b', 0), ('c', 2)], float("inf"))
# set up3: overwrite mock class instance, mock pick_minmax anew
mocketym = EtymMonkeyrank_closest()
with patch("loanpy.helpers.pick_minmax") as pick_minmax_mock:
pick_minmax_mock.return_value = ["b", "a"]
# assert pick_minmax picks mins correctly again
assert Etym.rank_closest(
self=mocketym, ph="d", inv=[
"a", "b", "c"], howmany=2) == "b, a"
# assert calls
assert mocketym.dm_called_with == [['d', 'a'], ['d', 'b'], ['d', 'c']]
pick_minmax_mock.assert_called_with([('a', 1), ('b', 0), ('c', 2)], 2)
# set up4: check if phoneme inventory can be accessed through self
mocketym = EtymMonkeyrank_closest()
mocketym.phoneme_inventory = ["a", "b", "c"]
with patch("loanpy.helpers.pick_minmax") as pick_minmax_mock:
pick_minmax_mock.return_value = "b"
# assert pick_minmax picks mins correctly again
assert Etym.rank_closest(
self=mocketym,
ph="d",
inv=None,
howmany=1) == "b"
# assert calls
assert mocketym.dm_called_with == [['d', 'a'], ['d', 'b'], ['d', 'c']]
pick_minmax_mock.assert_called_with([('a', 1), ('b', 0), ('c', 2)], 1)
# tear down
del mocketym, EtymMonkeyrank_closest | 6ad838f0961fb311ce68402b87f68960a1ce816f | 1,533 |
import datetime
from urllib.parse import urlparse  # Python 3; the original module targeted Python 2's urlparse module
def create_virtual_machine(module, azure):
"""
Create new virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine was created, false otherwise
"""
name = module.params.get('name')
hostname = module.params.get('hostname') or name + ".cloudapp.net"
endpoints = module.params.get('endpoints').split(',')
ssh_cert_path = module.params.get('ssh_cert_path')
user = module.params.get('user')
password = module.params.get('password')
location = module.params.get('location')
role_size = module.params.get('role_size')
storage_account = module.params.get('storage_account')
image = module.params.get('image')
virtual_network_name = module.params.get('virtual_network_name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
# Check if a deployment with the same name already exists
cloud_service_name_available = azure.check_hosted_service_name_availability(name)
if not cloud_service_name_available.result:
changed = False
else:
changed = True
# Create cloud service if necessary
try:
result = azure.create_hosted_service(service_name=name, label=name, location=location)
_wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
except WindowsAzureError as e:
module.fail_json(msg="failed to create the new service name, it already exists: %s" % str(e))
# Create linux configuration
disable_ssh_password_authentication = not password
linux_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
# Add ssh certificates if specified
if ssh_cert_path:
fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path)
# Add certificate to cloud service
result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '')
_wait_for_completion(azure, result, wait_timeout, "add_service_certificate")
# Create ssh config
ssh_config = SSH()
ssh_config.public_keys = PublicKeys()
authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
# Append ssh config to linux machine config
linux_config.ssh = ssh_config
# Create network configuration
network_config = ConfigurationSetInputEndpoints()
network_config.configuration_set_type = 'NetworkConfiguration'
network_config.subnet_names = []
network_config.public_ips = None
for port in endpoints:
network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
protocol='TCP',
port=port,
local_port=port))
# First determine where to store disk
today = datetime.date.today().strftime('%Y-%m-%d')
disk_prefix = u'%s-%s' % (name, name)
media_link = u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today)
# Create system hard disk
os_hd = OSVirtualHardDisk(image, media_link)
# Spin up virtual machine
try:
result = azure.create_virtual_machine_deployment(service_name=name,
deployment_name=name,
deployment_slot='production',
label=name,
role_name=name,
system_config=linux_config,
network_config=network_config,
os_virtual_hard_disk=os_hd,
role_size=role_size,
role_type='PersistentVMRole',
virtual_network_name=virtual_network_name)
_wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
except WindowsAzureError as e:
module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
return (changed, urlparse(deployment.url).hostname, deployment)
except WindowsAzureError as e:
module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e))) | 88006dec9f8e00307f4862e2cdab203867f15558 | 1,534 |
import numpy as np

def calcCumulOverlap(modes1, modes2, array=False):
    """Returns cumulative overlap of modes in *modes2* with those in *modes1*.
    Returns a number if *modes1* contains a single :class:`.Mode` or a
    :class:`.Vector` instance. If *modes1* contains multiple modes, returns an
array. Elements of the array correspond to cumulative overlaps for modes
in *modes1* with those in *modes2*. If *array* is **True**, returns an array
of cumulative overlaps. Returned array has the shape ``(len(modes1),
len(modes2))``. Each row corresponds to cumulative overlaps calculated for
modes in *modes1* with those in *modes2*. Each value in a row corresponds
to cumulative overlap calculated using upto that many number of modes from
*modes2*."""
overlap = calcOverlap(modes1, modes2)
if array:
return np.sqrt(np.power(overlap, 2).sum(axis=overlap.ndim-1))
else:
return np.sqrt(np.power(overlap, 2).cumsum(axis=overlap.ndim-1)) | 6ce8c85b778ca06e1f26f9d66151656b30a4837a | 1,535 |
import multiprocessing

from tqdm import tqdm
def apply_ntimes(func, n, args, verbose=True, timeout=None):
"""
Applies `n` times the function `func` on `args` (useful if, eg, `func` is partly random).
Parameters
----------
func : function
func must be pickable, see https://docs.python.org/2/library/pickle.html#what-can-be-pickled-and-unpickled .
n : int
args : any
timeout : int or float
If given, the computation is cancelled if it hasn't returned a result before `timeout` seconds.
Returns
-------
    list
        Results of the n computations of ``func(*args)``.
"""
pool = multiprocessing.Pool()
multiple_results = [pool.apply_async(func, args) for _ in range(n)]
pool.close()
return [res.get(timeout) for res in tqdm(multiple_results, desc='# castor.parallel.apply_ntimes', disable = True)] | 91aca94c49b7cf74ceaf5f093f21853bbd310df1 | 1,536 |
def travel_time_without_Rebalancing(tnet, i, j, exo=0):
    """
    evaluate the travel time function for edge i->j
    Parameters
    ----------
    tnet: transportation network object
    i: starting node of edge
    j: ending node of edge
    exo: exogenous flow added to the edge (default: 0)
    Returns
    -------
    float
    """
return sum(
[tnet.fcoeffs[n] * ((tnet.G_supergraph[i][j]['flowNoRebalancing'] +exo )/ tnet.G_supergraph[i][j]['capacity']) ** n for n in range(len(tnet.fcoeffs))]) | 00ae58356d1a808d34a559267134cb52fc8b0dc5 | 1,537 |
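The sum is a BPR-style polynomial in the flow-to-capacity ratio; a standalone check of the same arithmetic with the classic coefficients (hypothetical values, since tnet is a custom object):

fcoeffs = [1, 0, 0, 0, 0.15]
ratio = 0.5  # (flow + exo) / capacity
print(sum(c * ratio ** n for n, c in enumerate(fcoeffs)))  # 1.009375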
from functools import wraps

import stackless as sl  # assumed: the code schedules stackless tasklets via `sl`
from twisted.internet import task

def twistless(*args):
"""
Wraps the entry point function, this function should setup and run a
twisted reactor.
A twisted task will be created to constantly schedule other stackless
tasklets as often as the timesched argument.
"""
def _twistless(func):
"""
Wrap the given function
"""
@wraps(func)
def wrapped(*args, **kwargs):
"""
Calls the wrapped function in a stackless tasklet and sets up a
looping twisted task to pump the schedueler.
"""
@wraps(func)
def execute():
"""
Execute the entry point and create a looping call.
"""
reactor_tasklet = sl.getcurrent()
task.LoopingCall(sl.schedule).start(timesched)
func(*args, **kwargs)
sl.tasklet(execute)()
sl.run()
return wrapped
# Add the timeshed arg if it is not given.
if len(args) == 1 and callable(args[0]):
timesched = DEFAULT_TIMESCHED
return _twistless(args[0])
else:
timesched = args[0] if len(args) >= 1 else DEFAULT_TIMESCHED
return _twistless | 75f51549bde9e07316e9dcb31c95bdf81a3cd793 | 1,538 |
import numpy
import math
def enhance_with_function(images, labels, ratio, enhance_func):
"""
:param images:
:param labels:
:param ratio: the ratio of max input class. for example, highest sample count is 1000, ratio is 3, the result
will be around 1000 * 3 * how_many_classes
:param enhance_func the func used for enhance f(image, label, how_many_to_generate)
    :return: new generated features and labels
"""
inputs_per_class = numpy.bincount(labels)
max_inputs = numpy.max(inputs_per_class)
# One Class
for i in range(len(inputs_per_class)):
input_ratio = math.ceil((max_inputs * ratio - inputs_per_class[i]) / inputs_per_class[i])
print("generating class:{} with ratio:{}, max input:{}, current:{}".format(
i, input_ratio, max_inputs, inputs_per_class[i]))
if input_ratio <= 1:
continue
new_features = []
new_labels = []
mask = numpy.where(labels == i)
for feature in images[mask]:
generated_images = enhance_func(feature, input_ratio)
for generated_image in generated_images:
new_features.append(generated_image)
new_labels.append(i)
images = numpy.append(images, new_features, axis=0)
labels = numpy.append(labels, new_labels, axis=0)
return images, labels | d16b7d3726902653bce94c11dba808da1ee88d09 | 1,539 |
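A toy run with a stand-in augmenter (the jitter lambda is hypothetical); class 1 starts under-represented and is boosted to match the majority class:

images = numpy.zeros((4, 2, 2))
labels = numpy.array([0, 0, 0, 1])
jitter = lambda img, n: [img + 0.01 * k for k in range(n)]  # stand-in enhance_func
new_images, new_labels = enhance_with_function(images, labels, ratio=1, enhance_func=jitter)
print(numpy.bincount(new_labels))  # [3 3]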
async def port_create(
request: Request,
server_id: int,
port: PortCreate,
db=Depends(get_db),
user=Depends(get_current_active_admin),
):
"""
Create a new port on server
"""
db_port = create_port(db, server_id, port)
trigger_tc(db_port)
return db_port | 28e747b9af9ed04de911b1fc30653539e9e108cb | 1,540 |
import os
def main():
"""First function to be called"""
# Clear the screen using module function.
clear_screen_module.clear_screen()
print("This script prints absolute paths of all files in current directory.\n")
current_dir = os.getcwd()
print(f"Current directory: {current_dir}\n")
print("Files in current dir are as below with their absolute paths,\n")
# Call function to list absolute paths of files.
list_abs_path_of_files(current_dir)
print("\nAll files are listed above.\n")
return None | fabbccba629876133f638ed83fe60922cc5284fa | 1,541 |
def rectangle_area(base, height):
"""Returns the area of a rectangle"""
base = float(base)
height = float(height)
if (base < 0.0 or height < 0.0):
raise ValueError('Negative numbers are not allowed')
return base * height | 6dc1ea897cdeba1eb84813cefdab659abf5197ea | 1,542 |
def pipe(*args, **kwargs):
"""A processor that replaces the text of a field of an item.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'rule'.
rule (dict): can be either a dict or list of dicts. Must contain
the keys 'find' and 'replace'. May contain the key 'param'.
find (str): The string to find.
replace (str): The string replacement.
param (str): The type of replacement. Must be one of: 'first',
'last', or 'every' (default: 'every').
assign (str): Attribute to assign parsed content (default: strreplace)
field (str): Item attribute to operate on (default: 'content')
Yields:
dict: an item with replaced content
Examples:
>>> conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
>>> item = {'content': 'hello world'}
>>> next(pipe(item, conf=conf))['strreplace'] == 'bye world'
True
>>> rules = [
... {'find': 'Gr', 'replace': 'M'},
... {'find': 'e', 'replace': 'a', 'param': 'last'}]
>>> conf = {'rule': rules}
>>> kwargs = {'conf': conf, 'field': 'title', 'assign': 'result'}
>>> item = {'title': 'Greetings'}
>>> next(pipe(item, **kwargs))['result'] == 'Meatings'
True
"""
return parser(*args, **kwargs) | 29be8fad7df2eb674633abd160b818ed4d6697b2 | 1,543 |
import numpy as np

def adjoint(g):
    """Return the adjoint of a rigid body transformation g (a 4x4 homogeneous matrix)."""
adg = np.zeros((6, 6))
R_part, p = g[:3, :3], g[:3, 3]
pR = skew(p) @ R_part
adg[:3, :3] = R_part
adg[-3:, -3:] = R_part
adg[:3, -3:] = pR
return adg | 6ef82620aa6db984956c7a858ebf0e8715e1e9df | 1,544 |
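The snippet assumes a module-level skew(p) helper; a minimal stand-in plus a sanity check (the adjoint of the identity transform is the 6x6 identity):

def skew(p):
    # 3x3 skew-symmetric matrix: skew(p) @ v == np.cross(p, v)
    return np.array([[0.0, -p[2], p[1]],
                     [p[2], 0.0, -p[0]],
                     [-p[1], p[0], 0.0]])

print(np.allclose(adjoint(np.eye(4)), np.eye(6)))  # True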
def dmp_rr_yun0_sqf_list(f, u, K):
"""Compute square-free decomposition of ``f`` in zero-characteristic ring ``K``.
References
==========
* :cite:`LeeM2013factor`, page 8
"""
if dmp_ground_p(f, None, u):
return []
result, count = [], 1
qs = [dmp_diff_in(f, 1, i, u, K) for i in range(u + 1)]
g = f
for q in qs:
g = dmp_gcd(g, q, u, K)
while not dmp_one_p(f, u, K):
for i in range(u + 1):
qs[i] = dmp_quo(qs[i], g, u, K)
f = dmp_quo(f, g, u, K)
for i in range(u + 1):
qs[i] = dmp_sub(qs[i], dmp_diff_in(f, 1, i, u, K), u, K)
g = f
for q in qs:
g = dmp_gcd(g, q, u, K)
if not dmp_one_p(g, u, K):
result.append((g, count))
count += 1
return result | cf917fb0f0cfd505328c07a09fe07cafd8872d7e | 1,545 |
import argparse
import shlex
def parse_group(rule):
"""
Parse the group line
"""
parser = argparse.ArgumentParser()
rules = shlex.split(rule)
rules.pop(0)
parser.add_argument("--name", dest="name", action="store")
parser.add_argument("--gid", dest="gid", action="store")
args = clean_args(vars(parser.parse_args(rules)))
parser = None
return args | 5d7031fe91e312b82b76c5a8acf458de5c6448ac | 1,546 |
import os
def register_module():
"""Registers this module for use."""
def on_module_disable():
tags.Registry.remove_tag_binding(TextFileUploadTag.binding_name)
tags.EditorBlacklists.unregister(
TextFileUploadTag.binding_name,
tags.EditorBlacklists.COURSE_SCOPE)
tags.EditorBlacklists.unregister(
TextFileUploadTag.binding_name,
tags.EditorBlacklists.DESCRIPTIVE_SCOPE)
def on_module_enable():
tags.Registry.add_tag_binding(
TextFileUploadTag.binding_name, TextFileUploadTag)
tags.EditorBlacklists.register(
TextFileUploadTag.binding_name,
tags.EditorBlacklists.COURSE_SCOPE)
tags.EditorBlacklists.register(
TextFileUploadTag.binding_name,
tags.EditorBlacklists.DESCRIPTIVE_SCOPE)
global_routes = [
(os.path.join(_RESOURCES_PATH, '.*'), tags.ResourcesHandler),
]
namespaced_routes = [
(_POST_ACTION_SUFFIX, TextFileUploadHandler),
]
global custom_module
custom_module = custom_modules.Module(
'Student Text File Submission Upload',
'Adds a custom tag for students to upload text files <= 1MB in size.',
global_routes, namespaced_routes,
notify_module_disabled=on_module_disable,
notify_module_enabled=on_module_enable,
)
return custom_module | ae58ca8e095147a73285d33b820256e754997ef1 | 1,547 |
from math import cos, pi, sin
def angle2trig(theta):
"""Convert angle to a reportlab ready tuple.
Arguments:
- theta - Angle in degrees, counter clockwise from horizontal
Returns a representation of the passed angle in a format suitable
for ReportLab rotations (i.e. cos(theta), sin(theta), -sin(theta),
cos(theta) tuple)
"""
c = cos(theta * pi / 180)
s = sin(theta * pi / 180)
return (c, s, -s, c) | b4ad079b5b9fb889b26eec37c1d14ae97a34be50 | 1,548 |
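# Usage sketch for angle2trig() above: 90 degrees counter-clockwise maps to
# (cos 90, sin 90, -sin 90, cos 90) ~= (0.0, 1.0, -1.0, 0.0).
c, s, neg_s, c2 = angle2trig(90)
assert abs(c) < 1e-9 and abs(s - 1.0) < 1e-9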
import numpy as np
def get_state_z0_pure_state_vector() -> np.ndarray:
"""Returns the pure state vector for :math:`|0\\rangle`.
Returns
-------
np.ndarray
the pure state vector.
"""
vec = np.array([1, 0], dtype=np.complex128)
return vec | 53a7485572ea8fed8fcb8155923692050092c881 | 1,549 |
def HSV_to_CMYKratio(hsv):
"""Converts HSV color space to CMYK (ratio representation)"""
rgb = HSV_to_RGB(hsv)
return RGB_to_CMYKratio(rgb) | c6268c86dc425d7f5b386fd9dbb56e5299d9573b | 1,550 |
from flask import jsonify
from sqlalchemy import exc
def delete_single_culture(user_id, culture_id):
"""Delete a culture."""
try:
culture = Culture.query.filter_by(user_id=user_id).filter_by(culture_id=culture_id).first()
if not culture:
response_object = {
'status': 'fail',
'message': f'{culture_id} does not exist.'
}
return jsonify(response_object), 404
else:
db.session.delete(culture)
db.session.commit()
response_object = {
'status': 'success',
'message': f'{culture_id} was deleted.'
}
return jsonify(response_object), 200
except exc.IntegrityError as e:
db.session.rollback()
response_object = {
'status': 'fail',
'message': 'Invalid payload.'
}
return jsonify(response_object), 400 | e96ab6e653b2d191e1c0977ee9dace114c6056ce | 1,551 |
import numpy as np
def create_atomic_chunk(im, chunk_coord, aff_dtype=np.float32, verbose=True):
""" Creates single atomic chunk
:param im: IngestionManager
:param chunk_coord: np.ndarray
array of three ints
:param aff_dtype: np.dtype
np.float64 or np.float32
:param verbose: bool
:return:
"""
    chunk_coord = np.array(list(chunk_coord), dtype=int)
edge_dict = collect_edge_data(im, chunk_coord, aff_dtype=aff_dtype)
mapping = collect_agglomeration_data(im, chunk_coord)
active_edge_dict, isolated_ids = define_active_edges(edge_dict, mapping)
edge_ids = {}
edge_affs = {}
edge_areas = {}
for k in edge_dict.keys():
if k == "cross":
edge_ids[k] = np.concatenate([edge_dict[k]["sv1"][:, None],
edge_dict[k]["sv2"][:, None]],
axis=1)
continue
sv1_conn = edge_dict[k]["sv1"][active_edge_dict[k]]
sv2_conn = edge_dict[k]["sv2"][active_edge_dict[k]]
aff_conn = edge_dict[k]["aff"][active_edge_dict[k]]
area_conn = edge_dict[k]["area"][active_edge_dict[k]]
edge_ids[f"{k}_connected"] = np.concatenate([sv1_conn[:, None],
sv2_conn[:, None]],
axis=1)
edge_affs[f"{k}_connected"] = aff_conn.astype(np.float32)
edge_areas[f"{k}_connected"] = area_conn
sv1_disconn = edge_dict[k]["sv1"][~active_edge_dict[k]]
sv2_disconn = edge_dict[k]["sv2"][~active_edge_dict[k]]
aff_disconn = edge_dict[k]["aff"][~active_edge_dict[k]]
area_disconn = edge_dict[k]["area"][~active_edge_dict[k]]
edge_ids[f"{k}_disconnected"] = np.concatenate([sv1_disconn[:, None],
sv2_disconn[:, None]],
axis=1)
edge_affs[f"{k}_disconnected"] = aff_disconn.astype(np.float32)
edge_areas[f"{k}_disconnected"] = area_disconn
im.cg.add_atomic_edges_in_chunks(edge_ids, edge_affs, edge_areas,
isolated_node_ids=isolated_ids)
return edge_ids, edge_affs, edge_areas | 6096e22b35a800782f394a45b6307aec23c71d57 | 1,552 |
from django.shortcuts import redirect, render
def add_adult(request):
    """
    Add a new adult record.
    :param request: the incoming HttpRequest
    :return: an HttpResponse with the add/edit form, or a redirect to the adult salary step
    """
args = dict()
app = AppUtil.get_by_user(user=request.user)
if request.method == 'POST':
form = AddAdultForm(request.POST)
if form.is_valid():
adult = form.save(commit=False)
adult.application = app[0]
adult.save()
return redirect('adult_salary', adult_id=adult.id)
else:
form = AddAdultForm()
args['form'] = form
args['nav'] = AppUtil.get_nav(nav=nav, url='adults', app=app[0])
args['progress'] = AppUtil.get_app_progress(app=app[0])
return render(request, "eat/user/application/adult/add_edit.html", args) | 8998601a05acd875fb65008fb85bbcdac7ad418d | 1,553 |
import logging
import sys
import inspect
def replace(target_obj):
"""A decorator to replace the specified obj.
`target_obj` can be a class or a function.
Example:
```python
class A:
def f(self):
print('class A')
@replace(A)
class B:
def f(self):
print('class B')
```
Args:
target_obj (class/func/method): a class, method, or function to be
replaced.
Returns:
A decorator function to replace the input object.
"""
def decorator(new_obj):
if target_obj in OPTIMIZED_CLASSES:
logging.warning("{} has been optimized again.".format(target_obj))
setattr(new_obj, "__replaced_class__", target_obj)
OPTIMIZED_CLASSES[target_obj] = new_obj
for k, v in list(sys.modules.items()):
if target_obj.__name__ in v.__dict__ and v.__dict__[target_obj.__name__] is target_obj:
delattr(sys.modules[k], target_obj.__name__)
setattr(sys.modules[k], target_obj.__name__, new_obj)
logging.debug("In module {}, {} is replaced by {}".format(k, target_obj, new_obj))
# replace target_obj if it is used as the base classes.
for key in list(v.__dict__.keys()):
if (
inspect.isclass(v.__dict__[key])
and v.__dict__[key] != new_obj
and target_obj in v.__dict__[key].__bases__
):
idx = v.__dict__[key].__bases__.index(target_obj)
bases = list(v.__dict__[key].__bases__)
bases[idx] = new_obj
v.__dict__[key].__bases__ = tuple(bases)
logging.debug(
"In module {}, the base class of {} is replaced by {}".format(k, v.__dict__[key], new_obj)
)
return new_obj
return decorator | 0f8bf8d5b18c97ff0a33a5b1c8fb50952742c0a0 | 1,554 |
import re
from operator import itemgetter
def get_layers(model, filter_regexp):
"""
Filters out the layers according to a regexp. Note that
we omit biases.
Args:
- model: a nn.Module
- filter_regexp: a regexp to filter the layers to keep
according to their name in model.named_parameters().
For instance, the regexp:
            down_layers\\.[123456]\\.(conv[12]|identity\\.conv)
is keeping blocks down_layers from 1 to 6, and inside
each block is keeping conv1, conv2 and identity.conv.
Remarks:
- We add (module\\.)? at the beginning of the regexp to
account for the possible use of nn.parallel.DataParallel
"""
# get all parameter names
all_layers = map(itemgetter(0), model.named_parameters())
# remove biases
all_layers = filter(lambda x: "bias" not in x, all_layers)
    # remove .weight in all other names (or .weight_orig if spectral norm is used)
all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers)
all_layers = map(lambda x: x.replace(".weight", ""), all_layers)
# return filtered layers
filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")"
r = re.compile(filter_regexp)
return list(filter(r.match, all_layers)) | d34da2bd7bfcf9827846f4aafc74d8c94ceb0d31 | 1,555 |
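# Usage sketch for get_layers() above on a toy model, assuming PyTorch is
# available. Biases are dropped and the .weight suffix is stripped.
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
print(get_layers(model, "0|2"))
# ['0', '2']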
from typing import Union
from Crypto.Cipher import AES  # pycryptodome (assumed source of AES)
def decrypt(data: bytes,
password: Union[str, bytes]) -> bytes:
"""
decrypt data
:param data: encrypted data
:param password: password
:return: plain data
"""
__data = gzip_decompress(data[4:]) if data.startswith(b'moca') else data
iv, cipher = __data[:AES.block_size], __data[AES.block_size:]
return __create_aes(password, iv).decrypt(cipher) | c6228e10c1498e734a42039071aa6d88356eef84 | 1,556 |
def stream_from_url(*args, **kwargs):
"""
Save the resource as a file on disk iteratively by first asking
for the 'content-length' header entry and downloading in chunks.
By default we will retry if an HTTP error arises.
By default we will uncompress a downloaded file if it is zipped.
"""
# Just redirect to download_from_url #
    kwargs.update({'stream': True})
return download_from_url(*args, **kwargs) | 2ee598ac7cb19a1f884ad7faad4cec38a5f93c32 | 1,557 |
def modulo_3(lhs, ctx):
"""Element ǒ
(num) -> a % 3
(str) -> a split into chunks of size 2
"""
return {
(NUMBER_TYPE): lambda: lhs % 3,
(str): lambda: [lhs[i : i + 2] for i in range(0, len(lhs), 2)],
}.get(vy_type(lhs), lambda: vectorise(modulo_3, lhs, ctx=ctx))() | daa2775727af48d76076e54095a2503243368dc1 | 1,558 |
import os
import pandas as pd
def _load_score_submission(submission_path, metric, step, data_label=None):
"""Load the score for a single submission."""
if data_label is None:
training_output_path = os.path.join(
submission_path, 'training_output')
else:
training_output_path = os.path.join(
submission_path, 'training_output', data_label)
if not os.path.isdir(training_output_path):
return None
folds_path = [
os.path.join(training_output_path, fold_name)
for fold_name in os.listdir(training_output_path)
if (os.path.isdir(os.path.join(training_output_path, fold_name)) and
'fold_' in fold_name)
]
data = {}
for fold_id, path in enumerate(folds_path):
score_path = os.path.join(path, 'scores.csv')
if not os.path.exists(score_path):
return
scores = pd.read_csv(score_path, index_col=0)
scores.columns.name = 'score'
data[fold_id] = scores
df = pd.concat(data, names=['fold'])
metric = metric if metric else slice(None)
step = step if step else slice(None)
return df.loc[(slice(None), step), metric] | e71f2f943cf5c68d20f0049bb9050e8826d6b45b | 1,559 |
import pandas as pd
def geolocalizarCiudades(lista_ciudades: list):
    """Given a list of city names, return a DataFrame with one row per city.
    Parameters
    ----------
    lista_ciudades : list
        List of city names.
    Returns
    -------
    df_Fila : pandas.DataFrame
        DataFrame holding, for each city, its name, coordinate pair, full
        address, and an instance of the Ciudad class.
    """
rows = []
for i in lista_ciudades:
coord, direccion = geolocalizar(i)
rows.append([i, coord, direccion, Ciudad(*coord, i)])
df_Fila = pd.DataFrame(
rows,
columns=[
"Ciudad",
"Coordenadas",
"Direccion",
"ObjetoCiudad"])
return df_Fila | 14d26dba3a2fcef1334e7d13e60b01ff3d3f9ef5 | 1,560 |
import numpy as np
import tensorflow as tf
def HandleConvPaddingModes(x, padding, kernel_shape, strides):
"""Returns an updated tensor and padding type for REFLECT and SYMMETRIC.
Args:
x: A 4D tensor with shape [batch_size, height, width, depth].
padding: Padding mode (SAME, VALID, REFLECT, or SYMMETRIC).
kernel_shape: Shape of convolution kernel that will be applied.
strides: Convolution stride that will be used.
Returns:
x and padding after adjustments for REFLECT and SYMMETRIC.
"""
# For 1x1 convolution, all padding modes are the same.
if np.all(kernel_shape[:2] == 1):
return x, 'VALID'
if padding == 'REFLECT' or padding == 'SYMMETRIC':
# We manually compute the number of paddings as if 'SAME'.
# From Tensorflow kernel, the formulas are as follows.
# output_shape = ceil(input_shape / strides)
# paddings = (output_shape - 1) * strides + filter_size - input_shape
# Let x, y, s be a shorthand notations for input_shape, output_shape, and
# strides, respectively. Let (x - 1) = sn + r where 0 <= r < s. Note that
# y - 1 = ceil(x / s) - 1 = floor((x - 1) / s) = n
# provided that x > 0. Therefore
# paddings = n * s + filter_size - (sn + r + 1)
# = filter_size - r - 1.
input_shape = x.get_shape() # shape at graph construction time
img_shape = tf.shape(x)[1:3] # image shape (no batch) at run time
remainder = tf.mod(img_shape - 1, strides[1:3])
pad_sizes = kernel_shape[:2] - remainder - 1
pad_rows = pad_sizes[0]
pad_cols = pad_sizes[1]
pad = tf.stack([[0, 0], tf.stack([pad_rows // 2, (pad_rows + 1) // 2]),
tf.stack([pad_cols // 2, (pad_cols + 1) // 2]), [0, 0]])
# Manually pad the input and switch the padding mode to 'VALID'.
x = tf.pad(x, pad, mode=padding)
x.set_shape([input_shape[0], x.get_shape()[1],
x.get_shape()[2], input_shape[3]])
padding = 'VALID'
return x, padding | def8d35429e568096dbb5410723c1cf550890707 | 1,561 |
import uuid
def uuid1_():
"""用于生成GUID"""
return str(uuid.uuid1()) | 8b1bf00c2c76429499a4300cc7f75fd075a0bf1c | 1,562 |
def default_if_none(default):
"""Implements the rule: default if v is None else v"""
return default_if_true(lambda v: v is None, default) | 13cf841c09e14074c38a7ae2b5fac649518e783d | 1,563 |
# `DEBUG` was imported from `re` (a regex flag) in the original snippet; it is
# used below as a plain debug switch, so an explicit module flag is assumed.
DEBUG = False
import os
def fftshift(input, bitmask, b=None):
"""
Apply fftshift along dimensions selected by the {bitmask}.
    :param input array:
    :param bitmask long:
:param b bool: apply ifftshift
"""
usage_string = "fftshift [-b] bitmask input output"
cmd_str = f'{BART_PATH} '
cmd_str += 'fftshift '
flag_str = ''
opt_args = f''
multituples = []
if b is not None:
flag_str += f'-b '
cmd_str += flag_str + opt_args + ' '
cmd_str += f"{' '.join([' '.join([str(x) for x in arg]) for arg in zip(*multituples)]).strip()} {bitmask} {NAME}input {NAME}output "
cfl.writecfl(NAME + 'input', input)
if DEBUG:
print(cmd_str)
os.system(cmd_str)
outputs = cfl.readcfl(NAME + 'output')
return outputs | 4ce195a1331754a8d54b3281bc478cc3dd540a86 | 1,564 |
import asyncio
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Unload Unifi Protect config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in METEOBRIDGE_PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok | 8ef56a9029adb33853b90a9a9ba8e35e67a2d79a | 1,565 |
import numpy as np
def _new_correlation_matrix_inverse(new_data, old_corr_mat_inv):
    """
    If old_corr_mat_inv is an approximation of the correlation-matrix
    inverse of a dataset (p1, ..., pn), return an approximation of the
    correlation-matrix inverse of the dataset (p1, ..., pn, new_data).
    TODO : add forgetting parameter lbda
    """
P = old_corr_mat_inv
x = new_data
# TODO : numerical instabilities if xTP is not computed first
# (order of multiplications)
xTP = x.T @ P
P = P - (P @ x @ xTP)/(1. + np.dot(xTP, x))
return P | e26d19392ca4d64835d354f2c5008cc7953117ca | 1,566 |
import numpy as np
import sklearn.metrics as skm
def auc(y, z, round=True):
"""Compute area under the ROC curve."""
if round:
y = y.round()
if len(y) == 0 or len(np.unique(y)) < 2:
return np.nan
return skm.roc_auc_score(y, z) | 895e8f37829903ee7e79012a54ecc318401ae4c6 | 1,567 |
import os
def get_tmp_filepath(_file):
"""生成一个针对_file的临时文件名"""
_path = os.path.dirname(_file)
_tmp_filename = os.path.basename(_file)
if not _tmp_filename.startswith('.'):
_tmp_filename = '.' + _tmp_filename
_tmp_filename += '_tmp'
_tmp_filepath = os.path.join(_path, _tmp_filename)
if os.path.exists(_tmp_filepath):
return get_tmp_filepath(_tmp_filepath + '_1')
return _tmp_filepath | f566825dec9c3a6330ba5e1578f74c2a171e4296 | 1,568 |
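# Usage sketch for get_tmp_filepath() above (assuming neither candidate path
# already exists on disk):
# >>> get_tmp_filepath('/data/report.csv')
# '/data/.report.csv_tmp'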
def upperLeftOrigin( largeSize, smallSize ):
"""
The upper left coordinate (tuple) of a small rectangle in a larger rectangle (centered)
"""
    origin = tuple(int((large - small) / 2) for large, small in zip(largeSize, smallSize))
return origin | bda31fc5eb021f40a62b00949ced940ef171005f | 1,569 |
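# Usage sketch for upperLeftOrigin() above: centring a 20x10 rectangle
# inside a 100x100 one.
# >>> upperLeftOrigin((100, 100), (20, 10))
# (40, 45)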
from os import listdir, path
def get_supported_events():
"""Returns the list of available _local_ templates.
If a template exists in the local app, it will take precedence
over the default trello_webhooks template. The base assumption
for this function is that _if_ a local template exists, then this
is an event we are interested in.
"""
app_template_path = path.join(
path.realpath(path.dirname(__file__)),
'templates/trello_webhooks'
)
return [t.split('.')[0] for t in listdir(app_template_path)] | 794f80589d6697d6930181f55d5271b009f67677 | 1,570 |
import typing
import importlib.metadata
# `S` was imported from `re` (a regex flag) in the original snippet; it is
# used below as a type variable, assumed bound to SignedObject.
S = typing.TypeVar("S", bound="SignedObject")
def from_ext(ext: str) -> S:
"""Get a SignedObject by file extension."""
object_types: typing.List[S] = [RpkiGhostbusters,
RpkiManifest,
RouteOriginAttestation]
entry_point_name = "rpkimancer.sigobj"
entry_points = importlib.metadata.entry_points()
for entry_point in entry_points.get(entry_point_name, []):
log.info(f"trying to load signed object {entry_point.value}")
cls = entry_point.load()
if issubclass(cls, SignedObject):
object_types.append(typing.cast(S, cls))
else:
log.warning(f"signed objects must inherit from {SignedObject}")
lookup_map = {cls.econtent_type.file_ext: cls
for cls in object_types}
try:
return lookup_map[ext]
except KeyError:
return lookup_map[ext.lstrip(".")] | 5edeb91022b2d97239038e99d565a6879532eeb0 | 1,571 |
import matplotlib.pyplot as plt
def plot_audio(audio, time, ResultPath, title):
    """Plot an audio signal's amplitude over time and save the figure."""
plt.figure()
plt.plot(time,audio, linewidth=0.01)
plt.ylabel("Amplitude")
plt.xlabel("Time (s)")
plt.title(title)
pathname=ResultPath + title
plt.savefig(pathname)
plt.show()
    return
import numpy as np
def dataSet():
    """
    Test dataset.
    """
x = [np.array([[1], [2], [3]]),
np.array([[2], [3], [4]])]
d = np.array([[1], [2]])
return x, d | 91b0dfb28ec81a4ca392aafd0c06f81319d5db38 | 1,573 |
def config():
"""
Get the OpenAPI Document configuration
:returns: OpenAPI configuration YAML dict
"""
with open(get_test_file_path('pygeoapi-test-openapi-config.yml')) as config_file: # noqa
return yaml_load(config_file) | 23519be12e1f6d9d79210de325a726df16946507 | 1,574 |
def convert_broadcast_lesser(node, **kwargs):
"""Map MXNet's broadcast_lesser operator attributes to onnx's Less operator
and return the created node.
"""
return create_basic_op_node('Less', node, kwargs) | 2ef5223ad38b24791d530c0c609859160b9a4c70 | 1,575 |
import os
import random
def randomize_examples(verbalize, path='prompts/',example_filenames="example_" , n_examples=3, onlyverbal=False):
"""
Randomizes the examples for the initial prompt.
Parameters
----------
verbalize : bool
If true, examples contain reasoning for the answer, e.g. "because I do not believe in conspiracies."
    path : str, optional
        Filepath. The default is 'prompts/'.
    example_filenames : str, optional
        How the files containing the examples are named. The default is "example_".
    n_examples : int, optional
        How many examples are sampled. The default is 3.
    onlyverbal : bool, optional
        If True, examples where there are no numerical answers are used. The default is False.
    Returns
    -------
    examples_string : str
        The sampled examples joined into a single prompt string.
    """
# Read all the examples in a directory
examples_list = []
for file in os.listdir(path):
if file.startswith(example_filenames):
examples_list.append(open(path + file, "r").read())
# Remove verbalization
if verbalize == False:
if onlyverbal == False:
for i, value in enumerate(examples_list):
temp = examples_list[i]
examples_list[i] = temp[0:temp.find("Participant:") + 14] + '.'
if onlyverbal == True:
for i, value in enumerate(examples_list):
temp = examples_list[i]
findend = temp.find("Participant:")
end_i = temp.find(",", findend)
examples_list[i] = temp[0:end_i] + '.'
# Randomize the order
examples_list = random.sample(examples_list, k=n_examples)
# Add to a string that will be added to the prompt
for i in range(n_examples):
if i == 0:
examples_string = examples_list[i]
else:
examples_string = examples_string + "\n\n" + examples_list[i]
return examples_string | cfc1a0cefb79073721bb3b7bf77b8eaf8bb2cc1f | 1,576 |
import numpy as np
def histogram2d(x, y, bins_x, bins_y):
"""Histogram 2d between two continuous row vectors.
Parameters
----------
x : array_like
Vector array of shape (N,) and of type np.float32
y : array_like
Vector array of shape (N,) and of type np.float32
bins_x, bins_y : int64
Number of bins respectively for the x and y variables
Returns
-------
hist : array_like
Array of shape (bins, bins) and of type int64
"""
# x-range
x_max, x_min = x.max(), x.min()
delta_x = 1 / ((x_max - x_min) / bins_x)
# y-range
y_max, y_min = y.max(), y.min()
delta_y = 1 / ((y_max - y_min) / bins_y)
# compute histogram 2d
xy_bin = np.zeros((np.int64(bins_x), np.int64(bins_y)), dtype=np.int64)
for t in range(len(x)):
i = (x[t] - x_min) * delta_x
j = (y[t] - y_min) * delta_y
if 0 <= i < bins_x and 0 <= j < bins_y:
xy_bin[int(i), int(j)] += 1
return xy_bin | 1d7f88eb0ab25092a826a8f1157895e02608aaba | 1,577 |
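# Usage sketch for histogram2d() above: bin two correlated float32 signals
# into a 10x10 grid (values exactly at the upper edge fall outside the grid).
rng = np.random.default_rng(0)
x = rng.normal(size=1000).astype(np.float32)
y = (x + rng.normal(scale=0.5, size=1000)).astype(np.float32)
hist = histogram2d(x, y, 10, 10)
assert hist.shape == (10, 10) and hist.sum() <= 1000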
from typing import Any
from typing import Tuple
from osgeo import osr
def xy2latlong(x: float, y: float, ds: Any) -> Tuple[float, float]:
"""Return lat long coordinate by x, y
>>> import gdal
>>> path = "../../../tests/data/raster_for_test.tif"
>>> ds = gdal.Open(path)
>>> xy2latlong(3715171, 2909857, ds)
(1.7036231518576481, 48.994284431891565)
"""
old_cs = osr.SpatialReference()
old_cs.ImportFromWkt(ds.GetProjectionRef())
# create the new coordinate system
wgs84_wkt = """
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]"""
new_cs = osr.SpatialReference()
new_cs.ImportFromWkt(wgs84_wkt)
# create a transform object to convert between coordinate systems
transform = osr.CoordinateTransformation(old_cs, new_cs)
# get the coordinates in lat long
latlong = transform.TransformPoint(x, y)
return latlong[0], latlong[1] | 01f3a1e2d5c8e842db6668488b0a3d9d9b432295 | 1,578 |
import datetime as dt
def relative_date(r='12m', end_date='today', date_format='%Y-%m-%d',
as_string=False, unixtimestamp=False):
"""
Relative Date function
Calculates a datetime from a given end date and a relative reference.
INPUT:
r - relative date reference as '-12d' accepts d, w, m or y
end_date - 'today' (default), date string, datetime object
date_format - input format of string & output if requested
as_string - True | False (default)
decides if output is converted to string from datetime
unixtimestamp - converts datetime to an INTEGER unixtimestamp
"""
# Create Datetime object end_date based on supplied end_date
# If not string or 'today' assume already in datetime format
if end_date == 'today':
end_date = dt.datetime.today()
elif isinstance(end_date, str):
end_date = dt.datetime.strptime(end_date, date_format)
# Breakdown Relative Reference into type (i.e. d, w, m, y) & number
r = r[1::] if r[0] == '-' else r
dtype, dnum = str(r[-1]).lower(), float(r[0:-1])
# Manipulate based on relative Days, Weeks, Months or Years
if dtype == 'd': start_date = end_date - dt.timedelta(days=dnum)
elif dtype == 'w': start_date = end_date - dt.timedelta(weeks=dnum)
elif dtype == 'm': start_date = end_date - dt.timedelta(weeks=dnum*4)
elif dtype == 'y': start_date = end_date - dt.timedelta(weeks=dnum*52.143)
# Output as Strings if desirable
if as_string is True:
start_date = dt.datetime.strftime(start_date, date_format)
end_date = dt.datetime.strftime(end_date, date_format)
elif unixtimestamp is True:
start_date = int(dt.datetime.timestamp(start_date))
end_date = int(dt.datetime.timestamp(end_date))
return start_date, end_date | 720f24ce1fafa2b77979c924a9c20b1d6cc86c03 | 1,579 |
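# Usage sketch for relative_date() above: a 3-month window (12 weeks) ending
# on a fixed date, returned as strings.
start, end = relative_date('3m', end_date='2020-06-30', as_string=True)
# start == '2020-04-07', end == '2020-06-30'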
import numpy as np
# `T` was imported from `re` (a regex flag) in the original snippet; `T` and
# `F` are used below as True/False shorthands, so they are defined explicitly.
T, F = True, False
def get_iexist_vdw_bond(ipt):
"""
check if a given mol pair contain any vdw bond, which exists
in the query mol. Note that input mol pairs must have cc=0.
"""
obj, mi, mj = ipt
iok = F
if np.any( [ set(b) <= set(mi.iasq+mj.iasq) for b in obj.ncbs ] ):
iok = T
return iok | 81af4c03ea988412cb11be3f962e40239cfbadcf | 1,580 |
def getSVG(shape, opts=None, view_vector=(-0, 0, 20.0)):
"""
Export a shape to SVG
"""
d = {"width": 800, "height": 800, "marginLeft": 20, "marginTop": 20}
if opts:
d.update(opts)
# need to guess the scale and the coordinate center
uom = guessUnitOfMeasure(shape)
width = float(d["width"])
height = float(d["height"])
marginLeft = float(d["marginLeft"])
marginTop = float(d["marginTop"])
# TODO: provide option to give 3 views
viewVector = FreeCAD.Base.Vector(view_vector)
(visibleG0, visibleG1, hiddenG0, hiddenG1) = Drawing.project(shape, viewVector)
(hiddenPaths, visiblePaths) = getPaths(
Drawing.projectToSVG(shape, viewVector, "")
) # this param is totally undocumented!
# get bounding box -- these are all in 2-d space
bb = visibleG0.BoundBox
bb.add(visibleG1.BoundBox)
bb.add(hiddenG0.BoundBox)
bb.add(hiddenG1.BoundBox)
    # width pixels for x, height pixels for y
    # rough hack: convert pixels to mm
    unitScale = 3.779527559  # min(width / bb.XLength * 0.75, height / bb.YLength * 0.75)
# compute amount to translate-- move the top left into view
(xTranslate, yTranslate) = (
(0 - bb.XMin) + marginLeft / unitScale,
(0 - bb.YMax) - marginTop / unitScale,
)
# compute paths ( again -- had to strip out freecad crap )
hiddenContent = ""
for p in hiddenPaths:
hiddenContent += PATHTEMPLATE % p
visibleContent = ""
for p in visiblePaths:
visibleContent += PATHTEMPLATE % p
svg = SVG_TEMPLATE % (
{
"unitScale": str(unitScale),
"strokeWidth": "0.1",
"hiddenContent": visibleContent,
"xTranslate": str(xTranslate),
"yTranslate": str(yTranslate),
"width": str(width),
"height": str(height),
"textboxY": str(height - 30),
"uom": str(uom),
}
)
# svg = SVG_TEMPLATE % (
# {"content": projectedContent}
# )
return svg | e9774e6a61712c6fcd421dcc69ade404a1ea7c50 | 1,581 |
import pandas as pd
def load_data(messages_filepath, categories_filepath):
"""Loads messages and categories data and creates a merged dataframe
Args:
messages_filepath (str): Path to the messages file
categories_filepath (str): Path to the categories file
Returns:
(pd.DataFrame): A messages and categories dataframe
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
return messages.merge(categories, on='id') | 1f7308c2f51b587b3b27c35f680225c0c78c85b0 | 1,582 |
def is_square_inside(row, col, rows, cols):
"""Check if row and col is square inside grid having rows and cols."""
return row not in (0, rows - 1) and col not in (0, cols - 1) | f0cdcbc6d9bee6a41fd0cc84b16ffaf0638a522c | 1,583 |
from torch import FloatTensor
def reshapeLabel(label):
"""
Reshape 1-D [0,1,...] to 2-D [[1,-1],[-1,1],...].
"""
n = label.size(0)
y = FloatTensor(n, 2)
y[:, 0] = 2 * (0.5 - label)
y[:, 1] = - y[:, 0]
return y.long() | 77716413deb3263b23a6ca8e684274fa67855375 | 1,584 |
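# Usage sketch for reshapeLabel() above: labels 0 and 1 map to the rows
# [1, -1] and [-1, 1] respectively.
print(reshapeLabel(FloatTensor([0, 1])))
# tensor([[ 1, -1],
#         [-1,  1]])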
import numpy as np
import torch
def _coo_scipy2torch(adj, coalesce=True, use_cuda=False):
"""
convert a scipy sparse COO matrix to torch
"""
values = adj.data
indices = np.vstack((adj.row, adj.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
ans = torch.sparse.FloatTensor(i, v, torch.Size(adj.shape))
if use_cuda:
ans = ans.cuda()
if coalesce:
ans = ans.coalesce()
return ans | 27d9db560dc60ec31ec7f152952db201c4e6aafb | 1,585 |
import numpy as np
def behavior_by_delta_temp(db_dict: dict, bins: np.ndarray):
"""
Computes frequency of behavior by delta-temperature achieved during the preceding bout
:param db_dict: Debug dictionary created during simulation run
:param bins: The bin-edges for dividing the bout delta temperature space
:return: A dictionary with behaviors as keys and probability in each bin as values
"""
selector = np.logical_and(db_dict["sel_behav"] != '', db_dict["sel_behav"] != 'N')
behavior_types = np.unique(db_dict["sel_behav"][selector])
all_behavs = db_dict["sel_behav"][selector]
all_btemps = db_dict["curr_temp"][selector]
all_deltas = np.zeros(all_btemps.size)
for i in range(1, all_btemps.size):
all_deltas[i] = all_btemps[i] - all_btemps[i-1]
    ad_counts = np.histogram(all_deltas, bins)[0].astype(float)
result = {k: np.zeros(bins.size - 1) for k in behavior_types}
for behav in behavior_types:
b_dtemps = all_deltas[all_behavs == behav]
        result[behav] = np.histogram(b_dtemps, bins)[0].astype(float) / ad_counts
return result | 5e33a2e4b032d2f1ab0ffcf97e9bbe32e07991b0 | 1,586 |
def _gen_efficientnet(channel_multiplier=1.0, depth_multiplier=1.0, num_classes=1000, **kwargs):
"""Creates an EfficientNet model.
Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
Paper: https://arxiv.org/abs/1905.11946
EfficientNet params
name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
Args:
channel_multiplier: multiplier to number of channels per layer
depth_multiplier: multiplier to number of repeats per stage
"""
# ORIGINAL
# arch_def = [
# ['ds_r1_k3_s1_e1_c16_se0.25'],
# ['ir_r2_k3_s2_e6_c24_se0.25'],
# ['ir_r2_k5_s2_e6_c40_se0.25'],
# ['ir_r3_k3_s2_e6_c80_se0.25'],
# ['ir_r3_k5_s1_e6_c112_se0.25'],
# ['ir_r4_k5_s2_e6_c192_se0.25'],
# ['ir_r1_k3_s1_e6_c320_se0.25'],
# ]
# DEBUG
arch_def = [
['ds_r1_k3_s1_e1_c16'],
['ir_r2_k3_s2_e6_c24'],
['ir_r2_k5_s2_e6_c40'],
['ir_r3_k3_s2_e6_c80'],
['ir_r3_k5_s1_e6_c112'],
['ir_r4_k5_s2_e6_c192'],
['ir_r1_k3_s1_e6_c320'],
]
bn_momentum, bn_eps = _resolve_bn_params(kwargs)
# NOTE: other models in the family didn't scale the feature count
num_features = _round_channels(1280, channel_multiplier, 8, None)
model = GenEfficientNet(
_decode_arch_def(arch_def, depth_multiplier),
num_classes=num_classes,
stem_size=32,
channel_multiplier=channel_multiplier,
channel_divisor=8,
channel_min=None,
num_features=num_features,
bn_momentum=bn_momentum,
bn_eps=bn_eps,
act_fn=swish,
**kwargs
)
return model | 68a15978e44e81349c4162611be252daae7cb857 | 1,587 |
def do_add_application_type(request):
"""定义
dict_class=models.CharField(u"字典类别",max_length=255)
dict_type=models.CharField(u"字典类型",max_length=255)
dict_name=models.CharField(u"字典名称",max_length=255)
dict_value=models.CharField(u"字典值",max_length=255)
dict_status=models.IntegerField(u"字典状态")
dict_mark=models.CharField(u"字典备注",max_length=1000,null=True,blank=True)
"""
dict_class=request.POST.get("dict_class")
dict_type=request.POST.get("dict_type")
dict_name=request.POST.get("dict_name")
dict_code=request.POST.get("dict_code")
dict_status=0
dict_mark=request.POST.get("dict_mark")
try:
dicts = Dicts.objects.filter(dict_class=dict_class,dict_type=dict_type,dict_name=dict_name,dict_code=dict_code)
if dicts.exists():
return render_json({'code':True, 'msg':u"已存在相同记录信息"})
Dicts.objects.create(dict_class=dict_class,dict_type=dict_type
,dict_name=dict_name,dict_code=dict_code
,dict_status=dict_status,dict_mark=dict_mark)
        logger.info('insert object to Dicts succeeded')
        return render_json({'code': True, 'msg': u"数据保存成功"})
    except Exception as e:
        logger.error('insert object to Dicts failed: {}'.format(repr(e)))
        return render_json({'code': False, 'msg': u"数据保存失败"})
import numpy as np
def exponential_decay_function(distance: np.ndarray) -> np.ndarray:
"""Calculate exponential discount factor for action interaction weight matrix.
Parameters
-----------
distance: array-like, shape (len_list, )
Distance between two slots.
"""
if not isinstance(distance, np.ndarray) or distance.ndim != 1:
raise ValueError("distance must be 1-dimensional ndarray")
return np.exp(-distance) | ac434d098274e5119418a2c18641dadcd1ca8dca | 1,589 |
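# Usage sketch for exponential_decay_function() above:
# >>> exponential_decay_function(np.array([0.0, 1.0, 2.0]))
# array([1.        , 0.36787944, 0.13533528])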
from geopy.distance import vincenty  # geopy < 2.0; `pairwise` is assumed to be a project-local helper
def line_length(line, ellipsoid='WGS-84', shipping=True):
    """Length of a line in kilometers, given in geographic coordinates
Adapted from https://gis.stackexchange.com/questions/4022/looking-for-a-pythonic-way-to-calculate-the-length-of-a-wkt-linestring#answer-115285
Arguments:
line {Shapely LineString} -- a shapely LineString object with WGS-84 coordinates
ellipsoid {String} -- string name of an ellipsoid that `geopy` understands (see
http://geopy.readthedocs.io/en/latest/#module-geopy.distance)
Returns:
        Length of line in kilometers
"""
if shipping == True:
if line.geometryType() == 'MultiLineString':
return sum(line_length(segment) for segment in line)
return sum(
vincenty(tuple(reversed(a)), tuple(reversed(b)), ellipsoid=ellipsoid).kilometers
for a, b in pairwise(line.coords)
)
else:
if line.geometryType() == 'MultiLineString':
return sum(line_length(segment) for segment in line)
return sum(
vincenty(a, b, ellipsoid=ellipsoid).kilometers
for a, b in pairwise(line.coords)
) | bb80b01729f589c0645606581f4a1fc53836e037 | 1,590 |
import numpy as np
def corr2_coeff(x, y):
"""A magic function for computing correlation between matrices and arrays.
This code is 640x+ faster on large dataset compared to np.corrcoef().
------------------------------------------------------------------
author: Divakar (https://stackoverflow.com/users/3293881/divakar)
url: https://stackoverflow.com/questions/42677677
------------------------------------------------------------------
"""
# input arrays subtract row-wise mean
x_sub_mx = x - x.mean(1)[:, None]
y_sub_my = y - y.mean(1)[:, None]
# sum of squares across rows
ssx = (x_sub_mx ** 2).sum(1)
ssy = (y_sub_my ** 2).sum(1)
return np.dot(x_sub_mx, y_sub_my.T) / np.sqrt(np.dot(ssx[:, None], ssy[None])) | 5834294b9a67efdeecfde4546a805d1d136b8796 | 1,591 |
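# Sanity check for corr2_coeff() above: it matches the cross block of
# np.corrcoef on the stacked inputs.
a = np.random.rand(4, 100)
b = np.random.rand(3, 100)
assert np.allclose(corr2_coeff(a, b), np.corrcoef(a, b)[:4, 4:])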
from typing import Optional
def get_database_url(track: str) -> Optional[URL]:
"""
Get the database URL based on the environment
How the database URL is selected:
1. If a predefined URL for the track is set, use that
2. If no predefined URL is set, generate one based on the preferred database type
"""
database_default_port_mapping = {MYSQL: 3306, POSTGRES: 5432}
uppercase_track = track.upper()
track_database_url = env.str(f"K8S_{uppercase_track}_DATABASE_URL", "")
if track_database_url:
return make_url(track_database_url)
database_type = get_database_type()
if not database_type:
return None
deploy_name = get_deploy_name(track)
database_port = database_default_port_mapping[database_type]
database_host = f"{deploy_name}-db-{database_type}"
database_url = (
f""
f"{database_type}://{settings.DATABASE_USER}:{settings.DATABASE_PASSWORD}"
f"@{database_host}:{database_port}"
f"/{settings.DATABASE_DB}"
)
return make_url(database_url) | 6a3ccd8bacff1f78bbd21728ca45dd7ae74be7d8 | 1,592 |
import unittest
def build_suite():
"""A function."""
#suite = unittest.TestSuite()
#suite.addTest(WidgetTestCase('test_default_size'))
#suite.addTest(WidgetTestCase('test_resize'))
suite = unittest.TestLoader().loadTestsFromTestCase(WidgetTestCase)
return suite | 2984f7a149d224dfc5d0a17b6c8eaed139234c6b | 1,593 |
def get_quantile(data, percentage, **kwargs):
"""
Assuming the dataset is loaded as type `np.array`, and has shape
(num_samples, num_features).
:param data: Provided dataset, assume each row is a data sample and \
each column is one feature.
:type data: `np.ndarray`
:param percentage: Quantile or sequence of quantiles to compute, \
which must be between 0 and 1 inclusive.
:type percentage: `float` or `np.array` of `float`
:param kwargs: Dictionary of differential privacy arguments \
for computing the specified quantile of each feature across all samples, \
e.g., epsilon, etc.
:type kwargs: `dict`
    :return: A vector of shape (1, num_features) storing the
        specified quantile of each feature across all samples.
:rtype: `np.array` of `float`
"""
try:
quantile_vec = np.quantile(data, q=percentage, axis=0)
except Exception as ex:
raise FLException('Error occurred when calculating '
'the quantile. ' + str(ex))
return quantile_vec | ddb02aff1e441696a9a2813d772580c5fdef0ddb | 1,594 |
def clean_repository_clone_url( repository_clone_url ):
"""Return a URL that can be used to clone a tool shed repository, eliminating the protocol and user if either exists."""
if repository_clone_url.find( '@' ) > 0:
# We have an url that includes an authenticated user, something like:
# http://[email protected]:9009/repos/some_username/column
items = repository_clone_url.split( '@' )
tmp_url = items[ 1 ]
elif repository_clone_url.find( '//' ) > 0:
# We have an url that includes only a protocol, something like:
# http://bx.psu.edu:9009/repos/some_username/column
items = repository_clone_url.split( '//' )
tmp_url = items[ 1 ]
else:
tmp_url = repository_clone_url
return tmp_url | c1d274e907d73aceaa5f1e2c52336edf1638cd8a | 1,595 |
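# Examples for clean_repository_clone_url() above, mirroring the two cases
# described in its comments:
assert (clean_repository_clone_url('http://[email protected]:9009/repos/some_username/column')
        == 'bx.psu.edu:9009/repos/some_username/column')
assert (clean_repository_clone_url('http://bx.psu.edu:9009/repos/some_username/column')
        == 'bx.psu.edu:9009/repos/some_username/column')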
import torch
def calculate_uncertainty_ins_seg(logits, classes):
"""
    We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the
foreground class in `classes`.
Args:
logits (Tensor): A tensor of shape (R, C, ...) or (R, 1, ...) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images and C is
the number of foreground classes. The values are logits.
        classes (list): A list of length R that contains either the predicted or ground truth class
            for each predicted mask.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
if logits.shape[1] == 1:
gt_class_logits = logits.clone()
else:
gt_class_logits = logits[
torch.arange(logits.shape[0], device=logits.device), classes
].unsqueeze(1)
return -(torch.abs(gt_class_logits)) | 794d614d63ca5df06f00ce706f6ca39ae85cfdff | 1,596 |
import torch
def euclidean_distance(x, y):
"""
Compute Euclidean distance between two Variable matrices.
---
param:
x: PyTorch Variable with shape (m, d)
y: PyTorch Variable with shape (n, d)
return:
distance: PyTorch Variable with shape (m, n)
"""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
distance = xx + yy
    # in-place: distance = 1 * distance + (-2) * (x @ y.t())
    distance.addmm_(x, y.t(), beta=1, alpha=-2)
distance = distance.clamp(min=1e-12).sqrt()
return distance | 03c32aff1d0c31b7d713851e1885d2aa492dad57 | 1,597 |
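# Sanity check for euclidean_distance() above against torch.cdist.
x = torch.randn(3, 4)
y = torch.randn(5, 4)
d = euclidean_distance(x, y)
assert d.shape == (3, 5)
assert torch.allclose(d, torch.cdist(x, y), atol=1e-4)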
from gi.repository import Gio
def gsettings_set(schema, path, key, value):
    """Set the value of a key in a gsettings schema."""
if path is None:
gsettings = Gio.Settings.new(schema)
else:
gsettings = Gio.Settings.new_with_path(schema, path)
if isinstance(value, list):
return gsettings.set_strv(key, value)
if isinstance(value, int):
return gsettings.set_int(key, value)
if isinstance(value, str):
return gsettings.set_string(key, value) | 29cddb07c10099bc70c1e823d3ffd1b125cf889a | 1,598 |
import numpy as np
def initialize_parameters_deep(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01
parameters['b' + str(l)] = np.zeros(shape=(layer_dims[l], 1))
### END CODE HERE ###
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters | 374a684dfe54aa0d65ea8f25b61f72a4fc21144e | 1,599 |
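# Usage sketch for initialize_parameters_deep() above: a network with input
# size 5 and layer sizes 4 and 3.
params = initialize_parameters_deep([5, 4, 3])
print(params['W1'].shape, params['b1'].shape)  # (4, 5) (4, 1)
print(params['W2'].shape, params['b2'].shape)  # (3, 4) (3, 1)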